// Copyright 2018 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <fbl/algorithm.h>
#include <fbl/auto_call.h>
#include <fbl/ref_ptr.h>
#include <fbl/unique_ptr.h>
#include <lib/zx/thread.h>
#include <lib/zx/vmar.h>
#include <lib/zx/vmo.h>
#include <zircon/status.h>
#include <zircon/syscalls.h>

#include <assert.h>
#include <atomic>
#include <errno.h>
#include <fcntl.h>
#include <getopt.h>
#include <inttypes.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <threads.h>
#include <unistd.h>

#include "stress_test.h"

class VmStressTest : public StressTest {
public:
    VmStressTest() = default;
    virtual ~VmStressTest() = default;

    virtual zx_status_t Start();
    virtual zx_status_t Stop();

    virtual const char* name() const { return "VM Stress"; }

private:
    int stress_thread();

    thrd_t threads_[16]{};

    // used by the worker threads at runtime
    std::atomic<bool> shutdown_{false};
    zx::vmo vmo_{};
};

// our singleton
VmStressTest vmstress;
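// (the StressTest base class is assumed to register each instance with the
// test harness declared in stress_test.h, e.g. from its constructor, so
// defining this singleton is what makes the test discoverable)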

// VM Stresser
//
// The current algorithm creates a single VMO of fairly large size and hands a
// handle to a pool of worker threads, which then randomly
// commit/decommit/read/write/map/unmap the vmo asynchronously. It is intended
// to flush out internal races within a single VMO and within the VMAR
// mapping/unmapping system.
//
// It currently does not validate that any given operation was successfully
// performed, only that the apis do not return an error.
//
// It will evolve over time to use multiple VMOs simultaneously, along with
// cloned vmos.

int VmStressTest::stress_thread() {
    zx_status_t status;

    uintptr_t ptr = 0;
    uint64_t vmo_size = 0;
    status = vmo_.get_size(&vmo_size);
    ZX_ASSERT(status == ZX_OK);
    ZX_ASSERT(vmo_size > 0);

    // allocate a local buffer
    const size_t buf_size = PAGE_SIZE * 16;
    fbl::unique_ptr<uint8_t[]> buf{new uint8_t[buf_size]};
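    // (buf_size is 16 pages: 64KB with the 4K page size Zircon uses)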

    // local helper routines to calculate a random range within a vmo and
    // a range appropriate to read into the local buffer above
    auto rand_vmo_range = [vmo_size](uint64_t* out_offset, uint64_t* out_size) {
        *out_offset = rand() % vmo_size;
        *out_size = fbl::min(rand() % vmo_size, vmo_size - *out_offset);
    };
    auto rand_buffer_range = [vmo_size](uint64_t* out_offset, uint64_t* out_size) {
        *out_size = rand() % buf_size;
        *out_offset = rand() % (vmo_size - *out_size);
    };
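    // (e.g. with a 16MB vmo and the 64KB buffer above, rand_buffer_range
    // yields a length in [0, 64KB) and an offset chosen so that
    // offset + length always lands within the vmo)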

    // the buffer must be strictly smaller than the vmo so rand_buffer_range
    // above can always pick a valid offset
    ZX_ASSERT(buf_size < vmo_size);

    while (!shutdown_.load()) {
        uint64_t off, len;

        int r = rand() % 100;
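        // the case ranges below are a GCC/Clang extension; the weights work
        // out to 10% each for the commit/decommit/map/read/write syscall
        // paths and 25% each for direct reads and writes through the mapping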
        switch (r) {
        case 0 ... 9: // commit a range of the vmo
            Printf("c");
            rand_vmo_range(&off, &len);
            status = vmo_.op_range(ZX_VMO_OP_COMMIT, off, len, nullptr, 0);
            if (status != ZX_OK) {
                fprintf(stderr, "failed to commit range, error %d (%s)\n",
                        status, zx_status_get_string(status));
            }
            break;
        case 10 ... 19: // decommit a range of the vmo
            Printf("d");
            rand_vmo_range(&off, &len);
            status = vmo_.op_range(ZX_VMO_OP_DECOMMIT, off, len, nullptr, 0);
            if (status != ZX_OK) {
                fprintf(stderr, "failed to decommit range, error %d (%s)\n",
                        status, zx_status_get_string(status));
            }
            break;
        case 20 ... 29: // map or remap the vmo
            if (ptr) {
                // unmap the vmo if it's currently mapped
                Printf("u");
                status = zx::vmar::root_self()->unmap(ptr, vmo_size);
                if (status != ZX_OK) {
                    fprintf(stderr, "failed to unmap range, error %d (%s)\n",
                            status, zx_status_get_string(status));
                }
                ptr = 0;
            }
            // map it somewhere; passing vmar offset 0 without ZX_VM_SPECIFIC
            // lets the kernel pick the address
            Printf("m");
            status = zx::vmar::root_self()->map(0, vmo_, 0, vmo_size,
                                                ZX_VM_PERM_READ | ZX_VM_PERM_WRITE, &ptr);
            if (status != ZX_OK) {
                fprintf(stderr, "failed to map range, error %d (%s)\n",
                        status, zx_status_get_string(status));
            }
            break;
        case 30 ... 39:
            // read from a random range of the vmo
            Printf("r");
            rand_buffer_range(&off, &len);
            status = vmo_.read(buf.get(), off, len);
            if (status != ZX_OK) {
                fprintf(stderr, "error reading from vmo, error %d (%s)\n",
                        status, zx_status_get_string(status));
            }
            break;
        case 40 ... 49:
            // write to a random range of the vmo
            Printf("w");
            rand_buffer_range(&off, &len);
            status = vmo_.write(buf.get(), off, len);
            if (status != ZX_OK) {
                fprintf(stderr, "error writing to vmo, error %d (%s)\n",
                        status, zx_status_get_string(status));
            }
            break;
        case 50 ... 74:
            // read from a random range of the vmo via a direct memory reference
            if (ptr) {
                Printf("R");
                rand_buffer_range(&off, &len);
                memcpy(buf.get(), reinterpret_cast<const void*>(ptr + off), len);
            }
            break;
        case 75 ... 99:
            // write to a random range of the vmo via a direct memory reference
            if (ptr) {
                Printf("W");
                rand_buffer_range(&off, &len);
                memcpy(reinterpret_cast<void*>(ptr + off), buf.get(), len);
            }
            break;
        }

        fflush(stdout);
    }

    // tear down the mapping if one is still live
    if (ptr) {
        status = zx::vmar::root_self()->unmap(ptr, vmo_size);
    }

    return 0;
}

zx_status_t VmStressTest::Start() {
    const uint64_t free_bytes = kmem_stats_.free_bytes;

    // scale the size of the VMO we create based on the amount of free memory
    // in the system. 1/64th of free memory generates a fairly sizeable vmo
    // (16MB per 1GB free).
    const uint64_t vmo_test_size = free_bytes / 64;

    PrintfAlways("VM stress test: using vmo of size %" PRIu64 "\n", vmo_test_size);

    // create a test vmo
    auto status = zx::vmo::create(vmo_test_size, 0, &vmo_);
    if (status != ZX_OK)
        return status;

    // create a pile of threads
    // TODO: scale based on the number of cores in the system and/or command line arg
    auto worker = [](void* arg) -> int {
        VmStressTest* test = static_cast<VmStressTest*>(arg);

        return test->stress_thread();
    };

    for (auto& t : threads_) {
        thrd_create_with_name(&t, worker, this, "vmstress_worker");
    }

    return ZX_OK;
}

zx_status_t VmStressTest::Stop() {
    shutdown_.store(true);

    for (auto& t : threads_) {
        thrd_join(t, nullptr);
    }

    return ZX_OK;
}
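
// A minimal sketch of driving this test directly (the real harness in
// stress_test.h presumably owns the actual run loop and timing):
//
//   vmstress.Start();
//   zx_nanosleep(zx_deadline_after(ZX_SEC(10)));
//   vmstress.Stop();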