// Copyright 2018 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <fbl/algorithm.h>
#include <fbl/atomic.h>
#include <fbl/auto_call.h>
#include <fbl/ref_ptr.h>
#include <fbl/unique_ptr.h>
#include <lib/zx/thread.h>
#include <lib/zx/vmar.h>
#include <lib/zx/vmo.h>
#include <zircon/device/sysinfo.h>
#include <zircon/status.h>
#include <zircon/syscalls.h>

#include <assert.h>
#include <errno.h>
#include <fcntl.h>
#include <getopt.h>
#include <inttypes.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <threads.h>
#include <unistd.h>

#include "stress_test.h"

class VmStressTest : public StressTest {
public:
    VmStressTest() = default;
    virtual ~VmStressTest() = default;

    virtual zx_status_t Start();
    virtual zx_status_t Stop();

    virtual const char* name() const { return "VM Stress"; }

private:
    int stress_thread();

    thrd_t threads_[16]{};

    // used by the worker threads at runtime
    fbl::atomic<bool> shutdown_{false};
    zx::vmo vmo_{};
};

// our singleton
VmStressTest vmstress;

// VM Stresser
//
// The current algorithm creates a single VMO of fairly large size and hands a
// handle to a pool of worker threads that then randomly commit/decommit/read/write/map/unmap
// the vmo asynchronously. Intended to pick out any internal races with a single VMO and
// with the VMAR mapping/unmapping system.
//
// Currently does not validate that any given operation was successfully performed, only
// that the APIs do not return an error.
//
// Will evolve over time to use multiple VMOs simultaneously along with cloned vmos.

int VmStressTest::stress_thread() {
    zx_status_t status;

    uintptr_t ptr = 0;
    uint64_t vmo_size = 0;
    status = vmo_.get_size(&vmo_size);
    ZX_ASSERT(status == ZX_OK);
    ZX_ASSERT(vmo_size > 0);

    // allocate a local buffer
    const size_t buf_size = PAGE_SIZE * 16;
    fbl::unique_ptr<uint8_t[]> buf{new uint8_t[buf_size]};

    // local helper routines to calculate a random range within the vmo and
    // a range small enough to transfer through the local buffer above
    auto rand_vmo_range = [vmo_size](uint64_t *out_offset, uint64_t *out_size) {
        *out_offset = rand() % vmo_size;
        *out_size = fbl::min(rand() % vmo_size, vmo_size - *out_offset);
    };
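    // the helper below bounds the size by buf_size so the resulting transfer
    // always fits within the local buffer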
    auto rand_buffer_range = [vmo_size](uint64_t *out_offset, uint64_t *out_size) {
        *out_size = rand() % buf_size;
        *out_offset = rand() % (vmo_size - *out_size);
    };

    ZX_ASSERT(buf_size < vmo_size);

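    // main loop: keep applying randomly chosen operations to the vmo until
    // Stop() raises the shutdown flag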
    while (!shutdown_.load()) {
        uint64_t off, len;

        int r = rand() % 100;
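        // the case ranges below use the GCC/Clang case-range extension; the buckets
        // weight the operations: 10% each for the commit, decommit, map, read, and
        // write syscalls, 25% each for direct memory reads and writes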
        switch (r) {
        case 0 ... 9: // commit a range of the vmo
            Printf("c");
            rand_vmo_range(&off, &len);
            status = vmo_.op_range(ZX_VMO_OP_COMMIT, off, len, nullptr, 0);
            if (status != ZX_OK) {
                fprintf(stderr, "failed to commit range, error %d (%s)\n", status, zx_status_get_string(status));
            }
            break;
        case 10 ... 19: // decommit a range of the vmo
            Printf("d");
            rand_vmo_range(&off, &len);
            status = vmo_.op_range(ZX_VMO_OP_DECOMMIT, off, len, nullptr, 0);
            if (status != ZX_OK) {
                fprintf(stderr, "failed to decommit range, error %d (%s)\n", status, zx_status_get_string(status));
            }
            break;
        case 20 ... 29:
            if (ptr) {
                // unmap the vmo if it was already mapped
                Printf("u");
                status = zx::vmar::root_self()->unmap(ptr, vmo_size);
                if (status != ZX_OK) {
                    fprintf(stderr, "failed to unmap range, error %d (%s)\n", status, zx_status_get_string(status));
                }
                ptr = 0;
            }
            // map it somewhere
            Printf("m");
            status = zx::vmar::root_self()->map(0, vmo_, 0, vmo_size,
                                                ZX_VM_PERM_READ | ZX_VM_PERM_WRITE, &ptr);
            if (status != ZX_OK) {
                fprintf(stderr, "failed to map range, error %d (%s)\n", status, zx_status_get_string(status));
            }
            break;
        case 30 ... 39:
            // read from a random range of the vmo
            Printf("r");
            rand_buffer_range(&off, &len);
            status = vmo_.read(buf.get(), off, len);
            if (status != ZX_OK) {
                fprintf(stderr, "failed to read from vmo, error %d (%s)\n", status, zx_status_get_string(status));
            }
            break;
        case 40 ... 49:
            // write to a random range of the vmo
            Printf("w");
            rand_buffer_range(&off, &len);
            status = vmo_.write(buf.get(), off, len);
            if (status != ZX_OK) {
                fprintf(stderr, "failed to write to vmo, error %d (%s)\n", status, zx_status_get_string(status));
            }
            break;
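        // the remaining cases touch the mapping directly, racing page faults on the
        // mapped region against the map/unmap and commit/decommit operations above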
        case 50 ... 74:
            // read from a random range of the vmo via a direct memory reference
            if (ptr) {
                Printf("R");
                rand_buffer_range(&off, &len);
                memcpy(buf.get(), reinterpret_cast<const void *>(ptr + off), len);
            }
            break;
        case 75 ... 99:
            // write to a random range of the vmo via a direct memory reference
            if (ptr) {
                Printf("W");
                rand_buffer_range(&off, &len);
                memcpy(reinterpret_cast<void *>(ptr + off), buf.get(), len);
            }
            break;
        }

        fflush(stdout);
    }

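    // tear down any mapping that was still live when the shutdown flag was seen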
    if (ptr) {
        status = zx::vmar::root_self()->unmap(ptr, vmo_size);
    }

    return 0;
}

zx_status_t VmStressTest::Start() {
    const uint64_t free_bytes = kmem_stats_.free_bytes;

    // scale the size of the VMO we create based on the amount of free memory in the
    // system. 1/64th of free memory yields a fairly sizeable vmo (16MB per 1GB free).
    const uint64_t vmo_test_size = free_bytes / 64;

    PrintfAlways("VM stress test: using vmo of size %" PRIu64 "\n", vmo_test_size);

    // create a test vmo
    auto status = zx::vmo::create(vmo_test_size, 0, &vmo_);
    if (status != ZX_OK)
        return status;

    // create a pile of threads
    // TODO: scale based on the number of cores in the system and/or command line arg
    auto worker = [](void* arg) -> int {
        VmStressTest* test = static_cast<VmStressTest*>(arg);

        return test->stress_thread();
    };

    for (auto& t : threads_) {
        thrd_create_with_name(&t, worker, this, "vmstress_worker");
    }

    return ZX_OK;
}

zx_status_t VmStressTest::Stop() {
    shutdown_.store(true);

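    // wait for each worker to observe the flag and exit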
    for (auto& t : threads_) {
        thrd_join(t, nullptr);
    }

    return ZX_OK;
}