// Copyright 2018 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <lib/fzl/vmar-manager.h>
#include <lib/fzl/vmo-mapper.h>
#include <unittest/unittest.h>
#include <zircon/limits.h>
#include <zircon/rights.h>

#include <fbl/algorithm.h>

#include "vmo-probe.h"
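
// These tests exercise fzl::VmoMapper and fzl::VmarManager: VMO creation and
// mapping, rights reduction, containment within nested sub-VMARs, unmapping,
// and move semantics.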

namespace {

static constexpr size_t kSubVmarTestSize = 16 << 20;    // 16MB
static constexpr size_t kVmoTestSize = 512 << 10;       // 512KB

template <typename T>
using RefPtr = fbl::RefPtr<T>;
using VmarManager = fzl::VmarManager;
using VmoMapper = fzl::VmoMapper;
using AccessType = vmo_probe::AccessType;

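// Returns true if the address range described by |contained| lies entirely
// within the address range described by |container|.  Both arguments must
// expose start() and size() accessors.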
template <typename T, typename U>
bool contained_in(const T& contained, const U& container) {
    uintptr_t contained_start = reinterpret_cast<uintptr_t>(contained.start());
    uintptr_t contained_end = contained_start + contained.size();
    uintptr_t container_start = reinterpret_cast<uintptr_t>(container.start());
    uintptr_t container_end = container_start + container.size();

    return (contained_start <= contained_end) &&
           (contained_start >= container_start) &&
           (contained_end <= container_end);
}

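// Core test body.  Creates |vmar_levels| nested sub-VMARs (zero means "map in
// the root VMAR"), then exercises either VmoMapper::CreateAndMap (when
// |test_create| is true) or VmoMapper::Map (when it is false) against the
// innermost VMAR.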
bool vmar_vmo_core_test(uint32_t vmar_levels, bool test_create) {
    BEGIN_TEST;

    RefPtr<VmarManager> managers[2];
    RefPtr<VmarManager> target_vmar;

    ASSERT_LE(vmar_levels, fbl::count_of(managers));
    size_t vmar_size = kSubVmarTestSize;
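    // Each sub-VMAR must fit inside its parent, so halve the requested size at
    // every nesting level.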
    for (uint32_t i = 0; i < vmar_levels; ++i) {
        managers[i] = VmarManager::Create(vmar_size, i ? managers[i - 1] : nullptr);
        ASSERT_NONNULL(managers[i], "Failed to create VMAR manager");

        if (i) {
            ASSERT_TRUE(contained_in(*managers[i], *managers[i - 1]),
                        "Sub-VMAR is not contained within its parent!");
        }

        vmar_size >>= 1u;
    }

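    // Map into the innermost sub-VMAR, if any; otherwise leave target_vmar
    // null so that mappings land in the root VMAR.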
    if (vmar_levels) {
        target_vmar = managers[vmar_levels - 1];
    }

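    // Table of mapping scenarios.  access_flags holds the ZX_VM_PERM_* flags
    // for the mapping; vmo_rights holds the rights requested for the VMO
    // handle returned by CreateAndMap; test_offset/test_size select a partial
    // or full mapping (a size of zero means "map everything after the
    // offset"); start records the mapped address for the probe phase below.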
    struct {
        uint32_t access_flags;
        zx_rights_t vmo_rights;
        size_t test_offset;
        size_t test_size;
        void* start;
    } kVmoTests[] = {
        { .access_flags = ZX_VM_PERM_READ | ZX_VM_PERM_WRITE,
          .vmo_rights = ZX_RIGHT_SAME_RIGHTS,
          .test_offset = 0,
          .test_size = kVmoTestSize >> 1,
          .start = nullptr,
        },
        { .access_flags = ZX_VM_PERM_READ,
          .vmo_rights = ZX_RIGHT_READ | ZX_RIGHT_MAP,
          .test_offset = 0,
          .test_size = kVmoTestSize,
          .start = nullptr,
        },
        // TODO(johngro): We are not allowed to map pages as write-only.  Need
        // to determine whether this is working as intended.
#if 0
        { .access_flags = ZX_VM_PERM_WRITE,
          .vmo_rights = ZX_RIGHT_WRITE | ZX_RIGHT_MAP,
          .test_offset = 0,
          .test_size = 0,
          .start = nullptr,
        },
#endif
        { .access_flags = 0,
          .vmo_rights = 0,
          .test_offset = 0,
          .test_size = 0,
          .start = nullptr,
        },
        { .access_flags = 0,
          .vmo_rights = 0,
          .test_offset = kVmoTestSize >> 1,
          .test_size = 0,
          .start = nullptr,
        },
    };

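    // Run the scenarios twice.  On the first pass the mappings are torn down
    // with explicit Unmap() calls; on the second pass the mappers are simply
    // allowed to fall out of scope, which should unmap automatically.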
    for (uint32_t pass = 0; pass < 2; ++pass) {
        {
            VmoMapper mappers[fbl::count_of(kVmoTests)];
            zx::vmo vmo_handles[fbl::count_of(kVmoTests)];
            zx_status_t res;

            for (size_t i = 0; i < fbl::count_of(kVmoTests); ++i) {
                auto& t = kVmoTests[i];

                for (uint32_t create_map_pass = 0; create_map_pass < 2; ++create_map_pass) {
                    // On the first create/map pass, the create/map operation
                    // should succeed.  On the second pass, it should fail with
                    // BAD_STATE, since the mapper is already in use.
                    zx_status_t expected_cm_res = create_map_pass ? ZX_ERR_BAD_STATE : ZX_OK;

                    if (test_create) {
                        // If we are testing CreateAndMap, call it with the mapping
                        // rights and the proper rights reduction for the VMO it hands
                        // back to us.  Hold onto the returned handle in vmo_handles.
                        res = mappers[i].CreateAndMap(kVmoTestSize,
                                                      t.access_flags,
                                                      target_vmar,
                                                      &vmo_handles[i],
                                                      t.vmo_rights);
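                        // CreateAndMap always maps the entire VMO; record that
                        // so the probe phase below checks the full region.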
                        t.test_size = kVmoTestSize;

                        ASSERT_EQ(res, expected_cm_res);
                        ASSERT_TRUE(vmo_handles[i].is_valid());
                    } else {
                        // If we are testing Map, and this is the first pass, create the VMOs we
                        // will pass to map, then do so.
                        if (create_map_pass == 0) {
                            res = zx::vmo::create(kVmoTestSize, 0, &vmo_handles[i]);
                            ASSERT_EQ(res, ZX_OK);
                            ASSERT_TRUE(vmo_handles[i].is_valid());
                        }

                        res = mappers[i].Map(vmo_handles[i],
                                             t.test_offset,
                                             t.test_size,
                                             t.access_flags,
                                             target_vmar);
                        ASSERT_EQ(res, expected_cm_res);

                        // If this was the first VMO we have mapped during this test
                        // run, and we requested only a partial map, and it was mapped
                        // in a sub-vmar, and the end of the VMO is not aligned with the
                        // end of the VMAR, then check to make sure that we cannot read
                        // or write past the end of the partial mapping.
                        //
                        // TODO(johngro): It would be nice to always do these checks,
                        // but we do not have a lot of control over whether or not
                        // something else may have been mapped adjacent to our mapping,
                        // hence all of the restrictions described above.
                        if (!i && !create_map_pass && target_vmar &&
                            t.test_size && (t.test_size < kVmoTestSize)) {
                            uintptr_t vmo_end = reinterpret_cast<uintptr_t>(mappers[i].start());
                            uintptr_t vmar_end = reinterpret_cast<uintptr_t>(target_vmar->start());

                            vmo_end += mappers[i].size();
                            vmar_end += target_vmar->size();
                            if (vmo_end < vmar_end) {
                                void* probe_tgt = reinterpret_cast<void*>(vmo_end);
                                ASSERT_TRUE(vmo_probe::probe_access(probe_tgt, AccessType::Rd, false));
                                ASSERT_TRUE(vmo_probe::probe_access(probe_tgt, AccessType::Wr, false));
                            }
                        }
                    }
                }

                // Stash the address of the mapped VMO in the test state.
                t.start = mappers[i].start();

                // If we mapped inside a sub-VMAR, then the mapping should be
                // contained within that VMAR.
                if (target_vmar != nullptr) {
                    ASSERT_TRUE(contained_in(mappers[i], *target_vmar));
                }

                if (test_create) {
                    // If we created this VMO, make sure that its rights were reduced correctly.
                    zx_rights_t expected_rights = t.vmo_rights != ZX_RIGHT_SAME_RIGHTS
                                                ? t.vmo_rights
                                                : ZX_DEFAULT_VMO_RIGHTS;
                    zx_info_handle_basic_t info;
                    res = vmo_handles[i].get_info(ZX_INFO_HANDLE_BASIC, &info, sizeof(info),
                                                  nullptr, nullptr);

                    ASSERT_EQ(res, ZX_OK, "Failed to get basic object info");
                    ASSERT_EQ(info.rights, expected_rights, "Rights reduction failure");
                } else {
                    // If we mapped this VMO, and we passed zero for the map size, the Mapper should
                    // have mapped the entire VMO after the offset and its size should reflect that.
                    if (!t.test_size) {
                        ASSERT_EQ(mappers[i].size() + t.test_offset, kVmoTestSize);
                        t.test_size = kVmoTestSize - t.test_offset;
                    }
                }
            }

            // Now that everything has been created and mapped, make sure that
            // everything checks out by probing and looking for seg-faults
            // if/when we violate permissions.
            for (const auto& t : kVmoTests) {
                ASSERT_TRUE(vmo_probe::probe_verify_region(t.start, t.test_size, t.access_flags));
            }

            // Release all of our VMO handles, then verify again.  Releasing
            // these handles should not cause our mappings to go away.
            for (auto& h : vmo_handles) {
                h.reset();
            }

            for (const auto& t : kVmoTests) {
                ASSERT_TRUE(vmo_probe::probe_verify_region(t.start, t.test_size, t.access_flags));
            }

            // If this is the first pass, manually unmap all of the VmoMappers
            // and verify that we can no longer access any of the previously
            // mapped regions.
            if (!pass) {
                for (auto& m : mappers) {
                    m.Unmap();
                }

                for (const auto& t : kVmoTests) {
                    ASSERT_TRUE(vmo_probe::probe_verify_region(t.start, t.test_size, 0));
                }
            }
        }

        // If this is the second pass, then we didn't manually call unmap; we
        // just let the mappers go out of scope.  Make sure that everything
        // auto-unmapped as it should.
        if (pass) {
            for (const auto& t : kVmoTests) {
                ASSERT_TRUE(vmo_probe::probe_verify_region(t.start, t.test_size, 0));
            }
        }
    }

    // TODO(johngro): release all of our VMAR references and then make certain
    // that they were destroyed as they should have been.  Right now this is
    // rather difficult as we cannot fetch mapping/vmar info for our current
    // process, so we are skipping the check.

    END_TEST;
}

bool vmo_create_and_map_root_test() {
    BEGIN_TEST;
    ASSERT_TRUE(vmar_vmo_core_test(0, true));
    END_TEST;
}

bool vmo_create_and_map_sub_vmar_test() {
    BEGIN_TEST;
    ASSERT_TRUE(vmar_vmo_core_test(1, true));
    END_TEST;
}

bool vmo_create_and_map_sub_sub_vmar_test() {
    BEGIN_TEST;
    ASSERT_TRUE(vmar_vmo_core_test(2, true));
    END_TEST;
}

bool vmo_map_root_test() {
    BEGIN_TEST;
    ASSERT_TRUE(vmar_vmo_core_test(0, false));
    END_TEST;
}

bool vmo_map_sub_vmar_test() {
    BEGIN_TEST;
    ASSERT_TRUE(vmar_vmo_core_test(1, false));
    END_TEST;
}

bool vmo_map_sub_sub_vmar_test() {
    BEGIN_TEST;
    ASSERT_TRUE(vmar_vmo_core_test(2, false));
    END_TEST;
}

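// Verifies VmoMapper move semantics: move-assignment and move-construction
// transfer ownership of an existing mapping without remapping it, and moving
// onto a mapper which already holds a mapping unmaps the old region first.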
bool vmo_mapper_move_test() {
    BEGIN_TEST;

    constexpr uint32_t ACCESS_FLAGS = ZX_VM_PERM_READ | ZX_VM_PERM_WRITE;
    void* addr;
    size_t size;
    {
        // Create two mappers, and make sure neither has mapped anything.
        VmoMapper mapper1, mapper2;

        ASSERT_NULL(mapper1.start());
        ASSERT_EQ(mapper1.size(), 0);
        ASSERT_NULL(mapper2.start());
        ASSERT_EQ(mapper2.size(), 0);

        // Create and map a page in mapper1, and make sure we can probe it.
        zx_status_t res;
        res = mapper1.CreateAndMap(ZX_PAGE_SIZE, ACCESS_FLAGS);
        addr = mapper1.start();
        size = mapper1.size();

        ASSERT_EQ(res, ZX_OK);
        ASSERT_TRUE(vmo_probe::probe_verify_region(addr, size, ACCESS_FLAGS));

        // Move the mapping from mapper1 into mapper2 using assignment.  Make sure
        // the region is still mapped and has not moved in our address space.
        mapper2 = fbl::move(mapper1);

        ASSERT_NULL(mapper1.start());
        ASSERT_EQ(mapper1.size(), 0);
        ASSERT_EQ(mapper2.start(), addr);
        ASSERT_EQ(mapper2.size(), size);
        ASSERT_TRUE(vmo_probe::probe_verify_region(addr, size, ACCESS_FLAGS));

        // Now do the same thing, but this time move using construction.
        VmoMapper mapper3(fbl::move(mapper2));

        ASSERT_NULL(mapper2.start());
        ASSERT_EQ(mapper2.size(), 0);
        ASSERT_EQ(mapper3.start(), addr);
        ASSERT_EQ(mapper3.size(), size);
        ASSERT_TRUE(vmo_probe::probe_verify_region(addr, size, ACCESS_FLAGS));

        // Map a new region into mapper1, and make sure it is OK.
        res = mapper1.CreateAndMap(ZX_PAGE_SIZE, ACCESS_FLAGS);
        void* second_addr = mapper1.start();
        size_t second_size = mapper1.size();

        ASSERT_EQ(res, ZX_OK);
        ASSERT_TRUE(vmo_probe::probe_verify_region(second_addr, second_size, ACCESS_FLAGS));

        // Now, move mapper3 on top of mapper1 via assignment and make sure that
        // mapper1's old region is properly unmapped while mapper3's contents remain
        // mapped and are properly moved.
        mapper1 = fbl::move(mapper3);

        ASSERT_NULL(mapper3.start());
        ASSERT_EQ(mapper3.size(), 0);
        ASSERT_EQ(mapper1.start(), addr);
        ASSERT_EQ(mapper1.size(), size);
        ASSERT_TRUE(vmo_probe::probe_verify_region(addr, size, ACCESS_FLAGS));
        ASSERT_TRUE(vmo_probe::probe_verify_region(second_addr, second_size, 0));
    }

    // Finally, now that we have left the scope, the original mapping that we
    // have been moving around should be gone.
    ASSERT_NONNULL(addr);
    ASSERT_EQ(size, ZX_PAGE_SIZE);
    ASSERT_TRUE(vmo_probe::probe_verify_region(addr, size, 0));

    END_TEST;
}

}  // namespace

BEGIN_TEST_CASE(vmo_mapper_vmar_manager_tests)
RUN_NAMED_TEST("vmo_create_and_map_root", vmo_create_and_map_root_test)
RUN_NAMED_TEST("vmo_create_and_map_sub_vmar", vmo_create_and_map_sub_vmar_test)
RUN_NAMED_TEST("vmo_create_and_map_sub_sub_vmar", vmo_create_and_map_sub_sub_vmar_test)
RUN_NAMED_TEST("vmo_map_root", vmo_map_root_test)
RUN_NAMED_TEST("vmo_map_sub_vmar", vmo_map_sub_vmar_test)
RUN_NAMED_TEST("vmo_map_sub_sub_vmar", vmo_map_sub_sub_vmar_test)
RUN_NAMED_TEST("vmo_mapper_move", vmo_mapper_move_test)
END_TEST_CASE(vmo_mapper_vmar_manager_tests)
