// Copyright 2016 The Fuchsia Authors
//
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file or at
// https://opensource.org/licenses/MIT

#include <assert.h>
#include <err.h>
#include <fbl/alloc_checker.h>
#include <fbl/array.h>
#include <lib/unittest/unittest.h>
#include <vm/physmap.h>
#include <vm/vm.h>
#include <vm/vm_address_region.h>
#include <vm/vm_aspace.h>
#include <vm/vm_object.h>
#include <vm/vm_object_paged.h>
#include <vm/vm_object_physical.h>
#include <zircon/types.h>

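// Arch MMU flags used for the read/write mappings created throughout these tests.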
static const uint kArchRwFlags = ARCH_MMU_FLAG_PERM_READ | ARCH_MMU_FLAG_PERM_WRITE;

// Allocates a single page, translates it to a vm_page_t and frees it.
static bool pmm_smoke_test() {
    BEGIN_TEST;
    paddr_t pa;
    vm_page_t* page;

    zx_status_t status = pmm_alloc_page(0, &page, &pa);
    ASSERT_EQ(ZX_OK, status, "pmm_alloc single page");
    ASSERT_NE(nullptr, page, "pmm_alloc single page");
    ASSERT_NE(0u, pa, "pmm_alloc single page");

    vm_page_t* page2 = paddr_to_vm_page(pa);
    ASSERT_EQ(page2, page, "paddr_to_vm_page on single page");

    pmm_free_page(page);
    END_TEST;
}

// Allocates a bunch of pages then frees them.
static bool pmm_large_alloc_test() {
    BEGIN_TEST;
    list_node list = LIST_INITIAL_VALUE(list);

    static const size_t alloc_count = 1024;

    zx_status_t status = pmm_alloc_pages(alloc_count, 0, &list);
    EXPECT_EQ(ZX_OK, status, "pmm_alloc_pages a bunch of pages");
    EXPECT_EQ(alloc_count, list_length(&list),
              "pmm_alloc_pages a bunch of pages list count");

    pmm_free(&list);
    END_TEST;
}

// Allocates too many pages and makes sure it fails nicely.
static bool pmm_oversized_alloc_test() {
    BEGIN_TEST;
    list_node list = LIST_INITIAL_VALUE(list);

    static const size_t alloc_count =
        (128 * 1024 * 1024 * 1024ULL) / PAGE_SIZE; // 128GB

    zx_status_t status = pmm_alloc_pages(alloc_count, 0, &list);
    EXPECT_EQ(ZX_ERR_NO_MEMORY, status, "pmm_alloc_pages failed to alloc");
    EXPECT_TRUE(list_is_empty(&list), "pmm_alloc_pages list is empty");

    pmm_free(&list);
    END_TEST;
}

// Allocates one page and frees it.
static bool pmm_alloc_contiguous_one_test() {
    BEGIN_TEST;
    list_node list = LIST_INITIAL_VALUE(list);
    paddr_t pa;
    size_t count = 1U;
    zx_status_t status = pmm_alloc_contiguous(count, 0, PAGE_SIZE_SHIFT, &pa, &list);
    ASSERT_EQ(ZX_OK, status, "pmm_alloc_contiguous returned failure\n");
    ASSERT_EQ(count, list_length(&list), "pmm_alloc_contiguous list size is wrong");
    ASSERT_NE(nullptr, paddr_to_physmap(pa), "");
    pmm_free(&list);
    END_TEST;
}

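// Simple linear congruential generator (Numerical Recipes constants) used to
// produce deterministic test patterns.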
static uint32_t test_rand(uint32_t seed) {
    return (seed = seed * 1664525 + 1013904223);
}

// fill a region of memory with a pattern based on the address of the region
static void fill_region(uintptr_t seed, void* _ptr, size_t len) {
    uint32_t* ptr = (uint32_t*)_ptr;

    ASSERT(IS_ALIGNED((uintptr_t)ptr, 4));

    uint32_t val = (uint32_t)seed;
#if UINTPTR_MAX > UINT32_MAX
    val ^= (uint32_t)(seed >> 32);
#endif
    for (size_t i = 0; i < len / 4; i++) {
        ptr[i] = val;

        val = test_rand(val);
    }
}

// test a region of memory against a known pattern
static bool test_region(uintptr_t seed, void* _ptr, size_t len) {
    uint32_t* ptr = (uint32_t*)_ptr;

    ASSERT(IS_ALIGNED((uintptr_t)ptr, 4));

    uint32_t val = (uint32_t)seed;
#if UINTPTR_MAX > UINT32_MAX
    val ^= (uint32_t)(seed >> 32);
#endif
    for (size_t i = 0; i < len / 4; i++) {
        if (ptr[i] != val) {
            unittest_printf("value at %p (%zu) is incorrect: 0x%x vs 0x%x\n", &ptr[i], i, ptr[i],
                            val);
            return false;
        }

        val = test_rand(val);
    }

    return true;
}

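// Fills a region with a pattern derived from its own address, then verifies the
// pattern reads back intact.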
static bool fill_and_test(void* ptr, size_t len) {
    BEGIN_TEST;

    // fill it with a pattern
    fill_region((uintptr_t)ptr, ptr, len);

    // test that the pattern is read back properly
    auto result = test_region((uintptr_t)ptr, ptr, len);
    EXPECT_TRUE(result, "testing region for corruption");

    END_TEST;
}

// Allocates a region in kernel space, reads/writes it, then destroys it.
static bool vmm_alloc_smoke_test() {
    BEGIN_TEST;
    static const size_t alloc_size = 256 * 1024;

    // allocate a region of memory
    void* ptr;
    auto kaspace = VmAspace::kernel_aspace();
    auto err = kaspace->Alloc(
        "test", alloc_size, &ptr, 0, 0, kArchRwFlags);
    ASSERT_EQ(0, err, "VmAspace::Alloc region of memory");
    ASSERT_NE(nullptr, ptr, "VmAspace::Alloc region of memory");

    // fill with known pattern and test
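    // fill_and_test() runs its own BEGIN_TEST/END_TEST, so fold its result into
    // this test's all_ok flag.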
    if (!fill_and_test(ptr, alloc_size)) {
        all_ok = false;
    }

    // free the region
    err = kaspace->FreeRegion(reinterpret_cast<vaddr_t>(ptr));
    EXPECT_EQ(0, err, "VmAspace::FreeRegion region of memory");
    END_TEST;
}

// Allocates a contiguous region in kernel space, reads/writes it,
// then destroys it.
static bool vmm_alloc_contiguous_smoke_test() {
    BEGIN_TEST;
    static const size_t alloc_size = 256 * 1024;

    // allocate a region of memory
    void* ptr;
    auto kaspace = VmAspace::kernel_aspace();
    auto err = kaspace->AllocContiguous("test",
                                        alloc_size, &ptr, 0,
                                        VmAspace::VMM_FLAG_COMMIT, kArchRwFlags);
    ASSERT_EQ(0, err, "VmAspace::AllocContiguous region of memory");
    ASSERT_NE(nullptr, ptr, "VmAspace::AllocContiguous region of memory");

    // fill with known pattern and test
    if (!fill_and_test(ptr, alloc_size)) {
        all_ok = false;
    }

    // test that it is indeed contiguous
    unittest_printf("testing that region is contiguous\n");
    paddr_t last_pa = 0;
    for (size_t i = 0; i < alloc_size / PAGE_SIZE; i++) {
        paddr_t pa = vaddr_to_paddr((uint8_t*)ptr + i * PAGE_SIZE);
        if (last_pa != 0) {
            EXPECT_EQ(pa, last_pa + PAGE_SIZE, "region is contiguous");
        }

        last_pa = pa;
    }

    // free the region
    err = kaspace->FreeRegion(reinterpret_cast<vaddr_t>(ptr));
    EXPECT_EQ(0, err, "VmAspace::FreeRegion region of memory");
    END_TEST;
}

// Allocates a new address space and creates a few regions in it,
// then destroys it.
static bool multiple_regions_test() {
    BEGIN_TEST;
    void* ptr;
    static const size_t alloc_size = 16 * 1024;

    fbl::RefPtr<VmAspace> aspace = VmAspace::Create(0, "test aspace");
    ASSERT_NE(nullptr, aspace, "VmAspace::Create pointer");

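    // Switch this thread onto the new address space so the fills and reads below
    // fault through it; the old aspace is restored before Destroy().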
    vmm_aspace_t* old_aspace = get_current_thread()->aspace;
    vmm_set_active_aspace(reinterpret_cast<vmm_aspace_t*>(aspace.get()));

    // allocate region 0
    zx_status_t err = aspace->Alloc("test0", alloc_size, &ptr, 0, 0, kArchRwFlags);
    ASSERT_EQ(0, err, "VmAspace::Alloc region of memory");
    ASSERT_NE(nullptr, ptr, "VmAspace::Alloc region of memory");

    // fill with known pattern and test
    if (!fill_and_test(ptr, alloc_size)) {
        all_ok = false;
    }

    // allocate region 1
    err = aspace->Alloc("test1", 16384, &ptr, 0, 0, kArchRwFlags);
    ASSERT_EQ(0, err, "VmAspace::Alloc region of memory");
    ASSERT_NE(nullptr, ptr, "VmAspace::Alloc region of memory");

    // fill with known pattern and test
    if (!fill_and_test(ptr, alloc_size)) {
        all_ok = false;
    }

    // allocate region 2
    err = aspace->Alloc("test2", 16384, &ptr, 0, 0, kArchRwFlags);
    ASSERT_EQ(0, err, "VmAspace::Alloc region of memory");
    ASSERT_NE(nullptr, ptr, "VmAspace::Alloc region of memory");

    // fill with known pattern and test
    if (!fill_and_test(ptr, alloc_size)) {
        all_ok = false;
    }

    vmm_set_active_aspace(old_aspace);

    // free the address space all at once
    err = aspace->Destroy();
    EXPECT_EQ(0, err, "VmAspace::Destroy");
    END_TEST;
}

static bool vmm_alloc_zero_size_fails() {
    BEGIN_TEST;
    const size_t zero_size = 0;
    void* ptr;
    zx_status_t err = VmAspace::kernel_aspace()->Alloc(
        "test", zero_size, &ptr, 0, 0, kArchRwFlags);
    ASSERT_EQ(ZX_ERR_INVALID_ARGS, err, "");
    END_TEST;
}

static bool vmm_alloc_bad_specific_pointer_fails() {
    BEGIN_TEST;
    // bad specific pointer
    void* ptr = (void*)1;
    zx_status_t err = VmAspace::kernel_aspace()->Alloc(
        "test", 16384, &ptr, 0,
        VmAspace::VMM_FLAG_VALLOC_SPECIFIC | VmAspace::VMM_FLAG_COMMIT, kArchRwFlags);
    ASSERT_EQ(ZX_ERR_INVALID_ARGS, err, "");
    END_TEST;
}

static bool vmm_alloc_contiguous_missing_flag_commit_fails() {
    BEGIN_TEST;
    // should have VmAspace::VMM_FLAG_COMMIT
    const uint zero_vmm_flags = 0;
    void* ptr;
    zx_status_t err = VmAspace::kernel_aspace()->AllocContiguous(
        "test", 4096, &ptr, 0, zero_vmm_flags, kArchRwFlags);
    ASSERT_EQ(ZX_ERR_INVALID_ARGS, err, "");
    END_TEST;
}

static bool vmm_alloc_contiguous_zero_size_fails() {
    BEGIN_TEST;
    const size_t zero_size = 0;
    void* ptr;
    zx_status_t err = VmAspace::kernel_aspace()->AllocContiguous(
        "test", zero_size, &ptr, 0, VmAspace::VMM_FLAG_COMMIT, kArchRwFlags);
    ASSERT_EQ(ZX_ERR_INVALID_ARGS, err, "");
    END_TEST;
}

// Allocates a vm address space object directly, allows it to go out of scope.
static bool vmaspace_create_smoke_test() {
    BEGIN_TEST;
    auto aspace = VmAspace::Create(0, "test aspace");
    aspace->Destroy();
    END_TEST;
}

// Allocates a vm address space object directly, maps something on it,
// allows it to go out of scope.
static bool vmaspace_alloc_smoke_test() {
    BEGIN_TEST;
    auto aspace = VmAspace::Create(0, "test aspace2");

    void* ptr;
    auto err = aspace->Alloc("test", PAGE_SIZE, &ptr, 0, 0, kArchRwFlags);
    ASSERT_EQ(ZX_OK, err, "allocating region\n");

    // destroy the aspace, which should drop all the internal refs to it
    aspace->Destroy();

    // drop the ref held by this pointer
    aspace.reset();
    END_TEST;
}

// Doesn't do anything, just prints all aspaces.
// Should be run after all other tests so that people can manually comb
// through the output for leaked test aspaces.
static bool dump_all_aspaces() {
    BEGIN_TEST;
    unittest_printf("verify there are no test aspaces left around\n");
    DumpAllAspaces(/*verbose*/ true);
    END_TEST;
}

// Creates a vm object.
static bool vmo_create_test() {
    BEGIN_TEST;
    fbl::RefPtr<VmObject> vmo;
    zx_status_t status = VmObjectPaged::Create(PMM_ALLOC_FLAG_ANY, 0u, PAGE_SIZE, &vmo);
    ASSERT_EQ(status, ZX_OK, "");
    ASSERT_TRUE(vmo, "");
    EXPECT_FALSE(vmo->is_contiguous(), "vmo is not contig\n");
    EXPECT_FALSE(vmo->is_resizable(), "vmo is not resizable\n");
    END_TEST;
}

// Creates a vm object, commits memory.
static bool vmo_commit_test() {
    BEGIN_TEST;
    static const size_t alloc_size = PAGE_SIZE * 16;
    fbl::RefPtr<VmObject> vmo;
    zx_status_t status = VmObjectPaged::Create(PMM_ALLOC_FLAG_ANY, 0u, alloc_size, &vmo);
    ASSERT_EQ(status, ZX_OK, "vmobject creation\n");
    ASSERT_TRUE(vmo, "vmobject creation\n");

    uint64_t committed;
    auto ret = vmo->CommitRange(0, alloc_size, &committed);
    ASSERT_EQ(0, ret, "committing vm object\n");
    EXPECT_EQ(ROUNDUP_PAGE_SIZE(alloc_size), committed,
              "committing vm object\n");
    END_TEST;
}

// Creates a paged VMO, pins it, and verifies that decommit and resize fail
// until it is unpinned.
static bool vmo_pin_test() {
    BEGIN_TEST;

    static const size_t alloc_size = PAGE_SIZE * 16;
    fbl::RefPtr<VmObject> vmo;
    zx_status_t status = VmObjectPaged::Create(
        PMM_ALLOC_FLAG_ANY, VmObjectPaged::kResizable, alloc_size, &vmo);
    ASSERT_EQ(status, ZX_OK, "vmobject creation\n");
    ASSERT_TRUE(vmo, "vmobject creation\n");

    status = vmo->Pin(PAGE_SIZE, alloc_size);
    EXPECT_EQ(ZX_ERR_OUT_OF_RANGE, status, "pinning out of range\n");
    status = vmo->Pin(PAGE_SIZE, 0);
    EXPECT_EQ(ZX_OK, status, "pinning range of len 0\n");
    status = vmo->Pin(alloc_size + PAGE_SIZE, 0);
    EXPECT_EQ(ZX_ERR_OUT_OF_RANGE, status, "pinning out-of-range of len 0\n");

    status = vmo->Pin(PAGE_SIZE, 3 * PAGE_SIZE);
    EXPECT_EQ(ZX_ERR_NOT_FOUND, status, "pinning uncommitted range\n");
    status = vmo->Pin(0, alloc_size);
    EXPECT_EQ(ZX_ERR_NOT_FOUND, status, "pinning uncommitted range\n");

    uint64_t n;
    status = vmo->CommitRange(PAGE_SIZE, 3 * PAGE_SIZE, &n);
    EXPECT_EQ(ZX_OK, status, "committing range\n");

    status = vmo->Pin(0, alloc_size);
    EXPECT_EQ(ZX_ERR_NOT_FOUND, status, "pinning uncommitted range\n");
    status = vmo->Pin(PAGE_SIZE, 4 * PAGE_SIZE);
    EXPECT_EQ(ZX_ERR_NOT_FOUND, status, "pinning uncommitted range\n");
    status = vmo->Pin(0, 4 * PAGE_SIZE);
    EXPECT_EQ(ZX_ERR_NOT_FOUND, status, "pinning uncommitted range\n");

    status = vmo->Pin(PAGE_SIZE, 3 * PAGE_SIZE);
    EXPECT_EQ(ZX_OK, status, "pinning committed range\n");

    status = vmo->DecommitRange(PAGE_SIZE, 3 * PAGE_SIZE, &n);
    EXPECT_EQ(ZX_ERR_BAD_STATE, status, "decommitting pinned range\n");
    status = vmo->DecommitRange(PAGE_SIZE, PAGE_SIZE, &n);
    EXPECT_EQ(ZX_ERR_BAD_STATE, status, "decommitting pinned range\n");
    status = vmo->DecommitRange(3 * PAGE_SIZE, PAGE_SIZE, &n);
    EXPECT_EQ(ZX_ERR_BAD_STATE, status, "decommitting pinned range\n");

    vmo->Unpin(PAGE_SIZE, 3 * PAGE_SIZE);

    status = vmo->DecommitRange(PAGE_SIZE, 3 * PAGE_SIZE, &n);
    EXPECT_EQ(ZX_OK, status, "decommitting unpinned range\n");

    status = vmo->CommitRange(PAGE_SIZE, 3 * PAGE_SIZE, &n);
    EXPECT_EQ(ZX_OK, status, "committing range\n");
    status = vmo->Pin(PAGE_SIZE, 3 * PAGE_SIZE);
    EXPECT_EQ(ZX_OK, status, "pinning committed range\n");

    status = vmo->Resize(0);
    EXPECT_EQ(ZX_ERR_BAD_STATE, status, "resizing pinned range\n");

    vmo->Unpin(PAGE_SIZE, 3 * PAGE_SIZE);

    status = vmo->Resize(0);
    EXPECT_EQ(ZX_OK, status, "resizing unpinned range\n");

    END_TEST;
}

// Creates a paged VMO and pins the same pages multiple times.
static bool vmo_multiple_pin_test() {
    BEGIN_TEST;

    static const size_t alloc_size = PAGE_SIZE * 16;
    fbl::RefPtr<VmObject> vmo;
    zx_status_t status = VmObjectPaged::Create(PMM_ALLOC_FLAG_ANY, 0u, alloc_size, &vmo);
    ASSERT_EQ(status, ZX_OK, "vmobject creation\n");
    ASSERT_TRUE(vmo, "vmobject creation\n");

    uint64_t n;
    status = vmo->CommitRange(0, alloc_size, &n);
    EXPECT_EQ(ZX_OK, status, "committing range\n");

    status = vmo->Pin(0, alloc_size);
    EXPECT_EQ(ZX_OK, status, "pinning whole range\n");
    status = vmo->Pin(PAGE_SIZE, 4 * PAGE_SIZE);
    EXPECT_EQ(ZX_OK, status, "pinning subrange\n");

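    // The first page already holds one pin from the whole-range Pin() above, so
    // starting at 1 brings it to exactly VM_PAGE_OBJECT_MAX_PIN_COUNT pins.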
    for (unsigned int i = 1; i < VM_PAGE_OBJECT_MAX_PIN_COUNT; ++i) {
        status = vmo->Pin(0, PAGE_SIZE);
        EXPECT_EQ(ZX_OK, status, "pinning first page max times\n");
    }
    status = vmo->Pin(0, PAGE_SIZE);
    EXPECT_EQ(ZX_ERR_UNAVAILABLE, status, "page is pinned too much\n");

    vmo->Unpin(0, alloc_size);
    status = vmo->DecommitRange(PAGE_SIZE, 4 * PAGE_SIZE, &n);
    EXPECT_EQ(ZX_ERR_BAD_STATE, status, "decommitting pinned range\n");
    status = vmo->DecommitRange(5 * PAGE_SIZE, alloc_size - 5 * PAGE_SIZE, &n);
    EXPECT_EQ(ZX_OK, status, "decommitting unpinned range\n");

    vmo->Unpin(PAGE_SIZE, 4 * PAGE_SIZE);
    status = vmo->DecommitRange(PAGE_SIZE, 4 * PAGE_SIZE, &n);
    EXPECT_EQ(ZX_OK, status, "decommitting unpinned range\n");

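    // One pin on the first page was already dropped by the whole-range Unpin()
    // above; release all but the last remaining pin here.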
    for (unsigned int i = 2; i < VM_PAGE_OBJECT_MAX_PIN_COUNT; ++i) {
        vmo->Unpin(0, PAGE_SIZE);
    }
    status = vmo->DecommitRange(0, PAGE_SIZE, &n);
    EXPECT_EQ(ZX_ERR_BAD_STATE, status, "decommitting still-pinned page\n");

    vmo->Unpin(0, PAGE_SIZE);
    status = vmo->DecommitRange(0, PAGE_SIZE, &n);
    EXPECT_EQ(ZX_OK, status, "decommitting unpinned range\n");

    END_TEST;
}

// Creates a vm object, commits odd sized memory.
static bool vmo_odd_size_commit_test() {
    BEGIN_TEST;
    static const size_t alloc_size = 15;
    fbl::RefPtr<VmObject> vmo;
    zx_status_t status = VmObjectPaged::Create(PMM_ALLOC_FLAG_ANY, 0u, alloc_size, &vmo);
    ASSERT_EQ(status, ZX_OK, "vmobject creation\n");
    ASSERT_TRUE(vmo, "vmobject creation\n");

    uint64_t committed;
    auto ret = vmo->CommitRange(0, alloc_size, &committed);
    EXPECT_EQ(0, ret, "committing vm object\n");
    EXPECT_EQ(ROUNDUP_PAGE_SIZE(alloc_size), committed,
              "committing vm object\n");
    END_TEST;
}

static bool vmo_create_physical_test() {
    BEGIN_TEST;

    paddr_t pa;
    vm_page_t* vm_page;
    zx_status_t status = pmm_alloc_page(0, &vm_page, &pa);
    uint32_t cache_policy;

    ASSERT_EQ(ZX_OK, status, "vm page allocation\n");
    ASSERT_TRUE(vm_page, "");

    fbl::RefPtr<VmObject> vmo;
    status = VmObjectPhysical::Create(pa, PAGE_SIZE, &vmo);
    ASSERT_EQ(status, ZX_OK, "vmobject creation\n");
    ASSERT_TRUE(vmo, "vmobject creation\n");
    EXPECT_EQ(ZX_OK, vmo->GetMappingCachePolicy(&cache_policy), "try get");
    EXPECT_EQ(ARCH_MMU_FLAG_UNCACHED, cache_policy, "check initial cache policy");
    EXPECT_TRUE(vmo->is_contiguous(), "check contiguous");

    pmm_free_page(vm_page);

    END_TEST;
}

// Creates a vm object that commits contiguous memory.
static bool vmo_create_contiguous_test() {
    BEGIN_TEST;
    static const size_t alloc_size = PAGE_SIZE * 16;
    fbl::RefPtr<VmObject> vmo;
    zx_status_t status = VmObjectPaged::CreateContiguous(PMM_ALLOC_FLAG_ANY, alloc_size, 0, &vmo);
    ASSERT_EQ(status, ZX_OK, "vmobject creation\n");
    ASSERT_TRUE(vmo, "vmobject creation\n");

    EXPECT_TRUE(vmo->is_contiguous(), "vmo is contig\n");

    paddr_t last_pa;
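    // Walk every page in the VMO and fail if any physical address is not exactly
    // PAGE_SIZE past the previous one.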
    auto lookup_func = [](void* ctx, size_t offset, size_t index, paddr_t pa) {
        paddr_t* last_pa = static_cast<paddr_t*>(ctx);
        if (index != 0 && *last_pa + PAGE_SIZE != pa) {
            return ZX_ERR_BAD_STATE;
        }
        *last_pa = pa;
        return ZX_OK;
    };
    status = vmo->Lookup(0, alloc_size, 0, lookup_func, &last_pa);
    EXPECT_EQ(status, ZX_OK, "vmo lookup\n");

    END_TEST;
}

// Makes sure decommitting pages from a contiguous VMO is disallowed.
static bool vmo_contiguous_decommit_test() {
    BEGIN_TEST;

    static const size_t alloc_size = PAGE_SIZE * 16;
    fbl::RefPtr<VmObject> vmo;
    zx_status_t status = VmObjectPaged::CreateContiguous(PMM_ALLOC_FLAG_ANY, alloc_size, 0, &vmo);
    ASSERT_EQ(status, ZX_OK, "vmobject creation\n");
    ASSERT_TRUE(vmo, "vmobject creation\n");

    uint64_t n;
    status = vmo->DecommitRange(PAGE_SIZE, 4 * PAGE_SIZE, &n);
    ASSERT_EQ(status, ZX_ERR_NOT_SUPPORTED, "decommit fails due to pinned pages\n");
    status = vmo->DecommitRange(0, 4 * PAGE_SIZE, &n);
    ASSERT_EQ(status, ZX_ERR_NOT_SUPPORTED, "decommit fails due to pinned pages\n");
    status = vmo->DecommitRange(alloc_size - PAGE_SIZE, PAGE_SIZE, &n);
    ASSERT_EQ(status, ZX_ERR_NOT_SUPPORTED, "decommit fails due to pinned pages\n");

    // Make sure all pages are still present and contiguous
    paddr_t last_pa;
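    // Same contiguity walk as in vmo_create_contiguous_test: every page must
    // follow the previous one by exactly PAGE_SIZE.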
    auto lookup_func = [](void* ctx, size_t offset, size_t index, paddr_t pa) {
        paddr_t* last_pa = static_cast<paddr_t*>(ctx);
        if (index != 0 && *last_pa + PAGE_SIZE != pa) {
            return ZX_ERR_BAD_STATE;
        }
        *last_pa = pa;
        return ZX_OK;
    };
    status = vmo->Lookup(0, alloc_size, 0, lookup_func, &last_pa);
    ASSERT_EQ(status, ZX_OK, "vmo lookup\n");

    END_TEST;
}

// Creates a vm object, maps it, precommitted.
static bool vmo_precommitted_map_test() {
    BEGIN_TEST;
    static const size_t alloc_size = PAGE_SIZE * 16;
    fbl::RefPtr<VmObject> vmo;
    zx_status_t status = VmObjectPaged::Create(PMM_ALLOC_FLAG_ANY, 0, alloc_size, &vmo);
    ASSERT_EQ(status, ZX_OK, "vmobject creation\n");
    ASSERT_TRUE(vmo, "vmobject creation\n");

    auto ka = VmAspace::kernel_aspace();
    void* ptr;
    auto ret = ka->MapObjectInternal(vmo, "test", 0, alloc_size, &ptr,
                                     0, VmAspace::VMM_FLAG_COMMIT, kArchRwFlags);
    ASSERT_EQ(ZX_OK, ret, "mapping object");

    // fill with known pattern and test
    if (!fill_and_test(ptr, alloc_size)) {
        all_ok = false;
    }

    auto err = ka->FreeRegion((vaddr_t)ptr);
    EXPECT_EQ(ZX_OK, err, "unmapping object");
    END_TEST;
}

// Creates a vm object, maps it, demand paged.
static bool vmo_demand_paged_map_test() {
    BEGIN_TEST;
    static const size_t alloc_size = PAGE_SIZE * 16;
    fbl::RefPtr<VmObject> vmo;
    zx_status_t status = VmObjectPaged::Create(PMM_ALLOC_FLAG_ANY, 0u, alloc_size, &vmo);
    ASSERT_EQ(status, ZX_OK, "vmobject creation\n");
    ASSERT_TRUE(vmo, "vmobject creation\n");

    auto ka = VmAspace::kernel_aspace();
    void* ptr;
    auto ret = ka->MapObjectInternal(vmo, "test", 0, alloc_size, &ptr,
                                     0, 0, kArchRwFlags);
    ASSERT_EQ(ret, ZX_OK, "mapping object");

    // fill with known pattern and test
    if (!fill_and_test(ptr, alloc_size)) {
        all_ok = false;
    }

    auto err = ka->FreeRegion((vaddr_t)ptr);
    EXPECT_EQ(ZX_OK, err, "unmapping object");
    END_TEST;
}

// Creates a vm object, maps it, drops ref before unmapping.
static bool vmo_dropped_ref_test() {
    BEGIN_TEST;
    static const size_t alloc_size = PAGE_SIZE * 16;
    fbl::RefPtr<VmObject> vmo;
    zx_status_t status = VmObjectPaged::Create(PMM_ALLOC_FLAG_ANY, 0u, alloc_size, &vmo);
    ASSERT_EQ(status, ZX_OK, "vmobject creation\n");
    ASSERT_TRUE(vmo, "vmobject creation\n");

    auto ka = VmAspace::kernel_aspace();
    void* ptr;
    auto ret = ka->MapObjectInternal(fbl::move(vmo), "test", 0, alloc_size, &ptr,
                                     0, VmAspace::VMM_FLAG_COMMIT, kArchRwFlags);
    ASSERT_EQ(ret, ZX_OK, "mapping object");

    EXPECT_NULL(vmo, "dropped ref to object");

    // fill with known pattern and test
    if (!fill_and_test(ptr, alloc_size)) {
        all_ok = false;
    }

    auto err = ka->FreeRegion((vaddr_t)ptr);
    EXPECT_EQ(ZX_OK, err, "unmapping object");
    END_TEST;
}

// Creates a vm object, maps it, fills it with data, unmaps,
// maps again somewhere else.
static bool vmo_remap_test() {
    BEGIN_TEST;
    static const size_t alloc_size = PAGE_SIZE * 16;
    fbl::RefPtr<VmObject> vmo;
    zx_status_t status = VmObjectPaged::Create(PMM_ALLOC_FLAG_ANY, 0u, alloc_size, &vmo);
    ASSERT_EQ(status, ZX_OK, "vmobject creation\n");
    ASSERT_TRUE(vmo, "vmobject creation\n");

    auto ka = VmAspace::kernel_aspace();
    void* ptr;
    auto ret = ka->MapObjectInternal(vmo, "test", 0, alloc_size, &ptr,
                                     0, VmAspace::VMM_FLAG_COMMIT, kArchRwFlags);
    ASSERT_EQ(ZX_OK, ret, "mapping object");

    // fill with known pattern and test
    if (!fill_and_test(ptr, alloc_size)) {
        all_ok = false;
    }

    auto err = ka->FreeRegion((vaddr_t)ptr);
    EXPECT_EQ(ZX_OK, err, "unmapping object");

    // map it again
    ret = ka->MapObjectInternal(vmo, "test", 0, alloc_size, &ptr,
                                0, VmAspace::VMM_FLAG_COMMIT, kArchRwFlags);
    ASSERT_EQ(ret, ZX_OK, "mapping object");

    // test that the pattern is still valid
    bool result = test_region((uintptr_t)ptr, ptr, alloc_size);
    EXPECT_TRUE(result, "testing region for corruption");

    err = ka->FreeRegion((vaddr_t)ptr);
    EXPECT_EQ(ZX_OK, err, "unmapping object");
    END_TEST;
}

// Creates a vm object, maps it, fills it with data, maps it a second time and
// third time somewhere else.
static bool vmo_double_remap_test() {
    BEGIN_TEST;
    static const size_t alloc_size = PAGE_SIZE * 16;
    fbl::RefPtr<VmObject> vmo;
    zx_status_t status = VmObjectPaged::Create(PMM_ALLOC_FLAG_ANY, 0u, alloc_size, &vmo);
    ASSERT_EQ(status, ZX_OK, "vmobject creation\n");
    ASSERT_TRUE(vmo, "vmobject creation\n");

    auto ka = VmAspace::kernel_aspace();
    void* ptr;
    auto ret = ka->MapObjectInternal(vmo, "test0", 0, alloc_size, &ptr,
                                     0, 0, kArchRwFlags);
    ASSERT_EQ(ZX_OK, ret, "mapping object");

    // fill with known pattern and test
    if (!fill_and_test(ptr, alloc_size)) {
        all_ok = false;
    }

    // map it again
    void* ptr2;
    ret = ka->MapObjectInternal(vmo, "test1", 0, alloc_size, &ptr2,
                                0, 0, kArchRwFlags);
    ASSERT_EQ(ret, ZX_OK, "mapping object second time");
    EXPECT_NE(ptr, ptr2, "second mapping is different");

    // test that the pattern is still valid
    bool result = test_region((uintptr_t)ptr, ptr2, alloc_size);
    EXPECT_TRUE(result, "testing region for corruption");

    // map it a third time with an offset
    void* ptr3;
    static const size_t alloc_offset = PAGE_SIZE;
    ret = ka->MapObjectInternal(vmo, "test2", alloc_offset, alloc_size - alloc_offset,
                                &ptr3, 0, 0, kArchRwFlags);
    ASSERT_EQ(ret, ZX_OK, "mapping object third time");
    EXPECT_NE(ptr3, ptr2, "third mapping is different");
    EXPECT_NE(ptr3, ptr, "third mapping is different");

    // test that the pattern is still valid
    int mc =
        memcmp((uint8_t*)ptr + alloc_offset, ptr3, alloc_size - alloc_offset);
    EXPECT_EQ(0, mc, "testing region for corruption");

    ret = ka->FreeRegion((vaddr_t)ptr3);
    EXPECT_EQ(ZX_OK, ret, "unmapping object third time");

    ret = ka->FreeRegion((vaddr_t)ptr2);
    EXPECT_EQ(ZX_OK, ret, "unmapping object second time");

    ret = ka->FreeRegion((vaddr_t)ptr);
    EXPECT_EQ(ZX_OK, ret, "unmapping object");
    END_TEST;
}

static bool vmo_read_write_smoke_test() {
    BEGIN_TEST;
    static const size_t alloc_size = PAGE_SIZE * 16;

    // create object
    fbl::RefPtr<VmObject> vmo;
    zx_status_t status = VmObjectPaged::Create(PMM_ALLOC_FLAG_ANY, 0, alloc_size, &vmo);
    ASSERT_EQ(status, ZX_OK, "vmobject creation\n");
    ASSERT_TRUE(vmo, "vmobject creation\n");

    // create test buffer
    fbl::AllocChecker ac;
    fbl::Array<uint8_t> a(new (&ac) uint8_t[alloc_size], alloc_size);
    ASSERT_TRUE(ac.check(), "");
    fill_region(99, a.get(), alloc_size);

    // write to it, make sure it seems to work with valid args
    zx_status_t err = vmo->Write(a.get(), 0, 0);
    EXPECT_EQ(ZX_OK, err, "writing to object");

    err = vmo->Write(a.get(), 0, 37);
    EXPECT_EQ(ZX_OK, err, "writing to object");

    err = vmo->Write(a.get(), 99, 37);
    EXPECT_EQ(ZX_OK, err, "writing to object");

    // can't write past end
    err = vmo->Write(a.get(), 0, alloc_size + 47);
    EXPECT_EQ(ZX_ERR_OUT_OF_RANGE, err, "writing to object");

    // can't write past end
    err = vmo->Write(a.get(), 31, alloc_size + 47);
    EXPECT_EQ(ZX_ERR_OUT_OF_RANGE, err, "writing to object");

    // should return an error because out of range
    err = vmo->Write(a.get(), alloc_size + 99, 42);
    EXPECT_EQ(ZX_ERR_OUT_OF_RANGE, err, "writing to object");

    // map the object
    auto ka = VmAspace::kernel_aspace();
    uint8_t* ptr;
    err = ka->MapObjectInternal(vmo, "test", 0, alloc_size, (void**)&ptr,
                                0, 0, kArchRwFlags);
    ASSERT_EQ(ZX_OK, err, "mapping object");

    // write to it at odd offsets
    err = vmo->Write(a.get(), 31, 4197);
    EXPECT_EQ(ZX_OK, err, "writing to object");
    int cmpres = memcmp(ptr + 31, a.get(), 4197);
    EXPECT_EQ(0, cmpres, "reading from object");

    // write to it, filling the object completely
    err = vmo->Write(a.get(), 0, alloc_size);
    EXPECT_EQ(ZX_OK, err, "writing to object");

    // test that the data was actually written to it
    bool result = test_region(99, ptr, alloc_size);
    EXPECT_TRUE(result, "writing to object");

    // unmap it
    ka->FreeRegion((vaddr_t)ptr);

    // test that we can read from it
    fbl::Array<uint8_t> b(new (&ac) uint8_t[alloc_size], alloc_size);
    ASSERT_TRUE(ac.check(), "can't allocate buffer");

    err = vmo->Read(b.get(), 0, alloc_size);
    EXPECT_EQ(ZX_OK, err, "reading from object");

    // validate the buffer is valid
    cmpres = memcmp(b.get(), a.get(), alloc_size);
    EXPECT_EQ(0, cmpres, "reading from object");

    // read from it at an offset
    err = vmo->Read(b.get(), 31, 4197);
    EXPECT_EQ(ZX_OK, err, "reading from object");
    cmpres = memcmp(b.get(), a.get() + 31, 4197);
    EXPECT_EQ(0, cmpres, "reading from object");
    END_TEST;
}

static bool vmo_cache_test() {
    BEGIN_TEST;

    paddr_t pa;
    vm_page_t* vm_page;
    zx_status_t status = pmm_alloc_page(0, &vm_page, &pa);
    auto ka = VmAspace::kernel_aspace();
    uint32_t cache_policy = ARCH_MMU_FLAG_UNCACHED_DEVICE;
    uint32_t cache_policy_get;
    void* ptr;

    ASSERT_TRUE(vm_page, "");
    // Test that the flags set/get properly
    {
        fbl::RefPtr<VmObject> vmo;
        status = VmObjectPhysical::Create(pa, PAGE_SIZE, &vmo);
        ASSERT_EQ(status, ZX_OK, "vmobject creation\n");
        ASSERT_TRUE(vmo, "vmobject creation\n");
        EXPECT_EQ(ZX_OK, vmo->GetMappingCachePolicy(&cache_policy_get), "try get");
        EXPECT_NE(cache_policy, cache_policy_get, "check initial cache policy");
        EXPECT_EQ(ZX_OK, vmo->SetMappingCachePolicy(cache_policy), "try set");
        EXPECT_EQ(ZX_OK, vmo->GetMappingCachePolicy(&cache_policy_get), "try get");
        EXPECT_EQ(cache_policy, cache_policy_get, "compare flags");
    }

    // Test valid flags
    for (uint32_t i = 0; i <= ARCH_MMU_FLAG_CACHE_MASK; i++) {
        fbl::RefPtr<VmObject> vmo;
        status = VmObjectPhysical::Create(pa, PAGE_SIZE, &vmo);
        ASSERT_EQ(status, ZX_OK, "vmobject creation\n");
        ASSERT_TRUE(vmo, "vmobject creation\n");
        EXPECT_EQ(ZX_OK, vmo->SetMappingCachePolicy(i), "try setting valid flags");
    }

    // Test invalid flags
    for (uint32_t i = ARCH_MMU_FLAG_CACHE_MASK + 1; i < 32; i++) {
        fbl::RefPtr<VmObject> vmo;
        status = VmObjectPhysical::Create(pa, PAGE_SIZE, &vmo);
        ASSERT_EQ(status, ZX_OK, "vmobject creation\n");
        ASSERT_TRUE(vmo, "vmobject creation\n");
        EXPECT_EQ(ZX_ERR_INVALID_ARGS, vmo->SetMappingCachePolicy(i), "try set with invalid flags");
    }

    // Test valid flags with invalid flags
    {
        fbl::RefPtr<VmObject> vmo;
        status = VmObjectPhysical::Create(pa, PAGE_SIZE, &vmo);
        ASSERT_EQ(status, ZX_OK, "vmobject creation\n");
        ASSERT_TRUE(vmo, "vmobject creation\n");
        EXPECT_EQ(ZX_ERR_INVALID_ARGS, vmo->SetMappingCachePolicy(cache_policy | 0x5), "bad 0x5");
        EXPECT_EQ(ZX_ERR_INVALID_ARGS, vmo->SetMappingCachePolicy(cache_policy | 0xA), "bad 0xA");
        EXPECT_EQ(ZX_ERR_INVALID_ARGS, vmo->SetMappingCachePolicy(cache_policy | 0x55), "bad 0x55");
        EXPECT_EQ(ZX_ERR_INVALID_ARGS, vmo->SetMappingCachePolicy(cache_policy | 0xAA), "bad 0xAA");
    }

    // Test that changing policy while mapped is blocked
    {
        fbl::RefPtr<VmObject> vmo;
        status = VmObjectPhysical::Create(pa, PAGE_SIZE, &vmo);
        ASSERT_EQ(status, ZX_OK, "vmobject creation\n");
        ASSERT_TRUE(vmo, "vmobject creation\n");
        ASSERT_EQ(ZX_OK, ka->MapObjectInternal(vmo, "test", 0, PAGE_SIZE, (void**)&ptr, 0, 0,
                                               kArchRwFlags),
                  "map vmo");
        EXPECT_EQ(ZX_ERR_BAD_STATE, vmo->SetMappingCachePolicy(cache_policy),
                  "set flags while mapped");
        EXPECT_EQ(ZX_OK, ka->FreeRegion((vaddr_t)ptr), "unmap vmo");
        EXPECT_EQ(ZX_OK, vmo->SetMappingCachePolicy(cache_policy), "set flags after unmapping");
        ASSERT_EQ(ZX_OK, ka->MapObjectInternal(vmo, "test", 0, PAGE_SIZE, (void**)&ptr, 0, 0,
                                               kArchRwFlags),
                  "map vmo again");
        EXPECT_EQ(ZX_OK, ka->FreeRegion((vaddr_t)ptr), "unmap vmo");
    }

    pmm_free_page(vm_page);
    END_TEST;
}

static bool vmo_lookup_test() {
    BEGIN_TEST;

    static const size_t alloc_size = PAGE_SIZE * 16;
    fbl::RefPtr<VmObject> vmo;
    zx_status_t status = VmObjectPaged::Create(PMM_ALLOC_FLAG_ANY, 0u, alloc_size, &vmo);
    ASSERT_EQ(status, ZX_OK, "vmobject creation\n");
    ASSERT_TRUE(vmo, "vmobject creation\n");

    size_t pages_seen = 0;
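    // The callback runs once for each page Lookup() finds, so pages_seen counts
    // the committed pages observed in the queried range.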
    auto lookup_fn = [](void* context, size_t offset, size_t index, paddr_t pa) {
        size_t* pages_seen = static_cast<size_t*>(context);
        (*pages_seen)++;
        return ZX_OK;
    };
    status = vmo->Lookup(0, alloc_size, 0, lookup_fn, &pages_seen);
    EXPECT_EQ(ZX_ERR_NO_MEMORY, status, "lookup on uncommitted pages\n");
    EXPECT_EQ(0u, pages_seen, "lookup on uncommitted pages\n");
    pages_seen = 0;

    uint64_t committed;
    status = vmo->CommitRange(PAGE_SIZE, PAGE_SIZE, &committed);
    EXPECT_EQ(ZX_OK, status, "committing vm object\n");
    EXPECT_EQ(static_cast<size_t>(PAGE_SIZE), committed, "committing vm object\n");

    // Should fail, since the first page isn't committed
    status = vmo->Lookup(0, alloc_size, 0, lookup_fn, &pages_seen);
    EXPECT_EQ(ZX_ERR_NO_MEMORY, status, "lookup on partially committed pages\n");
    EXPECT_EQ(0u, pages_seen, "lookup on partially committed pages\n");
    pages_seen = 0;

    // Should fail, but see the mapped page
    status = vmo->Lookup(PAGE_SIZE, alloc_size - PAGE_SIZE, 0, lookup_fn, &pages_seen);
    EXPECT_EQ(ZX_ERR_NO_MEMORY, status, "lookup on partially committed pages\n");
    EXPECT_EQ(1u, pages_seen, "lookup on partially committed pages\n");
    pages_seen = 0;

    // Should succeed
    status = vmo->Lookup(PAGE_SIZE, PAGE_SIZE, 0, lookup_fn, &pages_seen);
    EXPECT_EQ(ZX_OK, status, "lookup on partially committed pages\n");
    EXPECT_EQ(1u, pages_seen, "lookup on partially committed pages\n");
    pages_seen = 0;

    // Commit the rest
    status = vmo->CommitRange(0, alloc_size, &committed);
    EXPECT_EQ(ZX_OK, status, "committing vm object\n");
    EXPECT_EQ(alloc_size - PAGE_SIZE, committed, "committing vm object\n");

    status = vmo->Lookup(0, alloc_size, 0, lookup_fn, &pages_seen);
    EXPECT_EQ(ZX_OK, status, "lookup on partially committed pages\n");
    EXPECT_EQ(alloc_size / PAGE_SIZE, pages_seen, "lookup on partially committed pages\n");

    END_TEST;
}

// TODO(ZX-1431): The ARM code's error codes are always ZX_ERR_INTERNAL, so
// special case that.
#if ARCH_ARM64
#define MMU_EXPECT_EQ(exp, act, msg) EXPECT_EQ(ZX_ERR_INTERNAL, act, msg)
#else
#define MMU_EXPECT_EQ(exp, act, msg) EXPECT_EQ(exp, act, msg)
#endif

static bool arch_noncontiguous_map() {
    BEGIN_TEST;

    // Get some phys pages to test on
    paddr_t phys[3];
    struct list_node phys_list = LIST_INITIAL_VALUE(phys_list);
    zx_status_t status = pmm_alloc_pages(fbl::count_of(phys), 0, &phys_list);
    ASSERT_EQ(ZX_OK, status, "non contig map alloc");
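    // Record the physical address of each page that was handed back.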
    {
        size_t i = 0;
        vm_page_t* p;
        list_for_every_entry (&phys_list, p, vm_page_t, queue_node) {
            phys[i] = p->paddr();
            ++i;
        }
    }

    {
        ArchVmAspace aspace;
        status = aspace.Init(USER_ASPACE_BASE, USER_ASPACE_SIZE, 0);
        ASSERT_EQ(ZX_OK, status, "failed to init aspace\n");

        // Attempt to map a set of vm_page_t
        size_t mapped;
        vaddr_t base = USER_ASPACE_BASE + 10 * PAGE_SIZE;
        status = aspace.Map(base, phys, fbl::count_of(phys), ARCH_MMU_FLAG_PERM_READ, &mapped);
        ASSERT_EQ(ZX_OK, status, "failed first map\n");
        EXPECT_EQ(fbl::count_of(phys), mapped, "weird first map\n");
        for (size_t i = 0; i < fbl::count_of(phys); ++i) {
            paddr_t paddr;
            uint mmu_flags;
            status = aspace.Query(base + i * PAGE_SIZE, &paddr, &mmu_flags);
            EXPECT_EQ(ZX_OK, status, "bad first map\n");
            EXPECT_EQ(phys[i], paddr, "bad first map\n");
            EXPECT_EQ(ARCH_MMU_FLAG_PERM_READ, mmu_flags, "bad first map\n");
        }

        // Attempt to map again, should fail
        status = aspace.Map(base, phys, fbl::count_of(phys), ARCH_MMU_FLAG_PERM_READ, &mapped);
        MMU_EXPECT_EQ(ZX_ERR_ALREADY_EXISTS, status, "double map\n");

        // Attempt to map partially overlapping, should fail
        status = aspace.Map(base + 2 * PAGE_SIZE, phys, fbl::count_of(phys),
                            ARCH_MMU_FLAG_PERM_READ, &mapped);
        MMU_EXPECT_EQ(ZX_ERR_ALREADY_EXISTS, status, "double map\n");
        status = aspace.Map(base - 2 * PAGE_SIZE, phys, fbl::count_of(phys),
                            ARCH_MMU_FLAG_PERM_READ, &mapped);
        MMU_EXPECT_EQ(ZX_ERR_ALREADY_EXISTS, status, "double map\n");

        // No entries should have been created by the partial failures
        status = aspace.Query(base - 2 * PAGE_SIZE, nullptr, nullptr);
        EXPECT_EQ(ZX_ERR_NOT_FOUND, status, "bad first map\n");
        status = aspace.Query(base - PAGE_SIZE, nullptr, nullptr);
        EXPECT_EQ(ZX_ERR_NOT_FOUND, status, "bad first map\n");
        status = aspace.Query(base + 3 * PAGE_SIZE, nullptr, nullptr);
        EXPECT_EQ(ZX_ERR_NOT_FOUND, status, "bad first map\n");
        status = aspace.Query(base + 4 * PAGE_SIZE, nullptr, nullptr);
        EXPECT_EQ(ZX_ERR_NOT_FOUND, status, "bad first map\n");

        status = aspace.Destroy();
        EXPECT_EQ(ZX_OK, status, "failed to destroy aspace\n");
    }

    pmm_free(&phys_list);

    END_TEST;
}

// Use the function name as the test name
#define VM_UNITTEST(fname) UNITTEST(#fname, fname)

UNITTEST_START_TESTCASE(vm_tests)
VM_UNITTEST(pmm_smoke_test)
// runs the system out of memory, uncomment for debugging
//VM_UNITTEST(pmm_large_alloc_test)
//VM_UNITTEST(pmm_oversized_alloc_test)
VM_UNITTEST(pmm_alloc_contiguous_one_test)
VM_UNITTEST(vmm_alloc_smoke_test)
VM_UNITTEST(vmm_alloc_contiguous_smoke_test)
VM_UNITTEST(multiple_regions_test)
VM_UNITTEST(vmm_alloc_zero_size_fails)
VM_UNITTEST(vmm_alloc_bad_specific_pointer_fails)
VM_UNITTEST(vmm_alloc_contiguous_missing_flag_commit_fails)
VM_UNITTEST(vmm_alloc_contiguous_zero_size_fails)
VM_UNITTEST(vmaspace_create_smoke_test)
VM_UNITTEST(vmaspace_alloc_smoke_test)
VM_UNITTEST(vmo_create_test)
VM_UNITTEST(vmo_pin_test)
VM_UNITTEST(vmo_multiple_pin_test)
VM_UNITTEST(vmo_commit_test)
VM_UNITTEST(vmo_odd_size_commit_test)
VM_UNITTEST(vmo_create_physical_test)
VM_UNITTEST(vmo_create_contiguous_test)
VM_UNITTEST(vmo_contiguous_decommit_test)
VM_UNITTEST(vmo_precommitted_map_test)
VM_UNITTEST(vmo_demand_paged_map_test)
VM_UNITTEST(vmo_dropped_ref_test)
VM_UNITTEST(vmo_remap_test)
VM_UNITTEST(vmo_double_remap_test)
VM_UNITTEST(vmo_read_write_smoke_test)
VM_UNITTEST(vmo_cache_test)
VM_UNITTEST(vmo_lookup_test)
VM_UNITTEST(arch_noncontiguous_map)
// Uncomment for debugging
// VM_UNITTEST(dump_all_aspaces)  // Run last
UNITTEST_END_TESTCASE(vm_tests, "vmtests", "Virtual memory tests");