// Copyright 2016 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <assert.h>
#include <errno.h>
#include <limits.h>
#include <stdalign.h>
#include <sys/mman.h>
#include <unistd.h>

#include <zircon/process.h>
#include <zircon/syscalls.h>
#include <zircon/syscalls/exception.h>
#include <zircon/syscalls/object.h>
#include <zircon/syscalls/port.h>

#include <fbl/algorithm.h>
#include <fbl/atomic.h>
#include <fbl/limits.h>
#include <unittest/unittest.h>

#define ROUNDUP(a, b) (((a) + ((b)-1)) & ~((b)-1))
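
// Sanity checks for ROUNDUP (illustrative additions; the macro assumes the
// alignment *b* is a power of two).
static_assert(ROUNDUP(0, 4) == 0, "");
static_assert(ROUNDUP(5, 4) == 8, "");
static_assert(ROUNDUP(8, 4) == 8, "");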

// These tests focus on the semantics of the VMARs themselves.  For heavier
// testing of the mapping permissions, see the VMO tests.

namespace {

const char kProcessName[] = "test-proc-vmar";

const zx_vm_option_t kRwxMapPerm =
        ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_PERM_EXECUTE;
const zx_vm_option_t kRwxAllocPerm =
        ZX_VM_CAN_MAP_READ | ZX_VM_CAN_MAP_WRITE | ZX_VM_CAN_MAP_EXECUTE;

// Helper routine for other tests.  If bit i (< *page_count*) in *bitmap* is
// set, checks that *base* + i * PAGE_SIZE is mapped; otherwise checks that it
// is not mapped.
bool check_pages_mapped(zx_handle_t process, uintptr_t base, uint64_t bitmap, size_t page_count) {
    uint8_t buf[1];
    size_t len;

    size_t i = 0;
    while (i < page_count) {
        zx_status_t expected = (bitmap & 1) ? ZX_OK : ZX_ERR_NO_MEMORY;
        if (zx_process_read_memory(process, base + i * PAGE_SIZE, buf, 1, &len) != expected) {
            return false;
        }
        ++i;
        bitmap >>= 1;
    }
    return true;
}
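
// Illustrative usage sketch (not itself a test): after mapping four pages at
// |addr| and unmapping the middle two, one would expect
//
//   EXPECT_TRUE(check_pages_mapped(process, addr, 0b1001, 4));
//
// Bit 0 of *bitmap* corresponds to the page at *base* itself.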

// Thread run by test_local_address, used to attempt a write to memory
void test_write_address_thread(uintptr_t address, bool* success) {
    auto p = reinterpret_cast<fbl::atomic_uint8_t*>(address);
    p->store(5);
    *success = true;

    zx_thread_exit();
}

// Thread run by test_local_address, used to attempt a read from memory
void test_read_address_thread(uintptr_t address, bool* success) {
    auto p = reinterpret_cast<fbl::atomic_uint8_t*>(address);
    (void)p->load();
    *success = true;

    zx_thread_exit();
}

// Helper routine for testing via direct access whether or not an address in the
// test process's address space is accessible.
zx_status_t test_local_address(uintptr_t address, bool write, bool* success) {
    *success = false;

    alignas(16) static uint8_t thread_stack[PAGE_SIZE];

    zx_port_packet_t packet;
    zx_info_handle_basic_t info;
    zx_koid_t tid = ZX_KOID_INVALID;
    bool saw_page_fault = false;

    zx_handle_t thread = ZX_HANDLE_INVALID;
    zx_handle_t port = ZX_HANDLE_INVALID;
    uintptr_t entry = reinterpret_cast<uintptr_t>(write ? test_write_address_thread :
                                                          test_read_address_thread);
    uintptr_t stack = reinterpret_cast<uintptr_t>(thread_stack + sizeof(thread_stack));

    zx_status_t status = zx_thread_create(zx_process_self(), "vmar_test_addr", 14, 0, &thread);
    if (status != ZX_OK) {
        goto err;
    }

    status = zx_object_get_info(thread, ZX_INFO_HANDLE_BASIC,
                                &info, sizeof(info), NULL, NULL);
    if (status != ZX_OK) {
        goto err;
    }
    tid = info.koid;

    // Create an exception port and bind it to the thread to prevent the
    // thread's illegal access from killing the process.
    status = zx_port_create(0, &port);
    if (status != ZX_OK) {
        goto err;
    }
    status = zx_task_bind_exception_port(thread, port, 0, 0);
    if (status != ZX_OK) {
        goto err;
    }
    status = zx_object_wait_async(thread, port, tid, ZX_THREAD_TERMINATED,
                                  ZX_WAIT_ASYNC_ONCE);
    if (status != ZX_OK) {
        goto err;
    }

    status = zx_thread_start(thread, entry, stack,
                             address, reinterpret_cast<uintptr_t>(success));
    if (status != ZX_OK) {
        goto err;
    }

    // Wait for the thread to exit and identify its cause of death.
    // Keep looping until the thread is gone so that crashlogger doesn't
    // see the page fault.
    while (true) {
        zx_status_t s;

        s = zx_port_wait(port, ZX_TIME_INFINITE, &packet);
        if (s != ZX_OK) {
            status = s;
            break;
        }
        if (ZX_PKT_IS_SIGNAL_ONE(packet.type)) {
            if (packet.key != tid ||
                !(packet.signal.observed & ZX_THREAD_TERMINATED)) {
                status = ZX_ERR_BAD_STATE;
                break;
            }
            // Leave status as is.
            break;
        }
        if (!ZX_PKT_IS_EXCEPTION(packet.type)) {
            status = ZX_ERR_BAD_STATE;
            break;
        }
        if (packet.type == ZX_EXCP_FATAL_PAGE_FAULT) {
            zx_task_kill(thread);
            saw_page_fault = true;
            // Leave status as is.
        } else {
            zx_task_kill(thread);
            if (status == ZX_OK)
                status = ZX_ERR_BAD_STATE;
        }
    }

    if (status == ZX_OK && !saw_page_fault)
        *success = true;

    // fallthrough to cleanup
err:
    if (thread != ZX_HANDLE_INVALID)
        zx_task_bind_exception_port(thread, ZX_HANDLE_INVALID, 0, 0);
    zx_handle_close(port);
    zx_handle_close(thread);
    return status;
}
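
// Illustrative usage sketch (not itself a test): to check that |addr| is
// mapped read-only, one could verify that reads succeed and writes fault:
//
//   bool success;
//   ASSERT_EQ(test_local_address(addr, false, &success), ZX_OK);
//   EXPECT_TRUE(success);   // the read succeeded
//   ASSERT_EQ(test_local_address(addr, true, &success), ZX_OK);
//   EXPECT_FALSE(success);  // the write page-faulted
//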
171
172bool destroy_root_test() {
173    BEGIN_TEST;
174
175    zx_handle_t process;
176    zx_handle_t vmar;
177    ASSERT_EQ(zx_process_create(zx_job_default(), kProcessName, sizeof(kProcessName) - 1,
178                                0, &process, &vmar), ZX_OK);
179
180    EXPECT_EQ(zx_vmar_destroy(vmar), ZX_OK);
181
182    zx_handle_t region;
183    uintptr_t region_addr;
184    EXPECT_EQ(zx_vmar_allocate(vmar,
185                               ZX_VM_CAN_MAP_READ | ZX_VM_CAN_MAP_WRITE,
186                               0, 10 * PAGE_SIZE, &region, &region_addr),
187              ZX_ERR_BAD_STATE);
188
189    EXPECT_EQ(zx_handle_close(vmar), ZX_OK);
190    EXPECT_EQ(zx_handle_close(process), ZX_OK);
191
192    END_TEST;
193}

bool basic_allocate_test() {
    BEGIN_TEST;

    zx_handle_t process;
    zx_handle_t vmar;
    zx_handle_t region1, region2;
    uintptr_t region1_addr, region2_addr;

    ASSERT_EQ(zx_process_create(zx_job_default(), kProcessName, sizeof(kProcessName) - 1,
                                0, &process, &vmar), ZX_OK);

    const size_t region1_size = PAGE_SIZE * 10;
    const size_t region2_size = PAGE_SIZE;

    ASSERT_EQ(zx_vmar_allocate(vmar,
                               ZX_VM_CAN_MAP_READ | ZX_VM_CAN_MAP_WRITE,
                               0, region1_size, &region1, &region1_addr),
              ZX_OK);

    ASSERT_EQ(zx_vmar_allocate(region1,
                               ZX_VM_CAN_MAP_READ | ZX_VM_CAN_MAP_WRITE,
                               0, region2_size, &region2, &region2_addr),
              ZX_OK);
    EXPECT_GE(region2_addr, region1_addr);
    EXPECT_LE(region2_addr + region2_size, region1_addr + region1_size);

    EXPECT_EQ(zx_handle_close(region1), ZX_OK);
    EXPECT_EQ(zx_handle_close(region2), ZX_OK);
    EXPECT_EQ(zx_handle_close(vmar), ZX_OK);
    EXPECT_EQ(zx_handle_close(process), ZX_OK);

    END_TEST;
}

bool map_in_compact_test() {
    BEGIN_TEST;

    zx_handle_t process;
    zx_handle_t vmar;
    zx_handle_t vmo;
    zx_handle_t region;
    uintptr_t region_addr, map_addr;

    ASSERT_EQ(zx_process_create(zx_job_default(), kProcessName, sizeof(kProcessName) - 1,
                                0, &process, &vmar), ZX_OK);

    const size_t region_size = PAGE_SIZE * 10;
    const size_t map_size = PAGE_SIZE;

    ASSERT_EQ(zx_vmo_create(map_size, 0, &vmo), ZX_OK);

    ASSERT_EQ(zx_vmar_allocate(vmar,
                               ZX_VM_CAN_MAP_READ | ZX_VM_CAN_MAP_WRITE | ZX_VM_COMPACT,
                               0, region_size, &region, &region_addr),
              ZX_OK);

    ASSERT_EQ(zx_vmar_map(region, ZX_VM_PERM_READ | ZX_VM_PERM_WRITE,
                          0, vmo, 0, map_size, &map_addr),
              ZX_OK);
    EXPECT_GE(map_addr, region_addr);
    EXPECT_LE(map_addr + map_size, region_addr + region_size);

    // Make a second allocation
    ASSERT_EQ(zx_vmar_map(region, ZX_VM_PERM_READ | ZX_VM_PERM_WRITE,
                          0, vmo, 0, map_size, &map_addr),
              ZX_OK);
    EXPECT_GE(map_addr, region_addr);
    EXPECT_LE(map_addr + map_size, region_addr + region_size);

    EXPECT_EQ(zx_handle_close(vmo), ZX_OK);
    EXPECT_EQ(zx_handle_close(region), ZX_OK);
    EXPECT_EQ(zx_handle_close(vmar), ZX_OK);
    EXPECT_EQ(zx_handle_close(process), ZX_OK);

    END_TEST;
}

// Attempt to allocate out of the region bounds
bool allocate_oob_test() {
    BEGIN_TEST;

    zx_handle_t process;
    zx_handle_t vmar;
    zx_handle_t region1, region2;
    uintptr_t region1_addr, region2_addr;

    ASSERT_EQ(zx_process_create(zx_job_default(), kProcessName, sizeof(kProcessName) - 1,
                                0, &process, &vmar), ZX_OK);

    const size_t region1_size = PAGE_SIZE * 10;

    ASSERT_EQ(zx_vmar_allocate(vmar,
                               ZX_VM_CAN_MAP_READ | ZX_VM_CAN_MAP_WRITE |
                               ZX_VM_CAN_MAP_SPECIFIC,
                               0, region1_size, &region1, &region1_addr),
              ZX_OK);

    EXPECT_EQ(zx_vmar_allocate(region1,
                               ZX_VM_CAN_MAP_READ | ZX_VM_CAN_MAP_WRITE |
                               ZX_VM_SPECIFIC, region1_size, PAGE_SIZE,
                               &region2, &region2_addr),
              ZX_ERR_INVALID_ARGS);

    EXPECT_EQ(zx_vmar_allocate(region1,
                               ZX_VM_CAN_MAP_READ | ZX_VM_CAN_MAP_WRITE |
                               ZX_VM_SPECIFIC,
                               region1_size - PAGE_SIZE, PAGE_SIZE * 2,
                               &region2, &region2_addr),
              ZX_ERR_INVALID_ARGS);

    EXPECT_EQ(zx_handle_close(region1), ZX_OK);
    EXPECT_EQ(zx_handle_close(vmar), ZX_OK);
    EXPECT_EQ(zx_handle_close(process), ZX_OK);

    END_TEST;
}

// Attempt to make unsatisfiable allocations
bool allocate_unsatisfiable_test() {
    BEGIN_TEST;

    zx_handle_t process;
    zx_handle_t vmar;
    zx_handle_t region1, region2, region3;
    uintptr_t region1_addr, region2_addr, region3_addr;

    ASSERT_EQ(zx_process_create(zx_job_default(), kProcessName, sizeof(kProcessName) - 1,
                                0, &process, &vmar), ZX_OK);

    const size_t region1_size = PAGE_SIZE * 10;

    ASSERT_EQ(zx_vmar_allocate(vmar,
                               ZX_VM_CAN_MAP_READ | ZX_VM_CAN_MAP_WRITE |
                               ZX_VM_CAN_MAP_SPECIFIC,
                               0, region1_size, &region1, &region1_addr),
              ZX_OK);

    // Too large to fit in the region, should get ZX_ERR_INVALID_ARGS
    EXPECT_EQ(zx_vmar_allocate(region1,
                               ZX_VM_CAN_MAP_READ | ZX_VM_CAN_MAP_WRITE,
                               0, region1_size + PAGE_SIZE, &region2, &region2_addr),
              ZX_ERR_INVALID_ARGS);

    // Allocate the whole range, should work
    ASSERT_EQ(zx_vmar_allocate(region1,
                               ZX_VM_CAN_MAP_READ | ZX_VM_CAN_MAP_WRITE,
                               0, region1_size, &region2, &region2_addr),
              ZX_OK);
    EXPECT_EQ(region2_addr, region1_addr);

    // Attempt to allocate a page inside of the full region
    EXPECT_EQ(zx_vmar_allocate(region1,
                               ZX_VM_CAN_MAP_READ | ZX_VM_CAN_MAP_WRITE,
                               0, PAGE_SIZE, &region3, &region3_addr),
              ZX_ERR_NO_MEMORY);

    EXPECT_EQ(zx_handle_close(region2), ZX_OK);
    EXPECT_EQ(zx_handle_close(region1), ZX_OK);
    EXPECT_EQ(zx_handle_close(vmar), ZX_OK);
    EXPECT_EQ(zx_handle_close(process), ZX_OK);

    END_TEST;
}

// Validate that when we destroy a VMAR, all operations on it
// and its children fail.
bool destroyed_vmar_test() {
    BEGIN_TEST;

    zx_handle_t process;
    zx_handle_t vmar;
    zx_handle_t vmo;
    zx_handle_t region[3] = {};
    uintptr_t region_addr[3];
    uintptr_t map_addr[2];

    ASSERT_EQ(zx_process_create(zx_job_default(), kProcessName, sizeof(kProcessName) - 1,
                                0, &process, &vmar), ZX_OK);

    ASSERT_EQ(zx_vmo_create(PAGE_SIZE, 0, &vmo), ZX_OK);

    ASSERT_EQ(zx_vmar_allocate(vmar,
                               ZX_VM_CAN_MAP_READ | ZX_VM_CAN_MAP_WRITE,
                               0, 10 * PAGE_SIZE, &region[0], &region_addr[0]),
              ZX_OK);

    // Create a mapping in region[0], so we can try to unmap it later
    ASSERT_EQ(zx_vmar_map(region[0], ZX_VM_PERM_READ | ZX_VM_PERM_WRITE,
                          0, vmo, 0, PAGE_SIZE, &map_addr[0]),
              ZX_OK);

    // Create a subregion in region[0], so we can try to operate on it later
    ASSERT_EQ(zx_vmar_allocate(region[0],
                               ZX_VM_CAN_MAP_READ | ZX_VM_CAN_MAP_WRITE,
                               0, PAGE_SIZE, &region[1], &region_addr[1]),
              ZX_OK);

    // Create a mapping in region[1], so we can try to unmap it later
    ASSERT_EQ(zx_vmar_map(region[1],
                          ZX_VM_PERM_READ | ZX_VM_PERM_WRITE,
                          0, vmo, 0, PAGE_SIZE, &map_addr[1]),
              ZX_OK);

    // Check that both mappings work
    {
        uint8_t buf = 5;
        size_t len;
        EXPECT_EQ(zx_process_write_memory(process, map_addr[0], &buf, 1, &len),
                  ZX_OK);
        EXPECT_EQ(len, 1U);

        buf = 0;
        EXPECT_EQ(zx_process_read_memory(process, map_addr[1], &buf, 1, &len),
                  ZX_OK);
        EXPECT_EQ(len, 1U);
        EXPECT_EQ(buf, 5U);
    }

    // Destroy region[0], which should also destroy region[1]
    ASSERT_EQ(zx_vmar_destroy(region[0]), ZX_OK);

    for (size_t i = 0; i < 2; ++i) {
        // Make sure the handles are still valid
        EXPECT_EQ(zx_object_get_info(region[i], ZX_INFO_HANDLE_VALID, NULL, 0u, NULL, NULL),
                  ZX_OK);

        // Make sure we can't access the memory mappings anymore
        {
            uint8_t buf;
            size_t read;
            EXPECT_EQ(zx_process_read_memory(process, map_addr[i], &buf, 1, &read),
                      ZX_ERR_NO_MEMORY);
        }

        // All operations on region[0] and region[1] should fail with ZX_ERR_BAD_STATE
        EXPECT_EQ(zx_vmar_destroy(region[i]), ZX_ERR_BAD_STATE);
        EXPECT_EQ(zx_vmar_allocate(region[i],
                                   ZX_VM_CAN_MAP_READ | ZX_VM_CAN_MAP_WRITE,
                                   0, PAGE_SIZE, &region[1], &region_addr[2]),
                  ZX_ERR_BAD_STATE);
        EXPECT_EQ(zx_vmar_unmap(region[i], map_addr[i], PAGE_SIZE),
                  ZX_ERR_BAD_STATE);
        EXPECT_EQ(zx_vmar_protect(region[i], ZX_VM_PERM_READ, map_addr[i], PAGE_SIZE),
                  ZX_ERR_BAD_STATE);
        EXPECT_EQ(zx_vmar_map(region[i], ZX_VM_PERM_READ, 0, vmo, 0, PAGE_SIZE, &map_addr[i]),
                  ZX_ERR_BAD_STATE);
    }

    // Make sure we can still operate on the parent of region[0]
    ASSERT_EQ(zx_vmar_allocate(vmar,
                               ZX_VM_CAN_MAP_READ | ZX_VM_CAN_MAP_WRITE,
                               0, PAGE_SIZE, &region[2], &region_addr[2]),
              ZX_OK);

    for (zx_handle_t h : region) {
        EXPECT_EQ(zx_handle_close(h), ZX_OK);
    }

    EXPECT_EQ(zx_handle_close(vmo), ZX_OK);
    EXPECT_EQ(zx_handle_close(vmar), ZX_OK);
    EXPECT_EQ(zx_handle_close(process), ZX_OK);

    END_TEST;
}

// Create a mapping, destroy the VMAR it is in, then attempt to create a new
// mapping over it.
bool map_over_destroyed_test() {
    BEGIN_TEST;

    zx_handle_t process;
    zx_handle_t vmar;
    zx_handle_t vmo, vmo2;
    zx_handle_t region[2] = {};
    uintptr_t region_addr[2];
    uintptr_t map_addr;
    size_t len;

    ASSERT_EQ(zx_process_create(zx_job_default(), kProcessName, sizeof(kProcessName) - 1,
                                0, &process, &vmar), ZX_OK);

    ASSERT_EQ(zx_vmo_create(PAGE_SIZE, 0, &vmo), ZX_OK);
    ASSERT_EQ(zx_vmo_create(PAGE_SIZE, 0, &vmo2), ZX_OK);

    ASSERT_EQ(zx_vmar_allocate(vmar,
                               ZX_VM_CAN_MAP_READ | ZX_VM_CAN_MAP_WRITE |
                               ZX_VM_CAN_MAP_SPECIFIC,
                               0, 10 * PAGE_SIZE, &region[0], &region_addr[0]),
              ZX_OK);

    // Create a subregion in region[0], so we can try to operate on it later
    ASSERT_EQ(zx_vmar_allocate(region[0],
                               ZX_VM_CAN_MAP_READ | ZX_VM_CAN_MAP_WRITE,
                               0, PAGE_SIZE, &region[1], &region_addr[1]),
              ZX_OK);

    // Create a mapping in region[1], so we can try to unmap it later
    ASSERT_EQ(zx_vmar_map(region[1],
                          ZX_VM_PERM_READ | ZX_VM_PERM_WRITE,
                          0, vmo, 0, PAGE_SIZE, &map_addr),
              ZX_OK);

    // Check that the mapping worked
    {
        uint8_t buf = 5;
        ASSERT_EQ(zx_vmo_write(vmo, &buf, 0, 1), ZX_OK);

        buf = 0;
        EXPECT_EQ(zx_process_read_memory(process, map_addr, &buf, 1, &len),
                  ZX_OK);
        EXPECT_EQ(len, 1U);
        EXPECT_EQ(buf, 5U);
    }

    // Destroy region[1], which should unmap the VMO
    ASSERT_EQ(zx_vmar_destroy(region[1]), ZX_OK);

    // Make sure we can't access the memory mappings anymore
    {
        uint8_t buf;
        size_t read;
        EXPECT_EQ(zx_process_read_memory(process, map_addr, &buf, 1, &read),
                  ZX_ERR_NO_MEMORY);
    }

    uintptr_t new_map_addr;
    EXPECT_EQ(zx_vmar_map(region[0],
                          ZX_VM_PERM_READ | ZX_VM_PERM_WRITE |
                          ZX_VM_SPECIFIC,
                          map_addr - region_addr[0], vmo2, 0, PAGE_SIZE, &new_map_addr),
              ZX_OK);
    EXPECT_EQ(new_map_addr, map_addr);

    // Make sure we can read, and we don't see the old memory mapping
    {
        uint8_t buf;
        size_t read;
        EXPECT_EQ(zx_process_read_memory(process, map_addr, &buf, 1, &read),
                  ZX_OK);
        EXPECT_EQ(read, 1U);
        EXPECT_EQ(buf, 0U);
    }

    for (zx_handle_t h : region) {
        EXPECT_EQ(zx_handle_close(h), ZX_OK);
    }

    EXPECT_EQ(zx_handle_close(vmo), ZX_OK);
    EXPECT_EQ(zx_handle_close(vmo2), ZX_OK);
    EXPECT_EQ(zx_handle_close(vmar), ZX_OK);
    EXPECT_EQ(zx_handle_close(process), ZX_OK);

    END_TEST;
}

// Attempt overmapping with FLAG_SPECIFIC to ensure it fails
bool overmapping_test() {
    BEGIN_TEST;

    zx_handle_t process;
    zx_handle_t region[3] = {};
    zx_handle_t vmar;
    zx_handle_t vmo, vmo2;
    uintptr_t region_addr[3];
    uintptr_t map_addr[2];

    ASSERT_EQ(zx_process_create(zx_job_default(), kProcessName, sizeof(kProcessName) - 1,
                                0, &process, &vmar), ZX_OK);

    ASSERT_EQ(zx_vmo_create(PAGE_SIZE, 0, &vmo), ZX_OK);
    ASSERT_EQ(zx_vmo_create(PAGE_SIZE * 4, 0, &vmo2), ZX_OK);

    ASSERT_EQ(zx_vmar_allocate(vmar,
                               ZX_VM_CAN_MAP_READ | ZX_VM_CAN_MAP_WRITE |
                               ZX_VM_CAN_MAP_SPECIFIC,
                               0, 10 * PAGE_SIZE, &region[0], &region_addr[0]),
              ZX_OK);

    // Create a mapping, and try to map on top of it
    ASSERT_EQ(zx_vmar_map(region[0],
                          ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_SPECIFIC,
                          PAGE_SIZE, vmo, 0, 2 * PAGE_SIZE, &map_addr[0]),
              ZX_OK);

    // Attempt a full overmapping
    EXPECT_EQ(zx_vmar_map(region[0],
                          ZX_VM_PERM_READ | ZX_VM_PERM_WRITE |
                          ZX_VM_SPECIFIC,
                          map_addr[0] - region_addr[0], vmo2, 0, 2 * PAGE_SIZE, &map_addr[1]),
              ZX_ERR_NO_MEMORY);

    // Attempt a partial overmapping
    EXPECT_EQ(zx_vmar_map(region[0],
                          ZX_VM_PERM_READ | ZX_VM_PERM_WRITE |
                          ZX_VM_SPECIFIC,
                          map_addr[0] - region_addr[0], vmo2, 0, PAGE_SIZE, &map_addr[1]),
              ZX_ERR_NO_MEMORY);

    // Attempt an overmapping that is larger than the original mapping
    EXPECT_EQ(zx_vmar_map(region[0], ZX_VM_PERM_READ | ZX_VM_PERM_WRITE |
                          ZX_VM_SPECIFIC,
                          map_addr[0] - region_addr[0], vmo2, 0,
                          4 * PAGE_SIZE, &map_addr[1]),
              ZX_ERR_NO_MEMORY);

    // Attempt to allocate a region on top
    EXPECT_EQ(zx_vmar_allocate(region[0],
                               ZX_VM_CAN_MAP_READ | ZX_VM_CAN_MAP_WRITE |
                               ZX_VM_SPECIFIC,
                               map_addr[0] - region_addr[0], PAGE_SIZE, &region[1], &region_addr[1]),
              ZX_ERR_NO_MEMORY);

    // Unmap the mapping
    ASSERT_EQ(zx_vmar_unmap(region[0], map_addr[0], 2 * PAGE_SIZE), ZX_OK);

    // Create a region, and try to map on top of it
    ASSERT_EQ(zx_vmar_allocate(region[0],
                               ZX_VM_CAN_MAP_READ | ZX_VM_CAN_MAP_WRITE |
                               ZX_VM_SPECIFIC,
                               PAGE_SIZE, 2 * PAGE_SIZE, &region[1], &region_addr[1]),
              ZX_OK);

    // Attempt a full overmapping
    EXPECT_EQ(zx_vmar_map(region[0],
                          ZX_VM_PERM_READ | ZX_VM_PERM_WRITE |
                          ZX_VM_SPECIFIC,
                          region_addr[1] - region_addr[0], vmo2, 0, 2 * PAGE_SIZE, &map_addr[1]),
              ZX_ERR_NO_MEMORY);

    // Attempt a partial overmapping
    EXPECT_EQ(zx_vmar_map(region[0],
                          ZX_VM_PERM_READ | ZX_VM_PERM_WRITE |
                          ZX_VM_SPECIFIC, region_addr[1] - region_addr[0],
                          vmo2, 0, PAGE_SIZE, &map_addr[1]),
              ZX_ERR_NO_MEMORY);

    // Attempt an overmapping that is larger than the original region
    EXPECT_EQ(zx_vmar_map(region[0], ZX_VM_PERM_READ | ZX_VM_PERM_WRITE |
                          ZX_VM_SPECIFIC, region_addr[1] - region_addr[0],
                          vmo2, 0, 4 * PAGE_SIZE, &map_addr[1]),
              ZX_ERR_NO_MEMORY);

    // Attempt to allocate a region on top
    EXPECT_EQ(zx_vmar_allocate(region[0],
                               ZX_VM_CAN_MAP_READ | ZX_VM_CAN_MAP_WRITE |
                               ZX_VM_SPECIFIC,
                               region_addr[1] - region_addr[0], PAGE_SIZE,
                               &region[2], &region_addr[2]),
              ZX_ERR_NO_MEMORY);

    EXPECT_EQ(zx_handle_close(vmo), ZX_OK);
    EXPECT_EQ(zx_handle_close(vmo2), ZX_OK);
    EXPECT_EQ(zx_handle_close(region[0]), ZX_OK);
    EXPECT_EQ(zx_handle_close(region[1]), ZX_OK);
    EXPECT_EQ(zx_handle_close(vmar), ZX_OK);
    EXPECT_EQ(zx_handle_close(process), ZX_OK);

    END_TEST;
}

// Test passing in bad arguments
bool invalid_args_test() {
    BEGIN_TEST;

    zx_handle_t process;
    zx_handle_t vmar;
    zx_handle_t vmo;
    zx_handle_t region;
    uintptr_t region_addr, map_addr;

    ASSERT_EQ(zx_process_create(zx_job_default(), kProcessName, sizeof(kProcessName) - 1,
                                0, &process, &vmar), ZX_OK);
    ASSERT_EQ(zx_vmo_create(4 * PAGE_SIZE, 0, &vmo), ZX_OK);

    // Bad handle
    EXPECT_EQ(zx_vmar_destroy(vmo), ZX_ERR_WRONG_TYPE);
    EXPECT_EQ(zx_vmar_allocate(vmo,
                               ZX_VM_CAN_MAP_READ | ZX_VM_CAN_MAP_WRITE,
                               0, 10 * PAGE_SIZE, &region, &region_addr),
              ZX_ERR_WRONG_TYPE);
    EXPECT_EQ(zx_vmar_map(vmo, ZX_VM_PERM_READ | ZX_VM_PERM_WRITE,
                          0, vmo, 0, 4 * PAGE_SIZE, &map_addr),
              ZX_ERR_WRONG_TYPE);
    EXPECT_EQ(zx_vmar_map(vmar, ZX_VM_PERM_READ | ZX_VM_PERM_WRITE,
                          0, process, 0, 4 * PAGE_SIZE, &map_addr),
              ZX_ERR_WRONG_TYPE);
    EXPECT_EQ(zx_vmar_unmap(vmo, 0, 0), ZX_ERR_WRONG_TYPE);
    EXPECT_EQ(zx_vmar_protect(vmo, ZX_VM_PERM_READ, 0, 0), ZX_ERR_WRONG_TYPE);

    // Allocating with non-zero offset and without FLAG_SPECIFIC
    EXPECT_EQ(zx_vmar_allocate(vmar,
                               ZX_VM_CAN_MAP_READ | ZX_VM_CAN_MAP_WRITE,
                               PAGE_SIZE, 10 * PAGE_SIZE, &region, &region_addr),
              ZX_ERR_INVALID_ARGS);
    EXPECT_EQ(zx_vmar_map(vmar, ZX_VM_PERM_READ | ZX_VM_PERM_WRITE,
                          PAGE_SIZE, vmo, 0, 4 * PAGE_SIZE, &map_addr),
              ZX_ERR_INVALID_ARGS);

    // Allocate with ZX_VM_PERM_READ.
    EXPECT_EQ(zx_vmar_allocate(vmar, ZX_VM_CAN_MAP_READ | ZX_VM_PERM_READ,
                               PAGE_SIZE, 10 * PAGE_SIZE, &region, &region_addr),
              ZX_ERR_INVALID_ARGS);

    // Using MAP_RANGE with SPECIFIC_OVERWRITE
    EXPECT_EQ(zx_vmar_map(vmar,
                          ZX_VM_PERM_READ | ZX_VM_SPECIFIC_OVERWRITE |
                          ZX_VM_MAP_RANGE,
                          PAGE_SIZE, vmo, 0, 4 * PAGE_SIZE, &map_addr),
              ZX_ERR_INVALID_ARGS);

    // Bad OUT pointers
    uintptr_t *bad_addr_ptr = reinterpret_cast<uintptr_t*>(1);
    zx_handle_t *bad_handle_ptr = reinterpret_cast<zx_handle_t*>(1);
    EXPECT_EQ(zx_vmar_allocate(vmar,
                               ZX_VM_CAN_MAP_READ | ZX_VM_CAN_MAP_WRITE,
                               0, 10 * PAGE_SIZE, &region, bad_addr_ptr),
              ZX_ERR_INVALID_ARGS);
    EXPECT_EQ(zx_vmar_allocate(vmar,
                               ZX_VM_CAN_MAP_READ | ZX_VM_CAN_MAP_WRITE,
                               0, 10 * PAGE_SIZE, bad_handle_ptr, &region_addr),
              ZX_ERR_INVALID_ARGS);
    EXPECT_EQ(zx_vmar_map(vmar, ZX_VM_PERM_READ | ZX_VM_PERM_WRITE,
                          0, vmo, 0, 4 * PAGE_SIZE, bad_addr_ptr),
              ZX_ERR_INVALID_ARGS);

    // Non-page-aligned arguments
    EXPECT_EQ(zx_vmar_allocate(vmar,
                               ZX_VM_CAN_MAP_READ | ZX_VM_CAN_MAP_WRITE,
                               0, PAGE_SIZE - 1, &region, &region_addr),
              ZX_ERR_INVALID_ARGS);
    EXPECT_EQ(zx_vmar_allocate(vmar,
                               ZX_VM_CAN_MAP_READ | ZX_VM_CAN_MAP_WRITE |
                               ZX_VM_CAN_MAP_SPECIFIC,
                               PAGE_SIZE - 1, PAGE_SIZE, &region, &region_addr),
              ZX_ERR_INVALID_ARGS);
    // Try the invalid maps with and without ZX_VM_MAP_RANGE.
    for (size_t i = 0; i < 2; ++i) {
        const uint32_t map_range = i ? ZX_VM_MAP_RANGE : 0;
        // Specific, misaligned vmar offset
        EXPECT_EQ(zx_vmar_map(vmar,
                              ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_SPECIFIC | map_range,
                              PAGE_SIZE - 1, vmo, 0, 4 * PAGE_SIZE, &map_addr),
                  ZX_ERR_INVALID_ARGS);
        // Specific, misaligned vmo offset
        EXPECT_EQ(zx_vmar_map(vmar,
                              ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_SPECIFIC | map_range,
                              PAGE_SIZE, vmo, PAGE_SIZE - 1, 3 * PAGE_SIZE, &map_addr),
                  ZX_ERR_INVALID_ARGS);
        // Non-specific, misaligned vmo offset
        EXPECT_EQ(zx_vmar_map(vmar,
                              ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | map_range,
                              0, vmo, PAGE_SIZE - 1, 3 * PAGE_SIZE, &map_addr),
                  ZX_ERR_INVALID_ARGS);
    }
    EXPECT_EQ(zx_vmar_map(vmar, ZX_VM_PERM_READ | ZX_VM_PERM_WRITE,
                          0, vmo, 0, 4 * PAGE_SIZE, &map_addr),
              ZX_OK);
    EXPECT_EQ(zx_vmar_unmap(vmar, map_addr + 1, PAGE_SIZE), ZX_ERR_INVALID_ARGS);
    EXPECT_EQ(zx_vmar_protect(vmar, ZX_VM_PERM_READ, map_addr + 1, PAGE_SIZE),
              ZX_ERR_INVALID_ARGS);
    EXPECT_EQ(zx_vmar_unmap(vmar, map_addr, 4 * PAGE_SIZE), ZX_OK);

    // Overflowing vmo_offset
    EXPECT_EQ(zx_vmar_map(vmar,
                          ZX_VM_PERM_READ | ZX_VM_PERM_WRITE,
                          0, vmo, UINT64_MAX + 1 - PAGE_SIZE, PAGE_SIZE, &map_addr),
              ZX_ERR_INVALID_ARGS);
    EXPECT_EQ(zx_vmar_map(vmar, ZX_VM_PERM_READ | ZX_VM_PERM_WRITE,
                          0, vmo, UINT64_MAX + 1 - 2 * PAGE_SIZE, PAGE_SIZE,
                          &map_addr),
              ZX_OK);
    EXPECT_EQ(zx_vmar_unmap(vmar, map_addr, PAGE_SIZE), ZX_OK);

    // size=0
    EXPECT_EQ(zx_vmar_allocate(vmar,
                               ZX_VM_CAN_MAP_READ | ZX_VM_CAN_MAP_WRITE,
                               0, 0, &region, &region_addr),
              ZX_ERR_INVALID_ARGS);
    EXPECT_EQ(zx_vmar_map(vmar, ZX_VM_PERM_READ | ZX_VM_PERM_WRITE,
                          0, vmo, 0, 0, &map_addr),
              ZX_ERR_INVALID_ARGS);
    EXPECT_EQ(zx_vmar_map(vmar, ZX_VM_PERM_READ | ZX_VM_PERM_WRITE,
                          0, vmo, 0, 4 * PAGE_SIZE, &map_addr),
              ZX_OK);
    EXPECT_EQ(zx_vmar_unmap(vmar, map_addr, 0), ZX_ERR_INVALID_ARGS);
    EXPECT_EQ(zx_vmar_protect(vmar, ZX_VM_PERM_READ, map_addr, 0),
              ZX_ERR_INVALID_ARGS);
    EXPECT_EQ(zx_vmar_unmap(vmar, map_addr, 4 * PAGE_SIZE), ZX_OK);

    // size rounds up to 0
    constexpr size_t bad_size = fbl::numeric_limits<size_t>::max() - PAGE_SIZE + 2;
    static_assert(((bad_size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1)) == 0, "");
    EXPECT_EQ(zx_vmar_allocate(vmar,
                               ZX_VM_CAN_MAP_READ | ZX_VM_CAN_MAP_WRITE,
                               0, bad_size, &region, &region_addr),
              ZX_ERR_INVALID_ARGS);
    EXPECT_EQ(zx_vmar_map(vmar, ZX_VM_PERM_READ | ZX_VM_PERM_WRITE, 0, vmo, 0, bad_size,
                          &map_addr),
              ZX_ERR_INVALID_ARGS);
    EXPECT_EQ(zx_vmar_map(vmar, ZX_VM_PERM_READ | ZX_VM_MAP_RANGE, 0, vmo, 0, bad_size,
                          &map_addr),
              ZX_ERR_INVALID_ARGS);
    // Attempt bad protect/unmaps
    EXPECT_EQ(zx_vmar_map(vmar, ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_SPECIFIC,
                          PAGE_SIZE, vmo, 0, 4 * PAGE_SIZE, &map_addr),
              ZX_OK);
    for (ssize_t i = -1; i < 2; ++i) {
        EXPECT_EQ(zx_vmar_protect(vmar, ZX_VM_PERM_READ, map_addr + PAGE_SIZE * i, bad_size),
                  ZX_ERR_INVALID_ARGS);
        EXPECT_EQ(zx_vmar_unmap(vmar, map_addr + PAGE_SIZE * i, bad_size), ZX_ERR_INVALID_ARGS);
    }
    EXPECT_EQ(zx_vmar_unmap(vmar, map_addr, 4 * PAGE_SIZE), ZX_OK);

    // Flags with invalid bits set
    EXPECT_EQ(zx_vmar_allocate(vmar,
                               ZX_VM_PERM_READ | ZX_VM_CAN_MAP_READ |
                               ZX_VM_CAN_MAP_WRITE, 0, 4 * PAGE_SIZE, &region, &region_addr),
              ZX_ERR_INVALID_ARGS);
    EXPECT_EQ(zx_vmar_allocate(vmar,
                               ZX_VM_CAN_MAP_READ | (1 << 31),
                               0, 4 * PAGE_SIZE, &region, &region_addr),
              ZX_ERR_INVALID_ARGS);
    EXPECT_EQ(zx_vmar_map(vmar, ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_CAN_MAP_EXECUTE,
                          0, vmo, 0, 4 * PAGE_SIZE, &map_addr),
              ZX_ERR_INVALID_ARGS);
    EXPECT_EQ(zx_vmar_map(vmar,
                          ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | (1 << 31),
                          0, vmo, 0, 4 * PAGE_SIZE, &map_addr),
              ZX_ERR_INVALID_ARGS);
    EXPECT_EQ(zx_vmar_map(vmar,
                          ZX_VM_PERM_READ | ZX_VM_PERM_WRITE,
                          0, vmo, 0, 4 * PAGE_SIZE, &map_addr),
              ZX_OK);
    EXPECT_EQ(zx_vmar_protect(vmar, ZX_VM_PERM_READ | ZX_VM_CAN_MAP_WRITE,
                              map_addr, 4 * PAGE_SIZE),
              ZX_ERR_INVALID_ARGS);
    EXPECT_EQ(zx_vmar_protect(vmar, ZX_VM_PERM_READ | (1 << 31), map_addr,
                              4 * PAGE_SIZE),
              ZX_ERR_INVALID_ARGS);
    EXPECT_EQ(zx_vmar_unmap(vmar, map_addr, 4 * PAGE_SIZE), ZX_OK);

    EXPECT_EQ(zx_handle_close(vmo), ZX_OK);
    EXPECT_EQ(zx_handle_close(vmar), ZX_OK);
    EXPECT_EQ(zx_handle_close(process), ZX_OK);

    END_TEST;
}

// Test passing in unaligned lens to unmap/protect
bool unaligned_len_test() {
    BEGIN_TEST;

    zx_handle_t process;
    zx_handle_t vmar;
    zx_handle_t vmo;
    uintptr_t map_addr;

    ASSERT_EQ(zx_process_create(zx_job_default(), kProcessName, sizeof(kProcessName) - 1,
                                0, &process, &vmar), ZX_OK);
    ASSERT_EQ(zx_vmo_create(4 * PAGE_SIZE, 0, &vmo), ZX_OK);

    ASSERT_EQ(zx_vmar_map(vmar, ZX_VM_PERM_READ, 0, vmo, 0, 4 * PAGE_SIZE, &map_addr),
              ZX_OK);
    EXPECT_EQ(zx_vmar_protect(vmar, ZX_VM_PERM_READ | ZX_VM_PERM_WRITE,
                              map_addr, 4 * PAGE_SIZE - 1),
              ZX_OK);
    EXPECT_EQ(zx_vmar_unmap(vmar, map_addr, 4 * PAGE_SIZE - 1), ZX_OK);

    // Make sure we can't access the last page of the memory mappings anymore
    {
        uint8_t buf;
        size_t read;
        EXPECT_EQ(zx_process_read_memory(process, map_addr + 3 * PAGE_SIZE, &buf, 1, &read),
                  ZX_ERR_NO_MEMORY);
    }

    EXPECT_EQ(zx_handle_close(vmo), ZX_OK);
    EXPECT_EQ(zx_handle_close(vmar), ZX_OK);
    EXPECT_EQ(zx_handle_close(process), ZX_OK);

    END_TEST;
}

// Test passing in unaligned lens to map
bool unaligned_len_map_test() {
    BEGIN_TEST;

    zx_handle_t process;
    zx_handle_t vmar;
    zx_handle_t vmo;
    uintptr_t map_addr;

    ASSERT_EQ(zx_process_create(zx_job_default(), kProcessName, sizeof(kProcessName) - 1,
                                0, &process, &vmar), ZX_OK);
    ASSERT_EQ(zx_vmo_create(4 * PAGE_SIZE, 0, &vmo), ZX_OK);

    for (size_t i = 0; i < 2; ++i) {
        const uint32_t map_range = i ? ZX_VM_MAP_RANGE : 0;
        ASSERT_EQ(zx_vmar_map(vmar, ZX_VM_PERM_READ | map_range, 0, vmo, 0, 4 * PAGE_SIZE - 1,
                              &map_addr),
                  ZX_OK);

        // Make sure we can access the last page of the memory mapping
        {
            uint8_t buf;
            size_t read;
            EXPECT_EQ(zx_process_read_memory(process, map_addr + 3 * PAGE_SIZE, &buf, 1, &read),
                      ZX_OK);
        }

        EXPECT_EQ(zx_vmar_unmap(vmar, map_addr, 4 * PAGE_SIZE - 1), ZX_OK);
        // Make sure we can't access the last page of the memory mappings anymore
        {
            uint8_t buf;
            size_t read;
            EXPECT_EQ(zx_process_read_memory(process, map_addr + 3 * PAGE_SIZE, &buf, 1, &read),
                      ZX_ERR_NO_MEMORY);
        }
    }

    EXPECT_EQ(zx_handle_close(vmo), ZX_OK);
    EXPECT_EQ(zx_handle_close(vmar), ZX_OK);
    EXPECT_EQ(zx_handle_close(process), ZX_OK);

    END_TEST;
}

// Validate that dropping vmar handle rights affects mapping privileges
bool rights_drop_test() {
    BEGIN_TEST;

    zx_handle_t process;
    zx_handle_t vmar;
    zx_handle_t vmo;
    zx_handle_t region;
    uintptr_t map_addr;
    uintptr_t region_addr;

    ASSERT_EQ(zx_process_create(zx_job_default(), kProcessName, sizeof(kProcessName) - 1,
                                0, &process, &vmar), ZX_OK);
    ASSERT_EQ(zx_vmo_create(PAGE_SIZE, 0, &vmo), ZX_OK);

    const uint32_t test_rights[][2] = {
        { ZX_RIGHT_READ, ZX_VM_PERM_READ },
        { ZX_RIGHT_READ | ZX_RIGHT_WRITE, ZX_VM_PERM_READ | ZX_VM_PERM_WRITE },
        { ZX_RIGHT_READ | ZX_RIGHT_EXECUTE, ZX_VM_PERM_READ | ZX_VM_PERM_EXECUTE },
    };
    for (size_t i = 0; i < fbl::count_of(test_rights); ++i) {
        uint32_t right = test_rights[i][0];
        uint32_t perm = test_rights[i][1];

        zx_handle_t new_h;
        ASSERT_EQ(zx_handle_duplicate(vmar, right, &new_h), ZX_OK);

        // Try to create a mapping with permissions we don't have
        EXPECT_EQ(zx_vmar_map(new_h, kRwxMapPerm, 0, vmo, 0, PAGE_SIZE, &map_addr),
                  ZX_ERR_ACCESS_DENIED);

        // Try to create a mapping with permissions we do have
        ASSERT_EQ(zx_vmar_map(new_h, perm, 0, vmo, 0, PAGE_SIZE, &map_addr),
                  ZX_OK);

        // Attempt to use protect to increase privileges
        EXPECT_EQ(zx_vmar_protect(new_h, kRwxMapPerm, map_addr, PAGE_SIZE),
                  ZX_ERR_ACCESS_DENIED);

        EXPECT_EQ(zx_vmar_unmap(new_h, map_addr, PAGE_SIZE), ZX_OK);

        // Attempt to create a region that can map write (this would allow us to
        // then make writeable mappings inside of it).
        EXPECT_EQ(zx_vmar_allocate(new_h, kRwxAllocPerm, 0, 10 * PAGE_SIZE, &region, &region_addr),
                  ZX_ERR_ACCESS_DENIED);

        EXPECT_EQ(zx_handle_close(new_h), ZX_OK);
    }

    EXPECT_EQ(zx_handle_close(vmo), ZX_OK);
    EXPECT_EQ(zx_handle_close(vmar), ZX_OK);
    EXPECT_EQ(zx_handle_close(process), ZX_OK);

    END_TEST;
}

// Validate that protect can't be used to escalate mapping privileges beyond
// the VMAR handle's and the original VMO handle's
bool protect_test() {
    BEGIN_TEST;

    zx_handle_t process;
    zx_handle_t vmar;
    zx_handle_t vmo;
    uintptr_t map_addr;

    ASSERT_EQ(zx_process_create(zx_job_default(), kProcessName, sizeof(kProcessName) - 1,
                                0, &process, &vmar), ZX_OK);
    ASSERT_EQ(zx_vmo_create(PAGE_SIZE, 0, &vmo), ZX_OK);

    const uint32_t test_rights[][2] = {
        { ZX_RIGHT_READ, ZX_VM_PERM_READ },
        { ZX_RIGHT_READ | ZX_RIGHT_WRITE, ZX_VM_PERM_READ | ZX_VM_PERM_WRITE },
        { ZX_RIGHT_READ | ZX_RIGHT_EXECUTE, ZX_VM_PERM_READ | ZX_VM_PERM_EXECUTE },
    };
    for (size_t i = 0; i < fbl::count_of(test_rights); ++i) {
        uint32_t right = test_rights[i][0];
        zx_vm_option_t perm = test_rights[i][1];

        zx_handle_t new_h;
        ASSERT_EQ(zx_handle_duplicate(vmo, right | ZX_RIGHT_MAP, &new_h), ZX_OK);

        // Try to create a mapping with permissions we don't have
        EXPECT_EQ(zx_vmar_map(vmar, kRwxMapPerm, 0, new_h, 0, PAGE_SIZE, &map_addr),
                  ZX_ERR_ACCESS_DENIED);

        // Try to create a mapping with permissions we do have
        ASSERT_EQ(zx_vmar_map(vmar, perm, 0, new_h, 0, PAGE_SIZE, &map_addr),
                  ZX_OK);

        // Attempt to use protect to increase privileges to a level allowed by
        // the VMAR but not by the VMO handle
        EXPECT_EQ(zx_vmar_protect(vmar, kRwxMapPerm, map_addr, PAGE_SIZE),
                  ZX_ERR_ACCESS_DENIED);

        EXPECT_EQ(zx_handle_close(new_h), ZX_OK);

        // Try again now that we closed the VMO handle
        EXPECT_EQ(zx_vmar_protect(vmar, kRwxMapPerm, map_addr, PAGE_SIZE),
                  ZX_ERR_ACCESS_DENIED);

        EXPECT_EQ(zx_vmar_unmap(vmar, map_addr, PAGE_SIZE), ZX_OK);
    }

    EXPECT_EQ(zx_handle_close(vmo), ZX_OK);
    EXPECT_EQ(zx_handle_close(vmar), ZX_OK);
    EXPECT_EQ(zx_handle_close(process), ZX_OK);

    END_TEST;
}

// Validate that a region can't be created with higher RWX privileges than its
// parent.
bool nested_region_perms_test() {
    BEGIN_TEST;

    zx_handle_t process;
    zx_handle_t vmar;
    zx_handle_t vmo;
    zx_handle_t region[2] = {};
    uintptr_t region_addr[2];
    uintptr_t map_addr;

    ASSERT_EQ(zx_process_create(zx_job_default(), kProcessName, sizeof(kProcessName) - 1,
                                0, &process, &vmar), ZX_OK);

    ASSERT_EQ(zx_vmo_create(PAGE_SIZE, 0, &vmo), ZX_OK);

    // List of pairs of alloc/map perms to try to exclude
    const zx_vm_option_t test_perm[][2] = {
        { ZX_VM_CAN_MAP_READ, ZX_VM_PERM_READ },
        { ZX_VM_CAN_MAP_WRITE, ZX_VM_PERM_WRITE },
        { ZX_VM_CAN_MAP_EXECUTE, ZX_VM_PERM_EXECUTE },
    };

    for (size_t i = 0; i < fbl::count_of(test_perm); ++i) {
        const zx_vm_option_t excluded_alloc_perm = test_perm[i][0];
        const zx_vm_option_t excluded_map_perm = test_perm[i][1];

        ASSERT_EQ(zx_vmar_allocate(vmar,
                                   kRwxAllocPerm ^ excluded_alloc_perm,
                                   0, 10 * PAGE_SIZE, &region[0], &region_addr[0]),
                  ZX_OK);

        // Should fail since region[0] does not have the right perms
        EXPECT_EQ(zx_vmar_allocate(region[0], kRwxAllocPerm, 0, PAGE_SIZE,
                                   &region[1], &region_addr[1]),
                  ZX_ERR_ACCESS_DENIED);

        // Try to create a mapping in region[0] with the dropped rights
        EXPECT_EQ(zx_vmar_map(region[0], kRwxMapPerm, 0, vmo, 0, PAGE_SIZE, &map_addr),
                  ZX_ERR_ACCESS_DENIED);

        // Successfully create a mapping in region[0] (skip if we excluded READ,
        // since all mappings must be readable on most CPUs)
        if (excluded_map_perm != ZX_VM_PERM_READ) {
            EXPECT_EQ(zx_vmar_map(region[0], kRwxMapPerm ^ excluded_map_perm, 0,
                                  vmo, 0, PAGE_SIZE, &map_addr),
                      ZX_OK);
            EXPECT_EQ(zx_vmar_unmap(region[0], map_addr, PAGE_SIZE), ZX_OK);
        }

        // Successfully create a subregion in region[0]
        EXPECT_EQ(zx_vmar_allocate(region[0], kRwxAllocPerm ^ excluded_alloc_perm,
                                   0, PAGE_SIZE, &region[1], &region_addr[1]),
                  ZX_OK);
        EXPECT_EQ(zx_vmar_destroy(region[1]), ZX_OK);
        EXPECT_EQ(zx_handle_close(region[1]), ZX_OK);

        EXPECT_EQ(zx_vmar_destroy(region[0]), ZX_OK);
        EXPECT_EQ(zx_handle_close(region[0]), ZX_OK);
    }

    // Make sure we can't use SPECIFIC in a region without CAN_MAP_SPECIFIC
    ASSERT_EQ(zx_vmar_allocate(vmar, kRwxAllocPerm, 0, 10 * PAGE_SIZE,
                               &region[0], &region_addr[0]),
              ZX_OK);
    EXPECT_EQ(zx_vmar_map(region[0],
                          ZX_VM_SPECIFIC | ZX_VM_PERM_READ,
                          PAGE_SIZE, vmo, 0, PAGE_SIZE, &map_addr),
              ZX_ERR_ACCESS_DENIED);
    EXPECT_EQ(zx_vmar_map(region[0],
                          ZX_VM_SPECIFIC_OVERWRITE | ZX_VM_PERM_READ,
                          PAGE_SIZE, vmo, 0, PAGE_SIZE, &map_addr),
              ZX_ERR_ACCESS_DENIED);
    EXPECT_EQ(zx_vmar_destroy(region[0]), ZX_OK);
    EXPECT_EQ(zx_handle_close(region[0]), ZX_OK);

    EXPECT_EQ(zx_handle_close(vmo), ZX_OK);
    EXPECT_EQ(zx_handle_close(vmar), ZX_OK);
    EXPECT_EQ(zx_handle_close(process), ZX_OK);

    END_TEST;
}

bool object_info_test() {
    BEGIN_TEST;

    zx_handle_t process;
    zx_handle_t vmar;
    zx_handle_t region;
    uintptr_t region_addr;

    ASSERT_EQ(zx_process_create(zx_job_default(), kProcessName, sizeof(kProcessName) - 1,
                                0, &process, &vmar), ZX_OK);

    const size_t region_size = PAGE_SIZE * 10;

    ASSERT_EQ(zx_vmar_allocate(vmar, ZX_VM_CAN_MAP_READ | ZX_VM_CAN_MAP_WRITE,
                               0, region_size, &region, &region_addr),
              ZX_OK);

    zx_info_vmar_t info;
    ASSERT_EQ(zx_object_get_info(region, ZX_INFO_VMAR, &info, sizeof(info), NULL, NULL),
              ZX_OK);
    EXPECT_EQ(info.base, region_addr);
    EXPECT_EQ(info.len, region_size);

    EXPECT_EQ(zx_handle_close(region), ZX_OK);
    EXPECT_EQ(zx_handle_close(vmar), ZX_OK);
    EXPECT_EQ(zx_handle_close(process), ZX_OK);

    END_TEST;
}

// Verify that we can split a single mapping with an unmap call
bool unmap_split_test() {
    BEGIN_TEST;

    zx_handle_t process;
    zx_handle_t vmar;
    zx_handle_t vmo;
    uintptr_t mapping_addr[3];

    ASSERT_EQ(zx_process_create(zx_job_default(), kProcessName, sizeof(kProcessName) - 1,
                                0, &process, &vmar), ZX_OK);

    ASSERT_EQ(zx_vmo_create(4 * PAGE_SIZE, 0, &vmo), ZX_OK);

    // Set up mappings to test on
    for (uintptr_t& addr : mapping_addr) {
        EXPECT_EQ(zx_vmar_map(vmar, ZX_VM_PERM_READ | ZX_VM_PERM_WRITE,
                              0, vmo, 0, 4 * PAGE_SIZE, &addr),
                  ZX_OK);
    }

    // Unmap from the left
    EXPECT_EQ(zx_vmar_unmap(vmar, mapping_addr[0], 2 * PAGE_SIZE), ZX_OK);
    EXPECT_TRUE(check_pages_mapped(process, mapping_addr[0], 0b1100, 4));
    // Unmap the rest
    EXPECT_EQ(zx_vmar_unmap(vmar, mapping_addr[0] + 2 * PAGE_SIZE, 2 * PAGE_SIZE), ZX_OK);
    EXPECT_TRUE(check_pages_mapped(process, mapping_addr[0], 0b0000, 4));

    // Unmap from the right
    EXPECT_EQ(zx_vmar_unmap(vmar, mapping_addr[1] + 2 * PAGE_SIZE, 2 * PAGE_SIZE), ZX_OK);
    EXPECT_TRUE(check_pages_mapped(process, mapping_addr[1], 0b0011, 4));
    // Unmap the rest
    EXPECT_EQ(zx_vmar_unmap(vmar, mapping_addr[1], 2 * PAGE_SIZE), ZX_OK);
    EXPECT_TRUE(check_pages_mapped(process, mapping_addr[1], 0b0000, 4));

    // Unmap from the center
    EXPECT_EQ(zx_vmar_unmap(vmar, mapping_addr[2] + PAGE_SIZE, 2 * PAGE_SIZE), ZX_OK);
    EXPECT_TRUE(check_pages_mapped(process, mapping_addr[2], 0b1001, 4));
    // Unmap the rest
    EXPECT_EQ(zx_vmar_unmap(vmar, mapping_addr[2], PAGE_SIZE), ZX_OK);
    EXPECT_EQ(zx_vmar_unmap(vmar, mapping_addr[2] + 3 * PAGE_SIZE, PAGE_SIZE), ZX_OK);
    EXPECT_TRUE(check_pages_mapped(process, mapping_addr[2], 0b0000, 4));

    zx_info_vmar_t info;
    ASSERT_EQ(zx_object_get_info(vmar, ZX_INFO_VMAR, &info, sizeof(info), NULL, NULL),
              ZX_OK);

    // Make sure we can map over these again
    for (uintptr_t addr : mapping_addr) {
        const size_t offset = addr - info.base;
        EXPECT_EQ(zx_vmar_map(vmar,
                              ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_SPECIFIC,
                              offset, vmo, 0, 4 * PAGE_SIZE, &addr),
                  ZX_OK);
        EXPECT_TRUE(check_pages_mapped(process, addr, 0b1111, 4));
        EXPECT_EQ(zx_vmar_unmap(vmar, addr, 4 * PAGE_SIZE), ZX_OK);
    }

    EXPECT_EQ(zx_handle_close(vmar), ZX_OK);
    EXPECT_EQ(zx_handle_close(vmo), ZX_OK);
    EXPECT_EQ(zx_handle_close(process), ZX_OK);

    END_TEST;
}

// Verify that we can unmap multiple ranges simultaneously
bool unmap_multiple_test() {
    BEGIN_TEST;

    zx_handle_t process;
    zx_handle_t vmar;
    zx_handle_t vmo;
    zx_handle_t subregion;
    uintptr_t mapping_addr[3];
    uintptr_t subregion_addr;

    ASSERT_EQ(zx_process_create(zx_job_default(), kProcessName, sizeof(kProcessName) - 1,
                                0, &process, &vmar), ZX_OK);

    const size_t mapping_size = 4 * PAGE_SIZE;
    ASSERT_EQ(zx_vmo_create(mapping_size, 0, &vmo), ZX_OK);

    // Create two mappings
    for (size_t i = 0; i < 2; ++i) {
        ASSERT_EQ(zx_vmar_map(vmar,
                              ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_SPECIFIC,
                              i * mapping_size, vmo, 0, mapping_size, &mapping_addr[i]),
                  ZX_OK);
    }
    EXPECT_EQ(mapping_addr[0] + mapping_size, mapping_addr[1]);
    // Unmap from the right of the first and the left of the second
    EXPECT_EQ(zx_vmar_unmap(vmar, mapping_addr[0] + 2 * PAGE_SIZE, 3 * PAGE_SIZE), ZX_OK);
    EXPECT_TRUE(check_pages_mapped(process, mapping_addr[0], 0b1110'0011, 8));
    // Unmap the rest
    EXPECT_EQ(zx_vmar_unmap(vmar, mapping_addr[0], 2 * PAGE_SIZE), ZX_OK);
    EXPECT_EQ(zx_vmar_unmap(vmar, mapping_addr[1] + 1 * PAGE_SIZE, 3 * PAGE_SIZE), ZX_OK);
    EXPECT_TRUE(check_pages_mapped(process, mapping_addr[0], 0b0000'0000, 8));

    // Create two mappings with a gap, and verify we can unmap them
    for (size_t i = 0; i < 2; ++i) {
        ASSERT_EQ(zx_vmar_map(vmar,
                              ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_SPECIFIC,
                              2 * i * mapping_size, vmo, 0, mapping_size, &mapping_addr[i]),
                  ZX_OK);
    }
    EXPECT_EQ(mapping_addr[0] + 2 * mapping_size, mapping_addr[1]);
    // Unmap all of the left one and some of the right one
    EXPECT_EQ(zx_vmar_unmap(vmar, mapping_addr[0], 2 * mapping_size + PAGE_SIZE), ZX_OK);
    EXPECT_TRUE(check_pages_mapped(process, mapping_addr[0], 0b1110'0000'0000, 12));
    // Unmap the rest
    EXPECT_EQ(zx_vmar_unmap(vmar, mapping_addr[1] + 1 * PAGE_SIZE, 3 * PAGE_SIZE), ZX_OK);
    EXPECT_TRUE(check_pages_mapped(process, mapping_addr[0], 0b0000'0000'0000, 12));

    // Create two mappings with a subregion between, should be able to unmap
    // them (and destroy the subregion in the process).
    for (size_t i = 0; i < 2; ++i) {
        ASSERT_EQ(zx_vmar_map(vmar,
                              ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_SPECIFIC,
                              2 * i * mapping_size, vmo, 0, mapping_size, &mapping_addr[i]),
                  ZX_OK);
    }
    ASSERT_EQ(zx_vmar_allocate(vmar,
                               ZX_VM_CAN_MAP_READ | ZX_VM_CAN_MAP_WRITE |
                               ZX_VM_CAN_MAP_SPECIFIC | ZX_VM_SPECIFIC,
                               mapping_size, mapping_size, &subregion, &subregion_addr),
              ZX_OK);
    ASSERT_EQ(zx_vmar_map(subregion,
                          ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_SPECIFIC,
                          0, vmo, 0, PAGE_SIZE, &mapping_addr[2]),
              ZX_OK);
    EXPECT_EQ(mapping_addr[0] + 2 * mapping_size, mapping_addr[1]);
    EXPECT_EQ(mapping_addr[0] + mapping_size, mapping_addr[2]);
    EXPECT_TRUE(check_pages_mapped(process, mapping_addr[0], 0b1111'0001'1111, 12));
    // Unmap all of the left one and some of the right one
    EXPECT_EQ(zx_vmar_unmap(vmar, mapping_addr[0], 2 * mapping_size + PAGE_SIZE), ZX_OK);
    EXPECT_TRUE(check_pages_mapped(process, mapping_addr[0], 0b1110'0000'0000, 12));
    // Try to map in the subregion again, should fail due to being destroyed
    ASSERT_EQ(zx_vmar_map(subregion,
                          ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_SPECIFIC,
                          PAGE_SIZE, vmo, 0, PAGE_SIZE, &mapping_addr[2]),
              ZX_ERR_BAD_STATE);
    // Unmap the rest
    EXPECT_EQ(zx_vmar_unmap(vmar, mapping_addr[1] + 1 * PAGE_SIZE, 3 * PAGE_SIZE), ZX_OK);
    EXPECT_TRUE(check_pages_mapped(process, mapping_addr[0], 0b0000'0000'0000, 12));
    EXPECT_EQ(zx_handle_close(subregion), ZX_OK);

    // Create two mappings with a subregion after.  Partial unmap of the
    // subregion should fail, full unmap should succeed.
    for (size_t i = 0; i < 2; ++i) {
        ASSERT_EQ(zx_vmar_map(vmar,
                              ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_SPECIFIC,
                              i * mapping_size, vmo, 0, mapping_size, &mapping_addr[i]),
                  ZX_OK);
    }
    ASSERT_EQ(zx_vmar_allocate(vmar,
                               ZX_VM_CAN_MAP_READ | ZX_VM_CAN_MAP_WRITE |
                               ZX_VM_CAN_MAP_SPECIFIC | ZX_VM_SPECIFIC,
                               2 * mapping_size, mapping_size, &subregion, &subregion_addr),
              ZX_OK);
    ASSERT_EQ(zx_vmar_map(subregion,
                          ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_SPECIFIC,
                          0, vmo, 0, PAGE_SIZE, &mapping_addr[2]),
              ZX_OK);
    EXPECT_EQ(mapping_addr[0] + mapping_size, mapping_addr[1]);
    EXPECT_EQ(mapping_addr[0] + 2 * mapping_size, mapping_addr[2]);
    EXPECT_TRUE(check_pages_mapped(process, mapping_addr[0], 0b0001'1111'1111, 12));
    // Unmap some of the left one through to all but the last page of the subregion
    EXPECT_EQ(zx_vmar_unmap(vmar, mapping_addr[0] + PAGE_SIZE, 3 * mapping_size - 2 * PAGE_SIZE),
              ZX_ERR_INVALID_ARGS);
    EXPECT_TRUE(check_pages_mapped(process, mapping_addr[0], 0b0001'1111'1111, 12));
    // Try again, but unmapping all of the subregion
    EXPECT_EQ(zx_vmar_unmap(vmar, mapping_addr[0] + PAGE_SIZE, 3 * mapping_size - PAGE_SIZE),
              ZX_OK);
    EXPECT_TRUE(check_pages_mapped(process, mapping_addr[0], 0b0000'0000'0001, 12));
    // Try to map in the subregion again, should fail due to being destroyed
    ASSERT_EQ(zx_vmar_map(subregion,
                          ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_SPECIFIC,
                          PAGE_SIZE, vmo, 0, PAGE_SIZE, &mapping_addr[2]),
              ZX_ERR_BAD_STATE);
    // Unmap the rest
    EXPECT_EQ(zx_vmar_unmap(vmar, mapping_addr[0], PAGE_SIZE), ZX_OK);
    EXPECT_TRUE(check_pages_mapped(process, mapping_addr[0], 0b0000'0000'0000, 12));
    EXPECT_EQ(zx_handle_close(subregion), ZX_OK);

    // Create two mappings with a subregion before.  Partial unmap of the
    // subregion should fail, full unmap should succeed.
    for (size_t i = 0; i < 2; ++i) {
        ASSERT_EQ(zx_vmar_map(vmar,
                              ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_SPECIFIC,
                              (i + 1) * mapping_size, vmo, 0, mapping_size, &mapping_addr[i]),
                  ZX_OK);
    }
    ASSERT_EQ(zx_vmar_allocate(vmar,
                               ZX_VM_CAN_MAP_READ | ZX_VM_CAN_MAP_WRITE |
                               ZX_VM_CAN_MAP_SPECIFIC | ZX_VM_SPECIFIC,
                               0, mapping_size, &subregion, &subregion_addr),
              ZX_OK);
    ASSERT_EQ(zx_vmar_map(subregion,
                          ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_SPECIFIC,
                          mapping_size - PAGE_SIZE, vmo, 0, PAGE_SIZE, &mapping_addr[2]),
              ZX_OK);
    EXPECT_EQ(subregion_addr + mapping_size, mapping_addr[0]);
    EXPECT_EQ(subregion_addr + 2 * mapping_size, mapping_addr[1]);
    EXPECT_TRUE(check_pages_mapped(process, subregion_addr, 0b1111'1111'1000, 12));
    // Try to unmap everything except the first page of the subregion
    EXPECT_EQ(zx_vmar_unmap(vmar, subregion_addr + PAGE_SIZE, 3 * mapping_size - PAGE_SIZE),
              ZX_ERR_INVALID_ARGS);
    EXPECT_TRUE(check_pages_mapped(process, subregion_addr, 0b1111'1111'1000, 12));
    // Try again, but unmapping all of the subregion
    EXPECT_EQ(zx_vmar_unmap(vmar, subregion_addr, 3 * mapping_size), ZX_OK);
    EXPECT_TRUE(check_pages_mapped(process, subregion_addr, 0b0000'0000'0000, 12));
    // Try to map in the subregion again, should fail due to being destroyed
    ASSERT_EQ(zx_vmar_map(subregion,
                          ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_SPECIFIC,
                          PAGE_SIZE, vmo, 0, PAGE_SIZE, &mapping_addr[2]),
              ZX_ERR_BAD_STATE);
    EXPECT_EQ(zx_handle_close(subregion), ZX_OK);

    EXPECT_EQ(zx_handle_close(vmar), ZX_OK);
    EXPECT_EQ(zx_handle_close(vmo), ZX_OK);
    EXPECT_EQ(zx_handle_close(process), ZX_OK);

    END_TEST;
}
1372
// Verify that unmap succeeds even when the base of the range is not mapped
bool unmap_base_not_mapped_test() {
    BEGIN_TEST;

    zx_handle_t process;
    zx_handle_t vmar;
    zx_handle_t vmo;
    uintptr_t mapping_addr;

    ASSERT_EQ(zx_process_create(zx_job_default(), kProcessName, sizeof(kProcessName) - 1,
                                0, &process, &vmar), ZX_OK);

    const size_t mapping_size = 4 * PAGE_SIZE;
    ASSERT_EQ(zx_vmo_create(mapping_size, 0, &vmo), ZX_OK);

    ASSERT_EQ(zx_vmar_map(vmar,
                          ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_SPECIFIC,
                          PAGE_SIZE, vmo, 0, mapping_size, &mapping_addr),
              ZX_OK);
    ASSERT_EQ(zx_vmar_unmap(vmar, mapping_addr - PAGE_SIZE, mapping_size + PAGE_SIZE),
              ZX_OK);

    // Try again, but this time with a mapping below the base of the unmapped range
    ASSERT_EQ(zx_vmar_map(vmar,
                          ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_SPECIFIC,
                          0, vmo, 0, mapping_size, &mapping_addr),
              ZX_OK);
    for (size_t gap = PAGE_SIZE; gap < 3 * PAGE_SIZE; gap += PAGE_SIZE) {
        ASSERT_EQ(zx_vmar_map(vmar,
                              ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_SPECIFIC,
                              mapping_size + gap, vmo, 0, mapping_size, &mapping_addr),
                  ZX_OK);
        ASSERT_EQ(zx_vmar_unmap(vmar, mapping_addr - PAGE_SIZE, mapping_size + PAGE_SIZE),
                  ZX_OK);
    }

    EXPECT_EQ(zx_handle_close(vmar), ZX_OK);
    EXPECT_EQ(zx_handle_close(vmo), ZX_OK);
    EXPECT_EQ(zx_handle_close(process), ZX_OK);

    END_TEST;
}

// Verify that we can overwrite subranges and multiple ranges simultaneously
bool map_specific_overwrite_test() {
    BEGIN_TEST;

    zx_handle_t process;
    zx_handle_t vmar;
    zx_handle_t vmo, vmo2;
    zx_handle_t subregion;
    uintptr_t mapping_addr[2];
    uintptr_t subregion_addr;
    uint8_t buf[1];
    size_t len;

    ASSERT_EQ(zx_process_create(zx_job_default(), kProcessName, sizeof(kProcessName) - 1,
                                0, &process, &vmar), ZX_OK);

    const size_t mapping_size = 4 * PAGE_SIZE;
    ASSERT_EQ(zx_vmo_create(mapping_size * 2, 0, &vmo), ZX_OK);
    ASSERT_EQ(zx_vmo_create(mapping_size * 2, 0, &vmo2), ZX_OK);

    // Tag each page of the VMOs so we can identify which mappings are from
    // which.
    for (size_t i = 0; i < mapping_size / PAGE_SIZE; ++i) {
        buf[0] = 1;
        ASSERT_EQ(zx_vmo_write(vmo, buf, i * PAGE_SIZE, 1), ZX_OK);
        buf[0] = 2;
        ASSERT_EQ(zx_vmo_write(vmo2, buf, i * PAGE_SIZE, 1), ZX_OK);
    }

    // Create a single mapping and overwrite it
    ASSERT_EQ(zx_vmar_map(vmar,
                          ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_SPECIFIC,
                          PAGE_SIZE, vmo, 0, mapping_size, &mapping_addr[0]),
              ZX_OK);
    // Try overmapping with SPECIFIC but not SPECIFIC_OVERWRITE
    EXPECT_EQ(zx_vmar_map(vmar,
                          ZX_VM_PERM_READ | ZX_VM_PERM_WRITE |
                          ZX_VM_SPECIFIC,
                          PAGE_SIZE, vmo2, 0, mapping_size, &mapping_addr[1]),
              ZX_ERR_NO_MEMORY);
    // Try again with SPECIFIC_OVERWRITE
    EXPECT_EQ(zx_vmar_map(vmar,
                          ZX_VM_PERM_READ | ZX_VM_PERM_WRITE |
                          ZX_VM_SPECIFIC_OVERWRITE,
                          PAGE_SIZE, vmo2, 0, mapping_size, &mapping_addr[1]),
              ZX_OK);
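    // The overwrite should have replaced the old mapping in place, so the new
    // mapping lands at the same address and shows vmo2's tag on every page.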
    EXPECT_EQ(mapping_addr[0], mapping_addr[1]);
    for (size_t i = 0; i < mapping_size / PAGE_SIZE; ++i) {
        EXPECT_EQ(zx_process_read_memory(process, mapping_addr[0] + i * PAGE_SIZE, buf, 1, &len),
                  ZX_OK);
        EXPECT_EQ(buf[0], 2u);
    }

    // Overmap the middle of it
    EXPECT_EQ(zx_vmar_map(vmar,
                          ZX_VM_PERM_READ | ZX_VM_PERM_WRITE |
                          ZX_VM_SPECIFIC_OVERWRITE,
                          2 * PAGE_SIZE, vmo, 0, 2 * PAGE_SIZE, &mapping_addr[0]),
              ZX_OK);
    EXPECT_EQ(mapping_addr[0], mapping_addr[1] + PAGE_SIZE);
    for (size_t i = 0; i < mapping_size / PAGE_SIZE; ++i) {
        EXPECT_EQ(zx_process_read_memory(process, mapping_addr[1] + i * PAGE_SIZE, buf, 1, &len),
                  ZX_OK);
        EXPECT_EQ(buf[0], (i == 0 || i == 3) ? 2u : 1u);
    }

    // Create an adjacent sub-region, try to overmap it
    ASSERT_EQ(zx_vmar_allocate(vmar,
                               ZX_VM_CAN_MAP_READ | ZX_VM_CAN_MAP_WRITE |
                               ZX_VM_SPECIFIC,
                               PAGE_SIZE + mapping_size, mapping_size, &subregion, &subregion_addr),
              ZX_OK);
    EXPECT_EQ(subregion_addr, mapping_addr[1] + mapping_size);
    EXPECT_EQ(zx_vmar_map(vmar,
                          ZX_VM_PERM_READ | ZX_VM_PERM_WRITE |
                          ZX_VM_SPECIFIC_OVERWRITE,
                          PAGE_SIZE, vmo2, 0, 2 * mapping_size, &mapping_addr[0]),
              ZX_ERR_INVALID_ARGS);
    // Tear it all down
    EXPECT_EQ(zx_vmar_unmap(vmar, mapping_addr[1], 2 * mapping_size),
              ZX_OK);

    EXPECT_EQ(zx_handle_close(subregion), ZX_OK);

    EXPECT_EQ(zx_handle_close(vmar), ZX_OK);
    EXPECT_EQ(zx_handle_close(vmo), ZX_OK);
    EXPECT_EQ(zx_handle_close(vmo2), ZX_OK);
    EXPECT_EQ(zx_handle_close(process), ZX_OK);

    END_TEST;
}

// Verify that we can split a single mapping with a protect call
bool protect_split_test() {
    BEGIN_TEST;

    zx_handle_t process;
    zx_handle_t vmar;
    zx_handle_t vmo;
    uintptr_t mapping_addr;

    ASSERT_EQ(zx_process_create(zx_job_default(), kProcessName, sizeof(kProcessName) - 1,
                                0, &process, &vmar), ZX_OK);

    ASSERT_EQ(zx_vmo_create(4 * PAGE_SIZE, 0, &vmo), ZX_OK);

    // Protect from the left
    ASSERT_EQ(zx_vmar_map(vmar,
                          ZX_VM_PERM_READ | ZX_VM_PERM_WRITE,
                          0, vmo, 0, 4 * PAGE_SIZE, &mapping_addr),
              ZX_OK);
    EXPECT_EQ(zx_vmar_protect(vmar, ZX_VM_PERM_READ, mapping_addr, 2 * PAGE_SIZE),
              ZX_OK);
    // TODO(teisenbe): Test to validate perms changed, need to export more debug
    // info
    EXPECT_TRUE(check_pages_mapped(process, mapping_addr, 0b1111, 4));
    EXPECT_EQ(zx_vmar_unmap(vmar, mapping_addr, 4 * PAGE_SIZE), ZX_OK);
    EXPECT_TRUE(check_pages_mapped(process, mapping_addr, 0b0000, 4));

    // Protect from the right
    ASSERT_EQ(zx_vmar_map(vmar,
                          ZX_VM_PERM_READ | ZX_VM_PERM_WRITE,
                          0, vmo, 0, 4 * PAGE_SIZE, &mapping_addr),
              ZX_OK);
    EXPECT_EQ(zx_vmar_protect(vmar, ZX_VM_PERM_READ, mapping_addr + 2 * PAGE_SIZE,
                              2 * PAGE_SIZE),
              ZX_OK);
    // TODO(teisenbe): Test to validate perms changed, need to export more debug
    // info
    EXPECT_TRUE(check_pages_mapped(process, mapping_addr, 0b1111, 4));
    EXPECT_EQ(zx_vmar_unmap(vmar, mapping_addr, 4 * PAGE_SIZE), ZX_OK);
    EXPECT_TRUE(check_pages_mapped(process, mapping_addr, 0b0000, 4));

    // Protect from the center
    ASSERT_EQ(zx_vmar_map(vmar,
                          ZX_VM_PERM_READ | ZX_VM_PERM_WRITE,
                          0, vmo, 0, 4 * PAGE_SIZE, &mapping_addr),
              ZX_OK);
    EXPECT_EQ(zx_vmar_protect(vmar, ZX_VM_PERM_READ, mapping_addr + PAGE_SIZE,
                              2 * PAGE_SIZE),
              ZX_OK);
    // TODO(teisenbe): Test to validate perms changed, need to export more debug
    // info
    EXPECT_TRUE(check_pages_mapped(process, mapping_addr, 0b1111, 4));
    EXPECT_EQ(zx_vmar_unmap(vmar, mapping_addr, 4 * PAGE_SIZE), ZX_OK);
    EXPECT_TRUE(check_pages_mapped(process, mapping_addr, 0b0000, 4));

    EXPECT_EQ(zx_handle_close(vmar), ZX_OK);
    EXPECT_EQ(zx_handle_close(vmo), ZX_OK);
    EXPECT_EQ(zx_handle_close(process), ZX_OK);

    END_TEST;
}

// Validate that protect can be used across multiple mappings.  Make sure that
// intersecting a subregion or a gap fails.
bool protect_multiple_test() {
    BEGIN_TEST;

    zx_handle_t process;
    zx_handle_t vmar;
    zx_handle_t vmo, vmo2;
    zx_handle_t subregion;
    uintptr_t mapping_addr[3];
    uintptr_t subregion_addr;

    ASSERT_EQ(zx_process_create(zx_job_default(), kProcessName, sizeof(kProcessName) - 1,
                                0, &process, &vmar), ZX_OK);
    const size_t mapping_size = 4 * PAGE_SIZE;
    ASSERT_EQ(zx_vmo_create(mapping_size, 0, &vmo), ZX_OK);
    ASSERT_EQ(zx_handle_duplicate(vmo, ZX_RIGHT_MAP | ZX_RIGHT_READ, &vmo2), ZX_OK);
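    // vmo2 is duplicated without ZX_RIGHT_WRITE, so later requests for
    // ZX_VM_PERM_WRITE on mappings backed by it should be denied.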

    // Protect from the right on the first mapping, all of the second mapping,
    // and from the left on the third mapping.
    ASSERT_EQ(zx_vmar_map(vmar,
                          ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_SPECIFIC,
                          0, vmo, 0, mapping_size, &mapping_addr[0]),
              ZX_OK);
    ASSERT_EQ(zx_vmar_map(vmar,
                          ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_SPECIFIC,
                          mapping_size, vmo, 0, mapping_size, &mapping_addr[1]),
              ZX_OK);
    ASSERT_EQ(zx_vmar_map(vmar,
                          ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_SPECIFIC,
                          2 * mapping_size, vmo, 0, mapping_size, &mapping_addr[2]),
              ZX_OK);
    EXPECT_EQ(zx_vmar_protect(vmar, ZX_VM_PERM_READ, mapping_addr[0] + PAGE_SIZE,
                              3 * mapping_size - 2 * PAGE_SIZE),
              ZX_OK);
    // TODO(teisenbe): Test to validate perms changed, need to export more debug
    // info
    EXPECT_TRUE(check_pages_mapped(process, mapping_addr[0], 0b1111'1111'1111, 12));
    EXPECT_EQ(zx_vmar_unmap(vmar, mapping_addr[0], 3 * mapping_size), ZX_OK);
    EXPECT_TRUE(check_pages_mapped(process, mapping_addr[0], 0b0000'0000'0000, 12));

    // Same thing, but map middle region with a VMO without the WRITE right
    ASSERT_EQ(zx_vmar_map(vmar,
                          ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_SPECIFIC,
                          0, vmo, 0, mapping_size, &mapping_addr[0]),
              ZX_OK);
    ASSERT_EQ(zx_vmar_map(vmar,
                          ZX_VM_PERM_READ | ZX_VM_SPECIFIC,
                          mapping_size, vmo2, 0, mapping_size, &mapping_addr[1]),
              ZX_OK);
    ASSERT_EQ(zx_vmar_map(vmar,
                          ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_SPECIFIC,
                          2 * mapping_size, vmo, 0, mapping_size, &mapping_addr[2]),
              ZX_OK);
    EXPECT_EQ(zx_vmar_protect(vmar, ZX_VM_PERM_READ | ZX_VM_PERM_WRITE,
                              mapping_addr[0] + PAGE_SIZE,
                              3 * mapping_size - 2 * PAGE_SIZE),
              ZX_ERR_ACCESS_DENIED);
    // TODO(teisenbe): Test to validate no perms changed, need to export more debug
    // info
    EXPECT_TRUE(check_pages_mapped(process, mapping_addr[0], 0b1111'1111'1111, 12));
    EXPECT_EQ(zx_vmar_unmap(vmar, mapping_addr[0], 3 * mapping_size), ZX_OK);
    EXPECT_TRUE(check_pages_mapped(process, mapping_addr[0], 0b0000'0000'0000, 12));

    // Try to protect across a gap
    ASSERT_EQ(zx_vmar_map(vmar,
                          ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_SPECIFIC,
                          0, vmo, 0, mapping_size, &mapping_addr[0]),
              ZX_OK);
    ASSERT_EQ(zx_vmar_map(vmar,
                          ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_SPECIFIC,
                          2 * mapping_size, vmo, 0, mapping_size, &mapping_addr[2]),
              ZX_OK);
    EXPECT_EQ(zx_vmar_protect(vmar, ZX_VM_PERM_READ, mapping_addr[0] + PAGE_SIZE,
                              3 * mapping_size - 2 * PAGE_SIZE),
              ZX_ERR_NOT_FOUND);
    // TODO(teisenbe): Test to validate no perms changed, need to export more debug
    // info
    EXPECT_TRUE(check_pages_mapped(process, mapping_addr[0], 0b1111'0000'1111, 12));
    EXPECT_EQ(zx_vmar_unmap(vmar, mapping_addr[0], 3 * mapping_size), ZX_OK);
    EXPECT_TRUE(check_pages_mapped(process, mapping_addr[0], 0b0000'0000'0000, 12));

    // Try to protect across an empty subregion
    ASSERT_EQ(zx_vmar_map(vmar,
                          ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_SPECIFIC,
                          0, vmo, 0, mapping_size, &mapping_addr[0]),
              ZX_OK);
    ASSERT_EQ(zx_vmar_allocate(vmar,
                               ZX_VM_CAN_MAP_READ | ZX_VM_CAN_MAP_WRITE |
                               ZX_VM_SPECIFIC,
                               mapping_size, mapping_size, &subregion, &subregion_addr),
              ZX_OK);
    ASSERT_EQ(zx_vmar_map(vmar,
                          ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_SPECIFIC,
                          2 * mapping_size, vmo, 0, mapping_size, &mapping_addr[2]),
              ZX_OK);
    EXPECT_EQ(zx_vmar_protect(vmar, ZX_VM_PERM_READ, mapping_addr[0] + PAGE_SIZE,
                              3 * mapping_size - 2 * PAGE_SIZE),
              ZX_ERR_INVALID_ARGS);
    // TODO(teisenbe): Test to validate no perms changed, need to export more debug
    // info
    EXPECT_TRUE(check_pages_mapped(process, mapping_addr[0], 0b1111'0000'1111, 12));
    EXPECT_EQ(zx_vmar_unmap(vmar, mapping_addr[0], 3 * mapping_size), ZX_OK);
    EXPECT_TRUE(check_pages_mapped(process, mapping_addr[0], 0b0000'0000'0000, 12));
    EXPECT_EQ(zx_handle_close(subregion), ZX_OK);

    // Try to protect across a subregion filled with mappings
    ASSERT_EQ(zx_vmar_map(vmar,
                          ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_SPECIFIC,
                          0, vmo, 0, mapping_size, &mapping_addr[0]),
              ZX_OK);
    ASSERT_EQ(zx_vmar_allocate(vmar,
                               ZX_VM_CAN_MAP_READ | ZX_VM_CAN_MAP_WRITE |
                               ZX_VM_SPECIFIC | ZX_VM_CAN_MAP_SPECIFIC,
                               mapping_size, mapping_size, &subregion, &subregion_addr),
              ZX_OK);
    ASSERT_EQ(zx_vmar_map(subregion,
                          ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_SPECIFIC,
                          0, vmo, 0, mapping_size, &mapping_addr[1]),
              ZX_OK);
    ASSERT_EQ(zx_vmar_map(vmar,
                          ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_SPECIFIC,
                          2 * mapping_size, vmo, 0, mapping_size, &mapping_addr[2]),
              ZX_OK);
    EXPECT_EQ(zx_vmar_protect(vmar, ZX_VM_PERM_READ, mapping_addr[0] + PAGE_SIZE,
                              3 * mapping_size - 2 * PAGE_SIZE),
              ZX_ERR_INVALID_ARGS);
    // TODO(teisenbe): Test to validate no perms changed, need to export more debug
    // info
    EXPECT_TRUE(check_pages_mapped(process, mapping_addr[0], 0b1111'1111'1111, 12));
    EXPECT_EQ(zx_vmar_unmap(vmar, mapping_addr[0], 3 * mapping_size), ZX_OK);
    EXPECT_TRUE(check_pages_mapped(process, mapping_addr[0], 0b0000'0000'0000, 12));
    EXPECT_EQ(zx_handle_close(subregion), ZX_OK);

    EXPECT_EQ(zx_handle_close(vmo), ZX_OK);
    EXPECT_EQ(zx_handle_close(vmo2), ZX_OK);
    EXPECT_EQ(zx_handle_close(vmar), ZX_OK);
    EXPECT_EQ(zx_handle_close(process), ZX_OK);

    END_TEST;
}

// Verify that we can change protections on a demand-paged mapping successfully.
bool protect_over_demand_paged_test() {
    BEGIN_TEST;

    zx_handle_t vmo;
    const size_t size = 100 * PAGE_SIZE;
    ASSERT_EQ(zx_vmo_create(size, 0, &vmo), ZX_OK);

    // TODO(teisenbe): Move this into a separate process; currently we don't
    // have an easy way to run a small test routine in another process.
    uintptr_t mapping_addr;
    ASSERT_EQ(zx_vmar_map(zx_vmar_root_self(),
                          ZX_VM_PERM_READ | ZX_VM_PERM_WRITE,
                          0, vmo, 0, size, &mapping_addr),
              ZX_OK);
    EXPECT_EQ(zx_handle_close(vmo), ZX_OK);
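    // Closing the VMO handle here is fine: the mapping keeps the underlying
    // VMO alive until it is unmapped, which the writes below rely on.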

    fbl::atomic_uint8_t* target =
        reinterpret_cast<fbl::atomic_uint8_t*>(mapping_addr);
    target[0].store(5);
    target[size / 2].store(6);
    target[size - 1].store(7);

    ASSERT_EQ(zx_vmar_protect(zx_vmar_root_self(), ZX_VM_PERM_READ,
                              mapping_addr, size),
              ZX_OK);

    // Attempt to write to the mapping again
    bool success;
    EXPECT_EQ(test_local_address(mapping_addr, true, &success), ZX_OK);
    EXPECT_FALSE(success, "mapping should no longer be writeable");
    EXPECT_EQ(test_local_address(mapping_addr + size / 4, true, &success), ZX_OK);
    EXPECT_FALSE(success, "mapping should no longer be writeable");
    EXPECT_EQ(test_local_address(mapping_addr + size / 2, true, &success), ZX_OK);
    EXPECT_FALSE(success, "mapping should no longer be writeable");
    EXPECT_EQ(test_local_address(mapping_addr + size - 1, true, &success), ZX_OK);
    EXPECT_FALSE(success, "mapping should no longer be writeable");

    EXPECT_EQ(zx_vmar_unmap(zx_vmar_root_self(), mapping_addr, size), ZX_OK);

    END_TEST;
}

// Verify that we can change protections on a large, mostly uncommitted mapping
// successfully.
bool protect_large_uncommitted_test() {
    BEGIN_TEST;

    zx_handle_t vmo;
    // Create a 1GB VMO
    const size_t size = 1ull << 30;
    ASSERT_EQ(zx_vmo_create(size, 0, &vmo), ZX_OK);

    // TODO(teisenbe): Move this into a separate process; currently we don't
    // have an easy way to run a small test routine in another process.
    uintptr_t mapping_addr;
    ASSERT_EQ(zx_vmar_map(zx_vmar_root_self(),
                          ZX_VM_PERM_READ | ZX_VM_PERM_WRITE,
                          0, vmo, 0, size, &mapping_addr),
              ZX_OK);
    EXPECT_EQ(zx_handle_close(vmo), ZX_OK);

    // Make sure some pages exist
    fbl::atomic_uint8_t* target =
        reinterpret_cast<fbl::atomic_uint8_t*>(mapping_addr);
    target[0].store(5);
    target[size / 2].store(6);
    target[size - 1].store(7);

    // Ensure we're misaligned relative to a larger paging structure level.
    // TODO(teisenbe): Would be nice for this to be more arch aware.
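    // With 4 KiB pages, 512 * PAGE_SIZE is 2 MiB (one x86-64 page-directory
    // entry's worth of address space); rounding up and then adding one page
    // guarantees that |base| does not sit on a large-page boundary.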
    const uintptr_t base = ROUNDUP(mapping_addr, 512 * PAGE_SIZE) + PAGE_SIZE;
    const size_t protect_size = mapping_addr + size - base;
    ASSERT_EQ(zx_vmar_protect(zx_vmar_root_self(), ZX_VM_PERM_READ, base,
                              protect_size),
              ZX_OK);

    // Attempt to write to the mapping again
    bool success;
    EXPECT_EQ(test_local_address(mapping_addr, true, &success), ZX_OK);
    EXPECT_TRUE(success, "mapping should still be writeable");
    EXPECT_EQ(test_local_address(mapping_addr + size / 4, true, &success), ZX_OK);
    EXPECT_FALSE(success, "mapping should no longer be writeable");
    EXPECT_EQ(test_local_address(mapping_addr + size / 2, true, &success), ZX_OK);
    EXPECT_FALSE(success, "mapping should no longer be writeable");
    EXPECT_EQ(test_local_address(mapping_addr + size - 1, true, &success), ZX_OK);
    EXPECT_FALSE(success, "mapping should no longer be writeable");

    EXPECT_EQ(zx_vmar_unmap(zx_vmar_root_self(), mapping_addr, size), ZX_OK);

    END_TEST;
}

// Attempt to unmap a large, mostly uncommitted VMO
bool unmap_large_uncommitted_test() {
    BEGIN_TEST;

    zx_handle_t vmo;
    // Create a 1GB VMO
    const size_t size = 1ull << 30;
    ASSERT_EQ(zx_vmo_create(size, 0, &vmo), ZX_OK);

    // TODO(teisenbe): Move this into a separate process; currently we don't
    // have an easy way to run a small test routine in another process.
    uintptr_t mapping_addr;
    ASSERT_EQ(zx_vmar_map(zx_vmar_root_self(),
                          ZX_VM_PERM_READ | ZX_VM_PERM_WRITE,
                          0, vmo, 0, size, &mapping_addr),
              ZX_OK);
    EXPECT_EQ(zx_handle_close(vmo), ZX_OK);

    // Make sure some pages exist
    fbl::atomic_uint8_t* target =
        reinterpret_cast<fbl::atomic_uint8_t*>(mapping_addr);
    target[0].store(5);
    target[size / 2].store(6);
    target[size - 1].store(7);

    // Ensure we're misaligned relative to a larger paging structure level.
    // TODO(teisenbe): Would be nice for this to be more arch aware.
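    // As above: round up to a 2 MiB boundary (with 4 KiB pages) plus one page
    // so the unmap range is misaligned to any large-page mapping.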
    const uintptr_t base = ROUNDUP(mapping_addr, 512 * PAGE_SIZE) + PAGE_SIZE;
    const size_t unmap_size = mapping_addr + size - base;
    ASSERT_EQ(zx_vmar_unmap(zx_vmar_root_self(), base, unmap_size), ZX_OK);

    // Attempt to write to the mapping again
    bool success;
    EXPECT_EQ(test_local_address(mapping_addr, true, &success), ZX_OK);
    EXPECT_TRUE(success, "mapping should still be writeable");
    EXPECT_EQ(test_local_address(mapping_addr + size / 4, true, &success), ZX_OK);
    EXPECT_FALSE(success, "mapping should no longer be writeable");
    EXPECT_EQ(test_local_address(mapping_addr + size / 2, true, &success), ZX_OK);
    EXPECT_FALSE(success, "mapping should no longer be writeable");
    EXPECT_EQ(test_local_address(mapping_addr + size - 1, true, &success), ZX_OK);
    EXPECT_FALSE(success, "mapping should no longer be writeable");

    EXPECT_EQ(zx_vmar_unmap(zx_vmar_root_self(), mapping_addr, size), ZX_OK);

    END_TEST;
}

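// Verify that reads via zx_process_read_memory stop at the first unmapped page
// after a partial unmap.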
bool partial_unmap_and_read() {
    BEGIN_TEST;

    // Map a two-page VMO.
    zx_handle_t vmo;
    ASSERT_EQ(zx_vmo_create(PAGE_SIZE * 2, 0, &vmo), ZX_OK);
    uintptr_t mapping_addr;
    ASSERT_EQ(zx_vmar_map(zx_vmar_root_self(),
                          ZX_VM_PERM_READ | ZX_VM_PERM_WRITE,
                          0, vmo, 0, PAGE_SIZE * 2, &mapping_addr),
              ZX_OK);
    EXPECT_EQ(zx_handle_close(vmo), ZX_OK);

    char* ptr = reinterpret_cast<char*>(mapping_addr);
    memset(ptr, 0, PAGE_SIZE * 2);

    // Unmap the second page.
    EXPECT_EQ(zx_vmar_unmap(zx_vmar_root_self(), mapping_addr + PAGE_SIZE, PAGE_SIZE), ZX_OK);

    char buffer[PAGE_SIZE * 2];
    size_t actual_read;

    // First page succeeds.
    EXPECT_EQ(zx_process_read_memory(zx_process_self(), mapping_addr, buffer, PAGE_SIZE, &actual_read),
              ZX_OK);
    EXPECT_EQ(actual_read, PAGE_SIZE);

    // Second page fails.
    EXPECT_EQ(zx_process_read_memory(zx_process_self(), mapping_addr + PAGE_SIZE, buffer, PAGE_SIZE, &actual_read),
              ZX_ERR_NO_MEMORY);

    // Reading the whole region succeeds, but only reads the first page.
    EXPECT_EQ(zx_process_read_memory(zx_process_self(), mapping_addr, buffer, PAGE_SIZE * 2, &actual_read),
              ZX_OK);
    EXPECT_EQ(actual_read, PAGE_SIZE);

    // Read at the boundary straddling the pages.
    EXPECT_EQ(zx_process_read_memory(zx_process_self(), mapping_addr + PAGE_SIZE - 1, buffer, 2, &actual_read), ZX_OK);
    EXPECT_EQ(actual_read, 1);
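    // In both partial cases the read truncates at the unmapped page and
    // reports the bytes actually copied in |actual_read| rather than failing.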

    // Unmap the leftover first page.
    EXPECT_EQ(zx_vmar_unmap(zx_vmar_root_self(), mapping_addr, PAGE_SIZE), ZX_OK);

    END_TEST;
}

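// Verify that writes via zx_process_write_memory stop at the first unmapped
// page after a partial unmap.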
bool partial_unmap_and_write() {
    BEGIN_TEST;

    // Map a two-page VMO.
    zx_handle_t vmo;
    ASSERT_EQ(zx_vmo_create(PAGE_SIZE * 2, 0, &vmo), ZX_OK);
    uintptr_t mapping_addr;
    ASSERT_EQ(zx_vmar_map(zx_vmar_root_self(),
                          ZX_VM_PERM_READ | ZX_VM_PERM_WRITE,
                          0, vmo, 0, PAGE_SIZE * 2, &mapping_addr),
              ZX_OK);
    EXPECT_EQ(zx_handle_close(vmo), ZX_OK);

    char* ptr = reinterpret_cast<char*>(mapping_addr);
    memset(ptr, 0, PAGE_SIZE * 2);

    // Unmap the second page.
    EXPECT_EQ(zx_vmar_unmap(zx_vmar_root_self(), mapping_addr + PAGE_SIZE, PAGE_SIZE), ZX_OK);

    char buffer[PAGE_SIZE * 2];
    size_t actual_written;
    memset(buffer, 0, PAGE_SIZE * 2);

    // First page succeeds.
    EXPECT_EQ(zx_process_write_memory(zx_process_self(), mapping_addr, buffer, PAGE_SIZE, &actual_written),
              ZX_OK);
    EXPECT_EQ(actual_written, PAGE_SIZE);

    // Second page fails.
    EXPECT_EQ(zx_process_write_memory(zx_process_self(), mapping_addr + PAGE_SIZE, buffer, PAGE_SIZE, &actual_written),
              ZX_ERR_NO_MEMORY);

    // Writing to the whole region succeeds, but only writes the first page.
    EXPECT_EQ(zx_process_write_memory(zx_process_self(), mapping_addr, buffer, PAGE_SIZE * 2, &actual_written),
              ZX_OK);
    EXPECT_EQ(actual_written, PAGE_SIZE);

    // Write at the boundary straddling the pages.
    EXPECT_EQ(zx_process_write_memory(zx_process_self(), mapping_addr + PAGE_SIZE - 1, buffer, 2, &actual_written), ZX_OK);
    EXPECT_EQ(actual_written, 1);

    // Unmap the leftover first page.
    EXPECT_EQ(zx_vmar_unmap(zx_vmar_root_self(), mapping_addr, PAGE_SIZE), ZX_OK);

    END_TEST;
}

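// Verify that reads and writes are bounded correctly when the mapping starts
// at a nonzero offset into the VMO.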
bool partial_unmap_with_vmar_offset() {
    BEGIN_TEST;

    constexpr size_t kOffset = 0x1000;
    constexpr size_t kVmoSize = PAGE_SIZE * 10;
    // Map a VMO, using an offset into the VMO.
    zx_handle_t vmo;
    ASSERT_EQ(zx_vmo_create(kVmoSize, 0, &vmo), ZX_OK);
    uintptr_t mapping_addr;
    ASSERT_EQ(zx_vmar_map(zx_vmar_root_self(),
                          ZX_VM_PERM_READ | ZX_VM_PERM_WRITE,
                          0, vmo, kOffset, kVmoSize - kOffset, &mapping_addr),
              ZX_OK);
    EXPECT_EQ(zx_handle_close(vmo), ZX_OK);

    char* ptr = reinterpret_cast<char*>(mapping_addr);
    memset(ptr, 0, kVmoSize - kOffset);

    // Make sure reads and writes to both the beginning and the end are allowed.
    char buffer[kVmoSize - kOffset];
    size_t actual;
    EXPECT_EQ(zx_process_write_memory(zx_process_self(), mapping_addr, buffer, kVmoSize - kOffset, &actual), ZX_OK);
    EXPECT_EQ(actual, kVmoSize - kOffset);

    EXPECT_EQ(zx_process_read_memory(zx_process_self(), mapping_addr, buffer, kVmoSize - kOffset, &actual), ZX_OK);
    EXPECT_EQ(actual, kVmoSize - kOffset);

    // Check that reads and writes right at the end are OK.
    EXPECT_EQ(zx_process_write_memory(zx_process_self(), mapping_addr + kVmoSize - kOffset - 1, buffer, 1, &actual),
              ZX_OK);
    EXPECT_EQ(zx_process_read_memory(zx_process_self(), mapping_addr + kVmoSize - kOffset - 1, buffer, 1, &actual),
              ZX_OK);

    // Check that reads and writes one past the end fail.
    EXPECT_EQ(zx_process_write_memory(zx_process_self(), mapping_addr + kVmoSize - kOffset, buffer, 1, &actual),
              ZX_ERR_NO_MEMORY);
    EXPECT_EQ(zx_process_read_memory(zx_process_self(), mapping_addr + kVmoSize - kOffset, buffer, 1, &actual),
              ZX_ERR_NO_MEMORY);

    // And check that crossing the boundary works as expected.
    EXPECT_EQ(zx_process_write_memory(zx_process_self(), mapping_addr + kVmoSize - kOffset - 1, buffer, 2, &actual),
              ZX_OK);
    EXPECT_EQ(actual, 1);
    EXPECT_EQ(zx_process_read_memory(zx_process_self(), mapping_addr + kVmoSize - kOffset - 1, buffer, 2, &actual),
              ZX_OK);
    EXPECT_EQ(actual, 1);

    END_TEST;
}

} // namespace

BEGIN_TEST_CASE(vmar_tests)
RUN_TEST(destroy_root_test);
RUN_TEST(basic_allocate_test);
RUN_TEST(allocate_oob_test);
RUN_TEST(allocate_unsatisfiable_test);
RUN_TEST(destroyed_vmar_test);
RUN_TEST(map_over_destroyed_test);
RUN_TEST(map_in_compact_test);
RUN_TEST(overmapping_test);
RUN_TEST(invalid_args_test);
RUN_TEST(unaligned_len_test);
RUN_TEST(unaligned_len_map_test);
RUN_TEST(rights_drop_test);
RUN_TEST(protect_test);
RUN_TEST(nested_region_perms_test);
RUN_TEST(object_info_test);
RUN_TEST(unmap_split_test);
RUN_TEST(unmap_multiple_test);
RUN_TEST(unmap_base_not_mapped_test);
RUN_TEST(map_specific_overwrite_test);
RUN_TEST(protect_split_test);
RUN_TEST(protect_multiple_test);
RUN_TEST(protect_over_demand_paged_test);
RUN_TEST(protect_large_uncommitted_test);
RUN_TEST(unmap_large_uncommitted_test);
RUN_TEST(partial_unmap_and_read);
RUN_TEST(partial_unmap_and_write);
RUN_TEST(partial_unmap_with_vmar_offset);
END_TEST_CASE(vmar_tests)

#ifndef BUILD_COMBINED_TESTS
int main(int argc, char** argv) {
    bool success = unittest_run_all_tests(argc, argv);
    return success ? 0 : -1;
}
#endif