1// Copyright 2016 The Fuchsia Authors. All rights reserved. 2// Use of this source code is governed by a BSD-style license that can be 3// found in the LICENSE file. 4 5#include <ctype.h> 6#include <inttypes.h> 7#include <limits.h> 8#include <stdio.h> 9#include <stdlib.h> 10#include <string.h> 11#include <sys/types.h> 12#include <unistd.h> 13#include <threads.h> 14 15#include <zircon/process.h> 16#include <zircon/syscalls.h> 17#include <zircon/syscalls/object.h> 18#include <fbl/algorithm.h> 19#include <fbl/atomic.h> 20#include <fbl/function.h> 21#include <lib/fzl/memory-probe.h> 22#include <pretty/hexdump.h> 23#include <unittest/unittest.h> 24 25#include "bench.h" 26 27bool vmo_create_test() { 28 BEGIN_TEST; 29 30 zx_status_t status; 31 zx_handle_t vmo[16]; 32 33 // allocate a bunch of vmos then free them 34 for (size_t i = 0; i < fbl::count_of(vmo); i++) { 35 status = zx_vmo_create(i * PAGE_SIZE, 0, &vmo[i]); 36 EXPECT_EQ(ZX_OK, status, "vm_object_create"); 37 } 38 39 for (size_t i = 0; i < fbl::count_of(vmo); i++) { 40 status = zx_handle_close(vmo[i]); 41 EXPECT_EQ(ZX_OK, status, "handle_close"); 42 } 43 44 END_TEST; 45} 46 47bool vmo_read_write_test() { 48 BEGIN_TEST; 49 50 zx_status_t status; 51 zx_handle_t vmo; 52 53 // allocate an object and read/write from it 54 const size_t len = PAGE_SIZE * 4; 55 status = zx_vmo_create(len, 0, &vmo); 56 EXPECT_EQ(status, ZX_OK, "vm_object_create"); 57 58 char buf[len]; 59 status = zx_vmo_read(vmo, buf, 0, sizeof(buf)); 60 EXPECT_EQ(status, ZX_OK, "vm_object_read"); 61 62 // make sure it's full of zeros 63 size_t count = 0; 64 for (auto c: buf) { 65 EXPECT_EQ(c, 0, "zero test"); 66 if (c != 0) { 67 printf("char at offset %#zx is bad\n", count); 68 } 69 count++; 70 } 71 72 memset(buf, 0x99, sizeof(buf)); 73 status = zx_vmo_write(vmo, buf, 0, sizeof(buf)); 74 EXPECT_EQ(status, ZX_OK, "vm_object_write"); 75 76 // map it 77 uintptr_t ptr; 78 status = zx_vmar_map(zx_vmar_root_self(), 79 ZX_VM_PERM_READ | ZX_VM_PERM_WRITE, 0, vmo, 0, 80 len, &ptr); 81 EXPECT_EQ(ZX_OK, status, "vm_map"); 82 EXPECT_NE(0u, ptr, "vm_map"); 83 84 // check that it matches what we last wrote into it 85 EXPECT_BYTES_EQ((uint8_t*)buf, (uint8_t*)ptr, sizeof(buf), "mapped buffer"); 86 87 status = zx_vmar_unmap(zx_vmar_root_self(), ptr, len); 88 EXPECT_EQ(ZX_OK, status, "vm_unmap"); 89 90 // close the handle 91 status = zx_handle_close(vmo); 92 EXPECT_EQ(ZX_OK, status, "handle_close"); 93 94 END_TEST; 95} 96 97bool vmo_read_write_range_test() { 98 BEGIN_TEST; 99 100 zx_status_t status; 101 zx_handle_t vmo; 102 103 // allocate an object 104 const size_t len = PAGE_SIZE * 4; 105 status = zx_vmo_create(len, 0, &vmo); 106 EXPECT_EQ(status, ZX_OK, "vm_object_create"); 107 108 // fail to read past end 109 char buf[len * 2]; 110 status = zx_vmo_read(vmo, buf, 0, sizeof(buf)); 111 EXPECT_EQ(status, ZX_ERR_OUT_OF_RANGE, "vm_object_read past end"); 112 113 // Successfully read 0 bytes at end 114 status = zx_vmo_read(vmo, buf, len, 0); 115 EXPECT_EQ(status, ZX_OK, "vm_object_read zero at end"); 116 117 // Fail to read 0 bytes past end 118 status = zx_vmo_read(vmo, buf, len + 1, 0); 119 EXPECT_EQ(status, ZX_ERR_OUT_OF_RANGE, "vm_object_read zero past end"); 120 121 // fail to write past end 122 status = zx_vmo_write(vmo, buf, 0, sizeof(buf)); 123 EXPECT_EQ(status, ZX_ERR_OUT_OF_RANGE, "vm_object_write past end"); 124 125 // Successfully write 0 bytes at end 126 status = zx_vmo_write(vmo, buf, len, 0); 127 EXPECT_EQ(status, ZX_OK, "vm_object_write zero at end"); 128 129 // Fail to read 0 bytes past end 130 status = zx_vmo_write(vmo, buf, len + 1, 0); 131 EXPECT_EQ(status, ZX_ERR_OUT_OF_RANGE, "vm_object_write zero past end"); 132 133 // Test for unsigned wraparound 134 status = zx_vmo_read(vmo, buf, UINT64_MAX - (len / 2), len); 135 EXPECT_EQ(status, ZX_ERR_OUT_OF_RANGE, "vm_object_read offset + len wraparound"); 136 status = zx_vmo_write(vmo, buf, UINT64_MAX - (len / 2), len); 137 EXPECT_EQ(status, ZX_ERR_OUT_OF_RANGE, "vm_object_write offset + len wraparound"); 138 139 // close the handle 140 status = zx_handle_close(vmo); 141 EXPECT_EQ(ZX_OK, status, "handle_close"); 142 143 END_TEST; 144} 145 146bool vmo_map_test() { 147 BEGIN_TEST; 148 149 zx_status_t status; 150 zx_handle_t vmo; 151 uintptr_t ptr[3] = {}; 152 153 // allocate a vmo 154 status = zx_vmo_create(4 * PAGE_SIZE, 0, &vmo); 155 EXPECT_EQ(ZX_OK, status, "vm_object_create"); 156 157 // do a regular map 158 ptr[0] = 0; 159 status = zx_vmar_map(zx_vmar_root_self(), ZX_VM_PERM_READ, 0, vmo, 0, 160 PAGE_SIZE, &ptr[0]); 161 EXPECT_EQ(ZX_OK, status, "map"); 162 EXPECT_NE(0u, ptr[0], "map address"); 163 //printf("mapped %#" PRIxPTR "\n", ptr[0]); 164 165 // try to map something completely out of range without any fixed mapping, should succeed 166 ptr[2] = UINTPTR_MAX; 167 status = zx_vmar_map(zx_vmar_root_self(), ZX_VM_PERM_READ, 0, vmo, 0, 168 PAGE_SIZE, &ptr[2]); 169 EXPECT_EQ(ZX_OK, status, "map"); 170 EXPECT_NE(0u, ptr[2], "map address"); 171 172 // try to map something completely out of range fixed, should fail 173 uintptr_t map_addr; 174 status = zx_vmar_map(zx_vmar_root_self(), ZX_VM_PERM_READ | ZX_VM_SPECIFIC, 175 UINTPTR_MAX, vmo, 0, PAGE_SIZE, &map_addr); 176 EXPECT_EQ(ZX_ERR_INVALID_ARGS, status, "map"); 177 178 // cleanup 179 status = zx_handle_close(vmo); 180 EXPECT_EQ(ZX_OK, status, "handle_close"); 181 182 for (auto p: ptr) { 183 if (p) { 184 status = zx_vmar_unmap(zx_vmar_root_self(), p, PAGE_SIZE); 185 EXPECT_EQ(ZX_OK, status, "unmap"); 186 } 187 } 188 189 END_TEST; 190} 191 192bool vmo_read_only_map_test() { 193 BEGIN_TEST; 194 195 zx_status_t status; 196 zx_handle_t vmo; 197 198 // allocate an object and read/write from it 199 const size_t len = PAGE_SIZE; 200 status = zx_vmo_create(len, 0, &vmo); 201 EXPECT_EQ(ZX_OK, status, "vm_object_create"); 202 203 // map it 204 uintptr_t ptr; 205 status = zx_vmar_map(zx_vmar_root_self(), ZX_VM_PERM_READ, 0, vmo, 0, 206 len, &ptr); 207 EXPECT_EQ(ZX_OK, status, "vm_map"); 208 EXPECT_NE(0u, ptr, "vm_map"); 209 210 EXPECT_EQ(false, probe_for_write((void*)ptr), "write"); 211 212 status = zx_vmar_unmap(zx_vmar_root_self(), ptr, len); 213 EXPECT_EQ(ZX_OK, status, "vm_unmap"); 214 215 // close the handle 216 status = zx_handle_close(vmo); 217 EXPECT_EQ(ZX_OK, status, "handle_close"); 218 219 END_TEST; 220} 221 222bool vmo_no_perm_map_test() { 223 BEGIN_TEST; 224 225 zx_status_t status; 226 zx_handle_t vmo; 227 228 // allocate an object and read/write from it 229 const size_t len = PAGE_SIZE; 230 status = zx_vmo_create(len, 0, &vmo); 231 EXPECT_EQ(ZX_OK, status, "vm_object_create"); 232 233 // map it with read permissions 234 uintptr_t ptr; 235 status = zx_vmar_map(zx_vmar_root_self(), ZX_VM_PERM_READ, 0, vmo, 0, len, &ptr); 236 EXPECT_EQ(ZX_OK, status, "vm_map"); 237 EXPECT_NE(0u, ptr, "vm_map"); 238 239 // protect it to no permissions 240 status = zx_vmar_protect(zx_vmar_root_self(), 0, ptr, len); 241 EXPECT_EQ(ZX_OK, status, "vm_protect"); 242 243 // test reading writing to the mapping 244 EXPECT_EQ(false, probe_for_read(reinterpret_cast<void*>(ptr)), "read"); 245 EXPECT_EQ(false, probe_for_write(reinterpret_cast<void*>(ptr)), "write"); 246 247 status = zx_vmar_unmap(zx_vmar_root_self(), ptr, len); 248 EXPECT_EQ(ZX_OK, status, "vm_unmap"); 249 250 // close the handle 251 EXPECT_EQ(ZX_OK, zx_handle_close(vmo), "handle_close"); 252 END_TEST; 253} 254 255bool vmo_no_perm_protect_test() { 256 BEGIN_TEST; 257 258 zx_status_t status; 259 zx_handle_t vmo; 260 261 // allocate an object and read/write from it 262 const size_t len = PAGE_SIZE; 263 status = zx_vmo_create(len, 0, &vmo); 264 EXPECT_EQ(ZX_OK, status, "vm_object_create"); 265 266 // map it with no permissions 267 uintptr_t ptr; 268 status = zx_vmar_map(zx_vmar_root_self(), 0, 0, vmo, 0, len, &ptr); 269 EXPECT_EQ(ZX_OK, status, "vm_map"); 270 EXPECT_NE(0u, ptr, "vm_map"); 271 272 // test writing to the mapping 273 EXPECT_EQ(false, probe_for_write(reinterpret_cast<void*>(ptr)), "write"); 274 // test reading to the mapping 275 EXPECT_EQ(false, probe_for_read(reinterpret_cast<void*>(ptr)), "read"); 276 277 // protect it to read permissions and make sure it works as expected 278 status = zx_vmar_protect(zx_vmar_root_self(), ZX_VM_PERM_READ, ptr, len); 279 EXPECT_EQ(ZX_OK, status, "vm_protect"); 280 281 // test writing to the mapping 282 EXPECT_EQ(false, probe_for_write(reinterpret_cast<void*>(ptr)), "write"); 283 284 // test reading from the mapping 285 EXPECT_EQ(true, probe_for_read(reinterpret_cast<void*>(ptr)), "read"); 286 287 status = zx_vmar_unmap(zx_vmar_root_self(), ptr, len); 288 EXPECT_EQ(ZX_OK, status, "vm_unmap"); 289 290 // close the handle 291 EXPECT_EQ(ZX_OK, zx_handle_close(vmo), "handle_close"); 292 END_TEST; 293} 294 295bool vmo_resize_test() { 296 BEGIN_TEST; 297 298 zx_status_t status; 299 zx_handle_t vmo; 300 301 // allocate an object 302 size_t len = PAGE_SIZE * 4; 303 status = zx_vmo_create(len, 0, &vmo); 304 EXPECT_EQ(ZX_OK, status, "vm_object_create"); 305 306 // get the size that we set it to 307 uint64_t size = 0x99999999; 308 status = zx_vmo_get_size(vmo, &size); 309 EXPECT_EQ(ZX_OK, status, "vm_object_get_size"); 310 EXPECT_EQ(len, size, "vm_object_get_size"); 311 312 // try to resize it 313 len += PAGE_SIZE; 314 status = zx_vmo_set_size(vmo, len); 315 EXPECT_EQ(ZX_OK, status, "vm_object_set_size"); 316 317 // get the size again 318 size = 0x99999999; 319 status = zx_vmo_get_size(vmo, &size); 320 EXPECT_EQ(ZX_OK, status, "vm_object_get_size"); 321 EXPECT_EQ(len, size, "vm_object_get_size"); 322 323 // try to resize it to a ludicrous size 324 status = zx_vmo_set_size(vmo, UINT64_MAX); 325 EXPECT_EQ(ZX_ERR_OUT_OF_RANGE, status, "vm_object_set_size too big"); 326 327 // resize it to a non aligned size 328 status = zx_vmo_set_size(vmo, len + 1); 329 EXPECT_EQ(ZX_OK, status, "vm_object_set_size"); 330 331 // size should be rounded up to the next page boundary 332 size = 0x99999999; 333 status = zx_vmo_get_size(vmo, &size); 334 EXPECT_EQ(ZX_OK, status, "vm_object_get_size"); 335 EXPECT_EQ(fbl::round_up(len + 1u, static_cast<size_t>(PAGE_SIZE)), size, "vm_object_get_size"); 336 len = fbl::round_up(len + 1u, static_cast<size_t>(PAGE_SIZE)); 337 338 // map it 339 uintptr_t ptr; 340 status = zx_vmar_map(zx_vmar_root_self(), ZX_VM_PERM_READ, 0, vmo, 0, 341 len, &ptr); 342 EXPECT_EQ(ZX_OK, status, "vm_map"); 343 EXPECT_NE(ptr, 0, "vm_map"); 344 345 // attempt to map expecting an non resizable vmo. 346 uintptr_t ptr2; 347 status = zx_vmar_map( 348 zx_vmar_root_self(), ZX_VM_PERM_READ | ZX_VM_REQUIRE_NON_RESIZABLE, 0, 349 vmo, 0, len, &ptr2); 350 EXPECT_EQ(ZX_ERR_NOT_SUPPORTED, status, "vm_map"); 351 352 // resize it with it mapped 353 status = zx_vmo_set_size(vmo, size); 354 EXPECT_EQ(ZX_OK, status, "vm_object_set_size"); 355 356 // unmap it 357 status = zx_vmar_unmap(zx_vmar_root_self(), ptr, len); 358 EXPECT_EQ(ZX_OK, status, "unmap"); 359 360 // close the handle 361 status = zx_handle_close(vmo); 362 EXPECT_EQ(ZX_OK, status, "handle_close"); 363 364 END_TEST; 365} 366 367// Check that non-resizable VMOs cannot get resized. 368static bool vmo_no_resize_helper(zx_handle_t vmo, const size_t len) { 369 BEGIN_TEST; 370 371 EXPECT_NE(vmo, ZX_HANDLE_INVALID); 372 373 zx_status_t status; 374 status = zx_vmo_set_size(vmo, len + PAGE_SIZE); 375 EXPECT_EQ(ZX_ERR_UNAVAILABLE, status, "vm_object_set_size"); 376 377 status = zx_vmo_set_size(vmo, len - PAGE_SIZE); 378 EXPECT_EQ(ZX_ERR_UNAVAILABLE, status, "vm_object_set_size"); 379 380 size_t size; 381 status = zx_vmo_get_size(vmo, &size); 382 EXPECT_EQ(ZX_OK, status, "vm_object_get_size"); 383 EXPECT_EQ(len, size, "vm_object_get_size"); 384 385 uintptr_t ptr; 386 status = zx_vmar_map( 387 zx_vmar_root_self(), ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_REQUIRE_NON_RESIZABLE, 388 0, vmo, 0, len, 389 &ptr); 390 ASSERT_EQ(ZX_OK, status, "vm_map"); 391 ASSERT_NE(ptr, 0, "vm_map"); 392 393 status = zx_vmar_unmap(zx_vmar_root_self(), ptr, len); 394 EXPECT_EQ(ZX_OK, status, "unmap"); 395 396 status = zx_handle_close(vmo); 397 EXPECT_EQ(ZX_OK, status, "handle_close"); 398 399 END_TEST; 400} 401 402bool vmo_no_resize_test() { 403 const size_t len = PAGE_SIZE * 4; 404 zx_handle_t vmo = ZX_HANDLE_INVALID; 405 406 zx_vmo_create(len, ZX_VMO_NON_RESIZABLE, &vmo); 407 return vmo_no_resize_helper(vmo, len); 408} 409 410bool vmo_info_test() { 411 size_t len = PAGE_SIZE * 4; 412 zx_handle_t vmo = ZX_HANDLE_INVALID; 413 zx_info_vmo_t info; 414 zx_status_t status; 415 416 // Create a non-resizeable VMO, query the INFO on it 417 // and dump it. 418 status = zx_vmo_create(len, ZX_VMO_NON_RESIZABLE, &vmo); 419 EXPECT_EQ(ZX_OK, status, "vm_info_test: vmo_create"); 420 421 status = zx_object_get_info(vmo, ZX_INFO_VMO, &info, 422 sizeof(info), nullptr, nullptr); 423 EXPECT_EQ(ZX_OK, status, "vm_info_test: info_vmo"); 424 425 status = zx_handle_close(vmo); 426 EXPECT_EQ(ZX_OK, status, "vm_info_test: handle_close"); 427 428 EXPECT_EQ(info.size_bytes, len, "vm_info_test: info_vmo.size_bytes"); 429 EXPECT_NE(info.create_options, (1u << 0), 430 "vm_info_test: info_vmo.create_options"); 431// printf("NON_Resizeable VMO, size = %lu, create_options = %ux\n", 432// info.size_bytes, info.create_options); 433 434 // Create a resizeable VMO, query the INFO on it and dump it. 435 len = PAGE_SIZE * 8; 436 zx_vmo_create(len, 0, &vmo); 437 EXPECT_EQ(ZX_OK, status, "vm_info_test: vmo_create"); 438 439 status = zx_object_get_info(vmo, ZX_INFO_VMO, &info, 440 sizeof(info), nullptr, nullptr); 441 EXPECT_EQ(ZX_OK, status, "vm_info_test: info_vmo"); 442 443 status = zx_handle_close(vmo); 444 EXPECT_EQ(ZX_OK, status, "vm_info_test: handle_close"); 445 446 EXPECT_EQ(info.size_bytes, len, "vm_info_test: info_vmo.size_bytes"); 447 EXPECT_EQ(info.create_options, (1u << 0), 448 "vm_info_test: info_vmo.create_options"); 449// printf("Resizeable VMO, size = %lu, create_options = %ux\n", 450// info.size_bytes, info.create_options); 451 452 END_TEST; 453} 454 455bool vmo_no_resize_clone_test() { 456 const size_t len = PAGE_SIZE * 4; 457 zx_handle_t vmo = ZX_HANDLE_INVALID; 458 zx_handle_t clone = ZX_HANDLE_INVALID; 459 460 zx_vmo_create(len, 0, &vmo); 461 zx_vmo_clone(vmo, 462 ZX_VMO_CLONE_COPY_ON_WRITE | ZX_VMO_CLONE_NON_RESIZEABLE, 463 0, len, &clone); 464 return vmo_no_resize_helper(clone, len); 465} 466 467bool vmo_size_align_test() { 468 BEGIN_TEST; 469 470 for (uint64_t s = 0; s < PAGE_SIZE * 4; s++) { 471 zx_handle_t vmo; 472 473 // create a new object with nonstandard size 474 zx_status_t status = zx_vmo_create(s, 0, &vmo); 475 EXPECT_EQ(ZX_OK, status, "vm_object_create"); 476 477 // should be the size rounded up to the nearest page boundary 478 uint64_t size = 0x99999999; 479 status = zx_vmo_get_size(vmo, &size); 480 EXPECT_EQ(ZX_OK, status, "vm_object_get_size"); 481 EXPECT_EQ(fbl::round_up(s, static_cast<size_t>(PAGE_SIZE)), size, "vm_object_get_size"); 482 483 // close the handle 484 EXPECT_EQ(ZX_OK, zx_handle_close(vmo), "handle_close"); 485 } 486 487 END_TEST; 488} 489 490bool vmo_resize_align_test() { 491 BEGIN_TEST; 492 493 // resize a vmo with a particular size and test that the resulting size is aligned on a page 494 // boundary. 495 zx_handle_t vmo; 496 zx_status_t status = zx_vmo_create(0, 0, &vmo); 497 EXPECT_EQ(ZX_OK, status, "vm_object_create"); 498 499 for (uint64_t s = 0; s < PAGE_SIZE * 4; s++) { 500 // set the size of the object 501 zx_status_t status = zx_vmo_set_size(vmo, s); 502 EXPECT_EQ(ZX_OK, status, "vm_object_create"); 503 504 // should be the size rounded up to the nearest page boundary 505 uint64_t size = 0x99999999; 506 status = zx_vmo_get_size(vmo, &size); 507 EXPECT_EQ(ZX_OK, status, "vm_object_get_size"); 508 EXPECT_EQ(fbl::round_up(s, static_cast<size_t>(PAGE_SIZE)), size, "vm_object_get_size"); 509 } 510 511 // close the handle 512 EXPECT_EQ(ZX_OK, zx_handle_close(vmo), "handle_close"); 513 514 END_TEST; 515} 516 517bool vmo_clone_size_align_test() { 518 BEGIN_TEST; 519 520 zx_handle_t vmo; 521 zx_status_t status = zx_vmo_create(0, 0, &vmo); 522 EXPECT_EQ(ZX_OK, status, "vm_object_create"); 523 524 // create clones with different sizes, make sure the created size is a multiple of a page size 525 for (uint64_t s = 0; s < PAGE_SIZE * 4; s++) { 526 zx_handle_t clone_vmo; 527 EXPECT_EQ(ZX_OK, zx_vmo_clone(vmo, ZX_VMO_CLONE_COPY_ON_WRITE, 0, s, &clone_vmo), "vm_clone"); 528 529 // should be the size rounded up to the nearest page boundary 530 uint64_t size = 0x99999999; 531 zx_status_t status = zx_vmo_get_size(clone_vmo, &size); 532 EXPECT_EQ(ZX_OK, status, "vm_object_get_size"); 533 EXPECT_EQ(fbl::round_up(s, static_cast<size_t>(PAGE_SIZE)), size, "vm_object_get_size"); 534 535 // close the handle 536 EXPECT_EQ(ZX_OK, zx_handle_close(clone_vmo), "handle_close"); 537 } 538 539 // close the handle 540 EXPECT_EQ(ZX_OK, zx_handle_close(vmo), "handle_close"); 541 542 END_TEST; 543} 544 545static bool rights_test_map_helper( 546 zx_handle_t vmo, size_t len, uint32_t flags, 547 bool expect_success, zx_status_t fail_err_code, const char *msg) { 548 uintptr_t ptr; 549 550 zx_status_t r = zx_vmar_map(zx_vmar_root_self(), flags, 0, vmo, 0, len, 551 &ptr); 552 if (expect_success) { 553 EXPECT_EQ(ZX_OK, r, msg); 554 555 r = zx_vmar_unmap(zx_vmar_root_self(), ptr, len); 556 EXPECT_EQ(ZX_OK, r, "unmap"); 557 } else { 558 EXPECT_EQ(fail_err_code, r, msg); 559 } 560 561 return true; 562} 563 564// Returns zero on failure. 565static zx_rights_t get_handle_rights(zx_handle_t h) { 566 zx_info_handle_basic_t info; 567 zx_status_t s = zx_object_get_info(h, ZX_INFO_HANDLE_BASIC, &info, 568 sizeof(info), nullptr, nullptr); 569 if (s != ZX_OK) { 570 EXPECT_EQ(s, ZX_OK); // Poison the test 571 return 0; 572 } 573 return info.rights; 574} 575 576bool vmo_rights_test() { 577 BEGIN_TEST; 578 579 char buf[4096]; 580 size_t len = PAGE_SIZE * 4; 581 zx_status_t status; 582 zx_handle_t vmo, vmo2; 583 584 // allocate an object 585 status = zx_vmo_create(len, 0, &vmo); 586 EXPECT_EQ(ZX_OK, status, "vm_object_create"); 587 588 // Check that the handle has at least the expected rights. 589 // This list should match the list in docs/syscalls/vmo_create.md. 590 static const zx_rights_t kExpectedRights = 591 ZX_RIGHT_DUPLICATE | 592 ZX_RIGHT_TRANSFER | 593 ZX_RIGHT_WAIT | 594 ZX_RIGHT_READ | 595 ZX_RIGHT_WRITE | 596 ZX_RIGHT_EXECUTE | 597 ZX_RIGHT_MAP | 598 ZX_RIGHT_GET_PROPERTY | 599 ZX_RIGHT_SET_PROPERTY; 600 EXPECT_EQ(kExpectedRights, kExpectedRights & get_handle_rights(vmo)); 601 602 // test that we can read/write it 603 status = zx_vmo_read(vmo, buf, 0, 0); 604 EXPECT_EQ(0, status, "vmo_read"); 605 status = zx_vmo_write(vmo, buf, 0, 0); 606 EXPECT_EQ(0, status, "vmo_write"); 607 608 vmo2 = ZX_HANDLE_INVALID; 609 zx_handle_duplicate(vmo, ZX_RIGHT_READ, &vmo2); 610 status = zx_vmo_read(vmo2, buf, 0, 0); 611 EXPECT_EQ(0, status, "vmo_read"); 612 status = zx_vmo_write(vmo2, buf, 0, 0); 613 EXPECT_EQ(ZX_ERR_ACCESS_DENIED, status, "vmo_write"); 614 zx_handle_close(vmo2); 615 616 vmo2 = ZX_HANDLE_INVALID; 617 zx_handle_duplicate(vmo, ZX_RIGHT_WRITE, &vmo2); 618 status = zx_vmo_read(vmo2, buf, 0, 0); 619 EXPECT_EQ(ZX_ERR_ACCESS_DENIED, status, "vmo_read"); 620 status = zx_vmo_write(vmo2, buf, 0, 0); 621 EXPECT_EQ(0, status, "vmo_write"); 622 zx_handle_close(vmo2); 623 624 vmo2 = ZX_HANDLE_INVALID; 625 zx_handle_duplicate(vmo, 0, &vmo2); 626 status = zx_vmo_read(vmo2, buf, 0, 0); 627 EXPECT_EQ(ZX_ERR_ACCESS_DENIED, status, "vmo_read"); 628 status = zx_vmo_write(vmo2, buf, 0, 0); 629 EXPECT_EQ(ZX_ERR_ACCESS_DENIED, status, "vmo_write"); 630 zx_handle_close(vmo2); 631 632 // full perm test 633 if (!rights_test_map_helper(vmo, len, 0, true, 0, "map_noperms")) return false; 634 if (!rights_test_map_helper(vmo, len, ZX_VM_PERM_READ, true, 0, "map_read")) return false; 635 if (!rights_test_map_helper(vmo, len, ZX_VM_PERM_WRITE, false, ZX_ERR_INVALID_ARGS, "map_write")) return false; 636 if (!rights_test_map_helper(vmo, len, ZX_VM_PERM_READ | ZX_VM_PERM_WRITE, true, 0, "map_readwrite")) return false; 637 if (!rights_test_map_helper(vmo, len, ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_PERM_EXECUTE, true, 0, "map_readwriteexec")) return false; 638 if (!rights_test_map_helper(vmo, len, ZX_VM_PERM_READ | ZX_VM_PERM_EXECUTE, true, 0, "map_readexec")) return false; 639 640 // try most of the permuations of mapping a vmo with various rights dropped 641 vmo2 = ZX_HANDLE_INVALID; 642 zx_handle_duplicate(vmo, ZX_RIGHT_READ | ZX_RIGHT_WRITE | ZX_RIGHT_EXECUTE, &vmo2); 643 if (!rights_test_map_helper(vmo2, len, 0, false, ZX_ERR_ACCESS_DENIED, "map_noperms")) return false; 644 if (!rights_test_map_helper(vmo2, len, ZX_VM_PERM_READ, false, ZX_ERR_ACCESS_DENIED, "map_read")) return false; 645 if (!rights_test_map_helper(vmo2, len, ZX_VM_PERM_WRITE, false, ZX_ERR_ACCESS_DENIED, "map_write")) return false; 646 if (!rights_test_map_helper(vmo2, len, ZX_VM_PERM_READ | ZX_VM_PERM_WRITE, false, ZX_ERR_ACCESS_DENIED, "map_readwrite")) return false; 647 if (!rights_test_map_helper(vmo2, len, ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_PERM_EXECUTE, false, ZX_ERR_ACCESS_DENIED, "map_readwriteexec")) return false; 648 if (!rights_test_map_helper(vmo2, len, ZX_VM_PERM_READ | ZX_VM_PERM_EXECUTE, false, ZX_ERR_ACCESS_DENIED, "map_readexec")) return false; 649 zx_handle_close(vmo2); 650 651 vmo2 = ZX_HANDLE_INVALID; 652 zx_handle_duplicate(vmo, ZX_RIGHT_READ | ZX_RIGHT_MAP, &vmo2); 653 if (!rights_test_map_helper(vmo2, len, 0, true, 0, "map_noperms")) return false; 654 if (!rights_test_map_helper(vmo2, len, ZX_VM_PERM_READ, true, 0, "map_read")) return false; 655 if (!rights_test_map_helper(vmo2, len, ZX_VM_PERM_WRITE, false, ZX_ERR_INVALID_ARGS, "map_write")) return false; 656 if (!rights_test_map_helper(vmo2, len, ZX_VM_PERM_READ | ZX_VM_PERM_WRITE, false, ZX_ERR_ACCESS_DENIED, "map_readwrite")) return false; 657 if (!rights_test_map_helper(vmo2, len, ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_PERM_EXECUTE, false, ZX_ERR_ACCESS_DENIED, "map_readwriteexec")) return false; 658 if (!rights_test_map_helper(vmo2, len, ZX_VM_PERM_READ | ZX_VM_PERM_EXECUTE, false, ZX_ERR_ACCESS_DENIED, "map_readexec")) return false; 659 zx_handle_close(vmo2); 660 661 vmo2 = ZX_HANDLE_INVALID; 662 zx_handle_duplicate(vmo, ZX_RIGHT_WRITE | ZX_RIGHT_MAP, &vmo2); 663 if (!rights_test_map_helper(vmo2, len, 0, true, 0, "map_noperms")) return false; 664 if (!rights_test_map_helper(vmo2, len, ZX_VM_PERM_READ, false, ZX_ERR_ACCESS_DENIED, "map_read")) return false; 665 if (!rights_test_map_helper(vmo2, len, ZX_VM_PERM_WRITE, false, ZX_ERR_INVALID_ARGS, "map_write")) return false; 666 if (!rights_test_map_helper(vmo2, len, ZX_VM_PERM_READ | ZX_VM_PERM_WRITE, false, ZX_ERR_ACCESS_DENIED, "map_readwrite")) return false; 667 if (!rights_test_map_helper(vmo2, len, ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_PERM_EXECUTE, false, ZX_ERR_ACCESS_DENIED, "map_readwriteexec")) return false; 668 if (!rights_test_map_helper(vmo2, len, ZX_VM_PERM_READ | ZX_VM_PERM_EXECUTE, false, ZX_ERR_ACCESS_DENIED, "map_readexec")) return false; 669 zx_handle_close(vmo2); 670 671 vmo2 = ZX_HANDLE_INVALID; 672 zx_handle_duplicate(vmo, ZX_RIGHT_READ | ZX_RIGHT_WRITE | ZX_RIGHT_MAP, &vmo2); 673 if (!rights_test_map_helper(vmo2, len, 0, true, 0, "map_noperms")) return false; 674 if (!rights_test_map_helper(vmo2, len, ZX_VM_PERM_READ, true, 0, "map_read")) return false; 675 if (!rights_test_map_helper(vmo2, len, ZX_VM_PERM_WRITE, false, ZX_ERR_INVALID_ARGS, "map_write")) return false; 676 if (!rights_test_map_helper(vmo2, len, ZX_VM_PERM_READ | ZX_VM_PERM_WRITE, true, 0, "map_readwrite")) return false; 677 if (!rights_test_map_helper(vmo2, len, ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_PERM_EXECUTE, false, ZX_ERR_ACCESS_DENIED, "map_readwriteexec")) return false; 678 if (!rights_test_map_helper(vmo2, len, ZX_VM_PERM_READ | ZX_VM_PERM_EXECUTE, false, ZX_ERR_ACCESS_DENIED, "map_readexec")) return false; 679 zx_handle_close(vmo2); 680 681 vmo2 = ZX_HANDLE_INVALID; 682 zx_handle_duplicate(vmo, ZX_RIGHT_READ | ZX_RIGHT_EXECUTE | ZX_RIGHT_MAP, &vmo2); 683 if (!rights_test_map_helper(vmo2, len, 0, true, 0, "map_noperms")) return false; 684 if (!rights_test_map_helper(vmo2, len, ZX_VM_PERM_READ, true, 0, "map_read")) return false; 685 if (!rights_test_map_helper(vmo2, len, ZX_VM_PERM_WRITE, false, ZX_ERR_INVALID_ARGS, "map_write")) return false; 686 if (!rights_test_map_helper(vmo2, len, ZX_VM_PERM_READ | ZX_VM_PERM_WRITE, false, ZX_ERR_ACCESS_DENIED, "map_readwrite")) return false; 687 if (!rights_test_map_helper(vmo2, len, ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_PERM_EXECUTE, false, ZX_ERR_ACCESS_DENIED, "map_readwriteexec")) return false; 688 if (!rights_test_map_helper(vmo, len, ZX_VM_PERM_READ | ZX_VM_PERM_EXECUTE, true, 0, "map_readexec")) return false; 689 zx_handle_close(vmo2); 690 691 vmo2 = ZX_HANDLE_INVALID; 692 zx_handle_duplicate(vmo, ZX_RIGHT_READ | ZX_RIGHT_WRITE | ZX_RIGHT_EXECUTE | ZX_RIGHT_MAP, &vmo2); 693 if (!rights_test_map_helper(vmo2, len, 0, true, 0, "map_noperms")) return false; 694 if (!rights_test_map_helper(vmo2, len, ZX_VM_PERM_READ, true, 0, "map_read")) return false; 695 if (!rights_test_map_helper(vmo2, len, ZX_VM_PERM_WRITE, false, ZX_ERR_INVALID_ARGS, "map_write")) return false; 696 if (!rights_test_map_helper(vmo2, len, ZX_VM_PERM_READ | ZX_VM_PERM_WRITE, true, 0, "map_readwrite")) return false; 697 if (!rights_test_map_helper(vmo2, len, ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_PERM_EXECUTE, true, 0, "map_readwriteexec")) return false; 698 if (!rights_test_map_helper(vmo, len, ZX_VM_PERM_READ | ZX_VM_PERM_EXECUTE, true, 0, "map_readexec")) return false; 699 zx_handle_close(vmo2); 700 701 // test that we can get/set a property on it 702 const char *set_name = "test vmo"; 703 status = zx_object_set_property(vmo, ZX_PROP_NAME, set_name, sizeof(set_name)); 704 EXPECT_EQ(ZX_OK, status, "set_property"); 705 char get_name[ZX_MAX_NAME_LEN]; 706 status = zx_object_get_property(vmo, ZX_PROP_NAME, get_name, sizeof(get_name)); 707 EXPECT_EQ(ZX_OK, status, "get_property"); 708 EXPECT_STR_EQ(set_name, get_name, "vmo name"); 709 710 // close the handle 711 status = zx_handle_close(vmo); 712 EXPECT_EQ(ZX_OK, status, "handle_close"); 713 714 END_TEST; 715} 716 717bool vmo_commit_test() { 718 BEGIN_TEST; 719 720 zx_handle_t vmo; 721 zx_status_t status; 722 uintptr_t ptr, ptr2, ptr3; 723 724 // create a vmo 725 const size_t size = 16384; 726 727 status = zx_vmo_create(size, 0, &vmo); 728 EXPECT_EQ(0, status, "vm_object_create"); 729 730 // commit a range of it 731 status = zx_vmo_op_range(vmo, ZX_VMO_OP_COMMIT, 0, size, nullptr, 0); 732 EXPECT_EQ(0, status, "vm commit"); 733 734 // decommit that range 735 status = zx_vmo_op_range(vmo, ZX_VMO_OP_DECOMMIT, 0, size, nullptr, 0); 736 EXPECT_EQ(0, status, "vm decommit"); 737 738 // commit a range of it 739 status = zx_vmo_op_range(vmo, ZX_VMO_OP_COMMIT, 0, size, nullptr, 0); 740 EXPECT_EQ(0, status, "vm commit"); 741 742 // map it 743 ptr = 0; 744 status = zx_vmar_map(zx_vmar_root_self(), 745 ZX_VM_PERM_READ|ZX_VM_PERM_WRITE, 746 0, vmo, 0, size, &ptr); 747 EXPECT_EQ(ZX_OK, status, "map"); 748 EXPECT_NE(ptr, 0, "map address"); 749 750 // second mapping with an offset 751 ptr2 = 0; 752 status = zx_vmar_map(zx_vmar_root_self(), 753 ZX_VM_PERM_READ|ZX_VM_PERM_WRITE, 754 0, vmo, PAGE_SIZE, size, &ptr2); 755 EXPECT_EQ(ZX_OK, status, "map2"); 756 EXPECT_NE(ptr2, 0, "map address2"); 757 758 // third mapping with a totally non-overlapping offset 759 ptr3 = 0; 760 status = zx_vmar_map(zx_vmar_root_self(), 761 ZX_VM_PERM_READ|ZX_VM_PERM_WRITE, 762 0, vmo, size * 2, size, &ptr3); 763 EXPECT_EQ(ZX_OK, status, "map3"); 764 EXPECT_NE(ptr3, 0, "map address3"); 765 766 // write into it at offset PAGE_SIZE, read it back 767 volatile uint32_t *u32 = (volatile uint32_t *)(ptr + PAGE_SIZE); 768 *u32 = 99; 769 EXPECT_EQ(99u, (*u32), "written memory"); 770 771 // check the alias 772 volatile uint32_t *u32a = (volatile uint32_t *)(ptr2); 773 EXPECT_EQ(99u, (*u32a), "written memory"); 774 775 // decommit page 0 776 status = zx_vmo_op_range(vmo, ZX_VMO_OP_DECOMMIT, 0, PAGE_SIZE, nullptr, 0); 777 EXPECT_EQ(0, status, "vm decommit"); 778 779 // verify that it didn't get unmapped 780 EXPECT_EQ(99u, (*u32), "written memory"); 781 // verify that it didn't get unmapped 782 EXPECT_EQ(99u, (*u32a), "written memory2"); 783 784 // decommit page 1 785 status = zx_vmo_op_range(vmo, ZX_VMO_OP_DECOMMIT, PAGE_SIZE, PAGE_SIZE, nullptr, 0); 786 EXPECT_EQ(0, status, "vm decommit"); 787 788 // verify that it did get unmapped 789 EXPECT_EQ(0u, (*u32), "written memory"); 790 // verify that it did get unmapped 791 EXPECT_EQ(0u, (*u32a), "written memory2"); 792 793 // unmap our vmos 794 status = zx_vmar_unmap(zx_vmar_root_self(), ptr, size); 795 EXPECT_EQ(ZX_OK, status, "vm_unmap"); 796 status = zx_vmar_unmap(zx_vmar_root_self(), ptr2, size); 797 EXPECT_EQ(ZX_OK, status, "vm_unmap"); 798 status = zx_vmar_unmap(zx_vmar_root_self(), ptr3, size); 799 EXPECT_EQ(ZX_OK, status, "vm_unmap"); 800 801 // close the handle 802 status = zx_handle_close(vmo); 803 EXPECT_EQ(ZX_OK, status, "handle_close"); 804 805 END_TEST; 806} 807 808bool vmo_zero_page_test() { 809 BEGIN_TEST; 810 811 zx_handle_t vmo; 812 zx_status_t status; 813 uintptr_t ptr[3]; 814 815 // create a vmo 816 const size_t size = PAGE_SIZE * 4; 817 818 EXPECT_EQ(ZX_OK, zx_vmo_create(size, 0, &vmo), "vm_object_create"); 819 820 // make a few mappings of the vmo 821 for (auto &p: ptr) { 822 EXPECT_EQ(ZX_OK, 823 zx_vmar_map(zx_vmar_root_self(), ZX_VM_PERM_READ|ZX_VM_PERM_WRITE, 0, vmo, 0, size, &p), 824 "map"); 825 EXPECT_NONNULL(ptr, "map address"); 826 } 827 828 volatile uint32_t *val = (volatile uint32_t *)ptr[0]; 829 volatile uint32_t *val2 = (volatile uint32_t *)ptr[1]; 830 volatile uint32_t *val3 = (volatile uint32_t *)ptr[2]; 831 832 // read fault in the first mapping 833 EXPECT_EQ(0, *val, "read zero"); 834 835 // write fault the second mapping 836 *val2 = 99; 837 EXPECT_EQ(99, *val2, "read back 99"); 838 839 // expect the third mapping to read fault in the new page 840 EXPECT_EQ(99, *val3, "read 99"); 841 842 // expect the first mapping to have gotten updated with the new mapping 843 // and no longer be mapping the zero page 844 EXPECT_EQ(99, *val, "read 99 from former zero page"); 845 846 // read fault in zeros on the second page 847 val = (volatile uint32_t *)(ptr[0] + PAGE_SIZE); 848 EXPECT_EQ(0, *val, "read zero"); 849 850 // write to the page via a vmo_write call 851 uint32_t v = 100; 852 status = zx_vmo_write(vmo, &v, PAGE_SIZE, sizeof(v)); 853 EXPECT_EQ(ZX_OK, status, "writing to vmo"); 854 855 // expect it to read back the new value 856 EXPECT_EQ(100, *val, "read 100 from former zero page"); 857 858 // read fault in zeros on the third page 859 val = (volatile uint32_t *)(ptr[0] + PAGE_SIZE * 2); 860 EXPECT_EQ(0, *val, "read zero"); 861 862 // commit this range of the vmo via a commit call 863 status = zx_vmo_op_range(vmo, ZX_VMO_OP_COMMIT, PAGE_SIZE * 2, PAGE_SIZE, nullptr, 0); 864 EXPECT_EQ(ZX_OK, status, "committing memory"); 865 866 // write to the third page 867 status = zx_vmo_write(vmo, &v, PAGE_SIZE * 2, sizeof(v)); 868 EXPECT_EQ(ZX_OK, status, "writing to vmo"); 869 870 // expect it to read back the new value 871 EXPECT_EQ(100, *val, "read 100 from former zero page"); 872 873 // unmap 874 for (auto p: ptr) 875 EXPECT_EQ(ZX_OK, zx_vmar_unmap(zx_vmar_root_self(), p, size), "unmap"); 876 877 // close the handle 878 EXPECT_EQ(ZX_OK, zx_handle_close(vmo), "handle_close"); 879 880 END_TEST; 881} 882 883// test set 1: create a few clones, close them 884bool vmo_clone_test_1() { 885 BEGIN_TEST; 886 887 zx_handle_t vmo; 888 zx_handle_t clone_vmo[3]; 889 890 // create a vmo 891 const size_t size = PAGE_SIZE * 4; 892 EXPECT_EQ(ZX_OK, zx_vmo_create(size, 0, &vmo), "vm_object_create"); 893 EXPECT_EQ(ZX_OK, zx_object_set_property(vmo, ZX_PROP_NAME, "test1", 5), "zx_object_set_property"); 894 895 // clone it 896 clone_vmo[0] = ZX_HANDLE_INVALID; 897 EXPECT_EQ(ZX_OK, zx_vmo_clone(vmo, ZX_VMO_CLONE_COPY_ON_WRITE, 0, size, &clone_vmo[0]), "vm_clone"); 898 EXPECT_NE(ZX_HANDLE_INVALID, clone_vmo[0], "vm_clone_handle"); 899 char name[ZX_MAX_NAME_LEN]; 900 EXPECT_EQ(ZX_OK, zx_object_get_property(clone_vmo[0], ZX_PROP_NAME, name, ZX_MAX_NAME_LEN), "zx_object_get_property"); 901 EXPECT_TRUE(!strcmp(name, "test1"), "get_name"); 902 903 // clone it a second time 904 clone_vmo[1] = ZX_HANDLE_INVALID; 905 EXPECT_EQ(ZX_OK, zx_vmo_clone(vmo, ZX_VMO_CLONE_COPY_ON_WRITE, 0, size, &clone_vmo[1]), "vm_clone"); 906 EXPECT_NE(ZX_HANDLE_INVALID, clone_vmo[1], "vm_clone_handle"); 907 908 // clone the clone 909 clone_vmo[2] = ZX_HANDLE_INVALID; 910 EXPECT_EQ(ZX_OK, zx_vmo_clone(clone_vmo[1], ZX_VMO_CLONE_COPY_ON_WRITE, 0, size, &clone_vmo[2]), "vm_clone"); 911 EXPECT_NE(ZX_HANDLE_INVALID, clone_vmo[2], "vm_clone_handle"); 912 913 // close the original handle 914 EXPECT_EQ(ZX_OK, zx_handle_close(vmo), "handle_close"); 915 916 // close the clone handles 917 for (auto h: clone_vmo) 918 EXPECT_EQ(ZX_OK, zx_handle_close(h), "handle_close"); 919 920 END_TEST; 921} 922 923// test set 2: create a clone, verify that it COWs via the read/write interface 924bool vmo_clone_test_2() { 925 BEGIN_TEST; 926 927 zx_handle_t vmo; 928 zx_handle_t clone_vmo[1]; 929 930 // create a vmo 931 const size_t size = PAGE_SIZE * 4; 932 EXPECT_EQ(ZX_OK, zx_vmo_create(size, 0, &vmo), "vm_object_create"); 933 934 // fill the original with stuff 935 for (size_t off = 0; off < size; off += sizeof(off)) { 936 zx_vmo_write(vmo, &off, off, sizeof(off)); 937 } 938 939 // clone it 940 clone_vmo[0] = ZX_HANDLE_INVALID; 941 EXPECT_EQ(ZX_OK, zx_vmo_clone(vmo, ZX_VMO_CLONE_COPY_ON_WRITE, 0, size, &clone_vmo[0]), "vm_clone"); 942 EXPECT_NE(ZX_HANDLE_INVALID, clone_vmo[0], "vm_clone_handle"); 943 944 // verify that the clone reads back as the same 945 for (size_t off = 0; off < size; off += sizeof(off)) { 946 size_t val; 947 948 zx_vmo_read(clone_vmo[0], &val, off, sizeof(val)); 949 950 if (val != off) { 951 EXPECT_EQ(val, off, "vm_clone read back"); 952 break; 953 } 954 } 955 956 // write to part of the clone 957 size_t val = 99; 958 zx_vmo_write(clone_vmo[0], &val, 0, sizeof(val)); 959 960 // verify the clone was written to 961 EXPECT_EQ(ZX_OK, zx_vmo_read(clone_vmo[0], &val, 0, sizeof(val)), "writing to clone"); 962 963 // verify it was written to 964 EXPECT_EQ(99, val, "reading back from clone"); 965 966 // verify that the rest of the page it was written two was cloned 967 for (size_t off = sizeof(val); off < PAGE_SIZE; off += sizeof(off)) { 968 zx_vmo_read(clone_vmo[0], &val, off, sizeof(val)); 969 970 if (val != off) { 971 EXPECT_EQ(val, off, "vm_clone read back"); 972 break; 973 } 974 } 975 976 // verify that it didn't trash the original 977 for (size_t off = 0; off < size; off += sizeof(off)) { 978 zx_vmo_read(vmo, &val, off, sizeof(val)); 979 980 if (val != off) { 981 EXPECT_EQ(val, off, "vm_clone read back of original"); 982 break; 983 } 984 } 985 986 // write to the original in the part that is still visible to the clone 987 val = 99; 988 uint64_t offset = PAGE_SIZE * 2; 989 EXPECT_EQ(ZX_OK, zx_vmo_write(vmo, &val, offset, sizeof(val)), "writing to original"); 990 EXPECT_EQ(ZX_OK, zx_vmo_read(clone_vmo[0], &val, offset, sizeof(val)), "reading back original from clone"); 991 EXPECT_EQ(99, val, "checking value"); 992 993 // close the clone handles 994 for (auto h: clone_vmo) 995 EXPECT_EQ(ZX_OK, zx_handle_close(h), "handle_close"); 996 997 // close the original handle 998 EXPECT_EQ(ZX_OK, zx_handle_close(vmo), "handle_close"); 999 1000 END_TEST; 1001} 1002 1003// test set 3: test COW via a mapping 1004bool vmo_clone_test_3() { 1005 BEGIN_TEST; 1006 1007 zx_handle_t vmo; 1008 zx_handle_t clone_vmo[1]; 1009 uintptr_t ptr; 1010 uintptr_t clone_ptr; 1011 volatile uint32_t *p; 1012 volatile uint32_t *cp; 1013 1014 // create a vmo 1015 const size_t size = PAGE_SIZE * 4; 1016 EXPECT_EQ(ZX_OK, zx_vmo_create(size, 0, &vmo), "vm_object_create"); 1017 1018 // map it 1019 EXPECT_EQ(ZX_OK, 1020 zx_vmar_map(zx_vmar_root_self(), ZX_VM_PERM_READ|ZX_VM_PERM_WRITE, 0, vmo, 0, size, &ptr), 1021 "map"); 1022 EXPECT_NE(ptr, 0, "map address"); 1023 p = (volatile uint32_t *)ptr; 1024 1025 // clone it 1026 clone_vmo[0] = ZX_HANDLE_INVALID; 1027 EXPECT_EQ(ZX_OK, zx_vmo_clone(vmo, ZX_VMO_CLONE_COPY_ON_WRITE, 0, size, &clone_vmo[0]),"vm_clone"); 1028 EXPECT_NE(ZX_HANDLE_INVALID, clone_vmo[0], "vm_clone_handle"); 1029 1030 // Attempt a non-resizable map fails. 1031 EXPECT_EQ(ZX_ERR_NOT_SUPPORTED, 1032 zx_vmar_map(zx_vmar_root_self(), 1033 ZX_VM_PERM_READ|ZX_VM_PERM_WRITE|ZX_VM_REQUIRE_NON_RESIZABLE, 1034 0, clone_vmo[0], 0, size, &clone_ptr), "map"); 1035 1036 // Regular resizable mapping works. 1037 EXPECT_EQ(ZX_OK, 1038 zx_vmar_map(zx_vmar_root_self(), ZX_VM_PERM_READ|ZX_VM_PERM_WRITE, 0, clone_vmo[0], 0, size, &clone_ptr), 1039 "map"); 1040 EXPECT_NE(clone_ptr, 0, "map address"); 1041 cp = (volatile uint32_t *)clone_ptr; 1042 1043 // read zeros from both 1044 for (size_t off = 0; off < size / sizeof(off); off++) { 1045 size_t val = p[off]; 1046 1047 if (val != 0) { 1048 EXPECT_EQ(0, val, "reading zeros from original"); 1049 break; 1050 } 1051 } 1052 for (size_t off = 0; off < size / sizeof(off); off++) { 1053 size_t val = cp[off]; 1054 1055 if (val != 0) { 1056 EXPECT_EQ(0, val, "reading zeros from original"); 1057 break; 1058 } 1059 } 1060 1061 // write to both sides and make sure it does a COW 1062 p[0] = 99; 1063 EXPECT_EQ(99, p[0], "wrote to original"); 1064 EXPECT_EQ(99, cp[0], "read back from clone"); 1065 cp[0] = 100; 1066 EXPECT_EQ(100, cp[0], "read back from clone"); 1067 EXPECT_EQ(99, p[0], "read back from original"); 1068 1069 // close the original handle 1070 EXPECT_EQ(ZX_OK, zx_handle_close(vmo), "handle_close"); 1071 1072 // close the clone handle 1073 EXPECT_EQ(ZX_OK, zx_handle_close(clone_vmo[0]), "handle_close"); 1074 1075 // unmap 1076 EXPECT_EQ(ZX_OK, zx_vmar_unmap(zx_vmar_root_self(), ptr, size), "unmap"); 1077 EXPECT_EQ(ZX_OK, zx_vmar_unmap(zx_vmar_root_self(), clone_ptr, size), "unmap"); 1078 1079 END_TEST; 1080} 1081 1082// verify that the parent is visible through decommited pages 1083bool vmo_clone_decommit_test() { 1084 BEGIN_TEST; 1085 1086 zx_handle_t vmo; 1087 zx_handle_t clone_vmo; 1088 uintptr_t ptr; 1089 uintptr_t clone_ptr; 1090 volatile uint32_t *p; 1091 volatile uint32_t *cp; 1092 1093 // create a vmo 1094 const size_t size = PAGE_SIZE * 4; 1095 EXPECT_EQ(ZX_OK, zx_vmo_create(size, 0, &vmo), "vm_object_create"); 1096 1097 // map it 1098 EXPECT_EQ(ZX_OK, 1099 zx_vmar_map(zx_vmar_root_self(), ZX_VM_PERM_READ|ZX_VM_PERM_WRITE, 0, vmo, 0, size, &ptr), 1100 "map"); 1101 EXPECT_NE(ptr, 0, "map address"); 1102 p = (volatile uint32_t *)ptr; 1103 1104 // clone it and map that 1105 clone_vmo = ZX_HANDLE_INVALID; 1106 EXPECT_EQ(ZX_OK, zx_vmo_clone(vmo, ZX_VMO_CLONE_COPY_ON_WRITE, 0, size, &clone_vmo), "vm_clone"); 1107 EXPECT_NE(ZX_HANDLE_INVALID, clone_vmo, "vm_clone_handle"); 1108 EXPECT_EQ(ZX_OK, 1109 zx_vmar_map(zx_vmar_root_self(), ZX_VM_PERM_READ|ZX_VM_PERM_WRITE, 0, clone_vmo, 0, size, &clone_ptr), 1110 "map"); 1111 EXPECT_NE(clone_ptr, 0, "map address"); 1112 cp = (volatile uint32_t *)clone_ptr; 1113 1114 // write to parent and make sure clone sees it 1115 p[0] = 99; 1116 EXPECT_EQ(99, p[0], "wrote to original"); 1117 EXPECT_EQ(99, cp[0], "read back from clone"); 1118 1119 // write to clone to get a different state 1120 cp[0] = 100; 1121 EXPECT_EQ(100, cp[0], "read back from clone"); 1122 EXPECT_EQ(99, p[0], "read back from original"); 1123 1124 EXPECT_EQ(ZX_OK, zx_vmo_op_range(clone_vmo, ZX_VMO_OP_DECOMMIT, 0, PAGE_SIZE, NULL, 0)); 1125 1126 // make sure that clone reverted to original, and that parent is unaffected 1127 // by the decommit 1128 EXPECT_EQ(99, cp[0], "read back from clone"); 1129 EXPECT_EQ(99, p[0], "read back from original"); 1130 1131 // make sure the decommited page still has COW semantics 1132 cp[0] = 100; 1133 EXPECT_EQ(100, cp[0], "read back from clone"); 1134 EXPECT_EQ(99, p[0], "read back from original"); 1135 1136 // close the original handle 1137 EXPECT_EQ(ZX_OK, zx_handle_close(vmo), "handle_close"); 1138 1139 // close the clone handle 1140 EXPECT_EQ(ZX_OK, zx_handle_close(clone_vmo), "handle_close"); 1141 1142 // unmap 1143 EXPECT_EQ(ZX_OK, zx_vmar_unmap(zx_vmar_root_self(), ptr, size), "unmap"); 1144 EXPECT_EQ(ZX_OK, zx_vmar_unmap(zx_vmar_root_self(), clone_ptr, size), "unmap"); 1145 1146 END_TEST; 1147} 1148 1149// verify the affect of commit on a clone 1150bool vmo_clone_commit_test() { 1151 BEGIN_TEST; 1152 1153 zx_handle_t vmo; 1154 zx_handle_t clone_vmo; 1155 uintptr_t ptr; 1156 uintptr_t clone_ptr; 1157 volatile uint32_t *p; 1158 volatile uint32_t *cp; 1159 1160 // create a vmo 1161 const size_t size = PAGE_SIZE * 4; 1162 EXPECT_EQ(ZX_OK, zx_vmo_create(size, 0, &vmo), "vm_object_create"); 1163 1164 // map it 1165 EXPECT_EQ(ZX_OK, 1166 zx_vmar_map(zx_vmar_root_self(), ZX_VM_PERM_READ|ZX_VM_PERM_WRITE, 0, vmo, 0, size, &ptr), 1167 "map"); 1168 EXPECT_NE(ptr, 0, "map address"); 1169 p = (volatile uint32_t *)ptr; 1170 1171 // clone it and map that 1172 clone_vmo = ZX_HANDLE_INVALID; 1173 EXPECT_EQ(ZX_OK, zx_vmo_clone(vmo, ZX_VMO_CLONE_COPY_ON_WRITE, 0, size, &clone_vmo), "vm_clone"); 1174 EXPECT_NE(ZX_HANDLE_INVALID, clone_vmo, "vm_clone_handle"); 1175 EXPECT_EQ(ZX_OK, 1176 zx_vmar_map(zx_vmar_root_self(), ZX_VM_PERM_READ|ZX_VM_PERM_WRITE, 0, clone_vmo, 0, size, &clone_ptr), 1177 "map"); 1178 EXPECT_NE(clone_ptr, 0, "map address"); 1179 cp = (volatile uint32_t *)clone_ptr; 1180 1181 // write to parent and make sure clone sees it 1182 memset((void*)p, 0x99, PAGE_SIZE); 1183 EXPECT_EQ(0x99999999, p[0], "wrote to original"); 1184 EXPECT_EQ(0x99999999, cp[0], "read back from clone"); 1185 1186 EXPECT_EQ(ZX_OK, zx_vmo_op_range(clone_vmo, ZX_VMO_OP_COMMIT, 0, PAGE_SIZE, NULL, 0)); 1187 1188 // make sure that clone has the same contents as the parent 1189 for (size_t i = 0; i < PAGE_SIZE / sizeof(*p); ++i) { 1190 EXPECT_EQ(0x99999999, cp[i], "read new page"); 1191 } 1192 EXPECT_EQ(0x99999999, p[0], "read back from original"); 1193 1194 // write to clone and make sure parent doesn't see it 1195 cp[0] = 0; 1196 EXPECT_EQ(0, cp[0], "wrote to clone"); 1197 EXPECT_EQ(0x99999999, p[0], "read back from original"); 1198 1199 EXPECT_EQ(ZX_OK, zx_vmo_op_range(clone_vmo, ZX_VMO_OP_DECOMMIT, 0, PAGE_SIZE, NULL, 0)); 1200 1201 EXPECT_EQ(0x99999999, cp[0], "clone should match orig again"); 1202 EXPECT_EQ(0x99999999, p[0], "read back from original"); 1203 1204 // close the original handle 1205 EXPECT_EQ(ZX_OK, zx_handle_close(vmo), "handle_close"); 1206 1207 // close the clone handle 1208 EXPECT_EQ(ZX_OK, zx_handle_close(clone_vmo), "handle_close"); 1209 1210 // unmap 1211 EXPECT_EQ(ZX_OK, zx_vmar_unmap(zx_vmar_root_self(), ptr, size), "unmap"); 1212 EXPECT_EQ(ZX_OK, zx_vmar_unmap(zx_vmar_root_self(), clone_ptr, size), "unmap"); 1213 1214 END_TEST; 1215} 1216 1217bool vmo_cache_test() { 1218 BEGIN_TEST; 1219 1220 zx_handle_t vmo; 1221 const size_t size = PAGE_SIZE; 1222 1223 EXPECT_EQ(ZX_OK, zx_vmo_create(size, 0, &vmo), "creation for cache_policy"); 1224 1225 // clean vmo can have all valid cache policies set 1226 EXPECT_EQ(ZX_OK, zx_vmo_set_cache_policy(vmo, ZX_CACHE_POLICY_CACHED)); 1227 EXPECT_EQ(ZX_OK, zx_vmo_set_cache_policy(vmo, ZX_CACHE_POLICY_UNCACHED)); 1228 EXPECT_EQ(ZX_OK, zx_vmo_set_cache_policy(vmo, ZX_CACHE_POLICY_UNCACHED_DEVICE)); 1229 EXPECT_EQ(ZX_OK, zx_vmo_set_cache_policy(vmo, ZX_CACHE_POLICY_WRITE_COMBINING)); 1230 1231 // bad cache policy 1232 EXPECT_EQ(ZX_ERR_INVALID_ARGS, zx_vmo_set_cache_policy(vmo, ZX_CACHE_POLICY_MASK + 1)); 1233 1234 // commit a page, make sure the policy doesn't set 1235 EXPECT_EQ(ZX_OK, zx_vmo_op_range(vmo, ZX_VMO_OP_COMMIT, 0, size, nullptr, 0)); 1236 EXPECT_EQ(ZX_ERR_BAD_STATE, zx_vmo_set_cache_policy(vmo, ZX_CACHE_POLICY_CACHED)); 1237 EXPECT_EQ(ZX_OK, zx_vmo_op_range(vmo, ZX_VMO_OP_DECOMMIT, 0, size, nullptr, 0)); 1238 EXPECT_EQ(ZX_OK, zx_vmo_set_cache_policy(vmo, ZX_CACHE_POLICY_CACHED)); 1239 1240 // map the vmo, make sure policy doesn't set 1241 uintptr_t ptr; 1242 EXPECT_EQ(ZX_OK, zx_vmar_map(zx_vmar_root_self(), ZX_VM_PERM_READ, 0, vmo, 0, size, &ptr)); 1243 EXPECT_EQ(ZX_ERR_BAD_STATE, zx_vmo_set_cache_policy(vmo, ZX_CACHE_POLICY_CACHED)); 1244 EXPECT_EQ(ZX_OK, zx_vmar_unmap(zx_vmar_root_self(), ptr, size)); 1245 EXPECT_EQ(ZX_OK, zx_vmo_set_cache_policy(vmo, ZX_CACHE_POLICY_CACHED)); 1246 1247 // clone the vmo, make sure policy doesn't set 1248 zx_handle_t clone; 1249 EXPECT_EQ(ZX_OK, zx_vmo_clone(vmo, ZX_VMO_CLONE_COPY_ON_WRITE, 0, size, &clone)); 1250 EXPECT_EQ(ZX_ERR_BAD_STATE, zx_vmo_set_cache_policy(vmo, ZX_CACHE_POLICY_CACHED)); 1251 EXPECT_EQ(ZX_OK, zx_handle_close(clone)); 1252 EXPECT_EQ(ZX_OK, zx_vmo_set_cache_policy(vmo, ZX_CACHE_POLICY_CACHED)); 1253 1254 // clone the vmo, try to set policy on the clone 1255 EXPECT_EQ(ZX_OK, zx_vmo_clone(vmo, ZX_VMO_CLONE_COPY_ON_WRITE, 0, size, &clone)); 1256 EXPECT_EQ(ZX_ERR_BAD_STATE, zx_vmo_set_cache_policy(clone, ZX_CACHE_POLICY_CACHED)); 1257 EXPECT_EQ(ZX_OK, zx_handle_close(clone)); 1258 1259 // set the policy, make sure future clones do not go through 1260 EXPECT_EQ(ZX_OK, zx_vmo_set_cache_policy(vmo, ZX_CACHE_POLICY_UNCACHED)); 1261 EXPECT_EQ(ZX_ERR_BAD_STATE, zx_vmo_clone(vmo, ZX_VMO_CLONE_COPY_ON_WRITE, 0, size, &clone)); 1262 EXPECT_EQ(ZX_OK, zx_vmo_set_cache_policy(vmo, ZX_CACHE_POLICY_CACHED)); 1263 EXPECT_EQ(ZX_OK, zx_vmo_clone(vmo, ZX_VMO_CLONE_COPY_ON_WRITE, 0, size, &clone)); 1264 EXPECT_EQ(ZX_OK, zx_handle_close(clone)); 1265 1266 // set the policy, make sure vmo read/write do not work 1267 char c; 1268 EXPECT_EQ(ZX_OK, zx_vmo_set_cache_policy(vmo, ZX_CACHE_POLICY_UNCACHED)); 1269 EXPECT_EQ(ZX_ERR_BAD_STATE, zx_vmo_read(vmo, &c, 0, sizeof(c))); 1270 EXPECT_EQ(ZX_ERR_BAD_STATE, zx_vmo_write(vmo, &c, 0, sizeof(c))); 1271 EXPECT_EQ(ZX_OK, zx_vmo_set_cache_policy(vmo, ZX_CACHE_POLICY_CACHED)); 1272 EXPECT_EQ(ZX_OK, zx_vmo_read(vmo, &c, 0, sizeof(c))); 1273 EXPECT_EQ(ZX_OK, zx_vmo_write(vmo, &c, 0, sizeof(c))); 1274 1275 EXPECT_EQ(ZX_OK, zx_handle_close(vmo), "close handle"); 1276 END_TEST; 1277} 1278 1279bool vmo_cache_map_test() { 1280 BEGIN_TEST; 1281 1282 auto maptest = [](uint32_t policy, const char *type) { 1283 zx_handle_t vmo; 1284 const size_t size = 256*1024; // 256K 1285 1286 EXPECT_EQ(ZX_OK, zx_vmo_create(size, 0, &vmo)); 1287 1288 // set the cache policy 1289 EXPECT_EQ(ZX_OK, zx_vmo_set_cache_policy(vmo, policy)); 1290 1291 // commit it 1292 EXPECT_EQ(ZX_OK, zx_vmo_op_range(vmo, ZX_VMO_OP_COMMIT, 0, size, nullptr, 0)); 1293 1294 // map it 1295 uintptr_t ptr; 1296 EXPECT_EQ(ZX_OK, zx_vmar_map(zx_vmar_root_self(), 1297 ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_MAP_RANGE, 1298 0, vmo, 0, size, &ptr)); 1299 1300 volatile uint32_t *buf = (volatile uint32_t *)ptr; 1301 1302 // write it once, priming the cache 1303 for (size_t i = 0; i < size / 4; i++) 1304 buf[i] = 0; 1305 1306 // write to it 1307 zx_time_t wt = zx_clock_get_monotonic(); 1308 for (size_t i = 0; i < size / 4; i++) 1309 buf[i] = 0; 1310 wt = zx_clock_get_monotonic() - wt; 1311 1312 // read from it 1313 zx_time_t rt = zx_clock_get_monotonic(); 1314 for (size_t i = 0; i < size / 4; i++) 1315 __UNUSED uint32_t hole = buf[i]; 1316 rt = zx_clock_get_monotonic() - rt; 1317 1318 printf("took %" PRIu64 " nsec to write %s memory\n", wt, type); 1319 printf("took %" PRIu64 " nsec to read %s memory\n", rt, type); 1320 1321 EXPECT_EQ(ZX_OK, zx_vmar_unmap(zx_vmar_root_self(), ptr, size)); 1322 EXPECT_EQ(ZX_OK, zx_handle_close(vmo)); 1323 }; 1324 1325 printf("\n"); 1326 maptest(ZX_CACHE_POLICY_CACHED, "cached"); 1327 maptest(ZX_CACHE_POLICY_UNCACHED, "uncached"); 1328 maptest(ZX_CACHE_POLICY_UNCACHED_DEVICE, "uncached device"); 1329 maptest(ZX_CACHE_POLICY_WRITE_COMBINING, "write combining"); 1330 1331 END_TEST; 1332} 1333 1334bool vmo_cache_op_test() { 1335 BEGIN_TEST; 1336 1337 zx_handle_t vmo; 1338 const size_t size = 0x8000; 1339 1340 EXPECT_EQ(ZX_OK, zx_vmo_create(size, 0, &vmo), "creation for cache op"); 1341 1342 auto t = [vmo](uint32_t op) { 1343 EXPECT_EQ(ZX_OK, zx_vmo_op_range(vmo, op, 0, 1, nullptr, 0), "0 1"); 1344 EXPECT_EQ(ZX_OK, zx_vmo_op_range(vmo, op, 0, 1, nullptr, 0), "0 1"); 1345 EXPECT_EQ(ZX_OK, zx_vmo_op_range(vmo, op, 1, 1, nullptr, 0), "1 1"); 1346 EXPECT_EQ(ZX_OK, zx_vmo_op_range(vmo, op, 0, size, nullptr, 0), "0 size"); 1347 EXPECT_EQ(ZX_OK, zx_vmo_op_range(vmo, op, 1, size - 1, nullptr, 0), "0 size"); 1348 EXPECT_EQ(ZX_OK, zx_vmo_op_range(vmo, op, 0x5200, 1, nullptr, 0), "0x5200 1"); 1349 EXPECT_EQ(ZX_OK, zx_vmo_op_range(vmo, op, 0x5200, 0x800, nullptr, 0), "0x5200 0x800"); 1350 EXPECT_EQ(ZX_OK, zx_vmo_op_range(vmo, op, 0x5200, 0x1000, nullptr, 0), "0x5200 0x1000"); 1351 EXPECT_EQ(ZX_OK, zx_vmo_op_range(vmo, op, 0x5200, 0x1200, nullptr, 0), "0x5200 0x1200"); 1352 1353 EXPECT_EQ(ZX_ERR_INVALID_ARGS, zx_vmo_op_range(vmo, op, 0, 0, nullptr, 0), "0 0"); 1354 EXPECT_EQ(ZX_ERR_OUT_OF_RANGE, zx_vmo_op_range(vmo, op, 1, size, nullptr, 0), "0 size"); 1355 EXPECT_EQ(ZX_ERR_OUT_OF_RANGE, zx_vmo_op_range(vmo, op, size, 1, nullptr, 0), "size 1"); 1356 EXPECT_EQ(ZX_ERR_OUT_OF_RANGE, zx_vmo_op_range(vmo, op, size+1, 1, nullptr, 0), "size+1 1"); 1357 EXPECT_EQ(ZX_ERR_OUT_OF_RANGE, zx_vmo_op_range(vmo, op, UINT64_MAX-1, 1, nullptr, 0), "UINT64_MAX-1 1"); 1358 EXPECT_EQ(ZX_ERR_OUT_OF_RANGE, zx_vmo_op_range(vmo, op, UINT64_MAX, 1, nullptr, 0), "UINT64_MAX 1"); 1359 EXPECT_EQ(ZX_ERR_OUT_OF_RANGE, zx_vmo_op_range(vmo, op, UINT64_MAX, UINT64_MAX, nullptr, 0), "UINT64_MAX UINT64_MAX"); 1360 }; 1361 1362 t(ZX_VMO_OP_CACHE_SYNC); 1363 t(ZX_VMO_OP_CACHE_CLEAN); 1364 t(ZX_VMO_OP_CACHE_CLEAN_INVALIDATE); 1365 t(ZX_VMO_OP_CACHE_INVALIDATE); 1366 1367 EXPECT_EQ(ZX_OK, zx_handle_close(vmo), "close handle"); 1368 END_TEST; 1369} 1370 1371bool vmo_cache_flush_test() { 1372 BEGIN_TEST; 1373 1374 zx_handle_t vmo; 1375 const size_t size = 0x8000; 1376 1377 EXPECT_EQ(ZX_OK, zx_vmo_create(size, 0, &vmo), "creation for cache op"); 1378 1379 uintptr_t ptr_ro; 1380 EXPECT_EQ(ZX_OK, 1381 zx_vmar_map(zx_vmar_root_self(), ZX_VM_PERM_READ, 0, vmo, 0, size, &ptr_ro), 1382 "map"); 1383 EXPECT_NE(ptr_ro, 0, "map address"); 1384 void *pro = (void*)ptr_ro; 1385 1386 uintptr_t ptr_rw; 1387 EXPECT_EQ(ZX_OK, 1388 zx_vmar_map(zx_vmar_root_self(), ZX_VM_PERM_READ | ZX_VM_PERM_WRITE, 0, vmo, 0, size, &ptr_rw), 1389 "map"); 1390 EXPECT_NE(ptr_rw, 0, "map address"); 1391 void *prw = (void*)ptr_rw; 1392 1393 zx_vmo_op_range(vmo, ZX_VMO_OP_COMMIT, 0, size, NULL, 0); 1394 1395 EXPECT_EQ(ZX_OK, zx_cache_flush(prw, size, ZX_CACHE_FLUSH_INSN), "rw flush insn"); 1396 EXPECT_EQ(ZX_OK, zx_cache_flush(prw, size, ZX_CACHE_FLUSH_DATA), "rw clean"); 1397 EXPECT_EQ(ZX_OK, zx_cache_flush(prw, size, ZX_CACHE_FLUSH_DATA | ZX_CACHE_FLUSH_INVALIDATE), "rw clean/invalidate"); 1398 1399 EXPECT_EQ(ZX_OK, zx_cache_flush(pro, size, ZX_CACHE_FLUSH_INSN), "ro flush insn"); 1400 EXPECT_EQ(ZX_OK, zx_cache_flush(pro, size, ZX_CACHE_FLUSH_DATA), "ro clean"); 1401 EXPECT_EQ(ZX_OK, zx_cache_flush(pro, size, ZX_CACHE_FLUSH_DATA | ZX_CACHE_FLUSH_INVALIDATE), "ro clean/invalidate"); 1402 1403 zx_vmar_unmap(zx_vmar_root_self(), ptr_rw, size); 1404 zx_vmar_unmap(zx_vmar_root_self(), ptr_ro, size); 1405 EXPECT_EQ(ZX_OK, zx_handle_close(vmo), "close handle"); 1406 END_TEST; 1407} 1408 1409bool vmo_decommit_misaligned_test() { 1410 BEGIN_TEST; 1411 1412 zx_handle_t vmo; 1413 EXPECT_EQ(ZX_OK, zx_vmo_create(PAGE_SIZE * 2, 0, &vmo), "creation for decommit test"); 1414 1415 zx_status_t status = zx_vmo_op_range(vmo, ZX_VMO_OP_DECOMMIT, 0x10, 0x100, NULL, 0); 1416 EXPECT_EQ(ZX_OK, status, "decommitting uncommitted memory"); 1417 1418 status = zx_vmo_op_range(vmo, ZX_VMO_OP_COMMIT, 0x10, 0x100, NULL, 0); 1419 EXPECT_EQ(ZX_OK, status, "committing memory"); 1420 1421 status = zx_vmo_op_range(vmo, ZX_VMO_OP_DECOMMIT, 0x10, 0x100, NULL, 0); 1422 EXPECT_EQ(ZX_OK, status, "decommitting memory"); 1423 1424 EXPECT_EQ(ZX_OK, zx_handle_close(vmo), "close handle"); 1425 END_TEST; 1426} 1427 1428// test set 4: deal with clones with nonzero offsets and offsets that extend beyond the original 1429bool vmo_clone_test_4() { 1430 BEGIN_TEST; 1431 1432 zx_handle_t vmo; 1433 zx_handle_t clone_vmo[1]; 1434 uintptr_t ptr; 1435 uintptr_t clone_ptr; 1436 volatile size_t *p; 1437 volatile size_t *cp; 1438 1439 // create a vmo 1440 const size_t size = PAGE_SIZE * 4; 1441 EXPECT_EQ(ZX_OK, zx_vmo_create(size, 0, &vmo), "vm_object_create"); 1442 1443 // map it 1444 EXPECT_EQ(ZX_OK, 1445 zx_vmar_map(zx_vmar_root_self(), ZX_VM_PERM_READ|ZX_VM_PERM_WRITE, 0, vmo, 0, size, &ptr), 1446 "map"); 1447 EXPECT_NE(ptr, 0, "map address"); 1448 p = (volatile size_t *)ptr; 1449 1450 // fill it with stuff 1451 for (size_t off = 0; off < size / sizeof(off); off++) 1452 p[off] = off; 1453 1454 // make sure that non page aligned clones do not work 1455 clone_vmo[0] = ZX_HANDLE_INVALID; 1456 EXPECT_EQ(ZX_ERR_INVALID_ARGS, zx_vmo_clone(vmo, ZX_VMO_CLONE_COPY_ON_WRITE, 1, size, &clone_vmo[0]), "vm_clone"); 1457 1458 // create a clone that extends beyond the parent by one page 1459 clone_vmo[0] = ZX_HANDLE_INVALID; 1460 EXPECT_EQ(ZX_OK, zx_vmo_clone(vmo, ZX_VMO_CLONE_COPY_ON_WRITE, PAGE_SIZE, size, &clone_vmo[0]), "vm_clone"); 1461 1462 // map the clone 1463 EXPECT_EQ(ZX_OK, 1464 zx_vmar_map(zx_vmar_root_self(), ZX_VM_PERM_READ|ZX_VM_PERM_WRITE, 0, clone_vmo[0], 0, size, &clone_ptr), 1465 "map"); 1466 EXPECT_NE(clone_ptr, 0, "map address"); 1467 cp = (volatile size_t *)clone_ptr; 1468 1469 // verify that it seems to be mapping the original at an offset 1470 for (size_t off = 0; off < (size - PAGE_SIZE) / sizeof(off); off++) { 1471 if (cp[off] != off + PAGE_SIZE / sizeof(off)) { 1472 EXPECT_EQ(cp[off], off + PAGE_SIZE / sizeof(off), "reading from clone"); 1473 break; 1474 } 1475 } 1476 1477 // verify that the last page we have mapped is beyond the original and should return zeros 1478 for (size_t off = (size - PAGE_SIZE) / sizeof(off); off < size / sizeof(off); off++) { 1479 if (cp[off] != 0) { 1480 EXPECT_EQ(cp[off], 0, "reading from clone"); 1481 break; 1482 } 1483 } 1484 1485 // resize the original 1486 EXPECT_EQ(ZX_OK, zx_vmo_set_size(vmo, size + PAGE_SIZE), "extend the vmo"); 1487 1488 // verify that the last page we have mapped still returns zeros 1489 for (size_t off = (size - PAGE_SIZE) / sizeof(off); off < size / sizeof(off); off++) { 1490 if (cp[off] != 0) { 1491 EXPECT_EQ(cp[off], 0, "reading from clone"); 1492 break; 1493 } 1494 } 1495 1496 // write to the new part of the original 1497 size_t val = 99; 1498 EXPECT_EQ(ZX_OK, zx_vmo_write(vmo, &val, size, sizeof(val)), "writing to original after extending"); 1499 1500 // verify that it is reflected in the clone 1501 EXPECT_EQ(99, cp[(size - PAGE_SIZE) / sizeof(*cp)], "modified newly exposed part of cow clone"); 1502 1503 // resize the original again, completely extending it beyond he clone 1504 EXPECT_EQ(ZX_OK, zx_vmo_set_size(vmo, size + PAGE_SIZE * 2), "extend the vmo"); 1505 1506 // resize the original to zero 1507 EXPECT_EQ(ZX_OK, zx_vmo_set_size(vmo, 0), "truncate the vmo"); 1508 1509 // verify that the clone now reads completely zeros, since it never COWed 1510 for (size_t off = 0; off < size / sizeof(off); off++) { 1511 if (cp[off] != 0) { 1512 EXPECT_EQ(cp[off], 0, "reading zeros from clone"); 1513 break; 1514 } 1515 } 1516 1517 // close and unmap 1518 EXPECT_EQ(ZX_OK, zx_handle_close(vmo), "handle_close"); 1519 EXPECT_EQ(ZX_OK, zx_vmar_unmap(zx_vmar_root_self(), ptr, size), "unmap"); 1520 EXPECT_EQ(ZX_OK, zx_handle_close(clone_vmo[0]), "handle_close"); 1521 EXPECT_EQ(ZX_OK, zx_vmar_unmap(zx_vmar_root_self(), clone_ptr, size), "unmap"); 1522 1523 END_TEST; 1524} 1525 1526bool vmo_clone_rights_test() { 1527 BEGIN_TEST; 1528 1529 static const char kOldVmoName[] = "original"; 1530 static const char kNewVmoName[] = "clone"; 1531 1532 static const zx_rights_t kOldVmoRights = 1533 ZX_RIGHT_READ | ZX_RIGHT_DUPLICATE; 1534 static const zx_rights_t kNewVmoRights = 1535 kOldVmoRights | ZX_RIGHT_WRITE | 1536 ZX_RIGHT_GET_PROPERTY | ZX_RIGHT_SET_PROPERTY; 1537 1538 zx_handle_t vmo; 1539 ASSERT_EQ(zx_vmo_create(PAGE_SIZE, 0, &vmo), 1540 ZX_OK); 1541 ASSERT_EQ(zx_object_set_property(vmo, ZX_PROP_NAME, 1542 kOldVmoName, sizeof(kOldVmoName)), 1543 ZX_OK); 1544 ASSERT_EQ(get_handle_rights(vmo) & kOldVmoRights, kOldVmoRights); 1545 1546 zx_handle_t reduced_rights_vmo; 1547 ASSERT_EQ(zx_handle_duplicate(vmo, kOldVmoRights, &reduced_rights_vmo), 1548 ZX_OK); 1549 EXPECT_EQ(get_handle_rights(reduced_rights_vmo), kOldVmoRights); 1550 1551 zx_handle_t clone; 1552 ASSERT_EQ(zx_vmo_clone(reduced_rights_vmo, ZX_VMO_CLONE_COPY_ON_WRITE, 1553 0, PAGE_SIZE, &clone), 1554 ZX_OK); 1555 1556 EXPECT_EQ(zx_handle_close(reduced_rights_vmo), ZX_OK); 1557 1558 ASSERT_EQ(zx_object_set_property(clone, ZX_PROP_NAME, 1559 kNewVmoName, sizeof(kNewVmoName)), 1560 ZX_OK); 1561 1562 char oldname[ZX_MAX_NAME_LEN] = "bad"; 1563 EXPECT_EQ(zx_object_get_property(vmo, ZX_PROP_NAME, 1564 oldname, sizeof(oldname)), 1565 ZX_OK); 1566 EXPECT_STR_EQ(oldname, kOldVmoName, "original VMO name"); 1567 1568 char newname[ZX_MAX_NAME_LEN] = "bad"; 1569 EXPECT_EQ(zx_object_get_property(clone, ZX_PROP_NAME, 1570 newname, sizeof(newname)), 1571 ZX_OK); 1572 EXPECT_STR_EQ(newname, kNewVmoName, "clone VMO name"); 1573 1574 EXPECT_EQ(zx_handle_close(vmo), ZX_OK); 1575 EXPECT_EQ(get_handle_rights(clone), kNewVmoRights); 1576 EXPECT_EQ(zx_handle_close(clone), ZX_OK); 1577 1578 END_TEST; 1579} 1580 1581// Resizing a regular mapped VMO causes a fault. 1582bool vmo_resize_hazard() { 1583 BEGIN_TEST; 1584 1585 const size_t size = PAGE_SIZE * 2; 1586 zx_handle_t vmo; 1587 ASSERT_EQ(zx_vmo_create(size, 0, &vmo), ZX_OK); 1588 1589 uintptr_t ptr_rw; 1590 EXPECT_EQ(ZX_OK, zx_vmar_map( 1591 zx_vmar_root_self(), ZX_VM_PERM_READ | ZX_VM_PERM_WRITE, 1592 0, vmo, 0, size, &ptr_rw), "map"); 1593 1594 auto int_arr = reinterpret_cast<int*>(ptr_rw); 1595 EXPECT_EQ(int_arr[1], 0); 1596 1597 EXPECT_EQ(ZX_OK, zx_vmo_set_size(vmo, 0u)); 1598 1599 EXPECT_EQ(false, probe_for_read(&int_arr[1]), "read probe"); 1600 EXPECT_EQ(false, probe_for_write(&int_arr[1]), "write probe"); 1601 1602 EXPECT_EQ(ZX_OK, zx_handle_close(vmo)); 1603 EXPECT_EQ(ZX_OK, zx_vmar_unmap(zx_vmar_root_self(), ptr_rw, size), "unmap"); 1604 1605 END_TEST; 1606} 1607 1608// Resizing a cloned VMO causes a fault. 1609bool vmo_clone_resize_clone_hazard() { 1610 BEGIN_TEST; 1611 1612 const size_t size = PAGE_SIZE * 2; 1613 zx_handle_t vmo; 1614 ASSERT_EQ(zx_vmo_create(size, 0, &vmo), ZX_OK); 1615 1616 zx_handle_t clone_vmo; 1617 EXPECT_EQ(ZX_OK, zx_vmo_clone( 1618 vmo, ZX_VMO_CLONE_COPY_ON_WRITE, 0, size, &clone_vmo), "vm_clone"); 1619 1620 uintptr_t ptr_rw; 1621 EXPECT_EQ(ZX_OK, zx_vmar_map( 1622 zx_vmar_root_self(), ZX_VM_PERM_READ | ZX_VM_PERM_WRITE, 0, 1623 clone_vmo, 0, size, &ptr_rw), "map"); 1624 1625 auto int_arr = reinterpret_cast<int*>(ptr_rw); 1626 EXPECT_EQ(int_arr[1], 0); 1627 1628 EXPECT_EQ(ZX_OK, zx_vmo_set_size(clone_vmo, 0u)); 1629 1630 EXPECT_EQ(false, probe_for_read(&int_arr[1]), "read probe"); 1631 EXPECT_EQ(false, probe_for_write(&int_arr[1]), "write probe"); 1632 1633 EXPECT_EQ(ZX_OK, zx_handle_close(vmo)); 1634 EXPECT_EQ(ZX_OK, zx_handle_close(clone_vmo)); 1635 EXPECT_EQ(ZX_OK, zx_vmar_unmap(zx_vmar_root_self(), ptr_rw, size), "unmap"); 1636 END_TEST; 1637} 1638 1639// Resizing the parent VMO and accessing via a mapped VMO is ok. 1640bool vmo_clone_resize_parent_ok() { 1641 BEGIN_TEST; 1642 1643 const size_t size = PAGE_SIZE * 2; 1644 zx_handle_t vmo; 1645 ASSERT_EQ(zx_vmo_create(size, 0, &vmo), ZX_OK); 1646 1647 zx_handle_t clone_vmo; 1648 EXPECT_EQ(ZX_OK, zx_vmo_clone( 1649 vmo, ZX_VMO_CLONE_COPY_ON_WRITE, 0, size, &clone_vmo), "vm_clone"); 1650 1651 uintptr_t ptr_rw; 1652 EXPECT_EQ(ZX_OK, zx_vmar_map( 1653 zx_vmar_root_self(), ZX_VM_PERM_READ | ZX_VM_PERM_WRITE, 0, 1654 clone_vmo, 0, size, &ptr_rw), "map"); 1655 1656 auto int_arr = reinterpret_cast<int*>(ptr_rw); 1657 EXPECT_EQ(int_arr[1], 0); 1658 1659 EXPECT_EQ(ZX_OK, zx_vmo_set_size(vmo, 0u)); 1660 1661 EXPECT_EQ(true, probe_for_read(&int_arr[1]), "read probe"); 1662 EXPECT_EQ(true, probe_for_write(&int_arr[1]), "write probe"); 1663 1664 EXPECT_EQ(ZX_OK, zx_handle_close(vmo)); 1665 EXPECT_EQ(ZX_OK, zx_handle_close(clone_vmo)); 1666 EXPECT_EQ(ZX_OK, zx_vmar_unmap(zx_vmar_root_self(), ptr_rw, size), "unmap"); 1667 END_TEST; 1668} 1669 1670bool vmo_unmap_coherency() { 1671 BEGIN_TEST; 1672 1673 // This is an expensive test to try to detect a multi-cpu coherency 1674 // problem with TLB flushing of unmap operations 1675 // 1676 // algorithm: map a relatively large committed VMO. 1677 // Create a worker thread that simply walks through the VMO writing to 1678 // each page. 1679 // In the main thread continually decommit the vmo with a little bit of 1680 // a gap between decommits to allow the worker thread to bring it all back in. 1681 // If the worker thread appears stuck by not making it through a loop in 1682 // a reasonable time, we have failed. 1683 1684 // allocate a vmo 1685 const size_t len = 32*1024*1024; 1686 zx_handle_t vmo; 1687 zx_status_t status = zx_vmo_create(len, 0, &vmo); 1688 EXPECT_EQ(ZX_OK, status, "vm_object_create"); 1689 1690 // do a regular map 1691 uintptr_t ptr = 0; 1692 status = zx_vmar_map(zx_vmar_root_self(), 1693 ZX_VM_PERM_READ | ZX_VM_PERM_WRITE, 1694 0, vmo, 0, len, &ptr); 1695 EXPECT_EQ(ZX_OK, status, "map"); 1696 EXPECT_NE(0u, ptr, "map address"); 1697 1698 // create a worker thread 1699 struct worker_args { 1700 size_t len; 1701 uintptr_t ptr; 1702 fbl::atomic<bool> exit; 1703 fbl::atomic<bool> exited; 1704 fbl::atomic<size_t> count; 1705 } args = {}; 1706 args.len = len; 1707 args.ptr = ptr; 1708 1709 auto worker = [](void *_args) -> int { 1710 worker_args* a = (worker_args*)_args; 1711 1712 unittest_printf("ptr %#" PRIxPTR " len %zu\n", 1713 a->ptr, a->len); 1714 1715 while (!a->exit.load()) { 1716 // walk through the mapping, writing to every page 1717 for (size_t off = 0; off < a->len; off += PAGE_SIZE) { 1718 *(uint32_t*)(a->ptr + off) = 99; 1719 } 1720 1721 a->count.fetch_add(1); 1722 } 1723 1724 unittest_printf("exiting worker\n"); 1725 1726 a->exited.store(true); 1727 1728 return 0; 1729 }; 1730 1731 thrd_t t; 1732 thrd_create(&t, worker, &args); 1733 1734 const zx_time_t max_duration = ZX_SEC(30); 1735 const zx_time_t max_wait = ZX_SEC(1); 1736 zx_time_t start = zx_clock_get_monotonic(); 1737 for (;;) { 1738 // wait for it to loop at least once 1739 zx_time_t t = zx_clock_get_monotonic(); 1740 size_t last_count = args.count.load(); 1741 while (args.count.load() <= last_count) { 1742 if (zx_clock_get_monotonic() - t > max_wait) { 1743 UNITTEST_FAIL_TRACEF("looper appears stuck!\n"); 1744 break; 1745 } 1746 } 1747 1748 // decommit the vmo 1749 status = zx_vmo_op_range(vmo, ZX_VMO_OP_DECOMMIT, 0, len, nullptr, 0); 1750 EXPECT_EQ(0, status, "vm decommit"); 1751 1752 if (zx_clock_get_monotonic() - start > max_duration) 1753 break; 1754 } 1755 1756 // stop the thread and wait for it to exit 1757 args.exit.store(true); 1758 while (args.exited.load() == false) 1759 ; 1760 1761 END_TEST; 1762} 1763 1764BEGIN_TEST_CASE(vmo_tests) 1765RUN_TEST(vmo_create_test); 1766RUN_TEST(vmo_read_write_test); 1767RUN_TEST(vmo_read_write_range_test); 1768RUN_TEST(vmo_map_test); 1769RUN_TEST(vmo_read_only_map_test); 1770RUN_TEST(vmo_no_perm_map_test); 1771RUN_TEST(vmo_no_perm_protect_test); 1772RUN_TEST(vmo_resize_test); 1773RUN_TEST(vmo_no_resize_test); 1774RUN_TEST(vmo_no_resize_clone_test); 1775RUN_TEST(vmo_size_align_test); 1776RUN_TEST(vmo_resize_align_test); 1777RUN_TEST(vmo_clone_size_align_test); 1778RUN_TEST(vmo_rights_test); 1779RUN_TEST(vmo_commit_test); 1780RUN_TEST(vmo_decommit_misaligned_test); 1781RUN_TEST(vmo_cache_test); 1782RUN_TEST_PERFORMANCE(vmo_cache_map_test); 1783RUN_TEST(vmo_cache_op_test); 1784RUN_TEST(vmo_cache_flush_test); 1785RUN_TEST(vmo_zero_page_test); 1786RUN_TEST(vmo_clone_test_1); 1787RUN_TEST(vmo_clone_test_2); 1788RUN_TEST(vmo_clone_test_3); 1789RUN_TEST(vmo_clone_test_4); 1790RUN_TEST(vmo_clone_decommit_test); 1791RUN_TEST(vmo_clone_commit_test); 1792RUN_TEST(vmo_clone_rights_test); 1793RUN_TEST(vmo_resize_hazard); 1794RUN_TEST(vmo_clone_resize_clone_hazard); 1795RUN_TEST(vmo_clone_resize_parent_ok); 1796RUN_TEST(vmo_info_test); 1797RUN_TEST_LARGE(vmo_unmap_coherency); 1798END_TEST_CASE(vmo_tests) 1799 1800int main(int argc, char** argv) { 1801 bool run_bench = false; 1802 if (argc > 1) { 1803 if (!strcmp(argv[1], "bench")) { 1804 run_bench = true; 1805 } 1806 } 1807 1808 if (!run_bench) { 1809 bool success = unittest_run_all_tests(argc, argv); 1810 return success ? 0 : -1; 1811 } else { 1812 return vmo_run_benchmark(); 1813 } 1814} 1815