1// Copyright 2017 The Fuchsia Authors. All rights reserved. 2// Use of this source code is governed by a BSD-style license that can be 3// found in the LICENSE file. 4 5#include <errno.h> 6#include <fcntl.h> 7#include <limits.h> 8#include <poll.h> 9#include <stdbool.h> 10#include <stdio.h> 11#include <stdlib.h> 12#include <string.h> 13#include <sys/mman.h> 14#include <sys/stat.h> 15#include <sys/types.h> 16#include <threads.h> 17#include <time.h> 18#include <unistd.h> 19#include <utime.h> 20 21#include <blobfs/format.h> 22#include <block-client/client.h> 23#include <fbl/algorithm.h> 24#include <fbl/auto_lock.h> 25#include <fbl/function.h> 26#include <fbl/limits.h> 27#include <fbl/new.h> 28#include <fbl/ref_counted.h> 29#include <fbl/ref_ptr.h> 30#include <fbl/unique_fd.h> 31#include <fbl/unique_ptr.h> 32#include <fbl/vector.h> 33#include <fs-management/fvm.h> 34#include <fs-management/mount.h> 35#include <fs-management/ramdisk.h> 36#include <fuchsia/io/c/fidl.h> 37#include <fvm/fvm.h> 38#include <lib/async-loop/cpp/loop.h> 39#include <lib/fzl/fdio.h> 40#include <lib/memfs/memfs.h> 41#include <lib/zx/vmo.h> 42#include <minfs/format.h> 43#include <zircon/device/block.h> 44#include <zircon/device/device.h> 45#include <zircon/device/ramdisk.h> 46#include <zircon/device/vfs.h> 47#include <zircon/syscalls.h> 48#include <zircon/thread_annotations.h> 49 50#include <unittest/unittest.h> 51 52#define FVM_DRIVER_LIB "/boot/driver/fvm.so" 53#define STRLEN(s) sizeof(s) / sizeof((s)[0]) 54 55namespace { 56 57/////////////////////// Helper functions for creating FVM: 58 59const char kTmpfsPath[] = "/fvm-tmp"; 60const char kMountPath[] = "/fvm-tmp/minfs_test_mountpath"; 61 62static bool use_real_disk = false; 63static char test_disk_path[PATH_MAX]; 64static uint64_t test_block_size; 65static uint64_t test_block_count; 66 67int StartFVMTest(uint64_t blk_size, uint64_t blk_count, uint64_t slice_size, 68 char* disk_path_out, char* fvm_driver_out) { 69 int fd; 70 ssize_t r; 71 
disk_path_out[0] = 0; 72 if (!use_real_disk) { 73 if (create_ramdisk(blk_size, blk_count, disk_path_out)) { 74 fprintf(stderr, "fvm: Could not create ramdisk\n"); 75 goto fail; 76 } 77 } else { 78 strcpy(disk_path_out, test_disk_path); 79 } 80 81 fd = open(disk_path_out, O_RDWR); 82 if (fd < 0) { 83 fprintf(stderr, "fvm: Could not open ramdisk\n"); 84 goto fail; 85 } 86 87 if (fvm_init(fd, slice_size) != ZX_OK) { 88 fprintf(stderr, "fvm: Could not initialize fvm\n"); 89 close(fd); 90 goto fail; 91 } 92 93 r = ioctl_device_bind(fd, FVM_DRIVER_LIB, STRLEN(FVM_DRIVER_LIB)); 94 close(fd); 95 if (r < 0) { 96 fprintf(stderr, "fvm: Error binding to fvm driver\n"); 97 goto fail; 98 } 99 100 char path[PATH_MAX]; 101 snprintf(path, sizeof(path), "%s/fvm", disk_path_out); 102 if (wait_for_device(path, ZX_SEC(3)) != ZX_OK) { 103 fprintf(stderr, "fvm: Error waiting for fvm driver to bind\n"); 104 goto fail; 105 } 106 107 // TODO(security): SEC-70. This may overflow |fvm_driver_out|. 108 strcpy(fvm_driver_out, path); 109 110 return 0; 111 112fail: 113 if (!use_real_disk && disk_path_out[0]) { 114 destroy_ramdisk(disk_path_out); 115 } 116 return -1; 117} 118 119typedef struct { 120 const char* name; 121 size_t number; 122} partition_entry_t; 123 124int FVMRebind(int fvm_fd, char* ramdisk_path, const partition_entry_t* entries, 125 size_t entry_count) { 126 int ramdisk_fd = open(ramdisk_path, O_RDWR); 127 if (ramdisk_fd < 0) { 128 fprintf(stderr, "fvm rebind: Could not open ramdisk\n"); 129 return -1; 130 } 131 132 if (ioctl_block_rr_part(ramdisk_fd) != 0) { 133 fprintf(stderr, "fvm rebind: Rebind hack failed\n"); 134 return -1; 135 } 136 137 close(fvm_fd); 138 close(ramdisk_fd); 139 140 // Wait for the ramdisk to rebind to a block driver 141 if (wait_for_device(ramdisk_path, ZX_SEC(3)) != ZX_OK) { 142 fprintf(stderr, "fvm rebind: Block driver did not rebind to ramdisk\n"); 143 return -1; 144 } 145 146 ramdisk_fd = open(ramdisk_path, O_RDWR); 147 if (ramdisk_fd < 0) { 148 
fprintf(stderr, "fvm rebind: Could not open ramdisk\n"); 149 return -1; 150 } 151 152 ssize_t r = ioctl_device_bind(ramdisk_fd, FVM_DRIVER_LIB, STRLEN(FVM_DRIVER_LIB)); 153 close(ramdisk_fd); 154 if (r < 0) { 155 fprintf(stderr, "fvm rebind: Could not bind fvm driver\n"); 156 return -1; 157 } 158 159 char path[PATH_MAX]; 160 snprintf(path, sizeof(path), "%s/fvm", ramdisk_path); 161 if (wait_for_device(path, ZX_SEC(3)) != ZX_OK) { 162 fprintf(stderr, "fvm rebind: Error waiting for fvm driver to bind\n"); 163 return -1; 164 } 165 166 for (size_t i = 0; i < entry_count; i++) { 167 snprintf(path, sizeof(path), "%s/fvm/%s-p-%zu/block", ramdisk_path, entries[i].name, 168 entries[i].number); 169 if (wait_for_device(path, ZX_SEC(3)) != ZX_OK) { 170 fprintf(stderr, " Failed to wait for %s\n", path); 171 return -1; 172 } 173 } 174 175 snprintf(path, sizeof(path), "%s/fvm", ramdisk_path); 176 fvm_fd = open(path, O_RDWR); 177 if (fvm_fd < 0) { 178 fprintf(stderr, "fvm rebind: Failed to open fvm\n"); 179 return -1; 180 } 181 return fvm_fd; 182} 183 184bool FVMCheckSliceSize(const char* fvm_path, size_t expected_slice_size) { 185 BEGIN_HELPER; 186 fbl::unique_fd fd(open(fvm_path, O_RDWR)); 187 ASSERT_TRUE(fd, "Failed to open fvm driver\n"); 188 fvm_info_t fvm_info; 189 ASSERT_GT(ioctl_block_fvm_query(fd.get(), &fvm_info), 0, "Failed to query fvm\n"); 190 ASSERT_EQ(expected_slice_size, fvm_info.slice_size, "Unexpected slice size\n"); 191 END_HELPER; 192} 193 194bool FVMCheckAllocatedCount(int fd, size_t expected_allocated, size_t expected_total) { 195 BEGIN_HELPER; 196 fvm_info_t fvm_info; 197 ASSERT_GT(ioctl_block_fvm_query(fd, &fvm_info), 0); 198 ASSERT_EQ(fvm_info.pslice_total_count, expected_total); 199 ASSERT_EQ(fvm_info.pslice_allocated_count, expected_allocated); 200 END_HELPER; 201} 202 203// Unbind FVM driver and removes the backing ramdisk device. 
204int EndFVMTest(const char* ramdisk_path) { 205 if (!use_real_disk) { 206 return destroy_ramdisk(ramdisk_path); 207 } else { 208 return fvm_destroy(ramdisk_path); 209 } 210} 211 212/////////////////////// Helper functions, definitions 213 214constexpr uint8_t kTestUniqueGUID[] = { 215 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 216 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f 217}; 218constexpr uint8_t kTestUniqueGUID2[] = { 219 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 220 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f 221}; 222 223// Intentionally avoid aligning these GUIDs with 224// the actual system GUIDs; otherwise, limited versions 225// of Fuchsia may attempt to actually mount these 226// partitions automatically. 227 228#define GUID_TEST_DATA_VALUE { \ 229 0xAA, 0xFF, 0xBB, 0x00, 0x33, 0x44, 0x88, 0x99, \ 230 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17 \ 231} 232 233#define GUID_TEST_BLOB_VALUE { \ 234 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, \ 235 0xAA, 0xFF, 0xBB, 0x00, 0x33, 0x44, 0x88, 0x99 \ 236} 237 238#define GUID_TEST_SYS_VALUE { \ 239 0xEE, 0xFF, 0xBB, 0x00, 0x33, 0x44, 0x88, 0x99, \ 240 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17 \ 241} 242 243constexpr char kTestPartName1[] = "data"; 244constexpr uint8_t kTestPartGUIDData[] = GUID_TEST_DATA_VALUE; 245 246constexpr char kTestPartName2[] = "blob"; 247constexpr uint8_t kTestPartGUIDBlob[] = GUID_TEST_BLOB_VALUE; 248 249constexpr char kTestPartName3[] = "system"; 250constexpr uint8_t kTestPartGUIDSystem[] = GUID_TEST_SYS_VALUE; 251 252class VmoBuf; 253 254class VmoClient : public fbl::RefCounted<VmoClient> { 255public: 256 static bool Create(int fd, fbl::RefPtr<VmoClient>* out); 257 bool CheckWrite(VmoBuf* vbuf, size_t buf_off, size_t dev_off, size_t len); 258 bool CheckRead(VmoBuf* vbuf, size_t buf_off, size_t dev_off, size_t len); 259 bool Transaction(block_fifo_request_t* requests, size_t count) { 260 BEGIN_HELPER; 261 ASSERT_EQ(block_fifo_txn(client_, &requests[0], 
count), ZX_OK); END_HELPER; 262 } 263 264 int fd() const { return fd_; } 265 groupid_t group() { return 0; } 266 ~VmoClient() { 267 block_fifo_release_client(client_); 268 } 269private: 270 int fd_; 271 block_info_t info_; 272 fifo_client_t* client_; 273}; 274 275class VmoBuf { 276public: 277 static bool Create(fbl::RefPtr<VmoClient> client, size_t size, 278 fbl::unique_ptr<VmoBuf>* out) { 279 BEGIN_HELPER; 280 281 fbl::AllocChecker ac; 282 fbl::unique_ptr<uint8_t[]> buf(new (&ac) uint8_t[size]); 283 ASSERT_TRUE(ac.check()); 284 285 zx::vmo vmo; 286 ASSERT_EQ(zx::vmo::create(size, 0, &vmo), ZX_OK); 287 288 zx_handle_t xfer_vmo; 289 ASSERT_EQ(zx_handle_duplicate(vmo.get(), ZX_RIGHT_SAME_RIGHTS, 290 &xfer_vmo), ZX_OK); 291 292 vmoid_t vmoid; 293 ASSERT_GT(ioctl_block_attach_vmo(client->fd(), &xfer_vmo, &vmoid), 0); 294 295 fbl::unique_ptr<VmoBuf> vb(new (&ac) VmoBuf(fbl::move(client), 296 fbl::move(vmo), 297 fbl::move(buf), 298 vmoid)); 299 ASSERT_TRUE(ac.check()); 300 *out = fbl::move(vb); 301 END_HELPER; 302 } 303 304 ~VmoBuf() { 305 if (vmo_.is_valid()) { 306 block_fifo_request_t request; 307 request.group = client_->group(); 308 request.vmoid = vmoid_; 309 request.opcode = BLOCKIO_CLOSE_VMO; 310 client_->Transaction(&request, 1); 311 } 312 } 313 314private: 315 friend VmoClient; 316 317 VmoBuf(fbl::RefPtr<VmoClient> client, zx::vmo vmo, 318 fbl::unique_ptr<uint8_t[]> buf, vmoid_t vmoid) : 319 client_(fbl::move(client)), vmo_(fbl::move(vmo)), 320 buf_(fbl::move(buf)), vmoid_(vmoid) {} 321 322 fbl::RefPtr<VmoClient> client_; 323 zx::vmo vmo_; 324 fbl::unique_ptr<uint8_t[]> buf_; 325 vmoid_t vmoid_; 326}; 327 328bool VmoClient::Create(int fd, fbl::RefPtr<VmoClient>* out) { 329 BEGIN_HELPER; 330 fbl::AllocChecker ac; 331 fbl::RefPtr<VmoClient> vc = fbl::AdoptRef(new (&ac) VmoClient()); 332 ASSERT_TRUE(ac.check()); 333 zx_handle_t fifo; 334 ASSERT_GT(ioctl_block_get_fifos(fd, &fifo), 0, "Failed to get FIFO"); 335 ASSERT_GT(ioctl_block_get_info(fd, &vc->info_), 0, 
"Failed to get block info"); 336 ASSERT_EQ(block_fifo_create_client(fifo, &vc->client_), ZX_OK); 337 vc->fd_ = fd; 338 *out = fbl::move(vc); 339 END_HELPER; 340} 341 342bool VmoClient::CheckWrite(VmoBuf* vbuf, size_t buf_off, size_t dev_off, size_t len) { 343 BEGIN_HELPER; 344 // Write to the client-side buffer 345 for (size_t i = 0; i < len; i++) 346 vbuf->buf_[i + buf_off] = static_cast<uint8_t>(rand()); 347 348 // Write to the registered VMO 349 ASSERT_EQ(vbuf->vmo_.write(&vbuf->buf_[buf_off], buf_off, len), ZX_OK); 350 351 // Write to the block device 352 block_fifo_request_t request; 353 request.group = group(); 354 request.vmoid = vbuf->vmoid_; 355 request.opcode = BLOCKIO_WRITE; 356 ASSERT_EQ(len % info_.block_size, 0); 357 ASSERT_EQ(buf_off % info_.block_size, 0); 358 ASSERT_EQ(dev_off % info_.block_size, 0); 359 request.length = static_cast<uint32_t>(len / info_.block_size); 360 request.vmo_offset = buf_off / info_.block_size; 361 request.dev_offset = dev_off / info_.block_size; 362 ASSERT_TRUE(Transaction(&request, 1)); 363 END_HELPER; 364} 365 366bool VmoClient::CheckRead(VmoBuf* vbuf, size_t buf_off, size_t dev_off, size_t len) { 367 BEGIN_HELPER; 368 369 // Create a comparison buffer 370 fbl::AllocChecker ac; 371 fbl::unique_ptr<uint8_t[]> out(new (&ac) uint8_t[len]); 372 ASSERT_TRUE(ac.check()); 373 memset(out.get(), 0, len); 374 375 // Read from the block device 376 block_fifo_request_t request; 377 request.group = group(); 378 request.vmoid = vbuf->vmoid_; 379 request.opcode = BLOCKIO_READ; 380 ASSERT_EQ(len % info_.block_size, 0); 381 ASSERT_EQ(buf_off % info_.block_size, 0); 382 ASSERT_EQ(dev_off % info_.block_size, 0); 383 request.length = static_cast<uint32_t>(len / info_.block_size); 384 request.vmo_offset = buf_off / info_.block_size; 385 request.dev_offset = dev_off / info_.block_size; 386 ASSERT_TRUE(Transaction(&request, 1)); 387 388 // Read from the registered VMO 389 ASSERT_EQ(vbuf->vmo_.read(out.get(), buf_off, len), ZX_OK); 390 391 
ASSERT_EQ(memcmp(&vbuf->buf_[buf_off], out.get(), len), 0); 392 END_HELPER; 393} 394 395bool CheckWrite(int fd, size_t off, size_t len, uint8_t* buf) { 396 BEGIN_HELPER; 397 for (size_t i = 0; i < len; i++) { 398 buf[i] = static_cast<uint8_t>(rand()); 399 } 400 ASSERT_EQ(lseek(fd, off, SEEK_SET), static_cast<ssize_t>(off)); 401 ASSERT_EQ(write(fd, buf, len), static_cast<ssize_t>(len)); 402 END_HELPER; 403} 404 405bool CheckRead(int fd, size_t off, size_t len, const uint8_t* in) { 406 BEGIN_HELPER; 407 fbl::AllocChecker ac; 408 fbl::unique_ptr<uint8_t[]> out(new (&ac) uint8_t[len]); 409 ASSERT_TRUE(ac.check()); 410 memset(out.get(), 0, len); 411 ASSERT_EQ(lseek(fd, off, SEEK_SET), static_cast<ssize_t>(off)); 412 ASSERT_EQ(read(fd, out.get(), len), static_cast<ssize_t>(len)); 413 ASSERT_EQ(memcmp(in, out.get(), len), 0); 414 END_HELPER; 415} 416 417bool CheckWriteColor(int fd, size_t off, size_t len, uint8_t color) { 418 BEGIN_HELPER; 419 fbl::AllocChecker ac; 420 fbl::unique_ptr<uint8_t[]> buf(new (&ac) uint8_t[len]); 421 ASSERT_TRUE(ac.check()); 422 memset(buf.get(), color, len); 423 ASSERT_EQ(lseek(fd, off, SEEK_SET), static_cast<ssize_t>(off)); 424 ASSERT_EQ(write(fd, buf.get(), len), static_cast<ssize_t>(len)); 425 END_HELPER; 426} 427 428bool CheckReadColor(int fd, size_t off, size_t len, uint8_t color) { 429 BEGIN_HELPER; 430 fbl::AllocChecker ac; 431 fbl::unique_ptr<uint8_t[]> buf(new (&ac) uint8_t[len]); 432 ASSERT_TRUE(ac.check()); 433 ASSERT_EQ(lseek(fd, off, SEEK_SET), static_cast<ssize_t>(off)); 434 ASSERT_EQ(read(fd, buf.get(), len), static_cast<ssize_t>(len)); 435 for (size_t i = 0; i < len; i++) { 436 ASSERT_EQ(buf[i], color); 437 } 438 END_HELPER; 439} 440 441bool CheckWriteReadBlock(int fd, size_t block, size_t count) { 442 BEGIN_HELPER; 443 block_info_t info; 444 ASSERT_GE(ioctl_block_get_info(fd, &info), 0); 445 size_t len = info.block_size * count; 446 size_t off = info.block_size * block; 447 fbl::AllocChecker ac; 448 fbl::unique_ptr<uint8_t[]> 
in(new (&ac) uint8_t[len]); 449 ASSERT_TRUE(ac.check()); 450 ASSERT_TRUE(CheckWrite(fd, off, len, in.get())); 451 ASSERT_TRUE(CheckRead(fd, off, len, in.get())); 452 END_HELPER; 453} 454 455bool CheckNoAccessBlock(int fd, size_t block, size_t count) { 456 BEGIN_HELPER; 457 block_info_t info; 458 ASSERT_GE(ioctl_block_get_info(fd, &info), 0); 459 fbl::AllocChecker ac; 460 fbl::unique_ptr<uint8_t[]> buf(new (&ac) uint8_t[info.block_size * count]); 461 ASSERT_TRUE(ac.check()); 462 size_t len = info.block_size * count; 463 size_t off = info.block_size * block; 464 for (size_t i = 0; i < len; i++) 465 buf[i] = static_cast<uint8_t>(rand()); 466 ASSERT_EQ(lseek(fd, off, SEEK_SET), static_cast<ssize_t>(off)); 467 ASSERT_EQ(write(fd, buf.get(), len), -1); 468 ASSERT_EQ(lseek(fd, off, SEEK_SET), static_cast<ssize_t>(off)); 469 ASSERT_EQ(read(fd, buf.get(), len), -1); 470 END_HELPER; 471} 472 473bool CheckDeadBlock(int fd) { 474 BEGIN_HELPER; 475 block_info_t info; 476 ASSERT_LT(ioctl_block_get_info(fd, &info), 0); 477 fbl::AllocChecker ac; 478 constexpr size_t kBlksize = 8192; 479 fbl::unique_ptr<uint8_t[]> buf(new (&ac) uint8_t[kBlksize]); 480 ASSERT_TRUE(ac.check()); 481 ASSERT_EQ(lseek(fd, 0, SEEK_SET), 0); 482 ASSERT_EQ(write(fd, buf.get(), kBlksize), -1); 483 ASSERT_EQ(lseek(fd, 0, SEEK_SET), 0); 484 ASSERT_EQ(read(fd, buf.get(), kBlksize), -1); 485 END_HELPER; 486} 487 488/////////////////////// Actual tests: 489 490// Test initializing the FVM on a partition that is smaller than a slice 491bool TestTooSmall(void) { 492 BEGIN_TEST; 493 494 if (use_real_disk) { 495 fprintf(stderr, "Test is ramdisk-exclusive; ignoring\n"); 496 return true; 497 } 498 499 char ramdisk_path[PATH_MAX]; 500 uint64_t blk_size = 512; 501 uint64_t blk_count = (1 << 15); 502 ASSERT_GE(create_ramdisk(blk_size, blk_count, ramdisk_path), 0); 503 int fd = open(ramdisk_path, O_RDWR); 504 ASSERT_GT(fd, 0); 505 size_t slice_size = blk_size * blk_count; 506 ASSERT_EQ(fvm_init(fd, slice_size), 
ZX_ERR_NO_SPACE); 507 ASSERT_EQ(EndFVMTest(ramdisk_path), 0, "unmounting FVM"); 508 END_TEST; 509} 510 511// Test initializing the FVM on a large partition, with metadata size > the max transfer size 512bool TestLarge(void) { 513 BEGIN_TEST; 514 515 if (use_real_disk) { 516 fprintf(stderr, "Test is ramdisk-exclusive; ignoring\n"); 517 return true; 518 } 519 520 char ramdisk_path[PATH_MAX]; 521 char fvm_path[PATH_MAX]; 522 uint64_t blk_size = 512; 523 uint64_t blk_count = 8 * (1 << 20); 524 ASSERT_GE(create_ramdisk(blk_size, blk_count, ramdisk_path), 0); 525 526 fbl::unique_fd fd(open(ramdisk_path, O_RDWR)); 527 ASSERT_GT(fd.get(), 0); 528 size_t slice_size = 16 * (1 << 10); 529 size_t metadata_size = fvm::MetadataSize(blk_size * blk_count, slice_size); 530 531 block_info_t info; 532 ASSERT_GE(ioctl_block_get_info(fd.get(), &info), 0); 533 ASSERT_LT(info.max_transfer_size, metadata_size); 534 535 ASSERT_EQ(fvm_init(fd.get(), slice_size), ZX_OK); 536 537 ASSERT_EQ(ioctl_device_bind(fd.get(), FVM_DRIVER_LIB, STRLEN(FVM_DRIVER_LIB)), 0); 538 fd.reset(); 539 540 snprintf(fvm_path, sizeof(fvm_path), "%s/fvm", ramdisk_path); 541 ASSERT_EQ(wait_for_device(fvm_path, ZX_SEC(3)), ZX_OK); 542 ASSERT_EQ(EndFVMTest(ramdisk_path), 0, "unmounting FVM"); 543 END_TEST; 544} 545 546// Load and unload an empty FVM 547bool TestEmpty(void) { 548 BEGIN_TEST; 549 char ramdisk_path[PATH_MAX]; 550 char fvm_driver[PATH_MAX]; 551 ASSERT_EQ(StartFVMTest(512, 1 << 20, 64lu * (1 << 20), ramdisk_path, fvm_driver), 0, "error mounting FVM"); 552 ASSERT_TRUE(FVMCheckSliceSize(fvm_driver, 64lu * (1 << 20))); 553 ASSERT_EQ(EndFVMTest(ramdisk_path), 0, "unmounting FVM"); 554 END_TEST; 555} 556 557// Test allocating a single partition 558bool TestAllocateOne(void) { 559 BEGIN_TEST; 560 char ramdisk_path[PATH_MAX]; 561 char fvm_driver[PATH_MAX]; 562 ASSERT_EQ(StartFVMTest(512, 1 << 20, 64lu * (1 << 20), ramdisk_path, fvm_driver), 0, "error mounting FVM"); 563 564 int fd = open(fvm_driver, O_RDWR); 565 
ASSERT_GT(fd, 0); 566 567 // Allocate one VPart 568 alloc_req_t request; 569 memset(&request, 0, sizeof(request)); 570 request.slice_count = 1; 571 memcpy(request.guid, kTestUniqueGUID, GUID_LEN); 572 strcpy(request.name, kTestPartName1); 573 memcpy(request.type, kTestPartGUIDData, GUID_LEN); 574 int vp_fd = fvm_allocate_partition(fd, &request); 575 ASSERT_GT(vp_fd, 0); 576 577 // Check that the name matches what we provided 578 char name[FVM_NAME_LEN + 1]; 579 ASSERT_GE(ioctl_block_get_name(vp_fd, name, sizeof(name)), 0); 580 ASSERT_EQ(memcmp(name, kTestPartName1, strlen(kTestPartName1)), 0); 581 582 // Check that we can read from / write to it. 583 ASSERT_TRUE(CheckWriteReadBlock(vp_fd, 0, 1)); 584 585 // Try accessing the block again after closing / re-opening it. 586 ASSERT_EQ(close(vp_fd), 0); 587 vp_fd = open_partition(kTestUniqueGUID, kTestPartGUIDData, 0, nullptr); 588 ASSERT_GT(vp_fd, 0, "Couldn't re-open Data VPart"); 589 ASSERT_TRUE(CheckWriteReadBlock(vp_fd, 0, 1)); 590 591 ASSERT_EQ(close(vp_fd), 0); 592 ASSERT_EQ(close(fd), 0); 593 ASSERT_TRUE(FVMCheckSliceSize(fvm_driver, 64lu * (1 << 20))); 594 ASSERT_EQ(EndFVMTest(ramdisk_path), 0, "unmounting FVM"); 595 END_TEST; 596} 597 598// Test allocating a collection of partitions 599bool TestAllocateMany(void) { 600 BEGIN_TEST; 601 char ramdisk_path[PATH_MAX]; 602 char fvm_driver[PATH_MAX]; 603 ASSERT_EQ(StartFVMTest(512, 1 << 20, 64lu * (1 << 20), ramdisk_path, fvm_driver), 0, "error mounting FVM"); 604 605 int fd = open(fvm_driver, O_RDWR); 606 ASSERT_GT(fd, 0); 607 608 // Test allocation of multiple VPartitions 609 alloc_req_t request; 610 memset(&request, 0, sizeof(request)); 611 request.slice_count = 1; 612 memcpy(request.guid, kTestUniqueGUID, GUID_LEN); 613 strcpy(request.name, kTestPartName1); 614 memcpy(request.type, kTestPartGUIDData, GUID_LEN); 615 int data_fd = fvm_allocate_partition(fd, &request); 616 ASSERT_GT(data_fd, 0); 617 618 strcpy(request.name, kTestPartName2); 619 memcpy(request.type, 
kTestPartGUIDBlob, GUID_LEN); 620 int blob_fd = fvm_allocate_partition(fd, &request); 621 ASSERT_GT(blob_fd, 0); 622 623 strcpy(request.name, kTestPartName3); 624 memcpy(request.type, kTestPartGUIDSystem, GUID_LEN); 625 int sys_fd = fvm_allocate_partition(fd, &request); 626 ASSERT_GT(sys_fd, 0); 627 628 ASSERT_TRUE(CheckWriteReadBlock(data_fd, 0, 1)); 629 ASSERT_TRUE(CheckWriteReadBlock(blob_fd, 0, 1)); 630 ASSERT_TRUE(CheckWriteReadBlock(sys_fd, 0, 1)); 631 632 ASSERT_EQ(close(data_fd), 0); 633 ASSERT_EQ(close(blob_fd), 0); 634 ASSERT_EQ(close(sys_fd), 0); 635 636 ASSERT_EQ(close(fd), 0); 637 ASSERT_TRUE(FVMCheckSliceSize(fvm_driver, 64lu * (1 << 20))); 638 ASSERT_EQ(EndFVMTest(ramdisk_path), 0, "unmounting FVM"); 639 END_TEST; 640} 641 642// Test that the fvm driver can cope with a sudden close during read / write 643// operations. 644bool TestCloseDuringAccess(void) { 645 BEGIN_TEST; 646 char ramdisk_path[PATH_MAX]; 647 char fvm_driver[PATH_MAX]; 648 ASSERT_EQ(StartFVMTest(512, 1 << 20, 64lu * (1 << 20), ramdisk_path, fvm_driver), 0, "error mounting FVM"); 649 650 int fd = open(fvm_driver, O_RDWR); 651 ASSERT_GT(fd, 0); 652 653 alloc_req_t request; 654 memset(&request, 0, sizeof(request)); 655 request.slice_count = 1; 656 memcpy(request.guid, kTestUniqueGUID, GUID_LEN); 657 strcpy(request.name, kTestPartName1); 658 memcpy(request.type, kTestPartGUIDData, GUID_LEN); 659 int vp_fd = fvm_allocate_partition(fd, &request); 660 ASSERT_GT(vp_fd, 0); 661 662 auto bg_thread = [](void* arg) { 663 int vp_fd = *reinterpret_cast<int*>(arg); 664 while (true) { 665 uint8_t in[8192]; 666 memset(in, 'a', sizeof(in)); 667 if (write(vp_fd, in, sizeof(in)) != static_cast<ssize_t>(sizeof(in))) { 668 return 0; 669 } 670 uint8_t out[8192]; 671 memset(out, 0, sizeof(out)); 672 lseek(vp_fd, 0, SEEK_SET); 673 if (read(vp_fd, out, sizeof(out)) != static_cast<ssize_t>(sizeof(out))) { 674 return 0; 675 } 676 // If we DID manage to read it, then the data should be valid... 
677 if (memcmp(in, out, sizeof(in)) != 0) { 678 return -1; 679 } 680 } 681 }; 682 683 // Launch a background thread to read from / write to the VPartition 684 thrd_t thread; 685 ASSERT_EQ(thrd_create(&thread, bg_thread, &vp_fd), thrd_success); 686 // Let the background thread warm up a little bit... 687 usleep(10000); 688 // ... and close the fd from underneath it! 689 // 690 // Yes, this is a little unsafe (we risk the bg thread accessing an 691 // unallocated fd), but no one else in this test process should be adding 692 // fds, so we won't risk anyone reusing "vp_fd" within this test case. 693 ASSERT_EQ(close(vp_fd), 0); 694 695 int res; 696 ASSERT_EQ(thrd_join(thread, &res), thrd_success); 697 ASSERT_EQ(res, 0, "Background thread failed"); 698 699 ASSERT_EQ(close(fd), 0); 700 ASSERT_TRUE(FVMCheckSliceSize(fvm_driver, 64lu * (1 << 20))); 701 ASSERT_EQ(EndFVMTest(ramdisk_path), 0, "unmounting FVM"); 702 END_TEST; 703} 704 705// Test that the fvm driver can cope with a sudden release during read / write 706// operations. 
707bool TestReleaseDuringAccess(void) { 708 BEGIN_TEST; 709 710 if (use_real_disk) { 711 fprintf(stderr, "Test is ramdisk-exclusive; ignoring\n"); 712 return true; 713 } 714 715 char ramdisk_path[PATH_MAX]; 716 char fvm_driver[PATH_MAX]; 717 ASSERT_EQ(StartFVMTest(512, 1 << 20, 64lu * (1 << 20), ramdisk_path, fvm_driver), 0, "error mounting FVM"); 718 719 int fd = open(fvm_driver, O_RDWR); 720 ASSERT_GT(fd, 0); 721 722 alloc_req_t request; 723 memset(&request, 0, sizeof(request)); 724 request.slice_count = 1; 725 memcpy(request.guid, kTestUniqueGUID, GUID_LEN); 726 strcpy(request.name, kTestPartName1); 727 memcpy(request.type, kTestPartGUIDData, GUID_LEN); 728 int vp_fd = fvm_allocate_partition(fd, &request); 729 ASSERT_GT(vp_fd, 0); 730 731 auto bg_thread = [](void* arg) { 732 int vp_fd = *reinterpret_cast<int*>(arg); 733 while (true) { 734 uint8_t in[8192]; 735 memset(in, 'a', sizeof(in)); 736 if (write(vp_fd, in, sizeof(in)) != static_cast<ssize_t>(sizeof(in))) { 737 return 0; 738 } 739 uint8_t out[8192]; 740 memset(out, 0, sizeof(out)); 741 lseek(vp_fd, 0, SEEK_SET); 742 if (read(vp_fd, out, sizeof(out)) != static_cast<ssize_t>(sizeof(out))) { 743 return 0; 744 } 745 // If we DID manage to read it, then the data should be valid... 746 if (memcmp(in, out, sizeof(in)) != 0) { 747 return -1; 748 } 749 } 750 }; 751 752 // Launch a background thread to read from / write to the VPartition 753 thrd_t thread; 754 ASSERT_EQ(thrd_create(&thread, bg_thread, &vp_fd), thrd_success); 755 // Let the background thread warm up a little bit... 756 usleep(10000); 757 // ... and close the entire ramdisk from underneath it! 
758 ASSERT_EQ(EndFVMTest(ramdisk_path), 0, "unmounting FVM"); 759 760 int res; 761 ASSERT_EQ(thrd_join(thread, &res), thrd_success); 762 ASSERT_EQ(res, 0, "Background thread failed"); 763 764 close(vp_fd); 765 close(fd); 766 END_TEST; 767} 768 769bool TestDestroyDuringAccess(void) { 770 BEGIN_TEST; 771 char ramdisk_path[PATH_MAX]; 772 char fvm_driver[PATH_MAX]; 773 ASSERT_EQ(StartFVMTest(512, 1 << 20, 64lu * (1 << 20), ramdisk_path, fvm_driver), 0, 774 "error mounting FVM"); 775 776 int fd = open(fvm_driver, O_RDWR); 777 ASSERT_GT(fd, 0); 778 779 alloc_req_t request; 780 request.slice_count = 1; 781 memcpy(request.guid, kTestUniqueGUID, GUID_LEN); 782 strcpy(request.name, kTestPartName1); 783 memcpy(request.type, kTestPartGUIDData, GUID_LEN); 784 int vp_fd = fvm_allocate_partition(fd, &request); 785 ASSERT_GT(vp_fd, 0); 786 787 auto bg_thread = [](void* arg) { 788 int vp_fd = *reinterpret_cast<int*>(arg); 789 unsigned count = 0; 790 while (true) { 791 if (++count % 10000 == 0) { 792 printf("Run %u\n", count); 793 } 794 uint8_t in[8192]; 795 memset(in, 'a', sizeof(in)); 796 if (write(vp_fd, in, sizeof(in)) != static_cast<ssize_t>(sizeof(in))) { 797 return 0; 798 } 799 uint8_t out[8192]; 800 memset(out, 0, sizeof(out)); 801 lseek(vp_fd, 0, SEEK_SET); 802 if (read(vp_fd, out, sizeof(out)) != static_cast<ssize_t>(sizeof(out))) { 803 return 0; 804 } 805 // If we DID manage to read it, then the data should be valid... 806 if (memcmp(in, out, sizeof(in)) != 0) { 807 return -1; 808 } 809 } 810 }; 811 812 // Launch a background thread to read from / write to the VPartition 813 thrd_t thread; 814 ASSERT_EQ(thrd_create(&thread, bg_thread, &vp_fd), thrd_success); 815 // Let the background thread warm up a little bit... 816 usleep(10000); 817 // ... 
and destroy the vpartition 818 ASSERT_EQ(ioctl_block_fvm_destroy_partition(vp_fd), 0); 819 820 int res; 821 ASSERT_EQ(thrd_join(thread, &res), thrd_success); 822 ASSERT_EQ(res, 0, "Background thread failed"); 823 824 close(vp_fd); 825 close(fd); 826 ASSERT_TRUE(FVMCheckSliceSize(fvm_driver, 64lu * (1 << 20))); 827 ASSERT_EQ(EndFVMTest(ramdisk_path), 0, "unmounting FVM"); 828 END_TEST; 829} 830 831// Test allocating additional slices to a vpartition. 832bool TestVPartitionExtend(void) { 833 BEGIN_TEST; 834 char ramdisk_path[PATH_MAX]; 835 char fvm_driver[PATH_MAX]; 836 ASSERT_EQ(StartFVMTest(512, 1 << 20, 64lu * (1 << 20), ramdisk_path, fvm_driver), 0, "error mounting FVM"); 837 const size_t kDiskSize = use_real_disk ? test_block_size * test_block_count : 512 * (1 << 20); 838 839 int fd = open(fvm_driver, O_RDWR); 840 ASSERT_GT(fd, 0); 841 fvm_info_t fvm_info; 842 ASSERT_GT(ioctl_block_fvm_query(fd, &fvm_info), 0); 843 size_t slice_size = fvm_info.slice_size; 844 size_t slices_total = fvm::UsableSlicesCount(kDiskSize, slice_size); 845 size_t slices_left = slices_total; 846 847 ASSERT_TRUE(FVMCheckAllocatedCount(fd, slices_total - slices_left, slices_total)); 848 849 // Allocate one VPart 850 alloc_req_t request; 851 memset(&request, 0, sizeof(request)); 852 size_t slice_count = 1; 853 request.slice_count = slice_count; 854 memcpy(request.guid, kTestUniqueGUID, GUID_LEN); 855 strcpy(request.name, kTestPartName1); 856 memcpy(request.type, kTestPartGUIDData, GUID_LEN); 857 int vp_fd = fvm_allocate_partition(fd, &request); 858 ASSERT_GT(vp_fd, 0); 859 slices_left--; 860 ASSERT_TRUE(FVMCheckAllocatedCount(fd, slices_total - slices_left, slices_total)); 861 862 // Confirm that the disk reports the correct number of slices 863 block_info_t info; 864 ASSERT_GE(ioctl_block_get_info(vp_fd, &info), 0); 865 ASSERT_EQ(info.block_count * info.block_size, slice_size * slice_count); 866 867 extend_request_t erequest; 868 869 // Try re-allocating an already allocated vslice 870 
erequest.offset = 0; 871 erequest.length = 1; 872 ASSERT_LT(ioctl_block_fvm_extend(vp_fd, &erequest), 0, "Expected request failure"); 873 ASSERT_GE(ioctl_block_get_info(vp_fd, &info), 0); 874 ASSERT_EQ(info.block_count * info.block_size, slice_size * slice_count); 875 876 // Try again with a portion of the request which is unallocated 877 erequest.length = 2; 878 ASSERT_LT(ioctl_block_fvm_extend(vp_fd, &erequest), 0, "Expected request failure"); 879 ASSERT_GE(ioctl_block_get_info(vp_fd, &info), 0); 880 ASSERT_EQ(info.block_count * info.block_size, slice_size * slice_count); 881 882 // Allocate OBSCENELY too many slices 883 erequest.offset = slice_count; 884 erequest.length = fbl::numeric_limits<size_t>::max(); 885 ASSERT_LT(ioctl_block_fvm_extend(vp_fd, &erequest), 0, "Expected request failure"); 886 887 // Allocate slices at a too-large offset 888 erequest.offset = fbl::numeric_limits<size_t>::max(); 889 erequest.length = 1; 890 ASSERT_LT(ioctl_block_fvm_extend(vp_fd, &erequest), 0, "Expected request failure"); 891 892 // Attempt to allocate slightly too many slices 893 erequest.offset = slice_count; 894 erequest.length = slices_left + 1; 895 ASSERT_LT(ioctl_block_fvm_extend(vp_fd, &erequest), 0, "Expected request failure"); 896 897 // The number of free slices should be unchanged. 
898 ASSERT_TRUE(FVMCheckAllocatedCount(fd, slices_total - slices_left, slices_total)); 899 900 // Allocate exactly the remaining number of slices 901 erequest.offset = slice_count; 902 erequest.length = slices_left; 903 ASSERT_EQ(ioctl_block_fvm_extend(vp_fd, &erequest), 0); 904 slice_count += slices_left; 905 slices_left = 0; 906 ASSERT_TRUE(FVMCheckAllocatedCount(fd, slices_total - slices_left, slices_total)); 907 ASSERT_GE(ioctl_block_get_info(vp_fd, &info), 0); 908 ASSERT_EQ(info.block_count * info.block_size, slice_size * slice_count); 909 910 // We can't allocate any more to this VPartition 911 erequest.offset = slice_count; 912 erequest.length = 1; 913 ASSERT_LT(ioctl_block_fvm_extend(vp_fd, &erequest), 0, "Expected request failure"); 914 915 // We can't allocate a new VPartition 916 strcpy(request.name, kTestPartName2); 917 memcpy(request.type, kTestPartGUIDBlob, GUID_LEN); 918 ASSERT_LT(ioctl_block_fvm_alloc_partition(fd, &request), 0, "Couldn't allocate VPart"); 919 920 ASSERT_EQ(close(vp_fd), 0); 921 ASSERT_EQ(close(fd), 0); 922 ASSERT_TRUE(FVMCheckSliceSize(fvm_driver, 64lu * (1 << 20))); 923 ASSERT_EQ(EndFVMTest(ramdisk_path), 0, "unmounting FVM"); 924 END_TEST; 925} 926 927// Test allocating very sparse VPartition 928bool TestVPartitionExtendSparse(void) { 929 BEGIN_TEST; 930 char ramdisk_path[PATH_MAX]; 931 char fvm_driver[PATH_MAX]; 932 uint64_t blk_size = use_real_disk ? test_block_size : 512; 933 uint64_t blk_count = use_real_disk ? 
test_block_size : 1 << 20; 934 uint64_t slice_size = 16 * blk_size; 935 ASSERT_EQ(StartFVMTest(blk_size, blk_count, slice_size, ramdisk_path, 936 fvm_driver), 0, "error mounting FVM"); 937 938 size_t slices_left = fvm::UsableSlicesCount(blk_size * blk_count, slice_size); 939 int fd = open(fvm_driver, O_RDWR); 940 ASSERT_GT(fd, 0); 941 942 alloc_req_t request; 943 memset(&request, 0, sizeof(request)); 944 request.slice_count = 1; 945 slices_left--; 946 memcpy(request.guid, kTestUniqueGUID, GUID_LEN); 947 strcpy(request.name, kTestPartName1); 948 memcpy(request.type, kTestPartGUIDData, GUID_LEN); 949 int vp_fd = fvm_allocate_partition(fd, &request); 950 ASSERT_GT(vp_fd, 0); 951 ASSERT_TRUE(CheckWriteReadBlock(vp_fd, 0, 1)); 952 953 // Double check that we can access a block at this vslice address 954 // (this isn't always possible; for certain slice sizes, blocks may be 955 // allocatable / freeable, but not addressable). 956 size_t bno = (VSLICE_MAX - 1) * (slice_size / blk_size); 957 ASSERT_EQ(bno / (slice_size / blk_size), (VSLICE_MAX - 1), "bno overflowed"); 958 ASSERT_EQ((bno * blk_size) / blk_size, bno, "block access will overflow"); 959 960 extend_request_t erequest; 961 962 // Try allocating at a location that's slightly too large 963 erequest.offset = VSLICE_MAX; 964 erequest.length = 1; 965 ASSERT_LT(ioctl_block_fvm_extend(vp_fd, &erequest), 0, "Expected request failure"); 966 967 // Try allocating at the largest offset 968 erequest.offset = VSLICE_MAX - 1; 969 erequest.length = 1; 970 ASSERT_EQ(ioctl_block_fvm_extend(vp_fd, &erequest), 0); 971 ASSERT_TRUE(CheckWriteReadBlock(vp_fd, bno, 1)); 972 973 // Try freeing beyond largest offset 974 erequest.offset = VSLICE_MAX; 975 erequest.length = 1; 976 ASSERT_LT(ioctl_block_fvm_shrink(vp_fd, &erequest), 0, "Expected request failure"); 977 ASSERT_TRUE(CheckWriteReadBlock(vp_fd, bno, 1)); 978 979 // Try freeing at the largest offset 980 erequest.offset = VSLICE_MAX - 1; 981 erequest.length = 1; 982 
ASSERT_EQ(ioctl_block_fvm_shrink(vp_fd, &erequest), 0); 983 ASSERT_TRUE(CheckNoAccessBlock(vp_fd, bno, 1)); 984 985 ASSERT_EQ(close(vp_fd), 0); 986 ASSERT_EQ(close(fd), 0); 987 ASSERT_TRUE(FVMCheckSliceSize(fvm_driver, slice_size)); 988 ASSERT_EQ(EndFVMTest(ramdisk_path), 0, "unmounting FVM"); 989 END_TEST; 990} 991 992// Test removing slices from a VPartition. 993bool TestVPartitionShrink(void) { 994 BEGIN_TEST; 995 char ramdisk_path[PATH_MAX]; 996 char fvm_driver[PATH_MAX]; 997 ASSERT_EQ(StartFVMTest(512, 1 << 20, 64lu * (1 << 20), ramdisk_path, fvm_driver), 0, "error mounting FVM"); 998 const size_t kDiskSize = use_real_disk ? test_block_size * test_block_count : 512 * (1 << 20); 999 1000 int fd = open(fvm_driver, O_RDWR); 1001 ASSERT_GT(fd, 0); 1002 fvm_info_t fvm_info; 1003 ASSERT_GT(ioctl_block_fvm_query(fd, &fvm_info), 0); 1004 size_t slice_size = fvm_info.slice_size; 1005 size_t slices_total = fvm::UsableSlicesCount(kDiskSize, slice_size); 1006 size_t slices_left = slices_total; 1007 1008 ASSERT_TRUE(FVMCheckAllocatedCount(fd, slices_total - slices_left, slices_total)); 1009 1010 // Allocate one VPart 1011 alloc_req_t request; 1012 memset(&request, 0, sizeof(request)); 1013 size_t slice_count = 1; 1014 request.slice_count = slice_count; 1015 memcpy(request.guid, kTestUniqueGUID, GUID_LEN); 1016 strcpy(request.name, kTestPartName1); 1017 memcpy(request.type, kTestPartGUIDData, GUID_LEN); 1018 int vp_fd = fvm_allocate_partition(fd, &request); 1019 ASSERT_GT(vp_fd, 0); 1020 slices_left--; 1021 1022 // Confirm that the disk reports the correct number of slices 1023 block_info_t info; 1024 ASSERT_GE(ioctl_block_get_info(vp_fd, &info), 0); 1025 ASSERT_EQ(info.block_count * info.block_size, slice_size * slice_count); 1026 ASSERT_TRUE(CheckWriteReadBlock(vp_fd, (slice_size / info.block_size) - 1, 1)); 1027 ASSERT_TRUE(CheckNoAccessBlock(vp_fd, (slice_size / info.block_size) - 1, 2)); 1028 ASSERT_TRUE(FVMCheckAllocatedCount(fd, slices_total - slices_left, 
slices_total)); 1029 1030 extend_request_t erequest; 1031 1032 // Try shrinking the 0th vslice 1033 erequest.offset = 0; 1034 erequest.length = 1; 1035 ASSERT_LT(ioctl_block_fvm_shrink(vp_fd, &erequest), 0, "Expected request failure (0th offset)"); 1036 1037 // Try no-op requests 1038 erequest.offset = 1; 1039 erequest.length = 0; 1040 ASSERT_EQ(ioctl_block_fvm_extend(vp_fd, &erequest), 0, "Zero Length request should be no-op"); 1041 ASSERT_EQ(ioctl_block_fvm_shrink(vp_fd, &erequest), 0, "Zero Length request should be no-op"); 1042 ASSERT_GE(ioctl_block_get_info(vp_fd, &info), 0); 1043 ASSERT_EQ(info.block_count * info.block_size, slice_size * slice_count); 1044 1045 // Try again with a portion of the request which is unallocated 1046 erequest.length = 2; 1047 ASSERT_LT(ioctl_block_fvm_shrink(vp_fd, &erequest), 0, "Expected request failure"); 1048 ASSERT_GE(ioctl_block_get_info(vp_fd, &info), 0); 1049 ASSERT_EQ(info.block_count * info.block_size, slice_size * slice_count); 1050 ASSERT_TRUE(FVMCheckAllocatedCount(fd, slices_total - slices_left, slices_total)); 1051 1052 // Allocate exactly the remaining number of slices 1053 erequest.offset = slice_count; 1054 erequest.length = slices_left; 1055 ASSERT_EQ(ioctl_block_fvm_extend(vp_fd, &erequest), 0); 1056 slice_count += slices_left; 1057 slices_left = 0; 1058 ASSERT_GE(ioctl_block_get_info(vp_fd, &info), 0); 1059 ASSERT_EQ(info.block_count * info.block_size, slice_size * slice_count); 1060 ASSERT_TRUE(CheckWriteReadBlock(vp_fd, (slice_size / info.block_size) - 1, 1)); 1061 ASSERT_TRUE(CheckWriteReadBlock(vp_fd, (slice_size / info.block_size) - 1, 2)); 1062 ASSERT_TRUE(FVMCheckAllocatedCount(fd, slices_total - slices_left, slices_total)); 1063 1064 // We can't allocate any more to this VPartition 1065 erequest.offset = slice_count; 1066 erequest.length = 1; 1067 ASSERT_LT(ioctl_block_fvm_extend(vp_fd, &erequest), 0, "Expected request failure"); 1068 1069 // Try to shrink off the end (okay, since SOME of the slices 
are allocated) 1070 erequest.offset = 1; 1071 erequest.length = slice_count + 3; 1072 ASSERT_EQ(ioctl_block_fvm_shrink(vp_fd, &erequest), 0); 1073 ASSERT_TRUE(FVMCheckAllocatedCount(fd, 1, slices_total)); 1074 1075 // The same request to shrink should now fail (NONE of the slices are 1076 // allocated) 1077 erequest.offset = 1; 1078 erequest.length = slice_count - 1; 1079 ASSERT_LT(ioctl_block_fvm_shrink(vp_fd, &erequest), 0, "Expected request failure"); 1080 ASSERT_TRUE(FVMCheckAllocatedCount(fd, 1, slices_total)); 1081 1082 // ... unless we re-allocate and try again. 1083 ASSERT_EQ(ioctl_block_fvm_extend(vp_fd, &erequest), 0); 1084 ASSERT_EQ(ioctl_block_fvm_shrink(vp_fd, &erequest), 0); 1085 1086 ASSERT_EQ(close(vp_fd), 0); 1087 ASSERT_EQ(close(fd), 0); 1088 ASSERT_TRUE(FVMCheckSliceSize(fvm_driver, 64lu * (1 << 20))); 1089 ASSERT_EQ(EndFVMTest(ramdisk_path), 0, "unmounting FVM"); 1090 END_TEST; 1091} 1092 1093// Test splitting a contiguous slice extent into multiple parts 1094bool TestVPartitionSplit(void) { 1095 BEGIN_TEST; 1096 char ramdisk_path[PATH_MAX]; 1097 char fvm_driver[PATH_MAX]; 1098 ASSERT_EQ(StartFVMTest(512, 1 << 20, 64lu * (1 << 20), ramdisk_path, fvm_driver), 0, "error mounting FVM"); 1099 size_t disk_size = 512 * (1 << 20); 1100 1101 int fd = open(fvm_driver, O_RDWR); 1102 ASSERT_GT(fd, 0); 1103 fvm_info_t fvm_info; 1104 ASSERT_GT(ioctl_block_fvm_query(fd, &fvm_info), 0); 1105 size_t slice_size = fvm_info.slice_size; 1106 size_t slices_left = fvm::UsableSlicesCount(disk_size, slice_size); 1107 1108 // Allocate one VPart 1109 alloc_req_t request; 1110 memset(&request, 0, sizeof(request)); 1111 size_t slice_count = 5; 1112 request.slice_count = slice_count; 1113 memcpy(request.guid, kTestUniqueGUID, GUID_LEN); 1114 strcpy(request.name, kTestPartName1); 1115 memcpy(request.type, kTestPartGUIDData, GUID_LEN); 1116 int vp_fd = fvm_allocate_partition(fd, &request); 1117 ASSERT_GT(vp_fd, 0); 1118 slices_left--; 1119 1120 // Confirm that the disk 
reports the correct number of slices 1121 block_info_t info; 1122 ASSERT_GE(ioctl_block_get_info(vp_fd, &info), 0); 1123 ASSERT_EQ(info.block_count * info.block_size, slice_size * slice_count); 1124 1125 extend_request_t reset_erequest; 1126 reset_erequest.offset = 1; 1127 reset_erequest.length = slice_count - 1; 1128 extend_request_t mid_erequest; 1129 mid_erequest.offset = 2; 1130 mid_erequest.length = 1; 1131 extend_request_t start_erequest; 1132 start_erequest.offset = 1; 1133 start_erequest.length = 1; 1134 extend_request_t end_erequest; 1135 end_erequest.offset = 3; 1136 end_erequest.length = slice_count - 3; 1137 1138 1139 auto verifyExtents = [=](bool start, bool mid, bool end) { 1140 size_t start_block = start_erequest.offset * (slice_size / info.block_size); 1141 size_t mid_block = mid_erequest.offset * (slice_size / info.block_size); 1142 size_t end_block = end_erequest.offset * (slice_size / info.block_size); 1143 if (start) { 1144 ASSERT_TRUE(CheckWriteReadBlock(vp_fd, start_block, 1)); 1145 } else { 1146 ASSERT_TRUE(CheckNoAccessBlock(vp_fd, start_block, 1)); 1147 } 1148 if (mid) { 1149 ASSERT_TRUE(CheckWriteReadBlock(vp_fd, mid_block, 1)); 1150 } else { 1151 ASSERT_TRUE(CheckNoAccessBlock(vp_fd, mid_block, 1)); 1152 } 1153 if (end) { 1154 ASSERT_TRUE(CheckWriteReadBlock(vp_fd, end_block, 1)); 1155 } else { 1156 ASSERT_TRUE(CheckNoAccessBlock(vp_fd, end_block, 1)); 1157 } 1158 return true; 1159 }; 1160 1161 // We should be able to split the extent. 
1162 ASSERT_TRUE(verifyExtents(true, true, true)); 1163 ASSERT_EQ(ioctl_block_fvm_shrink(vp_fd, &mid_erequest), 0); 1164 ASSERT_TRUE(verifyExtents(true, false, true)); 1165 ASSERT_EQ(ioctl_block_fvm_shrink(vp_fd, &start_erequest), 0); 1166 ASSERT_TRUE(verifyExtents(false, false, true)); 1167 ASSERT_EQ(ioctl_block_fvm_shrink(vp_fd, &end_erequest), 0); 1168 ASSERT_TRUE(verifyExtents(false, false, false)); 1169 1170 ASSERT_EQ(ioctl_block_fvm_extend(vp_fd, &reset_erequest), 0); 1171 1172 ASSERT_EQ(ioctl_block_fvm_shrink(vp_fd, &start_erequest), 0); 1173 ASSERT_TRUE(verifyExtents(false, true, true)); 1174 ASSERT_EQ(ioctl_block_fvm_shrink(vp_fd, &mid_erequest), 0); 1175 ASSERT_TRUE(verifyExtents(false, false, true)); 1176 ASSERT_EQ(ioctl_block_fvm_shrink(vp_fd, &end_erequest), 0); 1177 ASSERT_TRUE(verifyExtents(false, false, false)); 1178 1179 ASSERT_EQ(ioctl_block_fvm_extend(vp_fd, &reset_erequest), 0); 1180 1181 ASSERT_EQ(ioctl_block_fvm_shrink(vp_fd, &end_erequest), 0); 1182 ASSERT_TRUE(verifyExtents(true, true, false)); 1183 ASSERT_EQ(ioctl_block_fvm_shrink(vp_fd, &mid_erequest), 0); 1184 ASSERT_TRUE(verifyExtents(true, false, false)); 1185 ASSERT_EQ(ioctl_block_fvm_shrink(vp_fd, &start_erequest), 0); 1186 ASSERT_TRUE(verifyExtents(false, false, false)); 1187 1188 ASSERT_EQ(ioctl_block_fvm_extend(vp_fd, &reset_erequest), 0); 1189 1190 ASSERT_EQ(ioctl_block_fvm_shrink(vp_fd, &end_erequest), 0); 1191 ASSERT_TRUE(verifyExtents(true, true, false)); 1192 ASSERT_EQ(ioctl_block_fvm_shrink(vp_fd, &start_erequest), 0); 1193 ASSERT_TRUE(verifyExtents(false, true, false)); 1194 ASSERT_EQ(ioctl_block_fvm_shrink(vp_fd, &mid_erequest), 0); 1195 ASSERT_TRUE(verifyExtents(false, false, false)); 1196 1197 // We should also be able to combine extents 1198 ASSERT_EQ(ioctl_block_fvm_extend(vp_fd, &mid_erequest), 0); 1199 ASSERT_TRUE(verifyExtents(false, true, false)); 1200 ASSERT_EQ(ioctl_block_fvm_extend(vp_fd, &start_erequest), 0); 1201 ASSERT_TRUE(verifyExtents(true, true, false)); 
1202 ASSERT_EQ(ioctl_block_fvm_extend(vp_fd, &end_erequest), 0); 1203 ASSERT_TRUE(verifyExtents(true, true, true)); 1204 1205 ASSERT_EQ(ioctl_block_fvm_shrink(vp_fd, &reset_erequest), 0); 1206 1207 ASSERT_EQ(ioctl_block_fvm_extend(vp_fd, &end_erequest), 0); 1208 ASSERT_TRUE(verifyExtents(false, false, true)); 1209 ASSERT_EQ(ioctl_block_fvm_extend(vp_fd, &mid_erequest), 0); 1210 ASSERT_TRUE(verifyExtents(false, true, true)); 1211 ASSERT_EQ(ioctl_block_fvm_extend(vp_fd, &start_erequest), 0); 1212 ASSERT_TRUE(verifyExtents(true, true, true)); 1213 1214 ASSERT_EQ(ioctl_block_fvm_shrink(vp_fd, &reset_erequest), 0); 1215 1216 ASSERT_EQ(ioctl_block_fvm_extend(vp_fd, &end_erequest), 0); 1217 ASSERT_TRUE(verifyExtents(false, false, true)); 1218 ASSERT_EQ(ioctl_block_fvm_extend(vp_fd, &start_erequest), 0); 1219 ASSERT_TRUE(verifyExtents(true, false, true)); 1220 ASSERT_EQ(ioctl_block_fvm_extend(vp_fd, &mid_erequest), 0); 1221 ASSERT_TRUE(verifyExtents(true, true, true)); 1222 1223 ASSERT_EQ(close(vp_fd), 0); 1224 ASSERT_EQ(close(fd), 0); 1225 ASSERT_TRUE(FVMCheckSliceSize(fvm_driver, 64lu * (1 << 20))); 1226 ASSERT_EQ(EndFVMTest(ramdisk_path), 0, "unmounting FVM"); 1227 END_TEST; 1228} 1229 1230// Test removing VPartitions within an FVM 1231bool TestVPartitionDestroy(void) { 1232 BEGIN_TEST; 1233 char ramdisk_path[PATH_MAX]; 1234 char fvm_driver[PATH_MAX]; 1235 ASSERT_EQ(StartFVMTest(512, 1 << 20, 64lu * (1 << 20), ramdisk_path, fvm_driver), 0, "error mounting FVM"); 1236 1237 int fd = open(fvm_driver, O_RDWR); 1238 ASSERT_GT(fd, 0); 1239 1240 // Test allocation of multiple VPartitions 1241 alloc_req_t request; 1242 memset(&request, 0, sizeof(request)); 1243 request.slice_count = 1; 1244 memcpy(request.guid, kTestUniqueGUID, GUID_LEN); 1245 strcpy(request.name, kTestPartName1); 1246 memcpy(request.type, kTestPartGUIDData, GUID_LEN); 1247 int data_fd = fvm_allocate_partition(fd, &request); 1248 ASSERT_GT(data_fd, 0); 1249 strcpy(request.name, kTestPartName2); 1250 
memcpy(request.type, kTestPartGUIDBlob, GUID_LEN); 1251 int blob_fd = fvm_allocate_partition(fd, &request); 1252 ASSERT_GT(blob_fd, 0); 1253 strcpy(request.name, kTestPartName3); 1254 memcpy(request.type, kTestPartGUIDSystem, GUID_LEN); 1255 int sys_fd = fvm_allocate_partition(fd, &request); 1256 ASSERT_GT(sys_fd, 0); 1257 1258 // We can access all three... 1259 ASSERT_TRUE(CheckWriteReadBlock(data_fd, 0, 1)); 1260 ASSERT_TRUE(CheckWriteReadBlock(blob_fd, 0, 1)); 1261 ASSERT_TRUE(CheckWriteReadBlock(sys_fd, 0, 1)); 1262 1263 // But not after we destroy the blob partition. 1264 ASSERT_EQ(ioctl_block_fvm_destroy_partition(blob_fd), 0); 1265 ASSERT_TRUE(CheckWriteReadBlock(data_fd, 0, 1)); 1266 ASSERT_TRUE(CheckDeadBlock(blob_fd)); 1267 ASSERT_TRUE(CheckWriteReadBlock(sys_fd, 0, 1)); 1268 1269 // We also can't re-destroy the blob partition. 1270 ASSERT_LT(ioctl_block_fvm_destroy_partition(blob_fd), 0); 1271 1272 // We also can't allocate slices to the destroyed blob partition. 1273 extend_request_t erequest; 1274 erequest.offset = 1; 1275 erequest.length = 1; 1276 ASSERT_LT(ioctl_block_fvm_extend(blob_fd, &erequest), 0); 1277 1278 // Destroy the other two VPartitions. 
1279 ASSERT_EQ(ioctl_block_fvm_destroy_partition(data_fd), 0); 1280 ASSERT_TRUE(CheckDeadBlock(data_fd)); 1281 ASSERT_TRUE(CheckDeadBlock(blob_fd)); 1282 ASSERT_TRUE(CheckWriteReadBlock(sys_fd, 0, 1)); 1283 1284 ASSERT_EQ(ioctl_block_fvm_destroy_partition(sys_fd), 0); 1285 ASSERT_TRUE(CheckDeadBlock(data_fd)); 1286 ASSERT_TRUE(CheckDeadBlock(blob_fd)); 1287 ASSERT_TRUE(CheckDeadBlock(sys_fd)); 1288 1289 ASSERT_EQ(close(data_fd), 0); 1290 ASSERT_EQ(close(blob_fd), 0); 1291 ASSERT_EQ(close(sys_fd), 0); 1292 ASSERT_EQ(close(fd), 0); 1293 1294 ASSERT_TRUE(FVMCheckSliceSize(fvm_driver, 64lu * (1 << 20))); 1295 ASSERT_EQ(EndFVMTest(ramdisk_path), 0, "unmounting FVM"); 1296 END_TEST; 1297} 1298 1299bool TestVPartitionQuery(void) { 1300 BEGIN_TEST; 1301 char ramdisk_path[PATH_MAX]; 1302 char fvm_driver[PATH_MAX]; 1303 size_t slice_count = 64; 1304 size_t block_count = 512; 1305 size_t block_size = 1 << 20; 1306 size_t slice_size = (block_count * block_size) / slice_count; 1307 ASSERT_EQ(StartFVMTest(block_count, block_size, slice_size, ramdisk_path, fvm_driver), 1308 0, "error mounting FVM"); 1309 int fd = open(fvm_driver, O_RDWR); 1310 ASSERT_GT(fd, 0); 1311 1312 // Allocate partition 1313 alloc_req_t request; 1314 memset(&request, 0, sizeof(request)); 1315 request.slice_count = 10; 1316 memcpy(request.guid, kTestUniqueGUID, GUID_LEN); 1317 strcpy(request.name, kTestPartName1); 1318 memcpy(request.type, kTestPartGUIDData, GUID_LEN); 1319 int part_fd = fvm_allocate_partition(fd, &request); 1320 ASSERT_GT(part_fd, 0); 1321 1322 // Create non-contiguous extent 1323 extend_request_t extend_request; 1324 extend_request.offset = 20; 1325 extend_request.length = 10; 1326 ASSERT_EQ(ioctl_block_fvm_extend(part_fd, &extend_request), 0); 1327 1328 fvm_info_t fvm_info; 1329 ASSERT_GT(ioctl_block_fvm_query(fd, &fvm_info), 0); 1330 1331 // Query various vslice ranges 1332 query_request_t query_request; 1333 query_request.count = 6; 1334 query_request.vslice_start[0] = 0; 1335 
query_request.vslice_start[1] = 10; 1336 query_request.vslice_start[2] = 20; 1337 query_request.vslice_start[3] = 50; 1338 query_request.vslice_start[4] = 25; 1339 query_request.vslice_start[5] = 15; 1340 1341 // Check response from partition query 1342 query_response_t query_response; 1343 ASSERT_EQ(ioctl_block_fvm_vslice_query(part_fd, &query_request, &query_response), 1344 sizeof(query_response_t)); 1345 ASSERT_EQ(query_response.count, query_request.count); 1346 ASSERT_TRUE(query_response.vslice_range[0].allocated); 1347 ASSERT_EQ(query_response.vslice_range[0].count, 10); 1348 ASSERT_FALSE(query_response.vslice_range[1].allocated); 1349 ASSERT_EQ(query_response.vslice_range[1].count, 10); 1350 ASSERT_TRUE(query_response.vslice_range[2].allocated); 1351 ASSERT_EQ(query_response.vslice_range[2].count, 10); 1352 ASSERT_FALSE(query_response.vslice_range[3].allocated); 1353 ASSERT_EQ(query_response.vslice_range[3].count, fvm_info.vslice_count - 50); 1354 ASSERT_TRUE(query_response.vslice_range[4].allocated); 1355 ASSERT_EQ(query_response.vslice_range[4].count, 5); 1356 ASSERT_FALSE(query_response.vslice_range[5].allocated); 1357 ASSERT_EQ(query_response.vslice_range[5].count, 5); 1358 1359 // Merge the extents! 
1360 extend_request.offset = 10; 1361 extend_request.length = 10; 1362 ASSERT_EQ(ioctl_block_fvm_extend(part_fd, &extend_request), 0); 1363 1364 // Check partition query response again after extend 1365 ASSERT_EQ(ioctl_block_fvm_vslice_query(part_fd, &query_request, &query_response), 1366 sizeof(query_response_t)); 1367 ASSERT_EQ(query_response.count, query_request.count); 1368 ASSERT_TRUE(query_response.vslice_range[0].allocated); 1369 ASSERT_EQ(query_response.vslice_range[0].count, 30); 1370 ASSERT_TRUE(query_response.vslice_range[1].allocated); 1371 ASSERT_EQ(query_response.vslice_range[1].count, 20); 1372 ASSERT_TRUE(query_response.vslice_range[2].allocated); 1373 ASSERT_EQ(query_response.vslice_range[2].count, 10); 1374 ASSERT_FALSE(query_response.vslice_range[3].allocated); 1375 ASSERT_EQ(query_response.vslice_range[3].count, fvm_info.vslice_count - 50); 1376 ASSERT_TRUE(query_response.vslice_range[4].allocated); 1377 ASSERT_EQ(query_response.vslice_range[4].count, 5); 1378 ASSERT_TRUE(query_response.vslice_range[5].allocated); 1379 ASSERT_EQ(query_response.vslice_range[5].count, 15); 1380 1381 query_request.vslice_start[0] = fvm_info.vslice_count + 1; 1382 ASSERT_EQ(ioctl_block_fvm_vslice_query(part_fd, &query_request, &query_response), 1383 ZX_ERR_OUT_OF_RANGE); 1384 1385 // Check that request count is valid 1386 query_request.count = MAX_FVM_VSLICE_REQUESTS + 1; 1387 ASSERT_EQ(ioctl_block_fvm_vslice_query(part_fd, &query_request, &query_response), 1388 ZX_ERR_BUFFER_TOO_SMALL); 1389 1390 ASSERT_EQ(close(part_fd), 0); 1391 ASSERT_EQ(close(fd), 0); 1392 ASSERT_TRUE(FVMCheckSliceSize(fvm_driver, slice_size)); 1393 ASSERT_EQ(EndFVMTest(ramdisk_path), 0, "unmounting FVM"); 1394 END_TEST; 1395} 1396 1397// Test allocating and accessing slices which are allocated contiguously. 
1398bool TestSliceAccessContiguous(void) { 1399 BEGIN_TEST; 1400 char ramdisk_path[PATH_MAX]; 1401 char fvm_driver[PATH_MAX]; 1402 ASSERT_EQ(StartFVMTest(512, 1 << 20, 64lu * (1 << 20), ramdisk_path, fvm_driver), 0, "error mounting FVM"); 1403 1404 int fd = open(fvm_driver, O_RDWR); 1405 ASSERT_GT(fd, 0); 1406 fvm_info_t fvm_info; 1407 ASSERT_GT(ioctl_block_fvm_query(fd, &fvm_info), 0); 1408 size_t slice_size = fvm_info.slice_size; 1409 1410 // Allocate one VPart 1411 alloc_req_t request; 1412 memset(&request, 0, sizeof(request)); 1413 request.slice_count = 1; 1414 memcpy(request.guid, kTestUniqueGUID, GUID_LEN); 1415 strcpy(request.name, kTestPartName1); 1416 memcpy(request.type, kTestPartGUIDData, GUID_LEN); 1417 int vp_fd = fvm_allocate_partition(fd, &request); 1418 ASSERT_GT(vp_fd, 0); 1419 block_info_t info; 1420 ASSERT_GE(ioctl_block_get_info(vp_fd, &info), 0); 1421 1422 // This is the last 'accessible' block. 1423 size_t last_block = (slice_size / info.block_size) - 1; 1424 1425 { 1426 fbl::RefPtr<VmoClient> vc; 1427 ASSERT_TRUE(VmoClient::Create(vp_fd, &vc)); 1428 fbl::unique_ptr<VmoBuf> vb; 1429 ASSERT_TRUE(VmoBuf::Create(vc, info.block_size * 2, &vb)); 1430 ASSERT_TRUE(vc->CheckWrite(vb.get(), 0, info.block_size * last_block, info.block_size)); 1431 ASSERT_TRUE(vc->CheckRead(vb.get(), 0, info.block_size * last_block, info.block_size)); 1432 1433 // Try writing out of bounds -- check that we don't have access. 1434 ASSERT_TRUE(CheckNoAccessBlock(vp_fd, (slice_size / info.block_size) - 1, 2)); 1435 ASSERT_TRUE(CheckNoAccessBlock(vp_fd, slice_size / info.block_size, 1)); 1436 1437 // Attempt to access the next contiguous slice 1438 extend_request_t erequest; 1439 erequest.offset = 1; 1440 erequest.length = 1; 1441 ASSERT_EQ(ioctl_block_fvm_extend(vp_fd, &erequest), 0, "Couldn't extend VPartition"); 1442 1443 // Now we can access the next slice... 
1444 ASSERT_TRUE(vc->CheckWrite(vb.get(), info.block_size, 1445 info.block_size * (last_block + 1), info.block_size)); 1446 ASSERT_TRUE(vc->CheckRead(vb.get(), info.block_size, 1447 info.block_size * (last_block + 1), info.block_size)); 1448 // ... We can still access the previous slice... 1449 ASSERT_TRUE(vc->CheckRead(vb.get(), 0, info.block_size * last_block, 1450 info.block_size)); 1451 // ... And we can cross slices 1452 ASSERT_TRUE(vc->CheckRead(vb.get(), 0, info.block_size * last_block, 1453 info.block_size * 2)); 1454 } 1455 1456 ASSERT_EQ(close(vp_fd), 0); 1457 ASSERT_EQ(close(fd), 0); 1458 ASSERT_TRUE(FVMCheckSliceSize(fvm_driver, 64lu * (1 << 20))); 1459 ASSERT_EQ(EndFVMTest(ramdisk_path), 0, "unmounting FVM"); 1460 END_TEST; 1461} 1462 1463// Test allocating and accessing multiple (3+) slices at once. 1464bool TestSliceAccessMany(void) { 1465 BEGIN_TEST; 1466 char ramdisk_path[PATH_MAX]; 1467 char fvm_driver[PATH_MAX]; 1468 // The size of a slice must be carefully constructed for this test 1469 // so that we can hold multiple slices in memory without worrying 1470 // about hitting resource limits. 1471 const size_t kBlockSize = use_real_disk ? 
test_block_size : 512; 1472 const size_t kBlocksPerSlice = 256; 1473 const size_t kSliceSize = kBlocksPerSlice * kBlockSize; 1474 ASSERT_EQ(StartFVMTest(kBlockSize, (1 << 20), kSliceSize, ramdisk_path, fvm_driver), 0, "error mounting FVM"); 1475 1476 int fd = open(fvm_driver, O_RDWR); 1477 ASSERT_GT(fd, 0); 1478 fvm_info_t fvm_info; 1479 ASSERT_GT(ioctl_block_fvm_query(fd, &fvm_info), 0); 1480 ASSERT_EQ(fvm_info.slice_size, kSliceSize); 1481 1482 // Allocate one VPart 1483 alloc_req_t request; 1484 memset(&request, 0, sizeof(request)); 1485 request.slice_count = 1; 1486 memcpy(request.guid, kTestUniqueGUID, GUID_LEN); 1487 strcpy(request.name, kTestPartName1); 1488 memcpy(request.type, kTestPartGUIDData, GUID_LEN); 1489 int vp_fd = fvm_allocate_partition(fd, &request); 1490 ASSERT_GT(vp_fd, 0); 1491 block_info_t info; 1492 ASSERT_GE(ioctl_block_get_info(vp_fd, &info), 0); 1493 ASSERT_EQ(info.block_size, kBlockSize); 1494 1495 { 1496 fbl::RefPtr<VmoClient> vc; 1497 ASSERT_TRUE(VmoClient::Create(vp_fd, &vc)); 1498 fbl::unique_ptr<VmoBuf> vb; 1499 ASSERT_TRUE(VmoBuf::Create(vc, kSliceSize * 3, &vb)); 1500 1501 // Access the first slice 1502 ASSERT_TRUE(vc->CheckWrite(vb.get(), 0, 0, kSliceSize)); 1503 ASSERT_TRUE(vc->CheckRead(vb.get(), 0, 0, kSliceSize)); 1504 1505 // Try writing out of bounds -- check that we don't have access. 1506 ASSERT_TRUE(CheckNoAccessBlock(vp_fd, kBlocksPerSlice - 1, 2)); 1507 ASSERT_TRUE(CheckNoAccessBlock(vp_fd, kBlocksPerSlice, 1)); 1508 1509 // Attempt to access the next contiguous slices 1510 extend_request_t erequest; 1511 erequest.offset = 1; 1512 erequest.length = 2; 1513 ASSERT_EQ(ioctl_block_fvm_extend(vp_fd, &erequest), 0, "Couldn't extend VPartition"); 1514 1515 // Now we can access the next slices... 1516 ASSERT_TRUE(vc->CheckWrite(vb.get(), kSliceSize, kSliceSize, 2 * kSliceSize)); 1517 ASSERT_TRUE(vc->CheckRead(vb.get(), kSliceSize, kSliceSize, 2 * kSliceSize)); 1518 // ... We can still access the previous slice... 
1519 ASSERT_TRUE(vc->CheckRead(vb.get(), 0, 0, kSliceSize)); 1520 // ... And we can cross slices for reading. 1521 ASSERT_TRUE(vc->CheckRead(vb.get(), 0, 0, 3 * kSliceSize)); 1522 1523 // Also, we can cross slices for writing. 1524 ASSERT_TRUE(vc->CheckWrite(vb.get(), 0, 0, 3 * kSliceSize)); 1525 ASSERT_TRUE(vc->CheckRead(vb.get(), 0, 0, 3 * kSliceSize)); 1526 1527 // Additionally, we can access "parts" of slices in a multi-slice 1528 // operation. Here, read one block into the first slice, and read 1529 // up to the last block in the final slice. 1530 ASSERT_TRUE(vc->CheckWrite(vb.get(), 0, kBlockSize, 3 * kSliceSize - 2 * kBlockSize)); 1531 ASSERT_TRUE(vc->CheckRead(vb.get(), 0, kBlockSize, 3 * kSliceSize - 2 * kBlockSize)); 1532 } 1533 1534 ASSERT_EQ(close(vp_fd), 0); 1535 ASSERT_EQ(close(fd), 0); 1536 ASSERT_TRUE(FVMCheckSliceSize(fvm_driver, kSliceSize)); 1537 ASSERT_EQ(EndFVMTest(ramdisk_path), 0, "unmounting FVM"); 1538 END_TEST; 1539} 1540 1541// Test allocating and accessing slices which are allocated 1542// virtually contiguously (they appear sequential to the client) but are 1543// actually noncontiguous on the FVM partition. 1544bool TestSliceAccessNonContiguousPhysical(void) { 1545 BEGIN_TEST; 1546 char ramdisk_path[PATH_MAX]; 1547 char fvm_driver[PATH_MAX]; 1548 1549 // This takes 130sec on a fast desktop, target x86 non-kvm qemu. 1550 // On the bots for arm it times out after 200sec. 1551 // For now just disable the timeout. An alternative is to make it 1552 // a large test, but then it won't get run for CQ/CI. 1553 unittest_cancel_timeout(); 1554 1555 ASSERT_EQ(StartFVMTest(512, 1 << 20, 8lu * (1 << 20), ramdisk_path, fvm_driver), 0, 1556 "error mounting FVM"); 1557 const size_t kDiskSize = use_real_disk ? 
test_block_size * test_block_count : 512 * (1 << 20); 1558 1559 int fd = open(fvm_driver, O_RDWR); 1560 ASSERT_GT(fd, 0); 1561 fvm_info_t fvm_info; 1562 ASSERT_GT(ioctl_block_fvm_query(fd, &fvm_info), 0); 1563 size_t slice_size = fvm_info.slice_size; 1564 1565 alloc_req_t request; 1566 memset(&request, 0, sizeof(request)); 1567 request.slice_count = 1; 1568 memcpy(request.guid, kTestUniqueGUID, GUID_LEN); 1569 1570 constexpr size_t kNumVParts = 3; 1571 typedef struct vdata { 1572 int fd; 1573 uint8_t guid[GUID_LEN]; 1574 char name[32]; 1575 size_t slices_used; 1576 } vdata_t; 1577 1578 vdata_t vparts[kNumVParts] = { 1579 {0, GUID_TEST_DATA_VALUE, "data", request.slice_count}, 1580 {0, GUID_TEST_BLOB_VALUE, "blob", request.slice_count}, 1581 {0, GUID_TEST_SYS_VALUE, "sys", request.slice_count}, 1582 }; 1583 1584 for (size_t i = 0; i < fbl::count_of(vparts); i++) { 1585 strcpy(request.name, vparts[i].name); 1586 memcpy(request.type, vparts[i].guid, GUID_LEN); 1587 vparts[i].fd = fvm_allocate_partition(fd, &request); 1588 ASSERT_GT(vparts[i].fd, 0); 1589 } 1590 1591 block_info_t info; 1592 ASSERT_GE(ioctl_block_get_info(vparts[0].fd, &info), 0); 1593 1594 size_t usable_slices_per_vpart = fvm::UsableSlicesCount(kDiskSize, slice_size) / kNumVParts; 1595 size_t i = 0; 1596 while (vparts[i].slices_used < usable_slices_per_vpart) { 1597 int vfd = vparts[i].fd; 1598 // This is the last 'accessible' block. 1599 size_t last_block = (vparts[i].slices_used * (slice_size / info.block_size)) - 1; 1600 fbl::RefPtr<VmoClient> vc; 1601 ASSERT_TRUE(VmoClient::Create(vfd, &vc)); 1602 fbl::unique_ptr<VmoBuf> vb; 1603 ASSERT_TRUE(VmoBuf::Create(vc, info.block_size * 2, &vb)); 1604 1605 ASSERT_TRUE(vc->CheckWrite(vb.get(), 0, info.block_size * last_block, info.block_size)); 1606 ASSERT_TRUE(vc->CheckRead(vb.get(), 0, info.block_size * last_block, info.block_size)); 1607 1608 // Try writing out of bounds -- check that we don't have access. 
1609 ASSERT_TRUE(CheckNoAccessBlock(vfd, last_block, 2)); 1610 ASSERT_TRUE(CheckNoAccessBlock(vfd, last_block + 1, 1)); 1611 1612 // Attempt to access the next contiguous slice 1613 extend_request_t erequest; 1614 erequest.offset = vparts[i].slices_used; 1615 erequest.length = 1; 1616 ASSERT_EQ(ioctl_block_fvm_extend(vfd, &erequest), 0, "Couldn't extend VPartition"); 1617 1618 // Now we can access the next slice... 1619 ASSERT_TRUE(vc->CheckWrite(vb.get(), info.block_size, info.block_size * 1620 (last_block + 1), info.block_size)); 1621 ASSERT_TRUE(vc->CheckRead(vb.get(), info.block_size, info.block_size * 1622 (last_block + 1), info.block_size)); 1623 // ... We can still access the previous slice... 1624 ASSERT_TRUE(vc->CheckRead(vb.get(), 0, info.block_size * last_block, 1625 info.block_size)); 1626 // ... And we can cross slices 1627 ASSERT_TRUE(vc->CheckRead(vb.get(), 0, info.block_size * last_block, 1628 info.block_size * 2)); 1629 1630 vparts[i].slices_used++; 1631 i = (i + 1) % kNumVParts; 1632 } 1633 1634 for (size_t i = 0; i < kNumVParts; i++) { 1635 printf("Testing multi-slice operations on vslice %lu\n", i); 1636 1637 // We need at least five slices, so we can access three "middle" 1638 // slices and jitter to test off-by-one errors. 1639 ASSERT_GE(vparts[i].slices_used, 5); 1640 1641 { 1642 fbl::RefPtr<VmoClient> vc; 1643 ASSERT_TRUE(VmoClient::Create(vparts[i].fd, &vc)); 1644 fbl::unique_ptr<VmoBuf> vb; 1645 ASSERT_TRUE(VmoBuf::Create(vc, slice_size * 4, &vb)); 1646 1647 // Try accessing 3 noncontiguous slices at once, with the 1648 // addition of "off by one block". 
1649 size_t dev_off_start = slice_size - info.block_size; 1650 size_t dev_off_end = slice_size + info.block_size; 1651 size_t len_start = slice_size * 3 - info.block_size; 1652 size_t len_end = slice_size * 3 + info.block_size; 1653 1654 // Test a variety of: 1655 // Starting device offsets, 1656 size_t bsz = info.block_size; 1657 for (size_t dev_off = dev_off_start; dev_off <= dev_off_end; dev_off += bsz) { 1658 printf(" Testing non-contiguous write/read starting at offset: %zu\n", dev_off); 1659 // Operation lengths, 1660 for (size_t len = len_start; len <= len_end; len += bsz) { 1661 printf(" Testing operation of length: %zu\n", len); 1662 // and starting VMO offsets 1663 for (size_t vmo_off = 0; vmo_off < 3 * bsz; vmo_off += bsz) { 1664 // Try writing & reading the entire section (multiple 1665 // slices) at once. 1666 ASSERT_TRUE(vc->CheckWrite(vb.get(), vmo_off, dev_off, len)); 1667 ASSERT_TRUE(vc->CheckRead(vb.get(), vmo_off, dev_off, len)); 1668 1669 // Try reading the section one slice at a time. 1670 // The results should be the same. 1671 size_t sub_off = 0; 1672 size_t sub_len = slice_size - (dev_off % slice_size); 1673 while (sub_off < len) { 1674 ASSERT_TRUE(vc->CheckRead(vb.get(), vmo_off + sub_off, 1675 dev_off + sub_off, sub_len)); 1676 sub_off += sub_len; 1677 sub_len = fbl::min(slice_size, len - sub_off); 1678 } 1679 } 1680 } 1681 } 1682 } 1683 ASSERT_EQ(close(vparts[i].fd), 0); 1684 } 1685 1686 ASSERT_EQ(close(fd), 0); 1687 ASSERT_TRUE(FVMCheckSliceSize(fvm_driver, slice_size)); 1688 ASSERT_EQ(EndFVMTest(ramdisk_path), 0, "unmounting FVM"); 1689 END_TEST; 1690} 1691 1692// Test allocating and accessing slices which are 1693// allocated noncontiguously from the client's perspective. 
1694bool TestSliceAccessNonContiguousVirtual(void) { 1695 BEGIN_TEST; 1696 char ramdisk_path[PATH_MAX]; 1697 char fvm_driver[PATH_MAX]; 1698 ASSERT_EQ(StartFVMTest(512, 1 << 20, 64lu * (1 << 20), ramdisk_path, fvm_driver), 0, "error mounting FVM"); 1699 const size_t kDiskSize = 512 * (1 << 20); 1700 1701 int fd = open(fvm_driver, O_RDWR); 1702 ASSERT_GT(fd, 0); 1703 fvm_info_t fvm_info; 1704 ASSERT_GT(ioctl_block_fvm_query(fd, &fvm_info), 0); 1705 size_t slice_size = fvm_info.slice_size; 1706 1707 alloc_req_t request; 1708 memset(&request, 0, sizeof(request)); 1709 request.slice_count = 1; 1710 memcpy(request.guid, kTestUniqueGUID, GUID_LEN); 1711 1712 constexpr size_t kNumVParts = 3; 1713 typedef struct vdata { 1714 int fd; 1715 uint8_t guid[GUID_LEN]; 1716 char name[32]; 1717 size_t slices_used; 1718 size_t last_slice; 1719 } vdata_t; 1720 1721 vdata_t vparts[kNumVParts] = { 1722 {0, GUID_TEST_DATA_VALUE, "data", request.slice_count, request.slice_count}, 1723 {0, GUID_TEST_BLOB_VALUE, "blob", request.slice_count, request.slice_count}, 1724 {0, GUID_TEST_SYS_VALUE, "sys", request.slice_count, request.slice_count}, 1725 }; 1726 1727 for (size_t i = 0; i < fbl::count_of(vparts); i++) { 1728 strcpy(request.name, vparts[i].name); 1729 memcpy(request.type, vparts[i].guid, GUID_LEN); 1730 vparts[i].fd = fvm_allocate_partition(fd, &request); 1731 ASSERT_GT(vparts[i].fd, 0); 1732 } 1733 1734 block_info_t info; 1735 ASSERT_GE(ioctl_block_get_info(vparts[0].fd, &info), 0); 1736 1737 size_t usable_slices_per_vpart = fvm::UsableSlicesCount(kDiskSize, slice_size) / kNumVParts; 1738 size_t i = 0; 1739 while (vparts[i].slices_used < usable_slices_per_vpart) { 1740 int vfd = vparts[i].fd; 1741 // This is the last 'accessible' block. 1742 size_t last_block = (vparts[i].last_slice * (slice_size / info.block_size)) - 1; 1743 ASSERT_TRUE(CheckWriteReadBlock(vfd, last_block, 1)); 1744 1745 // Try writing out of bounds -- check that we don't have access. 
1746 ASSERT_TRUE(CheckNoAccessBlock(vfd, last_block, 2)); 1747 ASSERT_TRUE(CheckNoAccessBlock(vfd, last_block + 1, 1)); 1748 1749 // Attempt to access a non-contiguous slice 1750 extend_request_t erequest; 1751 erequest.offset = vparts[i].last_slice + 2; 1752 erequest.length = 1; 1753 ASSERT_EQ(ioctl_block_fvm_extend(vfd, &erequest), 0, "Couldn't extend VPartition"); 1754 1755 // We still don't have access to the next slice... 1756 ASSERT_TRUE(CheckNoAccessBlock(vfd, last_block, 2)); 1757 ASSERT_TRUE(CheckNoAccessBlock(vfd, last_block + 1, 1)); 1758 1759 // But we have access to the slice we asked for! 1760 size_t requested_block = (erequest.offset * slice_size) / info.block_size; 1761 ASSERT_TRUE(CheckWriteReadBlock(vfd, requested_block, 1)); 1762 1763 vparts[i].slices_used++; 1764 vparts[i].last_slice = erequest.offset; 1765 i = (i + 1) % kNumVParts; 1766 } 1767 1768 for (size_t i = 0; i < kNumVParts; i++) { 1769 ASSERT_EQ(close(vparts[i].fd), 0); 1770 } 1771 1772 ASSERT_EQ(close(fd), 0); 1773 ASSERT_TRUE(FVMCheckSliceSize(fvm_driver,slice_size)); 1774 ASSERT_EQ(EndFVMTest(ramdisk_path), 0, "unmounting FVM"); 1775 END_TEST; 1776} 1777 1778// Test that the FVM driver actually persists updates. 
// Verifies that FVM metadata updates (partition allocation, slice extension)
// are persisted to disk: after each FVMRebind the state must be re-read from
// the device, not served from driver memory.
bool TestPersistenceSimple(void) {
    BEGIN_TEST;
    char ramdisk_path[PATH_MAX];
    char fvm_driver[PATH_MAX];
    constexpr uint64_t kBlkSize = 512;
    constexpr uint64_t kBlkCount = 1 << 20;
    constexpr uint64_t kSliceSize = 64 * (1 << 20);
    ASSERT_EQ(StartFVMTest(kBlkSize, kBlkCount, kSliceSize, ramdisk_path,
                           fvm_driver), 0, "error mounting FVM");

    constexpr uint64_t kDiskSize = kBlkSize * kBlkCount;
    size_t slices_left = fvm::UsableSlicesCount(kDiskSize, kSliceSize);
    const uint64_t kSliceCount = slices_left;

    int fd = open(fvm_driver, O_RDWR);
    ASSERT_GT(fd, 0);
    fvm_info_t fvm_info;
    ASSERT_GT(ioctl_block_fvm_query(fd, &fvm_info), 0);
    size_t slice_size = fvm_info.slice_size;

    // Allocate one VPart
    alloc_req_t request;
    memset(&request, 0, sizeof(request));
    request.slice_count = 1;
    memcpy(request.guid, kTestUniqueGUID, GUID_LEN);
    strcpy(request.name, kTestPartName1);
    memcpy(request.type, kTestPartGUIDData, GUID_LEN);
    int vp_fd = fvm_allocate_partition(fd, &request);
    ASSERT_GT(vp_fd, 0);
    slices_left--;

    // Check that the name matches what we provided
    char name[FVM_NAME_LEN + 1];
    ASSERT_GE(ioctl_block_get_name(vp_fd, name, sizeof(name)), 0);
    ASSERT_EQ(memcmp(name, kTestPartName1, strlen(kTestPartName1)), 0);
    block_info_t info;
    ASSERT_GE(ioctl_block_get_info(vp_fd, &info), 0);
    fbl::AllocChecker ac;
    // Two blocks of scratch data: &buf[0] is the pattern for the first slice,
    // &buf[info.block_size] the pattern for the slice extended later.
    fbl::unique_ptr<uint8_t[]> buf(new (&ac) uint8_t[info.block_size * 2]);
    ASSERT_TRUE(ac.check());

    // Check that we can read from / write to it
    ASSERT_TRUE(CheckWrite(vp_fd, 0, info.block_size, buf.get()));
    ASSERT_TRUE(CheckRead(vp_fd, 0, info.block_size, buf.get()));
    ASSERT_EQ(close(vp_fd), 0);

    // Check that it still exists after rebinding the driver
    const partition_entry_t entries[] = {
        {kTestPartName1, 1},
    };
    fd = FVMRebind(fd, ramdisk_path, entries, 1);
    ASSERT_GT(fd, 0, "Failed to rebind FVM driver");

    vp_fd = open_partition(kTestUniqueGUID, kTestPartGUIDData, 0, nullptr);
    ASSERT_GT(vp_fd, 0, "Couldn't re-open Data VPart");
    ASSERT_TRUE(CheckRead(vp_fd, 0, info.block_size, buf.get()));

    // Try extending the vpartition, and checking that the extension persists.
    // This is the last 'accessible' block.
    size_t last_block = (slice_size / info.block_size) - 1;
    ASSERT_TRUE(CheckWrite(vp_fd, info.block_size * last_block, info.block_size, &buf[0]));
    ASSERT_TRUE(CheckRead(vp_fd, info.block_size * last_block, info.block_size, &buf[0]));

    // Try writing out of bounds -- check that we don't have access.
    ASSERT_TRUE(CheckNoAccessBlock(vp_fd, (slice_size / info.block_size) - 1, 2));
    ASSERT_TRUE(CheckNoAccessBlock(vp_fd, slice_size / info.block_size, 1));
    extend_request_t erequest;
    erequest.offset = 1;
    erequest.length = 1;
    ASSERT_EQ(ioctl_block_fvm_extend(vp_fd, &erequest), 0, "Couldn't extend VPartition");
    slices_left--;

    // Rebind the FVM driver, check the extension has succeeded.
    // NOTE(review): vp_fd is deliberately kept open across this rebind; the
    // test relies on the open handle remaining usable afterwards.
    fd = FVMRebind(fd, ramdisk_path, entries, 1);
    ASSERT_GT(fd, 0, "Failed to rebind FVM driver");

    // Now we can access the next slice...
    ASSERT_TRUE(CheckWrite(vp_fd, info.block_size * (last_block + 1),
                           info.block_size, &buf[info.block_size]));
    ASSERT_TRUE(CheckRead(vp_fd, info.block_size * (last_block + 1),
                          info.block_size, &buf[info.block_size]));
    // ... We can still access the previous slice...
    ASSERT_TRUE(CheckRead(vp_fd, info.block_size * last_block,
                          info.block_size, &buf[0]));
    // ... And we can cross slices
    ASSERT_TRUE(CheckRead(vp_fd, info.block_size * last_block,
                          info.block_size * 2, &buf[0]));

    // Try allocating the rest of the slices, rebinding, and ensuring
    // that the size stays updated.
    ASSERT_GE(ioctl_block_get_info(vp_fd, &info), 0);
    ASSERT_EQ(info.block_count * info.block_size, kSliceSize * 2);
    erequest.offset = 2;
    erequest.length = slices_left;
    ASSERT_EQ(ioctl_block_fvm_extend(vp_fd, &erequest), 0, "Couldn't extend VPartition");
    ASSERT_GE(ioctl_block_get_info(vp_fd, &info), 0);
    ASSERT_EQ(info.block_count * info.block_size, kSliceSize * kSliceCount);

    ASSERT_EQ(close(vp_fd), 0);
    fd = FVMRebind(fd, ramdisk_path, entries, 1);
    ASSERT_GT(fd, 0, "Failed to rebind FVM driver");

    vp_fd = open_partition(kTestUniqueGUID, kTestPartGUIDData, 0, nullptr);
    ASSERT_GT(vp_fd, 0, "Couldn't re-open Data VPart");

    ASSERT_GE(ioctl_block_get_info(vp_fd, &info), 0);
    ASSERT_EQ(info.block_count * info.block_size, kSliceSize * kSliceCount);

    ASSERT_EQ(close(vp_fd), 0);
    ASSERT_EQ(close(fd), 0);
    ASSERT_TRUE(FVMCheckSliceSize(fvm_driver, 64lu * (1 << 20)));
    ASSERT_EQ(EndFVMTest(ramdisk_path), 0, "unmounting FVM");
    END_TEST;
}

// Helper: formats |partition_path| as |disk_format|, then deliberately makes
// the FVM slice allocation disagree with what the filesystem expects
// (shrinking, and later over-extending, the filesystem's extents) and checks
// how mount() reacts to each mismatch. |query_request| lists the starting
// vslice of each FVM extent the filesystem was formatted with.
bool CorruptMountHelper(const char* partition_path, disk_format_t disk_format,
                        const query_request_t& query_request) {
    BEGIN_HELPER;

    // Format the VPart as |disk_format|.
    ASSERT_EQ(mkfs(partition_path, disk_format, launch_stdio_sync,
                   &default_mkfs_options),
              ZX_OK);

    int vp_fd = open_partition(kTestUniqueGUID, kTestPartGUIDData, 0, nullptr);
    ASSERT_GT(vp_fd, 0);

    // Check initial slice allocation.
    query_response_t query_response;
    ASSERT_EQ(ioctl_block_fvm_vslice_query(vp_fd, &query_request, &query_response),
              sizeof(query_response_t));
    ASSERT_EQ(query_request.count, query_response.count);

    // Freshly formatted, each extent should be exactly one slice long.
    for (unsigned i = 0; i < query_request.count; i++) {
        ASSERT_TRUE(query_response.vslice_range[i].allocated);
        ASSERT_EQ(query_response.vslice_range[i].count, 1);
    }

    // Manually shrink slices so FVM will differ from the partition.
    // Remove the filesystem's first extent out from under it.
    extend_request_t extend_request;
    extend_request.length = 1;
    extend_request.offset = query_request.vslice_start[0];
    ASSERT_EQ(ioctl_block_fvm_shrink(vp_fd, &extend_request), 0);

    // Check slice allocation after manual grow/shrink
    ASSERT_EQ(ioctl_block_fvm_vslice_query(vp_fd, &query_request, &query_response),
              sizeof(query_response_t));
    ASSERT_FALSE(query_response.vslice_range[0].allocated);
    // The unallocated hole now runs up to the start of the next extent.
    ASSERT_EQ(query_response.vslice_range[0].count,
              query_request.vslice_start[1] - query_request.vslice_start[0]);

    // Try to mount the VPart.
    // Expected to fail: the backing slices no longer match the filesystem.
    ASSERT_NE(mount(vp_fd, kMountPath, disk_format, &default_mount_options,
                    launch_stdio_async), ZX_OK);

    // NOTE(review): vp_fd is re-opened here — mount() appears to take
    // ownership of the fd even when mounting fails; confirm against mount().
    vp_fd = open_partition(kTestUniqueGUID, kTestPartGUIDData, 0, nullptr);
    ASSERT_GT(vp_fd, 0);

    // Grow back the slice we shrunk earlier.
    extend_request.length = 1;
    extend_request.offset = query_request.vslice_start[0];
    ASSERT_EQ(ioctl_block_fvm_extend(vp_fd, &extend_request), 0);

    // Verify grow was successful.
    ASSERT_EQ(ioctl_block_fvm_vslice_query(vp_fd, &query_request, &query_response),
              sizeof(query_response_t));
    ASSERT_EQ(query_request.count, query_response.count);
    ASSERT_TRUE(query_response.vslice_range[0].allocated);
    ASSERT_EQ(query_response.vslice_range[0].count, 1);

    // Now extend all extents by some number of additional slices.
    for (unsigned i = 0; i < query_request.count; i++) {
        extend_request_t extend_request;
        extend_request.length = query_request.count - i;
        extend_request.offset = query_request.vslice_start[i] + 1;
        ASSERT_EQ(ioctl_block_fvm_extend(vp_fd, &extend_request), 0);
    }

    // Verify that the extensions were successful.
    ASSERT_EQ(ioctl_block_fvm_vslice_query(vp_fd, &query_request, &query_response),
              sizeof(query_response_t));
    ASSERT_EQ(query_request.count, query_response.count);
    for (unsigned i = 0; i < query_request.count; i++) {
        ASSERT_TRUE(query_response.vslice_range[i].allocated);
        ASSERT_EQ(query_response.vslice_range[i].count, 1 + query_request.count - i);
    }

    // Try mount again.
    // Over-allocated (but not under-allocated) slices should not prevent it.
    ASSERT_EQ(mount(vp_fd, kMountPath, disk_format, &default_mount_options,
                    launch_stdio_async), ZX_OK);
    ASSERT_EQ(umount(kMountPath), ZX_OK);

    vp_fd = open_partition(kTestUniqueGUID, kTestPartGUIDData, 0, nullptr);
    ASSERT_GT(vp_fd, 0);

    // Verify that slices were fixed on mount.
    ASSERT_EQ(ioctl_block_fvm_vslice_query(vp_fd, &query_request, &query_response),
              sizeof(query_response_t));
    ASSERT_EQ(query_request.count, query_response.count);

    // Each extent should be back to exactly one slice.
    for (unsigned i = 0; i < query_request.count; i++) {
        ASSERT_TRUE(query_response.vslice_range[i].allocated);
        ASSERT_EQ(query_response.vslice_range[i].count, 1);
    }

    END_HELPER;
}

// Runs CorruptMountHelper against both the Minfs and the Blobfs FVM layouts.
bool TestCorruptMount(void) {
    BEGIN_TEST;
    char ramdisk_path[PATH_MAX];
    char fvm_driver[PATH_MAX];
    size_t slice_size = 1 << 20;
    ASSERT_EQ(StartFVMTest(512, 1 << 20, slice_size, ramdisk_path, fvm_driver), 0,
              "error mounting FVM");

    int fd = open(fvm_driver, O_RDWR);
    ASSERT_GT(fd, 0);
    fvm_info_t fvm_info;
    ASSERT_GT(ioctl_block_fvm_query(fd, &fvm_info), 0);
    ASSERT_EQ(slice_size, fvm_info.slice_size);

    // Allocate one VPart
    alloc_req_t request;
    memset(&request, 0, sizeof(request));
    request.slice_count = 1;
    memcpy(request.guid, kTestUniqueGUID, GUID_LEN);
    strcpy(request.name, kTestPartName1);
    memcpy(request.type, kTestPartGUIDData, GUID_LEN);
    int vp_fd = fvm_allocate_partition(fd, &request);
    ASSERT_GT(vp_fd, 0);
    ASSERT_EQ(close(vp_fd), 0);

    ASSERT_EQ(mkdir(kMountPath, 0666), 0);

    char partition_path[PATH_MAX];
    snprintf(partition_path, sizeof(partition_path), "%s/%s-p-1/block",
             fvm_driver, kTestPartName1);

    // Starting vslice of each Minfs FVM extent, in units of slices.
    size_t kMinfsBlocksPerSlice = slice_size / minfs::kMinfsBlockSize;
    query_request_t query_request;
    query_request.count = 4;
    query_request.vslice_start[0] = minfs::kFVMBlockInodeBmStart / kMinfsBlocksPerSlice;
    query_request.vslice_start[1] = minfs::kFVMBlockDataBmStart / kMinfsBlocksPerSlice;
    query_request.vslice_start[2] = minfs::kFVMBlockInodeStart / kMinfsBlocksPerSlice;
    query_request.vslice_start[3] = minfs::kFVMBlockDataStart / kMinfsBlocksPerSlice;

    // Run the test for Minfs.
    ASSERT_TRUE(CorruptMountHelper(partition_path, DISK_FORMAT_MINFS, query_request));

    // Starting vslice of each Blobfs FVM extent, in units of slices.
    size_t kBlobfsBlocksPerSlice = slice_size / blobfs::kBlobfsBlockSize;
    query_request.count = 3;
    query_request.vslice_start[0] = blobfs::kFVMBlockMapStart / kBlobfsBlocksPerSlice;
    query_request.vslice_start[1] = blobfs::kFVMNodeMapStart / kBlobfsBlocksPerSlice;
    query_request.vslice_start[2] = blobfs::kFVMDataStart / kBlobfsBlocksPerSlice;

    // Run the test for Blobfs.
    ASSERT_TRUE(CorruptMountHelper(partition_path, DISK_FORMAT_BLOBFS, query_request));

    // Clean up
    ASSERT_EQ(rmdir(kMountPath), 0);
    ASSERT_EQ(close(fd), 0);
    ASSERT_EQ(EndFVMTest(ramdisk_path), 0, "unmounting FVM");
    END_TEST;
}

// Exercises the inactive-partition / upgrade lifecycle: partitions allocated
// with kVPartFlagInactive do not survive a rebind unless activated first, and
// ioctl_block_fvm_upgrade swaps which partition survives the next rebind.
bool TestVPartitionUpgrade(void) {
    BEGIN_TEST;
    char ramdisk_path[PATH_MAX];
    char fvm_driver[PATH_MAX];
    constexpr uint64_t kBlkSize = 512;
    constexpr uint64_t kBlkCount = 1 << 20;
    constexpr uint64_t kSliceSize = 64 * (1 << 20);
    ASSERT_EQ(StartFVMTest(kBlkSize, kBlkCount, kSliceSize, ramdisk_path,
                           fvm_driver), 0, "error mounting FVM");

    int fd = open(fvm_driver, O_RDWR);
    ASSERT_GT(fd, 0);

    // Short-hand for asking if we can open a partition.
    auto openable = [](const uint8_t* instanceGUID, const uint8_t* typeGUID) {
        int fd = open_partition(instanceGUID, typeGUID, 0, nullptr);
        if (fd < 0) {
            return false;
        }
        ASSERT_EQ(close(fd), 0);
        return true;
    };

    // Allocate two VParts, one active, and one inactive.
    alloc_req_t request;
    memset(&request, 0, sizeof(request));
    request.flags = fvm::kVPartFlagInactive;
    request.slice_count = 1;
    memcpy(request.guid, kTestUniqueGUID, GUID_LEN);
    strcpy(request.name, kTestPartName1);
    memcpy(request.type, kTestPartGUIDData, GUID_LEN);
    int vp_fd = fvm_allocate_partition(fd, &request);
    ASSERT_GT(vp_fd, 0);
    ASSERT_EQ(close(vp_fd), 0);

    request.flags = 0;
    memcpy(request.guid, kTestUniqueGUID2, GUID_LEN);
    strcpy(request.name, kTestPartName2);
    vp_fd = fvm_allocate_partition(fd, &request);
    ASSERT_GT(vp_fd, 0);
    ASSERT_EQ(close(vp_fd), 0);

    // Only the active partition is expected to exist after rebind.
    const partition_entry_t entries[] = {
        {kTestPartName2, 2},
    };
    fd = FVMRebind(fd, ramdisk_path, entries, 1);
    ASSERT_GT(fd, 0, "Failed to rebind FVM driver");

    // We shouldn't be able to re-open the inactive partition...
    ASSERT_FALSE(openable(kTestUniqueGUID, kTestPartGUIDData));
    // ... but we SHOULD be able to re-open the active partition.
    ASSERT_TRUE(openable(kTestUniqueGUID2, kTestPartGUIDData));

    // Try to upgrade the partition (from GUID2 --> GUID)
    request.flags = fvm::kVPartFlagInactive;
    memcpy(request.guid, kTestUniqueGUID, GUID_LEN);
    strcpy(request.name, kTestPartName1);
    int new_fd = fvm_allocate_partition(fd, &request);
    ASSERT_GT(new_fd, 0);
    ASSERT_EQ(close(new_fd), 0);

    upgrade_req_t upgrade;
    memcpy(upgrade.old_guid, kTestUniqueGUID2, GUID_LEN);
    memcpy(upgrade.new_guid, kTestUniqueGUID, GUID_LEN);
    ASSERT_EQ(ioctl_block_fvm_upgrade(fd, &upgrade), ZX_OK);

    // After upgrading, we should be able to open both partitions
    ASSERT_TRUE(openable(kTestUniqueGUID, kTestPartGUIDData));
    ASSERT_TRUE(openable(kTestUniqueGUID2, kTestPartGUIDData));

    // Rebind the FVM driver, check the upgrade has succeeded.
    // The original (GUID2) should be deleted, and the new partition (GUID)
    // should exist.
    const partition_entry_t upgraded_entries[] = {
        {kTestPartName1, 1},
    };
    fd = FVMRebind(fd, ramdisk_path, upgraded_entries, 1);
    ASSERT_GT(fd, 0, "Failed to rebind FVM driver");

    ASSERT_TRUE(openable(kTestUniqueGUID, kTestPartGUIDData));
    ASSERT_FALSE(openable(kTestUniqueGUID2, kTestPartGUIDData));

    // Try upgrading when the "new" version doesn't exist.
    // (It should return an error and have no noticable effect).
    memcpy(upgrade.old_guid, kTestUniqueGUID, GUID_LEN);
    memcpy(upgrade.new_guid, kTestUniqueGUID2, GUID_LEN);
    ASSERT_EQ(ioctl_block_fvm_upgrade(fd, &upgrade), ZX_ERR_NOT_FOUND);

    fd = FVMRebind(fd, ramdisk_path, upgraded_entries, 1);
    ASSERT_GT(fd, 0, "Failed to rebind FVM driver");

    ASSERT_TRUE(openable(kTestUniqueGUID, kTestPartGUIDData));
    ASSERT_FALSE(openable(kTestUniqueGUID2, kTestPartGUIDData));

    // Try upgrading when the "old" version doesn't exist.
    request.flags = fvm::kVPartFlagInactive;
    memcpy(request.guid, kTestUniqueGUID2, GUID_LEN);
    strcpy(request.name, kTestPartName2);
    new_fd = fvm_allocate_partition(fd, &request);
    ASSERT_GT(new_fd, 0);
    ASSERT_EQ(close(new_fd), 0);

    char fake_guid[GUID_LEN];
    memset(fake_guid, 0, GUID_LEN);
    memcpy(upgrade.old_guid, fake_guid, GUID_LEN);
    memcpy(upgrade.new_guid, kTestUniqueGUID2, GUID_LEN);
    // Upgrading from a nonexistent "old" partition still activates "new".
    ASSERT_EQ(ioctl_block_fvm_upgrade(fd, &upgrade), ZX_OK);

    const partition_entry_t upgraded_entries_both[] = {
        {kTestPartName1, 1},
        {kTestPartName2, 2},
    };
    fd = FVMRebind(fd, ramdisk_path, upgraded_entries_both, 2);
    ASSERT_GT(fd, 0, "Failed to rebind FVM driver");

    // We should be able to open both partitions again.
    ASSERT_TRUE(openable(kTestUniqueGUID, kTestPartGUIDData));
    ASSERT_TRUE(openable(kTestUniqueGUID2, kTestPartGUIDData));

    // Destroy and reallocate the first partition as inactive.
    vp_fd = open_partition(kTestUniqueGUID, kTestPartGUIDData, 0, nullptr);
    ASSERT_GT(vp_fd, 0);
    ASSERT_EQ(ioctl_block_fvm_destroy_partition(vp_fd), 0);
    ASSERT_EQ(close(vp_fd), 0);
    request.flags = fvm::kVPartFlagInactive;
    memcpy(request.guid, kTestUniqueGUID, GUID_LEN);
    strcpy(request.name, kTestPartName1);
    new_fd = fvm_allocate_partition(fd, &request);
    ASSERT_GT(new_fd, 0);
    ASSERT_EQ(close(new_fd), 0);

    // Upgrade the partition with old_guid == new_guid.
    // This should activate the partition.
    memcpy(upgrade.old_guid, kTestUniqueGUID, GUID_LEN);
    memcpy(upgrade.new_guid, kTestUniqueGUID, GUID_LEN);
    ASSERT_EQ(ioctl_block_fvm_upgrade(fd, &upgrade), ZX_OK);

    fd = FVMRebind(fd, ramdisk_path, upgraded_entries_both, 2);
    ASSERT_GT(fd, 0, "Failed to rebind FVM driver");

    // We should be able to open both partitions again.
2186 ASSERT_TRUE(openable(kTestUniqueGUID, kTestPartGUIDData)); 2187 ASSERT_TRUE(openable(kTestUniqueGUID2, kTestPartGUIDData)); 2188 2189 ASSERT_EQ(close(fd), 0); 2190 ASSERT_EQ(EndFVMTest(ramdisk_path), 0, "unmounting FVM"); 2191 END_TEST; 2192} 2193 2194// Test that the FVM driver can mount filesystems. 2195bool TestMounting(void) { 2196 BEGIN_TEST; 2197 char ramdisk_path[PATH_MAX]; 2198 char fvm_driver[PATH_MAX]; 2199 ASSERT_EQ(StartFVMTest(512, 1 << 20, 64lu * (1 << 20), ramdisk_path, fvm_driver), 0, "error mounting FVM"); 2200 2201 int fd = open(fvm_driver, O_RDWR); 2202 ASSERT_GT(fd, 0); 2203 fvm_info_t fvm_info; 2204 ASSERT_GT(ioctl_block_fvm_query(fd, &fvm_info), 0); 2205 size_t slice_size = fvm_info.slice_size; 2206 2207 // Allocate one VPart 2208 alloc_req_t request; 2209 request.slice_count = 1; 2210 memcpy(request.guid, kTestUniqueGUID, GUID_LEN); 2211 strcpy(request.name, kTestPartName1); 2212 memcpy(request.type, kTestPartGUIDData, GUID_LEN); 2213 int vp_fd = fvm_allocate_partition(fd, &request); 2214 ASSERT_GT(vp_fd, 0); 2215 2216 // Format the VPart as minfs 2217 char partition_path[PATH_MAX]; 2218 snprintf(partition_path, sizeof(partition_path), "%s/%s-p-1/block", 2219 fvm_driver, kTestPartName1); 2220 ASSERT_EQ(mkfs(partition_path, DISK_FORMAT_MINFS, launch_stdio_sync, 2221 &default_mkfs_options), 2222 ZX_OK); 2223 2224 // Mount the VPart 2225 ASSERT_EQ(mkdir(kMountPath, 0666), 0); 2226 ASSERT_EQ(mount(vp_fd, kMountPath, DISK_FORMAT_MINFS, &default_mount_options, 2227 launch_stdio_async), 2228 ZX_OK); 2229 2230 // Verify that the mount was successful. 
2231 fbl::unique_fd rootfd(open(kMountPath, O_RDONLY | O_DIRECTORY)); 2232 ASSERT_TRUE(rootfd); 2233 zx_status_t status; 2234 fuchsia_io_FilesystemInfo info; 2235 fzl::FdioCaller caller(fbl::move(rootfd)); 2236 ASSERT_EQ(fuchsia_io_DirectoryAdminQueryFilesystem(caller.borrow_channel(), &status, 2237 &info), ZX_OK); 2238 const char* kFsName = "minfs"; 2239 const char* name = reinterpret_cast<const char*>(info.name); 2240 ASSERT_EQ(strncmp(name, kFsName, strlen(kFsName)), 0, "Unexpected filesystem mounted"); 2241 2242 // Verify that MinFS does not try to use more of the VPartition than 2243 // was originally allocated. 2244 ASSERT_LE(info.total_bytes, slice_size * request.slice_count); 2245 2246 // Clean up. 2247 ASSERT_EQ(umount(kMountPath), ZX_OK); 2248 ASSERT_EQ(rmdir(kMountPath), 0); 2249 ASSERT_EQ(close(fd), 0); 2250 ASSERT_TRUE(FVMCheckSliceSize(fvm_driver, 64lu * (1 << 20))); 2251 ASSERT_EQ(EndFVMTest(ramdisk_path), 0, "unmounting FVM"); 2252 END_TEST; 2253} 2254 2255// Test that FVM-aware filesystem can be reformatted. 2256bool TestMkfs(void) { 2257 BEGIN_TEST; 2258 char ramdisk_path[PATH_MAX]; 2259 char fvm_driver[PATH_MAX]; 2260 ASSERT_EQ(StartFVMTest(512, 1 << 20, 64lu * (1 << 20), ramdisk_path, fvm_driver), 2261 0, "error mounting FVM"); 2262 2263 int fd = open(fvm_driver, O_RDWR); 2264 ASSERT_GT(fd, 0); 2265 fvm_info_t fvm_info; 2266 ASSERT_GT(ioctl_block_fvm_query(fd, &fvm_info), 0); 2267 size_t slice_size = fvm_info.slice_size; 2268 2269 // Allocate one VPart. 2270 alloc_req_t request; 2271 request.slice_count = 1; 2272 memcpy(request.guid, kTestUniqueGUID, GUID_LEN); 2273 strcpy(request.name, kTestPartName1); 2274 memcpy(request.type, kTestPartGUIDData, GUID_LEN); 2275 int vp_fd = fvm_allocate_partition(fd, &request); 2276 ASSERT_GT(vp_fd, 0); 2277 2278 // Format the VPart as minfs. 
2279 char partition_path[PATH_MAX]; 2280 snprintf(partition_path, sizeof(partition_path), "%s/%s-p-1/block", 2281 fvm_driver, kTestPartName1); 2282 ASSERT_EQ(mkfs(partition_path, DISK_FORMAT_MINFS, launch_stdio_sync, 2283 &default_mkfs_options), ZX_OK); 2284 2285 // Format it as MinFS again, even though it is already formatted. 2286 ASSERT_EQ(mkfs(partition_path, DISK_FORMAT_MINFS, launch_stdio_sync, 2287 &default_mkfs_options), ZX_OK); 2288 2289 // Now try reformatting as blobfs. 2290 ASSERT_EQ(mkfs(partition_path, DISK_FORMAT_BLOBFS, launch_stdio_sync, 2291 &default_mkfs_options), ZX_OK); 2292 2293 // Demonstrate that mounting as minfs will fail, but mounting as blobfs 2294 // is successful. 2295 ASSERT_EQ(mkdir(kMountPath, 0666), 0); 2296 ASSERT_NE(mount(vp_fd, kMountPath, DISK_FORMAT_MINFS, &default_mount_options, 2297 launch_stdio_sync), ZX_OK); 2298 vp_fd = open(partition_path, O_RDWR); 2299 ASSERT_GE(vp_fd, 0); 2300 ASSERT_EQ(mount(vp_fd, kMountPath, DISK_FORMAT_BLOBFS, &default_mount_options, 2301 launch_stdio_async), ZX_OK); 2302 ASSERT_EQ(umount(kMountPath), ZX_OK); 2303 2304 // ... and reformat back to MinFS again. 2305 ASSERT_EQ(mkfs(partition_path, DISK_FORMAT_MINFS, launch_stdio_sync, 2306 &default_mkfs_options), ZX_OK); 2307 2308 // Mount the VPart. 2309 vp_fd = open(partition_path, O_RDWR); 2310 ASSERT_GE(vp_fd, 0); 2311 ASSERT_EQ(mount(vp_fd, kMountPath, DISK_FORMAT_MINFS, &default_mount_options, 2312 launch_stdio_async), ZX_OK); 2313 2314 // Verify that the mount was successful. 
2315 fbl::unique_fd rootfd(open(kMountPath, O_RDONLY | O_DIRECTORY)); 2316 ASSERT_TRUE(rootfd); 2317 zx_status_t status; 2318 fuchsia_io_FilesystemInfo info; 2319 fzl::FdioCaller caller(fbl::move(rootfd)); 2320 ASSERT_EQ(fuchsia_io_DirectoryAdminQueryFilesystem(caller.borrow_channel(), &status, 2321 &info), ZX_OK); 2322 const char* kFsName = "minfs"; 2323 const char* name = reinterpret_cast<const char*>(info.name); 2324 ASSERT_EQ(strncmp(name, kFsName, strlen(kFsName)), 0, "Unexpected filesystem mounted"); 2325 2326 // Verify that MinFS does not try to use more of the VPartition than 2327 // was originally allocated. 2328 ASSERT_LE(info.total_bytes, slice_size * request.slice_count); 2329 2330 // Clean up. 2331 ASSERT_EQ(umount(kMountPath), ZX_OK); 2332 ASSERT_EQ(rmdir(kMountPath), 0); 2333 ASSERT_EQ(close(fd), 0); 2334 ASSERT_TRUE(FVMCheckSliceSize(fvm_driver, 64lu * (1 << 20))); 2335 ASSERT_EQ(EndFVMTest(ramdisk_path), 0, "unmounting FVM"); 2336 END_TEST; 2337} 2338 2339// Test that the FVM can recover when one copy of 2340// metadata becomes corrupt. 2341bool TestCorruptionOk(void) { 2342 BEGIN_TEST; 2343 char ramdisk_path[PATH_MAX]; 2344 char fvm_driver[PATH_MAX]; 2345 2346 size_t kDiskSize = use_real_disk ? 
        test_block_size * test_block_count : 512 * (1 << 20);
    ASSERT_EQ(StartFVMTest(512, 1 << 20, 64lu * (1 << 20), ramdisk_path, fvm_driver), 0,
              "error mounting FVM");

    // Raw fd to the underlying disk, used below to corrupt metadata directly.
    int ramdisk_fd = open(ramdisk_path, O_RDWR);
    ASSERT_GT(ramdisk_fd, 0);

    int fd = open(fvm_driver, O_RDWR);
    ASSERT_GT(fd, 0);
    fvm_info_t fvm_info;
    ASSERT_GT(ioctl_block_fvm_query(fd, &fvm_info), 0);
    size_t slice_size = fvm_info.slice_size;

    // Allocate one VPart (writes to backup)
    alloc_req_t request;
    memset(&request, 0, sizeof(request));
    request.slice_count = 1;
    memcpy(request.guid, kTestUniqueGUID, GUID_LEN);
    strcpy(request.name, kTestPartName1);
    memcpy(request.type, kTestPartGUIDData, GUID_LEN);
    int vp_fd = fvm_allocate_partition(fd, &request);
    ASSERT_GT(vp_fd, 0);

    // Extend the vpart (writes to primary)
    extend_request_t erequest;
    erequest.offset = 1;
    erequest.length = 1;
    ASSERT_EQ(ioctl_block_fvm_extend(vp_fd, &erequest), 0);
    block_info_t info;
    ASSERT_GE(ioctl_block_get_info(vp_fd, &info), 0);
    ASSERT_EQ(info.block_count * info.block_size, slice_size * 2);

    // Initial slice access
    ASSERT_TRUE(CheckWriteReadBlock(vp_fd, 0, 1));
    // Extended slice access
    ASSERT_TRUE(CheckWriteReadBlock(vp_fd, slice_size / info.block_size, 1));

    ASSERT_EQ(close(vp_fd), 0);

    // Corrupt the (backup) metadata and rebind.
    // The 'primary' was the last one written, so it'll be used.
    off_t off = fvm::BackupStart(kDiskSize, slice_size);
    uint8_t buf[FVM_BLOCK_SIZE];
    ASSERT_EQ(lseek(ramdisk_fd, off, SEEK_SET), off);
    ASSERT_EQ(read(ramdisk_fd, buf, sizeof(buf)), sizeof(buf));
    // Modify an arbitrary byte (not the magic bits; we still want it to mount!)
    buf[128]++;
    ASSERT_EQ(lseek(ramdisk_fd, off, SEEK_SET), off);
    ASSERT_EQ(write(ramdisk_fd, buf, sizeof(buf)), sizeof(buf));

    const partition_entry_t entries[] = {
        {kTestPartName1, 1},
    };

    fd = FVMRebind(fd, ramdisk_path, entries, 1);
    ASSERT_GT(fd, 0, "Failed to rebind FVM driver");

    vp_fd = open_partition(kTestUniqueGUID, kTestPartGUIDData, 0, nullptr);
    ASSERT_GT(vp_fd, 0, "Couldn't re-open Data VPart");

    // The slice extension is still accessible.
    ASSERT_TRUE(CheckWriteReadBlock(vp_fd, 0, 1));
    ASSERT_TRUE(CheckWriteReadBlock(vp_fd, slice_size / info.block_size, 1));

    // Clean up
    ASSERT_EQ(close(vp_fd), 0);
    ASSERT_EQ(close(fd), 0);
    ASSERT_EQ(close(ramdisk_fd), 0);

    ASSERT_TRUE(FVMCheckSliceSize(fvm_driver, 64lu * (1 << 20)));
    ASSERT_EQ(EndFVMTest(ramdisk_path), 0, "unmounting FVM");
    END_TEST;
}

// Like TestCorruptionOk, but corrupts the *primary* metadata copy; after
// rebind the older backup copy is used, so the slice extension (which was
// only recorded in the primary) is expected to be lost.
bool TestCorruptionRegression(void) {
    BEGIN_TEST;
    char ramdisk_path[PATH_MAX];
    char fvm_driver[PATH_MAX];
    ASSERT_EQ(StartFVMTest(512, 1 << 20, 64lu * (1 << 20), ramdisk_path, fvm_driver), 0, "error mounting FVM");
    int ramdisk_fd = open(ramdisk_path, O_RDWR);
    ASSERT_GT(ramdisk_fd, 0);

    int fd = open(fvm_driver, O_RDWR);
    ASSERT_GT(fd, 0);
    fvm_info_t fvm_info;
    ASSERT_GT(ioctl_block_fvm_query(fd, &fvm_info), 0);
    size_t slice_size = fvm_info.slice_size;

    // Allocate one VPart (writes to backup)
    alloc_req_t request;
    memset(&request, 0, sizeof(request));
    request.slice_count = 1;
    memcpy(request.guid, kTestUniqueGUID, GUID_LEN);
    strcpy(request.name, kTestPartName1);
    memcpy(request.type, kTestPartGUIDData, GUID_LEN);
    int vp_fd = fvm_allocate_partition(fd, &request);
    ASSERT_GT(vp_fd, 0);

    // Extend the vpart (writes to primary)
    extend_request_t erequest;
    erequest.offset = 1;
    erequest.length = 1;
    ASSERT_EQ(ioctl_block_fvm_extend(vp_fd, &erequest), 0);
    block_info_t info;
    ASSERT_GE(ioctl_block_get_info(vp_fd, &info), 0);
    ASSERT_EQ(info.block_count * info.block_size, slice_size * 2);

    // Initial slice access
    ASSERT_TRUE(CheckWriteReadBlock(vp_fd, 0, 1));
    // Extended slice access
    ASSERT_TRUE(CheckWriteReadBlock(vp_fd, slice_size / info.block_size, 1));

    ASSERT_EQ(close(vp_fd), 0);

    // Corrupt the (primary) metadata and rebind.
    // The 'primary' was the last one written, so the backup will be used.
    off_t off = 0;
    uint8_t buf[FVM_BLOCK_SIZE];
    ASSERT_EQ(lseek(ramdisk_fd, off, SEEK_SET), off);
    ASSERT_EQ(read(ramdisk_fd, buf, sizeof(buf)), sizeof(buf));
    // Modify an arbitrary metadata byte.
    buf[128]++;
    ASSERT_EQ(lseek(ramdisk_fd, off, SEEK_SET), off);
    ASSERT_EQ(write(ramdisk_fd, buf, sizeof(buf)), sizeof(buf));

    const partition_entry_t entries[] = {
        {kTestPartName1, 1},
    };
    fd = FVMRebind(fd, ramdisk_path, entries, 1);
    ASSERT_GT(fd, 0, "Failed to rebind FVM driver");
    vp_fd = open_partition(kTestUniqueGUID, kTestPartGUIDData, 0, nullptr);
    ASSERT_GT(vp_fd, 0);

    // The slice extension is no longer accessible
    ASSERT_TRUE(CheckWriteReadBlock(vp_fd, 0, 1));
    ASSERT_TRUE(CheckNoAccessBlock(vp_fd, slice_size / info.block_size, 1));

    // Clean up
    ASSERT_EQ(close(vp_fd), 0);
    ASSERT_EQ(close(fd), 0);
    ASSERT_EQ(close(ramdisk_fd), 0);
    ASSERT_TRUE(FVMCheckSliceSize(fvm_driver, 64lu * (1 << 20)));
    ASSERT_EQ(EndFVMTest(ramdisk_path), 0, "unmounting FVM");
    END_TEST;
}

// Corrupts BOTH metadata copies; the FVM driver is expected to refuse to
// rebind, since neither copy validates.
bool TestCorruptionUnrecoverable(void) {
    BEGIN_TEST;
    char ramdisk_path[PATH_MAX];
    char fvm_driver[PATH_MAX];
    ASSERT_EQ(StartFVMTest(512, 1 << 20, 64lu * (1 << 20), ramdisk_path, fvm_driver), 0, "error mounting FVM");
    const size_t kDiskSize = use_real_disk ?
        test_block_size * test_block_count : 512 * (1 << 20);
    int ramdisk_fd = open(ramdisk_path, O_RDWR);
    ASSERT_GT(ramdisk_fd, 0);

    int fd = open(fvm_driver, O_RDWR);
    ASSERT_GT(fd, 0);
    fvm_info_t fvm_info;
    ASSERT_GT(ioctl_block_fvm_query(fd, &fvm_info), 0);
    size_t slice_size = fvm_info.slice_size;

    // Allocate one VPart (writes to backup)
    alloc_req_t request;
    memset(&request, 0, sizeof(request));
    request.slice_count = 1;
    memcpy(request.guid, kTestUniqueGUID, GUID_LEN);
    strcpy(request.name, kTestPartName1);
    memcpy(request.type, kTestPartGUIDData, GUID_LEN);
    int vp_fd = fvm_allocate_partition(fd, &request);
    ASSERT_GT(vp_fd, 0);

    // Extend the vpart (writes to primary)
    extend_request_t erequest;
    erequest.offset = 1;
    erequest.length = 1;
    ASSERT_EQ(ioctl_block_fvm_extend(vp_fd, &erequest), 0);
    block_info_t info;
    ASSERT_GE(ioctl_block_get_info(vp_fd, &info), 0);
    ASSERT_EQ(info.block_count * info.block_size, slice_size * 2);

    // Initial slice access
    ASSERT_TRUE(CheckWriteReadBlock(vp_fd, 0, 1));
    // Extended slice access
    ASSERT_TRUE(CheckWriteReadBlock(vp_fd, slice_size / info.block_size, 1));

    ASSERT_EQ(close(vp_fd), 0);

    // Corrupt both copies of the metadata.
    // The 'primary' was the last one written, so the backup will be used.
// --- Tail of the preceding metadata-corruption test (function opened earlier
// in the file; only its body continues here). Both FVM metadata copies are
// corrupted in place on the raw ramdisk so the subsequent rebind must fail.
    off_t off = 0;
    uint8_t buf[FVM_BLOCK_SIZE];
    // Corrupt one byte of the metadata copy at the start of the disk.
    ASSERT_EQ(lseek(ramdisk_fd, off, SEEK_SET), off);
    ASSERT_EQ(read(ramdisk_fd, buf, sizeof(buf)), sizeof(buf));
    buf[128]++;
    ASSERT_EQ(lseek(ramdisk_fd, off, SEEK_SET), off);
    ASSERT_EQ(write(ramdisk_fd, buf, sizeof(buf)), sizeof(buf));
    // Corrupt one byte of the backup metadata copy as well.
    off = fvm::BackupStart(kDiskSize, slice_size);
    ASSERT_EQ(lseek(ramdisk_fd, off, SEEK_SET), off);
    ASSERT_EQ(read(ramdisk_fd, buf, sizeof(buf)), sizeof(buf));
    buf[128]++;
    ASSERT_EQ(lseek(ramdisk_fd, off, SEEK_SET), off);
    ASSERT_EQ(write(ramdisk_fd, buf, sizeof(buf)), sizeof(buf));

    const partition_entry_t entries[] = {
        {kTestPartName1, 1},
    };
    // With both metadata copies corrupted, FVM must refuse to come back up.
    ASSERT_LT(FVMRebind(fd, ramdisk_path, entries, 1), 0, "FVM Should have failed to rebind");

    // Clean up
    ASSERT_EQ(close(ramdisk_fd), 0);

    // FVM is no longer valid - only need to remove if using ramdisk
    if (!use_real_disk) {
        ASSERT_EQ(EndFVMTest(ramdisk_path), 0, "unmounting FVM");
    } else {
        fvm_overwrite(ramdisk_path, slice_size);
    }
    END_TEST;
}

// A contiguous run of virtual slices owned by one worker thread.
typedef struct {
    // Both in units of "slice"
    size_t start;
    size_t len;
} fvm_extent_t;

// Per-thread state: the thread's partition fd, the extents it currently
// owns, and its thread handle.
typedef struct {
    int vp_fd;
    fbl::Vector<fvm_extent_t> extents;
    thrd_t thr;
} fvm_thread_state_t;

// Shared state for the multithreaded random-op test. |slices_left| is the
// only field guarded by |lock|; the remaining fields are written once before
// the worker threads start.
template <size_t ThreadCount>
struct fvm_test_state_t {
    size_t block_size;
    size_t slice_size;
    size_t slices_total;
    fvm_thread_states_t thread_states[ThreadCount];

    fbl::Mutex lock;
    size_t slices_left TA_GUARDED(lock);
};

// Arguments handed to each worker thread: its index and the shared state.
template <size_t ThreadCount>
struct thrd_args_t {
    size_t tid;
    fvm_test_state_t<ThreadCount>* st;
};

// Worker thread body: performs 100 random extend/allocate/shrink/split/free
// operations against its own virtual partition, verifying data access after
// each one. Slices owned by this thread are filled with a per-thread byte
// value ("color") so cross-thread corruption would be detected.
//
// NOTE(review): the unittest ASSERT_* macros are written for bool-returning
// test functions; in this int-returning thread entry a failed assert returns
// false (i.e. 0), which the joiner's r == 0 check would treat as success.
// Presumably the framework also records the failure globally — verify.
template <size_t ThreadCount>
int random_access_thread(void* arg) {
    auto ta = static_cast<thrd_args_t<ThreadCount>*>(arg);
    uint8_t color = static_cast<uint8_t>(ta->tid);
    auto st = ta->st;
    auto self = &st->thread_states[color];

    unsigned int seed = static_cast<unsigned int>(zx_ticks_get());
    unittest_printf("random_access_thread using seed: %u\n", seed);

    // Before we begin, color our first slice.
    // We'll identify our own slices by the "color", which
    // is distinct between threads.
    ASSERT_TRUE(CheckWriteColor(self->vp_fd, 0, st->slice_size, color));
    ASSERT_TRUE(CheckReadColor(self->vp_fd, 0, st->slice_size, color));

    size_t num_ops = 100;
    for (size_t i = 0; i < num_ops; ++i) {
        switch (rand_r(&seed) % 5) {
        case 0: {
            // Extend and color slice, if possible
            size_t extent_index = rand_r(&seed) % self->extents.size();
            size_t extension_length = 0;
            {
                // Reserve the slices from the shared budget under the lock;
                // extensions are capped at 5 slices per op.
                fbl::AutoLock al(&st->lock);
                if (!st->slices_left) {
                    continue;
                }
                extension_length = fbl::min((rand_r(&seed) % st->slices_left) + 1, 5lu);
                st->slices_left -= extension_length;
            }
            extend_request_t erequest;
            erequest.offset = self->extents[extent_index].start + self->extents[extent_index].len;
            erequest.length = extension_length;
            size_t off = erequest.offset * st->slice_size;
            size_t len = extension_length * st->slice_size;
            // The region must be inaccessible before the extend and
            // readable/writable (with our color) afterwards.
            ASSERT_TRUE(CheckNoAccessBlock(self->vp_fd, off / st->block_size,
                                           len / st->block_size));
            ASSERT_EQ(ioctl_block_fvm_extend(self->vp_fd, &erequest), 0);
            self->extents[extent_index].len += extension_length;

            ASSERT_TRUE(CheckWriteColor(self->vp_fd, off, len, color));
            ASSERT_TRUE(CheckReadColor(self->vp_fd, off, len, color));
            break;
        }
        case 1: {
            // Allocate a new slice, if possible
            fvm_extent_t extent;
            // Space out the starting offsets far enough that there
            // is no risk of collision between fvm extents
            extent.start = (self->extents.end() - 1)->start + st->slices_total;
            {
                fbl::AutoLock al(&st->lock);
                if (!st->slices_left) {
                    continue;
                }
                extent.len = fbl::min((rand_r(&seed) % st->slices_left) + 1, 5lu);
                st->slices_left -= extent.len;
            }
            extend_request_t erequest;
            erequest.offset = extent.start;
            erequest.length = extent.len;
            size_t off = erequest.offset * st->slice_size;
            size_t len = extent.len * st->slice_size;
            ASSERT_TRUE(CheckNoAccessBlock(self->vp_fd, off / st->block_size,
                                           len / st->block_size));
            ASSERT_EQ(ioctl_block_fvm_extend(self->vp_fd, &erequest), 0);
            ASSERT_TRUE(CheckWriteColor(self->vp_fd, off, len, color));
            ASSERT_TRUE(CheckReadColor(self->vp_fd, off, len, color));
            // Record the new extent; push_back may allocate, so check it.
            fbl::AllocChecker ac;
            self->extents.push_back(fbl::move(extent), &ac);
            ASSERT_TRUE(ac.check());
            break;
        }
        case 2: {
            // Shrink slice, if possible
            size_t extent_index = rand_r(&seed) % self->extents.size();
            if (self->extents[extent_index].len == 1) {
                // Never shrink an extent to zero length here.
                continue;
            }
            size_t shrink_length = (rand_r(&seed) % (self->extents[extent_index].len - 1)) + 1;

            // Shrink from the tail of the extent.
            extend_request_t erequest;
            erequest.offset = self->extents[extent_index].start +
                              self->extents[extent_index].len - shrink_length;
            erequest.length = shrink_length;
            size_t off = self->extents[extent_index].start * st->slice_size;
            size_t len = self->extents[extent_index].len * st->slice_size;
            ASSERT_TRUE(CheckReadColor(self->vp_fd, off, len, color));
            ASSERT_EQ(ioctl_block_fvm_shrink(self->vp_fd, &erequest), 0);
            self->extents[extent_index].len -= shrink_length;
            // The remaining prefix must still hold our color.
            len = self->extents[extent_index].len * st->slice_size;
            ASSERT_TRUE(CheckReadColor(self->vp_fd, off, len, color));
            {
                // Return the freed slices to the shared budget.
                fbl::AutoLock al(&st->lock);
                st->slices_left += shrink_length;
            }
            break;
        }
        case 3: {
            // Split slice, if possible
            size_t extent_index = rand_r(&seed) % self->extents.size();
            if (self->extents[extent_index].len < 3) {
                // Need at least one slice on each side of the hole.
                continue;
            }
            size_t shrink_length = (rand_r(&seed) % (self->extents[extent_index].len - 2)) + 1;
            // Punch a hole starting one slice into the extent.
            extend_request_t erequest;
            erequest.offset = self->extents[extent_index].start + 1;
            erequest.length = shrink_length;
            size_t off = self->extents[extent_index].start * st->slice_size;
            size_t len = self->extents[extent_index].len * st->slice_size;
            ASSERT_TRUE(CheckReadColor(self->vp_fd, off, len, color));
            ASSERT_EQ(ioctl_block_fvm_shrink(self->vp_fd, &erequest), 0);

            // We can read the slice before...
            off = self->extents[extent_index].start * st->slice_size;
            len = st->slice_size;
            ASSERT_TRUE(CheckReadColor(self->vp_fd, off, len, color));
            // ... and the slices after...
            off = (self->extents[extent_index].start + 1 + shrink_length) * st->slice_size;
            len = (self->extents[extent_index].len - shrink_length - 1) * st->slice_size;
            ASSERT_TRUE(CheckReadColor(self->vp_fd, off, len, color));
            // ... but not in the middle.
            off = (self->extents[extent_index].start + 1) * st->slice_size;
            len = (shrink_length) * st->slice_size;
            ASSERT_TRUE(CheckNoAccessBlock(self->vp_fd, off / st->block_size,
                                           len / st->block_size));

            // To avoid collisions between test extents, let's remove the
            // trailing extent.
// (continuation of random_access_thread case 3: release the trailing
// remainder of the split so only the single leading slice is kept)
            erequest.offset = self->extents[extent_index].start + 1 + shrink_length;
            erequest.length = self->extents[extent_index].len - shrink_length - 1;
            ASSERT_EQ(ioctl_block_fvm_shrink(self->vp_fd, &erequest), 0);

            self->extents[extent_index].len = 1;
            off = self->extents[extent_index].start * st->slice_size;
            len = self->extents[extent_index].len * st->slice_size;
            ASSERT_TRUE(CheckReadColor(self->vp_fd, off, len, color));
            {
                // Only the hole (|shrink_length| slices) is returned to the
                // budget here; the trailing remainder released above is not.
                fbl::AutoLock al(&st->lock);
                st->slices_left += shrink_length;
            }
            break;
        }
        case 4: {
            // Deallocate a slice
            size_t extent_index = rand_r(&seed) % self->extents.size();
            if (extent_index == 0) {
                // We must keep the 0th slice
                continue;
            }
            extend_request_t erequest;
            erequest.offset = self->extents[extent_index].start;
            erequest.length = self->extents[extent_index].len;
            size_t off = self->extents[extent_index].start * st->slice_size;
            size_t len = self->extents[extent_index].len * st->slice_size;
            ASSERT_TRUE(CheckReadColor(self->vp_fd, off, len, color));
            ASSERT_EQ(ioctl_block_fvm_shrink(self->vp_fd, &erequest), 0);
            ASSERT_TRUE(CheckNoAccessBlock(self->vp_fd, off / st->block_size,
                                           len / st->block_size));
            {
                fbl::AutoLock al(&st->lock);
                st->slices_left += self->extents[extent_index].len;
            }
            // Compact the extent list over the removed entry.
            for (size_t i = extent_index; i < self->extents.size() - 1; i++) {
                self->extents[i] = fbl::move(self->extents[i + 1]);
            }
            self->extents.pop_back();
            break;
        }
        }
    }
    return 0;
}

// Stress test: |ThreadCount| worker threads each own a virtual partition and
// perform random slice operations concurrently. When |Persistence| is true,
// all threads are joined midway, the FVM is rebound (simulating a reboot),
// the partitions are re-opened, and the workers run a second round.
template <size_t ThreadCount, bool Persistence>
bool TestRandomOpMultithreaded(void) {
    BEGIN_TEST;
    char ramdisk_path[PATH_MAX];
    char fvm_driver[PATH_MAX];
    // On a ramdisk: 512-byte blocks, 1M blocks (512 MiB disk).
    const size_t kBlockSize = use_real_disk ? test_block_size : 512;
    const size_t kBlockCount = use_real_disk ? test_block_count : 1 << 20;
    const size_t kBlocksPerSlice = 256;
    const size_t kSliceSize = kBlocksPerSlice * kBlockSize;
    ASSERT_EQ(StartFVMTest(kBlockSize, kBlockCount, kSliceSize, ramdisk_path,
                           fvm_driver),
              0, "error mounting FVM");

    const size_t kDiskSize = kBlockSize * kBlockCount;
    const size_t kSlicesCount = fvm::UsableSlicesCount(kDiskSize, kSliceSize);

    // A real disk may simply be too small for this thread count; skip rather
    // than fail in that case.
    if (use_real_disk && kSlicesCount <= ThreadCount * 2) {
        printf("Not enough slices to distribute between threads: ignoring test\n");
        return true;
    }

    ASSERT_GT(kSlicesCount, ThreadCount * 2, "Not enough slices to distribute between threads");

    fvm_test_state_t<ThreadCount> s{};
    s.block_size = kBlockSize;
    s.slice_size = kSliceSize;
    {
        // Each thread starts owning one slice, so the shared budget begins at
        // total minus ThreadCount.
        fbl::AutoLock al(&s.lock);
        s.slices_left = kSlicesCount - ThreadCount;
        s.slices_total = kSlicesCount;
    }

    int fd = open(fvm_driver, O_RDWR);
    ASSERT_GT(fd, 0);

    alloc_req_t request;
    memset(&request, 0, sizeof(request));
    size_t slice_count = 1;
    request.slice_count = slice_count;
    strcpy(request.name, "TestPartition");
    memcpy(request.type, kTestPartGUIDData, GUID_LEN);
    memcpy(request.guid, kTestUniqueGUID, GUID_LEN);

    // One single-slice partition per thread, distinguished by GUID byte 0.
    for (size_t i = 0; i < ThreadCount; i++) {
        // Change the GUID enough to be distinct for each thread
        request.guid[0] = static_cast<uint8_t>(i);
        s.thread_states[i].vp_fd = fvm_allocate_partition(fd, &request);
        ASSERT_GT(s.thread_states[i].vp_fd, 0);
    }

    thrd_args_t<ThreadCount> ta[ThreadCount];

    // Initialize and launch all threads
    for (size_t i = 0; i < ThreadCount; i++) {
        ta[i].tid = i;
        ta[i].st = &s;

        EXPECT_EQ(s.thread_states[i].extents.size(), 0);
        fvm_extent_t extent;
        extent.start = 0;
        extent.len = 1;
        fbl::AllocChecker ac;
        s.thread_states[i].extents.push_back(fbl::move(extent), &ac);
        EXPECT_TRUE(ac.check());
        EXPECT_TRUE(CheckWriteReadBlock(s.thread_states[i].vp_fd, 0, kBlocksPerSlice));
        EXPECT_EQ(thrd_create(&s.thread_states[i].thr,
                              random_access_thread<ThreadCount>, &ta[i]),
                  thrd_success);
    }

    if (Persistence) {
        partition_entry_t entries[ThreadCount];

        // Join all threads
        for (size_t i = 0; i < ThreadCount; i++) {
            int r;
            EXPECT_EQ(thrd_join(s.thread_states[i].thr, &r), thrd_success);
            EXPECT_EQ(r, 0);
            EXPECT_EQ(close(s.thread_states[i].vp_fd), 0);
            entries[i].name = request.name;
            entries[i].number = i + 1;
        }

        // Rebind the FVM (simulating rebooting)
        fd = FVMRebind(fd, ramdisk_path, entries, fbl::count_of(entries));
        ASSERT_GT(fd, 0);

        // Re-open all partitions, re-launch the worker threads
        for (size_t i = 0; i < ThreadCount; i++) {
            request.guid[0] = static_cast<uint8_t>(i);
            int vp_fd = open_partition(request.guid, request.type, 0, nullptr);
            ASSERT_GT(vp_fd, 0);
            s.thread_states[i].vp_fd = vp_fd;
            EXPECT_EQ(thrd_create(&s.thread_states[i].thr,
                                  random_access_thread<ThreadCount>, &ta[i]),
                      thrd_success);
        }
    }

    // Join all the threads, verify their initial block is still valid, and
    // destroy them.
    for (size_t i = 0; i < ThreadCount; i++) {
        int r;
        EXPECT_EQ(thrd_join(s.thread_states[i].thr, &r), thrd_success);
        EXPECT_EQ(r, 0);
        EXPECT_TRUE(CheckWriteReadBlock(s.thread_states[i].vp_fd, 0, kBlocksPerSlice));
        EXPECT_EQ(ioctl_block_fvm_destroy_partition(s.thread_states[i].vp_fd), 0);
        EXPECT_EQ(close(s.thread_states[i].vp_fd), 0);
    }

    ASSERT_EQ(close(fd), 0);
    ASSERT_TRUE(FVMCheckSliceSize(fvm_driver, kSliceSize));
    ASSERT_EQ(EndFVMTest(ramdisk_path), 0, "unmounting FVM");
    END_TEST;
}

} // namespace

BEGIN_TEST_CASE(fvm_tests)
RUN_TEST_MEDIUM(TestTooSmall)
RUN_TEST_MEDIUM(TestLarge)
RUN_TEST_MEDIUM(TestEmpty)
RUN_TEST_MEDIUM(TestAllocateOne)
RUN_TEST_MEDIUM(TestAllocateMany)
RUN_TEST_MEDIUM(TestCloseDuringAccess)
RUN_TEST_MEDIUM(TestReleaseDuringAccess)
RUN_TEST_MEDIUM(TestDestroyDuringAccess)
RUN_TEST_MEDIUM(TestVPartitionExtend)
RUN_TEST_MEDIUM(TestVPartitionExtendSparse)
RUN_TEST_MEDIUM(TestVPartitionShrink)
RUN_TEST_MEDIUM(TestVPartitionSplit)
RUN_TEST_MEDIUM(TestVPartitionDestroy)
RUN_TEST_MEDIUM(TestVPartitionQuery)
RUN_TEST_MEDIUM(TestSliceAccessContiguous)
RUN_TEST_MEDIUM(TestSliceAccessMany)
RUN_TEST_MEDIUM(TestSliceAccessNonContiguousPhysical)
RUN_TEST_MEDIUM(TestSliceAccessNonContiguousVirtual)
RUN_TEST_MEDIUM(TestPersistenceSimple)
RUN_TEST_LARGE(TestVPartitionUpgrade)
RUN_TEST_LARGE(TestMounting)
RUN_TEST_LARGE(TestMkfs)
RUN_TEST_MEDIUM(TestCorruptionOk)
RUN_TEST_MEDIUM(TestCorruptionRegression)
RUN_TEST_MEDIUM(TestCorruptionUnrecoverable)
RUN_TEST_LARGE((TestRandomOpMultithreaded<1, /* persistent= */ false>))
RUN_TEST_LARGE((TestRandomOpMultithreaded<3, /* persistent= */ false>))
RUN_TEST_LARGE((TestRandomOpMultithreaded<5, /* persistent= */ false>))
RUN_TEST_LARGE((TestRandomOpMultithreaded<10, /* persistent= */ false>))
2917RUN_TEST_LARGE((TestRandomOpMultithreaded<25, /* persistent= */ false>)) 2918RUN_TEST_LARGE((TestRandomOpMultithreaded<1, /* persistent= */ true>)) 2919RUN_TEST_LARGE((TestRandomOpMultithreaded<3, /* persistent= */ true>)) 2920RUN_TEST_LARGE((TestRandomOpMultithreaded<5, /* persistent= */ true>)) 2921RUN_TEST_LARGE((TestRandomOpMultithreaded<10, /* persistent= */ true>)) 2922RUN_TEST_LARGE((TestRandomOpMultithreaded<25, /* persistent= */ true>)) 2923RUN_TEST_MEDIUM(TestCorruptMount) 2924END_TEST_CASE(fvm_tests) 2925 2926int main(int argc, char** argv) { 2927 int i = 1; 2928 while (i < argc - 1) { 2929 if (!strcmp(argv[i], "-d")) { 2930 if (strnlen(argv[i + 1], PATH_MAX) > 0) { 2931 int fd = open(argv[i + 1], O_RDWR); 2932 2933 if (fd < 0) { 2934 fprintf(stderr, "[fs] Could not open block device\n"); 2935 return -1; 2936 } else if (ioctl_device_get_topo_path(fd, test_disk_path, PATH_MAX) < 0) { 2937 fprintf(stderr, "[fs] Could not acquire topological path of block device\n"); 2938 return -1; 2939 } 2940 2941 block_info_t block_info; 2942 ssize_t rc = ioctl_block_get_info(fd, &block_info); 2943 2944 if (rc < 0 || rc != sizeof(block_info)) { 2945 fprintf(stderr, "[fs] Could not query block device info\n"); 2946 return -1; 2947 } 2948 2949 // If there is already an FVM on this partition, remove it 2950 fvm_destroy(test_disk_path); 2951 2952 use_real_disk = true; 2953 test_block_size = block_info.block_size; 2954 test_block_count = block_info.block_count; 2955 close(fd); 2956 break; 2957 } 2958 } 2959 i += 1; 2960 } 2961 2962 // Initialize tmpfs. 2963 async::Loop loop(&kAsyncLoopConfigNoAttachToThread); 2964 if (loop.StartThread() != ZX_OK) { 2965 fprintf(stderr, "Error: Cannot initialize local tmpfs loop\n"); 2966 return -1; 2967 } 2968 if (memfs_install_at(loop.dispatcher(), kTmpfsPath) != ZX_OK) { 2969 fprintf(stderr, "Error: Cannot install local tmpfs\n"); 2970 return -1; 2971 } 2972 2973 return unittest_run_all_tests(argc, argv) ? 0 : -1; 2974} 2975