// Copyright 2017 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <sys/stat.h>

#include <crypto/bytes.h>
#include <crypto/cipher.h>
#include <fbl/unique_fd.h>
#include <fvm/fvm.h>
#include <unittest/unittest.h>
#include <zircon/device/block.h>
#include <zircon/device/ramdisk.h>
#include <zircon/errors.h>
#include <zircon/types.h>
#include <zxcrypt/volume.h>

#include "test-device.h"

namespace zxcrypt {
namespace testing {
namespace {

// See test-device.h; the following macros allow reusing tests for each of the supported versions.
#define EACH_PARAM(OP, Test) OP(Test, Volume, AES256_XTS_SHA256)

// Smoke test: binding a zxcrypt device (optionally on top of FVM) succeeds.
bool TestBind(Volume::Version version, bool fvm) {
    BEGIN_TEST;

    TestDevice device;
    EXPECT_TRUE(device.Bind(version, fvm));

    END_TEST;
}
DEFINE_EACH_DEVICE(TestBind);

// TODO(aarongreen): When ZX-1130 is resolved, add tests that check zxcrypt_rekey and zxcrypt_shred.

// Device::DdkGetSize tests

// Verifies the zxcrypt device is smaller than its parent by exactly the
// blocks zxcrypt reserves for its own metadata.
bool TestDdkGetSize(Volume::Version version, bool fvm) {
    BEGIN_TEST;

    TestDevice device;
    ASSERT_TRUE(device.Bind(version, fvm));
    fbl::unique_fd parent = device.parent();
    fbl::unique_fd zxcrypt = device.zxcrypt();

    struct stat parent_buf, zxcrypt_buf;
    ASSERT_EQ(fstat(parent.get(), &parent_buf), 0, strerror(errno));
    ASSERT_EQ(fstat(zxcrypt.get(), &zxcrypt_buf), 0, strerror(errno));

    // The size difference, in blocks, must equal the reserved-block count.
    ASSERT_GT(parent_buf.st_size, zxcrypt_buf.st_size);
    EXPECT_EQ((parent_buf.st_size - zxcrypt_buf.st_size) / device.block_size(),
              device.reserved_blocks());

    END_TEST;
}
DEFINE_EACH_DEVICE(TestDdkGetSize);

// Device::DdkIoctl tests

// Verifies BLOCK_GET_INFO passes through zxcrypt: same block size as the
// parent, and a block count reduced by at least the reserved blocks.
bool TestBlockGetInfo(Volume::Version version, bool fvm) {
    BEGIN_TEST;

    TestDevice device;
    ASSERT_TRUE(device.Bind(version, fvm));
    fbl::unique_fd parent = device.parent();
    fbl::unique_fd zxcrypt = device.zxcrypt();

    block_info_t parent_blk, zxcrypt_blk;
    // A null out-pointer should produce the same result on both devices.
    EXPECT_EQ(ioctl_block_get_info(parent.get(), nullptr),
              ioctl_block_get_info(zxcrypt.get(), nullptr));
    EXPECT_GE(ioctl_block_get_info(parent.get(), &parent_blk), 0);
    EXPECT_GE(ioctl_block_get_info(zxcrypt.get(), &zxcrypt_blk), 0);

    EXPECT_EQ(parent_blk.block_size, zxcrypt_blk.block_size);
    EXPECT_GE(parent_blk.block_count, zxcrypt_blk.block_count + device.reserved_blocks());

    END_TEST;
}
DEFINE_EACH_DEVICE(TestBlockGetInfo);

// Verifies FVM queries through zxcrypt: unsupported without FVM, and with FVM
// the slice accounting reflects zxcrypt's reserved slices.
bool TestBlockFvmQuery(Volume::Version version, bool fvm) {
    BEGIN_TEST;

    TestDevice device;
    ASSERT_TRUE(device.Bind(version, fvm));
    fbl::unique_fd parent = device.parent();
    fbl::unique_fd zxcrypt = device.zxcrypt();

    fvm_info_t parent_fvm, zxcrypt_fvm;
    if (!fvm) {
        // Send FVM query to non-FVM device
        EXPECT_EQ(ioctl_block_fvm_query(zxcrypt.get(), &zxcrypt_fvm), ZX_ERR_NOT_SUPPORTED);
    } else {
        // Get the zxcrypt info
        EXPECT_EQ(ioctl_block_fvm_query(parent.get(), nullptr),
                  ioctl_block_fvm_query(zxcrypt.get(), nullptr));
        EXPECT_GE(ioctl_block_fvm_query(parent.get(), &parent_fvm), 0);
        EXPECT_GE(ioctl_block_fvm_query(zxcrypt.get(), &zxcrypt_fvm), 0);
        EXPECT_EQ(parent_fvm.slice_size, zxcrypt_fvm.slice_size);
        EXPECT_EQ(parent_fvm.vslice_count, zxcrypt_fvm.vslice_count + device.reserved_slices());
    }

    END_TEST;
}
DEFINE_EACH_DEVICE(TestBlockFvmQuery);

// Helper: queries vslice 0 on both the parent and zxcrypt devices and, when
// supported, checks zxcrypt's leading slice range is shorter by the reserved
// slices.  Returns via BEGIN_HELPER/END_HELPER for use inside other tests.
bool QueryLeadingFvmSlice(const TestDevice& device) {
    BEGIN_HELPER;

    fbl::unique_fd parent = device.parent();
    fbl::unique_fd zxcrypt = device.zxcrypt();

    query_request_t req;
    req.count = 1;
    req.vslice_start[0] = 0;
    query_response_t parent_resp, zxcrypt_resp;

    ssize_t res = ioctl_block_fvm_vslice_query(parent.get(), &req, &parent_resp);
    EXPECT_EQ(res, ioctl_block_fvm_vslice_query(zxcrypt.get(), &req, &zxcrypt_resp));
    if (res >= 0) {
        // Query zxcrypt about the slices, which should omit those reserved
        ASSERT_EQ(parent_resp.count, 1U);
        EXPECT_TRUE(parent_resp.vslice_range[0].allocated);

        ASSERT_EQ(zxcrypt_resp.count, 1U);
        EXPECT_TRUE(zxcrypt_resp.vslice_range[0].allocated);

        EXPECT_EQ(parent_resp.vslice_range[0].count,
                  zxcrypt_resp.vslice_range[0].count + device.reserved_slices());
    }
    END_HELPER;
}

// Verifies the vslice query accounting immediately after bind.
bool TestBlockFvmVSliceQuery(Volume::Version version, bool fvm) {
    BEGIN_TEST;

    TestDevice device;
    ASSERT_TRUE(device.Bind(version, fvm));
    EXPECT_TRUE(QueryLeadingFvmSlice(device));
    END_TEST;
}
DEFINE_EACH_DEVICE(TestBlockFvmVSliceQuery);

// Verifies shrink/extend: unsupported without FVM; with FVM, each operation
// succeeds and the slice accounting remains consistent afterwards.
bool TestBlockFvmShrinkAndExtend(Volume::Version version, bool fvm) {
    BEGIN_TEST;

    TestDevice device;
    ASSERT_TRUE(device.Bind(version, fvm));
    fbl::unique_fd zxcrypt = device.zxcrypt();

    extend_request_t mod;
    mod.offset = 1;
    mod.length = 1;

    if (!fvm) {
        // Send FVM ioctl to non-FVM device
        EXPECT_EQ(ioctl_block_fvm_shrink(zxcrypt.get(), &mod), ZX_ERR_NOT_SUPPORTED);
        EXPECT_EQ(ioctl_block_fvm_extend(zxcrypt.get(), &mod), ZX_ERR_NOT_SUPPORTED);
    } else {
        // Shrink the FVM partition and make sure the change in size is reflected
        EXPECT_GE(ioctl_block_fvm_shrink(zxcrypt.get(), &mod), 0);
        EXPECT_TRUE(QueryLeadingFvmSlice(device));

        // Extend the FVM partition and make sure the change in size is reflected
        EXPECT_GE(ioctl_block_fvm_extend(zxcrypt.get(), &mod), 0);
        EXPECT_TRUE(QueryLeadingFvmSlice(device));
    }
    END_TEST;
}
DEFINE_EACH_DEVICE(TestBlockFvmShrinkAndExtend);

// Device::DdkIotxnQueue tests

// Zero-length read/write through the file descriptor should succeed trivially.
bool TestFdZeroLength(Volume::Version version, bool fvm) {
    BEGIN_TEST;

    TestDevice device;
    ASSERT_TRUE(device.Bind(version, fvm));

    EXPECT_TRUE(device.WriteFd(0, 0));
    EXPECT_TRUE(device.ReadFd(0, 0));

    END_TEST;
}
DEFINE_EACH_DEVICE(TestFdZeroLength);

// Round-trips one block at the start of the device via the fd.
bool TestFdFirstBlock(Volume::Version version, bool fvm) {
    BEGIN_TEST;

    TestDevice device;
    ASSERT_TRUE(device.Bind(version, fvm));
    size_t one = device.block_size();

    EXPECT_TRUE(device.WriteFd(0, one));
    EXPECT_TRUE(device.ReadFd(0, one));

    END_TEST;
}
DEFINE_EACH_DEVICE(TestFdFirstBlock);

// Round-trips the final block of the device via the fd.
bool TestFdLastBlock(Volume::Version version, bool fvm) {
    BEGIN_TEST;

    TestDevice device;
    ASSERT_TRUE(device.Bind(version, fvm));
    size_t n = device.size();
    size_t one = device.block_size();

    EXPECT_TRUE(device.WriteFd(n - one, one));
    EXPECT_TRUE(device.ReadFd(n - one, one));

    END_TEST;
}
DEFINE_EACH_DEVICE(TestFdLastBlock);

// Round-trips the entire device in a single fd read/write.
bool TestFdAllBlocks(Volume::Version version, bool fvm) {
    BEGIN_TEST;

    TestDevice device;
    ASSERT_TRUE(device.Bind(version, fvm));
    size_t n = device.size();

    EXPECT_TRUE(device.WriteFd(0, n));
    EXPECT_TRUE(device.ReadFd(0, n));

    END_TEST;
}
DEFINE_EACH_DEVICE(TestFdAllBlocks);

// Verifies that I/O at offsets or lengths not aligned to the block size fails.
bool TestFdUnaligned(Volume::Version version, bool fvm) {
    BEGIN_TEST;

    TestDevice device;
    ASSERT_TRUE(device.Bind(version, fvm));
    size_t one = device.block_size();
    ssize_t one_s = static_cast<ssize_t>(one);

    // Aligned I/O works as a baseline.
    ASSERT_TRUE(device.WriteFd(one, one));
    ASSERT_TRUE(device.ReadFd(one, one));

    // Misaligned offset (one byte early).
    EXPECT_EQ(device.lseek(one - 1), one_s - 1);
    EXPECT_LT(device.write(one, one), 0);
    EXPECT_LT(device.read(one, one), 0);

    // Misaligned offset (one byte late).
    EXPECT_EQ(device.lseek(one + 1), one_s + 1);
    EXPECT_LT(device.write(one, one), 0);
    EXPECT_LT(device.read(one, one), 0);

    // Misaligned length (one byte short).
    EXPECT_EQ(device.lseek(one), one_s);
    EXPECT_LT(device.write(one, one - 1), 0);
    EXPECT_LT(device.read(one, one - 1), 0);

    // Misaligned length (one byte long).
    EXPECT_EQ(device.lseek(one), one_s);
    EXPECT_LT(device.write(one, one + 1), 0);
    EXPECT_LT(device.read(one, one + 1), 0);

    END_TEST;
}
DEFINE_EACH_DEVICE(TestFdUnaligned);

// Verifies that fd I/O touching past the end of the device does not transfer
// the full requested amount.
bool TestFdOutOfBounds(Volume::Version version, bool fvm) {
    BEGIN_TEST;

    TestDevice device;
    ASSERT_TRUE(device.Bind(version, fvm));
    size_t n = device.size();
    ssize_t n_s = static_cast<ssize_t>(n);

    size_t one = device.block_size();
    ssize_t one_s = static_cast<ssize_t>(one);

    size_t two = one + one;
    ssize_t two_s = static_cast<ssize_t>(two);

    ASSERT_TRUE(device.WriteFd(0, one));

    // Writes starting at or spilling past the device end must not complete.
    EXPECT_EQ(device.lseek(n), n_s);
    EXPECT_NE(device.write(n, one), one_s);

    EXPECT_EQ(device.lseek(n - one), n_s - one_s);
    EXPECT_NE(device.write(n - one, two), two_s);

    EXPECT_EQ(device.lseek(two), two_s);
    EXPECT_NE(device.write(two, n - one), n_s - one_s);

    EXPECT_EQ(device.lseek(one), one_s);
    EXPECT_NE(device.write(one, n), n_s);

    ASSERT_TRUE(device.ReadFd(0, one));

    // Same boundary cases for reads.
    EXPECT_EQ(device.lseek(n), n_s);
    EXPECT_NE(device.read(n, one), one_s);

    EXPECT_EQ(device.lseek(n - one), n_s - one_s);
    EXPECT_NE(device.read(n - one, two), two_s);

    EXPECT_EQ(device.lseek(two), two_s);
    EXPECT_NE(device.read(two, n - one), n_s - one_s);

    EXPECT_EQ(device.lseek(one), one_s);
    EXPECT_NE(device.read(one, n), n_s);

    END_TEST;
}
DEFINE_EACH_DEVICE(TestFdOutOfBounds);

// Writes the whole device at once, rebinds (forcing re-decryption of
// persisted data), then reads it back one block at a time.
bool TestFdOneToMany(Volume::Version version, bool fvm) {
    BEGIN_TEST;

    TestDevice device;
    ASSERT_TRUE(device.Bind(version, fvm));
    size_t n = device.size();
    size_t one = device.block_size();

    ASSERT_TRUE(device.WriteFd(0, n));
    ASSERT_TRUE(device.Rebind());

    for (size_t off = 0; off < n; off += one) {
        EXPECT_TRUE(device.ReadFd(off, one));
    }

    END_TEST;
}
DEFINE_EACH_DEVICE(TestFdOneToMany);

// Writes one block at a time, rebinds, then reads the whole device at once.
bool TestFdManyToOne(Volume::Version version, bool fvm) {
    BEGIN_TEST;

    TestDevice device;
    ASSERT_TRUE(device.Bind(version, fvm));
    size_t n = device.size();
    size_t one = device.block_size();

    for (size_t off = 0; off < n; off += one) {
        EXPECT_TRUE(device.WriteFd(off, one));
    }

    ASSERT_TRUE(device.Rebind());
    EXPECT_TRUE(device.ReadFd(0, n));

    END_TEST;
}
DEFINE_EACH_DEVICE(TestFdManyToOne);

// Device::BlockWrite and Device::BlockRead tests

// Zero-length transactions are rejected on the block FIFO path (unlike fd I/O).
bool TestVmoZeroLength(Volume::Version version, bool fvm) {
    BEGIN_TEST;

    TestDevice device;
    ASSERT_TRUE(device.Bind(version, fvm));

    // Zero length is illegal for the block fifo
    EXPECT_ZX(device.block_fifo_txn(BLOCKIO_WRITE, 0, 0), ZX_ERR_INVALID_ARGS);
    EXPECT_ZX(device.block_fifo_txn(BLOCKIO_READ, 0, 0), ZX_ERR_INVALID_ARGS);

    END_TEST;
}
DEFINE_EACH_DEVICE(TestVmoZeroLength);

// Round-trips the first block via VMO-based block I/O.
bool TestVmoFirstBlock(Volume::Version version, bool fvm) {
    BEGIN_TEST;

    TestDevice device;
    ASSERT_TRUE(device.Bind(version, fvm));

    EXPECT_TRUE(device.WriteVmo(0, 1));
    EXPECT_TRUE(device.ReadVmo(0, 1));

    END_TEST;
}
DEFINE_EACH_DEVICE(TestVmoFirstBlock);

// Round-trips the last block via VMO-based block I/O.
bool TestVmoLastBlock(Volume::Version version, bool fvm) {
    BEGIN_TEST;

    TestDevice device;
    ASSERT_TRUE(device.Bind(version, fvm));
    size_t n = device.block_count();

    EXPECT_TRUE(device.WriteVmo(n - 1, 1));
    EXPECT_TRUE(device.ReadVmo(n - 1, 1));

    END_TEST;
}
DEFINE_EACH_DEVICE(TestVmoLastBlock);

// Round-trips every block in a single VMO transaction.
bool TestVmoAllBlocks(Volume::Version version, bool fvm) {
    BEGIN_TEST;

    TestDevice device;
    ASSERT_TRUE(device.Bind(version, fvm));
    size_t n = device.block_count();

    EXPECT_TRUE(device.WriteVmo(0, n));
    EXPECT_TRUE(device.ReadVmo(0, n));

    END_TEST;
}
DEFINE_EACH_DEVICE(TestVmoAllBlocks);

// Verifies block-FIFO transactions beyond the device's block count fail with
// ZX_ERR_OUT_OF_RANGE.
bool TestVmoOutOfBounds(Volume::Version version, bool fvm) {
    BEGIN_TEST;

    TestDevice device;
    ASSERT_TRUE(device.Bind(version, fvm));
    size_t n = device.block_count();

    ASSERT_TRUE(device.WriteVmo(0, 1));

    EXPECT_ZX(device.block_fifo_txn(BLOCKIO_WRITE, n, 1), ZX_ERR_OUT_OF_RANGE);
    EXPECT_ZX(device.block_fifo_txn(BLOCKIO_WRITE, n - 1, 2), ZX_ERR_OUT_OF_RANGE);
    EXPECT_ZX(device.block_fifo_txn(BLOCKIO_WRITE, 2, n - 1), ZX_ERR_OUT_OF_RANGE);
    EXPECT_ZX(device.block_fifo_txn(BLOCKIO_WRITE, 1, n), ZX_ERR_OUT_OF_RANGE);

    ASSERT_TRUE(device.ReadVmo(0, 1));

    EXPECT_ZX(device.block_fifo_txn(BLOCKIO_READ, n, 1), ZX_ERR_OUT_OF_RANGE);
    EXPECT_ZX(device.block_fifo_txn(BLOCKIO_READ, n - 1, 2), ZX_ERR_OUT_OF_RANGE);
    EXPECT_ZX(device.block_fifo_txn(BLOCKIO_READ, 2, n - 1), ZX_ERR_OUT_OF_RANGE);
    EXPECT_ZX(device.block_fifo_txn(BLOCKIO_READ, 1, n), ZX_ERR_OUT_OF_RANGE);

    END_TEST;
}
DEFINE_EACH_DEVICE(TestVmoOutOfBounds);

// Writes all blocks in one transaction, rebinds, reads back one at a time.
bool TestVmoOneToMany(Volume::Version version, bool fvm) {
    BEGIN_TEST;

    TestDevice device;
    ASSERT_TRUE(device.Bind(version, fvm));
    size_t n = device.block_count();

    EXPECT_TRUE(device.WriteVmo(0, n));
    ASSERT_TRUE(device.Rebind());
    for (size_t off = 0; off < n; ++off) {
        EXPECT_TRUE(device.ReadVmo(off, 1));
    }

    END_TEST;
}
DEFINE_EACH_DEVICE(TestVmoOneToMany);

447bool TestVmoManyToOne(Volume::Version version, bool fvm) { 448 BEGIN_TEST; 449 450 TestDevice device; 451 ASSERT_TRUE(device.Bind(version, fvm)); 452 size_t n = device.block_count(); 453 454 for (size_t off = 0; off < n; ++off) { 455 EXPECT_TRUE(device.WriteVmo(off, 1)); 456 } 457 458 ASSERT_TRUE(device.Rebind()); 459 EXPECT_TRUE(device.ReadVmo(0, n)); 460 461 END_TEST; 462} 463DEFINE_EACH_DEVICE(TestVmoManyToOne); 464 465bool TestVmoStall(Volume::Version version, bool fvm) { 466 BEGIN_TEST; 467 TestDevice device; 468 ASSERT_TRUE(device.Bind(version, fvm)); 469 fbl::unique_fd zxcrypt = device.zxcrypt(); 470 471 // The device can have up to 4 * max_transfer_size bytes in flight before it begins queuing them 472 // internally. 473 block_info_t zxcrypt_blk; 474 EXPECT_GE(ioctl_block_get_info(zxcrypt.get(), &zxcrypt_blk), 0); 475 size_t blks_per_req = 4; 476 size_t max = Volume::kBufferSize / (device.block_size() * blks_per_req); 477 size_t num = max + 1; 478 fbl::AllocChecker ac; 479 fbl::unique_ptr<block_fifo_request_t[]> requests(new (&ac) block_fifo_request_t[num]); 480 ASSERT_TRUE(ac.check()); 481 for (size_t i = 0; i < num; ++i) { 482 requests[i].opcode = (i % 2 == 0 ? 
BLOCKIO_WRITE : BLOCKIO_READ); 483 requests[i].length = static_cast<uint32_t>(blks_per_req); 484 requests[i].dev_offset = 0; 485 requests[i].vmo_offset = 0; 486 } 487 488 EXPECT_TRUE(device.SleepUntil(max, true /* defer transactions */)); 489 EXPECT_EQ(device.block_fifo_txn(requests.get(), num), ZX_OK); 490 EXPECT_TRUE(device.WakeUp()); 491 492 END_TEST; 493} 494DEFINE_EACH_DEVICE(TestVmoStall); 495 496bool TestWriteAfterFvmExtend(Volume::Version version) { 497 BEGIN_TEST; 498 499 TestDevice device; 500 ASSERT_TRUE(device.Bind(version, true)); 501 fbl::unique_fd zxcrypt = device.zxcrypt(); 502 503 size_t n = device.size(); 504 ssize_t n_s = static_cast<ssize_t>(n); 505 506 size_t one = device.block_size(); 507 ssize_t one_s = static_cast<ssize_t>(one); 508 509 EXPECT_EQ(device.lseek(n), n_s); 510 EXPECT_NE(device.write(n, one), one_s); 511 512 fvm_info_t info; 513 EXPECT_GE(ioctl_block_fvm_query(zxcrypt.get(), &info), 0); 514 515 extend_request_t mod; 516 mod.offset = device.size() / info.slice_size; 517 mod.length = 1; 518 519 EXPECT_GE(ioctl_block_fvm_extend(zxcrypt.get(), &mod), 0); 520 EXPECT_EQ(device.lseek(n), n_s); 521 EXPECT_EQ(device.write(n, one), one_s); 522 523 END_TEST; 524} 525DEFINE_EACH(TestWriteAfterFvmExtend); 526 527// TODO(aarongreen): Currently, we're using XTS, which provides no data integrity. When possible, 528// we should switch to an AEAD, which would allow us to detect data corruption when doing I/O. 
// bool TestBadData(void) {
//     BEGIN_TEST;
//     END_TEST;
// }

// Register every test for each supported volume version and FVM/non-FVM.
BEGIN_TEST_CASE(ZxcryptTest)
RUN_EACH_DEVICE(TestBind)
RUN_EACH_DEVICE(TestDdkGetSize)
RUN_EACH_DEVICE(TestBlockGetInfo)
RUN_EACH_DEVICE(TestBlockFvmQuery)
RUN_EACH_DEVICE(TestBlockFvmVSliceQuery)
RUN_EACH_DEVICE(TestBlockFvmShrinkAndExtend)
RUN_EACH_DEVICE(TestFdZeroLength)
RUN_EACH_DEVICE(TestFdFirstBlock)
RUN_EACH_DEVICE(TestFdLastBlock)
RUN_EACH_DEVICE(TestFdAllBlocks)
RUN_EACH_DEVICE(TestFdUnaligned)
RUN_EACH_DEVICE(TestFdOutOfBounds)
RUN_EACH_DEVICE(TestFdOneToMany)
RUN_EACH_DEVICE(TestFdManyToOne)
RUN_EACH_DEVICE(TestVmoZeroLength)
RUN_EACH_DEVICE(TestVmoFirstBlock)
RUN_EACH_DEVICE(TestVmoLastBlock)
RUN_EACH_DEVICE(TestVmoAllBlocks)
RUN_EACH_DEVICE(TestVmoOutOfBounds)
RUN_EACH_DEVICE(TestVmoOneToMany)
RUN_EACH_DEVICE(TestVmoManyToOne)
// Disabled (See ZX-2112): RUN_EACH_DEVICE(TestVmoStall)
RUN_EACH(TestWriteAfterFvmExtend)
END_TEST_CASE(ZxcryptTest)

} // namespace
} // namespace testing
} // namespace zxcrypt