ztest.c revision 224177
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011 by Delphix. All rights reserved.
 */

/*
 * The objective of this program is to provide a DMU/ZAP/SPA stress test
 * that runs entirely in userland, is easy to use, and easy to extend.
 *
 * The overall design of the ztest program is as follows:
 *
 * (1) For each major functional area (e.g. adding vdevs to a pool,
 *     creating and destroying datasets, reading and writing objects, etc)
 *     we have a simple routine to test that functionality.  These
 *     individual routines do not have to do anything "stressful".
 *
 * (2) We turn these simple functionality tests into a stress test by
 *     running them all in parallel, with as many threads as desired,
 *     and spread across as many datasets, objects, and vdevs as desired.
 *
 * (3) While all this is happening, we inject faults into the pool to
 *     verify that self-healing data really works.
 *
 * (4) Every time we open a dataset, we change its checksum and compression
 *     functions.  Thus even individual objects vary from block to block
 *     in which checksum they use and whether they're compressed.
 *
 * (5) To verify that we never lose on-disk consistency after a crash,
 *     we run the entire test in a child of the main process.
 *     At random times, the child self-immolates with a SIGKILL.
 *     This is the software equivalent of pulling the power cord.
 *     The parent then runs the test again, using the existing
 *     storage pool, as many times as desired.
 *
 * (6) To verify that we don't have future leaks or temporal incursions,
 *     many of the functional tests record the transaction group number
 *     as part of their data.  When reading old data, they verify that
 *     the transaction group number is less than the current, open txg.
 *     If you add a new test, please do this if applicable.
 *
 * When run with no arguments, ztest runs for about five minutes and
 * produces no output if successful.  To get a little bit of information,
 * specify -V.  To get more information, specify -VV, and so on.
 *
 * To turn this into an overnight stress test, use -T to specify run time.
 *
 * You can ask for more vdevs [-v], datasets [-d], or threads [-t]
 * to increase the pool capacity, fanout, and overall stress level.
 *
 * The -N(okill) option will suppress kills, so each child runs to completion.
 * This can be useful when you're trying to distinguish temporal incursions
 * from plain old race conditions.
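 *
 * For example (illustrative only), an overnight stress run with extra
 * verbosity and a higher thread count might be invoked as:
 *
 *	ztest -VV -T 28800 -t 64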
 */

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/dmu.h>
#include <sys/txg.h>
#include <sys/dbuf.h>
#include <sys/zap.h>
#include <sys/dmu_objset.h>
#include <sys/poll.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/wait.h>
#include <sys/mman.h>
#include <sys/resource.h>
#include <sys/zio.h>
#include <sys/zil.h>
#include <sys/zil_impl.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_file.h>
#include <sys/spa_impl.h>
#include <sys/metaslab_impl.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_scan.h>
#include <sys/zio_checksum.h>
#include <sys/refcount.h>
#include <stdio.h>
#include <stdio_ext.h>
#include <stdlib.h>
#include <unistd.h>
#include <signal.h>
#include <umem.h>
#include <dlfcn.h>
#include <ctype.h>
#include <math.h>
#include <errno.h>
#include <sys/fs/zfs.h>
#include <libnvpair.h>

static char cmdname[] = "ztest";
static char *zopt_pool = cmdname;
static char *progname;

static uint64_t zopt_vdevs = 5;
static uint64_t zopt_vdevtime;
static int zopt_ashift = SPA_MINBLOCKSHIFT;
static int zopt_mirrors = 2;
static int zopt_raidz = 4;
static int zopt_raidz_parity = 1;
static size_t zopt_vdev_size = SPA_MINDEVSIZE;
static int zopt_datasets = 7;
static int zopt_threads = 23;
static uint64_t zopt_passtime = 60;	/* 60 seconds */
static uint64_t zopt_killrate = 70;	/* 70% kill rate */
static int zopt_verbose = 0;
static int zopt_init = 1;
static char *zopt_dir = "/tmp";
static uint64_t zopt_time = 300;	/* 5 minutes */
static uint64_t zopt_maxloops = 50;	/* max loops during spa_freeze() */

#define	BT_MAGIC	0x123456789abcdefULL
#define	MAXFAULTS() (MAX(zs->zs_mirrors, 1) * (zopt_raidz_parity + 1) - 1)

enum ztest_io_type {
	ZTEST_IO_WRITE_TAG,
	ZTEST_IO_WRITE_PATTERN,
	ZTEST_IO_WRITE_ZEROES,
	ZTEST_IO_TRUNCATE,
	ZTEST_IO_SETATTR,
	ZTEST_IO_TYPES
};

typedef struct ztest_block_tag {
	uint64_t	bt_magic;
	uint64_t	bt_objset;
	uint64_t	bt_object;
	uint64_t	bt_offset;
	uint64_t	bt_gen;
	uint64_t	bt_txg;
	uint64_t	bt_crtxg;
} ztest_block_tag_t;

typedef struct bufwad {
	uint64_t	bw_index;
	uint64_t	bw_txg;
	uint64_t	bw_data;
} bufwad_t;

/*
 * XXX -- fix zfs range locks to be generic so we can use them here.
 */
typedef enum {
	RL_READER,
	RL_WRITER,
	RL_APPEND
} rl_type_t;

typedef struct rll {
	void		*rll_writer;
	int		rll_readers;
	mutex_t		rll_lock;
	cond_t		rll_cv;
} rll_t;

typedef struct rl {
	uint64_t	rl_object;
	uint64_t	rl_offset;
	uint64_t	rl_size;
	rll_t		*rl_lock;
} rl_t;

#define	ZTEST_RANGE_LOCKS	64
#define	ZTEST_OBJECT_LOCKS	64

/*
 * Object descriptor.  Used as a template for object lookup/create/remove.
 */
typedef struct ztest_od {
	uint64_t	od_dir;
	uint64_t	od_object;
	dmu_object_type_t od_type;
	dmu_object_type_t od_crtype;
	uint64_t	od_blocksize;
	uint64_t	od_crblocksize;
	uint64_t	od_gen;
	uint64_t	od_crgen;
	char		od_name[MAXNAMELEN];
} ztest_od_t;

/*
 * Per-dataset state.
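 *
 * (These live in parent/child shared memory -- see ztest_shared_t below --
 * so, like the rest of the shared state, they survive a child kill;
 * cf. ztest_zil_commit().)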
 */
typedef struct ztest_ds {
	objset_t	*zd_os;
	zilog_t		*zd_zilog;
	uint64_t	zd_seq;
	ztest_od_t	*zd_od;		/* debugging aid */
	char		zd_name[MAXNAMELEN];
	mutex_t		zd_dirobj_lock;
	rll_t		zd_object_lock[ZTEST_OBJECT_LOCKS];
	rll_t		zd_range_lock[ZTEST_RANGE_LOCKS];
} ztest_ds_t;

/*
 * Per-iteration state.
 */
typedef void ztest_func_t(ztest_ds_t *zd, uint64_t id);

typedef struct ztest_info {
	ztest_func_t	*zi_func;	/* test function */
	uint64_t	zi_iters;	/* iterations per execution */
	uint64_t	*zi_interval;	/* execute every <interval> seconds */
	uint64_t	zi_call_count;	/* per-pass count */
	uint64_t	zi_call_time;	/* per-pass time */
	uint64_t	zi_call_next;	/* next time to call this function */
} ztest_info_t;

/*
 * Note: these aren't static because we want dladdr() to work.
 */
ztest_func_t ztest_dmu_read_write;
ztest_func_t ztest_dmu_write_parallel;
ztest_func_t ztest_dmu_object_alloc_free;
ztest_func_t ztest_dmu_commit_callbacks;
ztest_func_t ztest_zap;
ztest_func_t ztest_zap_parallel;
ztest_func_t ztest_zil_commit;
ztest_func_t ztest_dmu_read_write_zcopy;
ztest_func_t ztest_dmu_objset_create_destroy;
ztest_func_t ztest_dmu_prealloc;
ztest_func_t ztest_fzap;
ztest_func_t ztest_dmu_snapshot_create_destroy;
ztest_func_t ztest_dsl_prop_get_set;
ztest_func_t ztest_spa_prop_get_set;
ztest_func_t ztest_spa_create_destroy;
ztest_func_t ztest_fault_inject;
ztest_func_t ztest_ddt_repair;
ztest_func_t ztest_dmu_snapshot_hold;
ztest_func_t ztest_spa_rename;
ztest_func_t ztest_scrub;
ztest_func_t ztest_dsl_dataset_promote_busy;
ztest_func_t ztest_vdev_attach_detach;
ztest_func_t ztest_vdev_LUN_growth;
ztest_func_t ztest_vdev_add_remove;
ztest_func_t ztest_vdev_aux_add_remove;
ztest_func_t ztest_split_pool;

uint64_t zopt_always = 0ULL * NANOSEC;		/* all the time */
uint64_t zopt_incessant = 1ULL * NANOSEC / 10;	/* every 1/10 second */
uint64_t zopt_often = 1ULL * NANOSEC;		/* every second */
uint64_t zopt_sometimes = 10ULL * NANOSEC;	/* every 10 seconds */
uint64_t zopt_rarely = 60ULL * NANOSEC;		/* every 60 seconds */

ztest_info_t ztest_info[] = {
	{ ztest_dmu_read_write,			1,	&zopt_always	},
	{ ztest_dmu_write_parallel,		10,	&zopt_always	},
	{ ztest_dmu_object_alloc_free,		1,	&zopt_always	},
	{ ztest_dmu_commit_callbacks,		1,	&zopt_always	},
	{ ztest_zap,				30,	&zopt_always	},
	{ ztest_zap_parallel,			100,	&zopt_always	},
	{ ztest_split_pool,			1,	&zopt_always	},
	{ ztest_zil_commit,			1,	&zopt_incessant	},
	{ ztest_dmu_read_write_zcopy,		1,	&zopt_often	},
	{ ztest_dmu_objset_create_destroy,	1,	&zopt_often	},
	{ ztest_dsl_prop_get_set,		1,	&zopt_often	},
	{ ztest_spa_prop_get_set,		1,	&zopt_sometimes	},
#if 0
	{ ztest_dmu_prealloc,			1,	&zopt_sometimes	},
#endif
	{ ztest_fzap,				1,	&zopt_sometimes	},
	{ ztest_dmu_snapshot_create_destroy,	1,	&zopt_sometimes	},
	{ ztest_spa_create_destroy,		1,	&zopt_sometimes	},
	{ ztest_fault_inject,			1,	&zopt_sometimes	},
	{ ztest_ddt_repair,			1,	&zopt_sometimes	},
	{ ztest_dmu_snapshot_hold,		1,	&zopt_sometimes	},
	{ ztest_spa_rename,			1,	&zopt_rarely	},
	{ ztest_scrub,				1,	&zopt_rarely	},
	{ ztest_dsl_dataset_promote_busy,	1,	&zopt_rarely	},
	{ ztest_vdev_attach_detach,		1,	&zopt_rarely	},
	{ ztest_vdev_LUN_growth,		1,	&zopt_rarely	},
	{ ztest_vdev_add_remove,		1,	&zopt_vdevtime	},
	{ ztest_vdev_aux_add_remove,		1,	&zopt_vdevtime	},
};

#define	ZTEST_FUNCS	(sizeof (ztest_info) / sizeof (ztest_info_t))

/*
 * The following struct is used to hold a list of uncalled commit callbacks.
 * The callbacks are ordered by txg number.
 */
typedef struct ztest_cb_list {
	mutex_t	zcl_callbacks_lock;
	list_t	zcl_callbacks;
} ztest_cb_list_t;

/*
 * Stuff we need to share writably between parent and child.
 */
typedef struct ztest_shared {
	char		*zs_pool;
	spa_t		*zs_spa;
	hrtime_t	zs_proc_start;
	hrtime_t	zs_proc_stop;
	hrtime_t	zs_thread_start;
	hrtime_t	zs_thread_stop;
	hrtime_t	zs_thread_kill;
	uint64_t	zs_enospc_count;
	uint64_t	zs_vdev_next_leaf;
	uint64_t	zs_vdev_aux;
	uint64_t	zs_alloc;
	uint64_t	zs_space;
	mutex_t		zs_vdev_lock;
	rwlock_t	zs_name_lock;
	ztest_info_t	zs_info[ZTEST_FUNCS];
	uint64_t	zs_splits;
	uint64_t	zs_mirrors;
	ztest_ds_t	zs_zd[];
} ztest_shared_t;

#define	ID_PARALLEL	-1ULL

static char ztest_dev_template[] = "%s/%s.%llua";
static char ztest_aux_template[] = "%s/%s.%s.%llu";
ztest_shared_t *ztest_shared;
uint64_t *ztest_seq;

static int ztest_random_fd;
static int ztest_dump_core = 1;

static boolean_t ztest_exiting;

/* Global commit callback list */
static ztest_cb_list_t zcl;

extern uint64_t metaslab_gang_bang;
extern uint64_t metaslab_df_alloc_threshold;
static uint64_t metaslab_sz;

enum ztest_object {
	ZTEST_META_DNODE = 0,
	ZTEST_DIROBJ,
	ZTEST_OBJECTS
};

static void usage(boolean_t) __NORETURN;

/*
 * These libumem hooks provide a reasonable set of defaults for the allocator's
 * debugging facilities.
 */
const char *
_umem_debug_init(void)
{
	return ("default,verbose");	/* $UMEM_DEBUG setting */
}

const char *
_umem_logging_init(void)
{
	return ("fail,contents");	/* $UMEM_LOGGING setting */
}

#define	FATAL_MSG_SZ	1024

char *fatal_msg;

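/*
 * Format an error message, print it to stderr, and then either dump core
 * (the default, since ztest_dump_core is set) or exit with status 3.
 * With do_perror, strerror(errno) is appended to the message.
 */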
static void
fatal(int do_perror, char *message, ...)
{
	va_list args;
	int save_errno = errno;
	char buf[FATAL_MSG_SZ];

	(void) fflush(stdout);

	va_start(args, message);
	(void) sprintf(buf, "ztest: ");
	/* LINTED */
	(void) vsprintf(buf + strlen(buf), message, args);
	va_end(args);
	if (do_perror) {
		(void) snprintf(buf + strlen(buf), FATAL_MSG_SZ - strlen(buf),
		    ": %s", strerror(save_errno));
	}
	(void) fprintf(stderr, "%s\n", buf);
	fatal_msg = buf;			/* to ease debugging */
	if (ztest_dump_core)
		abort();
	exit(3);
}

static int
str2shift(const char *buf)
{
	const char *ends = "BKMGTPEZ";
	int i;

	if (buf[0] == '\0')
		return (0);
	for (i = 0; i < strlen(ends); i++) {
		if (toupper(buf[0]) == ends[i])
			break;
	}
	if (i == strlen(ends)) {
		(void) fprintf(stderr, "ztest: invalid bytes suffix: %s\n",
		    buf);
		usage(B_FALSE);
	}
	if (buf[1] == '\0' || (toupper(buf[1]) == 'B' && buf[2] == '\0')) {
		return (10*i);
	}
	(void) fprintf(stderr, "ztest: invalid bytes suffix: %s\n", buf);
	usage(B_FALSE);
	/* NOTREACHED */
}

static uint64_t
nicenumtoull(const char *buf)
{
	char *end;
	uint64_t val;

	val = strtoull(buf, &end, 0);
	if (end == buf) {
		(void) fprintf(stderr, "ztest: bad numeric value: %s\n", buf);
		usage(B_FALSE);
	} else if (end[0] == '.') {
		double fval = strtod(buf, &end);
		fval *= pow(2, str2shift(end));
		if (fval > UINT64_MAX) {
			(void) fprintf(stderr, "ztest: value too large: %s\n",
			    buf);
			usage(B_FALSE);
		}
		val = (uint64_t)fval;
	} else {
		int shift = str2shift(end);
		if (shift >= 64 || (val << shift) >> shift != val) {
			(void) fprintf(stderr, "ztest: value too large: %s\n",
			    buf);
			usage(B_FALSE);
		}
		val <<= shift;
	}
	return (val);
}

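/*
 * Worked examples for the suffix parsing above (illustrative):
 * str2shift("K") == 10 and str2shift("G") == 30, so
 * nicenumtoull("8K") == 8192 and nicenumtoull("1.5G") == 1610612736.
 */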
static void
usage(boolean_t requested)
{
	char nice_vdev_size[10];
	char nice_gang_bang[10];
	FILE *fp = requested ? stdout : stderr;

	nicenum(zopt_vdev_size, nice_vdev_size);
	nicenum(metaslab_gang_bang, nice_gang_bang);

	(void) fprintf(fp, "Usage: %s\n"
	    "\t[-v vdevs (default: %llu)]\n"
	    "\t[-s size_of_each_vdev (default: %s)]\n"
	    "\t[-a alignment_shift (default: %d)] use 0 for random\n"
	    "\t[-m mirror_copies (default: %d)]\n"
	    "\t[-r raidz_disks (default: %d)]\n"
	    "\t[-R raidz_parity (default: %d)]\n"
	    "\t[-d datasets (default: %d)]\n"
	    "\t[-t threads (default: %d)]\n"
	    "\t[-g gang_block_threshold (default: %s)]\n"
	    "\t[-i init_count (default: %d)] initialize pool i times\n"
	    "\t[-k kill_percentage (default: %llu%%)]\n"
	    "\t[-p pool_name (default: %s)]\n"
	    "\t[-f dir (default: %s)] file directory for vdev files\n"
	    "\t[-V] verbose (use multiple times for ever more blather)\n"
	    "\t[-E] use existing pool instead of creating new one\n"
	    "\t[-T time (default: %llu sec)] total run time\n"
	    "\t[-F freezeloops (default: %llu)] max loops in spa_freeze()\n"
	    "\t[-P passtime (default: %llu sec)] time per pass\n"
	    "\t[-h] (print help)\n"
	    "",
	    cmdname,
	    (u_longlong_t)zopt_vdevs,			/* -v */
	    nice_vdev_size,				/* -s */
	    zopt_ashift,				/* -a */
	    zopt_mirrors,				/* -m */
	    zopt_raidz,					/* -r */
	    zopt_raidz_parity,				/* -R */
	    zopt_datasets,				/* -d */
	    zopt_threads,				/* -t */
	    nice_gang_bang,				/* -g */
	    zopt_init,					/* -i */
	    (u_longlong_t)zopt_killrate,		/* -k */
	    zopt_pool,					/* -p */
	    zopt_dir,					/* -f */
	    (u_longlong_t)zopt_time,			/* -T */
	    (u_longlong_t)zopt_maxloops,		/* -F */
	    (u_longlong_t)zopt_passtime);		/* -P */
	exit(requested ? 0 : 1);
}

static void
process_options(int argc, char **argv)
{
	int opt;
	uint64_t value;

	/* Remember program name. */
	progname = argv[0];

	/* By default, test gang blocks for blocks 32K and greater */
	metaslab_gang_bang = 32 << 10;

	while ((opt = getopt(argc, argv,
	    "v:s:a:m:r:R:d:t:g:i:k:p:f:VET:P:hF:")) != EOF) {
		value = 0;
		switch (opt) {
		case 'v':
		case 's':
		case 'a':
		case 'm':
		case 'r':
		case 'R':
		case 'd':
		case 't':
		case 'g':
		case 'i':
		case 'k':
		case 'T':
		case 'P':
		case 'F':
			value = nicenumtoull(optarg);
		}
		switch (opt) {
		case 'v':
			zopt_vdevs = value;
			break;
		case 's':
			zopt_vdev_size = MAX(SPA_MINDEVSIZE, value);
			break;
		case 'a':
			zopt_ashift = value;
			break;
		case 'm':
			zopt_mirrors = value;
			break;
		case 'r':
			zopt_raidz = MAX(1, value);
			break;
		case 'R':
			zopt_raidz_parity = MIN(MAX(value, 1), 3);
			break;
		case 'd':
			zopt_datasets = MAX(1, value);
			break;
		case 't':
			zopt_threads = MAX(1, value);
			break;
		case 'g':
			metaslab_gang_bang = MAX(SPA_MINBLOCKSIZE << 1, value);
			break;
		case 'i':
			zopt_init = value;
			break;
		case 'k':
			zopt_killrate = value;
			break;
		case 'p':
			zopt_pool = strdup(optarg);
			break;
		case 'f':
			zopt_dir = strdup(optarg);
			break;
		case 'V':
			zopt_verbose++;
			break;
		case 'E':
			zopt_init = 0;
			break;
		case 'T':
			zopt_time = value;
			break;
		case 'P':
			zopt_passtime = MAX(1, value);
			break;
		case 'F':
			zopt_maxloops = MAX(1, value);
			break;
		case 'h':
			usage(B_TRUE);
			break;
		case '?':
		default:
			usage(B_FALSE);
			break;
		}
	}

	zopt_raidz_parity = MIN(zopt_raidz_parity, zopt_raidz - 1);

	zopt_vdevtime = (zopt_vdevs > 0 ? zopt_time * NANOSEC / zopt_vdevs :
	    UINT64_MAX >> 2);
}

static void
ztest_kill(ztest_shared_t *zs)
{
	zs->zs_alloc = metaslab_class_get_alloc(spa_normal_class(zs->zs_spa));
	zs->zs_space = metaslab_class_get_space(spa_normal_class(zs->zs_spa));
	(void) kill(getpid(), SIGKILL);
}

static uint64_t
ztest_random(uint64_t range)
{
	uint64_t r;

	if (range == 0)
		return (0);

	if (read(ztest_random_fd, &r, sizeof (r)) != sizeof (r))
		fatal(1, "short read from /dev/urandom");

	return (r % range);
}

/* ARGSUSED */
static void
ztest_record_enospc(const char *s)
{
	ztest_shared->zs_enospc_count++;
}

static uint64_t
ztest_get_ashift(void)
{
	if (zopt_ashift == 0)
		return (SPA_MINBLOCKSHIFT + ztest_random(3));
	return (zopt_ashift);
}

static nvlist_t *
make_vdev_file(char *path, char *aux, size_t size, uint64_t ashift)
{
	char pathbuf[MAXPATHLEN];
	uint64_t vdev;
	nvlist_t *file;

	if (ashift == 0)
		ashift = ztest_get_ashift();

	if (path == NULL) {
		path = pathbuf;

		if (aux != NULL) {
			vdev = ztest_shared->zs_vdev_aux;
			(void) sprintf(path, ztest_aux_template,
			    zopt_dir, zopt_pool, aux, vdev);
		} else {
			vdev = ztest_shared->zs_vdev_next_leaf++;
			(void) sprintf(path, ztest_dev_template,
			    zopt_dir, zopt_pool, vdev);
		}
	}

	if (size != 0) {
		int fd = open(path, O_RDWR | O_CREAT | O_TRUNC, 0666);
		if (fd == -1)
			fatal(1, "can't open %s", path);
		if (ftruncate(fd, size) != 0)
			fatal(1, "can't ftruncate %s", path);
		(void) close(fd);
	}

	VERIFY(nvlist_alloc(&file, NV_UNIQUE_NAME, 0) == 0);
	VERIFY(nvlist_add_string(file, ZPOOL_CONFIG_TYPE,
	    VDEV_TYPE_FILE) == 0);
	VERIFY(nvlist_add_string(file, ZPOOL_CONFIG_PATH, path) == 0);
	VERIFY(nvlist_add_uint64(file, ZPOOL_CONFIG_ASHIFT, ashift) == 0);

	return (file);
}

static nvlist_t *
make_vdev_raidz(char *path, char *aux, size_t size, uint64_t ashift, int r)
{
	nvlist_t *raidz, **child;
	int c;

	if (r < 2)
		return (make_vdev_file(path, aux, size, ashift));
	child = umem_alloc(r * sizeof (nvlist_t *), UMEM_NOFAIL);

	for (c = 0; c < r; c++)
		child[c] = make_vdev_file(path, aux, size, ashift);

	VERIFY(nvlist_alloc(&raidz, NV_UNIQUE_NAME, 0) == 0);
	VERIFY(nvlist_add_string(raidz, ZPOOL_CONFIG_TYPE,
	    VDEV_TYPE_RAIDZ) == 0);
	VERIFY(nvlist_add_uint64(raidz, ZPOOL_CONFIG_NPARITY,
	    zopt_raidz_parity) == 0);
	VERIFY(nvlist_add_nvlist_array(raidz, ZPOOL_CONFIG_CHILDREN,
	    child, r) == 0);

	for (c = 0; c < r; c++)
		nvlist_free(child[c]);

	umem_free(child, r * sizeof (nvlist_t *));

	return (raidz);
}

static nvlist_t *
make_vdev_mirror(char *path, char *aux, size_t size, uint64_t ashift,
    int r, int m)
{
	nvlist_t *mirror, **child;
	int c;

	if (m < 1)
		return (make_vdev_raidz(path, aux, size, ashift, r));

	child = umem_alloc(m * sizeof (nvlist_t *), UMEM_NOFAIL);

	for (c = 0; c < m; c++)
		child[c] = make_vdev_raidz(path, aux, size, ashift, r);

	VERIFY(nvlist_alloc(&mirror, NV_UNIQUE_NAME, 0) == 0);
	VERIFY(nvlist_add_string(mirror, ZPOOL_CONFIG_TYPE,
	    VDEV_TYPE_MIRROR) == 0);
	VERIFY(nvlist_add_nvlist_array(mirror, ZPOOL_CONFIG_CHILDREN,
	    child, m) == 0);

	for (c = 0; c < m; c++)
		nvlist_free(child[c]);

	umem_free(child, m * sizeof (nvlist_t *));

	return (mirror);
}

static nvlist_t *
make_vdev_root(char *path, char *aux, size_t size, uint64_t ashift,
    int log, int r, int m, int t)
{
	nvlist_t *root, **child;
	int c;

	ASSERT(t > 0);

	child = umem_alloc(t * sizeof (nvlist_t *), UMEM_NOFAIL);

	for (c = 0; c < t; c++) {
		child[c] = make_vdev_mirror(path, aux, size, ashift, r, m);
		VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
		    log) == 0);
	}

	VERIFY(nvlist_alloc(&root, NV_UNIQUE_NAME, 0) == 0);
	VERIFY(nvlist_add_string(root, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT) == 0);
	VERIFY(nvlist_add_nvlist_array(root, aux ?
	    aux : ZPOOL_CONFIG_CHILDREN,
	    child, t) == 0);

	for (c = 0; c < t; c++)
		nvlist_free(child[c]);

	umem_free(child, t * sizeof (nvlist_t *));

	return (root);
}

static int
ztest_random_blocksize(void)
{
	return (1 << (SPA_MINBLOCKSHIFT +
	    ztest_random(SPA_MAXBLOCKSHIFT - SPA_MINBLOCKSHIFT + 1)));
}

static int
ztest_random_ibshift(void)
{
	return (DN_MIN_INDBLKSHIFT +
	    ztest_random(DN_MAX_INDBLKSHIFT - DN_MIN_INDBLKSHIFT + 1));
}

static uint64_t
ztest_random_vdev_top(spa_t *spa, boolean_t log_ok)
{
	uint64_t top;
	vdev_t *rvd = spa->spa_root_vdev;
	vdev_t *tvd;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);

	do {
		top = ztest_random(rvd->vdev_children);
		tvd = rvd->vdev_child[top];
	} while (tvd->vdev_ishole || (tvd->vdev_islog && !log_ok) ||
	    tvd->vdev_mg == NULL || tvd->vdev_mg->mg_class == NULL);

	return (top);
}

static uint64_t
ztest_random_dsl_prop(zfs_prop_t prop)
{
	uint64_t value;

	do {
		value = zfs_prop_random_value(prop, ztest_random(-1ULL));
	} while (prop == ZFS_PROP_CHECKSUM && value == ZIO_CHECKSUM_OFF);

	return (value);
}

static int
ztest_dsl_prop_set_uint64(char *osname, zfs_prop_t prop, uint64_t value,
    boolean_t inherit)
{
	const char *propname = zfs_prop_to_name(prop);
	const char *valname;
	char setpoint[MAXPATHLEN];
	uint64_t curval;
	int error;

	error = dsl_prop_set(osname, propname,
	    (inherit ? ZPROP_SRC_NONE : ZPROP_SRC_LOCAL),
	    sizeof (value), 1, &value);

	if (error == ENOSPC) {
		ztest_record_enospc(FTAG);
		return (error);
	}
	ASSERT3U(error, ==, 0);

	VERIFY3U(dsl_prop_get(osname, propname, sizeof (curval),
	    1, &curval, setpoint), ==, 0);

	if (zopt_verbose >= 6) {
		VERIFY(zfs_prop_index_to_string(prop, curval, &valname) == 0);
		(void) printf("%s %s = %s at '%s'\n",
		    osname, propname, valname, setpoint);
	}

	return (error);
}

static int
ztest_spa_prop_set_uint64(ztest_shared_t *zs, zpool_prop_t prop, uint64_t value)
{
	spa_t *spa = zs->zs_spa;
	nvlist_t *props = NULL;
	int error;

	VERIFY(nvlist_alloc(&props, NV_UNIQUE_NAME, 0) == 0);
	VERIFY(nvlist_add_uint64(props, zpool_prop_to_name(prop), value) == 0);

	error = spa_prop_set(spa, props);

	nvlist_free(props);

	if (error == ENOSPC) {
		ztest_record_enospc(FTAG);
		return (error);
	}
	ASSERT3U(error, ==, 0);

	return (error);
}

static void
ztest_rll_init(rll_t *rll)
{
	rll->rll_writer = NULL;
	rll->rll_readers = 0;
	VERIFY(_mutex_init(&rll->rll_lock, USYNC_THREAD, NULL) == 0);
	VERIFY(cond_init(&rll->rll_cv, USYNC_THREAD, NULL) == 0);
}

static void
ztest_rll_destroy(rll_t *rll)
{
	ASSERT(rll->rll_writer == NULL);
	ASSERT(rll->rll_readers == 0);
	VERIFY(_mutex_destroy(&rll->rll_lock) == 0);
	VERIFY(cond_destroy(&rll->rll_cv) == 0);
}

static void
ztest_rll_lock(rll_t *rll, rl_type_t type)
{
	VERIFY(mutex_lock(&rll->rll_lock) == 0);

	if (type == RL_READER) {
		while (rll->rll_writer != NULL)
			(void) cond_wait(&rll->rll_cv, &rll->rll_lock);
		rll->rll_readers++;
	} else {
		while (rll->rll_writer != NULL || rll->rll_readers)
			(void) cond_wait(&rll->rll_cv, &rll->rll_lock);
		rll->rll_writer = curthread;
	}

	VERIFY(mutex_unlock(&rll->rll_lock) == 0);
}

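/*
 * Release the lock acquired above: clear the writer or drop one reader,
 * then wake all waiters once the lock is completely free.
 */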
static void
ztest_rll_unlock(rll_t *rll)
{
	VERIFY(mutex_lock(&rll->rll_lock) == 0);

	if (rll->rll_writer) {
		ASSERT(rll->rll_readers == 0);
		rll->rll_writer = NULL;
	} else {
		ASSERT(rll->rll_readers != 0);
		ASSERT(rll->rll_writer == NULL);
		rll->rll_readers--;
	}

	if (rll->rll_writer == NULL && rll->rll_readers == 0)
		VERIFY(cond_broadcast(&rll->rll_cv) == 0);

	VERIFY(mutex_unlock(&rll->rll_lock) == 0);
}

static void
ztest_object_lock(ztest_ds_t *zd, uint64_t object, rl_type_t type)
{
	rll_t *rll = &zd->zd_object_lock[object & (ZTEST_OBJECT_LOCKS - 1)];

	ztest_rll_lock(rll, type);
}

static void
ztest_object_unlock(ztest_ds_t *zd, uint64_t object)
{
	rll_t *rll = &zd->zd_object_lock[object & (ZTEST_OBJECT_LOCKS - 1)];

	ztest_rll_unlock(rll);
}

static rl_t *
ztest_range_lock(ztest_ds_t *zd, uint64_t object, uint64_t offset,
    uint64_t size, rl_type_t type)
{
	uint64_t hash = object ^ (offset % (ZTEST_RANGE_LOCKS + 1));
	rll_t *rll = &zd->zd_range_lock[hash & (ZTEST_RANGE_LOCKS - 1)];
	rl_t *rl;

	rl = umem_alloc(sizeof (*rl), UMEM_NOFAIL);
	rl->rl_object = object;
	rl->rl_offset = offset;
	rl->rl_size = size;
	rl->rl_lock = rll;

	ztest_rll_lock(rll, type);

	return (rl);
}

static void
ztest_range_unlock(rl_t *rl)
{
	rll_t *rll = rl->rl_lock;

	ztest_rll_unlock(rll);

	umem_free(rl, sizeof (*rl));
}

static void
ztest_zd_init(ztest_ds_t *zd, objset_t *os)
{
	zd->zd_os = os;
	zd->zd_zilog = dmu_objset_zil(os);
	zd->zd_seq = 0;
	dmu_objset_name(os, zd->zd_name);

	VERIFY(_mutex_init(&zd->zd_dirobj_lock, USYNC_THREAD, NULL) == 0);

	for (int l = 0; l < ZTEST_OBJECT_LOCKS; l++)
		ztest_rll_init(&zd->zd_object_lock[l]);

	for (int l = 0; l < ZTEST_RANGE_LOCKS; l++)
		ztest_rll_init(&zd->zd_range_lock[l]);
}

static void
ztest_zd_fini(ztest_ds_t *zd)
{
	VERIFY(_mutex_destroy(&zd->zd_dirobj_lock) == 0);

	for (int l = 0; l < ZTEST_OBJECT_LOCKS; l++)
		ztest_rll_destroy(&zd->zd_object_lock[l]);

	for (int l = 0; l < ZTEST_RANGE_LOCKS; l++)
		ztest_rll_destroy(&zd->zd_range_lock[l]);
}

#define	TXG_MIGHTWAIT	(ztest_random(10) == 0 ? TXG_NOWAIT : TXG_WAIT)

static uint64_t
ztest_tx_assign(dmu_tx_t *tx, uint64_t txg_how, const char *tag)
{
	uint64_t txg;
	int error;

	/*
	 * Attempt to assign tx to some transaction group.
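	 * On failure the tx is aborted and 0 is returned: ERESTART
	 * (possible only with TXG_NOWAIT) just means the caller should
	 * back off and retry, while ENOSPC is recorded in the shared
	 * enospc count.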
	 */
	error = dmu_tx_assign(tx, txg_how);
	if (error) {
		if (error == ERESTART) {
			ASSERT(txg_how == TXG_NOWAIT);
			dmu_tx_wait(tx);
		} else {
			ASSERT3U(error, ==, ENOSPC);
			ztest_record_enospc(tag);
		}
		dmu_tx_abort(tx);
		return (0);
	}
	txg = dmu_tx_get_txg(tx);
	ASSERT(txg != 0);
	return (txg);
}

static void
ztest_pattern_set(void *buf, uint64_t size, uint64_t value)
{
	uint64_t *ip = buf;
	uint64_t *ip_end = (uint64_t *)((uintptr_t)buf + (uintptr_t)size);

	while (ip < ip_end)
		*ip++ = value;
}

static boolean_t
ztest_pattern_match(void *buf, uint64_t size, uint64_t value)
{
	uint64_t *ip = buf;
	uint64_t *ip_end = (uint64_t *)((uintptr_t)buf + (uintptr_t)size);
	uint64_t diff = 0;

	while (ip < ip_end)
		diff |= (value - *ip++);

	return (diff == 0);
}

static void
ztest_bt_generate(ztest_block_tag_t *bt, objset_t *os, uint64_t object,
    uint64_t offset, uint64_t gen, uint64_t txg, uint64_t crtxg)
{
	bt->bt_magic = BT_MAGIC;
	bt->bt_objset = dmu_objset_id(os);
	bt->bt_object = object;
	bt->bt_offset = offset;
	bt->bt_gen = gen;
	bt->bt_txg = txg;
	bt->bt_crtxg = crtxg;
}

static void
ztest_bt_verify(ztest_block_tag_t *bt, objset_t *os, uint64_t object,
    uint64_t offset, uint64_t gen, uint64_t txg, uint64_t crtxg)
{
	ASSERT(bt->bt_magic == BT_MAGIC);
	ASSERT(bt->bt_objset == dmu_objset_id(os));
	ASSERT(bt->bt_object == object);
	ASSERT(bt->bt_offset == offset);
	ASSERT(bt->bt_gen <= gen);
	ASSERT(bt->bt_txg <= txg);
	ASSERT(bt->bt_crtxg == crtxg);
}

static ztest_block_tag_t *
ztest_bt_bonus(dmu_buf_t *db)
{
	dmu_object_info_t doi;
	ztest_block_tag_t *bt;

	dmu_object_info_from_db(db, &doi);
	ASSERT3U(doi.doi_bonus_size, <=, db->db_size);
	ASSERT3U(doi.doi_bonus_size, >=, sizeof (*bt));
	bt = (void *)((char *)db->db_data + doi.doi_bonus_size - sizeof (*bt));

	return (bt);
}

/*
 * ZIL logging ops
 */

#define	lrz_type	lr_mode
#define	lrz_blocksize	lr_uid
#define	lrz_ibshift	lr_gid
#define	lrz_bonustype	lr_rdev
#define	lrz_bonuslen	lr_crtime[1]

static void
ztest_log_create(ztest_ds_t *zd, dmu_tx_t *tx, lr_create_t *lr)
{
	char *name = (void *)(lr + 1);		/* name follows lr */
	size_t namesize = strlen(name) + 1;
	itx_t *itx;

	if (zil_replaying(zd->zd_zilog, tx))
		return;

	itx = zil_itx_create(TX_CREATE, sizeof (*lr) + namesize);
	bcopy(&lr->lr_common + 1, &itx->itx_lr + 1,
	    sizeof (*lr) + namesize - sizeof (lr_t));

	zil_itx_assign(zd->zd_zilog, itx, tx);
}

static void
ztest_log_remove(ztest_ds_t *zd, dmu_tx_t *tx, lr_remove_t *lr, uint64_t object)
{
	char *name = (void *)(lr + 1);		/* name follows lr */
	size_t namesize = strlen(name) + 1;
	itx_t *itx;

	if (zil_replaying(zd->zd_zilog, tx))
		return;

	itx = zil_itx_create(TX_REMOVE, sizeof (*lr) + namesize);
	bcopy(&lr->lr_common + 1, &itx->itx_lr + 1,
	    sizeof (*lr) + namesize - sizeof (lr_t));

	itx->itx_oid = object;
	zil_itx_assign(zd->zd_zilog, itx, tx);
}

static void
ztest_log_write(ztest_ds_t *zd, dmu_tx_t *tx, lr_write_t *lr)
{
	itx_t *itx;
	itx_wr_state_t write_state = ztest_random(WR_NUM_STATES);

	if (zil_replaying(zd->zd_zilog, tx))
		return;

	if (lr->lr_length > ZIL_MAX_LOG_DATA)
		write_state = WR_INDIRECT;

	itx = zil_itx_create(TX_WRITE,
	    sizeof (*lr) + (write_state == WR_COPIED ? lr->lr_length : 0));

	if (write_state == WR_COPIED &&
	    dmu_read(zd->zd_os, lr->lr_foid, lr->lr_offset, lr->lr_length,
	    ((lr_write_t *)&itx->itx_lr) + 1, DMU_READ_NO_PREFETCH) != 0) {
		zil_itx_destroy(itx);
		itx = zil_itx_create(TX_WRITE, sizeof (*lr));
		write_state = WR_NEED_COPY;
	}
	itx->itx_private = zd;
	itx->itx_wr_state = write_state;
	itx->itx_sync = (ztest_random(8) == 0);
	itx->itx_sod += (write_state == WR_NEED_COPY ? lr->lr_length : 0);

	bcopy(&lr->lr_common + 1, &itx->itx_lr + 1,
	    sizeof (*lr) - sizeof (lr_t));

	zil_itx_assign(zd->zd_zilog, itx, tx);
}

static void
ztest_log_truncate(ztest_ds_t *zd, dmu_tx_t *tx, lr_truncate_t *lr)
{
	itx_t *itx;

	if (zil_replaying(zd->zd_zilog, tx))
		return;

	itx = zil_itx_create(TX_TRUNCATE, sizeof (*lr));
	bcopy(&lr->lr_common + 1, &itx->itx_lr + 1,
	    sizeof (*lr) - sizeof (lr_t));

	itx->itx_sync = B_FALSE;
	zil_itx_assign(zd->zd_zilog, itx, tx);
}

static void
ztest_log_setattr(ztest_ds_t *zd, dmu_tx_t *tx, lr_setattr_t *lr)
{
	itx_t *itx;

	if (zil_replaying(zd->zd_zilog, tx))
		return;

	itx = zil_itx_create(TX_SETATTR, sizeof (*lr));
	bcopy(&lr->lr_common + 1, &itx->itx_lr + 1,
	    sizeof (*lr) - sizeof (lr_t));

	itx->itx_sync = B_FALSE;
	zil_itx_assign(zd->zd_zilog, itx, tx);
}

/*
 * ZIL replay ops
 */
static int
ztest_replay_create(ztest_ds_t *zd, lr_create_t *lr, boolean_t byteswap)
{
	char *name = (void *)(lr + 1);		/* name follows lr */
	objset_t *os = zd->zd_os;
	ztest_block_tag_t *bbt;
	dmu_buf_t *db;
	dmu_tx_t *tx;
	uint64_t txg;
	int error = 0;

	if (byteswap)
		byteswap_uint64_array(lr, sizeof (*lr));

	ASSERT(lr->lr_doid == ZTEST_DIROBJ);
	ASSERT(name[0] != '\0');

	tx = dmu_tx_create(os);

	dmu_tx_hold_zap(tx, lr->lr_doid, B_TRUE, name);

	if (lr->lrz_type == DMU_OT_ZAP_OTHER) {
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
	} else {
		dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);
	}

	txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
	if (txg == 0)
		return (ENOSPC);

	ASSERT(dmu_objset_zil(os)->zl_replay == !!lr->lr_foid);

	if (lr->lrz_type == DMU_OT_ZAP_OTHER) {
		if (lr->lr_foid == 0) {
			lr->lr_foid = zap_create(os,
			    lr->lrz_type, lr->lrz_bonustype,
			    lr->lrz_bonuslen, tx);
		} else {
			error = zap_create_claim(os, lr->lr_foid,
			    lr->lrz_type, lr->lrz_bonustype,
			    lr->lrz_bonuslen, tx);
		}
	} else {
		if (lr->lr_foid == 0) {
			lr->lr_foid = dmu_object_alloc(os,
			    lr->lrz_type, 0, lr->lrz_bonustype,
			    lr->lrz_bonuslen, tx);
		} else {
			error = dmu_object_claim(os, lr->lr_foid,
			    lr->lrz_type, 0, lr->lrz_bonustype,
			    lr->lrz_bonuslen, tx);
		}
	}

	if (error) {
		ASSERT3U(error, ==, EEXIST);
		ASSERT(zd->zd_zilog->zl_replay);
		dmu_tx_commit(tx);
		return (error);
	}

	ASSERT(lr->lr_foid != 0);

	if (lr->lrz_type != DMU_OT_ZAP_OTHER)
		VERIFY3U(0, ==, dmu_object_set_blocksize(os, lr->lr_foid,
		    lr->lrz_blocksize, lr->lrz_ibshift, tx));

	VERIFY3U(0, ==, dmu_bonus_hold(os, lr->lr_foid, FTAG, &db));
	bbt = ztest_bt_bonus(db);
	dmu_buf_will_dirty(db, tx);
	ztest_bt_generate(bbt, os, lr->lr_foid, -1ULL, lr->lr_gen, txg, txg);
	dmu_buf_rele(db, FTAG);

	VERIFY3U(0, ==, zap_add(os, lr->lr_doid, name, sizeof (uint64_t), 1,
	    &lr->lr_foid, tx));

	(void) ztest_log_create(zd, tx, lr);

	dmu_tx_commit(tx);

	return (0);
}

static int
ztest_replay_remove(ztest_ds_t *zd, lr_remove_t *lr, boolean_t byteswap)
{
	char *name = (void *)(lr + 1);		/* name follows lr */
	objset_t *os = zd->zd_os;
	dmu_object_info_t doi;
	dmu_tx_t *tx;
	uint64_t object, txg;

	if (byteswap)
		byteswap_uint64_array(lr, sizeof (*lr));

	ASSERT(lr->lr_doid == ZTEST_DIROBJ);
	ASSERT(name[0] != '\0');

	VERIFY3U(0, ==,
	    zap_lookup(os, lr->lr_doid, name, sizeof (object), 1, &object));
	ASSERT(object != 0);

	ztest_object_lock(zd, object, RL_WRITER);

	VERIFY3U(0, ==, dmu_object_info(os, object, &doi));

	tx = dmu_tx_create(os);

	dmu_tx_hold_zap(tx, lr->lr_doid, B_FALSE, name);
	dmu_tx_hold_free(tx, object, 0, DMU_OBJECT_END);

	txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
	if (txg == 0) {
		ztest_object_unlock(zd, object);
		return (ENOSPC);
	}

	if (doi.doi_type == DMU_OT_ZAP_OTHER) {
		VERIFY3U(0, ==, zap_destroy(os, object, tx));
	} else {
		VERIFY3U(0, ==, dmu_object_free(os, object, tx));
	}

	VERIFY3U(0, ==, zap_remove(os, lr->lr_doid, name, tx));

	(void) ztest_log_remove(zd, tx, lr, object);

	dmu_tx_commit(tx);

	ztest_object_unlock(zd, object);

	return (0);
}

static int
ztest_replay_write(ztest_ds_t *zd, lr_write_t *lr, boolean_t byteswap)
{
	objset_t *os = zd->zd_os;
	void *data = lr + 1;			/* data follows lr */
	uint64_t offset, length;
	ztest_block_tag_t *bt = data;
	ztest_block_tag_t *bbt;
	uint64_t gen, txg, lrtxg, crtxg;
	dmu_object_info_t doi;
	dmu_tx_t *tx;
	dmu_buf_t *db;
	arc_buf_t *abuf = NULL;
	rl_t *rl;

	if (byteswap)
		byteswap_uint64_array(lr, sizeof (*lr));

	offset = lr->lr_offset;
	length = lr->lr_length;

	/* If it's a dmu_sync() block, write the whole block */
	if (lr->lr_common.lrc_reclen == sizeof (lr_write_t)) {
		uint64_t blocksize = BP_GET_LSIZE(&lr->lr_blkptr);
		if (length < blocksize) {
			offset -= offset % blocksize;
			length = blocksize;
		}
	}

	if (bt->bt_magic == BSWAP_64(BT_MAGIC))
		byteswap_uint64_array(bt, sizeof (*bt));

	if (bt->bt_magic != BT_MAGIC)
		bt = NULL;

	ztest_object_lock(zd, lr->lr_foid, RL_READER);
	rl = ztest_range_lock(zd, lr->lr_foid, offset, length, RL_WRITER);

	VERIFY3U(0, ==, dmu_bonus_hold(os, lr->lr_foid, FTAG, &db));

	dmu_object_info_from_db(db, &doi);

	bbt = ztest_bt_bonus(db);
	ASSERT3U(bbt->bt_magic, ==, BT_MAGIC);
	gen = bbt->bt_gen;
	crtxg = bbt->bt_crtxg;
	lrtxg = lr->lr_common.lrc_txg;

	tx = dmu_tx_create(os);

	dmu_tx_hold_write(tx, lr->lr_foid, offset, length);

	if (ztest_random(8) == 0 && length == doi.doi_data_block_size &&
	    P2PHASE(offset, length) == 0)
		abuf = dmu_request_arcbuf(db, length);

	txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
	if (txg == 0) {
		if (abuf != NULL)
			dmu_return_arcbuf(abuf);
		dmu_buf_rele(db, FTAG);
		ztest_range_unlock(rl);
		ztest_object_unlock(zd, lr->lr_foid);
		return (ENOSPC);
	}

	if (bt != NULL) {
		/*
		 * Usually, verify the old data before writing new data --
		 * but not always, because we also want to verify correct
		 * behavior when the data was not recently read into cache.
		 */
		ASSERT(offset % doi.doi_data_block_size == 0);
		if (ztest_random(4) != 0) {
			int prefetch = ztest_random(2) ?
			    DMU_READ_PREFETCH : DMU_READ_NO_PREFETCH;
			ztest_block_tag_t rbt;

			VERIFY(dmu_read(os, lr->lr_foid, offset,
			    sizeof (rbt), &rbt, prefetch) == 0);
			if (rbt.bt_magic == BT_MAGIC) {
				ztest_bt_verify(&rbt, os, lr->lr_foid,
				    offset, gen, txg, crtxg);
			}
		}

		/*
		 * Writes can appear to be newer than the bonus buffer because
		 * the ztest_get_data() callback does a dmu_read() of the
		 * open-context data, which may be different than the data
		 * as it was when the write was generated.
		 */
		if (zd->zd_zilog->zl_replay) {
			ztest_bt_verify(bt, os, lr->lr_foid, offset,
			    MAX(gen, bt->bt_gen), MAX(txg, lrtxg),
			    bt->bt_crtxg);
		}

		/*
		 * Set the bt's gen/txg to the bonus buffer's gen/txg
		 * so that all of the usual ASSERTs will work.
		 */
		ztest_bt_generate(bt, os, lr->lr_foid, offset, gen, txg, crtxg);
	}

	if (abuf == NULL) {
		dmu_write(os, lr->lr_foid, offset, length, data, tx);
	} else {
		bcopy(data, abuf->b_data, length);
		dmu_assign_arcbuf(db, offset, abuf, tx);
	}

	(void) ztest_log_write(zd, tx, lr);

	dmu_buf_rele(db, FTAG);

	dmu_tx_commit(tx);

	ztest_range_unlock(rl);
	ztest_object_unlock(zd, lr->lr_foid);

	return (0);
}

static int
ztest_replay_truncate(ztest_ds_t *zd, lr_truncate_t *lr, boolean_t byteswap)
{
	objset_t *os = zd->zd_os;
	dmu_tx_t *tx;
	uint64_t txg;
	rl_t *rl;

	if (byteswap)
		byteswap_uint64_array(lr, sizeof (*lr));

	ztest_object_lock(zd, lr->lr_foid, RL_READER);
	rl = ztest_range_lock(zd, lr->lr_foid, lr->lr_offset, lr->lr_length,
	    RL_WRITER);

	tx = dmu_tx_create(os);

	dmu_tx_hold_free(tx, lr->lr_foid, lr->lr_offset, lr->lr_length);

	txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
	if (txg == 0) {
		ztest_range_unlock(rl);
		ztest_object_unlock(zd, lr->lr_foid);
		return (ENOSPC);
	}

	VERIFY(dmu_free_range(os, lr->lr_foid, lr->lr_offset,
	    lr->lr_length, tx) == 0);

	(void) ztest_log_truncate(zd, tx, lr);

	dmu_tx_commit(tx);

	ztest_range_unlock(rl);
	ztest_object_unlock(zd, lr->lr_foid);

	return (0);
}

static int
ztest_replay_setattr(ztest_ds_t *zd, lr_setattr_t *lr, boolean_t byteswap)
{
	objset_t *os = zd->zd_os;
	dmu_tx_t *tx;
	dmu_buf_t *db;
	ztest_block_tag_t *bbt;
	uint64_t txg, lrtxg, crtxg;

	if (byteswap)
		byteswap_uint64_array(lr, sizeof (*lr));

	ztest_object_lock(zd, lr->lr_foid, RL_WRITER);

	VERIFY3U(0, ==, dmu_bonus_hold(os, lr->lr_foid, FTAG, &db));

	tx = dmu_tx_create(os);
	dmu_tx_hold_bonus(tx, lr->lr_foid);

	txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
	if (txg == 0) {
		dmu_buf_rele(db, FTAG);
		ztest_object_unlock(zd, lr->lr_foid);
		return (ENOSPC);
	}

	bbt = ztest_bt_bonus(db);
	ASSERT3U(bbt->bt_magic, ==, BT_MAGIC);
	crtxg = bbt->bt_crtxg;
	lrtxg = lr->lr_common.lrc_txg;

	if (zd->zd_zilog->zl_replay) {
		ASSERT(lr->lr_size != 0);
		ASSERT(lr->lr_mode != 0);
		ASSERT(lrtxg != 0);
	} else {
		/*
		 * Randomly change the size and increment the generation.
		 */
		lr->lr_size = (ztest_random(db->db_size / sizeof (*bbt)) + 1) *
		    sizeof (*bbt);
		lr->lr_mode = bbt->bt_gen + 1;
		ASSERT(lrtxg == 0);
	}

	/*
	 * Verify that the current bonus buffer is not newer than our txg.
	 */
	ztest_bt_verify(bbt, os, lr->lr_foid, -1ULL, lr->lr_mode,
	    MAX(txg, lrtxg), crtxg);

	dmu_buf_will_dirty(db, tx);

	ASSERT3U(lr->lr_size, >=, sizeof (*bbt));
	ASSERT3U(lr->lr_size, <=, db->db_size);
	VERIFY3U(dmu_set_bonus(db, lr->lr_size, tx), ==, 0);
	bbt = ztest_bt_bonus(db);

	ztest_bt_generate(bbt, os, lr->lr_foid, -1ULL, lr->lr_mode, txg, crtxg);

	dmu_buf_rele(db, FTAG);

	(void) ztest_log_setattr(zd, tx, lr);

	dmu_tx_commit(tx);

	ztest_object_unlock(zd, lr->lr_foid);

	return (0);
}

zil_replay_func_t *ztest_replay_vector[TX_MAX_TYPE] = {
	NULL,			/* 0 no such transaction type */
	ztest_replay_create,	/* TX_CREATE */
	NULL,			/* TX_MKDIR */
	NULL,			/* TX_MKXATTR */
	NULL,			/* TX_SYMLINK */
	ztest_replay_remove,	/* TX_REMOVE */
	NULL,			/* TX_RMDIR */
	NULL,			/* TX_LINK */
	NULL,			/* TX_RENAME */
	ztest_replay_write,	/* TX_WRITE */
	ztest_replay_truncate,	/* TX_TRUNCATE */
	ztest_replay_setattr,	/* TX_SETATTR */
	NULL,			/* TX_ACL */
	NULL,			/* TX_CREATE_ACL */
	NULL,			/* TX_CREATE_ATTR */
	NULL,			/* TX_CREATE_ACL_ATTR */
	NULL,			/* TX_MKDIR_ACL */
	NULL,			/* TX_MKDIR_ATTR */
	NULL,			/* TX_MKDIR_ACL_ATTR */
	NULL,			/* TX_WRITE2 */
};

/*
 * ZIL get_data callbacks
 */

static void
ztest_get_done(zgd_t *zgd, int error)
{
	ztest_ds_t *zd = zgd->zgd_private;
	uint64_t object = zgd->zgd_rl->rl_object;

	if (zgd->zgd_db)
		dmu_buf_rele(zgd->zgd_db, zgd);

	ztest_range_unlock(zgd->zgd_rl);
	ztest_object_unlock(zd, object);

	if (error == 0 && zgd->zgd_bp)
		zil_add_block(zgd->zgd_zilog, zgd->zgd_bp);

	umem_free(zgd, sizeof (*zgd));
}

static int
ztest_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
{
	ztest_ds_t *zd = arg;
	objset_t *os = zd->zd_os;
	uint64_t object = lr->lr_foid;
	uint64_t offset = lr->lr_offset;
	uint64_t size = lr->lr_length;
	blkptr_t *bp = &lr->lr_blkptr;
	uint64_t txg = lr->lr_common.lrc_txg;
	uint64_t crtxg;
	dmu_object_info_t doi;
	dmu_buf_t *db;
	zgd_t *zgd;
	int error;

	ztest_object_lock(zd, object, RL_READER);
	error = dmu_bonus_hold(os, object, FTAG, &db);
	if (error) {
		ztest_object_unlock(zd, object);
		return (error);
	}

	crtxg = ztest_bt_bonus(db)->bt_crtxg;

	if (crtxg == 0 || crtxg > txg) {
		dmu_buf_rele(db, FTAG);
		ztest_object_unlock(zd, object);
		return (ENOENT);
	}

	dmu_object_info_from_db(db, &doi);
	dmu_buf_rele(db, FTAG);
	db = NULL;

	zgd = umem_zalloc(sizeof (*zgd), UMEM_NOFAIL);
	zgd->zgd_zilog = zd->zd_zilog;
	zgd->zgd_private = zd;

	if (buf != NULL) {	/* immediate write */
		zgd->zgd_rl = ztest_range_lock(zd, object, offset, size,
		    RL_READER);

		error = dmu_read(os, object, offset, size, buf,
		    DMU_READ_NO_PREFETCH);
		ASSERT(error == 0);
	} else {
		size = doi.doi_data_block_size;
		if (ISP2(size)) {
			offset = P2ALIGN(offset, size);
		} else {
			ASSERT(offset < size);
			offset = 0;
		}

		zgd->zgd_rl = ztest_range_lock(zd, object, offset, size,
		    RL_READER);

		error = dmu_buf_hold(os, object, offset, zgd, &db,
		    DMU_READ_NO_PREFETCH);

		if (error == 0) {
			zgd->zgd_db = db;
			zgd->zgd_bp = bp;

			ASSERT(db->db_offset == offset);
			ASSERT(db->db_size == size);

			error = dmu_sync(zio, lr->lr_common.lrc_txg,
			    ztest_get_done, zgd);

			if (error == 0)
				return (0);
		}
	}

	ztest_get_done(zgd, error);

	return (error);
}

static void *
ztest_lr_alloc(size_t lrsize, char *name)
{
	char *lr;
	size_t namesize = name ? strlen(name) + 1 : 0;

	lr = umem_zalloc(lrsize + namesize, UMEM_NOFAIL);

	if (name)
		bcopy(name, lr + lrsize, namesize);

	return (lr);
}

void
ztest_lr_free(void *lr, size_t lrsize, char *name)
{
	size_t namesize = name ? strlen(name) + 1 : 0;

	umem_free(lr, lrsize + namesize);
}

/*
 * Lookup a bunch of objects.  Returns the number of objects not found.
 */
static int
ztest_lookup(ztest_ds_t *zd, ztest_od_t *od, int count)
{
	int missing = 0;
	int error;

	ASSERT(_mutex_held(&zd->zd_dirobj_lock));

	for (int i = 0; i < count; i++, od++) {
		od->od_object = 0;
		error = zap_lookup(zd->zd_os, od->od_dir, od->od_name,
		    sizeof (uint64_t), 1, &od->od_object);
		if (error) {
			ASSERT(error == ENOENT);
			ASSERT(od->od_object == 0);
			missing++;
		} else {
			dmu_buf_t *db;
			ztest_block_tag_t *bbt;
			dmu_object_info_t doi;

			ASSERT(od->od_object != 0);
			ASSERT(missing == 0);	/* there should be no gaps */

			ztest_object_lock(zd, od->od_object, RL_READER);
			VERIFY3U(0, ==, dmu_bonus_hold(zd->zd_os,
			    od->od_object, FTAG, &db));
			dmu_object_info_from_db(db, &doi);
			bbt = ztest_bt_bonus(db);
			ASSERT3U(bbt->bt_magic, ==, BT_MAGIC);
			od->od_type = doi.doi_type;
			od->od_blocksize = doi.doi_data_block_size;
			od->od_gen = bbt->bt_gen;
			dmu_buf_rele(db, FTAG);
			ztest_object_unlock(zd, od->od_object);
		}
	}

	return (missing);
}

static int
ztest_create(ztest_ds_t *zd, ztest_od_t *od, int count)
{
	int missing = 0;

	ASSERT(_mutex_held(&zd->zd_dirobj_lock));

	for (int i = 0; i < count; i++, od++) {
		if (missing) {
			od->od_object = 0;
			missing++;
			continue;
		}

		lr_create_t *lr = ztest_lr_alloc(sizeof (*lr), od->od_name);

		lr->lr_doid = od->od_dir;
		lr->lr_foid = 0;	/* 0 to allocate, > 0 to claim */
		lr->lrz_type = od->od_crtype;
		lr->lrz_blocksize = od->od_crblocksize;
		lr->lrz_ibshift = ztest_random_ibshift();
		lr->lrz_bonustype = DMU_OT_UINT64_OTHER;
		lr->lrz_bonuslen = dmu_bonus_max();
		lr->lr_gen = od->od_crgen;
		lr->lr_crtime[0] = time(NULL);

		if (ztest_replay_create(zd, lr, B_FALSE) != 0) {
			ASSERT(missing == 0);
			od->od_object = 0;
			missing++;
		} else {
			od->od_object = lr->lr_foid;
			od->od_type = od->od_crtype;
			od->od_blocksize = od->od_crblocksize;
			od->od_gen = od->od_crgen;
			ASSERT(od->od_object != 0);
		}

		ztest_lr_free(lr, sizeof (*lr), od->od_name);
	}

	return (missing);
}

static int
ztest_remove(ztest_ds_t *zd, ztest_od_t *od, int count)
{
	int missing = 0;
	int error;

	ASSERT(_mutex_held(&zd->zd_dirobj_lock));

	od += count - 1;

	for (int i = count - 1; i >= 0; i--, od--) {
		if (missing) {
			missing++;
			continue;
		}

		if (od->od_object == 0)
			continue;

		lr_remove_t *lr = ztest_lr_alloc(sizeof (*lr), od->od_name);

		lr->lr_doid = od->od_dir;

		if ((error = ztest_replay_remove(zd, lr, B_FALSE)) != 0) {
			ASSERT3U(error, ==, ENOSPC);
			missing++;
		} else {
			od->od_object = 0;
		}
		ztest_lr_free(lr, sizeof (*lr), od->od_name);
	}

	return (missing);
}

static int
ztest_write(ztest_ds_t *zd, uint64_t object, uint64_t offset, uint64_t size,
    void *data)
{
	lr_write_t *lr;
	int error;

	lr = ztest_lr_alloc(sizeof (*lr) + size, NULL);

	lr->lr_foid = object;
	lr->lr_offset = offset;
	lr->lr_length = size;
	lr->lr_blkoff = 0;
	BP_ZERO(&lr->lr_blkptr);

	bcopy(data, lr + 1, size);

	error = ztest_replay_write(zd, lr, B_FALSE);

	ztest_lr_free(lr, sizeof (*lr) + size, NULL);

	return (error);
}

static int
ztest_truncate(ztest_ds_t *zd, uint64_t object, uint64_t offset, uint64_t size)
{
	lr_truncate_t *lr;
	int error;

	lr = ztest_lr_alloc(sizeof (*lr), NULL);

	lr->lr_foid = object;
	lr->lr_offset = offset;
	lr->lr_length = size;

	error = ztest_replay_truncate(zd, lr, B_FALSE);

	ztest_lr_free(lr, sizeof (*lr), NULL);

	return (error);
}

static int
ztest_setattr(ztest_ds_t *zd, uint64_t object)
{
	lr_setattr_t *lr;
	int error;

	lr = ztest_lr_alloc(sizeof (*lr), NULL);

	lr->lr_foid = object;
	lr->lr_size = 0;
	lr->lr_mode = 0;

	error = ztest_replay_setattr(zd, lr, B_FALSE);

	ztest_lr_free(lr, sizeof (*lr), NULL);

	return (error);
}

static void
ztest_prealloc(ztest_ds_t *zd, uint64_t object, uint64_t offset, uint64_t size)
{
	objset_t *os = zd->zd_os;
	dmu_tx_t *tx;
	uint64_t txg;
	rl_t *rl;

	txg_wait_synced(dmu_objset_pool(os), 0);

	ztest_object_lock(zd, object, RL_READER);
	rl = ztest_range_lock(zd, object, offset, size, RL_WRITER);

	tx = dmu_tx_create(os);

	dmu_tx_hold_write(tx, object, offset, size);

	txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);

	if (txg != 0) {
		dmu_prealloc(os, object, offset, size, tx);
		dmu_tx_commit(tx);
		txg_wait_synced(dmu_objset_pool(os), txg);
	} else {
		(void) dmu_free_long_range(os, object, offset, size);
	}

	ztest_range_unlock(rl);
	ztest_object_unlock(zd, object);
}

static void
ztest_io(ztest_ds_t *zd, uint64_t object, uint64_t offset)
{
	ztest_block_tag_t wbt;
	dmu_object_info_t doi;
	enum ztest_io_type io_type;
	uint64_t blocksize;
	void *data;

	VERIFY(dmu_object_info(zd->zd_os, object, &doi) == 0);
	blocksize = doi.doi_data_block_size;
	data = umem_alloc(blocksize, UMEM_NOFAIL);

	/*
	 * Pick an i/o type at random, biased toward writing block tags.
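	 * (About half of all picks below are overridden in favor of
	 * ZTEST_IO_WRITE_TAG.)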
	 */
	io_type = ztest_random(ZTEST_IO_TYPES);
	if (ztest_random(2) == 0)
		io_type = ZTEST_IO_WRITE_TAG;

	switch (io_type) {

	case ZTEST_IO_WRITE_TAG:
		ztest_bt_generate(&wbt, zd->zd_os, object, offset, 0, 0, 0);
		(void) ztest_write(zd, object, offset, sizeof (wbt), &wbt);
		break;

	case ZTEST_IO_WRITE_PATTERN:
		(void) memset(data, 'a' + (object + offset) % 5, blocksize);
		if (ztest_random(2) == 0) {
			/*
			 * Induce fletcher2 collisions to ensure that
			 * zio_ddt_collision() detects and resolves them
			 * when using fletcher2-verify for deduplication.
			 */
			((uint64_t *)data)[0] ^= 1ULL << 63;
			((uint64_t *)data)[4] ^= 1ULL << 63;
		}
		(void) ztest_write(zd, object, offset, blocksize, data);
		break;

	case ZTEST_IO_WRITE_ZEROES:
		bzero(data, blocksize);
		(void) ztest_write(zd, object, offset, blocksize, data);
		break;

	case ZTEST_IO_TRUNCATE:
		(void) ztest_truncate(zd, object, offset, blocksize);
		break;

	case ZTEST_IO_SETATTR:
		(void) ztest_setattr(zd, object);
		break;
	}

	umem_free(data, blocksize);
}

/*
 * Initialize an object description template.
 */
static void
ztest_od_init(ztest_od_t *od, uint64_t id, char *tag, uint64_t index,
    dmu_object_type_t type, uint64_t blocksize, uint64_t gen)
{
	od->od_dir = ZTEST_DIROBJ;
	od->od_object = 0;

	od->od_crtype = type;
	od->od_crblocksize = blocksize ? blocksize : ztest_random_blocksize();
	od->od_crgen = gen;

	od->od_type = DMU_OT_NONE;
	od->od_blocksize = 0;
	od->od_gen = 0;

	(void) snprintf(od->od_name, sizeof (od->od_name), "%s(%lld)[%llu]",
	    tag, (int64_t)id, index);
}

/*
 * Lookup or create the objects for a test using the od template.
 * If the objects do not all exist, or if 'remove' is specified,
 * remove any existing objects and create new ones.  Otherwise,
 * use the existing objects.
 */
static int
ztest_object_init(ztest_ds_t *zd, ztest_od_t *od, size_t size, boolean_t remove)
{
	int count = size / sizeof (*od);
	int rv = 0;

	VERIFY(mutex_lock(&zd->zd_dirobj_lock) == 0);
	if ((ztest_lookup(zd, od, count) != 0 || remove) &&
	    (ztest_remove(zd, od, count) != 0 ||
	    ztest_create(zd, od, count) != 0))
		rv = -1;
	zd->zd_od = od;
	VERIFY(mutex_unlock(&zd->zd_dirobj_lock) == 0);

	return (rv);
}

/* ARGSUSED */
void
ztest_zil_commit(ztest_ds_t *zd, uint64_t id)
{
	zilog_t *zilog = zd->zd_zilog;

	zil_commit(zilog, ztest_random(ZTEST_OBJECTS));

	/*
	 * Remember the committed values in zd, which is in parent/child
	 * shared memory.  If we die, the next iteration of ztest_run()
	 * will verify that the log really does contain this record.
	 */
	mutex_enter(&zilog->zl_lock);
	ASSERT(zd->zd_seq <= zilog->zl_commit_lr_seq);
	zd->zd_seq = zilog->zl_commit_lr_seq;
	mutex_exit(&zilog->zl_lock);
}

/*
 * Verify that we can't destroy an active pool, create an existing pool,
 * or create a pool with a bad vdev spec.
 */
/* ARGSUSED */
void
ztest_spa_create_destroy(ztest_ds_t *zd, uint64_t id)
{
	ztest_shared_t *zs = ztest_shared;
	spa_t *spa;
	nvlist_t *nvroot;

	/*
	 * Attempt to create using a bad file.
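	 * (The vdev path is nonexistent, so spa_create() should fail
	 * with ENOENT.)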
	 */
	nvroot = make_vdev_root("/dev/bogus", NULL, 0, 0, 0, 0, 0, 1);
	VERIFY3U(ENOENT, ==,
	    spa_create("ztest_bad_file", nvroot, NULL, NULL, NULL));
	nvlist_free(nvroot);

	/*
	 * Attempt to create using a bad mirror.
	 */
	nvroot = make_vdev_root("/dev/bogus", NULL, 0, 0, 0, 0, 2, 1);
	VERIFY3U(ENOENT, ==,
	    spa_create("ztest_bad_mirror", nvroot, NULL, NULL, NULL));
	nvlist_free(nvroot);

	/*
	 * Attempt to create an existing pool.  It shouldn't matter
	 * what's in the nvroot; we should fail with EEXIST.
	 */
	(void) rw_rdlock(&zs->zs_name_lock);
	nvroot = make_vdev_root("/dev/bogus", NULL, 0, 0, 0, 0, 0, 1);
	VERIFY3U(EEXIST, ==, spa_create(zs->zs_pool, nvroot, NULL, NULL, NULL));
	nvlist_free(nvroot);
	VERIFY3U(0, ==, spa_open(zs->zs_pool, &spa, FTAG));
	VERIFY3U(EBUSY, ==, spa_destroy(zs->zs_pool));
	spa_close(spa, FTAG);

	(void) rw_unlock(&zs->zs_name_lock);
}

static vdev_t *
vdev_lookup_by_path(vdev_t *vd, const char *path)
{
	vdev_t *mvd;

	if (vd->vdev_path != NULL && strcmp(path, vd->vdev_path) == 0)
		return (vd);

	for (int c = 0; c < vd->vdev_children; c++)
		if ((mvd = vdev_lookup_by_path(vd->vdev_child[c], path)) !=
		    NULL)
			return (mvd);

	return (NULL);
}

/*
 * Find the first available hole which can be used as a top-level.
 */
int
find_vdev_hole(spa_t *spa)
{
	vdev_t *rvd = spa->spa_root_vdev;
	int c;

	ASSERT(spa_config_held(spa, SCL_VDEV, RW_READER) == SCL_VDEV);

	for (c = 0; c < rvd->vdev_children; c++) {
		vdev_t *cvd = rvd->vdev_child[c];

		if (cvd->vdev_ishole)
			break;
	}
	return (c);
}

/*
 * Verify that vdev_add() works as expected.
 */
/* ARGSUSED */
void
ztest_vdev_add_remove(ztest_ds_t *zd, uint64_t id)
{
	ztest_shared_t *zs = ztest_shared;
	spa_t *spa = zs->zs_spa;
	uint64_t leaves;
	uint64_t guid;
	nvlist_t *nvroot;
	int error;

	VERIFY(mutex_lock(&zs->zs_vdev_lock) == 0);
	leaves = MAX(zs->zs_mirrors + zs->zs_splits, 1) * zopt_raidz;

	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);

	ztest_shared->zs_vdev_next_leaf = find_vdev_hole(spa) * leaves;

	/*
	 * If we have slogs then remove them 1/4 of the time.
	 */
	if (spa_has_slogs(spa) && ztest_random(4) == 0) {
		/*
		 * Grab the guid from the head of the log class rotor.
		 */
		guid = spa_log_class(spa)->mc_rotor->mg_vd->vdev_guid;

		spa_config_exit(spa, SCL_VDEV, FTAG);

		/*
		 * We have to grab the zs_name_lock as writer to
		 * prevent a race between removing a slog (dmu_objset_find)
		 * and destroying a dataset.  Removing the slog will
		 * grab a reference on the dataset which may cause
		 * dmu_objset_destroy() to fail with EBUSY thus
		 * leaving the dataset in an inconsistent state.
		 */
		VERIFY(rw_wrlock(&ztest_shared->zs_name_lock) == 0);
		error = spa_vdev_remove(spa, guid, B_FALSE);
		VERIFY(rw_unlock(&ztest_shared->zs_name_lock) == 0);

		if (error && error != EEXIST)
			fatal(0, "spa_vdev_remove() = %d", error);
	} else {
		spa_config_exit(spa, SCL_VDEV, FTAG);

		/*
		 * Make 1/4 of the devices be log devices.
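		 * (The is_log argument passed to make_vdev_root() below
		 * is ztest_random(4) == 0.)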
2200 */ 2201 nvroot = make_vdev_root(NULL, NULL, zopt_vdev_size, 0, 2202 ztest_random(4) == 0, zopt_raidz, zs->zs_mirrors, 1); 2203 2204 error = spa_vdev_add(spa, nvroot); 2205 nvlist_free(nvroot); 2206 2207 if (error == ENOSPC) 2208 ztest_record_enospc("spa_vdev_add"); 2209 else if (error != 0) 2210 fatal(0, "spa_vdev_add() = %d", error); 2211 } 2212 2213 VERIFY(mutex_unlock(&ztest_shared->zs_vdev_lock) == 0); 2214} 2215 2216/* 2217 * Verify that adding/removing aux devices (l2arc, hot spare) works as expected. 2218 */ 2219/* ARGSUSED */ 2220void 2221ztest_vdev_aux_add_remove(ztest_ds_t *zd, uint64_t id) 2222{ 2223 ztest_shared_t *zs = ztest_shared; 2224 spa_t *spa = zs->zs_spa; 2225 vdev_t *rvd = spa->spa_root_vdev; 2226 spa_aux_vdev_t *sav; 2227 char *aux; 2228 uint64_t guid = 0; 2229 int error; 2230 2231 if (ztest_random(2) == 0) { 2232 sav = &spa->spa_spares; 2233 aux = ZPOOL_CONFIG_SPARES; 2234 } else { 2235 sav = &spa->spa_l2cache; 2236 aux = ZPOOL_CONFIG_L2CACHE; 2237 } 2238 2239 VERIFY(mutex_lock(&zs->zs_vdev_lock) == 0); 2240 2241 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); 2242 2243 if (sav->sav_count != 0 && ztest_random(4) == 0) { 2244 /* 2245 * Pick a random device to remove. 2246 */ 2247 guid = sav->sav_vdevs[ztest_random(sav->sav_count)]->vdev_guid; 2248 } else { 2249 /* 2250 * Find an unused device we can add. 2251 */ 2252 zs->zs_vdev_aux = 0; 2253 for (;;) { 2254 char path[MAXPATHLEN]; 2255 int c; 2256 (void) sprintf(path, ztest_aux_template, zopt_dir, 2257 zopt_pool, aux, zs->zs_vdev_aux); 2258 for (c = 0; c < sav->sav_count; c++) 2259 if (strcmp(sav->sav_vdevs[c]->vdev_path, 2260 path) == 0) 2261 break; 2262 if (c == sav->sav_count && 2263 vdev_lookup_by_path(rvd, path) == NULL) 2264 break; 2265 zs->zs_vdev_aux++; 2266 } 2267 } 2268 2269 spa_config_exit(spa, SCL_VDEV, FTAG); 2270 2271 if (guid == 0) { 2272 /* 2273 * Add a new device. 2274 */ 2275 nvlist_t *nvroot = make_vdev_root(NULL, aux, 2276 (zopt_vdev_size * 5) / 4, 0, 0, 0, 0, 1); 2277 error = spa_vdev_add(spa, nvroot); 2278 if (error != 0) 2279 fatal(0, "spa_vdev_add(%p) = %d", nvroot, error); 2280 nvlist_free(nvroot); 2281 } else { 2282 /* 2283 * Remove an existing device. Sometimes, dirty its 2284 * vdev state first to make sure we handle removal 2285 * of devices that have pending state changes. 
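 * (The vdev_online() call below, made half of the time, is what dirties
 * the state before the removal.)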
2286 */ 2287 if (ztest_random(2) == 0) 2288 (void) vdev_online(spa, guid, 0, NULL); 2289 2290 error = spa_vdev_remove(spa, guid, B_FALSE); 2291 if (error != 0 && error != EBUSY) 2292 fatal(0, "spa_vdev_remove(%llu) = %d", guid, error); 2293 } 2294 2295 VERIFY(mutex_unlock(&zs->zs_vdev_lock) == 0); 2296} 2297 2298/* 2299 * Split a pool if it has mirror top-level vdevs (tlvdevs). 2300 */ 2301/* ARGSUSED */ 2302void 2303ztest_split_pool(ztest_ds_t *zd, uint64_t id) 2304{ 2305 ztest_shared_t *zs = ztest_shared; 2306 spa_t *spa = zs->zs_spa; 2307 vdev_t *rvd = spa->spa_root_vdev; 2308 nvlist_t *tree, **child, *config, *split, **schild; 2309 uint_t c, children, schildren = 0, lastlogid = 0; 2310 int error = 0; 2311 2312 VERIFY(mutex_lock(&zs->zs_vdev_lock) == 0); 2313 2314 /* ensure we have a usable config; mirrors of raidz aren't supported */ 2315 if (zs->zs_mirrors < 3 || zopt_raidz > 1) { 2316 VERIFY(mutex_unlock(&zs->zs_vdev_lock) == 0); 2317 return; 2318 } 2319 2320 /* clean up the old pool, if any */ 2321 (void) spa_destroy("splitp"); 2322 2323 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); 2324 2325 /* generate a config from the existing config */ 2326 mutex_enter(&spa->spa_props_lock); 2327 VERIFY(nvlist_lookup_nvlist(spa->spa_config, ZPOOL_CONFIG_VDEV_TREE, 2328 &tree) == 0); 2329 mutex_exit(&spa->spa_props_lock); 2330 2331 VERIFY(nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child, 2332 &children) == 0); 2333 2334 schild = malloc(rvd->vdev_children * sizeof (nvlist_t *)); 2335 for (c = 0; c < children; c++) { 2336 vdev_t *tvd = rvd->vdev_child[c]; 2337 nvlist_t **mchild; 2338 uint_t mchildren; 2339 2340 if (tvd->vdev_islog || tvd->vdev_ops == &vdev_hole_ops) { 2341 VERIFY(nvlist_alloc(&schild[schildren], NV_UNIQUE_NAME, 2342 0) == 0); 2343 VERIFY(nvlist_add_string(schild[schildren], 2344 ZPOOL_CONFIG_TYPE, VDEV_TYPE_HOLE) == 0); 2345 VERIFY(nvlist_add_uint64(schild[schildren], 2346 ZPOOL_CONFIG_IS_HOLE, 1) == 0); 2347 if (lastlogid == 0) 2348 lastlogid = schildren; 2349 ++schildren; 2350 continue; 2351 } 2352 lastlogid = 0; 2353 VERIFY(nvlist_lookup_nvlist_array(child[c], 2354 ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0); 2355 VERIFY(nvlist_dup(mchild[0], &schild[schildren++], 0) == 0); 2356 } 2357 2358 /* OK, create a config that can be used to split */ 2359 VERIFY(nvlist_alloc(&split, NV_UNIQUE_NAME, 0) == 0); 2360 VERIFY(nvlist_add_string(split, ZPOOL_CONFIG_TYPE, 2361 VDEV_TYPE_ROOT) == 0); 2362 VERIFY(nvlist_add_nvlist_array(split, ZPOOL_CONFIG_CHILDREN, schild, 2363 lastlogid != 0 ? lastlogid : schildren) == 0); 2364 2365 VERIFY(nvlist_alloc(&config, NV_UNIQUE_NAME, 0) == 0); 2366 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, split) == 0); 2367 2368 for (c = 0; c < schildren; c++) 2369 nvlist_free(schild[c]); 2370 free(schild); 2371 nvlist_free(split); 2372 2373 spa_config_exit(spa, SCL_VDEV, FTAG); 2374 2375 (void) rw_wrlock(&zs->zs_name_lock); 2376 error = spa_vdev_split_mirror(spa, "splitp", config, NULL, B_FALSE); 2377 (void) rw_unlock(&zs->zs_name_lock); 2378 2379 nvlist_free(config); 2380 2381 if (error == 0) { 2382 (void) printf("successful split - results:\n"); 2383 mutex_enter(&spa_namespace_lock); 2384 show_pool_stats(spa); 2385 show_pool_stats(spa_lookup("splitp")); 2386 mutex_exit(&spa_namespace_lock); 2387 ++zs->zs_splits; 2388 --zs->zs_mirrors; 2389 } 2390 VERIFY(mutex_unlock(&zs->zs_vdev_lock) == 0); 2391 2392} 2393 2394/* 2395 * Verify that we can attach and detach devices.
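 * We pick a random leaf vdev, then either detach it outright or attach a
 * new device or hot spare to it, and check the result against the error
 * we expect.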
2396 */ 2397/* ARGSUSED */ 2398void 2399ztest_vdev_attach_detach(ztest_ds_t *zd, uint64_t id) 2400{ 2401 ztest_shared_t *zs = ztest_shared; 2402 spa_t *spa = zs->zs_spa; 2403 spa_aux_vdev_t *sav = &spa->spa_spares; 2404 vdev_t *rvd = spa->spa_root_vdev; 2405 vdev_t *oldvd, *newvd, *pvd; 2406 nvlist_t *root; 2407 uint64_t leaves; 2408 uint64_t leaf, top; 2409 uint64_t ashift = ztest_get_ashift(); 2410 uint64_t oldguid, pguid; 2411 size_t oldsize, newsize; 2412 char oldpath[MAXPATHLEN], newpath[MAXPATHLEN]; 2413 int replacing; 2414 int oldvd_has_siblings = B_FALSE; 2415 int newvd_is_spare = B_FALSE; 2416 int oldvd_is_log; 2417 int error, expected_error; 2418 2419 VERIFY(mutex_lock(&zs->zs_vdev_lock) == 0); 2420 leaves = MAX(zs->zs_mirrors, 1) * zopt_raidz; 2421 2422 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); 2423 2424 /* 2425 * Decide whether to do an attach or a replace. 2426 */ 2427 replacing = ztest_random(2); 2428 2429 /* 2430 * Pick a random top-level vdev. 2431 */ 2432 top = ztest_random_vdev_top(spa, B_TRUE); 2433 2434 /* 2435 * Pick a random leaf within it. 2436 */ 2437 leaf = ztest_random(leaves); 2438 2439 /* 2440 * Locate this vdev. 2441 */ 2442 oldvd = rvd->vdev_child[top]; 2443 if (zs->zs_mirrors >= 1) { 2444 ASSERT(oldvd->vdev_ops == &vdev_mirror_ops); 2445 ASSERT(oldvd->vdev_children >= zs->zs_mirrors); 2446 oldvd = oldvd->vdev_child[leaf / zopt_raidz]; 2447 } 2448 if (zopt_raidz > 1) { 2449 ASSERT(oldvd->vdev_ops == &vdev_raidz_ops); 2450 ASSERT(oldvd->vdev_children == zopt_raidz); 2451 oldvd = oldvd->vdev_child[leaf % zopt_raidz]; 2452 } 2453 2454 /* 2455 * If we're already doing an attach or replace, oldvd may be a 2456 * mirror vdev -- in which case, pick a random child. 2457 */ 2458 while (oldvd->vdev_children != 0) { 2459 oldvd_has_siblings = B_TRUE; 2460 ASSERT(oldvd->vdev_children >= 2); 2461 oldvd = oldvd->vdev_child[ztest_random(oldvd->vdev_children)]; 2462 } 2463 2464 oldguid = oldvd->vdev_guid; 2465 oldsize = vdev_get_min_asize(oldvd); 2466 oldvd_is_log = oldvd->vdev_top->vdev_islog; 2467 (void) strcpy(oldpath, oldvd->vdev_path); 2468 pvd = oldvd->vdev_parent; 2469 pguid = pvd->vdev_guid; 2470 2471 /* 2472 * If oldvd has siblings, then half of the time, detach it. 2473 */ 2474 if (oldvd_has_siblings && ztest_random(2) == 0) { 2475 spa_config_exit(spa, SCL_VDEV, FTAG); 2476 error = spa_vdev_detach(spa, oldguid, pguid, B_FALSE); 2477 if (error != 0 && error != ENODEV && error != EBUSY && 2478 error != ENOTSUP) 2479 fatal(0, "detach (%s) returned %d", oldpath, error); 2480 VERIFY(mutex_unlock(&zs->zs_vdev_lock) == 0); 2481 return; 2482 } 2483 2484 /* 2485 * For the new vdev, choose with equal probability between the two 2486 * standard paths (ending in either 'a' or 'b') or a random hot spare. 2487 */ 2488 if (sav->sav_count != 0 && ztest_random(3) == 0) { 2489 newvd = sav->sav_vdevs[ztest_random(sav->sav_count)]; 2490 newvd_is_spare = B_TRUE; 2491 (void) strcpy(newpath, newvd->vdev_path); 2492 } else { 2493 (void) snprintf(newpath, sizeof (newpath), ztest_dev_template, 2494 zopt_dir, zopt_pool, top * leaves + leaf); 2495 if (ztest_random(2) == 0) 2496 newpath[strlen(newpath) - 1] = 'b'; 2497 newvd = vdev_lookup_by_path(rvd, newpath); 2498 } 2499 2500 if (newvd) { 2501 newsize = vdev_get_min_asize(newvd); 2502 } else { 2503 /* 2504 * Make newsize a little bigger or smaller than oldsize. 2505 * If it's smaller, the attach should fail. 2506 * If it's larger, and we're doing a replace, 2507 * we should get dynamic LUN growth when we're done. 
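 * Since ztest_random(3) yields 0, 1 or 2, newsize works out to
 * 10/9, 10/10 or 10/11 of oldsize.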
2508 */ 2509 newsize = 10 * oldsize / (9 + ztest_random(3)); 2510 } 2511 2512 /* 2513 * If pvd is not a mirror or root, the attach should fail with ENOTSUP, 2514 * unless it's a replace; in that case any non-replacing parent is OK. 2515 * 2516 * If newvd is already part of the pool, it should fail with EBUSY. 2517 * 2518 * If newvd is too small, it should fail with EOVERFLOW. 2519 */ 2520 if (pvd->vdev_ops != &vdev_mirror_ops && 2521 pvd->vdev_ops != &vdev_root_ops && (!replacing || 2522 pvd->vdev_ops == &vdev_replacing_ops || 2523 pvd->vdev_ops == &vdev_spare_ops)) 2524 expected_error = ENOTSUP; 2525 else if (newvd_is_spare && (!replacing || oldvd_is_log)) 2526 expected_error = ENOTSUP; 2527 else if (newvd == oldvd) 2528 expected_error = replacing ? 0 : EBUSY; 2529 else if (vdev_lookup_by_path(rvd, newpath) != NULL) 2530 expected_error = EBUSY; 2531 else if (newsize < oldsize) 2532 expected_error = EOVERFLOW; 2533 else if (ashift > oldvd->vdev_top->vdev_ashift) 2534 expected_error = EDOM; 2535 else 2536 expected_error = 0; 2537 2538 spa_config_exit(spa, SCL_VDEV, FTAG); 2539 2540 /* 2541 * Build the nvlist describing newpath. 2542 */ 2543 root = make_vdev_root(newpath, NULL, newvd == NULL ? newsize : 0, 2544 ashift, 0, 0, 0, 1); 2545 2546 error = spa_vdev_attach(spa, oldguid, root, replacing); 2547 2548 nvlist_free(root); 2549 2550 /* 2551 * If our parent was the replacing vdev, but the replace completed, 2552 * then instead of failing with ENOTSUP we may either succeed, 2553 * fail with ENODEV, or fail with EOVERFLOW. 2554 */ 2555 if (expected_error == ENOTSUP && 2556 (error == 0 || error == ENODEV || error == EOVERFLOW)) 2557 expected_error = error; 2558 2559 /* 2560 * If someone grew the LUN, the replacement may be too small. 2561 */ 2562 if (error == EOVERFLOW || error == EBUSY) 2563 expected_error = error; 2564 2565 /* XXX workaround 6690467 */ 2566 if (error != expected_error && expected_error != EBUSY) { 2567 fatal(0, "attach (%s %llu, %s %llu, %d) " 2568 "returned %d, expected %d", 2569 oldpath, (longlong_t)oldsize, newpath, 2570 (longlong_t)newsize, replacing, error, expected_error); 2571 } 2572 2573 VERIFY(mutex_unlock(&zs->zs_vdev_lock) == 0); 2574} 2575 2576/* 2577 * Callback function which expands the physical size of the vdev. 2578 */ 2579vdev_t * 2580grow_vdev(vdev_t *vd, void *arg) 2581{ 2582 spa_t *spa = vd->vdev_spa; 2583 size_t *newsize = arg; 2584 size_t fsize; 2585 int fd; 2586 2587 ASSERT(spa_config_held(spa, SCL_STATE, RW_READER) == SCL_STATE); 2588 ASSERT(vd->vdev_ops->vdev_op_leaf); 2589 2590 if ((fd = open(vd->vdev_path, O_RDWR)) == -1) 2591 return (vd); 2592 2593 fsize = lseek(fd, 0, SEEK_END); 2594 (void) ftruncate(fd, *newsize); 2595 2596 if (zopt_verbose >= 6) { 2597 (void) printf("%s grew from %lu to %lu bytes\n", 2598 vd->vdev_path, (ulong_t)fsize, (ulong_t)*newsize); 2599 } 2600 (void) close(fd); 2601 return (NULL); 2602} 2603 2604/* 2605 * Callback function which expands a given vdev by calling vdev_online(). 
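 * Like grow_vdev(), it follows the vdev_walk_tree() convention: return
 * NULL on success so the walk continues, or the vdev itself to stop the
 * walk and report failure.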
2606 */ 2607/* ARGSUSED */ 2608vdev_t * 2609online_vdev(vdev_t *vd, void *arg) 2610{ 2611 spa_t *spa = vd->vdev_spa; 2612 vdev_t *tvd = vd->vdev_top; 2613 uint64_t guid = vd->vdev_guid; 2614 uint64_t generation = spa->spa_config_generation + 1; 2615 vdev_state_t newstate = VDEV_STATE_UNKNOWN; 2616 int error; 2617 2618 ASSERT(spa_config_held(spa, SCL_STATE, RW_READER) == SCL_STATE); 2619 ASSERT(vd->vdev_ops->vdev_op_leaf); 2620 2621 /* Calling vdev_online will initialize the new metaslabs */ 2622 spa_config_exit(spa, SCL_STATE, spa); 2623 error = vdev_online(spa, guid, ZFS_ONLINE_EXPAND, &newstate); 2624 spa_config_enter(spa, SCL_STATE, spa, RW_READER); 2625 2626 /* 2627 * If vdev_online returned an error or the underlying vdev_open 2628 * failed, then we abort the expand. The only way to know that 2629 * vdev_open failed is by checking the returned newstate. 2630 */ 2631 if (error || newstate != VDEV_STATE_HEALTHY) { 2632 if (zopt_verbose >= 5) { 2633 (void) printf("Unable to expand vdev, state %llu, " 2634 "error %d\n", (u_longlong_t)newstate, error); 2635 } 2636 return (vd); 2637 } 2638 ASSERT3U(newstate, ==, VDEV_STATE_HEALTHY); 2639 2640 /* 2641 * Since we dropped the lock we need to ensure that we're 2642 * still talking to the original vdev. It's possible this 2643 * vdev may have been detached/replaced while we were 2644 * trying to online it. 2645 */ 2646 if (generation != spa->spa_config_generation) { 2647 if (zopt_verbose >= 5) { 2648 (void) printf("vdev configuration has changed, " 2649 "guid %llu, state %llu, expected gen %llu, " 2650 "got gen %llu\n", 2651 (u_longlong_t)guid, 2652 (u_longlong_t)tvd->vdev_state, 2653 (u_longlong_t)generation, 2654 (u_longlong_t)spa->spa_config_generation); 2655 } 2656 return (vd); 2657 } 2658 return (NULL); 2659} 2660 2661/* 2662 * Traverse the vdev tree calling the supplied function. 2663 * We continue to walk the tree until we either have walked all 2664 * children or we receive a non-NULL return from the callback. 2665 * If a NULL callback is passed, then we just return the first 2666 * leaf vdev we encounter. 2667 */ 2668vdev_t * 2669vdev_walk_tree(vdev_t *vd, vdev_t *(*func)(vdev_t *, void *), void *arg) 2670{ 2671 if (vd->vdev_ops->vdev_op_leaf) { 2672 if (func == NULL) 2673 return (vd); 2674 else 2675 return (func(vd, arg)); 2676 } 2677 2678 for (uint_t c = 0; c < vd->vdev_children; c++) { 2679 vdev_t *cvd = vd->vdev_child[c]; 2680 if ((cvd = vdev_walk_tree(cvd, func, arg)) != NULL) 2681 return (cvd); 2682 } 2683 return (NULL); 2684} 2685 2686/* 2687 * Verify that dynamic LUN growth works as expected. 2688 */ 2689/* ARGSUSED */ 2690void 2691ztest_vdev_LUN_growth(ztest_ds_t *zd, uint64_t id) 2692{ 2693 ztest_shared_t *zs = ztest_shared; 2694 spa_t *spa = zs->zs_spa; 2695 vdev_t *vd, *tvd; 2696 metaslab_class_t *mc; 2697 metaslab_group_t *mg; 2698 size_t psize, newsize; 2699 uint64_t top; 2700 uint64_t old_class_space, new_class_space, old_ms_count, new_ms_count; 2701 2702 VERIFY(mutex_lock(&zs->zs_vdev_lock) == 0); 2703 spa_config_enter(spa, SCL_STATE, spa, RW_READER); 2704 2705 top = ztest_random_vdev_top(spa, B_TRUE); 2706 2707 tvd = spa->spa_root_vdev->vdev_child[top]; 2708 mg = tvd->vdev_mg; 2709 mc = mg->mg_class; 2710 old_ms_count = tvd->vdev_ms_count; 2711 old_class_space = metaslab_class_get_space(mc); 2712 2713 /* 2714 * Determine the size of the first leaf vdev associated with 2715 * our top-level device.
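 * (Passing a NULL callback makes vdev_walk_tree() return the first leaf
 * it finds.)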
2716 */ 2717 vd = vdev_walk_tree(tvd, NULL, NULL); 2718 ASSERT3P(vd, !=, NULL); 2719 ASSERT(vd->vdev_ops->vdev_op_leaf); 2720 2721 psize = vd->vdev_psize; 2722 2723 /* 2724 * We only try to expand the vdev if it's healthy, less than 4x its 2725 * original size, and it has a valid psize. 2726 */ 2727 if (tvd->vdev_state != VDEV_STATE_HEALTHY || 2728 psize == 0 || psize >= 4 * zopt_vdev_size) { 2729 spa_config_exit(spa, SCL_STATE, spa); 2730 VERIFY(mutex_unlock(&zs->zs_vdev_lock) == 0); 2731 return; 2732 } 2733 ASSERT(psize > 0); 2734 newsize = psize + psize / 8; 2735 ASSERT3U(newsize, >, psize); 2736 2737 if (zopt_verbose >= 6) { 2738 (void) printf("Expanding LUN %s from %lu to %lu\n", 2739 vd->vdev_path, (ulong_t)psize, (ulong_t)newsize); 2740 } 2741 2742 /* 2743 * Growing the vdev is a two step process: 2744 * 1). expand the physical size (i.e. relabel) 2745 * 2). online the vdev to create the new metaslabs 2746 */ 2747 if (vdev_walk_tree(tvd, grow_vdev, &newsize) != NULL || 2748 vdev_walk_tree(tvd, online_vdev, NULL) != NULL || 2749 tvd->vdev_state != VDEV_STATE_HEALTHY) { 2750 if (zopt_verbose >= 5) { 2751 (void) printf("Could not expand LUN because " 2752 "the vdev configuration changed.\n"); 2753 } 2754 spa_config_exit(spa, SCL_STATE, spa); 2755 VERIFY(mutex_unlock(&zs->zs_vdev_lock) == 0); 2756 return; 2757 } 2758 2759 spa_config_exit(spa, SCL_STATE, spa); 2760 2761 /* 2762 * Expanding the LUN will update the config asynchronously, 2763 * thus we must wait for the async thread to complete any 2764 * pending tasks before proceeding. 2765 */ 2766 for (;;) { 2767 boolean_t done; 2768 mutex_enter(&spa->spa_async_lock); 2769 done = (spa->spa_async_thread == NULL && !spa->spa_async_tasks); 2770 mutex_exit(&spa->spa_async_lock); 2771 if (done) 2772 break; 2773 txg_wait_synced(spa_get_dsl(spa), 0); 2774 (void) poll(NULL, 0, 100); 2775 } 2776 2777 spa_config_enter(spa, SCL_STATE, spa, RW_READER); 2778 2779 tvd = spa->spa_root_vdev->vdev_child[top]; 2780 new_ms_count = tvd->vdev_ms_count; 2781 new_class_space = metaslab_class_get_space(mc); 2782 2783 if (tvd->vdev_mg != mg || mg->mg_class != mc) { 2784 if (zopt_verbose >= 5) { 2785 (void) printf("Could not verify LUN expansion due to " 2786 "intervening vdev offline or remove.\n"); 2787 } 2788 spa_config_exit(spa, SCL_STATE, spa); 2789 VERIFY(mutex_unlock(&zs->zs_vdev_lock) == 0); 2790 return; 2791 } 2792 2793 /* 2794 * Make sure we were able to grow the vdev. 2795 */ 2796 if (new_ms_count <= old_ms_count) 2797 fatal(0, "LUN expansion failed: ms_count %llu <= %llu\n", 2798 old_ms_count, new_ms_count); 2799 2800 /* 2801 * Make sure we were able to grow the pool. 2802 */ 2803 if (new_class_space <= old_class_space) 2804 fatal(0, "LUN expansion failed: class_space %llu <= %llu\n", 2805 old_class_space, new_class_space); 2806 2807 if (zopt_verbose >= 5) { 2808 char oldnumbuf[6], newnumbuf[6]; 2809 2810 nicenum(old_class_space, oldnumbuf); 2811 nicenum(new_class_space, newnumbuf); 2812 (void) printf("%s grew from %s to %s\n", 2813 spa->spa_name, oldnumbuf, newnumbuf); 2814 } 2815 2816 spa_config_exit(spa, SCL_STATE, spa); 2817 VERIFY(mutex_unlock(&zs->zs_vdev_lock) == 0); 2818} 2819 2820/* 2821 * Verify that dmu_objset_{create,destroy,open,close} work as expected. 2822 */ 2823/* ARGSUSED */ 2824static void 2825ztest_objset_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx) 2826{ 2827 /* 2828 * Create the objects common to all ztest datasets. 
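 * For now that is just the per-dataset directory ZAP, claimed at the
 * fixed object number ZTEST_DIROBJ.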
2829 */ 2830 VERIFY(zap_create_claim(os, ZTEST_DIROBJ, 2831 DMU_OT_ZAP_OTHER, DMU_OT_NONE, 0, tx) == 0); 2832} 2833 2834static int 2835ztest_dataset_create(char *dsname) 2836{ 2837 uint64_t zilset = ztest_random(100); 2838 int err = dmu_objset_create(dsname, DMU_OST_OTHER, 0, 2839 ztest_objset_create_cb, NULL); 2840 2841 if (err || zilset < 80) 2842 return (err); 2843 2844 (void) printf("Setting dataset %s to sync always\n", dsname); 2845 return (ztest_dsl_prop_set_uint64(dsname, ZFS_PROP_SYNC, 2846 ZFS_SYNC_ALWAYS, B_FALSE)); 2847} 2848 2849/* ARGSUSED */ 2850static int 2851ztest_objset_destroy_cb(const char *name, void *arg) 2852{ 2853 objset_t *os; 2854 dmu_object_info_t doi; 2855 int error; 2856 2857 /* 2858 * Verify that the dataset contains a directory object. 2859 */ 2860 VERIFY3U(0, ==, dmu_objset_hold(name, FTAG, &os)); 2861 error = dmu_object_info(os, ZTEST_DIROBJ, &doi); 2862 if (error != ENOENT) { 2863 /* We could have crashed in the middle of destroying it */ 2864 ASSERT3U(error, ==, 0); 2865 ASSERT3U(doi.doi_type, ==, DMU_OT_ZAP_OTHER); 2866 ASSERT3S(doi.doi_physical_blocks_512, >=, 0); 2867 } 2868 dmu_objset_rele(os, FTAG); 2869 2870 /* 2871 * Destroy the dataset. 2872 */ 2873 VERIFY3U(0, ==, dmu_objset_destroy(name, B_FALSE)); 2874 return (0); 2875} 2876 2877static boolean_t 2878ztest_snapshot_create(char *osname, uint64_t id) 2879{ 2880 char snapname[MAXNAMELEN]; 2881 int error; 2882 2883 (void) snprintf(snapname, MAXNAMELEN, "%s@%llu", osname, 2884 (u_longlong_t)id); 2885 2886 error = dmu_objset_snapshot(osname, strchr(snapname, '@') + 1, 2887 NULL, NULL, B_FALSE, B_FALSE, -1); 2888 if (error == ENOSPC) { 2889 ztest_record_enospc(FTAG); 2890 return (B_FALSE); 2891 } 2892 if (error != 0 && error != EEXIST) 2893 fatal(0, "ztest_snapshot_create(%s) = %d", snapname, error); 2894 return (B_TRUE); 2895} 2896 2897static boolean_t 2898ztest_snapshot_destroy(char *osname, uint64_t id) 2899{ 2900 char snapname[MAXNAMELEN]; 2901 int error; 2902 2903 (void) snprintf(snapname, MAXNAMELEN, "%s@%llu", osname, 2904 (u_longlong_t)id); 2905 2906 error = dmu_objset_destroy(snapname, B_FALSE); 2907 if (error != 0 && error != ENOENT) 2908 fatal(0, "ztest_snapshot_destroy(%s) = %d", snapname, error); 2909 return (B_TRUE); 2910} 2911 2912/* ARGSUSED */ 2913void 2914ztest_dmu_objset_create_destroy(ztest_ds_t *zd, uint64_t id) 2915{ 2916 ztest_shared_t *zs = ztest_shared; 2917 ztest_ds_t zdtmp; 2918 int iters; 2919 int error; 2920 objset_t *os, *os2; 2921 char name[MAXNAMELEN]; 2922 zilog_t *zilog; 2923 2924 (void) rw_rdlock(&zs->zs_name_lock); 2925 2926 (void) snprintf(name, MAXNAMELEN, "%s/temp_%llu", 2927 zs->zs_pool, (u_longlong_t)id); 2928 2929 /* 2930 * If this dataset exists from a previous run, process its replay log 2931 * half of the time. If we don't replay it, then dmu_objset_destroy() 2932 * (invoked from ztest_objset_destroy_cb()) should just throw it away. 2933 */ 2934 if (ztest_random(2) == 0 && 2935 dmu_objset_own(name, DMU_OST_OTHER, B_FALSE, FTAG, &os) == 0) { 2936 ztest_zd_init(&zdtmp, os); 2937 zil_replay(os, &zdtmp, ztest_replay_vector); 2938 ztest_zd_fini(&zdtmp); 2939 dmu_objset_disown(os, FTAG); 2940 } 2941 2942 /* 2943 * There may be an old instance of the dataset we're about to 2944 * create lying around from a previous run. If so, destroy it 2945 * and all of its snapshots. 
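 * dmu_objset_find() below visits the dataset along with its children and
 * snapshots, invoking ztest_objset_destroy_cb() on each name.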
2946 */ 2947 (void) dmu_objset_find(name, ztest_objset_destroy_cb, NULL, 2948 DS_FIND_CHILDREN | DS_FIND_SNAPSHOTS); 2949 2950 /* 2951 * Verify that the destroyed dataset is no longer in the namespace. 2952 */ 2953 VERIFY3U(ENOENT, ==, dmu_objset_hold(name, FTAG, &os)); 2954 2955 /* 2956 * Verify that we can create a new dataset. 2957 */ 2958 error = ztest_dataset_create(name); 2959 if (error) { 2960 if (error == ENOSPC) { 2961 ztest_record_enospc(FTAG); 2962 (void) rw_unlock(&zs->zs_name_lock); 2963 return; 2964 } 2965 fatal(0, "dmu_objset_create(%s) = %d", name, error); 2966 } 2967 2968 VERIFY3U(0, ==, 2969 dmu_objset_own(name, DMU_OST_OTHER, B_FALSE, FTAG, &os)); 2970 2971 ztest_zd_init(&zdtmp, os); 2972 2973 /* 2974 * Open the intent log for it. 2975 */ 2976 zilog = zil_open(os, ztest_get_data); 2977 2978 /* 2979 * Put some objects in there, do a little I/O to them, 2980 * and randomly take a couple of snapshots along the way. 2981 */ 2982 iters = ztest_random(5); 2983 for (int i = 0; i < iters; i++) { 2984 ztest_dmu_object_alloc_free(&zdtmp, id); 2985 if (ztest_random(iters) == 0) 2986 (void) ztest_snapshot_create(name, i); 2987 } 2988 2989 /* 2990 * Verify that we cannot create an existing dataset. 2991 */ 2992 VERIFY3U(EEXIST, ==, 2993 dmu_objset_create(name, DMU_OST_OTHER, 0, NULL, NULL)); 2994 2995 /* 2996 * Verify that we can hold an objset that is also owned. 2997 */ 2998 VERIFY3U(0, ==, dmu_objset_hold(name, FTAG, &os2)); 2999 dmu_objset_rele(os2, FTAG); 3000 3001 /* 3002 * Verify that we cannot own an objset that is already owned. 3003 */ 3004 VERIFY3U(EBUSY, ==, 3005 dmu_objset_own(name, DMU_OST_OTHER, B_FALSE, FTAG, &os2)); 3006 3007 zil_close(zilog); 3008 dmu_objset_disown(os, FTAG); 3009 ztest_zd_fini(&zdtmp); 3010 3011 (void) rw_unlock(&zs->zs_name_lock); 3012} 3013 3014/* 3015 * Verify that dmu_snapshot_{create,destroy,open,close} work as expected. 3016 */ 3017void 3018ztest_dmu_snapshot_create_destroy(ztest_ds_t *zd, uint64_t id) 3019{ 3020 ztest_shared_t *zs = ztest_shared; 3021 3022 (void) rw_rdlock(&zs->zs_name_lock); 3023 (void) ztest_snapshot_destroy(zd->zd_name, id); 3024 (void) ztest_snapshot_create(zd->zd_name, id); 3025 (void) rw_unlock(&zs->zs_name_lock); 3026} 3027 3028/* 3029 * Cleanup non-standard snapshots and clones. 
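 * The destroys run leaf-first: clone2 before its origin snap3, clone1's
 * snapshots before clone1, and clone1 before its origin snap1.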
3030 */ 3031void 3032ztest_dsl_dataset_cleanup(char *osname, uint64_t id) 3033{ 3034 char snap1name[MAXNAMELEN]; 3035 char clone1name[MAXNAMELEN]; 3036 char snap2name[MAXNAMELEN]; 3037 char clone2name[MAXNAMELEN]; 3038 char snap3name[MAXNAMELEN]; 3039 int error; 3040 3041 (void) snprintf(snap1name, MAXNAMELEN, "%s@s1_%llu", osname, id); 3042 (void) snprintf(clone1name, MAXNAMELEN, "%s/c1_%llu", osname, id); 3043 (void) snprintf(snap2name, MAXNAMELEN, "%s@s2_%llu", clone1name, id); 3044 (void) snprintf(clone2name, MAXNAMELEN, "%s/c2_%llu", osname, id); 3045 (void) snprintf(snap3name, MAXNAMELEN, "%s@s3_%llu", clone1name, id); 3046 3047 error = dmu_objset_destroy(clone2name, B_FALSE); 3048 if (error && error != ENOENT) 3049 fatal(0, "dmu_objset_destroy(%s) = %d", clone2name, error); 3050 error = dmu_objset_destroy(snap3name, B_FALSE); 3051 if (error && error != ENOENT) 3052 fatal(0, "dmu_objset_destroy(%s) = %d", snap3name, error); 3053 error = dmu_objset_destroy(snap2name, B_FALSE); 3054 if (error && error != ENOENT) 3055 fatal(0, "dmu_objset_destroy(%s) = %d", snap2name, error); 3056 error = dmu_objset_destroy(clone1name, B_FALSE); 3057 if (error && error != ENOENT) 3058 fatal(0, "dmu_objset_destroy(%s) = %d", clone1name, error); 3059 error = dmu_objset_destroy(snap1name, B_FALSE); 3060 if (error && error != ENOENT) 3061 fatal(0, "dmu_objset_destroy(%s) = %d", snap1name, error); 3062} 3063 3064/* 3065 * Verify that dsl_dataset_promote() handles EBUSY. 3066 */ 3067void 3068ztest_dsl_dataset_promote_busy(ztest_ds_t *zd, uint64_t id) 3069{ 3070 ztest_shared_t *zs = ztest_shared; 3071 objset_t *clone; 3072 dsl_dataset_t *ds; 3073 char snap1name[MAXNAMELEN]; 3074 char clone1name[MAXNAMELEN]; 3075 char snap2name[MAXNAMELEN]; 3076 char clone2name[MAXNAMELEN]; 3077 char snap3name[MAXNAMELEN]; 3078 char *osname = zd->zd_name; 3079 int error; 3080 3081 (void) rw_rdlock(&zs->zs_name_lock); 3082 3083 ztest_dsl_dataset_cleanup(osname, id); 3084 3085 (void) snprintf(snap1name, MAXNAMELEN, "%s@s1_%llu", osname, id); 3086 (void) snprintf(clone1name, MAXNAMELEN, "%s/c1_%llu", osname, id); 3087 (void) snprintf(snap2name, MAXNAMELEN, "%s@s2_%llu", clone1name, id); 3088 (void) snprintf(clone2name, MAXNAMELEN, "%s/c2_%llu", osname, id); 3089 (void) snprintf(snap3name, MAXNAMELEN, "%s@s3_%llu", clone1name, id); 3090 3091 error = dmu_objset_snapshot(osname, strchr(snap1name, '@')+1, 3092 NULL, NULL, B_FALSE, B_FALSE, -1); 3093 if (error && error != EEXIST) { 3094 if (error == ENOSPC) { 3095 ztest_record_enospc(FTAG); 3096 goto out; 3097 } 3098 fatal(0, "dmu_objset_snapshot(%s) = %d", snap1name, error); 3099 } 3100 3101 error = dmu_objset_hold(snap1name, FTAG, &clone); 3102 if (error) 3103 fatal(0, "dmu_objset_hold(%s) = %d", snap1name, error); 3104 3105 error = dmu_objset_clone(clone1name, dmu_objset_ds(clone), 0); 3106 dmu_objset_rele(clone, FTAG); 3107 if (error) { 3108 if (error == ENOSPC) { 3109 ztest_record_enospc(FTAG); 3110 goto out; 3111 } 3112 fatal(0, "dmu_objset_clone(%s) = %d", clone1name, error); 3113 } 3114 3115 error = dmu_objset_snapshot(clone1name, strchr(snap2name, '@')+1, 3116 NULL, NULL, B_FALSE, B_FALSE, -1); 3117 if (error && error != EEXIST) { 3118 if (error == ENOSPC) { 3119 ztest_record_enospc(FTAG); 3120 goto out; 3121 } 3122 fatal(0, "dmu_objset_snapshot(%s) = %d", snap2name, error); 3123 } 3124 3125 error = dmu_objset_snapshot(clone1name, strchr(snap3name, '@')+1, 3126 NULL, NULL, B_FALSE, B_FALSE, -1); 3127 if (error && error != EEXIST) { 3128 if (error == ENOSPC) { 3129
ztest_record_enospc(FTAG); 3130 goto out; 3131 } 3132 fatal(0, "dmu_objset_snapshot(%s) = %d", snap3name, error); 3133 } 3134 3135 error = dmu_objset_hold(snap3name, FTAG, &clone); 3136 if (error) 3137 fatal(0, "dmu_objset_hold(%s) = %d", snap3name, error); 3138 3139 error = dmu_objset_clone(clone2name, dmu_objset_ds(clone), 0); 3140 dmu_objset_rele(clone, FTAG); 3141 if (error) { 3142 if (error == ENOSPC) { 3143 ztest_record_enospc(FTAG); 3144 goto out; 3145 } 3146 fatal(0, "dmu_objset_clone(%s) = %d", clone2name, error); 3147 } 3148 3149 error = dsl_dataset_own(snap2name, B_FALSE, FTAG, &ds); 3150 if (error) 3151 fatal(0, "dsl_dataset_own(%s) = %d", snap2name, error); 3152 error = dsl_dataset_promote(clone2name, NULL); 3153 if (error != EBUSY) 3154 fatal(0, "dsl_dataset_promote(%s), %d, not EBUSY", clone2name, 3155 error); 3156 dsl_dataset_disown(ds, FTAG); 3157 3158out: 3159 ztest_dsl_dataset_cleanup(osname, id); 3160 3161 (void) rw_unlock(&zs->zs_name_lock); 3162} 3163 3164/* 3165 * Verify that dmu_object_{alloc,free} work as expected. 3166 */ 3167void 3168ztest_dmu_object_alloc_free(ztest_ds_t *zd, uint64_t id) 3169{ 3170 ztest_od_t od[4]; 3171 int batchsize = sizeof (od) / sizeof (od[0]); 3172 3173 for (int b = 0; b < batchsize; b++) 3174 ztest_od_init(&od[b], id, FTAG, b, DMU_OT_UINT64_OTHER, 0, 0); 3175 3176 /* 3177 * Destroy the previous batch of objects, create a new batch, 3178 * and do some I/O on the new objects. 3179 */ 3180 if (ztest_object_init(zd, od, sizeof (od), B_TRUE) != 0) 3181 return; 3182 3183 while (ztest_random(4 * batchsize) != 0) 3184 ztest_io(zd, od[ztest_random(batchsize)].od_object, 3185 ztest_random(ZTEST_RANGE_LOCKS) << SPA_MAXBLOCKSHIFT); 3186} 3187 3188/* 3189 * Verify that dmu_{read,write} work as expected. 3190 */ 3191void 3192ztest_dmu_read_write(ztest_ds_t *zd, uint64_t id) 3193{ 3194 objset_t *os = zd->zd_os; 3195 ztest_od_t od[2]; 3196 dmu_tx_t *tx; 3197 int i, freeit, error; 3198 uint64_t n, s, txg; 3199 bufwad_t *packbuf, *bigbuf, *pack, *bigH, *bigT; 3200 uint64_t packobj, packoff, packsize, bigobj, bigoff, bigsize; 3201 uint64_t chunksize = (1000 + ztest_random(1000)) * sizeof (uint64_t); 3202 uint64_t regions = 997; 3203 uint64_t stride = 123456789ULL; 3204 uint64_t width = 40; 3205 int free_percent = 5; 3206 3207 /* 3208 * This test uses two objects, packobj and bigobj, that are always 3209 * updated together (i.e. in the same tx) so that their contents are 3210 * in sync and can be compared. Their contents relate to each other 3211 * in a simple way: packobj is a dense array of 'bufwad' structures, 3212 * while bigobj is a sparse array of the same bufwads. Specifically, 3213 * for any index n, there are three bufwads that should be identical: 3214 * 3215 * packobj, at offset n * sizeof (bufwad_t) 3216 * bigobj, at the head of the nth chunk 3217 * bigobj, at the tail of the nth chunk 3218 * 3219 * The chunk size is arbitrary. It doesn't have to be a power of two, 3220 * and it doesn't have any relation to the object blocksize. 3221 * The only requirement is that it can hold at least two bufwads. 3222 * 3223 * Normally, we write the bufwad to each of these locations. 3224 * However, free_percent of the time we instead write zeroes to 3225 * packobj and perform a dmu_free_range() on bigobj. By comparing 3226 * bigobj to packobj, we can verify that the DMU is correctly 3227 * tracking which parts of an object are allocated and free, 3228 * and that the contents of the allocated blocks are correct.
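 * Concretely: for index n with chunk size C, the head copy lives at byte
 * offset n * C of bigobj and the tail copy starts at byte
 * (n + 1) * C - sizeof (bufwad_t).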
3229 */ 3230 3231 /* 3232 * Read the directory info. If it's the first time, set things up. 3233 */ 3234 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_UINT64_OTHER, 0, chunksize); 3235 ztest_od_init(&od[1], id, FTAG, 1, DMU_OT_UINT64_OTHER, 0, chunksize); 3236 3237 if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0) 3238 return; 3239 3240 bigobj = od[0].od_object; 3241 packobj = od[1].od_object; 3242 chunksize = od[0].od_gen; 3243 ASSERT(chunksize == od[1].od_gen); 3244 3245 /* 3246 * Prefetch a random chunk of the big object. 3247 * Our aim here is to get some async reads in flight 3248 * for blocks that we may free below; the DMU should 3249 * handle this race correctly. 3250 */ 3251 n = ztest_random(regions) * stride + ztest_random(width); 3252 s = 1 + ztest_random(2 * width - 1); 3253 dmu_prefetch(os, bigobj, n * chunksize, s * chunksize); 3254 3255 /* 3256 * Pick a random index and compute the offsets into packobj and bigobj. 3257 */ 3258 n = ztest_random(regions) * stride + ztest_random(width); 3259 s = 1 + ztest_random(width - 1); 3260 3261 packoff = n * sizeof (bufwad_t); 3262 packsize = s * sizeof (bufwad_t); 3263 3264 bigoff = n * chunksize; 3265 bigsize = s * chunksize; 3266 3267 packbuf = umem_alloc(packsize, UMEM_NOFAIL); 3268 bigbuf = umem_alloc(bigsize, UMEM_NOFAIL); 3269 3270 /* 3271 * free_percent of the time, free a range of bigobj rather than 3272 * overwriting it. 3273 */ 3274 freeit = (ztest_random(100) < free_percent); 3275 3276 /* 3277 * Read the current contents of our objects. 3278 */ 3279 error = dmu_read(os, packobj, packoff, packsize, packbuf, 3280 DMU_READ_PREFETCH); 3281 ASSERT3U(error, ==, 0); 3282 error = dmu_read(os, bigobj, bigoff, bigsize, bigbuf, 3283 DMU_READ_PREFETCH); 3284 ASSERT3U(error, ==, 0); 3285 3286 /* 3287 * Get a tx for the mods to both packobj and bigobj. 3288 */ 3289 tx = dmu_tx_create(os); 3290 3291 dmu_tx_hold_write(tx, packobj, packoff, packsize); 3292 3293 if (freeit) 3294 dmu_tx_hold_free(tx, bigobj, bigoff, bigsize); 3295 else 3296 dmu_tx_hold_write(tx, bigobj, bigoff, bigsize); 3297 3298 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG); 3299 if (txg == 0) { 3300 umem_free(packbuf, packsize); 3301 umem_free(bigbuf, bigsize); 3302 return; 3303 } 3304 3305 dmu_object_set_checksum(os, bigobj, 3306 (enum zio_checksum)ztest_random_dsl_prop(ZFS_PROP_CHECKSUM), tx); 3307 3308 dmu_object_set_compress(os, bigobj, 3309 (enum zio_compress)ztest_random_dsl_prop(ZFS_PROP_COMPRESSION), tx); 3310 3311 /* 3312 * For each index from n to n + s, verify that the existing bufwad 3313 * in packobj matches the bufwads at the head and tail of the 3314 * corresponding chunk in bigobj. Then update all three bufwads 3315 * with the new values we want to write out. 
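 * Note that a bw_data of 0 marks a bufwad that was zeroed by a free,
 * which is why the index check below is skipped when bw_data == 0.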
3316 */ 3317 for (i = 0; i < s; i++) { 3318 /* LINTED */ 3319 pack = (bufwad_t *)((char *)packbuf + i * sizeof (bufwad_t)); 3320 /* LINTED */ 3321 bigH = (bufwad_t *)((char *)bigbuf + i * chunksize); 3322 /* LINTED */ 3323 bigT = (bufwad_t *)((char *)bigH + chunksize) - 1; 3324 3325 ASSERT((uintptr_t)bigH - (uintptr_t)bigbuf < bigsize); 3326 ASSERT((uintptr_t)bigT - (uintptr_t)bigbuf < bigsize); 3327 3328 if (pack->bw_txg > txg) 3329 fatal(0, "future leak: got %llx, open txg is %llx", 3330 pack->bw_txg, txg); 3331 3332 if (pack->bw_data != 0 && pack->bw_index != n + i) 3333 fatal(0, "wrong index: got %llx, wanted %llx+%llx", 3334 pack->bw_index, n, i); 3335 3336 if (bcmp(pack, bigH, sizeof (bufwad_t)) != 0) 3337 fatal(0, "pack/bigH mismatch in %p/%p", pack, bigH); 3338 3339 if (bcmp(pack, bigT, sizeof (bufwad_t)) != 0) 3340 fatal(0, "pack/bigT mismatch in %p/%p", pack, bigT); 3341 3342 if (freeit) { 3343 bzero(pack, sizeof (bufwad_t)); 3344 } else { 3345 pack->bw_index = n + i; 3346 pack->bw_txg = txg; 3347 pack->bw_data = 1 + ztest_random(-2ULL); 3348 } 3349 *bigH = *pack; 3350 *bigT = *pack; 3351 } 3352 3353 /* 3354 * We've verified all the old bufwads, and made new ones. 3355 * Now write them out. 3356 */ 3357 dmu_write(os, packobj, packoff, packsize, packbuf, tx); 3358 3359 if (freeit) { 3360 if (zopt_verbose >= 7) { 3361 (void) printf("freeing offset %llx size %llx" 3362 " txg %llx\n", 3363 (u_longlong_t)bigoff, 3364 (u_longlong_t)bigsize, 3365 (u_longlong_t)txg); 3366 } 3367 VERIFY(0 == dmu_free_range(os, bigobj, bigoff, bigsize, tx)); 3368 } else { 3369 if (zopt_verbose >= 7) { 3370 (void) printf("writing offset %llx size %llx" 3371 " txg %llx\n", 3372 (u_longlong_t)bigoff, 3373 (u_longlong_t)bigsize, 3374 (u_longlong_t)txg); 3375 } 3376 dmu_write(os, bigobj, bigoff, bigsize, bigbuf, tx); 3377 } 3378 3379 dmu_tx_commit(tx); 3380 3381 /* 3382 * Sanity check the stuff we just wrote. 3383 */ 3384 { 3385 void *packcheck = umem_alloc(packsize, UMEM_NOFAIL); 3386 void *bigcheck = umem_alloc(bigsize, UMEM_NOFAIL); 3387 3388 VERIFY(0 == dmu_read(os, packobj, packoff, 3389 packsize, packcheck, DMU_READ_PREFETCH)); 3390 VERIFY(0 == dmu_read(os, bigobj, bigoff, 3391 bigsize, bigcheck, DMU_READ_PREFETCH)); 3392 3393 ASSERT(bcmp(packbuf, packcheck, packsize) == 0); 3394 ASSERT(bcmp(bigbuf, bigcheck, bigsize) == 0); 3395 3396 umem_free(packcheck, packsize); 3397 umem_free(bigcheck, bigsize); 3398 } 3399 3400 umem_free(packbuf, packsize); 3401 umem_free(bigbuf, bigsize); 3402} 3403 3404void 3405compare_and_update_pbbufs(uint64_t s, bufwad_t *packbuf, bufwad_t *bigbuf, 3406 uint64_t bigsize, uint64_t n, uint64_t chunksize, uint64_t txg) 3407{ 3408 uint64_t i; 3409 bufwad_t *pack; 3410 bufwad_t *bigH; 3411 bufwad_t *bigT; 3412 3413 /* 3414 * For each index from n to n + s, verify that the existing bufwad 3415 * in packobj matches the bufwads at the head and tail of the 3416 * corresponding chunk in bigobj. Then update all three bufwads 3417 * with the new values we want to write out. 
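 * (This is the same verify-and-update logic as in ztest_dmu_read_write()
 * above; it is a separate function so that the zcopy test below can call
 * it once per iteration.)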
3418 */ 3419 for (i = 0; i < s; i++) { 3420 /* LINTED */ 3421 pack = (bufwad_t *)((char *)packbuf + i * sizeof (bufwad_t)); 3422 /* LINTED */ 3423 bigH = (bufwad_t *)((char *)bigbuf + i * chunksize); 3424 /* LINTED */ 3425 bigT = (bufwad_t *)((char *)bigH + chunksize) - 1; 3426 3427 ASSERT((uintptr_t)bigH - (uintptr_t)bigbuf < bigsize); 3428 ASSERT((uintptr_t)bigT - (uintptr_t)bigbuf < bigsize); 3429 3430 if (pack->bw_txg > txg) 3431 fatal(0, "future leak: got %llx, open txg is %llx", 3432 pack->bw_txg, txg); 3433 3434 if (pack->bw_data != 0 && pack->bw_index != n + i) 3435 fatal(0, "wrong index: got %llx, wanted %llx+%llx", 3436 pack->bw_index, n, i); 3437 3438 if (bcmp(pack, bigH, sizeof (bufwad_t)) != 0) 3439 fatal(0, "pack/bigH mismatch in %p/%p", pack, bigH); 3440 3441 if (bcmp(pack, bigT, sizeof (bufwad_t)) != 0) 3442 fatal(0, "pack/bigT mismatch in %p/%p", pack, bigT); 3443 3444 pack->bw_index = n + i; 3445 pack->bw_txg = txg; 3446 pack->bw_data = 1 + ztest_random(-2ULL); 3447 3448 *bigH = *pack; 3449 *bigT = *pack; 3450 } 3451} 3452 3453void 3454ztest_dmu_read_write_zcopy(ztest_ds_t *zd, uint64_t id) 3455{ 3456 objset_t *os = zd->zd_os; 3457 ztest_od_t od[2]; 3458 dmu_tx_t *tx; 3459 uint64_t i; 3460 int error; 3461 uint64_t n, s, txg; 3462 bufwad_t *packbuf, *bigbuf; 3463 uint64_t packobj, packoff, packsize, bigobj, bigoff, bigsize; 3464 uint64_t blocksize = ztest_random_blocksize(); 3465 uint64_t chunksize = blocksize; 3466 uint64_t regions = 997; 3467 uint64_t stride = 123456789ULL; 3468 uint64_t width = 9; 3469 dmu_buf_t *bonus_db; 3470 arc_buf_t **bigbuf_arcbufs; 3471 dmu_object_info_t doi; 3472 3473 /* 3474 * This test uses two objects, packobj and bigobj, that are always 3475 * updated together (i.e. in the same tx) so that their contents are 3476 * in sync and can be compared. Their contents relate to each other 3477 * in a simple way: packobj is a dense array of 'bufwad' structures, 3478 * while bigobj is a sparse array of the same bufwads. Specifically, 3479 * for any index n, there are three bufwads that should be identical: 3480 * 3481 * packobj, at offset n * sizeof (bufwad_t) 3482 * bigobj, at the head of the nth chunk 3483 * bigobj, at the tail of the nth chunk 3484 * 3485 * The chunk size is set equal to bigobj block size so that 3486 * dmu_assign_arcbuf() can be tested for object updates. 3487 */ 3488 3489 /* 3490 * Read the directory info. If it's the first time, set things up. 3491 */ 3492 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_UINT64_OTHER, blocksize, 0); 3493 ztest_od_init(&od[1], id, FTAG, 1, DMU_OT_UINT64_OTHER, 0, chunksize); 3494 3495 if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0) 3496 return; 3497 3498 bigobj = od[0].od_object; 3499 packobj = od[1].od_object; 3500 blocksize = od[0].od_blocksize; 3501 chunksize = blocksize; 3502 ASSERT(chunksize == od[1].od_gen); 3503 3504 VERIFY(dmu_object_info(os, bigobj, &doi) == 0); 3505 VERIFY(ISP2(doi.doi_data_block_size)); 3506 VERIFY(chunksize == doi.doi_data_block_size); 3507 VERIFY(chunksize >= 2 * sizeof (bufwad_t)); 3508 3509 /* 3510 * Pick a random index and compute the offsets into packobj and bigobj. 
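 * With width = 9, s = 1 + ztest_random(width - 1) covers 1 to 8 chunks,
 * and each chunk here is exactly one block.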
3511 */ 3512 n = ztest_random(regions) * stride + ztest_random(width); 3513 s = 1 + ztest_random(width - 1); 3514 3515 packoff = n * sizeof (bufwad_t); 3516 packsize = s * sizeof (bufwad_t); 3517 3518 bigoff = n * chunksize; 3519 bigsize = s * chunksize; 3520 3521 packbuf = umem_zalloc(packsize, UMEM_NOFAIL); 3522 bigbuf = umem_zalloc(bigsize, UMEM_NOFAIL); 3523 3524 VERIFY3U(0, ==, dmu_bonus_hold(os, bigobj, FTAG, &bonus_db)); 3525 3526 bigbuf_arcbufs = umem_zalloc(2 * s * sizeof (arc_buf_t *), UMEM_NOFAIL); 3527 3528 /* 3529 * Iteration 0 tests zcopy for DB_UNCACHED dbufs. 3530 * Iteration 1 tests zcopy to already referenced dbufs. 3531 * Iteration 2 tests zcopy to dirty dbuf in the same txg. 3532 * Iteration 3 tests zcopy to dbuf dirty in previous txg. 3533 * Iteration 4 tests zcopy when dbuf is no longer dirty. 3534 * Iteration 5 tests zcopy when it can't be done. 3535 * Iteration 6 does one more zcopy write. 3536 */ 3537 for (i = 0; i < 7; i++) { 3538 uint64_t j; 3539 uint64_t off; 3540 3541 /* 3542 * In iteration 5 (i == 5) use arcbufs 3543 * that don't match bigobj blksz to test 3544 * dmu_assign_arcbuf() when it can't directly 3545 * assign an arcbuf to a dbuf. 3546 */ 3547 for (j = 0; j < s; j++) { 3548 if (i != 5) { 3549 bigbuf_arcbufs[j] = 3550 dmu_request_arcbuf(bonus_db, chunksize); 3551 } else { 3552 bigbuf_arcbufs[2 * j] = 3553 dmu_request_arcbuf(bonus_db, chunksize / 2); 3554 bigbuf_arcbufs[2 * j + 1] = 3555 dmu_request_arcbuf(bonus_db, chunksize / 2); 3556 } 3557 } 3558 3559 /* 3560 * Get a tx for the mods to both packobj and bigobj. 3561 */ 3562 tx = dmu_tx_create(os); 3563 3564 dmu_tx_hold_write(tx, packobj, packoff, packsize); 3565 dmu_tx_hold_write(tx, bigobj, bigoff, bigsize); 3566 3567 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG); 3568 if (txg == 0) { 3569 umem_free(packbuf, packsize); 3570 umem_free(bigbuf, bigsize); 3571 for (j = 0; j < s; j++) { 3572 if (i != 5) { 3573 dmu_return_arcbuf(bigbuf_arcbufs[j]); 3574 } else { 3575 dmu_return_arcbuf( 3576 bigbuf_arcbufs[2 * j]); 3577 dmu_return_arcbuf( 3578 bigbuf_arcbufs[2 * j + 1]); 3579 } 3580 } 3581 umem_free(bigbuf_arcbufs, 2 * s * sizeof (arc_buf_t *)); 3582 dmu_buf_rele(bonus_db, FTAG); 3583 return; 3584 } 3585 3586 /* 3587 * 50% of the time don't read objects in the 1st iteration to 3588 * test dmu_assign_arcbuf() for the case when there are no 3589 * existing dbufs for the specified offsets. 3590 */ 3591 if (i != 0 || ztest_random(2) != 0) { 3592 error = dmu_read(os, packobj, packoff, 3593 packsize, packbuf, DMU_READ_PREFETCH); 3594 ASSERT3U(error, ==, 0); 3595 error = dmu_read(os, bigobj, bigoff, bigsize, 3596 bigbuf, DMU_READ_PREFETCH); 3597 ASSERT3U(error, ==, 0); 3598 } 3599 compare_and_update_pbbufs(s, packbuf, bigbuf, bigsize, 3600 n, chunksize, txg); 3601 3602 /* 3603 * We've verified all the old bufwads, and made new ones. 3604 * Now write them out.
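 * packobj goes through an ordinary dmu_write(); bigobj is updated by
 * assigning arc bufs with dmu_assign_arcbuf(), which is the zero-copy
 * path under test.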
3605 */ 3606 dmu_write(os, packobj, packoff, packsize, packbuf, tx); 3607 if (zopt_verbose >= 7) { 3608 (void) printf("writing offset %llx size %llx" 3609 " txg %llx\n", 3610 (u_longlong_t)bigoff, 3611 (u_longlong_t)bigsize, 3612 (u_longlong_t)txg); 3613 } 3614 for (off = bigoff, j = 0; j < s; j++, off += chunksize) { 3615 dmu_buf_t *dbt; 3616 if (i != 5) { 3617 bcopy((caddr_t)bigbuf + (off - bigoff), 3618 bigbuf_arcbufs[j]->b_data, chunksize); 3619 } else { 3620 bcopy((caddr_t)bigbuf + (off - bigoff), 3621 bigbuf_arcbufs[2 * j]->b_data, 3622 chunksize / 2); 3623 bcopy((caddr_t)bigbuf + (off - bigoff) + 3624 chunksize / 2, 3625 bigbuf_arcbufs[2 * j + 1]->b_data, 3626 chunksize / 2); 3627 } 3628 3629 if (i == 1) { 3630 VERIFY(dmu_buf_hold(os, bigobj, off, 3631 FTAG, &dbt, DMU_READ_NO_PREFETCH) == 0); 3632 } 3633 if (i != 5) { 3634 dmu_assign_arcbuf(bonus_db, off, 3635 bigbuf_arcbufs[j], tx); 3636 } else { 3637 dmu_assign_arcbuf(bonus_db, off, 3638 bigbuf_arcbufs[2 * j], tx); 3639 dmu_assign_arcbuf(bonus_db, 3640 off + chunksize / 2, 3641 bigbuf_arcbufs[2 * j + 1], tx); 3642 } 3643 if (i == 1) { 3644 dmu_buf_rele(dbt, FTAG); 3645 } 3646 } 3647 dmu_tx_commit(tx); 3648 3649 /* 3650 * Sanity check the stuff we just wrote. 3651 */ 3652 { 3653 void *packcheck = umem_alloc(packsize, UMEM_NOFAIL); 3654 void *bigcheck = umem_alloc(bigsize, UMEM_NOFAIL); 3655 3656 VERIFY(0 == dmu_read(os, packobj, packoff, 3657 packsize, packcheck, DMU_READ_PREFETCH)); 3658 VERIFY(0 == dmu_read(os, bigobj, bigoff, 3659 bigsize, bigcheck, DMU_READ_PREFETCH)); 3660 3661 ASSERT(bcmp(packbuf, packcheck, packsize) == 0); 3662 ASSERT(bcmp(bigbuf, bigcheck, bigsize) == 0); 3663 3664 umem_free(packcheck, packsize); 3665 umem_free(bigcheck, bigsize); 3666 } 3667 if (i == 2) { 3668 txg_wait_open(dmu_objset_pool(os), 0); 3669 } else if (i == 3) { 3670 txg_wait_synced(dmu_objset_pool(os), 0); 3671 } 3672 } 3673 3674 dmu_buf_rele(bonus_db, FTAG); 3675 umem_free(packbuf, packsize); 3676 umem_free(bigbuf, bigsize); 3677 umem_free(bigbuf_arcbufs, 2 * s * sizeof (arc_buf_t *)); 3678} 3679 3680/* ARGSUSED */ 3681void 3682ztest_dmu_write_parallel(ztest_ds_t *zd, uint64_t id) 3683{ 3684 ztest_od_t od[1]; 3685 uint64_t offset = (1ULL << (ztest_random(20) + 43)) + 3686 (ztest_random(ZTEST_RANGE_LOCKS) << SPA_MAXBLOCKSHIFT); 3687 3688 /* 3689 * Have multiple threads write to large offsets in an object 3690 * to verify that parallel writes to an object -- even to the 3691 * same blocks within the object -- don't cause any trouble.
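 * (Using the fixed ID_PARALLEL rather than the per-thread id keys every
 * thread to the same object template, so the writes genuinely collide.)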
3692 */ 3693 ztest_od_init(&od[0], ID_PARALLEL, FTAG, 0, DMU_OT_UINT64_OTHER, 0, 0); 3694 3695 if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0) 3696 return; 3697 3698 while (ztest_random(10) != 0) 3699 ztest_io(zd, od[0].od_object, offset); 3700} 3701 3702void 3703ztest_dmu_prealloc(ztest_ds_t *zd, uint64_t id) 3704{ 3705 ztest_od_t od[1]; 3706 uint64_t offset = (1ULL << (ztest_random(4) + SPA_MAXBLOCKSHIFT)) + 3707 (ztest_random(ZTEST_RANGE_LOCKS) << SPA_MAXBLOCKSHIFT); 3708 uint64_t count = ztest_random(20) + 1; 3709 uint64_t blocksize = ztest_random_blocksize(); 3710 void *data; 3711 3712 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_UINT64_OTHER, blocksize, 0); 3713 3714 if (ztest_object_init(zd, od, sizeof (od), !ztest_random(2)) != 0) 3715 return; 3716 3717 if (ztest_truncate(zd, od[0].od_object, offset, count * blocksize) != 0) 3718 return; 3719 3720 ztest_prealloc(zd, od[0].od_object, offset, count * blocksize); 3721 3722 data = umem_zalloc(blocksize, UMEM_NOFAIL); 3723 3724 while (ztest_random(count) != 0) { 3725 uint64_t randoff = offset + (ztest_random(count) * blocksize); 3726 if (ztest_write(zd, od[0].od_object, randoff, blocksize, 3727 data) != 0) 3728 break; 3729 while (ztest_random(4) != 0) 3730 ztest_io(zd, od[0].od_object, randoff); 3731 } 3732 3733 umem_free(data, blocksize); 3734} 3735 3736/* 3737 * Verify that zap_{create,destroy,add,remove,update} work as expected. 3738 */ 3739#define ZTEST_ZAP_MIN_INTS 1 3740#define ZTEST_ZAP_MAX_INTS 4 3741#define ZTEST_ZAP_MAX_PROPS 1000 3742 3743void 3744ztest_zap(ztest_ds_t *zd, uint64_t id) 3745{ 3746 objset_t *os = zd->zd_os; 3747 ztest_od_t od[1]; 3748 uint64_t object; 3749 uint64_t txg, last_txg; 3750 uint64_t value[ZTEST_ZAP_MAX_INTS]; 3751 uint64_t zl_ints, zl_intsize, prop; 3752 int i, ints; 3753 dmu_tx_t *tx; 3754 char propname[100], txgname[100]; 3755 int error; 3756 char *hc[2] = { "s.acl.h", ".s.open.h.hyLZlg" }; 3757 3758 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_ZAP_OTHER, 0, 0); 3759 3760 if (ztest_object_init(zd, od, sizeof (od), !ztest_random(2)) != 0) 3761 return; 3762 3763 object = od[0].od_object; 3764 3765 /* 3766 * Generate a known hash collision, and verify that 3767 * we can look up and remove both entries. 3768 */ 3769 tx = dmu_tx_create(os); 3770 dmu_tx_hold_zap(tx, object, B_TRUE, NULL); 3771 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG); 3772 if (txg == 0) 3773 return; 3774 for (i = 0; i < 2; i++) { 3775 value[i] = i; 3776 VERIFY3U(0, ==, zap_add(os, object, hc[i], sizeof (uint64_t), 3777 1, &value[i], tx)); 3778 } 3779 for (i = 0; i < 2; i++) { 3780 VERIFY3U(EEXIST, ==, zap_add(os, object, hc[i], 3781 sizeof (uint64_t), 1, &value[i], tx)); 3782 VERIFY3U(0, ==, 3783 zap_length(os, object, hc[i], &zl_intsize, &zl_ints)); 3784 ASSERT3U(zl_intsize, ==, sizeof (uint64_t)); 3785 ASSERT3U(zl_ints, ==, 1); 3786 } 3787 for (i = 0; i < 2; i++) { 3788 VERIFY3U(0, ==, zap_remove(os, object, hc[i], tx)); 3789 } 3790 dmu_tx_commit(tx); 3791 3792 /* 3793 * Generate a bunch of random entries. 3794 */ 3795 ints = MAX(ZTEST_ZAP_MIN_INTS, object % ZTEST_ZAP_MAX_INTS); 3796 3797 prop = ztest_random(ZTEST_ZAP_MAX_PROPS); 3798 (void) sprintf(propname, "prop_%llu", (u_longlong_t)prop); 3799 (void) sprintf(txgname, "txg_%llu", (u_longlong_t)prop); 3800 bzero(value, sizeof (value)); 3801 last_txg = 0; 3802 3803 /* 3804 * If these zap entries already exist, validate their contents.
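 * The checks here mirror the update below: the nth element of prop_%llu
 * should equal last_txg + object + n.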
3805 */ 3806 error = zap_length(os, object, txgname, &zl_intsize, &zl_ints); 3807 if (error == 0) { 3808 ASSERT3U(zl_intsize, ==, sizeof (uint64_t)); 3809 ASSERT3U(zl_ints, ==, 1); 3810 3811 VERIFY(zap_lookup(os, object, txgname, zl_intsize, 3812 zl_ints, &last_txg) == 0); 3813 3814 VERIFY(zap_length(os, object, propname, &zl_intsize, 3815 &zl_ints) == 0); 3816 3817 ASSERT3U(zl_intsize, ==, sizeof (uint64_t)); 3818 ASSERT3U(zl_ints, ==, ints); 3819 3820 VERIFY(zap_lookup(os, object, propname, zl_intsize, 3821 zl_ints, value) == 0); 3822 3823 for (i = 0; i < ints; i++) { 3824 ASSERT3U(value[i], ==, last_txg + object + i); 3825 } 3826 } else { 3827 ASSERT3U(error, ==, ENOENT); 3828 } 3829 3830 /* 3831 * Atomically update two entries in our zap object. 3832 * The first is named txg_%llu, and contains the txg 3833 * in which the property was last updated. The second 3834 * is named prop_%llu, and the nth element of its value 3835 * should be txg + object + n. 3836 */ 3837 tx = dmu_tx_create(os); 3838 dmu_tx_hold_zap(tx, object, B_TRUE, NULL); 3839 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG); 3840 if (txg == 0) 3841 return; 3842 3843 if (last_txg > txg) 3844 fatal(0, "zap future leak: old %llu new %llu", last_txg, txg); 3845 3846 for (i = 0; i < ints; i++) 3847 value[i] = txg + object + i; 3848 3849 VERIFY3U(0, ==, zap_update(os, object, txgname, sizeof (uint64_t), 3850 1, &txg, tx)); 3851 VERIFY3U(0, ==, zap_update(os, object, propname, sizeof (uint64_t), 3852 ints, value, tx)); 3853 3854 dmu_tx_commit(tx); 3855 3856 /* 3857 * Remove a random pair of entries. 3858 */ 3859 prop = ztest_random(ZTEST_ZAP_MAX_PROPS); 3860 (void) sprintf(propname, "prop_%llu", (u_longlong_t)prop); 3861 (void) sprintf(txgname, "txg_%llu", (u_longlong_t)prop); 3862 3863 error = zap_length(os, object, txgname, &zl_intsize, &zl_ints); 3864 3865 if (error == ENOENT) 3866 return; 3867 3868 ASSERT3U(error, ==, 0); 3869 3870 tx = dmu_tx_create(os); 3871 dmu_tx_hold_zap(tx, object, B_TRUE, NULL); 3872 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG); 3873 if (txg == 0) 3874 return; 3875 VERIFY3U(0, ==, zap_remove(os, object, txgname, tx)); 3876 VERIFY3U(0, ==, zap_remove(os, object, propname, tx)); 3877 dmu_tx_commit(tx); 3878} 3879 3880/* 3881 * Test the upgrading of a microzap to a fatzap. 3882 */ 3883void 3884ztest_fzap(ztest_ds_t *zd, uint64_t id) 3885{ 3886 objset_t *os = zd->zd_os; 3887 ztest_od_t od[1]; 3888 uint64_t object, txg; 3889 3890 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_ZAP_OTHER, 0, 0); 3891 3892 if (ztest_object_init(zd, od, sizeof (od), !ztest_random(2)) != 0) 3893 return; 3894 3895 object = od[0].od_object; 3896 3897 /* 3898 * Add entries to this ZAP and make sure it spills over 3899 * and gets upgraded to a fatzap. Also, since we are adding 3900 * 2050 entries we should see ptrtbl growth and leaf-block splits.
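 * (A microzap is confined to a single block of at most a few thousand
 * 64-byte entries, so 2050 additions are enough to force the conversion.)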
3901 */ 3902 for (int i = 0; i < 2050; i++) { 3903 char name[MAXNAMELEN]; 3904 uint64_t value = i; 3905 dmu_tx_t *tx; 3906 int error; 3907 3908 (void) snprintf(name, sizeof (name), "fzap-%llu-%llu", 3909 id, value); 3910 3911 tx = dmu_tx_create(os); 3912 dmu_tx_hold_zap(tx, object, B_TRUE, name); 3913 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG); 3914 if (txg == 0) 3915 return; 3916 error = zap_add(os, object, name, sizeof (uint64_t), 1, 3917 &value, tx); 3918 ASSERT(error == 0 || error == EEXIST); 3919 dmu_tx_commit(tx); 3920 } 3921} 3922 3923/* ARGSUSED */ 3924void 3925ztest_zap_parallel(ztest_ds_t *zd, uint64_t id) 3926{ 3927 objset_t *os = zd->zd_os; 3928 ztest_od_t od[1]; 3929 uint64_t txg, object, count, wsize, wc, zl_wsize, zl_wc; 3930 dmu_tx_t *tx; 3931 int i, namelen, error; 3932 int micro = ztest_random(2); 3933 char name[20], string_value[20]; 3934 void *data; 3935 3936 ztest_od_init(&od[0], ID_PARALLEL, FTAG, micro, DMU_OT_ZAP_OTHER, 0, 0); 3937 3938 if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0) 3939 return; 3940 3941 object = od[0].od_object; 3942 3943 /* 3944 * Generate a random name of the form 'xxx.....' where each 3945 * x is a random printable character and the dots are dots. 3946 * There are 94 such characters, and the name length goes from 3947 * 6 to 20, so there are 94^3 * 15 = 12,458,760 possible names. 3948 */ 3949 namelen = ztest_random(sizeof (name) - 5) + 5 + 1; 3950 3951 for (i = 0; i < 3; i++) 3952 name[i] = '!' + ztest_random('~' - '!' + 1); 3953 for (; i < namelen - 1; i++) 3954 name[i] = '.'; 3955 name[i] = '\0'; 3956 3957 if ((namelen & 1) || micro) { 3958 wsize = sizeof (txg); 3959 wc = 1; 3960 data = &txg; 3961 } else { 3962 wsize = 1; 3963 wc = namelen; 3964 data = string_value; 3965 } 3966 3967 count = -1ULL; 3968 VERIFY(zap_count(os, object, &count) == 0); 3969 ASSERT(count != -1ULL); 3970 3971 /* 3972 * Select an operation: length, lookup, add, update, remove. 3973 */ 3974 i = ztest_random(5); 3975 3976 if (i >= 2) { 3977 tx = dmu_tx_create(os); 3978 dmu_tx_hold_zap(tx, object, B_TRUE, NULL); 3979 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG); 3980 if (txg == 0) 3981 return; 3982 bcopy(name, string_value, namelen); 3983 } else { 3984 tx = NULL; 3985 txg = 0; 3986 bzero(string_value, namelen); 3987 } 3988 3989 switch (i) { 3990 3991 case 0: 3992 error = zap_length(os, object, name, &zl_wsize, &zl_wc); 3993 if (error == 0) { 3994 ASSERT3U(wsize, ==, zl_wsize); 3995 ASSERT3U(wc, ==, zl_wc); 3996 } else { 3997 ASSERT3U(error, ==, ENOENT); 3998 } 3999 break; 4000 4001 case 1: 4002 error = zap_lookup(os, object, name, wsize, wc, data); 4003 if (error == 0) { 4004 if (data == string_value && 4005 bcmp(name, data, namelen) != 0) 4006 fatal(0, "name '%s' != val '%s' len %d", 4007 name, data, namelen); 4008 } else { 4009 ASSERT3U(error, ==, ENOENT); 4010 } 4011 break; 4012 4013 case 2: 4014 error = zap_add(os, object, name, wsize, wc, data, tx); 4015 ASSERT(error == 0 || error == EEXIST); 4016 break; 4017 4018 case 3: 4019 VERIFY(zap_update(os, object, name, wsize, wc, data, tx) == 0); 4020 break; 4021 4022 case 4: 4023 error = zap_remove(os, object, name, tx); 4024 ASSERT(error == 0 || error == ENOENT); 4025 break; 4026 } 4027 4028 if (tx != NULL) 4029 dmu_tx_commit(tx); 4030} 4031 4032/* 4033 * Commit callback data. 
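 * One ztest_cb_data_t is allocated per registered callback; zcd_node
 * threads it onto the global zcl.zcl_callbacks list until the callback
 * fires.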
4034 */ 4035typedef struct ztest_cb_data { 4036 list_node_t zcd_node; 4037 uint64_t zcd_txg; 4038 int zcd_expected_err; 4039 boolean_t zcd_added; 4040 boolean_t zcd_called; 4041 spa_t *zcd_spa; 4042} ztest_cb_data_t; 4043 4044/* This is the actual commit callback function */ 4045static void 4046ztest_commit_callback(void *arg, int error) 4047{ 4048 ztest_cb_data_t *data = arg; 4049 uint64_t synced_txg; 4050 4051 VERIFY(data != NULL); 4052 VERIFY3S(data->zcd_expected_err, ==, error); 4053 VERIFY(!data->zcd_called); 4054 4055 synced_txg = spa_last_synced_txg(data->zcd_spa); 4056 if (data->zcd_txg > synced_txg) 4057 fatal(0, "commit callback of txg %" PRIu64 " called prematurely" 4058 ", last synced txg = %" PRIu64 "\n", data->zcd_txg, 4059 synced_txg); 4060 4061 data->zcd_called = B_TRUE; 4062 4063 if (error == ECANCELED) { 4064 ASSERT3U(data->zcd_txg, ==, 0); 4065 ASSERT(!data->zcd_added); 4066 4067 /* 4068 * The private callback data should be destroyed here, but 4069 * since we are going to check the zcd_called field after 4070 * dmu_tx_abort(), we will destroy it there. 4071 */ 4072 return; 4073 } 4074 4075 /* Was this callback added to the global callback list? */ 4076 if (!data->zcd_added) 4077 goto out; 4078 4079 ASSERT3U(data->zcd_txg, !=, 0); 4080 4081 /* Remove our callback from the list */ 4082 (void) mutex_lock(&zcl.zcl_callbacks_lock); 4083 list_remove(&zcl.zcl_callbacks, data); 4084 (void) mutex_unlock(&zcl.zcl_callbacks_lock); 4085 4086out: 4087 umem_free(data, sizeof (ztest_cb_data_t)); 4088} 4089 4090/* Allocate and initialize callback data structure */ 4091static ztest_cb_data_t * 4092ztest_create_cb_data(objset_t *os, uint64_t txg) 4093{ 4094 ztest_cb_data_t *cb_data; 4095 4096 cb_data = umem_zalloc(sizeof (ztest_cb_data_t), UMEM_NOFAIL); 4097 4098 cb_data->zcd_txg = txg; 4099 cb_data->zcd_spa = dmu_objset_spa(os); 4100 4101 return (cb_data); 4102} 4103 4104/* 4105 * If a number of txgs equal to this threshold have been created after a commit 4106 * callback has been registered but not called, then we assume there is an 4107 * implementation bug. 4108 */ 4109#define ZTEST_COMMIT_CALLBACK_THRESH (TXG_CONCURRENT_STATES + 2) 4110 4111/* 4112 * Commit callback test. 4113 */ 4114void 4115ztest_dmu_commit_callbacks(ztest_ds_t *zd, uint64_t id) 4116{ 4117 objset_t *os = zd->zd_os; 4118 ztest_od_t od[1]; 4119 dmu_tx_t *tx; 4120 ztest_cb_data_t *cb_data[3], *tmp_cb; 4121 uint64_t old_txg, txg; 4122 int i, error = 0; 4123 4124 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_UINT64_OTHER, 0, 0); 4125 4126 if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0) 4127 return; 4128 4129 tx = dmu_tx_create(os); 4130 4131 cb_data[0] = ztest_create_cb_data(os, 0); 4132 dmu_tx_callback_register(tx, ztest_commit_callback, cb_data[0]); 4133 4134 dmu_tx_hold_write(tx, od[0].od_object, 0, sizeof (uint64_t)); 4135 4136 /* Every once in a while, abort the transaction on purpose */ 4137 if (ztest_random(100) == 0) 4138 error = -1; 4139 4140 if (!error) 4141 error = dmu_tx_assign(tx, TXG_NOWAIT); 4142 4143 txg = error ? 0 : dmu_tx_get_txg(tx); 4144 4145 cb_data[0]->zcd_txg = txg; 4146 cb_data[1] = ztest_create_cb_data(os, txg); 4147 dmu_tx_callback_register(tx, ztest_commit_callback, cb_data[1]); 4148 4149 if (error) { 4150 /* 4151 * It's not a strict requirement to call the registered 4152 * callbacks from inside dmu_tx_abort(), but that's what 4153 * is supposed to happen in the current implementation, 4154 * so we will check for that.
		 */
		for (i = 0; i < 2; i++) {
			cb_data[i]->zcd_expected_err = ECANCELED;
			VERIFY(!cb_data[i]->zcd_called);
		}

		dmu_tx_abort(tx);

		for (i = 0; i < 2; i++) {
			VERIFY(cb_data[i]->zcd_called);
			umem_free(cb_data[i], sizeof (ztest_cb_data_t));
		}

		return;
	}

	cb_data[2] = ztest_create_cb_data(os, txg);
	dmu_tx_callback_register(tx, ztest_commit_callback, cb_data[2]);

	/*
	 * Read existing data to make sure there isn't a future leak.
	 */
	VERIFY(0 == dmu_read(os, od[0].od_object, 0, sizeof (uint64_t),
	    &old_txg, DMU_READ_PREFETCH));

	if (old_txg > txg)
		fatal(0, "future leak: got %" PRIu64 ", open txg is %" PRIu64,
		    old_txg, txg);

	dmu_write(os, od[0].od_object, 0, sizeof (uint64_t), &txg, tx);

	(void) mutex_lock(&zcl.zcl_callbacks_lock);

	/*
	 * Since commit callbacks don't have any ordering requirement and since
	 * it is theoretically possible for a commit callback to be called
	 * after an arbitrary amount of time has elapsed since its txg has been
	 * synced, it is difficult to reliably determine whether a commit
	 * callback hasn't been called due to high load or due to a flawed
	 * implementation.
	 *
	 * In practice, we will assume that if after a certain number of txgs a
	 * commit callback hasn't been called, then most likely there's an
	 * implementation bug.
	 */
	tmp_cb = list_head(&zcl.zcl_callbacks);
	if (tmp_cb != NULL &&
	    tmp_cb->zcd_txg + ZTEST_COMMIT_CALLBACK_THRESH < txg) {
		fatal(0, "Commit callback threshold exceeded, oldest txg: %"
		    PRIu64 ", open txg: %" PRIu64 "\n", tmp_cb->zcd_txg, txg);
	}

	/*
	 * Let's find the place to insert our callbacks.
	 *
	 * Even though the list is ordered by txg, it is possible for the
	 * insertion point to not be the end because our txg may already be
	 * quiescing at this point and other callbacks in the open txg
	 * (from other objsets) may have sneaked in.
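	 *
	 * For example, if callbacks for txgs [100, 100, 101] are already
	 * queued and we are registering callbacks for txg 100, the
	 * backwards walk below steps past the 101 entry and inserts after
	 * the second 100 entry, keeping the list sorted by txg.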
	 */
	tmp_cb = list_tail(&zcl.zcl_callbacks);
	while (tmp_cb != NULL && tmp_cb->zcd_txg > txg)
		tmp_cb = list_prev(&zcl.zcl_callbacks, tmp_cb);

	/* Add the 3 callbacks to the list */
	for (i = 0; i < 3; i++) {
		if (tmp_cb == NULL)
			list_insert_head(&zcl.zcl_callbacks, cb_data[i]);
		else
			list_insert_after(&zcl.zcl_callbacks, tmp_cb,
			    cb_data[i]);

		cb_data[i]->zcd_added = B_TRUE;
		VERIFY(!cb_data[i]->zcd_called);

		tmp_cb = cb_data[i];
	}

	(void) mutex_unlock(&zcl.zcl_callbacks_lock);

	dmu_tx_commit(tx);
}

/* ARGSUSED */
void
ztest_dsl_prop_get_set(ztest_ds_t *zd, uint64_t id)
{
	zfs_prop_t proplist[] = {
		ZFS_PROP_CHECKSUM,
		ZFS_PROP_COMPRESSION,
		ZFS_PROP_COPIES,
		ZFS_PROP_DEDUP
	};
	ztest_shared_t *zs = ztest_shared;

	(void) rw_rdlock(&zs->zs_name_lock);

	for (int p = 0; p < sizeof (proplist) / sizeof (proplist[0]); p++)
		(void) ztest_dsl_prop_set_uint64(zd->zd_name, proplist[p],
		    ztest_random_dsl_prop(proplist[p]), (int)ztest_random(2));

	(void) rw_unlock(&zs->zs_name_lock);
}

/* ARGSUSED */
void
ztest_spa_prop_get_set(ztest_ds_t *zd, uint64_t id)
{
	ztest_shared_t *zs = ztest_shared;
	nvlist_t *props = NULL;

	(void) rw_rdlock(&zs->zs_name_lock);

	(void) ztest_spa_prop_set_uint64(zs, ZPOOL_PROP_DEDUPDITTO,
	    ZIO_DEDUPDITTO_MIN + ztest_random(ZIO_DEDUPDITTO_MIN));

	VERIFY3U(spa_prop_get(zs->zs_spa, &props), ==, 0);

	if (zopt_verbose >= 6)
		dump_nvlist(props, 4);

	nvlist_free(props);

	(void) rw_unlock(&zs->zs_name_lock);
}

/*
 * Test snapshot hold/release and deferred destroy.
 */
void
ztest_dmu_snapshot_hold(ztest_ds_t *zd, uint64_t id)
{
	int error;
	objset_t *os = zd->zd_os;
	objset_t *origin;
	char snapname[100];
	char fullname[100];
	char clonename[100];
	char tag[100];
	char osname[MAXNAMELEN];

	(void) rw_rdlock(&ztest_shared->zs_name_lock);

	dmu_objset_name(os, osname);

	(void) snprintf(snapname, 100, "sh1_%llu", id);
	(void) snprintf(fullname, 100, "%s@%s", osname, snapname);
	(void) snprintf(clonename, 100, "%s/ch1_%llu", osname, id);
	(void) snprintf(tag, 100, "tag_%llu", id);

	/*
	 * Clean up from any previous run.
	 */
	(void) dmu_objset_destroy(clonename, B_FALSE);
	(void) dsl_dataset_user_release(osname, snapname, tag, B_FALSE);
	(void) dmu_objset_destroy(fullname, B_FALSE);

	/*
	 * Create snapshot, clone it, mark snap for deferred destroy,
	 * destroy clone, verify snap was also destroyed.
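	 *
	 * This is roughly the in-kernel equivalent of the following CLI
	 * sequence (dataset names illustrative):
	 *
	 *	zfs snapshot pool/ds@sh1_<id>
	 *	zfs clone pool/ds@sh1_<id> pool/ds/ch1_<id>
	 *	zfs destroy -d pool/ds@sh1_<id>
	 *	zfs destroy pool/ds/ch1_<id>
	 *
	 * after which the deferred-destroyed snapshot should be gone too.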
	 */
	error = dmu_objset_snapshot(osname, snapname, NULL, NULL, FALSE,
	    FALSE, -1);
	if (error) {
		if (error == ENOSPC) {
			ztest_record_enospc("dmu_objset_snapshot");
			goto out;
		}
		fatal(0, "dmu_objset_snapshot(%s) = %d", fullname, error);
	}

	error = dmu_objset_hold(fullname, FTAG, &origin);
	if (error)
		fatal(0, "dmu_objset_hold(%s) = %d", fullname, error);

	error = dmu_objset_clone(clonename, dmu_objset_ds(origin), 0);
	dmu_objset_rele(origin, FTAG);
	if (error) {
		if (error == ENOSPC) {
			ztest_record_enospc("dmu_objset_clone");
			goto out;
		}
		fatal(0, "dmu_objset_clone(%s) = %d", clonename, error);
	}

	error = dmu_objset_destroy(fullname, B_TRUE);
	if (error) {
		fatal(0, "dmu_objset_destroy(%s, B_TRUE) = %d",
		    fullname, error);
	}

	error = dmu_objset_destroy(clonename, B_FALSE);
	if (error)
		fatal(0, "dmu_objset_destroy(%s) = %d", clonename, error);

	error = dmu_objset_hold(fullname, FTAG, &origin);
	if (error != ENOENT)
		fatal(0, "dmu_objset_hold(%s) = %d", fullname, error);

	/*
	 * Create snapshot, add temporary hold, verify that we can't
	 * destroy a held snapshot, mark for deferred destroy,
	 * release hold, verify snapshot was destroyed.
	 */
	error = dmu_objset_snapshot(osname, snapname, NULL, NULL, FALSE,
	    FALSE, -1);
	if (error) {
		if (error == ENOSPC) {
			ztest_record_enospc("dmu_objset_snapshot");
			goto out;
		}
		fatal(0, "dmu_objset_snapshot(%s) = %d", fullname, error);
	}

	error = dsl_dataset_user_hold(osname, snapname, tag, B_FALSE,
	    B_TRUE, -1);
	if (error)
		fatal(0, "dsl_dataset_user_hold(%s, %s) = %d",
		    fullname, tag, error);

	error = dmu_objset_destroy(fullname, B_FALSE);
	if (error != EBUSY) {
		fatal(0, "dmu_objset_destroy(%s, B_FALSE) = %d",
		    fullname, error);
	}

	error = dmu_objset_destroy(fullname, B_TRUE);
	if (error) {
		fatal(0, "dmu_objset_destroy(%s, B_TRUE) = %d",
		    fullname, error);
	}

	error = dsl_dataset_user_release(osname, snapname, tag, B_FALSE);
	if (error)
		fatal(0, "dsl_dataset_user_release(%s, %s) = %d",
		    fullname, tag, error);

	VERIFY(dmu_objset_hold(fullname, FTAG, &origin) == ENOENT);

out:
	(void) rw_unlock(&ztest_shared->zs_name_lock);
}

/*
 * Inject random faults into the on-disk data.
 */
/* ARGSUSED */
void
ztest_fault_inject(ztest_ds_t *zd, uint64_t id)
{
	ztest_shared_t *zs = ztest_shared;
	spa_t *spa = zs->zs_spa;
	int fd;
	uint64_t offset;
	uint64_t leaves;
	uint64_t bad = 0x1990c0ffeedecadeULL;
	uint64_t top, leaf;
	char path0[MAXPATHLEN];
	char pathrand[MAXPATHLEN];
	size_t fsize;
	int bshift = SPA_MAXBLOCKSHIFT + 2;	/* don't scrog all labels */
	int iters = 1000;
	int maxfaults;
	int mirror_save;
	vdev_t *vd0 = NULL;
	uint64_t guid0 = 0;
	boolean_t islog = B_FALSE;

	VERIFY(mutex_lock(&zs->zs_vdev_lock) == 0);
	maxfaults = MAXFAULTS();
	leaves = MAX(zs->zs_mirrors, 1) * zopt_raidz;
	mirror_save = zs->zs_mirrors;
	VERIFY(mutex_unlock(&zs->zs_vdev_lock) == 0);

	ASSERT(leaves >= 1);

	/*
	 * We need SCL_STATE here because we're going to look at vd0->vdev_tsd.
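	 *
	 * For file vdevs, vdev_tsd points at the vdev_file_t holding the
	 * open vnode; taking SCL_STATE as reader keeps a concurrent vdev
	 * close/reopen from tearing that state down while we reach in and
	 * sabotage the underlying file descriptor below.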
	 */
	spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);

	if (ztest_random(2) == 0) {
		/*
		 * Inject errors on a normal data device or slog device.
		 */
		top = ztest_random_vdev_top(spa, B_TRUE);
		leaf = ztest_random(leaves) + zs->zs_splits;

		/*
		 * Generate paths to the first leaf in this top-level vdev,
		 * and to the random leaf we selected.  We'll induce transient
		 * write failures and random online/offline activity on leaf 0,
		 * and we'll write random garbage to the randomly chosen leaf.
		 */
		(void) snprintf(path0, sizeof (path0), ztest_dev_template,
		    zopt_dir, zopt_pool, top * leaves + zs->zs_splits);
		(void) snprintf(pathrand, sizeof (pathrand), ztest_dev_template,
		    zopt_dir, zopt_pool, top * leaves + leaf);

		vd0 = vdev_lookup_by_path(spa->spa_root_vdev, path0);
		if (vd0 != NULL && vd0->vdev_top->vdev_islog)
			islog = B_TRUE;

		if (vd0 != NULL && maxfaults != 1) {
			/*
			 * Make vd0 explicitly claim to be unreadable,
			 * or unwriteable, or reach behind its back
			 * and close the underlying fd.  We can do this if
			 * maxfaults == 0 because we'll fail and reexecute,
			 * and we can do it if maxfaults >= 2 because we'll
			 * have enough redundancy.  If maxfaults == 1, the
			 * combination of this with injection of random data
			 * corruption below exceeds the pool's fault tolerance.
			 */
			vdev_file_t *vf = vd0->vdev_tsd;

			if (vf != NULL && ztest_random(3) == 0) {
				(void) close(vf->vf_vnode->v_fd);
				vf->vf_vnode->v_fd = -1;
			} else if (ztest_random(2) == 0) {
				vd0->vdev_cant_read = B_TRUE;
			} else {
				vd0->vdev_cant_write = B_TRUE;
			}
			guid0 = vd0->vdev_guid;
		}
	} else {
		/*
		 * Inject errors on an l2cache device.
		 */
		spa_aux_vdev_t *sav = &spa->spa_l2cache;

		if (sav->sav_count == 0) {
			spa_config_exit(spa, SCL_STATE, FTAG);
			return;
		}
		vd0 = sav->sav_vdevs[ztest_random(sav->sav_count)];
		guid0 = vd0->vdev_guid;
		(void) strcpy(path0, vd0->vdev_path);
		(void) strcpy(pathrand, vd0->vdev_path);

		leaf = 0;
		leaves = 1;
		maxfaults = INT_MAX;	/* no limit on cache devices */
	}

	spa_config_exit(spa, SCL_STATE, FTAG);

	/*
	 * If we can tolerate two or more faults, or we're dealing
	 * with a slog, randomly online/offline vd0.
	 */
	if ((maxfaults >= 2 || islog) && guid0 != 0) {
		if (ztest_random(10) < 6) {
			int flags = (ztest_random(2) == 0 ?
			    ZFS_OFFLINE_TEMPORARY : 0);

			/*
			 * We have to grab the zs_name_lock as writer to
			 * prevent a race between offlining a slog and
			 * destroying a dataset.  Offlining the slog will
			 * grab a reference on the dataset which may cause
			 * dmu_objset_destroy() to fail with EBUSY, thus
			 * leaving the dataset in an inconsistent state.
			 */
			if (islog)
				(void) rw_wrlock(&ztest_shared->zs_name_lock);

			VERIFY(vdev_offline(spa, guid0, flags) != EBUSY);

			if (islog)
				(void) rw_unlock(&ztest_shared->zs_name_lock);
		} else {
			(void) vdev_online(spa, guid0, 0, NULL);
		}
	}

	if (maxfaults == 0)
		return;

	/*
	 * We have at least single-fault tolerance, so inject data corruption.
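	 *
	 * The offset arithmetic below is subtle: each leaf file is viewed
	 * as a sequence of chunks of size (leaves << bshift), and leaf k
	 * is only ever damaged within its own (k << bshift) window of a
	 * chunk.  Thus no two leaves take errors at ranges covering the
	 * same logical block, which a single-parity configuration could
	 * not tolerate; the final "& -8ULL" keeps every bad word 8-byte
	 * aligned.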
	 */
	fd = open(pathrand, O_RDWR);

	if (fd == -1)	/* we hit a gap in the device namespace */
		return;

	fsize = lseek(fd, 0, SEEK_END);

	while (--iters != 0) {
		offset = ztest_random(fsize / (leaves << bshift)) *
		    (leaves << bshift) + (leaf << bshift) +
		    (ztest_random(1ULL << (bshift - 1)) & -8ULL);

		if (offset >= fsize)
			continue;

		VERIFY(mutex_lock(&zs->zs_vdev_lock) == 0);
		if (mirror_save != zs->zs_mirrors) {
			VERIFY(mutex_unlock(&zs->zs_vdev_lock) == 0);
			(void) close(fd);
			return;
		}

		if (pwrite(fd, &bad, sizeof (bad), offset) != sizeof (bad))
			fatal(1, "can't inject bad word at 0x%llx in %s",
			    offset, pathrand);

		VERIFY(mutex_unlock(&zs->zs_vdev_lock) == 0);

		if (zopt_verbose >= 7)
			(void) printf("injected bad word into %s,"
			    " offset 0x%llx\n", pathrand, (u_longlong_t)offset);
	}

	(void) close(fd);
}

/*
 * Verify that DDT repair works as expected.
 */
void
ztest_ddt_repair(ztest_ds_t *zd, uint64_t id)
{
	ztest_shared_t *zs = ztest_shared;
	spa_t *spa = zs->zs_spa;
	objset_t *os = zd->zd_os;
	ztest_od_t od[1];
	uint64_t object, blocksize, txg, pattern, psize;
	enum zio_checksum checksum = spa_dedup_checksum(spa);
	dmu_buf_t *db;
	dmu_tx_t *tx;
	void *buf;
	blkptr_t blk;
	int copies = 2 * ZIO_DEDUPDITTO_MIN;

	blocksize = ztest_random_blocksize();
	blocksize = MIN(blocksize, 2048);	/* because we write so many */

	ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_UINT64_OTHER, blocksize, 0);

	if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0)
		return;

	/*
	 * Take the name lock as writer to prevent anyone else from changing
	 * the pool and dataset properties we need to maintain during this
	 * test.
	 */
	(void) rw_wrlock(&zs->zs_name_lock);

	if (ztest_dsl_prop_set_uint64(zd->zd_name, ZFS_PROP_DEDUP, checksum,
	    B_FALSE) != 0 ||
	    ztest_dsl_prop_set_uint64(zd->zd_name, ZFS_PROP_COPIES, 1,
	    B_FALSE) != 0) {
		(void) rw_unlock(&zs->zs_name_lock);
		return;
	}

	object = od[0].od_object;
	blocksize = od[0].od_blocksize;
	pattern = spa_guid(spa) ^ dmu_objset_fsid_guid(os);

	ASSERT(object != 0);

	tx = dmu_tx_create(os);
	dmu_tx_hold_write(tx, object, 0, copies * blocksize);
	txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
	if (txg == 0) {
		(void) rw_unlock(&zs->zs_name_lock);
		return;
	}

	/*
	 * Write all the copies of our block.
	 */
	for (int i = 0; i < copies; i++) {
		uint64_t offset = i * blocksize;
		VERIFY(dmu_buf_hold(os, object, offset, FTAG, &db,
		    DMU_READ_NO_PREFETCH) == 0);
		ASSERT(db->db_offset == offset);
		ASSERT(db->db_size == blocksize);
		ASSERT(ztest_pattern_match(db->db_data, db->db_size, pattern) ||
		    ztest_pattern_match(db->db_data, db->db_size, 0ULL));
		dmu_buf_will_fill(db, tx);
		ztest_pattern_set(db->db_data, db->db_size, pattern);
		dmu_buf_rele(db, FTAG);
	}

	dmu_tx_commit(tx);
	txg_wait_synced(spa_get_dsl(spa), txg);

	/*
	 * Find out what block we got.
	 */
	VERIFY(dmu_buf_hold(os, object, 0, FTAG, &db,
	    DMU_READ_NO_PREFETCH) == 0);
	blk = *((dmu_buf_impl_t *)db)->db_blkptr;
	dmu_buf_rele(db, FTAG);

	/*
	 * Damage the block.  Dedup-ditto will save us when we read it later.
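	 *
	 * Here zio_rewrite() with ZIO_FLAG_INDUCE_DAMAGE deliberately
	 * overwrites the block's on-disk copy in place with the inverted
	 * pattern; because the dedup property forced ditto copies of this
	 * block, a later read should catch the checksum error and repair
	 * it from a surviving copy.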
	 */
	psize = BP_GET_PSIZE(&blk);
	buf = zio_buf_alloc(psize);
	ztest_pattern_set(buf, psize, ~pattern);

	(void) zio_wait(zio_rewrite(NULL, spa, 0, &blk,
	    buf, psize, NULL, NULL, ZIO_PRIORITY_SYNC_WRITE,
	    ZIO_FLAG_CANFAIL | ZIO_FLAG_INDUCE_DAMAGE, NULL));

	zio_buf_free(buf, psize);

	(void) rw_unlock(&zs->zs_name_lock);
}

/*
 * Scrub the pool.
 */
/* ARGSUSED */
void
ztest_scrub(ztest_ds_t *zd, uint64_t id)
{
	ztest_shared_t *zs = ztest_shared;
	spa_t *spa = zs->zs_spa;

	(void) spa_scan(spa, POOL_SCAN_SCRUB);
	(void) poll(NULL, 0, 100); /* wait a moment, then force a restart */
	(void) spa_scan(spa, POOL_SCAN_SCRUB);
}

/*
 * Rename the pool to a different name and then rename it back.
 */
/* ARGSUSED */
void
ztest_spa_rename(ztest_ds_t *zd, uint64_t id)
{
	ztest_shared_t *zs = ztest_shared;
	char *oldname, *newname;
	spa_t *spa;

	(void) rw_wrlock(&zs->zs_name_lock);

	oldname = zs->zs_pool;
	newname = umem_alloc(strlen(oldname) + 5, UMEM_NOFAIL);
	(void) strcpy(newname, oldname);
	(void) strcat(newname, "_tmp");

	/*
	 * Do the rename.
	 */
	VERIFY3U(0, ==, spa_rename(oldname, newname));

	/*
	 * Try to open it under the old name, which shouldn't exist.
	 */
	VERIFY3U(ENOENT, ==, spa_open(oldname, &spa, FTAG));

	/*
	 * Open it under the new name and make sure it's still the same spa_t.
	 */
	VERIFY3U(0, ==, spa_open(newname, &spa, FTAG));

	ASSERT(spa == zs->zs_spa);
	spa_close(spa, FTAG);

	/*
	 * Rename it back to the original.
	 */
	VERIFY3U(0, ==, spa_rename(newname, oldname));

	/*
	 * Make sure it can still be opened.
	 */
	VERIFY3U(0, ==, spa_open(oldname, &spa, FTAG));

	ASSERT(spa == zs->zs_spa);
	spa_close(spa, FTAG);

	umem_free(newname, strlen(newname) + 1);

	(void) rw_unlock(&zs->zs_name_lock);
}

/*
 * Verify pool integrity by running zdb.
 */
static void
ztest_run_zdb(char *pool)
{
	int status;
	char zdb[MAXPATHLEN + MAXNAMELEN + 20];
	char zbuf[1024];
	char *bin;
	char *ztest;
	char *isa;
	int isalen;
	FILE *fp;

	strlcpy(zdb, "/usr/bin/ztest", sizeof (zdb));

	/* zdb lives in /usr/sbin, while ztest lives in /usr/bin */
	bin = strstr(zdb, "/usr/bin/");
	ztest = strstr(bin, "/ztest");
	isa = bin + 8;
	isalen = ztest - isa;
	isa = strdup(isa);
	/* LINTED */
	(void) sprintf(bin,
	    "/usr/sbin%.*s/zdb -bcc%s%s -U %s %s",
	    isalen,
	    isa,
	    zopt_verbose >= 3 ? "s" : "",
"v" : "", 4768 spa_config_path, 4769 pool); 4770 free(isa); 4771 4772 if (zopt_verbose >= 5) 4773 (void) printf("Executing %s\n", strstr(zdb, "zdb ")); 4774 4775 fp = popen(zdb, "r"); 4776 assert(fp != NULL); 4777 4778 while (fgets(zbuf, sizeof (zbuf), fp) != NULL) 4779 if (zopt_verbose >= 3) 4780 (void) printf("%s", zbuf); 4781 4782 status = pclose(fp); 4783 4784 if (status == 0) 4785 return; 4786 4787 ztest_dump_core = 0; 4788 if (WIFEXITED(status)) 4789 fatal(0, "'%s' exit code %d", zdb, WEXITSTATUS(status)); 4790 else 4791 fatal(0, "'%s' died with signal %d", zdb, WTERMSIG(status)); 4792} 4793 4794static void 4795ztest_walk_pool_directory(char *header) 4796{ 4797 spa_t *spa = NULL; 4798 4799 if (zopt_verbose >= 6) 4800 (void) printf("%s\n", header); 4801 4802 mutex_enter(&spa_namespace_lock); 4803 while ((spa = spa_next(spa)) != NULL) 4804 if (zopt_verbose >= 6) 4805 (void) printf("\t%s\n", spa_name(spa)); 4806 mutex_exit(&spa_namespace_lock); 4807} 4808 4809static void 4810ztest_spa_import_export(char *oldname, char *newname) 4811{ 4812 nvlist_t *config, *newconfig; 4813 uint64_t pool_guid; 4814 spa_t *spa; 4815 4816 if (zopt_verbose >= 4) { 4817 (void) printf("import/export: old = %s, new = %s\n", 4818 oldname, newname); 4819 } 4820 4821 /* 4822 * Clean up from previous runs. 4823 */ 4824 (void) spa_destroy(newname); 4825 4826 /* 4827 * Get the pool's configuration and guid. 4828 */ 4829 VERIFY3U(0, ==, spa_open(oldname, &spa, FTAG)); 4830 4831 /* 4832 * Kick off a scrub to tickle scrub/export races. 4833 */ 4834 if (ztest_random(2) == 0) 4835 (void) spa_scan(spa, POOL_SCAN_SCRUB); 4836 4837 pool_guid = spa_guid(spa); 4838 spa_close(spa, FTAG); 4839 4840 ztest_walk_pool_directory("pools before export"); 4841 4842 /* 4843 * Export it. 4844 */ 4845 VERIFY3U(0, ==, spa_export(oldname, &config, B_FALSE, B_FALSE)); 4846 4847 ztest_walk_pool_directory("pools after export"); 4848 4849 /* 4850 * Try to import it. 4851 */ 4852 newconfig = spa_tryimport(config); 4853 ASSERT(newconfig != NULL); 4854 nvlist_free(newconfig); 4855 4856 /* 4857 * Import it under the new name. 4858 */ 4859 VERIFY3U(0, ==, spa_import(newname, config, NULL, 0)); 4860 4861 ztest_walk_pool_directory("pools after import"); 4862 4863 /* 4864 * Try to import it again -- should fail with EEXIST. 4865 */ 4866 VERIFY3U(EEXIST, ==, spa_import(newname, config, NULL, 0)); 4867 4868 /* 4869 * Try to import it under a different name -- should fail with EEXIST. 4870 */ 4871 VERIFY3U(EEXIST, ==, spa_import(oldname, config, NULL, 0)); 4872 4873 /* 4874 * Verify that the pool is no longer visible under the old name. 4875 */ 4876 VERIFY3U(ENOENT, ==, spa_open(oldname, &spa, FTAG)); 4877 4878 /* 4879 * Verify that we can open and close the pool using the new name. 
	 */
	VERIFY3U(0, ==, spa_open(newname, &spa, FTAG));
	ASSERT(pool_guid == spa_guid(spa));
	spa_close(spa, FTAG);

	nvlist_free(config);
}

static void
ztest_resume(spa_t *spa)
{
	if (spa_suspended(spa) && zopt_verbose >= 6)
		(void) printf("resuming from suspended state\n");
	spa_vdev_state_enter(spa, SCL_NONE);
	vdev_clear(spa, NULL);
	(void) spa_vdev_state_exit(spa, NULL, 0);
	(void) zio_resume(spa);
}

static void *
ztest_resume_thread(void *arg)
{
	spa_t *spa = arg;

	while (!ztest_exiting) {
		if (spa_suspended(spa))
			ztest_resume(spa);
		(void) poll(NULL, 0, 100);
	}
	return (NULL);
}

static void *
ztest_deadman_thread(void *arg)
{
	ztest_shared_t *zs = arg;
	int grace = 300;
	hrtime_t delta;

	delta = (zs->zs_thread_stop - zs->zs_thread_start) / NANOSEC + grace;

	(void) poll(NULL, 0, (int)(1000 * delta));

	fatal(0, "failed to complete within %d seconds of deadline", grace);

	return (NULL);
}

static void
ztest_execute(ztest_info_t *zi, uint64_t id)
{
	ztest_shared_t *zs = ztest_shared;
	ztest_ds_t *zd = &zs->zs_zd[id % zopt_datasets];
	hrtime_t functime = gethrtime();

	for (int i = 0; i < zi->zi_iters; i++)
		zi->zi_func(zd, id);

	functime = gethrtime() - functime;

	atomic_add_64(&zi->zi_call_count, 1);
	atomic_add_64(&zi->zi_call_time, functime);

	if (zopt_verbose >= 4) {
		Dl_info dli;
		(void) dladdr((void *)zi->zi_func, &dli);
		(void) printf("%6.2f sec in %s\n",
		    (double)functime / NANOSEC, dli.dli_sname);
	}
}

static void *
ztest_thread(void *arg)
{
	uint64_t id = (uintptr_t)arg;
	ztest_shared_t *zs = ztest_shared;
	uint64_t call_next;
	hrtime_t now;
	ztest_info_t *zi;

	while ((now = gethrtime()) < zs->zs_thread_stop) {
		/*
		 * See if it's time to force a crash.
		 */
		if (now > zs->zs_thread_kill)
			ztest_kill(zs);

		/*
		 * If we're getting ENOSPC with some regularity, stop.
		 */
		if (zs->zs_enospc_count > 10)
			break;

		/*
		 * Pick a random function to execute.
		 */
		zi = &zs->zs_info[ztest_random(ZTEST_FUNCS)];
		call_next = zi->zi_call_next;

		if (now >= call_next &&
		    atomic_cas_64(&zi->zi_call_next, call_next, call_next +
		    ztest_random(2 * zi->zi_interval[0] + 1)) == call_next)
			ztest_execute(zi, id);
	}

	return (NULL);
}

static void
ztest_dataset_name(char *dsname, char *pool, int d)
{
	(void) snprintf(dsname, MAXNAMELEN, "%s/ds_%d", pool, d);
}

static void
ztest_dataset_destroy(ztest_shared_t *zs, int d)
{
	char name[MAXNAMELEN];

	ztest_dataset_name(name, zs->zs_pool, d);

	if (zopt_verbose >= 3)
		(void) printf("Destroying %s to free up space\n", name);

	/*
	 * Clean up any non-standard clones and snapshots.  In general,
	 * ztest thread t operates on dataset (t % zopt_datasets),
	 * so there may be more than one thing to clean up.
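	 *
	 * For example, with the defaults of 7 datasets and 23 threads,
	 * dataset 3 is shared by threads 3, 10, and 17, so for d == 3
	 * this loop visits t = 3, 10, 17.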
	 */
	for (int t = d; t < zopt_threads; t += zopt_datasets)
		ztest_dsl_dataset_cleanup(name, t);

	(void) dmu_objset_find(name, ztest_objset_destroy_cb, NULL,
	    DS_FIND_SNAPSHOTS | DS_FIND_CHILDREN);
}

static void
ztest_dataset_dirobj_verify(ztest_ds_t *zd)
{
	uint64_t usedobjs, dirobjs, scratch;

	/*
	 * ZTEST_DIROBJ is the object directory for the entire dataset.
	 * Therefore, the number of objects in use should equal the
	 * number of ZTEST_DIROBJ entries, +1 for ZTEST_DIROBJ itself.
	 * If not, we have an object leak.
	 *
	 * Note that we can only check this in ztest_dataset_open(),
	 * when the open-context and syncing-context values agree.
	 * That's because zap_count() returns the open-context value,
	 * while dmu_objset_space() returns the rootbp fill count.
	 */
	VERIFY3U(0, ==, zap_count(zd->zd_os, ZTEST_DIROBJ, &dirobjs));
	dmu_objset_space(zd->zd_os, &scratch, &scratch, &usedobjs, &scratch);
	ASSERT3U(dirobjs + 1, ==, usedobjs);
}

static int
ztest_dataset_open(ztest_shared_t *zs, int d)
{
	ztest_ds_t *zd = &zs->zs_zd[d];
	uint64_t committed_seq = zd->zd_seq;
	objset_t *os;
	zilog_t *zilog;
	char name[MAXNAMELEN];
	int error;

	ztest_dataset_name(name, zs->zs_pool, d);

	(void) rw_rdlock(&zs->zs_name_lock);

	error = ztest_dataset_create(name);
	if (error == ENOSPC) {
		(void) rw_unlock(&zs->zs_name_lock);
		ztest_record_enospc(FTAG);
		return (error);
	}
	ASSERT(error == 0 || error == EEXIST);

	VERIFY3U(dmu_objset_hold(name, zd, &os), ==, 0);
	(void) rw_unlock(&zs->zs_name_lock);

	ztest_zd_init(zd, os);

	zilog = zd->zd_zilog;

	if (zilog->zl_header->zh_claim_lr_seq != 0 &&
	    zilog->zl_header->zh_claim_lr_seq < committed_seq)
		fatal(0, "missing log records: claimed %llu < committed %llu",
		    zilog->zl_header->zh_claim_lr_seq, committed_seq);

	ztest_dataset_dirobj_verify(zd);

	zil_replay(os, zd, ztest_replay_vector);

	ztest_dataset_dirobj_verify(zd);

	if (zopt_verbose >= 6)
		(void) printf("%s replay %llu blocks, %llu records, seq %llu\n",
		    zd->zd_name,
		    (u_longlong_t)zilog->zl_parse_blk_count,
		    (u_longlong_t)zilog->zl_parse_lr_count,
		    (u_longlong_t)zilog->zl_replaying_seq);

	zilog = zil_open(os, ztest_get_data);

	if (zilog->zl_replaying_seq != 0 &&
	    zilog->zl_replaying_seq < committed_seq)
		fatal(0, "missing log records: replayed %llu < committed %llu",
		    zilog->zl_replaying_seq, committed_seq);

	return (0);
}

static void
ztest_dataset_close(ztest_shared_t *zs, int d)
{
	ztest_ds_t *zd = &zs->zs_zd[d];

	zil_close(zd->zd_zilog);
	dmu_objset_rele(zd->zd_os, zd);

	ztest_zd_fini(zd);
}

/*
 * Kick off threads to run tests on all datasets in parallel.
 */
static void
ztest_run(ztest_shared_t *zs)
{
	thread_t *tid;
	spa_t *spa;
	thread_t resume_tid;
	int error;

	ztest_exiting = B_FALSE;

	/*
	 * Initialize parent/child shared state.
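	 *
	 * This state lives in the MAP_SHARED | MAP_ANON region mapped in
	 * main(), so everything recorded here (ENOSPC counts, per-function
	 * call counts, allocation stats) survives the child's SIGKILL and
	 * can be read back by the parent.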
	 */
	VERIFY(_mutex_init(&zs->zs_vdev_lock, USYNC_THREAD, NULL) == 0);
	VERIFY(rwlock_init(&zs->zs_name_lock, USYNC_THREAD, NULL) == 0);

	zs->zs_thread_start = gethrtime();
	zs->zs_thread_stop = zs->zs_thread_start + zopt_passtime * NANOSEC;
	zs->zs_thread_stop = MIN(zs->zs_thread_stop, zs->zs_proc_stop);
	zs->zs_thread_kill = zs->zs_thread_stop;
	if (ztest_random(100) < zopt_killrate)
		zs->zs_thread_kill -= ztest_random(zopt_passtime * NANOSEC);

	(void) _mutex_init(&zcl.zcl_callbacks_lock, USYNC_THREAD, NULL);

	list_create(&zcl.zcl_callbacks, sizeof (ztest_cb_data_t),
	    offsetof(ztest_cb_data_t, zcd_node));

	/*
	 * Open our pool.
	 */
	kernel_init(FREAD | FWRITE);
	VERIFY(spa_open(zs->zs_pool, &spa, FTAG) == 0);
	spa->spa_debug = B_TRUE;
	zs->zs_spa = spa;

	spa->spa_dedup_ditto = 2 * ZIO_DEDUPDITTO_MIN;

	/*
	 * We don't expect the pool to suspend unless maxfaults == 0,
	 * in which case ztest_fault_inject() temporarily takes away
	 * the only valid replica.
	 */
	if (MAXFAULTS() == 0)
		spa->spa_failmode = ZIO_FAILURE_MODE_WAIT;
	else
		spa->spa_failmode = ZIO_FAILURE_MODE_PANIC;

	/*
	 * Create a thread to periodically resume suspended I/O.
	 */
	VERIFY(thr_create(0, 0, ztest_resume_thread, spa, THR_BOUND,
	    &resume_tid) == 0);

	/*
	 * Create a deadman thread to abort() if we hang.
	 */
	VERIFY(thr_create(0, 0, ztest_deadman_thread, zs, THR_BOUND,
	    NULL) == 0);

	/*
	 * Verify that we can safely inquire about any object,
	 * whether it's allocated or not.  To make it interesting,
	 * we probe a window of five objects on either side of each
	 * power of two.  This hits all edge cases, including zero
	 * and the max.
	 */
	for (int t = 0; t < 64; t++) {
		for (int d = -5; d <= 5; d++) {
			error = dmu_object_info(spa->spa_meta_objset,
			    (1ULL << t) + d, NULL);
			ASSERT(error == 0 || error == ENOENT ||
			    error == EINVAL);
		}
	}

	/*
	 * If we got any ENOSPC errors on the previous run, destroy something.
	 */
	if (zs->zs_enospc_count != 0) {
		int d = ztest_random(zopt_datasets);
		ztest_dataset_destroy(zs, d);
	}
	zs->zs_enospc_count = 0;

	tid = umem_zalloc(zopt_threads * sizeof (thread_t), UMEM_NOFAIL);

	if (zopt_verbose >= 4)
		(void) printf("starting main threads...\n");

	/*
	 * Kick off all the tests that run in parallel.
	 */
	for (int t = 0; t < zopt_threads; t++) {
		if (t < zopt_datasets && ztest_dataset_open(zs, t) != 0)
			return;
		VERIFY(thr_create(0, 0, ztest_thread, (void *)(uintptr_t)t,
		    THR_BOUND, &tid[t]) == 0);
	}

	/*
	 * Wait for all of the tests to complete.  We go in reverse order
	 * so we don't close datasets while threads are still using them.
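	 *
	 * Reverse order works because thread t uses dataset
	 * (t % zopt_datasets): by the time we join a thread t with
	 * t < zopt_datasets and close its dataset, every higher-numbered
	 * thread sharing that dataset has already been joined.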
	 */
	for (int t = zopt_threads - 1; t >= 0; t--) {
		VERIFY(thr_join(tid[t], NULL, NULL) == 0);
		if (t < zopt_datasets)
			ztest_dataset_close(zs, t);
	}

	txg_wait_synced(spa_get_dsl(spa), 0);

	zs->zs_alloc = metaslab_class_get_alloc(spa_normal_class(spa));
	zs->zs_space = metaslab_class_get_space(spa_normal_class(spa));

	umem_free(tid, zopt_threads * sizeof (thread_t));

	/* Kill the resume thread */
	ztest_exiting = B_TRUE;
	VERIFY(thr_join(resume_tid, NULL, NULL) == 0);
	ztest_resume(spa);

	/*
	 * Right before closing the pool, kick off a bunch of async I/O;
	 * spa_close() should wait for it to complete.
	 */
	for (uint64_t object = 1; object < 50; object++)
		dmu_prefetch(spa->spa_meta_objset, object, 0, 1ULL << 20);

	spa_close(spa, FTAG);

	/*
	 * Verify that we can loop over all pools.
	 */
	mutex_enter(&spa_namespace_lock);
	for (spa = spa_next(NULL); spa != NULL; spa = spa_next(spa))
		if (zopt_verbose > 3)
			(void) printf("spa_next: found %s\n", spa_name(spa));
	mutex_exit(&spa_namespace_lock);

	/*
	 * Verify that we can export the pool and reimport it under a
	 * different name.
	 */
	if (ztest_random(2) == 0) {
		char name[MAXNAMELEN];
		(void) snprintf(name, MAXNAMELEN, "%s_import", zs->zs_pool);
		ztest_spa_import_export(zs->zs_pool, name);
		ztest_spa_import_export(name, zs->zs_pool);
	}

	kernel_fini();

	list_destroy(&zcl.zcl_callbacks);

	(void) _mutex_destroy(&zcl.zcl_callbacks_lock);

	(void) rwlock_destroy(&zs->zs_name_lock);
	(void) _mutex_destroy(&zs->zs_vdev_lock);
}

static void
ztest_freeze(ztest_shared_t *zs)
{
	ztest_ds_t *zd = &zs->zs_zd[0];
	spa_t *spa;
	int numloops = 0;

	if (zopt_verbose >= 3)
		(void) printf("testing spa_freeze()...\n");

	kernel_init(FREAD | FWRITE);
	VERIFY3U(0, ==, spa_open(zs->zs_pool, &spa, FTAG));
	VERIFY3U(0, ==, ztest_dataset_open(zs, 0));

	/*
	 * Force the first log block to be transactionally allocated.
	 * We have to do this before we freeze the pool -- otherwise
	 * the log chain won't be anchored.
	 */
	while (BP_IS_HOLE(&zd->zd_zilog->zl_header->zh_log)) {
		ztest_dmu_object_alloc_free(zd, 0);
		zil_commit(zd->zd_zilog, 0);
	}

	txg_wait_synced(spa_get_dsl(spa), 0);

	/*
	 * Freeze the pool.  This stops spa_sync() from doing anything,
	 * so that the only way to record changes from now on is the ZIL.
	 */
	spa_freeze(spa);

	/*
	 * Run tests that generate log records but don't alter the pool config
	 * or depend on DSL sync tasks (snapshots, objset create/destroy, etc).
	 * We do a txg_wait_synced() after each iteration to force the txg
	 * to increase well beyond the last synced value in the uberblock.
	 * The ZIL should be OK with that.
	 */
	while (ztest_random(10) != 0 && numloops++ < zopt_maxloops) {
		ztest_dmu_write_parallel(zd, 0);
		ztest_dmu_object_alloc_free(zd, 0);
		txg_wait_synced(spa_get_dsl(spa), 0);
	}

	/*
	 * Commit all of the changes we just generated.
	 */
	zil_commit(zd->zd_zilog, 0);
	txg_wait_synced(spa_get_dsl(spa), 0);

	/*
	 * Close our dataset and close the pool.
	 */
	ztest_dataset_close(zs, 0);
	spa_close(spa, FTAG);
	kernel_fini();

	/*
	 * Open and close the pool and dataset to induce log replay.
	 */
	kernel_init(FREAD | FWRITE);
	VERIFY3U(0, ==, spa_open(zs->zs_pool, &spa, FTAG));
	VERIFY3U(0, ==, ztest_dataset_open(zs, 0));
	ztest_dataset_close(zs, 0);
	spa_close(spa, FTAG);
	kernel_fini();
}

void
print_time(hrtime_t t, char *timebuf)
{
	hrtime_t s = t / NANOSEC;
	hrtime_t m = s / 60;
	hrtime_t h = m / 60;
	hrtime_t d = h / 24;

	s -= m * 60;
	m -= h * 60;
	h -= d * 24;

	timebuf[0] = '\0';

	if (d)
		(void) sprintf(timebuf,
		    "%llud%02lluh%02llum%02llus", d, h, m, s);
	else if (h)
		(void) sprintf(timebuf, "%lluh%02llum%02llus", h, m, s);
	else if (m)
		(void) sprintf(timebuf, "%llum%02llus", m, s);
	else
		(void) sprintf(timebuf, "%llus", s);
}

static nvlist_t *
make_random_props()
{
	nvlist_t *props;

	if (ztest_random(2) == 0)
		return (NULL);

	VERIFY(nvlist_alloc(&props, NV_UNIQUE_NAME, 0) == 0);
	VERIFY(nvlist_add_uint64(props, "autoreplace", 1) == 0);

	if (zopt_verbose >= 6) {
		(void) printf("props:\n");
		dump_nvlist(props, 4);
	}

	return (props);
}

/*
 * Create a storage pool with the given name and initial vdev size.
 * Then test spa_freeze() functionality.
 */
static void
ztest_init(ztest_shared_t *zs)
{
	spa_t *spa;
	nvlist_t *nvroot, *props;

	VERIFY(_mutex_init(&zs->zs_vdev_lock, USYNC_THREAD, NULL) == 0);
	VERIFY(rwlock_init(&zs->zs_name_lock, USYNC_THREAD, NULL) == 0);

	kernel_init(FREAD | FWRITE);

	/*
	 * Create the storage pool.
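	 *
	 * With the defaults (zopt_raidz = 4, zopt_mirrors = 2), the nvroot
	 * built below describes top-level vdevs that mirror raidz groups
	 * of files under zopt_dir; reading the call below, the trailing
	 * make_vdev_root() arguments are the raidz width, the mirror
	 * count, and the number of top-level vdevs (see make_vdev_root(),
	 * earlier in this file, for the authoritative layout).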
	 */
	(void) spa_destroy(zs->zs_pool);
	ztest_shared->zs_vdev_next_leaf = 0;
	zs->zs_splits = 0;
	zs->zs_mirrors = zopt_mirrors;
	nvroot = make_vdev_root(NULL, NULL, zopt_vdev_size, 0,
	    0, zopt_raidz, zs->zs_mirrors, 1);
	props = make_random_props();
	VERIFY3U(0, ==, spa_create(zs->zs_pool, nvroot, props, NULL, NULL));
	nvlist_free(nvroot);

	VERIFY3U(0, ==, spa_open(zs->zs_pool, &spa, FTAG));
	metaslab_sz = 1ULL << spa->spa_root_vdev->vdev_child[0]->vdev_ms_shift;
	spa_close(spa, FTAG);

	kernel_fini();

	ztest_run_zdb(zs->zs_pool);

	ztest_freeze(zs);

	ztest_run_zdb(zs->zs_pool);

	(void) rwlock_destroy(&zs->zs_name_lock);
	(void) _mutex_destroy(&zs->zs_vdev_lock);
}

int
main(int argc, char **argv)
{
	int kills = 0;
	int iters = 0;
	ztest_shared_t *zs;
	size_t shared_size;
	ztest_info_t *zi;
	char timebuf[100];
	char numbuf[6];
	spa_t *spa;

	(void) setvbuf(stdout, NULL, _IOLBF, 0);

	ztest_random_fd = open("/dev/urandom", O_RDONLY);

	process_options(argc, argv);

	/* Override location of zpool.cache */
	(void) asprintf((char **)&spa_config_path, "%s/zpool.cache", zopt_dir);

	/*
	 * Blow away any existing copy of zpool.cache.
	 */
	if (zopt_init != 0)
		(void) remove(spa_config_path);

	shared_size = sizeof (*zs) + zopt_datasets * sizeof (ztest_ds_t);

	zs = ztest_shared = (void *)mmap(0,
	    P2ROUNDUP(shared_size, getpagesize()),
	    PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANON, -1, 0);

	if (zopt_verbose >= 1) {
		(void) printf("%llu vdevs, %d datasets, %d threads,"
		    " %llu seconds...\n",
		    (u_longlong_t)zopt_vdevs, zopt_datasets, zopt_threads,
		    (u_longlong_t)zopt_time);
	}

	/*
	 * Create and initialize our storage pool.
	 */
	for (int i = 1; i <= zopt_init; i++) {
		bzero(zs, sizeof (ztest_shared_t));
		if (zopt_verbose >= 3 && zopt_init != 1)
			(void) printf("ztest_init(), pass %d\n", i);
		zs->zs_pool = zopt_pool;
		ztest_init(zs);
	}

	zs->zs_pool = zopt_pool;
	zs->zs_proc_start = gethrtime();
	zs->zs_proc_stop = zs->zs_proc_start + zopt_time * NANOSEC;

	for (int f = 0; f < ZTEST_FUNCS; f++) {
		zi = &zs->zs_info[f];
		*zi = ztest_info[f];
		if (zs->zs_proc_start + zi->zi_interval[0] > zs->zs_proc_stop)
			zi->zi_call_next = UINT64_MAX;
		else
			zi->zi_call_next = zs->zs_proc_start +
			    ztest_random(2 * zi->zi_interval[0] + 1);
	}

	/*
	 * Run the tests in a loop.  These tests include fault injection
	 * to verify that self-healing data works, and forced crashes
	 * to verify that we never lose on-disk consistency.
	 */
	while (gethrtime() < zs->zs_proc_stop) {
		int status;
		pid_t pid;

		/*
		 * Initialize the workload counters for each function.
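		 *
		 * These counters are reset here in the parent but updated
		 * by the child through the shared ztest_shared mapping,
		 * which is what lets the per-pass workload summary below
		 * report the calls and time accumulated inside the child
		 * even after it has been killed.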
		 */
		for (int f = 0; f < ZTEST_FUNCS; f++) {
			zi = &zs->zs_info[f];
			zi->zi_call_count = 0;
			zi->zi_call_time = 0;
		}

		/* Set the allocation switch size */
		metaslab_df_alloc_threshold = ztest_random(metaslab_sz / 4) + 1;

		pid = fork();

		if (pid == -1)
			fatal(1, "fork failed");

		if (pid == 0) {	/* child */
			struct rlimit rl = { 1024, 1024 };
			(void) setrlimit(RLIMIT_NOFILE, &rl);
			(void) enable_extended_FILE_stdio(-1, -1);
			ztest_run(zs);
			exit(0);
		}

		while (waitpid(pid, &status, 0) != pid)
			continue;

		if (WIFEXITED(status)) {
			if (WEXITSTATUS(status) != 0) {
				(void) fprintf(stderr,
				    "child exited with code %d\n",
				    WEXITSTATUS(status));
				exit(2);
			}
		} else if (WIFSIGNALED(status)) {
			if (WTERMSIG(status) != SIGKILL) {
				(void) fprintf(stderr,
				    "child died with signal %d\n",
				    WTERMSIG(status));
				exit(3);
			}
			kills++;
		} else {
			(void) fprintf(stderr, "something strange happened "
			    "to child\n");
			exit(4);
		}

		iters++;

		if (zopt_verbose >= 1) {
			hrtime_t now = gethrtime();

			now = MIN(now, zs->zs_proc_stop);
			print_time(zs->zs_proc_stop - now, timebuf);
			nicenum(zs->zs_space, numbuf);

			(void) printf("Pass %3d, %8s, %3llu ENOSPC, "
			    "%4.1f%% of %5s used, %3.0f%% done, %8s to go\n",
			    iters,
			    WIFEXITED(status) ? "Complete" : "SIGKILL",
			    (u_longlong_t)zs->zs_enospc_count,
			    100.0 * zs->zs_alloc / zs->zs_space,
			    numbuf,
			    100.0 * (now - zs->zs_proc_start) /
			    (zopt_time * NANOSEC), timebuf);
		}

		if (zopt_verbose >= 2) {
			(void) printf("\nWorkload summary:\n\n");
			(void) printf("%7s %9s %s\n",
			    "Calls", "Time", "Function");
			(void) printf("%7s %9s %s\n",
			    "-----", "----", "--------");
			for (int f = 0; f < ZTEST_FUNCS; f++) {
				Dl_info dli;

				zi = &zs->zs_info[f];
				print_time(zi->zi_call_time, timebuf);
				(void) dladdr((void *)zi->zi_func, &dli);
				(void) printf("%7llu %9s %s\n",
				    (u_longlong_t)zi->zi_call_count, timebuf,
				    dli.dli_sname);
			}
			(void) printf("\n");
		}

		/*
		 * It's possible that we killed a child during a rename test,
		 * in which case we'll have a 'ztest_tmp' pool lying around
		 * instead of 'ztest'.  Do a blind rename in case this
		 * happened.
		 */
		kernel_init(FREAD);
		if (spa_open(zopt_pool, &spa, FTAG) == 0) {
			spa_close(spa, FTAG);
		} else {
			char tmpname[MAXNAMELEN];
			kernel_fini();
			kernel_init(FREAD | FWRITE);
			(void) snprintf(tmpname, sizeof (tmpname), "%s_tmp",
			    zopt_pool);
			(void) spa_rename(tmpname, zopt_pool);
		}
		kernel_fini();

		ztest_run_zdb(zopt_pool);
	}

	if (zopt_verbose >= 1) {
		(void) printf("%d killed, %d completed, %.0f%% kill rate\n",
		    kills, iters - kills, (100.0 * kills) / MAX(1, iters));
	}

	return (0);
}