ztest.c revision 244087
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012 by Delphix. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
 * Copyright (c) 2012 Martin Matuska <mm@FreeBSD.org>.  All rights reserved.
 */

/*
 * The objective of this program is to provide a DMU/ZAP/SPA stress test
 * that runs entirely in userland, is easy to use, and easy to extend.
 *
 * The overall design of the ztest program is as follows:
 *
 * (1) For each major functional area (e.g. adding vdevs to a pool,
 *     creating and destroying datasets, reading and writing objects, etc)
 *     we have a simple routine to test that functionality.  These
 *     individual routines do not have to do anything "stressful".
 *
 * (2) We turn these simple functionality tests into a stress test by
 *     running them all in parallel, with as many threads as desired,
 *     and spread across as many datasets, objects, and vdevs as desired.
 *
 * (3) While all this is happening, we inject faults into the pool to
 *     verify that self-healing data really works.
 *
 * (4) Every time we open a dataset, we change its checksum and compression
 *     functions.  Thus even individual objects vary from block to block
 *     in which checksum they use and whether they're compressed.
 *
 * (5) To verify that we never lose on-disk consistency after a crash,
 *     we run the entire test in a child of the main process.
 *     At random times, the child self-immolates with a SIGKILL.
 *     This is the software equivalent of pulling the power cord.
 *     The parent then runs the test again, using the existing
 *     storage pool, as many times as desired.  If backwards compatibility
 *     testing is enabled ztest will sometimes run the "older" version
 *     of ztest after a SIGKILL.
 *
 * (6) To verify that we don't have future leaks or temporal incursions,
 *     many of the functional tests record the transaction group number
 *     as part of their data.  When reading old data, they verify that
 *     the transaction group number is less than the current, open txg.
 *     If you add a new test, please do this if applicable.
 *
 * When run with no arguments, ztest runs for about five minutes and
 * produces no output if successful.  To get a little bit of information,
 * specify -V.  To get more information, specify -VV, and so on.
 *
 * To turn this into an overnight stress test, use -T to specify run time.
 *
 * You can ask for more vdevs [-v], datasets [-d], or threads [-t]
 * to increase the pool capacity, fanout, and overall stress level.
 *
 * Use the -k option to set the desired frequency of kills.
 *
 * When ztest invokes itself it passes all relevant information through a
 * temporary file which is mmap-ed in the child process. This allows shared
 * memory to survive the exec syscall. The ztest_shared_hdr_t struct is always
 * stored at offset 0 of this file and contains information on the size and
 * number of shared structures in the file. The information stored in this file
 * must remain backwards compatible with older versions of ztest so that
 * ztest can invoke them during backwards compatibility testing (-B).
 */
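/*
 * Illustrative note (not authoritative): the ztest_shared_hdr_t declared
 * below sits at offset 0 of that temporary file, and its zh_*_size and
 * zh_*_count fields describe the option, state, per-call and per-dataset
 * structures that follow it, which is what lets an older ztest binary
 * locate just the regions it understands during -B testing.
 */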

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/dmu.h>
#include <sys/txg.h>
#include <sys/dbuf.h>
#include <sys/zap.h>
#include <sys/dmu_objset.h>
#include <sys/poll.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/wait.h>
#include <sys/mman.h>
#include <sys/resource.h>
#include <sys/zio.h>
#include <sys/zil.h>
#include <sys/zil_impl.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_file.h>
#include <sys/spa_impl.h>
#include <sys/metaslab_impl.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_scan.h>
#include <sys/zio_checksum.h>
#include <sys/refcount.h>
#include <sys/zfeature.h>
#include <stdio.h>
#include <stdio_ext.h>
#include <stdlib.h>
#include <unistd.h>
#include <signal.h>
#include <umem.h>
#include <dlfcn.h>
#include <ctype.h>
#include <math.h>
#include <errno.h>
#include <sys/fs/zfs.h>
#include <libnvpair.h>

#define	ZTEST_FD_DATA	3
#define	ZTEST_FD_RAND	4

typedef struct ztest_shared_hdr {
	uint64_t	zh_hdr_size;
	uint64_t	zh_opts_size;
	uint64_t	zh_size;
	uint64_t	zh_stats_size;
	uint64_t	zh_stats_count;
	uint64_t	zh_ds_size;
	uint64_t	zh_ds_count;
} ztest_shared_hdr_t;

static ztest_shared_hdr_t *ztest_shared_hdr;

typedef struct ztest_shared_opts {
	char zo_pool[MAXNAMELEN];
	char zo_dir[MAXNAMELEN];
	char zo_alt_ztest[MAXNAMELEN];
	char zo_alt_libpath[MAXNAMELEN];
	uint64_t zo_vdevs;
	uint64_t zo_vdevtime;
	size_t zo_vdev_size;
	int zo_ashift;
	int zo_mirrors;
	int zo_raidz;
	int zo_raidz_parity;
	int zo_datasets;
	int zo_threads;
	uint64_t zo_passtime;
	uint64_t zo_killrate;
	int zo_verbose;
	int zo_init;
	uint64_t zo_time;
	uint64_t zo_maxloops;
	uint64_t zo_metaslab_gang_bang;
} ztest_shared_opts_t;

static const ztest_shared_opts_t ztest_opts_defaults = {
	.zo_pool = { 'z', 't', 'e', 's', 't', '\0' },
	.zo_dir = { '/', 't', 'm', 'p', '\0' },
	.zo_alt_ztest = { '\0' },
	.zo_alt_libpath = { '\0' },
	.zo_vdevs = 5,
	.zo_ashift = SPA_MINBLOCKSHIFT,
	.zo_mirrors = 2,
	.zo_raidz = 4,
	.zo_raidz_parity = 1,
	.zo_vdev_size = SPA_MINDEVSIZE,
	.zo_datasets = 7,
	.zo_threads = 23,
	.zo_passtime = 60,		/* 60 seconds */
	.zo_killrate = 70,		/* 70% kill rate */
	.zo_verbose = 0,
	.zo_init = 1,
	.zo_time = 300,			/* 5 minutes */
	.zo_maxloops = 50,		/* max loops during spa_freeze() */
	.zo_metaslab_gang_bang = 32 << 10
};

extern uint64_t metaslab_gang_bang;
extern uint64_t metaslab_df_alloc_threshold;

static ztest_shared_opts_t *ztest_shared_opts;
static ztest_shared_opts_t ztest_opts;

typedef struct ztest_shared_ds {
	uint64_t	zd_seq;
} ztest_shared_ds_t;

static ztest_shared_ds_t *ztest_shared_ds;
#define	ZTEST_GET_SHARED_DS(d) (&ztest_shared_ds[d])

#define	BT_MAGIC	0x123456789abcdefULL
#define	MAXFAULTS() \
	(MAX(zs->zs_mirrors, 1) * (ztest_opts.zo_raidz_parity + 1) - 1)
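/*
 * For example, with the default two-way mirrors and raidz parity of one,
 * MAXFAULTS() works out to 2 * (1 + 1) - 1 = 3 simultaneous faults, which
 * is the bound ztest places on fault injection.
 */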

enum ztest_io_type {
	ZTEST_IO_WRITE_TAG,
	ZTEST_IO_WRITE_PATTERN,
	ZTEST_IO_WRITE_ZEROES,
	ZTEST_IO_TRUNCATE,
	ZTEST_IO_SETATTR,
	ZTEST_IO_REWRITE,
	ZTEST_IO_TYPES
};

typedef struct ztest_block_tag {
	uint64_t	bt_magic;
	uint64_t	bt_objset;
	uint64_t	bt_object;
	uint64_t	bt_offset;
	uint64_t	bt_gen;
	uint64_t	bt_txg;
	uint64_t	bt_crtxg;
} ztest_block_tag_t;

typedef struct bufwad {
	uint64_t	bw_index;
	uint64_t	bw_txg;
	uint64_t	bw_data;
} bufwad_t;

/*
 * XXX -- fix zfs range locks to be generic so we can use them here.
 */
typedef enum {
	RL_READER,
	RL_WRITER,
	RL_APPEND
} rl_type_t;

typedef struct rll {
	void		*rll_writer;
	int		rll_readers;
	mutex_t		rll_lock;
	cond_t		rll_cv;
} rll_t;

typedef struct rl {
	uint64_t	rl_object;
	uint64_t	rl_offset;
	uint64_t	rl_size;
	rll_t		*rl_lock;
} rl_t;

#define	ZTEST_RANGE_LOCKS	64
#define	ZTEST_OBJECT_LOCKS	64

/*
 * Object descriptor.  Used as a template for object lookup/create/remove.
 */
typedef struct ztest_od {
	uint64_t	od_dir;
	uint64_t	od_object;
	dmu_object_type_t od_type;
	dmu_object_type_t od_crtype;
	uint64_t	od_blocksize;
	uint64_t	od_crblocksize;
	uint64_t	od_gen;
	uint64_t	od_crgen;
	char		od_name[MAXNAMELEN];
} ztest_od_t;

/*
 * Per-dataset state.
 */
typedef struct ztest_ds {
	ztest_shared_ds_t *zd_shared;
	objset_t	*zd_os;
	rwlock_t	zd_zilog_lock;
	zilog_t		*zd_zilog;
	ztest_od_t	*zd_od;		/* debugging aid */
	char		zd_name[MAXNAMELEN];
	mutex_t		zd_dirobj_lock;
	rll_t		zd_object_lock[ZTEST_OBJECT_LOCKS];
	rll_t		zd_range_lock[ZTEST_RANGE_LOCKS];
} ztest_ds_t;

/*
 * Per-iteration state.
 */
typedef void ztest_func_t(ztest_ds_t *zd, uint64_t id);

typedef struct ztest_info {
	ztest_func_t	*zi_func;	/* test function */
	uint64_t	zi_iters;	/* iterations per execution */
	uint64_t	*zi_interval;	/* execute every <interval> seconds */
} ztest_info_t;

typedef struct ztest_shared_callstate {
	uint64_t	zc_count;	/* per-pass count */
	uint64_t	zc_time;	/* per-pass time */
	uint64_t	zc_next;	/* next time to call this function */
} ztest_shared_callstate_t;

static ztest_shared_callstate_t *ztest_shared_callstate;
#define	ZTEST_GET_SHARED_CALLSTATE(c) (&ztest_shared_callstate[c])

/*
 * Note: these aren't static because we want dladdr() to work.
 */
ztest_func_t ztest_dmu_read_write;
ztest_func_t ztest_dmu_write_parallel;
ztest_func_t ztest_dmu_object_alloc_free;
ztest_func_t ztest_dmu_commit_callbacks;
ztest_func_t ztest_zap;
ztest_func_t ztest_zap_parallel;
ztest_func_t ztest_zil_commit;
ztest_func_t ztest_zil_remount;
ztest_func_t ztest_dmu_read_write_zcopy;
ztest_func_t ztest_dmu_objset_create_destroy;
ztest_func_t ztest_dmu_prealloc;
ztest_func_t ztest_fzap;
ztest_func_t ztest_dmu_snapshot_create_destroy;
ztest_func_t ztest_dsl_prop_get_set;
ztest_func_t ztest_spa_prop_get_set;
ztest_func_t ztest_spa_create_destroy;
ztest_func_t ztest_fault_inject;
ztest_func_t ztest_ddt_repair;
ztest_func_t ztest_dmu_snapshot_hold;
ztest_func_t ztest_spa_rename;
ztest_func_t ztest_scrub;
ztest_func_t ztest_dsl_dataset_promote_busy;
ztest_func_t ztest_vdev_attach_detach;
ztest_func_t ztest_vdev_LUN_growth;
ztest_func_t ztest_vdev_add_remove;
ztest_func_t ztest_vdev_aux_add_remove;
ztest_func_t ztest_split_pool;
ztest_func_t ztest_reguid;
ztest_func_t ztest_spa_upgrade;

uint64_t zopt_always = 0ULL * NANOSEC;		/* all the time */
uint64_t zopt_incessant = 1ULL * NANOSEC / 10;	/* every 1/10 second */
uint64_t zopt_often = 1ULL * NANOSEC;		/* every second */
uint64_t zopt_sometimes = 10ULL * NANOSEC;	/* every 10 seconds */
uint64_t zopt_rarely = 60ULL * NANOSEC;		/* every 60 seconds */

ztest_info_t ztest_info[] = {
	{ ztest_dmu_read_write,			1,	&zopt_always	},
	{ ztest_dmu_write_parallel,		10,	&zopt_always	},
	{ ztest_dmu_object_alloc_free,		1,	&zopt_always	},
	{ ztest_dmu_commit_callbacks,		1,	&zopt_always	},
	{ ztest_zap,				30,	&zopt_always	},
	{ ztest_zap_parallel,			100,	&zopt_always	},
	{ ztest_split_pool,			1,	&zopt_always	},
	{ ztest_zil_commit,			1,	&zopt_incessant	},
	{ ztest_zil_remount,			1,	&zopt_sometimes	},
	{ ztest_dmu_read_write_zcopy,		1,	&zopt_often	},
	{ ztest_dmu_objset_create_destroy,	1,	&zopt_often	},
	{ ztest_dsl_prop_get_set,		1,	&zopt_often	},
	{ ztest_spa_prop_get_set,		1,	&zopt_sometimes	},
#if 0
	{ ztest_dmu_prealloc,			1,	&zopt_sometimes	},
#endif
	{ ztest_fzap,				1,	&zopt_sometimes	},
	{ ztest_dmu_snapshot_create_destroy,	1,	&zopt_sometimes	},
	{ ztest_spa_create_destroy,		1,	&zopt_sometimes	},
	{ ztest_fault_inject,			1,	&zopt_sometimes	},
	{ ztest_ddt_repair,			1,	&zopt_sometimes	},
	{ ztest_dmu_snapshot_hold,		1,	&zopt_sometimes	},
	{ ztest_reguid,				1,	&zopt_sometimes	},
	{ ztest_spa_rename,			1,	&zopt_rarely	},
	{ ztest_scrub,				1,	&zopt_rarely	},
	{ ztest_spa_upgrade,			1,	&zopt_rarely	},
	{ ztest_dsl_dataset_promote_busy,	1,	&zopt_rarely	},
	{ ztest_vdev_attach_detach,		1,	&zopt_rarely	},
	{ ztest_vdev_LUN_growth,		1,	&zopt_rarely	},
	{ ztest_vdev_add_remove,		1,
	    &ztest_opts.zo_vdevtime				},
	{ ztest_vdev_aux_add_remove,		1,
	    &ztest_opts.zo_vdevtime				},
};

#define	ZTEST_FUNCS	(sizeof (ztest_info) / sizeof (ztest_info_t))
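/*
 * A worked example of the scheduling knobs above, assuming the defaults in
 * ztest_opts_defaults: zopt_incessant fires every NANOSEC / 10 = 100 ms,
 * zopt_often every second, and the vdev add/remove tests run on
 * zo_vdevtime, which process_options() computes as
 * zo_time * NANOSEC / zo_vdevs = 300 s / 5 vdevs, i.e. roughly once a minute.
 */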

/*
 * The following struct is used to hold a list of uncalled commit callbacks.
 * The callbacks are ordered by txg number.
 */
typedef struct ztest_cb_list {
	mutex_t	zcl_callbacks_lock;
	list_t	zcl_callbacks;
} ztest_cb_list_t;

/*
 * Stuff we need to share writably between parent and child.
 */
typedef struct ztest_shared {
	boolean_t	zs_do_init;
	hrtime_t	zs_proc_start;
	hrtime_t	zs_proc_stop;
	hrtime_t	zs_thread_start;
	hrtime_t	zs_thread_stop;
	hrtime_t	zs_thread_kill;
	uint64_t	zs_enospc_count;
	uint64_t	zs_vdev_next_leaf;
	uint64_t	zs_vdev_aux;
	uint64_t	zs_alloc;
	uint64_t	zs_space;
	uint64_t	zs_splits;
	uint64_t	zs_mirrors;
	uint64_t	zs_metaslab_sz;
	uint64_t	zs_metaslab_df_alloc_threshold;
	uint64_t	zs_guid;
} ztest_shared_t;

#define	ID_PARALLEL	-1ULL

static char ztest_dev_template[] = "%s/%s.%llua";
static char ztest_aux_template[] = "%s/%s.%s.%llu";
ztest_shared_t *ztest_shared;

static spa_t *ztest_spa = NULL;
static ztest_ds_t *ztest_ds;

static mutex_t ztest_vdev_lock;

/*
 * The ztest_name_lock protects the pool and dataset namespace used by
 * the individual tests. To modify the namespace, consumers must grab
 * this lock as writer. Grabbing the lock as reader will ensure that the
 * namespace does not change while the lock is held.
 */
static rwlock_t ztest_name_lock;

static boolean_t ztest_dump_core = B_TRUE;
static boolean_t ztest_exiting;

/* Global commit callback list */
static ztest_cb_list_t zcl;

enum ztest_object {
	ZTEST_META_DNODE = 0,
	ZTEST_DIROBJ,
	ZTEST_OBJECTS
};

static void usage(boolean_t) __NORETURN;

/*
 * These libumem hooks provide a reasonable set of defaults for the allocator's
 * debugging facilities.
 */
const char *
_umem_debug_init()
{
	return ("default,verbose"); /* $UMEM_DEBUG setting */
}

const char *
_umem_logging_init(void)
{
	return ("fail,contents"); /* $UMEM_LOGGING setting */
}
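/*
 * (libumem looks these two functions up at startup and treats the returned
 * strings like UMEM_DEBUG / UMEM_LOGGING settings.)
 */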

#define	FATAL_MSG_SZ	1024

char *fatal_msg;

static void
fatal(int do_perror, char *message, ...)
{
	va_list args;
	int save_errno = errno;
	char buf[FATAL_MSG_SZ];

	(void) fflush(stdout);

	va_start(args, message);
	(void) sprintf(buf, "ztest: ");
	/* LINTED */
	(void) vsprintf(buf + strlen(buf), message, args);
	va_end(args);
	if (do_perror) {
		(void) snprintf(buf + strlen(buf), FATAL_MSG_SZ - strlen(buf),
		    ": %s", strerror(save_errno));
	}
	(void) fprintf(stderr, "%s\n", buf);
	fatal_msg = buf;			/* to ease debugging */
	if (ztest_dump_core)
		abort();
	exit(3);
}

static int
str2shift(const char *buf)
{
	const char *ends = "BKMGTPEZ";
	int i;

	if (buf[0] == '\0')
		return (0);
	for (i = 0; i < strlen(ends); i++) {
		if (toupper(buf[0]) == ends[i])
			break;
	}
	if (i == strlen(ends)) {
		(void) fprintf(stderr, "ztest: invalid bytes suffix: %s\n",
		    buf);
		usage(B_FALSE);
	}
	if (buf[1] == '\0' || (toupper(buf[1]) == 'B' && buf[2] == '\0')) {
		return (10*i);
	}
	(void) fprintf(stderr, "ztest: invalid bytes suffix: %s\n", buf);
	usage(B_FALSE);
	/* NOTREACHED */
}

static uint64_t
nicenumtoull(const char *buf)
{
	char *end;
	uint64_t val;

	val = strtoull(buf, &end, 0);
	if (end == buf) {
		(void) fprintf(stderr, "ztest: bad numeric value: %s\n", buf);
		usage(B_FALSE);
	} else if (end[0] == '.') {
		double fval = strtod(buf, &end);
		fval *= pow(2, str2shift(end));
		if (fval > UINT64_MAX) {
			(void) fprintf(stderr, "ztest: value too large: %s\n",
			    buf);
			usage(B_FALSE);
		}
		val = (uint64_t)fval;
	} else {
		int shift = str2shift(end);
		if (shift >= 64 || (val << shift) >> shift != val) {
			(void) fprintf(stderr, "ztest: value too large: %s\n",
			    buf);
			usage(B_FALSE);
		}
		val <<= shift;
	}
	return (val);
}
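/*
 * Illustrative, never-compiled examples of the suffix parsing above; the
 * shifts follow directly from str2shift()'s "BKMGTPEZ" table:
 */
#if 0
	VERIFY3U(nicenumtoull("300"), ==, 300);			/* no suffix */
	VERIFY3U(nicenumtoull("128m"), ==, 128ULL << 20);	/* 'm' => 2^20 */
	VERIFY3U(nicenumtoull("1.5g"), ==,
	    (uint64_t)(1.5 * (1ULL << 30)));			/* 'g' => 2^30 */
#endif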

static void
usage(boolean_t requested)
{
	const ztest_shared_opts_t *zo = &ztest_opts_defaults;

	char nice_vdev_size[10];
	char nice_gang_bang[10];
	FILE *fp = requested ? stdout : stderr;

	nicenum(zo->zo_vdev_size, nice_vdev_size);
	nicenum(zo->zo_metaslab_gang_bang, nice_gang_bang);

	(void) fprintf(fp, "Usage: %s\n"
	    "\t[-v vdevs (default: %llu)]\n"
	    "\t[-s size_of_each_vdev (default: %s)]\n"
	    "\t[-a alignment_shift (default: %d)] use 0 for random\n"
	    "\t[-m mirror_copies (default: %d)]\n"
	    "\t[-r raidz_disks (default: %d)]\n"
	    "\t[-R raidz_parity (default: %d)]\n"
	    "\t[-d datasets (default: %d)]\n"
	    "\t[-t threads (default: %d)]\n"
	    "\t[-g gang_block_threshold (default: %s)]\n"
	    "\t[-i init_count (default: %d)] initialize pool i times\n"
	    "\t[-k kill_percentage (default: %llu%%)]\n"
	    "\t[-p pool_name (default: %s)]\n"
	    "\t[-f dir (default: %s)] file directory for vdev files\n"
	    "\t[-V] verbose (use multiple times for ever more blather)\n"
	    "\t[-E] use existing pool instead of creating new one\n"
	    "\t[-T time (default: %llu sec)] total run time\n"
	    "\t[-F freezeloops (default: %llu)] max loops in spa_freeze()\n"
	    "\t[-P passtime (default: %llu sec)] time per pass\n"
	    "\t[-B alt_ztest (default: <none>)] alternate ztest path\n"
	    "\t[-h] (print help)\n"
	    "",
	    zo->zo_pool,
	    (u_longlong_t)zo->zo_vdevs,			/* -v */
	    nice_vdev_size,				/* -s */
	    zo->zo_ashift,				/* -a */
	    zo->zo_mirrors,				/* -m */
	    zo->zo_raidz,				/* -r */
	    zo->zo_raidz_parity,			/* -R */
	    zo->zo_datasets,				/* -d */
	    zo->zo_threads,				/* -t */
	    nice_gang_bang,				/* -g */
	    zo->zo_init,				/* -i */
	    (u_longlong_t)zo->zo_killrate,		/* -k */
	    zo->zo_pool,				/* -p */
	    zo->zo_dir,					/* -f */
	    (u_longlong_t)zo->zo_time,			/* -T */
	    (u_longlong_t)zo->zo_maxloops,		/* -F */
	    (u_longlong_t)zo->zo_passtime);
	exit(requested ? 0 : 1);
}

static void
process_options(int argc, char **argv)
{
	char *path;
	ztest_shared_opts_t *zo = &ztest_opts;

	int opt;
	uint64_t value;
	char altdir[MAXNAMELEN] = { 0 };

	bcopy(&ztest_opts_defaults, zo, sizeof (*zo));

	while ((opt = getopt(argc, argv,
	    "v:s:a:m:r:R:d:t:g:i:k:p:f:VET:P:hF:B:")) != EOF) {
		value = 0;
		switch (opt) {
		case 'v':
		case 's':
		case 'a':
		case 'm':
		case 'r':
		case 'R':
		case 'd':
		case 't':
		case 'g':
		case 'i':
		case 'k':
		case 'T':
		case 'P':
		case 'F':
			value = nicenumtoull(optarg);
		}
		switch (opt) {
		case 'v':
			zo->zo_vdevs = value;
			break;
		case 's':
			zo->zo_vdev_size = MAX(SPA_MINDEVSIZE, value);
			break;
		case 'a':
			zo->zo_ashift = value;
			break;
		case 'm':
			zo->zo_mirrors = value;
			break;
		case 'r':
			zo->zo_raidz = MAX(1, value);
			break;
		case 'R':
			zo->zo_raidz_parity = MIN(MAX(value, 1), 3);
			break;
		case 'd':
			zo->zo_datasets = MAX(1, value);
			break;
		case 't':
			zo->zo_threads = MAX(1, value);
			break;
		case 'g':
			zo->zo_metaslab_gang_bang = MAX(SPA_MINBLOCKSIZE << 1,
			    value);
			break;
		case 'i':
			zo->zo_init = value;
			break;
		case 'k':
			zo->zo_killrate = value;
			break;
		case 'p':
			(void) strlcpy(zo->zo_pool, optarg,
			    sizeof (zo->zo_pool));
			break;
		case 'f':
			path = realpath(optarg, NULL);
			if (path == NULL) {
				(void) fprintf(stderr, "error: %s: %s\n",
				    optarg, strerror(errno));
				usage(B_FALSE);
			} else {
				(void) strlcpy(zo->zo_dir, path,
				    sizeof (zo->zo_dir));
			}
			break;
		case 'V':
			zo->zo_verbose++;
			break;
		case 'E':
			zo->zo_init = 0;
			break;
		case 'T':
			zo->zo_time = value;
			break;
		case 'P':
			zo->zo_passtime = MAX(1, value);
			break;
		case 'F':
			zo->zo_maxloops = MAX(1, value);
			break;
		case 'B':
			(void) strlcpy(altdir, optarg, sizeof (altdir));
			break;
		case 'h':
			usage(B_TRUE);
			break;
		case '?':
		default:
			usage(B_FALSE);
			break;
		}
	}

	zo->zo_raidz_parity = MIN(zo->zo_raidz_parity, zo->zo_raidz - 1);

	zo->zo_vdevtime =
	    (zo->zo_vdevs > 0 ? zo->zo_time * NANOSEC / zo->zo_vdevs :
	    UINT64_MAX >> 2);

	if (strlen(altdir) > 0) {
		char cmd[MAXNAMELEN];
		char realaltdir[MAXNAMELEN];
		char *bin;
		char *ztest;
		char *isa;
		int isalen;

		(void) realpath(getexecname(), cmd);
		if (0 != access(altdir, F_OK)) {
			ztest_dump_core = B_FALSE;
			fatal(B_TRUE, "invalid alternate ztest path: %s",
			    altdir);
		}
		VERIFY(NULL != realpath(altdir, realaltdir));

		/*
		 * 'cmd' should be of the form "<anything>/usr/bin/<isa>/ztest".
		 * We want to extract <isa> to determine if we should use
		 * 32 or 64 bit binaries.
		 */
		bin = strstr(cmd, "/usr/bin/");
		ztest = strstr(bin, "/ztest");
		isa = bin + 9;
		isalen = ztest - isa;
		(void) snprintf(zo->zo_alt_ztest, sizeof (zo->zo_alt_ztest),
		    "%s/usr/bin/%.*s/ztest", realaltdir, isalen, isa);
		(void) snprintf(zo->zo_alt_libpath, sizeof (zo->zo_alt_libpath),
		    "%s/usr/lib/%.*s", realaltdir, isalen, isa);

		if (0 != access(zo->zo_alt_ztest, X_OK)) {
			ztest_dump_core = B_FALSE;
			fatal(B_TRUE, "invalid alternate ztest: %s",
			    zo->zo_alt_ztest);
		} else if (0 != access(zo->zo_alt_libpath, X_OK)) {
			ztest_dump_core = B_FALSE;
			fatal(B_TRUE, "invalid alternate lib directory %s",
			    zo->zo_alt_libpath);
		}
	}
}

static void
ztest_kill(ztest_shared_t *zs)
{
	zs->zs_alloc = metaslab_class_get_alloc(spa_normal_class(ztest_spa));
	zs->zs_space = metaslab_class_get_space(spa_normal_class(ztest_spa));
	(void) kill(getpid(), SIGKILL);
}

static uint64_t
ztest_random(uint64_t range)
{
	uint64_t r;

	if (range == 0)
		return (0);

	if (read(ZTEST_FD_RAND, &r, sizeof (r)) != sizeof (r))
		fatal(1, "short read from /dev/urandom");

	return (r % range);
}

/* ARGSUSED */
static void
ztest_record_enospc(const char *s)
{
	ztest_shared->zs_enospc_count++;
}

static uint64_t
ztest_get_ashift(void)
{
	if (ztest_opts.zo_ashift == 0)
		return (SPA_MINBLOCKSHIFT + ztest_random(3));
	return (ztest_opts.zo_ashift);
}

static nvlist_t *
make_vdev_file(char *path, char *aux, char *pool, size_t size, uint64_t ashift)
{
	char pathbuf[MAXPATHLEN];
	uint64_t vdev;
	nvlist_t *file;

	if (ashift == 0)
		ashift = ztest_get_ashift();

	if (path == NULL) {
		path = pathbuf;

		if (aux != NULL) {
			vdev = ztest_shared->zs_vdev_aux;
			(void) snprintf(path, sizeof (pathbuf),
			    ztest_aux_template, ztest_opts.zo_dir,
			    pool == NULL ? ztest_opts.zo_pool : pool,
			    aux, vdev);
		} else {
			vdev = ztest_shared->zs_vdev_next_leaf++;
			(void) snprintf(path, sizeof (pathbuf),
			    ztest_dev_template, ztest_opts.zo_dir,
			    pool == NULL ? ztest_opts.zo_pool : pool, vdev);
		}
	}

	if (size != 0) {
		int fd = open(path, O_RDWR | O_CREAT | O_TRUNC, 0666);
		if (fd == -1)
			fatal(1, "can't open %s", path);
		if (ftruncate(fd, size) != 0)
			fatal(1, "can't ftruncate %s", path);
		(void) close(fd);
	}

	VERIFY(nvlist_alloc(&file, NV_UNIQUE_NAME, 0) == 0);
	VERIFY(nvlist_add_string(file, ZPOOL_CONFIG_TYPE, VDEV_TYPE_FILE) == 0);
	VERIFY(nvlist_add_string(file, ZPOOL_CONFIG_PATH, path) == 0);
	VERIFY(nvlist_add_uint64(file, ZPOOL_CONFIG_ASHIFT, ashift) == 0);

	return (file);
}

static nvlist_t *
make_vdev_raidz(char *path, char *aux, char *pool, size_t size,
    uint64_t ashift, int r)
{
	nvlist_t *raidz, **child;
	int c;

	if (r < 2)
		return (make_vdev_file(path, aux, pool, size, ashift));
	child = umem_alloc(r * sizeof (nvlist_t *), UMEM_NOFAIL);

	for (c = 0; c < r; c++)
		child[c] = make_vdev_file(path, aux, pool, size, ashift);

	VERIFY(nvlist_alloc(&raidz, NV_UNIQUE_NAME, 0) == 0);
	VERIFY(nvlist_add_string(raidz, ZPOOL_CONFIG_TYPE,
	    VDEV_TYPE_RAIDZ) == 0);
	VERIFY(nvlist_add_uint64(raidz, ZPOOL_CONFIG_NPARITY,
	    ztest_opts.zo_raidz_parity) == 0);
	VERIFY(nvlist_add_nvlist_array(raidz, ZPOOL_CONFIG_CHILDREN,
	    child, r) == 0);

	for (c = 0; c < r; c++)
		nvlist_free(child[c]);

	umem_free(child, r * sizeof (nvlist_t *));

	return (raidz);
}

static nvlist_t *
make_vdev_mirror(char *path, char *aux, char *pool, size_t size,
    uint64_t ashift, int r, int m)
{
	nvlist_t *mirror, **child;
	int c;

	if (m < 1)
		return (make_vdev_raidz(path, aux, pool, size, ashift, r));

	child = umem_alloc(m * sizeof (nvlist_t *), UMEM_NOFAIL);

	for (c = 0; c < m; c++)
		child[c] = make_vdev_raidz(path, aux, pool, size, ashift, r);

	VERIFY(nvlist_alloc(&mirror, NV_UNIQUE_NAME, 0) == 0);
	VERIFY(nvlist_add_string(mirror, ZPOOL_CONFIG_TYPE,
	    VDEV_TYPE_MIRROR) == 0);
	VERIFY(nvlist_add_nvlist_array(mirror, ZPOOL_CONFIG_CHILDREN,
	    child, m) == 0);

	for (c = 0; c < m; c++)
		nvlist_free(child[c]);

	umem_free(child, m * sizeof (nvlist_t *));

	return (mirror);
}

static nvlist_t *
make_vdev_root(char *path, char *aux, char *pool, size_t size, uint64_t ashift,
    int log, int r, int m, int t)
{
	nvlist_t *root, **child;
	int c;

	ASSERT(t > 0);

	child = umem_alloc(t * sizeof (nvlist_t *), UMEM_NOFAIL);

	for (c = 0; c < t; c++) {
		child[c] = make_vdev_mirror(path, aux, pool, size, ashift,
		    r, m);
		VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
		    log) == 0);
	}

	VERIFY(nvlist_alloc(&root, NV_UNIQUE_NAME, 0) == 0);
	VERIFY(nvlist_add_string(root, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT) == 0);
	VERIFY(nvlist_add_nvlist_array(root, aux ? aux : ZPOOL_CONFIG_CHILDREN,
	    child, t) == 0);

	for (c = 0; c < t; c++)
		nvlist_free(child[c]);

	umem_free(child, t * sizeof (nvlist_t *));

	return (root);
}
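/*
 * The three helpers above compose bottom-up: files -> optional raidz groups
 * -> optional mirrors -> a root nvlist with 't' top-level children.  A
 * hypothetical call building the default topology (5 top-level vdevs, each
 * a 2-way mirror of 4-disk raidz1 file vdevs) would look like:
 */
#if 0
	nvlist_t *nvroot = make_vdev_root(NULL, NULL, NULL,
	    ztest_opts.zo_vdev_size, 0, 0, ztest_opts.zo_raidz,
	    ztest_opts.zo_mirrors, ztest_opts.zo_vdevs);
#endif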

/*
 * Find a random spa version. Returns back a random spa version in the
 * range [initial_version, SPA_VERSION_FEATURES].
 */
static uint64_t
ztest_random_spa_version(uint64_t initial_version)
{
	uint64_t version = initial_version;

	if (version <= SPA_VERSION_BEFORE_FEATURES) {
		version = version +
		    ztest_random(SPA_VERSION_BEFORE_FEATURES - version + 1);
	}

	if (version > SPA_VERSION_BEFORE_FEATURES)
		version = SPA_VERSION_FEATURES;

	ASSERT(SPA_VERSION_IS_SUPPORTED(version));
	return (version);
}

static int
ztest_random_blocksize(void)
{
	return (1 << (SPA_MINBLOCKSHIFT +
	    ztest_random(SPA_MAXBLOCKSHIFT - SPA_MINBLOCKSHIFT + 1)));
}

static int
ztest_random_ibshift(void)
{
	return (DN_MIN_INDBLKSHIFT +
	    ztest_random(DN_MAX_INDBLKSHIFT - DN_MIN_INDBLKSHIFT + 1));
}

static uint64_t
ztest_random_vdev_top(spa_t *spa, boolean_t log_ok)
{
	uint64_t top;
	vdev_t *rvd = spa->spa_root_vdev;
	vdev_t *tvd;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);

	do {
		top = ztest_random(rvd->vdev_children);
		tvd = rvd->vdev_child[top];
	} while (tvd->vdev_ishole || (tvd->vdev_islog && !log_ok) ||
	    tvd->vdev_mg == NULL || tvd->vdev_mg->mg_class == NULL);

	return (top);
}

static uint64_t
ztest_random_dsl_prop(zfs_prop_t prop)
{
	uint64_t value;

	do {
		value = zfs_prop_random_value(prop, ztest_random(-1ULL));
	} while (prop == ZFS_PROP_CHECKSUM && value == ZIO_CHECKSUM_OFF);

	return (value);
}

static int
ztest_dsl_prop_set_uint64(char *osname, zfs_prop_t prop, uint64_t value,
    boolean_t inherit)
{
	const char *propname = zfs_prop_to_name(prop);
	const char *valname;
	char setpoint[MAXPATHLEN];
	uint64_t curval;
	int error;

	error = dsl_prop_set(osname, propname,
	    (inherit ? ZPROP_SRC_NONE : ZPROP_SRC_LOCAL),
	    sizeof (value), 1, &value);

	if (error == ENOSPC) {
		ztest_record_enospc(FTAG);
		return (error);
	}
	ASSERT0(error);

	VERIFY3U(dsl_prop_get(osname, propname, sizeof (curval),
	    1, &curval, setpoint), ==, 0);

	if (ztest_opts.zo_verbose >= 6) {
		VERIFY(zfs_prop_index_to_string(prop, curval, &valname) == 0);
		(void) printf("%s %s = %s at '%s'\n",
		    osname, propname, valname, setpoint);
	}

	return (error);
}

static int
ztest_spa_prop_set_uint64(zpool_prop_t prop, uint64_t value)
{
	spa_t *spa = ztest_spa;
	nvlist_t *props = NULL;
	int error;

	VERIFY(nvlist_alloc(&props, NV_UNIQUE_NAME, 0) == 0);
	VERIFY(nvlist_add_uint64(props, zpool_prop_to_name(prop), value) == 0);

	error = spa_prop_set(spa, props);

	nvlist_free(props);

	if (error == ENOSPC) {
		ztest_record_enospc(FTAG);
		return (error);
	}
	ASSERT0(error);

	return (error);
}

static void
ztest_rll_init(rll_t *rll)
{
	rll->rll_writer = NULL;
	rll->rll_readers = 0;
	VERIFY(_mutex_init(&rll->rll_lock, USYNC_THREAD, NULL) == 0);
	VERIFY(cond_init(&rll->rll_cv, USYNC_THREAD, NULL) == 0);
}

static void
ztest_rll_destroy(rll_t *rll)
{
	ASSERT(rll->rll_writer == NULL);
	ASSERT(rll->rll_readers == 0);
	VERIFY(_mutex_destroy(&rll->rll_lock) == 0);
	VERIFY(cond_destroy(&rll->rll_cv) == 0);
}

static void
ztest_rll_lock(rll_t *rll, rl_type_t type)
{
	VERIFY(mutex_lock(&rll->rll_lock) == 0);

	if (type == RL_READER) {
		while (rll->rll_writer != NULL)
			(void) cond_wait(&rll->rll_cv, &rll->rll_lock);
		rll->rll_readers++;
	} else {
		while (rll->rll_writer != NULL || rll->rll_readers)
			(void) cond_wait(&rll->rll_cv, &rll->rll_lock);
		rll->rll_writer = curthread;
	}

	VERIFY(mutex_unlock(&rll->rll_lock) == 0);
}

static void
ztest_rll_unlock(rll_t *rll)
{
	VERIFY(mutex_lock(&rll->rll_lock) == 0);

	if (rll->rll_writer) {
		ASSERT(rll->rll_readers == 0);
		rll->rll_writer = NULL;
	} else {
		ASSERT(rll->rll_readers != 0);
		ASSERT(rll->rll_writer == NULL);
		rll->rll_readers--;
	}

	if (rll->rll_writer == NULL && rll->rll_readers == 0)
		VERIFY(cond_broadcast(&rll->rll_cv) == 0);

	VERIFY(mutex_unlock(&rll->rll_lock) == 0);
}

static void
ztest_object_lock(ztest_ds_t *zd, uint64_t object, rl_type_t type)
{
	rll_t *rll = &zd->zd_object_lock[object & (ZTEST_OBJECT_LOCKS - 1)];

	ztest_rll_lock(rll, type);
}

static void
ztest_object_unlock(ztest_ds_t *zd, uint64_t object)
{
	rll_t *rll = &zd->zd_object_lock[object & (ZTEST_OBJECT_LOCKS - 1)];

	ztest_rll_unlock(rll);
}

static rl_t *
ztest_range_lock(ztest_ds_t *zd, uint64_t object, uint64_t offset,
    uint64_t size, rl_type_t type)
{
	uint64_t hash = object ^ (offset % (ZTEST_RANGE_LOCKS + 1));
	rll_t *rll = &zd->zd_range_lock[hash & (ZTEST_RANGE_LOCKS - 1)];
	rl_t *rl;

	rl = umem_alloc(sizeof (*rl), UMEM_NOFAIL);
	rl->rl_object = object;
	rl->rl_offset = offset;
	rl->rl_size = size;
	rl->rl_lock = rll;

	ztest_rll_lock(rll, type);

	return (rl);
}

static void
ztest_range_unlock(rl_t *rl)
{
	rll_t *rll = rl->rl_lock;

	ztest_rll_unlock(rll);

	umem_free(rl, sizeof (*rl));
}
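/*
 * Hypothetical usage sketch (never compiled) of the object and range locks
 * above; the real callers further down in this file follow the same pattern:
 */
#if 0
	ztest_object_lock(zd, object, RL_READER);
	rl = ztest_range_lock(zd, object, offset, size, RL_WRITER);
	/* ... read or modify [offset, offset + size) of 'object' ... */
	ztest_range_unlock(rl);
	ztest_object_unlock(zd, object);
#endif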

static void
ztest_zd_init(ztest_ds_t *zd, ztest_shared_ds_t *szd, objset_t *os)
{
	zd->zd_os = os;
	zd->zd_zilog = dmu_objset_zil(os);
	zd->zd_shared = szd;
	dmu_objset_name(os, zd->zd_name);

	if (zd->zd_shared != NULL)
		zd->zd_shared->zd_seq = 0;

	VERIFY(rwlock_init(&zd->zd_zilog_lock, USYNC_THREAD, NULL) == 0);
	VERIFY(_mutex_init(&zd->zd_dirobj_lock, USYNC_THREAD, NULL) == 0);

	for (int l = 0; l < ZTEST_OBJECT_LOCKS; l++)
		ztest_rll_init(&zd->zd_object_lock[l]);

	for (int l = 0; l < ZTEST_RANGE_LOCKS; l++)
		ztest_rll_init(&zd->zd_range_lock[l]);
}

static void
ztest_zd_fini(ztest_ds_t *zd)
{
	VERIFY(_mutex_destroy(&zd->zd_dirobj_lock) == 0);

	for (int l = 0; l < ZTEST_OBJECT_LOCKS; l++)
		ztest_rll_destroy(&zd->zd_object_lock[l]);

	for (int l = 0; l < ZTEST_RANGE_LOCKS; l++)
		ztest_rll_destroy(&zd->zd_range_lock[l]);
}

#define	TXG_MIGHTWAIT	(ztest_random(10) == 0 ? TXG_NOWAIT : TXG_WAIT)

static uint64_t
ztest_tx_assign(dmu_tx_t *tx, uint64_t txg_how, const char *tag)
{
	uint64_t txg;
	int error;

	/*
	 * Attempt to assign tx to some transaction group.
	 */
	error = dmu_tx_assign(tx, txg_how);
	if (error) {
		if (error == ERESTART) {
			ASSERT(txg_how == TXG_NOWAIT);
			dmu_tx_wait(tx);
		} else {
			ASSERT3U(error, ==, ENOSPC);
			ztest_record_enospc(tag);
		}
		dmu_tx_abort(tx);
		return (0);
	}
	txg = dmu_tx_get_txg(tx);
	ASSERT(txg != 0);
	return (txg);
}

static void
ztest_pattern_set(void *buf, uint64_t size, uint64_t value)
{
	uint64_t *ip = buf;
	uint64_t *ip_end = (uint64_t *)((uintptr_t)buf + (uintptr_t)size);

	while (ip < ip_end)
		*ip++ = value;
}

static boolean_t
ztest_pattern_match(void *buf, uint64_t size, uint64_t value)
{
	uint64_t *ip = buf;
	uint64_t *ip_end = (uint64_t *)((uintptr_t)buf + (uintptr_t)size);
	uint64_t diff = 0;

	while (ip < ip_end)
		diff |= (value - *ip++);

	return (diff == 0);
}

static void
ztest_bt_generate(ztest_block_tag_t *bt, objset_t *os, uint64_t object,
    uint64_t offset, uint64_t gen, uint64_t txg, uint64_t crtxg)
{
	bt->bt_magic = BT_MAGIC;
	bt->bt_objset = dmu_objset_id(os);
	bt->bt_object = object;
	bt->bt_offset = offset;
	bt->bt_gen = gen;
	bt->bt_txg = txg;
	bt->bt_crtxg = crtxg;
}

static void
ztest_bt_verify(ztest_block_tag_t *bt, objset_t *os, uint64_t object,
    uint64_t offset, uint64_t gen, uint64_t txg, uint64_t crtxg)
{
	ASSERT(bt->bt_magic == BT_MAGIC);
	ASSERT(bt->bt_objset == dmu_objset_id(os));
	ASSERT(bt->bt_object == object);
	ASSERT(bt->bt_offset == offset);
	ASSERT(bt->bt_gen <= gen);
	ASSERT(bt->bt_txg <= txg);
	ASSERT(bt->bt_crtxg == crtxg);
}

static ztest_block_tag_t *
ztest_bt_bonus(dmu_buf_t *db)
{
	dmu_object_info_t doi;
	ztest_block_tag_t *bt;

	dmu_object_info_from_db(db, &doi);
	ASSERT3U(doi.doi_bonus_size, <=, db->db_size);
	ASSERT3U(doi.doi_bonus_size, >=, sizeof (*bt));
	bt = (void *)((char *)db->db_data + doi.doi_bonus_size - sizeof (*bt));

	return (bt);
}

/*
 * ZIL logging ops
 */

#define	lrz_type	lr_mode
#define	lrz_blocksize	lr_uid
#define	lrz_ibshift	lr_gid
#define	lrz_bonustype	lr_rdev
#define	lrz_bonuslen	lr_crtime[1]
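/*
 * The defines above overload lr_create_t fields that ztest does not
 * otherwise need (mode, uid, gid, rdev, crtime[1]) to carry the object's
 * type, block size, indirect block shift, bonus type and bonus length
 * through the ZIL; ztest_replay_create() reads them back out.
 */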

static void
ztest_log_create(ztest_ds_t *zd, dmu_tx_t *tx, lr_create_t *lr)
{
	char *name = (void *)(lr + 1);		/* name follows lr */
	size_t namesize = strlen(name) + 1;
	itx_t *itx;

	if (zil_replaying(zd->zd_zilog, tx))
		return;

	itx = zil_itx_create(TX_CREATE, sizeof (*lr) + namesize);
	bcopy(&lr->lr_common + 1, &itx->itx_lr + 1,
	    sizeof (*lr) + namesize - sizeof (lr_t));

	zil_itx_assign(zd->zd_zilog, itx, tx);
}

static void
ztest_log_remove(ztest_ds_t *zd, dmu_tx_t *tx, lr_remove_t *lr, uint64_t object)
{
	char *name = (void *)(lr + 1);		/* name follows lr */
	size_t namesize = strlen(name) + 1;
	itx_t *itx;

	if (zil_replaying(zd->zd_zilog, tx))
		return;

	itx = zil_itx_create(TX_REMOVE, sizeof (*lr) + namesize);
	bcopy(&lr->lr_common + 1, &itx->itx_lr + 1,
	    sizeof (*lr) + namesize - sizeof (lr_t));

	itx->itx_oid = object;
	zil_itx_assign(zd->zd_zilog, itx, tx);
}

static void
ztest_log_write(ztest_ds_t *zd, dmu_tx_t *tx, lr_write_t *lr)
{
	itx_t *itx;
	itx_wr_state_t write_state = ztest_random(WR_NUM_STATES);

	if (zil_replaying(zd->zd_zilog, tx))
		return;

	if (lr->lr_length > ZIL_MAX_LOG_DATA)
		write_state = WR_INDIRECT;

	itx = zil_itx_create(TX_WRITE,
	    sizeof (*lr) + (write_state == WR_COPIED ? lr->lr_length : 0));

	if (write_state == WR_COPIED &&
	    dmu_read(zd->zd_os, lr->lr_foid, lr->lr_offset, lr->lr_length,
	    ((lr_write_t *)&itx->itx_lr) + 1, DMU_READ_NO_PREFETCH) != 0) {
		zil_itx_destroy(itx);
		itx = zil_itx_create(TX_WRITE, sizeof (*lr));
		write_state = WR_NEED_COPY;
	}
	itx->itx_private = zd;
	itx->itx_wr_state = write_state;
	itx->itx_sync = (ztest_random(8) == 0);
	itx->itx_sod += (write_state == WR_NEED_COPY ? lr->lr_length : 0);

	bcopy(&lr->lr_common + 1, &itx->itx_lr + 1,
	    sizeof (*lr) - sizeof (lr_t));

	zil_itx_assign(zd->zd_zilog, itx, tx);
}

static void
ztest_log_truncate(ztest_ds_t *zd, dmu_tx_t *tx, lr_truncate_t *lr)
{
	itx_t *itx;

	if (zil_replaying(zd->zd_zilog, tx))
		return;

	itx = zil_itx_create(TX_TRUNCATE, sizeof (*lr));
	bcopy(&lr->lr_common + 1, &itx->itx_lr + 1,
	    sizeof (*lr) - sizeof (lr_t));

	itx->itx_sync = B_FALSE;
	zil_itx_assign(zd->zd_zilog, itx, tx);
}

static void
ztest_log_setattr(ztest_ds_t *zd, dmu_tx_t *tx, lr_setattr_t *lr)
{
	itx_t *itx;

	if (zil_replaying(zd->zd_zilog, tx))
		return;

	itx = zil_itx_create(TX_SETATTR, sizeof (*lr));
	bcopy(&lr->lr_common + 1, &itx->itx_lr + 1,
	    sizeof (*lr) - sizeof (lr_t));

	itx->itx_sync = B_FALSE;
	zil_itx_assign(zd->zd_zilog, itx, tx);
}

/*
 * ZIL replay ops
 */
static int
ztest_replay_create(ztest_ds_t *zd, lr_create_t *lr, boolean_t byteswap)
{
	char *name = (void *)(lr + 1);		/* name follows lr */
	objset_t *os = zd->zd_os;
	ztest_block_tag_t *bbt;
	dmu_buf_t *db;
	dmu_tx_t *tx;
	uint64_t txg;
	int error = 0;

	if (byteswap)
		byteswap_uint64_array(lr, sizeof (*lr));

	ASSERT(lr->lr_doid == ZTEST_DIROBJ);
	ASSERT(name[0] != '\0');

	tx = dmu_tx_create(os);

	dmu_tx_hold_zap(tx, lr->lr_doid, B_TRUE, name);

	if (lr->lrz_type == DMU_OT_ZAP_OTHER) {
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
	} else {
		dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);
	}

	txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
	if (txg == 0)
		return (ENOSPC);

	ASSERT(dmu_objset_zil(os)->zl_replay == !!lr->lr_foid);

	if (lr->lrz_type == DMU_OT_ZAP_OTHER) {
		if (lr->lr_foid == 0) {
			lr->lr_foid = zap_create(os,
			    lr->lrz_type, lr->lrz_bonustype,
			    lr->lrz_bonuslen, tx);
		} else {
			error = zap_create_claim(os, lr->lr_foid,
			    lr->lrz_type, lr->lrz_bonustype,
			    lr->lrz_bonuslen, tx);
		}
	} else {
		if (lr->lr_foid == 0) {
			lr->lr_foid = dmu_object_alloc(os,
			    lr->lrz_type, 0, lr->lrz_bonustype,
			    lr->lrz_bonuslen, tx);
		} else {
			error = dmu_object_claim(os, lr->lr_foid,
			    lr->lrz_type, 0, lr->lrz_bonustype,
			    lr->lrz_bonuslen, tx);
		}
	}

	if (error) {
		ASSERT3U(error, ==, EEXIST);
		ASSERT(zd->zd_zilog->zl_replay);
		dmu_tx_commit(tx);
		return (error);
	}

	ASSERT(lr->lr_foid != 0);

	if (lr->lrz_type != DMU_OT_ZAP_OTHER)
		VERIFY3U(0, ==, dmu_object_set_blocksize(os, lr->lr_foid,
		    lr->lrz_blocksize, lr->lrz_ibshift, tx));

	VERIFY3U(0, ==, dmu_bonus_hold(os, lr->lr_foid, FTAG, &db));
	bbt = ztest_bt_bonus(db);
	dmu_buf_will_dirty(db, tx);
	ztest_bt_generate(bbt, os, lr->lr_foid, -1ULL, lr->lr_gen, txg, txg);
	dmu_buf_rele(db, FTAG);

	VERIFY3U(0, ==, zap_add(os, lr->lr_doid, name, sizeof (uint64_t), 1,
	    &lr->lr_foid, tx));

	(void) ztest_log_create(zd, tx, lr);

	dmu_tx_commit(tx);

	return (0);
}

static int
ztest_replay_remove(ztest_ds_t *zd, lr_remove_t *lr, boolean_t byteswap)
{
	char *name = (void *)(lr + 1);		/* name follows lr */
	objset_t *os = zd->zd_os;
	dmu_object_info_t doi;
	dmu_tx_t *tx;
	uint64_t object, txg;

	if (byteswap)
		byteswap_uint64_array(lr, sizeof (*lr));

	ASSERT(lr->lr_doid == ZTEST_DIROBJ);
	ASSERT(name[0] != '\0');

	VERIFY3U(0, ==,
	    zap_lookup(os, lr->lr_doid, name, sizeof (object), 1, &object));
	ASSERT(object != 0);

	ztest_object_lock(zd, object, RL_WRITER);

	VERIFY3U(0, ==, dmu_object_info(os, object, &doi));

	tx = dmu_tx_create(os);

	dmu_tx_hold_zap(tx, lr->lr_doid, B_FALSE, name);
	dmu_tx_hold_free(tx, object, 0, DMU_OBJECT_END);

	txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
	if (txg == 0) {
		ztest_object_unlock(zd, object);
		return (ENOSPC);
	}

	if (doi.doi_type == DMU_OT_ZAP_OTHER) {
		VERIFY3U(0, ==, zap_destroy(os, object, tx));
	} else {
		VERIFY3U(0, ==, dmu_object_free(os, object, tx));
	}

	VERIFY3U(0, ==, zap_remove(os, lr->lr_doid, name, tx));

	(void) ztest_log_remove(zd, tx, lr, object);

	dmu_tx_commit(tx);

	ztest_object_unlock(zd, object);

	return (0);
}

static int
ztest_replay_write(ztest_ds_t *zd, lr_write_t *lr, boolean_t byteswap)
{
	objset_t *os = zd->zd_os;
	void *data = lr + 1;			/* data follows lr */
	uint64_t offset, length;
	ztest_block_tag_t *bt = data;
	ztest_block_tag_t *bbt;
	uint64_t gen, txg, lrtxg, crtxg;
	dmu_object_info_t doi;
	dmu_tx_t *tx;
	dmu_buf_t *db;
	arc_buf_t *abuf = NULL;
	rl_t *rl;

	if (byteswap)
		byteswap_uint64_array(lr, sizeof (*lr));

	offset = lr->lr_offset;
	length = lr->lr_length;

	/* If it's a dmu_sync() block, write the whole block */
	if (lr->lr_common.lrc_reclen == sizeof (lr_write_t)) {
		uint64_t blocksize = BP_GET_LSIZE(&lr->lr_blkptr);
		if (length < blocksize) {
			offset -= offset % blocksize;
			length = blocksize;
		}
	}

	if (bt->bt_magic == BSWAP_64(BT_MAGIC))
		byteswap_uint64_array(bt, sizeof (*bt));

	if (bt->bt_magic != BT_MAGIC)
		bt = NULL;

	ztest_object_lock(zd, lr->lr_foid, RL_READER);
	rl = ztest_range_lock(zd, lr->lr_foid, offset, length, RL_WRITER);

	VERIFY3U(0, ==, dmu_bonus_hold(os, lr->lr_foid, FTAG, &db));

	dmu_object_info_from_db(db, &doi);

	bbt = ztest_bt_bonus(db);
	ASSERT3U(bbt->bt_magic, ==, BT_MAGIC);
	gen = bbt->bt_gen;
	crtxg = bbt->bt_crtxg;
	lrtxg = lr->lr_common.lrc_txg;

	tx = dmu_tx_create(os);

	dmu_tx_hold_write(tx, lr->lr_foid, offset, length);

	if (ztest_random(8) == 0 && length == doi.doi_data_block_size &&
	    P2PHASE(offset, length) == 0)
		abuf = dmu_request_arcbuf(db, length);

	txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
	if (txg == 0) {
		if (abuf != NULL)
			dmu_return_arcbuf(abuf);
		dmu_buf_rele(db, FTAG);
		ztest_range_unlock(rl);
		ztest_object_unlock(zd, lr->lr_foid);
		return (ENOSPC);
	}

	if (bt != NULL) {
		/*
		 * Usually, verify the old data before writing new data --
		 * but not always, because we also want to verify correct
		 * behavior when the data was not recently read into cache.
		 */
		ASSERT(offset % doi.doi_data_block_size == 0);
		if (ztest_random(4) != 0) {
			int prefetch = ztest_random(2) ?
			    DMU_READ_PREFETCH : DMU_READ_NO_PREFETCH;
			ztest_block_tag_t rbt;

			VERIFY(dmu_read(os, lr->lr_foid, offset,
			    sizeof (rbt), &rbt, prefetch) == 0);
			if (rbt.bt_magic == BT_MAGIC) {
				ztest_bt_verify(&rbt, os, lr->lr_foid,
				    offset, gen, txg, crtxg);
			}
		}

		/*
		 * Writes can appear to be newer than the bonus buffer because
		 * the ztest_get_data() callback does a dmu_read() of the
		 * open-context data, which may be different than the data
		 * as it was when the write was generated.
		 */
		if (zd->zd_zilog->zl_replay) {
			ztest_bt_verify(bt, os, lr->lr_foid, offset,
			    MAX(gen, bt->bt_gen), MAX(txg, lrtxg),
			    bt->bt_crtxg);
		}

		/*
		 * Set the bt's gen/txg to the bonus buffer's gen/txg
		 * so that all of the usual ASSERTs will work.
		 */
		ztest_bt_generate(bt, os, lr->lr_foid, offset, gen, txg, crtxg);
	}

	if (abuf == NULL) {
		dmu_write(os, lr->lr_foid, offset, length, data, tx);
	} else {
		bcopy(data, abuf->b_data, length);
		dmu_assign_arcbuf(db, offset, abuf, tx);
	}

	(void) ztest_log_write(zd, tx, lr);

	dmu_buf_rele(db, FTAG);

	dmu_tx_commit(tx);

	ztest_range_unlock(rl);
	ztest_object_unlock(zd, lr->lr_foid);

	return (0);
}

static int
ztest_replay_truncate(ztest_ds_t *zd, lr_truncate_t *lr, boolean_t byteswap)
{
	objset_t *os = zd->zd_os;
	dmu_tx_t *tx;
	uint64_t txg;
	rl_t *rl;

	if (byteswap)
		byteswap_uint64_array(lr, sizeof (*lr));

	ztest_object_lock(zd, lr->lr_foid, RL_READER);
	rl = ztest_range_lock(zd, lr->lr_foid, lr->lr_offset, lr->lr_length,
	    RL_WRITER);

	tx = dmu_tx_create(os);

	dmu_tx_hold_free(tx, lr->lr_foid, lr->lr_offset, lr->lr_length);

	txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
	if (txg == 0) {
		ztest_range_unlock(rl);
		ztest_object_unlock(zd, lr->lr_foid);
		return (ENOSPC);
	}

	VERIFY(dmu_free_range(os, lr->lr_foid, lr->lr_offset,
	    lr->lr_length, tx) == 0);

	(void) ztest_log_truncate(zd, tx, lr);

	dmu_tx_commit(tx);

	ztest_range_unlock(rl);
	ztest_object_unlock(zd, lr->lr_foid);

	return (0);
}

static int
ztest_replay_setattr(ztest_ds_t *zd, lr_setattr_t *lr, boolean_t byteswap)
{
	objset_t *os = zd->zd_os;
	dmu_tx_t *tx;
	dmu_buf_t *db;
	ztest_block_tag_t *bbt;
	uint64_t txg, lrtxg, crtxg;

	if (byteswap)
		byteswap_uint64_array(lr, sizeof (*lr));

	ztest_object_lock(zd, lr->lr_foid, RL_WRITER);

	VERIFY3U(0, ==, dmu_bonus_hold(os, lr->lr_foid, FTAG, &db));

	tx = dmu_tx_create(os);
	dmu_tx_hold_bonus(tx, lr->lr_foid);

	txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
	if (txg == 0) {
		dmu_buf_rele(db, FTAG);
		ztest_object_unlock(zd, lr->lr_foid);
		return (ENOSPC);
	}

	bbt = ztest_bt_bonus(db);
	ASSERT3U(bbt->bt_magic, ==, BT_MAGIC);
	crtxg = bbt->bt_crtxg;
	lrtxg = lr->lr_common.lrc_txg;

	if (zd->zd_zilog->zl_replay) {
		ASSERT(lr->lr_size != 0);
		ASSERT(lr->lr_mode != 0);
		ASSERT(lrtxg != 0);
	} else {
		/*
		 * Randomly change the size and increment the generation.
		 */
		lr->lr_size = (ztest_random(db->db_size / sizeof (*bbt)) + 1) *
		    sizeof (*bbt);
		lr->lr_mode = bbt->bt_gen + 1;
		ASSERT(lrtxg == 0);
	}

	/*
	 * Verify that the current bonus buffer is not newer than our txg.
	 */
	ztest_bt_verify(bbt, os, lr->lr_foid, -1ULL, lr->lr_mode,
	    MAX(txg, lrtxg), crtxg);

	dmu_buf_will_dirty(db, tx);

	ASSERT3U(lr->lr_size, >=, sizeof (*bbt));
	ASSERT3U(lr->lr_size, <=, db->db_size);
	VERIFY0(dmu_set_bonus(db, lr->lr_size, tx));
	bbt = ztest_bt_bonus(db);

	ztest_bt_generate(bbt, os, lr->lr_foid, -1ULL, lr->lr_mode, txg, crtxg);

	dmu_buf_rele(db, FTAG);

	(void) ztest_log_setattr(zd, tx, lr);

	dmu_tx_commit(tx);

	ztest_object_unlock(zd, lr->lr_foid);

	return (0);
}

zil_replay_func_t *ztest_replay_vector[TX_MAX_TYPE] = {
	NULL,			/* 0 no such transaction type */
	ztest_replay_create,	/* TX_CREATE */
	NULL,			/* TX_MKDIR */
	NULL,			/* TX_MKXATTR */
	NULL,			/* TX_SYMLINK */
	ztest_replay_remove,	/* TX_REMOVE */
	NULL,			/* TX_RMDIR */
	NULL,			/* TX_LINK */
	NULL,			/* TX_RENAME */
	ztest_replay_write,	/* TX_WRITE */
	ztest_replay_truncate,	/* TX_TRUNCATE */
	ztest_replay_setattr,	/* TX_SETATTR */
	NULL,			/* TX_ACL */
	NULL,			/* TX_CREATE_ACL */
	NULL,			/* TX_CREATE_ATTR */
	NULL,			/* TX_CREATE_ACL_ATTR */
	NULL,			/* TX_MKDIR_ACL */
	NULL,			/* TX_MKDIR_ATTR */
	NULL,			/* TX_MKDIR_ACL_ATTR */
	NULL,			/* TX_WRITE2 */
};

/*
 * ZIL get_data callbacks
 */

static void
ztest_get_done(zgd_t *zgd, int error)
{
	ztest_ds_t *zd = zgd->zgd_private;
	uint64_t object = zgd->zgd_rl->rl_object;

	if (zgd->zgd_db)
		dmu_buf_rele(zgd->zgd_db, zgd);

	ztest_range_unlock(zgd->zgd_rl);
	ztest_object_unlock(zd, object);

	if (error == 0 && zgd->zgd_bp)
		zil_add_block(zgd->zgd_zilog, zgd->zgd_bp);

	umem_free(zgd, sizeof (*zgd));
}

static int
ztest_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
{
	ztest_ds_t *zd = arg;
	objset_t *os = zd->zd_os;
	uint64_t object = lr->lr_foid;
	uint64_t offset = lr->lr_offset;
	uint64_t size = lr->lr_length;
	blkptr_t *bp = &lr->lr_blkptr;
	uint64_t txg = lr->lr_common.lrc_txg;
	uint64_t crtxg;
	dmu_object_info_t doi;
	dmu_buf_t *db;
	zgd_t *zgd;
	int error;

	ztest_object_lock(zd, object, RL_READER);
	error = dmu_bonus_hold(os, object, FTAG, &db);
	if (error) {
		ztest_object_unlock(zd, object);
		return (error);
	}

	crtxg = ztest_bt_bonus(db)->bt_crtxg;

	if (crtxg == 0 || crtxg > txg) {
		dmu_buf_rele(db, FTAG);
		ztest_object_unlock(zd, object);
		return (ENOENT);
	}

	dmu_object_info_from_db(db, &doi);
	dmu_buf_rele(db, FTAG);
	db = NULL;

	zgd = umem_zalloc(sizeof (*zgd), UMEM_NOFAIL);
	zgd->zgd_zilog = zd->zd_zilog;
	zgd->zgd_private = zd;

	if (buf != NULL) {	/* immediate write */
		zgd->zgd_rl = ztest_range_lock(zd, object, offset, size,
		    RL_READER);

		error = dmu_read(os, object, offset, size, buf,
		    DMU_READ_NO_PREFETCH);
		ASSERT(error == 0);
	} else {
		size = doi.doi_data_block_size;
		if (ISP2(size)) {
			offset = P2ALIGN(offset, size);
		} else {
			ASSERT(offset < size);
			offset = 0;
		}

		zgd->zgd_rl = ztest_range_lock(zd, object, offset, size,
		    RL_READER);

		error = dmu_buf_hold(os, object, offset, zgd, &db,
		    DMU_READ_NO_PREFETCH);

		if (error == 0) {
			blkptr_t *obp = dmu_buf_get_blkptr(db);
			if (obp) {
				ASSERT(BP_IS_HOLE(bp));
				*bp = *obp;
			}

			zgd->zgd_db = db;
			zgd->zgd_bp = bp;

			ASSERT(db->db_offset == offset);
			ASSERT(db->db_size == size);

			error = dmu_sync(zio, lr->lr_common.lrc_txg,
			    ztest_get_done, zgd);

			if (error == 0)
				return (0);
		}
	}

	ztest_get_done(zgd, error);

	return (error);
}

static void *
ztest_lr_alloc(size_t lrsize, char *name)
{
	char *lr;
	size_t namesize = name ? strlen(name) + 1 : 0;

	lr = umem_zalloc(lrsize + namesize, UMEM_NOFAIL);

	if (name)
		bcopy(name, lr + lrsize, namesize);

	return (lr);
}

void
ztest_lr_free(void *lr, size_t lrsize, char *name)
{
	size_t namesize = name ? strlen(name) + 1 : 0;

	umem_free(lr, lrsize + namesize);
}

/*
 * Lookup a bunch of objects.  Returns the number of objects not found.
 */
static int
ztest_lookup(ztest_ds_t *zd, ztest_od_t *od, int count)
{
	int missing = 0;
	int error;

	ASSERT(_mutex_held(&zd->zd_dirobj_lock));

	for (int i = 0; i < count; i++, od++) {
		od->od_object = 0;
		error = zap_lookup(zd->zd_os, od->od_dir, od->od_name,
		    sizeof (uint64_t), 1, &od->od_object);
		if (error) {
			ASSERT(error == ENOENT);
			ASSERT(od->od_object == 0);
			missing++;
		} else {
			dmu_buf_t *db;
			ztest_block_tag_t *bbt;
			dmu_object_info_t doi;

			ASSERT(od->od_object != 0);
			ASSERT(missing == 0);	/* there should be no gaps */

			ztest_object_lock(zd, od->od_object, RL_READER);
			VERIFY3U(0, ==, dmu_bonus_hold(zd->zd_os,
			    od->od_object, FTAG, &db));
			dmu_object_info_from_db(db, &doi);
			bbt = ztest_bt_bonus(db);
			ASSERT3U(bbt->bt_magic, ==, BT_MAGIC);
			od->od_type = doi.doi_type;
			od->od_blocksize = doi.doi_data_block_size;
			od->od_gen = bbt->bt_gen;
			dmu_buf_rele(db, FTAG);
			ztest_object_unlock(zd, od->od_object);
		}
	}

	return (missing);
}

static int
ztest_create(ztest_ds_t *zd, ztest_od_t *od, int count)
{
	int missing = 0;

	ASSERT(_mutex_held(&zd->zd_dirobj_lock));

	for (int i = 0; i < count; i++, od++) {
		if (missing) {
			od->od_object = 0;
			missing++;
			continue;
		}

		lr_create_t *lr = ztest_lr_alloc(sizeof (*lr), od->od_name);

		lr->lr_doid = od->od_dir;
		lr->lr_foid = 0;	/* 0 to allocate, > 0 to claim */
		lr->lrz_type = od->od_crtype;
		lr->lrz_blocksize = od->od_crblocksize;
		lr->lrz_ibshift = ztest_random_ibshift();
		lr->lrz_bonustype = DMU_OT_UINT64_OTHER;
		lr->lrz_bonuslen = dmu_bonus_max();
		lr->lr_gen = od->od_crgen;
		lr->lr_crtime[0] = time(NULL);

		if (ztest_replay_create(zd, lr, B_FALSE) != 0) {
			ASSERT(missing == 0);
			od->od_object = 0;
			missing++;
		} else {
			od->od_object = lr->lr_foid;
			od->od_type = od->od_crtype;
			od->od_blocksize = od->od_crblocksize;
			od->od_gen = od->od_crgen;
			ASSERT(od->od_object != 0);
		}

		ztest_lr_free(lr, sizeof (*lr), od->od_name);
	}

	return (missing);
}

static int
ztest_remove(ztest_ds_t *zd, ztest_od_t *od, int count)
{
	int missing = 0;
	int error;

	ASSERT(_mutex_held(&zd->zd_dirobj_lock));

	od += count - 1;

	for (int i = count - 1; i >= 0; i--, od--) {
		if (missing) {
			missing++;
			continue;
		}

		/*
		 * No object was found.
		 */
		if (od->od_object == 0)
			continue;

		lr_remove_t *lr = ztest_lr_alloc(sizeof (*lr), od->od_name);

		lr->lr_doid = od->od_dir;

		if ((error = ztest_replay_remove(zd, lr, B_FALSE)) != 0) {
			ASSERT3U(error, ==, ENOSPC);
			missing++;
		} else {
			od->od_object = 0;
		}
		ztest_lr_free(lr, sizeof (*lr), od->od_name);
	}

	return (missing);
}

static int
ztest_write(ztest_ds_t *zd, uint64_t object, uint64_t offset, uint64_t size,
    void *data)
{
	lr_write_t *lr;
	int error;

	lr = ztest_lr_alloc(sizeof (*lr) + size, NULL);

	lr->lr_foid = object;
	lr->lr_offset = offset;
	lr->lr_length = size;
	lr->lr_blkoff = 0;
	BP_ZERO(&lr->lr_blkptr);

	bcopy(data, lr + 1, size);

	error = ztest_replay_write(zd, lr, B_FALSE);

	ztest_lr_free(lr, sizeof (*lr) + size, NULL);

	return (error);
}

static int
ztest_truncate(ztest_ds_t *zd, uint64_t object, uint64_t offset, uint64_t size)
{
	lr_truncate_t *lr;
	int error;

	lr = ztest_lr_alloc(sizeof (*lr), NULL);

	lr->lr_foid = object;
	lr->lr_offset = offset;
	lr->lr_length = size;

	error = ztest_replay_truncate(zd, lr, B_FALSE);

	ztest_lr_free(lr, sizeof (*lr), NULL);

	return (error);
}

static int
ztest_setattr(ztest_ds_t *zd, uint64_t object)
{
	lr_setattr_t *lr;
	int error;

	lr = ztest_lr_alloc(sizeof (*lr), NULL);

	lr->lr_foid = object;
	lr->lr_size = 0;
	lr->lr_mode = 0;

	error = ztest_replay_setattr(zd, lr, B_FALSE);

	ztest_lr_free(lr, sizeof (*lr), NULL);

	return (error);
}

static void
ztest_prealloc(ztest_ds_t *zd, uint64_t object, uint64_t offset, uint64_t size)
{
	objset_t *os = zd->zd_os;
	dmu_tx_t *tx;
	uint64_t txg;
	rl_t *rl;

	txg_wait_synced(dmu_objset_pool(os), 0);

	ztest_object_lock(zd, object, RL_READER);
	rl = ztest_range_lock(zd, object, offset, size, RL_WRITER);

	tx = dmu_tx_create(os);

	dmu_tx_hold_write(tx, object, offset, size);

	txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);

	if (txg != 0) {
		dmu_prealloc(os, object, offset, size, tx);
		dmu_tx_commit(tx);
		txg_wait_synced(dmu_objset_pool(os), txg);
	} else {
		(void) dmu_free_long_range(os, object, offset, size);
	}

	ztest_range_unlock(rl);
	ztest_object_unlock(zd, object);
}

static void
ztest_io(ztest_ds_t *zd, uint64_t object, uint64_t offset)
{
	int err;
	ztest_block_tag_t wbt;
	dmu_object_info_t doi;
	enum ztest_io_type io_type;
	uint64_t blocksize;
	void *data;

	VERIFY(dmu_object_info(zd->zd_os, object, &doi) == 0);
	blocksize = doi.doi_data_block_size;
	data = umem_alloc(blocksize, UMEM_NOFAIL);

2145 */ 2146 io_type = ztest_random(ZTEST_IO_TYPES); 2147 if (ztest_random(2) == 0) 2148 io_type = ZTEST_IO_WRITE_TAG; 2149 2150 (void) rw_rdlock(&zd->zd_zilog_lock); 2151 2152 switch (io_type) { 2153 2154 case ZTEST_IO_WRITE_TAG: 2155 ztest_bt_generate(&wbt, zd->zd_os, object, offset, 0, 0, 0); 2156 (void) ztest_write(zd, object, offset, sizeof (wbt), &wbt); 2157 break; 2158 2159 case ZTEST_IO_WRITE_PATTERN: 2160 (void) memset(data, 'a' + (object + offset) % 5, blocksize); 2161 if (ztest_random(2) == 0) { 2162 /* 2163 * Induce fletcher2 collisions to ensure that 2164 * zio_ddt_collision() detects and resolves them 2165 * when using fletcher2-verify for deduplication. 2166 */ 2167 ((uint64_t *)data)[0] ^= 1ULL << 63; 2168 ((uint64_t *)data)[4] ^= 1ULL << 63; 2169 } 2170 (void) ztest_write(zd, object, offset, blocksize, data); 2171 break; 2172 2173 case ZTEST_IO_WRITE_ZEROES: 2174 bzero(data, blocksize); 2175 (void) ztest_write(zd, object, offset, blocksize, data); 2176 break; 2177 2178 case ZTEST_IO_TRUNCATE: 2179 (void) ztest_truncate(zd, object, offset, blocksize); 2180 break; 2181 2182 case ZTEST_IO_SETATTR: 2183 (void) ztest_setattr(zd, object); 2184 break; 2185 2186 case ZTEST_IO_REWRITE: 2187 (void) rw_rdlock(&ztest_name_lock); 2188 err = ztest_dsl_prop_set_uint64(zd->zd_name, 2189 ZFS_PROP_CHECKSUM, spa_dedup_checksum(ztest_spa), 2190 B_FALSE); 2191 VERIFY(err == 0 || err == ENOSPC); 2192 err = ztest_dsl_prop_set_uint64(zd->zd_name, 2193 ZFS_PROP_COMPRESSION, 2194 ztest_random_dsl_prop(ZFS_PROP_COMPRESSION), 2195 B_FALSE); 2196 VERIFY(err == 0 || err == ENOSPC); 2197 (void) rw_unlock(&ztest_name_lock); 2198 2199 VERIFY0(dmu_read(zd->zd_os, object, offset, blocksize, data, 2200 DMU_READ_NO_PREFETCH)); 2201 2202 (void) ztest_write(zd, object, offset, blocksize, data); 2203 break; 2204 } 2205 2206 (void) rw_unlock(&zd->zd_zilog_lock); 2207 2208 umem_free(data, blocksize); 2209} 2210 2211/* 2212 * Initialize an object description template. 2213 */ 2214static void 2215ztest_od_init(ztest_od_t *od, uint64_t id, char *tag, uint64_t index, 2216 dmu_object_type_t type, uint64_t blocksize, uint64_t gen) 2217{ 2218 od->od_dir = ZTEST_DIROBJ; 2219 od->od_object = 0; 2220 2221 od->od_crtype = type; 2222 od->od_crblocksize = blocksize ? blocksize : ztest_random_blocksize(); 2223 od->od_crgen = gen; 2224 2225 od->od_type = DMU_OT_NONE; 2226 od->od_blocksize = 0; 2227 od->od_gen = 0; 2228 2229 (void) snprintf(od->od_name, sizeof (od->od_name), "%s(%lld)[%llu]", 2230 tag, (int64_t)id, index); 2231} 2232 2233/* 2234 * Lookup or create the objects for a test using the od template. 2235 * If the objects do not all exist, or if 'remove' is specified, 2236 * remove any existing objects and create new ones. Otherwise, 2237 * use the existing objects. 
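 * Returns 0 when every slot in the od template ends up describing a
 * usable object, and -1 when a remove or create step failed (typically
 * because of ENOSPC); callers such as ztest_dmu_object_alloc_free()
 * simply return early in that case.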
2238 */ 2239static int 2240ztest_object_init(ztest_ds_t *zd, ztest_od_t *od, size_t size, boolean_t remove) 2241{ 2242 int count = size / sizeof (*od); 2243 int rv = 0; 2244 2245 VERIFY(mutex_lock(&zd->zd_dirobj_lock) == 0); 2246 if ((ztest_lookup(zd, od, count) != 0 || remove) && 2247 (ztest_remove(zd, od, count) != 0 || 2248 ztest_create(zd, od, count) != 0)) 2249 rv = -1; 2250 zd->zd_od = od; 2251 VERIFY(mutex_unlock(&zd->zd_dirobj_lock) == 0); 2252 2253 return (rv); 2254} 2255 2256/* ARGSUSED */ 2257void 2258ztest_zil_commit(ztest_ds_t *zd, uint64_t id) 2259{ 2260 zilog_t *zilog = zd->zd_zilog; 2261 2262 (void) rw_rdlock(&zd->zd_zilog_lock); 2263 2264 zil_commit(zilog, ztest_random(ZTEST_OBJECTS)); 2265 2266 /* 2267 * Remember the committed values in zd, which is in parent/child 2268 * shared memory. If we die, the next iteration of ztest_run() 2269 * will verify that the log really does contain this record. 2270 */ 2271 mutex_enter(&zilog->zl_lock); 2272 ASSERT(zd->zd_shared != NULL); 2273 ASSERT3U(zd->zd_shared->zd_seq, <=, zilog->zl_commit_lr_seq); 2274 zd->zd_shared->zd_seq = zilog->zl_commit_lr_seq; 2275 mutex_exit(&zilog->zl_lock); 2276 2277 (void) rw_unlock(&zd->zd_zilog_lock); 2278} 2279 2280/* 2281 * This function is designed to simulate the operations that occur during a 2282 * mount/unmount operation. We hold the dataset across these operations in an 2283 * attempt to expose any implicit assumptions about ZIL management. 2284 */ 2285/* ARGSUSED */ 2286void 2287ztest_zil_remount(ztest_ds_t *zd, uint64_t id) 2288{ 2289 objset_t *os = zd->zd_os; 2290 2291 /* 2292 * We grab the zd_dirobj_lock to ensure that no other thread is 2293 * updating the zil (i.e. adding in-memory log records) and the 2294 * zd_zilog_lock to block any I/O. 2295 */ 2296 VERIFY0(mutex_lock(&zd->zd_dirobj_lock)); 2297 (void) rw_wrlock(&zd->zd_zilog_lock); 2298 2299 /* zfsvfs_teardown() */ 2300 zil_close(zd->zd_zilog); 2301 2302 /* zfsvfs_setup() */ 2303 VERIFY(zil_open(os, ztest_get_data) == zd->zd_zilog); 2304 zil_replay(os, zd, ztest_replay_vector); 2305 2306 (void) rw_unlock(&zd->zd_zilog_lock); 2307 VERIFY(mutex_unlock(&zd->zd_dirobj_lock) == 0); 2308} 2309 2310/* 2311 * Verify that we can't destroy an active pool, create an existing pool, 2312 * or create a pool with a bad vdev spec. 2313 */ 2314/* ARGSUSED */ 2315void 2316ztest_spa_create_destroy(ztest_ds_t *zd, uint64_t id) 2317{ 2318 ztest_shared_opts_t *zo = &ztest_opts; 2319 spa_t *spa; 2320 nvlist_t *nvroot; 2321 2322 /* 2323 * Attempt to create using a bad file. 2324 */ 2325 nvroot = make_vdev_root("/dev/bogus", NULL, NULL, 0, 0, 0, 0, 0, 1); 2326 VERIFY3U(ENOENT, ==, 2327 spa_create("ztest_bad_file", nvroot, NULL, NULL, NULL)); 2328 nvlist_free(nvroot); 2329 2330 /* 2331 * Attempt to create using a bad mirror. 2332 */ 2333 nvroot = make_vdev_root("/dev/bogus", NULL, NULL, 0, 0, 0, 0, 2, 1); 2334 VERIFY3U(ENOENT, ==, 2335 spa_create("ztest_bad_mirror", nvroot, NULL, NULL, NULL)); 2336 nvlist_free(nvroot); 2337 2338 /* 2339 * Attempt to create an existing pool. It shouldn't matter 2340 * what's in the nvroot; we should fail with EEXIST. 
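 * While the pool is briefly held open below, also confirm that
 * spa_destroy() of an active pool is rejected with EBUSY.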
2341 */ 2342 (void) rw_rdlock(&ztest_name_lock); 2343 nvroot = make_vdev_root("/dev/bogus", NULL, NULL, 0, 0, 0, 0, 0, 1); 2344 VERIFY3U(EEXIST, ==, spa_create(zo->zo_pool, nvroot, NULL, NULL, NULL)); 2345 nvlist_free(nvroot); 2346 VERIFY3U(0, ==, spa_open(zo->zo_pool, &spa, FTAG)); 2347 VERIFY3U(EBUSY, ==, spa_destroy(zo->zo_pool)); 2348 spa_close(spa, FTAG); 2349 2350 (void) rw_unlock(&ztest_name_lock); 2351} 2352 2353/* ARGSUSED */ 2354void 2355ztest_spa_upgrade(ztest_ds_t *zd, uint64_t id) 2356{ 2357 spa_t *spa; 2358 uint64_t initial_version = SPA_VERSION_INITIAL; 2359 uint64_t version, newversion; 2360 nvlist_t *nvroot, *props; 2361 char *name; 2362 2363 VERIFY0(mutex_lock(&ztest_vdev_lock)); 2364 name = kmem_asprintf("%s_upgrade", ztest_opts.zo_pool); 2365 2366 /* 2367 * Clean up from previous runs. 2368 */ 2369 (void) spa_destroy(name); 2370 2371 nvroot = make_vdev_root(NULL, NULL, name, ztest_opts.zo_vdev_size, 0, 2372 0, ztest_opts.zo_raidz, ztest_opts.zo_mirrors, 1); 2373 2374 /* 2375 * If we're configuring a RAIDZ device then make sure that the 2376 * the initial version is capable of supporting that feature. 2377 */ 2378 switch (ztest_opts.zo_raidz_parity) { 2379 case 0: 2380 case 1: 2381 initial_version = SPA_VERSION_INITIAL; 2382 break; 2383 case 2: 2384 initial_version = SPA_VERSION_RAIDZ2; 2385 break; 2386 case 3: 2387 initial_version = SPA_VERSION_RAIDZ3; 2388 break; 2389 } 2390 2391 /* 2392 * Create a pool with a spa version that can be upgraded. Pick 2393 * a value between initial_version and SPA_VERSION_BEFORE_FEATURES. 2394 */ 2395 do { 2396 version = ztest_random_spa_version(initial_version); 2397 } while (version > SPA_VERSION_BEFORE_FEATURES); 2398 2399 props = fnvlist_alloc(); 2400 fnvlist_add_uint64(props, 2401 zpool_prop_to_name(ZPOOL_PROP_VERSION), version); 2402 VERIFY0(spa_create(name, nvroot, props, NULL, NULL)); 2403 fnvlist_free(nvroot); 2404 fnvlist_free(props); 2405 2406 VERIFY0(spa_open(name, &spa, FTAG)); 2407 VERIFY3U(spa_version(spa), ==, version); 2408 newversion = ztest_random_spa_version(version + 1); 2409 2410 if (ztest_opts.zo_verbose >= 4) { 2411 (void) printf("upgrading spa version from %llu to %llu\n", 2412 (u_longlong_t)version, (u_longlong_t)newversion); 2413 } 2414 2415 spa_upgrade(spa, newversion); 2416 VERIFY3U(spa_version(spa), >, version); 2417 VERIFY3U(spa_version(spa), ==, fnvlist_lookup_uint64(spa->spa_config, 2418 zpool_prop_to_name(ZPOOL_PROP_VERSION))); 2419 spa_close(spa, FTAG); 2420 2421 strfree(name); 2422 VERIFY0(mutex_unlock(&ztest_vdev_lock)); 2423} 2424 2425static vdev_t * 2426vdev_lookup_by_path(vdev_t *vd, const char *path) 2427{ 2428 vdev_t *mvd; 2429 2430 if (vd->vdev_path != NULL && strcmp(path, vd->vdev_path) == 0) 2431 return (vd); 2432 2433 for (int c = 0; c < vd->vdev_children; c++) 2434 if ((mvd = vdev_lookup_by_path(vd->vdev_child[c], path)) != 2435 NULL) 2436 return (mvd); 2437 2438 return (NULL); 2439} 2440 2441/* 2442 * Find the first available hole which can be used as a top-level. 2443 */ 2444int 2445find_vdev_hole(spa_t *spa) 2446{ 2447 vdev_t *rvd = spa->spa_root_vdev; 2448 int c; 2449 2450 ASSERT(spa_config_held(spa, SCL_VDEV, RW_READER) == SCL_VDEV); 2451 2452 for (c = 0; c < rvd->vdev_children; c++) { 2453 vdev_t *cvd = rvd->vdev_child[c]; 2454 2455 if (cvd->vdev_ishole) 2456 break; 2457 } 2458 return (c); 2459} 2460 2461/* 2462 * Verify that vdev_add() works as expected. 
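 * Roughly: under ztest_vdev_lock, either remove one of the existing
 * slogs (1/4 of the time, when the pool has any) or add a new top-level
 * vdev, which is itself made a log device 1/4 of the time.  ENOSPC from
 * spa_vdev_add() is recorded rather than treated as fatal.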
2463 */ 2464/* ARGSUSED */ 2465void 2466ztest_vdev_add_remove(ztest_ds_t *zd, uint64_t id) 2467{ 2468 ztest_shared_t *zs = ztest_shared; 2469 spa_t *spa = ztest_spa; 2470 uint64_t leaves; 2471 uint64_t guid; 2472 nvlist_t *nvroot; 2473 int error; 2474 2475 VERIFY(mutex_lock(&ztest_vdev_lock) == 0); 2476 leaves = 2477 MAX(zs->zs_mirrors + zs->zs_splits, 1) * ztest_opts.zo_raidz; 2478 2479 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); 2480 2481 ztest_shared->zs_vdev_next_leaf = find_vdev_hole(spa) * leaves; 2482 2483 /* 2484 * If we have slogs then remove them 1/4 of the time. 2485 */ 2486 if (spa_has_slogs(spa) && ztest_random(4) == 0) { 2487 /* 2488 * Grab the guid from the head of the log class rotor. 2489 */ 2490 guid = spa_log_class(spa)->mc_rotor->mg_vd->vdev_guid; 2491 2492 spa_config_exit(spa, SCL_VDEV, FTAG); 2493 2494 /* 2495 * We have to grab the zs_name_lock as writer to 2496 * prevent a race between removing a slog (dmu_objset_find) 2497 * and destroying a dataset. Removing the slog will 2498 * grab a reference on the dataset which may cause 2499 * dmu_objset_destroy() to fail with EBUSY thus 2500 * leaving the dataset in an inconsistent state. 2501 */ 2502 VERIFY(rw_wrlock(&ztest_name_lock) == 0); 2503 error = spa_vdev_remove(spa, guid, B_FALSE); 2504 VERIFY(rw_unlock(&ztest_name_lock) == 0); 2505 2506 if (error && error != EEXIST) 2507 fatal(0, "spa_vdev_remove() = %d", error); 2508 } else { 2509 spa_config_exit(spa, SCL_VDEV, FTAG); 2510 2511 /* 2512 * Make 1/4 of the devices be log devices. 2513 */ 2514 nvroot = make_vdev_root(NULL, NULL, NULL, 2515 ztest_opts.zo_vdev_size, 0, 2516 ztest_random(4) == 0, ztest_opts.zo_raidz, 2517 zs->zs_mirrors, 1); 2518 2519 error = spa_vdev_add(spa, nvroot); 2520 nvlist_free(nvroot); 2521 2522 if (error == ENOSPC) 2523 ztest_record_enospc("spa_vdev_add"); 2524 else if (error != 0) 2525 fatal(0, "spa_vdev_add() = %d", error); 2526 } 2527 2528 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0); 2529} 2530 2531/* 2532 * Verify that adding/removing aux devices (l2arc, hot spare) works as expected. 2533 */ 2534/* ARGSUSED */ 2535void 2536ztest_vdev_aux_add_remove(ztest_ds_t *zd, uint64_t id) 2537{ 2538 ztest_shared_t *zs = ztest_shared; 2539 spa_t *spa = ztest_spa; 2540 vdev_t *rvd = spa->spa_root_vdev; 2541 spa_aux_vdev_t *sav; 2542 char *aux; 2543 uint64_t guid = 0; 2544 int error; 2545 2546 if (ztest_random(2) == 0) { 2547 sav = &spa->spa_spares; 2548 aux = ZPOOL_CONFIG_SPARES; 2549 } else { 2550 sav = &spa->spa_l2cache; 2551 aux = ZPOOL_CONFIG_L2CACHE; 2552 } 2553 2554 VERIFY(mutex_lock(&ztest_vdev_lock) == 0); 2555 2556 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); 2557 2558 if (sav->sav_count != 0 && ztest_random(4) == 0) { 2559 /* 2560 * Pick a random device to remove. 2561 */ 2562 guid = sav->sav_vdevs[ztest_random(sav->sav_count)]->vdev_guid; 2563 } else { 2564 /* 2565 * Find an unused device we can add. 2566 */ 2567 zs->zs_vdev_aux = 0; 2568 for (;;) { 2569 char path[MAXPATHLEN]; 2570 int c; 2571 (void) snprintf(path, sizeof (path), ztest_aux_template, 2572 ztest_opts.zo_dir, ztest_opts.zo_pool, aux, 2573 zs->zs_vdev_aux); 2574 for (c = 0; c < sav->sav_count; c++) 2575 if (strcmp(sav->sav_vdevs[c]->vdev_path, 2576 path) == 0) 2577 break; 2578 if (c == sav->sav_count && 2579 vdev_lookup_by_path(rvd, path) == NULL) 2580 break; 2581 zs->zs_vdev_aux++; 2582 } 2583 } 2584 2585 spa_config_exit(spa, SCL_VDEV, FTAG); 2586 2587 if (guid == 0) { 2588 /* 2589 * Add a new device. 
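 * The aux device is created at 5/4 of zo_vdev_size, slightly larger than
 * the ordinary leaves, presumably so that it can always stand in for any
 * of them.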
2590 */ 2591 nvlist_t *nvroot = make_vdev_root(NULL, aux, NULL, 2592 (ztest_opts.zo_vdev_size * 5) / 4, 0, 0, 0, 0, 1); 2593 error = spa_vdev_add(spa, nvroot); 2594 if (error != 0) 2595 fatal(0, "spa_vdev_add(%p) = %d", nvroot, error); 2596 nvlist_free(nvroot); 2597 } else { 2598 /* 2599 * Remove an existing device. Sometimes, dirty its 2600 * vdev state first to make sure we handle removal 2601 * of devices that have pending state changes. 2602 */ 2603 if (ztest_random(2) == 0) 2604 (void) vdev_online(spa, guid, 0, NULL); 2605 2606 error = spa_vdev_remove(spa, guid, B_FALSE); 2607 if (error != 0 && error != EBUSY) 2608 fatal(0, "spa_vdev_remove(%llu) = %d", guid, error); 2609 } 2610 2611 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0); 2612} 2613 2614/* 2615 * split a pool if it has mirror tlvdevs 2616 */ 2617/* ARGSUSED */ 2618void 2619ztest_split_pool(ztest_ds_t *zd, uint64_t id) 2620{ 2621 ztest_shared_t *zs = ztest_shared; 2622 spa_t *spa = ztest_spa; 2623 vdev_t *rvd = spa->spa_root_vdev; 2624 nvlist_t *tree, **child, *config, *split, **schild; 2625 uint_t c, children, schildren = 0, lastlogid = 0; 2626 int error = 0; 2627 2628 VERIFY(mutex_lock(&ztest_vdev_lock) == 0); 2629 2630 /* ensure we have a useable config; mirrors of raidz aren't supported */ 2631 if (zs->zs_mirrors < 3 || ztest_opts.zo_raidz > 1) { 2632 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0); 2633 return; 2634 } 2635 2636 /* clean up the old pool, if any */ 2637 (void) spa_destroy("splitp"); 2638 2639 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); 2640 2641 /* generate a config from the existing config */ 2642 mutex_enter(&spa->spa_props_lock); 2643 VERIFY(nvlist_lookup_nvlist(spa->spa_config, ZPOOL_CONFIG_VDEV_TREE, 2644 &tree) == 0); 2645 mutex_exit(&spa->spa_props_lock); 2646 2647 VERIFY(nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child, 2648 &children) == 0); 2649 2650 schild = malloc(rvd->vdev_children * sizeof (nvlist_t *)); 2651 for (c = 0; c < children; c++) { 2652 vdev_t *tvd = rvd->vdev_child[c]; 2653 nvlist_t **mchild; 2654 uint_t mchildren; 2655 2656 if (tvd->vdev_islog || tvd->vdev_ops == &vdev_hole_ops) { 2657 VERIFY(nvlist_alloc(&schild[schildren], NV_UNIQUE_NAME, 2658 0) == 0); 2659 VERIFY(nvlist_add_string(schild[schildren], 2660 ZPOOL_CONFIG_TYPE, VDEV_TYPE_HOLE) == 0); 2661 VERIFY(nvlist_add_uint64(schild[schildren], 2662 ZPOOL_CONFIG_IS_HOLE, 1) == 0); 2663 if (lastlogid == 0) 2664 lastlogid = schildren; 2665 ++schildren; 2666 continue; 2667 } 2668 lastlogid = 0; 2669 VERIFY(nvlist_lookup_nvlist_array(child[c], 2670 ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0); 2671 VERIFY(nvlist_dup(mchild[0], &schild[schildren++], 0) == 0); 2672 } 2673 2674 /* OK, create a config that can be used to split */ 2675 VERIFY(nvlist_alloc(&split, NV_UNIQUE_NAME, 0) == 0); 2676 VERIFY(nvlist_add_string(split, ZPOOL_CONFIG_TYPE, 2677 VDEV_TYPE_ROOT) == 0); 2678 VERIFY(nvlist_add_nvlist_array(split, ZPOOL_CONFIG_CHILDREN, schild, 2679 lastlogid != 0 ? 
lastlogid : schildren) == 0); 2680 2681 VERIFY(nvlist_alloc(&config, NV_UNIQUE_NAME, 0) == 0); 2682 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, split) == 0); 2683 2684 for (c = 0; c < schildren; c++) 2685 nvlist_free(schild[c]); 2686 free(schild); 2687 nvlist_free(split); 2688 2689 spa_config_exit(spa, SCL_VDEV, FTAG); 2690 2691 (void) rw_wrlock(&ztest_name_lock); 2692 error = spa_vdev_split_mirror(spa, "splitp", config, NULL, B_FALSE); 2693 (void) rw_unlock(&ztest_name_lock); 2694 2695 nvlist_free(config); 2696 2697 if (error == 0) { 2698 (void) printf("successful split - results:\n"); 2699 mutex_enter(&spa_namespace_lock); 2700 show_pool_stats(spa); 2701 show_pool_stats(spa_lookup("splitp")); 2702 mutex_exit(&spa_namespace_lock); 2703 ++zs->zs_splits; 2704 --zs->zs_mirrors; 2705 } 2706 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0); 2707 2708} 2709 2710/* 2711 * Verify that we can attach and detach devices. 2712 */ 2713/* ARGSUSED */ 2714void 2715ztest_vdev_attach_detach(ztest_ds_t *zd, uint64_t id) 2716{ 2717 ztest_shared_t *zs = ztest_shared; 2718 spa_t *spa = ztest_spa; 2719 spa_aux_vdev_t *sav = &spa->spa_spares; 2720 vdev_t *rvd = spa->spa_root_vdev; 2721 vdev_t *oldvd, *newvd, *pvd; 2722 nvlist_t *root; 2723 uint64_t leaves; 2724 uint64_t leaf, top; 2725 uint64_t ashift = ztest_get_ashift(); 2726 uint64_t oldguid, pguid; 2727 size_t oldsize, newsize; 2728 char oldpath[MAXPATHLEN], newpath[MAXPATHLEN]; 2729 int replacing; 2730 int oldvd_has_siblings = B_FALSE; 2731 int newvd_is_spare = B_FALSE; 2732 int oldvd_is_log; 2733 int error, expected_error; 2734 2735 VERIFY(mutex_lock(&ztest_vdev_lock) == 0); 2736 leaves = MAX(zs->zs_mirrors, 1) * ztest_opts.zo_raidz; 2737 2738 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); 2739 2740 /* 2741 * Decide whether to do an attach or a replace. 2742 */ 2743 replacing = ztest_random(2); 2744 2745 /* 2746 * Pick a random top-level vdev. 2747 */ 2748 top = ztest_random_vdev_top(spa, B_TRUE); 2749 2750 /* 2751 * Pick a random leaf within it. 2752 */ 2753 leaf = ztest_random(leaves); 2754 2755 /* 2756 * Locate this vdev. 2757 */ 2758 oldvd = rvd->vdev_child[top]; 2759 if (zs->zs_mirrors >= 1) { 2760 ASSERT(oldvd->vdev_ops == &vdev_mirror_ops); 2761 ASSERT(oldvd->vdev_children >= zs->zs_mirrors); 2762 oldvd = oldvd->vdev_child[leaf / ztest_opts.zo_raidz]; 2763 } 2764 if (ztest_opts.zo_raidz > 1) { 2765 ASSERT(oldvd->vdev_ops == &vdev_raidz_ops); 2766 ASSERT(oldvd->vdev_children == ztest_opts.zo_raidz); 2767 oldvd = oldvd->vdev_child[leaf % ztest_opts.zo_raidz]; 2768 } 2769 2770 /* 2771 * If we're already doing an attach or replace, oldvd may be a 2772 * mirror vdev -- in which case, pick a random child. 2773 */ 2774 while (oldvd->vdev_children != 0) { 2775 oldvd_has_siblings = B_TRUE; 2776 ASSERT(oldvd->vdev_children >= 2); 2777 oldvd = oldvd->vdev_child[ztest_random(oldvd->vdev_children)]; 2778 } 2779 2780 oldguid = oldvd->vdev_guid; 2781 oldsize = vdev_get_min_asize(oldvd); 2782 oldvd_is_log = oldvd->vdev_top->vdev_islog; 2783 (void) strcpy(oldpath, oldvd->vdev_path); 2784 pvd = oldvd->vdev_parent; 2785 pguid = pvd->vdev_guid; 2786 2787 /* 2788 * If oldvd has siblings, then half of the time, detach it. 
2789 */ 2790 if (oldvd_has_siblings && ztest_random(2) == 0) { 2791 spa_config_exit(spa, SCL_VDEV, FTAG); 2792 error = spa_vdev_detach(spa, oldguid, pguid, B_FALSE); 2793 if (error != 0 && error != ENODEV && error != EBUSY && 2794 error != ENOTSUP) 2795 fatal(0, "detach (%s) returned %d", oldpath, error); 2796 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0); 2797 return; 2798 } 2799 2800 /* 2801 * For the new vdev, choose with equal probability between the two 2802 * standard paths (ending in either 'a' or 'b') or a random hot spare. 2803 */ 2804 if (sav->sav_count != 0 && ztest_random(3) == 0) { 2805 newvd = sav->sav_vdevs[ztest_random(sav->sav_count)]; 2806 newvd_is_spare = B_TRUE; 2807 (void) strcpy(newpath, newvd->vdev_path); 2808 } else { 2809 (void) snprintf(newpath, sizeof (newpath), ztest_dev_template, 2810 ztest_opts.zo_dir, ztest_opts.zo_pool, 2811 top * leaves + leaf); 2812 if (ztest_random(2) == 0) 2813 newpath[strlen(newpath) - 1] = 'b'; 2814 newvd = vdev_lookup_by_path(rvd, newpath); 2815 } 2816 2817 if (newvd) { 2818 newsize = vdev_get_min_asize(newvd); 2819 } else { 2820 /* 2821 * Make newsize a little bigger or smaller than oldsize. 2822 * If it's smaller, the attach should fail. 2823 * If it's larger, and we're doing a replace, 2824 * we should get dynamic LUN growth when we're done. 2825 */ 2826 newsize = 10 * oldsize / (9 + ztest_random(3)); 2827 } 2828 2829 /* 2830 * If pvd is not a mirror or root, the attach should fail with ENOTSUP, 2831 * unless it's a replace; in that case any non-replacing parent is OK. 2832 * 2833 * If newvd is already part of the pool, it should fail with EBUSY. 2834 * 2835 * If newvd is too small, it should fail with EOVERFLOW. 2836 */ 2837 if (pvd->vdev_ops != &vdev_mirror_ops && 2838 pvd->vdev_ops != &vdev_root_ops && (!replacing || 2839 pvd->vdev_ops == &vdev_replacing_ops || 2840 pvd->vdev_ops == &vdev_spare_ops)) 2841 expected_error = ENOTSUP; 2842 else if (newvd_is_spare && (!replacing || oldvd_is_log)) 2843 expected_error = ENOTSUP; 2844 else if (newvd == oldvd) 2845 expected_error = replacing ? 0 : EBUSY; 2846 else if (vdev_lookup_by_path(rvd, newpath) != NULL) 2847 expected_error = EBUSY; 2848 else if (newsize < oldsize) 2849 expected_error = EOVERFLOW; 2850 else if (ashift > oldvd->vdev_top->vdev_ashift) 2851 expected_error = EDOM; 2852 else 2853 expected_error = 0; 2854 2855 spa_config_exit(spa, SCL_VDEV, FTAG); 2856 2857 /* 2858 * Build the nvlist describing newpath. 2859 */ 2860 root = make_vdev_root(newpath, NULL, NULL, newvd == NULL ? newsize : 0, 2861 ashift, 0, 0, 0, 1); 2862 2863 error = spa_vdev_attach(spa, oldguid, root, replacing); 2864 2865 nvlist_free(root); 2866 2867 /* 2868 * If our parent was the replacing vdev, but the replace completed, 2869 * then instead of failing with ENOTSUP we may either succeed, 2870 * fail with ENODEV, or fail with EOVERFLOW. 2871 */ 2872 if (expected_error == ENOTSUP && 2873 (error == 0 || error == ENODEV || error == EOVERFLOW)) 2874 expected_error = error; 2875 2876 /* 2877 * If someone grew the LUN, the replacement may be too small. 
2878 */ 2879 if (error == EOVERFLOW || error == EBUSY) 2880 expected_error = error; 2881 2882 /* XXX workaround 6690467 */ 2883 if (error != expected_error && expected_error != EBUSY) { 2884 fatal(0, "attach (%s %llu, %s %llu, %d) " 2885 "returned %d, expected %d", 2886 oldpath, (longlong_t)oldsize, newpath, 2887 (longlong_t)newsize, replacing, error, expected_error); 2888 } 2889 2890 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0); 2891} 2892 2893/* 2894 * Callback function which expands the physical size of the vdev. 2895 */ 2896vdev_t * 2897grow_vdev(vdev_t *vd, void *arg) 2898{ 2899 spa_t *spa = vd->vdev_spa; 2900 size_t *newsize = arg; 2901 size_t fsize; 2902 int fd; 2903 2904 ASSERT(spa_config_held(spa, SCL_STATE, RW_READER) == SCL_STATE); 2905 ASSERT(vd->vdev_ops->vdev_op_leaf); 2906 2907 if ((fd = open(vd->vdev_path, O_RDWR)) == -1) 2908 return (vd); 2909 2910 fsize = lseek(fd, 0, SEEK_END); 2911 (void) ftruncate(fd, *newsize); 2912 2913 if (ztest_opts.zo_verbose >= 6) { 2914 (void) printf("%s grew from %lu to %lu bytes\n", 2915 vd->vdev_path, (ulong_t)fsize, (ulong_t)*newsize); 2916 } 2917 (void) close(fd); 2918 return (NULL); 2919} 2920 2921/* 2922 * Callback function which expands a given vdev by calling vdev_online(). 2923 */ 2924/* ARGSUSED */ 2925vdev_t * 2926online_vdev(vdev_t *vd, void *arg) 2927{ 2928 spa_t *spa = vd->vdev_spa; 2929 vdev_t *tvd = vd->vdev_top; 2930 uint64_t guid = vd->vdev_guid; 2931 uint64_t generation = spa->spa_config_generation + 1; 2932 vdev_state_t newstate = VDEV_STATE_UNKNOWN; 2933 int error; 2934 2935 ASSERT(spa_config_held(spa, SCL_STATE, RW_READER) == SCL_STATE); 2936 ASSERT(vd->vdev_ops->vdev_op_leaf); 2937 2938 /* Calling vdev_online will initialize the new metaslabs */ 2939 spa_config_exit(spa, SCL_STATE, spa); 2940 error = vdev_online(spa, guid, ZFS_ONLINE_EXPAND, &newstate); 2941 spa_config_enter(spa, SCL_STATE, spa, RW_READER); 2942 2943 /* 2944 * If vdev_online returned an error or the underlying vdev_open 2945 * failed then we abort the expand. The only way to know that 2946 * vdev_open fails is by checking the returned newstate. 2947 */ 2948 if (error || newstate != VDEV_STATE_HEALTHY) { 2949 if (ztest_opts.zo_verbose >= 5) { 2950 (void) printf("Unable to expand vdev, state %llu, " 2951 "error %d\n", (u_longlong_t)newstate, error); 2952 } 2953 return (vd); 2954 } 2955 ASSERT3U(newstate, ==, VDEV_STATE_HEALTHY); 2956 2957 /* 2958 * Since we dropped the lock we need to ensure that we're 2959 * still talking to the original vdev. It's possible this 2960 * vdev may have been detached/replaced while we were 2961 * trying to online it. 2962 */ 2963 if (generation != spa->spa_config_generation) { 2964 if (ztest_opts.zo_verbose >= 5) { 2965 (void) printf("vdev configuration has changed, " 2966 "guid %llu, state %llu, expected gen %llu, " 2967 "got gen %llu\n", 2968 (u_longlong_t)guid, 2969 (u_longlong_t)tvd->vdev_state, 2970 (u_longlong_t)generation, 2971 (u_longlong_t)spa->spa_config_generation); 2972 } 2973 return (vd); 2974 } 2975 return (NULL); 2976} 2977 2978/* 2979 * Traverse the vdev tree calling the supplied function. 2980 * We continue to walk the tree until we either have walked all 2981 * children or we receive a non-NULL return from the callback. 2982 * If a NULL callback is passed, then we just return back the first 2983 * leaf vdev we encounter. 
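 * ztest_vdev_LUN_growth() below uses this helper both ways, e.g.:
 *
 *	vd = vdev_walk_tree(tvd, NULL, NULL);		(first leaf vdev)
 *	vdev_walk_tree(tvd, grow_vdev, &newsize);	(grow each leaf)
 *	vdev_walk_tree(tvd, online_vdev, NULL);		(online each leaf)
 *
 * A non-NULL return from the callback aborts the walk and is passed back
 * to the caller, which is why grow_vdev() and online_vdev() return the
 * failing vdev on error and NULL on success.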
2984 */ 2985vdev_t * 2986vdev_walk_tree(vdev_t *vd, vdev_t *(*func)(vdev_t *, void *), void *arg) 2987{ 2988 if (vd->vdev_ops->vdev_op_leaf) { 2989 if (func == NULL) 2990 return (vd); 2991 else 2992 return (func(vd, arg)); 2993 } 2994 2995 for (uint_t c = 0; c < vd->vdev_children; c++) { 2996 vdev_t *cvd = vd->vdev_child[c]; 2997 if ((cvd = vdev_walk_tree(cvd, func, arg)) != NULL) 2998 return (cvd); 2999 } 3000 return (NULL); 3001} 3002 3003/* 3004 * Verify that dynamic LUN growth works as expected. 3005 */ 3006/* ARGSUSED */ 3007void 3008ztest_vdev_LUN_growth(ztest_ds_t *zd, uint64_t id) 3009{ 3010 spa_t *spa = ztest_spa; 3011 vdev_t *vd, *tvd; 3012 metaslab_class_t *mc; 3013 metaslab_group_t *mg; 3014 size_t psize, newsize; 3015 uint64_t top; 3016 uint64_t old_class_space, new_class_space, old_ms_count, new_ms_count; 3017 3018 VERIFY(mutex_lock(&ztest_vdev_lock) == 0); 3019 spa_config_enter(spa, SCL_STATE, spa, RW_READER); 3020 3021 top = ztest_random_vdev_top(spa, B_TRUE); 3022 3023 tvd = spa->spa_root_vdev->vdev_child[top]; 3024 mg = tvd->vdev_mg; 3025 mc = mg->mg_class; 3026 old_ms_count = tvd->vdev_ms_count; 3027 old_class_space = metaslab_class_get_space(mc); 3028 3029 /* 3030 * Determine the size of the first leaf vdev associated with 3031 * our top-level device. 3032 */ 3033 vd = vdev_walk_tree(tvd, NULL, NULL); 3034 ASSERT3P(vd, !=, NULL); 3035 ASSERT(vd->vdev_ops->vdev_op_leaf); 3036 3037 psize = vd->vdev_psize; 3038 3039 /* 3040 * We only try to expand the vdev if it's healthy, less than 4x its 3041 * original size, and it has a valid psize. 3042 */ 3043 if (tvd->vdev_state != VDEV_STATE_HEALTHY || 3044 psize == 0 || psize >= 4 * ztest_opts.zo_vdev_size) { 3045 spa_config_exit(spa, SCL_STATE, spa); 3046 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0); 3047 return; 3048 } 3049 ASSERT(psize > 0); 3050 newsize = psize + psize / 8; 3051 ASSERT3U(newsize, >, psize); 3052 3053 if (ztest_opts.zo_verbose >= 6) { 3054 (void) printf("Expanding LUN %s from %lu to %lu\n", 3055 vd->vdev_path, (ulong_t)psize, (ulong_t)newsize); 3056 } 3057 3058 /* 3059 * Growing the vdev is a two step process: 3060 * 1). expand the physical size (i.e. relabel) 3061 * 2). online the vdev to create the new metaslabs 3062 */ 3063 if (vdev_walk_tree(tvd, grow_vdev, &newsize) != NULL || 3064 vdev_walk_tree(tvd, online_vdev, NULL) != NULL || 3065 tvd->vdev_state != VDEV_STATE_HEALTHY) { 3066 if (ztest_opts.zo_verbose >= 5) { 3067 (void) printf("Could not expand LUN because " 3068 "the vdev configuration changed.\n"); 3069 } 3070 spa_config_exit(spa, SCL_STATE, spa); 3071 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0); 3072 return; 3073 } 3074 3075 spa_config_exit(spa, SCL_STATE, spa); 3076 3077 /* 3078 * Expanding the LUN will update the config asynchronously, 3079 * thus we must wait for the async thread to complete any 3080 * pending tasks before proceeding. 
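 * The loop below treats the async work as drained only when, under
 * spa_async_lock, there is no spa_async_thread and no pending
 * spa_async_tasks; until then it waits for a txg to sync and polls
 * every 100ms.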
3081 */ 3082 for (;;) { 3083 boolean_t done; 3084 mutex_enter(&spa->spa_async_lock); 3085 done = (spa->spa_async_thread == NULL && !spa->spa_async_tasks); 3086 mutex_exit(&spa->spa_async_lock); 3087 if (done) 3088 break; 3089 txg_wait_synced(spa_get_dsl(spa), 0); 3090 (void) poll(NULL, 0, 100); 3091 } 3092 3093 spa_config_enter(spa, SCL_STATE, spa, RW_READER); 3094 3095 tvd = spa->spa_root_vdev->vdev_child[top]; 3096 new_ms_count = tvd->vdev_ms_count; 3097 new_class_space = metaslab_class_get_space(mc); 3098 3099 if (tvd->vdev_mg != mg || mg->mg_class != mc) { 3100 if (ztest_opts.zo_verbose >= 5) { 3101 (void) printf("Could not verify LUN expansion due to " 3102 "intervening vdev offline or remove.\n"); 3103 } 3104 spa_config_exit(spa, SCL_STATE, spa); 3105 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0); 3106 return; 3107 } 3108 3109 /* 3110 * Make sure we were able to grow the vdev. 3111 */ 3112 if (new_ms_count <= old_ms_count) 3113 fatal(0, "LUN expansion failed: ms_count %llu <= %llu\n", 3114 old_ms_count, new_ms_count); 3115 3116 /* 3117 * Make sure we were able to grow the pool. 3118 */ 3119 if (new_class_space <= old_class_space) 3120 fatal(0, "LUN expansion failed: class_space %llu <= %llu\n", 3121 old_class_space, new_class_space); 3122 3123 if (ztest_opts.zo_verbose >= 5) { 3124 char oldnumbuf[6], newnumbuf[6]; 3125 3126 nicenum(old_class_space, oldnumbuf); 3127 nicenum(new_class_space, newnumbuf); 3128 (void) printf("%s grew from %s to %s\n", 3129 spa->spa_name, oldnumbuf, newnumbuf); 3130 } 3131 3132 spa_config_exit(spa, SCL_STATE, spa); 3133 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0); 3134} 3135 3136/* 3137 * Verify that dmu_objset_{create,destroy,open,close} work as expected. 3138 */ 3139/* ARGSUSED */ 3140static void 3141ztest_objset_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx) 3142{ 3143 /* 3144 * Create the objects common to all ztest datasets. 3145 */ 3146 VERIFY(zap_create_claim(os, ZTEST_DIROBJ, 3147 DMU_OT_ZAP_OTHER, DMU_OT_NONE, 0, tx) == 0); 3148} 3149 3150static int 3151ztest_dataset_create(char *dsname) 3152{ 3153 uint64_t zilset = ztest_random(100); 3154 int err = dmu_objset_create(dsname, DMU_OST_OTHER, 0, 3155 ztest_objset_create_cb, NULL); 3156 3157 if (err || zilset < 80) 3158 return (err); 3159 3160 if (ztest_opts.zo_verbose >= 6) 3161 (void) printf("Setting dataset %s to sync always\n", dsname); 3162 return (ztest_dsl_prop_set_uint64(dsname, ZFS_PROP_SYNC, 3163 ZFS_SYNC_ALWAYS, B_FALSE)); 3164} 3165 3166/* ARGSUSED */ 3167static int 3168ztest_objset_destroy_cb(const char *name, void *arg) 3169{ 3170 objset_t *os; 3171 dmu_object_info_t doi; 3172 int error; 3173 3174 /* 3175 * Verify that the dataset contains a directory object. 3176 */ 3177 VERIFY3U(0, ==, dmu_objset_hold(name, FTAG, &os)); 3178 error = dmu_object_info(os, ZTEST_DIROBJ, &doi); 3179 if (error != ENOENT) { 3180 /* We could have crashed in the middle of destroying it */ 3181 ASSERT0(error); 3182 ASSERT3U(doi.doi_type, ==, DMU_OT_ZAP_OTHER); 3183 ASSERT3S(doi.doi_physical_blocks_512, >=, 0); 3184 } 3185 dmu_objset_rele(os, FTAG); 3186 3187 /* 3188 * Destroy the dataset. 
3189 */ 3190 VERIFY3U(0, ==, dmu_objset_destroy(name, B_FALSE)); 3191 return (0); 3192} 3193 3194static boolean_t 3195ztest_snapshot_create(char *osname, uint64_t id) 3196{ 3197 char snapname[MAXNAMELEN]; 3198 int error; 3199 3200 (void) snprintf(snapname, MAXNAMELEN, "%s@%llu", osname, 3201 (u_longlong_t)id); 3202 3203 error = dmu_objset_snapshot(osname, strchr(snapname, '@') + 1, 3204 NULL, NULL, B_FALSE, B_FALSE, -1); 3205 if (error == ENOSPC) { 3206 ztest_record_enospc(FTAG); 3207 return (B_FALSE); 3208 } 3209 if (error != 0 && error != EEXIST) 3210 fatal(0, "ztest_snapshot_create(%s) = %d", snapname, error); 3211 return (B_TRUE); 3212} 3213 3214static boolean_t 3215ztest_snapshot_destroy(char *osname, uint64_t id) 3216{ 3217 char snapname[MAXNAMELEN]; 3218 int error; 3219 3220 (void) snprintf(snapname, MAXNAMELEN, "%s@%llu", osname, 3221 (u_longlong_t)id); 3222 3223 error = dmu_objset_destroy(snapname, B_FALSE); 3224 if (error != 0 && error != ENOENT) 3225 fatal(0, "ztest_snapshot_destroy(%s) = %d", snapname, error); 3226 return (B_TRUE); 3227} 3228 3229/* ARGSUSED */ 3230void 3231ztest_dmu_objset_create_destroy(ztest_ds_t *zd, uint64_t id) 3232{ 3233 ztest_ds_t zdtmp; 3234 int iters; 3235 int error; 3236 objset_t *os, *os2; 3237 char name[MAXNAMELEN]; 3238 zilog_t *zilog; 3239 3240 (void) rw_rdlock(&ztest_name_lock); 3241 3242 (void) snprintf(name, MAXNAMELEN, "%s/temp_%llu", 3243 ztest_opts.zo_pool, (u_longlong_t)id); 3244 3245 /* 3246 * If this dataset exists from a previous run, process its replay log 3247 * half of the time. If we don't replay it, then dmu_objset_destroy() 3248 * (invoked from ztest_objset_destroy_cb()) should just throw it away. 3249 */ 3250 if (ztest_random(2) == 0 && 3251 dmu_objset_own(name, DMU_OST_OTHER, B_FALSE, FTAG, &os) == 0) { 3252 ztest_zd_init(&zdtmp, NULL, os); 3253 zil_replay(os, &zdtmp, ztest_replay_vector); 3254 ztest_zd_fini(&zdtmp); 3255 dmu_objset_disown(os, FTAG); 3256 } 3257 3258 /* 3259 * There may be an old instance of the dataset we're about to 3260 * create lying around from a previous run. If so, destroy it 3261 * and all of its snapshots. 3262 */ 3263 (void) dmu_objset_find(name, ztest_objset_destroy_cb, NULL, 3264 DS_FIND_CHILDREN | DS_FIND_SNAPSHOTS); 3265 3266 /* 3267 * Verify that the destroyed dataset is no longer in the namespace. 3268 */ 3269 VERIFY3U(ENOENT, ==, dmu_objset_hold(name, FTAG, &os)); 3270 3271 /* 3272 * Verify that we can create a new dataset. 3273 */ 3274 error = ztest_dataset_create(name); 3275 if (error) { 3276 if (error == ENOSPC) { 3277 ztest_record_enospc(FTAG); 3278 (void) rw_unlock(&ztest_name_lock); 3279 return; 3280 } 3281 fatal(0, "dmu_objset_create(%s) = %d", name, error); 3282 } 3283 3284 VERIFY3U(0, ==, 3285 dmu_objset_own(name, DMU_OST_OTHER, B_FALSE, FTAG, &os)); 3286 3287 ztest_zd_init(&zdtmp, NULL, os); 3288 3289 /* 3290 * Open the intent log for it. 3291 */ 3292 zilog = zil_open(os, ztest_get_data); 3293 3294 /* 3295 * Put some objects in there, do a little I/O to them, 3296 * and randomly take a couple of snapshots along the way. 3297 */ 3298 iters = ztest_random(5); 3299 for (int i = 0; i < iters; i++) { 3300 ztest_dmu_object_alloc_free(&zdtmp, id); 3301 if (ztest_random(iters) == 0) 3302 (void) ztest_snapshot_create(name, i); 3303 } 3304 3305 /* 3306 * Verify that we cannot create an existing dataset. 3307 */ 3308 VERIFY3U(EEXIST, ==, 3309 dmu_objset_create(name, DMU_OST_OTHER, 0, NULL, NULL)); 3310 3311 /* 3312 * Verify that we can hold an objset that is also owned. 
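 * A hold (dmu_objset_hold) is a weaker reference than an own
 * (dmu_objset_own): holding an owned objset should succeed, as checked
 * below, while owning it a second time is expected to fail with EBUSY.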
3313 */ 3314 VERIFY3U(0, ==, dmu_objset_hold(name, FTAG, &os2)); 3315 dmu_objset_rele(os2, FTAG); 3316 3317 /* 3318 * Verify that we cannot own an objset that is already owned. 3319 */ 3320 VERIFY3U(EBUSY, ==, 3321 dmu_objset_own(name, DMU_OST_OTHER, B_FALSE, FTAG, &os2)); 3322 3323 zil_close(zilog); 3324 dmu_objset_disown(os, FTAG); 3325 ztest_zd_fini(&zdtmp); 3326 3327 (void) rw_unlock(&ztest_name_lock); 3328} 3329 3330/* 3331 * Verify that dmu_snapshot_{create,destroy,open,close} work as expected. 3332 */ 3333void 3334ztest_dmu_snapshot_create_destroy(ztest_ds_t *zd, uint64_t id) 3335{ 3336 (void) rw_rdlock(&ztest_name_lock); 3337 (void) ztest_snapshot_destroy(zd->zd_name, id); 3338 (void) ztest_snapshot_create(zd->zd_name, id); 3339 (void) rw_unlock(&ztest_name_lock); 3340} 3341 3342/* 3343 * Cleanup non-standard snapshots and clones. 3344 */ 3345void 3346ztest_dsl_dataset_cleanup(char *osname, uint64_t id) 3347{ 3348 char snap1name[MAXNAMELEN]; 3349 char clone1name[MAXNAMELEN]; 3350 char snap2name[MAXNAMELEN]; 3351 char clone2name[MAXNAMELEN]; 3352 char snap3name[MAXNAMELEN]; 3353 int error; 3354 3355 (void) snprintf(snap1name, MAXNAMELEN, "%s@s1_%llu", osname, id); 3356 (void) snprintf(clone1name, MAXNAMELEN, "%s/c1_%llu", osname, id); 3357 (void) snprintf(snap2name, MAXNAMELEN, "%s@s2_%llu", clone1name, id); 3358 (void) snprintf(clone2name, MAXNAMELEN, "%s/c2_%llu", osname, id); 3359 (void) snprintf(snap3name, MAXNAMELEN, "%s@s3_%llu", clone1name, id); 3360 3361 error = dmu_objset_destroy(clone2name, B_FALSE); 3362 if (error && error != ENOENT) 3363 fatal(0, "dmu_objset_destroy(%s) = %d", clone2name, error); 3364 error = dmu_objset_destroy(snap3name, B_FALSE); 3365 if (error && error != ENOENT) 3366 fatal(0, "dmu_objset_destroy(%s) = %d", snap3name, error); 3367 error = dmu_objset_destroy(snap2name, B_FALSE); 3368 if (error && error != ENOENT) 3369 fatal(0, "dmu_objset_destroy(%s) = %d", snap2name, error); 3370 error = dmu_objset_destroy(clone1name, B_FALSE); 3371 if (error && error != ENOENT) 3372 fatal(0, "dmu_objset_destroy(%s) = %d", clone1name, error); 3373 error = dmu_objset_destroy(snap1name, B_FALSE); 3374 if (error && error != ENOENT) 3375 fatal(0, "dmu_objset_destroy(%s) = %d", snap1name, error); 3376} 3377 3378/* 3379 * Verify dsl_dataset_promote handles EBUSY 3380 */ 3381void 3382ztest_dsl_dataset_promote_busy(ztest_ds_t *zd, uint64_t id) 3383{ 3384 objset_t *clone; 3385 dsl_dataset_t *ds; 3386 char snap1name[MAXNAMELEN]; 3387 char clone1name[MAXNAMELEN]; 3388 char snap2name[MAXNAMELEN]; 3389 char clone2name[MAXNAMELEN]; 3390 char snap3name[MAXNAMELEN]; 3391 char *osname = zd->zd_name; 3392 int error; 3393 3394 (void) rw_rdlock(&ztest_name_lock); 3395 3396 ztest_dsl_dataset_cleanup(osname, id); 3397 3398 (void) snprintf(snap1name, MAXNAMELEN, "%s@s1_%llu", osname, id); 3399 (void) snprintf(clone1name, MAXNAMELEN, "%s/c1_%llu", osname, id); 3400 (void) snprintf(snap2name, MAXNAMELEN, "%s@s2_%llu", clone1name, id); 3401 (void) snprintf(clone2name, MAXNAMELEN, "%s/c2_%llu", osname, id); 3402 (void) snprintf(snap3name, MAXNAMELEN, "%s@s3_%llu", clone1name, id); 3403 3404 error = dmu_objset_snapshot(osname, strchr(snap1name, '@')+1, 3405 NULL, NULL, B_FALSE, B_FALSE, -1); 3406 if (error && error != EEXIST) { 3407 if (error == ENOSPC) { 3408 ztest_record_enospc(FTAG); 3409 goto out; 3410 } 3411 fatal(0, "dmu_take_snapshot(%s) = %d", snap1name, error); 3412 } 3413 3414 error = dmu_objset_hold(snap1name, FTAG, &clone); 3415 if (error) 3416 fatal(0, "dmu_open_snapshot(%s) = %d", 
snap1name, error); 3417 3418 error = dmu_objset_clone(clone1name, dmu_objset_ds(clone), 0); 3419 dmu_objset_rele(clone, FTAG); 3420 if (error) { 3421 if (error == ENOSPC) { 3422 ztest_record_enospc(FTAG); 3423 goto out; 3424 } 3425 fatal(0, "dmu_objset_create(%s) = %d", clone1name, error); 3426 } 3427 3428 error = dmu_objset_snapshot(clone1name, strchr(snap2name, '@')+1, 3429 NULL, NULL, B_FALSE, B_FALSE, -1); 3430 if (error && error != EEXIST) { 3431 if (error == ENOSPC) { 3432 ztest_record_enospc(FTAG); 3433 goto out; 3434 } 3435 fatal(0, "dmu_open_snapshot(%s) = %d", snap2name, error); 3436 } 3437 3438 error = dmu_objset_snapshot(clone1name, strchr(snap3name, '@')+1, 3439 NULL, NULL, B_FALSE, B_FALSE, -1); 3440 if (error && error != EEXIST) { 3441 if (error == ENOSPC) { 3442 ztest_record_enospc(FTAG); 3443 goto out; 3444 } 3445 fatal(0, "dmu_open_snapshot(%s) = %d", snap3name, error); 3446 } 3447 3448 error = dmu_objset_hold(snap3name, FTAG, &clone); 3449 if (error) 3450 fatal(0, "dmu_open_snapshot(%s) = %d", snap3name, error); 3451 3452 error = dmu_objset_clone(clone2name, dmu_objset_ds(clone), 0); 3453 dmu_objset_rele(clone, FTAG); 3454 if (error) { 3455 if (error == ENOSPC) { 3456 ztest_record_enospc(FTAG); 3457 goto out; 3458 } 3459 fatal(0, "dmu_objset_create(%s) = %d", clone2name, error); 3460 } 3461 3462 error = dsl_dataset_own(snap2name, B_FALSE, FTAG, &ds); 3463 if (error) 3464 fatal(0, "dsl_dataset_own(%s) = %d", snap2name, error); 3465 error = dsl_dataset_promote(clone2name, NULL); 3466 if (error != EBUSY) 3467 fatal(0, "dsl_dataset_promote(%s), %d, not EBUSY", clone2name, 3468 error); 3469 dsl_dataset_disown(ds, FTAG); 3470 3471out: 3472 ztest_dsl_dataset_cleanup(osname, id); 3473 3474 (void) rw_unlock(&ztest_name_lock); 3475} 3476 3477/* 3478 * Verify that dmu_object_{alloc,free} work as expected. 3479 */ 3480void 3481ztest_dmu_object_alloc_free(ztest_ds_t *zd, uint64_t id) 3482{ 3483 ztest_od_t od[4]; 3484 int batchsize = sizeof (od) / sizeof (od[0]); 3485 3486 for (int b = 0; b < batchsize; b++) 3487 ztest_od_init(&od[b], id, FTAG, b, DMU_OT_UINT64_OTHER, 0, 0); 3488 3489 /* 3490 * Destroy the previous batch of objects, create a new batch, 3491 * and do some I/O on the new objects. 3492 */ 3493 if (ztest_object_init(zd, od, sizeof (od), B_TRUE) != 0) 3494 return; 3495 3496 while (ztest_random(4 * batchsize) != 0) 3497 ztest_io(zd, od[ztest_random(batchsize)].od_object, 3498 ztest_random(ZTEST_RANGE_LOCKS) << SPA_MAXBLOCKSHIFT); 3499} 3500 3501/* 3502 * Verify that dmu_{read,write} work as expected. 3503 */ 3504void 3505ztest_dmu_read_write(ztest_ds_t *zd, uint64_t id) 3506{ 3507 objset_t *os = zd->zd_os; 3508 ztest_od_t od[2]; 3509 dmu_tx_t *tx; 3510 int i, freeit, error; 3511 uint64_t n, s, txg; 3512 bufwad_t *packbuf, *bigbuf, *pack, *bigH, *bigT; 3513 uint64_t packobj, packoff, packsize, bigobj, bigoff, bigsize; 3514 uint64_t chunksize = (1000 + ztest_random(1000)) * sizeof (uint64_t); 3515 uint64_t regions = 997; 3516 uint64_t stride = 123456789ULL; 3517 uint64_t width = 40; 3518 int free_percent = 5; 3519 3520 /* 3521 * This test uses two objects, packobj and bigobj, that are always 3522 * updated together (i.e. in the same tx) so that their contents are 3523 * in sync and can be compared. Their contents relate to each other 3524 * in a simple way: packobj is a dense array of 'bufwad' structures, 3525 * while bigobj is a sparse array of the same bufwads. 
Specifically, 3526 * for any index n, there are three bufwads that should be identical: 3527 * 3528 * packobj, at offset n * sizeof (bufwad_t) 3529 * bigobj, at the head of the nth chunk 3530 * bigobj, at the tail of the nth chunk 3531 * 3532 * The chunk size is arbitrary. It doesn't have to be a power of two, 3533 * and it doesn't have any relation to the object blocksize. 3534 * The only requirement is that it can hold at least two bufwads. 3535 * 3536 * Normally, we write the bufwad to each of these locations. 3537 * However, free_percent of the time we instead write zeroes to 3538 * packobj and perform a dmu_free_range() on bigobj. By comparing 3539 * bigobj to packobj, we can verify that the DMU is correctly 3540 * tracking which parts of an object are allocated and free, 3541 * and that the contents of the allocated blocks are correct. 3542 */ 3543 3544 /* 3545 * Read the directory info. If it's the first time, set things up. 3546 */ 3547 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_UINT64_OTHER, 0, chunksize); 3548 ztest_od_init(&od[1], id, FTAG, 1, DMU_OT_UINT64_OTHER, 0, chunksize); 3549 3550 if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0) 3551 return; 3552 3553 bigobj = od[0].od_object; 3554 packobj = od[1].od_object; 3555 chunksize = od[0].od_gen; 3556 ASSERT(chunksize == od[1].od_gen); 3557 3558 /* 3559 * Prefetch a random chunk of the big object. 3560 * Our aim here is to get some async reads in flight 3561 * for blocks that we may free below; the DMU should 3562 * handle this race correctly. 3563 */ 3564 n = ztest_random(regions) * stride + ztest_random(width); 3565 s = 1 + ztest_random(2 * width - 1); 3566 dmu_prefetch(os, bigobj, n * chunksize, s * chunksize); 3567 3568 /* 3569 * Pick a random index and compute the offsets into packobj and bigobj. 3570 */ 3571 n = ztest_random(regions) * stride + ztest_random(width); 3572 s = 1 + ztest_random(width - 1); 3573 3574 packoff = n * sizeof (bufwad_t); 3575 packsize = s * sizeof (bufwad_t); 3576 3577 bigoff = n * chunksize; 3578 bigsize = s * chunksize; 3579 3580 packbuf = umem_alloc(packsize, UMEM_NOFAIL); 3581 bigbuf = umem_alloc(bigsize, UMEM_NOFAIL); 3582 3583 /* 3584 * free_percent of the time, free a range of bigobj rather than 3585 * overwriting it. 3586 */ 3587 freeit = (ztest_random(100) < free_percent); 3588 3589 /* 3590 * Read the current contents of our objects. 3591 */ 3592 error = dmu_read(os, packobj, packoff, packsize, packbuf, 3593 DMU_READ_PREFETCH); 3594 ASSERT0(error); 3595 error = dmu_read(os, bigobj, bigoff, bigsize, bigbuf, 3596 DMU_READ_PREFETCH); 3597 ASSERT0(error); 3598 3599 /* 3600 * Get a tx for the mods to both packobj and bigobj. 3601 */ 3602 tx = dmu_tx_create(os); 3603 3604 dmu_tx_hold_write(tx, packobj, packoff, packsize); 3605 3606 if (freeit) 3607 dmu_tx_hold_free(tx, bigobj, bigoff, bigsize); 3608 else 3609 dmu_tx_hold_write(tx, bigobj, bigoff, bigsize); 3610 3611 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG); 3612 if (txg == 0) { 3613 umem_free(packbuf, packsize); 3614 umem_free(bigbuf, bigsize); 3615 return; 3616 } 3617 3618 dmu_object_set_checksum(os, bigobj, 3619 (enum zio_checksum)ztest_random_dsl_prop(ZFS_PROP_CHECKSUM), tx); 3620 3621 dmu_object_set_compress(os, bigobj, 3622 (enum zio_compress)ztest_random_dsl_prop(ZFS_PROP_COMPRESSION), tx); 3623 3624 /* 3625 * For each index from n to n + s, verify that the existing bufwad 3626 * in packobj matches the bufwads at the head and tail of the 3627 * corresponding chunk in bigobj. 
Then update all three bufwads 3628 * with the new values we want to write out. 3629 */ 3630 for (i = 0; i < s; i++) { 3631 /* LINTED */ 3632 pack = (bufwad_t *)((char *)packbuf + i * sizeof (bufwad_t)); 3633 /* LINTED */ 3634 bigH = (bufwad_t *)((char *)bigbuf + i * chunksize); 3635 /* LINTED */ 3636 bigT = (bufwad_t *)((char *)bigH + chunksize) - 1; 3637 3638 ASSERT((uintptr_t)bigH - (uintptr_t)bigbuf < bigsize); 3639 ASSERT((uintptr_t)bigT - (uintptr_t)bigbuf < bigsize); 3640 3641 if (pack->bw_txg > txg) 3642 fatal(0, "future leak: got %llx, open txg is %llx", 3643 pack->bw_txg, txg); 3644 3645 if (pack->bw_data != 0 && pack->bw_index != n + i) 3646 fatal(0, "wrong index: got %llx, wanted %llx+%llx", 3647 pack->bw_index, n, i); 3648 3649 if (bcmp(pack, bigH, sizeof (bufwad_t)) != 0) 3650 fatal(0, "pack/bigH mismatch in %p/%p", pack, bigH); 3651 3652 if (bcmp(pack, bigT, sizeof (bufwad_t)) != 0) 3653 fatal(0, "pack/bigT mismatch in %p/%p", pack, bigT); 3654 3655 if (freeit) { 3656 bzero(pack, sizeof (bufwad_t)); 3657 } else { 3658 pack->bw_index = n + i; 3659 pack->bw_txg = txg; 3660 pack->bw_data = 1 + ztest_random(-2ULL); 3661 } 3662 *bigH = *pack; 3663 *bigT = *pack; 3664 } 3665 3666 /* 3667 * We've verified all the old bufwads, and made new ones. 3668 * Now write them out. 3669 */ 3670 dmu_write(os, packobj, packoff, packsize, packbuf, tx); 3671 3672 if (freeit) { 3673 if (ztest_opts.zo_verbose >= 7) { 3674 (void) printf("freeing offset %llx size %llx" 3675 " txg %llx\n", 3676 (u_longlong_t)bigoff, 3677 (u_longlong_t)bigsize, 3678 (u_longlong_t)txg); 3679 } 3680 VERIFY(0 == dmu_free_range(os, bigobj, bigoff, bigsize, tx)); 3681 } else { 3682 if (ztest_opts.zo_verbose >= 7) { 3683 (void) printf("writing offset %llx size %llx" 3684 " txg %llx\n", 3685 (u_longlong_t)bigoff, 3686 (u_longlong_t)bigsize, 3687 (u_longlong_t)txg); 3688 } 3689 dmu_write(os, bigobj, bigoff, bigsize, bigbuf, tx); 3690 } 3691 3692 dmu_tx_commit(tx); 3693 3694 /* 3695 * Sanity check the stuff we just wrote. 3696 */ 3697 { 3698 void *packcheck = umem_alloc(packsize, UMEM_NOFAIL); 3699 void *bigcheck = umem_alloc(bigsize, UMEM_NOFAIL); 3700 3701 VERIFY(0 == dmu_read(os, packobj, packoff, 3702 packsize, packcheck, DMU_READ_PREFETCH)); 3703 VERIFY(0 == dmu_read(os, bigobj, bigoff, 3704 bigsize, bigcheck, DMU_READ_PREFETCH)); 3705 3706 ASSERT(bcmp(packbuf, packcheck, packsize) == 0); 3707 ASSERT(bcmp(bigbuf, bigcheck, bigsize) == 0); 3708 3709 umem_free(packcheck, packsize); 3710 umem_free(bigcheck, bigsize); 3711 } 3712 3713 umem_free(packbuf, packsize); 3714 umem_free(bigbuf, bigsize); 3715} 3716 3717void 3718compare_and_update_pbbufs(uint64_t s, bufwad_t *packbuf, bufwad_t *bigbuf, 3719 uint64_t bigsize, uint64_t n, uint64_t chunksize, uint64_t txg) 3720{ 3721 uint64_t i; 3722 bufwad_t *pack; 3723 bufwad_t *bigH; 3724 bufwad_t *bigT; 3725 3726 /* 3727 * For each index from n to n + s, verify that the existing bufwad 3728 * in packobj matches the bufwads at the head and tail of the 3729 * corresponding chunk in bigobj. Then update all three bufwads 3730 * with the new values we want to write out. 
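 * This duplicates the verify-and-update loop that ztest_dmu_read_write()
 * runs inline (minus the free-range case) so that
 * ztest_dmu_read_write_zcopy() below can reuse it on every iteration.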
3731 */ 3732 for (i = 0; i < s; i++) { 3733 /* LINTED */ 3734 pack = (bufwad_t *)((char *)packbuf + i * sizeof (bufwad_t)); 3735 /* LINTED */ 3736 bigH = (bufwad_t *)((char *)bigbuf + i * chunksize); 3737 /* LINTED */ 3738 bigT = (bufwad_t *)((char *)bigH + chunksize) - 1; 3739 3740 ASSERT((uintptr_t)bigH - (uintptr_t)bigbuf < bigsize); 3741 ASSERT((uintptr_t)bigT - (uintptr_t)bigbuf < bigsize); 3742 3743 if (pack->bw_txg > txg) 3744 fatal(0, "future leak: got %llx, open txg is %llx", 3745 pack->bw_txg, txg); 3746 3747 if (pack->bw_data != 0 && pack->bw_index != n + i) 3748 fatal(0, "wrong index: got %llx, wanted %llx+%llx", 3749 pack->bw_index, n, i); 3750 3751 if (bcmp(pack, bigH, sizeof (bufwad_t)) != 0) 3752 fatal(0, "pack/bigH mismatch in %p/%p", pack, bigH); 3753 3754 if (bcmp(pack, bigT, sizeof (bufwad_t)) != 0) 3755 fatal(0, "pack/bigT mismatch in %p/%p", pack, bigT); 3756 3757 pack->bw_index = n + i; 3758 pack->bw_txg = txg; 3759 pack->bw_data = 1 + ztest_random(-2ULL); 3760 3761 *bigH = *pack; 3762 *bigT = *pack; 3763 } 3764} 3765 3766void 3767ztest_dmu_read_write_zcopy(ztest_ds_t *zd, uint64_t id) 3768{ 3769 objset_t *os = zd->zd_os; 3770 ztest_od_t od[2]; 3771 dmu_tx_t *tx; 3772 uint64_t i; 3773 int error; 3774 uint64_t n, s, txg; 3775 bufwad_t *packbuf, *bigbuf; 3776 uint64_t packobj, packoff, packsize, bigobj, bigoff, bigsize; 3777 uint64_t blocksize = ztest_random_blocksize(); 3778 uint64_t chunksize = blocksize; 3779 uint64_t regions = 997; 3780 uint64_t stride = 123456789ULL; 3781 uint64_t width = 9; 3782 dmu_buf_t *bonus_db; 3783 arc_buf_t **bigbuf_arcbufs; 3784 dmu_object_info_t doi; 3785 3786 /* 3787 * This test uses two objects, packobj and bigobj, that are always 3788 * updated together (i.e. in the same tx) so that their contents are 3789 * in sync and can be compared. Their contents relate to each other 3790 * in a simple way: packobj is a dense array of 'bufwad' structures, 3791 * while bigobj is a sparse array of the same bufwads. Specifically, 3792 * for any index n, there are three bufwads that should be identical: 3793 * 3794 * packobj, at offset n * sizeof (bufwad_t) 3795 * bigobj, at the head of the nth chunk 3796 * bigobj, at the tail of the nth chunk 3797 * 3798 * The chunk size is set equal to bigobj block size so that 3799 * dmu_assign_arcbuf() can be tested for object updates. 3800 */ 3801 3802 /* 3803 * Read the directory info. If it's the first time, set things up. 3804 */ 3805 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_UINT64_OTHER, blocksize, 0); 3806 ztest_od_init(&od[1], id, FTAG, 1, DMU_OT_UINT64_OTHER, 0, chunksize); 3807 3808 if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0) 3809 return; 3810 3811 bigobj = od[0].od_object; 3812 packobj = od[1].od_object; 3813 blocksize = od[0].od_blocksize; 3814 chunksize = blocksize; 3815 ASSERT(chunksize == od[1].od_gen); 3816 3817 VERIFY(dmu_object_info(os, bigobj, &doi) == 0); 3818 VERIFY(ISP2(doi.doi_data_block_size)); 3819 VERIFY(chunksize == doi.doi_data_block_size); 3820 VERIFY(chunksize >= 2 * sizeof (bufwad_t)); 3821 3822 /* 3823 * Pick a random index and compute the offsets into packobj and bigobj. 
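 * For example, if n happened to be 10 and s were 3, the test would cover
 * bufwads 10 through 12: packoff = 10 * sizeof (bufwad_t) and
 * packsize = 3 * sizeof (bufwad_t) in packobj, with the matching region
 * of bigobj being chunks 10 through 12 (bigoff = 10 * chunksize,
 * bigsize = 3 * chunksize).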
3824 */ 3825 n = ztest_random(regions) * stride + ztest_random(width); 3826 s = 1 + ztest_random(width - 1); 3827 3828 packoff = n * sizeof (bufwad_t); 3829 packsize = s * sizeof (bufwad_t); 3830 3831 bigoff = n * chunksize; 3832 bigsize = s * chunksize; 3833 3834 packbuf = umem_zalloc(packsize, UMEM_NOFAIL); 3835 bigbuf = umem_zalloc(bigsize, UMEM_NOFAIL); 3836 3837 VERIFY3U(0, ==, dmu_bonus_hold(os, bigobj, FTAG, &bonus_db)); 3838 3839 bigbuf_arcbufs = umem_zalloc(2 * s * sizeof (arc_buf_t *), UMEM_NOFAIL); 3840 3841 /* 3842 * Iteration 0 test zcopy for DB_UNCACHED dbufs. 3843 * Iteration 1 test zcopy to already referenced dbufs. 3844 * Iteration 2 test zcopy to dirty dbuf in the same txg. 3845 * Iteration 3 test zcopy to dbuf dirty in previous txg. 3846 * Iteration 4 test zcopy when dbuf is no longer dirty. 3847 * Iteration 5 test zcopy when it can't be done. 3848 * Iteration 6 one more zcopy write. 3849 */ 3850 for (i = 0; i < 7; i++) { 3851 uint64_t j; 3852 uint64_t off; 3853 3854 /* 3855 * In iteration 5 (i == 5) use arcbufs 3856 * that don't match bigobj blksz to test 3857 * dmu_assign_arcbuf() when it can't directly 3858 * assign an arcbuf to a dbuf. 3859 */ 3860 for (j = 0; j < s; j++) { 3861 if (i != 5) { 3862 bigbuf_arcbufs[j] = 3863 dmu_request_arcbuf(bonus_db, chunksize); 3864 } else { 3865 bigbuf_arcbufs[2 * j] = 3866 dmu_request_arcbuf(bonus_db, chunksize / 2); 3867 bigbuf_arcbufs[2 * j + 1] = 3868 dmu_request_arcbuf(bonus_db, chunksize / 2); 3869 } 3870 } 3871 3872 /* 3873 * Get a tx for the mods to both packobj and bigobj. 3874 */ 3875 tx = dmu_tx_create(os); 3876 3877 dmu_tx_hold_write(tx, packobj, packoff, packsize); 3878 dmu_tx_hold_write(tx, bigobj, bigoff, bigsize); 3879 3880 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG); 3881 if (txg == 0) { 3882 umem_free(packbuf, packsize); 3883 umem_free(bigbuf, bigsize); 3884 for (j = 0; j < s; j++) { 3885 if (i != 5) { 3886 dmu_return_arcbuf(bigbuf_arcbufs[j]); 3887 } else { 3888 dmu_return_arcbuf( 3889 bigbuf_arcbufs[2 * j]); 3890 dmu_return_arcbuf( 3891 bigbuf_arcbufs[2 * j + 1]); 3892 } 3893 } 3894 umem_free(bigbuf_arcbufs, 2 * s * sizeof (arc_buf_t *)); 3895 dmu_buf_rele(bonus_db, FTAG); 3896 return; 3897 } 3898 3899 /* 3900 * 50% of the time don't read objects in the 1st iteration to 3901 * test dmu_assign_arcbuf() for the case when there're no 3902 * existing dbufs for the specified offsets. 3903 */ 3904 if (i != 0 || ztest_random(2) != 0) { 3905 error = dmu_read(os, packobj, packoff, 3906 packsize, packbuf, DMU_READ_PREFETCH); 3907 ASSERT0(error); 3908 error = dmu_read(os, bigobj, bigoff, bigsize, 3909 bigbuf, DMU_READ_PREFETCH); 3910 ASSERT0(error); 3911 } 3912 compare_and_update_pbbufs(s, packbuf, bigbuf, bigsize, 3913 n, chunksize, txg); 3914 3915 /* 3916 * We've verified all the old bufwads, and made new ones. 3917 * Now write them out. 
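 * packobj is written with an ordinary dmu_write(), while bigobj is fed
 * chunk by chunk through the preallocated arc bufs and
 * dmu_assign_arcbuf(); on iteration 5 each chunk is split across two
 * half-sized arc bufs to exercise the path where the buffer cannot be
 * assigned to the dbuf directly.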
3918 */ 3919 dmu_write(os, packobj, packoff, packsize, packbuf, tx); 3920 if (ztest_opts.zo_verbose >= 7) { 3921 (void) printf("writing offset %llx size %llx" 3922 " txg %llx\n", 3923 (u_longlong_t)bigoff, 3924 (u_longlong_t)bigsize, 3925 (u_longlong_t)txg); 3926 } 3927 for (off = bigoff, j = 0; j < s; j++, off += chunksize) { 3928 dmu_buf_t *dbt; 3929 if (i != 5) { 3930 bcopy((caddr_t)bigbuf + (off - bigoff), 3931 bigbuf_arcbufs[j]->b_data, chunksize); 3932 } else { 3933 bcopy((caddr_t)bigbuf + (off - bigoff), 3934 bigbuf_arcbufs[2 * j]->b_data, 3935 chunksize / 2); 3936 bcopy((caddr_t)bigbuf + (off - bigoff) + 3937 chunksize / 2, 3938 bigbuf_arcbufs[2 * j + 1]->b_data, 3939 chunksize / 2); 3940 } 3941 3942 if (i == 1) { 3943 VERIFY(dmu_buf_hold(os, bigobj, off, 3944 FTAG, &dbt, DMU_READ_NO_PREFETCH) == 0); 3945 } 3946 if (i != 5) { 3947 dmu_assign_arcbuf(bonus_db, off, 3948 bigbuf_arcbufs[j], tx); 3949 } else { 3950 dmu_assign_arcbuf(bonus_db, off, 3951 bigbuf_arcbufs[2 * j], tx); 3952 dmu_assign_arcbuf(bonus_db, 3953 off + chunksize / 2, 3954 bigbuf_arcbufs[2 * j + 1], tx); 3955 } 3956 if (i == 1) { 3957 dmu_buf_rele(dbt, FTAG); 3958 } 3959 } 3960 dmu_tx_commit(tx); 3961 3962 /* 3963 * Sanity check the stuff we just wrote. 3964 */ 3965 { 3966 void *packcheck = umem_alloc(packsize, UMEM_NOFAIL); 3967 void *bigcheck = umem_alloc(bigsize, UMEM_NOFAIL); 3968 3969 VERIFY(0 == dmu_read(os, packobj, packoff, 3970 packsize, packcheck, DMU_READ_PREFETCH)); 3971 VERIFY(0 == dmu_read(os, bigobj, bigoff, 3972 bigsize, bigcheck, DMU_READ_PREFETCH)); 3973 3974 ASSERT(bcmp(packbuf, packcheck, packsize) == 0); 3975 ASSERT(bcmp(bigbuf, bigcheck, bigsize) == 0); 3976 3977 umem_free(packcheck, packsize); 3978 umem_free(bigcheck, bigsize); 3979 } 3980 if (i == 2) { 3981 txg_wait_open(dmu_objset_pool(os), 0); 3982 } else if (i == 3) { 3983 txg_wait_synced(dmu_objset_pool(os), 0); 3984 } 3985 } 3986 3987 dmu_buf_rele(bonus_db, FTAG); 3988 umem_free(packbuf, packsize); 3989 umem_free(bigbuf, bigsize); 3990 umem_free(bigbuf_arcbufs, 2 * s * sizeof (arc_buf_t *)); 3991} 3992 3993/* ARGSUSED */ 3994void 3995ztest_dmu_write_parallel(ztest_ds_t *zd, uint64_t id) 3996{ 3997 ztest_od_t od[1]; 3998 uint64_t offset = (1ULL << (ztest_random(20) + 43)) + 3999 (ztest_random(ZTEST_RANGE_LOCKS) << SPA_MAXBLOCKSHIFT); 4000 4001 /* 4002 * Have multiple threads write to large offsets in an object 4003 * to verify that parallel writes to an object -- even to the 4004 * same blocks within the object -- doesn't cause any trouble. 
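 * Every thread passes ID_PARALLEL rather than its own id to
 * ztest_od_init(), so they all share a single object; the offset above
 * is a random power-of-two base (2^43 through 2^62) plus one of
 * ZTEST_RANGE_LOCKS block-aligned slots, so concurrent writers can land
 * on exactly the same blocks.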
4005 */ 4006 ztest_od_init(&od[0], ID_PARALLEL, FTAG, 0, DMU_OT_UINT64_OTHER, 0, 0); 4007 4008 if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0) 4009 return; 4010 4011 while (ztest_random(10) != 0) 4012 ztest_io(zd, od[0].od_object, offset); 4013} 4014 4015void 4016ztest_dmu_prealloc(ztest_ds_t *zd, uint64_t id) 4017{ 4018 ztest_od_t od[1]; 4019 uint64_t offset = (1ULL << (ztest_random(4) + SPA_MAXBLOCKSHIFT)) + 4020 (ztest_random(ZTEST_RANGE_LOCKS) << SPA_MAXBLOCKSHIFT); 4021 uint64_t count = ztest_random(20) + 1; 4022 uint64_t blocksize = ztest_random_blocksize(); 4023 void *data; 4024 4025 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_UINT64_OTHER, blocksize, 0); 4026 4027 if (ztest_object_init(zd, od, sizeof (od), !ztest_random(2)) != 0) 4028 return; 4029 4030 if (ztest_truncate(zd, od[0].od_object, offset, count * blocksize) != 0) 4031 return; 4032 4033 ztest_prealloc(zd, od[0].od_object, offset, count * blocksize); 4034 4035 data = umem_zalloc(blocksize, UMEM_NOFAIL); 4036 4037 while (ztest_random(count) != 0) { 4038 uint64_t randoff = offset + (ztest_random(count) * blocksize); 4039 if (ztest_write(zd, od[0].od_object, randoff, blocksize, 4040 data) != 0) 4041 break; 4042 while (ztest_random(4) != 0) 4043 ztest_io(zd, od[0].od_object, randoff); 4044 } 4045 4046 umem_free(data, blocksize); 4047} 4048 4049/* 4050 * Verify that zap_{create,destroy,add,remove,update} work as expected. 4051 */ 4052#define ZTEST_ZAP_MIN_INTS 1 4053#define ZTEST_ZAP_MAX_INTS 4 4054#define ZTEST_ZAP_MAX_PROPS 1000 4055 4056void 4057ztest_zap(ztest_ds_t *zd, uint64_t id) 4058{ 4059 objset_t *os = zd->zd_os; 4060 ztest_od_t od[1]; 4061 uint64_t object; 4062 uint64_t txg, last_txg; 4063 uint64_t value[ZTEST_ZAP_MAX_INTS]; 4064 uint64_t zl_ints, zl_intsize, prop; 4065 int i, ints; 4066 dmu_tx_t *tx; 4067 char propname[100], txgname[100]; 4068 int error; 4069 char *hc[2] = { "s.acl.h", ".s.open.h.hyLZlg" }; 4070 4071 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_ZAP_OTHER, 0, 0); 4072 4073 if (ztest_object_init(zd, od, sizeof (od), !ztest_random(2)) != 0) 4074 return; 4075 4076 object = od[0].od_object; 4077 4078 /* 4079 * Generate a known hash collision, and verify that 4080 * we can lookup and remove both entries. 4081 */ 4082 tx = dmu_tx_create(os); 4083 dmu_tx_hold_zap(tx, object, B_TRUE, NULL); 4084 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG); 4085 if (txg == 0) 4086 return; 4087 for (i = 0; i < 2; i++) { 4088 value[i] = i; 4089 VERIFY3U(0, ==, zap_add(os, object, hc[i], sizeof (uint64_t), 4090 1, &value[i], tx)); 4091 } 4092 for (i = 0; i < 2; i++) { 4093 VERIFY3U(EEXIST, ==, zap_add(os, object, hc[i], 4094 sizeof (uint64_t), 1, &value[i], tx)); 4095 VERIFY3U(0, ==, 4096 zap_length(os, object, hc[i], &zl_intsize, &zl_ints)); 4097 ASSERT3U(zl_intsize, ==, sizeof (uint64_t)); 4098 ASSERT3U(zl_ints, ==, 1); 4099 } 4100 for (i = 0; i < 2; i++) { 4101 VERIFY3U(0, ==, zap_remove(os, object, hc[i], tx)); 4102 } 4103 dmu_tx_commit(tx); 4104 4105 /* 4106 * Generate a bunch of random entries. 4107 */ 4108 ints = MAX(ZTEST_ZAP_MIN_INTS, object % ZTEST_ZAP_MAX_INTS); 4109 4110 prop = ztest_random(ZTEST_ZAP_MAX_PROPS); 4111 (void) sprintf(propname, "prop_%llu", (u_longlong_t)prop); 4112 (void) sprintf(txgname, "txg_%llu", (u_longlong_t)prop); 4113 bzero(value, sizeof (value)); 4114 last_txg = 0; 4115 4116 /* 4117 * If these zap entries already exist, validate their contents.
4118 */ 4119 error = zap_length(os, object, txgname, &zl_intsize, &zl_ints); 4120 if (error == 0) { 4121 ASSERT3U(zl_intsize, ==, sizeof (uint64_t)); 4122 ASSERT3U(zl_ints, ==, 1); 4123 4124 VERIFY(zap_lookup(os, object, txgname, zl_intsize, 4125 zl_ints, &last_txg) == 0); 4126 4127 VERIFY(zap_length(os, object, propname, &zl_intsize, 4128 &zl_ints) == 0); 4129 4130 ASSERT3U(zl_intsize, ==, sizeof (uint64_t)); 4131 ASSERT3U(zl_ints, ==, ints); 4132 4133 VERIFY(zap_lookup(os, object, propname, zl_intsize, 4134 zl_ints, value) == 0); 4135 4136 for (i = 0; i < ints; i++) { 4137 ASSERT3U(value[i], ==, last_txg + object + i); 4138 } 4139 } else { 4140 ASSERT3U(error, ==, ENOENT); 4141 } 4142 4143 /* 4144 * Atomically update two entries in our zap object. 4145 * The first is named txg_%llu, and contains the txg 4146 * in which the property was last updated. The second 4147 * is named prop_%llu, and the nth element of its value 4148 * should be txg + object + n. 4149 */ 4150 tx = dmu_tx_create(os); 4151 dmu_tx_hold_zap(tx, object, B_TRUE, NULL); 4152 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG); 4153 if (txg == 0) 4154 return; 4155 4156 if (last_txg > txg) 4157 fatal(0, "zap future leak: old %llu new %llu", last_txg, txg); 4158 4159 for (i = 0; i < ints; i++) 4160 value[i] = txg + object + i; 4161 4162 VERIFY3U(0, ==, zap_update(os, object, txgname, sizeof (uint64_t), 4163 1, &txg, tx)); 4164 VERIFY3U(0, ==, zap_update(os, object, propname, sizeof (uint64_t), 4165 ints, value, tx)); 4166 4167 dmu_tx_commit(tx); 4168 4169 /* 4170 * Remove a random pair of entries. 4171 */ 4172 prop = ztest_random(ZTEST_ZAP_MAX_PROPS); 4173 (void) sprintf(propname, "prop_%llu", (u_longlong_t)prop); 4174 (void) sprintf(txgname, "txg_%llu", (u_longlong_t)prop); 4175 4176 error = zap_length(os, object, txgname, &zl_intsize, &zl_ints); 4177 4178 if (error == ENOENT) 4179 return; 4180 4181 ASSERT0(error); 4182 4183 tx = dmu_tx_create(os); 4184 dmu_tx_hold_zap(tx, object, B_TRUE, NULL); 4185 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG); 4186 if (txg == 0) 4187 return; 4188 VERIFY3U(0, ==, zap_remove(os, object, txgname, tx)); 4189 VERIFY3U(0, ==, zap_remove(os, object, propname, tx)); 4190 dmu_tx_commit(tx); 4191} 4192 4193/* 4194 * Testcase to test the upgrading of a microzap to fatzap. 4195 */ 4196void 4197ztest_fzap(ztest_ds_t *zd, uint64_t id) 4198{ 4199 objset_t *os = zd->zd_os; 4200 ztest_od_t od[1]; 4201 uint64_t object, txg; 4202 4203 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_ZAP_OTHER, 0, 0); 4204 4205 if (ztest_object_init(zd, od, sizeof (od), !ztest_random(2)) != 0) 4206 return; 4207 4208 object = od[0].od_object; 4209 4210 /* 4211 * Add entries to this ZAP and make sure it spills over 4212 * and gets upgraded to a fatzap. Also, since we are adding 4213 * 2050 entries we should see ptrtbl growth and leaf-block split. 
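/*
 * Illustrative sketch (not part of the original source): the invariant that
 * the txg_<p>/prop_<p> pair above maintains, restated as a standalone
 * checker.  The helper itself is hypothetical; the rule comes straight from
 * the update loop: prop_<p>[n] must equal the txg stored in txg_<p>, plus
 * object, plus n.
 */
static void
zap_pair_check_sketch(objset_t *os, uint64_t object, uint64_t prop, int ints)
{
	char propname[100], txgname[100];
	uint64_t stored_txg, value[ZTEST_ZAP_MAX_INTS];

	(void) sprintf(propname, "prop_%llu", (u_longlong_t)prop);
	(void) sprintf(txgname, "txg_%llu", (u_longlong_t)prop);

	if (zap_lookup(os, object, txgname, sizeof (uint64_t), 1,
	    &stored_txg) != 0)
		return;			/* this pair was never written */

	VERIFY3U(0, ==, zap_lookup(os, object, propname, sizeof (uint64_t),
	    ints, value));
	for (int i = 0; i < ints; i++)
		ASSERT3U(value[i], ==, stored_txg + object + i);
}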
4214 */ 4215 for (int i = 0; i < 2050; i++) { 4216 char name[MAXNAMELEN]; 4217 uint64_t value = i; 4218 dmu_tx_t *tx; 4219 int error; 4220 4221 (void) snprintf(name, sizeof (name), "fzap-%llu-%llu", 4222 id, value); 4223 4224 tx = dmu_tx_create(os); 4225 dmu_tx_hold_zap(tx, object, B_TRUE, name); 4226 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG); 4227 if (txg == 0) 4228 return; 4229 error = zap_add(os, object, name, sizeof (uint64_t), 1, 4230 &value, tx); 4231 ASSERT(error == 0 || error == EEXIST); 4232 dmu_tx_commit(tx); 4233 } 4234} 4235 4236/* ARGSUSED */ 4237void 4238ztest_zap_parallel(ztest_ds_t *zd, uint64_t id) 4239{ 4240 objset_t *os = zd->zd_os; 4241 ztest_od_t od[1]; 4242 uint64_t txg, object, count, wsize, wc, zl_wsize, zl_wc; 4243 dmu_tx_t *tx; 4244 int i, namelen, error; 4245 int micro = ztest_random(2); 4246 char name[20], string_value[20]; 4247 void *data; 4248 4249 ztest_od_init(&od[0], ID_PARALLEL, FTAG, micro, DMU_OT_ZAP_OTHER, 0, 0); 4250 4251 if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0) 4252 return; 4253 4254 object = od[0].od_object; 4255 4256 /* 4257 * Generate a random name of the form 'xxx.....' where each 4258 * x is a random printable character and the dots are dots. 4259 * There are 94 such characters, and the name length goes from 4260 * 6 to 20, so there are 94^3 * 15 = 12,458,760 possible names. 4261 */ 4262 namelen = ztest_random(sizeof (name) - 5) + 5 + 1; 4263 4264 for (i = 0; i < 3; i++) 4265 name[i] = '!' + ztest_random('~' - '!' + 1); 4266 for (; i < namelen - 1; i++) 4267 name[i] = '.'; 4268 name[i] = '\0'; 4269 4270 if ((namelen & 1) || micro) { 4271 wsize = sizeof (txg); 4272 wc = 1; 4273 data = &txg; 4274 } else { 4275 wsize = 1; 4276 wc = namelen; 4277 data = string_value; 4278 } 4279 4280 count = -1ULL; 4281 VERIFY(zap_count(os, object, &count) == 0); 4282 ASSERT(count != -1ULL); 4283 4284 /* 4285 * Select an operation: length, lookup, add, update, remove. 4286 */ 4287 i = ztest_random(5); 4288 4289 if (i >= 2) { 4290 tx = dmu_tx_create(os); 4291 dmu_tx_hold_zap(tx, object, B_TRUE, NULL); 4292 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG); 4293 if (txg == 0) 4294 return; 4295 bcopy(name, string_value, namelen); 4296 } else { 4297 tx = NULL; 4298 txg = 0; 4299 bzero(string_value, namelen); 4300 } 4301 4302 switch (i) { 4303 4304 case 0: 4305 error = zap_length(os, object, name, &zl_wsize, &zl_wc); 4306 if (error == 0) { 4307 ASSERT3U(wsize, ==, zl_wsize); 4308 ASSERT3U(wc, ==, zl_wc); 4309 } else { 4310 ASSERT3U(error, ==, ENOENT); 4311 } 4312 break; 4313 4314 case 1: 4315 error = zap_lookup(os, object, name, wsize, wc, data); 4316 if (error == 0) { 4317 if (data == string_value && 4318 bcmp(name, data, namelen) != 0) 4319 fatal(0, "name '%s' != val '%s' len %d", 4320 name, data, namelen); 4321 } else { 4322 ASSERT3U(error, ==, ENOENT); 4323 } 4324 break; 4325 4326 case 2: 4327 error = zap_add(os, object, name, wsize, wc, data, tx); 4328 ASSERT(error == 0 || error == EEXIST); 4329 break; 4330 4331 case 3: 4332 VERIFY(zap_update(os, object, name, wsize, wc, data, tx) == 0); 4333 break; 4334 4335 case 4: 4336 error = zap_remove(os, object, name, tx); 4337 ASSERT(error == 0 || error == ENOENT); 4338 break; 4339 } 4340 4341 if (tx != NULL) 4342 dmu_tx_commit(tx); 4343} 4344 4345/* 4346 * Commit callback data. 
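/*
 * Added commentary (not in the original source), on why 2050 entries are
 * enough to force the upgrade: assuming the usual on-disk microzap layout of
 * 64-byte chunks in a single block of at most 128 KB, with the first chunk
 * taken by the header, a microzap tops out at roughly 128 * 1024 / 64 - 1 =
 * 2047 entries, and only for short names with single-uint64 values (which
 * the "fzap-..." names and uint64_t values above satisfy).  Somewhere past
 * that count the object can no longer stay micro, so by 2050 insertions it
 * must have been upgraded to a fatzap, with enough entries left over to
 * exercise pointer-table growth and leaf-block splits as the comment above
 * says.
 */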
4347 */ 4348typedef struct ztest_cb_data { 4349 list_node_t zcd_node; 4350 uint64_t zcd_txg; 4351 int zcd_expected_err; 4352 boolean_t zcd_added; 4353 boolean_t zcd_called; 4354 spa_t *zcd_spa; 4355} ztest_cb_data_t; 4356 4357/* This is the actual commit callback function */ 4358static void 4359ztest_commit_callback(void *arg, int error) 4360{ 4361 ztest_cb_data_t *data = arg; 4362 uint64_t synced_txg; 4363 4364 VERIFY(data != NULL); 4365 VERIFY3S(data->zcd_expected_err, ==, error); 4366 VERIFY(!data->zcd_called); 4367 4368 synced_txg = spa_last_synced_txg(data->zcd_spa); 4369 if (data->zcd_txg > synced_txg) 4370 fatal(0, "commit callback of txg %" PRIu64 " called prematurely" 4371 ", last synced txg = %" PRIu64 "\n", data->zcd_txg, 4372 synced_txg); 4373 4374 data->zcd_called = B_TRUE; 4375 4376 if (error == ECANCELED) { 4377 ASSERT0(data->zcd_txg); 4378 ASSERT(!data->zcd_added); 4379 4380 /* 4381 * The private callback data should be destroyed here, but 4382 * since we are going to check the zcd_called field after 4383 * dmu_tx_abort(), we will destroy it there. 4384 */ 4385 return; 4386 } 4387 4388 /* Was this callback added to the global callback list? */ 4389 if (!data->zcd_added) 4390 goto out; 4391 4392 ASSERT3U(data->zcd_txg, !=, 0); 4393 4394 /* Remove our callback from the list */ 4395 (void) mutex_lock(&zcl.zcl_callbacks_lock); 4396 list_remove(&zcl.zcl_callbacks, data); 4397 (void) mutex_unlock(&zcl.zcl_callbacks_lock); 4398 4399out: 4400 umem_free(data, sizeof (ztest_cb_data_t)); 4401} 4402 4403/* Allocate and initialize callback data structure */ 4404static ztest_cb_data_t * 4405ztest_create_cb_data(objset_t *os, uint64_t txg) 4406{ 4407 ztest_cb_data_t *cb_data; 4408 4409 cb_data = umem_zalloc(sizeof (ztest_cb_data_t), UMEM_NOFAIL); 4410 4411 cb_data->zcd_txg = txg; 4412 cb_data->zcd_spa = dmu_objset_spa(os); 4413 4414 return (cb_data); 4415} 4416 4417/* 4418 * If a number of txgs equal to this threshold have been created after a commit 4419 * callback has been registered but not called, then we assume there is an 4420 * implementation bug. 4421 */ 4422#define ZTEST_COMMIT_CALLBACK_THRESH (TXG_CONCURRENT_STATES + 2) 4423 4424/* 4425 * Commit callback test. 4426 */ 4427void 4428ztest_dmu_commit_callbacks(ztest_ds_t *zd, uint64_t id) 4429{ 4430 objset_t *os = zd->zd_os; 4431 ztest_od_t od[1]; 4432 dmu_tx_t *tx; 4433 ztest_cb_data_t *cb_data[3], *tmp_cb; 4434 uint64_t old_txg, txg; 4435 int i, error; 4436 4437 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_UINT64_OTHER, 0, 0); 4438 4439 if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0) 4440 return; 4441 4442 tx = dmu_tx_create(os); 4443 4444 cb_data[0] = ztest_create_cb_data(os, 0); 4445 dmu_tx_callback_register(tx, ztest_commit_callback, cb_data[0]); 4446 4447 dmu_tx_hold_write(tx, od[0].od_object, 0, sizeof (uint64_t)); 4448 4449 /* Every once in a while, abort the transaction on purpose */ 4450 if (ztest_random(100) == 0) 4451 error = -1; 4452 4453 if (!error) 4454 error = dmu_tx_assign(tx, TXG_NOWAIT); 4455 4456 txg = error ? 0 : dmu_tx_get_txg(tx); 4457 4458 cb_data[0]->zcd_txg = txg; 4459 cb_data[1] = ztest_create_cb_data(os, txg); 4460 dmu_tx_callback_register(tx, ztest_commit_callback, cb_data[1]); 4461 4462 if (error) { 4463 /* 4464 * It's not a strict requirement to call the registered 4465 * callbacks from inside dmu_tx_abort(), but that's what 4466 * it's supposed to happen in the current implementation 4467 * so we will check for that. 
4468 */ 4469 for (i = 0; i < 2; i++) { 4470 cb_data[i]->zcd_expected_err = ECANCELED; 4471 VERIFY(!cb_data[i]->zcd_called); 4472 } 4473 4474 dmu_tx_abort(tx); 4475 4476 for (i = 0; i < 2; i++) { 4477 VERIFY(cb_data[i]->zcd_called); 4478 umem_free(cb_data[i], sizeof (ztest_cb_data_t)); 4479 } 4480 4481 return; 4482 } 4483 4484 cb_data[2] = ztest_create_cb_data(os, txg); 4485 dmu_tx_callback_register(tx, ztest_commit_callback, cb_data[2]); 4486 4487 /* 4488 * Read existing data to make sure there isn't a future leak. 4489 */ 4490 VERIFY(0 == dmu_read(os, od[0].od_object, 0, sizeof (uint64_t), 4491 &old_txg, DMU_READ_PREFETCH)); 4492 4493 if (old_txg > txg) 4494 fatal(0, "future leak: got %" PRIu64 ", open txg is %" PRIu64, 4495 old_txg, txg); 4496 4497 dmu_write(os, od[0].od_object, 0, sizeof (uint64_t), &txg, tx); 4498 4499 (void) mutex_lock(&zcl.zcl_callbacks_lock); 4500 4501 /* 4502 * Since commit callbacks don't have any ordering requirement and since 4503 * it is theoretically possible for a commit callback to be called 4504 * after an arbitrary amount of time has elapsed since its txg has been 4505 * synced, it is difficult to reliably determine whether a commit 4506 * callback hasn't been called due to high load or due to a flawed 4507 * implementation. 4508 * 4509 * In practice, we will assume that if after a certain number of txgs a 4510 * commit callback hasn't been called, then most likely there's an 4511 * implementation bug.. 4512 */ 4513 tmp_cb = list_head(&zcl.zcl_callbacks); 4514 if (tmp_cb != NULL && 4515 tmp_cb->zcd_txg > txg - ZTEST_COMMIT_CALLBACK_THRESH) { 4516 fatal(0, "Commit callback threshold exceeded, oldest txg: %" 4517 PRIu64 ", open txg: %" PRIu64 "\n", tmp_cb->zcd_txg, txg); 4518 } 4519 4520 /* 4521 * Let's find the place to insert our callbacks. 4522 * 4523 * Even though the list is ordered by txg, it is possible for the 4524 * insertion point to not be the end because our txg may already be 4525 * quiescing at this point and other callbacks in the open txg 4526 * (from other objsets) may have sneaked in. 
4527 */ 4528 tmp_cb = list_tail(&zcl.zcl_callbacks); 4529 while (tmp_cb != NULL && tmp_cb->zcd_txg > txg) 4530 tmp_cb = list_prev(&zcl.zcl_callbacks, tmp_cb); 4531 4532 /* Add the 3 callbacks to the list */ 4533 for (i = 0; i < 3; i++) { 4534 if (tmp_cb == NULL) 4535 list_insert_head(&zcl.zcl_callbacks, cb_data[i]); 4536 else 4537 list_insert_after(&zcl.zcl_callbacks, tmp_cb, 4538 cb_data[i]); 4539 4540 cb_data[i]->zcd_added = B_TRUE; 4541 VERIFY(!cb_data[i]->zcd_called); 4542 4543 tmp_cb = cb_data[i]; 4544 } 4545 4546 (void) mutex_unlock(&zcl.zcl_callbacks_lock); 4547 4548 dmu_tx_commit(tx); 4549} 4550 4551/* ARGSUSED */ 4552void 4553ztest_dsl_prop_get_set(ztest_ds_t *zd, uint64_t id) 4554{ 4555 zfs_prop_t proplist[] = { 4556 ZFS_PROP_CHECKSUM, 4557 ZFS_PROP_COMPRESSION, 4558 ZFS_PROP_COPIES, 4559 ZFS_PROP_DEDUP 4560 }; 4561 4562 (void) rw_rdlock(&ztest_name_lock); 4563 4564 for (int p = 0; p < sizeof (proplist) / sizeof (proplist[0]); p++) 4565 (void) ztest_dsl_prop_set_uint64(zd->zd_name, proplist[p], 4566 ztest_random_dsl_prop(proplist[p]), (int)ztest_random(2)); 4567 4568 (void) rw_unlock(&ztest_name_lock); 4569} 4570 4571/* ARGSUSED */ 4572void 4573ztest_spa_prop_get_set(ztest_ds_t *zd, uint64_t id) 4574{ 4575 nvlist_t *props = NULL; 4576 4577 (void) rw_rdlock(&ztest_name_lock); 4578 4579 (void) ztest_spa_prop_set_uint64(ZPOOL_PROP_DEDUPDITTO, 4580 ZIO_DEDUPDITTO_MIN + ztest_random(ZIO_DEDUPDITTO_MIN)); 4581 4582 VERIFY0(spa_prop_get(ztest_spa, &props)); 4583 4584 if (ztest_opts.zo_verbose >= 6) 4585 dump_nvlist(props, 4); 4586 4587 nvlist_free(props); 4588 4589 (void) rw_unlock(&ztest_name_lock); 4590} 4591 4592/* 4593 * Test snapshot hold/release and deferred destroy. 4594 */ 4595void 4596ztest_dmu_snapshot_hold(ztest_ds_t *zd, uint64_t id) 4597{ 4598 int error; 4599 objset_t *os = zd->zd_os; 4600 objset_t *origin; 4601 char snapname[100]; 4602 char fullname[100]; 4603 char clonename[100]; 4604 char tag[100]; 4605 char osname[MAXNAMELEN]; 4606 4607 (void) rw_rdlock(&ztest_name_lock); 4608 4609 dmu_objset_name(os, osname); 4610 4611 (void) snprintf(snapname, 100, "sh1_%llu", id); 4612 (void) snprintf(fullname, 100, "%s@%s", osname, snapname); 4613 (void) snprintf(clonename, 100, "%s/ch1_%llu", osname, id); 4614 (void) snprintf(tag, 100, "tag_%llu", id); 4615 4616 /* 4617 * Clean up from any previous run. 4618 */ 4619 (void) dmu_objset_destroy(clonename, B_FALSE); 4620 (void) dsl_dataset_user_release(osname, snapname, tag, B_FALSE); 4621 (void) dmu_objset_destroy(fullname, B_FALSE); 4622 4623 /* 4624 * Create snapshot, clone it, mark snap for deferred destroy, 4625 * destroy clone, verify snap was also destroyed.
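/*
 * Illustrative sketch (not part of the original source or build): the minimal
 * shape of the commit-callback contract exercised by ztest_dmu_commit_callbacks()
 * above.  A callback registered on a tx fires exactly once: with error == 0
 * once the tx's txg has made it to disk, or with ECANCELED if the tx is
 * aborted without ever being assigned.  The helper names are hypothetical.
 */
static void
sketch_cb(void *arg, int error)
{
	/* runs from syncing context, or from dmu_tx_abort() on failure */
	boolean_t *done = arg;

	ASSERT(error == 0 || error == ECANCELED);
	*done = B_TRUE;
}

static void
commit_cb_sketch(objset_t *os, uint64_t object)
{
	static boolean_t done = B_FALSE;	/* may be set after we return */
	dmu_tx_t *tx = dmu_tx_create(os);
	uint64_t dummy = 0;

	dmu_tx_hold_write(tx, object, 0, sizeof (uint64_t));
	dmu_tx_callback_register(tx, sketch_cb, &done);

	if (dmu_tx_assign(tx, TXG_NOWAIT) != 0) {
		dmu_tx_abort(tx);	/* sketch_cb fires with ECANCELED */
		return;
	}
	dmu_write(os, object, 0, sizeof (uint64_t), &dummy, tx);
	dmu_tx_commit(tx);		/* sketch_cb fires after the txg syncs */
}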
4626 */ 4627 error = dmu_objset_snapshot(osname, snapname, NULL, NULL, FALSE, 4628 FALSE, -1); 4629 if (error) { 4630 if (error == ENOSPC) { 4631 ztest_record_enospc("dmu_objset_snapshot"); 4632 goto out; 4633 } 4634 fatal(0, "dmu_objset_snapshot(%s) = %d", fullname, error); 4635 } 4636 4637 error = dmu_objset_hold(fullname, FTAG, &origin); 4638 if (error) 4639 fatal(0, "dmu_objset_hold(%s) = %d", fullname, error); 4640 4641 error = dmu_objset_clone(clonename, dmu_objset_ds(origin), 0); 4642 dmu_objset_rele(origin, FTAG); 4643 if (error) { 4644 if (error == ENOSPC) { 4645 ztest_record_enospc("dmu_objset_clone"); 4646 goto out; 4647 } 4648 fatal(0, "dmu_objset_clone(%s) = %d", clonename, error); 4649 } 4650 4651 error = dmu_objset_destroy(fullname, B_TRUE); 4652 if (error) { 4653 fatal(0, "dmu_objset_destroy(%s, B_TRUE) = %d", 4654 fullname, error); 4655 } 4656 4657 error = dmu_objset_destroy(clonename, B_FALSE); 4658 if (error) 4659 fatal(0, "dmu_objset_destroy(%s) = %d", clonename, error); 4660 4661 error = dmu_objset_hold(fullname, FTAG, &origin); 4662 if (error != ENOENT) 4663 fatal(0, "dmu_objset_hold(%s) = %d", fullname, error); 4664 4665 /* 4666 * Create snapshot, add temporary hold, verify that we can't 4667 * destroy a held snapshot, mark for deferred destroy, 4668 * release hold, verify snapshot was destroyed. 4669 */ 4670 error = dmu_objset_snapshot(osname, snapname, NULL, NULL, FALSE, 4671 FALSE, -1); 4672 if (error) { 4673 if (error == ENOSPC) { 4674 ztest_record_enospc("dmu_objset_snapshot"); 4675 goto out; 4676 } 4677 fatal(0, "dmu_objset_snapshot(%s) = %d", fullname, error); 4678 } 4679 4680 error = dsl_dataset_user_hold(osname, snapname, tag, B_FALSE, 4681 B_TRUE, -1); 4682 if (error) 4683 fatal(0, "dsl_dataset_user_hold(%s)", fullname, tag); 4684 4685 error = dmu_objset_destroy(fullname, B_FALSE); 4686 if (error != EBUSY) { 4687 fatal(0, "dmu_objset_destroy(%s, B_FALSE) = %d", 4688 fullname, error); 4689 } 4690 4691 error = dmu_objset_destroy(fullname, B_TRUE); 4692 if (error) { 4693 fatal(0, "dmu_objset_destroy(%s, B_TRUE) = %d", 4694 fullname, error); 4695 } 4696 4697 error = dsl_dataset_user_release(osname, snapname, tag, B_FALSE); 4698 if (error) 4699 fatal(0, "dsl_dataset_user_release(%s)", fullname, tag); 4700 4701 VERIFY(dmu_objset_hold(fullname, FTAG, &origin) == ENOENT); 4702 4703out: 4704 (void) rw_unlock(&ztest_name_lock); 4705} 4706 4707/* 4708 * Inject random faults into the on-disk data. 4709 */ 4710/* ARGSUSED */ 4711void 4712ztest_fault_inject(ztest_ds_t *zd, uint64_t id) 4713{ 4714 ztest_shared_t *zs = ztest_shared; 4715 spa_t *spa = ztest_spa; 4716 int fd; 4717 uint64_t offset; 4718 uint64_t leaves; 4719 uint64_t bad = 0x1990c0ffeedecadeULL; 4720 uint64_t top, leaf; 4721 char path0[MAXPATHLEN]; 4722 char pathrand[MAXPATHLEN]; 4723 size_t fsize; 4724 int bshift = SPA_MAXBLOCKSHIFT + 2; /* don't scrog all labels */ 4725 int iters = 1000; 4726 int maxfaults; 4727 int mirror_save; 4728 vdev_t *vd0 = NULL; 4729 uint64_t guid0 = 0; 4730 boolean_t islog = B_FALSE; 4731 4732 VERIFY(mutex_lock(&ztest_vdev_lock) == 0); 4733 maxfaults = MAXFAULTS(); 4734 leaves = MAX(zs->zs_mirrors, 1) * ztest_opts.zo_raidz; 4735 mirror_save = zs->zs_mirrors; 4736 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0); 4737 4738 ASSERT(leaves >= 1); 4739 4740 /* 4741 * We need SCL_STATE here because we're going to look at vd0->vdev_tsd. 
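/*
 * Illustrative sketch (not part of the original source or build): the
 * user-hold state machine that the second half of ztest_dmu_snapshot_hold()
 * above walks through, reduced to its expected return values.  Error
 * handling such as ENOSPC is ignored here, the "keep" tag is hypothetical,
 * and fullname is assumed to be osname@snapname.
 */
static void
hold_flow_sketch(char *osname, char *snapname, char *fullname)
{
	objset_t *os;

	VERIFY0(dmu_objset_snapshot(osname, snapname, NULL, NULL, FALSE,
	    FALSE, -1));
	VERIFY0(dsl_dataset_user_hold(osname, snapname, "keep", B_FALSE,
	    B_TRUE, -1));

	/* a held snapshot refuses a normal destroy ... */
	VERIFY3U(EBUSY, ==, dmu_objset_destroy(fullname, B_FALSE));
	/* ... but accepts a deferred one, which waits for the release */
	VERIFY0(dmu_objset_destroy(fullname, B_TRUE));
	VERIFY0(dsl_dataset_user_release(osname, snapname, "keep", B_FALSE));

	/* once released, the deferred destroy has actually happened */
	VERIFY3U(ENOENT, ==, dmu_objset_hold(fullname, FTAG, &os));
}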
4742 */ 4743 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER); 4744 4745 if (ztest_random(2) == 0) { 4746 /* 4747 * Inject errors on a normal data device or slog device. 4748 */ 4749 top = ztest_random_vdev_top(spa, B_TRUE); 4750 leaf = ztest_random(leaves) + zs->zs_splits; 4751 4752 /* 4753 * Generate paths to the first leaf in this top-level vdev, 4754 * and to the random leaf we selected. We'll induce transient 4755 * write failures and random online/offline activity on leaf 0, 4756 * and we'll write random garbage to the randomly chosen leaf. 4757 */ 4758 (void) snprintf(path0, sizeof (path0), ztest_dev_template, 4759 ztest_opts.zo_dir, ztest_opts.zo_pool, 4760 top * leaves + zs->zs_splits); 4761 (void) snprintf(pathrand, sizeof (pathrand), ztest_dev_template, 4762 ztest_opts.zo_dir, ztest_opts.zo_pool, 4763 top * leaves + leaf); 4764 4765 vd0 = vdev_lookup_by_path(spa->spa_root_vdev, path0); 4766 if (vd0 != NULL && vd0->vdev_top->vdev_islog) 4767 islog = B_TRUE; 4768 4769 if (vd0 != NULL && maxfaults != 1) { 4770 /* 4771 * Make vd0 explicitly claim to be unreadable, 4772 * or unwriteable, or reach behind its back 4773 * and close the underlying fd. We can do this if 4774 * maxfaults == 0 because we'll fail and reexecute, 4775 * and we can do it if maxfaults >= 2 because we'll 4776 * have enough redundancy. If maxfaults == 1, the 4777 * combination of this with injection of random data 4778 * corruption below exceeds the pool's fault tolerance. 4779 */ 4780 vdev_file_t *vf = vd0->vdev_tsd; 4781 4782 if (vf != NULL && ztest_random(3) == 0) { 4783 (void) close(vf->vf_vnode->v_fd); 4784 vf->vf_vnode->v_fd = -1; 4785 } else if (ztest_random(2) == 0) { 4786 vd0->vdev_cant_read = B_TRUE; 4787 } else { 4788 vd0->vdev_cant_write = B_TRUE; 4789 } 4790 guid0 = vd0->vdev_guid; 4791 } 4792 } else { 4793 /* 4794 * Inject errors on an l2cache device. 4795 */ 4796 spa_aux_vdev_t *sav = &spa->spa_l2cache; 4797 4798 if (sav->sav_count == 0) { 4799 spa_config_exit(spa, SCL_STATE, FTAG); 4800 return; 4801 } 4802 vd0 = sav->sav_vdevs[ztest_random(sav->sav_count)]; 4803 guid0 = vd0->vdev_guid; 4804 (void) strcpy(path0, vd0->vdev_path); 4805 (void) strcpy(pathrand, vd0->vdev_path); 4806 4807 leaf = 0; 4808 leaves = 1; 4809 maxfaults = INT_MAX; /* no limit on cache devices */ 4810 } 4811 4812 spa_config_exit(spa, SCL_STATE, FTAG); 4813 4814 /* 4815 * If we can tolerate two or more faults, or we're dealing 4816 * with a slog, randomly online/offline vd0. 4817 */ 4818 if ((maxfaults >= 2 || islog) && guid0 != 0) { 4819 if (ztest_random(10) < 6) { 4820 int flags = (ztest_random(2) == 0 ? 4821 ZFS_OFFLINE_TEMPORARY : 0); 4822 4823 /* 4824 * We have to grab the zs_name_lock as writer to 4825 * prevent a race between offlining a slog and 4826 * destroying a dataset. Offlining the slog will 4827 * grab a reference on the dataset which may cause 4828 * dmu_objset_destroy() to fail with EBUSY thus 4829 * leaving the dataset in an inconsistent state. 4830 */ 4831 if (islog) 4832 (void) rw_wrlock(&ztest_name_lock); 4833 4834 VERIFY(vdev_offline(spa, guid0, flags) != EBUSY); 4835 4836 if (islog) 4837 (void) rw_unlock(&ztest_name_lock); 4838 } else { 4839 (void) vdev_online(spa, guid0, 0, NULL); 4840 } 4841 } 4842 4843 if (maxfaults == 0) 4844 return; 4845 4846 /* 4847 * We have at least single-fault tolerance, so inject data corruption. 
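/*
 * Added commentary (not in the original source) on the offset arithmetic in
 * the injection loop that follows: with bshift = SPA_MAXBLOCKSHIFT + 2, the
 * backing file is treated as repeating groups of (leaves << bshift) bytes,
 * one (1 << bshift) slice per leaf, and the offset is built as
 *
 *	random group  * (leaves << bshift)	pick a group
 *	+ (leaf << bshift)			this leaf's slice within it
 *	+ (random(1 << (bshift - 1)) & -8ULL)	an 8-byte-aligned word in the
 *						first half of that slice
 *
 * Because every offset lands in the first half of a 512 KB-aligned stripe,
 * the label that lives in the second 256 KB of the device can never be hit,
 * so some labels always survive -- presumably what the "don't scrog all
 * labels" remark above is getting at.
 */
#if 0	/* equivalent decomposition, illustration only; variables are local to the loop above */
	group  = ztest_random(fsize / (leaves << bshift));
	slice  = group * (leaves << bshift) + (leaf << bshift);
	word   = ztest_random(1ULL << (bshift - 1)) & -8ULL;
	offset = slice + word;
#endif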
4848 */ 4849 fd = open(pathrand, O_RDWR); 4850 4851 if (fd == -1) /* we hit a gap in the device namespace */ 4852 return; 4853 4854 fsize = lseek(fd, 0, SEEK_END); 4855 4856 while (--iters != 0) { 4857 offset = ztest_random(fsize / (leaves << bshift)) * 4858 (leaves << bshift) + (leaf << bshift) + 4859 (ztest_random(1ULL << (bshift - 1)) & -8ULL); 4860 4861 if (offset >= fsize) 4862 continue; 4863 4864 VERIFY(mutex_lock(&ztest_vdev_lock) == 0); 4865 if (mirror_save != zs->zs_mirrors) { 4866 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0); 4867 (void) close(fd); 4868 return; 4869 } 4870 4871 if (pwrite(fd, &bad, sizeof (bad), offset) != sizeof (bad)) 4872 fatal(1, "can't inject bad word at 0x%llx in %s", 4873 offset, pathrand); 4874 4875 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0); 4876 4877 if (ztest_opts.zo_verbose >= 7) 4878 (void) printf("injected bad word into %s," 4879 " offset 0x%llx\n", pathrand, (u_longlong_t)offset); 4880 } 4881 4882 (void) close(fd); 4883} 4884 4885/* 4886 * Verify that DDT repair works as expected. 4887 */ 4888void 4889ztest_ddt_repair(ztest_ds_t *zd, uint64_t id) 4890{ 4891 ztest_shared_t *zs = ztest_shared; 4892 spa_t *spa = ztest_spa; 4893 objset_t *os = zd->zd_os; 4894 ztest_od_t od[1]; 4895 uint64_t object, blocksize, txg, pattern, psize; 4896 enum zio_checksum checksum = spa_dedup_checksum(spa); 4897 dmu_buf_t *db; 4898 dmu_tx_t *tx; 4899 void *buf; 4900 blkptr_t blk; 4901 int copies = 2 * ZIO_DEDUPDITTO_MIN; 4902 4903 blocksize = ztest_random_blocksize(); 4904 blocksize = MIN(blocksize, 2048); /* because we write so many */ 4905 4906 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_UINT64_OTHER, blocksize, 0); 4907 4908 if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0) 4909 return; 4910 4911 /* 4912 * Take the name lock as writer to prevent anyone else from changing 4913 * the pool and dataset properties we need to maintain during this test. 4914 */ 4915 (void) rw_wrlock(&ztest_name_lock); 4916 4917 if (ztest_dsl_prop_set_uint64(zd->zd_name, ZFS_PROP_DEDUP, checksum, 4918 B_FALSE) != 0 || 4919 ztest_dsl_prop_set_uint64(zd->zd_name, ZFS_PROP_COPIES, 1, 4920 B_FALSE) != 0) { 4921 (void) rw_unlock(&ztest_name_lock); 4922 return; 4923 } 4924 4925 object = od[0].od_object; 4926 blocksize = od[0].od_blocksize; 4927 pattern = zs->zs_guid ^ dmu_objset_fsid_guid(os); 4928 4929 ASSERT(object != 0); 4930 4931 tx = dmu_tx_create(os); 4932 dmu_tx_hold_write(tx, object, 0, copies * blocksize); 4933 txg = ztest_tx_assign(tx, TXG_WAIT, FTAG); 4934 if (txg == 0) { 4935 (void) rw_unlock(&ztest_name_lock); 4936 return; 4937 } 4938 4939 /* 4940 * Write all the copies of our block. 4941 */ 4942 for (int i = 0; i < copies; i++) { 4943 uint64_t offset = i * blocksize; 4944 VERIFY0(dmu_buf_hold(os, object, offset, FTAG, &db, 4945 DMU_READ_NO_PREFETCH)); 4946 ASSERT(db->db_offset == offset); 4947 ASSERT(db->db_size == blocksize); 4948 ASSERT(ztest_pattern_match(db->db_data, db->db_size, pattern) || 4949 ztest_pattern_match(db->db_data, db->db_size, 0ULL)); 4950 dmu_buf_will_fill(db, tx); 4951 ztest_pattern_set(db->db_data, db->db_size, pattern); 4952 dmu_buf_rele(db, FTAG); 4953 } 4954 4955 dmu_tx_commit(tx); 4956 txg_wait_synced(spa_get_dsl(spa), txg); 4957 4958 /* 4959 * Find out what block we got. 4960 */ 4961 VERIFY0(dmu_buf_hold(os, object, 0, FTAG, &db, 4962 DMU_READ_NO_PREFETCH)); 4963 blk = *((dmu_buf_impl_t *)db)->db_blkptr; 4964 dmu_buf_rele(db, FTAG); 4965 4966 /* 4967 * Damage the block. Dedup-ditto will save us when we read it later.
4968 */ 4969 psize = BP_GET_PSIZE(&blk); 4970 buf = zio_buf_alloc(psize); 4971 ztest_pattern_set(buf, psize, ~pattern); 4972 4973 (void) zio_wait(zio_rewrite(NULL, spa, 0, &blk, 4974 buf, psize, NULL, NULL, ZIO_PRIORITY_SYNC_WRITE, 4975 ZIO_FLAG_CANFAIL | ZIO_FLAG_INDUCE_DAMAGE, NULL)); 4976 4977 zio_buf_free(buf, psize); 4978 4979 (void) rw_unlock(&ztest_name_lock); 4980} 4981 4982/* 4983 * Scrub the pool. 4984 */ 4985/* ARGSUSED */ 4986void 4987ztest_scrub(ztest_ds_t *zd, uint64_t id) 4988{ 4989 spa_t *spa = ztest_spa; 4990 4991 (void) spa_scan(spa, POOL_SCAN_SCRUB); 4992 (void) poll(NULL, 0, 100); /* wait a moment, then force a restart */ 4993 (void) spa_scan(spa, POOL_SCAN_SCRUB); 4994} 4995 4996/* 4997 * Change the guid for the pool. 4998 */ 4999/* ARGSUSED */ 5000void 5001ztest_reguid(ztest_ds_t *zd, uint64_t id) 5002{ 5003 spa_t *spa = ztest_spa; 5004 uint64_t orig, load; 5005 int error; 5006 5007 orig = spa_guid(spa); 5008 load = spa_load_guid(spa); 5009 5010 (void) rw_wrlock(&ztest_name_lock); 5011 error = spa_change_guid(spa); 5012 (void) rw_unlock(&ztest_name_lock); 5013 5014 if (error != 0) 5015 return; 5016 5017 if (ztest_opts.zo_verbose >= 4) { 5018 (void) printf("Changed guid old %llu -> %llu\n", 5019 (u_longlong_t)orig, (u_longlong_t)spa_guid(spa)); 5020 } 5021 5022 VERIFY3U(orig, !=, spa_guid(spa)); 5023 VERIFY3U(load, ==, spa_load_guid(spa)); 5024} 5025 5026/* 5027 * Rename the pool to a different name and then rename it back. 5028 */ 5029/* ARGSUSED */ 5030void 5031ztest_spa_rename(ztest_ds_t *zd, uint64_t id) 5032{ 5033 char *oldname, *newname; 5034 spa_t *spa; 5035 5036 (void) rw_wrlock(&ztest_name_lock); 5037 5038 oldname = ztest_opts.zo_pool; 5039 newname = umem_alloc(strlen(oldname) + 5, UMEM_NOFAIL); 5040 (void) strcpy(newname, oldname); 5041 (void) strcat(newname, "_tmp"); 5042 5043 /* 5044 * Do the rename 5045 */ 5046 VERIFY3U(0, ==, spa_rename(oldname, newname)); 5047 5048 /* 5049 * Try to open it under the old name, which shouldn't exist 5050 */ 5051 VERIFY3U(ENOENT, ==, spa_open(oldname, &spa, FTAG)); 5052 5053 /* 5054 * Open it under the new name and make sure it's still the same spa_t. 5055 */ 5056 VERIFY3U(0, ==, spa_open(newname, &spa, FTAG)); 5057 5058 ASSERT(spa == ztest_spa); 5059 spa_close(spa, FTAG); 5060 5061 /* 5062 * Rename it back to the original 5063 */ 5064 VERIFY3U(0, ==, spa_rename(newname, oldname)); 5065 5066 /* 5067 * Make sure it can still be opened 5068 */ 5069 VERIFY3U(0, ==, spa_open(oldname, &spa, FTAG)); 5070 5071 ASSERT(spa == ztest_spa); 5072 spa_close(spa, FTAG); 5073 5074 umem_free(newname, strlen(newname) + 1); 5075 5076 (void) rw_unlock(&ztest_name_lock); 5077} 5078 5079/* 5080 * Verify pool integrity by running zdb. 5081 */ 5082static void 5083ztest_run_zdb(char *pool) 5084{ 5085 int status; 5086 char zdb[MAXPATHLEN + MAXNAMELEN + 20]; 5087 char zbuf[1024]; 5088 char *bin; 5089 char *ztest; 5090 char *isa; 5091 int isalen; 5092 FILE *fp; 5093 5094 strlcpy(zdb, "/usr/bin/ztest", sizeof(zdb)); 5095 5096 /* zdb lives in /usr/sbin, while ztest lives in /usr/bin */ 5097 bin = strstr(zdb, "/usr/bin/"); 5098 ztest = strstr(bin, "/ztest"); 5099 isa = bin + 8; 5100 isalen = ztest - isa; 5101 isa = strdup(isa); 5102 /* LINTED */ 5103 (void) sprintf(bin, 5104 "/usr/sbin%.*s/zdb -bcc%s%s -U %s %s", 5105 isalen, 5106 isa, 5107 ztest_opts.zo_verbose >= 3 ? "s" : "", 5108 ztest_opts.zo_verbose >= 4 ? 
"v" : "", 5109 spa_config_path, 5110 pool); 5111 free(isa); 5112 5113 if (ztest_opts.zo_verbose >= 5) 5114 (void) printf("Executing %s\n", strstr(zdb, "zdb ")); 5115 5116 fp = popen(zdb, "r"); 5117 assert(fp != NULL); 5118 5119 while (fgets(zbuf, sizeof (zbuf), fp) != NULL) 5120 if (ztest_opts.zo_verbose >= 3) 5121 (void) printf("%s", zbuf); 5122 5123 status = pclose(fp); 5124 5125 if (status == 0) 5126 return; 5127 5128 ztest_dump_core = 0; 5129 if (WIFEXITED(status)) 5130 fatal(0, "'%s' exit code %d", zdb, WEXITSTATUS(status)); 5131 else 5132 fatal(0, "'%s' died with signal %d", zdb, WTERMSIG(status)); 5133} 5134 5135static void 5136ztest_walk_pool_directory(char *header) 5137{ 5138 spa_t *spa = NULL; 5139 5140 if (ztest_opts.zo_verbose >= 6) 5141 (void) printf("%s\n", header); 5142 5143 mutex_enter(&spa_namespace_lock); 5144 while ((spa = spa_next(spa)) != NULL) 5145 if (ztest_opts.zo_verbose >= 6) 5146 (void) printf("\t%s\n", spa_name(spa)); 5147 mutex_exit(&spa_namespace_lock); 5148} 5149 5150static void 5151ztest_spa_import_export(char *oldname, char *newname) 5152{ 5153 nvlist_t *config, *newconfig; 5154 uint64_t pool_guid; 5155 spa_t *spa; 5156 5157 if (ztest_opts.zo_verbose >= 4) { 5158 (void) printf("import/export: old = %s, new = %s\n", 5159 oldname, newname); 5160 } 5161 5162 /* 5163 * Clean up from previous runs. 5164 */ 5165 (void) spa_destroy(newname); 5166 5167 /* 5168 * Get the pool's configuration and guid. 5169 */ 5170 VERIFY3U(0, ==, spa_open(oldname, &spa, FTAG)); 5171 5172 /* 5173 * Kick off a scrub to tickle scrub/export races. 5174 */ 5175 if (ztest_random(2) == 0) 5176 (void) spa_scan(spa, POOL_SCAN_SCRUB); 5177 5178 pool_guid = spa_guid(spa); 5179 spa_close(spa, FTAG); 5180 5181 ztest_walk_pool_directory("pools before export"); 5182 5183 /* 5184 * Export it. 5185 */ 5186 VERIFY3U(0, ==, spa_export(oldname, &config, B_FALSE, B_FALSE)); 5187 5188 ztest_walk_pool_directory("pools after export"); 5189 5190 /* 5191 * Try to import it. 5192 */ 5193 newconfig = spa_tryimport(config); 5194 ASSERT(newconfig != NULL); 5195 nvlist_free(newconfig); 5196 5197 /* 5198 * Import it under the new name. 5199 */ 5200 VERIFY3U(0, ==, spa_import(newname, config, NULL, 0)); 5201 5202 ztest_walk_pool_directory("pools after import"); 5203 5204 /* 5205 * Try to import it again -- should fail with EEXIST. 5206 */ 5207 VERIFY3U(EEXIST, ==, spa_import(newname, config, NULL, 0)); 5208 5209 /* 5210 * Try to import it under a different name -- should fail with EEXIST. 5211 */ 5212 VERIFY3U(EEXIST, ==, spa_import(oldname, config, NULL, 0)); 5213 5214 /* 5215 * Verify that the pool is no longer visible under the old name. 5216 */ 5217 VERIFY3U(ENOENT, ==, spa_open(oldname, &spa, FTAG)); 5218 5219 /* 5220 * Verify that we can open and close the pool using the new name. 
5221 */ 5222 VERIFY3U(0, ==, spa_open(newname, &spa, FTAG)); 5223 ASSERT(pool_guid == spa_guid(spa)); 5224 spa_close(spa, FTAG); 5225 5226 nvlist_free(config); 5227} 5228 5229static void 5230ztest_resume(spa_t *spa) 5231{ 5232 if (spa_suspended(spa) && ztest_opts.zo_verbose >= 6) 5233 (void) printf("resuming from suspended state\n"); 5234 spa_vdev_state_enter(spa, SCL_NONE); 5235 vdev_clear(spa, NULL); 5236 (void) spa_vdev_state_exit(spa, NULL, 0); 5237 (void) zio_resume(spa); 5238} 5239 5240static void * 5241ztest_resume_thread(void *arg) 5242{ 5243 spa_t *spa = arg; 5244 5245 while (!ztest_exiting) { 5246 if (spa_suspended(spa)) 5247 ztest_resume(spa); 5248 (void) poll(NULL, 0, 100); 5249 } 5250 return (NULL); 5251} 5252 5253static void * 5254ztest_deadman_thread(void *arg) 5255{ 5256 ztest_shared_t *zs = arg; 5257 int grace = 300; 5258 hrtime_t delta; 5259 5260 delta = (zs->zs_thread_stop - zs->zs_thread_start) / NANOSEC + grace; 5261 5262 (void) poll(NULL, 0, (int)(1000 * delta)); 5263 5264 fatal(0, "failed to complete within %d seconds of deadline", grace); 5265 5266 return (NULL); 5267} 5268 5269static void 5270ztest_execute(int test, ztest_info_t *zi, uint64_t id) 5271{ 5272 ztest_ds_t *zd = &ztest_ds[id % ztest_opts.zo_datasets]; 5273 ztest_shared_callstate_t *zc = ZTEST_GET_SHARED_CALLSTATE(test); 5274 hrtime_t functime = gethrtime(); 5275 5276 for (int i = 0; i < zi->zi_iters; i++) 5277 zi->zi_func(zd, id); 5278 5279 functime = gethrtime() - functime; 5280 5281 atomic_add_64(&zc->zc_count, 1); 5282 atomic_add_64(&zc->zc_time, functime); 5283 5284 if (ztest_opts.zo_verbose >= 4) { 5285 Dl_info dli; 5286 (void) dladdr((void *)zi->zi_func, &dli); 5287 (void) printf("%6.2f sec in %s\n", 5288 (double)functime / NANOSEC, dli.dli_sname); 5289 } 5290} 5291 5292static void * 5293ztest_thread(void *arg) 5294{ 5295 int rand; 5296 uint64_t id = (uintptr_t)arg; 5297 ztest_shared_t *zs = ztest_shared; 5298 uint64_t call_next; 5299 hrtime_t now; 5300 ztest_info_t *zi; 5301 ztest_shared_callstate_t *zc; 5302 5303 while ((now = gethrtime()) < zs->zs_thread_stop) { 5304 /* 5305 * See if it's time to force a crash. 5306 */ 5307 if (now > zs->zs_thread_kill) 5308 ztest_kill(zs); 5309 5310 /* 5311 * If we're getting ENOSPC with some regularity, stop. 5312 */ 5313 if (zs->zs_enospc_count > 10) 5314 break; 5315 5316 /* 5317 * Pick a random function to execute. 5318 */ 5319 rand = ztest_random(ZTEST_FUNCS); 5320 zi = &ztest_info[rand]; 5321 zc = ZTEST_GET_SHARED_CALLSTATE(rand); 5322 call_next = zc->zc_next; 5323 5324 if (now >= call_next && 5325 atomic_cas_64(&zc->zc_next, call_next, call_next + 5326 ztest_random(2 * zi->zi_interval[0] + 1)) == call_next) { 5327 ztest_execute(rand, zi, id); 5328 } 5329 } 5330 5331 return (NULL); 5332} 5333 5334static void 5335ztest_dataset_name(char *dsname, char *pool, int d) 5336{ 5337 (void) snprintf(dsname, MAXNAMELEN, "%s/ds_%d", pool, d); 5338} 5339 5340static void 5341ztest_dataset_destroy(int d) 5342{ 5343 char name[MAXNAMELEN]; 5344 5345 ztest_dataset_name(name, ztest_opts.zo_pool, d); 5346 5347 if (ztest_opts.zo_verbose >= 3) 5348 (void) printf("Destroying %s to free up space\n", name); 5349 5350 /* 5351 * Cleanup any non-standard clones and snapshots. In general, 5352 * ztest thread t operates on dataset (t % zopt_datasets), 5353 * so there may be more than one thing to clean up. 
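/*
 * Illustrative sketch (not part of the original source or build): the
 * lock-free pacing idiom used by ztest_thread() above.  Every test function
 * has a shared zc_next timestamp; a thread that randomly picks the function
 * may run it only if it wins the compare-and-swap that pushes zc_next
 * forward by a random amount, so on average each function runs once per
 * zi_interval regardless of how many threads keep drawing it.  The helper
 * below is hypothetical and just restates that pattern.
 */
static boolean_t
claim_next_call_sketch(uint64_t *nextp, uint64_t now, uint64_t bump)
{
	uint64_t next = *nextp;

	if (now < next)
		return (B_FALSE);	/* this function is not due yet */

	/* only one of the racing threads sees the old value here */
	return (atomic_cas_64(nextp, next, next + bump) == next);
}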
5354 */ 5355 for (int t = d; t < ztest_opts.zo_threads; 5356 t += ztest_opts.zo_datasets) { 5357 ztest_dsl_dataset_cleanup(name, t); 5358 } 5359 5360 (void) dmu_objset_find(name, ztest_objset_destroy_cb, NULL, 5361 DS_FIND_SNAPSHOTS | DS_FIND_CHILDREN); 5362} 5363 5364static void 5365ztest_dataset_dirobj_verify(ztest_ds_t *zd) 5366{ 5367 uint64_t usedobjs, dirobjs, scratch; 5368 5369 /* 5370 * ZTEST_DIROBJ is the object directory for the entire dataset. 5371 * Therefore, the number of objects in use should equal the 5372 * number of ZTEST_DIROBJ entries, +1 for ZTEST_DIROBJ itself. 5373 * If not, we have an object leak. 5374 * 5375 * Note that we can only check this in ztest_dataset_open(), 5376 * when the open-context and syncing-context values agree. 5377 * That's because zap_count() returns the open-context value, 5378 * while dmu_objset_space() returns the rootbp fill count. 5379 */ 5380 VERIFY3U(0, ==, zap_count(zd->zd_os, ZTEST_DIROBJ, &dirobjs)); 5381 dmu_objset_space(zd->zd_os, &scratch, &scratch, &usedobjs, &scratch); 5382 ASSERT3U(dirobjs + 1, ==, usedobjs); 5383} 5384 5385static int 5386ztest_dataset_open(int d) 5387{ 5388 ztest_ds_t *zd = &ztest_ds[d]; 5389 uint64_t committed_seq = ZTEST_GET_SHARED_DS(d)->zd_seq; 5390 objset_t *os; 5391 zilog_t *zilog; 5392 char name[MAXNAMELEN]; 5393 int error; 5394 5395 ztest_dataset_name(name, ztest_opts.zo_pool, d); 5396 5397 (void) rw_rdlock(&ztest_name_lock); 5398 5399 error = ztest_dataset_create(name); 5400 if (error == ENOSPC) { 5401 (void) rw_unlock(&ztest_name_lock); 5402 ztest_record_enospc(FTAG); 5403 return (error); 5404 } 5405 ASSERT(error == 0 || error == EEXIST); 5406 5407 VERIFY0(dmu_objset_hold(name, zd, &os)); 5408 (void) rw_unlock(&ztest_name_lock); 5409 5410 ztest_zd_init(zd, ZTEST_GET_SHARED_DS(d), os); 5411 5412 zilog = zd->zd_zilog; 5413 5414 if (zilog->zl_header->zh_claim_lr_seq != 0 && 5415 zilog->zl_header->zh_claim_lr_seq < committed_seq) 5416 fatal(0, "missing log records: claimed %llu < committed %llu", 5417 zilog->zl_header->zh_claim_lr_seq, committed_seq); 5418 5419 ztest_dataset_dirobj_verify(zd); 5420 5421 zil_replay(os, zd, ztest_replay_vector); 5422 5423 ztest_dataset_dirobj_verify(zd); 5424 5425 if (ztest_opts.zo_verbose >= 6) 5426 (void) printf("%s replay %llu blocks, %llu records, seq %llu\n", 5427 zd->zd_name, 5428 (u_longlong_t)zilog->zl_parse_blk_count, 5429 (u_longlong_t)zilog->zl_parse_lr_count, 5430 (u_longlong_t)zilog->zl_replaying_seq); 5431 5432 zilog = zil_open(os, ztest_get_data); 5433 5434 if (zilog->zl_replaying_seq != 0 && 5435 zilog->zl_replaying_seq < committed_seq) 5436 fatal(0, "missing log records: replayed %llu < committed %llu", 5437 zilog->zl_replaying_seq, committed_seq); 5438 5439 return (0); 5440} 5441 5442static void 5443ztest_dataset_close(int d) 5444{ 5445 ztest_ds_t *zd = &ztest_ds[d]; 5446 5447 zil_close(zd->zd_zilog); 5448 dmu_objset_rele(zd->zd_os, zd); 5449 5450 ztest_zd_fini(zd); 5451} 5452 5453/* 5454 * Kick off threads to run tests on all datasets in parallel. 5455 */ 5456static void 5457ztest_run(ztest_shared_t *zs) 5458{ 5459 thread_t *tid; 5460 spa_t *spa; 5461 objset_t *os; 5462 thread_t resume_tid; 5463 int error; 5464 5465 ztest_exiting = B_FALSE; 5466 5467 /* 5468 * Initialize parent/child shared state. 
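/*
 * Added commentary (not in the original source): zd_seq in the shared
 * per-dataset state is the highest ZIL commit sequence this dataset had
 * acknowledged before the previous process was killed.  After reopening,
 * both the claimed sequence recorded in the ZIL header and, once replay
 * finishes, zl_replaying_seq must be at least that value; anything smaller
 * means committed log records disappeared, which is exactly what the two
 * fatal() checks in ztest_dataset_open() above catch.
 */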
5469 */ 5470 VERIFY(_mutex_init(&ztest_vdev_lock, USYNC_THREAD, NULL) == 0); 5471 VERIFY(rwlock_init(&ztest_name_lock, USYNC_THREAD, NULL) == 0); 5472 5473 zs->zs_thread_start = gethrtime(); 5474 zs->zs_thread_stop = 5475 zs->zs_thread_start + ztest_opts.zo_passtime * NANOSEC; 5476 zs->zs_thread_stop = MIN(zs->zs_thread_stop, zs->zs_proc_stop); 5477 zs->zs_thread_kill = zs->zs_thread_stop; 5478 if (ztest_random(100) < ztest_opts.zo_killrate) { 5479 zs->zs_thread_kill -= 5480 ztest_random(ztest_opts.zo_passtime * NANOSEC); 5481 } 5482 5483 (void) _mutex_init(&zcl.zcl_callbacks_lock, USYNC_THREAD, NULL); 5484 5485 list_create(&zcl.zcl_callbacks, sizeof (ztest_cb_data_t), 5486 offsetof(ztest_cb_data_t, zcd_node)); 5487 5488 /* 5489 * Open our pool. 5490 */ 5491 kernel_init(FREAD | FWRITE); 5492 VERIFY(spa_open(ztest_opts.zo_pool, &spa, FTAG) == 0); 5493 spa->spa_debug = B_TRUE; 5494 ztest_spa = spa; 5495 5496 VERIFY3U(0, ==, dmu_objset_hold(ztest_opts.zo_pool, FTAG, &os)); 5497 zs->zs_guid = dmu_objset_fsid_guid(os); 5498 dmu_objset_rele(os, FTAG); 5499 5500 spa->spa_dedup_ditto = 2 * ZIO_DEDUPDITTO_MIN; 5501 5502 /* 5503 * We don't expect the pool to suspend unless maxfaults == 0, 5504 * in which case ztest_fault_inject() temporarily takes away 5505 * the only valid replica. 5506 */ 5507 if (MAXFAULTS() == 0) 5508 spa->spa_failmode = ZIO_FAILURE_MODE_WAIT; 5509 else 5510 spa->spa_failmode = ZIO_FAILURE_MODE_PANIC; 5511 5512 /* 5513 * Create a thread to periodically resume suspended I/O. 5514 */ 5515 VERIFY(thr_create(0, 0, ztest_resume_thread, spa, THR_BOUND, 5516 &resume_tid) == 0); 5517 5518 /* 5519 * Create a deadman thread to abort() if we hang. 5520 */ 5521 VERIFY(thr_create(0, 0, ztest_deadman_thread, zs, THR_BOUND, 5522 NULL) == 0); 5523 5524 /* 5525 * Verify that we can safely inquire about any object, 5526 * whether it's allocated or not. To make it interesting, 5527 * we probe a 5-wide window around each power of two. 5528 * This hits all edge cases, including zero and the max. 5529 */ 5530 for (int t = 0; t < 64; t++) { 5531 for (int d = -5; d <= 5; d++) { 5532 error = dmu_object_info(spa->spa_meta_objset, 5533 (1ULL << t) + d, NULL); 5534 ASSERT(error == 0 || error == ENOENT || 5535 error == EINVAL); 5536 } 5537 } 5538 5539 /* 5540 * If we got any ENOSPC errors on the previous run, destroy something. 5541 */ 5542 if (zs->zs_enospc_count != 0) { 5543 int d = ztest_random(ztest_opts.zo_datasets); 5544 ztest_dataset_destroy(d); 5545 } 5546 zs->zs_enospc_count = 0; 5547 5548 tid = umem_zalloc(ztest_opts.zo_threads * sizeof (thread_t), 5549 UMEM_NOFAIL); 5550 5551 if (ztest_opts.zo_verbose >= 4) 5552 (void) printf("starting main threads...\n"); 5553 5554 /* 5555 * Kick off all the tests that run in parallel. 5556 */ 5557 for (int t = 0; t < ztest_opts.zo_threads; t++) { 5558 if (t < ztest_opts.zo_datasets && 5559 ztest_dataset_open(t) != 0) 5560 return; 5561 VERIFY(thr_create(0, 0, ztest_thread, (void *)(uintptr_t)t, 5562 THR_BOUND, &tid[t]) == 0); 5563 } 5564 5565 /* 5566 * Wait for all of the tests to complete. We go in reverse order 5567 * so we don't close datasets while threads are still using them.
5568 */ 5569 for (int t = ztest_opts.zo_threads - 1; t >= 0; t--) { 5570 VERIFY(thr_join(tid[t], NULL, NULL) == 0); 5571 if (t < ztest_opts.zo_datasets) 5572 ztest_dataset_close(t); 5573 } 5574 5575 txg_wait_synced(spa_get_dsl(spa), 0); 5576 5577 zs->zs_alloc = metaslab_class_get_alloc(spa_normal_class(spa)); 5578 zs->zs_space = metaslab_class_get_space(spa_normal_class(spa)); 5579 5580 umem_free(tid, ztest_opts.zo_threads * sizeof (thread_t)); 5581 5582 /* Kill the resume thread */ 5583 ztest_exiting = B_TRUE; 5584 VERIFY(thr_join(resume_tid, NULL, NULL) == 0); 5585 ztest_resume(spa); 5586 5587 /* 5588 * Right before closing the pool, kick off a bunch of async I/O; 5589 * spa_close() should wait for it to complete. 5590 */ 5591 for (uint64_t object = 1; object < 50; object++) 5592 dmu_prefetch(spa->spa_meta_objset, object, 0, 1ULL << 20); 5593 5594 spa_close(spa, FTAG); 5595 5596 /* 5597 * Verify that we can loop over all pools. 5598 */ 5599 mutex_enter(&spa_namespace_lock); 5600 for (spa = spa_next(NULL); spa != NULL; spa = spa_next(spa)) 5601 if (ztest_opts.zo_verbose > 3) 5602 (void) printf("spa_next: found %s\n", spa_name(spa)); 5603 mutex_exit(&spa_namespace_lock); 5604 5605 /* 5606 * Verify that we can export the pool and reimport it under a 5607 * different name. 5608 */ 5609 if (ztest_random(2) == 0) { 5610 char name[MAXNAMELEN]; 5611 (void) snprintf(name, MAXNAMELEN, "%s_import", 5612 ztest_opts.zo_pool); 5613 ztest_spa_import_export(ztest_opts.zo_pool, name); 5614 ztest_spa_import_export(name, ztest_opts.zo_pool); 5615 } 5616 5617 kernel_fini(); 5618 5619 list_destroy(&zcl.zcl_callbacks); 5620 5621 (void) _mutex_destroy(&zcl.zcl_callbacks_lock); 5622 5623 (void) rwlock_destroy(&ztest_name_lock); 5624 (void) _mutex_destroy(&ztest_vdev_lock); 5625} 5626 5627static void 5628ztest_freeze(void) 5629{ 5630 ztest_ds_t *zd = &ztest_ds[0]; 5631 spa_t *spa; 5632 int numloops = 0; 5633 5634 if (ztest_opts.zo_verbose >= 3) 5635 (void) printf("testing spa_freeze()...\n"); 5636 5637 kernel_init(FREAD | FWRITE); 5638 VERIFY3U(0, ==, spa_open(ztest_opts.zo_pool, &spa, FTAG)); 5639 VERIFY3U(0, ==, ztest_dataset_open(0)); 5640 spa->spa_debug = B_TRUE; 5641 ztest_spa = spa; 5642 5643 /* 5644 * Force the first log block to be transactionally allocated. 5645 * We have to do this before we freeze the pool -- otherwise 5646 * the log chain won't be anchored. 5647 */ 5648 while (BP_IS_HOLE(&zd->zd_zilog->zl_header->zh_log)) { 5649 ztest_dmu_object_alloc_free(zd, 0); 5650 zil_commit(zd->zd_zilog, 0); 5651 } 5652 5653 txg_wait_synced(spa_get_dsl(spa), 0); 5654 5655 /* 5656 * Freeze the pool. This stops spa_sync() from doing anything, 5657 * so that the only way to record changes from now on is the ZIL. 5658 */ 5659 spa_freeze(spa); 5660 5661 /* 5662 * Run tests that generate log records but don't alter the pool config 5663 * or depend on DSL sync tasks (snapshots, objset create/destroy, etc). 5664 * We do a txg_wait_synced() after each iteration to force the txg 5665 * to increase well beyond the last synced value in the uberblock. 5666 * The ZIL should be OK with that. 5667 */ 5668 while (ztest_random(10) != 0 && 5669 numloops++ < ztest_opts.zo_maxloops) { 5670 ztest_dmu_write_parallel(zd, 0); 5671 ztest_dmu_object_alloc_free(zd, 0); 5672 txg_wait_synced(spa_get_dsl(spa), 0); 5673 } 5674 5675 /* 5676 * Commit all of the changes we just generated. 5677 */ 5678 zil_commit(zd->zd_zilog, 0); 5679 txg_wait_synced(spa_get_dsl(spa), 0); 5680 5681 /* 5682 * Close our dataset and close the pool. 
5683 */ 5684 ztest_dataset_close(0); 5685 spa_close(spa, FTAG); 5686 kernel_fini(); 5687 5688 /* 5689 * Open and close the pool and dataset to induce log replay. 5690 */ 5691 kernel_init(FREAD | FWRITE); 5692 VERIFY3U(0, ==, spa_open(ztest_opts.zo_pool, &spa, FTAG)); 5693 ASSERT(spa_freeze_txg(spa) == UINT64_MAX); 5694 VERIFY3U(0, ==, ztest_dataset_open(0)); 5695 ztest_dataset_close(0); 5696 5697 spa->spa_debug = B_TRUE; 5698 ztest_spa = spa; 5699 txg_wait_synced(spa_get_dsl(spa), 0); 5700 ztest_reguid(NULL, 0); 5701 5702 spa_close(spa, FTAG); 5703 kernel_fini(); 5704} 5705 5706void 5707print_time(hrtime_t t, char *timebuf) 5708{ 5709 hrtime_t s = t / NANOSEC; 5710 hrtime_t m = s / 60; 5711 hrtime_t h = m / 60; 5712 hrtime_t d = h / 24; 5713 5714 s -= m * 60; 5715 m -= h * 60; 5716 h -= d * 24; 5717 5718 timebuf[0] = '\0'; 5719 5720 if (d) 5721 (void) sprintf(timebuf, 5722 "%llud%02lluh%02llum%02llus", d, h, m, s); 5723 else if (h) 5724 (void) sprintf(timebuf, "%lluh%02llum%02llus", h, m, s); 5725 else if (m) 5726 (void) sprintf(timebuf, "%llum%02llus", m, s); 5727 else 5728 (void) sprintf(timebuf, "%llus", s); 5729} 5730 5731static nvlist_t * 5732make_random_props() 5733{ 5734 nvlist_t *props; 5735 5736 VERIFY(nvlist_alloc(&props, NV_UNIQUE_NAME, 0) == 0); 5737 if (ztest_random(2) == 0) 5738 return (props); 5739 VERIFY(nvlist_add_uint64(props, "autoreplace", 1) == 0); 5740 5741 return (props); 5742} 5743 5744/* 5745 * Create a storage pool with the given name and initial vdev size. 5746 * Then test spa_freeze() functionality. 5747 */ 5748static void 5749ztest_init(ztest_shared_t *zs) 5750{ 5751 spa_t *spa; 5752 nvlist_t *nvroot, *props; 5753 5754 VERIFY(_mutex_init(&ztest_vdev_lock, USYNC_THREAD, NULL) == 0); 5755 VERIFY(rwlock_init(&ztest_name_lock, USYNC_THREAD, NULL) == 0); 5756 5757 kernel_init(FREAD | FWRITE); 5758 5759 /* 5760 * Create the storage pool. 
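/*
 * Added commentary (not in the original source), a worked example of
 * print_time() above: t equal to 3723 seconds worth of nanoseconds gives
 * s = 3723, m = 62, h = 1, d = 0; after the subtractions s = 3, m = 2,
 * h = 1, so the buffer comes out as "1h02m03s".  A plain 45-second value
 * prints simply as "45s".
 */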
5761 */ 5762 (void) spa_destroy(ztest_opts.zo_pool); 5763 ztest_shared->zs_vdev_next_leaf = 0; 5764 zs->zs_splits = 0; 5765 zs->zs_mirrors = ztest_opts.zo_mirrors; 5766 nvroot = make_vdev_root(NULL, NULL, NULL, ztest_opts.zo_vdev_size, 0, 5767 0, ztest_opts.zo_raidz, zs->zs_mirrors, 1); 5768 props = make_random_props(); 5769 for (int i = 0; i < SPA_FEATURES; i++) { 5770 char buf[1024]; 5771 (void) snprintf(buf, sizeof (buf), "feature@%s", 5772 spa_feature_table[i].fi_uname); 5773 VERIFY3U(0, ==, nvlist_add_uint64(props, buf, 0)); 5774 } 5775 VERIFY3U(0, ==, spa_create(ztest_opts.zo_pool, nvroot, props, 5776 NULL, NULL)); 5777 nvlist_free(nvroot); 5778 5779 VERIFY3U(0, ==, spa_open(ztest_opts.zo_pool, &spa, FTAG)); 5780 zs->zs_metaslab_sz = 5781 1ULL << spa->spa_root_vdev->vdev_child[0]->vdev_ms_shift; 5782 5783 spa_close(spa, FTAG); 5784 5785 kernel_fini(); 5786 5787 ztest_run_zdb(ztest_opts.zo_pool); 5788 5789 ztest_freeze(); 5790 5791 ztest_run_zdb(ztest_opts.zo_pool); 5792 5793 (void) rwlock_destroy(&ztest_name_lock); 5794 (void) _mutex_destroy(&ztest_vdev_lock); 5795} 5796 5797static void 5798setup_fds(void) 5799{ 5800 int fd; 5801#ifdef illumos 5802 5803 char *tmp = tempnam(NULL, NULL); 5804 fd = open(tmp, O_RDWR | O_CREAT, 0700); 5805 ASSERT3U(fd, ==, ZTEST_FD_DATA); 5806 (void) unlink(tmp); 5807 free(tmp); 5808#else 5809 char tmp[MAXPATHLEN]; 5810 5811 strlcpy(tmp, ztest_opts.zo_dir, MAXPATHLEN); 5812 strlcat(tmp, "/ztest.XXXXXX", MAXPATHLEN); 5813 fd = mkstemp(tmp); 5814 ASSERT3U(fd, ==, ZTEST_FD_DATA); 5815#endif 5816 5817 fd = open("/dev/urandom", O_RDONLY); 5818 ASSERT3U(fd, ==, ZTEST_FD_RAND); 5819} 5820 5821static int 5822shared_data_size(ztest_shared_hdr_t *hdr) 5823{ 5824 int size; 5825 5826 size = hdr->zh_hdr_size; 5827 size += hdr->zh_opts_size; 5828 size += hdr->zh_size; 5829 size += hdr->zh_stats_size * hdr->zh_stats_count; 5830 size += hdr->zh_ds_size * hdr->zh_ds_count; 5831 5832 return (size); 5833} 5834 5835static void 5836setup_hdr(void) 5837{ 5838 int size; 5839 ztest_shared_hdr_t *hdr; 5840 5841#ifndef illumos 5842 pwrite(ZTEST_FD_DATA, "", 1, 0); 5843#endif 5844 5845 hdr = (void *)mmap(0, P2ROUNDUP(sizeof (*hdr), getpagesize()), 5846 PROT_READ | PROT_WRITE, MAP_SHARED, ZTEST_FD_DATA, 0); 5847 ASSERT(hdr != MAP_FAILED); 5848 5849 VERIFY3U(0, ==, ftruncate(ZTEST_FD_DATA, sizeof (ztest_shared_hdr_t))); 5850 5851 hdr->zh_hdr_size = sizeof (ztest_shared_hdr_t); 5852 hdr->zh_opts_size = sizeof (ztest_shared_opts_t); 5853 hdr->zh_size = sizeof (ztest_shared_t); 5854 hdr->zh_stats_size = sizeof (ztest_shared_callstate_t); 5855 hdr->zh_stats_count = ZTEST_FUNCS; 5856 hdr->zh_ds_size = sizeof (ztest_shared_ds_t); 5857 hdr->zh_ds_count = ztest_opts.zo_datasets; 5858 5859 size = shared_data_size(hdr); 5860 VERIFY3U(0, ==, ftruncate(ZTEST_FD_DATA, size)); 5861 5862 (void) munmap((caddr_t)hdr, P2ROUNDUP(sizeof (*hdr), getpagesize())); 5863} 5864 5865static void 5866setup_data(void) 5867{ 5868 int size, offset; 5869 ztest_shared_hdr_t *hdr; 5870 uint8_t *buf; 5871 5872 hdr = (void *)mmap(0, P2ROUNDUP(sizeof (*hdr), getpagesize()), 5873 PROT_READ, MAP_SHARED, ZTEST_FD_DATA, 0); 5874 ASSERT(hdr != MAP_FAILED); 5875 5876 size = shared_data_size(hdr); 5877 5878 (void) munmap((caddr_t)hdr, P2ROUNDUP(sizeof (*hdr), getpagesize())); 5879 hdr = ztest_shared_hdr = (void *)mmap(0, P2ROUNDUP(size, getpagesize()), 5880 PROT_READ | PROT_WRITE, MAP_SHARED, ZTEST_FD_DATA, 0); 5881 ASSERT(hdr != MAP_FAILED); 5882 buf = (uint8_t *)hdr; 5883 5884 offset = hdr->zh_hdr_size; 5885 ztest_shared_opts 
= (void *)&buf[offset]; 5886 offset += hdr->zh_opts_size; 5887 ztest_shared = (void *)&buf[offset]; 5888 offset += hdr->zh_size; 5889 ztest_shared_callstate = (void *)&buf[offset]; 5890 offset += hdr->zh_stats_size * hdr->zh_stats_count; 5891 ztest_shared_ds = (void *)&buf[offset]; 5892} 5893 5894static boolean_t 5895exec_child(char *cmd, char *libpath, boolean_t ignorekill, int *statusp) 5896{ 5897 pid_t pid; 5898 int status; 5899 char cmdbuf[MAXPATHLEN]; 5900 5901 pid = fork(); 5902 5903 if (cmd == NULL) { 5904 (void) strlcpy(cmdbuf, getexecname(), sizeof (cmdbuf)); 5905 cmd = cmdbuf; 5906 } 5907 5908 if (pid == -1) 5909 fatal(1, "fork failed"); 5910 5911 if (pid == 0) { /* child */ 5912 char *emptyargv[2] = { cmd, NULL }; 5913 5914 struct rlimit rl = { 1024, 1024 }; 5915 (void) setrlimit(RLIMIT_NOFILE, &rl); 5916 (void) enable_extended_FILE_stdio(-1, -1); 5917 if (libpath != NULL) 5918 VERIFY(0 == setenv("LD_LIBRARY_PATH", libpath, 1)); 5919#ifdef illumos 5920 (void) execv(cmd, emptyargv); 5921#else 5922 (void) execvp(cmd, emptyargv); 5923#endif 5924 ztest_dump_core = B_FALSE; 5925 fatal(B_TRUE, "exec failed: %s", cmd); 5926 } 5927 5928 while (waitpid(pid, &status, 0) != pid) 5929 continue; 5930 if (statusp != NULL) 5931 *statusp = status; 5932 5933 if (WIFEXITED(status)) { 5934 if (WEXITSTATUS(status) != 0) { 5935 (void) fprintf(stderr, "child exited with code %d\n", 5936 WEXITSTATUS(status)); 5937 exit(2); 5938 } 5939 return (B_FALSE); 5940 } else if (WIFSIGNALED(status)) { 5941 if (!ignorekill || WTERMSIG(status) != SIGKILL) { 5942 (void) fprintf(stderr, "child died with signal %d\n", 5943 WTERMSIG(status)); 5944 exit(3); 5945 } 5946 return (B_TRUE); 5947 } else { 5948 (void) fprintf(stderr, "something strange happened to child\n"); 5949 exit(4); 5950 /* NOTREACHED */ 5951 } 5952} 5953 5954static void 5955ztest_run_init(void) 5956{ 5957 ztest_shared_t *zs = ztest_shared; 5958 5959 ASSERT(ztest_opts.zo_init != 0); 5960 5961 /* 5962 * Blow away any existing copy of zpool.cache 5963 */ 5964 (void) remove(spa_config_path); 5965 5966 /* 5967 * Create and initialize our storage pool. 
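/*
 * Added commentary (not in the original source): the file behind
 * ZTEST_FD_DATA is mmap()ed by every process and laid out as one contiguous
 * blob, in exactly the order that shared_data_size() above sums it:
 *
 *	offset 0:				ztest_shared_hdr_t
 *	+ zh_hdr_size:				ztest_shared_opts_t
 *	+ zh_opts_size:				ztest_shared_t
 *	+ zh_size:				callstate[zh_stats_count]
 *	+ zh_stats_size * zh_stats_count:	ds[zh_ds_count]
 *
 * setup_data() simply walks those sizes again to recover the same pointers
 * (ztest_shared_opts, ztest_shared, ...) in both parent and child.
 */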
5968 */ 5969 for (int i = 1; i <= ztest_opts.zo_init; i++) { 5970 bzero(zs, sizeof (ztest_shared_t)); 5971 if (ztest_opts.zo_verbose >= 3 && 5972 ztest_opts.zo_init != 1) { 5973 (void) printf("ztest_init(), pass %d\n", i); 5974 } 5975 ztest_init(zs); 5976 } 5977} 5978 5979int 5980main(int argc, char **argv) 5981{ 5982 int kills = 0; 5983 int iters = 0; 5984 int older = 0; 5985 int newer = 0; 5986 ztest_shared_t *zs; 5987 ztest_info_t *zi; 5988 ztest_shared_callstate_t *zc; 5989 char timebuf[100]; 5990 char numbuf[6]; 5991 spa_t *spa; 5992 char cmd[MAXNAMELEN]; 5993 boolean_t hasalt; 5994 5995 boolean_t ischild = (0 == lseek(ZTEST_FD_DATA, 0, SEEK_CUR)); 5996 ASSERT(ischild || errno == EBADF); 5997 5998 (void) setvbuf(stdout, NULL, _IOLBF, 0); 5999 6000 dprintf_setup(&argc, argv); 6001 6002 if (!ischild) { 6003 process_options(argc, argv); 6004 6005 setup_fds(); 6006 setup_hdr(); 6007 setup_data(); 6008 bcopy(&ztest_opts, ztest_shared_opts, 6009 sizeof (*ztest_shared_opts)); 6010 } else { 6011 setup_data(); 6012 bcopy(ztest_shared_opts, &ztest_opts, sizeof (ztest_opts)); 6013 } 6014 ASSERT3U(ztest_opts.zo_datasets, ==, ztest_shared_hdr->zh_ds_count); 6015 6016 /* Override location of zpool.cache */ 6017 (void) asprintf((char **)&spa_config_path, "%s/zpool.cache", 6018 ztest_opts.zo_dir); 6019 6020 ztest_ds = umem_alloc(ztest_opts.zo_datasets * sizeof (ztest_ds_t), 6021 UMEM_NOFAIL); 6022 zs = ztest_shared; 6023 6024 if (ischild) { 6025 metaslab_gang_bang = ztest_opts.zo_metaslab_gang_bang; 6026 metaslab_df_alloc_threshold = 6027 zs->zs_metaslab_df_alloc_threshold; 6028 6029 if (zs->zs_do_init) 6030 ztest_run_init(); 6031 else 6032 ztest_run(zs); 6033 exit(0); 6034 } 6035 6036 hasalt = (strlen(ztest_opts.zo_alt_ztest) != 0); 6037 6038 if (ztest_opts.zo_verbose >= 1) { 6039 (void) printf("%llu vdevs, %d datasets, %d threads," 6040 " %llu seconds...\n", 6041 (u_longlong_t)ztest_opts.zo_vdevs, 6042 ztest_opts.zo_datasets, 6043 ztest_opts.zo_threads, 6044 (u_longlong_t)ztest_opts.zo_time); 6045 } 6046 6047 (void) strlcpy(cmd, getexecname(), sizeof (cmd)); 6048 6049 zs->zs_do_init = B_TRUE; 6050 if (strlen(ztest_opts.zo_alt_ztest) != 0) { 6051 if (ztest_opts.zo_verbose >= 1) { 6052 (void) printf("Executing older ztest for " 6053 "initialization: %s\n", ztest_opts.zo_alt_ztest); 6054 } 6055 VERIFY(!exec_child(ztest_opts.zo_alt_ztest, 6056 ztest_opts.zo_alt_libpath, B_FALSE, NULL)); 6057 } else { 6058 VERIFY(!exec_child(NULL, NULL, B_FALSE, NULL)); 6059 } 6060 zs->zs_do_init = B_FALSE; 6061 6062 zs->zs_proc_start = gethrtime(); 6063 zs->zs_proc_stop = zs->zs_proc_start + ztest_opts.zo_time * NANOSEC; 6064 6065 for (int f = 0; f < ZTEST_FUNCS; f++) { 6066 zi = &ztest_info[f]; 6067 zc = ZTEST_GET_SHARED_CALLSTATE(f); 6068 if (zs->zs_proc_start + zi->zi_interval[0] > zs->zs_proc_stop) 6069 zc->zc_next = UINT64_MAX; 6070 else 6071 zc->zc_next = zs->zs_proc_start + 6072 ztest_random(2 * zi->zi_interval[0] + 1); 6073 } 6074 6075 /* 6076 * Run the tests in a loop. These tests include fault injection 6077 * to verify that self-healing data works, and forced crashes 6078 * to verify that we never lose on-disk consistency. 6079 */ 6080 while (gethrtime() < zs->zs_proc_stop) { 6081 int status; 6082 boolean_t killed; 6083 6084 /* 6085 * Initialize the workload counters for each function. 
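/*
 * Added commentary (not in the original source): the ischild test near the
 * top of main() relies on ZTEST_FD_DATA being inherited across exec().
 * lseek(fd, 0, SEEK_CUR) is a harmless probe: in the child it returns the
 * current offset (0) because setup_fds() opened the shared file at that
 * descriptor, while in a freshly started parent the descriptor does not
 * exist and the call fails with EBADF, which is the only other outcome the
 * ASSERT accepts.
 */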
6086 */ 6087 for (int f = 0; f < ZTEST_FUNCS; f++) { 6088 zc = ZTEST_GET_SHARED_CALLSTATE(f); 6089 zc->zc_count = 0; 6090 zc->zc_time = 0; 6091 } 6092 6093 /* Set the allocation switch size */ 6094 zs->zs_metaslab_df_alloc_threshold = 6095 ztest_random(zs->zs_metaslab_sz / 4) + 1; 6096 6097 if (!hasalt || ztest_random(2) == 0) { 6098 if (hasalt && ztest_opts.zo_verbose >= 1) { 6099 (void) printf("Executing newer ztest: %s\n", 6100 cmd); 6101 } 6102 newer++; 6103 killed = exec_child(cmd, NULL, B_TRUE, &status); 6104 } else { 6105 if (hasalt && ztest_opts.zo_verbose >= 1) { 6106 (void) printf("Executing older ztest: %s\n", 6107 ztest_opts.zo_alt_ztest); 6108 } 6109 older++; 6110 killed = exec_child(ztest_opts.zo_alt_ztest, 6111 ztest_opts.zo_alt_libpath, B_TRUE, &status); 6112 } 6113 6114 if (killed) 6115 kills++; 6116 iters++; 6117 6118 if (ztest_opts.zo_verbose >= 1) { 6119 hrtime_t now = gethrtime(); 6120 6121 now = MIN(now, zs->zs_proc_stop); 6122 print_time(zs->zs_proc_stop - now, timebuf); 6123 nicenum(zs->zs_space, numbuf); 6124 6125 (void) printf("Pass %3d, %8s, %3llu ENOSPC, " 6126 "%4.1f%% of %5s used, %3.0f%% done, %8s to go\n", 6127 iters, 6128 WIFEXITED(status) ? "Complete" : "SIGKILL", 6129 (u_longlong_t)zs->zs_enospc_count, 6130 100.0 * zs->zs_alloc / zs->zs_space, 6131 numbuf, 6132 100.0 * (now - zs->zs_proc_start) / 6133 (ztest_opts.zo_time * NANOSEC), timebuf); 6134 } 6135 6136 if (ztest_opts.zo_verbose >= 2) { 6137 (void) printf("\nWorkload summary:\n\n"); 6138 (void) printf("%7s %9s %s\n", 6139 "Calls", "Time", "Function"); 6140 (void) printf("%7s %9s %s\n", 6141 "-----", "----", "--------"); 6142 for (int f = 0; f < ZTEST_FUNCS; f++) { 6143 Dl_info dli; 6144 6145 zi = &ztest_info[f]; 6146 zc = ZTEST_GET_SHARED_CALLSTATE(f); 6147 print_time(zc->zc_time, timebuf); 6148 (void) dladdr((void *)zi->zi_func, &dli); 6149 (void) printf("%7llu %9s %s\n", 6150 (u_longlong_t)zc->zc_count, timebuf, 6151 dli.dli_sname); 6152 } 6153 (void) printf("\n"); 6154 } 6155 6156 /* 6157 * It's possible that we killed a child during a rename test, 6158 * in which case we'll have a 'ztest_tmp' pool lying around 6159 * instead of 'ztest'. Do a blind rename in case this happened. 6160 */ 6161 kernel_init(FREAD); 6162 if (spa_open(ztest_opts.zo_pool, &spa, FTAG) == 0) { 6163 spa_close(spa, FTAG); 6164 } else { 6165 char tmpname[MAXNAMELEN]; 6166 kernel_fini(); 6167 kernel_init(FREAD | FWRITE); 6168 (void) snprintf(tmpname, sizeof (tmpname), "%s_tmp", 6169 ztest_opts.zo_pool); 6170 (void) spa_rename(tmpname, ztest_opts.zo_pool); 6171 } 6172 kernel_fini(); 6173 6174 ztest_run_zdb(ztest_opts.zo_pool); 6175 } 6176 6177 if (ztest_opts.zo_verbose >= 1) { 6178 if (hasalt) { 6179 (void) printf("%d runs of older ztest: %s\n", older, 6180 ztest_opts.zo_alt_ztest); 6181 (void) printf("%d runs of newer ztest: %s\n", newer, 6182 cmd); 6183 } 6184 (void) printf("%d killed, %d completed, %.0f%% kill rate\n", 6185 kills, iters - kills, (100.0 * kills) / MAX(1, iters)); 6186 } 6187 6188 return (0); 6189} 6190