ztest.c revision 252760
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012 by Delphix. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2012 Martin Matuska <mm@FreeBSD.org>. All rights reserved.
 */

/*
 * The objective of this program is to provide a DMU/ZAP/SPA stress test
 * that runs entirely in userland, is easy to use, and easy to extend.
 *
 * The overall design of the ztest program is as follows:
 *
 * (1) For each major functional area (e.g. adding vdevs to a pool,
 *     creating and destroying datasets, reading and writing objects, etc)
 *     we have a simple routine to test that functionality.  These
 *     individual routines do not have to do anything "stressful".
 *
 * (2) We turn these simple functionality tests into a stress test by
 *     running them all in parallel, with as many threads as desired,
 *     and spread across as many datasets, objects, and vdevs as desired.
 *
 * (3) While all this is happening, we inject faults into the pool to
 *     verify that self-healing data really works.
 *
 * (4) Every time we open a dataset, we change its checksum and compression
 *     functions.  Thus even individual objects vary from block to block
 *     in which checksum they use and whether they're compressed.
 *
 * (5) To verify that we never lose on-disk consistency after a crash,
 *     we run the entire test in a child of the main process.
 *     At random times, the child self-immolates with a SIGKILL.
 *     This is the software equivalent of pulling the power cord.
 *     The parent then runs the test again, using the existing
 *     storage pool, as many times as desired.  If backwards compatibility
 *     testing is enabled, ztest will sometimes run the "older" version
 *     of ztest after a SIGKILL.
 *
 * (6) To verify that we don't have future leaks or temporal incursions,
 *     many of the functional tests record the transaction group number
 *     as part of their data.  When reading old data, they verify that
 *     the transaction group number is less than the current, open txg.
 *     If you add a new test, please do this if applicable.
 *
 * When run with no arguments, ztest runs for about five minutes and
 * produces no output if successful.  To get a little bit of information,
 * specify -V.  To get more information, specify -VV, and so on.
 *
 * To turn this into an overnight stress test, use -T to specify run time.
 *
 * You can ask for more vdevs [-v], datasets [-d], or threads [-t]
 * to increase the pool capacity, fanout, and overall stress level.
 *
 * Use the -k option to set the desired frequency of kills.
 *
 * When ztest invokes itself it passes all relevant information through a
 * temporary file which is mmap-ed in the child process.  This allows shared
 * memory to survive the exec syscall.  The ztest_shared_hdr_t struct is always
 * stored at offset 0 of this file and contains information on the size and
 * number of shared structures in the file.  The information stored in this
 * file must remain backwards compatible with older versions of ztest so that
 * ztest can invoke them during backwards compatibility testing (-B).
 * (A brief illustrative sketch of this mapping appears below, after the
 * shared-state declarations.)
 */

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/dmu.h>
#include <sys/txg.h>
#include <sys/dbuf.h>
#include <sys/zap.h>
#include <sys/dmu_objset.h>
#include <sys/poll.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/wait.h>
#include <sys/mman.h>
#include <sys/resource.h>
#include <sys/zio.h>
#include <sys/zil.h>
#include <sys/zil_impl.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_file.h>
#include <sys/spa_impl.h>
#include <sys/metaslab_impl.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_destroy.h>
#include <sys/dsl_scan.h>
#include <sys/zio_checksum.h>
#include <sys/refcount.h>
#include <sys/zfeature.h>
#include <sys/dsl_userhold.h>
#include <stdio.h>
#include <stdio_ext.h>
#include <stdlib.h>
#include <unistd.h>
#include <signal.h>
#include <umem.h>
#include <dlfcn.h>
#include <ctype.h>
#include <math.h>
#include <errno.h>
#include <sys/fs/zfs.h>
#include <libnvpair.h>

static int ztest_fd_data = -1;
static int ztest_fd_rand = -1;

typedef struct ztest_shared_hdr {
	uint64_t	zh_hdr_size;
	uint64_t	zh_opts_size;
	uint64_t	zh_size;
	uint64_t	zh_stats_size;
	uint64_t	zh_stats_count;
	uint64_t	zh_ds_size;
	uint64_t	zh_ds_count;
} ztest_shared_hdr_t;

static ztest_shared_hdr_t *ztest_shared_hdr;

typedef struct ztest_shared_opts {
	char zo_pool[MAXNAMELEN];
	char zo_dir[MAXNAMELEN];
	char zo_alt_ztest[MAXNAMELEN];
	char zo_alt_libpath[MAXNAMELEN];
	uint64_t zo_vdevs;
	uint64_t zo_vdevtime;
	size_t zo_vdev_size;
	int zo_ashift;
	int zo_mirrors;
	int zo_raidz;
	int zo_raidz_parity;
	int zo_datasets;
	int zo_threads;
	uint64_t zo_passtime;
	uint64_t zo_killrate;
	int zo_verbose;
	int zo_init;
	uint64_t zo_time;
	uint64_t zo_maxloops;
	uint64_t zo_metaslab_gang_bang;
} ztest_shared_opts_t;

static const ztest_shared_opts_t ztest_opts_defaults = {
	.zo_pool = { 'z', 't', 'e', 's', 't', '\0' },
	.zo_dir = { '/', 't', 'm', 'p', '\0' },
	.zo_alt_ztest = { '\0' },
	.zo_alt_libpath = { '\0' },
	.zo_vdevs = 5,
	.zo_ashift = SPA_MINBLOCKSHIFT,
	.zo_mirrors = 2,
	.zo_raidz = 4,
	.zo_raidz_parity = 1,
	.zo_vdev_size = SPA_MINDEVSIZE,
	.zo_datasets = 7,
	.zo_threads = 23,
	.zo_passtime = 60,		/* 60 seconds */
	.zo_killrate = 70,		/* 70% kill rate */
	.zo_verbose = 0,
	.zo_init = 1,
	.zo_time = 300,			/* 5 minutes */
	.zo_maxloops = 50,		/* max loops during spa_freeze() */
	.zo_metaslab_gang_bang = 32 << 10
};

extern uint64_t metaslab_gang_bang;
extern uint64_t metaslab_df_alloc_threshold;

static ztest_shared_opts_t *ztest_shared_opts;
static ztest_shared_opts_t ztest_opts;

typedef struct ztest_shared_ds {
	uint64_t	zd_seq;
} ztest_shared_ds_t;

static ztest_shared_ds_t *ztest_shared_ds;
#define	ZTEST_GET_SHARED_DS(d) (&ztest_shared_ds[d])
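
/*
 * Illustrative sketch (hypothetical helper, not part of the original code
 * paths): roughly how the shared header described above could be mapped from
 * the inherited temporary file so that the shared state survives exec.  The
 * real setup code appears later in this file; the function name, parameter,
 * and error handling here are assumptions made for illustration only.
 */
#if 0
static ztest_shared_hdr_t *
ztest_map_shared_hdr_sketch(int fd)
{
	ztest_shared_hdr_t *hdr;

	/* The header is always stored at offset 0 of the shared file. */
	hdr = (void *)mmap(0, sizeof (*hdr), PROT_READ | PROT_WRITE,
	    MAP_SHARED, fd, 0);
	if (hdr == MAP_FAILED)
		return (NULL);

	/*
	 * zh_hdr_size records the header's own size, so an older ztest
	 * binary invoked via -B can still locate the sections behind it.
	 */
	return (hdr);
}
#endif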

#define	BT_MAGIC	0x123456789abcdefULL
#define	MAXFAULTS() \
	(MAX(zs->zs_mirrors, 1) * (ztest_opts.zo_raidz_parity + 1) - 1)

enum ztest_io_type {
	ZTEST_IO_WRITE_TAG,
	ZTEST_IO_WRITE_PATTERN,
	ZTEST_IO_WRITE_ZEROES,
	ZTEST_IO_TRUNCATE,
	ZTEST_IO_SETATTR,
	ZTEST_IO_REWRITE,
	ZTEST_IO_TYPES
};

typedef struct ztest_block_tag {
	uint64_t	bt_magic;
	uint64_t	bt_objset;
	uint64_t	bt_object;
	uint64_t	bt_offset;
	uint64_t	bt_gen;
	uint64_t	bt_txg;
	uint64_t	bt_crtxg;
} ztest_block_tag_t;

typedef struct bufwad {
	uint64_t	bw_index;
	uint64_t	bw_txg;
	uint64_t	bw_data;
} bufwad_t;

/*
 * XXX -- fix zfs range locks to be generic so we can use them here.
 */
typedef enum {
	RL_READER,
	RL_WRITER,
	RL_APPEND
} rl_type_t;

typedef struct rll {
	void		*rll_writer;
	int		rll_readers;
	mutex_t		rll_lock;
	cond_t		rll_cv;
} rll_t;

typedef struct rl {
	uint64_t	rl_object;
	uint64_t	rl_offset;
	uint64_t	rl_size;
	rll_t		*rl_lock;
} rl_t;

#define	ZTEST_RANGE_LOCKS	64
#define	ZTEST_OBJECT_LOCKS	64

/*
 * Object descriptor.  Used as a template for object lookup/create/remove.
 */
typedef struct ztest_od {
	uint64_t	od_dir;
	uint64_t	od_object;
	dmu_object_type_t od_type;
	dmu_object_type_t od_crtype;
	uint64_t	od_blocksize;
	uint64_t	od_crblocksize;
	uint64_t	od_gen;
	uint64_t	od_crgen;
	char		od_name[MAXNAMELEN];
} ztest_od_t;

/*
 * Per-dataset state.
 */
typedef struct ztest_ds {
	ztest_shared_ds_t *zd_shared;
	objset_t	*zd_os;
	rwlock_t	zd_zilog_lock;
	zilog_t		*zd_zilog;
	ztest_od_t	*zd_od;		/* debugging aid */
	char		zd_name[MAXNAMELEN];
	mutex_t		zd_dirobj_lock;
	rll_t		zd_object_lock[ZTEST_OBJECT_LOCKS];
	rll_t		zd_range_lock[ZTEST_RANGE_LOCKS];
} ztest_ds_t;

/*
 * Per-iteration state.
 */
typedef void ztest_func_t(ztest_ds_t *zd, uint64_t id);

typedef struct ztest_info {
	ztest_func_t	*zi_func;	/* test function */
	uint64_t	zi_iters;	/* iterations per execution */
	uint64_t	*zi_interval;	/* execute every <interval> seconds */
} ztest_info_t;

typedef struct ztest_shared_callstate {
	uint64_t	zc_count;	/* per-pass count */
	uint64_t	zc_time;	/* per-pass time */
	uint64_t	zc_next;	/* next time to call this function */
} ztest_shared_callstate_t;

static ztest_shared_callstate_t *ztest_shared_callstate;
#define	ZTEST_GET_SHARED_CALLSTATE(c) (&ztest_shared_callstate[c])
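
/*
 * Illustrative sketch (hypothetical helper, not part of the original code
 * paths): how a harness could combine zi_interval with the shared call state
 * above to decide whether a test function is due to run.  The real per-thread
 * dispatch logic appears later in this file; the helper name and the simple
 * rescheduling policy are assumptions made for illustration only.
 */
#if 0
static boolean_t
ztest_call_due_sketch(ztest_info_t *zi, ztest_shared_callstate_t *zc,
    hrtime_t now)
{
	/* An interval of zero means "run on every pass". */
	if (*zi->zi_interval == 0)
		return (B_TRUE);

	/* Otherwise wait until the recorded next-call time has passed. */
	if (now < zc->zc_next)
		return (B_FALSE);

	/* Schedule the next call roughly one interval from now. */
	zc->zc_next = now + *zi->zi_interval;
	return (B_TRUE);
}
#endif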

/*
 * Note: these aren't static because we want dladdr() to work.
 */
ztest_func_t ztest_dmu_read_write;
ztest_func_t ztest_dmu_write_parallel;
ztest_func_t ztest_dmu_object_alloc_free;
ztest_func_t ztest_dmu_commit_callbacks;
ztest_func_t ztest_zap;
ztest_func_t ztest_zap_parallel;
ztest_func_t ztest_zil_commit;
ztest_func_t ztest_zil_remount;
ztest_func_t ztest_dmu_read_write_zcopy;
ztest_func_t ztest_dmu_objset_create_destroy;
ztest_func_t ztest_dmu_prealloc;
ztest_func_t ztest_fzap;
ztest_func_t ztest_dmu_snapshot_create_destroy;
ztest_func_t ztest_dsl_prop_get_set;
ztest_func_t ztest_spa_prop_get_set;
ztest_func_t ztest_spa_create_destroy;
ztest_func_t ztest_fault_inject;
ztest_func_t ztest_ddt_repair;
ztest_func_t ztest_dmu_snapshot_hold;
ztest_func_t ztest_spa_rename;
ztest_func_t ztest_scrub;
ztest_func_t ztest_dsl_dataset_promote_busy;
ztest_func_t ztest_vdev_attach_detach;
ztest_func_t ztest_vdev_LUN_growth;
ztest_func_t ztest_vdev_add_remove;
ztest_func_t ztest_vdev_aux_add_remove;
ztest_func_t ztest_split_pool;
ztest_func_t ztest_reguid;
ztest_func_t ztest_spa_upgrade;

uint64_t zopt_always = 0ULL * NANOSEC;		/* all the time */
uint64_t zopt_incessant = 1ULL * NANOSEC / 10;	/* every 1/10 second */
uint64_t zopt_often = 1ULL * NANOSEC;		/* every second */
uint64_t zopt_sometimes = 10ULL * NANOSEC;	/* every 10 seconds */
uint64_t zopt_rarely = 60ULL * NANOSEC;		/* every 60 seconds */

ztest_info_t ztest_info[] = {
	{ ztest_dmu_read_write,			1,	&zopt_always	},
	{ ztest_dmu_write_parallel,		10,	&zopt_always	},
	{ ztest_dmu_object_alloc_free,		1,	&zopt_always	},
	{ ztest_dmu_commit_callbacks,		1,	&zopt_always	},
	{ ztest_zap,				30,	&zopt_always	},
	{ ztest_zap_parallel,			100,	&zopt_always	},
	{ ztest_split_pool,			1,	&zopt_always	},
	{ ztest_zil_commit,			1,	&zopt_incessant	},
	{ ztest_zil_remount,			1,	&zopt_sometimes	},
	{ ztest_dmu_read_write_zcopy,		1,	&zopt_often	},
	{ ztest_dmu_objset_create_destroy,	1,	&zopt_often	},
	{ ztest_dsl_prop_get_set,		1,	&zopt_often	},
	{ ztest_spa_prop_get_set,		1,	&zopt_sometimes	},
#if 0
	{ ztest_dmu_prealloc,			1,	&zopt_sometimes	},
#endif
	{ ztest_fzap,				1,	&zopt_sometimes	},
	{ ztest_dmu_snapshot_create_destroy,	1,	&zopt_sometimes	},
	{ ztest_spa_create_destroy,		1,	&zopt_sometimes	},
	{ ztest_fault_inject,			1,	&zopt_sometimes	},
	{ ztest_ddt_repair,			1,	&zopt_sometimes	},
	{ ztest_dmu_snapshot_hold,		1,	&zopt_sometimes	},
	{ ztest_reguid,				1,	&zopt_sometimes	},
	{ ztest_spa_rename,			1,	&zopt_rarely	},
	{ ztest_scrub,				1,	&zopt_rarely	},
	{ ztest_spa_upgrade,			1,	&zopt_rarely	},
	{ ztest_dsl_dataset_promote_busy,	1,	&zopt_rarely	},
	{ ztest_vdev_attach_detach,		1,	&zopt_sometimes	},
	{ ztest_vdev_LUN_growth,		1,	&zopt_rarely	},
	{ ztest_vdev_add_remove,		1,	&ztest_opts.zo_vdevtime	},
	{ ztest_vdev_aux_add_remove,		1,	&ztest_opts.zo_vdevtime	},
};

#define	ZTEST_FUNCS	(sizeof (ztest_info) / sizeof (ztest_info_t))

/*
 * The following struct is used to hold a list of uncalled commit callbacks.
 * The callbacks are ordered by txg number.
 */
typedef struct ztest_cb_list {
	mutex_t	zcl_callbacks_lock;
	list_t	zcl_callbacks;
} ztest_cb_list_t;

/*
 * Stuff we need to share writably between parent and child.
 */
typedef struct ztest_shared {
	boolean_t	zs_do_init;
	hrtime_t	zs_proc_start;
	hrtime_t	zs_proc_stop;
	hrtime_t	zs_thread_start;
	hrtime_t	zs_thread_stop;
	hrtime_t	zs_thread_kill;
	uint64_t	zs_enospc_count;
	uint64_t	zs_vdev_next_leaf;
	uint64_t	zs_vdev_aux;
	uint64_t	zs_alloc;
	uint64_t	zs_space;
	uint64_t	zs_splits;
	uint64_t	zs_mirrors;
	uint64_t	zs_metaslab_sz;
	uint64_t	zs_metaslab_df_alloc_threshold;
	uint64_t	zs_guid;
} ztest_shared_t;

#define	ID_PARALLEL	-1ULL

static char ztest_dev_template[] = "%s/%s.%llua";
static char ztest_aux_template[] = "%s/%s.%s.%llu";
ztest_shared_t *ztest_shared;

static spa_t *ztest_spa = NULL;
static ztest_ds_t *ztest_ds;

static mutex_t ztest_vdev_lock;

/*
 * The ztest_name_lock protects the pool and dataset namespace used by
 * the individual tests.  To modify the namespace, consumers must grab
 * this lock as writer.  Grabbing the lock as reader will ensure that the
 * namespace does not change while the lock is held.
 */
static rwlock_t ztest_name_lock;

static boolean_t ztest_dump_core = B_TRUE;
static boolean_t ztest_exiting;

/* Global commit callback list */
static ztest_cb_list_t zcl;

enum ztest_object {
	ZTEST_META_DNODE = 0,
	ZTEST_DIROBJ,
	ZTEST_OBJECTS
};

static void usage(boolean_t) __NORETURN;

/*
 * These libumem hooks provide a reasonable set of defaults for the allocator's
 * debugging facilities.
 */
const char *
_umem_debug_init(void)
{
	return ("default,verbose");	/* $UMEM_DEBUG setting */
}

const char *
_umem_logging_init(void)
{
	return ("fail,contents");	/* $UMEM_LOGGING setting */
}

#define	FATAL_MSG_SZ	1024

char *fatal_msg;

static void
fatal(int do_perror, char *message, ...)
468{ 469 va_list args; 470 int save_errno = errno; 471 char buf[FATAL_MSG_SZ]; 472 473 (void) fflush(stdout); 474 475 va_start(args, message); 476 (void) sprintf(buf, "ztest: "); 477 /* LINTED */ 478 (void) vsprintf(buf + strlen(buf), message, args); 479 va_end(args); 480 if (do_perror) { 481 (void) snprintf(buf + strlen(buf), FATAL_MSG_SZ - strlen(buf), 482 ": %s", strerror(save_errno)); 483 } 484 (void) fprintf(stderr, "%s\n", buf); 485 fatal_msg = buf; /* to ease debugging */ 486 if (ztest_dump_core) 487 abort(); 488 exit(3); 489} 490 491static int 492str2shift(const char *buf) 493{ 494 const char *ends = "BKMGTPEZ"; 495 int i; 496 497 if (buf[0] == '\0') 498 return (0); 499 for (i = 0; i < strlen(ends); i++) { 500 if (toupper(buf[0]) == ends[i]) 501 break; 502 } 503 if (i == strlen(ends)) { 504 (void) fprintf(stderr, "ztest: invalid bytes suffix: %s\n", 505 buf); 506 usage(B_FALSE); 507 } 508 if (buf[1] == '\0' || (toupper(buf[1]) == 'B' && buf[2] == '\0')) { 509 return (10*i); 510 } 511 (void) fprintf(stderr, "ztest: invalid bytes suffix: %s\n", buf); 512 usage(B_FALSE); 513 /* NOTREACHED */ 514} 515 516static uint64_t 517nicenumtoull(const char *buf) 518{ 519 char *end; 520 uint64_t val; 521 522 val = strtoull(buf, &end, 0); 523 if (end == buf) { 524 (void) fprintf(stderr, "ztest: bad numeric value: %s\n", buf); 525 usage(B_FALSE); 526 } else if (end[0] == '.') { 527 double fval = strtod(buf, &end); 528 fval *= pow(2, str2shift(end)); 529 if (fval > UINT64_MAX) { 530 (void) fprintf(stderr, "ztest: value too large: %s\n", 531 buf); 532 usage(B_FALSE); 533 } 534 val = (uint64_t)fval; 535 } else { 536 int shift = str2shift(end); 537 if (shift >= 64 || (val << shift) >> shift != val) { 538 (void) fprintf(stderr, "ztest: value too large: %s\n", 539 buf); 540 usage(B_FALSE); 541 } 542 val <<= shift; 543 } 544 return (val); 545} 546 547static void 548usage(boolean_t requested) 549{ 550 const ztest_shared_opts_t *zo = &ztest_opts_defaults; 551 552 char nice_vdev_size[10]; 553 char nice_gang_bang[10]; 554 FILE *fp = requested ? 
stdout : stderr; 555 556 nicenum(zo->zo_vdev_size, nice_vdev_size); 557 nicenum(zo->zo_metaslab_gang_bang, nice_gang_bang); 558 559 (void) fprintf(fp, "Usage: %s\n" 560 "\t[-v vdevs (default: %llu)]\n" 561 "\t[-s size_of_each_vdev (default: %s)]\n" 562 "\t[-a alignment_shift (default: %d)] use 0 for random\n" 563 "\t[-m mirror_copies (default: %d)]\n" 564 "\t[-r raidz_disks (default: %d)]\n" 565 "\t[-R raidz_parity (default: %d)]\n" 566 "\t[-d datasets (default: %d)]\n" 567 "\t[-t threads (default: %d)]\n" 568 "\t[-g gang_block_threshold (default: %s)]\n" 569 "\t[-i init_count (default: %d)] initialize pool i times\n" 570 "\t[-k kill_percentage (default: %llu%%)]\n" 571 "\t[-p pool_name (default: %s)]\n" 572 "\t[-f dir (default: %s)] file directory for vdev files\n" 573 "\t[-V] verbose (use multiple times for ever more blather)\n" 574 "\t[-E] use existing pool instead of creating new one\n" 575 "\t[-T time (default: %llu sec)] total run time\n" 576 "\t[-F freezeloops (default: %llu)] max loops in spa_freeze()\n" 577 "\t[-P passtime (default: %llu sec)] time per pass\n" 578 "\t[-B alt_ztest (default: <none>)] alternate ztest path\n" 579 "\t[-h] (print help)\n" 580 "", 581 zo->zo_pool, 582 (u_longlong_t)zo->zo_vdevs, /* -v */ 583 nice_vdev_size, /* -s */ 584 zo->zo_ashift, /* -a */ 585 zo->zo_mirrors, /* -m */ 586 zo->zo_raidz, /* -r */ 587 zo->zo_raidz_parity, /* -R */ 588 zo->zo_datasets, /* -d */ 589 zo->zo_threads, /* -t */ 590 nice_gang_bang, /* -g */ 591 zo->zo_init, /* -i */ 592 (u_longlong_t)zo->zo_killrate, /* -k */ 593 zo->zo_pool, /* -p */ 594 zo->zo_dir, /* -f */ 595 (u_longlong_t)zo->zo_time, /* -T */ 596 (u_longlong_t)zo->zo_maxloops, /* -F */ 597 (u_longlong_t)zo->zo_passtime); 598 exit(requested ? 0 : 1); 599} 600 601static void 602process_options(int argc, char **argv) 603{ 604 char *path; 605 ztest_shared_opts_t *zo = &ztest_opts; 606 607 int opt; 608 uint64_t value; 609 char altdir[MAXNAMELEN] = { 0 }; 610 611 bcopy(&ztest_opts_defaults, zo, sizeof (*zo)); 612 613 while ((opt = getopt(argc, argv, 614 "v:s:a:m:r:R:d:t:g:i:k:p:f:VET:P:hF:B:")) != EOF) { 615 value = 0; 616 switch (opt) { 617 case 'v': 618 case 's': 619 case 'a': 620 case 'm': 621 case 'r': 622 case 'R': 623 case 'd': 624 case 't': 625 case 'g': 626 case 'i': 627 case 'k': 628 case 'T': 629 case 'P': 630 case 'F': 631 value = nicenumtoull(optarg); 632 } 633 switch (opt) { 634 case 'v': 635 zo->zo_vdevs = value; 636 break; 637 case 's': 638 zo->zo_vdev_size = MAX(SPA_MINDEVSIZE, value); 639 break; 640 case 'a': 641 zo->zo_ashift = value; 642 break; 643 case 'm': 644 zo->zo_mirrors = value; 645 break; 646 case 'r': 647 zo->zo_raidz = MAX(1, value); 648 break; 649 case 'R': 650 zo->zo_raidz_parity = MIN(MAX(value, 1), 3); 651 break; 652 case 'd': 653 zo->zo_datasets = MAX(1, value); 654 break; 655 case 't': 656 zo->zo_threads = MAX(1, value); 657 break; 658 case 'g': 659 zo->zo_metaslab_gang_bang = MAX(SPA_MINBLOCKSIZE << 1, 660 value); 661 break; 662 case 'i': 663 zo->zo_init = value; 664 break; 665 case 'k': 666 zo->zo_killrate = value; 667 break; 668 case 'p': 669 (void) strlcpy(zo->zo_pool, optarg, 670 sizeof (zo->zo_pool)); 671 break; 672 case 'f': 673 path = realpath(optarg, NULL); 674 if (path == NULL) { 675 (void) fprintf(stderr, "error: %s: %s\n", 676 optarg, strerror(errno)); 677 usage(B_FALSE); 678 } else { 679 (void) strlcpy(zo->zo_dir, path, 680 sizeof (zo->zo_dir)); 681 } 682 break; 683 case 'V': 684 zo->zo_verbose++; 685 break; 686 case 'E': 687 zo->zo_init = 0; 688 break; 689 case 'T': 690 
zo->zo_time = value; 691 break; 692 case 'P': 693 zo->zo_passtime = MAX(1, value); 694 break; 695 case 'F': 696 zo->zo_maxloops = MAX(1, value); 697 break; 698 case 'B': 699 (void) strlcpy(altdir, optarg, sizeof (altdir)); 700 break; 701 case 'h': 702 usage(B_TRUE); 703 break; 704 case '?': 705 default: 706 usage(B_FALSE); 707 break; 708 } 709 } 710 711 zo->zo_raidz_parity = MIN(zo->zo_raidz_parity, zo->zo_raidz - 1); 712 713 zo->zo_vdevtime = 714 (zo->zo_vdevs > 0 ? zo->zo_time * NANOSEC / zo->zo_vdevs : 715 UINT64_MAX >> 2); 716 717 if (strlen(altdir) > 0) { 718 char *cmd; 719 char *realaltdir; 720 char *bin; 721 char *ztest; 722 char *isa; 723 int isalen; 724 725 cmd = umem_alloc(MAXPATHLEN, UMEM_NOFAIL); 726 realaltdir = umem_alloc(MAXPATHLEN, UMEM_NOFAIL); 727 728 VERIFY(NULL != realpath(getexecname(), cmd)); 729 if (0 != access(altdir, F_OK)) { 730 ztest_dump_core = B_FALSE; 731 fatal(B_TRUE, "invalid alternate ztest path: %s", 732 altdir); 733 } 734 VERIFY(NULL != realpath(altdir, realaltdir)); 735 736 /* 737 * 'cmd' should be of the form "<anything>/usr/bin/<isa>/ztest". 738 * We want to extract <isa> to determine if we should use 739 * 32 or 64 bit binaries. 740 */ 741 bin = strstr(cmd, "/usr/bin/"); 742 ztest = strstr(bin, "/ztest"); 743 isa = bin + 9; 744 isalen = ztest - isa; 745 (void) snprintf(zo->zo_alt_ztest, sizeof (zo->zo_alt_ztest), 746 "%s/usr/bin/%.*s/ztest", realaltdir, isalen, isa); 747 (void) snprintf(zo->zo_alt_libpath, sizeof (zo->zo_alt_libpath), 748 "%s/usr/lib/%.*s", realaltdir, isalen, isa); 749 750 if (0 != access(zo->zo_alt_ztest, X_OK)) { 751 ztest_dump_core = B_FALSE; 752 fatal(B_TRUE, "invalid alternate ztest: %s", 753 zo->zo_alt_ztest); 754 } else if (0 != access(zo->zo_alt_libpath, X_OK)) { 755 ztest_dump_core = B_FALSE; 756 fatal(B_TRUE, "invalid alternate lib directory %s", 757 zo->zo_alt_libpath); 758 } 759 760 umem_free(cmd, MAXPATHLEN); 761 umem_free(realaltdir, MAXPATHLEN); 762 } 763} 764 765static void 766ztest_kill(ztest_shared_t *zs) 767{ 768 zs->zs_alloc = metaslab_class_get_alloc(spa_normal_class(ztest_spa)); 769 zs->zs_space = metaslab_class_get_space(spa_normal_class(ztest_spa)); 770 (void) kill(getpid(), SIGKILL); 771} 772 773static uint64_t 774ztest_random(uint64_t range) 775{ 776 uint64_t r; 777 778 ASSERT3S(ztest_fd_rand, >=, 0); 779 780 if (range == 0) 781 return (0); 782 783 if (read(ztest_fd_rand, &r, sizeof (r)) != sizeof (r)) 784 fatal(1, "short read from /dev/urandom"); 785 786 return (r % range); 787} 788 789/* ARGSUSED */ 790static void 791ztest_record_enospc(const char *s) 792{ 793 ztest_shared->zs_enospc_count++; 794} 795 796static uint64_t 797ztest_get_ashift(void) 798{ 799 if (ztest_opts.zo_ashift == 0) 800 return (SPA_MINBLOCKSHIFT + ztest_random(3)); 801 return (ztest_opts.zo_ashift); 802} 803 804static nvlist_t * 805make_vdev_file(char *path, char *aux, char *pool, size_t size, uint64_t ashift) 806{ 807 char pathbuf[MAXPATHLEN]; 808 uint64_t vdev; 809 nvlist_t *file; 810 811 if (ashift == 0) 812 ashift = ztest_get_ashift(); 813 814 if (path == NULL) { 815 path = pathbuf; 816 817 if (aux != NULL) { 818 vdev = ztest_shared->zs_vdev_aux; 819 (void) snprintf(path, sizeof (pathbuf), 820 ztest_aux_template, ztest_opts.zo_dir, 821 pool == NULL ? ztest_opts.zo_pool : pool, 822 aux, vdev); 823 } else { 824 vdev = ztest_shared->zs_vdev_next_leaf++; 825 (void) snprintf(path, sizeof (pathbuf), 826 ztest_dev_template, ztest_opts.zo_dir, 827 pool == NULL ? 
ztest_opts.zo_pool : pool, vdev); 828 } 829 } 830 831 if (size != 0) { 832 int fd = open(path, O_RDWR | O_CREAT | O_TRUNC, 0666); 833 if (fd == -1) 834 fatal(1, "can't open %s", path); 835 if (ftruncate(fd, size) != 0) 836 fatal(1, "can't ftruncate %s", path); 837 (void) close(fd); 838 } 839 840 VERIFY(nvlist_alloc(&file, NV_UNIQUE_NAME, 0) == 0); 841 VERIFY(nvlist_add_string(file, ZPOOL_CONFIG_TYPE, VDEV_TYPE_FILE) == 0); 842 VERIFY(nvlist_add_string(file, ZPOOL_CONFIG_PATH, path) == 0); 843 VERIFY(nvlist_add_uint64(file, ZPOOL_CONFIG_ASHIFT, ashift) == 0); 844 845 return (file); 846} 847 848static nvlist_t * 849make_vdev_raidz(char *path, char *aux, char *pool, size_t size, 850 uint64_t ashift, int r) 851{ 852 nvlist_t *raidz, **child; 853 int c; 854 855 if (r < 2) 856 return (make_vdev_file(path, aux, pool, size, ashift)); 857 child = umem_alloc(r * sizeof (nvlist_t *), UMEM_NOFAIL); 858 859 for (c = 0; c < r; c++) 860 child[c] = make_vdev_file(path, aux, pool, size, ashift); 861 862 VERIFY(nvlist_alloc(&raidz, NV_UNIQUE_NAME, 0) == 0); 863 VERIFY(nvlist_add_string(raidz, ZPOOL_CONFIG_TYPE, 864 VDEV_TYPE_RAIDZ) == 0); 865 VERIFY(nvlist_add_uint64(raidz, ZPOOL_CONFIG_NPARITY, 866 ztest_opts.zo_raidz_parity) == 0); 867 VERIFY(nvlist_add_nvlist_array(raidz, ZPOOL_CONFIG_CHILDREN, 868 child, r) == 0); 869 870 for (c = 0; c < r; c++) 871 nvlist_free(child[c]); 872 873 umem_free(child, r * sizeof (nvlist_t *)); 874 875 return (raidz); 876} 877 878static nvlist_t * 879make_vdev_mirror(char *path, char *aux, char *pool, size_t size, 880 uint64_t ashift, int r, int m) 881{ 882 nvlist_t *mirror, **child; 883 int c; 884 885 if (m < 1) 886 return (make_vdev_raidz(path, aux, pool, size, ashift, r)); 887 888 child = umem_alloc(m * sizeof (nvlist_t *), UMEM_NOFAIL); 889 890 for (c = 0; c < m; c++) 891 child[c] = make_vdev_raidz(path, aux, pool, size, ashift, r); 892 893 VERIFY(nvlist_alloc(&mirror, NV_UNIQUE_NAME, 0) == 0); 894 VERIFY(nvlist_add_string(mirror, ZPOOL_CONFIG_TYPE, 895 VDEV_TYPE_MIRROR) == 0); 896 VERIFY(nvlist_add_nvlist_array(mirror, ZPOOL_CONFIG_CHILDREN, 897 child, m) == 0); 898 899 for (c = 0; c < m; c++) 900 nvlist_free(child[c]); 901 902 umem_free(child, m * sizeof (nvlist_t *)); 903 904 return (mirror); 905} 906 907static nvlist_t * 908make_vdev_root(char *path, char *aux, char *pool, size_t size, uint64_t ashift, 909 int log, int r, int m, int t) 910{ 911 nvlist_t *root, **child; 912 int c; 913 914 ASSERT(t > 0); 915 916 child = umem_alloc(t * sizeof (nvlist_t *), UMEM_NOFAIL); 917 918 for (c = 0; c < t; c++) { 919 child[c] = make_vdev_mirror(path, aux, pool, size, ashift, 920 r, m); 921 VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_IS_LOG, 922 log) == 0); 923 } 924 925 VERIFY(nvlist_alloc(&root, NV_UNIQUE_NAME, 0) == 0); 926 VERIFY(nvlist_add_string(root, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT) == 0); 927 VERIFY(nvlist_add_nvlist_array(root, aux ? aux : ZPOOL_CONFIG_CHILDREN, 928 child, t) == 0); 929 930 for (c = 0; c < t; c++) 931 nvlist_free(child[c]); 932 933 umem_free(child, t * sizeof (nvlist_t *)); 934 935 return (root); 936} 937 938/* 939 * Find a random spa version. Returns back a random spa version in the 940 * range [initial_version, SPA_VERSION_FEATURES]. 
941 */ 942static uint64_t 943ztest_random_spa_version(uint64_t initial_version) 944{ 945 uint64_t version = initial_version; 946 947 if (version <= SPA_VERSION_BEFORE_FEATURES) { 948 version = version + 949 ztest_random(SPA_VERSION_BEFORE_FEATURES - version + 1); 950 } 951 952 if (version > SPA_VERSION_BEFORE_FEATURES) 953 version = SPA_VERSION_FEATURES; 954 955 ASSERT(SPA_VERSION_IS_SUPPORTED(version)); 956 return (version); 957} 958 959static int 960ztest_random_blocksize(void) 961{ 962 return (1 << (SPA_MINBLOCKSHIFT + 963 ztest_random(SPA_MAXBLOCKSHIFT - SPA_MINBLOCKSHIFT + 1))); 964} 965 966static int 967ztest_random_ibshift(void) 968{ 969 return (DN_MIN_INDBLKSHIFT + 970 ztest_random(DN_MAX_INDBLKSHIFT - DN_MIN_INDBLKSHIFT + 1)); 971} 972 973static uint64_t 974ztest_random_vdev_top(spa_t *spa, boolean_t log_ok) 975{ 976 uint64_t top; 977 vdev_t *rvd = spa->spa_root_vdev; 978 vdev_t *tvd; 979 980 ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0); 981 982 do { 983 top = ztest_random(rvd->vdev_children); 984 tvd = rvd->vdev_child[top]; 985 } while (tvd->vdev_ishole || (tvd->vdev_islog && !log_ok) || 986 tvd->vdev_mg == NULL || tvd->vdev_mg->mg_class == NULL); 987 988 return (top); 989} 990 991static uint64_t 992ztest_random_dsl_prop(zfs_prop_t prop) 993{ 994 uint64_t value; 995 996 do { 997 value = zfs_prop_random_value(prop, ztest_random(-1ULL)); 998 } while (prop == ZFS_PROP_CHECKSUM && value == ZIO_CHECKSUM_OFF); 999 1000 return (value); 1001} 1002 1003static int 1004ztest_dsl_prop_set_uint64(char *osname, zfs_prop_t prop, uint64_t value, 1005 boolean_t inherit) 1006{ 1007 const char *propname = zfs_prop_to_name(prop); 1008 const char *valname; 1009 char setpoint[MAXPATHLEN]; 1010 uint64_t curval; 1011 int error; 1012 1013 error = dsl_prop_set_int(osname, propname, 1014 (inherit ? 
ZPROP_SRC_NONE : ZPROP_SRC_LOCAL), value); 1015 1016 if (error == ENOSPC) { 1017 ztest_record_enospc(FTAG); 1018 return (error); 1019 } 1020 ASSERT0(error); 1021 1022 VERIFY0(dsl_prop_get_integer(osname, propname, &curval, setpoint)); 1023 1024 if (ztest_opts.zo_verbose >= 6) { 1025 VERIFY(zfs_prop_index_to_string(prop, curval, &valname) == 0); 1026 (void) printf("%s %s = %s at '%s'\n", 1027 osname, propname, valname, setpoint); 1028 } 1029 1030 return (error); 1031} 1032 1033static int 1034ztest_spa_prop_set_uint64(zpool_prop_t prop, uint64_t value) 1035{ 1036 spa_t *spa = ztest_spa; 1037 nvlist_t *props = NULL; 1038 int error; 1039 1040 VERIFY(nvlist_alloc(&props, NV_UNIQUE_NAME, 0) == 0); 1041 VERIFY(nvlist_add_uint64(props, zpool_prop_to_name(prop), value) == 0); 1042 1043 error = spa_prop_set(spa, props); 1044 1045 nvlist_free(props); 1046 1047 if (error == ENOSPC) { 1048 ztest_record_enospc(FTAG); 1049 return (error); 1050 } 1051 ASSERT0(error); 1052 1053 return (error); 1054} 1055 1056static void 1057ztest_rll_init(rll_t *rll) 1058{ 1059 rll->rll_writer = NULL; 1060 rll->rll_readers = 0; 1061 VERIFY(_mutex_init(&rll->rll_lock, USYNC_THREAD, NULL) == 0); 1062 VERIFY(cond_init(&rll->rll_cv, USYNC_THREAD, NULL) == 0); 1063} 1064 1065static void 1066ztest_rll_destroy(rll_t *rll) 1067{ 1068 ASSERT(rll->rll_writer == NULL); 1069 ASSERT(rll->rll_readers == 0); 1070 VERIFY(_mutex_destroy(&rll->rll_lock) == 0); 1071 VERIFY(cond_destroy(&rll->rll_cv) == 0); 1072} 1073 1074static void 1075ztest_rll_lock(rll_t *rll, rl_type_t type) 1076{ 1077 VERIFY(mutex_lock(&rll->rll_lock) == 0); 1078 1079 if (type == RL_READER) { 1080 while (rll->rll_writer != NULL) 1081 (void) cond_wait(&rll->rll_cv, &rll->rll_lock); 1082 rll->rll_readers++; 1083 } else { 1084 while (rll->rll_writer != NULL || rll->rll_readers) 1085 (void) cond_wait(&rll->rll_cv, &rll->rll_lock); 1086 rll->rll_writer = curthread; 1087 } 1088 1089 VERIFY(mutex_unlock(&rll->rll_lock) == 0); 1090} 1091 1092static void 1093ztest_rll_unlock(rll_t *rll) 1094{ 1095 VERIFY(mutex_lock(&rll->rll_lock) == 0); 1096 1097 if (rll->rll_writer) { 1098 ASSERT(rll->rll_readers == 0); 1099 rll->rll_writer = NULL; 1100 } else { 1101 ASSERT(rll->rll_readers != 0); 1102 ASSERT(rll->rll_writer == NULL); 1103 rll->rll_readers--; 1104 } 1105 1106 if (rll->rll_writer == NULL && rll->rll_readers == 0) 1107 VERIFY(cond_broadcast(&rll->rll_cv) == 0); 1108 1109 VERIFY(mutex_unlock(&rll->rll_lock) == 0); 1110} 1111 1112static void 1113ztest_object_lock(ztest_ds_t *zd, uint64_t object, rl_type_t type) 1114{ 1115 rll_t *rll = &zd->zd_object_lock[object & (ZTEST_OBJECT_LOCKS - 1)]; 1116 1117 ztest_rll_lock(rll, type); 1118} 1119 1120static void 1121ztest_object_unlock(ztest_ds_t *zd, uint64_t object) 1122{ 1123 rll_t *rll = &zd->zd_object_lock[object & (ZTEST_OBJECT_LOCKS - 1)]; 1124 1125 ztest_rll_unlock(rll); 1126} 1127 1128static rl_t * 1129ztest_range_lock(ztest_ds_t *zd, uint64_t object, uint64_t offset, 1130 uint64_t size, rl_type_t type) 1131{ 1132 uint64_t hash = object ^ (offset % (ZTEST_RANGE_LOCKS + 1)); 1133 rll_t *rll = &zd->zd_range_lock[hash & (ZTEST_RANGE_LOCKS - 1)]; 1134 rl_t *rl; 1135 1136 rl = umem_alloc(sizeof (*rl), UMEM_NOFAIL); 1137 rl->rl_object = object; 1138 rl->rl_offset = offset; 1139 rl->rl_size = size; 1140 rl->rl_lock = rll; 1141 1142 ztest_rll_lock(rll, type); 1143 1144 return (rl); 1145} 1146 1147static void 1148ztest_range_unlock(rl_t *rl) 1149{ 1150 rll_t *rll = rl->rl_lock; 1151 1152 ztest_rll_unlock(rll); 1153 1154 umem_free(rl, sizeof 
(*rl)); 1155} 1156 1157static void 1158ztest_zd_init(ztest_ds_t *zd, ztest_shared_ds_t *szd, objset_t *os) 1159{ 1160 zd->zd_os = os; 1161 zd->zd_zilog = dmu_objset_zil(os); 1162 zd->zd_shared = szd; 1163 dmu_objset_name(os, zd->zd_name); 1164 1165 if (zd->zd_shared != NULL) 1166 zd->zd_shared->zd_seq = 0; 1167 1168 VERIFY(rwlock_init(&zd->zd_zilog_lock, USYNC_THREAD, NULL) == 0); 1169 VERIFY(_mutex_init(&zd->zd_dirobj_lock, USYNC_THREAD, NULL) == 0); 1170 1171 for (int l = 0; l < ZTEST_OBJECT_LOCKS; l++) 1172 ztest_rll_init(&zd->zd_object_lock[l]); 1173 1174 for (int l = 0; l < ZTEST_RANGE_LOCKS; l++) 1175 ztest_rll_init(&zd->zd_range_lock[l]); 1176} 1177 1178static void 1179ztest_zd_fini(ztest_ds_t *zd) 1180{ 1181 VERIFY(_mutex_destroy(&zd->zd_dirobj_lock) == 0); 1182 1183 for (int l = 0; l < ZTEST_OBJECT_LOCKS; l++) 1184 ztest_rll_destroy(&zd->zd_object_lock[l]); 1185 1186 for (int l = 0; l < ZTEST_RANGE_LOCKS; l++) 1187 ztest_rll_destroy(&zd->zd_range_lock[l]); 1188} 1189 1190#define TXG_MIGHTWAIT (ztest_random(10) == 0 ? TXG_NOWAIT : TXG_WAIT) 1191 1192static uint64_t 1193ztest_tx_assign(dmu_tx_t *tx, uint64_t txg_how, const char *tag) 1194{ 1195 uint64_t txg; 1196 int error; 1197 1198 /* 1199 * Attempt to assign tx to some transaction group. 1200 */ 1201 error = dmu_tx_assign(tx, txg_how); 1202 if (error) { 1203 if (error == ERESTART) { 1204 ASSERT(txg_how == TXG_NOWAIT); 1205 dmu_tx_wait(tx); 1206 } else { 1207 ASSERT3U(error, ==, ENOSPC); 1208 ztest_record_enospc(tag); 1209 } 1210 dmu_tx_abort(tx); 1211 return (0); 1212 } 1213 txg = dmu_tx_get_txg(tx); 1214 ASSERT(txg != 0); 1215 return (txg); 1216} 1217 1218static void 1219ztest_pattern_set(void *buf, uint64_t size, uint64_t value) 1220{ 1221 uint64_t *ip = buf; 1222 uint64_t *ip_end = (uint64_t *)((uintptr_t)buf + (uintptr_t)size); 1223 1224 while (ip < ip_end) 1225 *ip++ = value; 1226} 1227 1228static boolean_t 1229ztest_pattern_match(void *buf, uint64_t size, uint64_t value) 1230{ 1231 uint64_t *ip = buf; 1232 uint64_t *ip_end = (uint64_t *)((uintptr_t)buf + (uintptr_t)size); 1233 uint64_t diff = 0; 1234 1235 while (ip < ip_end) 1236 diff |= (value - *ip++); 1237 1238 return (diff == 0); 1239} 1240 1241static void 1242ztest_bt_generate(ztest_block_tag_t *bt, objset_t *os, uint64_t object, 1243 uint64_t offset, uint64_t gen, uint64_t txg, uint64_t crtxg) 1244{ 1245 bt->bt_magic = BT_MAGIC; 1246 bt->bt_objset = dmu_objset_id(os); 1247 bt->bt_object = object; 1248 bt->bt_offset = offset; 1249 bt->bt_gen = gen; 1250 bt->bt_txg = txg; 1251 bt->bt_crtxg = crtxg; 1252} 1253 1254static void 1255ztest_bt_verify(ztest_block_tag_t *bt, objset_t *os, uint64_t object, 1256 uint64_t offset, uint64_t gen, uint64_t txg, uint64_t crtxg) 1257{ 1258 ASSERT(bt->bt_magic == BT_MAGIC); 1259 ASSERT(bt->bt_objset == dmu_objset_id(os)); 1260 ASSERT(bt->bt_object == object); 1261 ASSERT(bt->bt_offset == offset); 1262 ASSERT(bt->bt_gen <= gen); 1263 ASSERT(bt->bt_txg <= txg); 1264 ASSERT(bt->bt_crtxg == crtxg); 1265} 1266 1267static ztest_block_tag_t * 1268ztest_bt_bonus(dmu_buf_t *db) 1269{ 1270 dmu_object_info_t doi; 1271 ztest_block_tag_t *bt; 1272 1273 dmu_object_info_from_db(db, &doi); 1274 ASSERT3U(doi.doi_bonus_size, <=, db->db_size); 1275 ASSERT3U(doi.doi_bonus_size, >=, sizeof (*bt)); 1276 bt = (void *)((char *)db->db_data + doi.doi_bonus_size - sizeof (*bt)); 1277 1278 return (bt); 1279} 1280 1281/* 1282 * ZIL logging ops 1283 */ 1284 1285#define lrz_type lr_mode 1286#define lrz_blocksize lr_uid 1287#define lrz_ibshift lr_gid 1288#define 
lrz_bonustype lr_rdev 1289#define lrz_bonuslen lr_crtime[1] 1290 1291static void 1292ztest_log_create(ztest_ds_t *zd, dmu_tx_t *tx, lr_create_t *lr) 1293{ 1294 char *name = (void *)(lr + 1); /* name follows lr */ 1295 size_t namesize = strlen(name) + 1; 1296 itx_t *itx; 1297 1298 if (zil_replaying(zd->zd_zilog, tx)) 1299 return; 1300 1301 itx = zil_itx_create(TX_CREATE, sizeof (*lr) + namesize); 1302 bcopy(&lr->lr_common + 1, &itx->itx_lr + 1, 1303 sizeof (*lr) + namesize - sizeof (lr_t)); 1304 1305 zil_itx_assign(zd->zd_zilog, itx, tx); 1306} 1307 1308static void 1309ztest_log_remove(ztest_ds_t *zd, dmu_tx_t *tx, lr_remove_t *lr, uint64_t object) 1310{ 1311 char *name = (void *)(lr + 1); /* name follows lr */ 1312 size_t namesize = strlen(name) + 1; 1313 itx_t *itx; 1314 1315 if (zil_replaying(zd->zd_zilog, tx)) 1316 return; 1317 1318 itx = zil_itx_create(TX_REMOVE, sizeof (*lr) + namesize); 1319 bcopy(&lr->lr_common + 1, &itx->itx_lr + 1, 1320 sizeof (*lr) + namesize - sizeof (lr_t)); 1321 1322 itx->itx_oid = object; 1323 zil_itx_assign(zd->zd_zilog, itx, tx); 1324} 1325 1326static void 1327ztest_log_write(ztest_ds_t *zd, dmu_tx_t *tx, lr_write_t *lr) 1328{ 1329 itx_t *itx; 1330 itx_wr_state_t write_state = ztest_random(WR_NUM_STATES); 1331 1332 if (zil_replaying(zd->zd_zilog, tx)) 1333 return; 1334 1335 if (lr->lr_length > ZIL_MAX_LOG_DATA) 1336 write_state = WR_INDIRECT; 1337 1338 itx = zil_itx_create(TX_WRITE, 1339 sizeof (*lr) + (write_state == WR_COPIED ? lr->lr_length : 0)); 1340 1341 if (write_state == WR_COPIED && 1342 dmu_read(zd->zd_os, lr->lr_foid, lr->lr_offset, lr->lr_length, 1343 ((lr_write_t *)&itx->itx_lr) + 1, DMU_READ_NO_PREFETCH) != 0) { 1344 zil_itx_destroy(itx); 1345 itx = zil_itx_create(TX_WRITE, sizeof (*lr)); 1346 write_state = WR_NEED_COPY; 1347 } 1348 itx->itx_private = zd; 1349 itx->itx_wr_state = write_state; 1350 itx->itx_sync = (ztest_random(8) == 0); 1351 itx->itx_sod += (write_state == WR_NEED_COPY ? 
lr->lr_length : 0); 1352 1353 bcopy(&lr->lr_common + 1, &itx->itx_lr + 1, 1354 sizeof (*lr) - sizeof (lr_t)); 1355 1356 zil_itx_assign(zd->zd_zilog, itx, tx); 1357} 1358 1359static void 1360ztest_log_truncate(ztest_ds_t *zd, dmu_tx_t *tx, lr_truncate_t *lr) 1361{ 1362 itx_t *itx; 1363 1364 if (zil_replaying(zd->zd_zilog, tx)) 1365 return; 1366 1367 itx = zil_itx_create(TX_TRUNCATE, sizeof (*lr)); 1368 bcopy(&lr->lr_common + 1, &itx->itx_lr + 1, 1369 sizeof (*lr) - sizeof (lr_t)); 1370 1371 itx->itx_sync = B_FALSE; 1372 zil_itx_assign(zd->zd_zilog, itx, tx); 1373} 1374 1375static void 1376ztest_log_setattr(ztest_ds_t *zd, dmu_tx_t *tx, lr_setattr_t *lr) 1377{ 1378 itx_t *itx; 1379 1380 if (zil_replaying(zd->zd_zilog, tx)) 1381 return; 1382 1383 itx = zil_itx_create(TX_SETATTR, sizeof (*lr)); 1384 bcopy(&lr->lr_common + 1, &itx->itx_lr + 1, 1385 sizeof (*lr) - sizeof (lr_t)); 1386 1387 itx->itx_sync = B_FALSE; 1388 zil_itx_assign(zd->zd_zilog, itx, tx); 1389} 1390 1391/* 1392 * ZIL replay ops 1393 */ 1394static int 1395ztest_replay_create(ztest_ds_t *zd, lr_create_t *lr, boolean_t byteswap) 1396{ 1397 char *name = (void *)(lr + 1); /* name follows lr */ 1398 objset_t *os = zd->zd_os; 1399 ztest_block_tag_t *bbt; 1400 dmu_buf_t *db; 1401 dmu_tx_t *tx; 1402 uint64_t txg; 1403 int error = 0; 1404 1405 if (byteswap) 1406 byteswap_uint64_array(lr, sizeof (*lr)); 1407 1408 ASSERT(lr->lr_doid == ZTEST_DIROBJ); 1409 ASSERT(name[0] != '\0'); 1410 1411 tx = dmu_tx_create(os); 1412 1413 dmu_tx_hold_zap(tx, lr->lr_doid, B_TRUE, name); 1414 1415 if (lr->lrz_type == DMU_OT_ZAP_OTHER) { 1416 dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL); 1417 } else { 1418 dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT); 1419 } 1420 1421 txg = ztest_tx_assign(tx, TXG_WAIT, FTAG); 1422 if (txg == 0) 1423 return (ENOSPC); 1424 1425 ASSERT(dmu_objset_zil(os)->zl_replay == !!lr->lr_foid); 1426 1427 if (lr->lrz_type == DMU_OT_ZAP_OTHER) { 1428 if (lr->lr_foid == 0) { 1429 lr->lr_foid = zap_create(os, 1430 lr->lrz_type, lr->lrz_bonustype, 1431 lr->lrz_bonuslen, tx); 1432 } else { 1433 error = zap_create_claim(os, lr->lr_foid, 1434 lr->lrz_type, lr->lrz_bonustype, 1435 lr->lrz_bonuslen, tx); 1436 } 1437 } else { 1438 if (lr->lr_foid == 0) { 1439 lr->lr_foid = dmu_object_alloc(os, 1440 lr->lrz_type, 0, lr->lrz_bonustype, 1441 lr->lrz_bonuslen, tx); 1442 } else { 1443 error = dmu_object_claim(os, lr->lr_foid, 1444 lr->lrz_type, 0, lr->lrz_bonustype, 1445 lr->lrz_bonuslen, tx); 1446 } 1447 } 1448 1449 if (error) { 1450 ASSERT3U(error, ==, EEXIST); 1451 ASSERT(zd->zd_zilog->zl_replay); 1452 dmu_tx_commit(tx); 1453 return (error); 1454 } 1455 1456 ASSERT(lr->lr_foid != 0); 1457 1458 if (lr->lrz_type != DMU_OT_ZAP_OTHER) 1459 VERIFY3U(0, ==, dmu_object_set_blocksize(os, lr->lr_foid, 1460 lr->lrz_blocksize, lr->lrz_ibshift, tx)); 1461 1462 VERIFY3U(0, ==, dmu_bonus_hold(os, lr->lr_foid, FTAG, &db)); 1463 bbt = ztest_bt_bonus(db); 1464 dmu_buf_will_dirty(db, tx); 1465 ztest_bt_generate(bbt, os, lr->lr_foid, -1ULL, lr->lr_gen, txg, txg); 1466 dmu_buf_rele(db, FTAG); 1467 1468 VERIFY3U(0, ==, zap_add(os, lr->lr_doid, name, sizeof (uint64_t), 1, 1469 &lr->lr_foid, tx)); 1470 1471 (void) ztest_log_create(zd, tx, lr); 1472 1473 dmu_tx_commit(tx); 1474 1475 return (0); 1476} 1477 1478static int 1479ztest_replay_remove(ztest_ds_t *zd, lr_remove_t *lr, boolean_t byteswap) 1480{ 1481 char *name = (void *)(lr + 1); /* name follows lr */ 1482 objset_t *os = zd->zd_os; 1483 dmu_object_info_t doi; 1484 dmu_tx_t *tx; 1485 uint64_t object, txg; 1486 1487 if 
(byteswap) 1488 byteswap_uint64_array(lr, sizeof (*lr)); 1489 1490 ASSERT(lr->lr_doid == ZTEST_DIROBJ); 1491 ASSERT(name[0] != '\0'); 1492 1493 VERIFY3U(0, ==, 1494 zap_lookup(os, lr->lr_doid, name, sizeof (object), 1, &object)); 1495 ASSERT(object != 0); 1496 1497 ztest_object_lock(zd, object, RL_WRITER); 1498 1499 VERIFY3U(0, ==, dmu_object_info(os, object, &doi)); 1500 1501 tx = dmu_tx_create(os); 1502 1503 dmu_tx_hold_zap(tx, lr->lr_doid, B_FALSE, name); 1504 dmu_tx_hold_free(tx, object, 0, DMU_OBJECT_END); 1505 1506 txg = ztest_tx_assign(tx, TXG_WAIT, FTAG); 1507 if (txg == 0) { 1508 ztest_object_unlock(zd, object); 1509 return (ENOSPC); 1510 } 1511 1512 if (doi.doi_type == DMU_OT_ZAP_OTHER) { 1513 VERIFY3U(0, ==, zap_destroy(os, object, tx)); 1514 } else { 1515 VERIFY3U(0, ==, dmu_object_free(os, object, tx)); 1516 } 1517 1518 VERIFY3U(0, ==, zap_remove(os, lr->lr_doid, name, tx)); 1519 1520 (void) ztest_log_remove(zd, tx, lr, object); 1521 1522 dmu_tx_commit(tx); 1523 1524 ztest_object_unlock(zd, object); 1525 1526 return (0); 1527} 1528 1529static int 1530ztest_replay_write(ztest_ds_t *zd, lr_write_t *lr, boolean_t byteswap) 1531{ 1532 objset_t *os = zd->zd_os; 1533 void *data = lr + 1; /* data follows lr */ 1534 uint64_t offset, length; 1535 ztest_block_tag_t *bt = data; 1536 ztest_block_tag_t *bbt; 1537 uint64_t gen, txg, lrtxg, crtxg; 1538 dmu_object_info_t doi; 1539 dmu_tx_t *tx; 1540 dmu_buf_t *db; 1541 arc_buf_t *abuf = NULL; 1542 rl_t *rl; 1543 1544 if (byteswap) 1545 byteswap_uint64_array(lr, sizeof (*lr)); 1546 1547 offset = lr->lr_offset; 1548 length = lr->lr_length; 1549 1550 /* If it's a dmu_sync() block, write the whole block */ 1551 if (lr->lr_common.lrc_reclen == sizeof (lr_write_t)) { 1552 uint64_t blocksize = BP_GET_LSIZE(&lr->lr_blkptr); 1553 if (length < blocksize) { 1554 offset -= offset % blocksize; 1555 length = blocksize; 1556 } 1557 } 1558 1559 if (bt->bt_magic == BSWAP_64(BT_MAGIC)) 1560 byteswap_uint64_array(bt, sizeof (*bt)); 1561 1562 if (bt->bt_magic != BT_MAGIC) 1563 bt = NULL; 1564 1565 ztest_object_lock(zd, lr->lr_foid, RL_READER); 1566 rl = ztest_range_lock(zd, lr->lr_foid, offset, length, RL_WRITER); 1567 1568 VERIFY3U(0, ==, dmu_bonus_hold(os, lr->lr_foid, FTAG, &db)); 1569 1570 dmu_object_info_from_db(db, &doi); 1571 1572 bbt = ztest_bt_bonus(db); 1573 ASSERT3U(bbt->bt_magic, ==, BT_MAGIC); 1574 gen = bbt->bt_gen; 1575 crtxg = bbt->bt_crtxg; 1576 lrtxg = lr->lr_common.lrc_txg; 1577 1578 tx = dmu_tx_create(os); 1579 1580 dmu_tx_hold_write(tx, lr->lr_foid, offset, length); 1581 1582 if (ztest_random(8) == 0 && length == doi.doi_data_block_size && 1583 P2PHASE(offset, length) == 0) 1584 abuf = dmu_request_arcbuf(db, length); 1585 1586 txg = ztest_tx_assign(tx, TXG_WAIT, FTAG); 1587 if (txg == 0) { 1588 if (abuf != NULL) 1589 dmu_return_arcbuf(abuf); 1590 dmu_buf_rele(db, FTAG); 1591 ztest_range_unlock(rl); 1592 ztest_object_unlock(zd, lr->lr_foid); 1593 return (ENOSPC); 1594 } 1595 1596 if (bt != NULL) { 1597 /* 1598 * Usually, verify the old data before writing new data -- 1599 * but not always, because we also want to verify correct 1600 * behavior when the data was not recently read into cache. 1601 */ 1602 ASSERT(offset % doi.doi_data_block_size == 0); 1603 if (ztest_random(4) != 0) { 1604 int prefetch = ztest_random(2) ? 
1605 DMU_READ_PREFETCH : DMU_READ_NO_PREFETCH; 1606 ztest_block_tag_t rbt; 1607 1608 VERIFY(dmu_read(os, lr->lr_foid, offset, 1609 sizeof (rbt), &rbt, prefetch) == 0); 1610 if (rbt.bt_magic == BT_MAGIC) { 1611 ztest_bt_verify(&rbt, os, lr->lr_foid, 1612 offset, gen, txg, crtxg); 1613 } 1614 } 1615 1616 /* 1617 * Writes can appear to be newer than the bonus buffer because 1618 * the ztest_get_data() callback does a dmu_read() of the 1619 * open-context data, which may be different than the data 1620 * as it was when the write was generated. 1621 */ 1622 if (zd->zd_zilog->zl_replay) { 1623 ztest_bt_verify(bt, os, lr->lr_foid, offset, 1624 MAX(gen, bt->bt_gen), MAX(txg, lrtxg), 1625 bt->bt_crtxg); 1626 } 1627 1628 /* 1629 * Set the bt's gen/txg to the bonus buffer's gen/txg 1630 * so that all of the usual ASSERTs will work. 1631 */ 1632 ztest_bt_generate(bt, os, lr->lr_foid, offset, gen, txg, crtxg); 1633 } 1634 1635 if (abuf == NULL) { 1636 dmu_write(os, lr->lr_foid, offset, length, data, tx); 1637 } else { 1638 bcopy(data, abuf->b_data, length); 1639 dmu_assign_arcbuf(db, offset, abuf, tx); 1640 } 1641 1642 (void) ztest_log_write(zd, tx, lr); 1643 1644 dmu_buf_rele(db, FTAG); 1645 1646 dmu_tx_commit(tx); 1647 1648 ztest_range_unlock(rl); 1649 ztest_object_unlock(zd, lr->lr_foid); 1650 1651 return (0); 1652} 1653 1654static int 1655ztest_replay_truncate(ztest_ds_t *zd, lr_truncate_t *lr, boolean_t byteswap) 1656{ 1657 objset_t *os = zd->zd_os; 1658 dmu_tx_t *tx; 1659 uint64_t txg; 1660 rl_t *rl; 1661 1662 if (byteswap) 1663 byteswap_uint64_array(lr, sizeof (*lr)); 1664 1665 ztest_object_lock(zd, lr->lr_foid, RL_READER); 1666 rl = ztest_range_lock(zd, lr->lr_foid, lr->lr_offset, lr->lr_length, 1667 RL_WRITER); 1668 1669 tx = dmu_tx_create(os); 1670 1671 dmu_tx_hold_free(tx, lr->lr_foid, lr->lr_offset, lr->lr_length); 1672 1673 txg = ztest_tx_assign(tx, TXG_WAIT, FTAG); 1674 if (txg == 0) { 1675 ztest_range_unlock(rl); 1676 ztest_object_unlock(zd, lr->lr_foid); 1677 return (ENOSPC); 1678 } 1679 1680 VERIFY(dmu_free_range(os, lr->lr_foid, lr->lr_offset, 1681 lr->lr_length, tx) == 0); 1682 1683 (void) ztest_log_truncate(zd, tx, lr); 1684 1685 dmu_tx_commit(tx); 1686 1687 ztest_range_unlock(rl); 1688 ztest_object_unlock(zd, lr->lr_foid); 1689 1690 return (0); 1691} 1692 1693static int 1694ztest_replay_setattr(ztest_ds_t *zd, lr_setattr_t *lr, boolean_t byteswap) 1695{ 1696 objset_t *os = zd->zd_os; 1697 dmu_tx_t *tx; 1698 dmu_buf_t *db; 1699 ztest_block_tag_t *bbt; 1700 uint64_t txg, lrtxg, crtxg; 1701 1702 if (byteswap) 1703 byteswap_uint64_array(lr, sizeof (*lr)); 1704 1705 ztest_object_lock(zd, lr->lr_foid, RL_WRITER); 1706 1707 VERIFY3U(0, ==, dmu_bonus_hold(os, lr->lr_foid, FTAG, &db)); 1708 1709 tx = dmu_tx_create(os); 1710 dmu_tx_hold_bonus(tx, lr->lr_foid); 1711 1712 txg = ztest_tx_assign(tx, TXG_WAIT, FTAG); 1713 if (txg == 0) { 1714 dmu_buf_rele(db, FTAG); 1715 ztest_object_unlock(zd, lr->lr_foid); 1716 return (ENOSPC); 1717 } 1718 1719 bbt = ztest_bt_bonus(db); 1720 ASSERT3U(bbt->bt_magic, ==, BT_MAGIC); 1721 crtxg = bbt->bt_crtxg; 1722 lrtxg = lr->lr_common.lrc_txg; 1723 1724 if (zd->zd_zilog->zl_replay) { 1725 ASSERT(lr->lr_size != 0); 1726 ASSERT(lr->lr_mode != 0); 1727 ASSERT(lrtxg != 0); 1728 } else { 1729 /* 1730 * Randomly change the size and increment the generation. 
1731 */ 1732 lr->lr_size = (ztest_random(db->db_size / sizeof (*bbt)) + 1) * 1733 sizeof (*bbt); 1734 lr->lr_mode = bbt->bt_gen + 1; 1735 ASSERT(lrtxg == 0); 1736 } 1737 1738 /* 1739 * Verify that the current bonus buffer is not newer than our txg. 1740 */ 1741 ztest_bt_verify(bbt, os, lr->lr_foid, -1ULL, lr->lr_mode, 1742 MAX(txg, lrtxg), crtxg); 1743 1744 dmu_buf_will_dirty(db, tx); 1745 1746 ASSERT3U(lr->lr_size, >=, sizeof (*bbt)); 1747 ASSERT3U(lr->lr_size, <=, db->db_size); 1748 VERIFY0(dmu_set_bonus(db, lr->lr_size, tx)); 1749 bbt = ztest_bt_bonus(db); 1750 1751 ztest_bt_generate(bbt, os, lr->lr_foid, -1ULL, lr->lr_mode, txg, crtxg); 1752 1753 dmu_buf_rele(db, FTAG); 1754 1755 (void) ztest_log_setattr(zd, tx, lr); 1756 1757 dmu_tx_commit(tx); 1758 1759 ztest_object_unlock(zd, lr->lr_foid); 1760 1761 return (0); 1762} 1763 1764zil_replay_func_t *ztest_replay_vector[TX_MAX_TYPE] = { 1765 NULL, /* 0 no such transaction type */ 1766 ztest_replay_create, /* TX_CREATE */ 1767 NULL, /* TX_MKDIR */ 1768 NULL, /* TX_MKXATTR */ 1769 NULL, /* TX_SYMLINK */ 1770 ztest_replay_remove, /* TX_REMOVE */ 1771 NULL, /* TX_RMDIR */ 1772 NULL, /* TX_LINK */ 1773 NULL, /* TX_RENAME */ 1774 ztest_replay_write, /* TX_WRITE */ 1775 ztest_replay_truncate, /* TX_TRUNCATE */ 1776 ztest_replay_setattr, /* TX_SETATTR */ 1777 NULL, /* TX_ACL */ 1778 NULL, /* TX_CREATE_ACL */ 1779 NULL, /* TX_CREATE_ATTR */ 1780 NULL, /* TX_CREATE_ACL_ATTR */ 1781 NULL, /* TX_MKDIR_ACL */ 1782 NULL, /* TX_MKDIR_ATTR */ 1783 NULL, /* TX_MKDIR_ACL_ATTR */ 1784 NULL, /* TX_WRITE2 */ 1785}; 1786 1787/* 1788 * ZIL get_data callbacks 1789 */ 1790 1791static void 1792ztest_get_done(zgd_t *zgd, int error) 1793{ 1794 ztest_ds_t *zd = zgd->zgd_private; 1795 uint64_t object = zgd->zgd_rl->rl_object; 1796 1797 if (zgd->zgd_db) 1798 dmu_buf_rele(zgd->zgd_db, zgd); 1799 1800 ztest_range_unlock(zgd->zgd_rl); 1801 ztest_object_unlock(zd, object); 1802 1803 if (error == 0 && zgd->zgd_bp) 1804 zil_add_block(zgd->zgd_zilog, zgd->zgd_bp); 1805 1806 umem_free(zgd, sizeof (*zgd)); 1807} 1808 1809static int 1810ztest_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio) 1811{ 1812 ztest_ds_t *zd = arg; 1813 objset_t *os = zd->zd_os; 1814 uint64_t object = lr->lr_foid; 1815 uint64_t offset = lr->lr_offset; 1816 uint64_t size = lr->lr_length; 1817 blkptr_t *bp = &lr->lr_blkptr; 1818 uint64_t txg = lr->lr_common.lrc_txg; 1819 uint64_t crtxg; 1820 dmu_object_info_t doi; 1821 dmu_buf_t *db; 1822 zgd_t *zgd; 1823 int error; 1824 1825 ztest_object_lock(zd, object, RL_READER); 1826 error = dmu_bonus_hold(os, object, FTAG, &db); 1827 if (error) { 1828 ztest_object_unlock(zd, object); 1829 return (error); 1830 } 1831 1832 crtxg = ztest_bt_bonus(db)->bt_crtxg; 1833 1834 if (crtxg == 0 || crtxg > txg) { 1835 dmu_buf_rele(db, FTAG); 1836 ztest_object_unlock(zd, object); 1837 return (ENOENT); 1838 } 1839 1840 dmu_object_info_from_db(db, &doi); 1841 dmu_buf_rele(db, FTAG); 1842 db = NULL; 1843 1844 zgd = umem_zalloc(sizeof (*zgd), UMEM_NOFAIL); 1845 zgd->zgd_zilog = zd->zd_zilog; 1846 zgd->zgd_private = zd; 1847 1848 if (buf != NULL) { /* immediate write */ 1849 zgd->zgd_rl = ztest_range_lock(zd, object, offset, size, 1850 RL_READER); 1851 1852 error = dmu_read(os, object, offset, size, buf, 1853 DMU_READ_NO_PREFETCH); 1854 ASSERT(error == 0); 1855 } else { 1856 size = doi.doi_data_block_size; 1857 if (ISP2(size)) { 1858 offset = P2ALIGN(offset, size); 1859 } else { 1860 ASSERT(offset < size); 1861 offset = 0; 1862 } 1863 1864 zgd->zgd_rl = ztest_range_lock(zd, 
object, offset, size, 1865 RL_READER); 1866 1867 error = dmu_buf_hold(os, object, offset, zgd, &db, 1868 DMU_READ_NO_PREFETCH); 1869 1870 if (error == 0) { 1871 blkptr_t *obp = dmu_buf_get_blkptr(db); 1872 if (obp) { 1873 ASSERT(BP_IS_HOLE(bp)); 1874 *bp = *obp; 1875 } 1876 1877 zgd->zgd_db = db; 1878 zgd->zgd_bp = bp; 1879 1880 ASSERT(db->db_offset == offset); 1881 ASSERT(db->db_size == size); 1882 1883 error = dmu_sync(zio, lr->lr_common.lrc_txg, 1884 ztest_get_done, zgd); 1885 1886 if (error == 0) 1887 return (0); 1888 } 1889 } 1890 1891 ztest_get_done(zgd, error); 1892 1893 return (error); 1894} 1895 1896static void * 1897ztest_lr_alloc(size_t lrsize, char *name) 1898{ 1899 char *lr; 1900 size_t namesize = name ? strlen(name) + 1 : 0; 1901 1902 lr = umem_zalloc(lrsize + namesize, UMEM_NOFAIL); 1903 1904 if (name) 1905 bcopy(name, lr + lrsize, namesize); 1906 1907 return (lr); 1908} 1909 1910void 1911ztest_lr_free(void *lr, size_t lrsize, char *name) 1912{ 1913 size_t namesize = name ? strlen(name) + 1 : 0; 1914 1915 umem_free(lr, lrsize + namesize); 1916} 1917 1918/* 1919 * Lookup a bunch of objects. Returns the number of objects not found. 1920 */ 1921static int 1922ztest_lookup(ztest_ds_t *zd, ztest_od_t *od, int count) 1923{ 1924 int missing = 0; 1925 int error; 1926 1927 ASSERT(_mutex_held(&zd->zd_dirobj_lock)); 1928 1929 for (int i = 0; i < count; i++, od++) { 1930 od->od_object = 0; 1931 error = zap_lookup(zd->zd_os, od->od_dir, od->od_name, 1932 sizeof (uint64_t), 1, &od->od_object); 1933 if (error) { 1934 ASSERT(error == ENOENT); 1935 ASSERT(od->od_object == 0); 1936 missing++; 1937 } else { 1938 dmu_buf_t *db; 1939 ztest_block_tag_t *bbt; 1940 dmu_object_info_t doi; 1941 1942 ASSERT(od->od_object != 0); 1943 ASSERT(missing == 0); /* there should be no gaps */ 1944 1945 ztest_object_lock(zd, od->od_object, RL_READER); 1946 VERIFY3U(0, ==, dmu_bonus_hold(zd->zd_os, 1947 od->od_object, FTAG, &db)); 1948 dmu_object_info_from_db(db, &doi); 1949 bbt = ztest_bt_bonus(db); 1950 ASSERT3U(bbt->bt_magic, ==, BT_MAGIC); 1951 od->od_type = doi.doi_type; 1952 od->od_blocksize = doi.doi_data_block_size; 1953 od->od_gen = bbt->bt_gen; 1954 dmu_buf_rele(db, FTAG); 1955 ztest_object_unlock(zd, od->od_object); 1956 } 1957 } 1958 1959 return (missing); 1960} 1961 1962static int 1963ztest_create(ztest_ds_t *zd, ztest_od_t *od, int count) 1964{ 1965 int missing = 0; 1966 1967 ASSERT(_mutex_held(&zd->zd_dirobj_lock)); 1968 1969 for (int i = 0; i < count; i++, od++) { 1970 if (missing) { 1971 od->od_object = 0; 1972 missing++; 1973 continue; 1974 } 1975 1976 lr_create_t *lr = ztest_lr_alloc(sizeof (*lr), od->od_name); 1977 1978 lr->lr_doid = od->od_dir; 1979 lr->lr_foid = 0; /* 0 to allocate, > 0 to claim */ 1980 lr->lrz_type = od->od_crtype; 1981 lr->lrz_blocksize = od->od_crblocksize; 1982 lr->lrz_ibshift = ztest_random_ibshift(); 1983 lr->lrz_bonustype = DMU_OT_UINT64_OTHER; 1984 lr->lrz_bonuslen = dmu_bonus_max(); 1985 lr->lr_gen = od->od_crgen; 1986 lr->lr_crtime[0] = time(NULL); 1987 1988 if (ztest_replay_create(zd, lr, B_FALSE) != 0) { 1989 ASSERT(missing == 0); 1990 od->od_object = 0; 1991 missing++; 1992 } else { 1993 od->od_object = lr->lr_foid; 1994 od->od_type = od->od_crtype; 1995 od->od_blocksize = od->od_crblocksize; 1996 od->od_gen = od->od_crgen; 1997 ASSERT(od->od_object != 0); 1998 } 1999 2000 ztest_lr_free(lr, sizeof (*lr), od->od_name); 2001 } 2002 2003 return (missing); 2004} 2005 2006static int 2007ztest_remove(ztest_ds_t *zd, ztest_od_t *od, int count) 2008{ 2009 int missing = 
0; 2010 int error; 2011 2012 ASSERT(_mutex_held(&zd->zd_dirobj_lock)); 2013 2014 od += count - 1; 2015 2016 for (int i = count - 1; i >= 0; i--, od--) { 2017 if (missing) { 2018 missing++; 2019 continue; 2020 } 2021 2022 /* 2023 * No object was found. 2024 */ 2025 if (od->od_object == 0) 2026 continue; 2027 2028 lr_remove_t *lr = ztest_lr_alloc(sizeof (*lr), od->od_name); 2029 2030 lr->lr_doid = od->od_dir; 2031 2032 if ((error = ztest_replay_remove(zd, lr, B_FALSE)) != 0) { 2033 ASSERT3U(error, ==, ENOSPC); 2034 missing++; 2035 } else { 2036 od->od_object = 0; 2037 } 2038 ztest_lr_free(lr, sizeof (*lr), od->od_name); 2039 } 2040 2041 return (missing); 2042} 2043 2044static int 2045ztest_write(ztest_ds_t *zd, uint64_t object, uint64_t offset, uint64_t size, 2046 void *data) 2047{ 2048 lr_write_t *lr; 2049 int error; 2050 2051 lr = ztest_lr_alloc(sizeof (*lr) + size, NULL); 2052 2053 lr->lr_foid = object; 2054 lr->lr_offset = offset; 2055 lr->lr_length = size; 2056 lr->lr_blkoff = 0; 2057 BP_ZERO(&lr->lr_blkptr); 2058 2059 bcopy(data, lr + 1, size); 2060 2061 error = ztest_replay_write(zd, lr, B_FALSE); 2062 2063 ztest_lr_free(lr, sizeof (*lr) + size, NULL); 2064 2065 return (error); 2066} 2067 2068static int 2069ztest_truncate(ztest_ds_t *zd, uint64_t object, uint64_t offset, uint64_t size) 2070{ 2071 lr_truncate_t *lr; 2072 int error; 2073 2074 lr = ztest_lr_alloc(sizeof (*lr), NULL); 2075 2076 lr->lr_foid = object; 2077 lr->lr_offset = offset; 2078 lr->lr_length = size; 2079 2080 error = ztest_replay_truncate(zd, lr, B_FALSE); 2081 2082 ztest_lr_free(lr, sizeof (*lr), NULL); 2083 2084 return (error); 2085} 2086 2087static int 2088ztest_setattr(ztest_ds_t *zd, uint64_t object) 2089{ 2090 lr_setattr_t *lr; 2091 int error; 2092 2093 lr = ztest_lr_alloc(sizeof (*lr), NULL); 2094 2095 lr->lr_foid = object; 2096 lr->lr_size = 0; 2097 lr->lr_mode = 0; 2098 2099 error = ztest_replay_setattr(zd, lr, B_FALSE); 2100 2101 ztest_lr_free(lr, sizeof (*lr), NULL); 2102 2103 return (error); 2104} 2105 2106static void 2107ztest_prealloc(ztest_ds_t *zd, uint64_t object, uint64_t offset, uint64_t size) 2108{ 2109 objset_t *os = zd->zd_os; 2110 dmu_tx_t *tx; 2111 uint64_t txg; 2112 rl_t *rl; 2113 2114 txg_wait_synced(dmu_objset_pool(os), 0); 2115 2116 ztest_object_lock(zd, object, RL_READER); 2117 rl = ztest_range_lock(zd, object, offset, size, RL_WRITER); 2118 2119 tx = dmu_tx_create(os); 2120 2121 dmu_tx_hold_write(tx, object, offset, size); 2122 2123 txg = ztest_tx_assign(tx, TXG_WAIT, FTAG); 2124 2125 if (txg != 0) { 2126 dmu_prealloc(os, object, offset, size, tx); 2127 dmu_tx_commit(tx); 2128 txg_wait_synced(dmu_objset_pool(os), txg); 2129 } else { 2130 (void) dmu_free_long_range(os, object, offset, size); 2131 } 2132 2133 ztest_range_unlock(rl); 2134 ztest_object_unlock(zd, object); 2135} 2136 2137static void 2138ztest_io(ztest_ds_t *zd, uint64_t object, uint64_t offset) 2139{ 2140 int err; 2141 ztest_block_tag_t wbt; 2142 dmu_object_info_t doi; 2143 enum ztest_io_type io_type; 2144 uint64_t blocksize; 2145 void *data; 2146 2147 VERIFY(dmu_object_info(zd->zd_os, object, &doi) == 0); 2148 blocksize = doi.doi_data_block_size; 2149 data = umem_alloc(blocksize, UMEM_NOFAIL); 2150 2151 /* 2152 * Pick an i/o type at random, biased toward writing block tags. 
2153 */ 2154 io_type = ztest_random(ZTEST_IO_TYPES); 2155 if (ztest_random(2) == 0) 2156 io_type = ZTEST_IO_WRITE_TAG; 2157 2158 (void) rw_rdlock(&zd->zd_zilog_lock); 2159 2160 switch (io_type) { 2161 2162 case ZTEST_IO_WRITE_TAG: 2163 ztest_bt_generate(&wbt, zd->zd_os, object, offset, 0, 0, 0); 2164 (void) ztest_write(zd, object, offset, sizeof (wbt), &wbt); 2165 break; 2166 2167 case ZTEST_IO_WRITE_PATTERN: 2168 (void) memset(data, 'a' + (object + offset) % 5, blocksize); 2169 if (ztest_random(2) == 0) { 2170 /* 2171 * Induce fletcher2 collisions to ensure that 2172 * zio_ddt_collision() detects and resolves them 2173 * when using fletcher2-verify for deduplication. 2174 */ 2175 ((uint64_t *)data)[0] ^= 1ULL << 63; 2176 ((uint64_t *)data)[4] ^= 1ULL << 63; 2177 } 2178 (void) ztest_write(zd, object, offset, blocksize, data); 2179 break; 2180 2181 case ZTEST_IO_WRITE_ZEROES: 2182 bzero(data, blocksize); 2183 (void) ztest_write(zd, object, offset, blocksize, data); 2184 break; 2185 2186 case ZTEST_IO_TRUNCATE: 2187 (void) ztest_truncate(zd, object, offset, blocksize); 2188 break; 2189 2190 case ZTEST_IO_SETATTR: 2191 (void) ztest_setattr(zd, object); 2192 break; 2193 2194 case ZTEST_IO_REWRITE: 2195 (void) rw_rdlock(&ztest_name_lock); 2196 err = ztest_dsl_prop_set_uint64(zd->zd_name, 2197 ZFS_PROP_CHECKSUM, spa_dedup_checksum(ztest_spa), 2198 B_FALSE); 2199 VERIFY(err == 0 || err == ENOSPC); 2200 err = ztest_dsl_prop_set_uint64(zd->zd_name, 2201 ZFS_PROP_COMPRESSION, 2202 ztest_random_dsl_prop(ZFS_PROP_COMPRESSION), 2203 B_FALSE); 2204 VERIFY(err == 0 || err == ENOSPC); 2205 (void) rw_unlock(&ztest_name_lock); 2206 2207 VERIFY0(dmu_read(zd->zd_os, object, offset, blocksize, data, 2208 DMU_READ_NO_PREFETCH)); 2209 2210 (void) ztest_write(zd, object, offset, blocksize, data); 2211 break; 2212 } 2213 2214 (void) rw_unlock(&zd->zd_zilog_lock); 2215 2216 umem_free(data, blocksize); 2217} 2218 2219/* 2220 * Initialize an object description template. 2221 */ 2222static void 2223ztest_od_init(ztest_od_t *od, uint64_t id, char *tag, uint64_t index, 2224 dmu_object_type_t type, uint64_t blocksize, uint64_t gen) 2225{ 2226 od->od_dir = ZTEST_DIROBJ; 2227 od->od_object = 0; 2228 2229 od->od_crtype = type; 2230 od->od_crblocksize = blocksize ? blocksize : ztest_random_blocksize(); 2231 od->od_crgen = gen; 2232 2233 od->od_type = DMU_OT_NONE; 2234 od->od_blocksize = 0; 2235 od->od_gen = 0; 2236 2237 (void) snprintf(od->od_name, sizeof (od->od_name), "%s(%lld)[%llu]", 2238 tag, (int64_t)id, index); 2239} 2240 2241/* 2242 * Lookup or create the objects for a test using the od template. 2243 * If the objects do not all exist, or if 'remove' is specified, 2244 * remove any existing objects and create new ones. Otherwise, 2245 * use the existing objects. 
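 * Returns 0 if the od array now describes usable objects, or -1 if the
 * stale objects could not be removed or the replacements could not be
 * created (typically because the pool ran out of space); callers treat a
 * nonzero return as "skip this pass of the test".  The usual calling
 * pattern, as seen throughout the tests below, is:
 *
 *	ztest_od_t od[2];
 *
 *	ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_UINT64_OTHER, 0, 0);
 *	ztest_od_init(&od[1], id, FTAG, 1, DMU_OT_UINT64_OTHER, 0, 0);
 *	if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0)
 *		return;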
2246 */ 2247static int 2248ztest_object_init(ztest_ds_t *zd, ztest_od_t *od, size_t size, boolean_t remove) 2249{ 2250 int count = size / sizeof (*od); 2251 int rv = 0; 2252 2253 VERIFY(mutex_lock(&zd->zd_dirobj_lock) == 0); 2254 if ((ztest_lookup(zd, od, count) != 0 || remove) && 2255 (ztest_remove(zd, od, count) != 0 || 2256 ztest_create(zd, od, count) != 0)) 2257 rv = -1; 2258 zd->zd_od = od; 2259 VERIFY(mutex_unlock(&zd->zd_dirobj_lock) == 0); 2260 2261 return (rv); 2262} 2263 2264/* ARGSUSED */ 2265void 2266ztest_zil_commit(ztest_ds_t *zd, uint64_t id) 2267{ 2268 zilog_t *zilog = zd->zd_zilog; 2269 2270 (void) rw_rdlock(&zd->zd_zilog_lock); 2271 2272 zil_commit(zilog, ztest_random(ZTEST_OBJECTS)); 2273 2274 /* 2275 * Remember the committed values in zd, which is in parent/child 2276 * shared memory. If we die, the next iteration of ztest_run() 2277 * will verify that the log really does contain this record. 2278 */ 2279 mutex_enter(&zilog->zl_lock); 2280 ASSERT(zd->zd_shared != NULL); 2281 ASSERT3U(zd->zd_shared->zd_seq, <=, zilog->zl_commit_lr_seq); 2282 zd->zd_shared->zd_seq = zilog->zl_commit_lr_seq; 2283 mutex_exit(&zilog->zl_lock); 2284 2285 (void) rw_unlock(&zd->zd_zilog_lock); 2286} 2287 2288/* 2289 * This function is designed to simulate the operations that occur during a 2290 * mount/unmount operation. We hold the dataset across these operations in an 2291 * attempt to expose any implicit assumptions about ZIL management. 2292 */ 2293/* ARGSUSED */ 2294void 2295ztest_zil_remount(ztest_ds_t *zd, uint64_t id) 2296{ 2297 objset_t *os = zd->zd_os; 2298 2299 /* 2300 * We grab the zd_dirobj_lock to ensure that no other thread is 2301 * updating the zil (i.e. adding in-memory log records) and the 2302 * zd_zilog_lock to block any I/O. 2303 */ 2304 VERIFY0(mutex_lock(&zd->zd_dirobj_lock)); 2305 (void) rw_wrlock(&zd->zd_zilog_lock); 2306 2307 /* zfsvfs_teardown() */ 2308 zil_close(zd->zd_zilog); 2309 2310 /* zfsvfs_setup() */ 2311 VERIFY(zil_open(os, ztest_get_data) == zd->zd_zilog); 2312 zil_replay(os, zd, ztest_replay_vector); 2313 2314 (void) rw_unlock(&zd->zd_zilog_lock); 2315 VERIFY(mutex_unlock(&zd->zd_dirobj_lock) == 0); 2316} 2317 2318/* 2319 * Verify that we can't destroy an active pool, create an existing pool, 2320 * or create a pool with a bad vdev spec. 2321 */ 2322/* ARGSUSED */ 2323void 2324ztest_spa_create_destroy(ztest_ds_t *zd, uint64_t id) 2325{ 2326 ztest_shared_opts_t *zo = &ztest_opts; 2327 spa_t *spa; 2328 nvlist_t *nvroot; 2329 2330 /* 2331 * Attempt to create using a bad file. 2332 */ 2333 nvroot = make_vdev_root("/dev/bogus", NULL, NULL, 0, 0, 0, 0, 0, 1); 2334 VERIFY3U(ENOENT, ==, 2335 spa_create("ztest_bad_file", nvroot, NULL, NULL)); 2336 nvlist_free(nvroot); 2337 2338 /* 2339 * Attempt to create using a bad mirror. 2340 */ 2341 nvroot = make_vdev_root("/dev/bogus", NULL, NULL, 0, 0, 0, 0, 2, 1); 2342 VERIFY3U(ENOENT, ==, 2343 spa_create("ztest_bad_mirror", nvroot, NULL, NULL)); 2344 nvlist_free(nvroot); 2345 2346 /* 2347 * Attempt to create an existing pool. It shouldn't matter 2348 * what's in the nvroot; we should fail with EEXIST. 
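 * While the pool is open we also expect spa_destroy() on it to fail
 * with EBUSY, covering the "can't destroy an active pool" part of
 * this test.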
2349 */ 2350 (void) rw_rdlock(&ztest_name_lock); 2351 nvroot = make_vdev_root("/dev/bogus", NULL, NULL, 0, 0, 0, 0, 0, 1); 2352 VERIFY3U(EEXIST, ==, spa_create(zo->zo_pool, nvroot, NULL, NULL)); 2353 nvlist_free(nvroot); 2354 VERIFY3U(0, ==, spa_open(zo->zo_pool, &spa, FTAG)); 2355 VERIFY3U(EBUSY, ==, spa_destroy(zo->zo_pool)); 2356 spa_close(spa, FTAG); 2357 2358 (void) rw_unlock(&ztest_name_lock); 2359} 2360 2361/* ARGSUSED */ 2362void 2363ztest_spa_upgrade(ztest_ds_t *zd, uint64_t id) 2364{ 2365 spa_t *spa; 2366 uint64_t initial_version = SPA_VERSION_INITIAL; 2367 uint64_t version, newversion; 2368 nvlist_t *nvroot, *props; 2369 char *name; 2370 2371 VERIFY0(mutex_lock(&ztest_vdev_lock)); 2372 name = kmem_asprintf("%s_upgrade", ztest_opts.zo_pool); 2373 2374 /* 2375 * Clean up from previous runs. 2376 */ 2377 (void) spa_destroy(name); 2378 2379 nvroot = make_vdev_root(NULL, NULL, name, ztest_opts.zo_vdev_size, 0, 2380 0, ztest_opts.zo_raidz, ztest_opts.zo_mirrors, 1); 2381 2382 /* 2383 * If we're configuring a RAIDZ device then make sure that the 2384 * the initial version is capable of supporting that feature. 2385 */ 2386 switch (ztest_opts.zo_raidz_parity) { 2387 case 0: 2388 case 1: 2389 initial_version = SPA_VERSION_INITIAL; 2390 break; 2391 case 2: 2392 initial_version = SPA_VERSION_RAIDZ2; 2393 break; 2394 case 3: 2395 initial_version = SPA_VERSION_RAIDZ3; 2396 break; 2397 } 2398 2399 /* 2400 * Create a pool with a spa version that can be upgraded. Pick 2401 * a value between initial_version and SPA_VERSION_BEFORE_FEATURES. 2402 */ 2403 do { 2404 version = ztest_random_spa_version(initial_version); 2405 } while (version > SPA_VERSION_BEFORE_FEATURES); 2406 2407 props = fnvlist_alloc(); 2408 fnvlist_add_uint64(props, 2409 zpool_prop_to_name(ZPOOL_PROP_VERSION), version); 2410 VERIFY0(spa_create(name, nvroot, props, NULL)); 2411 fnvlist_free(nvroot); 2412 fnvlist_free(props); 2413 2414 VERIFY0(spa_open(name, &spa, FTAG)); 2415 VERIFY3U(spa_version(spa), ==, version); 2416 newversion = ztest_random_spa_version(version + 1); 2417 2418 if (ztest_opts.zo_verbose >= 4) { 2419 (void) printf("upgrading spa version from %llu to %llu\n", 2420 (u_longlong_t)version, (u_longlong_t)newversion); 2421 } 2422 2423 spa_upgrade(spa, newversion); 2424 VERIFY3U(spa_version(spa), >, version); 2425 VERIFY3U(spa_version(spa), ==, fnvlist_lookup_uint64(spa->spa_config, 2426 zpool_prop_to_name(ZPOOL_PROP_VERSION))); 2427 spa_close(spa, FTAG); 2428 2429 strfree(name); 2430 VERIFY0(mutex_unlock(&ztest_vdev_lock)); 2431} 2432 2433static vdev_t * 2434vdev_lookup_by_path(vdev_t *vd, const char *path) 2435{ 2436 vdev_t *mvd; 2437 2438 if (vd->vdev_path != NULL && strcmp(path, vd->vdev_path) == 0) 2439 return (vd); 2440 2441 for (int c = 0; c < vd->vdev_children; c++) 2442 if ((mvd = vdev_lookup_by_path(vd->vdev_child[c], path)) != 2443 NULL) 2444 return (mvd); 2445 2446 return (NULL); 2447} 2448 2449/* 2450 * Find the first available hole which can be used as a top-level. 2451 */ 2452int 2453find_vdev_hole(spa_t *spa) 2454{ 2455 vdev_t *rvd = spa->spa_root_vdev; 2456 int c; 2457 2458 ASSERT(spa_config_held(spa, SCL_VDEV, RW_READER) == SCL_VDEV); 2459 2460 for (c = 0; c < rvd->vdev_children; c++) { 2461 vdev_t *cvd = rvd->vdev_child[c]; 2462 2463 if (cvd->vdev_ishole) 2464 break; 2465 } 2466 return (c); 2467} 2468 2469/* 2470 * Verify that vdev_add() works as expected. 
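 * Most of the time this adds a brand-new top-level vdev (about 1/4 of
 * which are created as log devices); if the pool already has separate
 * log devices, 1/4 of the time we remove one of those instead, so the
 * slog removal path gets exercised as well.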
2471 */ 2472/* ARGSUSED */ 2473void 2474ztest_vdev_add_remove(ztest_ds_t *zd, uint64_t id) 2475{ 2476 ztest_shared_t *zs = ztest_shared; 2477 spa_t *spa = ztest_spa; 2478 uint64_t leaves; 2479 uint64_t guid; 2480 nvlist_t *nvroot; 2481 int error; 2482 2483 VERIFY(mutex_lock(&ztest_vdev_lock) == 0); 2484 leaves = MAX(zs->zs_mirrors + zs->zs_splits, 1) * ztest_opts.zo_raidz; 2485 2486 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); 2487 2488 ztest_shared->zs_vdev_next_leaf = find_vdev_hole(spa) * leaves; 2489 2490 /* 2491 * If we have slogs then remove them 1/4 of the time. 2492 */ 2493 if (spa_has_slogs(spa) && ztest_random(4) == 0) { 2494 /* 2495 * Grab the guid from the head of the log class rotor. 2496 */ 2497 guid = spa_log_class(spa)->mc_rotor->mg_vd->vdev_guid; 2498 2499 spa_config_exit(spa, SCL_VDEV, FTAG); 2500 2501 /* 2502 * We have to grab the zs_name_lock as writer to 2503 * prevent a race between removing a slog (dmu_objset_find) 2504 * and destroying a dataset. Removing the slog will 2505 * grab a reference on the dataset which may cause 2506 * dmu_objset_destroy() to fail with EBUSY thus 2507 * leaving the dataset in an inconsistent state. 2508 */ 2509 VERIFY(rw_wrlock(&ztest_name_lock) == 0); 2510 error = spa_vdev_remove(spa, guid, B_FALSE); 2511 VERIFY(rw_unlock(&ztest_name_lock) == 0); 2512 2513 if (error && error != EEXIST) 2514 fatal(0, "spa_vdev_remove() = %d", error); 2515 } else { 2516 spa_config_exit(spa, SCL_VDEV, FTAG); 2517 2518 /* 2519 * Make 1/4 of the devices be log devices. 2520 */ 2521 nvroot = make_vdev_root(NULL, NULL, NULL, 2522 ztest_opts.zo_vdev_size, 0, 2523 ztest_random(4) == 0, ztest_opts.zo_raidz, 2524 zs->zs_mirrors, 1); 2525 2526 error = spa_vdev_add(spa, nvroot); 2527 nvlist_free(nvroot); 2528 2529 if (error == ENOSPC) 2530 ztest_record_enospc("spa_vdev_add"); 2531 else if (error != 0) 2532 fatal(0, "spa_vdev_add() = %d", error); 2533 } 2534 2535 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0); 2536} 2537 2538/* 2539 * Verify that adding/removing aux devices (l2arc, hot spare) works as expected. 2540 */ 2541/* ARGSUSED */ 2542void 2543ztest_vdev_aux_add_remove(ztest_ds_t *zd, uint64_t id) 2544{ 2545 ztest_shared_t *zs = ztest_shared; 2546 spa_t *spa = ztest_spa; 2547 vdev_t *rvd = spa->spa_root_vdev; 2548 spa_aux_vdev_t *sav; 2549 char *aux; 2550 uint64_t guid = 0; 2551 int error; 2552 2553 if (ztest_random(2) == 0) { 2554 sav = &spa->spa_spares; 2555 aux = ZPOOL_CONFIG_SPARES; 2556 } else { 2557 sav = &spa->spa_l2cache; 2558 aux = ZPOOL_CONFIG_L2CACHE; 2559 } 2560 2561 VERIFY(mutex_lock(&ztest_vdev_lock) == 0); 2562 2563 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); 2564 2565 if (sav->sav_count != 0 && ztest_random(4) == 0) { 2566 /* 2567 * Pick a random device to remove. 2568 */ 2569 guid = sav->sav_vdevs[ztest_random(sav->sav_count)]->vdev_guid; 2570 } else { 2571 /* 2572 * Find an unused device we can add. 2573 */ 2574 zs->zs_vdev_aux = 0; 2575 for (;;) { 2576 char path[MAXPATHLEN]; 2577 int c; 2578 (void) snprintf(path, sizeof (path), ztest_aux_template, 2579 ztest_opts.zo_dir, ztest_opts.zo_pool, aux, 2580 zs->zs_vdev_aux); 2581 for (c = 0; c < sav->sav_count; c++) 2582 if (strcmp(sav->sav_vdevs[c]->vdev_path, 2583 path) == 0) 2584 break; 2585 if (c == sav->sav_count && 2586 vdev_lookup_by_path(rvd, path) == NULL) 2587 break; 2588 zs->zs_vdev_aux++; 2589 } 2590 } 2591 2592 spa_config_exit(spa, SCL_VDEV, FTAG); 2593 2594 if (guid == 0) { 2595 /* 2596 * Add a new device. 
2597 */ 2598 nvlist_t *nvroot = make_vdev_root(NULL, aux, NULL, 2599 (ztest_opts.zo_vdev_size * 5) / 4, 0, 0, 0, 0, 1); 2600 error = spa_vdev_add(spa, nvroot); 2601 if (error != 0) 2602 fatal(0, "spa_vdev_add(%p) = %d", nvroot, error); 2603 nvlist_free(nvroot); 2604 } else { 2605 /* 2606 * Remove an existing device. Sometimes, dirty its 2607 * vdev state first to make sure we handle removal 2608 * of devices that have pending state changes. 2609 */ 2610 if (ztest_random(2) == 0) 2611 (void) vdev_online(spa, guid, 0, NULL); 2612 2613 error = spa_vdev_remove(spa, guid, B_FALSE); 2614 if (error != 0 && error != EBUSY) 2615 fatal(0, "spa_vdev_remove(%llu) = %d", guid, error); 2616 } 2617 2618 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0); 2619} 2620 2621/* 2622 * split a pool if it has mirror tlvdevs 2623 */ 2624/* ARGSUSED */ 2625void 2626ztest_split_pool(ztest_ds_t *zd, uint64_t id) 2627{ 2628 ztest_shared_t *zs = ztest_shared; 2629 spa_t *spa = ztest_spa; 2630 vdev_t *rvd = spa->spa_root_vdev; 2631 nvlist_t *tree, **child, *config, *split, **schild; 2632 uint_t c, children, schildren = 0, lastlogid = 0; 2633 int error = 0; 2634 2635 VERIFY(mutex_lock(&ztest_vdev_lock) == 0); 2636 2637 /* ensure we have a useable config; mirrors of raidz aren't supported */ 2638 if (zs->zs_mirrors < 3 || ztest_opts.zo_raidz > 1) { 2639 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0); 2640 return; 2641 } 2642 2643 /* clean up the old pool, if any */ 2644 (void) spa_destroy("splitp"); 2645 2646 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); 2647 2648 /* generate a config from the existing config */ 2649 mutex_enter(&spa->spa_props_lock); 2650 VERIFY(nvlist_lookup_nvlist(spa->spa_config, ZPOOL_CONFIG_VDEV_TREE, 2651 &tree) == 0); 2652 mutex_exit(&spa->spa_props_lock); 2653 2654 VERIFY(nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child, 2655 &children) == 0); 2656 2657 schild = malloc(rvd->vdev_children * sizeof (nvlist_t *)); 2658 for (c = 0; c < children; c++) { 2659 vdev_t *tvd = rvd->vdev_child[c]; 2660 nvlist_t **mchild; 2661 uint_t mchildren; 2662 2663 if (tvd->vdev_islog || tvd->vdev_ops == &vdev_hole_ops) { 2664 VERIFY(nvlist_alloc(&schild[schildren], NV_UNIQUE_NAME, 2665 0) == 0); 2666 VERIFY(nvlist_add_string(schild[schildren], 2667 ZPOOL_CONFIG_TYPE, VDEV_TYPE_HOLE) == 0); 2668 VERIFY(nvlist_add_uint64(schild[schildren], 2669 ZPOOL_CONFIG_IS_HOLE, 1) == 0); 2670 if (lastlogid == 0) 2671 lastlogid = schildren; 2672 ++schildren; 2673 continue; 2674 } 2675 lastlogid = 0; 2676 VERIFY(nvlist_lookup_nvlist_array(child[c], 2677 ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0); 2678 VERIFY(nvlist_dup(mchild[0], &schild[schildren++], 0) == 0); 2679 } 2680 2681 /* OK, create a config that can be used to split */ 2682 VERIFY(nvlist_alloc(&split, NV_UNIQUE_NAME, 0) == 0); 2683 VERIFY(nvlist_add_string(split, ZPOOL_CONFIG_TYPE, 2684 VDEV_TYPE_ROOT) == 0); 2685 VERIFY(nvlist_add_nvlist_array(split, ZPOOL_CONFIG_CHILDREN, schild, 2686 lastlogid != 0 ? 
lastlogid : schildren) == 0); 2687 2688 VERIFY(nvlist_alloc(&config, NV_UNIQUE_NAME, 0) == 0); 2689 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, split) == 0); 2690 2691 for (c = 0; c < schildren; c++) 2692 nvlist_free(schild[c]); 2693 free(schild); 2694 nvlist_free(split); 2695 2696 spa_config_exit(spa, SCL_VDEV, FTAG); 2697 2698 (void) rw_wrlock(&ztest_name_lock); 2699 error = spa_vdev_split_mirror(spa, "splitp", config, NULL, B_FALSE); 2700 (void) rw_unlock(&ztest_name_lock); 2701 2702 nvlist_free(config); 2703 2704 if (error == 0) { 2705 (void) printf("successful split - results:\n"); 2706 mutex_enter(&spa_namespace_lock); 2707 show_pool_stats(spa); 2708 show_pool_stats(spa_lookup("splitp")); 2709 mutex_exit(&spa_namespace_lock); 2710 ++zs->zs_splits; 2711 --zs->zs_mirrors; 2712 } 2713 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0); 2714 2715} 2716 2717/* 2718 * Verify that we can attach and detach devices. 2719 */ 2720/* ARGSUSED */ 2721void 2722ztest_vdev_attach_detach(ztest_ds_t *zd, uint64_t id) 2723{ 2724 ztest_shared_t *zs = ztest_shared; 2725 spa_t *spa = ztest_spa; 2726 spa_aux_vdev_t *sav = &spa->spa_spares; 2727 vdev_t *rvd = spa->spa_root_vdev; 2728 vdev_t *oldvd, *newvd, *pvd; 2729 nvlist_t *root; 2730 uint64_t leaves; 2731 uint64_t leaf, top; 2732 uint64_t ashift = ztest_get_ashift(); 2733 uint64_t oldguid, pguid; 2734 size_t oldsize, newsize; 2735 char oldpath[MAXPATHLEN], newpath[MAXPATHLEN]; 2736 int replacing; 2737 int oldvd_has_siblings = B_FALSE; 2738 int newvd_is_spare = B_FALSE; 2739 int oldvd_is_log; 2740 int error, expected_error; 2741 2742 VERIFY(mutex_lock(&ztest_vdev_lock) == 0); 2743 leaves = MAX(zs->zs_mirrors, 1) * ztest_opts.zo_raidz; 2744 2745 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); 2746 2747 /* 2748 * Decide whether to do an attach or a replace. 2749 */ 2750 replacing = ztest_random(2); 2751 2752 /* 2753 * Pick a random top-level vdev. 2754 */ 2755 top = ztest_random_vdev_top(spa, B_TRUE); 2756 2757 /* 2758 * Pick a random leaf within it. 2759 */ 2760 leaf = ztest_random(leaves); 2761 2762 /* 2763 * Locate this vdev. 2764 */ 2765 oldvd = rvd->vdev_child[top]; 2766 if (zs->zs_mirrors >= 1) { 2767 ASSERT(oldvd->vdev_ops == &vdev_mirror_ops); 2768 ASSERT(oldvd->vdev_children >= zs->zs_mirrors); 2769 oldvd = oldvd->vdev_child[leaf / ztest_opts.zo_raidz]; 2770 } 2771 if (ztest_opts.zo_raidz > 1) { 2772 ASSERT(oldvd->vdev_ops == &vdev_raidz_ops); 2773 ASSERT(oldvd->vdev_children == ztest_opts.zo_raidz); 2774 oldvd = oldvd->vdev_child[leaf % ztest_opts.zo_raidz]; 2775 } 2776 2777 /* 2778 * If we're already doing an attach or replace, oldvd may be a 2779 * mirror vdev -- in which case, pick a random child. 2780 */ 2781 while (oldvd->vdev_children != 0) { 2782 oldvd_has_siblings = B_TRUE; 2783 ASSERT(oldvd->vdev_children >= 2); 2784 oldvd = oldvd->vdev_child[ztest_random(oldvd->vdev_children)]; 2785 } 2786 2787 oldguid = oldvd->vdev_guid; 2788 oldsize = vdev_get_min_asize(oldvd); 2789 oldvd_is_log = oldvd->vdev_top->vdev_islog; 2790 (void) strcpy(oldpath, oldvd->vdev_path); 2791 pvd = oldvd->vdev_parent; 2792 pguid = pvd->vdev_guid; 2793 2794 /* 2795 * If oldvd has siblings, then half of the time, detach it. 
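 * The detach may legitimately fail with ENODEV, EBUSY or ENOTSUP,
 * depending on what has happened to the vdev in the meantime; any other
 * error is treated as fatal.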
2796 */ 2797 if (oldvd_has_siblings && ztest_random(2) == 0) { 2798 spa_config_exit(spa, SCL_VDEV, FTAG); 2799 error = spa_vdev_detach(spa, oldguid, pguid, B_FALSE); 2800 if (error != 0 && error != ENODEV && error != EBUSY && 2801 error != ENOTSUP) 2802 fatal(0, "detach (%s) returned %d", oldpath, error); 2803 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0); 2804 return; 2805 } 2806 2807 /* 2808 * For the new vdev, choose with equal probability between the two 2809 * standard paths (ending in either 'a' or 'b') or a random hot spare. 2810 */ 2811 if (sav->sav_count != 0 && ztest_random(3) == 0) { 2812 newvd = sav->sav_vdevs[ztest_random(sav->sav_count)]; 2813 newvd_is_spare = B_TRUE; 2814 (void) strcpy(newpath, newvd->vdev_path); 2815 } else { 2816 (void) snprintf(newpath, sizeof (newpath), ztest_dev_template, 2817 ztest_opts.zo_dir, ztest_opts.zo_pool, 2818 top * leaves + leaf); 2819 if (ztest_random(2) == 0) 2820 newpath[strlen(newpath) - 1] = 'b'; 2821 newvd = vdev_lookup_by_path(rvd, newpath); 2822 } 2823 2824 if (newvd) { 2825 newsize = vdev_get_min_asize(newvd); 2826 } else { 2827 /* 2828 * Make newsize a little bigger or smaller than oldsize. 2829 * If it's smaller, the attach should fail. 2830 * If it's larger, and we're doing a replace, 2831 * we should get dynamic LUN growth when we're done. 2832 */ 2833 newsize = 10 * oldsize / (9 + ztest_random(3)); 2834 } 2835 2836 /* 2837 * If pvd is not a mirror or root, the attach should fail with ENOTSUP, 2838 * unless it's a replace; in that case any non-replacing parent is OK. 2839 * 2840 * If newvd is already part of the pool, it should fail with EBUSY. 2841 * 2842 * If newvd is too small, it should fail with EOVERFLOW. 2843 */ 2844 if (pvd->vdev_ops != &vdev_mirror_ops && 2845 pvd->vdev_ops != &vdev_root_ops && (!replacing || 2846 pvd->vdev_ops == &vdev_replacing_ops || 2847 pvd->vdev_ops == &vdev_spare_ops)) 2848 expected_error = ENOTSUP; 2849 else if (newvd_is_spare && (!replacing || oldvd_is_log)) 2850 expected_error = ENOTSUP; 2851 else if (newvd == oldvd) 2852 expected_error = replacing ? 0 : EBUSY; 2853 else if (vdev_lookup_by_path(rvd, newpath) != NULL) 2854 expected_error = EBUSY; 2855 else if (newsize < oldsize) 2856 expected_error = EOVERFLOW; 2857 else if (ashift > oldvd->vdev_top->vdev_ashift) 2858 expected_error = EDOM; 2859 else 2860 expected_error = 0; 2861 2862 spa_config_exit(spa, SCL_VDEV, FTAG); 2863 2864 /* 2865 * Build the nvlist describing newpath. 2866 */ 2867 root = make_vdev_root(newpath, NULL, NULL, newvd == NULL ? newsize : 0, 2868 ashift, 0, 0, 0, 1); 2869 2870 error = spa_vdev_attach(spa, oldguid, root, replacing); 2871 2872 nvlist_free(root); 2873 2874 /* 2875 * If our parent was the replacing vdev, but the replace completed, 2876 * then instead of failing with ENOTSUP we may either succeed, 2877 * fail with ENODEV, or fail with EOVERFLOW. 2878 */ 2879 if (expected_error == ENOTSUP && 2880 (error == 0 || error == ENODEV || error == EOVERFLOW)) 2881 expected_error = error; 2882 2883 /* 2884 * If someone grew the LUN, the replacement may be too small. 
2885 */ 2886 if (error == EOVERFLOW || error == EBUSY) 2887 expected_error = error; 2888 2889 /* XXX workaround 6690467 */ 2890 if (error != expected_error && expected_error != EBUSY) { 2891 fatal(0, "attach (%s %llu, %s %llu, %d) " 2892 "returned %d, expected %d", 2893 oldpath, (longlong_t)oldsize, newpath, 2894 (longlong_t)newsize, replacing, error, expected_error); 2895 } 2896 2897 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0); 2898} 2899 2900/* 2901 * Callback function which expands the physical size of the vdev. 2902 */ 2903vdev_t * 2904grow_vdev(vdev_t *vd, void *arg) 2905{ 2906 spa_t *spa = vd->vdev_spa; 2907 size_t *newsize = arg; 2908 size_t fsize; 2909 int fd; 2910 2911 ASSERT(spa_config_held(spa, SCL_STATE, RW_READER) == SCL_STATE); 2912 ASSERT(vd->vdev_ops->vdev_op_leaf); 2913 2914 if ((fd = open(vd->vdev_path, O_RDWR)) == -1) 2915 return (vd); 2916 2917 fsize = lseek(fd, 0, SEEK_END); 2918 (void) ftruncate(fd, *newsize); 2919 2920 if (ztest_opts.zo_verbose >= 6) { 2921 (void) printf("%s grew from %lu to %lu bytes\n", 2922 vd->vdev_path, (ulong_t)fsize, (ulong_t)*newsize); 2923 } 2924 (void) close(fd); 2925 return (NULL); 2926} 2927 2928/* 2929 * Callback function which expands a given vdev by calling vdev_online(). 2930 */ 2931/* ARGSUSED */ 2932vdev_t * 2933online_vdev(vdev_t *vd, void *arg) 2934{ 2935 spa_t *spa = vd->vdev_spa; 2936 vdev_t *tvd = vd->vdev_top; 2937 uint64_t guid = vd->vdev_guid; 2938 uint64_t generation = spa->spa_config_generation + 1; 2939 vdev_state_t newstate = VDEV_STATE_UNKNOWN; 2940 int error; 2941 2942 ASSERT(spa_config_held(spa, SCL_STATE, RW_READER) == SCL_STATE); 2943 ASSERT(vd->vdev_ops->vdev_op_leaf); 2944 2945 /* Calling vdev_online will initialize the new metaslabs */ 2946 spa_config_exit(spa, SCL_STATE, spa); 2947 error = vdev_online(spa, guid, ZFS_ONLINE_EXPAND, &newstate); 2948 spa_config_enter(spa, SCL_STATE, spa, RW_READER); 2949 2950 /* 2951 * If vdev_online returned an error or the underlying vdev_open 2952 * failed then we abort the expand. The only way to know that 2953 * vdev_open fails is by checking the returned newstate. 2954 */ 2955 if (error || newstate != VDEV_STATE_HEALTHY) { 2956 if (ztest_opts.zo_verbose >= 5) { 2957 (void) printf("Unable to expand vdev, state %llu, " 2958 "error %d\n", (u_longlong_t)newstate, error); 2959 } 2960 return (vd); 2961 } 2962 ASSERT3U(newstate, ==, VDEV_STATE_HEALTHY); 2963 2964 /* 2965 * Since we dropped the lock we need to ensure that we're 2966 * still talking to the original vdev. It's possible this 2967 * vdev may have been detached/replaced while we were 2968 * trying to online it. 2969 */ 2970 if (generation != spa->spa_config_generation) { 2971 if (ztest_opts.zo_verbose >= 5) { 2972 (void) printf("vdev configuration has changed, " 2973 "guid %llu, state %llu, expected gen %llu, " 2974 "got gen %llu\n", 2975 (u_longlong_t)guid, 2976 (u_longlong_t)tvd->vdev_state, 2977 (u_longlong_t)generation, 2978 (u_longlong_t)spa->spa_config_generation); 2979 } 2980 return (vd); 2981 } 2982 return (NULL); 2983} 2984 2985/* 2986 * Traverse the vdev tree calling the supplied function. 2987 * We continue to walk the tree until we either have walked all 2988 * children or we receive a non-NULL return from the callback. 2989 * If a NULL callback is passed, then we just return back the first 2990 * leaf vdev we encounter. 
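 * As a minimal illustrative sketch (not code that ztest itself contains),
 * a callback that merely counts leaves would return NULL from every
 * invocation so the walk visits them all:
 *
 *	static vdev_t *
 *	count_leaf(vdev_t *vd, void *arg)
 *	{
 *		(*(int *)arg)++;
 *		return (NULL);
 *	}
 *
 *	int nleaves = 0;
 *	(void) vdev_walk_tree(tvd, count_leaf, &nleaves);
 *
 * The LUN growth test below uses this walker with grow_vdev() and
 * online_vdev(): a NULL result means every leaf was processed, while a
 * non-NULL result is the leaf at which the walk was cut short.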
2991 */ 2992vdev_t * 2993vdev_walk_tree(vdev_t *vd, vdev_t *(*func)(vdev_t *, void *), void *arg) 2994{ 2995 if (vd->vdev_ops->vdev_op_leaf) { 2996 if (func == NULL) 2997 return (vd); 2998 else 2999 return (func(vd, arg)); 3000 } 3001 3002 for (uint_t c = 0; c < vd->vdev_children; c++) { 3003 vdev_t *cvd = vd->vdev_child[c]; 3004 if ((cvd = vdev_walk_tree(cvd, func, arg)) != NULL) 3005 return (cvd); 3006 } 3007 return (NULL); 3008} 3009 3010/* 3011 * Verify that dynamic LUN growth works as expected. 3012 */ 3013/* ARGSUSED */ 3014void 3015ztest_vdev_LUN_growth(ztest_ds_t *zd, uint64_t id) 3016{ 3017 spa_t *spa = ztest_spa; 3018 vdev_t *vd, *tvd; 3019 metaslab_class_t *mc; 3020 metaslab_group_t *mg; 3021 size_t psize, newsize; 3022 uint64_t top; 3023 uint64_t old_class_space, new_class_space, old_ms_count, new_ms_count; 3024 3025 VERIFY(mutex_lock(&ztest_vdev_lock) == 0); 3026 spa_config_enter(spa, SCL_STATE, spa, RW_READER); 3027 3028 top = ztest_random_vdev_top(spa, B_TRUE); 3029 3030 tvd = spa->spa_root_vdev->vdev_child[top]; 3031 mg = tvd->vdev_mg; 3032 mc = mg->mg_class; 3033 old_ms_count = tvd->vdev_ms_count; 3034 old_class_space = metaslab_class_get_space(mc); 3035 3036 /* 3037 * Determine the size of the first leaf vdev associated with 3038 * our top-level device. 3039 */ 3040 vd = vdev_walk_tree(tvd, NULL, NULL); 3041 ASSERT3P(vd, !=, NULL); 3042 ASSERT(vd->vdev_ops->vdev_op_leaf); 3043 3044 psize = vd->vdev_psize; 3045 3046 /* 3047 * We only try to expand the vdev if it's healthy, less than 4x its 3048 * original size, and it has a valid psize. 3049 */ 3050 if (tvd->vdev_state != VDEV_STATE_HEALTHY || 3051 psize == 0 || psize >= 4 * ztest_opts.zo_vdev_size) { 3052 spa_config_exit(spa, SCL_STATE, spa); 3053 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0); 3054 return; 3055 } 3056 ASSERT(psize > 0); 3057 newsize = psize + psize / 8; 3058 ASSERT3U(newsize, >, psize); 3059 3060 if (ztest_opts.zo_verbose >= 6) { 3061 (void) printf("Expanding LUN %s from %lu to %lu\n", 3062 vd->vdev_path, (ulong_t)psize, (ulong_t)newsize); 3063 } 3064 3065 /* 3066 * Growing the vdev is a two step process: 3067 * 1). expand the physical size (i.e. relabel) 3068 * 2). online the vdev to create the new metaslabs 3069 */ 3070 if (vdev_walk_tree(tvd, grow_vdev, &newsize) != NULL || 3071 vdev_walk_tree(tvd, online_vdev, NULL) != NULL || 3072 tvd->vdev_state != VDEV_STATE_HEALTHY) { 3073 if (ztest_opts.zo_verbose >= 5) { 3074 (void) printf("Could not expand LUN because " 3075 "the vdev configuration changed.\n"); 3076 } 3077 spa_config_exit(spa, SCL_STATE, spa); 3078 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0); 3079 return; 3080 } 3081 3082 spa_config_exit(spa, SCL_STATE, spa); 3083 3084 /* 3085 * Expanding the LUN will update the config asynchronously, 3086 * thus we must wait for the async thread to complete any 3087 * pending tasks before proceeding. 
3088 */ 3089 for (;;) { 3090 boolean_t done; 3091 mutex_enter(&spa->spa_async_lock); 3092 done = (spa->spa_async_thread == NULL && !spa->spa_async_tasks); 3093 mutex_exit(&spa->spa_async_lock); 3094 if (done) 3095 break; 3096 txg_wait_synced(spa_get_dsl(spa), 0); 3097 (void) poll(NULL, 0, 100); 3098 } 3099 3100 spa_config_enter(spa, SCL_STATE, spa, RW_READER); 3101 3102 tvd = spa->spa_root_vdev->vdev_child[top]; 3103 new_ms_count = tvd->vdev_ms_count; 3104 new_class_space = metaslab_class_get_space(mc); 3105 3106 if (tvd->vdev_mg != mg || mg->mg_class != mc) { 3107 if (ztest_opts.zo_verbose >= 5) { 3108 (void) printf("Could not verify LUN expansion due to " 3109 "intervening vdev offline or remove.\n"); 3110 } 3111 spa_config_exit(spa, SCL_STATE, spa); 3112 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0); 3113 return; 3114 } 3115 3116 /* 3117 * Make sure we were able to grow the vdev. 3118 */ 3119 if (new_ms_count <= old_ms_count) 3120 fatal(0, "LUN expansion failed: ms_count %llu <= %llu\n", 3121 old_ms_count, new_ms_count); 3122 3123 /* 3124 * Make sure we were able to grow the pool. 3125 */ 3126 if (new_class_space <= old_class_space) 3127 fatal(0, "LUN expansion failed: class_space %llu <= %llu\n", 3128 old_class_space, new_class_space); 3129 3130 if (ztest_opts.zo_verbose >= 5) { 3131 char oldnumbuf[6], newnumbuf[6]; 3132 3133 nicenum(old_class_space, oldnumbuf); 3134 nicenum(new_class_space, newnumbuf); 3135 (void) printf("%s grew from %s to %s\n", 3136 spa->spa_name, oldnumbuf, newnumbuf); 3137 } 3138 3139 spa_config_exit(spa, SCL_STATE, spa); 3140 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0); 3141} 3142 3143/* 3144 * Verify that dmu_objset_{create,destroy,open,close} work as expected. 3145 */ 3146/* ARGSUSED */ 3147static void 3148ztest_objset_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx) 3149{ 3150 /* 3151 * Create the objects common to all ztest datasets. 3152 */ 3153 VERIFY(zap_create_claim(os, ZTEST_DIROBJ, 3154 DMU_OT_ZAP_OTHER, DMU_OT_NONE, 0, tx) == 0); 3155} 3156 3157static int 3158ztest_dataset_create(char *dsname) 3159{ 3160 uint64_t zilset = ztest_random(100); 3161 int err = dmu_objset_create(dsname, DMU_OST_OTHER, 0, 3162 ztest_objset_create_cb, NULL); 3163 3164 if (err || zilset < 80) 3165 return (err); 3166 3167 if (ztest_opts.zo_verbose >= 6) 3168 (void) printf("Setting dataset %s to sync always\n", dsname); 3169 return (ztest_dsl_prop_set_uint64(dsname, ZFS_PROP_SYNC, 3170 ZFS_SYNC_ALWAYS, B_FALSE)); 3171} 3172 3173/* ARGSUSED */ 3174static int 3175ztest_objset_destroy_cb(const char *name, void *arg) 3176{ 3177 objset_t *os; 3178 dmu_object_info_t doi; 3179 int error; 3180 3181 /* 3182 * Verify that the dataset contains a directory object. 3183 */ 3184 VERIFY0(dmu_objset_own(name, DMU_OST_OTHER, B_TRUE, FTAG, &os)); 3185 error = dmu_object_info(os, ZTEST_DIROBJ, &doi); 3186 if (error != ENOENT) { 3187 /* We could have crashed in the middle of destroying it */ 3188 ASSERT0(error); 3189 ASSERT3U(doi.doi_type, ==, DMU_OT_ZAP_OTHER); 3190 ASSERT3S(doi.doi_physical_blocks_512, >=, 0); 3191 } 3192 dmu_objset_disown(os, FTAG); 3193 3194 /* 3195 * Destroy the dataset. 
3196 */ 3197 if (strchr(name, '@') != NULL) { 3198 VERIFY0(dsl_destroy_snapshot(name, B_FALSE)); 3199 } else { 3200 VERIFY0(dsl_destroy_head(name)); 3201 } 3202 return (0); 3203} 3204 3205static boolean_t 3206ztest_snapshot_create(char *osname, uint64_t id) 3207{ 3208 char snapname[MAXNAMELEN]; 3209 int error; 3210 3211 (void) snprintf(snapname, sizeof (snapname), "%llu", (u_longlong_t)id); 3212 3213 error = dmu_objset_snapshot_one(osname, snapname); 3214 if (error == ENOSPC) { 3215 ztest_record_enospc(FTAG); 3216 return (B_FALSE); 3217 } 3218 if (error != 0 && error != EEXIST) { 3219 fatal(0, "ztest_snapshot_create(%s@%s) = %d", osname, 3220 snapname, error); 3221 } 3222 return (B_TRUE); 3223} 3224 3225static boolean_t 3226ztest_snapshot_destroy(char *osname, uint64_t id) 3227{ 3228 char snapname[MAXNAMELEN]; 3229 int error; 3230 3231 (void) snprintf(snapname, MAXNAMELEN, "%s@%llu", osname, 3232 (u_longlong_t)id); 3233 3234 error = dsl_destroy_snapshot(snapname, B_FALSE); 3235 if (error != 0 && error != ENOENT) 3236 fatal(0, "ztest_snapshot_destroy(%s) = %d", snapname, error); 3237 return (B_TRUE); 3238} 3239 3240/* ARGSUSED */ 3241void 3242ztest_dmu_objset_create_destroy(ztest_ds_t *zd, uint64_t id) 3243{ 3244 ztest_ds_t zdtmp; 3245 int iters; 3246 int error; 3247 objset_t *os, *os2; 3248 char name[MAXNAMELEN]; 3249 zilog_t *zilog; 3250 3251 (void) rw_rdlock(&ztest_name_lock); 3252 3253 (void) snprintf(name, MAXNAMELEN, "%s/temp_%llu", 3254 ztest_opts.zo_pool, (u_longlong_t)id); 3255 3256 /* 3257 * If this dataset exists from a previous run, process its replay log 3258 * half of the time. If we don't replay it, then dmu_objset_destroy() 3259 * (invoked from ztest_objset_destroy_cb()) should just throw it away. 3260 */ 3261 if (ztest_random(2) == 0 && 3262 dmu_objset_own(name, DMU_OST_OTHER, B_FALSE, FTAG, &os) == 0) { 3263 ztest_zd_init(&zdtmp, NULL, os); 3264 zil_replay(os, &zdtmp, ztest_replay_vector); 3265 ztest_zd_fini(&zdtmp); 3266 dmu_objset_disown(os, FTAG); 3267 } 3268 3269 /* 3270 * There may be an old instance of the dataset we're about to 3271 * create lying around from a previous run. If so, destroy it 3272 * and all of its snapshots. 3273 */ 3274 (void) dmu_objset_find(name, ztest_objset_destroy_cb, NULL, 3275 DS_FIND_CHILDREN | DS_FIND_SNAPSHOTS); 3276 3277 /* 3278 * Verify that the destroyed dataset is no longer in the namespace. 3279 */ 3280 VERIFY3U(ENOENT, ==, dmu_objset_own(name, DMU_OST_OTHER, B_TRUE, 3281 FTAG, &os)); 3282 3283 /* 3284 * Verify that we can create a new dataset. 3285 */ 3286 error = ztest_dataset_create(name); 3287 if (error) { 3288 if (error == ENOSPC) { 3289 ztest_record_enospc(FTAG); 3290 (void) rw_unlock(&ztest_name_lock); 3291 return; 3292 } 3293 fatal(0, "dmu_objset_create(%s) = %d", name, error); 3294 } 3295 3296 VERIFY0(dmu_objset_own(name, DMU_OST_OTHER, B_FALSE, FTAG, &os)); 3297 3298 ztest_zd_init(&zdtmp, NULL, os); 3299 3300 /* 3301 * Open the intent log for it. 3302 */ 3303 zilog = zil_open(os, ztest_get_data); 3304 3305 /* 3306 * Put some objects in there, do a little I/O to them, 3307 * and randomly take a couple of snapshots along the way. 3308 */ 3309 iters = ztest_random(5); 3310 for (int i = 0; i < iters; i++) { 3311 ztest_dmu_object_alloc_free(&zdtmp, id); 3312 if (ztest_random(iters) == 0) 3313 (void) ztest_snapshot_create(name, i); 3314 } 3315 3316 /* 3317 * Verify that we cannot create an existing dataset. 
3318 */ 3319 VERIFY3U(EEXIST, ==, 3320 dmu_objset_create(name, DMU_OST_OTHER, 0, NULL, NULL)); 3321 3322 /* 3323 * Verify that we can hold an objset that is also owned. 3324 */ 3325 VERIFY3U(0, ==, dmu_objset_hold(name, FTAG, &os2)); 3326 dmu_objset_rele(os2, FTAG); 3327 3328 /* 3329 * Verify that we cannot own an objset that is already owned. 3330 */ 3331 VERIFY3U(EBUSY, ==, 3332 dmu_objset_own(name, DMU_OST_OTHER, B_FALSE, FTAG, &os2)); 3333 3334 zil_close(zilog); 3335 dmu_objset_disown(os, FTAG); 3336 ztest_zd_fini(&zdtmp); 3337 3338 (void) rw_unlock(&ztest_name_lock); 3339} 3340 3341/* 3342 * Verify that dmu_snapshot_{create,destroy,open,close} work as expected. 3343 */ 3344void 3345ztest_dmu_snapshot_create_destroy(ztest_ds_t *zd, uint64_t id) 3346{ 3347 (void) rw_rdlock(&ztest_name_lock); 3348 (void) ztest_snapshot_destroy(zd->zd_name, id); 3349 (void) ztest_snapshot_create(zd->zd_name, id); 3350 (void) rw_unlock(&ztest_name_lock); 3351} 3352 3353/* 3354 * Cleanup non-standard snapshots and clones. 3355 */ 3356void 3357ztest_dsl_dataset_cleanup(char *osname, uint64_t id) 3358{ 3359 char snap1name[MAXNAMELEN]; 3360 char clone1name[MAXNAMELEN]; 3361 char snap2name[MAXNAMELEN]; 3362 char clone2name[MAXNAMELEN]; 3363 char snap3name[MAXNAMELEN]; 3364 int error; 3365 3366 (void) snprintf(snap1name, MAXNAMELEN, "%s@s1_%llu", osname, id); 3367 (void) snprintf(clone1name, MAXNAMELEN, "%s/c1_%llu", osname, id); 3368 (void) snprintf(snap2name, MAXNAMELEN, "%s@s2_%llu", clone1name, id); 3369 (void) snprintf(clone2name, MAXNAMELEN, "%s/c2_%llu", osname, id); 3370 (void) snprintf(snap3name, MAXNAMELEN, "%s@s3_%llu", clone1name, id); 3371 3372 error = dsl_destroy_head(clone2name); 3373 if (error && error != ENOENT) 3374 fatal(0, "dsl_destroy_head(%s) = %d", clone2name, error); 3375 error = dsl_destroy_snapshot(snap3name, B_FALSE); 3376 if (error && error != ENOENT) 3377 fatal(0, "dsl_destroy_snapshot(%s) = %d", snap3name, error); 3378 error = dsl_destroy_snapshot(snap2name, B_FALSE); 3379 if (error && error != ENOENT) 3380 fatal(0, "dsl_destroy_snapshot(%s) = %d", snap2name, error); 3381 error = dsl_destroy_head(clone1name); 3382 if (error && error != ENOENT) 3383 fatal(0, "dsl_destroy_head(%s) = %d", clone1name, error); 3384 error = dsl_destroy_snapshot(snap1name, B_FALSE); 3385 if (error && error != ENOENT) 3386 fatal(0, "dsl_destroy_snapshot(%s) = %d", snap1name, error); 3387} 3388 3389/* 3390 * Verify dsl_dataset_promote handles EBUSY 3391 */ 3392void 3393ztest_dsl_dataset_promote_busy(ztest_ds_t *zd, uint64_t id) 3394{ 3395 objset_t *os; 3396 char snap1name[MAXNAMELEN]; 3397 char clone1name[MAXNAMELEN]; 3398 char snap2name[MAXNAMELEN]; 3399 char clone2name[MAXNAMELEN]; 3400 char snap3name[MAXNAMELEN]; 3401 char *osname = zd->zd_name; 3402 int error; 3403 3404 (void) rw_rdlock(&ztest_name_lock); 3405 3406 ztest_dsl_dataset_cleanup(osname, id); 3407 3408 (void) snprintf(snap1name, MAXNAMELEN, "%s@s1_%llu", osname, id); 3409 (void) snprintf(clone1name, MAXNAMELEN, "%s/c1_%llu", osname, id); 3410 (void) snprintf(snap2name, MAXNAMELEN, "%s@s2_%llu", clone1name, id); 3411 (void) snprintf(clone2name, MAXNAMELEN, "%s/c2_%llu", osname, id); 3412 (void) snprintf(snap3name, MAXNAMELEN, "%s@s3_%llu", clone1name, id); 3413 3414 error = dmu_objset_snapshot_one(osname, strchr(snap1name, '@') + 1); 3415 if (error && error != EEXIST) { 3416 if (error == ENOSPC) { 3417 ztest_record_enospc(FTAG); 3418 goto out; 3419 } 3420 fatal(0, "dmu_take_snapshot(%s) = %d", snap1name, error); 3421 } 3422 3423 error = 
dmu_objset_clone(clone1name, snap1name); 3424 if (error) { 3425 if (error == ENOSPC) { 3426 ztest_record_enospc(FTAG); 3427 goto out; 3428 } 3429 fatal(0, "dmu_objset_create(%s) = %d", clone1name, error); 3430 } 3431 3432 error = dmu_objset_snapshot_one(clone1name, strchr(snap2name, '@') + 1); 3433 if (error && error != EEXIST) { 3434 if (error == ENOSPC) { 3435 ztest_record_enospc(FTAG); 3436 goto out; 3437 } 3438 fatal(0, "dmu_open_snapshot(%s) = %d", snap2name, error); 3439 } 3440 3441 error = dmu_objset_snapshot_one(clone1name, strchr(snap3name, '@') + 1); 3442 if (error && error != EEXIST) { 3443 if (error == ENOSPC) { 3444 ztest_record_enospc(FTAG); 3445 goto out; 3446 } 3447 fatal(0, "dmu_open_snapshot(%s) = %d", snap3name, error); 3448 } 3449 3450 error = dmu_objset_clone(clone2name, snap3name); 3451 if (error) { 3452 if (error == ENOSPC) { 3453 ztest_record_enospc(FTAG); 3454 goto out; 3455 } 3456 fatal(0, "dmu_objset_create(%s) = %d", clone2name, error); 3457 } 3458 3459 error = dmu_objset_own(snap2name, DMU_OST_ANY, B_TRUE, FTAG, &os); 3460 if (error) 3461 fatal(0, "dmu_objset_own(%s) = %d", snap2name, error); 3462 error = dsl_dataset_promote(clone2name, NULL); 3463 if (error != EBUSY) 3464 fatal(0, "dsl_dataset_promote(%s), %d, not EBUSY", clone2name, 3465 error); 3466 dmu_objset_disown(os, FTAG); 3467 3468out: 3469 ztest_dsl_dataset_cleanup(osname, id); 3470 3471 (void) rw_unlock(&ztest_name_lock); 3472} 3473 3474/* 3475 * Verify that dmu_object_{alloc,free} work as expected. 3476 */ 3477void 3478ztest_dmu_object_alloc_free(ztest_ds_t *zd, uint64_t id) 3479{ 3480 ztest_od_t od[4]; 3481 int batchsize = sizeof (od) / sizeof (od[0]); 3482 3483 for (int b = 0; b < batchsize; b++) 3484 ztest_od_init(&od[b], id, FTAG, b, DMU_OT_UINT64_OTHER, 0, 0); 3485 3486 /* 3487 * Destroy the previous batch of objects, create a new batch, 3488 * and do some I/O on the new objects. 3489 */ 3490 if (ztest_object_init(zd, od, sizeof (od), B_TRUE) != 0) 3491 return; 3492 3493 while (ztest_random(4 * batchsize) != 0) 3494 ztest_io(zd, od[ztest_random(batchsize)].od_object, 3495 ztest_random(ZTEST_RANGE_LOCKS) << SPA_MAXBLOCKSHIFT); 3496} 3497 3498/* 3499 * Verify that dmu_{read,write} work as expected. 3500 */ 3501void 3502ztest_dmu_read_write(ztest_ds_t *zd, uint64_t id) 3503{ 3504 objset_t *os = zd->zd_os; 3505 ztest_od_t od[2]; 3506 dmu_tx_t *tx; 3507 int i, freeit, error; 3508 uint64_t n, s, txg; 3509 bufwad_t *packbuf, *bigbuf, *pack, *bigH, *bigT; 3510 uint64_t packobj, packoff, packsize, bigobj, bigoff, bigsize; 3511 uint64_t chunksize = (1000 + ztest_random(1000)) * sizeof (uint64_t); 3512 uint64_t regions = 997; 3513 uint64_t stride = 123456789ULL; 3514 uint64_t width = 40; 3515 int free_percent = 5; 3516 3517 /* 3518 * This test uses two objects, packobj and bigobj, that are always 3519 * updated together (i.e. in the same tx) so that their contents are 3520 * in sync and can be compared. Their contents relate to each other 3521 * in a simple way: packobj is a dense array of 'bufwad' structures, 3522 * while bigobj is a sparse array of the same bufwads. Specifically, 3523 * for any index n, there are three bufwads that should be identical: 3524 * 3525 * packobj, at offset n * sizeof (bufwad_t) 3526 * bigobj, at the head of the nth chunk 3527 * bigobj, at the tail of the nth chunk 3528 * 3529 * The chunk size is arbitrary. It doesn't have to be a power of two, 3530 * and it doesn't have any relation to the object blocksize. 
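 * (Concretely: for index n = 2, the pack copy lives at byte offset
 * 2 * sizeof (bufwad_t) in packobj, while the two bigobj copies live at
 * byte offsets 2 * chunksize and 3 * chunksize - sizeof (bufwad_t),
 * i.e. the first and the last bufwad of chunk 2.)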
3531 * The only requirement is that it can hold at least two bufwads. 3532 * 3533 * Normally, we write the bufwad to each of these locations. 3534 * However, free_percent of the time we instead write zeroes to 3535 * packobj and perform a dmu_free_range() on bigobj. By comparing 3536 * bigobj to packobj, we can verify that the DMU is correctly 3537 * tracking which parts of an object are allocated and free, 3538 * and that the contents of the allocated blocks are correct. 3539 */ 3540 3541 /* 3542 * Read the directory info. If it's the first time, set things up. 3543 */ 3544 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_UINT64_OTHER, 0, chunksize); 3545 ztest_od_init(&od[1], id, FTAG, 1, DMU_OT_UINT64_OTHER, 0, chunksize); 3546 3547 if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0) 3548 return; 3549 3550 bigobj = od[0].od_object; 3551 packobj = od[1].od_object; 3552 chunksize = od[0].od_gen; 3553 ASSERT(chunksize == od[1].od_gen); 3554 3555 /* 3556 * Prefetch a random chunk of the big object. 3557 * Our aim here is to get some async reads in flight 3558 * for blocks that we may free below; the DMU should 3559 * handle this race correctly. 3560 */ 3561 n = ztest_random(regions) * stride + ztest_random(width); 3562 s = 1 + ztest_random(2 * width - 1); 3563 dmu_prefetch(os, bigobj, n * chunksize, s * chunksize); 3564 3565 /* 3566 * Pick a random index and compute the offsets into packobj and bigobj. 3567 */ 3568 n = ztest_random(regions) * stride + ztest_random(width); 3569 s = 1 + ztest_random(width - 1); 3570 3571 packoff = n * sizeof (bufwad_t); 3572 packsize = s * sizeof (bufwad_t); 3573 3574 bigoff = n * chunksize; 3575 bigsize = s * chunksize; 3576 3577 packbuf = umem_alloc(packsize, UMEM_NOFAIL); 3578 bigbuf = umem_alloc(bigsize, UMEM_NOFAIL); 3579 3580 /* 3581 * free_percent of the time, free a range of bigobj rather than 3582 * overwriting it. 3583 */ 3584 freeit = (ztest_random(100) < free_percent); 3585 3586 /* 3587 * Read the current contents of our objects. 3588 */ 3589 error = dmu_read(os, packobj, packoff, packsize, packbuf, 3590 DMU_READ_PREFETCH); 3591 ASSERT0(error); 3592 error = dmu_read(os, bigobj, bigoff, bigsize, bigbuf, 3593 DMU_READ_PREFETCH); 3594 ASSERT0(error); 3595 3596 /* 3597 * Get a tx for the mods to both packobj and bigobj. 3598 */ 3599 tx = dmu_tx_create(os); 3600 3601 dmu_tx_hold_write(tx, packobj, packoff, packsize); 3602 3603 if (freeit) 3604 dmu_tx_hold_free(tx, bigobj, bigoff, bigsize); 3605 else 3606 dmu_tx_hold_write(tx, bigobj, bigoff, bigsize); 3607 3608 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG); 3609 if (txg == 0) { 3610 umem_free(packbuf, packsize); 3611 umem_free(bigbuf, bigsize); 3612 return; 3613 } 3614 3615 dmu_object_set_checksum(os, bigobj, 3616 (enum zio_checksum)ztest_random_dsl_prop(ZFS_PROP_CHECKSUM), tx); 3617 3618 dmu_object_set_compress(os, bigobj, 3619 (enum zio_compress)ztest_random_dsl_prop(ZFS_PROP_COMPRESSION), tx); 3620 3621 /* 3622 * For each index from n to n + s, verify that the existing bufwad 3623 * in packobj matches the bufwads at the head and tail of the 3624 * corresponding chunk in bigobj. Then update all three bufwads 3625 * with the new values we want to write out. 
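 * A bufwad whose recorded bw_txg is greater than the txg we were just
 * assigned would mean that a write from a transaction group that has not
 * even been assigned yet became visible early; that "future leak" is
 * reported as fatal.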
3626 */ 3627 for (i = 0; i < s; i++) { 3628 /* LINTED */ 3629 pack = (bufwad_t *)((char *)packbuf + i * sizeof (bufwad_t)); 3630 /* LINTED */ 3631 bigH = (bufwad_t *)((char *)bigbuf + i * chunksize); 3632 /* LINTED */ 3633 bigT = (bufwad_t *)((char *)bigH + chunksize) - 1; 3634 3635 ASSERT((uintptr_t)bigH - (uintptr_t)bigbuf < bigsize); 3636 ASSERT((uintptr_t)bigT - (uintptr_t)bigbuf < bigsize); 3637 3638 if (pack->bw_txg > txg) 3639 fatal(0, "future leak: got %llx, open txg is %llx", 3640 pack->bw_txg, txg); 3641 3642 if (pack->bw_data != 0 && pack->bw_index != n + i) 3643 fatal(0, "wrong index: got %llx, wanted %llx+%llx", 3644 pack->bw_index, n, i); 3645 3646 if (bcmp(pack, bigH, sizeof (bufwad_t)) != 0) 3647 fatal(0, "pack/bigH mismatch in %p/%p", pack, bigH); 3648 3649 if (bcmp(pack, bigT, sizeof (bufwad_t)) != 0) 3650 fatal(0, "pack/bigT mismatch in %p/%p", pack, bigT); 3651 3652 if (freeit) { 3653 bzero(pack, sizeof (bufwad_t)); 3654 } else { 3655 pack->bw_index = n + i; 3656 pack->bw_txg = txg; 3657 pack->bw_data = 1 + ztest_random(-2ULL); 3658 } 3659 *bigH = *pack; 3660 *bigT = *pack; 3661 } 3662 3663 /* 3664 * We've verified all the old bufwads, and made new ones. 3665 * Now write them out. 3666 */ 3667 dmu_write(os, packobj, packoff, packsize, packbuf, tx); 3668 3669 if (freeit) { 3670 if (ztest_opts.zo_verbose >= 7) { 3671 (void) printf("freeing offset %llx size %llx" 3672 " txg %llx\n", 3673 (u_longlong_t)bigoff, 3674 (u_longlong_t)bigsize, 3675 (u_longlong_t)txg); 3676 } 3677 VERIFY(0 == dmu_free_range(os, bigobj, bigoff, bigsize, tx)); 3678 } else { 3679 if (ztest_opts.zo_verbose >= 7) { 3680 (void) printf("writing offset %llx size %llx" 3681 " txg %llx\n", 3682 (u_longlong_t)bigoff, 3683 (u_longlong_t)bigsize, 3684 (u_longlong_t)txg); 3685 } 3686 dmu_write(os, bigobj, bigoff, bigsize, bigbuf, tx); 3687 } 3688 3689 dmu_tx_commit(tx); 3690 3691 /* 3692 * Sanity check the stuff we just wrote. 3693 */ 3694 { 3695 void *packcheck = umem_alloc(packsize, UMEM_NOFAIL); 3696 void *bigcheck = umem_alloc(bigsize, UMEM_NOFAIL); 3697 3698 VERIFY(0 == dmu_read(os, packobj, packoff, 3699 packsize, packcheck, DMU_READ_PREFETCH)); 3700 VERIFY(0 == dmu_read(os, bigobj, bigoff, 3701 bigsize, bigcheck, DMU_READ_PREFETCH)); 3702 3703 ASSERT(bcmp(packbuf, packcheck, packsize) == 0); 3704 ASSERT(bcmp(bigbuf, bigcheck, bigsize) == 0); 3705 3706 umem_free(packcheck, packsize); 3707 umem_free(bigcheck, bigsize); 3708 } 3709 3710 umem_free(packbuf, packsize); 3711 umem_free(bigbuf, bigsize); 3712} 3713 3714void 3715compare_and_update_pbbufs(uint64_t s, bufwad_t *packbuf, bufwad_t *bigbuf, 3716 uint64_t bigsize, uint64_t n, uint64_t chunksize, uint64_t txg) 3717{ 3718 uint64_t i; 3719 bufwad_t *pack; 3720 bufwad_t *bigH; 3721 bufwad_t *bigT; 3722 3723 /* 3724 * For each index from n to n + s, verify that the existing bufwad 3725 * in packobj matches the bufwads at the head and tail of the 3726 * corresponding chunk in bigobj. Then update all three bufwads 3727 * with the new values we want to write out. 
3728 */ 3729 for (i = 0; i < s; i++) { 3730 /* LINTED */ 3731 pack = (bufwad_t *)((char *)packbuf + i * sizeof (bufwad_t)); 3732 /* LINTED */ 3733 bigH = (bufwad_t *)((char *)bigbuf + i * chunksize); 3734 /* LINTED */ 3735 bigT = (bufwad_t *)((char *)bigH + chunksize) - 1; 3736 3737 ASSERT((uintptr_t)bigH - (uintptr_t)bigbuf < bigsize); 3738 ASSERT((uintptr_t)bigT - (uintptr_t)bigbuf < bigsize); 3739 3740 if (pack->bw_txg > txg) 3741 fatal(0, "future leak: got %llx, open txg is %llx", 3742 pack->bw_txg, txg); 3743 3744 if (pack->bw_data != 0 && pack->bw_index != n + i) 3745 fatal(0, "wrong index: got %llx, wanted %llx+%llx", 3746 pack->bw_index, n, i); 3747 3748 if (bcmp(pack, bigH, sizeof (bufwad_t)) != 0) 3749 fatal(0, "pack/bigH mismatch in %p/%p", pack, bigH); 3750 3751 if (bcmp(pack, bigT, sizeof (bufwad_t)) != 0) 3752 fatal(0, "pack/bigT mismatch in %p/%p", pack, bigT); 3753 3754 pack->bw_index = n + i; 3755 pack->bw_txg = txg; 3756 pack->bw_data = 1 + ztest_random(-2ULL); 3757 3758 *bigH = *pack; 3759 *bigT = *pack; 3760 } 3761} 3762 3763void 3764ztest_dmu_read_write_zcopy(ztest_ds_t *zd, uint64_t id) 3765{ 3766 objset_t *os = zd->zd_os; 3767 ztest_od_t od[2]; 3768 dmu_tx_t *tx; 3769 uint64_t i; 3770 int error; 3771 uint64_t n, s, txg; 3772 bufwad_t *packbuf, *bigbuf; 3773 uint64_t packobj, packoff, packsize, bigobj, bigoff, bigsize; 3774 uint64_t blocksize = ztest_random_blocksize(); 3775 uint64_t chunksize = blocksize; 3776 uint64_t regions = 997; 3777 uint64_t stride = 123456789ULL; 3778 uint64_t width = 9; 3779 dmu_buf_t *bonus_db; 3780 arc_buf_t **bigbuf_arcbufs; 3781 dmu_object_info_t doi; 3782 3783 /* 3784 * This test uses two objects, packobj and bigobj, that are always 3785 * updated together (i.e. in the same tx) so that their contents are 3786 * in sync and can be compared. Their contents relate to each other 3787 * in a simple way: packobj is a dense array of 'bufwad' structures, 3788 * while bigobj is a sparse array of the same bufwads. Specifically, 3789 * for any index n, there are three bufwads that should be identical: 3790 * 3791 * packobj, at offset n * sizeof (bufwad_t) 3792 * bigobj, at the head of the nth chunk 3793 * bigobj, at the tail of the nth chunk 3794 * 3795 * The chunk size is set equal to bigobj block size so that 3796 * dmu_assign_arcbuf() can be tested for object updates. 3797 */ 3798 3799 /* 3800 * Read the directory info. If it's the first time, set things up. 3801 */ 3802 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_UINT64_OTHER, blocksize, 0); 3803 ztest_od_init(&od[1], id, FTAG, 1, DMU_OT_UINT64_OTHER, 0, chunksize); 3804 3805 if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0) 3806 return; 3807 3808 bigobj = od[0].od_object; 3809 packobj = od[1].od_object; 3810 blocksize = od[0].od_blocksize; 3811 chunksize = blocksize; 3812 ASSERT(chunksize == od[1].od_gen); 3813 3814 VERIFY(dmu_object_info(os, bigobj, &doi) == 0); 3815 VERIFY(ISP2(doi.doi_data_block_size)); 3816 VERIFY(chunksize == doi.doi_data_block_size); 3817 VERIFY(chunksize >= 2 * sizeof (bufwad_t)); 3818 3819 /* 3820 * Pick a random index and compute the offsets into packobj and bigobj. 
3821 */ 3822 n = ztest_random(regions) * stride + ztest_random(width); 3823 s = 1 + ztest_random(width - 1); 3824 3825 packoff = n * sizeof (bufwad_t); 3826 packsize = s * sizeof (bufwad_t); 3827 3828 bigoff = n * chunksize; 3829 bigsize = s * chunksize; 3830 3831 packbuf = umem_zalloc(packsize, UMEM_NOFAIL); 3832 bigbuf = umem_zalloc(bigsize, UMEM_NOFAIL); 3833 3834 VERIFY3U(0, ==, dmu_bonus_hold(os, bigobj, FTAG, &bonus_db)); 3835 3836 bigbuf_arcbufs = umem_zalloc(2 * s * sizeof (arc_buf_t *), UMEM_NOFAIL); 3837 3838 /* 3839 * Iteration 0 test zcopy for DB_UNCACHED dbufs. 3840 * Iteration 1 test zcopy to already referenced dbufs. 3841 * Iteration 2 test zcopy to dirty dbuf in the same txg. 3842 * Iteration 3 test zcopy to dbuf dirty in previous txg. 3843 * Iteration 4 test zcopy when dbuf is no longer dirty. 3844 * Iteration 5 test zcopy when it can't be done. 3845 * Iteration 6 one more zcopy write. 3846 */ 3847 for (i = 0; i < 7; i++) { 3848 uint64_t j; 3849 uint64_t off; 3850 3851 /* 3852 * In iteration 5 (i == 5) use arcbufs 3853 * that don't match bigobj blksz to test 3854 * dmu_assign_arcbuf() when it can't directly 3855 * assign an arcbuf to a dbuf. 3856 */ 3857 for (j = 0; j < s; j++) { 3858 if (i != 5) { 3859 bigbuf_arcbufs[j] = 3860 dmu_request_arcbuf(bonus_db, chunksize); 3861 } else { 3862 bigbuf_arcbufs[2 * j] = 3863 dmu_request_arcbuf(bonus_db, chunksize / 2); 3864 bigbuf_arcbufs[2 * j + 1] = 3865 dmu_request_arcbuf(bonus_db, chunksize / 2); 3866 } 3867 } 3868 3869 /* 3870 * Get a tx for the mods to both packobj and bigobj. 3871 */ 3872 tx = dmu_tx_create(os); 3873 3874 dmu_tx_hold_write(tx, packobj, packoff, packsize); 3875 dmu_tx_hold_write(tx, bigobj, bigoff, bigsize); 3876 3877 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG); 3878 if (txg == 0) { 3879 umem_free(packbuf, packsize); 3880 umem_free(bigbuf, bigsize); 3881 for (j = 0; j < s; j++) { 3882 if (i != 5) { 3883 dmu_return_arcbuf(bigbuf_arcbufs[j]); 3884 } else { 3885 dmu_return_arcbuf( 3886 bigbuf_arcbufs[2 * j]); 3887 dmu_return_arcbuf( 3888 bigbuf_arcbufs[2 * j + 1]); 3889 } 3890 } 3891 umem_free(bigbuf_arcbufs, 2 * s * sizeof (arc_buf_t *)); 3892 dmu_buf_rele(bonus_db, FTAG); 3893 return; 3894 } 3895 3896 /* 3897 * 50% of the time don't read objects in the 1st iteration to 3898 * test dmu_assign_arcbuf() for the case when there're no 3899 * existing dbufs for the specified offsets. 3900 */ 3901 if (i != 0 || ztest_random(2) != 0) { 3902 error = dmu_read(os, packobj, packoff, 3903 packsize, packbuf, DMU_READ_PREFETCH); 3904 ASSERT0(error); 3905 error = dmu_read(os, bigobj, bigoff, bigsize, 3906 bigbuf, DMU_READ_PREFETCH); 3907 ASSERT0(error); 3908 } 3909 compare_and_update_pbbufs(s, packbuf, bigbuf, bigsize, 3910 n, chunksize, txg); 3911 3912 /* 3913 * We've verified all the old bufwads, and made new ones. 3914 * Now write them out. 
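 * (In iteration 5, each chunk is handed to dmu_assign_arcbuf() as two
 * half-block buffers, exercising the path where the loaned arcbuf does
 * not match the block size and has to be copied rather than attached.)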
3915 */ 3916 dmu_write(os, packobj, packoff, packsize, packbuf, tx); 3917 if (ztest_opts.zo_verbose >= 7) { 3918 (void) printf("writing offset %llx size %llx" 3919 " txg %llx\n", 3920 (u_longlong_t)bigoff, 3921 (u_longlong_t)bigsize, 3922 (u_longlong_t)txg); 3923 } 3924 for (off = bigoff, j = 0; j < s; j++, off += chunksize) { 3925 dmu_buf_t *dbt; 3926 if (i != 5) { 3927 bcopy((caddr_t)bigbuf + (off - bigoff), 3928 bigbuf_arcbufs[j]->b_data, chunksize); 3929 } else { 3930 bcopy((caddr_t)bigbuf + (off - bigoff), 3931 bigbuf_arcbufs[2 * j]->b_data, 3932 chunksize / 2); 3933 bcopy((caddr_t)bigbuf + (off - bigoff) + 3934 chunksize / 2, 3935 bigbuf_arcbufs[2 * j + 1]->b_data, 3936 chunksize / 2); 3937 } 3938 3939 if (i == 1) { 3940 VERIFY(dmu_buf_hold(os, bigobj, off, 3941 FTAG, &dbt, DMU_READ_NO_PREFETCH) == 0); 3942 } 3943 if (i != 5) { 3944 dmu_assign_arcbuf(bonus_db, off, 3945 bigbuf_arcbufs[j], tx); 3946 } else { 3947 dmu_assign_arcbuf(bonus_db, off, 3948 bigbuf_arcbufs[2 * j], tx); 3949 dmu_assign_arcbuf(bonus_db, 3950 off + chunksize / 2, 3951 bigbuf_arcbufs[2 * j + 1], tx); 3952 } 3953 if (i == 1) { 3954 dmu_buf_rele(dbt, FTAG); 3955 } 3956 } 3957 dmu_tx_commit(tx); 3958 3959 /* 3960 * Sanity check the stuff we just wrote. 3961 */ 3962 { 3963 void *packcheck = umem_alloc(packsize, UMEM_NOFAIL); 3964 void *bigcheck = umem_alloc(bigsize, UMEM_NOFAIL); 3965 3966 VERIFY(0 == dmu_read(os, packobj, packoff, 3967 packsize, packcheck, DMU_READ_PREFETCH)); 3968 VERIFY(0 == dmu_read(os, bigobj, bigoff, 3969 bigsize, bigcheck, DMU_READ_PREFETCH)); 3970 3971 ASSERT(bcmp(packbuf, packcheck, packsize) == 0); 3972 ASSERT(bcmp(bigbuf, bigcheck, bigsize) == 0); 3973 3974 umem_free(packcheck, packsize); 3975 umem_free(bigcheck, bigsize); 3976 } 3977 if (i == 2) { 3978 txg_wait_open(dmu_objset_pool(os), 0); 3979 } else if (i == 3) { 3980 txg_wait_synced(dmu_objset_pool(os), 0); 3981 } 3982 } 3983 3984 dmu_buf_rele(bonus_db, FTAG); 3985 umem_free(packbuf, packsize); 3986 umem_free(bigbuf, bigsize); 3987 umem_free(bigbuf_arcbufs, 2 * s * sizeof (arc_buf_t *)); 3988} 3989 3990/* ARGSUSED */ 3991void 3992ztest_dmu_write_parallel(ztest_ds_t *zd, uint64_t id) 3993{ 3994 ztest_od_t od[1]; 3995 uint64_t offset = (1ULL << (ztest_random(20) + 43)) + 3996 (ztest_random(ZTEST_RANGE_LOCKS) << SPA_MAXBLOCKSHIFT); 3997 3998 /* 3999 * Have multiple threads write to large offsets in an object 4000 * to verify that parallel writes to an object -- even to the 4001 * same blocks within the object -- doesn't cause any trouble. 
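 * The offset computed above is a random power of two between 2^43 and
 * 2^62 (roughly 8 TiB to 4 EiB into the object), plus a random multiple
 * of the maximum block size, so all the writers collide on a limited set
 * of block-aligned offsets in an otherwise sparse object.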
4002 */ 4003 ztest_od_init(&od[0], ID_PARALLEL, FTAG, 0, DMU_OT_UINT64_OTHER, 0, 0); 4004 4005 if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0) 4006 return; 4007 4008 while (ztest_random(10) != 0) 4009 ztest_io(zd, od[0].od_object, offset); 4010} 4011 4012void 4013ztest_dmu_prealloc(ztest_ds_t *zd, uint64_t id) 4014{ 4015 ztest_od_t od[1]; 4016 uint64_t offset = (1ULL << (ztest_random(4) + SPA_MAXBLOCKSHIFT)) + 4017 (ztest_random(ZTEST_RANGE_LOCKS) << SPA_MAXBLOCKSHIFT); 4018 uint64_t count = ztest_random(20) + 1; 4019 uint64_t blocksize = ztest_random_blocksize(); 4020 void *data; 4021 4022 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_UINT64_OTHER, blocksize, 0); 4023 4024 if (ztest_object_init(zd, od, sizeof (od), !ztest_random(2)) != 0) 4025 return; 4026 4027 if (ztest_truncate(zd, od[0].od_object, offset, count * blocksize) != 0) 4028 return; 4029 4030 ztest_prealloc(zd, od[0].od_object, offset, count * blocksize); 4031 4032 data = umem_zalloc(blocksize, UMEM_NOFAIL); 4033 4034 while (ztest_random(count) != 0) { 4035 uint64_t randoff = offset + (ztest_random(count) * blocksize); 4036 if (ztest_write(zd, od[0].od_object, randoff, blocksize, 4037 data) != 0) 4038 break; 4039 while (ztest_random(4) != 0) 4040 ztest_io(zd, od[0].od_object, randoff); 4041 } 4042 4043 umem_free(data, blocksize); 4044} 4045 4046/* 4047 * Verify that zap_{create,destroy,add,remove,update} work as expected. 4048 */ 4049#define ZTEST_ZAP_MIN_INTS 1 4050#define ZTEST_ZAP_MAX_INTS 4 4051#define ZTEST_ZAP_MAX_PROPS 1000 4052 4053void 4054ztest_zap(ztest_ds_t *zd, uint64_t id) 4055{ 4056 objset_t *os = zd->zd_os; 4057 ztest_od_t od[1]; 4058 uint64_t object; 4059 uint64_t txg, last_txg; 4060 uint64_t value[ZTEST_ZAP_MAX_INTS]; 4061 uint64_t zl_ints, zl_intsize, prop; 4062 int i, ints; 4063 dmu_tx_t *tx; 4064 char propname[100], txgname[100]; 4065 int error; 4066 char *hc[2] = { "s.acl.h", ".s.open.h.hyLZlg" }; 4067 4068 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_ZAP_OTHER, 0, 0); 4069 4070 if (ztest_object_init(zd, od, sizeof (od), !ztest_random(2)) != 0) 4071 return; 4072 4073 object = od[0].od_object; 4074 4075 /* 4076 * Generate a known hash collision, and verify that 4077 * we can lookup and remove both entries. 4078 */ 4079 tx = dmu_tx_create(os); 4080 dmu_tx_hold_zap(tx, object, B_TRUE, NULL); 4081 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG); 4082 if (txg == 0) 4083 return; 4084 for (i = 0; i < 2; i++) { 4085 value[i] = i; 4086 VERIFY3U(0, ==, zap_add(os, object, hc[i], sizeof (uint64_t), 4087 1, &value[i], tx)); 4088 } 4089 for (i = 0; i < 2; i++) { 4090 VERIFY3U(EEXIST, ==, zap_add(os, object, hc[i], 4091 sizeof (uint64_t), 1, &value[i], tx)); 4092 VERIFY3U(0, ==, 4093 zap_length(os, object, hc[i], &zl_intsize, &zl_ints)); 4094 ASSERT3U(zl_intsize, ==, sizeof (uint64_t)); 4095 ASSERT3U(zl_ints, ==, 1); 4096 } 4097 for (i = 0; i < 2; i++) { 4098 VERIFY3U(0, ==, zap_remove(os, object, hc[i], tx)); 4099 } 4100 dmu_tx_commit(tx); 4101 4102 /* 4103 * Generate a buch of random entries. 4104 */ 4105 ints = MAX(ZTEST_ZAP_MIN_INTS, object % ZTEST_ZAP_MAX_INTS); 4106 4107 prop = ztest_random(ZTEST_ZAP_MAX_PROPS); 4108 (void) sprintf(propname, "prop_%llu", (u_longlong_t)prop); 4109 (void) sprintf(txgname, "txg_%llu", (u_longlong_t)prop); 4110 bzero(value, sizeof (value)); 4111 last_txg = 0; 4112 4113 /* 4114 * If these zap entries already exist, validate their contents. 
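 * The invariant (established by the update further below) is that
 * txg_%llu records the txg of the most recent update and that element i
 * of prop_%llu equals that txg + object + i.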
4115 */ 4116 error = zap_length(os, object, txgname, &zl_intsize, &zl_ints); 4117 if (error == 0) { 4118 ASSERT3U(zl_intsize, ==, sizeof (uint64_t)); 4119 ASSERT3U(zl_ints, ==, 1); 4120 4121 VERIFY(zap_lookup(os, object, txgname, zl_intsize, 4122 zl_ints, &last_txg) == 0); 4123 4124 VERIFY(zap_length(os, object, propname, &zl_intsize, 4125 &zl_ints) == 0); 4126 4127 ASSERT3U(zl_intsize, ==, sizeof (uint64_t)); 4128 ASSERT3U(zl_ints, ==, ints); 4129 4130 VERIFY(zap_lookup(os, object, propname, zl_intsize, 4131 zl_ints, value) == 0); 4132 4133 for (i = 0; i < ints; i++) { 4134 ASSERT3U(value[i], ==, last_txg + object + i); 4135 } 4136 } else { 4137 ASSERT3U(error, ==, ENOENT); 4138 } 4139 4140 /* 4141 * Atomically update two entries in our zap object. 4142 * The first is named txg_%llu, and contains the txg 4143 * in which the property was last updated. The second 4144 * is named prop_%llu, and the nth element of its value 4145 * should be txg + object + n. 4146 */ 4147 tx = dmu_tx_create(os); 4148 dmu_tx_hold_zap(tx, object, B_TRUE, NULL); 4149 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG); 4150 if (txg == 0) 4151 return; 4152 4153 if (last_txg > txg) 4154 fatal(0, "zap future leak: old %llu new %llu", last_txg, txg); 4155 4156 for (i = 0; i < ints; i++) 4157 value[i] = txg + object + i; 4158 4159 VERIFY3U(0, ==, zap_update(os, object, txgname, sizeof (uint64_t), 4160 1, &txg, tx)); 4161 VERIFY3U(0, ==, zap_update(os, object, propname, sizeof (uint64_t), 4162 ints, value, tx)); 4163 4164 dmu_tx_commit(tx); 4165 4166 /* 4167 * Remove a random pair of entries. 4168 */ 4169 prop = ztest_random(ZTEST_ZAP_MAX_PROPS); 4170 (void) sprintf(propname, "prop_%llu", (u_longlong_t)prop); 4171 (void) sprintf(txgname, "txg_%llu", (u_longlong_t)prop); 4172 4173 error = zap_length(os, object, txgname, &zl_intsize, &zl_ints); 4174 4175 if (error == ENOENT) 4176 return; 4177 4178 ASSERT0(error); 4179 4180 tx = dmu_tx_create(os); 4181 dmu_tx_hold_zap(tx, object, B_TRUE, NULL); 4182 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG); 4183 if (txg == 0) 4184 return; 4185 VERIFY3U(0, ==, zap_remove(os, object, txgname, tx)); 4186 VERIFY3U(0, ==, zap_remove(os, object, propname, tx)); 4187 dmu_tx_commit(tx); 4188} 4189 4190/* 4191 * Testcase to test the upgrading of a microzap to fatzap. 4192 */ 4193void 4194ztest_fzap(ztest_ds_t *zd, uint64_t id) 4195{ 4196 objset_t *os = zd->zd_os; 4197 ztest_od_t od[1]; 4198 uint64_t object, txg; 4199 4200 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_ZAP_OTHER, 0, 0); 4201 4202 if (ztest_object_init(zd, od, sizeof (od), !ztest_random(2)) != 0) 4203 return; 4204 4205 object = od[0].od_object; 4206 4207 /* 4208 * Add entries to this ZAP and make sure it spills over 4209 * and gets upgraded to a fatzap. Also, since we are adding 4210 * 2050 entries we should see ptrtbl growth and leaf-block split. 
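 * (A microzap lives in a single block of 64-byte entries, so it can hold
 * at most roughly two thousand entries; 2050 adds are therefore more than
 * enough to force the conversion.)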
4211 */ 4212 for (int i = 0; i < 2050; i++) { 4213 char name[MAXNAMELEN]; 4214 uint64_t value = i; 4215 dmu_tx_t *tx; 4216 int error; 4217 4218 (void) snprintf(name, sizeof (name), "fzap-%llu-%llu", 4219 id, value); 4220 4221 tx = dmu_tx_create(os); 4222 dmu_tx_hold_zap(tx, object, B_TRUE, name); 4223 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG); 4224 if (txg == 0) 4225 return; 4226 error = zap_add(os, object, name, sizeof (uint64_t), 1, 4227 &value, tx); 4228 ASSERT(error == 0 || error == EEXIST); 4229 dmu_tx_commit(tx); 4230 } 4231} 4232 4233/* ARGSUSED */ 4234void 4235ztest_zap_parallel(ztest_ds_t *zd, uint64_t id) 4236{ 4237 objset_t *os = zd->zd_os; 4238 ztest_od_t od[1]; 4239 uint64_t txg, object, count, wsize, wc, zl_wsize, zl_wc; 4240 dmu_tx_t *tx; 4241 int i, namelen, error; 4242 int micro = ztest_random(2); 4243 char name[20], string_value[20]; 4244 void *data; 4245 4246 ztest_od_init(&od[0], ID_PARALLEL, FTAG, micro, DMU_OT_ZAP_OTHER, 0, 0); 4247 4248 if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0) 4249 return; 4250 4251 object = od[0].od_object; 4252 4253 /* 4254 * Generate a random name of the form 'xxx.....' where each 4255 * x is a random printable character and the dots are dots. 4256 * There are 94 such characters, and the name length goes from 4257 * 6 to 20, so there are 94^3 * 15 = 12,458,760 possible names. 4258 */ 4259 namelen = ztest_random(sizeof (name) - 5) + 5 + 1; 4260 4261 for (i = 0; i < 3; i++) 4262 name[i] = '!' + ztest_random('~' - '!' + 1); 4263 for (; i < namelen - 1; i++) 4264 name[i] = '.'; 4265 name[i] = '\0'; 4266 4267 if ((namelen & 1) || micro) { 4268 wsize = sizeof (txg); 4269 wc = 1; 4270 data = &txg; 4271 } else { 4272 wsize = 1; 4273 wc = namelen; 4274 data = string_value; 4275 } 4276 4277 count = -1ULL; 4278 VERIFY0(zap_count(os, object, &count)); 4279 ASSERT(count != -1ULL); 4280 4281 /* 4282 * Select an operation: length, lookup, add, update, remove. 4283 */ 4284 i = ztest_random(5); 4285 4286 if (i >= 2) { 4287 tx = dmu_tx_create(os); 4288 dmu_tx_hold_zap(tx, object, B_TRUE, NULL); 4289 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG); 4290 if (txg == 0) 4291 return; 4292 bcopy(name, string_value, namelen); 4293 } else { 4294 tx = NULL; 4295 txg = 0; 4296 bzero(string_value, namelen); 4297 } 4298 4299 switch (i) { 4300 4301 case 0: 4302 error = zap_length(os, object, name, &zl_wsize, &zl_wc); 4303 if (error == 0) { 4304 ASSERT3U(wsize, ==, zl_wsize); 4305 ASSERT3U(wc, ==, zl_wc); 4306 } else { 4307 ASSERT3U(error, ==, ENOENT); 4308 } 4309 break; 4310 4311 case 1: 4312 error = zap_lookup(os, object, name, wsize, wc, data); 4313 if (error == 0) { 4314 if (data == string_value && 4315 bcmp(name, data, namelen) != 0) 4316 fatal(0, "name '%s' != val '%s' len %d", 4317 name, data, namelen); 4318 } else { 4319 ASSERT3U(error, ==, ENOENT); 4320 } 4321 break; 4322 4323 case 2: 4324 error = zap_add(os, object, name, wsize, wc, data, tx); 4325 ASSERT(error == 0 || error == EEXIST); 4326 break; 4327 4328 case 3: 4329 VERIFY(zap_update(os, object, name, wsize, wc, data, tx) == 0); 4330 break; 4331 4332 case 4: 4333 error = zap_remove(os, object, name, tx); 4334 ASSERT(error == 0 || error == ENOENT); 4335 break; 4336 } 4337 4338 if (tx != NULL) 4339 dmu_tx_commit(tx); 4340} 4341 4342/* 4343 * Commit callback data. 
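 * Each registered callback gets one of these. zcd_txg is the txg the
 * transaction was assigned to (0 if it was never assigned), zcd_expected_err
 * is the error the callback expects to be invoked with (0 on commit,
 * ECANCELED on abort), zcd_added records whether the callback was linked
 * onto zcl.zcl_callbacks, and zcd_called is set by ztest_commit_callback()
 * so that missing or duplicate invocations can be detected.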
4344 */ 4345 typedef struct ztest_cb_data { 4346 list_node_t zcd_node; 4347 uint64_t zcd_txg; 4348 int zcd_expected_err; 4349 boolean_t zcd_added; 4350 boolean_t zcd_called; 4351 spa_t *zcd_spa; 4352} ztest_cb_data_t; 4353 4354/* This is the actual commit callback function */ 4355static void 4356ztest_commit_callback(void *arg, int error) 4357{ 4358 ztest_cb_data_t *data = arg; 4359 uint64_t synced_txg; 4360 4361 VERIFY(data != NULL); 4362 VERIFY3S(data->zcd_expected_err, ==, error); 4363 VERIFY(!data->zcd_called); 4364 4365 synced_txg = spa_last_synced_txg(data->zcd_spa); 4366 if (data->zcd_txg > synced_txg) 4367 fatal(0, "commit callback of txg %" PRIu64 " called prematurely" 4368 ", last synced txg = %" PRIu64 "\n", data->zcd_txg, 4369 synced_txg); 4370 4371 data->zcd_called = B_TRUE; 4372 4373 if (error == ECANCELED) { 4374 ASSERT0(data->zcd_txg); 4375 ASSERT(!data->zcd_added); 4376 4377 /* 4378 * The private callback data should be destroyed here, but 4379 * since we are going to check the zcd_called field after 4380 * dmu_tx_abort(), we will destroy it there. 4381 */ 4382 return; 4383 } 4384 4385 /* Was this callback added to the global callback list? */ 4386 if (!data->zcd_added) 4387 goto out; 4388 4389 ASSERT3U(data->zcd_txg, !=, 0); 4390 4391 /* Remove our callback from the list */ 4392 (void) mutex_lock(&zcl.zcl_callbacks_lock); 4393 list_remove(&zcl.zcl_callbacks, data); 4394 (void) mutex_unlock(&zcl.zcl_callbacks_lock); 4395 4396out: 4397 umem_free(data, sizeof (ztest_cb_data_t)); 4398} 4399 4400/* Allocate and initialize callback data structure */ 4401static ztest_cb_data_t * 4402ztest_create_cb_data(objset_t *os, uint64_t txg) 4403{ 4404 ztest_cb_data_t *cb_data; 4405 4406 cb_data = umem_zalloc(sizeof (ztest_cb_data_t), UMEM_NOFAIL); 4407 4408 cb_data->zcd_txg = txg; 4409 cb_data->zcd_spa = dmu_objset_spa(os); 4410 4411 return (cb_data); 4412} 4413 4414/* 4415 * If a number of txgs equal to this threshold have been created after a commit 4416 * callback has been registered but not called, then we assume there is an 4417 * implementation bug. 4418 */ 4419#define ZTEST_COMMIT_CALLBACK_THRESH (TXG_CONCURRENT_STATES + 2) 4420 4421/* 4422 * Commit callback test. 4423 */ 4424void 4425ztest_dmu_commit_callbacks(ztest_ds_t *zd, uint64_t id) 4426{ 4427 objset_t *os = zd->zd_os; 4428 ztest_od_t od[1]; 4429 dmu_tx_t *tx; 4430 ztest_cb_data_t *cb_data[3], *tmp_cb; 4431 uint64_t old_txg, txg; 4432 int i, error = 0; 4433 4434 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_UINT64_OTHER, 0, 0); 4435 4436 if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0) 4437 return; 4438 4439 tx = dmu_tx_create(os); 4440 4441 cb_data[0] = ztest_create_cb_data(os, 0); 4442 dmu_tx_callback_register(tx, ztest_commit_callback, cb_data[0]); 4443 4444 dmu_tx_hold_write(tx, od[0].od_object, 0, sizeof (uint64_t)); 4445 4446 /* Every once in a while, abort the transaction on purpose */ 4447 if (ztest_random(100) == 0) 4448 error = -1; 4449 4450 if (!error) 4451 error = dmu_tx_assign(tx, TXG_NOWAIT); 4452 4453 txg = error ? 0 : dmu_tx_get_txg(tx); 4454 4455 cb_data[0]->zcd_txg = txg; 4456 cb_data[1] = ztest_create_cb_data(os, txg); 4457 dmu_tx_callback_register(tx, ztest_commit_callback, cb_data[1]); 4458 4459 if (error) { 4460 /* 4461 * It's not a strict requirement to call the registered 4462 * callbacks from inside dmu_tx_abort(), but that's what 4463 * is supposed to happen in the current implementation, 4464 * so we will check for that.
4465 */ 4466 for (i = 0; i < 2; i++) { 4467 cb_data[i]->zcd_expected_err = ECANCELED; 4468 VERIFY(!cb_data[i]->zcd_called); 4469 } 4470 4471 dmu_tx_abort(tx); 4472 4473 for (i = 0; i < 2; i++) { 4474 VERIFY(cb_data[i]->zcd_called); 4475 umem_free(cb_data[i], sizeof (ztest_cb_data_t)); 4476 } 4477 4478 return; 4479 } 4480 4481 cb_data[2] = ztest_create_cb_data(os, txg); 4482 dmu_tx_callback_register(tx, ztest_commit_callback, cb_data[2]); 4483 4484 /* 4485 * Read existing data to make sure there isn't a future leak. 4486 */ 4487 VERIFY(0 == dmu_read(os, od[0].od_object, 0, sizeof (uint64_t), 4488 &old_txg, DMU_READ_PREFETCH)); 4489 4490 if (old_txg > txg) 4491 fatal(0, "future leak: got %" PRIu64 ", open txg is %" PRIu64, 4492 old_txg, txg); 4493 4494 dmu_write(os, od[0].od_object, 0, sizeof (uint64_t), &txg, tx); 4495 4496 (void) mutex_lock(&zcl.zcl_callbacks_lock); 4497 4498 /* 4499 * Since commit callbacks don't have any ordering requirement and since 4500 * it is theoretically possible for a commit callback to be called 4501 * after an arbitrary amount of time has elapsed since its txg has been 4502 * synced, it is difficult to reliably determine whether a commit 4503 * callback hasn't been called due to high load or due to a flawed 4504 * implementation. 4505 * 4506 * In practice, we assume that if a commit callback still hasn't been 4507 * called after a certain number of txgs, there is most likely an 4508 * implementation bug. 4509 */ 4510 tmp_cb = list_head(&zcl.zcl_callbacks); 4511 if (tmp_cb != NULL && 4512 (txg - ZTEST_COMMIT_CALLBACK_THRESH) > tmp_cb->zcd_txg) { 4513 fatal(0, "Commit callback threshold exceeded, oldest txg: %" 4514 PRIu64 ", open txg: %" PRIu64 "\n", tmp_cb->zcd_txg, txg); 4515 } 4516 4517 /* 4518 * Let's find the place to insert our callbacks. 4519 * 4520 * Even though the list is ordered by txg, it is possible for the 4521 * insertion point to not be the end because our txg may already be 4522 * quiescing at this point and other callbacks in the open txg 4523 * (from other objsets) may have sneaked in.
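 * We therefore walk backwards from the tail until we find a callback
 * whose txg is no greater than ours and insert after it, which keeps
 * the list sorted by txg.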
4524 */ 4525 tmp_cb = list_tail(&zcl.zcl_callbacks); 4526 while (tmp_cb != NULL && tmp_cb->zcd_txg > txg) 4527 tmp_cb = list_prev(&zcl.zcl_callbacks, tmp_cb); 4528 4529 /* Add the 3 callbacks to the list */ 4530 for (i = 0; i < 3; i++) { 4531 if (tmp_cb == NULL) 4532 list_insert_head(&zcl.zcl_callbacks, cb_data[i]); 4533 else 4534 list_insert_after(&zcl.zcl_callbacks, tmp_cb, 4535 cb_data[i]); 4536 4537 cb_data[i]->zcd_added = B_TRUE; 4538 VERIFY(!cb_data[i]->zcd_called); 4539 4540 tmp_cb = cb_data[i]; 4541 } 4542 4543 (void) mutex_unlock(&zcl.zcl_callbacks_lock); 4544 4545 dmu_tx_commit(tx); 4546} 4547 4548/* ARGSUSED */ 4549void 4550ztest_dsl_prop_get_set(ztest_ds_t *zd, uint64_t id) 4551{ 4552 zfs_prop_t proplist[] = { 4553 ZFS_PROP_CHECKSUM, 4554 ZFS_PROP_COMPRESSION, 4555 ZFS_PROP_COPIES, 4556 ZFS_PROP_DEDUP 4557 }; 4558 4559 (void) rw_rdlock(&ztest_name_lock); 4560 4561 for (int p = 0; p < sizeof (proplist) / sizeof (proplist[0]); p++) 4562 (void) ztest_dsl_prop_set_uint64(zd->zd_name, proplist[p], 4563 ztest_random_dsl_prop(proplist[p]), (int)ztest_random(2)); 4564 4565 (void) rw_unlock(&ztest_name_lock); 4566} 4567 4568/* ARGSUSED */ 4569void 4570ztest_spa_prop_get_set(ztest_ds_t *zd, uint64_t id) 4571{ 4572 nvlist_t *props = NULL; 4573 4574 (void) rw_rdlock(&ztest_name_lock); 4575 4576 (void) ztest_spa_prop_set_uint64(ZPOOL_PROP_DEDUPDITTO, 4577 ZIO_DEDUPDITTO_MIN + ztest_random(ZIO_DEDUPDITTO_MIN)); 4578 4579 VERIFY0(spa_prop_get(ztest_spa, &props)); 4580 4581 if (ztest_opts.zo_verbose >= 6) 4582 dump_nvlist(props, 4); 4583 4584 nvlist_free(props); 4585 4586 (void) rw_unlock(&ztest_name_lock); 4587} 4588 4589static int 4590user_release_one(const char *snapname, const char *holdname) 4591{ 4592 nvlist_t *snaps, *holds; 4593 int error; 4594 4595 snaps = fnvlist_alloc(); 4596 holds = fnvlist_alloc(); 4597 fnvlist_add_boolean(holds, holdname); 4598 fnvlist_add_nvlist(snaps, snapname, holds); 4599 fnvlist_free(holds); 4600 error = dsl_dataset_user_release(snaps, NULL); 4601 fnvlist_free(snaps); 4602 return (error); 4603} 4604 4605/* 4606 * Test snapshot hold/release and deferred destroy. 4607 */ 4608void 4609ztest_dmu_snapshot_hold(ztest_ds_t *zd, uint64_t id) 4610{ 4611 int error; 4612 objset_t *os = zd->zd_os; 4613 objset_t *origin; 4614 char snapname[100]; 4615 char fullname[100]; 4616 char clonename[100]; 4617 char tag[100]; 4618 char osname[MAXNAMELEN]; 4619 nvlist_t *holds; 4620 4621 (void) rw_rdlock(&ztest_name_lock); 4622 4623 dmu_objset_name(os, osname); 4624 4625 (void) snprintf(snapname, sizeof (snapname), "sh1_%llu", id); 4626 (void) snprintf(fullname, sizeof (fullname), "%s@%s", osname, snapname); 4627 (void) snprintf(clonename, sizeof (clonename), 4628 "%s/ch1_%llu", osname, id); 4629 (void) snprintf(tag, sizeof (tag), "tag_%llu", id); 4630 4631 /* 4632 * Clean up from any previous run. 4633 */ 4634 error = dsl_destroy_head(clonename); 4635 if (error != ENOENT) 4636 ASSERT0(error); 4637 error = user_release_one(fullname, tag); 4638 if (error != ESRCH && error != ENOENT) 4639 ASSERT0(error); 4640 error = dsl_destroy_snapshot(fullname, B_FALSE); 4641 if (error != ENOENT) 4642 ASSERT0(error); 4643 4644 /* 4645 * Create snapshot, clone it, mark snap for deferred destroy, 4646 * destroy clone, verify snap was also destroyed. 
4647 */ 4648 error = dmu_objset_snapshot_one(osname, snapname); 4649 if (error) { 4650 if (error == ENOSPC) { 4651 ztest_record_enospc("dmu_objset_snapshot"); 4652 goto out; 4653 } 4654 fatal(0, "dmu_objset_snapshot(%s) = %d", fullname, error); 4655 } 4656 4657 error = dmu_objset_clone(clonename, fullname); 4658 if (error) { 4659 if (error == ENOSPC) { 4660 ztest_record_enospc("dmu_objset_clone"); 4661 goto out; 4662 } 4663 fatal(0, "dmu_objset_clone(%s) = %d", clonename, error); 4664 } 4665 4666 error = dsl_destroy_snapshot(fullname, B_TRUE); 4667 if (error) { 4668 fatal(0, "dsl_destroy_snapshot(%s, B_TRUE) = %d", 4669 fullname, error); 4670 } 4671 4672 error = dsl_destroy_head(clonename); 4673 if (error) 4674 fatal(0, "dsl_destroy_head(%s) = %d", clonename, error); 4675 4676 error = dmu_objset_hold(fullname, FTAG, &origin); 4677 if (error != ENOENT) 4678 fatal(0, "dmu_objset_hold(%s) = %d", fullname, error); 4679 4680 /* 4681 * Create snapshot, add temporary hold, verify that we can't 4682 * destroy a held snapshot, mark for deferred destroy, 4683 * release hold, verify snapshot was destroyed. 4684 */ 4685 error = dmu_objset_snapshot_one(osname, snapname); 4686 if (error) { 4687 if (error == ENOSPC) { 4688 ztest_record_enospc("dmu_objset_snapshot"); 4689 goto out; 4690 } 4691 fatal(0, "dmu_objset_snapshot(%s) = %d", fullname, error); 4692 } 4693 4694 holds = fnvlist_alloc(); 4695 fnvlist_add_string(holds, fullname, tag); 4696 error = dsl_dataset_user_hold(holds, 0, NULL); 4697 fnvlist_free(holds); 4698 4699 if (error) 4700 fatal(0, "dsl_dataset_user_hold(%s)", fullname, tag); 4701 4702 error = dsl_destroy_snapshot(fullname, B_FALSE); 4703 if (error != EBUSY) { 4704 fatal(0, "dsl_destroy_snapshot(%s, B_FALSE) = %d", 4705 fullname, error); 4706 } 4707 4708 error = dsl_destroy_snapshot(fullname, B_TRUE); 4709 if (error) { 4710 fatal(0, "dsl_destroy_snapshot(%s, B_TRUE) = %d", 4711 fullname, error); 4712 } 4713 4714 error = user_release_one(fullname, tag); 4715 if (error) 4716 fatal(0, "user_release_one(%s)", fullname, tag); 4717 4718 VERIFY3U(dmu_objset_hold(fullname, FTAG, &origin), ==, ENOENT); 4719 4720out: 4721 (void) rw_unlock(&ztest_name_lock); 4722} 4723 4724/* 4725 * Inject random faults into the on-disk data. 4726 */ 4727/* ARGSUSED */ 4728void 4729ztest_fault_inject(ztest_ds_t *zd, uint64_t id) 4730{ 4731 ztest_shared_t *zs = ztest_shared; 4732 spa_t *spa = ztest_spa; 4733 int fd; 4734 uint64_t offset; 4735 uint64_t leaves; 4736 uint64_t bad = 0x1990c0ffeedecadeULL; 4737 uint64_t top, leaf; 4738 char path0[MAXPATHLEN]; 4739 char pathrand[MAXPATHLEN]; 4740 size_t fsize; 4741 int bshift = SPA_MAXBLOCKSHIFT + 2; /* don't scrog all labels */ 4742 int iters = 1000; 4743 int maxfaults; 4744 int mirror_save; 4745 vdev_t *vd0 = NULL; 4746 uint64_t guid0 = 0; 4747 boolean_t islog = B_FALSE; 4748 4749 VERIFY(mutex_lock(&ztest_vdev_lock) == 0); 4750 maxfaults = MAXFAULTS(); 4751 leaves = MAX(zs->zs_mirrors, 1) * ztest_opts.zo_raidz; 4752 mirror_save = zs->zs_mirrors; 4753 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0); 4754 4755 ASSERT(leaves >= 1); 4756 4757 /* 4758 * We need SCL_STATE here because we're going to look at vd0->vdev_tsd. 4759 */ 4760 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER); 4761 4762 if (ztest_random(2) == 0) { 4763 /* 4764 * Inject errors on a normal data device or slog device. 
4765 */ 4766 top = ztest_random_vdev_top(spa, B_TRUE); 4767 leaf = ztest_random(leaves) + zs->zs_splits; 4768 4769 /* 4770 * Generate paths to the first leaf in this top-level vdev, 4771 * and to the random leaf we selected. We'll induce transient 4772 * write failures and random online/offline activity on leaf 0, 4773 * and we'll write random garbage to the randomly chosen leaf. 4774 */ 4775 (void) snprintf(path0, sizeof (path0), ztest_dev_template, 4776 ztest_opts.zo_dir, ztest_opts.zo_pool, 4777 top * leaves + zs->zs_splits); 4778 (void) snprintf(pathrand, sizeof (pathrand), ztest_dev_template, 4779 ztest_opts.zo_dir, ztest_opts.zo_pool, 4780 top * leaves + leaf); 4781 4782 vd0 = vdev_lookup_by_path(spa->spa_root_vdev, path0); 4783 if (vd0 != NULL && vd0->vdev_top->vdev_islog) 4784 islog = B_TRUE; 4785 4786 if (vd0 != NULL && maxfaults != 1) { 4787 /* 4788 * Make vd0 explicitly claim to be unreadable, 4789 * or unwriteable, or reach behind its back 4790 * and close the underlying fd. We can do this if 4791 * maxfaults == 0 because we'll fail and reexecute, 4792 * and we can do it if maxfaults >= 2 because we'll 4793 * have enough redundancy. If maxfaults == 1, the 4794 * combination of this with injection of random data 4795 * corruption below exceeds the pool's fault tolerance. 4796 */ 4797 vdev_file_t *vf = vd0->vdev_tsd; 4798 4799 if (vf != NULL && ztest_random(3) == 0) { 4800 (void) close(vf->vf_vnode->v_fd); 4801 vf->vf_vnode->v_fd = -1; 4802 } else if (ztest_random(2) == 0) { 4803 vd0->vdev_cant_read = B_TRUE; 4804 } else { 4805 vd0->vdev_cant_write = B_TRUE; 4806 } 4807 guid0 = vd0->vdev_guid; 4808 } 4809 } else { 4810 /* 4811 * Inject errors on an l2cache device. 4812 */ 4813 spa_aux_vdev_t *sav = &spa->spa_l2cache; 4814 4815 if (sav->sav_count == 0) { 4816 spa_config_exit(spa, SCL_STATE, FTAG); 4817 return; 4818 } 4819 vd0 = sav->sav_vdevs[ztest_random(sav->sav_count)]; 4820 guid0 = vd0->vdev_guid; 4821 (void) strcpy(path0, vd0->vdev_path); 4822 (void) strcpy(pathrand, vd0->vdev_path); 4823 4824 leaf = 0; 4825 leaves = 1; 4826 maxfaults = INT_MAX; /* no limit on cache devices */ 4827 } 4828 4829 spa_config_exit(spa, SCL_STATE, FTAG); 4830 4831 /* 4832 * If we can tolerate two or more faults, or we're dealing 4833 * with a slog, randomly online/offline vd0. 4834 */ 4835 if ((maxfaults >= 2 || islog) && guid0 != 0) { 4836 if (ztest_random(10) < 6) { 4837 int flags = (ztest_random(2) == 0 ? 4838 ZFS_OFFLINE_TEMPORARY : 0); 4839 4840 /* 4841 * We have to grab the zs_name_lock as writer to 4842 * prevent a race between offlining a slog and 4843 * destroying a dataset. Offlining the slog will 4844 * grab a reference on the dataset which may cause 4845 * dmu_objset_destroy() to fail with EBUSY thus 4846 * leaving the dataset in an inconsistent state. 4847 */ 4848 if (islog) 4849 (void) rw_wrlock(&ztest_name_lock); 4850 4851 VERIFY(vdev_offline(spa, guid0, flags) != EBUSY); 4852 4853 if (islog) 4854 (void) rw_unlock(&ztest_name_lock); 4855 } else { 4856 /* 4857 * Ideally we would like to be able to randomly 4858 * call vdev_[on|off]line without holding locks 4859 * to force unpredictable failures but the side 4860 * effects of vdev_[on|off]line prevent us from 4861 * doing so. We grab the ztest_vdev_lock here to 4862 * prevent a race between injection testing and 4863 * aux_vdev removal. 
4864 */ 4865 VERIFY(mutex_lock(&ztest_vdev_lock) == 0); 4866 (void) vdev_online(spa, guid0, 0, NULL); 4867 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0); 4868 } 4869 } 4870 4871 if (maxfaults == 0) 4872 return; 4873 4874 /* 4875 * We have at least single-fault tolerance, so inject data corruption. 4876 */ 4877 fd = open(pathrand, O_RDWR); 4878 4879 if (fd == -1) /* we hit a gap in the device namespace */ 4880 return; 4881 4882 fsize = lseek(fd, 0, SEEK_END); 4883 4884 while (--iters != 0) { 4885 offset = ztest_random(fsize / (leaves << bshift)) * 4886 (leaves << bshift) + (leaf << bshift) + 4887 (ztest_random(1ULL << (bshift - 1)) & -8ULL); 4888 4889 if (offset >= fsize) 4890 continue; 4891 4892 VERIFY(mutex_lock(&ztest_vdev_lock) == 0); 4893 if (mirror_save != zs->zs_mirrors) { 4894 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0); 4895 (void) close(fd); 4896 return; 4897 } 4898 4899 if (pwrite(fd, &bad, sizeof (bad), offset) != sizeof (bad)) 4900 fatal(1, "can't inject bad word at 0x%llx in %s", 4901 offset, pathrand); 4902 4903 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0); 4904 4905 if (ztest_opts.zo_verbose >= 7) 4906 (void) printf("injected bad word into %s," 4907 " offset 0x%llx\n", pathrand, (u_longlong_t)offset); 4908 } 4909 4910 (void) close(fd); 4911} 4912 4913/* 4914 * Verify that DDT repair works as expected. 4915 */ 4916void 4917ztest_ddt_repair(ztest_ds_t *zd, uint64_t id) 4918{ 4919 ztest_shared_t *zs = ztest_shared; 4920 spa_t *spa = ztest_spa; 4921 objset_t *os = zd->zd_os; 4922 ztest_od_t od[1]; 4923 uint64_t object, blocksize, txg, pattern, psize; 4924 enum zio_checksum checksum = spa_dedup_checksum(spa); 4925 dmu_buf_t *db; 4926 dmu_tx_t *tx; 4927 void *buf; 4928 blkptr_t blk; 4929 int copies = 2 * ZIO_DEDUPDITTO_MIN; 4930 4931 blocksize = ztest_random_blocksize(); 4932 blocksize = MIN(blocksize, 2048); /* because we write so many */ 4933 4934 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_UINT64_OTHER, blocksize, 0); 4935 4936 if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0) 4937 return; 4938 4939 /* 4940 * Take the name lock as writer to prevent anyone else from changing 4941 * the pool and dataset properties we need to maintain during this test. 4942 */ 4943 (void) rw_wrlock(&ztest_name_lock); 4944 4945 if (ztest_dsl_prop_set_uint64(zd->zd_name, ZFS_PROP_DEDUP, checksum, 4946 B_FALSE) != 0 || 4947 ztest_dsl_prop_set_uint64(zd->zd_name, ZFS_PROP_COPIES, 1, 4948 B_FALSE) != 0) { 4949 (void) rw_unlock(&ztest_name_lock); 4950 return; 4951 } 4952 4953 object = od[0].od_object; 4954 blocksize = od[0].od_blocksize; 4955 pattern = zs->zs_guid ^ dmu_objset_fsid_guid(os); 4956 4957 ASSERT(object != 0); 4958 4959 tx = dmu_tx_create(os); 4960 dmu_tx_hold_write(tx, object, 0, copies * blocksize); 4961 txg = ztest_tx_assign(tx, TXG_WAIT, FTAG); 4962 if (txg == 0) { 4963 (void) rw_unlock(&ztest_name_lock); 4964 return; 4965 } 4966 4967 /* 4968 * Write all the copies of our block.
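 * The idea is that writing the same pattern to 2 * ZIO_DEDUPDITTO_MIN
 * consecutive blocks drives the dedup refcount up to the dedupditto
 * threshold configured in ztest_run(), so the DDT should keep an extra
 * physical copy that lets the damage we inject below be repaired.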
4969 */ 4970 for (int i = 0; i < copies; i++) { 4971 uint64_t offset = i * blocksize; 4972 int error = dmu_buf_hold(os, object, offset, FTAG, &db, 4973 DMU_READ_NO_PREFETCH); 4974 if (error != 0) { 4975 fatal(B_FALSE, "dmu_buf_hold(%p, %llu, %llu) = %u", 4976 os, (long long)object, (long long) offset, error); 4977 } 4978 ASSERT(db->db_offset == offset); 4979 ASSERT(db->db_size == blocksize); 4980 ASSERT(ztest_pattern_match(db->db_data, db->db_size, pattern) || 4981 ztest_pattern_match(db->db_data, db->db_size, 0ULL)); 4982 dmu_buf_will_fill(db, tx); 4983 ztest_pattern_set(db->db_data, db->db_size, pattern); 4984 dmu_buf_rele(db, FTAG); 4985 } 4986 4987 dmu_tx_commit(tx); 4988 txg_wait_synced(spa_get_dsl(spa), txg); 4989 4990 /* 4991 * Find out what block we got. 4992 */ 4993 VERIFY0(dmu_buf_hold(os, object, 0, FTAG, &db, 4994 DMU_READ_NO_PREFETCH)); 4995 blk = *((dmu_buf_impl_t *)db)->db_blkptr; 4996 dmu_buf_rele(db, FTAG); 4997 4998 /* 4999 * Damage the block. Dedup-ditto will save us when we read it later. 5000 */ 5001 psize = BP_GET_PSIZE(&blk); 5002 buf = zio_buf_alloc(psize); 5003 ztest_pattern_set(buf, psize, ~pattern); 5004 5005 (void) zio_wait(zio_rewrite(NULL, spa, 0, &blk, 5006 buf, psize, NULL, NULL, ZIO_PRIORITY_SYNC_WRITE, 5007 ZIO_FLAG_CANFAIL | ZIO_FLAG_INDUCE_DAMAGE, NULL)); 5008 5009 zio_buf_free(buf, psize); 5010 5011 (void) rw_unlock(&ztest_name_lock); 5012} 5013 5014/* 5015 * Scrub the pool. 5016 */ 5017/* ARGSUSED */ 5018void 5019ztest_scrub(ztest_ds_t *zd, uint64_t id) 5020{ 5021 spa_t *spa = ztest_spa; 5022 5023 (void) spa_scan(spa, POOL_SCAN_SCRUB); 5024 (void) poll(NULL, 0, 100); /* wait a moment, then force a restart */ 5025 (void) spa_scan(spa, POOL_SCAN_SCRUB); 5026} 5027 5028/* 5029 * Change the guid for the pool. 5030 */ 5031/* ARGSUSED */ 5032void 5033ztest_reguid(ztest_ds_t *zd, uint64_t id) 5034{ 5035 spa_t *spa = ztest_spa; 5036 uint64_t orig, load; 5037 int error; 5038 5039 orig = spa_guid(spa); 5040 load = spa_load_guid(spa); 5041 5042 (void) rw_wrlock(&ztest_name_lock); 5043 error = spa_change_guid(spa); 5044 (void) rw_unlock(&ztest_name_lock); 5045 5046 if (error != 0) 5047 return; 5048 5049 if (ztest_opts.zo_verbose >= 4) { 5050 (void) printf("Changed guid old %llu -> %llu\n", 5051 (u_longlong_t)orig, (u_longlong_t)spa_guid(spa)); 5052 } 5053 5054 VERIFY3U(orig, !=, spa_guid(spa)); 5055 VERIFY3U(load, ==, spa_load_guid(spa)); 5056} 5057 5058/* 5059 * Rename the pool to a different name and then rename it back. 5060 */ 5061/* ARGSUSED */ 5062void 5063ztest_spa_rename(ztest_ds_t *zd, uint64_t id) 5064{ 5065 char *oldname, *newname; 5066 spa_t *spa; 5067 5068 (void) rw_wrlock(&ztest_name_lock); 5069 5070 oldname = ztest_opts.zo_pool; 5071 newname = umem_alloc(strlen(oldname) + 5, UMEM_NOFAIL); 5072 (void) strcpy(newname, oldname); 5073 (void) strcat(newname, "_tmp"); 5074 5075 /* 5076 * Do the rename 5077 */ 5078 VERIFY3U(0, ==, spa_rename(oldname, newname)); 5079 5080 /* 5081 * Try to open it under the old name, which shouldn't exist 5082 */ 5083 VERIFY3U(ENOENT, ==, spa_open(oldname, &spa, FTAG)); 5084 5085 /* 5086 * Open it under the new name and make sure it's still the same spa_t. 
5087 */ 5088 VERIFY3U(0, ==, spa_open(newname, &spa, FTAG)); 5089 5090 ASSERT(spa == ztest_spa); 5091 spa_close(spa, FTAG); 5092 5093 /* 5094 * Rename it back to the original 5095 */ 5096 VERIFY3U(0, ==, spa_rename(newname, oldname)); 5097 5098 /* 5099 * Make sure it can still be opened 5100 */ 5101 VERIFY3U(0, ==, spa_open(oldname, &spa, FTAG)); 5102 5103 ASSERT(spa == ztest_spa); 5104 spa_close(spa, FTAG); 5105 5106 umem_free(newname, strlen(newname) + 1); 5107 5108 (void) rw_unlock(&ztest_name_lock); 5109} 5110 5111/* 5112 * Verify pool integrity by running zdb. 5113 */ 5114static void 5115ztest_run_zdb(char *pool) 5116{ 5117 int status; 5118 char zdb[MAXPATHLEN + MAXNAMELEN + 20]; 5119 char zbuf[1024]; 5120 char *bin; 5121 char *ztest; 5122 char *isa; 5123 int isalen; 5124 FILE *fp; 5125 5126 strlcpy(zdb, "/usr/bin/ztest", sizeof(zdb)); 5127 5128 /* zdb lives in /usr/sbin, while ztest lives in /usr/bin */ 5129 bin = strstr(zdb, "/usr/bin/"); 5130 ztest = strstr(bin, "/ztest"); 5131 isa = bin + 8; 5132 isalen = ztest - isa; 5133 isa = strdup(isa); 5134 /* LINTED */ 5135 (void) sprintf(bin, 5136 "/usr/sbin%.*s/zdb -bcc%s%s -U %s %s", 5137 isalen, 5138 isa, 5139 ztest_opts.zo_verbose >= 3 ? "s" : "", 5140 ztest_opts.zo_verbose >= 4 ? "v" : "", 5141 spa_config_path, 5142 pool); 5143 free(isa); 5144 5145 if (ztest_opts.zo_verbose >= 5) 5146 (void) printf("Executing %s\n", strstr(zdb, "zdb ")); 5147 5148 fp = popen(zdb, "r"); 5149 assert(fp != NULL); 5150 5151 while (fgets(zbuf, sizeof (zbuf), fp) != NULL) 5152 if (ztest_opts.zo_verbose >= 3) 5153 (void) printf("%s", zbuf); 5154 5155 status = pclose(fp); 5156 5157 if (status == 0) 5158 return; 5159 5160 ztest_dump_core = 0; 5161 if (WIFEXITED(status)) 5162 fatal(0, "'%s' exit code %d", zdb, WEXITSTATUS(status)); 5163 else 5164 fatal(0, "'%s' died with signal %d", zdb, WTERMSIG(status)); 5165} 5166 5167static void 5168ztest_walk_pool_directory(char *header) 5169{ 5170 spa_t *spa = NULL; 5171 5172 if (ztest_opts.zo_verbose >= 6) 5173 (void) printf("%s\n", header); 5174 5175 mutex_enter(&spa_namespace_lock); 5176 while ((spa = spa_next(spa)) != NULL) 5177 if (ztest_opts.zo_verbose >= 6) 5178 (void) printf("\t%s\n", spa_name(spa)); 5179 mutex_exit(&spa_namespace_lock); 5180} 5181 5182static void 5183ztest_spa_import_export(char *oldname, char *newname) 5184{ 5185 nvlist_t *config, *newconfig; 5186 uint64_t pool_guid; 5187 spa_t *spa; 5188 int error; 5189 5190 if (ztest_opts.zo_verbose >= 4) { 5191 (void) printf("import/export: old = %s, new = %s\n", 5192 oldname, newname); 5193 } 5194 5195 /* 5196 * Clean up from previous runs. 5197 */ 5198 (void) spa_destroy(newname); 5199 5200 /* 5201 * Get the pool's configuration and guid. 5202 */ 5203 VERIFY3U(0, ==, spa_open(oldname, &spa, FTAG)); 5204 5205 /* 5206 * Kick off a scrub to tickle scrub/export races. 5207 */ 5208 if (ztest_random(2) == 0) 5209 (void) spa_scan(spa, POOL_SCAN_SCRUB); 5210 5211 pool_guid = spa_guid(spa); 5212 spa_close(spa, FTAG); 5213 5214 ztest_walk_pool_directory("pools before export"); 5215 5216 /* 5217 * Export it. 5218 */ 5219 VERIFY3U(0, ==, spa_export(oldname, &config, B_FALSE, B_FALSE)); 5220 5221 ztest_walk_pool_directory("pools after export"); 5222 5223 /* 5224 * Try to import it. 5225 */ 5226 newconfig = spa_tryimport(config); 5227 ASSERT(newconfig != NULL); 5228 nvlist_free(newconfig); 5229 5230 /* 5231 * Import it under the new name. 
5232 */ 5233 error = spa_import(newname, config, NULL, 0); 5234 if (error != 0) { 5235 dump_nvlist(config, 0); 5236 fatal(B_FALSE, "couldn't import pool %s as %s: error %u", 5237 oldname, newname, error); 5238 } 5239 5240 ztest_walk_pool_directory("pools after import"); 5241 5242 /* 5243 * Try to import it again -- should fail with EEXIST. 5244 */ 5245 VERIFY3U(EEXIST, ==, spa_import(newname, config, NULL, 0)); 5246 5247 /* 5248 * Try to import it under a different name -- should fail with EEXIST. 5249 */ 5250 VERIFY3U(EEXIST, ==, spa_import(oldname, config, NULL, 0)); 5251 5252 /* 5253 * Verify that the pool is no longer visible under the old name. 5254 */ 5255 VERIFY3U(ENOENT, ==, spa_open(oldname, &spa, FTAG)); 5256 5257 /* 5258 * Verify that we can open and close the pool using the new name. 5259 */ 5260 VERIFY3U(0, ==, spa_open(newname, &spa, FTAG)); 5261 ASSERT(pool_guid == spa_guid(spa)); 5262 spa_close(spa, FTAG); 5263 5264 nvlist_free(config); 5265} 5266 5267static void 5268ztest_resume(spa_t *spa) 5269{ 5270 if (spa_suspended(spa) && ztest_opts.zo_verbose >= 6) 5271 (void) printf("resuming from suspended state\n"); 5272 spa_vdev_state_enter(spa, SCL_NONE); 5273 vdev_clear(spa, NULL); 5274 (void) spa_vdev_state_exit(spa, NULL, 0); 5275 (void) zio_resume(spa); 5276} 5277 5278static void * 5279ztest_resume_thread(void *arg) 5280{ 5281 spa_t *spa = arg; 5282 5283 while (!ztest_exiting) { 5284 if (spa_suspended(spa)) 5285 ztest_resume(spa); 5286 (void) poll(NULL, 0, 100); 5287 } 5288 return (NULL); 5289} 5290 5291static void * 5292ztest_deadman_thread(void *arg) 5293{ 5294 ztest_shared_t *zs = arg; 5295 int grace = 300; 5296 hrtime_t delta; 5297 5298 delta = (zs->zs_thread_stop - zs->zs_thread_start) / NANOSEC + grace; 5299 5300 (void) poll(NULL, 0, (int)(1000 * delta)); 5301 5302 fatal(0, "failed to complete within %d seconds of deadline", grace); 5303 5304 return (NULL); 5305} 5306 5307static void 5308ztest_execute(int test, ztest_info_t *zi, uint64_t id) 5309{ 5310 ztest_ds_t *zd = &ztest_ds[id % ztest_opts.zo_datasets]; 5311 ztest_shared_callstate_t *zc = ZTEST_GET_SHARED_CALLSTATE(test); 5312 hrtime_t functime = gethrtime(); 5313 5314 for (int i = 0; i < zi->zi_iters; i++) 5315 zi->zi_func(zd, id); 5316 5317 functime = gethrtime() - functime; 5318 5319 atomic_add_64(&zc->zc_count, 1); 5320 atomic_add_64(&zc->zc_time, functime); 5321 5322 if (ztest_opts.zo_verbose >= 4) { 5323 Dl_info dli; 5324 (void) dladdr((void *)zi->zi_func, &dli); 5325 (void) printf("%6.2f sec in %s\n", 5326 (double)functime / NANOSEC, dli.dli_sname); 5327 } 5328} 5329 5330static void * 5331ztest_thread(void *arg) 5332{ 5333 int rand; 5334 uint64_t id = (uintptr_t)arg; 5335 ztest_shared_t *zs = ztest_shared; 5336 uint64_t call_next; 5337 hrtime_t now; 5338 ztest_info_t *zi; 5339 ztest_shared_callstate_t *zc; 5340 5341 while ((now = gethrtime()) < zs->zs_thread_stop) { 5342 /* 5343 * See if it's time to force a crash. 5344 */ 5345 if (now > zs->zs_thread_kill) 5346 ztest_kill(zs); 5347 5348 /* 5349 * If we're getting ENOSPC with some regularity, stop. 5350 */ 5351 if (zs->zs_enospc_count > 10) 5352 break; 5353 5354 /* 5355 * Pick a random function to execute. 
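 * Each function is throttled by its zi_interval: we only call it once
 * the shared zc_next timestamp has arrived, and the atomic_cas_64() on
 * zc_next ensures that exactly one thread claims a given slot and
 * schedules the next one.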
5356 */ 5357 rand = ztest_random(ZTEST_FUNCS); 5358 zi = &ztest_info[rand]; 5359 zc = ZTEST_GET_SHARED_CALLSTATE(rand); 5360 call_next = zc->zc_next; 5361 5362 if (now >= call_next && 5363 atomic_cas_64(&zc->zc_next, call_next, call_next + 5364 ztest_random(2 * zi->zi_interval[0] + 1)) == call_next) { 5365 ztest_execute(rand, zi, id); 5366 } 5367 } 5368 5369 return (NULL); 5370} 5371 5372static void 5373ztest_dataset_name(char *dsname, char *pool, int d) 5374{ 5375 (void) snprintf(dsname, MAXNAMELEN, "%s/ds_%d", pool, d); 5376} 5377 5378static void 5379ztest_dataset_destroy(int d) 5380{ 5381 char name[MAXNAMELEN]; 5382 5383 ztest_dataset_name(name, ztest_opts.zo_pool, d); 5384 5385 if (ztest_opts.zo_verbose >= 3) 5386 (void) printf("Destroying %s to free up space\n", name); 5387 5388 /* 5389 * Cleanup any non-standard clones and snapshots. In general, 5390 * ztest thread t operates on dataset (t % zopt_datasets), 5391 * so there may be more than one thing to clean up. 5392 */ 5393 for (int t = d; t < ztest_opts.zo_threads; 5394 t += ztest_opts.zo_datasets) { 5395 ztest_dsl_dataset_cleanup(name, t); 5396 } 5397 5398 (void) dmu_objset_find(name, ztest_objset_destroy_cb, NULL, 5399 DS_FIND_SNAPSHOTS | DS_FIND_CHILDREN); 5400} 5401 5402static void 5403ztest_dataset_dirobj_verify(ztest_ds_t *zd) 5404{ 5405 uint64_t usedobjs, dirobjs, scratch; 5406 5407 /* 5408 * ZTEST_DIROBJ is the object directory for the entire dataset. 5409 * Therefore, the number of objects in use should equal the 5410 * number of ZTEST_DIROBJ entries, +1 for ZTEST_DIROBJ itself. 5411 * If not, we have an object leak. 5412 * 5413 * Note that we can only check this in ztest_dataset_open(), 5414 * when the open-context and syncing-context values agree. 5415 * That's because zap_count() returns the open-context value, 5416 * while dmu_objset_space() returns the rootbp fill count. 
5417 */ 5418 VERIFY3U(0, ==, zap_count(zd->zd_os, ZTEST_DIROBJ, &dirobjs)); 5419 dmu_objset_space(zd->zd_os, &scratch, &scratch, &usedobjs, &scratch); 5420 ASSERT3U(dirobjs + 1, ==, usedobjs); 5421} 5422 5423static int 5424ztest_dataset_open(int d) 5425{ 5426 ztest_ds_t *zd = &ztest_ds[d]; 5427 uint64_t committed_seq = ZTEST_GET_SHARED_DS(d)->zd_seq; 5428 objset_t *os; 5429 zilog_t *zilog; 5430 char name[MAXNAMELEN]; 5431 int error; 5432 5433 ztest_dataset_name(name, ztest_opts.zo_pool, d); 5434 5435 (void) rw_rdlock(&ztest_name_lock); 5436 5437 error = ztest_dataset_create(name); 5438 if (error == ENOSPC) { 5439 (void) rw_unlock(&ztest_name_lock); 5440 ztest_record_enospc(FTAG); 5441 return (error); 5442 } 5443 ASSERT(error == 0 || error == EEXIST); 5444 5445 VERIFY0(dmu_objset_own(name, DMU_OST_OTHER, B_FALSE, zd, &os)); 5446 (void) rw_unlock(&ztest_name_lock); 5447 5448 ztest_zd_init(zd, ZTEST_GET_SHARED_DS(d), os); 5449 5450 zilog = zd->zd_zilog; 5451 5452 if (zilog->zl_header->zh_claim_lr_seq != 0 && 5453 zilog->zl_header->zh_claim_lr_seq < committed_seq) 5454 fatal(0, "missing log records: claimed %llu < committed %llu", 5455 zilog->zl_header->zh_claim_lr_seq, committed_seq); 5456 5457 ztest_dataset_dirobj_verify(zd); 5458 5459 zil_replay(os, zd, ztest_replay_vector); 5460 5461 ztest_dataset_dirobj_verify(zd); 5462 5463 if (ztest_opts.zo_verbose >= 6) 5464 (void) printf("%s replay %llu blocks, %llu records, seq %llu\n", 5465 zd->zd_name, 5466 (u_longlong_t)zilog->zl_parse_blk_count, 5467 (u_longlong_t)zilog->zl_parse_lr_count, 5468 (u_longlong_t)zilog->zl_replaying_seq); 5469 5470 zilog = zil_open(os, ztest_get_data); 5471 5472 if (zilog->zl_replaying_seq != 0 && 5473 zilog->zl_replaying_seq < committed_seq) 5474 fatal(0, "missing log records: replayed %llu < committed %llu", 5475 zilog->zl_replaying_seq, committed_seq); 5476 5477 return (0); 5478} 5479 5480static void 5481ztest_dataset_close(int d) 5482{ 5483 ztest_ds_t *zd = &ztest_ds[d]; 5484 5485 zil_close(zd->zd_zilog); 5486 dmu_objset_disown(zd->zd_os, zd); 5487 5488 ztest_zd_fini(zd); 5489} 5490 5491/* 5492 * Kick off threads to run tests on all datasets in parallel. 5493 */ 5494static void 5495ztest_run(ztest_shared_t *zs) 5496{ 5497 thread_t *tid; 5498 spa_t *spa; 5499 objset_t *os; 5500 thread_t resume_tid; 5501 int error; 5502 5503 ztest_exiting = B_FALSE; 5504 5505 /* 5506 * Initialize parent/child shared state. 5507 */ 5508 VERIFY(_mutex_init(&ztest_vdev_lock, USYNC_THREAD, NULL) == 0); 5509 VERIFY(rwlock_init(&ztest_name_lock, USYNC_THREAD, NULL) == 0); 5510 5511 zs->zs_thread_start = gethrtime(); 5512 zs->zs_thread_stop = 5513 zs->zs_thread_start + ztest_opts.zo_passtime * NANOSEC; 5514 zs->zs_thread_stop = MIN(zs->zs_thread_stop, zs->zs_proc_stop); 5515 zs->zs_thread_kill = zs->zs_thread_stop; 5516 if (ztest_random(100) < ztest_opts.zo_killrate) { 5517 zs->zs_thread_kill -= 5518 ztest_random(ztest_opts.zo_passtime * NANOSEC); 5519 } 5520 5521 (void) _mutex_init(&zcl.zcl_callbacks_lock, USYNC_THREAD, NULL); 5522 5523 list_create(&zcl.zcl_callbacks, sizeof (ztest_cb_data_t), 5524 offsetof(ztest_cb_data_t, zcd_node)); 5525 5526 /* 5527 * Open our pool. 
5528 */ 5529 kernel_init(FREAD | FWRITE); 5530 VERIFY0(spa_open(ztest_opts.zo_pool, &spa, FTAG)); 5531 spa->spa_debug = B_TRUE; 5532 ztest_spa = spa; 5533 5534 VERIFY0(dmu_objset_own(ztest_opts.zo_pool, 5535 DMU_OST_ANY, B_TRUE, FTAG, &os)); 5536 zs->zs_guid = dmu_objset_fsid_guid(os); 5537 dmu_objset_disown(os, FTAG); 5538 5539 spa->spa_dedup_ditto = 2 * ZIO_DEDUPDITTO_MIN; 5540 5541 /* 5542 * We don't expect the pool to suspend unless maxfaults == 0, 5543 * in which case ztest_fault_inject() temporarily takes away 5544 * the only valid replica. 5545 */ 5546 if (MAXFAULTS() == 0) 5547 spa->spa_failmode = ZIO_FAILURE_MODE_WAIT; 5548 else 5549 spa->spa_failmode = ZIO_FAILURE_MODE_PANIC; 5550 5551 /* 5552 * Create a thread to periodically resume suspended I/O. 5553 */ 5554 VERIFY(thr_create(0, 0, ztest_resume_thread, spa, THR_BOUND, 5555 &resume_tid) == 0); 5556 5557 /* 5558 * Create a deadman thread to abort() if we hang. 5559 */ 5560 VERIFY(thr_create(0, 0, ztest_deadman_thread, zs, THR_BOUND, 5561 NULL) == 0); 5562 5563 /* 5564 * Verify that we can safely inquire about any object, 5565 * whether it's allocated or not. To make it interesting, 5566 * we probe a 5-wide window around each power of two. 5567 * This hits all edge cases, including zero and the max. 5568 */ 5569 for (int t = 0; t < 64; t++) { 5570 for (int d = -5; d <= 5; d++) { 5571 error = dmu_object_info(spa->spa_meta_objset, 5572 (1ULL << t) + d, NULL); 5573 ASSERT(error == 0 || error == ENOENT || 5574 error == EINVAL); 5575 } 5576 } 5577 5578 /* 5579 * If we got any ENOSPC errors on the previous run, destroy something. 5580 */ 5581 if (zs->zs_enospc_count != 0) { 5582 int d = ztest_random(ztest_opts.zo_datasets); 5583 ztest_dataset_destroy(d); 5584 } 5585 zs->zs_enospc_count = 0; 5586 5587 tid = umem_zalloc(ztest_opts.zo_threads * sizeof (thread_t), 5588 UMEM_NOFAIL); 5589 5590 if (ztest_opts.zo_verbose >= 4) 5591 (void) printf("starting main threads...\n"); 5592 5593 /* 5594 * Kick off all the tests that run in parallel. 5595 */ 5596 for (int t = 0; t < ztest_opts.zo_threads; t++) { 5597 if (t < ztest_opts.zo_datasets && 5598 ztest_dataset_open(t) != 0) 5599 return; 5600 VERIFY(thr_create(0, 0, ztest_thread, (void *)(uintptr_t)t, 5601 THR_BOUND, &tid[t]) == 0); 5602 } 5603 5604 /* 5605 * Wait for all of the tests to complete. We go in reverse order 5606 * so we don't close datasets while threads are still using them. 5607 */ 5608 for (int t = ztest_opts.zo_threads - 1; t >= 0; t--) { 5609 VERIFY(thr_join(tid[t], NULL, NULL) == 0); 5610 if (t < ztest_opts.zo_datasets) 5611 ztest_dataset_close(t); 5612 } 5613 5614 txg_wait_synced(spa_get_dsl(spa), 0); 5615 5616 zs->zs_alloc = metaslab_class_get_alloc(spa_normal_class(spa)); 5617 zs->zs_space = metaslab_class_get_space(spa_normal_class(spa)); 5618 5619 umem_free(tid, ztest_opts.zo_threads * sizeof (thread_t)); 5620 5621 /* Kill the resume thread */ 5622 ztest_exiting = B_TRUE; 5623 VERIFY(thr_join(resume_tid, NULL, NULL) == 0); 5624 ztest_resume(spa); 5625 5626 /* 5627 * Right before closing the pool, kick off a bunch of async I/O; 5628 * spa_close() should wait for it to complete. 5629 */ 5630 for (uint64_t object = 1; object < 50; object++) 5631 dmu_prefetch(spa->spa_meta_objset, object, 0, 1ULL << 20); 5632 5633 spa_close(spa, FTAG); 5634 5635 /* 5636 * Verify that we can loop over all pools.
5637 */ 5638 mutex_enter(&spa_namespace_lock); 5639 for (spa = spa_next(NULL); spa != NULL; spa = spa_next(spa)) 5640 if (ztest_opts.zo_verbose > 3) 5641 (void) printf("spa_next: found %s\n", spa_name(spa)); 5642 mutex_exit(&spa_namespace_lock); 5643 5644 /* 5645 * Verify that we can export the pool and reimport it under a 5646 * different name. 5647 */ 5648 if (ztest_random(2) == 0) { 5649 char name[MAXNAMELEN]; 5650 (void) snprintf(name, MAXNAMELEN, "%s_import", 5651 ztest_opts.zo_pool); 5652 ztest_spa_import_export(ztest_opts.zo_pool, name); 5653 ztest_spa_import_export(name, ztest_opts.zo_pool); 5654 } 5655 5656 kernel_fini(); 5657 5658 list_destroy(&zcl.zcl_callbacks); 5659 5660 (void) _mutex_destroy(&zcl.zcl_callbacks_lock); 5661 5662 (void) rwlock_destroy(&ztest_name_lock); 5663 (void) _mutex_destroy(&ztest_vdev_lock); 5664} 5665 5666static void 5667ztest_freeze(void) 5668{ 5669 ztest_ds_t *zd = &ztest_ds[0]; 5670 spa_t *spa; 5671 int numloops = 0; 5672 5673 if (ztest_opts.zo_verbose >= 3) 5674 (void) printf("testing spa_freeze()...\n"); 5675 5676 kernel_init(FREAD | FWRITE); 5677 VERIFY3U(0, ==, spa_open(ztest_opts.zo_pool, &spa, FTAG)); 5678 VERIFY3U(0, ==, ztest_dataset_open(0)); 5679 spa->spa_debug = B_TRUE; 5680 ztest_spa = spa; 5681 5682 /* 5683 * Force the first log block to be transactionally allocated. 5684 * We have to do this before we freeze the pool -- otherwise 5685 * the log chain won't be anchored. 5686 */ 5687 while (BP_IS_HOLE(&zd->zd_zilog->zl_header->zh_log)) { 5688 ztest_dmu_object_alloc_free(zd, 0); 5689 zil_commit(zd->zd_zilog, 0); 5690 } 5691 5692 txg_wait_synced(spa_get_dsl(spa), 0); 5693 5694 /* 5695 * Freeze the pool. This stops spa_sync() from doing anything, 5696 * so that the only way to record changes from now on is the ZIL. 5697 */ 5698 spa_freeze(spa); 5699 5700 /* 5701 * Run tests that generate log records but don't alter the pool config 5702 * or depend on DSL sync tasks (snapshots, objset create/destroy, etc). 5703 * We do a txg_wait_synced() after each iteration to force the txg 5704 * to increase well beyond the last synced value in the uberblock. 5705 * The ZIL should be OK with that. 5706 */ 5707 while (ztest_random(10) != 0 && 5708 numloops++ < ztest_opts.zo_maxloops) { 5709 ztest_dmu_write_parallel(zd, 0); 5710 ztest_dmu_object_alloc_free(zd, 0); 5711 txg_wait_synced(spa_get_dsl(spa), 0); 5712 } 5713 5714 /* 5715 * Commit all of the changes we just generated. 5716 */ 5717 zil_commit(zd->zd_zilog, 0); 5718 txg_wait_synced(spa_get_dsl(spa), 0); 5719 5720 /* 5721 * Close our dataset and close the pool. 5722 */ 5723 ztest_dataset_close(0); 5724 spa_close(spa, FTAG); 5725 kernel_fini(); 5726 5727 /* 5728 * Open and close the pool and dataset to induce log replay. 
5729 */ 5730 kernel_init(FREAD | FWRITE); 5731 VERIFY3U(0, ==, spa_open(ztest_opts.zo_pool, &spa, FTAG)); 5732 ASSERT(spa_freeze_txg(spa) == UINT64_MAX); 5733 VERIFY3U(0, ==, ztest_dataset_open(0)); 5734 ztest_dataset_close(0); 5735 5736 spa->spa_debug = B_TRUE; 5737 ztest_spa = spa; 5738 txg_wait_synced(spa_get_dsl(spa), 0); 5739 ztest_reguid(NULL, 0); 5740 5741 spa_close(spa, FTAG); 5742 kernel_fini(); 5743} 5744 5745void 5746print_time(hrtime_t t, char *timebuf) 5747{ 5748 hrtime_t s = t / NANOSEC; 5749 hrtime_t m = s / 60; 5750 hrtime_t h = m / 60; 5751 hrtime_t d = h / 24; 5752 5753 s -= m * 60; 5754 m -= h * 60; 5755 h -= d * 24; 5756 5757 timebuf[0] = '\0'; 5758 5759 if (d) 5760 (void) sprintf(timebuf, 5761 "%llud%02lluh%02llum%02llus", d, h, m, s); 5762 else if (h) 5763 (void) sprintf(timebuf, "%lluh%02llum%02llus", h, m, s); 5764 else if (m) 5765 (void) sprintf(timebuf, "%llum%02llus", m, s); 5766 else 5767 (void) sprintf(timebuf, "%llus", s); 5768} 5769 5770static nvlist_t * 5771make_random_props() 5772{ 5773 nvlist_t *props; 5774 5775 VERIFY(nvlist_alloc(&props, NV_UNIQUE_NAME, 0) == 0); 5776 if (ztest_random(2) == 0) 5777 return (props); 5778 VERIFY(nvlist_add_uint64(props, "autoreplace", 1) == 0); 5779 5780 return (props); 5781} 5782 5783/* 5784 * Create a storage pool with the given name and initial vdev size. 5785 * Then test spa_freeze() functionality. 5786 */ 5787static void 5788ztest_init(ztest_shared_t *zs) 5789{ 5790 spa_t *spa; 5791 nvlist_t *nvroot, *props; 5792 5793 VERIFY(_mutex_init(&ztest_vdev_lock, USYNC_THREAD, NULL) == 0); 5794 VERIFY(rwlock_init(&ztest_name_lock, USYNC_THREAD, NULL) == 0); 5795 5796 kernel_init(FREAD | FWRITE); 5797 5798 /* 5799 * Create the storage pool. 5800 */ 5801 (void) spa_destroy(ztest_opts.zo_pool); 5802 ztest_shared->zs_vdev_next_leaf = 0; 5803 zs->zs_splits = 0; 5804 zs->zs_mirrors = ztest_opts.zo_mirrors; 5805 nvroot = make_vdev_root(NULL, NULL, NULL, ztest_opts.zo_vdev_size, 0, 5806 0, ztest_opts.zo_raidz, zs->zs_mirrors, 1); 5807 props = make_random_props(); 5808 for (int i = 0; i < SPA_FEATURES; i++) { 5809 char buf[1024]; 5810 (void) snprintf(buf, sizeof (buf), "feature@%s", 5811 spa_feature_table[i].fi_uname); 5812 VERIFY3U(0, ==, nvlist_add_uint64(props, buf, 0)); 5813 } 5814 VERIFY3U(0, ==, spa_create(ztest_opts.zo_pool, nvroot, props, NULL)); 5815 nvlist_free(nvroot); 5816 5817 VERIFY3U(0, ==, spa_open(ztest_opts.zo_pool, &spa, FTAG)); 5818 zs->zs_metaslab_sz = 5819 1ULL << spa->spa_root_vdev->vdev_child[0]->vdev_ms_shift; 5820 5821 spa_close(spa, FTAG); 5822 5823 kernel_fini(); 5824 5825 ztest_run_zdb(ztest_opts.zo_pool); 5826 5827 ztest_freeze(); 5828 5829 ztest_run_zdb(ztest_opts.zo_pool); 5830 5831 (void) rwlock_destroy(&ztest_name_lock); 5832 (void) _mutex_destroy(&ztest_vdev_lock); 5833} 5834 5835static void 5836setup_data_fd(void) 5837{ 5838 static char ztest_name_data[] = "/tmp/ztest.data.XXXXXX"; 5839 5840 ztest_fd_data = mkstemp(ztest_name_data); 5841 ASSERT3S(ztest_fd_data, >=, 0); 5842 (void) unlink(ztest_name_data); 5843} 5844 5845 5846static int 5847shared_data_size(ztest_shared_hdr_t *hdr) 5848{ 5849 int size; 5850 5851 size = hdr->zh_hdr_size; 5852 size += hdr->zh_opts_size; 5853 size += hdr->zh_size; 5854 size += hdr->zh_stats_size * hdr->zh_stats_count; 5855 size += hdr->zh_ds_size * hdr->zh_ds_count; 5856 5857 return (size); 5858} 5859 5860static void 5861setup_hdr(void) 5862{ 5863 int size; 5864 ztest_shared_hdr_t *hdr; 5865 5866 hdr = (void *)mmap(0, P2ROUNDUP(sizeof (*hdr), getpagesize()), 5867 
PROT_READ | PROT_WRITE, MAP_SHARED, ztest_fd_data, 0); 5868 ASSERT(hdr != MAP_FAILED); 5869 5870 VERIFY3U(0, ==, ftruncate(ztest_fd_data, sizeof (ztest_shared_hdr_t))); 5871 5872 hdr->zh_hdr_size = sizeof (ztest_shared_hdr_t); 5873 hdr->zh_opts_size = sizeof (ztest_shared_opts_t); 5874 hdr->zh_size = sizeof (ztest_shared_t); 5875 hdr->zh_stats_size = sizeof (ztest_shared_callstate_t); 5876 hdr->zh_stats_count = ZTEST_FUNCS; 5877 hdr->zh_ds_size = sizeof (ztest_shared_ds_t); 5878 hdr->zh_ds_count = ztest_opts.zo_datasets; 5879 5880 size = shared_data_size(hdr); 5881 VERIFY3U(0, ==, ftruncate(ztest_fd_data, size)); 5882 5883 (void) munmap((caddr_t)hdr, P2ROUNDUP(sizeof (*hdr), getpagesize())); 5884} 5885 5886static void 5887setup_data(void) 5888{ 5889 int size, offset; 5890 ztest_shared_hdr_t *hdr; 5891 uint8_t *buf; 5892 5893 hdr = (void *)mmap(0, P2ROUNDUP(sizeof (*hdr), getpagesize()), 5894 PROT_READ, MAP_SHARED, ztest_fd_data, 0); 5895 ASSERT(hdr != MAP_FAILED); 5896 5897 size = shared_data_size(hdr); 5898 5899 (void) munmap((caddr_t)hdr, P2ROUNDUP(sizeof (*hdr), getpagesize())); 5900 hdr = ztest_shared_hdr = (void *)mmap(0, P2ROUNDUP(size, getpagesize()), 5901 PROT_READ | PROT_WRITE, MAP_SHARED, ztest_fd_data, 0); 5902 ASSERT(hdr != MAP_FAILED); 5903 buf = (uint8_t *)hdr; 5904 5905 offset = hdr->zh_hdr_size; 5906 ztest_shared_opts = (void *)&buf[offset]; 5907 offset += hdr->zh_opts_size; 5908 ztest_shared = (void *)&buf[offset]; 5909 offset += hdr->zh_size; 5910 ztest_shared_callstate = (void *)&buf[offset]; 5911 offset += hdr->zh_stats_size * hdr->zh_stats_count; 5912 ztest_shared_ds = (void *)&buf[offset]; 5913} 5914 5915static boolean_t 5916exec_child(char *cmd, char *libpath, boolean_t ignorekill, int *statusp) 5917{ 5918 pid_t pid; 5919 int status; 5920 char *cmdbuf = NULL; 5921 5922 pid = fork(); 5923 5924 if (cmd == NULL) { 5925 cmdbuf = umem_alloc(MAXPATHLEN, UMEM_NOFAIL); 5926 (void) strlcpy(cmdbuf, getexecname(), MAXPATHLEN); 5927 cmd = cmdbuf; 5928 } 5929 5930 if (pid == -1) 5931 fatal(1, "fork failed"); 5932 5933 if (pid == 0) { /* child */ 5934 char *emptyargv[2] = { cmd, NULL }; 5935 char fd_data_str[12]; 5936 5937 struct rlimit rl = { 1024, 1024 }; 5938 (void) setrlimit(RLIMIT_NOFILE, &rl); 5939 5940 (void) close(ztest_fd_rand); 5941 VERIFY3U(11, >=, 5942 snprintf(fd_data_str, 12, "%d", ztest_fd_data)); 5943 VERIFY0(setenv("ZTEST_FD_DATA", fd_data_str, 1)); 5944 5945 (void) enable_extended_FILE_stdio(-1, -1); 5946 if (libpath != NULL) 5947 VERIFY(0 == setenv("LD_LIBRARY_PATH", libpath, 1)); 5948#ifdef illumos 5949 (void) execv(cmd, emptyargv); 5950#else 5951 (void) execvp(cmd, emptyargv); 5952#endif 5953 ztest_dump_core = B_FALSE; 5954 fatal(B_TRUE, "exec failed: %s", cmd); 5955 } 5956 5957 if (cmdbuf != NULL) { 5958 umem_free(cmdbuf, MAXPATHLEN); 5959 cmd = NULL; 5960 } 5961 5962 while (waitpid(pid, &status, 0) != pid) 5963 continue; 5964 if (statusp != NULL) 5965 *statusp = status; 5966 5967 if (WIFEXITED(status)) { 5968 if (WEXITSTATUS(status) != 0) { 5969 (void) fprintf(stderr, "child exited with code %d\n", 5970 WEXITSTATUS(status)); 5971 exit(2); 5972 } 5973 return (B_FALSE); 5974 } else if (WIFSIGNALED(status)) { 5975 if (!ignorekill || WTERMSIG(status) != SIGKILL) { 5976 (void) fprintf(stderr, "child died with signal %d\n", 5977 WTERMSIG(status)); 5978 exit(3); 5979 } 5980 return (B_TRUE); 5981 } else { 5982 (void) fprintf(stderr, "something strange happened to child\n"); 5983 exit(4); 5984 /* NOTREACHED */ 5985 } 5986} 5987 5988static void 
5989ztest_run_init(void) 5990{ 5991 ztest_shared_t *zs = ztest_shared; 5992 5993 ASSERT(ztest_opts.zo_init != 0); 5994 5995 /* 5996 * Blow away any existing copy of zpool.cache 5997 */ 5998 (void) remove(spa_config_path); 5999 6000 /* 6001 * Create and initialize our storage pool. 6002 */ 6003 for (int i = 1; i <= ztest_opts.zo_init; i++) { 6004 bzero(zs, sizeof (ztest_shared_t)); 6005 if (ztest_opts.zo_verbose >= 3 && 6006 ztest_opts.zo_init != 1) { 6007 (void) printf("ztest_init(), pass %d\n", i); 6008 } 6009 ztest_init(zs); 6010 } 6011} 6012 6013int 6014main(int argc, char **argv) 6015{ 6016 int kills = 0; 6017 int iters = 0; 6018 int older = 0; 6019 int newer = 0; 6020 ztest_shared_t *zs; 6021 ztest_info_t *zi; 6022 ztest_shared_callstate_t *zc; 6023 char timebuf[100]; 6024 char numbuf[6]; 6025 spa_t *spa; 6026 char *cmd; 6027 boolean_t hasalt; 6028 char *fd_data_str = getenv("ZTEST_FD_DATA"); 6029 6030 (void) setvbuf(stdout, NULL, _IOLBF, 0); 6031 6032 dprintf_setup(&argc, argv); 6033 6034 ztest_fd_rand = open("/dev/urandom", O_RDONLY); 6035 ASSERT3S(ztest_fd_rand, >=, 0); 6036 6037 if (!fd_data_str) { 6038 process_options(argc, argv); 6039 6040 setup_data_fd(); 6041 setup_hdr(); 6042 setup_data(); 6043 bcopy(&ztest_opts, ztest_shared_opts, 6044 sizeof (*ztest_shared_opts)); 6045 } else { 6046 ztest_fd_data = atoi(fd_data_str); 6047 setup_data(); 6048 bcopy(ztest_shared_opts, &ztest_opts, sizeof (ztest_opts)); 6049 } 6050 ASSERT3U(ztest_opts.zo_datasets, ==, ztest_shared_hdr->zh_ds_count); 6051 6052 /* Override location of zpool.cache */ 6053 VERIFY3U(asprintf((char **)&spa_config_path, "%s/zpool.cache", 6054 ztest_opts.zo_dir), !=, -1); 6055 6056 ztest_ds = umem_alloc(ztest_opts.zo_datasets * sizeof (ztest_ds_t), 6057 UMEM_NOFAIL); 6058 zs = ztest_shared; 6059 6060 if (fd_data_str) { 6061 metaslab_gang_bang = ztest_opts.zo_metaslab_gang_bang; 6062 metaslab_df_alloc_threshold = 6063 zs->zs_metaslab_df_alloc_threshold; 6064 6065 if (zs->zs_do_init) 6066 ztest_run_init(); 6067 else 6068 ztest_run(zs); 6069 exit(0); 6070 } 6071 6072 hasalt = (strlen(ztest_opts.zo_alt_ztest) != 0); 6073 6074 if (ztest_opts.zo_verbose >= 1) { 6075 (void) printf("%llu vdevs, %d datasets, %d threads," 6076 " %llu seconds...\n", 6077 (u_longlong_t)ztest_opts.zo_vdevs, 6078 ztest_opts.zo_datasets, 6079 ztest_opts.zo_threads, 6080 (u_longlong_t)ztest_opts.zo_time); 6081 } 6082 6083 cmd = umem_alloc(MAXNAMELEN, UMEM_NOFAIL); 6084 (void) strlcpy(cmd, getexecname(), MAXNAMELEN); 6085 6086 zs->zs_do_init = B_TRUE; 6087 if (strlen(ztest_opts.zo_alt_ztest) != 0) { 6088 if (ztest_opts.zo_verbose >= 1) { 6089 (void) printf("Executing older ztest for " 6090 "initialization: %s\n", ztest_opts.zo_alt_ztest); 6091 } 6092 VERIFY(!exec_child(ztest_opts.zo_alt_ztest, 6093 ztest_opts.zo_alt_libpath, B_FALSE, NULL)); 6094 } else { 6095 VERIFY(!exec_child(NULL, NULL, B_FALSE, NULL)); 6096 } 6097 zs->zs_do_init = B_FALSE; 6098 6099 zs->zs_proc_start = gethrtime(); 6100 zs->zs_proc_stop = zs->zs_proc_start + ztest_opts.zo_time * NANOSEC; 6101 6102 for (int f = 0; f < ZTEST_FUNCS; f++) { 6103 zi = &ztest_info[f]; 6104 zc = ZTEST_GET_SHARED_CALLSTATE(f); 6105 if (zs->zs_proc_start + zi->zi_interval[0] > zs->zs_proc_stop) 6106 zc->zc_next = UINT64_MAX; 6107 else 6108 zc->zc_next = zs->zs_proc_start + 6109 ztest_random(2 * zi->zi_interval[0] + 1); 6110 } 6111 6112 /* 6113 * Run the tests in a loop. 
These tests include fault injection 6114 * to verify that self-healing data works, and forced crashes 6115 * to verify that we never lose on-disk consistency. 6116 */ 6117 while (gethrtime() < zs->zs_proc_stop) { 6118 int status; 6119 boolean_t killed; 6120 6121 /* 6122 * Initialize the workload counters for each function. 6123 */ 6124 for (int f = 0; f < ZTEST_FUNCS; f++) { 6125 zc = ZTEST_GET_SHARED_CALLSTATE(f); 6126 zc->zc_count = 0; 6127 zc->zc_time = 0; 6128 } 6129 6130 /* Set the allocation switch size */ 6131 zs->zs_metaslab_df_alloc_threshold = 6132 ztest_random(zs->zs_metaslab_sz / 4) + 1; 6133 6134 if (!hasalt || ztest_random(2) == 0) { 6135 if (hasalt && ztest_opts.zo_verbose >= 1) { 6136 (void) printf("Executing newer ztest: %s\n", 6137 cmd); 6138 } 6139 newer++; 6140 killed = exec_child(cmd, NULL, B_TRUE, &status); 6141 } else { 6142 if (hasalt && ztest_opts.zo_verbose >= 1) { 6143 (void) printf("Executing older ztest: %s\n", 6144 ztest_opts.zo_alt_ztest); 6145 } 6146 older++; 6147 killed = exec_child(ztest_opts.zo_alt_ztest, 6148 ztest_opts.zo_alt_libpath, B_TRUE, &status); 6149 } 6150 6151 if (killed) 6152 kills++; 6153 iters++; 6154 6155 if (ztest_opts.zo_verbose >= 1) { 6156 hrtime_t now = gethrtime(); 6157 6158 now = MIN(now, zs->zs_proc_stop); 6159 print_time(zs->zs_proc_stop - now, timebuf); 6160 nicenum(zs->zs_space, numbuf); 6161 6162 (void) printf("Pass %3d, %8s, %3llu ENOSPC, " 6163 "%4.1f%% of %5s used, %3.0f%% done, %8s to go\n", 6164 iters, 6165 WIFEXITED(status) ? "Complete" : "SIGKILL", 6166 (u_longlong_t)zs->zs_enospc_count, 6167 100.0 * zs->zs_alloc / zs->zs_space, 6168 numbuf, 6169 100.0 * (now - zs->zs_proc_start) / 6170 (ztest_opts.zo_time * NANOSEC), timebuf); 6171 } 6172 6173 if (ztest_opts.zo_verbose >= 2) { 6174 (void) printf("\nWorkload summary:\n\n"); 6175 (void) printf("%7s %9s %s\n", 6176 "Calls", "Time", "Function"); 6177 (void) printf("%7s %9s %s\n", 6178 "-----", "----", "--------"); 6179 for (int f = 0; f < ZTEST_FUNCS; f++) { 6180 Dl_info dli; 6181 6182 zi = &ztest_info[f]; 6183 zc = ZTEST_GET_SHARED_CALLSTATE(f); 6184 print_time(zc->zc_time, timebuf); 6185 (void) dladdr((void *)zi->zi_func, &dli); 6186 (void) printf("%7llu %9s %s\n", 6187 (u_longlong_t)zc->zc_count, timebuf, 6188 dli.dli_sname); 6189 } 6190 (void) printf("\n"); 6191 } 6192 6193 /* 6194 * It's possible that we killed a child during a rename test, 6195 * in which case we'll have a 'ztest_tmp' pool lying around 6196 * instead of 'ztest'. Do a blind rename in case this happened. 6197 */ 6198 kernel_init(FREAD); 6199 if (spa_open(ztest_opts.zo_pool, &spa, FTAG) == 0) { 6200 spa_close(spa, FTAG); 6201 } else { 6202 char tmpname[MAXNAMELEN]; 6203 kernel_fini(); 6204 kernel_init(FREAD | FWRITE); 6205 (void) snprintf(tmpname, sizeof (tmpname), "%s_tmp", 6206 ztest_opts.zo_pool); 6207 (void) spa_rename(tmpname, ztest_opts.zo_pool); 6208 } 6209 kernel_fini(); 6210 6211 ztest_run_zdb(ztest_opts.zo_pool); 6212 } 6213 6214 if (ztest_opts.zo_verbose >= 1) { 6215 if (hasalt) { 6216 (void) printf("%d runs of older ztest: %s\n", older, 6217 ztest_opts.zo_alt_ztest); 6218 (void) printf("%d runs of newer ztest: %s\n", newer, 6219 cmd); 6220 } 6221 (void) printf("%d killed, %d completed, %.0f%% kill rate\n", 6222 kills, iters - kills, (100.0 * kills) / MAX(1, iters)); 6223 } 6224 6225 umem_free(cmd, MAXNAMELEN); 6226 6227 return (0); 6228} 6229