ztest.c revision 339034
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2016 by Delphix. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2012 Martin Matuska <mm@FreeBSD.org>. All rights reserved.
 * Copyright (c) 2013 Steven Hartland. All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 * Copyright 2017 Joyent, Inc.
 * Copyright 2017 RackTop Systems.
 */

/*
 * The objective of this program is to provide a DMU/ZAP/SPA stress test
 * that runs entirely in userland, is easy to use, and easy to extend.
 *
 * The overall design of the ztest program is as follows:
 *
 * (1) For each major functional area (e.g. adding vdevs to a pool,
 *     creating and destroying datasets, reading and writing objects, etc)
 *     we have a simple routine to test that functionality. These
 *     individual routines do not have to do anything "stressful".
 *
 * (2) We turn these simple functionality tests into a stress test by
 *     running them all in parallel, with as many threads as desired,
 *     and spread across as many datasets, objects, and vdevs as desired.
 *
 * (3) While all this is happening, we inject faults into the pool to
 *     verify that self-healing data really works.
 *
 * (4) Every time we open a dataset, we change its checksum and compression
 *     functions. Thus even individual objects vary from block to block
 *     in which checksum they use and whether they're compressed.
 *
 * (5) To verify that we never lose on-disk consistency after a crash,
 *     we run the entire test in a child of the main process.
 *     At random times, the child self-immolates with a SIGKILL.
 *     This is the software equivalent of pulling the power cord.
 *     The parent then runs the test again, using the existing
 *     storage pool, as many times as desired. If backwards compatibility
 *     testing is enabled ztest will sometimes run the "older" version
 *     of ztest after a SIGKILL.
 *
 * (6) To verify that we don't have future leaks or temporal incursions,
 *     many of the functional tests record the transaction group number
 *     as part of their data. When reading old data, they verify that
 *     the transaction group number is less than the current, open txg.
 *     If you add a new test, please do this if applicable.
 *
 * When run with no arguments, ztest runs for about five minutes and
 * produces no output if successful. To get a little bit of information,
 * specify -V. To get more information, specify -VV, and so on.
 *
 * To turn this into an overnight stress test, use -T to specify run time.
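 *
 * For example, an overnight run with more threads and datasets might look
 * like this (flag meanings are described below and in usage(); the
 * particular values here are only illustrative):
 *
 *	ztest -VV -T 28800 -t 64 -d 16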
 *
 * You can ask for more vdevs [-v], datasets [-d], or threads [-t]
 * to increase the pool capacity, fanout, and overall stress level.
 *
 * Use the -k option to set the desired frequency of kills.
 *
 * When ztest invokes itself it passes all relevant information through a
 * temporary file which is mmap-ed in the child process. This allows shared
 * memory to survive the exec syscall. The ztest_shared_hdr_t struct is always
 * stored at offset 0 of this file and contains information on the size and
 * number of shared structures in the file. The information stored in this file
 * must remain backwards compatible with older versions of ztest so that
 * ztest can invoke them during backwards compatibility testing (-B).
 */

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/dmu.h>
#include <sys/txg.h>
#include <sys/dbuf.h>
#include <sys/zap.h>
#include <sys/dmu_objset.h>
#include <sys/poll.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/wait.h>
#include <sys/mman.h>
#include <sys/resource.h>
#include <sys/zio.h>
#include <sys/zil.h>
#include <sys/zil_impl.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_file.h>
#include <sys/spa_impl.h>
#include <sys/metaslab_impl.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_destroy.h>
#include <sys/dsl_scan.h>
#include <sys/zio_checksum.h>
#include <sys/refcount.h>
#include <sys/zfeature.h>
#include <sys/dsl_userhold.h>
#include <sys/abd.h>
#include <stdio.h>
#include <stdio_ext.h>
#include <stdlib.h>
#include <unistd.h>
#include <signal.h>
#include <umem.h>
#include <dlfcn.h>
#include <ctype.h>
#include <math.h>
#include <errno.h>
#include <sys/fs/zfs.h>
#include <libnvpair.h>
#include <libcmdutils.h>

static int ztest_fd_data = -1;
static int ztest_fd_rand = -1;

typedef struct ztest_shared_hdr {
	uint64_t	zh_hdr_size;
	uint64_t	zh_opts_size;
	uint64_t	zh_size;
	uint64_t	zh_stats_size;
	uint64_t	zh_stats_count;
	uint64_t	zh_ds_size;
	uint64_t	zh_ds_count;
} ztest_shared_hdr_t;

static ztest_shared_hdr_t *ztest_shared_hdr;

typedef struct ztest_shared_opts {
	char zo_pool[ZFS_MAX_DATASET_NAME_LEN];
	char zo_dir[ZFS_MAX_DATASET_NAME_LEN];
	char zo_alt_ztest[MAXNAMELEN];
	char zo_alt_libpath[MAXNAMELEN];
	uint64_t zo_vdevs;
	uint64_t zo_vdevtime;
	size_t zo_vdev_size;
	int zo_ashift;
	int zo_mirrors;
	int zo_raidz;
	int zo_raidz_parity;
	int zo_datasets;
	int zo_threads;
	uint64_t zo_passtime;
	uint64_t zo_killrate;
	int zo_verbose;
	int zo_init;
	uint64_t zo_time;
	uint64_t zo_maxloops;
	uint64_t zo_metaslab_force_ganging;
} ztest_shared_opts_t;

static const ztest_shared_opts_t ztest_opts_defaults = {
	.zo_pool = { 'z', 't', 'e', 's', 't', '\0' },
	.zo_dir = { '/', 't', 'm', 'p', '\0' },
	.zo_alt_ztest = { '\0' },
	.zo_alt_libpath = { '\0' },
	.zo_vdevs = 5,
	.zo_ashift = SPA_MINBLOCKSHIFT,
	.zo_mirrors = 2,
	.zo_raidz = 4,
	.zo_raidz_parity = 1,
	.zo_vdev_size = SPA_MINDEVSIZE * 4,	/* 256m default size */
	.zo_datasets = 7,
	.zo_threads = 23,
	.zo_passtime = 60,		/* 60 seconds */
	.zo_killrate = 70,		/* 70% kill rate */
	.zo_verbose = 0,
	.zo_init = 1,
	.zo_time = 300,			/* 5 minutes */
	.zo_maxloops = 50,		/* max loops during spa_freeze() */
	.zo_metaslab_force_ganging = 32 << 10
190}; 191 192extern uint64_t metaslab_force_ganging; 193extern uint64_t metaslab_df_alloc_threshold; 194extern uint64_t zfs_deadman_synctime_ms; 195extern int metaslab_preload_limit; 196extern boolean_t zfs_compressed_arc_enabled; 197extern boolean_t zfs_abd_scatter_enabled; 198 199static ztest_shared_opts_t *ztest_shared_opts; 200static ztest_shared_opts_t ztest_opts; 201 202typedef struct ztest_shared_ds { 203 uint64_t zd_seq; 204} ztest_shared_ds_t; 205 206static ztest_shared_ds_t *ztest_shared_ds; 207#define ZTEST_GET_SHARED_DS(d) (&ztest_shared_ds[d]) 208 209#define BT_MAGIC 0x123456789abcdefULL 210#define MAXFAULTS() \ 211 (MAX(zs->zs_mirrors, 1) * (ztest_opts.zo_raidz_parity + 1) - 1) 212 213enum ztest_io_type { 214 ZTEST_IO_WRITE_TAG, 215 ZTEST_IO_WRITE_PATTERN, 216 ZTEST_IO_WRITE_ZEROES, 217 ZTEST_IO_TRUNCATE, 218 ZTEST_IO_SETATTR, 219 ZTEST_IO_REWRITE, 220 ZTEST_IO_TYPES 221}; 222 223typedef struct ztest_block_tag { 224 uint64_t bt_magic; 225 uint64_t bt_objset; 226 uint64_t bt_object; 227 uint64_t bt_offset; 228 uint64_t bt_gen; 229 uint64_t bt_txg; 230 uint64_t bt_crtxg; 231} ztest_block_tag_t; 232 233typedef struct bufwad { 234 uint64_t bw_index; 235 uint64_t bw_txg; 236 uint64_t bw_data; 237} bufwad_t; 238 239/* 240 * XXX -- fix zfs range locks to be generic so we can use them here. 241 */ 242typedef enum { 243 RL_READER, 244 RL_WRITER, 245 RL_APPEND 246} rl_type_t; 247 248typedef struct rll { 249 void *rll_writer; 250 int rll_readers; 251 kmutex_t rll_lock; 252 kcondvar_t rll_cv; 253} rll_t; 254 255typedef struct rl { 256 uint64_t rl_object; 257 uint64_t rl_offset; 258 uint64_t rl_size; 259 rll_t *rl_lock; 260} rl_t; 261 262#define ZTEST_RANGE_LOCKS 64 263#define ZTEST_OBJECT_LOCKS 64 264 265/* 266 * Object descriptor. Used as a template for object lookup/create/remove. 267 */ 268typedef struct ztest_od { 269 uint64_t od_dir; 270 uint64_t od_object; 271 dmu_object_type_t od_type; 272 dmu_object_type_t od_crtype; 273 uint64_t od_blocksize; 274 uint64_t od_crblocksize; 275 uint64_t od_gen; 276 uint64_t od_crgen; 277 char od_name[ZFS_MAX_DATASET_NAME_LEN]; 278} ztest_od_t; 279 280/* 281 * Per-dataset state. 282 */ 283typedef struct ztest_ds { 284 ztest_shared_ds_t *zd_shared; 285 objset_t *zd_os; 286 krwlock_t zd_zilog_lock; 287 zilog_t *zd_zilog; 288 ztest_od_t *zd_od; /* debugging aid */ 289 char zd_name[ZFS_MAX_DATASET_NAME_LEN]; 290 kmutex_t zd_dirobj_lock; 291 rll_t zd_object_lock[ZTEST_OBJECT_LOCKS]; 292 rll_t zd_range_lock[ZTEST_RANGE_LOCKS]; 293} ztest_ds_t; 294 295/* 296 * Per-iteration state. 297 */ 298typedef void ztest_func_t(ztest_ds_t *zd, uint64_t id); 299 300typedef struct ztest_info { 301 ztest_func_t *zi_func; /* test function */ 302 uint64_t zi_iters; /* iterations per execution */ 303 uint64_t *zi_interval; /* execute every <interval> seconds */ 304} ztest_info_t; 305 306typedef struct ztest_shared_callstate { 307 uint64_t zc_count; /* per-pass count */ 308 uint64_t zc_time; /* per-pass time */ 309 uint64_t zc_next; /* next time to call this function */ 310} ztest_shared_callstate_t; 311 312static ztest_shared_callstate_t *ztest_shared_callstate; 313#define ZTEST_GET_SHARED_CALLSTATE(c) (&ztest_shared_callstate[c]) 314 315/* 316 * Note: these aren't static because we want dladdr() to work. 
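 * (Elsewhere in this file, dladdr() is used to map these function pointers
 * back to symbol names when printing per-test execution stats in verbose
 * mode.)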
317 */ 318ztest_func_t ztest_dmu_read_write; 319ztest_func_t ztest_dmu_write_parallel; 320ztest_func_t ztest_dmu_object_alloc_free; 321ztest_func_t ztest_dmu_commit_callbacks; 322ztest_func_t ztest_zap; 323ztest_func_t ztest_zap_parallel; 324ztest_func_t ztest_zil_commit; 325ztest_func_t ztest_zil_remount; 326ztest_func_t ztest_dmu_read_write_zcopy; 327ztest_func_t ztest_dmu_objset_create_destroy; 328ztest_func_t ztest_dmu_prealloc; 329ztest_func_t ztest_fzap; 330ztest_func_t ztest_dmu_snapshot_create_destroy; 331ztest_func_t ztest_dsl_prop_get_set; 332ztest_func_t ztest_spa_prop_get_set; 333ztest_func_t ztest_spa_create_destroy; 334ztest_func_t ztest_fault_inject; 335ztest_func_t ztest_ddt_repair; 336ztest_func_t ztest_dmu_snapshot_hold; 337ztest_func_t ztest_spa_rename; 338ztest_func_t ztest_scrub; 339ztest_func_t ztest_dsl_dataset_promote_busy; 340ztest_func_t ztest_vdev_attach_detach; 341ztest_func_t ztest_vdev_LUN_growth; 342ztest_func_t ztest_vdev_add_remove; 343ztest_func_t ztest_vdev_aux_add_remove; 344ztest_func_t ztest_split_pool; 345ztest_func_t ztest_reguid; 346ztest_func_t ztest_spa_upgrade; 347ztest_func_t ztest_device_removal; 348ztest_func_t ztest_remap_blocks; 349ztest_func_t ztest_spa_checkpoint_create_discard; 350 351uint64_t zopt_always = 0ULL * NANOSEC; /* all the time */ 352uint64_t zopt_incessant = 1ULL * NANOSEC / 10; /* every 1/10 second */ 353uint64_t zopt_often = 1ULL * NANOSEC; /* every second */ 354uint64_t zopt_sometimes = 10ULL * NANOSEC; /* every 10 seconds */ 355uint64_t zopt_rarely = 60ULL * NANOSEC; /* every 60 seconds */ 356 357ztest_info_t ztest_info[] = { 358 { ztest_dmu_read_write, 1, &zopt_always }, 359 { ztest_dmu_write_parallel, 10, &zopt_always }, 360 { ztest_dmu_object_alloc_free, 1, &zopt_always }, 361 { ztest_dmu_commit_callbacks, 1, &zopt_always }, 362 { ztest_zap, 30, &zopt_always }, 363 { ztest_zap_parallel, 100, &zopt_always }, 364 { ztest_split_pool, 1, &zopt_always }, 365 { ztest_zil_commit, 1, &zopt_incessant }, 366 { ztest_zil_remount, 1, &zopt_sometimes }, 367 { ztest_dmu_read_write_zcopy, 1, &zopt_often }, 368 { ztest_dmu_objset_create_destroy, 1, &zopt_often }, 369 { ztest_dsl_prop_get_set, 1, &zopt_often }, 370 { ztest_spa_prop_get_set, 1, &zopt_sometimes }, 371#if 0 372 { ztest_dmu_prealloc, 1, &zopt_sometimes }, 373#endif 374 { ztest_fzap, 1, &zopt_sometimes }, 375 { ztest_dmu_snapshot_create_destroy, 1, &zopt_sometimes }, 376 { ztest_spa_create_destroy, 1, &zopt_sometimes }, 377 { ztest_fault_inject, 1, &zopt_incessant }, 378 { ztest_ddt_repair, 1, &zopt_sometimes }, 379 { ztest_dmu_snapshot_hold, 1, &zopt_sometimes }, 380 { ztest_reguid, 1, &zopt_rarely }, 381 { ztest_spa_rename, 1, &zopt_rarely }, 382 { ztest_scrub, 1, &zopt_often }, 383 { ztest_spa_upgrade, 1, &zopt_rarely }, 384 { ztest_dsl_dataset_promote_busy, 1, &zopt_rarely }, 385 { ztest_vdev_attach_detach, 1, &zopt_incessant }, 386 { ztest_vdev_LUN_growth, 1, &zopt_rarely }, 387 { ztest_vdev_add_remove, 1, 388 &ztest_opts.zo_vdevtime }, 389 { ztest_vdev_aux_add_remove, 1, 390 &ztest_opts.zo_vdevtime }, 391 { ztest_device_removal, 1, &zopt_sometimes }, 392 { ztest_remap_blocks, 1, &zopt_sometimes }, 393 { ztest_spa_checkpoint_create_discard, 1, &zopt_rarely } 394}; 395 396#define ZTEST_FUNCS (sizeof (ztest_info) / sizeof (ztest_info_t)) 397 398/* 399 * The following struct is used to hold a list of uncalled commit callbacks. 400 * The callbacks are ordered by txg number. 
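 * Because the list is sorted, the head is always the oldest outstanding
 * callback, which makes it easy to check whether callbacks are lagging
 * too far behind the txgs that have already synced.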
401 */ 402typedef struct ztest_cb_list { 403 kmutex_t zcl_callbacks_lock; 404 list_t zcl_callbacks; 405} ztest_cb_list_t; 406 407/* 408 * Stuff we need to share writably between parent and child. 409 */ 410typedef struct ztest_shared { 411 boolean_t zs_do_init; 412 hrtime_t zs_proc_start; 413 hrtime_t zs_proc_stop; 414 hrtime_t zs_thread_start; 415 hrtime_t zs_thread_stop; 416 hrtime_t zs_thread_kill; 417 uint64_t zs_enospc_count; 418 uint64_t zs_vdev_next_leaf; 419 uint64_t zs_vdev_aux; 420 uint64_t zs_alloc; 421 uint64_t zs_space; 422 uint64_t zs_splits; 423 uint64_t zs_mirrors; 424 uint64_t zs_metaslab_sz; 425 uint64_t zs_metaslab_df_alloc_threshold; 426 uint64_t zs_guid; 427} ztest_shared_t; 428 429#define ID_PARALLEL -1ULL 430 431static char ztest_dev_template[] = "%s/%s.%llua"; 432static char ztest_aux_template[] = "%s/%s.%s.%llu"; 433ztest_shared_t *ztest_shared; 434 435static spa_t *ztest_spa = NULL; 436static ztest_ds_t *ztest_ds; 437 438static kmutex_t ztest_vdev_lock; 439static kmutex_t ztest_checkpoint_lock; 440 441/* 442 * The ztest_name_lock protects the pool and dataset namespace used by 443 * the individual tests. To modify the namespace, consumers must grab 444 * this lock as writer. Grabbing the lock as reader will ensure that the 445 * namespace does not change while the lock is held. 446 */ 447static krwlock_t ztest_name_lock; 448 449static boolean_t ztest_dump_core = B_TRUE; 450static boolean_t ztest_exiting; 451 452/* Global commit callback list */ 453static ztest_cb_list_t zcl; 454 455enum ztest_object { 456 ZTEST_META_DNODE = 0, 457 ZTEST_DIROBJ, 458 ZTEST_OBJECTS 459}; 460 461static void usage(boolean_t) __NORETURN; 462 463/* 464 * These libumem hooks provide a reasonable set of defaults for the allocator's 465 * debugging facilities. 466 */ 467const char * 468_umem_debug_init() 469{ 470 return ("default,verbose"); /* $UMEM_DEBUG setting */ 471} 472 473const char * 474_umem_logging_init(void) 475{ 476 return ("fail,contents"); /* $UMEM_LOGGING setting */ 477} 478 479#define FATAL_MSG_SZ 1024 480 481char *fatal_msg; 482 483static void 484fatal(int do_perror, char *message, ...) 
485{ 486 va_list args; 487 int save_errno = errno; 488 char buf[FATAL_MSG_SZ]; 489 490 (void) fflush(stdout); 491 492 va_start(args, message); 493 (void) sprintf(buf, "ztest: "); 494 /* LINTED */ 495 (void) vsprintf(buf + strlen(buf), message, args); 496 va_end(args); 497 if (do_perror) { 498 (void) snprintf(buf + strlen(buf), FATAL_MSG_SZ - strlen(buf), 499 ": %s", strerror(save_errno)); 500 } 501 (void) fprintf(stderr, "%s\n", buf); 502 fatal_msg = buf; /* to ease debugging */ 503 if (ztest_dump_core) 504 abort(); 505 exit(3); 506} 507 508static int 509str2shift(const char *buf) 510{ 511 const char *ends = "BKMGTPEZ"; 512 int i; 513 514 if (buf[0] == '\0') 515 return (0); 516 for (i = 0; i < strlen(ends); i++) { 517 if (toupper(buf[0]) == ends[i]) 518 break; 519 } 520 if (i == strlen(ends)) { 521 (void) fprintf(stderr, "ztest: invalid bytes suffix: %s\n", 522 buf); 523 usage(B_FALSE); 524 } 525 if (buf[1] == '\0' || (toupper(buf[1]) == 'B' && buf[2] == '\0')) { 526 return (10*i); 527 } 528 (void) fprintf(stderr, "ztest: invalid bytes suffix: %s\n", buf); 529 usage(B_FALSE); 530 /* NOTREACHED */ 531} 532 533static uint64_t 534nicenumtoull(const char *buf) 535{ 536 char *end; 537 uint64_t val; 538 539 val = strtoull(buf, &end, 0); 540 if (end == buf) { 541 (void) fprintf(stderr, "ztest: bad numeric value: %s\n", buf); 542 usage(B_FALSE); 543 } else if (end[0] == '.') { 544 double fval = strtod(buf, &end); 545 fval *= pow(2, str2shift(end)); 546 if (fval > UINT64_MAX) { 547 (void) fprintf(stderr, "ztest: value too large: %s\n", 548 buf); 549 usage(B_FALSE); 550 } 551 val = (uint64_t)fval; 552 } else { 553 int shift = str2shift(end); 554 if (shift >= 64 || (val << shift) >> shift != val) { 555 (void) fprintf(stderr, "ztest: value too large: %s\n", 556 buf); 557 usage(B_FALSE); 558 } 559 val <<= shift; 560 } 561 return (val); 562} 563 564static void 565usage(boolean_t requested) 566{ 567 const ztest_shared_opts_t *zo = &ztest_opts_defaults; 568 569 char nice_vdev_size[NN_NUMBUF_SZ]; 570 char nice_force_ganging[NN_NUMBUF_SZ]; 571 FILE *fp = requested ? stdout : stderr; 572 573 nicenum(zo->zo_vdev_size, nice_vdev_size, sizeof (nice_vdev_size)); 574 nicenum(zo->zo_metaslab_force_ganging, nice_force_ganging, 575 sizeof (nice_force_ganging)); 576 577 (void) fprintf(fp, "Usage: %s\n" 578 "\t[-v vdevs (default: %llu)]\n" 579 "\t[-s size_of_each_vdev (default: %s)]\n" 580 "\t[-a alignment_shift (default: %d)] use 0 for random\n" 581 "\t[-m mirror_copies (default: %d)]\n" 582 "\t[-r raidz_disks (default: %d)]\n" 583 "\t[-R raidz_parity (default: %d)]\n" 584 "\t[-d datasets (default: %d)]\n" 585 "\t[-t threads (default: %d)]\n" 586 "\t[-g gang_block_threshold (default: %s)]\n" 587 "\t[-i init_count (default: %d)] initialize pool i times\n" 588 "\t[-k kill_percentage (default: %llu%%)]\n" 589 "\t[-p pool_name (default: %s)]\n" 590 "\t[-f dir (default: %s)] file directory for vdev files\n" 591 "\t[-V] verbose (use multiple times for ever more blather)\n" 592 "\t[-E] use existing pool instead of creating new one\n" 593 "\t[-T time (default: %llu sec)] total run time\n" 594 "\t[-F freezeloops (default: %llu)] max loops in spa_freeze()\n" 595 "\t[-P passtime (default: %llu sec)] time per pass\n" 596 "\t[-B alt_ztest (default: <none>)] alternate ztest path\n" 597 "\t[-o variable=value] ... 
set global variable to an unsigned\n" 598 "\t 32-bit integer value\n" 599 "\t[-h] (print help)\n" 600 "", 601 zo->zo_pool, 602 (u_longlong_t)zo->zo_vdevs, /* -v */ 603 nice_vdev_size, /* -s */ 604 zo->zo_ashift, /* -a */ 605 zo->zo_mirrors, /* -m */ 606 zo->zo_raidz, /* -r */ 607 zo->zo_raidz_parity, /* -R */ 608 zo->zo_datasets, /* -d */ 609 zo->zo_threads, /* -t */ 610 nice_force_ganging, /* -g */ 611 zo->zo_init, /* -i */ 612 (u_longlong_t)zo->zo_killrate, /* -k */ 613 zo->zo_pool, /* -p */ 614 zo->zo_dir, /* -f */ 615 (u_longlong_t)zo->zo_time, /* -T */ 616 (u_longlong_t)zo->zo_maxloops, /* -F */ 617 (u_longlong_t)zo->zo_passtime); 618 exit(requested ? 0 : 1); 619} 620 621static void 622process_options(int argc, char **argv) 623{ 624 char *path; 625 ztest_shared_opts_t *zo = &ztest_opts; 626 627 int opt; 628 uint64_t value; 629 char altdir[MAXNAMELEN] = { 0 }; 630 631 bcopy(&ztest_opts_defaults, zo, sizeof (*zo)); 632 633 while ((opt = getopt(argc, argv, 634 "v:s:a:m:r:R:d:t:g:i:k:p:f:VET:P:hF:B:o:")) != EOF) { 635 value = 0; 636 switch (opt) { 637 case 'v': 638 case 's': 639 case 'a': 640 case 'm': 641 case 'r': 642 case 'R': 643 case 'd': 644 case 't': 645 case 'g': 646 case 'i': 647 case 'k': 648 case 'T': 649 case 'P': 650 case 'F': 651 value = nicenumtoull(optarg); 652 } 653 switch (opt) { 654 case 'v': 655 zo->zo_vdevs = value; 656 break; 657 case 's': 658 zo->zo_vdev_size = MAX(SPA_MINDEVSIZE, value); 659 break; 660 case 'a': 661 zo->zo_ashift = value; 662 break; 663 case 'm': 664 zo->zo_mirrors = value; 665 break; 666 case 'r': 667 zo->zo_raidz = MAX(1, value); 668 break; 669 case 'R': 670 zo->zo_raidz_parity = MIN(MAX(value, 1), 3); 671 break; 672 case 'd': 673 zo->zo_datasets = MAX(1, value); 674 break; 675 case 't': 676 zo->zo_threads = MAX(1, value); 677 break; 678 case 'g': 679 zo->zo_metaslab_force_ganging = 680 MAX(SPA_MINBLOCKSIZE << 1, value); 681 break; 682 case 'i': 683 zo->zo_init = value; 684 break; 685 case 'k': 686 zo->zo_killrate = value; 687 break; 688 case 'p': 689 (void) strlcpy(zo->zo_pool, optarg, 690 sizeof (zo->zo_pool)); 691 break; 692 case 'f': 693 path = realpath(optarg, NULL); 694 if (path == NULL) { 695 (void) fprintf(stderr, "error: %s: %s\n", 696 optarg, strerror(errno)); 697 usage(B_FALSE); 698 } else { 699 (void) strlcpy(zo->zo_dir, path, 700 sizeof (zo->zo_dir)); 701 } 702 break; 703 case 'V': 704 zo->zo_verbose++; 705 break; 706 case 'E': 707 zo->zo_init = 0; 708 break; 709 case 'T': 710 zo->zo_time = value; 711 break; 712 case 'P': 713 zo->zo_passtime = MAX(1, value); 714 break; 715 case 'F': 716 zo->zo_maxloops = MAX(1, value); 717 break; 718 case 'B': 719 (void) strlcpy(altdir, optarg, sizeof (altdir)); 720 break; 721 case 'o': 722 if (set_global_var(optarg) != 0) 723 usage(B_FALSE); 724 break; 725 case 'h': 726 usage(B_TRUE); 727 break; 728 case '?': 729 default: 730 usage(B_FALSE); 731 break; 732 } 733 } 734 735 zo->zo_raidz_parity = MIN(zo->zo_raidz_parity, zo->zo_raidz - 1); 736 737 zo->zo_vdevtime = 738 (zo->zo_vdevs > 0 ? 
zo->zo_time * NANOSEC / zo->zo_vdevs : 739 UINT64_MAX >> 2); 740 741 if (strlen(altdir) > 0) { 742 char *cmd; 743 char *realaltdir; 744 char *bin; 745 char *ztest; 746 char *isa; 747 int isalen; 748 749 cmd = umem_alloc(MAXPATHLEN, UMEM_NOFAIL); 750 realaltdir = umem_alloc(MAXPATHLEN, UMEM_NOFAIL); 751 752 VERIFY(NULL != realpath(getexecname(), cmd)); 753 if (0 != access(altdir, F_OK)) { 754 ztest_dump_core = B_FALSE; 755 fatal(B_TRUE, "invalid alternate ztest path: %s", 756 altdir); 757 } 758 VERIFY(NULL != realpath(altdir, realaltdir)); 759 760 /* 761 * 'cmd' should be of the form "<anything>/usr/bin/<isa>/ztest". 762 * We want to extract <isa> to determine if we should use 763 * 32 or 64 bit binaries. 764 */ 765 bin = strstr(cmd, "/usr/bin/"); 766 ztest = strstr(bin, "/ztest"); 767 isa = bin + 9; 768 isalen = ztest - isa; 769 (void) snprintf(zo->zo_alt_ztest, sizeof (zo->zo_alt_ztest), 770 "%s/usr/bin/%.*s/ztest", realaltdir, isalen, isa); 771 (void) snprintf(zo->zo_alt_libpath, sizeof (zo->zo_alt_libpath), 772 "%s/usr/lib/%.*s", realaltdir, isalen, isa); 773 774 if (0 != access(zo->zo_alt_ztest, X_OK)) { 775 ztest_dump_core = B_FALSE; 776 fatal(B_TRUE, "invalid alternate ztest: %s", 777 zo->zo_alt_ztest); 778 } else if (0 != access(zo->zo_alt_libpath, X_OK)) { 779 ztest_dump_core = B_FALSE; 780 fatal(B_TRUE, "invalid alternate lib directory %s", 781 zo->zo_alt_libpath); 782 } 783 784 umem_free(cmd, MAXPATHLEN); 785 umem_free(realaltdir, MAXPATHLEN); 786 } 787} 788 789static void 790ztest_kill(ztest_shared_t *zs) 791{ 792 zs->zs_alloc = metaslab_class_get_alloc(spa_normal_class(ztest_spa)); 793 zs->zs_space = metaslab_class_get_space(spa_normal_class(ztest_spa)); 794 795 /* 796 * Before we kill off ztest, make sure that the config is updated. 797 * See comment above spa_write_cachefile(). 798 */ 799 mutex_enter(&spa_namespace_lock); 800 spa_write_cachefile(ztest_spa, B_FALSE, B_FALSE); 801 mutex_exit(&spa_namespace_lock); 802 803 zfs_dbgmsg_print(FTAG); 804 (void) kill(getpid(), SIGKILL); 805} 806 807static uint64_t 808ztest_random(uint64_t range) 809{ 810 uint64_t r; 811 812 ASSERT3S(ztest_fd_rand, >=, 0); 813 814 if (range == 0) 815 return (0); 816 817 if (read(ztest_fd_rand, &r, sizeof (r)) != sizeof (r)) 818 fatal(1, "short read from /dev/urandom"); 819 820 return (r % range); 821} 822 823/* ARGSUSED */ 824static void 825ztest_record_enospc(const char *s) 826{ 827 ztest_shared->zs_enospc_count++; 828} 829 830static uint64_t 831ztest_get_ashift(void) 832{ 833 if (ztest_opts.zo_ashift == 0) 834 return (SPA_MINBLOCKSHIFT + ztest_random(5)); 835 return (ztest_opts.zo_ashift); 836} 837 838static nvlist_t * 839make_vdev_file(char *path, char *aux, char *pool, size_t size, uint64_t ashift) 840{ 841 char pathbuf[MAXPATHLEN]; 842 uint64_t vdev; 843 nvlist_t *file; 844 845 if (ashift == 0) 846 ashift = ztest_get_ashift(); 847 848 if (path == NULL) { 849 path = pathbuf; 850 851 if (aux != NULL) { 852 vdev = ztest_shared->zs_vdev_aux; 853 (void) snprintf(path, sizeof (pathbuf), 854 ztest_aux_template, ztest_opts.zo_dir, 855 pool == NULL ? ztest_opts.zo_pool : pool, 856 aux, vdev); 857 } else { 858 vdev = ztest_shared->zs_vdev_next_leaf++; 859 (void) snprintf(path, sizeof (pathbuf), 860 ztest_dev_template, ztest_opts.zo_dir, 861 pool == NULL ? 
ztest_opts.zo_pool : pool, vdev); 862 } 863 } 864 865 if (size != 0) { 866 int fd = open(path, O_RDWR | O_CREAT | O_TRUNC, 0666); 867 if (fd == -1) 868 fatal(1, "can't open %s", path); 869 if (ftruncate(fd, size) != 0) 870 fatal(1, "can't ftruncate %s", path); 871 (void) close(fd); 872 } 873 874 VERIFY(nvlist_alloc(&file, NV_UNIQUE_NAME, 0) == 0); 875 VERIFY(nvlist_add_string(file, ZPOOL_CONFIG_TYPE, VDEV_TYPE_FILE) == 0); 876 VERIFY(nvlist_add_string(file, ZPOOL_CONFIG_PATH, path) == 0); 877 VERIFY(nvlist_add_uint64(file, ZPOOL_CONFIG_ASHIFT, ashift) == 0); 878 879 return (file); 880} 881 882static nvlist_t * 883make_vdev_raidz(char *path, char *aux, char *pool, size_t size, 884 uint64_t ashift, int r) 885{ 886 nvlist_t *raidz, **child; 887 int c; 888 889 if (r < 2) 890 return (make_vdev_file(path, aux, pool, size, ashift)); 891 child = umem_alloc(r * sizeof (nvlist_t *), UMEM_NOFAIL); 892 893 for (c = 0; c < r; c++) 894 child[c] = make_vdev_file(path, aux, pool, size, ashift); 895 896 VERIFY(nvlist_alloc(&raidz, NV_UNIQUE_NAME, 0) == 0); 897 VERIFY(nvlist_add_string(raidz, ZPOOL_CONFIG_TYPE, 898 VDEV_TYPE_RAIDZ) == 0); 899 VERIFY(nvlist_add_uint64(raidz, ZPOOL_CONFIG_NPARITY, 900 ztest_opts.zo_raidz_parity) == 0); 901 VERIFY(nvlist_add_nvlist_array(raidz, ZPOOL_CONFIG_CHILDREN, 902 child, r) == 0); 903 904 for (c = 0; c < r; c++) 905 nvlist_free(child[c]); 906 907 umem_free(child, r * sizeof (nvlist_t *)); 908 909 return (raidz); 910} 911 912static nvlist_t * 913make_vdev_mirror(char *path, char *aux, char *pool, size_t size, 914 uint64_t ashift, int r, int m) 915{ 916 nvlist_t *mirror, **child; 917 int c; 918 919 if (m < 1) 920 return (make_vdev_raidz(path, aux, pool, size, ashift, r)); 921 922 child = umem_alloc(m * sizeof (nvlist_t *), UMEM_NOFAIL); 923 924 for (c = 0; c < m; c++) 925 child[c] = make_vdev_raidz(path, aux, pool, size, ashift, r); 926 927 VERIFY(nvlist_alloc(&mirror, NV_UNIQUE_NAME, 0) == 0); 928 VERIFY(nvlist_add_string(mirror, ZPOOL_CONFIG_TYPE, 929 VDEV_TYPE_MIRROR) == 0); 930 VERIFY(nvlist_add_nvlist_array(mirror, ZPOOL_CONFIG_CHILDREN, 931 child, m) == 0); 932 933 for (c = 0; c < m; c++) 934 nvlist_free(child[c]); 935 936 umem_free(child, m * sizeof (nvlist_t *)); 937 938 return (mirror); 939} 940 941static nvlist_t * 942make_vdev_root(char *path, char *aux, char *pool, size_t size, uint64_t ashift, 943 int log, int r, int m, int t) 944{ 945 nvlist_t *root, **child; 946 int c; 947 948 ASSERT(t > 0); 949 950 child = umem_alloc(t * sizeof (nvlist_t *), UMEM_NOFAIL); 951 952 for (c = 0; c < t; c++) { 953 child[c] = make_vdev_mirror(path, aux, pool, size, ashift, 954 r, m); 955 VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_IS_LOG, 956 log) == 0); 957 } 958 959 VERIFY(nvlist_alloc(&root, NV_UNIQUE_NAME, 0) == 0); 960 VERIFY(nvlist_add_string(root, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT) == 0); 961 VERIFY(nvlist_add_nvlist_array(root, aux ? aux : ZPOOL_CONFIG_CHILDREN, 962 child, t) == 0); 963 964 for (c = 0; c < t; c++) 965 nvlist_free(child[c]); 966 967 umem_free(child, t * sizeof (nvlist_t *)); 968 969 return (root); 970} 971 972/* 973 * Find a random spa version. Returns back a random spa version in the 974 * range [initial_version, SPA_VERSION_FEATURES]. 
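 * If initial_version is already a feature-flags version (that is, greater
 * than SPA_VERSION_BEFORE_FEATURES), the result is simply
 * SPA_VERSION_FEATURES.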
975 */ 976static uint64_t 977ztest_random_spa_version(uint64_t initial_version) 978{ 979 uint64_t version = initial_version; 980 981 if (version <= SPA_VERSION_BEFORE_FEATURES) { 982 version = version + 983 ztest_random(SPA_VERSION_BEFORE_FEATURES - version + 1); 984 } 985 986 if (version > SPA_VERSION_BEFORE_FEATURES) 987 version = SPA_VERSION_FEATURES; 988 989 ASSERT(SPA_VERSION_IS_SUPPORTED(version)); 990 return (version); 991} 992 993static int 994ztest_random_blocksize(void) 995{ 996 uint64_t block_shift; 997 /* 998 * Choose a block size >= the ashift. 999 * If the SPA supports new MAXBLOCKSIZE, test up to 1MB blocks. 1000 */ 1001 int maxbs = SPA_OLD_MAXBLOCKSHIFT; 1002 if (spa_maxblocksize(ztest_spa) == SPA_MAXBLOCKSIZE) 1003 maxbs = 20; 1004 block_shift = ztest_random(maxbs - ztest_spa->spa_max_ashift + 1); 1005 return (1 << (SPA_MINBLOCKSHIFT + block_shift)); 1006} 1007 1008static int 1009ztest_random_ibshift(void) 1010{ 1011 return (DN_MIN_INDBLKSHIFT + 1012 ztest_random(DN_MAX_INDBLKSHIFT - DN_MIN_INDBLKSHIFT + 1)); 1013} 1014 1015static uint64_t 1016ztest_random_vdev_top(spa_t *spa, boolean_t log_ok) 1017{ 1018 uint64_t top; 1019 vdev_t *rvd = spa->spa_root_vdev; 1020 vdev_t *tvd; 1021 1022 ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0); 1023 1024 do { 1025 top = ztest_random(rvd->vdev_children); 1026 tvd = rvd->vdev_child[top]; 1027 } while (!vdev_is_concrete(tvd) || (tvd->vdev_islog && !log_ok) || 1028 tvd->vdev_mg == NULL || tvd->vdev_mg->mg_class == NULL); 1029 1030 return (top); 1031} 1032 1033static uint64_t 1034ztest_random_dsl_prop(zfs_prop_t prop) 1035{ 1036 uint64_t value; 1037 1038 do { 1039 value = zfs_prop_random_value(prop, ztest_random(-1ULL)); 1040 } while (prop == ZFS_PROP_CHECKSUM && value == ZIO_CHECKSUM_OFF); 1041 1042 return (value); 1043} 1044 1045static int 1046ztest_dsl_prop_set_uint64(char *osname, zfs_prop_t prop, uint64_t value, 1047 boolean_t inherit) 1048{ 1049 const char *propname = zfs_prop_to_name(prop); 1050 const char *valname; 1051 char setpoint[MAXPATHLEN]; 1052 uint64_t curval; 1053 int error; 1054 1055 error = dsl_prop_set_int(osname, propname, 1056 (inherit ? 
ZPROP_SRC_NONE : ZPROP_SRC_LOCAL), value); 1057 1058 if (error == ENOSPC) { 1059 ztest_record_enospc(FTAG); 1060 return (error); 1061 } 1062 ASSERT0(error); 1063 1064 VERIFY0(dsl_prop_get_integer(osname, propname, &curval, setpoint)); 1065 1066 if (ztest_opts.zo_verbose >= 6) { 1067 VERIFY(zfs_prop_index_to_string(prop, curval, &valname) == 0); 1068 (void) printf("%s %s = %s at '%s'\n", 1069 osname, propname, valname, setpoint); 1070 } 1071 1072 return (error); 1073} 1074 1075static int 1076ztest_spa_prop_set_uint64(zpool_prop_t prop, uint64_t value) 1077{ 1078 spa_t *spa = ztest_spa; 1079 nvlist_t *props = NULL; 1080 int error; 1081 1082 VERIFY(nvlist_alloc(&props, NV_UNIQUE_NAME, 0) == 0); 1083 VERIFY(nvlist_add_uint64(props, zpool_prop_to_name(prop), value) == 0); 1084 1085 error = spa_prop_set(spa, props); 1086 1087 nvlist_free(props); 1088 1089 if (error == ENOSPC) { 1090 ztest_record_enospc(FTAG); 1091 return (error); 1092 } 1093 ASSERT0(error); 1094 1095 return (error); 1096} 1097 1098static void 1099ztest_rll_init(rll_t *rll) 1100{ 1101 rll->rll_writer = NULL; 1102 rll->rll_readers = 0; 1103 mutex_init(&rll->rll_lock, NULL, USYNC_THREAD, NULL); 1104 cv_init(&rll->rll_cv, NULL, USYNC_THREAD, NULL); 1105} 1106 1107static void 1108ztest_rll_destroy(rll_t *rll) 1109{ 1110 ASSERT(rll->rll_writer == NULL); 1111 ASSERT(rll->rll_readers == 0); 1112 mutex_destroy(&rll->rll_lock); 1113 cv_destroy(&rll->rll_cv); 1114} 1115 1116static void 1117ztest_rll_lock(rll_t *rll, rl_type_t type) 1118{ 1119 mutex_enter(&rll->rll_lock); 1120 1121 if (type == RL_READER) { 1122 while (rll->rll_writer != NULL) 1123 cv_wait(&rll->rll_cv, &rll->rll_lock); 1124 rll->rll_readers++; 1125 } else { 1126 while (rll->rll_writer != NULL || rll->rll_readers) 1127 cv_wait(&rll->rll_cv, &rll->rll_lock); 1128 rll->rll_writer = curthread; 1129 } 1130 1131 mutex_exit(&rll->rll_lock); 1132} 1133 1134static void 1135ztest_rll_unlock(rll_t *rll) 1136{ 1137 mutex_enter(&rll->rll_lock); 1138 1139 if (rll->rll_writer) { 1140 ASSERT(rll->rll_readers == 0); 1141 rll->rll_writer = NULL; 1142 } else { 1143 ASSERT(rll->rll_readers != 0); 1144 ASSERT(rll->rll_writer == NULL); 1145 rll->rll_readers--; 1146 } 1147 1148 if (rll->rll_writer == NULL && rll->rll_readers == 0) 1149 cv_broadcast(&rll->rll_cv); 1150 1151 mutex_exit(&rll->rll_lock); 1152} 1153 1154static void 1155ztest_object_lock(ztest_ds_t *zd, uint64_t object, rl_type_t type) 1156{ 1157 rll_t *rll = &zd->zd_object_lock[object & (ZTEST_OBJECT_LOCKS - 1)]; 1158 1159 ztest_rll_lock(rll, type); 1160} 1161 1162static void 1163ztest_object_unlock(ztest_ds_t *zd, uint64_t object) 1164{ 1165 rll_t *rll = &zd->zd_object_lock[object & (ZTEST_OBJECT_LOCKS - 1)]; 1166 1167 ztest_rll_unlock(rll); 1168} 1169 1170static rl_t * 1171ztest_range_lock(ztest_ds_t *zd, uint64_t object, uint64_t offset, 1172 uint64_t size, rl_type_t type) 1173{ 1174 uint64_t hash = object ^ (offset % (ZTEST_RANGE_LOCKS + 1)); 1175 rll_t *rll = &zd->zd_range_lock[hash & (ZTEST_RANGE_LOCKS - 1)]; 1176 rl_t *rl; 1177 1178 rl = umem_alloc(sizeof (*rl), UMEM_NOFAIL); 1179 rl->rl_object = object; 1180 rl->rl_offset = offset; 1181 rl->rl_size = size; 1182 rl->rl_lock = rll; 1183 1184 ztest_rll_lock(rll, type); 1185 1186 return (rl); 1187} 1188 1189static void 1190ztest_range_unlock(rl_t *rl) 1191{ 1192 rll_t *rll = rl->rl_lock; 1193 1194 ztest_rll_unlock(rll); 1195 1196 umem_free(rl, sizeof (*rl)); 1197} 1198 1199static void 1200ztest_zd_init(ztest_ds_t *zd, ztest_shared_ds_t *szd, objset_t *os) 1201{ 1202 zd->zd_os = 
os; 1203 zd->zd_zilog = dmu_objset_zil(os); 1204 zd->zd_shared = szd; 1205 dmu_objset_name(os, zd->zd_name); 1206 1207 if (zd->zd_shared != NULL) 1208 zd->zd_shared->zd_seq = 0; 1209 1210 rw_init(&zd->zd_zilog_lock, NULL, USYNC_THREAD, NULL); 1211 mutex_init(&zd->zd_dirobj_lock, NULL, USYNC_THREAD, NULL); 1212 1213 for (int l = 0; l < ZTEST_OBJECT_LOCKS; l++) 1214 ztest_rll_init(&zd->zd_object_lock[l]); 1215 1216 for (int l = 0; l < ZTEST_RANGE_LOCKS; l++) 1217 ztest_rll_init(&zd->zd_range_lock[l]); 1218} 1219 1220static void 1221ztest_zd_fini(ztest_ds_t *zd) 1222{ 1223 mutex_destroy(&zd->zd_dirobj_lock); 1224 1225 for (int l = 0; l < ZTEST_OBJECT_LOCKS; l++) 1226 ztest_rll_destroy(&zd->zd_object_lock[l]); 1227 1228 for (int l = 0; l < ZTEST_RANGE_LOCKS; l++) 1229 ztest_rll_destroy(&zd->zd_range_lock[l]); 1230} 1231 1232#define TXG_MIGHTWAIT (ztest_random(10) == 0 ? TXG_NOWAIT : TXG_WAIT) 1233 1234static uint64_t 1235ztest_tx_assign(dmu_tx_t *tx, uint64_t txg_how, const char *tag) 1236{ 1237 uint64_t txg; 1238 int error; 1239 1240 /* 1241 * Attempt to assign tx to some transaction group. 1242 */ 1243 error = dmu_tx_assign(tx, txg_how); 1244 if (error) { 1245 if (error == ERESTART) { 1246 ASSERT(txg_how == TXG_NOWAIT); 1247 dmu_tx_wait(tx); 1248 } else { 1249 ASSERT3U(error, ==, ENOSPC); 1250 ztest_record_enospc(tag); 1251 } 1252 dmu_tx_abort(tx); 1253 return (0); 1254 } 1255 txg = dmu_tx_get_txg(tx); 1256 ASSERT(txg != 0); 1257 return (txg); 1258} 1259 1260static void 1261ztest_pattern_set(void *buf, uint64_t size, uint64_t value) 1262{ 1263 uint64_t *ip = buf; 1264 uint64_t *ip_end = (uint64_t *)((uintptr_t)buf + (uintptr_t)size); 1265 1266 while (ip < ip_end) 1267 *ip++ = value; 1268} 1269 1270static boolean_t 1271ztest_pattern_match(void *buf, uint64_t size, uint64_t value) 1272{ 1273 uint64_t *ip = buf; 1274 uint64_t *ip_end = (uint64_t *)((uintptr_t)buf + (uintptr_t)size); 1275 uint64_t diff = 0; 1276 1277 while (ip < ip_end) 1278 diff |= (value - *ip++); 1279 1280 return (diff == 0); 1281} 1282 1283static void 1284ztest_bt_generate(ztest_block_tag_t *bt, objset_t *os, uint64_t object, 1285 uint64_t offset, uint64_t gen, uint64_t txg, uint64_t crtxg) 1286{ 1287 bt->bt_magic = BT_MAGIC; 1288 bt->bt_objset = dmu_objset_id(os); 1289 bt->bt_object = object; 1290 bt->bt_offset = offset; 1291 bt->bt_gen = gen; 1292 bt->bt_txg = txg; 1293 bt->bt_crtxg = crtxg; 1294} 1295 1296static void 1297ztest_bt_verify(ztest_block_tag_t *bt, objset_t *os, uint64_t object, 1298 uint64_t offset, uint64_t gen, uint64_t txg, uint64_t crtxg) 1299{ 1300 ASSERT3U(bt->bt_magic, ==, BT_MAGIC); 1301 ASSERT3U(bt->bt_objset, ==, dmu_objset_id(os)); 1302 ASSERT3U(bt->bt_object, ==, object); 1303 ASSERT3U(bt->bt_offset, ==, offset); 1304 ASSERT3U(bt->bt_gen, <=, gen); 1305 ASSERT3U(bt->bt_txg, <=, txg); 1306 ASSERT3U(bt->bt_crtxg, ==, crtxg); 1307} 1308 1309static ztest_block_tag_t * 1310ztest_bt_bonus(dmu_buf_t *db) 1311{ 1312 dmu_object_info_t doi; 1313 ztest_block_tag_t *bt; 1314 1315 dmu_object_info_from_db(db, &doi); 1316 ASSERT3U(doi.doi_bonus_size, <=, db->db_size); 1317 ASSERT3U(doi.doi_bonus_size, >=, sizeof (*bt)); 1318 bt = (void *)((char *)db->db_data + doi.doi_bonus_size - sizeof (*bt)); 1319 1320 return (bt); 1321} 1322 1323/* 1324 * ZIL logging ops 1325 */ 1326 1327#define lrz_type lr_mode 1328#define lrz_blocksize lr_uid 1329#define lrz_ibshift lr_gid 1330#define lrz_bonustype lr_rdev 1331#define lrz_bonuslen lr_crtime[1] 1332 1333static void 1334ztest_log_create(ztest_ds_t *zd, dmu_tx_t *tx, 
lr_create_t *lr) 1335{ 1336 char *name = (void *)(lr + 1); /* name follows lr */ 1337 size_t namesize = strlen(name) + 1; 1338 itx_t *itx; 1339 1340 if (zil_replaying(zd->zd_zilog, tx)) 1341 return; 1342 1343 itx = zil_itx_create(TX_CREATE, sizeof (*lr) + namesize); 1344 bcopy(&lr->lr_common + 1, &itx->itx_lr + 1, 1345 sizeof (*lr) + namesize - sizeof (lr_t)); 1346 1347 zil_itx_assign(zd->zd_zilog, itx, tx); 1348} 1349 1350static void 1351ztest_log_remove(ztest_ds_t *zd, dmu_tx_t *tx, lr_remove_t *lr, uint64_t object) 1352{ 1353 char *name = (void *)(lr + 1); /* name follows lr */ 1354 size_t namesize = strlen(name) + 1; 1355 itx_t *itx; 1356 1357 if (zil_replaying(zd->zd_zilog, tx)) 1358 return; 1359 1360 itx = zil_itx_create(TX_REMOVE, sizeof (*lr) + namesize); 1361 bcopy(&lr->lr_common + 1, &itx->itx_lr + 1, 1362 sizeof (*lr) + namesize - sizeof (lr_t)); 1363 1364 itx->itx_oid = object; 1365 zil_itx_assign(zd->zd_zilog, itx, tx); 1366} 1367 1368static void 1369ztest_log_write(ztest_ds_t *zd, dmu_tx_t *tx, lr_write_t *lr) 1370{ 1371 itx_t *itx; 1372 itx_wr_state_t write_state = ztest_random(WR_NUM_STATES); 1373 1374 if (zil_replaying(zd->zd_zilog, tx)) 1375 return; 1376 1377 if (lr->lr_length > ZIL_MAX_LOG_DATA) 1378 write_state = WR_INDIRECT; 1379 1380 itx = zil_itx_create(TX_WRITE, 1381 sizeof (*lr) + (write_state == WR_COPIED ? lr->lr_length : 0)); 1382 1383 if (write_state == WR_COPIED && 1384 dmu_read(zd->zd_os, lr->lr_foid, lr->lr_offset, lr->lr_length, 1385 ((lr_write_t *)&itx->itx_lr) + 1, DMU_READ_NO_PREFETCH) != 0) { 1386 zil_itx_destroy(itx); 1387 itx = zil_itx_create(TX_WRITE, sizeof (*lr)); 1388 write_state = WR_NEED_COPY; 1389 } 1390 itx->itx_private = zd; 1391 itx->itx_wr_state = write_state; 1392 itx->itx_sync = (ztest_random(8) == 0); 1393 1394 bcopy(&lr->lr_common + 1, &itx->itx_lr + 1, 1395 sizeof (*lr) - sizeof (lr_t)); 1396 1397 zil_itx_assign(zd->zd_zilog, itx, tx); 1398} 1399 1400static void 1401ztest_log_truncate(ztest_ds_t *zd, dmu_tx_t *tx, lr_truncate_t *lr) 1402{ 1403 itx_t *itx; 1404 1405 if (zil_replaying(zd->zd_zilog, tx)) 1406 return; 1407 1408 itx = zil_itx_create(TX_TRUNCATE, sizeof (*lr)); 1409 bcopy(&lr->lr_common + 1, &itx->itx_lr + 1, 1410 sizeof (*lr) - sizeof (lr_t)); 1411 1412 itx->itx_sync = B_FALSE; 1413 zil_itx_assign(zd->zd_zilog, itx, tx); 1414} 1415 1416static void 1417ztest_log_setattr(ztest_ds_t *zd, dmu_tx_t *tx, lr_setattr_t *lr) 1418{ 1419 itx_t *itx; 1420 1421 if (zil_replaying(zd->zd_zilog, tx)) 1422 return; 1423 1424 itx = zil_itx_create(TX_SETATTR, sizeof (*lr)); 1425 bcopy(&lr->lr_common + 1, &itx->itx_lr + 1, 1426 sizeof (*lr) - sizeof (lr_t)); 1427 1428 itx->itx_sync = B_FALSE; 1429 zil_itx_assign(zd->zd_zilog, itx, tx); 1430} 1431 1432/* 1433 * ZIL replay ops 1434 */ 1435static int 1436ztest_replay_create(void *arg1, void *arg2, boolean_t byteswap) 1437{ 1438 ztest_ds_t *zd = arg1; 1439 lr_create_t *lr = arg2; 1440 char *name = (void *)(lr + 1); /* name follows lr */ 1441 objset_t *os = zd->zd_os; 1442 ztest_block_tag_t *bbt; 1443 dmu_buf_t *db; 1444 dmu_tx_t *tx; 1445 uint64_t txg; 1446 int error = 0; 1447 1448 if (byteswap) 1449 byteswap_uint64_array(lr, sizeof (*lr)); 1450 1451 ASSERT(lr->lr_doid == ZTEST_DIROBJ); 1452 ASSERT(name[0] != '\0'); 1453 1454 tx = dmu_tx_create(os); 1455 1456 dmu_tx_hold_zap(tx, lr->lr_doid, B_TRUE, name); 1457 1458 if (lr->lrz_type == DMU_OT_ZAP_OTHER) { 1459 dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL); 1460 } else { 1461 dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT); 1462 } 1463 1464 txg = 
ztest_tx_assign(tx, TXG_WAIT, FTAG); 1465 if (txg == 0) 1466 return (ENOSPC); 1467 1468 ASSERT(dmu_objset_zil(os)->zl_replay == !!lr->lr_foid); 1469 1470 if (lr->lrz_type == DMU_OT_ZAP_OTHER) { 1471 if (lr->lr_foid == 0) { 1472 lr->lr_foid = zap_create(os, 1473 lr->lrz_type, lr->lrz_bonustype, 1474 lr->lrz_bonuslen, tx); 1475 } else { 1476 error = zap_create_claim(os, lr->lr_foid, 1477 lr->lrz_type, lr->lrz_bonustype, 1478 lr->lrz_bonuslen, tx); 1479 } 1480 } else { 1481 if (lr->lr_foid == 0) { 1482 lr->lr_foid = dmu_object_alloc(os, 1483 lr->lrz_type, 0, lr->lrz_bonustype, 1484 lr->lrz_bonuslen, tx); 1485 } else { 1486 error = dmu_object_claim(os, lr->lr_foid, 1487 lr->lrz_type, 0, lr->lrz_bonustype, 1488 lr->lrz_bonuslen, tx); 1489 } 1490 } 1491 1492 if (error) { 1493 ASSERT3U(error, ==, EEXIST); 1494 ASSERT(zd->zd_zilog->zl_replay); 1495 dmu_tx_commit(tx); 1496 return (error); 1497 } 1498 1499 ASSERT(lr->lr_foid != 0); 1500 1501 if (lr->lrz_type != DMU_OT_ZAP_OTHER) 1502 VERIFY3U(0, ==, dmu_object_set_blocksize(os, lr->lr_foid, 1503 lr->lrz_blocksize, lr->lrz_ibshift, tx)); 1504 1505 VERIFY3U(0, ==, dmu_bonus_hold(os, lr->lr_foid, FTAG, &db)); 1506 bbt = ztest_bt_bonus(db); 1507 dmu_buf_will_dirty(db, tx); 1508 ztest_bt_generate(bbt, os, lr->lr_foid, -1ULL, lr->lr_gen, txg, txg); 1509 dmu_buf_rele(db, FTAG); 1510 1511 VERIFY3U(0, ==, zap_add(os, lr->lr_doid, name, sizeof (uint64_t), 1, 1512 &lr->lr_foid, tx)); 1513 1514 (void) ztest_log_create(zd, tx, lr); 1515 1516 dmu_tx_commit(tx); 1517 1518 return (0); 1519} 1520 1521static int 1522ztest_replay_remove(void *arg1, void *arg2, boolean_t byteswap) 1523{ 1524 ztest_ds_t *zd = arg1; 1525 lr_remove_t *lr = arg2; 1526 char *name = (void *)(lr + 1); /* name follows lr */ 1527 objset_t *os = zd->zd_os; 1528 dmu_object_info_t doi; 1529 dmu_tx_t *tx; 1530 uint64_t object, txg; 1531 1532 if (byteswap) 1533 byteswap_uint64_array(lr, sizeof (*lr)); 1534 1535 ASSERT(lr->lr_doid == ZTEST_DIROBJ); 1536 ASSERT(name[0] != '\0'); 1537 1538 VERIFY3U(0, ==, 1539 zap_lookup(os, lr->lr_doid, name, sizeof (object), 1, &object)); 1540 ASSERT(object != 0); 1541 1542 ztest_object_lock(zd, object, RL_WRITER); 1543 1544 VERIFY3U(0, ==, dmu_object_info(os, object, &doi)); 1545 1546 tx = dmu_tx_create(os); 1547 1548 dmu_tx_hold_zap(tx, lr->lr_doid, B_FALSE, name); 1549 dmu_tx_hold_free(tx, object, 0, DMU_OBJECT_END); 1550 1551 txg = ztest_tx_assign(tx, TXG_WAIT, FTAG); 1552 if (txg == 0) { 1553 ztest_object_unlock(zd, object); 1554 return (ENOSPC); 1555 } 1556 1557 if (doi.doi_type == DMU_OT_ZAP_OTHER) { 1558 VERIFY3U(0, ==, zap_destroy(os, object, tx)); 1559 } else { 1560 VERIFY3U(0, ==, dmu_object_free(os, object, tx)); 1561 } 1562 1563 VERIFY3U(0, ==, zap_remove(os, lr->lr_doid, name, tx)); 1564 1565 (void) ztest_log_remove(zd, tx, lr, object); 1566 1567 dmu_tx_commit(tx); 1568 1569 ztest_object_unlock(zd, object); 1570 1571 return (0); 1572} 1573 1574static int 1575ztest_replay_write(void *arg1, void *arg2, boolean_t byteswap) 1576{ 1577 ztest_ds_t *zd = arg1; 1578 lr_write_t *lr = arg2; 1579 objset_t *os = zd->zd_os; 1580 void *data = lr + 1; /* data follows lr */ 1581 uint64_t offset, length; 1582 ztest_block_tag_t *bt = data; 1583 ztest_block_tag_t *bbt; 1584 uint64_t gen, txg, lrtxg, crtxg; 1585 dmu_object_info_t doi; 1586 dmu_tx_t *tx; 1587 dmu_buf_t *db; 1588 arc_buf_t *abuf = NULL; 1589 rl_t *rl; 1590 1591 if (byteswap) 1592 byteswap_uint64_array(lr, sizeof (*lr)); 1593 1594 offset = lr->lr_offset; 1595 length = lr->lr_length; 1596 1597 /* If it's a 
dmu_sync() block, write the whole block */ 1598 if (lr->lr_common.lrc_reclen == sizeof (lr_write_t)) { 1599 uint64_t blocksize = BP_GET_LSIZE(&lr->lr_blkptr); 1600 if (length < blocksize) { 1601 offset -= offset % blocksize; 1602 length = blocksize; 1603 } 1604 } 1605 1606 if (bt->bt_magic == BSWAP_64(BT_MAGIC)) 1607 byteswap_uint64_array(bt, sizeof (*bt)); 1608 1609 if (bt->bt_magic != BT_MAGIC) 1610 bt = NULL; 1611 1612 ztest_object_lock(zd, lr->lr_foid, RL_READER); 1613 rl = ztest_range_lock(zd, lr->lr_foid, offset, length, RL_WRITER); 1614 1615 VERIFY3U(0, ==, dmu_bonus_hold(os, lr->lr_foid, FTAG, &db)); 1616 1617 dmu_object_info_from_db(db, &doi); 1618 1619 bbt = ztest_bt_bonus(db); 1620 ASSERT3U(bbt->bt_magic, ==, BT_MAGIC); 1621 gen = bbt->bt_gen; 1622 crtxg = bbt->bt_crtxg; 1623 lrtxg = lr->lr_common.lrc_txg; 1624 1625 tx = dmu_tx_create(os); 1626 1627 dmu_tx_hold_write(tx, lr->lr_foid, offset, length); 1628 1629 if (ztest_random(8) == 0 && length == doi.doi_data_block_size && 1630 P2PHASE(offset, length) == 0) 1631 abuf = dmu_request_arcbuf(db, length); 1632 1633 txg = ztest_tx_assign(tx, TXG_WAIT, FTAG); 1634 if (txg == 0) { 1635 if (abuf != NULL) 1636 dmu_return_arcbuf(abuf); 1637 dmu_buf_rele(db, FTAG); 1638 ztest_range_unlock(rl); 1639 ztest_object_unlock(zd, lr->lr_foid); 1640 return (ENOSPC); 1641 } 1642 1643 if (bt != NULL) { 1644 /* 1645 * Usually, verify the old data before writing new data -- 1646 * but not always, because we also want to verify correct 1647 * behavior when the data was not recently read into cache. 1648 */ 1649 ASSERT(offset % doi.doi_data_block_size == 0); 1650 if (ztest_random(4) != 0) { 1651 int prefetch = ztest_random(2) ? 1652 DMU_READ_PREFETCH : DMU_READ_NO_PREFETCH; 1653 ztest_block_tag_t rbt; 1654 1655 VERIFY(dmu_read(os, lr->lr_foid, offset, 1656 sizeof (rbt), &rbt, prefetch) == 0); 1657 if (rbt.bt_magic == BT_MAGIC) { 1658 ztest_bt_verify(&rbt, os, lr->lr_foid, 1659 offset, gen, txg, crtxg); 1660 } 1661 } 1662 1663 /* 1664 * Writes can appear to be newer than the bonus buffer because 1665 * the ztest_get_data() callback does a dmu_read() of the 1666 * open-context data, which may be different than the data 1667 * as it was when the write was generated. 1668 */ 1669 if (zd->zd_zilog->zl_replay) { 1670 ztest_bt_verify(bt, os, lr->lr_foid, offset, 1671 MAX(gen, bt->bt_gen), MAX(txg, lrtxg), 1672 bt->bt_crtxg); 1673 } 1674 1675 /* 1676 * Set the bt's gen/txg to the bonus buffer's gen/txg 1677 * so that all of the usual ASSERTs will work. 
1678 */ 1679 ztest_bt_generate(bt, os, lr->lr_foid, offset, gen, txg, crtxg); 1680 } 1681 1682 if (abuf == NULL) { 1683 dmu_write(os, lr->lr_foid, offset, length, data, tx); 1684 } else { 1685 bcopy(data, abuf->b_data, length); 1686 dmu_assign_arcbuf(db, offset, abuf, tx); 1687 } 1688 1689 (void) ztest_log_write(zd, tx, lr); 1690 1691 dmu_buf_rele(db, FTAG); 1692 1693 dmu_tx_commit(tx); 1694 1695 ztest_range_unlock(rl); 1696 ztest_object_unlock(zd, lr->lr_foid); 1697 1698 return (0); 1699} 1700 1701static int 1702ztest_replay_truncate(void *arg1, void *arg2, boolean_t byteswap) 1703{ 1704 ztest_ds_t *zd = arg1; 1705 lr_truncate_t *lr = arg2; 1706 objset_t *os = zd->zd_os; 1707 dmu_tx_t *tx; 1708 uint64_t txg; 1709 rl_t *rl; 1710 1711 if (byteswap) 1712 byteswap_uint64_array(lr, sizeof (*lr)); 1713 1714 ztest_object_lock(zd, lr->lr_foid, RL_READER); 1715 rl = ztest_range_lock(zd, lr->lr_foid, lr->lr_offset, lr->lr_length, 1716 RL_WRITER); 1717 1718 tx = dmu_tx_create(os); 1719 1720 dmu_tx_hold_free(tx, lr->lr_foid, lr->lr_offset, lr->lr_length); 1721 1722 txg = ztest_tx_assign(tx, TXG_WAIT, FTAG); 1723 if (txg == 0) { 1724 ztest_range_unlock(rl); 1725 ztest_object_unlock(zd, lr->lr_foid); 1726 return (ENOSPC); 1727 } 1728 1729 VERIFY(dmu_free_range(os, lr->lr_foid, lr->lr_offset, 1730 lr->lr_length, tx) == 0); 1731 1732 (void) ztest_log_truncate(zd, tx, lr); 1733 1734 dmu_tx_commit(tx); 1735 1736 ztest_range_unlock(rl); 1737 ztest_object_unlock(zd, lr->lr_foid); 1738 1739 return (0); 1740} 1741 1742static int 1743ztest_replay_setattr(void *arg1, void *arg2, boolean_t byteswap) 1744{ 1745 ztest_ds_t *zd = arg1; 1746 lr_setattr_t *lr = arg2; 1747 objset_t *os = zd->zd_os; 1748 dmu_tx_t *tx; 1749 dmu_buf_t *db; 1750 ztest_block_tag_t *bbt; 1751 uint64_t txg, lrtxg, crtxg; 1752 1753 if (byteswap) 1754 byteswap_uint64_array(lr, sizeof (*lr)); 1755 1756 ztest_object_lock(zd, lr->lr_foid, RL_WRITER); 1757 1758 VERIFY3U(0, ==, dmu_bonus_hold(os, lr->lr_foid, FTAG, &db)); 1759 1760 tx = dmu_tx_create(os); 1761 dmu_tx_hold_bonus(tx, lr->lr_foid); 1762 1763 txg = ztest_tx_assign(tx, TXG_WAIT, FTAG); 1764 if (txg == 0) { 1765 dmu_buf_rele(db, FTAG); 1766 ztest_object_unlock(zd, lr->lr_foid); 1767 return (ENOSPC); 1768 } 1769 1770 bbt = ztest_bt_bonus(db); 1771 ASSERT3U(bbt->bt_magic, ==, BT_MAGIC); 1772 crtxg = bbt->bt_crtxg; 1773 lrtxg = lr->lr_common.lrc_txg; 1774 1775 if (zd->zd_zilog->zl_replay) { 1776 ASSERT(lr->lr_size != 0); 1777 ASSERT(lr->lr_mode != 0); 1778 ASSERT(lrtxg != 0); 1779 } else { 1780 /* 1781 * Randomly change the size and increment the generation. 1782 */ 1783 lr->lr_size = (ztest_random(db->db_size / sizeof (*bbt)) + 1) * 1784 sizeof (*bbt); 1785 lr->lr_mode = bbt->bt_gen + 1; 1786 ASSERT(lrtxg == 0); 1787 } 1788 1789 /* 1790 * Verify that the current bonus buffer is not newer than our txg. 
1791 */ 1792 ztest_bt_verify(bbt, os, lr->lr_foid, -1ULL, lr->lr_mode, 1793 MAX(txg, lrtxg), crtxg); 1794 1795 dmu_buf_will_dirty(db, tx); 1796 1797 ASSERT3U(lr->lr_size, >=, sizeof (*bbt)); 1798 ASSERT3U(lr->lr_size, <=, db->db_size); 1799 VERIFY0(dmu_set_bonus(db, lr->lr_size, tx)); 1800 bbt = ztest_bt_bonus(db); 1801 1802 ztest_bt_generate(bbt, os, lr->lr_foid, -1ULL, lr->lr_mode, txg, crtxg); 1803 1804 dmu_buf_rele(db, FTAG); 1805 1806 (void) ztest_log_setattr(zd, tx, lr); 1807 1808 dmu_tx_commit(tx); 1809 1810 ztest_object_unlock(zd, lr->lr_foid); 1811 1812 return (0); 1813} 1814 1815zil_replay_func_t *ztest_replay_vector[TX_MAX_TYPE] = { 1816 NULL, /* 0 no such transaction type */ 1817 ztest_replay_create, /* TX_CREATE */ 1818 NULL, /* TX_MKDIR */ 1819 NULL, /* TX_MKXATTR */ 1820 NULL, /* TX_SYMLINK */ 1821 ztest_replay_remove, /* TX_REMOVE */ 1822 NULL, /* TX_RMDIR */ 1823 NULL, /* TX_LINK */ 1824 NULL, /* TX_RENAME */ 1825 ztest_replay_write, /* TX_WRITE */ 1826 ztest_replay_truncate, /* TX_TRUNCATE */ 1827 ztest_replay_setattr, /* TX_SETATTR */ 1828 NULL, /* TX_ACL */ 1829 NULL, /* TX_CREATE_ACL */ 1830 NULL, /* TX_CREATE_ATTR */ 1831 NULL, /* TX_CREATE_ACL_ATTR */ 1832 NULL, /* TX_MKDIR_ACL */ 1833 NULL, /* TX_MKDIR_ATTR */ 1834 NULL, /* TX_MKDIR_ACL_ATTR */ 1835 NULL, /* TX_WRITE2 */ 1836}; 1837 1838/* 1839 * ZIL get_data callbacks 1840 */ 1841 1842static void 1843ztest_get_done(zgd_t *zgd, int error) 1844{ 1845 ztest_ds_t *zd = zgd->zgd_private; 1846 uint64_t object = zgd->zgd_rl->rl_object; 1847 1848 if (zgd->zgd_db) 1849 dmu_buf_rele(zgd->zgd_db, zgd); 1850 1851 ztest_range_unlock(zgd->zgd_rl); 1852 ztest_object_unlock(zd, object); 1853 1854 if (error == 0 && zgd->zgd_bp) 1855 zil_lwb_add_block(zgd->zgd_lwb, zgd->zgd_bp); 1856 1857 umem_free(zgd, sizeof (*zgd)); 1858} 1859 1860static int 1861ztest_get_data(void *arg, lr_write_t *lr, char *buf, struct lwb *lwb, 1862 zio_t *zio) 1863{ 1864 ztest_ds_t *zd = arg; 1865 objset_t *os = zd->zd_os; 1866 uint64_t object = lr->lr_foid; 1867 uint64_t offset = lr->lr_offset; 1868 uint64_t size = lr->lr_length; 1869 uint64_t txg = lr->lr_common.lrc_txg; 1870 uint64_t crtxg; 1871 dmu_object_info_t doi; 1872 dmu_buf_t *db; 1873 zgd_t *zgd; 1874 int error; 1875 1876 ASSERT3P(lwb, !=, NULL); 1877 ASSERT3P(zio, !=, NULL); 1878 ASSERT3U(size, !=, 0); 1879 1880 ztest_object_lock(zd, object, RL_READER); 1881 error = dmu_bonus_hold(os, object, FTAG, &db); 1882 if (error) { 1883 ztest_object_unlock(zd, object); 1884 return (error); 1885 } 1886 1887 crtxg = ztest_bt_bonus(db)->bt_crtxg; 1888 1889 if (crtxg == 0 || crtxg > txg) { 1890 dmu_buf_rele(db, FTAG); 1891 ztest_object_unlock(zd, object); 1892 return (ENOENT); 1893 } 1894 1895 dmu_object_info_from_db(db, &doi); 1896 dmu_buf_rele(db, FTAG); 1897 db = NULL; 1898 1899 zgd = umem_zalloc(sizeof (*zgd), UMEM_NOFAIL); 1900 zgd->zgd_lwb = lwb; 1901 zgd->zgd_private = zd; 1902 1903 if (buf != NULL) { /* immediate write */ 1904 zgd->zgd_rl = ztest_range_lock(zd, object, offset, size, 1905 RL_READER); 1906 1907 error = dmu_read(os, object, offset, size, buf, 1908 DMU_READ_NO_PREFETCH); 1909 ASSERT(error == 0); 1910 } else { 1911 size = doi.doi_data_block_size; 1912 if (ISP2(size)) { 1913 offset = P2ALIGN(offset, size); 1914 } else { 1915 ASSERT(offset < size); 1916 offset = 0; 1917 } 1918 1919 zgd->zgd_rl = ztest_range_lock(zd, object, offset, size, 1920 RL_READER); 1921 1922 error = dmu_buf_hold(os, object, offset, zgd, &db, 1923 DMU_READ_NO_PREFETCH); 1924 1925 if (error == 0) { 1926 blkptr_t *bp = 
&lr->lr_blkptr; 1927 1928 zgd->zgd_db = db; 1929 zgd->zgd_bp = bp; 1930 1931 ASSERT(db->db_offset == offset); 1932 ASSERT(db->db_size == size); 1933 1934 error = dmu_sync(zio, lr->lr_common.lrc_txg, 1935 ztest_get_done, zgd); 1936 1937 if (error == 0) 1938 return (0); 1939 } 1940 } 1941 1942 ztest_get_done(zgd, error); 1943 1944 return (error); 1945} 1946 1947static void * 1948ztest_lr_alloc(size_t lrsize, char *name) 1949{ 1950 char *lr; 1951 size_t namesize = name ? strlen(name) + 1 : 0; 1952 1953 lr = umem_zalloc(lrsize + namesize, UMEM_NOFAIL); 1954 1955 if (name) 1956 bcopy(name, lr + lrsize, namesize); 1957 1958 return (lr); 1959} 1960 1961void 1962ztest_lr_free(void *lr, size_t lrsize, char *name) 1963{ 1964 size_t namesize = name ? strlen(name) + 1 : 0; 1965 1966 umem_free(lr, lrsize + namesize); 1967} 1968 1969/* 1970 * Lookup a bunch of objects. Returns the number of objects not found. 1971 */ 1972static int 1973ztest_lookup(ztest_ds_t *zd, ztest_od_t *od, int count) 1974{ 1975 int missing = 0; 1976 int error; 1977 1978 ASSERT(MUTEX_HELD(&zd->zd_dirobj_lock)); 1979 1980 for (int i = 0; i < count; i++, od++) { 1981 od->od_object = 0; 1982 error = zap_lookup(zd->zd_os, od->od_dir, od->od_name, 1983 sizeof (uint64_t), 1, &od->od_object); 1984 if (error) { 1985 ASSERT(error == ENOENT); 1986 ASSERT(od->od_object == 0); 1987 missing++; 1988 } else { 1989 dmu_buf_t *db; 1990 ztest_block_tag_t *bbt; 1991 dmu_object_info_t doi; 1992 1993 ASSERT(od->od_object != 0); 1994 ASSERT(missing == 0); /* there should be no gaps */ 1995 1996 ztest_object_lock(zd, od->od_object, RL_READER); 1997 VERIFY3U(0, ==, dmu_bonus_hold(zd->zd_os, 1998 od->od_object, FTAG, &db)); 1999 dmu_object_info_from_db(db, &doi); 2000 bbt = ztest_bt_bonus(db); 2001 ASSERT3U(bbt->bt_magic, ==, BT_MAGIC); 2002 od->od_type = doi.doi_type; 2003 od->od_blocksize = doi.doi_data_block_size; 2004 od->od_gen = bbt->bt_gen; 2005 dmu_buf_rele(db, FTAG); 2006 ztest_object_unlock(zd, od->od_object); 2007 } 2008 } 2009 2010 return (missing); 2011} 2012 2013static int 2014ztest_create(ztest_ds_t *zd, ztest_od_t *od, int count) 2015{ 2016 int missing = 0; 2017 2018 ASSERT(MUTEX_HELD(&zd->zd_dirobj_lock)); 2019 2020 for (int i = 0; i < count; i++, od++) { 2021 if (missing) { 2022 od->od_object = 0; 2023 missing++; 2024 continue; 2025 } 2026 2027 lr_create_t *lr = ztest_lr_alloc(sizeof (*lr), od->od_name); 2028 2029 lr->lr_doid = od->od_dir; 2030 lr->lr_foid = 0; /* 0 to allocate, > 0 to claim */ 2031 lr->lrz_type = od->od_crtype; 2032 lr->lrz_blocksize = od->od_crblocksize; 2033 lr->lrz_ibshift = ztest_random_ibshift(); 2034 lr->lrz_bonustype = DMU_OT_UINT64_OTHER; 2035 lr->lrz_bonuslen = dmu_bonus_max(); 2036 lr->lr_gen = od->od_crgen; 2037 lr->lr_crtime[0] = time(NULL); 2038 2039 if (ztest_replay_create(zd, lr, B_FALSE) != 0) { 2040 ASSERT(missing == 0); 2041 od->od_object = 0; 2042 missing++; 2043 } else { 2044 od->od_object = lr->lr_foid; 2045 od->od_type = od->od_crtype; 2046 od->od_blocksize = od->od_crblocksize; 2047 od->od_gen = od->od_crgen; 2048 ASSERT(od->od_object != 0); 2049 } 2050 2051 ztest_lr_free(lr, sizeof (*lr), od->od_name); 2052 } 2053 2054 return (missing); 2055} 2056 2057static int 2058ztest_remove(ztest_ds_t *zd, ztest_od_t *od, int count) 2059{ 2060 int missing = 0; 2061 int error; 2062 2063 ASSERT(MUTEX_HELD(&zd->zd_dirobj_lock)); 2064 2065 od += count - 1; 2066 2067 for (int i = count - 1; i >= 0; i--, od--) { 2068 if (missing) { 2069 missing++; 2070 continue; 2071 } 2072 2073 /* 2074 * No object was found. 
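		 * There is nothing to remove in that case, so the entry is
		 * simply skipped.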
2075 */ 2076 if (od->od_object == 0) 2077 continue; 2078 2079 lr_remove_t *lr = ztest_lr_alloc(sizeof (*lr), od->od_name); 2080 2081 lr->lr_doid = od->od_dir; 2082 2083 if ((error = ztest_replay_remove(zd, lr, B_FALSE)) != 0) { 2084 ASSERT3U(error, ==, ENOSPC); 2085 missing++; 2086 } else { 2087 od->od_object = 0; 2088 } 2089 ztest_lr_free(lr, sizeof (*lr), od->od_name); 2090 } 2091 2092 return (missing); 2093} 2094 2095static int 2096ztest_write(ztest_ds_t *zd, uint64_t object, uint64_t offset, uint64_t size, 2097 void *data) 2098{ 2099 lr_write_t *lr; 2100 int error; 2101 2102 lr = ztest_lr_alloc(sizeof (*lr) + size, NULL); 2103 2104 lr->lr_foid = object; 2105 lr->lr_offset = offset; 2106 lr->lr_length = size; 2107 lr->lr_blkoff = 0; 2108 BP_ZERO(&lr->lr_blkptr); 2109 2110 bcopy(data, lr + 1, size); 2111 2112 error = ztest_replay_write(zd, lr, B_FALSE); 2113 2114 ztest_lr_free(lr, sizeof (*lr) + size, NULL); 2115 2116 return (error); 2117} 2118 2119static int 2120ztest_truncate(ztest_ds_t *zd, uint64_t object, uint64_t offset, uint64_t size) 2121{ 2122 lr_truncate_t *lr; 2123 int error; 2124 2125 lr = ztest_lr_alloc(sizeof (*lr), NULL); 2126 2127 lr->lr_foid = object; 2128 lr->lr_offset = offset; 2129 lr->lr_length = size; 2130 2131 error = ztest_replay_truncate(zd, lr, B_FALSE); 2132 2133 ztest_lr_free(lr, sizeof (*lr), NULL); 2134 2135 return (error); 2136} 2137 2138static int 2139ztest_setattr(ztest_ds_t *zd, uint64_t object) 2140{ 2141 lr_setattr_t *lr; 2142 int error; 2143 2144 lr = ztest_lr_alloc(sizeof (*lr), NULL); 2145 2146 lr->lr_foid = object; 2147 lr->lr_size = 0; 2148 lr->lr_mode = 0; 2149 2150 error = ztest_replay_setattr(zd, lr, B_FALSE); 2151 2152 ztest_lr_free(lr, sizeof (*lr), NULL); 2153 2154 return (error); 2155} 2156 2157static void 2158ztest_prealloc(ztest_ds_t *zd, uint64_t object, uint64_t offset, uint64_t size) 2159{ 2160 objset_t *os = zd->zd_os; 2161 dmu_tx_t *tx; 2162 uint64_t txg; 2163 rl_t *rl; 2164 2165 txg_wait_synced(dmu_objset_pool(os), 0); 2166 2167 ztest_object_lock(zd, object, RL_READER); 2168 rl = ztest_range_lock(zd, object, offset, size, RL_WRITER); 2169 2170 tx = dmu_tx_create(os); 2171 2172 dmu_tx_hold_write(tx, object, offset, size); 2173 2174 txg = ztest_tx_assign(tx, TXG_WAIT, FTAG); 2175 2176 if (txg != 0) { 2177 dmu_prealloc(os, object, offset, size, tx); 2178 dmu_tx_commit(tx); 2179 txg_wait_synced(dmu_objset_pool(os), txg); 2180 } else { 2181 (void) dmu_free_long_range(os, object, offset, size); 2182 } 2183 2184 ztest_range_unlock(rl); 2185 ztest_object_unlock(zd, object); 2186} 2187 2188static void 2189ztest_io(ztest_ds_t *zd, uint64_t object, uint64_t offset) 2190{ 2191 int err; 2192 ztest_block_tag_t wbt; 2193 dmu_object_info_t doi; 2194 enum ztest_io_type io_type; 2195 uint64_t blocksize; 2196 void *data; 2197 2198 VERIFY(dmu_object_info(zd->zd_os, object, &doi) == 0); 2199 blocksize = doi.doi_data_block_size; 2200 data = umem_alloc(blocksize, UMEM_NOFAIL); 2201 2202 /* 2203 * Pick an i/o type at random, biased toward writing block tags. 
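 *
 * The bias comes from the two draws below: ztest_random(ZTEST_IO_TYPES)
 * picks a type uniformly, and then a separate coin flip forces the result
 * to ZTEST_IO_WRITE_TAG half of the time.  Assuming the first draw really
 * is uniform, each type is chosen with probability
 *
 *	P(other type)		= 1 / (2 * ZTEST_IO_TYPES)
 *	P(ZTEST_IO_WRITE_TAG)	= 1/2 + 1 / (2 * ZTEST_IO_TYPES)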
2204 */ 2205 io_type = ztest_random(ZTEST_IO_TYPES); 2206 if (ztest_random(2) == 0) 2207 io_type = ZTEST_IO_WRITE_TAG; 2208 2209 rw_enter(&zd->zd_zilog_lock, RW_READER); 2210 2211 switch (io_type) { 2212 2213 case ZTEST_IO_WRITE_TAG: 2214 ztest_bt_generate(&wbt, zd->zd_os, object, offset, 0, 0, 0); 2215 (void) ztest_write(zd, object, offset, sizeof (wbt), &wbt); 2216 break; 2217 2218 case ZTEST_IO_WRITE_PATTERN: 2219 (void) memset(data, 'a' + (object + offset) % 5, blocksize); 2220 if (ztest_random(2) == 0) { 2221 /* 2222 * Induce fletcher2 collisions to ensure that 2223 * zio_ddt_collision() detects and resolves them 2224 * when using fletcher2-verify for deduplication. 2225 */ 2226 ((uint64_t *)data)[0] ^= 1ULL << 63; 2227 ((uint64_t *)data)[4] ^= 1ULL << 63; 2228 } 2229 (void) ztest_write(zd, object, offset, blocksize, data); 2230 break; 2231 2232 case ZTEST_IO_WRITE_ZEROES: 2233 bzero(data, blocksize); 2234 (void) ztest_write(zd, object, offset, blocksize, data); 2235 break; 2236 2237 case ZTEST_IO_TRUNCATE: 2238 (void) ztest_truncate(zd, object, offset, blocksize); 2239 break; 2240 2241 case ZTEST_IO_SETATTR: 2242 (void) ztest_setattr(zd, object); 2243 break; 2244 2245 case ZTEST_IO_REWRITE: 2246 rw_enter(&ztest_name_lock, RW_READER); 2247 err = ztest_dsl_prop_set_uint64(zd->zd_name, 2248 ZFS_PROP_CHECKSUM, spa_dedup_checksum(ztest_spa), 2249 B_FALSE); 2250 VERIFY(err == 0 || err == ENOSPC); 2251 err = ztest_dsl_prop_set_uint64(zd->zd_name, 2252 ZFS_PROP_COMPRESSION, 2253 ztest_random_dsl_prop(ZFS_PROP_COMPRESSION), 2254 B_FALSE); 2255 VERIFY(err == 0 || err == ENOSPC); 2256 rw_exit(&ztest_name_lock); 2257 2258 VERIFY0(dmu_read(zd->zd_os, object, offset, blocksize, data, 2259 DMU_READ_NO_PREFETCH)); 2260 2261 (void) ztest_write(zd, object, offset, blocksize, data); 2262 break; 2263 } 2264 2265 rw_exit(&zd->zd_zilog_lock); 2266 2267 umem_free(data, blocksize); 2268} 2269 2270/* 2271 * Initialize an object description template. 2272 */ 2273static void 2274ztest_od_init(ztest_od_t *od, uint64_t id, char *tag, uint64_t index, 2275 dmu_object_type_t type, uint64_t blocksize, uint64_t gen) 2276{ 2277 od->od_dir = ZTEST_DIROBJ; 2278 od->od_object = 0; 2279 2280 od->od_crtype = type; 2281 od->od_crblocksize = blocksize ? blocksize : ztest_random_blocksize(); 2282 od->od_crgen = gen; 2283 2284 od->od_type = DMU_OT_NONE; 2285 od->od_blocksize = 0; 2286 od->od_gen = 0; 2287 2288 (void) snprintf(od->od_name, sizeof (od->od_name), "%s(%lld)[%llu]", 2289 tag, (int64_t)id, index); 2290} 2291 2292/* 2293 * Lookup or create the objects for a test using the od template. 2294 * If the objects do not all exist, or if 'remove' is specified, 2295 * remove any existing objects and create new ones. Otherwise, 2296 * use the existing objects. 
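 *
 * A typical caller fills in a small od[] template and lets this routine
 * decide whether the previous pass's objects can be reused; this is the
 * pattern used by the dmu read/write tests later in this file:
 *
 *	ztest_od_t od[2];
 *
 *	ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_UINT64_OTHER, 0, chunksize);
 *	ztest_od_init(&od[1], id, FTAG, 1, DMU_OT_UINT64_OTHER, 0, chunksize);
 *
 *	if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0)
 *		return;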
2297 */ 2298static int 2299ztest_object_init(ztest_ds_t *zd, ztest_od_t *od, size_t size, boolean_t remove) 2300{ 2301 int count = size / sizeof (*od); 2302 int rv = 0; 2303 2304 mutex_enter(&zd->zd_dirobj_lock); 2305 if ((ztest_lookup(zd, od, count) != 0 || remove) && 2306 (ztest_remove(zd, od, count) != 0 || 2307 ztest_create(zd, od, count) != 0)) 2308 rv = -1; 2309 zd->zd_od = od; 2310 mutex_exit(&zd->zd_dirobj_lock); 2311 2312 return (rv); 2313} 2314 2315/* ARGSUSED */ 2316void 2317ztest_zil_commit(ztest_ds_t *zd, uint64_t id) 2318{ 2319 zilog_t *zilog = zd->zd_zilog; 2320 2321 rw_enter(&zd->zd_zilog_lock, RW_READER); 2322 2323 zil_commit(zilog, ztest_random(ZTEST_OBJECTS)); 2324 2325 /* 2326 * Remember the committed values in zd, which is in parent/child 2327 * shared memory. If we die, the next iteration of ztest_run() 2328 * will verify that the log really does contain this record. 2329 */ 2330 mutex_enter(&zilog->zl_lock); 2331 ASSERT(zd->zd_shared != NULL); 2332 ASSERT3U(zd->zd_shared->zd_seq, <=, zilog->zl_commit_lr_seq); 2333 zd->zd_shared->zd_seq = zilog->zl_commit_lr_seq; 2334 mutex_exit(&zilog->zl_lock); 2335 2336 rw_exit(&zd->zd_zilog_lock); 2337} 2338 2339/* 2340 * This function is designed to simulate the operations that occur during a 2341 * mount/unmount operation. We hold the dataset across these operations in an 2342 * attempt to expose any implicit assumptions about ZIL management. 2343 */ 2344/* ARGSUSED */ 2345void 2346ztest_zil_remount(ztest_ds_t *zd, uint64_t id) 2347{ 2348 objset_t *os = zd->zd_os; 2349 2350 /* 2351 * We grab the zd_dirobj_lock to ensure that no other thread is 2352 * updating the zil (i.e. adding in-memory log records) and the 2353 * zd_zilog_lock to block any I/O. 2354 */ 2355 mutex_enter(&zd->zd_dirobj_lock); 2356 rw_enter(&zd->zd_zilog_lock, RW_WRITER); 2357 2358 /* zfsvfs_teardown() */ 2359 zil_close(zd->zd_zilog); 2360 2361 /* zfsvfs_setup() */ 2362 VERIFY(zil_open(os, ztest_get_data) == zd->zd_zilog); 2363 zil_replay(os, zd, ztest_replay_vector); 2364 2365 rw_exit(&zd->zd_zilog_lock); 2366 mutex_exit(&zd->zd_dirobj_lock); 2367} 2368 2369/* 2370 * Verify that we can't destroy an active pool, create an existing pool, 2371 * or create a pool with a bad vdev spec. 2372 */ 2373/* ARGSUSED */ 2374void 2375ztest_spa_create_destroy(ztest_ds_t *zd, uint64_t id) 2376{ 2377 ztest_shared_opts_t *zo = &ztest_opts; 2378 spa_t *spa; 2379 nvlist_t *nvroot; 2380 2381 /* 2382 * Attempt to create using a bad file. 2383 */ 2384 nvroot = make_vdev_root("/dev/bogus", NULL, NULL, 0, 0, 0, 0, 0, 1); 2385 VERIFY3U(ENOENT, ==, 2386 spa_create("ztest_bad_file", nvroot, NULL, NULL)); 2387 nvlist_free(nvroot); 2388 2389 /* 2390 * Attempt to create using a bad mirror. 2391 */ 2392 nvroot = make_vdev_root("/dev/bogus", NULL, NULL, 0, 0, 0, 0, 2, 1); 2393 VERIFY3U(ENOENT, ==, 2394 spa_create("ztest_bad_mirror", nvroot, NULL, NULL)); 2395 nvlist_free(nvroot); 2396 2397 /* 2398 * Attempt to create an existing pool. It shouldn't matter 2399 * what's in the nvroot; we should fail with EEXIST. 
2400 */ 2401 rw_enter(&ztest_name_lock, RW_READER); 2402 nvroot = make_vdev_root("/dev/bogus", NULL, NULL, 0, 0, 0, 0, 0, 1); 2403 VERIFY3U(EEXIST, ==, spa_create(zo->zo_pool, nvroot, NULL, NULL)); 2404 nvlist_free(nvroot); 2405 VERIFY3U(0, ==, spa_open(zo->zo_pool, &spa, FTAG)); 2406 VERIFY3U(EBUSY, ==, spa_destroy(zo->zo_pool)); 2407 spa_close(spa, FTAG); 2408 2409 rw_exit(&ztest_name_lock); 2410} 2411 2412/* ARGSUSED */ 2413void 2414ztest_spa_upgrade(ztest_ds_t *zd, uint64_t id) 2415{ 2416 spa_t *spa; 2417 uint64_t initial_version = SPA_VERSION_INITIAL; 2418 uint64_t version, newversion; 2419 nvlist_t *nvroot, *props; 2420 char *name; 2421 2422 mutex_enter(&ztest_vdev_lock); 2423 name = kmem_asprintf("%s_upgrade", ztest_opts.zo_pool); 2424 2425 /* 2426 * Clean up from previous runs. 2427 */ 2428 (void) spa_destroy(name); 2429 2430 nvroot = make_vdev_root(NULL, NULL, name, ztest_opts.zo_vdev_size, 0, 2431 0, ztest_opts.zo_raidz, ztest_opts.zo_mirrors, 1); 2432 2433 /* 2434 * If we're configuring a RAIDZ device then make sure that the 2435 * the initial version is capable of supporting that feature. 2436 */ 2437 switch (ztest_opts.zo_raidz_parity) { 2438 case 0: 2439 case 1: 2440 initial_version = SPA_VERSION_INITIAL; 2441 break; 2442 case 2: 2443 initial_version = SPA_VERSION_RAIDZ2; 2444 break; 2445 case 3: 2446 initial_version = SPA_VERSION_RAIDZ3; 2447 break; 2448 } 2449 2450 /* 2451 * Create a pool with a spa version that can be upgraded. Pick 2452 * a value between initial_version and SPA_VERSION_BEFORE_FEATURES. 2453 */ 2454 do { 2455 version = ztest_random_spa_version(initial_version); 2456 } while (version > SPA_VERSION_BEFORE_FEATURES); 2457 2458 props = fnvlist_alloc(); 2459 fnvlist_add_uint64(props, 2460 zpool_prop_to_name(ZPOOL_PROP_VERSION), version); 2461 VERIFY0(spa_create(name, nvroot, props, NULL)); 2462 fnvlist_free(nvroot); 2463 fnvlist_free(props); 2464 2465 VERIFY0(spa_open(name, &spa, FTAG)); 2466 VERIFY3U(spa_version(spa), ==, version); 2467 newversion = ztest_random_spa_version(version + 1); 2468 2469 if (ztest_opts.zo_verbose >= 4) { 2470 (void) printf("upgrading spa version from %llu to %llu\n", 2471 (u_longlong_t)version, (u_longlong_t)newversion); 2472 } 2473 2474 spa_upgrade(spa, newversion); 2475 VERIFY3U(spa_version(spa), >, version); 2476 VERIFY3U(spa_version(spa), ==, fnvlist_lookup_uint64(spa->spa_config, 2477 zpool_prop_to_name(ZPOOL_PROP_VERSION))); 2478 spa_close(spa, FTAG); 2479 2480 strfree(name); 2481 mutex_exit(&ztest_vdev_lock); 2482} 2483 2484static void 2485ztest_spa_checkpoint(spa_t *spa) 2486{ 2487 ASSERT(MUTEX_HELD(&ztest_checkpoint_lock)); 2488 2489 int error = spa_checkpoint(spa->spa_name); 2490 2491 switch (error) { 2492 case 0: 2493 case ZFS_ERR_DEVRM_IN_PROGRESS: 2494 case ZFS_ERR_DISCARDING_CHECKPOINT: 2495 case ZFS_ERR_CHECKPOINT_EXISTS: 2496 break; 2497 case ENOSPC: 2498 ztest_record_enospc(FTAG); 2499 break; 2500 default: 2501 fatal(0, "spa_checkpoint(%s) = %d", spa->spa_name, error); 2502 } 2503} 2504 2505static void 2506ztest_spa_discard_checkpoint(spa_t *spa) 2507{ 2508 ASSERT(MUTEX_HELD(&ztest_checkpoint_lock)); 2509 2510 int error = spa_checkpoint_discard(spa->spa_name); 2511 2512 switch (error) { 2513 case 0: 2514 case ZFS_ERR_DISCARDING_CHECKPOINT: 2515 case ZFS_ERR_NO_CHECKPOINT: 2516 break; 2517 default: 2518 fatal(0, "spa_discard_checkpoint(%s) = %d", 2519 spa->spa_name, error); 2520 } 2521 2522} 2523 2524/* ARGSUSED */ 2525void 2526ztest_spa_checkpoint_create_discard(ztest_ds_t *zd, uint64_t id) 2527{ 2528 spa_t *spa = 
ztest_spa; 2529 2530 mutex_enter(&ztest_checkpoint_lock); 2531 if (ztest_random(2) == 0) { 2532 ztest_spa_checkpoint(spa); 2533 } else { 2534 ztest_spa_discard_checkpoint(spa); 2535 } 2536 mutex_exit(&ztest_checkpoint_lock); 2537} 2538 2539 2540static vdev_t * 2541vdev_lookup_by_path(vdev_t *vd, const char *path) 2542{ 2543 vdev_t *mvd; 2544 2545 if (vd->vdev_path != NULL && strcmp(path, vd->vdev_path) == 0) 2546 return (vd); 2547 2548 for (int c = 0; c < vd->vdev_children; c++) 2549 if ((mvd = vdev_lookup_by_path(vd->vdev_child[c], path)) != 2550 NULL) 2551 return (mvd); 2552 2553 return (NULL); 2554} 2555 2556/* 2557 * Find the first available hole which can be used as a top-level. 2558 */ 2559int 2560find_vdev_hole(spa_t *spa) 2561{ 2562 vdev_t *rvd = spa->spa_root_vdev; 2563 int c; 2564 2565 ASSERT(spa_config_held(spa, SCL_VDEV, RW_READER) == SCL_VDEV); 2566 2567 for (c = 0; c < rvd->vdev_children; c++) { 2568 vdev_t *cvd = rvd->vdev_child[c]; 2569 2570 if (cvd->vdev_ishole) 2571 break; 2572 } 2573 return (c); 2574} 2575 2576/* 2577 * Verify that vdev_add() works as expected. 2578 */ 2579/* ARGSUSED */ 2580void 2581ztest_vdev_add_remove(ztest_ds_t *zd, uint64_t id) 2582{ 2583 ztest_shared_t *zs = ztest_shared; 2584 spa_t *spa = ztest_spa; 2585 uint64_t leaves; 2586 uint64_t guid; 2587 nvlist_t *nvroot; 2588 int error; 2589 2590 mutex_enter(&ztest_vdev_lock); 2591 leaves = MAX(zs->zs_mirrors + zs->zs_splits, 1) * ztest_opts.zo_raidz; 2592 2593 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); 2594 2595 ztest_shared->zs_vdev_next_leaf = find_vdev_hole(spa) * leaves; 2596 2597 /* 2598 * If we have slogs then remove them 1/4 of the time. 2599 */ 2600 if (spa_has_slogs(spa) && ztest_random(4) == 0) { 2601 /* 2602 * Grab the guid from the head of the log class rotor. 2603 */ 2604 guid = spa_log_class(spa)->mc_rotor->mg_vd->vdev_guid; 2605 2606 spa_config_exit(spa, SCL_VDEV, FTAG); 2607 2608 /* 2609 * We have to grab the zs_name_lock as writer to 2610 * prevent a race between removing a slog (dmu_objset_find) 2611 * and destroying a dataset. Removing the slog will 2612 * grab a reference on the dataset which may cause 2613 * dmu_objset_destroy() to fail with EBUSY thus 2614 * leaving the dataset in an inconsistent state. 2615 */ 2616 rw_enter(&ztest_name_lock, RW_WRITER); 2617 error = spa_vdev_remove(spa, guid, B_FALSE); 2618 rw_exit(&ztest_name_lock); 2619 2620 switch (error) { 2621 case 0: 2622 case EEXIST: 2623 case ZFS_ERR_CHECKPOINT_EXISTS: 2624 case ZFS_ERR_DISCARDING_CHECKPOINT: 2625 break; 2626 default: 2627 fatal(0, "spa_vdev_remove() = %d", error); 2628 } 2629 } else { 2630 spa_config_exit(spa, SCL_VDEV, FTAG); 2631 2632 /* 2633 * Make 1/4 of the devices be log devices. 2634 */ 2635 nvroot = make_vdev_root(NULL, NULL, NULL, 2636 ztest_opts.zo_vdev_size, 0, 2637 ztest_random(4) == 0, ztest_opts.zo_raidz, 2638 zs->zs_mirrors, 1); 2639 2640 error = spa_vdev_add(spa, nvroot); 2641 nvlist_free(nvroot); 2642 2643 switch (error) { 2644 case 0: 2645 break; 2646 case ENOSPC: 2647 ztest_record_enospc("spa_vdev_add"); 2648 break; 2649 default: 2650 fatal(0, "spa_vdev_add() = %d", error); 2651 } 2652 } 2653 2654 mutex_exit(&ztest_vdev_lock); 2655} 2656 2657/* 2658 * Verify that adding/removing aux devices (l2arc, hot spare) works as expected. 
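 *
 * Roughly: the spare and l2cache classes are exercised with equal
 * probability; when aux devices already exist, one of them is removed
 * about one time in four, and otherwise a new device is added under the
 * first ztest_aux_template path that is not already in use.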
2659 */ 2660/* ARGSUSED */ 2661void 2662ztest_vdev_aux_add_remove(ztest_ds_t *zd, uint64_t id) 2663{ 2664 ztest_shared_t *zs = ztest_shared; 2665 spa_t *spa = ztest_spa; 2666 vdev_t *rvd = spa->spa_root_vdev; 2667 spa_aux_vdev_t *sav; 2668 char *aux; 2669 uint64_t guid = 0; 2670 int error; 2671 2672 if (ztest_random(2) == 0) { 2673 sav = &spa->spa_spares; 2674 aux = ZPOOL_CONFIG_SPARES; 2675 } else { 2676 sav = &spa->spa_l2cache; 2677 aux = ZPOOL_CONFIG_L2CACHE; 2678 } 2679 2680 mutex_enter(&ztest_vdev_lock); 2681 2682 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); 2683 2684 if (sav->sav_count != 0 && ztest_random(4) == 0) { 2685 /* 2686 * Pick a random device to remove. 2687 */ 2688 guid = sav->sav_vdevs[ztest_random(sav->sav_count)]->vdev_guid; 2689 } else { 2690 /* 2691 * Find an unused device we can add. 2692 */ 2693 zs->zs_vdev_aux = 0; 2694 for (;;) { 2695 char path[MAXPATHLEN]; 2696 int c; 2697 (void) snprintf(path, sizeof (path), ztest_aux_template, 2698 ztest_opts.zo_dir, ztest_opts.zo_pool, aux, 2699 zs->zs_vdev_aux); 2700 for (c = 0; c < sav->sav_count; c++) 2701 if (strcmp(sav->sav_vdevs[c]->vdev_path, 2702 path) == 0) 2703 break; 2704 if (c == sav->sav_count && 2705 vdev_lookup_by_path(rvd, path) == NULL) 2706 break; 2707 zs->zs_vdev_aux++; 2708 } 2709 } 2710 2711 spa_config_exit(spa, SCL_VDEV, FTAG); 2712 2713 if (guid == 0) { 2714 /* 2715 * Add a new device. 2716 */ 2717 nvlist_t *nvroot = make_vdev_root(NULL, aux, NULL, 2718 (ztest_opts.zo_vdev_size * 5) / 4, 0, 0, 0, 0, 1); 2719 error = spa_vdev_add(spa, nvroot); 2720 2721 switch (error) { 2722 case 0: 2723 break; 2724 default: 2725 fatal(0, "spa_vdev_add(%p) = %d", nvroot, error); 2726 } 2727 nvlist_free(nvroot); 2728 } else { 2729 /* 2730 * Remove an existing device. Sometimes, dirty its 2731 * vdev state first to make sure we handle removal 2732 * of devices that have pending state changes. 
2733 */ 2734 if (ztest_random(2) == 0) 2735 (void) vdev_online(spa, guid, 0, NULL); 2736 2737 error = spa_vdev_remove(spa, guid, B_FALSE); 2738 2739 switch (error) { 2740 case 0: 2741 case EBUSY: 2742 case ZFS_ERR_CHECKPOINT_EXISTS: 2743 case ZFS_ERR_DISCARDING_CHECKPOINT: 2744 break; 2745 default: 2746 fatal(0, "spa_vdev_remove(%llu) = %d", guid, error); 2747 } 2748 } 2749 2750 mutex_exit(&ztest_vdev_lock); 2751} 2752 2753/* 2754 * split a pool if it has mirror tlvdevs 2755 */ 2756/* ARGSUSED */ 2757void 2758ztest_split_pool(ztest_ds_t *zd, uint64_t id) 2759{ 2760 ztest_shared_t *zs = ztest_shared; 2761 spa_t *spa = ztest_spa; 2762 vdev_t *rvd = spa->spa_root_vdev; 2763 nvlist_t *tree, **child, *config, *split, **schild; 2764 uint_t c, children, schildren = 0, lastlogid = 0; 2765 int error = 0; 2766 2767 mutex_enter(&ztest_vdev_lock); 2768 2769 /* ensure we have a useable config; mirrors of raidz aren't supported */ 2770 if (zs->zs_mirrors < 3 || ztest_opts.zo_raidz > 1) { 2771 mutex_exit(&ztest_vdev_lock); 2772 return; 2773 } 2774 2775 /* clean up the old pool, if any */ 2776 (void) spa_destroy("splitp"); 2777 2778 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); 2779 2780 /* generate a config from the existing config */ 2781 mutex_enter(&spa->spa_props_lock); 2782 VERIFY(nvlist_lookup_nvlist(spa->spa_config, ZPOOL_CONFIG_VDEV_TREE, 2783 &tree) == 0); 2784 mutex_exit(&spa->spa_props_lock); 2785 2786 VERIFY(nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child, 2787 &children) == 0); 2788 2789 schild = malloc(rvd->vdev_children * sizeof (nvlist_t *)); 2790 for (c = 0; c < children; c++) { 2791 vdev_t *tvd = rvd->vdev_child[c]; 2792 nvlist_t **mchild; 2793 uint_t mchildren; 2794 2795 if (tvd->vdev_islog || tvd->vdev_ops == &vdev_hole_ops) { 2796 VERIFY(nvlist_alloc(&schild[schildren], NV_UNIQUE_NAME, 2797 0) == 0); 2798 VERIFY(nvlist_add_string(schild[schildren], 2799 ZPOOL_CONFIG_TYPE, VDEV_TYPE_HOLE) == 0); 2800 VERIFY(nvlist_add_uint64(schild[schildren], 2801 ZPOOL_CONFIG_IS_HOLE, 1) == 0); 2802 if (lastlogid == 0) 2803 lastlogid = schildren; 2804 ++schildren; 2805 continue; 2806 } 2807 lastlogid = 0; 2808 VERIFY(nvlist_lookup_nvlist_array(child[c], 2809 ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0); 2810 VERIFY(nvlist_dup(mchild[0], &schild[schildren++], 0) == 0); 2811 } 2812 2813 /* OK, create a config that can be used to split */ 2814 VERIFY(nvlist_alloc(&split, NV_UNIQUE_NAME, 0) == 0); 2815 VERIFY(nvlist_add_string(split, ZPOOL_CONFIG_TYPE, 2816 VDEV_TYPE_ROOT) == 0); 2817 VERIFY(nvlist_add_nvlist_array(split, ZPOOL_CONFIG_CHILDREN, schild, 2818 lastlogid != 0 ? lastlogid : schildren) == 0); 2819 2820 VERIFY(nvlist_alloc(&config, NV_UNIQUE_NAME, 0) == 0); 2821 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, split) == 0); 2822 2823 for (c = 0; c < schildren; c++) 2824 nvlist_free(schild[c]); 2825 free(schild); 2826 nvlist_free(split); 2827 2828 spa_config_exit(spa, SCL_VDEV, FTAG); 2829 2830 rw_enter(&ztest_name_lock, RW_WRITER); 2831 error = spa_vdev_split_mirror(spa, "splitp", config, NULL, B_FALSE); 2832 rw_exit(&ztest_name_lock); 2833 2834 nvlist_free(config); 2835 2836 if (error == 0) { 2837 (void) printf("successful split - results:\n"); 2838 mutex_enter(&spa_namespace_lock); 2839 show_pool_stats(spa); 2840 show_pool_stats(spa_lookup("splitp")); 2841 mutex_exit(&spa_namespace_lock); 2842 ++zs->zs_splits; 2843 --zs->zs_mirrors; 2844 } 2845 mutex_exit(&ztest_vdev_lock); 2846} 2847 2848/* 2849 * Verify that we can attach and detach devices. 
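 *
 * The interesting part is predicting how spa_vdev_attach() should fail.
 * Condensed from the expected_error logic below (first match wins):
 *
 *	parent is not a mirror/root and this isn't a simple replace	ENOTSUP
 *	new vdev is a spare and we're not replacing (or oldvd is log)	ENOTSUP
 *	new vdev is the old vdev			replacing ? 0 : EBUSY
 *	new vdev's path is already in the pool				  EBUSY
 *	new vdev is smaller than the old one			      EOVERFLOW
 *	requested ashift exceeds the top-level vdev's ashift		   EDOM
 *	otherwise							      0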
2850 */ 2851/* ARGSUSED */ 2852void 2853ztest_vdev_attach_detach(ztest_ds_t *zd, uint64_t id) 2854{ 2855 ztest_shared_t *zs = ztest_shared; 2856 spa_t *spa = ztest_spa; 2857 spa_aux_vdev_t *sav = &spa->spa_spares; 2858 vdev_t *rvd = spa->spa_root_vdev; 2859 vdev_t *oldvd, *newvd, *pvd; 2860 nvlist_t *root; 2861 uint64_t leaves; 2862 uint64_t leaf, top; 2863 uint64_t ashift = ztest_get_ashift(); 2864 uint64_t oldguid, pguid; 2865 uint64_t oldsize, newsize; 2866 char oldpath[MAXPATHLEN], newpath[MAXPATHLEN]; 2867 int replacing; 2868 int oldvd_has_siblings = B_FALSE; 2869 int newvd_is_spare = B_FALSE; 2870 int oldvd_is_log; 2871 int error, expected_error; 2872 2873 mutex_enter(&ztest_vdev_lock); 2874 leaves = MAX(zs->zs_mirrors, 1) * ztest_opts.zo_raidz; 2875 2876 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 2877 2878 /* 2879 * If a vdev is in the process of being removed, its removal may 2880 * finish while we are in progress, leading to an unexpected error 2881 * value. Don't bother trying to attach while we are in the middle 2882 * of removal. 2883 */ 2884 if (spa->spa_vdev_removal != NULL) { 2885 spa_config_exit(spa, SCL_ALL, FTAG); 2886 mutex_exit(&ztest_vdev_lock); 2887 return; 2888 } 2889 2890 /* 2891 * Decide whether to do an attach or a replace. 2892 */ 2893 replacing = ztest_random(2); 2894 2895 /* 2896 * Pick a random top-level vdev. 2897 */ 2898 top = ztest_random_vdev_top(spa, B_TRUE); 2899 2900 /* 2901 * Pick a random leaf within it. 2902 */ 2903 leaf = ztest_random(leaves); 2904 2905 /* 2906 * Locate this vdev. 2907 */ 2908 oldvd = rvd->vdev_child[top]; 2909 if (zs->zs_mirrors >= 1) { 2910 ASSERT(oldvd->vdev_ops == &vdev_mirror_ops); 2911 ASSERT(oldvd->vdev_children >= zs->zs_mirrors); 2912 oldvd = oldvd->vdev_child[leaf / ztest_opts.zo_raidz]; 2913 } 2914 if (ztest_opts.zo_raidz > 1) { 2915 ASSERT(oldvd->vdev_ops == &vdev_raidz_ops); 2916 ASSERT(oldvd->vdev_children == ztest_opts.zo_raidz); 2917 oldvd = oldvd->vdev_child[leaf % ztest_opts.zo_raidz]; 2918 } 2919 2920 /* 2921 * If we're already doing an attach or replace, oldvd may be a 2922 * mirror vdev -- in which case, pick a random child. 2923 */ 2924 while (oldvd->vdev_children != 0) { 2925 oldvd_has_siblings = B_TRUE; 2926 ASSERT(oldvd->vdev_children >= 2); 2927 oldvd = oldvd->vdev_child[ztest_random(oldvd->vdev_children)]; 2928 } 2929 2930 oldguid = oldvd->vdev_guid; 2931 oldsize = vdev_get_min_asize(oldvd); 2932 oldvd_is_log = oldvd->vdev_top->vdev_islog; 2933 (void) strcpy(oldpath, oldvd->vdev_path); 2934 pvd = oldvd->vdev_parent; 2935 pguid = pvd->vdev_guid; 2936 2937 /* 2938 * If oldvd has siblings, then half of the time, detach it. 2939 */ 2940 if (oldvd_has_siblings && ztest_random(2) == 0) { 2941 spa_config_exit(spa, SCL_ALL, FTAG); 2942 error = spa_vdev_detach(spa, oldguid, pguid, B_FALSE); 2943 if (error != 0 && error != ENODEV && error != EBUSY && 2944 error != ENOTSUP && error != ZFS_ERR_CHECKPOINT_EXISTS && 2945 error != ZFS_ERR_DISCARDING_CHECKPOINT) 2946 fatal(0, "detach (%s) returned %d", oldpath, error); 2947 mutex_exit(&ztest_vdev_lock); 2948 return; 2949 } 2950 2951 /* 2952 * For the new vdev, choose with equal probability between the two 2953 * standard paths (ending in either 'a' or 'b') or a random hot spare. 
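 *
 * (More precisely: when hot spares exist, a spare is picked about one
 * time in three; otherwise the path is built from ztest_dev_template and
 * its final character is set to 'b' half of the time.)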
2954 */ 2955 if (sav->sav_count != 0 && ztest_random(3) == 0) { 2956 newvd = sav->sav_vdevs[ztest_random(sav->sav_count)]; 2957 newvd_is_spare = B_TRUE; 2958 (void) strcpy(newpath, newvd->vdev_path); 2959 } else { 2960 (void) snprintf(newpath, sizeof (newpath), ztest_dev_template, 2961 ztest_opts.zo_dir, ztest_opts.zo_pool, 2962 top * leaves + leaf); 2963 if (ztest_random(2) == 0) 2964 newpath[strlen(newpath) - 1] = 'b'; 2965 newvd = vdev_lookup_by_path(rvd, newpath); 2966 } 2967 2968 if (newvd) { 2969 /* 2970 * Reopen to ensure the vdev's asize field isn't stale. 2971 */ 2972 vdev_reopen(newvd); 2973 newsize = vdev_get_min_asize(newvd); 2974 } else { 2975 /* 2976 * Make newsize a little bigger or smaller than oldsize. 2977 * If it's smaller, the attach should fail. 2978 * If it's larger, and we're doing a replace, 2979 * we should get dynamic LUN growth when we're done. 2980 */ 2981 newsize = 10 * oldsize / (9 + ztest_random(3)); 2982 } 2983 2984 /* 2985 * If pvd is not a mirror or root, the attach should fail with ENOTSUP, 2986 * unless it's a replace; in that case any non-replacing parent is OK. 2987 * 2988 * If newvd is already part of the pool, it should fail with EBUSY. 2989 * 2990 * If newvd is too small, it should fail with EOVERFLOW. 2991 */ 2992 if (pvd->vdev_ops != &vdev_mirror_ops && 2993 pvd->vdev_ops != &vdev_root_ops && (!replacing || 2994 pvd->vdev_ops == &vdev_replacing_ops || 2995 pvd->vdev_ops == &vdev_spare_ops)) 2996 expected_error = ENOTSUP; 2997 else if (newvd_is_spare && (!replacing || oldvd_is_log)) 2998 expected_error = ENOTSUP; 2999 else if (newvd == oldvd) 3000 expected_error = replacing ? 0 : EBUSY; 3001 else if (vdev_lookup_by_path(rvd, newpath) != NULL) 3002 expected_error = EBUSY; 3003 else if (newsize < oldsize) 3004 expected_error = EOVERFLOW; 3005 else if (ashift > oldvd->vdev_top->vdev_ashift) 3006 expected_error = EDOM; 3007 else 3008 expected_error = 0; 3009 3010 spa_config_exit(spa, SCL_ALL, FTAG); 3011 3012 /* 3013 * Build the nvlist describing newpath. 3014 */ 3015 root = make_vdev_root(newpath, NULL, NULL, newvd == NULL ? newsize : 0, 3016 ashift, 0, 0, 0, 1); 3017 3018 error = spa_vdev_attach(spa, oldguid, root, replacing); 3019 3020 nvlist_free(root); 3021 3022 /* 3023 * If our parent was the replacing vdev, but the replace completed, 3024 * then instead of failing with ENOTSUP we may either succeed, 3025 * fail with ENODEV, or fail with EOVERFLOW. 3026 */ 3027 if (expected_error == ENOTSUP && 3028 (error == 0 || error == ENODEV || error == EOVERFLOW)) 3029 expected_error = error; 3030 3031 /* 3032 * If someone grew the LUN, the replacement may be too small. 
3033 */ 3034 if (error == EOVERFLOW || error == EBUSY) 3035 expected_error = error; 3036 3037 if (error == ZFS_ERR_CHECKPOINT_EXISTS || 3038 error == ZFS_ERR_DISCARDING_CHECKPOINT) 3039 expected_error = error; 3040 3041 /* XXX workaround 6690467 */ 3042 if (error != expected_error && expected_error != EBUSY) { 3043 fatal(0, "attach (%s %llu, %s %llu, %d) " 3044 "returned %d, expected %d", 3045 oldpath, oldsize, newpath, 3046 newsize, replacing, error, expected_error); 3047 } 3048 3049 mutex_exit(&ztest_vdev_lock); 3050} 3051 3052/* ARGSUSED */ 3053void 3054ztest_device_removal(ztest_ds_t *zd, uint64_t id) 3055{ 3056 spa_t *spa = ztest_spa; 3057 vdev_t *vd; 3058 uint64_t guid; 3059 3060 mutex_enter(&ztest_vdev_lock); 3061 3062 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); 3063 vd = vdev_lookup_top(spa, ztest_random_vdev_top(spa, B_FALSE)); 3064 guid = vd->vdev_guid; 3065 spa_config_exit(spa, SCL_VDEV, FTAG); 3066 3067 (void) spa_vdev_remove(spa, guid, B_FALSE); 3068 3069 mutex_exit(&ztest_vdev_lock); 3070} 3071 3072/* 3073 * Callback function which expands the physical size of the vdev. 3074 */ 3075vdev_t * 3076grow_vdev(vdev_t *vd, void *arg) 3077{ 3078 spa_t *spa = vd->vdev_spa; 3079 size_t *newsize = arg; 3080 size_t fsize; 3081 int fd; 3082 3083 ASSERT(spa_config_held(spa, SCL_STATE, RW_READER) == SCL_STATE); 3084 ASSERT(vd->vdev_ops->vdev_op_leaf); 3085 3086 if ((fd = open(vd->vdev_path, O_RDWR)) == -1) 3087 return (vd); 3088 3089 fsize = lseek(fd, 0, SEEK_END); 3090 (void) ftruncate(fd, *newsize); 3091 3092 if (ztest_opts.zo_verbose >= 6) { 3093 (void) printf("%s grew from %lu to %lu bytes\n", 3094 vd->vdev_path, (ulong_t)fsize, (ulong_t)*newsize); 3095 } 3096 (void) close(fd); 3097 return (NULL); 3098} 3099 3100/* 3101 * Callback function which expands a given vdev by calling vdev_online(). 3102 */ 3103/* ARGSUSED */ 3104vdev_t * 3105online_vdev(vdev_t *vd, void *arg) 3106{ 3107 spa_t *spa = vd->vdev_spa; 3108 vdev_t *tvd = vd->vdev_top; 3109 uint64_t guid = vd->vdev_guid; 3110 uint64_t generation = spa->spa_config_generation + 1; 3111 vdev_state_t newstate = VDEV_STATE_UNKNOWN; 3112 int error; 3113 3114 ASSERT(spa_config_held(spa, SCL_STATE, RW_READER) == SCL_STATE); 3115 ASSERT(vd->vdev_ops->vdev_op_leaf); 3116 3117 /* Calling vdev_online will initialize the new metaslabs */ 3118 spa_config_exit(spa, SCL_STATE, spa); 3119 error = vdev_online(spa, guid, ZFS_ONLINE_EXPAND, &newstate); 3120 spa_config_enter(spa, SCL_STATE, spa, RW_READER); 3121 3122 /* 3123 * If vdev_online returned an error or the underlying vdev_open 3124 * failed then we abort the expand. The only way to know that 3125 * vdev_open fails is by checking the returned newstate. 3126 */ 3127 if (error || newstate != VDEV_STATE_HEALTHY) { 3128 if (ztest_opts.zo_verbose >= 5) { 3129 (void) printf("Unable to expand vdev, state %llu, " 3130 "error %d\n", (u_longlong_t)newstate, error); 3131 } 3132 return (vd); 3133 } 3134 ASSERT3U(newstate, ==, VDEV_STATE_HEALTHY); 3135 3136 /* 3137 * Since we dropped the lock we need to ensure that we're 3138 * still talking to the original vdev. It's possible this 3139 * vdev may have been detached/replaced while we were 3140 * trying to online it. 
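 *
 * The generation we saved was spa_config_generation + 1, i.e. the value
 * the config should reach once our own vdev_online() update lands.  If it
 * no longer matches, some other configuration change slipped in while the
 * lock was dropped and we give up on this expansion attempt.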
3141 */ 3142 if (generation != spa->spa_config_generation) { 3143 if (ztest_opts.zo_verbose >= 5) { 3144 (void) printf("vdev configuration has changed, " 3145 "guid %llu, state %llu, expected gen %llu, " 3146 "got gen %llu\n", 3147 (u_longlong_t)guid, 3148 (u_longlong_t)tvd->vdev_state, 3149 (u_longlong_t)generation, 3150 (u_longlong_t)spa->spa_config_generation); 3151 } 3152 return (vd); 3153 } 3154 return (NULL); 3155} 3156 3157/* 3158 * Traverse the vdev tree calling the supplied function. 3159 * We continue to walk the tree until we either have walked all 3160 * children or we receive a non-NULL return from the callback. 3161 * If a NULL callback is passed, then we just return back the first 3162 * leaf vdev we encounter. 3163 */ 3164vdev_t * 3165vdev_walk_tree(vdev_t *vd, vdev_t *(*func)(vdev_t *, void *), void *arg) 3166{ 3167 if (vd->vdev_ops->vdev_op_leaf) { 3168 if (func == NULL) 3169 return (vd); 3170 else 3171 return (func(vd, arg)); 3172 } 3173 3174 for (uint_t c = 0; c < vd->vdev_children; c++) { 3175 vdev_t *cvd = vd->vdev_child[c]; 3176 if ((cvd = vdev_walk_tree(cvd, func, arg)) != NULL) 3177 return (cvd); 3178 } 3179 return (NULL); 3180} 3181 3182/* 3183 * Verify that dynamic LUN growth works as expected. 3184 */ 3185/* ARGSUSED */ 3186void 3187ztest_vdev_LUN_growth(ztest_ds_t *zd, uint64_t id) 3188{ 3189 spa_t *spa = ztest_spa; 3190 vdev_t *vd, *tvd; 3191 metaslab_class_t *mc; 3192 metaslab_group_t *mg; 3193 size_t psize, newsize; 3194 uint64_t top; 3195 uint64_t old_class_space, new_class_space, old_ms_count, new_ms_count; 3196 3197 mutex_enter(&ztest_checkpoint_lock); 3198 mutex_enter(&ztest_vdev_lock); 3199 spa_config_enter(spa, SCL_STATE, spa, RW_READER); 3200 3201 /* 3202 * If there is a vdev removal in progress, it could complete while 3203 * we are running, in which case we would not be able to verify 3204 * that the metaslab_class space increased (because it decreases 3205 * when the device removal completes). 3206 */ 3207 if (spa->spa_vdev_removal != NULL) { 3208 spa_config_exit(spa, SCL_STATE, spa); 3209 mutex_exit(&ztest_vdev_lock); 3210 mutex_exit(&ztest_checkpoint_lock); 3211 return; 3212 } 3213 3214 top = ztest_random_vdev_top(spa, B_TRUE); 3215 3216 tvd = spa->spa_root_vdev->vdev_child[top]; 3217 mg = tvd->vdev_mg; 3218 mc = mg->mg_class; 3219 old_ms_count = tvd->vdev_ms_count; 3220 old_class_space = metaslab_class_get_space(mc); 3221 3222 /* 3223 * Determine the size of the first leaf vdev associated with 3224 * our top-level device. 3225 */ 3226 vd = vdev_walk_tree(tvd, NULL, NULL); 3227 ASSERT3P(vd, !=, NULL); 3228 ASSERT(vd->vdev_ops->vdev_op_leaf); 3229 3230 psize = vd->vdev_psize; 3231 3232 /* 3233 * We only try to expand the vdev if it's healthy, less than 4x its 3234 * original size, and it has a valid psize. 3235 */ 3236 if (tvd->vdev_state != VDEV_STATE_HEALTHY || 3237 psize == 0 || psize >= 4 * ztest_opts.zo_vdev_size) { 3238 spa_config_exit(spa, SCL_STATE, spa); 3239 mutex_exit(&ztest_vdev_lock); 3240 mutex_exit(&ztest_checkpoint_lock); 3241 return; 3242 } 3243 ASSERT(psize > 0); 3244 newsize = psize + psize / 8; 3245 ASSERT3U(newsize, >, psize); 3246 3247 if (ztest_opts.zo_verbose >= 6) { 3248 (void) printf("Expanding LUN %s from %lu to %lu\n", 3249 vd->vdev_path, (ulong_t)psize, (ulong_t)newsize); 3250 } 3251 3252 /* 3253 * Growing the vdev is a two step process: 3254 * 1). expand the physical size (i.e. relabel) 3255 * 2). 
online the vdev to create the new metaslabs 3256 */ 3257 if (vdev_walk_tree(tvd, grow_vdev, &newsize) != NULL || 3258 vdev_walk_tree(tvd, online_vdev, NULL) != NULL || 3259 tvd->vdev_state != VDEV_STATE_HEALTHY) { 3260 if (ztest_opts.zo_verbose >= 5) { 3261 (void) printf("Could not expand LUN because " 3262 "the vdev configuration changed.\n"); 3263 } 3264 spa_config_exit(spa, SCL_STATE, spa); 3265 mutex_exit(&ztest_vdev_lock); 3266 mutex_exit(&ztest_checkpoint_lock); 3267 return; 3268 } 3269 3270 spa_config_exit(spa, SCL_STATE, spa); 3271 3272 /* 3273 * Expanding the LUN will update the config asynchronously, 3274 * thus we must wait for the async thread to complete any 3275 * pending tasks before proceeding. 3276 */ 3277 for (;;) { 3278 boolean_t done; 3279 mutex_enter(&spa->spa_async_lock); 3280 done = (spa->spa_async_thread == NULL && !spa->spa_async_tasks); 3281 mutex_exit(&spa->spa_async_lock); 3282 if (done) 3283 break; 3284 txg_wait_synced(spa_get_dsl(spa), 0); 3285 (void) poll(NULL, 0, 100); 3286 } 3287 3288 spa_config_enter(spa, SCL_STATE, spa, RW_READER); 3289 3290 tvd = spa->spa_root_vdev->vdev_child[top]; 3291 new_ms_count = tvd->vdev_ms_count; 3292 new_class_space = metaslab_class_get_space(mc); 3293 3294 if (tvd->vdev_mg != mg || mg->mg_class != mc) { 3295 if (ztest_opts.zo_verbose >= 5) { 3296 (void) printf("Could not verify LUN expansion due to " 3297 "intervening vdev offline or remove.\n"); 3298 } 3299 spa_config_exit(spa, SCL_STATE, spa); 3300 mutex_exit(&ztest_vdev_lock); 3301 mutex_exit(&ztest_checkpoint_lock); 3302 return; 3303 } 3304 3305 /* 3306 * Make sure we were able to grow the vdev. 3307 */ 3308 if (new_ms_count <= old_ms_count) { 3309 fatal(0, "LUN expansion failed: ms_count %llu < %llu\n", 3310 old_ms_count, new_ms_count); 3311 } 3312 3313 /* 3314 * Make sure we were able to grow the pool. 3315 */ 3316 if (new_class_space <= old_class_space) { 3317 fatal(0, "LUN expansion failed: class_space %llu < %llu\n", 3318 old_class_space, new_class_space); 3319 } 3320 3321 if (ztest_opts.zo_verbose >= 5) { 3322 char oldnumbuf[NN_NUMBUF_SZ], newnumbuf[NN_NUMBUF_SZ]; 3323 3324 nicenum(old_class_space, oldnumbuf, sizeof (oldnumbuf)); 3325 nicenum(new_class_space, newnumbuf, sizeof (newnumbuf)); 3326 (void) printf("%s grew from %s to %s\n", 3327 spa->spa_name, oldnumbuf, newnumbuf); 3328 } 3329 3330 spa_config_exit(spa, SCL_STATE, spa); 3331 mutex_exit(&ztest_vdev_lock); 3332 mutex_exit(&ztest_checkpoint_lock); 3333} 3334 3335/* 3336 * Verify that dmu_objset_{create,destroy,open,close} work as expected. 3337 */ 3338/* ARGSUSED */ 3339static void 3340ztest_objset_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx) 3341{ 3342 /* 3343 * Create the objects common to all ztest datasets. 
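 *
 * Today that is just the name ZAP claimed at the fixed object number
 * ZTEST_DIROBJ; every od template points od_dir at it, and
 * ztest_lookup() resolves od_name to an object number through it.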
3344 */ 3345 VERIFY(zap_create_claim(os, ZTEST_DIROBJ, 3346 DMU_OT_ZAP_OTHER, DMU_OT_NONE, 0, tx) == 0); 3347} 3348 3349static int 3350ztest_dataset_create(char *dsname) 3351{ 3352 uint64_t zilset = ztest_random(100); 3353 int err = dmu_objset_create(dsname, DMU_OST_OTHER, 0, 3354 ztest_objset_create_cb, NULL); 3355 3356 if (err || zilset < 80) 3357 return (err); 3358 3359 if (ztest_opts.zo_verbose >= 6) 3360 (void) printf("Setting dataset %s to sync always\n", dsname); 3361 return (ztest_dsl_prop_set_uint64(dsname, ZFS_PROP_SYNC, 3362 ZFS_SYNC_ALWAYS, B_FALSE)); 3363} 3364 3365/* ARGSUSED */ 3366static int 3367ztest_objset_destroy_cb(const char *name, void *arg) 3368{ 3369 objset_t *os; 3370 dmu_object_info_t doi; 3371 int error; 3372 3373 /* 3374 * Verify that the dataset contains a directory object. 3375 */ 3376 VERIFY0(dmu_objset_own(name, DMU_OST_OTHER, B_TRUE, FTAG, &os)); 3377 error = dmu_object_info(os, ZTEST_DIROBJ, &doi); 3378 if (error != ENOENT) { 3379 /* We could have crashed in the middle of destroying it */ 3380 ASSERT0(error); 3381 ASSERT3U(doi.doi_type, ==, DMU_OT_ZAP_OTHER); 3382 ASSERT3S(doi.doi_physical_blocks_512, >=, 0); 3383 } 3384 dmu_objset_disown(os, FTAG); 3385 3386 /* 3387 * Destroy the dataset. 3388 */ 3389 if (strchr(name, '@') != NULL) { 3390 VERIFY0(dsl_destroy_snapshot(name, B_FALSE)); 3391 } else { 3392 VERIFY0(dsl_destroy_head(name)); 3393 } 3394 return (0); 3395} 3396 3397static boolean_t 3398ztest_snapshot_create(char *osname, uint64_t id) 3399{ 3400 char snapname[ZFS_MAX_DATASET_NAME_LEN]; 3401 int error; 3402 3403 (void) snprintf(snapname, sizeof (snapname), "%llu", (u_longlong_t)id); 3404 3405 error = dmu_objset_snapshot_one(osname, snapname); 3406 if (error == ENOSPC) { 3407 ztest_record_enospc(FTAG); 3408 return (B_FALSE); 3409 } 3410 if (error != 0 && error != EEXIST) { 3411 fatal(0, "ztest_snapshot_create(%s@%s) = %d", osname, 3412 snapname, error); 3413 } 3414 return (B_TRUE); 3415} 3416 3417static boolean_t 3418ztest_snapshot_destroy(char *osname, uint64_t id) 3419{ 3420 char snapname[ZFS_MAX_DATASET_NAME_LEN]; 3421 int error; 3422 3423 (void) snprintf(snapname, sizeof (snapname), "%s@%llu", osname, 3424 (u_longlong_t)id); 3425 3426 error = dsl_destroy_snapshot(snapname, B_FALSE); 3427 if (error != 0 && error != ENOENT) 3428 fatal(0, "ztest_snapshot_destroy(%s) = %d", snapname, error); 3429 return (B_TRUE); 3430} 3431 3432/* ARGSUSED */ 3433void 3434ztest_dmu_objset_create_destroy(ztest_ds_t *zd, uint64_t id) 3435{ 3436 ztest_ds_t zdtmp; 3437 int iters; 3438 int error; 3439 objset_t *os, *os2; 3440 char name[ZFS_MAX_DATASET_NAME_LEN]; 3441 zilog_t *zilog; 3442 3443 rw_enter(&ztest_name_lock, RW_READER); 3444 3445 (void) snprintf(name, sizeof (name), "%s/temp_%llu", 3446 ztest_opts.zo_pool, (u_longlong_t)id); 3447 3448 /* 3449 * If this dataset exists from a previous run, process its replay log 3450 * half of the time. If we don't replay it, then dmu_objset_destroy() 3451 * (invoked from ztest_objset_destroy_cb()) should just throw it away. 3452 */ 3453 if (ztest_random(2) == 0 && 3454 dmu_objset_own(name, DMU_OST_OTHER, B_FALSE, FTAG, &os) == 0) { 3455 ztest_zd_init(&zdtmp, NULL, os); 3456 zil_replay(os, &zdtmp, ztest_replay_vector); 3457 ztest_zd_fini(&zdtmp); 3458 dmu_objset_disown(os, FTAG); 3459 } 3460 3461 /* 3462 * There may be an old instance of the dataset we're about to 3463 * create lying around from a previous run. If so, destroy it 3464 * and all of its snapshots. 
3465 */ 3466 (void) dmu_objset_find(name, ztest_objset_destroy_cb, NULL, 3467 DS_FIND_CHILDREN | DS_FIND_SNAPSHOTS); 3468 3469 /* 3470 * Verify that the destroyed dataset is no longer in the namespace. 3471 */ 3472 VERIFY3U(ENOENT, ==, dmu_objset_own(name, DMU_OST_OTHER, B_TRUE, 3473 FTAG, &os)); 3474 3475 /* 3476 * Verify that we can create a new dataset. 3477 */ 3478 error = ztest_dataset_create(name); 3479 if (error) { 3480 if (error == ENOSPC) { 3481 ztest_record_enospc(FTAG); 3482 rw_exit(&ztest_name_lock); 3483 return; 3484 } 3485 fatal(0, "dmu_objset_create(%s) = %d", name, error); 3486 } 3487 3488 VERIFY0(dmu_objset_own(name, DMU_OST_OTHER, B_FALSE, FTAG, &os)); 3489 3490 ztest_zd_init(&zdtmp, NULL, os); 3491 3492 /* 3493 * Open the intent log for it. 3494 */ 3495 zilog = zil_open(os, ztest_get_data); 3496 3497 /* 3498 * Put some objects in there, do a little I/O to them, 3499 * and randomly take a couple of snapshots along the way. 3500 */ 3501 iters = ztest_random(5); 3502 for (int i = 0; i < iters; i++) { 3503 ztest_dmu_object_alloc_free(&zdtmp, id); 3504 if (ztest_random(iters) == 0) 3505 (void) ztest_snapshot_create(name, i); 3506 } 3507 3508 /* 3509 * Verify that we cannot create an existing dataset. 3510 */ 3511 VERIFY3U(EEXIST, ==, 3512 dmu_objset_create(name, DMU_OST_OTHER, 0, NULL, NULL)); 3513 3514 /* 3515 * Verify that we can hold an objset that is also owned. 3516 */ 3517 VERIFY3U(0, ==, dmu_objset_hold(name, FTAG, &os2)); 3518 dmu_objset_rele(os2, FTAG); 3519 3520 /* 3521 * Verify that we cannot own an objset that is already owned. 3522 */ 3523 VERIFY3U(EBUSY, ==, 3524 dmu_objset_own(name, DMU_OST_OTHER, B_FALSE, FTAG, &os2)); 3525 3526 zil_close(zilog); 3527 dmu_objset_disown(os, FTAG); 3528 ztest_zd_fini(&zdtmp); 3529 3530 rw_exit(&ztest_name_lock); 3531} 3532 3533/* 3534 * Verify that dmu_snapshot_{create,destroy,open,close} work as expected. 3535 */ 3536void 3537ztest_dmu_snapshot_create_destroy(ztest_ds_t *zd, uint64_t id) 3538{ 3539 rw_enter(&ztest_name_lock, RW_READER); 3540 (void) ztest_snapshot_destroy(zd->zd_name, id); 3541 (void) ztest_snapshot_create(zd->zd_name, id); 3542 rw_exit(&ztest_name_lock); 3543} 3544 3545/* 3546 * Cleanup non-standard snapshots and clones. 
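 *
 * These are the datasets created by ztest_dsl_dataset_promote_busy()
 * below; for a given <id> the layout is
 *
 *	<osname>@s1_<id>
 *	<osname>/c1_<id>		(clone of s1)
 *	<osname>/c1_<id>@s2_<id>
 *	<osname>/c1_<id>@s3_<id>
 *	<osname>/c2_<id>		(clone of s3)
 *
 * and they are destroyed leaf-first so that no clone still depends on a
 * snapshot by the time the snapshot itself is destroyed.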
3547 */ 3548void 3549ztest_dsl_dataset_cleanup(char *osname, uint64_t id) 3550{ 3551 char snap1name[ZFS_MAX_DATASET_NAME_LEN]; 3552 char clone1name[ZFS_MAX_DATASET_NAME_LEN]; 3553 char snap2name[ZFS_MAX_DATASET_NAME_LEN]; 3554 char clone2name[ZFS_MAX_DATASET_NAME_LEN]; 3555 char snap3name[ZFS_MAX_DATASET_NAME_LEN]; 3556 int error; 3557 3558 (void) snprintf(snap1name, sizeof (snap1name), 3559 "%s@s1_%llu", osname, id); 3560 (void) snprintf(clone1name, sizeof (clone1name), 3561 "%s/c1_%llu", osname, id); 3562 (void) snprintf(snap2name, sizeof (snap2name), 3563 "%s@s2_%llu", clone1name, id); 3564 (void) snprintf(clone2name, sizeof (clone2name), 3565 "%s/c2_%llu", osname, id); 3566 (void) snprintf(snap3name, sizeof (snap3name), 3567 "%s@s3_%llu", clone1name, id); 3568 3569 error = dsl_destroy_head(clone2name); 3570 if (error && error != ENOENT) 3571 fatal(0, "dsl_destroy_head(%s) = %d", clone2name, error); 3572 error = dsl_destroy_snapshot(snap3name, B_FALSE); 3573 if (error && error != ENOENT) 3574 fatal(0, "dsl_destroy_snapshot(%s) = %d", snap3name, error); 3575 error = dsl_destroy_snapshot(snap2name, B_FALSE); 3576 if (error && error != ENOENT) 3577 fatal(0, "dsl_destroy_snapshot(%s) = %d", snap2name, error); 3578 error = dsl_destroy_head(clone1name); 3579 if (error && error != ENOENT) 3580 fatal(0, "dsl_destroy_head(%s) = %d", clone1name, error); 3581 error = dsl_destroy_snapshot(snap1name, B_FALSE); 3582 if (error && error != ENOENT) 3583 fatal(0, "dsl_destroy_snapshot(%s) = %d", snap1name, error); 3584} 3585 3586/* 3587 * Verify dsl_dataset_promote handles EBUSY 3588 */ 3589void 3590ztest_dsl_dataset_promote_busy(ztest_ds_t *zd, uint64_t id) 3591{ 3592 objset_t *os; 3593 char snap1name[ZFS_MAX_DATASET_NAME_LEN]; 3594 char clone1name[ZFS_MAX_DATASET_NAME_LEN]; 3595 char snap2name[ZFS_MAX_DATASET_NAME_LEN]; 3596 char clone2name[ZFS_MAX_DATASET_NAME_LEN]; 3597 char snap3name[ZFS_MAX_DATASET_NAME_LEN]; 3598 char *osname = zd->zd_name; 3599 int error; 3600 3601 rw_enter(&ztest_name_lock, RW_READER); 3602 3603 ztest_dsl_dataset_cleanup(osname, id); 3604 3605 (void) snprintf(snap1name, sizeof (snap1name), 3606 "%s@s1_%llu", osname, id); 3607 (void) snprintf(clone1name, sizeof (clone1name), 3608 "%s/c1_%llu", osname, id); 3609 (void) snprintf(snap2name, sizeof (snap2name), 3610 "%s@s2_%llu", clone1name, id); 3611 (void) snprintf(clone2name, sizeof (clone2name), 3612 "%s/c2_%llu", osname, id); 3613 (void) snprintf(snap3name, sizeof (snap3name), 3614 "%s@s3_%llu", clone1name, id); 3615 3616 error = dmu_objset_snapshot_one(osname, strchr(snap1name, '@') + 1); 3617 if (error && error != EEXIST) { 3618 if (error == ENOSPC) { 3619 ztest_record_enospc(FTAG); 3620 goto out; 3621 } 3622 fatal(0, "dmu_take_snapshot(%s) = %d", snap1name, error); 3623 } 3624 3625 error = dmu_objset_clone(clone1name, snap1name); 3626 if (error) { 3627 if (error == ENOSPC) { 3628 ztest_record_enospc(FTAG); 3629 goto out; 3630 } 3631 fatal(0, "dmu_objset_create(%s) = %d", clone1name, error); 3632 } 3633 3634 error = dmu_objset_snapshot_one(clone1name, strchr(snap2name, '@') + 1); 3635 if (error && error != EEXIST) { 3636 if (error == ENOSPC) { 3637 ztest_record_enospc(FTAG); 3638 goto out; 3639 } 3640 fatal(0, "dmu_open_snapshot(%s) = %d", snap2name, error); 3641 } 3642 3643 error = dmu_objset_snapshot_one(clone1name, strchr(snap3name, '@') + 1); 3644 if (error && error != EEXIST) { 3645 if (error == ENOSPC) { 3646 ztest_record_enospc(FTAG); 3647 goto out; 3648 } 3649 fatal(0, "dmu_open_snapshot(%s) = %d", snap3name, error); 
3650 } 3651 3652 error = dmu_objset_clone(clone2name, snap3name); 3653 if (error) { 3654 if (error == ENOSPC) { 3655 ztest_record_enospc(FTAG); 3656 goto out; 3657 } 3658 fatal(0, "dmu_objset_create(%s) = %d", clone2name, error); 3659 } 3660 3661 error = dmu_objset_own(snap2name, DMU_OST_ANY, B_TRUE, FTAG, &os); 3662 if (error) 3663 fatal(0, "dmu_objset_own(%s) = %d", snap2name, error); 3664 error = dsl_dataset_promote(clone2name, NULL); 3665 if (error == ENOSPC) { 3666 dmu_objset_disown(os, FTAG); 3667 ztest_record_enospc(FTAG); 3668 goto out; 3669 } 3670 if (error != EBUSY) 3671 fatal(0, "dsl_dataset_promote(%s), %d, not EBUSY", clone2name, 3672 error); 3673 dmu_objset_disown(os, FTAG); 3674 3675out: 3676 ztest_dsl_dataset_cleanup(osname, id); 3677 3678 rw_exit(&ztest_name_lock); 3679} 3680 3681/* 3682 * Verify that dmu_object_{alloc,free} work as expected. 3683 */ 3684void 3685ztest_dmu_object_alloc_free(ztest_ds_t *zd, uint64_t id) 3686{ 3687 ztest_od_t od[4]; 3688 int batchsize = sizeof (od) / sizeof (od[0]); 3689 3690 for (int b = 0; b < batchsize; b++) 3691 ztest_od_init(&od[b], id, FTAG, b, DMU_OT_UINT64_OTHER, 0, 0); 3692 3693 /* 3694 * Destroy the previous batch of objects, create a new batch, 3695 * and do some I/O on the new objects. 3696 */ 3697 if (ztest_object_init(zd, od, sizeof (od), B_TRUE) != 0) 3698 return; 3699 3700 while (ztest_random(4 * batchsize) != 0) 3701 ztest_io(zd, od[ztest_random(batchsize)].od_object, 3702 ztest_random(ZTEST_RANGE_LOCKS) << SPA_MAXBLOCKSHIFT); 3703} 3704 3705/* 3706 * Verify that dmu_{read,write} work as expected. 3707 */ 3708void 3709ztest_dmu_read_write(ztest_ds_t *zd, uint64_t id) 3710{ 3711 objset_t *os = zd->zd_os; 3712 ztest_od_t od[2]; 3713 dmu_tx_t *tx; 3714 int i, freeit, error; 3715 uint64_t n, s, txg; 3716 bufwad_t *packbuf, *bigbuf, *pack, *bigH, *bigT; 3717 uint64_t packobj, packoff, packsize, bigobj, bigoff, bigsize; 3718 uint64_t chunksize = (1000 + ztest_random(1000)) * sizeof (uint64_t); 3719 uint64_t regions = 997; 3720 uint64_t stride = 123456789ULL; 3721 uint64_t width = 40; 3722 int free_percent = 5; 3723 3724 /* 3725 * This test uses two objects, packobj and bigobj, that are always 3726 * updated together (i.e. in the same tx) so that their contents are 3727 * in sync and can be compared. Their contents relate to each other 3728 * in a simple way: packobj is a dense array of 'bufwad' structures, 3729 * while bigobj is a sparse array of the same bufwads. Specifically, 3730 * for any index n, there are three bufwads that should be identical: 3731 * 3732 * packobj, at offset n * sizeof (bufwad_t) 3733 * bigobj, at the head of the nth chunk 3734 * bigobj, at the tail of the nth chunk 3735 * 3736 * The chunk size is arbitrary. It doesn't have to be a power of two, 3737 * and it doesn't have any relation to the object blocksize. 3738 * The only requirement is that it can hold at least two bufwads. 3739 * 3740 * Normally, we write the bufwad to each of these locations. 3741 * However, free_percent of the time we instead write zeroes to 3742 * packobj and perform a dmu_free_range() on bigobj. By comparing 3743 * bigobj to packobj, we can verify that the DMU is correctly 3744 * tracking which parts of an object are allocated and free, 3745 * and that the contents of the allocated blocks are correct. 3746 */ 3747 3748 /* 3749 * Read the directory info. If it's the first time, set things up. 
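 *
 * Note that the od template's "gen" slot carries the chunk size: if the
 * objects survive from an earlier pass, that pass's chunk size (od_gen)
 * replaces the one picked above, so packobj and bigobj keep describing
 * the same chunks.  For any index n, the three copies that must agree
 * then live at
 *
 *	packobj:  offset n * sizeof (bufwad_t)
 *	bigobj:   offset n * chunksize				(chunk head)
 *	bigobj:   offset (n + 1) * chunksize - sizeof (bufwad_t)  (chunk tail)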
3750 */ 3751 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_UINT64_OTHER, 0, chunksize); 3752 ztest_od_init(&od[1], id, FTAG, 1, DMU_OT_UINT64_OTHER, 0, chunksize); 3753 3754 if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0) 3755 return; 3756 3757 bigobj = od[0].od_object; 3758 packobj = od[1].od_object; 3759 chunksize = od[0].od_gen; 3760 ASSERT(chunksize == od[1].od_gen); 3761 3762 /* 3763 * Prefetch a random chunk of the big object. 3764 * Our aim here is to get some async reads in flight 3765 * for blocks that we may free below; the DMU should 3766 * handle this race correctly. 3767 */ 3768 n = ztest_random(regions) * stride + ztest_random(width); 3769 s = 1 + ztest_random(2 * width - 1); 3770 dmu_prefetch(os, bigobj, 0, n * chunksize, s * chunksize, 3771 ZIO_PRIORITY_SYNC_READ); 3772 3773 /* 3774 * Pick a random index and compute the offsets into packobj and bigobj. 3775 */ 3776 n = ztest_random(regions) * stride + ztest_random(width); 3777 s = 1 + ztest_random(width - 1); 3778 3779 packoff = n * sizeof (bufwad_t); 3780 packsize = s * sizeof (bufwad_t); 3781 3782 bigoff = n * chunksize; 3783 bigsize = s * chunksize; 3784 3785 packbuf = umem_alloc(packsize, UMEM_NOFAIL); 3786 bigbuf = umem_alloc(bigsize, UMEM_NOFAIL); 3787 3788 /* 3789 * free_percent of the time, free a range of bigobj rather than 3790 * overwriting it. 3791 */ 3792 freeit = (ztest_random(100) < free_percent); 3793 3794 /* 3795 * Read the current contents of our objects. 3796 */ 3797 error = dmu_read(os, packobj, packoff, packsize, packbuf, 3798 DMU_READ_PREFETCH); 3799 ASSERT0(error); 3800 error = dmu_read(os, bigobj, bigoff, bigsize, bigbuf, 3801 DMU_READ_PREFETCH); 3802 ASSERT0(error); 3803 3804 /* 3805 * Get a tx for the mods to both packobj and bigobj. 3806 */ 3807 tx = dmu_tx_create(os); 3808 3809 dmu_tx_hold_write(tx, packobj, packoff, packsize); 3810 3811 if (freeit) 3812 dmu_tx_hold_free(tx, bigobj, bigoff, bigsize); 3813 else 3814 dmu_tx_hold_write(tx, bigobj, bigoff, bigsize); 3815 3816 /* This accounts for setting the checksum/compression. */ 3817 dmu_tx_hold_bonus(tx, bigobj); 3818 3819 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG); 3820 if (txg == 0) { 3821 umem_free(packbuf, packsize); 3822 umem_free(bigbuf, bigsize); 3823 return; 3824 } 3825 3826 enum zio_checksum cksum; 3827 do { 3828 cksum = (enum zio_checksum) 3829 ztest_random_dsl_prop(ZFS_PROP_CHECKSUM); 3830 } while (cksum >= ZIO_CHECKSUM_LEGACY_FUNCTIONS); 3831 dmu_object_set_checksum(os, bigobj, cksum, tx); 3832 3833 enum zio_compress comp; 3834 do { 3835 comp = (enum zio_compress) 3836 ztest_random_dsl_prop(ZFS_PROP_COMPRESSION); 3837 } while (comp >= ZIO_COMPRESS_LEGACY_FUNCTIONS); 3838 dmu_object_set_compress(os, bigobj, comp, tx); 3839 3840 /* 3841 * For each index from n to n + s, verify that the existing bufwad 3842 * in packobj matches the bufwads at the head and tail of the 3843 * corresponding chunk in bigobj. Then update all three bufwads 3844 * with the new values we want to write out. 
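 *
 * Three things are checked for each bufwad: bw_txg must not be newer than
 * the currently open txg (that would be a "future leak"), bw_index must
 * match the bufwad's position once the slot has ever been written
 * (bw_data != 0), and the packobj copy must be byte-for-byte identical to
 * both the head and tail copies in bigobj.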
3845 */ 3846 for (i = 0; i < s; i++) { 3847 /* LINTED */ 3848 pack = (bufwad_t *)((char *)packbuf + i * sizeof (bufwad_t)); 3849 /* LINTED */ 3850 bigH = (bufwad_t *)((char *)bigbuf + i * chunksize); 3851 /* LINTED */ 3852 bigT = (bufwad_t *)((char *)bigH + chunksize) - 1; 3853 3854 ASSERT((uintptr_t)bigH - (uintptr_t)bigbuf < bigsize); 3855 ASSERT((uintptr_t)bigT - (uintptr_t)bigbuf < bigsize); 3856 3857 if (pack->bw_txg > txg) 3858 fatal(0, "future leak: got %llx, open txg is %llx", 3859 pack->bw_txg, txg); 3860 3861 if (pack->bw_data != 0 && pack->bw_index != n + i) 3862 fatal(0, "wrong index: got %llx, wanted %llx+%llx", 3863 pack->bw_index, n, i); 3864 3865 if (bcmp(pack, bigH, sizeof (bufwad_t)) != 0) 3866 fatal(0, "pack/bigH mismatch in %p/%p", pack, bigH); 3867 3868 if (bcmp(pack, bigT, sizeof (bufwad_t)) != 0) 3869 fatal(0, "pack/bigT mismatch in %p/%p", pack, bigT); 3870 3871 if (freeit) { 3872 bzero(pack, sizeof (bufwad_t)); 3873 } else { 3874 pack->bw_index = n + i; 3875 pack->bw_txg = txg; 3876 pack->bw_data = 1 + ztest_random(-2ULL); 3877 } 3878 *bigH = *pack; 3879 *bigT = *pack; 3880 } 3881 3882 /* 3883 * We've verified all the old bufwads, and made new ones. 3884 * Now write them out. 3885 */ 3886 dmu_write(os, packobj, packoff, packsize, packbuf, tx); 3887 3888 if (freeit) { 3889 if (ztest_opts.zo_verbose >= 7) { 3890 (void) printf("freeing offset %llx size %llx" 3891 " txg %llx\n", 3892 (u_longlong_t)bigoff, 3893 (u_longlong_t)bigsize, 3894 (u_longlong_t)txg); 3895 } 3896 VERIFY(0 == dmu_free_range(os, bigobj, bigoff, bigsize, tx)); 3897 } else { 3898 if (ztest_opts.zo_verbose >= 7) { 3899 (void) printf("writing offset %llx size %llx" 3900 " txg %llx\n", 3901 (u_longlong_t)bigoff, 3902 (u_longlong_t)bigsize, 3903 (u_longlong_t)txg); 3904 } 3905 dmu_write(os, bigobj, bigoff, bigsize, bigbuf, tx); 3906 } 3907 3908 dmu_tx_commit(tx); 3909 3910 /* 3911 * Sanity check the stuff we just wrote. 3912 */ 3913 { 3914 void *packcheck = umem_alloc(packsize, UMEM_NOFAIL); 3915 void *bigcheck = umem_alloc(bigsize, UMEM_NOFAIL); 3916 3917 VERIFY(0 == dmu_read(os, packobj, packoff, 3918 packsize, packcheck, DMU_READ_PREFETCH)); 3919 VERIFY(0 == dmu_read(os, bigobj, bigoff, 3920 bigsize, bigcheck, DMU_READ_PREFETCH)); 3921 3922 ASSERT(bcmp(packbuf, packcheck, packsize) == 0); 3923 ASSERT(bcmp(bigbuf, bigcheck, bigsize) == 0); 3924 3925 umem_free(packcheck, packsize); 3926 umem_free(bigcheck, bigsize); 3927 } 3928 3929 umem_free(packbuf, packsize); 3930 umem_free(bigbuf, bigsize); 3931} 3932 3933void 3934compare_and_update_pbbufs(uint64_t s, bufwad_t *packbuf, bufwad_t *bigbuf, 3935 uint64_t bigsize, uint64_t n, uint64_t chunksize, uint64_t txg) 3936{ 3937 uint64_t i; 3938 bufwad_t *pack; 3939 bufwad_t *bigH; 3940 bufwad_t *bigT; 3941 3942 /* 3943 * For each index from n to n + s, verify that the existing bufwad 3944 * in packobj matches the bufwads at the head and tail of the 3945 * corresponding chunk in bigobj. Then update all three bufwads 3946 * with the new values we want to write out. 
3947 */ 3948 for (i = 0; i < s; i++) { 3949 /* LINTED */ 3950 pack = (bufwad_t *)((char *)packbuf + i * sizeof (bufwad_t)); 3951 /* LINTED */ 3952 bigH = (bufwad_t *)((char *)bigbuf + i * chunksize); 3953 /* LINTED */ 3954 bigT = (bufwad_t *)((char *)bigH + chunksize) - 1; 3955 3956 ASSERT((uintptr_t)bigH - (uintptr_t)bigbuf < bigsize); 3957 ASSERT((uintptr_t)bigT - (uintptr_t)bigbuf < bigsize); 3958 3959 if (pack->bw_txg > txg) 3960 fatal(0, "future leak: got %llx, open txg is %llx", 3961 pack->bw_txg, txg); 3962 3963 if (pack->bw_data != 0 && pack->bw_index != n + i) 3964 fatal(0, "wrong index: got %llx, wanted %llx+%llx", 3965 pack->bw_index, n, i); 3966 3967 if (bcmp(pack, bigH, sizeof (bufwad_t)) != 0) 3968 fatal(0, "pack/bigH mismatch in %p/%p", pack, bigH); 3969 3970 if (bcmp(pack, bigT, sizeof (bufwad_t)) != 0) 3971 fatal(0, "pack/bigT mismatch in %p/%p", pack, bigT); 3972 3973 pack->bw_index = n + i; 3974 pack->bw_txg = txg; 3975 pack->bw_data = 1 + ztest_random(-2ULL); 3976 3977 *bigH = *pack; 3978 *bigT = *pack; 3979 } 3980} 3981 3982void 3983ztest_dmu_read_write_zcopy(ztest_ds_t *zd, uint64_t id) 3984{ 3985 objset_t *os = zd->zd_os; 3986 ztest_od_t od[2]; 3987 dmu_tx_t *tx; 3988 uint64_t i; 3989 int error; 3990 uint64_t n, s, txg; 3991 bufwad_t *packbuf, *bigbuf; 3992 uint64_t packobj, packoff, packsize, bigobj, bigoff, bigsize; 3993 uint64_t blocksize = ztest_random_blocksize(); 3994 uint64_t chunksize = blocksize; 3995 uint64_t regions = 997; 3996 uint64_t stride = 123456789ULL; 3997 uint64_t width = 9; 3998 dmu_buf_t *bonus_db; 3999 arc_buf_t **bigbuf_arcbufs; 4000 dmu_object_info_t doi; 4001 4002 /* 4003 * This test uses two objects, packobj and bigobj, that are always 4004 * updated together (i.e. in the same tx) so that their contents are 4005 * in sync and can be compared. Their contents relate to each other 4006 * in a simple way: packobj is a dense array of 'bufwad' structures, 4007 * while bigobj is a sparse array of the same bufwads. Specifically, 4008 * for any index n, there are three bufwads that should be identical: 4009 * 4010 * packobj, at offset n * sizeof (bufwad_t) 4011 * bigobj, at the head of the nth chunk 4012 * bigobj, at the tail of the nth chunk 4013 * 4014 * The chunk size is set equal to bigobj block size so that 4015 * dmu_assign_arcbuf() can be tested for object updates. 4016 */ 4017 4018 /* 4019 * Read the directory info. If it's the first time, set things up. 4020 */ 4021 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_UINT64_OTHER, blocksize, 0); 4022 ztest_od_init(&od[1], id, FTAG, 1, DMU_OT_UINT64_OTHER, 0, chunksize); 4023 4024 if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0) 4025 return; 4026 4027 bigobj = od[0].od_object; 4028 packobj = od[1].od_object; 4029 blocksize = od[0].od_blocksize; 4030 chunksize = blocksize; 4031 ASSERT(chunksize == od[1].od_gen); 4032 4033 VERIFY(dmu_object_info(os, bigobj, &doi) == 0); 4034 VERIFY(ISP2(doi.doi_data_block_size)); 4035 VERIFY(chunksize == doi.doi_data_block_size); 4036 VERIFY(chunksize >= 2 * sizeof (bufwad_t)); 4037 4038 /* 4039 * Pick a random index and compute the offsets into packobj and bigobj. 
4040 */ 4041 n = ztest_random(regions) * stride + ztest_random(width); 4042 s = 1 + ztest_random(width - 1); 4043 4044 packoff = n * sizeof (bufwad_t); 4045 packsize = s * sizeof (bufwad_t); 4046 4047 bigoff = n * chunksize; 4048 bigsize = s * chunksize; 4049 4050 packbuf = umem_zalloc(packsize, UMEM_NOFAIL); 4051 bigbuf = umem_zalloc(bigsize, UMEM_NOFAIL); 4052 4053 VERIFY3U(0, ==, dmu_bonus_hold(os, bigobj, FTAG, &bonus_db)); 4054 4055 bigbuf_arcbufs = umem_zalloc(2 * s * sizeof (arc_buf_t *), UMEM_NOFAIL); 4056 4057 /* 4058 * Iteration 0 test zcopy for DB_UNCACHED dbufs. 4059 * Iteration 1 test zcopy to already referenced dbufs. 4060 * Iteration 2 test zcopy to dirty dbuf in the same txg. 4061 * Iteration 3 test zcopy to dbuf dirty in previous txg. 4062 * Iteration 4 test zcopy when dbuf is no longer dirty. 4063 * Iteration 5 test zcopy when it can't be done. 4064 * Iteration 6 one more zcopy write. 4065 */ 4066 for (i = 0; i < 7; i++) { 4067 uint64_t j; 4068 uint64_t off; 4069 4070 /* 4071 * In iteration 5 (i == 5) use arcbufs 4072 * that don't match bigobj blksz to test 4073 * dmu_assign_arcbuf() when it can't directly 4074 * assign an arcbuf to a dbuf. 4075 */ 4076 for (j = 0; j < s; j++) { 4077 if (i != 5) { 4078 bigbuf_arcbufs[j] = 4079 dmu_request_arcbuf(bonus_db, chunksize); 4080 } else { 4081 bigbuf_arcbufs[2 * j] = 4082 dmu_request_arcbuf(bonus_db, chunksize / 2); 4083 bigbuf_arcbufs[2 * j + 1] = 4084 dmu_request_arcbuf(bonus_db, chunksize / 2); 4085 } 4086 } 4087 4088 /* 4089 * Get a tx for the mods to both packobj and bigobj. 4090 */ 4091 tx = dmu_tx_create(os); 4092 4093 dmu_tx_hold_write(tx, packobj, packoff, packsize); 4094 dmu_tx_hold_write(tx, bigobj, bigoff, bigsize); 4095 4096 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG); 4097 if (txg == 0) { 4098 umem_free(packbuf, packsize); 4099 umem_free(bigbuf, bigsize); 4100 for (j = 0; j < s; j++) { 4101 if (i != 5) { 4102 dmu_return_arcbuf(bigbuf_arcbufs[j]); 4103 } else { 4104 dmu_return_arcbuf( 4105 bigbuf_arcbufs[2 * j]); 4106 dmu_return_arcbuf( 4107 bigbuf_arcbufs[2 * j + 1]); 4108 } 4109 } 4110 umem_free(bigbuf_arcbufs, 2 * s * sizeof (arc_buf_t *)); 4111 dmu_buf_rele(bonus_db, FTAG); 4112 return; 4113 } 4114 4115 /* 4116 * 50% of the time don't read objects in the 1st iteration to 4117 * test dmu_assign_arcbuf() for the case when there're no 4118 * existing dbufs for the specified offsets. 4119 */ 4120 if (i != 0 || ztest_random(2) != 0) { 4121 error = dmu_read(os, packobj, packoff, 4122 packsize, packbuf, DMU_READ_PREFETCH); 4123 ASSERT0(error); 4124 error = dmu_read(os, bigobj, bigoff, bigsize, 4125 bigbuf, DMU_READ_PREFETCH); 4126 ASSERT0(error); 4127 } 4128 compare_and_update_pbbufs(s, packbuf, bigbuf, bigsize, 4129 n, chunksize, txg); 4130 4131 /* 4132 * We've verified all the old bufwads, and made new ones. 4133 * Now write them out. 
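 *
 * Note that packobj is written with plain dmu_write() below, while
 * bigobj goes through the zero-copy path: each chunk is copied into
 * the arcbuf(s) requested above and handed to dmu_assign_arcbuf().
 * In the i == 5 pass the two half-chunk arcbufs deliberately mismatch
 * the block size, so dmu_assign_arcbuf() cannot simply adopt the
 * buffer and is expected to fall back to an ordinary copy -- the
 * "can't be done" case in the iteration table above.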
4134 */ 4135 dmu_write(os, packobj, packoff, packsize, packbuf, tx); 4136 if (ztest_opts.zo_verbose >= 7) { 4137 (void) printf("writing offset %llx size %llx" 4138 " txg %llx\n", 4139 (u_longlong_t)bigoff, 4140 (u_longlong_t)bigsize, 4141 (u_longlong_t)txg); 4142 } 4143 for (off = bigoff, j = 0; j < s; j++, off += chunksize) { 4144 dmu_buf_t *dbt; 4145 if (i != 5) { 4146 bcopy((caddr_t)bigbuf + (off - bigoff), 4147 bigbuf_arcbufs[j]->b_data, chunksize); 4148 } else { 4149 bcopy((caddr_t)bigbuf + (off - bigoff), 4150 bigbuf_arcbufs[2 * j]->b_data, 4151 chunksize / 2); 4152 bcopy((caddr_t)bigbuf + (off - bigoff) + 4153 chunksize / 2, 4154 bigbuf_arcbufs[2 * j + 1]->b_data, 4155 chunksize / 2); 4156 } 4157 4158 if (i == 1) { 4159 VERIFY(dmu_buf_hold(os, bigobj, off, 4160 FTAG, &dbt, DMU_READ_NO_PREFETCH) == 0); 4161 } 4162 if (i != 5) { 4163 dmu_assign_arcbuf(bonus_db, off, 4164 bigbuf_arcbufs[j], tx); 4165 } else { 4166 dmu_assign_arcbuf(bonus_db, off, 4167 bigbuf_arcbufs[2 * j], tx); 4168 dmu_assign_arcbuf(bonus_db, 4169 off + chunksize / 2, 4170 bigbuf_arcbufs[2 * j + 1], tx); 4171 } 4172 if (i == 1) { 4173 dmu_buf_rele(dbt, FTAG); 4174 } 4175 } 4176 dmu_tx_commit(tx); 4177 4178 /* 4179 * Sanity check the stuff we just wrote. 4180 */ 4181 { 4182 void *packcheck = umem_alloc(packsize, UMEM_NOFAIL); 4183 void *bigcheck = umem_alloc(bigsize, UMEM_NOFAIL); 4184 4185 VERIFY(0 == dmu_read(os, packobj, packoff, 4186 packsize, packcheck, DMU_READ_PREFETCH)); 4187 VERIFY(0 == dmu_read(os, bigobj, bigoff, 4188 bigsize, bigcheck, DMU_READ_PREFETCH)); 4189 4190 ASSERT(bcmp(packbuf, packcheck, packsize) == 0); 4191 ASSERT(bcmp(bigbuf, bigcheck, bigsize) == 0); 4192 4193 umem_free(packcheck, packsize); 4194 umem_free(bigcheck, bigsize); 4195 } 4196 if (i == 2) { 4197 txg_wait_open(dmu_objset_pool(os), 0); 4198 } else if (i == 3) { 4199 txg_wait_synced(dmu_objset_pool(os), 0); 4200 } 4201 } 4202 4203 dmu_buf_rele(bonus_db, FTAG); 4204 umem_free(packbuf, packsize); 4205 umem_free(bigbuf, bigsize); 4206 umem_free(bigbuf_arcbufs, 2 * s * sizeof (arc_buf_t *)); 4207} 4208 4209/* ARGSUSED */ 4210void 4211ztest_dmu_write_parallel(ztest_ds_t *zd, uint64_t id) 4212{ 4213 ztest_od_t od[1]; 4214 uint64_t offset = (1ULL << (ztest_random(20) + 43)) + 4215 (ztest_random(ZTEST_RANGE_LOCKS) << SPA_MAXBLOCKSHIFT); 4216 4217 /* 4218 * Have multiple threads write to large offsets in an object 4219 * to verify that parallel writes to an object -- even to the 4220 * same blocks within the object -- doesn't cause any trouble. 
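 *
 * The offsets are deliberately enormous: the first term is at least
 * 1ULL << 43 (8 TiB), which keeps the shared object extremely sparse,
 * and the second term confines the writers to a small set of
 * max-block-sized slots, so with many threads drawing from the same
 * small set some of them will land on the same block -- which is
 * exactly the collision this test is trying to provoke.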
4221 */ 4222 ztest_od_init(&od[0], ID_PARALLEL, FTAG, 0, DMU_OT_UINT64_OTHER, 0, 0); 4223 4224 if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0) 4225 return; 4226 4227 while (ztest_random(10) != 0) 4228 ztest_io(zd, od[0].od_object, offset); 4229} 4230 4231void 4232ztest_dmu_prealloc(ztest_ds_t *zd, uint64_t id) 4233{ 4234 ztest_od_t od[1]; 4235 uint64_t offset = (1ULL << (ztest_random(4) + SPA_MAXBLOCKSHIFT)) + 4236 (ztest_random(ZTEST_RANGE_LOCKS) << SPA_MAXBLOCKSHIFT); 4237 uint64_t count = ztest_random(20) + 1; 4238 uint64_t blocksize = ztest_random_blocksize(); 4239 void *data; 4240 4241 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_UINT64_OTHER, blocksize, 0); 4242 4243 if (ztest_object_init(zd, od, sizeof (od), !ztest_random(2)) != 0) 4244 return; 4245 4246 if (ztest_truncate(zd, od[0].od_object, offset, count * blocksize) != 0) 4247 return; 4248 4249 ztest_prealloc(zd, od[0].od_object, offset, count * blocksize); 4250 4251 data = umem_zalloc(blocksize, UMEM_NOFAIL); 4252 4253 while (ztest_random(count) != 0) { 4254 uint64_t randoff = offset + (ztest_random(count) * blocksize); 4255 if (ztest_write(zd, od[0].od_object, randoff, blocksize, 4256 data) != 0) 4257 break; 4258 while (ztest_random(4) != 0) 4259 ztest_io(zd, od[0].od_object, randoff); 4260 } 4261 4262 umem_free(data, blocksize); 4263} 4264 4265/* 4266 * Verify that zap_{create,destroy,add,remove,update} work as expected. 4267 */ 4268#define ZTEST_ZAP_MIN_INTS 1 4269#define ZTEST_ZAP_MAX_INTS 4 4270#define ZTEST_ZAP_MAX_PROPS 1000 4271 4272void 4273ztest_zap(ztest_ds_t *zd, uint64_t id) 4274{ 4275 objset_t *os = zd->zd_os; 4276 ztest_od_t od[1]; 4277 uint64_t object; 4278 uint64_t txg, last_txg; 4279 uint64_t value[ZTEST_ZAP_MAX_INTS]; 4280 uint64_t zl_ints, zl_intsize, prop; 4281 int i, ints; 4282 dmu_tx_t *tx; 4283 char propname[100], txgname[100]; 4284 int error; 4285 char *hc[2] = { "s.acl.h", ".s.open.h.hyLZlg" }; 4286 4287 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_ZAP_OTHER, 0, 0); 4288 4289 if (ztest_object_init(zd, od, sizeof (od), !ztest_random(2)) != 0) 4290 return; 4291 4292 object = od[0].od_object; 4293 4294 /* 4295 * Generate a known hash collision, and verify that 4296 * we can lookup and remove both entries. 4297 */ 4298 tx = dmu_tx_create(os); 4299 dmu_tx_hold_zap(tx, object, B_TRUE, NULL); 4300 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG); 4301 if (txg == 0) 4302 return; 4303 for (i = 0; i < 2; i++) { 4304 value[i] = i; 4305 VERIFY3U(0, ==, zap_add(os, object, hc[i], sizeof (uint64_t), 4306 1, &value[i], tx)); 4307 } 4308 for (i = 0; i < 2; i++) { 4309 VERIFY3U(EEXIST, ==, zap_add(os, object, hc[i], 4310 sizeof (uint64_t), 1, &value[i], tx)); 4311 VERIFY3U(0, ==, 4312 zap_length(os, object, hc[i], &zl_intsize, &zl_ints)); 4313 ASSERT3U(zl_intsize, ==, sizeof (uint64_t)); 4314 ASSERT3U(zl_ints, ==, 1); 4315 } 4316 for (i = 0; i < 2; i++) { 4317 VERIFY3U(0, ==, zap_remove(os, object, hc[i], tx)); 4318 } 4319 dmu_tx_commit(tx); 4320 4321 /* 4322 * Generate a buch of random entries. 4323 */ 4324 ints = MAX(ZTEST_ZAP_MIN_INTS, object % ZTEST_ZAP_MAX_INTS); 4325 4326 prop = ztest_random(ZTEST_ZAP_MAX_PROPS); 4327 (void) sprintf(propname, "prop_%llu", (u_longlong_t)prop); 4328 (void) sprintf(txgname, "txg_%llu", (u_longlong_t)prop); 4329 bzero(value, sizeof (value)); 4330 last_txg = 0; 4331 4332 /* 4333 * If these zap entries already exist, validate their contents. 
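 *
 * The invariant checked here is the one established by the update
 * transaction further down: txg_<prop> records the txg of the last
 * update, and the nth element of prop_<prop> must equal that txg +
 * object + n.  ENOENT simply means this particular (propname,
 * txgname) pair has never been written for this object, which is
 * also fine.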
4334 */ 4335 error = zap_length(os, object, txgname, &zl_intsize, &zl_ints); 4336 if (error == 0) { 4337 ASSERT3U(zl_intsize, ==, sizeof (uint64_t)); 4338 ASSERT3U(zl_ints, ==, 1); 4339 4340 VERIFY(zap_lookup(os, object, txgname, zl_intsize, 4341 zl_ints, &last_txg) == 0); 4342 4343 VERIFY(zap_length(os, object, propname, &zl_intsize, 4344 &zl_ints) == 0); 4345 4346 ASSERT3U(zl_intsize, ==, sizeof (uint64_t)); 4347 ASSERT3U(zl_ints, ==, ints); 4348 4349 VERIFY(zap_lookup(os, object, propname, zl_intsize, 4350 zl_ints, value) == 0); 4351 4352 for (i = 0; i < ints; i++) { 4353 ASSERT3U(value[i], ==, last_txg + object + i); 4354 } 4355 } else { 4356 ASSERT3U(error, ==, ENOENT); 4357 } 4358 4359 /* 4360 * Atomically update two entries in our zap object. 4361 * The first is named txg_%llu, and contains the txg 4362 * in which the property was last updated. The second 4363 * is named prop_%llu, and the nth element of its value 4364 * should be txg + object + n. 4365 */ 4366 tx = dmu_tx_create(os); 4367 dmu_tx_hold_zap(tx, object, B_TRUE, NULL); 4368 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG); 4369 if (txg == 0) 4370 return; 4371 4372 if (last_txg > txg) 4373 fatal(0, "zap future leak: old %llu new %llu", last_txg, txg); 4374 4375 for (i = 0; i < ints; i++) 4376 value[i] = txg + object + i; 4377 4378 VERIFY3U(0, ==, zap_update(os, object, txgname, sizeof (uint64_t), 4379 1, &txg, tx)); 4380 VERIFY3U(0, ==, zap_update(os, object, propname, sizeof (uint64_t), 4381 ints, value, tx)); 4382 4383 dmu_tx_commit(tx); 4384 4385 /* 4386 * Remove a random pair of entries. 4387 */ 4388 prop = ztest_random(ZTEST_ZAP_MAX_PROPS); 4389 (void) sprintf(propname, "prop_%llu", (u_longlong_t)prop); 4390 (void) sprintf(txgname, "txg_%llu", (u_longlong_t)prop); 4391 4392 error = zap_length(os, object, txgname, &zl_intsize, &zl_ints); 4393 4394 if (error == ENOENT) 4395 return; 4396 4397 ASSERT0(error); 4398 4399 tx = dmu_tx_create(os); 4400 dmu_tx_hold_zap(tx, object, B_TRUE, NULL); 4401 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG); 4402 if (txg == 0) 4403 return; 4404 VERIFY3U(0, ==, zap_remove(os, object, txgname, tx)); 4405 VERIFY3U(0, ==, zap_remove(os, object, propname, tx)); 4406 dmu_tx_commit(tx); 4407} 4408 4409/* 4410 * Testcase to test the upgrading of a microzap to fatzap. 4411 */ 4412void 4413ztest_fzap(ztest_ds_t *zd, uint64_t id) 4414{ 4415 objset_t *os = zd->zd_os; 4416 ztest_od_t od[1]; 4417 uint64_t object, txg; 4418 4419 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_ZAP_OTHER, 0, 0); 4420 4421 if (ztest_object_init(zd, od, sizeof (od), !ztest_random(2)) != 0) 4422 return; 4423 4424 object = od[0].od_object; 4425 4426 /* 4427 * Add entries to this ZAP and make sure it spills over 4428 * and gets upgraded to a fatzap. Also, since we are adding 4429 * 2050 entries we should see ptrtbl growth and leaf-block split. 
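 *
 * Why 2050: a microzap is confined to a single block of fixed-size
 * 64-byte entries, so even at the largest 128K block it can hold at
 * most a couple thousand entries (the first slot is the header).
 * Adding 2050 therefore forces the fatzap upgrade regardless of the
 * block size this object ends up with, and the surplus entries push
 * the resulting fatzap far enough to grow its pointer table and
 * split leaf blocks.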
4430 */ 4431 for (int i = 0; i < 2050; i++) { 4432 char name[ZFS_MAX_DATASET_NAME_LEN]; 4433 uint64_t value = i; 4434 dmu_tx_t *tx; 4435 int error; 4436 4437 (void) snprintf(name, sizeof (name), "fzap-%llu-%llu", 4438 id, value); 4439 4440 tx = dmu_tx_create(os); 4441 dmu_tx_hold_zap(tx, object, B_TRUE, name); 4442 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG); 4443 if (txg == 0) 4444 return; 4445 error = zap_add(os, object, name, sizeof (uint64_t), 1, 4446 &value, tx); 4447 ASSERT(error == 0 || error == EEXIST); 4448 dmu_tx_commit(tx); 4449 } 4450} 4451 4452/* ARGSUSED */ 4453void 4454ztest_zap_parallel(ztest_ds_t *zd, uint64_t id) 4455{ 4456 objset_t *os = zd->zd_os; 4457 ztest_od_t od[1]; 4458 uint64_t txg, object, count, wsize, wc, zl_wsize, zl_wc; 4459 dmu_tx_t *tx; 4460 int i, namelen, error; 4461 int micro = ztest_random(2); 4462 char name[20], string_value[20]; 4463 void *data; 4464 4465 ztest_od_init(&od[0], ID_PARALLEL, FTAG, micro, DMU_OT_ZAP_OTHER, 0, 0); 4466 4467 if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0) 4468 return; 4469 4470 object = od[0].od_object; 4471 4472 /* 4473 * Generate a random name of the form 'xxx.....' where each 4474 * x is a random printable character and the dots are dots. 4475 * There are 94 such characters, and the name length goes from 4476 * 6 to 20, so there are 94^3 * 15 = 12,458,760 possible names. 4477 */ 4478 namelen = ztest_random(sizeof (name) - 5) + 5 + 1; 4479 4480 for (i = 0; i < 3; i++) 4481 name[i] = '!' + ztest_random('~' - '!' + 1); 4482 for (; i < namelen - 1; i++) 4483 name[i] = '.'; 4484 name[i] = '\0'; 4485 4486 if ((namelen & 1) || micro) { 4487 wsize = sizeof (txg); 4488 wc = 1; 4489 data = &txg; 4490 } else { 4491 wsize = 1; 4492 wc = namelen; 4493 data = string_value; 4494 } 4495 4496 count = -1ULL; 4497 VERIFY0(zap_count(os, object, &count)); 4498 ASSERT(count != -1ULL); 4499 4500 /* 4501 * Select an operation: length, lookup, add, update, remove. 4502 */ 4503 i = ztest_random(5); 4504 4505 if (i >= 2) { 4506 tx = dmu_tx_create(os); 4507 dmu_tx_hold_zap(tx, object, B_TRUE, NULL); 4508 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG); 4509 if (txg == 0) 4510 return; 4511 bcopy(name, string_value, namelen); 4512 } else { 4513 tx = NULL; 4514 txg = 0; 4515 bzero(string_value, namelen); 4516 } 4517 4518 switch (i) { 4519 4520 case 0: 4521 error = zap_length(os, object, name, &zl_wsize, &zl_wc); 4522 if (error == 0) { 4523 ASSERT3U(wsize, ==, zl_wsize); 4524 ASSERT3U(wc, ==, zl_wc); 4525 } else { 4526 ASSERT3U(error, ==, ENOENT); 4527 } 4528 break; 4529 4530 case 1: 4531 error = zap_lookup(os, object, name, wsize, wc, data); 4532 if (error == 0) { 4533 if (data == string_value && 4534 bcmp(name, data, namelen) != 0) 4535 fatal(0, "name '%s' != val '%s' len %d", 4536 name, data, namelen); 4537 } else { 4538 ASSERT3U(error, ==, ENOENT); 4539 } 4540 break; 4541 4542 case 2: 4543 error = zap_add(os, object, name, wsize, wc, data, tx); 4544 ASSERT(error == 0 || error == EEXIST); 4545 break; 4546 4547 case 3: 4548 VERIFY(zap_update(os, object, name, wsize, wc, data, tx) == 0); 4549 break; 4550 4551 case 4: 4552 error = zap_remove(os, object, name, tx); 4553 ASSERT(error == 0 || error == ENOENT); 4554 break; 4555 } 4556 4557 if (tx != NULL) 4558 dmu_tx_commit(tx); 4559} 4560 4561/* 4562 * Commit callback data. 
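 *
 * A commit callback is registered on a transaction with
 * dmu_tx_callback_register() and is invoked exactly once: with
 * error == 0 some time after the transaction's txg reaches disk, or
 * with error == ECANCELED if the transaction is aborted.  Each
 * ztest_cb_data_t tracks one such registration so the test can verify
 * that the callback fired, fired with the expected error, and did not
 * fire before its txg had been synced.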
4563 */ 4564typedef struct ztest_cb_data { 4565 list_node_t zcd_node; 4566 uint64_t zcd_txg; 4567 int zcd_expected_err; 4568 boolean_t zcd_added; 4569 boolean_t zcd_called; 4570 spa_t *zcd_spa; 4571} ztest_cb_data_t; 4572 4573/* This is the actual commit callback function */ 4574static void 4575ztest_commit_callback(void *arg, int error) 4576{ 4577 ztest_cb_data_t *data = arg; 4578 uint64_t synced_txg; 4579 4580 VERIFY(data != NULL); 4581 VERIFY3S(data->zcd_expected_err, ==, error); 4582 VERIFY(!data->zcd_called); 4583 4584 synced_txg = spa_last_synced_txg(data->zcd_spa); 4585 if (data->zcd_txg > synced_txg) 4586 fatal(0, "commit callback of txg %" PRIu64 " called prematurely" 4587 ", last synced txg = %" PRIu64 "\n", data->zcd_txg, 4588 synced_txg); 4589 4590 data->zcd_called = B_TRUE; 4591 4592 if (error == ECANCELED) { 4593 ASSERT0(data->zcd_txg); 4594 ASSERT(!data->zcd_added); 4595 4596 /* 4597 * The private callback data should be destroyed here, but 4598 * since we are going to check the zcd_called field after 4599 * dmu_tx_abort(), we will destroy it there. 4600 */ 4601 return; 4602 } 4603 4604 /* Was this callback added to the global callback list? */ 4605 if (!data->zcd_added) 4606 goto out; 4607 4608 ASSERT3U(data->zcd_txg, !=, 0); 4609 4610 /* Remove our callback from the list */ 4611 mutex_enter(&zcl.zcl_callbacks_lock); 4612 list_remove(&zcl.zcl_callbacks, data); 4613 mutex_exit(&zcl.zcl_callbacks_lock); 4614 4615out: 4616 umem_free(data, sizeof (ztest_cb_data_t)); 4617} 4618 4619/* Allocate and initialize callback data structure */ 4620static ztest_cb_data_t * 4621ztest_create_cb_data(objset_t *os, uint64_t txg) 4622{ 4623 ztest_cb_data_t *cb_data; 4624 4625 cb_data = umem_zalloc(sizeof (ztest_cb_data_t), UMEM_NOFAIL); 4626 4627 cb_data->zcd_txg = txg; 4628 cb_data->zcd_spa = dmu_objset_spa(os); 4629 4630 return (cb_data); 4631} 4632 4633/* 4634 * If a number of txgs equal to this threshold have been created after a commit 4635 * callback has been registered but not called, then we assume there is an 4636 * implementation bug. 4637 */ 4638#define ZTEST_COMMIT_CALLBACK_THRESH (TXG_CONCURRENT_STATES + 2) 4639 4640/* 4641 * Commit callback test. 4642 */ 4643void 4644ztest_dmu_commit_callbacks(ztest_ds_t *zd, uint64_t id) 4645{ 4646 objset_t *os = zd->zd_os; 4647 ztest_od_t od[1]; 4648 dmu_tx_t *tx; 4649 ztest_cb_data_t *cb_data[3], *tmp_cb; 4650 uint64_t old_txg, txg; 4651 int i, error; 4652 4653 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_UINT64_OTHER, 0, 0); 4654 4655 if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0) 4656 return; 4657 4658 tx = dmu_tx_create(os); 4659 4660 cb_data[0] = ztest_create_cb_data(os, 0); 4661 dmu_tx_callback_register(tx, ztest_commit_callback, cb_data[0]); 4662 4663 dmu_tx_hold_write(tx, od[0].od_object, 0, sizeof (uint64_t)); 4664 4665 /* Every once in a while, abort the transaction on purpose */ 4666 if (ztest_random(100) == 0) 4667 error = -1; 4668 4669 if (!error) 4670 error = dmu_tx_assign(tx, TXG_NOWAIT); 4671 4672 txg = error ? 0 : dmu_tx_get_txg(tx); 4673 4674 cb_data[0]->zcd_txg = txg; 4675 cb_data[1] = ztest_create_cb_data(os, txg); 4676 dmu_tx_callback_register(tx, ztest_commit_callback, cb_data[1]); 4677 4678 if (error) { 4679 /* 4680 * It's not a strict requirement to call the registered 4681 * callbacks from inside dmu_tx_abort(), but that's what 4682 * it's supposed to happen in the current implementation 4683 * so we will check for that. 
4684 */ 4685 for (i = 0; i < 2; i++) { 4686 cb_data[i]->zcd_expected_err = ECANCELED; 4687 VERIFY(!cb_data[i]->zcd_called); 4688 } 4689 4690 dmu_tx_abort(tx); 4691 4692 for (i = 0; i < 2; i++) { 4693 VERIFY(cb_data[i]->zcd_called); 4694 umem_free(cb_data[i], sizeof (ztest_cb_data_t)); 4695 } 4696 4697 return; 4698 } 4699 4700 cb_data[2] = ztest_create_cb_data(os, txg); 4701 dmu_tx_callback_register(tx, ztest_commit_callback, cb_data[2]); 4702 4703 /* 4704 * Read existing data to make sure there isn't a future leak. 4705 */ 4706 VERIFY(0 == dmu_read(os, od[0].od_object, 0, sizeof (uint64_t), 4707 &old_txg, DMU_READ_PREFETCH)); 4708 4709 if (old_txg > txg) 4710 fatal(0, "future leak: got %" PRIu64 ", open txg is %" PRIu64, 4711 old_txg, txg); 4712 4713 dmu_write(os, od[0].od_object, 0, sizeof (uint64_t), &txg, tx); 4714 4715 mutex_enter(&zcl.zcl_callbacks_lock); 4716 4717 /* 4718 * Since commit callbacks don't have any ordering requirement and since 4719 * it is theoretically possible for a commit callback to be called 4720 * after an arbitrary amount of time has elapsed since its txg has been 4721 * synced, it is difficult to reliably determine whether a commit 4722 * callback hasn't been called due to high load or due to a flawed 4723 * implementation. 4724 * 4725 * In practice, we will assume that if after a certain number of txgs a 4726 * commit callback hasn't been called, then most likely there's an 4727 * implementation bug.. 4728 */ 4729 tmp_cb = list_head(&zcl.zcl_callbacks); 4730 if (tmp_cb != NULL && 4731 (txg - ZTEST_COMMIT_CALLBACK_THRESH) > tmp_cb->zcd_txg) { 4732 fatal(0, "Commit callback threshold exceeded, oldest txg: %" 4733 PRIu64 ", open txg: %" PRIu64 "\n", tmp_cb->zcd_txg, txg); 4734 } 4735 4736 /* 4737 * Let's find the place to insert our callbacks. 4738 * 4739 * Even though the list is ordered by txg, it is possible for the 4740 * insertion point to not be the end because our txg may already be 4741 * quiescing at this point and other callbacks in the open txg 4742 * (from other objsets) may have sneaked in. 
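 *
 * For example, if the list currently holds callbacks for txgs
 * { 7, 7, 8, 8, 9 } and ours were registered in txg 8, the walk below
 * backs up past the txg-9 entry and inserts the three new callbacks
 * right after the last txg-8 entry, keeping the list sorted by txg.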
4743 */ 4744 tmp_cb = list_tail(&zcl.zcl_callbacks); 4745 while (tmp_cb != NULL && tmp_cb->zcd_txg > txg) 4746 tmp_cb = list_prev(&zcl.zcl_callbacks, tmp_cb); 4747 4748 /* Add the 3 callbacks to the list */ 4749 for (i = 0; i < 3; i++) { 4750 if (tmp_cb == NULL) 4751 list_insert_head(&zcl.zcl_callbacks, cb_data[i]); 4752 else 4753 list_insert_after(&zcl.zcl_callbacks, tmp_cb, 4754 cb_data[i]); 4755 4756 cb_data[i]->zcd_added = B_TRUE; 4757 VERIFY(!cb_data[i]->zcd_called); 4758 4759 tmp_cb = cb_data[i]; 4760 } 4761 4762 mutex_exit(&zcl.zcl_callbacks_lock); 4763 4764 dmu_tx_commit(tx); 4765} 4766 4767/* ARGSUSED */ 4768void 4769ztest_dsl_prop_get_set(ztest_ds_t *zd, uint64_t id) 4770{ 4771 zfs_prop_t proplist[] = { 4772 ZFS_PROP_CHECKSUM, 4773 ZFS_PROP_COMPRESSION, 4774 ZFS_PROP_COPIES, 4775 ZFS_PROP_DEDUP 4776 }; 4777 4778 rw_enter(&ztest_name_lock, RW_READER); 4779 4780 for (int p = 0; p < sizeof (proplist) / sizeof (proplist[0]); p++) 4781 (void) ztest_dsl_prop_set_uint64(zd->zd_name, proplist[p], 4782 ztest_random_dsl_prop(proplist[p]), (int)ztest_random(2)); 4783 4784 rw_exit(&ztest_name_lock); 4785} 4786 4787/* ARGSUSED */ 4788void 4789ztest_remap_blocks(ztest_ds_t *zd, uint64_t id) 4790{ 4791 rw_enter(&ztest_name_lock, RW_READER); 4792 4793 int error = dmu_objset_remap_indirects(zd->zd_name); 4794 if (error == ENOSPC) 4795 error = 0; 4796 ASSERT0(error); 4797 4798 rw_exit(&ztest_name_lock); 4799} 4800 4801/* ARGSUSED */ 4802void 4803ztest_spa_prop_get_set(ztest_ds_t *zd, uint64_t id) 4804{ 4805 nvlist_t *props = NULL; 4806 4807 rw_enter(&ztest_name_lock, RW_READER); 4808 4809 (void) ztest_spa_prop_set_uint64(ZPOOL_PROP_DEDUPDITTO, 4810 ZIO_DEDUPDITTO_MIN + ztest_random(ZIO_DEDUPDITTO_MIN)); 4811 4812 VERIFY0(spa_prop_get(ztest_spa, &props)); 4813 4814 if (ztest_opts.zo_verbose >= 6) 4815 dump_nvlist(props, 4); 4816 4817 nvlist_free(props); 4818 4819 rw_exit(&ztest_name_lock); 4820} 4821 4822static int 4823user_release_one(const char *snapname, const char *holdname) 4824{ 4825 nvlist_t *snaps, *holds; 4826 int error; 4827 4828 snaps = fnvlist_alloc(); 4829 holds = fnvlist_alloc(); 4830 fnvlist_add_boolean(holds, holdname); 4831 fnvlist_add_nvlist(snaps, snapname, holds); 4832 fnvlist_free(holds); 4833 error = dsl_dataset_user_release(snaps, NULL); 4834 fnvlist_free(snaps); 4835 return (error); 4836} 4837 4838/* 4839 * Test snapshot hold/release and deferred destroy. 4840 */ 4841void 4842ztest_dmu_snapshot_hold(ztest_ds_t *zd, uint64_t id) 4843{ 4844 int error; 4845 objset_t *os = zd->zd_os; 4846 objset_t *origin; 4847 char snapname[100]; 4848 char fullname[100]; 4849 char clonename[100]; 4850 char tag[100]; 4851 char osname[ZFS_MAX_DATASET_NAME_LEN]; 4852 nvlist_t *holds; 4853 4854 rw_enter(&ztest_name_lock, RW_READER); 4855 4856 dmu_objset_name(os, osname); 4857 4858 (void) snprintf(snapname, sizeof (snapname), "sh1_%llu", id); 4859 (void) snprintf(fullname, sizeof (fullname), "%s@%s", osname, snapname); 4860 (void) snprintf(clonename, sizeof (clonename), 4861 "%s/ch1_%llu", osname, id); 4862 (void) snprintf(tag, sizeof (tag), "tag_%llu", id); 4863 4864 /* 4865 * Clean up from any previous run. 
4866 */ 4867 error = dsl_destroy_head(clonename); 4868 if (error != ENOENT) 4869 ASSERT0(error); 4870 error = user_release_one(fullname, tag); 4871 if (error != ESRCH && error != ENOENT) 4872 ASSERT0(error); 4873 error = dsl_destroy_snapshot(fullname, B_FALSE); 4874 if (error != ENOENT) 4875 ASSERT0(error); 4876 4877 /* 4878 * Create snapshot, clone it, mark snap for deferred destroy, 4879 * destroy clone, verify snap was also destroyed. 4880 */ 4881 error = dmu_objset_snapshot_one(osname, snapname); 4882 if (error) { 4883 if (error == ENOSPC) { 4884 ztest_record_enospc("dmu_objset_snapshot"); 4885 goto out; 4886 } 4887 fatal(0, "dmu_objset_snapshot(%s) = %d", fullname, error); 4888 } 4889 4890 error = dmu_objset_clone(clonename, fullname); 4891 if (error) { 4892 if (error == ENOSPC) { 4893 ztest_record_enospc("dmu_objset_clone"); 4894 goto out; 4895 } 4896 fatal(0, "dmu_objset_clone(%s) = %d", clonename, error); 4897 } 4898 4899 error = dsl_destroy_snapshot(fullname, B_TRUE); 4900 if (error) { 4901 fatal(0, "dsl_destroy_snapshot(%s, B_TRUE) = %d", 4902 fullname, error); 4903 } 4904 4905 error = dsl_destroy_head(clonename); 4906 if (error) 4907 fatal(0, "dsl_destroy_head(%s) = %d", clonename, error); 4908 4909 error = dmu_objset_hold(fullname, FTAG, &origin); 4910 if (error != ENOENT) 4911 fatal(0, "dmu_objset_hold(%s) = %d", fullname, error); 4912 4913 /* 4914 * Create snapshot, add temporary hold, verify that we can't 4915 * destroy a held snapshot, mark for deferred destroy, 4916 * release hold, verify snapshot was destroyed. 4917 */ 4918 error = dmu_objset_snapshot_one(osname, snapname); 4919 if (error) { 4920 if (error == ENOSPC) { 4921 ztest_record_enospc("dmu_objset_snapshot"); 4922 goto out; 4923 } 4924 fatal(0, "dmu_objset_snapshot(%s) = %d", fullname, error); 4925 } 4926 4927 holds = fnvlist_alloc(); 4928 fnvlist_add_string(holds, fullname, tag); 4929 error = dsl_dataset_user_hold(holds, 0, NULL); 4930 fnvlist_free(holds); 4931 4932 if (error == ENOSPC) { 4933 ztest_record_enospc("dsl_dataset_user_hold"); 4934 goto out; 4935 } else if (error) { 4936 fatal(0, "dsl_dataset_user_hold(%s, %s) = %u", 4937 fullname, tag, error); 4938 } 4939 4940 error = dsl_destroy_snapshot(fullname, B_FALSE); 4941 if (error != EBUSY) { 4942 fatal(0, "dsl_destroy_snapshot(%s, B_FALSE) = %d", 4943 fullname, error); 4944 } 4945 4946 error = dsl_destroy_snapshot(fullname, B_TRUE); 4947 if (error) { 4948 fatal(0, "dsl_destroy_snapshot(%s, B_TRUE) = %d", 4949 fullname, error); 4950 } 4951 4952 error = user_release_one(fullname, tag); 4953 if (error) 4954 fatal(0, "user_release_one(%s, %s) = %d", fullname, tag, error); 4955 4956 VERIFY3U(dmu_objset_hold(fullname, FTAG, &origin), ==, ENOENT); 4957 4958out: 4959 rw_exit(&ztest_name_lock); 4960} 4961 4962/* 4963 * Inject random faults into the on-disk data. 
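 *
 * Roughly speaking the test has two halves.  First it picks a victim:
 * half the time a normal or slog leaf, half the time an l2cache
 * device.  If redundancy allows, it makes that vdev misbehave by
 * closing its file descriptor behind its back, marking it
 * cant_read/cant_write, or bouncing it offline and online.  Then, if
 * the pool can tolerate at least one fault, it writes a bad word
 * directly into the chosen leaf's backing file, staying clear of the
 * vdev labels and making sure no logical block is damaged on more
 * than one leaf.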
4964 */ 4965/* ARGSUSED */ 4966void 4967ztest_fault_inject(ztest_ds_t *zd, uint64_t id) 4968{ 4969 ztest_shared_t *zs = ztest_shared; 4970 spa_t *spa = ztest_spa; 4971 int fd; 4972 uint64_t offset; 4973 uint64_t leaves; 4974 uint64_t bad = 0x1990c0ffeedecadeULL; 4975 uint64_t top, leaf; 4976 char path0[MAXPATHLEN]; 4977 char pathrand[MAXPATHLEN]; 4978 size_t fsize; 4979 int bshift = SPA_MAXBLOCKSHIFT + 2; 4980 int iters = 1000; 4981 int maxfaults; 4982 int mirror_save; 4983 vdev_t *vd0 = NULL; 4984 uint64_t guid0 = 0; 4985 boolean_t islog = B_FALSE; 4986 4987 mutex_enter(&ztest_vdev_lock); 4988 maxfaults = MAXFAULTS(); 4989 leaves = MAX(zs->zs_mirrors, 1) * ztest_opts.zo_raidz; 4990 mirror_save = zs->zs_mirrors; 4991 mutex_exit(&ztest_vdev_lock); 4992 4993 ASSERT(leaves >= 1); 4994 4995 /* 4996 * Grab the name lock as reader. There are some operations 4997 * which don't like to have their vdevs changed while 4998 * they are in progress (i.e. spa_change_guid). Those 4999 * operations will have grabbed the name lock as writer. 5000 */ 5001 rw_enter(&ztest_name_lock, RW_READER); 5002 5003 /* 5004 * We need SCL_STATE here because we're going to look at vd0->vdev_tsd. 5005 */ 5006 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER); 5007 5008 if (ztest_random(2) == 0) { 5009 /* 5010 * Inject errors on a normal data device or slog device. 5011 */ 5012 top = ztest_random_vdev_top(spa, B_TRUE); 5013 leaf = ztest_random(leaves) + zs->zs_splits; 5014 5015 /* 5016 * Generate paths to the first leaf in this top-level vdev, 5017 * and to the random leaf we selected. We'll induce transient 5018 * write failures and random online/offline activity on leaf 0, 5019 * and we'll write random garbage to the randomly chosen leaf. 5020 */ 5021 (void) snprintf(path0, sizeof (path0), ztest_dev_template, 5022 ztest_opts.zo_dir, ztest_opts.zo_pool, 5023 top * leaves + zs->zs_splits); 5024 (void) snprintf(pathrand, sizeof (pathrand), ztest_dev_template, 5025 ztest_opts.zo_dir, ztest_opts.zo_pool, 5026 top * leaves + leaf); 5027 5028 vd0 = vdev_lookup_by_path(spa->spa_root_vdev, path0); 5029 if (vd0 != NULL && vd0->vdev_top->vdev_islog) 5030 islog = B_TRUE; 5031 5032 /* 5033 * If the top-level vdev needs to be resilvered 5034 * then we only allow faults on the device that is 5035 * resilvering. 5036 */ 5037 if (vd0 != NULL && maxfaults != 1 && 5038 (!vdev_resilver_needed(vd0->vdev_top, NULL, NULL) || 5039 vd0->vdev_resilver_txg != 0)) { 5040 /* 5041 * Make vd0 explicitly claim to be unreadable, 5042 * or unwriteable, or reach behind its back 5043 * and close the underlying fd. We can do this if 5044 * maxfaults == 0 because we'll fail and reexecute, 5045 * and we can do it if maxfaults >= 2 because we'll 5046 * have enough redundancy. If maxfaults == 1, the 5047 * combination of this with injection of random data 5048 * corruption below exceeds the pool's fault tolerance. 5049 */ 5050 vdev_file_t *vf = vd0->vdev_tsd; 5051 5052 zfs_dbgmsg("injecting fault to vdev %llu; maxfaults=%d", 5053 (long long)vd0->vdev_id, (int)maxfaults); 5054 5055 if (vf != NULL && ztest_random(3) == 0) { 5056 (void) close(vf->vf_vnode->v_fd); 5057 vf->vf_vnode->v_fd = -1; 5058 } else if (ztest_random(2) == 0) { 5059 vd0->vdev_cant_read = B_TRUE; 5060 } else { 5061 vd0->vdev_cant_write = B_TRUE; 5062 } 5063 guid0 = vd0->vdev_guid; 5064 } 5065 } else { 5066 /* 5067 * Inject errors on an l2cache device. 
5068 */ 5069 spa_aux_vdev_t *sav = &spa->spa_l2cache; 5070 5071 if (sav->sav_count == 0) { 5072 spa_config_exit(spa, SCL_STATE, FTAG); 5073 rw_exit(&ztest_name_lock); 5074 return; 5075 } 5076 vd0 = sav->sav_vdevs[ztest_random(sav->sav_count)]; 5077 guid0 = vd0->vdev_guid; 5078 (void) strcpy(path0, vd0->vdev_path); 5079 (void) strcpy(pathrand, vd0->vdev_path); 5080 5081 leaf = 0; 5082 leaves = 1; 5083 maxfaults = INT_MAX; /* no limit on cache devices */ 5084 } 5085 5086 spa_config_exit(spa, SCL_STATE, FTAG); 5087 rw_exit(&ztest_name_lock); 5088 5089 /* 5090 * If we can tolerate two or more faults, or we're dealing 5091 * with a slog, randomly online/offline vd0. 5092 */ 5093 if ((maxfaults >= 2 || islog) && guid0 != 0) { 5094 if (ztest_random(10) < 6) { 5095 int flags = (ztest_random(2) == 0 ? 5096 ZFS_OFFLINE_TEMPORARY : 0); 5097 5098 /* 5099 * We have to grab the zs_name_lock as writer to 5100 * prevent a race between offlining a slog and 5101 * destroying a dataset. Offlining the slog will 5102 * grab a reference on the dataset which may cause 5103 * dmu_objset_destroy() to fail with EBUSY thus 5104 * leaving the dataset in an inconsistent state. 5105 */ 5106 if (islog) 5107 rw_enter(&ztest_name_lock, RW_WRITER); 5108 5109 VERIFY(vdev_offline(spa, guid0, flags) != EBUSY); 5110 5111 if (islog) 5112 rw_exit(&ztest_name_lock); 5113 } else { 5114 /* 5115 * Ideally we would like to be able to randomly 5116 * call vdev_[on|off]line without holding locks 5117 * to force unpredictable failures but the side 5118 * effects of vdev_[on|off]line prevent us from 5119 * doing so. We grab the ztest_vdev_lock here to 5120 * prevent a race between injection testing and 5121 * aux_vdev removal. 5122 */ 5123 mutex_enter(&ztest_vdev_lock); 5124 (void) vdev_online(spa, guid0, 0, NULL); 5125 mutex_exit(&ztest_vdev_lock); 5126 } 5127 } 5128 5129 if (maxfaults == 0) 5130 return; 5131 5132 /* 5133 * We have at least single-fault tolerance, so inject data corruption. 5134 */ 5135 fd = open(pathrand, O_RDWR); 5136 5137 if (fd == -1) /* we hit a gap in the device namespace */ 5138 return; 5139 5140 fsize = lseek(fd, 0, SEEK_END); 5141 5142 while (--iters != 0) { 5143 /* 5144 * The offset must be chosen carefully to ensure that 5145 * we do not inject a given logical block with errors 5146 * on two different leaf devices, because ZFS can not 5147 * tolerate that (if maxfaults==1). 5148 * 5149 * We divide each leaf into chunks of size 5150 * (# leaves * SPA_MAXBLOCKSIZE * 4). Within each chunk 5151 * there is a series of ranges to which we can inject errors. 5152 * Each range can accept errors on only a single leaf vdev. 5153 * The error injection ranges are separated by ranges 5154 * which we will not inject errors on any device (DMZs). 5155 * Each DMZ must be large enough such that a single block 5156 * can not straddle it, so that a single block can not be 5157 * a target in two different injection ranges (on different 5158 * leaf vdevs). 5159 * 5160 * For example, with 3 leaves, each chunk looks like: 5161 * 0 to 32M: injection range for leaf 0 5162 * 32M to 64M: DMZ - no injection allowed 5163 * 64M to 96M: injection range for leaf 1 5164 * 96M to 128M: DMZ - no injection allowed 5165 * 128M to 160M: injection range for leaf 2 5166 * 160M to 192M: DMZ - no injection allowed 5167 */ 5168 offset = ztest_random(fsize / (leaves << bshift)) * 5169 (leaves << bshift) + (leaf << bshift) + 5170 (ztest_random(1ULL << (bshift - 1)) & -8ULL); 5171 5172 /* 5173 * Only allow damage to the labels at one end of the vdev. 
5174 * 5175 * If all labels are damaged, the device will be totally 5176 * inaccessible, which will result in loss of data, 5177 * because we also damage (parts of) the other side of 5178 * the mirror/raidz. 5179 * 5180 * Additionally, we will always have both an even and an 5181 * odd label, so that we can handle crashes in the 5182 * middle of vdev_config_sync(). 5183 */ 5184 if ((leaf & 1) == 0 && offset < VDEV_LABEL_START_SIZE) 5185 continue; 5186 5187 /* 5188 * The two end labels are stored at the "end" of the disk, but 5189 * the end of the disk (vdev_psize) is aligned to 5190 * sizeof (vdev_label_t). 5191 */ 5192 uint64_t psize = P2ALIGN(fsize, sizeof (vdev_label_t)); 5193 if ((leaf & 1) == 1 && 5194 offset + sizeof (bad) > psize - VDEV_LABEL_END_SIZE) 5195 continue; 5196 5197 mutex_enter(&ztest_vdev_lock); 5198 if (mirror_save != zs->zs_mirrors) { 5199 mutex_exit(&ztest_vdev_lock); 5200 (void) close(fd); 5201 return; 5202 } 5203 5204 if (pwrite(fd, &bad, sizeof (bad), offset) != sizeof (bad)) 5205 fatal(1, "can't inject bad word at 0x%llx in %s", 5206 offset, pathrand); 5207 5208 mutex_exit(&ztest_vdev_lock); 5209 5210 if (ztest_opts.zo_verbose >= 7) 5211 (void) printf("injected bad word into %s," 5212 " offset 0x%llx\n", pathrand, (u_longlong_t)offset); 5213 } 5214 5215 (void) close(fd); 5216} 5217 5218/* 5219 * Verify that DDT repair works as expected. 5220 */ 5221void 5222ztest_ddt_repair(ztest_ds_t *zd, uint64_t id) 5223{ 5224 ztest_shared_t *zs = ztest_shared; 5225 spa_t *spa = ztest_spa; 5226 objset_t *os = zd->zd_os; 5227 ztest_od_t od[1]; 5228 uint64_t object, blocksize, txg, pattern, psize; 5229 enum zio_checksum checksum = spa_dedup_checksum(spa); 5230 dmu_buf_t *db; 5231 dmu_tx_t *tx; 5232 abd_t *abd; 5233 blkptr_t blk; 5234 int copies = 2 * ZIO_DEDUPDITTO_MIN; 5235 5236 blocksize = ztest_random_blocksize(); 5237 blocksize = MIN(blocksize, 2048); /* because we write so many */ 5238 5239 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_UINT64_OTHER, blocksize, 0); 5240 5241 if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0) 5242 return; 5243 5244 /* 5245 * Take the name lock as writer to prevent anyone else from changing 5246 * the pool and dataset properies we need to maintain during this test. 5247 */ 5248 rw_enter(&ztest_name_lock, RW_WRITER); 5249 5250 if (ztest_dsl_prop_set_uint64(zd->zd_name, ZFS_PROP_DEDUP, checksum, 5251 B_FALSE) != 0 || 5252 ztest_dsl_prop_set_uint64(zd->zd_name, ZFS_PROP_COPIES, 1, 5253 B_FALSE) != 0) { 5254 rw_exit(&ztest_name_lock); 5255 return; 5256 } 5257 5258 dmu_objset_stats_t dds; 5259 dsl_pool_config_enter(dmu_objset_pool(os), FTAG); 5260 dmu_objset_fast_stat(os, &dds); 5261 dsl_pool_config_exit(dmu_objset_pool(os), FTAG); 5262 5263 object = od[0].od_object; 5264 blocksize = od[0].od_blocksize; 5265 pattern = zs->zs_guid ^ dds.dds_guid; 5266 5267 ASSERT(object != 0); 5268 5269 tx = dmu_tx_create(os); 5270 dmu_tx_hold_write(tx, object, 0, copies * blocksize); 5271 txg = ztest_tx_assign(tx, TXG_WAIT, FTAG); 5272 if (txg == 0) { 5273 rw_exit(&ztest_name_lock); 5274 return; 5275 } 5276 5277 /* 5278 * Write all the copies of our block. 
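 *
 * All of the copies carry the same pattern, so with dedup enabled
 * they should collapse to a single DDT entry whose refcount equals
 * 'copies'.  Since copies is twice ZIO_DEDUPDITTO_MIN and ztest_run()
 * sets spa_dedup_ditto to that same value, the DDT is also expected
 * to have written a ditto copy of the block by the time the refcount
 * peaks -- that extra copy is what the deliberate damage further down
 * relies on for self-healing.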
5279 */ 5280 for (int i = 0; i < copies; i++) { 5281 uint64_t offset = i * blocksize; 5282 int error = dmu_buf_hold(os, object, offset, FTAG, &db, 5283 DMU_READ_NO_PREFETCH); 5284 if (error != 0) { 5285 fatal(B_FALSE, "dmu_buf_hold(%p, %llu, %llu) = %u", 5286 os, (long long)object, (long long) offset, error); 5287 } 5288 ASSERT(db->db_offset == offset); 5289 ASSERT(db->db_size == blocksize); 5290 ASSERT(ztest_pattern_match(db->db_data, db->db_size, pattern) || 5291 ztest_pattern_match(db->db_data, db->db_size, 0ULL)); 5292 dmu_buf_will_fill(db, tx); 5293 ztest_pattern_set(db->db_data, db->db_size, pattern); 5294 dmu_buf_rele(db, FTAG); 5295 } 5296 5297 dmu_tx_commit(tx); 5298 txg_wait_synced(spa_get_dsl(spa), txg); 5299 5300 /* 5301 * Find out what block we got. 5302 */ 5303 VERIFY0(dmu_buf_hold(os, object, 0, FTAG, &db, 5304 DMU_READ_NO_PREFETCH)); 5305 blk = *((dmu_buf_impl_t *)db)->db_blkptr; 5306 dmu_buf_rele(db, FTAG); 5307 5308 /* 5309 * Damage the block. Dedup-ditto will save us when we read it later. 5310 */ 5311 psize = BP_GET_PSIZE(&blk); 5312 abd = abd_alloc_linear(psize, B_TRUE); 5313 ztest_pattern_set(abd_to_buf(abd), psize, ~pattern); 5314 5315 (void) zio_wait(zio_rewrite(NULL, spa, 0, &blk, 5316 abd, psize, NULL, NULL, ZIO_PRIORITY_SYNC_WRITE, 5317 ZIO_FLAG_CANFAIL | ZIO_FLAG_INDUCE_DAMAGE, NULL)); 5318 5319 abd_free(abd); 5320 5321 rw_exit(&ztest_name_lock); 5322} 5323 5324/* 5325 * Scrub the pool. 5326 */ 5327/* ARGSUSED */ 5328void 5329ztest_scrub(ztest_ds_t *zd, uint64_t id) 5330{ 5331 spa_t *spa = ztest_spa; 5332 5333 (void) spa_scan(spa, POOL_SCAN_SCRUB); 5334 (void) poll(NULL, 0, 100); /* wait a moment, then force a restart */ 5335 (void) spa_scan(spa, POOL_SCAN_SCRUB); 5336} 5337 5338/* 5339 * Change the guid for the pool. 5340 */ 5341/* ARGSUSED */ 5342void 5343ztest_reguid(ztest_ds_t *zd, uint64_t id) 5344{ 5345 spa_t *spa = ztest_spa; 5346 uint64_t orig, load; 5347 int error; 5348 5349 orig = spa_guid(spa); 5350 load = spa_load_guid(spa); 5351 5352 rw_enter(&ztest_name_lock, RW_WRITER); 5353 error = spa_change_guid(spa); 5354 rw_exit(&ztest_name_lock); 5355 5356 if (error != 0) 5357 return; 5358 5359 if (ztest_opts.zo_verbose >= 4) { 5360 (void) printf("Changed guid old %llu -> %llu\n", 5361 (u_longlong_t)orig, (u_longlong_t)spa_guid(spa)); 5362 } 5363 5364 VERIFY3U(orig, !=, spa_guid(spa)); 5365 VERIFY3U(load, ==, spa_load_guid(spa)); 5366} 5367 5368/* 5369 * Rename the pool to a different name and then rename it back. 5370 */ 5371/* ARGSUSED */ 5372void 5373ztest_spa_rename(ztest_ds_t *zd, uint64_t id) 5374{ 5375 char *oldname, *newname; 5376 spa_t *spa; 5377 5378 rw_enter(&ztest_name_lock, RW_WRITER); 5379 5380 oldname = ztest_opts.zo_pool; 5381 newname = umem_alloc(strlen(oldname) + 5, UMEM_NOFAIL); 5382 (void) strcpy(newname, oldname); 5383 (void) strcat(newname, "_tmp"); 5384 5385 /* 5386 * Do the rename 5387 */ 5388 VERIFY3U(0, ==, spa_rename(oldname, newname)); 5389 5390 /* 5391 * Try to open it under the old name, which shouldn't exist 5392 */ 5393 VERIFY3U(ENOENT, ==, spa_open(oldname, &spa, FTAG)); 5394 5395 /* 5396 * Open it under the new name and make sure it's still the same spa_t. 
5397 */ 5398 VERIFY3U(0, ==, spa_open(newname, &spa, FTAG)); 5399 5400 ASSERT(spa == ztest_spa); 5401 spa_close(spa, FTAG); 5402 5403 /* 5404 * Rename it back to the original 5405 */ 5406 VERIFY3U(0, ==, spa_rename(newname, oldname)); 5407 5408 /* 5409 * Make sure it can still be opened 5410 */ 5411 VERIFY3U(0, ==, spa_open(oldname, &spa, FTAG)); 5412 5413 ASSERT(spa == ztest_spa); 5414 spa_close(spa, FTAG); 5415 5416 umem_free(newname, strlen(newname) + 1); 5417 5418 rw_exit(&ztest_name_lock); 5419} 5420 5421/* 5422 * Verify pool integrity by running zdb. 5423 */ 5424static void 5425ztest_run_zdb(char *pool) 5426{ 5427 int status; 5428 char zdb[MAXPATHLEN + MAXNAMELEN + 20]; 5429 char zbuf[1024]; 5430 char *bin; 5431 char *ztest; 5432 char *isa; 5433 int isalen; 5434 FILE *fp; 5435 5436 strlcpy(zdb, "/usr/bin/ztest", sizeof(zdb)); 5437 5438 /* zdb lives in /usr/sbin, while ztest lives in /usr/bin */ 5439 bin = strstr(zdb, "/usr/bin/"); 5440 ztest = strstr(bin, "/ztest"); 5441 isa = bin + 8; 5442 isalen = ztest - isa; 5443 isa = strdup(isa); 5444 /* LINTED */ 5445 (void) sprintf(bin, 5446 "/usr/sbin%.*s/zdb -bcc%s%s -G -d -U %s %s", 5447 isalen, 5448 isa, 5449 ztest_opts.zo_verbose >= 3 ? "s" : "", 5450 ztest_opts.zo_verbose >= 4 ? "v" : "", 5451 spa_config_path, 5452 pool); 5453 free(isa); 5454 5455 if (ztest_opts.zo_verbose >= 5) 5456 (void) printf("Executing %s\n", strstr(zdb, "zdb ")); 5457 5458 fp = popen(zdb, "r"); 5459 assert(fp != NULL); 5460 5461 while (fgets(zbuf, sizeof (zbuf), fp) != NULL) 5462 if (ztest_opts.zo_verbose >= 3) 5463 (void) printf("%s", zbuf); 5464 5465 status = pclose(fp); 5466 5467 if (status == 0) 5468 return; 5469 5470 ztest_dump_core = 0; 5471 if (WIFEXITED(status)) 5472 fatal(0, "'%s' exit code %d", zdb, WEXITSTATUS(status)); 5473 else 5474 fatal(0, "'%s' died with signal %d", zdb, WTERMSIG(status)); 5475} 5476 5477static void 5478ztest_walk_pool_directory(char *header) 5479{ 5480 spa_t *spa = NULL; 5481 5482 if (ztest_opts.zo_verbose >= 6) 5483 (void) printf("%s\n", header); 5484 5485 mutex_enter(&spa_namespace_lock); 5486 while ((spa = spa_next(spa)) != NULL) 5487 if (ztest_opts.zo_verbose >= 6) 5488 (void) printf("\t%s\n", spa_name(spa)); 5489 mutex_exit(&spa_namespace_lock); 5490} 5491 5492static void 5493ztest_spa_import_export(char *oldname, char *newname) 5494{ 5495 nvlist_t *config, *newconfig; 5496 uint64_t pool_guid; 5497 spa_t *spa; 5498 int error; 5499 5500 if (ztest_opts.zo_verbose >= 4) { 5501 (void) printf("import/export: old = %s, new = %s\n", 5502 oldname, newname); 5503 } 5504 5505 /* 5506 * Clean up from previous runs. 5507 */ 5508 (void) spa_destroy(newname); 5509 5510 /* 5511 * Get the pool's configuration and guid. 5512 */ 5513 VERIFY3U(0, ==, spa_open(oldname, &spa, FTAG)); 5514 5515 /* 5516 * Kick off a scrub to tickle scrub/export races. 5517 */ 5518 if (ztest_random(2) == 0) 5519 (void) spa_scan(spa, POOL_SCAN_SCRUB); 5520 5521 pool_guid = spa_guid(spa); 5522 spa_close(spa, FTAG); 5523 5524 ztest_walk_pool_directory("pools before export"); 5525 5526 /* 5527 * Export it. 5528 */ 5529 VERIFY3U(0, ==, spa_export(oldname, &config, B_FALSE, B_FALSE)); 5530 5531 ztest_walk_pool_directory("pools after export"); 5532 5533 /* 5534 * Try to import it. 5535 */ 5536 newconfig = spa_tryimport(config); 5537 ASSERT(newconfig != NULL); 5538 nvlist_free(newconfig); 5539 5540 /* 5541 * Import it under the new name. 
5542 */ 5543 error = spa_import(newname, config, NULL, 0); 5544 if (error != 0) { 5545 dump_nvlist(config, 0); 5546 fatal(B_FALSE, "couldn't import pool %s as %s: error %u", 5547 oldname, newname, error); 5548 } 5549 5550 ztest_walk_pool_directory("pools after import"); 5551 5552 /* 5553 * Try to import it again -- should fail with EEXIST. 5554 */ 5555 VERIFY3U(EEXIST, ==, spa_import(newname, config, NULL, 0)); 5556 5557 /* 5558 * Try to import it under a different name -- should fail with EEXIST. 5559 */ 5560 VERIFY3U(EEXIST, ==, spa_import(oldname, config, NULL, 0)); 5561 5562 /* 5563 * Verify that the pool is no longer visible under the old name. 5564 */ 5565 VERIFY3U(ENOENT, ==, spa_open(oldname, &spa, FTAG)); 5566 5567 /* 5568 * Verify that we can open and close the pool using the new name. 5569 */ 5570 VERIFY3U(0, ==, spa_open(newname, &spa, FTAG)); 5571 ASSERT(pool_guid == spa_guid(spa)); 5572 spa_close(spa, FTAG); 5573 5574 nvlist_free(config); 5575} 5576 5577static void 5578ztest_resume(spa_t *spa) 5579{ 5580 if (spa_suspended(spa) && ztest_opts.zo_verbose >= 6) 5581 (void) printf("resuming from suspended state\n"); 5582 spa_vdev_state_enter(spa, SCL_NONE); 5583 vdev_clear(spa, NULL); 5584 (void) spa_vdev_state_exit(spa, NULL, 0); 5585 (void) zio_resume(spa); 5586} 5587 5588static void * 5589ztest_resume_thread(void *arg) 5590{ 5591 spa_t *spa = arg; 5592 5593 while (!ztest_exiting) { 5594 if (spa_suspended(spa)) 5595 ztest_resume(spa); 5596 (void) poll(NULL, 0, 100); 5597 5598 /* 5599 * Periodically change the zfs_compressed_arc_enabled setting. 5600 */ 5601 if (ztest_random(10) == 0) 5602 zfs_compressed_arc_enabled = ztest_random(2); 5603 5604 /* 5605 * Periodically change the zfs_abd_scatter_enabled setting. 5606 */ 5607 if (ztest_random(10) == 0) 5608 zfs_abd_scatter_enabled = ztest_random(2); 5609 } 5610 return (NULL); 5611} 5612 5613static void * 5614ztest_deadman_thread(void *arg) 5615{ 5616 ztest_shared_t *zs = arg; 5617 spa_t *spa = ztest_spa; 5618 hrtime_t delta, total = 0; 5619 5620 for (;;) { 5621 delta = zs->zs_thread_stop - zs->zs_thread_start + 5622 MSEC2NSEC(zfs_deadman_synctime_ms); 5623 5624 (void) poll(NULL, 0, (int)NSEC2MSEC(delta)); 5625 5626 /* 5627 * If the pool is suspended then fail immediately. Otherwise, 5628 * check to see if the pool is making any progress. If 5629 * vdev_deadman() discovers that there hasn't been any recent 5630 * I/Os then it will end up aborting the tests. 
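 *
 * The sleep interval below is one full test pass (zs_thread_stop -
 * zs_thread_start) plus the deadman synctime, so on a healthy run
 * this thread wakes only occasionally; when it does, it either aborts
 * a suspended pool outright or lets vdev_deadman() decide whether
 * outstanding I/O has been stuck for too long.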
5631 */ 5632 if (spa_suspended(spa) || spa->spa_root_vdev == NULL) { 5633 fatal(0, "aborting test after %llu seconds because " 5634 "pool has transitioned to a suspended state.", 5635 zfs_deadman_synctime_ms / 1000); 5636 return (NULL); 5637 } 5638 vdev_deadman(spa->spa_root_vdev); 5639 5640 total += zfs_deadman_synctime_ms/1000; 5641 (void) printf("ztest has been running for %lld seconds\n", 5642 total); 5643 } 5644} 5645 5646static void 5647ztest_execute(int test, ztest_info_t *zi, uint64_t id) 5648{ 5649 ztest_ds_t *zd = &ztest_ds[id % ztest_opts.zo_datasets]; 5650 ztest_shared_callstate_t *zc = ZTEST_GET_SHARED_CALLSTATE(test); 5651 hrtime_t functime = gethrtime(); 5652 5653 for (int i = 0; i < zi->zi_iters; i++) 5654 zi->zi_func(zd, id); 5655 5656 functime = gethrtime() - functime; 5657 5658 atomic_add_64(&zc->zc_count, 1); 5659 atomic_add_64(&zc->zc_time, functime); 5660 5661 if (ztest_opts.zo_verbose >= 4) { 5662 Dl_info dli; 5663 (void) dladdr((void *)zi->zi_func, &dli); 5664 (void) printf("%6.2f sec in %s\n", 5665 (double)functime / NANOSEC, dli.dli_sname); 5666 } 5667} 5668 5669static void * 5670ztest_thread(void *arg) 5671{ 5672 int rand; 5673 uint64_t id = (uintptr_t)arg; 5674 ztest_shared_t *zs = ztest_shared; 5675 uint64_t call_next; 5676 hrtime_t now; 5677 ztest_info_t *zi; 5678 ztest_shared_callstate_t *zc; 5679 5680 while ((now = gethrtime()) < zs->zs_thread_stop) { 5681 /* 5682 * See if it's time to force a crash. 5683 */ 5684 if (now > zs->zs_thread_kill) 5685 ztest_kill(zs); 5686 5687 /* 5688 * If we're getting ENOSPC with some regularity, stop. 5689 */ 5690 if (zs->zs_enospc_count > 10) 5691 break; 5692 5693 /* 5694 * Pick a random function to execute. 5695 */ 5696 rand = ztest_random(ZTEST_FUNCS); 5697 zi = &ztest_info[rand]; 5698 zc = ZTEST_GET_SHARED_CALLSTATE(rand); 5699 call_next = zc->zc_next; 5700 5701 if (now >= call_next && 5702 atomic_cas_64(&zc->zc_next, call_next, call_next + 5703 ztest_random(2 * zi->zi_interval[0] + 1)) == call_next) { 5704 ztest_execute(rand, zi, id); 5705 } 5706 } 5707 5708 return (NULL); 5709} 5710 5711static void 5712ztest_dataset_name(char *dsname, char *pool, int d) 5713{ 5714 (void) snprintf(dsname, ZFS_MAX_DATASET_NAME_LEN, "%s/ds_%d", pool, d); 5715} 5716 5717static void 5718ztest_dataset_destroy(int d) 5719{ 5720 char name[ZFS_MAX_DATASET_NAME_LEN]; 5721 5722 ztest_dataset_name(name, ztest_opts.zo_pool, d); 5723 5724 if (ztest_opts.zo_verbose >= 3) 5725 (void) printf("Destroying %s to free up space\n", name); 5726 5727 /* 5728 * Cleanup any non-standard clones and snapshots. In general, 5729 * ztest thread t operates on dataset (t % zopt_datasets), 5730 * so there may be more than one thing to clean up. 5731 */ 5732 for (int t = d; t < ztest_opts.zo_threads; 5733 t += ztest_opts.zo_datasets) { 5734 ztest_dsl_dataset_cleanup(name, t); 5735 } 5736 5737 (void) dmu_objset_find(name, ztest_objset_destroy_cb, NULL, 5738 DS_FIND_SNAPSHOTS | DS_FIND_CHILDREN); 5739} 5740 5741static void 5742ztest_dataset_dirobj_verify(ztest_ds_t *zd) 5743{ 5744 uint64_t usedobjs, dirobjs, scratch; 5745 5746 /* 5747 * ZTEST_DIROBJ is the object directory for the entire dataset. 5748 * Therefore, the number of objects in use should equal the 5749 * number of ZTEST_DIROBJ entries, +1 for ZTEST_DIROBJ itself. 5750 * If not, we have an object leak. 5751 * 5752 * Note that we can only check this in ztest_dataset_open(), 5753 * when the open-context and syncing-context values agree. 
5754 * That's because zap_count() returns the open-context value, 5755 * while dmu_objset_space() returns the rootbp fill count. 5756 */ 5757 VERIFY3U(0, ==, zap_count(zd->zd_os, ZTEST_DIROBJ, &dirobjs)); 5758 dmu_objset_space(zd->zd_os, &scratch, &scratch, &usedobjs, &scratch); 5759 ASSERT3U(dirobjs + 1, ==, usedobjs); 5760} 5761 5762static int 5763ztest_dataset_open(int d) 5764{ 5765 ztest_ds_t *zd = &ztest_ds[d]; 5766 uint64_t committed_seq = ZTEST_GET_SHARED_DS(d)->zd_seq; 5767 objset_t *os; 5768 zilog_t *zilog; 5769 char name[ZFS_MAX_DATASET_NAME_LEN]; 5770 int error; 5771 5772 ztest_dataset_name(name, ztest_opts.zo_pool, d); 5773 5774 rw_enter(&ztest_name_lock, RW_READER); 5775 5776 error = ztest_dataset_create(name); 5777 if (error == ENOSPC) { 5778 rw_exit(&ztest_name_lock); 5779 ztest_record_enospc(FTAG); 5780 return (error); 5781 } 5782 ASSERT(error == 0 || error == EEXIST); 5783 5784 VERIFY0(dmu_objset_own(name, DMU_OST_OTHER, B_FALSE, zd, &os)); 5785 rw_exit(&ztest_name_lock); 5786 5787 ztest_zd_init(zd, ZTEST_GET_SHARED_DS(d), os); 5788 5789 zilog = zd->zd_zilog; 5790 5791 if (zilog->zl_header->zh_claim_lr_seq != 0 && 5792 zilog->zl_header->zh_claim_lr_seq < committed_seq) 5793 fatal(0, "missing log records: claimed %llu < committed %llu", 5794 zilog->zl_header->zh_claim_lr_seq, committed_seq); 5795 5796 ztest_dataset_dirobj_verify(zd); 5797 5798 zil_replay(os, zd, ztest_replay_vector); 5799 5800 ztest_dataset_dirobj_verify(zd); 5801 5802 if (ztest_opts.zo_verbose >= 6) 5803 (void) printf("%s replay %llu blocks, %llu records, seq %llu\n", 5804 zd->zd_name, 5805 (u_longlong_t)zilog->zl_parse_blk_count, 5806 (u_longlong_t)zilog->zl_parse_lr_count, 5807 (u_longlong_t)zilog->zl_replaying_seq); 5808 5809 zilog = zil_open(os, ztest_get_data); 5810 5811 if (zilog->zl_replaying_seq != 0 && 5812 zilog->zl_replaying_seq < committed_seq) 5813 fatal(0, "missing log records: replayed %llu < committed %llu", 5814 zilog->zl_replaying_seq, committed_seq); 5815 5816 return (0); 5817} 5818 5819static void 5820ztest_dataset_close(int d) 5821{ 5822 ztest_ds_t *zd = &ztest_ds[d]; 5823 5824 zil_close(zd->zd_zilog); 5825 dmu_objset_disown(zd->zd_os, zd); 5826 5827 ztest_zd_fini(zd); 5828} 5829 5830/* 5831 * Kick off threads to run tests on all datasets in parallel. 5832 */ 5833static void 5834ztest_run(ztest_shared_t *zs) 5835{ 5836 thread_t *tid; 5837 spa_t *spa; 5838 objset_t *os; 5839 thread_t resume_tid; 5840 int error; 5841 5842 ztest_exiting = B_FALSE; 5843 5844 /* 5845 * Initialize parent/child shared state. 5846 */ 5847 mutex_init(&ztest_checkpoint_lock, NULL, USYNC_THREAD, NULL); 5848 mutex_init(&ztest_vdev_lock, NULL, USYNC_THREAD, NULL); 5849 rw_init(&ztest_name_lock, NULL, USYNC_THREAD, NULL); 5850 5851 zs->zs_thread_start = gethrtime(); 5852 zs->zs_thread_stop = 5853 zs->zs_thread_start + ztest_opts.zo_passtime * NANOSEC; 5854 zs->zs_thread_stop = MIN(zs->zs_thread_stop, zs->zs_proc_stop); 5855 zs->zs_thread_kill = zs->zs_thread_stop; 5856 if (ztest_random(100) < ztest_opts.zo_killrate) { 5857 zs->zs_thread_kill -= 5858 ztest_random(ztest_opts.zo_passtime * NANOSEC); 5859 } 5860 5861 mutex_init(&zcl.zcl_callbacks_lock, NULL, USYNC_THREAD, NULL); 5862 5863 list_create(&zcl.zcl_callbacks, sizeof (ztest_cb_data_t), 5864 offsetof(ztest_cb_data_t, zcd_node)); 5865 5866 /* 5867 * Open our pool. 
5868 */ 5869 kernel_init(FREAD | FWRITE); 5870 VERIFY0(spa_open(ztest_opts.zo_pool, &spa, FTAG)); 5871 spa->spa_debug = B_TRUE; 5872 metaslab_preload_limit = ztest_random(20) + 1; 5873 ztest_spa = spa; 5874 5875 dmu_objset_stats_t dds; 5876 VERIFY0(dmu_objset_own(ztest_opts.zo_pool, 5877 DMU_OST_ANY, B_TRUE, FTAG, &os)); 5878 dsl_pool_config_enter(dmu_objset_pool(os), FTAG); 5879 dmu_objset_fast_stat(os, &dds); 5880 dsl_pool_config_exit(dmu_objset_pool(os), FTAG); 5881 zs->zs_guid = dds.dds_guid; 5882 dmu_objset_disown(os, FTAG); 5883 5884 spa->spa_dedup_ditto = 2 * ZIO_DEDUPDITTO_MIN; 5885 5886 /* 5887 * We don't expect the pool to suspend unless maxfaults == 0, 5888 * in which case ztest_fault_inject() temporarily takes away 5889 * the only valid replica. 5890 */ 5891 if (MAXFAULTS() == 0) 5892 spa->spa_failmode = ZIO_FAILURE_MODE_WAIT; 5893 else 5894 spa->spa_failmode = ZIO_FAILURE_MODE_PANIC; 5895 5896 /* 5897 * Create a thread to periodically resume suspended I/O. 5898 */ 5899 VERIFY(thr_create(0, 0, ztest_resume_thread, spa, THR_BOUND, 5900 &resume_tid) == 0); 5901 5902 /* 5903 * Create a deadman thread to abort() if we hang. 5904 */ 5905 VERIFY(thr_create(0, 0, ztest_deadman_thread, zs, THR_BOUND, 5906 NULL) == 0); 5907 5908 /* 5909 * Verify that we can safely inquire about any object, 5910 * whether it's allocated or not. To make it interesting, 5911 * we probe a 5-wide window around each power of two. 5912 * This hits all edge cases, including zero and the max. 5913 */ 5914 for (int t = 0; t < 64; t++) { 5915 for (int d = -5; d <= 5; d++) { 5916 error = dmu_object_info(spa->spa_meta_objset, 5917 (1ULL << t) + d, NULL); 5918 ASSERT(error == 0 || error == ENOENT || 5919 error == EINVAL); 5920 } 5921 } 5922 5923 /* 5924 * If we got any ENOSPC errors on the previous run, destroy something. 5925 */ 5926 if (zs->zs_enospc_count != 0) { 5927 int d = ztest_random(ztest_opts.zo_datasets); 5928 ztest_dataset_destroy(d); 5929 } 5930 zs->zs_enospc_count = 0; 5931 5932 tid = umem_zalloc(ztest_opts.zo_threads * sizeof (thread_t), 5933 UMEM_NOFAIL); 5934 5935 if (ztest_opts.zo_verbose >= 4) 5936 (void) printf("starting main threads...\n"); 5937 5938 /* 5939 * Kick off all the tests that run in parallel. 5940 */ 5941 for (int t = 0; t < ztest_opts.zo_threads; t++) { 5942 if (t < ztest_opts.zo_datasets && 5943 ztest_dataset_open(t) != 0) 5944 return; 5945 VERIFY(thr_create(0, 0, ztest_thread, (void *)(uintptr_t)t, 5946 THR_BOUND, &tid[t]) == 0); 5947 } 5948 5949 /* 5950 * Wait for all of the tests to complete. We go in reverse order 5951 * so we don't close datasets while threads are still using them. 5952 */ 5953 for (int t = ztest_opts.zo_threads - 1; t >= 0; t--) { 5954 VERIFY(thr_join(tid[t], NULL, NULL) == 0); 5955 if (t < ztest_opts.zo_datasets) 5956 ztest_dataset_close(t); 5957 } 5958 5959 txg_wait_synced(spa_get_dsl(spa), 0); 5960 5961 zs->zs_alloc = metaslab_class_get_alloc(spa_normal_class(spa)); 5962 zs->zs_space = metaslab_class_get_space(spa_normal_class(spa)); 5963 zfs_dbgmsg_print(FTAG); 5964 5965 umem_free(tid, ztest_opts.zo_threads * sizeof (thread_t)); 5966 5967 /* Kill the resume thread */ 5968 ztest_exiting = B_TRUE; 5969 VERIFY(thr_join(resume_tid, NULL, NULL) == 0); 5970 ztest_resume(spa); 5971 5972 /* 5973 * Right before closing the pool, kick off a bunch of async I/O; 5974 * spa_close() should wait for it to complete. 
5975 */ 5976 for (uint64_t object = 1; object < 50; object++) { 5977 dmu_prefetch(spa->spa_meta_objset, object, 0, 0, 1ULL << 20, 5978 ZIO_PRIORITY_SYNC_READ); 5979 } 5980 5981 spa_close(spa, FTAG); 5982 5983 /* 5984 * Verify that we can loop over all pools. 5985 */ 5986 mutex_enter(&spa_namespace_lock); 5987 for (spa = spa_next(NULL); spa != NULL; spa = spa_next(spa)) 5988 if (ztest_opts.zo_verbose > 3) 5989 (void) printf("spa_next: found %s\n", spa_name(spa)); 5990 mutex_exit(&spa_namespace_lock); 5991 5992 /* 5993 * Verify that we can export the pool and reimport it under a 5994 * different name. 5995 */ 5996 if (ztest_random(2) == 0) { 5997 char name[ZFS_MAX_DATASET_NAME_LEN]; 5998 (void) snprintf(name, sizeof (name), "%s_import", 5999 ztest_opts.zo_pool); 6000 ztest_spa_import_export(ztest_opts.zo_pool, name); 6001 ztest_spa_import_export(name, ztest_opts.zo_pool); 6002 } 6003 6004 kernel_fini(); 6005 6006 list_destroy(&zcl.zcl_callbacks); 6007 6008 mutex_destroy(&zcl.zcl_callbacks_lock); 6009 6010 rw_destroy(&ztest_name_lock); 6011 mutex_destroy(&ztest_vdev_lock); 6012 mutex_destroy(&ztest_checkpoint_lock); 6013} 6014 6015static void 6016ztest_freeze(void) 6017{ 6018 ztest_ds_t *zd = &ztest_ds[0]; 6019 spa_t *spa; 6020 int numloops = 0; 6021 6022 if (ztest_opts.zo_verbose >= 3) 6023 (void) printf("testing spa_freeze()...\n"); 6024 6025 kernel_init(FREAD | FWRITE); 6026 VERIFY3U(0, ==, spa_open(ztest_opts.zo_pool, &spa, FTAG)); 6027 VERIFY3U(0, ==, ztest_dataset_open(0)); 6028 spa->spa_debug = B_TRUE; 6029 ztest_spa = spa; 6030 6031 /* 6032 * Force the first log block to be transactionally allocated. 6033 * We have to do this before we freeze the pool -- otherwise 6034 * the log chain won't be anchored. 6035 */ 6036 while (BP_IS_HOLE(&zd->zd_zilog->zl_header->zh_log)) { 6037 ztest_dmu_object_alloc_free(zd, 0); 6038 zil_commit(zd->zd_zilog, 0); 6039 } 6040 6041 txg_wait_synced(spa_get_dsl(spa), 0); 6042 6043 /* 6044 * Freeze the pool. This stops spa_sync() from doing anything, 6045 * so that the only way to record changes from now on is the ZIL. 6046 */ 6047 spa_freeze(spa); 6048 6049 /* 6050 * Because it is hard to predict how much space a write will actually 6051 * require beforehand, we leave ourselves some fudge space to write over 6052 * capacity. 6053 */ 6054 uint64_t capacity = metaslab_class_get_space(spa_normal_class(spa)) / 2; 6055 6056 /* 6057 * Run tests that generate log records but don't alter the pool config 6058 * or depend on DSL sync tasks (snapshots, objset create/destroy, etc). 6059 * We do a txg_wait_synced() after each iteration to force the txg 6060 * to increase well beyond the last synced value in the uberblock. 6061 * The ZIL should be OK with that. 6062 * 6063 * Run a random number of times less than zo_maxloops and ensure we do 6064 * not run out of space on the pool. 6065 */ 6066 while (ztest_random(10) != 0 && 6067 numloops++ < ztest_opts.zo_maxloops && 6068 metaslab_class_get_alloc(spa_normal_class(spa)) < capacity) { 6069 ztest_od_t od; 6070 ztest_od_init(&od, 0, FTAG, 0, DMU_OT_UINT64_OTHER, 0, 0); 6071 VERIFY0(ztest_object_init(zd, &od, sizeof (od), B_FALSE)); 6072 ztest_io(zd, od.od_object, 6073 ztest_random(ZTEST_RANGE_LOCKS) << SPA_MAXBLOCKSHIFT); 6074 txg_wait_synced(spa_get_dsl(spa), 0); 6075 } 6076 6077 /* 6078 * Commit all of the changes we just generated. 6079 */ 6080 zil_commit(zd->zd_zilog, 0); 6081 txg_wait_synced(spa_get_dsl(spa), 0); 6082 6083 /* 6084 * Close our dataset and close the pool. 
	 */
	ztest_dataset_close(0);
	spa_close(spa, FTAG);
	kernel_fini();

	/*
	 * Open and close the pool and dataset to induce log replay.
	 */
	kernel_init(FREAD | FWRITE);
	VERIFY3U(0, ==, spa_open(ztest_opts.zo_pool, &spa, FTAG));
	ASSERT(spa_freeze_txg(spa) == UINT64_MAX);
	VERIFY3U(0, ==, ztest_dataset_open(0));
	ztest_dataset_close(0);

	spa->spa_debug = B_TRUE;
	ztest_spa = spa;
	txg_wait_synced(spa_get_dsl(spa), 0);
	ztest_reguid(NULL, 0);

	spa_close(spa, FTAG);
	kernel_fini();
}

void
print_time(hrtime_t t, char *timebuf)
{
	hrtime_t s = t / NANOSEC;
	hrtime_t m = s / 60;
	hrtime_t h = m / 60;
	hrtime_t d = h / 24;

	s -= m * 60;
	m -= h * 60;
	h -= d * 24;

	timebuf[0] = '\0';

	if (d)
		(void) sprintf(timebuf,
		    "%llud%02lluh%02llum%02llus", d, h, m, s);
	else if (h)
		(void) sprintf(timebuf, "%lluh%02llum%02llus", h, m, s);
	else if (m)
		(void) sprintf(timebuf, "%llum%02llus", m, s);
	else
		(void) sprintf(timebuf, "%llus", s);
}

static nvlist_t *
make_random_props()
{
	nvlist_t *props;

	VERIFY(nvlist_alloc(&props, NV_UNIQUE_NAME, 0) == 0);
	if (ztest_random(2) == 0)
		return (props);
	VERIFY(nvlist_add_uint64(props, "autoreplace", 1) == 0);

	return (props);
}

/*
 * Create a storage pool with the given name and initial vdev size.
 * Then test spa_freeze() functionality.
 */
static void
ztest_init(ztest_shared_t *zs)
{
	spa_t *spa;
	nvlist_t *nvroot, *props;

	mutex_init(&ztest_vdev_lock, NULL, USYNC_THREAD, NULL);
	mutex_init(&ztest_checkpoint_lock, NULL, USYNC_THREAD, NULL);
	rw_init(&ztest_name_lock, NULL, USYNC_THREAD, NULL);

	kernel_init(FREAD | FWRITE);

	/*
	 * Create the storage pool.
	 */
	(void) spa_destroy(ztest_opts.zo_pool);
	ztest_shared->zs_vdev_next_leaf = 0;
	zs->zs_splits = 0;
	zs->zs_mirrors = ztest_opts.zo_mirrors;
	nvroot = make_vdev_root(NULL, NULL, NULL, ztest_opts.zo_vdev_size, 0,
	    0, ztest_opts.zo_raidz, zs->zs_mirrors, 1);
	props = make_random_props();
	for (int i = 0; i < SPA_FEATURES; i++) {
		char buf[1024];
		(void) snprintf(buf, sizeof (buf), "feature@%s",
		    spa_feature_table[i].fi_uname);
		VERIFY3U(0, ==, nvlist_add_uint64(props, buf, 0));
	}
	VERIFY3U(0, ==, spa_create(ztest_opts.zo_pool, nvroot, props, NULL));
	nvlist_free(nvroot);
	nvlist_free(props);

	VERIFY3U(0, ==, spa_open(ztest_opts.zo_pool, &spa, FTAG));
	zs->zs_metaslab_sz =
	    1ULL << spa->spa_root_vdev->vdev_child[0]->vdev_ms_shift;

	spa_close(spa, FTAG);

	kernel_fini();

	ztest_run_zdb(ztest_opts.zo_pool);

	ztest_freeze();

	ztest_run_zdb(ztest_opts.zo_pool);

	rw_destroy(&ztest_name_lock);
	mutex_destroy(&ztest_vdev_lock);
	mutex_destroy(&ztest_checkpoint_lock);
}

static void
setup_data_fd(void)
{
	static char ztest_name_data[] = "/tmp/ztest.data.XXXXXX";

	ztest_fd_data = mkstemp(ztest_name_data);
	ASSERT3S(ztest_fd_data, >=, 0);
	(void) unlink(ztest_name_data);
}

static int
shared_data_size(ztest_shared_hdr_t *hdr)
{
	int size;

	size = hdr->zh_hdr_size;
	size += hdr->zh_opts_size;
	size += hdr->zh_size;
	size += hdr->zh_stats_size * hdr->zh_stats_count;
	size += hdr->zh_ds_size * hdr->zh_ds_count;

	return (size);
}

static void
setup_hdr(void)
{
	int size;
	ztest_shared_hdr_t *hdr;

	hdr = (void *)mmap(0, P2ROUNDUP(sizeof (*hdr), getpagesize()),
	    PROT_READ | PROT_WRITE, MAP_SHARED, ztest_fd_data, 0);
	ASSERT(hdr != MAP_FAILED);

	VERIFY3U(0, ==, ftruncate(ztest_fd_data, sizeof (ztest_shared_hdr_t)));

	hdr->zh_hdr_size = sizeof (ztest_shared_hdr_t);
	hdr->zh_opts_size = sizeof (ztest_shared_opts_t);
	hdr->zh_size = sizeof (ztest_shared_t);
	hdr->zh_stats_size = sizeof (ztest_shared_callstate_t);
	hdr->zh_stats_count = ZTEST_FUNCS;
	hdr->zh_ds_size = sizeof (ztest_shared_ds_t);
	hdr->zh_ds_count = ztest_opts.zo_datasets;

	size = shared_data_size(hdr);
	VERIFY3U(0, ==, ftruncate(ztest_fd_data, size));

	(void) munmap((caddr_t)hdr, P2ROUNDUP(sizeof (*hdr), getpagesize()));
}

static void
setup_data(void)
{
	int size, offset;
	ztest_shared_hdr_t *hdr;
	uint8_t *buf;

	hdr = (void *)mmap(0, P2ROUNDUP(sizeof (*hdr), getpagesize()),
	    PROT_READ, MAP_SHARED, ztest_fd_data, 0);
	ASSERT(hdr != MAP_FAILED);

	size = shared_data_size(hdr);

	(void) munmap((caddr_t)hdr, P2ROUNDUP(sizeof (*hdr), getpagesize()));
	hdr = ztest_shared_hdr = (void *)mmap(0, P2ROUNDUP(size, getpagesize()),
	    PROT_READ | PROT_WRITE, MAP_SHARED, ztest_fd_data, 0);
	ASSERT(hdr != MAP_FAILED);
	buf = (uint8_t *)hdr;

	offset = hdr->zh_hdr_size;
	ztest_shared_opts = (void *)&buf[offset];
	offset += hdr->zh_opts_size;
	ztest_shared = (void *)&buf[offset];
	offset += hdr->zh_size;
	ztest_shared_callstate = (void *)&buf[offset];
	offset += hdr->zh_stats_size * hdr->zh_stats_count;
	ztest_shared_ds = (void *)&buf[offset];
}

static boolean_t
exec_child(char *cmd, char *libpath, boolean_t ignorekill, int *statusp)
{
	pid_t pid;
	int status;
	char *cmdbuf = NULL;

	pid = fork();

	if (cmd == NULL) {
		cmdbuf = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);
		(void) strlcpy(cmdbuf, getexecname(), MAXPATHLEN);
		cmd = cmdbuf;
	}

	if (pid == -1)
		fatal(1, "fork failed");

	if (pid == 0) {	/* child */
		char *emptyargv[2] = { cmd, NULL };
		char fd_data_str[12];

		struct rlimit rl = { 1024, 1024 };
		(void) setrlimit(RLIMIT_NOFILE, &rl);

		(void) close(ztest_fd_rand);
		VERIFY3U(11, >=,
		    snprintf(fd_data_str, 12, "%d", ztest_fd_data));
		VERIFY0(setenv("ZTEST_FD_DATA", fd_data_str, 1));

		(void) enable_extended_FILE_stdio(-1, -1);
		if (libpath != NULL)
			VERIFY(0 == setenv("LD_LIBRARY_PATH", libpath, 1));
#ifdef illumos
		(void) execv(cmd, emptyargv);
#else
		(void) execvp(cmd, emptyargv);
#endif
		ztest_dump_core = B_FALSE;
		fatal(B_TRUE, "exec failed: %s", cmd);
	}

	if (cmdbuf != NULL) {
		umem_free(cmdbuf, MAXPATHLEN);
		cmd = NULL;
	}

	while (waitpid(pid, &status, 0) != pid)
		continue;
	if (statusp != NULL)
		*statusp = status;

	if (WIFEXITED(status)) {
		if (WEXITSTATUS(status) != 0) {
			(void) fprintf(stderr, "child exited with code %d\n",
			    WEXITSTATUS(status));
			exit(2);
		}
		return (B_FALSE);
	} else if (WIFSIGNALED(status)) {
		if (!ignorekill || WTERMSIG(status) != SIGKILL) {
			(void) fprintf(stderr, "child died with signal %d\n",
			    WTERMSIG(status));
			exit(3);
		}
		return (B_TRUE);
	} else {
		(void) fprintf(stderr, "something strange happened to child\n");
		exit(4);
		/* NOTREACHED */
	}
}

static void
ztest_run_init(void)
{
	ztest_shared_t *zs = ztest_shared;

	ASSERT(ztest_opts.zo_init != 0);

	/*
	 * Blow away any existing copy of zpool.cache
	 */
	(void) remove(spa_config_path);

	/*
	 * Create and initialize our storage pool.
	 */
	for (int i = 1; i <= ztest_opts.zo_init; i++) {
		bzero(zs, sizeof (ztest_shared_t));
		if (ztest_opts.zo_verbose >= 3 &&
		    ztest_opts.zo_init != 1) {
			(void) printf("ztest_init(), pass %d\n", i);
		}
		ztest_init(zs);
	}
}

int
main(int argc, char **argv)
{
	int kills = 0;
	int iters = 0;
	int older = 0;
	int newer = 0;
	ztest_shared_t *zs;
	ztest_info_t *zi;
	ztest_shared_callstate_t *zc;
	char timebuf[100];
	char numbuf[NN_NUMBUF_SZ];
	spa_t *spa;
	char *cmd;
	boolean_t hasalt;
	char *fd_data_str = getenv("ZTEST_FD_DATA");

	(void) setvbuf(stdout, NULL, _IOLBF, 0);

	dprintf_setup(&argc, argv);
	zfs_deadman_synctime_ms = 300000;

	ztest_fd_rand = open("/dev/urandom", O_RDONLY);
	ASSERT3S(ztest_fd_rand, >=, 0);

	if (!fd_data_str) {
		process_options(argc, argv);

		setup_data_fd();
		setup_hdr();
		setup_data();
		bcopy(&ztest_opts, ztest_shared_opts,
		    sizeof (*ztest_shared_opts));
	} else {
		ztest_fd_data = atoi(fd_data_str);
		setup_data();
		bcopy(ztest_shared_opts, &ztest_opts, sizeof (ztest_opts));
	}
	ASSERT3U(ztest_opts.zo_datasets, ==, ztest_shared_hdr->zh_ds_count);

	/* Override location of zpool.cache */
	VERIFY3U(asprintf((char **)&spa_config_path, "%s/zpool.cache",
	    ztest_opts.zo_dir), !=, -1);

	ztest_ds = umem_alloc(ztest_opts.zo_datasets * sizeof (ztest_ds_t),
	    UMEM_NOFAIL);
	zs = ztest_shared;

	if (fd_data_str) {
		metaslab_force_ganging = ztest_opts.zo_metaslab_force_ganging;
		metaslab_df_alloc_threshold =
		    zs->zs_metaslab_df_alloc_threshold;

		if (zs->zs_do_init)
			ztest_run_init();
		else
			ztest_run(zs);
		exit(0);
	}

	hasalt = (strlen(ztest_opts.zo_alt_ztest) != 0);

	if (ztest_opts.zo_verbose >= 1) {
		(void) printf("%llu vdevs, %d datasets, %d threads,"
		    " %llu seconds...\n",
		    (u_longlong_t)ztest_opts.zo_vdevs,
		    ztest_opts.zo_datasets,
		    ztest_opts.zo_threads,
		    (u_longlong_t)ztest_opts.zo_time);
	}

	cmd = umem_alloc(MAXNAMELEN, UMEM_NOFAIL);
	(void) strlcpy(cmd, getexecname(), MAXNAMELEN);

	zs->zs_do_init = B_TRUE;
	if (strlen(ztest_opts.zo_alt_ztest) != 0) {
		if (ztest_opts.zo_verbose >= 1) {
			(void) printf("Executing older ztest for "
			    "initialization: %s\n", ztest_opts.zo_alt_ztest);
		}
		VERIFY(!exec_child(ztest_opts.zo_alt_ztest,
		    ztest_opts.zo_alt_libpath, B_FALSE, NULL));
	} else {
		VERIFY(!exec_child(NULL, NULL, B_FALSE, NULL));
	}
	zs->zs_do_init = B_FALSE;

	zs->zs_proc_start = gethrtime();
	zs->zs_proc_stop = zs->zs_proc_start + ztest_opts.zo_time * NANOSEC;

	for (int f = 0; f < ZTEST_FUNCS; f++) {
		zi = &ztest_info[f];
		zc = ZTEST_GET_SHARED_CALLSTATE(f);
		if (zs->zs_proc_start + zi->zi_interval[0] > zs->zs_proc_stop)
			zc->zc_next = UINT64_MAX;
		else
			zc->zc_next = zs->zs_proc_start +
			    ztest_random(2 * zi->zi_interval[0] + 1);
	}

	/*
	 * Run the tests in a loop.  These tests include fault injection
	 * to verify that self-healing data works, and forced crashes
	 * to verify that we never lose on-disk consistency.
	 */
	while (gethrtime() < zs->zs_proc_stop) {
		int status;
		boolean_t killed;

		/*
		 * Initialize the workload counters for each function.
		 */
		for (int f = 0; f < ZTEST_FUNCS; f++) {
			zc = ZTEST_GET_SHARED_CALLSTATE(f);
			zc->zc_count = 0;
			zc->zc_time = 0;
		}

		/* Set the allocation switch size */
		zs->zs_metaslab_df_alloc_threshold =
		    ztest_random(zs->zs_metaslab_sz / 4) + 1;

		if (!hasalt || ztest_random(2) == 0) {
			if (hasalt && ztest_opts.zo_verbose >= 1) {
				(void) printf("Executing newer ztest: %s\n",
				    cmd);
			}
			newer++;
			killed = exec_child(cmd, NULL, B_TRUE, &status);
		} else {
			if (hasalt && ztest_opts.zo_verbose >= 1) {
				(void) printf("Executing older ztest: %s\n",
				    ztest_opts.zo_alt_ztest);
			}
			older++;
			killed = exec_child(ztest_opts.zo_alt_ztest,
			    ztest_opts.zo_alt_libpath, B_TRUE, &status);
		}

		if (killed)
			kills++;
		iters++;

		if (ztest_opts.zo_verbose >= 1) {
			hrtime_t now = gethrtime();

			now = MIN(now, zs->zs_proc_stop);
			print_time(zs->zs_proc_stop - now, timebuf);
			nicenum(zs->zs_space, numbuf, sizeof (numbuf));

			(void) printf("Pass %3d, %8s, %3llu ENOSPC, "
			    "%4.1f%% of %5s used, %3.0f%% done, %8s to go\n",
			    iters,
			    WIFEXITED(status) ? "Complete" : "SIGKILL",
			    (u_longlong_t)zs->zs_enospc_count,
			    100.0 * zs->zs_alloc / zs->zs_space,
			    numbuf,
			    100.0 * (now - zs->zs_proc_start) /
			    (ztest_opts.zo_time * NANOSEC), timebuf);
		}

		if (ztest_opts.zo_verbose >= 2) {
			(void) printf("\nWorkload summary:\n\n");
			(void) printf("%7s %9s %s\n",
			    "Calls", "Time", "Function");
			(void) printf("%7s %9s %s\n",
			    "-----", "----", "--------");
			for (int f = 0; f < ZTEST_FUNCS; f++) {
				Dl_info dli;

				zi = &ztest_info[f];
				zc = ZTEST_GET_SHARED_CALLSTATE(f);
				print_time(zc->zc_time, timebuf);
				(void) dladdr((void *)zi->zi_func, &dli);
				(void) printf("%7llu %9s %s\n",
				    (u_longlong_t)zc->zc_count, timebuf,
				    dli.dli_sname);
			}
			(void) printf("\n");
		}

		/*
		 * It's possible that we killed a child during a rename test,
		 * in which case we'll have a 'ztest_tmp' pool lying around
		 * instead of 'ztest'.  Do a blind rename in case this happened.
		 */
		kernel_init(FREAD);
		if (spa_open(ztest_opts.zo_pool, &spa, FTAG) == 0) {
			spa_close(spa, FTAG);
		} else {
			char tmpname[ZFS_MAX_DATASET_NAME_LEN];
			kernel_fini();
			kernel_init(FREAD | FWRITE);
			(void) snprintf(tmpname, sizeof (tmpname), "%s_tmp",
			    ztest_opts.zo_pool);
			(void) spa_rename(tmpname, ztest_opts.zo_pool);
		}
		kernel_fini();

		ztest_run_zdb(ztest_opts.zo_pool);
	}

	if (ztest_opts.zo_verbose >= 1) {
		if (hasalt) {
			(void) printf("%d runs of older ztest: %s\n", older,
			    ztest_opts.zo_alt_ztest);
			(void) printf("%d runs of newer ztest: %s\n", newer,
			    cmd);
		}
		(void) printf("%d killed, %d completed, %.0f%% kill rate\n",
		    kills, iters - kills, (100.0 * kills) / MAX(1, iters));
	}

	umem_free(cmd, MAXNAMELEN);

	return (0);
}
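
/*
 * Illustrative sketch (not part of ztest itself): the mmap-before-exec
 * pattern that setup_data_fd(), setup_hdr(), setup_data(), and exec_child()
 * implement above.  State lives in an unlinked temp file that parent and
 * child both map MAP_SHARED; the open descriptor survives exec(2) and is
 * handed to the child via the ZTEST_FD_DATA environment variable.  The
 * demo_shared_t type and the file name below are hypothetical, chosen only
 * for the example.
 *
 *	typedef struct demo_shared { uint64_t ds_counter; } demo_shared_t;
 *
 *	// Parent: create and map the backing file, then advertise the fd.
 *	char path[] = "/tmp/demo.data.XXXXXX";
 *	int fd = mkstemp(path);
 *	(void) unlink(path);			// lives on via the open fd
 *	(void) ftruncate(fd, sizeof (demo_shared_t));
 *	demo_shared_t *sh = mmap(NULL, sizeof (*sh),
 *	    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	char buf[12];
 *	(void) snprintf(buf, sizeof (buf), "%d", fd);
 *	(void) setenv("ZTEST_FD_DATA", buf, 1);
 *	// ... fork() and execv() the child binary here ...
 *
 *	// Child: recover the inherited descriptor and map the same pages.
 *	int cfd = atoi(getenv("ZTEST_FD_DATA"));
 *	demo_shared_t *csh = mmap(NULL, sizeof (*csh),
 *	    PROT_READ | PROT_WRITE, MAP_SHARED, cfd, 0);
 *	csh->ds_counter++;			// visible to the parent
 */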