ztest.c revision 307265
1/* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21/* 22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. 23 * Copyright (c) 2011, 2015 by Delphix. All rights reserved. 24 * Copyright 2011 Nexenta Systems, Inc. All rights reserved. 25 * Copyright (c) 2012 Martin Matuska <mm@FreeBSD.org>. All rights reserved. 26 * Copyright (c) 2013 Steven Hartland. All rights reserved. 27 * Copyright (c) 2014 Integros [integros.com] 28 */ 29 30/* 31 * The objective of this program is to provide a DMU/ZAP/SPA stress test 32 * that runs entirely in userland, is easy to use, and easy to extend. 33 * 34 * The overall design of the ztest program is as follows: 35 * 36 * (1) For each major functional area (e.g. adding vdevs to a pool, 37 * creating and destroying datasets, reading and writing objects, etc) 38 * we have a simple routine to test that functionality. These 39 * individual routines do not have to do anything "stressful". 40 * 41 * (2) We turn these simple functionality tests into a stress test by 42 * running them all in parallel, with as many threads as desired, 43 * and spread across as many datasets, objects, and vdevs as desired. 44 * 45 * (3) While all this is happening, we inject faults into the pool to 46 * verify that self-healing data really works. 47 * 48 * (4) Every time we open a dataset, we change its checksum and compression 49 * functions. Thus even individual objects vary from block to block 50 * in which checksum they use and whether they're compressed. 51 * 52 * (5) To verify that we never lose on-disk consistency after a crash, 53 * we run the entire test in a child of the main process. 54 * At random times, the child self-immolates with a SIGKILL. 55 * This is the software equivalent of pulling the power cord. 56 * The parent then runs the test again, using the existing 57 * storage pool, as many times as desired. If backwards compatibility 58 * testing is enabled ztest will sometimes run the "older" version 59 * of ztest after a SIGKILL. 60 * 61 * (6) To verify that we don't have future leaks or temporal incursions, 62 * many of the functional tests record the transaction group number 63 * as part of their data. When reading old data, they verify that 64 * the transaction group number is less than the current, open txg. 65 * If you add a new test, please do this if applicable. 66 * 67 * When run with no arguments, ztest runs for about five minutes and 68 * produces no output if successful. To get a little bit of information, 69 * specify -V. To get more information, specify -VV, and so on. 70 * 71 * To turn this into an overnight stress test, use -T to specify run time. 72 * 73 * You can ask more more vdevs [-v], datasets [-d], or threads [-t] 74 * to increase the pool capacity, fanout, and overall stress level. 75 * 76 * Use the -k option to set the desired frequency of kills. 77 * 78 * When ztest invokes itself it passes all relevant information through a 79 * temporary file which is mmap-ed in the child process. This allows shared 80 * memory to survive the exec syscall. The ztest_shared_hdr_t struct is always 81 * stored at offset 0 of this file and contains information on the size and 82 * number of shared structures in the file. The information stored in this file 83 * must remain backwards compatible with older versions of ztest so that 84 * ztest can invoke them during backwards compatibility testing (-B). 85 */ 86 87#include <sys/zfs_context.h> 88#include <sys/spa.h> 89#include <sys/dmu.h> 90#include <sys/txg.h> 91#include <sys/dbuf.h> 92#include <sys/zap.h> 93#include <sys/dmu_objset.h> 94#include <sys/poll.h> 95#include <sys/stat.h> 96#include <sys/time.h> 97#include <sys/wait.h> 98#include <sys/mman.h> 99#include <sys/resource.h> 100#include <sys/zio.h> 101#include <sys/zil.h> 102#include <sys/zil_impl.h> 103#include <sys/vdev_impl.h> 104#include <sys/vdev_file.h> 105#include <sys/spa_impl.h> 106#include <sys/metaslab_impl.h> 107#include <sys/dsl_prop.h> 108#include <sys/dsl_dataset.h> 109#include <sys/dsl_destroy.h> 110#include <sys/dsl_scan.h> 111#include <sys/zio_checksum.h> 112#include <sys/refcount.h> 113#include <sys/zfeature.h> 114#include <sys/dsl_userhold.h> 115#include <stdio.h> 116#include <stdio_ext.h> 117#include <stdlib.h> 118#include <unistd.h> 119#include <signal.h> 120#include <umem.h> 121#include <dlfcn.h> 122#include <ctype.h> 123#include <math.h> 124#include <errno.h> 125#include <sys/fs/zfs.h> 126#include <libnvpair.h> 127 128static int ztest_fd_data = -1; 129static int ztest_fd_rand = -1; 130 131typedef struct ztest_shared_hdr { 132 uint64_t zh_hdr_size; 133 uint64_t zh_opts_size; 134 uint64_t zh_size; 135 uint64_t zh_stats_size; 136 uint64_t zh_stats_count; 137 uint64_t zh_ds_size; 138 uint64_t zh_ds_count; 139} ztest_shared_hdr_t; 140 141static ztest_shared_hdr_t *ztest_shared_hdr; 142 143typedef struct ztest_shared_opts { 144 char zo_pool[ZFS_MAX_DATASET_NAME_LEN]; 145 char zo_dir[ZFS_MAX_DATASET_NAME_LEN]; 146 char zo_alt_ztest[MAXNAMELEN]; 147 char zo_alt_libpath[MAXNAMELEN]; 148 uint64_t zo_vdevs; 149 uint64_t zo_vdevtime; 150 size_t zo_vdev_size; 151 int zo_ashift; 152 int zo_mirrors; 153 int zo_raidz; 154 int zo_raidz_parity; 155 int zo_datasets; 156 int zo_threads; 157 uint64_t zo_passtime; 158 uint64_t zo_killrate; 159 int zo_verbose; 160 int zo_init; 161 uint64_t zo_time; 162 uint64_t zo_maxloops; 163 uint64_t zo_metaslab_gang_bang; 164} ztest_shared_opts_t; 165 166static const ztest_shared_opts_t ztest_opts_defaults = { 167 .zo_pool = { 'z', 't', 'e', 's', 't', '\0' }, 168 .zo_dir = { '/', 't', 'm', 'p', '\0' }, 169 .zo_alt_ztest = { '\0' }, 170 .zo_alt_libpath = { '\0' }, 171 .zo_vdevs = 5, 172 .zo_ashift = SPA_MINBLOCKSHIFT, 173 .zo_mirrors = 2, 174 .zo_raidz = 4, 175 .zo_raidz_parity = 1, 176 .zo_vdev_size = SPA_MINDEVSIZE * 2, 177 .zo_datasets = 7, 178 .zo_threads = 23, 179 .zo_passtime = 60, /* 60 seconds */ 180 .zo_killrate = 70, /* 70% kill rate */ 181 .zo_verbose = 0, 182 .zo_init = 1, 183 .zo_time = 300, /* 5 minutes */ 184 .zo_maxloops = 50, /* max loops during spa_freeze() */ 185 .zo_metaslab_gang_bang = 32 << 10 186}; 187 188extern uint64_t metaslab_gang_bang; 189extern uint64_t metaslab_df_alloc_threshold; 190extern uint64_t zfs_deadman_synctime_ms; 191extern int metaslab_preload_limit; 192extern boolean_t zfs_compressed_arc_enabled; 193 194static ztest_shared_opts_t *ztest_shared_opts; 195static ztest_shared_opts_t ztest_opts; 196 197typedef struct ztest_shared_ds { 198 uint64_t zd_seq; 199} ztest_shared_ds_t; 200 201static ztest_shared_ds_t *ztest_shared_ds; 202#define ZTEST_GET_SHARED_DS(d) (&ztest_shared_ds[d]) 203 204#define BT_MAGIC 0x123456789abcdefULL 205#define MAXFAULTS() \ 206 (MAX(zs->zs_mirrors, 1) * (ztest_opts.zo_raidz_parity + 1) - 1) 207 208enum ztest_io_type { 209 ZTEST_IO_WRITE_TAG, 210 ZTEST_IO_WRITE_PATTERN, 211 ZTEST_IO_WRITE_ZEROES, 212 ZTEST_IO_TRUNCATE, 213 ZTEST_IO_SETATTR, 214 ZTEST_IO_REWRITE, 215 ZTEST_IO_TYPES 216}; 217 218typedef struct ztest_block_tag { 219 uint64_t bt_magic; 220 uint64_t bt_objset; 221 uint64_t bt_object; 222 uint64_t bt_offset; 223 uint64_t bt_gen; 224 uint64_t bt_txg; 225 uint64_t bt_crtxg; 226} ztest_block_tag_t; 227 228typedef struct bufwad { 229 uint64_t bw_index; 230 uint64_t bw_txg; 231 uint64_t bw_data; 232} bufwad_t; 233 234/* 235 * XXX -- fix zfs range locks to be generic so we can use them here. 236 */ 237typedef enum { 238 RL_READER, 239 RL_WRITER, 240 RL_APPEND 241} rl_type_t; 242 243typedef struct rll { 244 void *rll_writer; 245 int rll_readers; 246 mutex_t rll_lock; 247 cond_t rll_cv; 248} rll_t; 249 250typedef struct rl { 251 uint64_t rl_object; 252 uint64_t rl_offset; 253 uint64_t rl_size; 254 rll_t *rl_lock; 255} rl_t; 256 257#define ZTEST_RANGE_LOCKS 64 258#define ZTEST_OBJECT_LOCKS 64 259 260/* 261 * Object descriptor. Used as a template for object lookup/create/remove. 262 */ 263typedef struct ztest_od { 264 uint64_t od_dir; 265 uint64_t od_object; 266 dmu_object_type_t od_type; 267 dmu_object_type_t od_crtype; 268 uint64_t od_blocksize; 269 uint64_t od_crblocksize; 270 uint64_t od_gen; 271 uint64_t od_crgen; 272 char od_name[ZFS_MAX_DATASET_NAME_LEN]; 273} ztest_od_t; 274 275/* 276 * Per-dataset state. 277 */ 278typedef struct ztest_ds { 279 ztest_shared_ds_t *zd_shared; 280 objset_t *zd_os; 281 rwlock_t zd_zilog_lock; 282 zilog_t *zd_zilog; 283 ztest_od_t *zd_od; /* debugging aid */ 284 char zd_name[ZFS_MAX_DATASET_NAME_LEN]; 285 mutex_t zd_dirobj_lock; 286 rll_t zd_object_lock[ZTEST_OBJECT_LOCKS]; 287 rll_t zd_range_lock[ZTEST_RANGE_LOCKS]; 288} ztest_ds_t; 289 290/* 291 * Per-iteration state. 292 */ 293typedef void ztest_func_t(ztest_ds_t *zd, uint64_t id); 294 295typedef struct ztest_info { 296 ztest_func_t *zi_func; /* test function */ 297 uint64_t zi_iters; /* iterations per execution */ 298 uint64_t *zi_interval; /* execute every <interval> seconds */ 299} ztest_info_t; 300 301typedef struct ztest_shared_callstate { 302 uint64_t zc_count; /* per-pass count */ 303 uint64_t zc_time; /* per-pass time */ 304 uint64_t zc_next; /* next time to call this function */ 305} ztest_shared_callstate_t; 306 307static ztest_shared_callstate_t *ztest_shared_callstate; 308#define ZTEST_GET_SHARED_CALLSTATE(c) (&ztest_shared_callstate[c]) 309 310/* 311 * Note: these aren't static because we want dladdr() to work. 312 */ 313ztest_func_t ztest_dmu_read_write; 314ztest_func_t ztest_dmu_write_parallel; 315ztest_func_t ztest_dmu_object_alloc_free; 316ztest_func_t ztest_dmu_commit_callbacks; 317ztest_func_t ztest_zap; 318ztest_func_t ztest_zap_parallel; 319ztest_func_t ztest_zil_commit; 320ztest_func_t ztest_zil_remount; 321ztest_func_t ztest_dmu_read_write_zcopy; 322ztest_func_t ztest_dmu_objset_create_destroy; 323ztest_func_t ztest_dmu_prealloc; 324ztest_func_t ztest_fzap; 325ztest_func_t ztest_dmu_snapshot_create_destroy; 326ztest_func_t ztest_dsl_prop_get_set; 327ztest_func_t ztest_spa_prop_get_set; 328ztest_func_t ztest_spa_create_destroy; 329ztest_func_t ztest_fault_inject; 330ztest_func_t ztest_ddt_repair; 331ztest_func_t ztest_dmu_snapshot_hold; 332ztest_func_t ztest_spa_rename; 333ztest_func_t ztest_scrub; 334ztest_func_t ztest_dsl_dataset_promote_busy; 335ztest_func_t ztest_vdev_attach_detach; 336ztest_func_t ztest_vdev_LUN_growth; 337ztest_func_t ztest_vdev_add_remove; 338ztest_func_t ztest_vdev_aux_add_remove; 339ztest_func_t ztest_split_pool; 340ztest_func_t ztest_reguid; 341ztest_func_t ztest_spa_upgrade; 342 343uint64_t zopt_always = 0ULL * NANOSEC; /* all the time */ 344uint64_t zopt_incessant = 1ULL * NANOSEC / 10; /* every 1/10 second */ 345uint64_t zopt_often = 1ULL * NANOSEC; /* every second */ 346uint64_t zopt_sometimes = 10ULL * NANOSEC; /* every 10 seconds */ 347uint64_t zopt_rarely = 60ULL * NANOSEC; /* every 60 seconds */ 348 349ztest_info_t ztest_info[] = { 350 { ztest_dmu_read_write, 1, &zopt_always }, 351 { ztest_dmu_write_parallel, 10, &zopt_always }, 352 { ztest_dmu_object_alloc_free, 1, &zopt_always }, 353 { ztest_dmu_commit_callbacks, 1, &zopt_always }, 354 { ztest_zap, 30, &zopt_always }, 355 { ztest_zap_parallel, 100, &zopt_always }, 356 { ztest_split_pool, 1, &zopt_always }, 357 { ztest_zil_commit, 1, &zopt_incessant }, 358 { ztest_zil_remount, 1, &zopt_sometimes }, 359 { ztest_dmu_read_write_zcopy, 1, &zopt_often }, 360 { ztest_dmu_objset_create_destroy, 1, &zopt_often }, 361 { ztest_dsl_prop_get_set, 1, &zopt_often }, 362 { ztest_spa_prop_get_set, 1, &zopt_sometimes }, 363#if 0 364 { ztest_dmu_prealloc, 1, &zopt_sometimes }, 365#endif 366 { ztest_fzap, 1, &zopt_sometimes }, 367 { ztest_dmu_snapshot_create_destroy, 1, &zopt_sometimes }, 368 { ztest_spa_create_destroy, 1, &zopt_sometimes }, 369 { ztest_fault_inject, 1, &zopt_sometimes }, 370 { ztest_ddt_repair, 1, &zopt_sometimes }, 371 { ztest_dmu_snapshot_hold, 1, &zopt_sometimes }, 372 { ztest_reguid, 1, &zopt_rarely }, 373 { ztest_spa_rename, 1, &zopt_rarely }, 374 { ztest_scrub, 1, &zopt_rarely }, 375 { ztest_spa_upgrade, 1, &zopt_rarely }, 376 { ztest_dsl_dataset_promote_busy, 1, &zopt_rarely }, 377 { ztest_vdev_attach_detach, 1, &zopt_sometimes }, 378 { ztest_vdev_LUN_growth, 1, &zopt_rarely }, 379 { ztest_vdev_add_remove, 1, 380 &ztest_opts.zo_vdevtime }, 381 { ztest_vdev_aux_add_remove, 1, 382 &ztest_opts.zo_vdevtime }, 383}; 384 385#define ZTEST_FUNCS (sizeof (ztest_info) / sizeof (ztest_info_t)) 386 387/* 388 * The following struct is used to hold a list of uncalled commit callbacks. 389 * The callbacks are ordered by txg number. 390 */ 391typedef struct ztest_cb_list { 392 mutex_t zcl_callbacks_lock; 393 list_t zcl_callbacks; 394} ztest_cb_list_t; 395 396/* 397 * Stuff we need to share writably between parent and child. 398 */ 399typedef struct ztest_shared { 400 boolean_t zs_do_init; 401 hrtime_t zs_proc_start; 402 hrtime_t zs_proc_stop; 403 hrtime_t zs_thread_start; 404 hrtime_t zs_thread_stop; 405 hrtime_t zs_thread_kill; 406 uint64_t zs_enospc_count; 407 uint64_t zs_vdev_next_leaf; 408 uint64_t zs_vdev_aux; 409 uint64_t zs_alloc; 410 uint64_t zs_space; 411 uint64_t zs_splits; 412 uint64_t zs_mirrors; 413 uint64_t zs_metaslab_sz; 414 uint64_t zs_metaslab_df_alloc_threshold; 415 uint64_t zs_guid; 416} ztest_shared_t; 417 418#define ID_PARALLEL -1ULL 419 420static char ztest_dev_template[] = "%s/%s.%llua"; 421static char ztest_aux_template[] = "%s/%s.%s.%llu"; 422ztest_shared_t *ztest_shared; 423 424static spa_t *ztest_spa = NULL; 425static ztest_ds_t *ztest_ds; 426 427static mutex_t ztest_vdev_lock; 428 429/* 430 * The ztest_name_lock protects the pool and dataset namespace used by 431 * the individual tests. To modify the namespace, consumers must grab 432 * this lock as writer. Grabbing the lock as reader will ensure that the 433 * namespace does not change while the lock is held. 434 */ 435static rwlock_t ztest_name_lock; 436 437static boolean_t ztest_dump_core = B_TRUE; 438static boolean_t ztest_exiting; 439 440/* Global commit callback list */ 441static ztest_cb_list_t zcl; 442 443enum ztest_object { 444 ZTEST_META_DNODE = 0, 445 ZTEST_DIROBJ, 446 ZTEST_OBJECTS 447}; 448 449static void usage(boolean_t) __NORETURN; 450 451/* 452 * These libumem hooks provide a reasonable set of defaults for the allocator's 453 * debugging facilities. 454 */ 455const char * 456_umem_debug_init() 457{ 458 return ("default,verbose"); /* $UMEM_DEBUG setting */ 459} 460 461const char * 462_umem_logging_init(void) 463{ 464 return ("fail,contents"); /* $UMEM_LOGGING setting */ 465} 466 467#define FATAL_MSG_SZ 1024 468 469char *fatal_msg; 470 471static void 472fatal(int do_perror, char *message, ...) 473{ 474 va_list args; 475 int save_errno = errno; 476 char buf[FATAL_MSG_SZ]; 477 478 (void) fflush(stdout); 479 480 va_start(args, message); 481 (void) sprintf(buf, "ztest: "); 482 /* LINTED */ 483 (void) vsprintf(buf + strlen(buf), message, args); 484 va_end(args); 485 if (do_perror) { 486 (void) snprintf(buf + strlen(buf), FATAL_MSG_SZ - strlen(buf), 487 ": %s", strerror(save_errno)); 488 } 489 (void) fprintf(stderr, "%s\n", buf); 490 fatal_msg = buf; /* to ease debugging */ 491 if (ztest_dump_core) 492 abort(); 493 exit(3); 494} 495 496static int 497str2shift(const char *buf) 498{ 499 const char *ends = "BKMGTPEZ"; 500 int i; 501 502 if (buf[0] == '\0') 503 return (0); 504 for (i = 0; i < strlen(ends); i++) { 505 if (toupper(buf[0]) == ends[i]) 506 break; 507 } 508 if (i == strlen(ends)) { 509 (void) fprintf(stderr, "ztest: invalid bytes suffix: %s\n", 510 buf); 511 usage(B_FALSE); 512 } 513 if (buf[1] == '\0' || (toupper(buf[1]) == 'B' && buf[2] == '\0')) { 514 return (10*i); 515 } 516 (void) fprintf(stderr, "ztest: invalid bytes suffix: %s\n", buf); 517 usage(B_FALSE); 518 /* NOTREACHED */ 519} 520 521static uint64_t 522nicenumtoull(const char *buf) 523{ 524 char *end; 525 uint64_t val; 526 527 val = strtoull(buf, &end, 0); 528 if (end == buf) { 529 (void) fprintf(stderr, "ztest: bad numeric value: %s\n", buf); 530 usage(B_FALSE); 531 } else if (end[0] == '.') { 532 double fval = strtod(buf, &end); 533 fval *= pow(2, str2shift(end)); 534 if (fval > UINT64_MAX) { 535 (void) fprintf(stderr, "ztest: value too large: %s\n", 536 buf); 537 usage(B_FALSE); 538 } 539 val = (uint64_t)fval; 540 } else { 541 int shift = str2shift(end); 542 if (shift >= 64 || (val << shift) >> shift != val) { 543 (void) fprintf(stderr, "ztest: value too large: %s\n", 544 buf); 545 usage(B_FALSE); 546 } 547 val <<= shift; 548 } 549 return (val); 550} 551 552static void 553usage(boolean_t requested) 554{ 555 const ztest_shared_opts_t *zo = &ztest_opts_defaults; 556 557 char nice_vdev_size[10]; 558 char nice_gang_bang[10]; 559 FILE *fp = requested ? stdout : stderr; 560 561 nicenum(zo->zo_vdev_size, nice_vdev_size); 562 nicenum(zo->zo_metaslab_gang_bang, nice_gang_bang); 563 564 (void) fprintf(fp, "Usage: %s\n" 565 "\t[-v vdevs (default: %llu)]\n" 566 "\t[-s size_of_each_vdev (default: %s)]\n" 567 "\t[-a alignment_shift (default: %d)] use 0 for random\n" 568 "\t[-m mirror_copies (default: %d)]\n" 569 "\t[-r raidz_disks (default: %d)]\n" 570 "\t[-R raidz_parity (default: %d)]\n" 571 "\t[-d datasets (default: %d)]\n" 572 "\t[-t threads (default: %d)]\n" 573 "\t[-g gang_block_threshold (default: %s)]\n" 574 "\t[-i init_count (default: %d)] initialize pool i times\n" 575 "\t[-k kill_percentage (default: %llu%%)]\n" 576 "\t[-p pool_name (default: %s)]\n" 577 "\t[-f dir (default: %s)] file directory for vdev files\n" 578 "\t[-V] verbose (use multiple times for ever more blather)\n" 579 "\t[-E] use existing pool instead of creating new one\n" 580 "\t[-T time (default: %llu sec)] total run time\n" 581 "\t[-F freezeloops (default: %llu)] max loops in spa_freeze()\n" 582 "\t[-P passtime (default: %llu sec)] time per pass\n" 583 "\t[-B alt_ztest (default: <none>)] alternate ztest path\n" 584 "\t[-h] (print help)\n" 585 "", 586 zo->zo_pool, 587 (u_longlong_t)zo->zo_vdevs, /* -v */ 588 nice_vdev_size, /* -s */ 589 zo->zo_ashift, /* -a */ 590 zo->zo_mirrors, /* -m */ 591 zo->zo_raidz, /* -r */ 592 zo->zo_raidz_parity, /* -R */ 593 zo->zo_datasets, /* -d */ 594 zo->zo_threads, /* -t */ 595 nice_gang_bang, /* -g */ 596 zo->zo_init, /* -i */ 597 (u_longlong_t)zo->zo_killrate, /* -k */ 598 zo->zo_pool, /* -p */ 599 zo->zo_dir, /* -f */ 600 (u_longlong_t)zo->zo_time, /* -T */ 601 (u_longlong_t)zo->zo_maxloops, /* -F */ 602 (u_longlong_t)zo->zo_passtime); 603 exit(requested ? 0 : 1); 604} 605 606static void 607process_options(int argc, char **argv) 608{ 609 char *path; 610 ztest_shared_opts_t *zo = &ztest_opts; 611 612 int opt; 613 uint64_t value; 614 char altdir[MAXNAMELEN] = { 0 }; 615 616 bcopy(&ztest_opts_defaults, zo, sizeof (*zo)); 617 618 while ((opt = getopt(argc, argv, 619 "v:s:a:m:r:R:d:t:g:i:k:p:f:VET:P:hF:B:")) != EOF) { 620 value = 0; 621 switch (opt) { 622 case 'v': 623 case 's': 624 case 'a': 625 case 'm': 626 case 'r': 627 case 'R': 628 case 'd': 629 case 't': 630 case 'g': 631 case 'i': 632 case 'k': 633 case 'T': 634 case 'P': 635 case 'F': 636 value = nicenumtoull(optarg); 637 } 638 switch (opt) { 639 case 'v': 640 zo->zo_vdevs = value; 641 break; 642 case 's': 643 zo->zo_vdev_size = MAX(SPA_MINDEVSIZE, value); 644 break; 645 case 'a': 646 zo->zo_ashift = value; 647 break; 648 case 'm': 649 zo->zo_mirrors = value; 650 break; 651 case 'r': 652 zo->zo_raidz = MAX(1, value); 653 break; 654 case 'R': 655 zo->zo_raidz_parity = MIN(MAX(value, 1), 3); 656 break; 657 case 'd': 658 zo->zo_datasets = MAX(1, value); 659 break; 660 case 't': 661 zo->zo_threads = MAX(1, value); 662 break; 663 case 'g': 664 zo->zo_metaslab_gang_bang = MAX(SPA_MINBLOCKSIZE << 1, 665 value); 666 break; 667 case 'i': 668 zo->zo_init = value; 669 break; 670 case 'k': 671 zo->zo_killrate = value; 672 break; 673 case 'p': 674 (void) strlcpy(zo->zo_pool, optarg, 675 sizeof (zo->zo_pool)); 676 break; 677 case 'f': 678 path = realpath(optarg, NULL); 679 if (path == NULL) { 680 (void) fprintf(stderr, "error: %s: %s\n", 681 optarg, strerror(errno)); 682 usage(B_FALSE); 683 } else { 684 (void) strlcpy(zo->zo_dir, path, 685 sizeof (zo->zo_dir)); 686 } 687 break; 688 case 'V': 689 zo->zo_verbose++; 690 break; 691 case 'E': 692 zo->zo_init = 0; 693 break; 694 case 'T': 695 zo->zo_time = value; 696 break; 697 case 'P': 698 zo->zo_passtime = MAX(1, value); 699 break; 700 case 'F': 701 zo->zo_maxloops = MAX(1, value); 702 break; 703 case 'B': 704 (void) strlcpy(altdir, optarg, sizeof (altdir)); 705 break; 706 case 'h': 707 usage(B_TRUE); 708 break; 709 case '?': 710 default: 711 usage(B_FALSE); 712 break; 713 } 714 } 715 716 zo->zo_raidz_parity = MIN(zo->zo_raidz_parity, zo->zo_raidz - 1); 717 718 zo->zo_vdevtime = 719 (zo->zo_vdevs > 0 ? zo->zo_time * NANOSEC / zo->zo_vdevs : 720 UINT64_MAX >> 2); 721 722 if (strlen(altdir) > 0) { 723 char *cmd; 724 char *realaltdir; 725 char *bin; 726 char *ztest; 727 char *isa; 728 int isalen; 729 730 cmd = umem_alloc(MAXPATHLEN, UMEM_NOFAIL); 731 realaltdir = umem_alloc(MAXPATHLEN, UMEM_NOFAIL); 732 733 VERIFY(NULL != realpath(getexecname(), cmd)); 734 if (0 != access(altdir, F_OK)) { 735 ztest_dump_core = B_FALSE; 736 fatal(B_TRUE, "invalid alternate ztest path: %s", 737 altdir); 738 } 739 VERIFY(NULL != realpath(altdir, realaltdir)); 740 741 /* 742 * 'cmd' should be of the form "<anything>/usr/bin/<isa>/ztest". 743 * We want to extract <isa> to determine if we should use 744 * 32 or 64 bit binaries. 745 */ 746 bin = strstr(cmd, "/usr/bin/"); 747 ztest = strstr(bin, "/ztest"); 748 isa = bin + 9; 749 isalen = ztest - isa; 750 (void) snprintf(zo->zo_alt_ztest, sizeof (zo->zo_alt_ztest), 751 "%s/usr/bin/%.*s/ztest", realaltdir, isalen, isa); 752 (void) snprintf(zo->zo_alt_libpath, sizeof (zo->zo_alt_libpath), 753 "%s/usr/lib/%.*s", realaltdir, isalen, isa); 754 755 if (0 != access(zo->zo_alt_ztest, X_OK)) { 756 ztest_dump_core = B_FALSE; 757 fatal(B_TRUE, "invalid alternate ztest: %s", 758 zo->zo_alt_ztest); 759 } else if (0 != access(zo->zo_alt_libpath, X_OK)) { 760 ztest_dump_core = B_FALSE; 761 fatal(B_TRUE, "invalid alternate lib directory %s", 762 zo->zo_alt_libpath); 763 } 764 765 umem_free(cmd, MAXPATHLEN); 766 umem_free(realaltdir, MAXPATHLEN); 767 } 768} 769 770static void 771ztest_kill(ztest_shared_t *zs) 772{ 773 zs->zs_alloc = metaslab_class_get_alloc(spa_normal_class(ztest_spa)); 774 zs->zs_space = metaslab_class_get_space(spa_normal_class(ztest_spa)); 775 776 /* 777 * Before we kill off ztest, make sure that the config is updated. 778 * See comment above spa_config_sync(). 779 */ 780 mutex_enter(&spa_namespace_lock); 781 spa_config_sync(ztest_spa, B_FALSE, B_FALSE); 782 mutex_exit(&spa_namespace_lock); 783 784 zfs_dbgmsg_print(FTAG); 785 (void) kill(getpid(), SIGKILL); 786} 787 788static uint64_t 789ztest_random(uint64_t range) 790{ 791 uint64_t r; 792 793 ASSERT3S(ztest_fd_rand, >=, 0); 794 795 if (range == 0) 796 return (0); 797 798 if (read(ztest_fd_rand, &r, sizeof (r)) != sizeof (r)) 799 fatal(1, "short read from /dev/urandom"); 800 801 return (r % range); 802} 803 804/* ARGSUSED */ 805static void 806ztest_record_enospc(const char *s) 807{ 808 ztest_shared->zs_enospc_count++; 809} 810 811static uint64_t 812ztest_get_ashift(void) 813{ 814 if (ztest_opts.zo_ashift == 0) 815 return (SPA_MINBLOCKSHIFT + ztest_random(5)); 816 return (ztest_opts.zo_ashift); 817} 818 819static nvlist_t * 820make_vdev_file(char *path, char *aux, char *pool, size_t size, uint64_t ashift) 821{ 822 char pathbuf[MAXPATHLEN]; 823 uint64_t vdev; 824 nvlist_t *file; 825 826 if (ashift == 0) 827 ashift = ztest_get_ashift(); 828 829 if (path == NULL) { 830 path = pathbuf; 831 832 if (aux != NULL) { 833 vdev = ztest_shared->zs_vdev_aux; 834 (void) snprintf(path, sizeof (pathbuf), 835 ztest_aux_template, ztest_opts.zo_dir, 836 pool == NULL ? ztest_opts.zo_pool : pool, 837 aux, vdev); 838 } else { 839 vdev = ztest_shared->zs_vdev_next_leaf++; 840 (void) snprintf(path, sizeof (pathbuf), 841 ztest_dev_template, ztest_opts.zo_dir, 842 pool == NULL ? ztest_opts.zo_pool : pool, vdev); 843 } 844 } 845 846 if (size != 0) { 847 int fd = open(path, O_RDWR | O_CREAT | O_TRUNC, 0666); 848 if (fd == -1) 849 fatal(1, "can't open %s", path); 850 if (ftruncate(fd, size) != 0) 851 fatal(1, "can't ftruncate %s", path); 852 (void) close(fd); 853 } 854 855 VERIFY(nvlist_alloc(&file, NV_UNIQUE_NAME, 0) == 0); 856 VERIFY(nvlist_add_string(file, ZPOOL_CONFIG_TYPE, VDEV_TYPE_FILE) == 0); 857 VERIFY(nvlist_add_string(file, ZPOOL_CONFIG_PATH, path) == 0); 858 VERIFY(nvlist_add_uint64(file, ZPOOL_CONFIG_ASHIFT, ashift) == 0); 859 860 return (file); 861} 862 863static nvlist_t * 864make_vdev_raidz(char *path, char *aux, char *pool, size_t size, 865 uint64_t ashift, int r) 866{ 867 nvlist_t *raidz, **child; 868 int c; 869 870 if (r < 2) 871 return (make_vdev_file(path, aux, pool, size, ashift)); 872 child = umem_alloc(r * sizeof (nvlist_t *), UMEM_NOFAIL); 873 874 for (c = 0; c < r; c++) 875 child[c] = make_vdev_file(path, aux, pool, size, ashift); 876 877 VERIFY(nvlist_alloc(&raidz, NV_UNIQUE_NAME, 0) == 0); 878 VERIFY(nvlist_add_string(raidz, ZPOOL_CONFIG_TYPE, 879 VDEV_TYPE_RAIDZ) == 0); 880 VERIFY(nvlist_add_uint64(raidz, ZPOOL_CONFIG_NPARITY, 881 ztest_opts.zo_raidz_parity) == 0); 882 VERIFY(nvlist_add_nvlist_array(raidz, ZPOOL_CONFIG_CHILDREN, 883 child, r) == 0); 884 885 for (c = 0; c < r; c++) 886 nvlist_free(child[c]); 887 888 umem_free(child, r * sizeof (nvlist_t *)); 889 890 return (raidz); 891} 892 893static nvlist_t * 894make_vdev_mirror(char *path, char *aux, char *pool, size_t size, 895 uint64_t ashift, int r, int m) 896{ 897 nvlist_t *mirror, **child; 898 int c; 899 900 if (m < 1) 901 return (make_vdev_raidz(path, aux, pool, size, ashift, r)); 902 903 child = umem_alloc(m * sizeof (nvlist_t *), UMEM_NOFAIL); 904 905 for (c = 0; c < m; c++) 906 child[c] = make_vdev_raidz(path, aux, pool, size, ashift, r); 907 908 VERIFY(nvlist_alloc(&mirror, NV_UNIQUE_NAME, 0) == 0); 909 VERIFY(nvlist_add_string(mirror, ZPOOL_CONFIG_TYPE, 910 VDEV_TYPE_MIRROR) == 0); 911 VERIFY(nvlist_add_nvlist_array(mirror, ZPOOL_CONFIG_CHILDREN, 912 child, m) == 0); 913 914 for (c = 0; c < m; c++) 915 nvlist_free(child[c]); 916 917 umem_free(child, m * sizeof (nvlist_t *)); 918 919 return (mirror); 920} 921 922static nvlist_t * 923make_vdev_root(char *path, char *aux, char *pool, size_t size, uint64_t ashift, 924 int log, int r, int m, int t) 925{ 926 nvlist_t *root, **child; 927 int c; 928 929 ASSERT(t > 0); 930 931 child = umem_alloc(t * sizeof (nvlist_t *), UMEM_NOFAIL); 932 933 for (c = 0; c < t; c++) { 934 child[c] = make_vdev_mirror(path, aux, pool, size, ashift, 935 r, m); 936 VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_IS_LOG, 937 log) == 0); 938 } 939 940 VERIFY(nvlist_alloc(&root, NV_UNIQUE_NAME, 0) == 0); 941 VERIFY(nvlist_add_string(root, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT) == 0); 942 VERIFY(nvlist_add_nvlist_array(root, aux ? aux : ZPOOL_CONFIG_CHILDREN, 943 child, t) == 0); 944 945 for (c = 0; c < t; c++) 946 nvlist_free(child[c]); 947 948 umem_free(child, t * sizeof (nvlist_t *)); 949 950 return (root); 951} 952 953/* 954 * Find a random spa version. Returns back a random spa version in the 955 * range [initial_version, SPA_VERSION_FEATURES]. 956 */ 957static uint64_t 958ztest_random_spa_version(uint64_t initial_version) 959{ 960 uint64_t version = initial_version; 961 962 if (version <= SPA_VERSION_BEFORE_FEATURES) { 963 version = version + 964 ztest_random(SPA_VERSION_BEFORE_FEATURES - version + 1); 965 } 966 967 if (version > SPA_VERSION_BEFORE_FEATURES) 968 version = SPA_VERSION_FEATURES; 969 970 ASSERT(SPA_VERSION_IS_SUPPORTED(version)); 971 return (version); 972} 973 974static int 975ztest_random_blocksize(void) 976{ 977 uint64_t block_shift; 978 /* 979 * Choose a block size >= the ashift. 980 * If the SPA supports new MAXBLOCKSIZE, test up to 1MB blocks. 981 */ 982 int maxbs = SPA_OLD_MAXBLOCKSHIFT; 983 if (spa_maxblocksize(ztest_spa) == SPA_MAXBLOCKSIZE) 984 maxbs = 20; 985 block_shift = ztest_random(maxbs - ztest_spa->spa_max_ashift + 1); 986 return (1 << (SPA_MINBLOCKSHIFT + block_shift)); 987} 988 989static int 990ztest_random_ibshift(void) 991{ 992 return (DN_MIN_INDBLKSHIFT + 993 ztest_random(DN_MAX_INDBLKSHIFT - DN_MIN_INDBLKSHIFT + 1)); 994} 995 996static uint64_t 997ztest_random_vdev_top(spa_t *spa, boolean_t log_ok) 998{ 999 uint64_t top; 1000 vdev_t *rvd = spa->spa_root_vdev; 1001 vdev_t *tvd; 1002 1003 ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0); 1004 1005 do { 1006 top = ztest_random(rvd->vdev_children); 1007 tvd = rvd->vdev_child[top]; 1008 } while (tvd->vdev_ishole || (tvd->vdev_islog && !log_ok) || 1009 tvd->vdev_mg == NULL || tvd->vdev_mg->mg_class == NULL); 1010 1011 return (top); 1012} 1013 1014static uint64_t 1015ztest_random_dsl_prop(zfs_prop_t prop) 1016{ 1017 uint64_t value; 1018 1019 do { 1020 value = zfs_prop_random_value(prop, ztest_random(-1ULL)); 1021 } while (prop == ZFS_PROP_CHECKSUM && value == ZIO_CHECKSUM_OFF); 1022 1023 return (value); 1024} 1025 1026static int 1027ztest_dsl_prop_set_uint64(char *osname, zfs_prop_t prop, uint64_t value, 1028 boolean_t inherit) 1029{ 1030 const char *propname = zfs_prop_to_name(prop); 1031 const char *valname; 1032 char setpoint[MAXPATHLEN]; 1033 uint64_t curval; 1034 int error; 1035 1036 error = dsl_prop_set_int(osname, propname, 1037 (inherit ? ZPROP_SRC_NONE : ZPROP_SRC_LOCAL), value); 1038 1039 if (error == ENOSPC) { 1040 ztest_record_enospc(FTAG); 1041 return (error); 1042 } 1043 ASSERT0(error); 1044 1045 VERIFY0(dsl_prop_get_integer(osname, propname, &curval, setpoint)); 1046 1047 if (ztest_opts.zo_verbose >= 6) { 1048 VERIFY(zfs_prop_index_to_string(prop, curval, &valname) == 0); 1049 (void) printf("%s %s = %s at '%s'\n", 1050 osname, propname, valname, setpoint); 1051 } 1052 1053 return (error); 1054} 1055 1056static int 1057ztest_spa_prop_set_uint64(zpool_prop_t prop, uint64_t value) 1058{ 1059 spa_t *spa = ztest_spa; 1060 nvlist_t *props = NULL; 1061 int error; 1062 1063 VERIFY(nvlist_alloc(&props, NV_UNIQUE_NAME, 0) == 0); 1064 VERIFY(nvlist_add_uint64(props, zpool_prop_to_name(prop), value) == 0); 1065 1066 error = spa_prop_set(spa, props); 1067 1068 nvlist_free(props); 1069 1070 if (error == ENOSPC) { 1071 ztest_record_enospc(FTAG); 1072 return (error); 1073 } 1074 ASSERT0(error); 1075 1076 return (error); 1077} 1078 1079static void 1080ztest_rll_init(rll_t *rll) 1081{ 1082 rll->rll_writer = NULL; 1083 rll->rll_readers = 0; 1084 VERIFY(_mutex_init(&rll->rll_lock, USYNC_THREAD, NULL) == 0); 1085 VERIFY(cond_init(&rll->rll_cv, USYNC_THREAD, NULL) == 0); 1086} 1087 1088static void 1089ztest_rll_destroy(rll_t *rll) 1090{ 1091 ASSERT(rll->rll_writer == NULL); 1092 ASSERT(rll->rll_readers == 0); 1093 VERIFY(_mutex_destroy(&rll->rll_lock) == 0); 1094 VERIFY(cond_destroy(&rll->rll_cv) == 0); 1095} 1096 1097static void 1098ztest_rll_lock(rll_t *rll, rl_type_t type) 1099{ 1100 VERIFY(mutex_lock(&rll->rll_lock) == 0); 1101 1102 if (type == RL_READER) { 1103 while (rll->rll_writer != NULL) 1104 (void) cond_wait(&rll->rll_cv, &rll->rll_lock); 1105 rll->rll_readers++; 1106 } else { 1107 while (rll->rll_writer != NULL || rll->rll_readers) 1108 (void) cond_wait(&rll->rll_cv, &rll->rll_lock); 1109 rll->rll_writer = curthread; 1110 } 1111 1112 VERIFY(mutex_unlock(&rll->rll_lock) == 0); 1113} 1114 1115static void 1116ztest_rll_unlock(rll_t *rll) 1117{ 1118 VERIFY(mutex_lock(&rll->rll_lock) == 0); 1119 1120 if (rll->rll_writer) { 1121 ASSERT(rll->rll_readers == 0); 1122 rll->rll_writer = NULL; 1123 } else { 1124 ASSERT(rll->rll_readers != 0); 1125 ASSERT(rll->rll_writer == NULL); 1126 rll->rll_readers--; 1127 } 1128 1129 if (rll->rll_writer == NULL && rll->rll_readers == 0) 1130 VERIFY(cond_broadcast(&rll->rll_cv) == 0); 1131 1132 VERIFY(mutex_unlock(&rll->rll_lock) == 0); 1133} 1134 1135static void 1136ztest_object_lock(ztest_ds_t *zd, uint64_t object, rl_type_t type) 1137{ 1138 rll_t *rll = &zd->zd_object_lock[object & (ZTEST_OBJECT_LOCKS - 1)]; 1139 1140 ztest_rll_lock(rll, type); 1141} 1142 1143static void 1144ztest_object_unlock(ztest_ds_t *zd, uint64_t object) 1145{ 1146 rll_t *rll = &zd->zd_object_lock[object & (ZTEST_OBJECT_LOCKS - 1)]; 1147 1148 ztest_rll_unlock(rll); 1149} 1150 1151static rl_t * 1152ztest_range_lock(ztest_ds_t *zd, uint64_t object, uint64_t offset, 1153 uint64_t size, rl_type_t type) 1154{ 1155 uint64_t hash = object ^ (offset % (ZTEST_RANGE_LOCKS + 1)); 1156 rll_t *rll = &zd->zd_range_lock[hash & (ZTEST_RANGE_LOCKS - 1)]; 1157 rl_t *rl; 1158 1159 rl = umem_alloc(sizeof (*rl), UMEM_NOFAIL); 1160 rl->rl_object = object; 1161 rl->rl_offset = offset; 1162 rl->rl_size = size; 1163 rl->rl_lock = rll; 1164 1165 ztest_rll_lock(rll, type); 1166 1167 return (rl); 1168} 1169 1170static void 1171ztest_range_unlock(rl_t *rl) 1172{ 1173 rll_t *rll = rl->rl_lock; 1174 1175 ztest_rll_unlock(rll); 1176 1177 umem_free(rl, sizeof (*rl)); 1178} 1179 1180static void 1181ztest_zd_init(ztest_ds_t *zd, ztest_shared_ds_t *szd, objset_t *os) 1182{ 1183 zd->zd_os = os; 1184 zd->zd_zilog = dmu_objset_zil(os); 1185 zd->zd_shared = szd; 1186 dmu_objset_name(os, zd->zd_name); 1187 1188 if (zd->zd_shared != NULL) 1189 zd->zd_shared->zd_seq = 0; 1190 1191 VERIFY(rwlock_init(&zd->zd_zilog_lock, USYNC_THREAD, NULL) == 0); 1192 VERIFY(_mutex_init(&zd->zd_dirobj_lock, USYNC_THREAD, NULL) == 0); 1193 1194 for (int l = 0; l < ZTEST_OBJECT_LOCKS; l++) 1195 ztest_rll_init(&zd->zd_object_lock[l]); 1196 1197 for (int l = 0; l < ZTEST_RANGE_LOCKS; l++) 1198 ztest_rll_init(&zd->zd_range_lock[l]); 1199} 1200 1201static void 1202ztest_zd_fini(ztest_ds_t *zd) 1203{ 1204 VERIFY(_mutex_destroy(&zd->zd_dirobj_lock) == 0); 1205 1206 for (int l = 0; l < ZTEST_OBJECT_LOCKS; l++) 1207 ztest_rll_destroy(&zd->zd_object_lock[l]); 1208 1209 for (int l = 0; l < ZTEST_RANGE_LOCKS; l++) 1210 ztest_rll_destroy(&zd->zd_range_lock[l]); 1211} 1212 1213#define TXG_MIGHTWAIT (ztest_random(10) == 0 ? TXG_NOWAIT : TXG_WAIT) 1214 1215static uint64_t 1216ztest_tx_assign(dmu_tx_t *tx, uint64_t txg_how, const char *tag) 1217{ 1218 uint64_t txg; 1219 int error; 1220 1221 /* 1222 * Attempt to assign tx to some transaction group. 1223 */ 1224 error = dmu_tx_assign(tx, txg_how); 1225 if (error) { 1226 if (error == ERESTART) { 1227 ASSERT(txg_how == TXG_NOWAIT); 1228 dmu_tx_wait(tx); 1229 } else { 1230 ASSERT3U(error, ==, ENOSPC); 1231 ztest_record_enospc(tag); 1232 } 1233 dmu_tx_abort(tx); 1234 return (0); 1235 } 1236 txg = dmu_tx_get_txg(tx); 1237 ASSERT(txg != 0); 1238 return (txg); 1239} 1240 1241static void 1242ztest_pattern_set(void *buf, uint64_t size, uint64_t value) 1243{ 1244 uint64_t *ip = buf; 1245 uint64_t *ip_end = (uint64_t *)((uintptr_t)buf + (uintptr_t)size); 1246 1247 while (ip < ip_end) 1248 *ip++ = value; 1249} 1250 1251static boolean_t 1252ztest_pattern_match(void *buf, uint64_t size, uint64_t value) 1253{ 1254 uint64_t *ip = buf; 1255 uint64_t *ip_end = (uint64_t *)((uintptr_t)buf + (uintptr_t)size); 1256 uint64_t diff = 0; 1257 1258 while (ip < ip_end) 1259 diff |= (value - *ip++); 1260 1261 return (diff == 0); 1262} 1263 1264static void 1265ztest_bt_generate(ztest_block_tag_t *bt, objset_t *os, uint64_t object, 1266 uint64_t offset, uint64_t gen, uint64_t txg, uint64_t crtxg) 1267{ 1268 bt->bt_magic = BT_MAGIC; 1269 bt->bt_objset = dmu_objset_id(os); 1270 bt->bt_object = object; 1271 bt->bt_offset = offset; 1272 bt->bt_gen = gen; 1273 bt->bt_txg = txg; 1274 bt->bt_crtxg = crtxg; 1275} 1276 1277static void 1278ztest_bt_verify(ztest_block_tag_t *bt, objset_t *os, uint64_t object, 1279 uint64_t offset, uint64_t gen, uint64_t txg, uint64_t crtxg) 1280{ 1281 ASSERT3U(bt->bt_magic, ==, BT_MAGIC); 1282 ASSERT3U(bt->bt_objset, ==, dmu_objset_id(os)); 1283 ASSERT3U(bt->bt_object, ==, object); 1284 ASSERT3U(bt->bt_offset, ==, offset); 1285 ASSERT3U(bt->bt_gen, <=, gen); 1286 ASSERT3U(bt->bt_txg, <=, txg); 1287 ASSERT3U(bt->bt_crtxg, ==, crtxg); 1288} 1289 1290static ztest_block_tag_t * 1291ztest_bt_bonus(dmu_buf_t *db) 1292{ 1293 dmu_object_info_t doi; 1294 ztest_block_tag_t *bt; 1295 1296 dmu_object_info_from_db(db, &doi); 1297 ASSERT3U(doi.doi_bonus_size, <=, db->db_size); 1298 ASSERT3U(doi.doi_bonus_size, >=, sizeof (*bt)); 1299 bt = (void *)((char *)db->db_data + doi.doi_bonus_size - sizeof (*bt)); 1300 1301 return (bt); 1302} 1303 1304/* 1305 * ZIL logging ops 1306 */ 1307 1308#define lrz_type lr_mode 1309#define lrz_blocksize lr_uid 1310#define lrz_ibshift lr_gid 1311#define lrz_bonustype lr_rdev 1312#define lrz_bonuslen lr_crtime[1] 1313 1314static void 1315ztest_log_create(ztest_ds_t *zd, dmu_tx_t *tx, lr_create_t *lr) 1316{ 1317 char *name = (void *)(lr + 1); /* name follows lr */ 1318 size_t namesize = strlen(name) + 1; 1319 itx_t *itx; 1320 1321 if (zil_replaying(zd->zd_zilog, tx)) 1322 return; 1323 1324 itx = zil_itx_create(TX_CREATE, sizeof (*lr) + namesize); 1325 bcopy(&lr->lr_common + 1, &itx->itx_lr + 1, 1326 sizeof (*lr) + namesize - sizeof (lr_t)); 1327 1328 zil_itx_assign(zd->zd_zilog, itx, tx); 1329} 1330 1331static void 1332ztest_log_remove(ztest_ds_t *zd, dmu_tx_t *tx, lr_remove_t *lr, uint64_t object) 1333{ 1334 char *name = (void *)(lr + 1); /* name follows lr */ 1335 size_t namesize = strlen(name) + 1; 1336 itx_t *itx; 1337 1338 if (zil_replaying(zd->zd_zilog, tx)) 1339 return; 1340 1341 itx = zil_itx_create(TX_REMOVE, sizeof (*lr) + namesize); 1342 bcopy(&lr->lr_common + 1, &itx->itx_lr + 1, 1343 sizeof (*lr) + namesize - sizeof (lr_t)); 1344 1345 itx->itx_oid = object; 1346 zil_itx_assign(zd->zd_zilog, itx, tx); 1347} 1348 1349static void 1350ztest_log_write(ztest_ds_t *zd, dmu_tx_t *tx, lr_write_t *lr) 1351{ 1352 itx_t *itx; 1353 itx_wr_state_t write_state = ztest_random(WR_NUM_STATES); 1354 1355 if (zil_replaying(zd->zd_zilog, tx)) 1356 return; 1357 1358 if (lr->lr_length > ZIL_MAX_LOG_DATA) 1359 write_state = WR_INDIRECT; 1360 1361 itx = zil_itx_create(TX_WRITE, 1362 sizeof (*lr) + (write_state == WR_COPIED ? lr->lr_length : 0)); 1363 1364 if (write_state == WR_COPIED && 1365 dmu_read(zd->zd_os, lr->lr_foid, lr->lr_offset, lr->lr_length, 1366 ((lr_write_t *)&itx->itx_lr) + 1, DMU_READ_NO_PREFETCH) != 0) { 1367 zil_itx_destroy(itx); 1368 itx = zil_itx_create(TX_WRITE, sizeof (*lr)); 1369 write_state = WR_NEED_COPY; 1370 } 1371 itx->itx_private = zd; 1372 itx->itx_wr_state = write_state; 1373 itx->itx_sync = (ztest_random(8) == 0); 1374 itx->itx_sod += (write_state == WR_NEED_COPY ? lr->lr_length : 0); 1375 1376 bcopy(&lr->lr_common + 1, &itx->itx_lr + 1, 1377 sizeof (*lr) - sizeof (lr_t)); 1378 1379 zil_itx_assign(zd->zd_zilog, itx, tx); 1380} 1381 1382static void 1383ztest_log_truncate(ztest_ds_t *zd, dmu_tx_t *tx, lr_truncate_t *lr) 1384{ 1385 itx_t *itx; 1386 1387 if (zil_replaying(zd->zd_zilog, tx)) 1388 return; 1389 1390 itx = zil_itx_create(TX_TRUNCATE, sizeof (*lr)); 1391 bcopy(&lr->lr_common + 1, &itx->itx_lr + 1, 1392 sizeof (*lr) - sizeof (lr_t)); 1393 1394 itx->itx_sync = B_FALSE; 1395 zil_itx_assign(zd->zd_zilog, itx, tx); 1396} 1397 1398static void 1399ztest_log_setattr(ztest_ds_t *zd, dmu_tx_t *tx, lr_setattr_t *lr) 1400{ 1401 itx_t *itx; 1402 1403 if (zil_replaying(zd->zd_zilog, tx)) 1404 return; 1405 1406 itx = zil_itx_create(TX_SETATTR, sizeof (*lr)); 1407 bcopy(&lr->lr_common + 1, &itx->itx_lr + 1, 1408 sizeof (*lr) - sizeof (lr_t)); 1409 1410 itx->itx_sync = B_FALSE; 1411 zil_itx_assign(zd->zd_zilog, itx, tx); 1412} 1413 1414/* 1415 * ZIL replay ops 1416 */ 1417static int 1418ztest_replay_create(ztest_ds_t *zd, lr_create_t *lr, boolean_t byteswap) 1419{ 1420 char *name = (void *)(lr + 1); /* name follows lr */ 1421 objset_t *os = zd->zd_os; 1422 ztest_block_tag_t *bbt; 1423 dmu_buf_t *db; 1424 dmu_tx_t *tx; 1425 uint64_t txg; 1426 int error = 0; 1427 1428 if (byteswap) 1429 byteswap_uint64_array(lr, sizeof (*lr)); 1430 1431 ASSERT(lr->lr_doid == ZTEST_DIROBJ); 1432 ASSERT(name[0] != '\0'); 1433 1434 tx = dmu_tx_create(os); 1435 1436 dmu_tx_hold_zap(tx, lr->lr_doid, B_TRUE, name); 1437 1438 if (lr->lrz_type == DMU_OT_ZAP_OTHER) { 1439 dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL); 1440 } else { 1441 dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT); 1442 } 1443 1444 txg = ztest_tx_assign(tx, TXG_WAIT, FTAG); 1445 if (txg == 0) 1446 return (ENOSPC); 1447 1448 ASSERT(dmu_objset_zil(os)->zl_replay == !!lr->lr_foid); 1449 1450 if (lr->lrz_type == DMU_OT_ZAP_OTHER) { 1451 if (lr->lr_foid == 0) { 1452 lr->lr_foid = zap_create(os, 1453 lr->lrz_type, lr->lrz_bonustype, 1454 lr->lrz_bonuslen, tx); 1455 } else { 1456 error = zap_create_claim(os, lr->lr_foid, 1457 lr->lrz_type, lr->lrz_bonustype, 1458 lr->lrz_bonuslen, tx); 1459 } 1460 } else { 1461 if (lr->lr_foid == 0) { 1462 lr->lr_foid = dmu_object_alloc(os, 1463 lr->lrz_type, 0, lr->lrz_bonustype, 1464 lr->lrz_bonuslen, tx); 1465 } else { 1466 error = dmu_object_claim(os, lr->lr_foid, 1467 lr->lrz_type, 0, lr->lrz_bonustype, 1468 lr->lrz_bonuslen, tx); 1469 } 1470 } 1471 1472 if (error) { 1473 ASSERT3U(error, ==, EEXIST); 1474 ASSERT(zd->zd_zilog->zl_replay); 1475 dmu_tx_commit(tx); 1476 return (error); 1477 } 1478 1479 ASSERT(lr->lr_foid != 0); 1480 1481 if (lr->lrz_type != DMU_OT_ZAP_OTHER) 1482 VERIFY3U(0, ==, dmu_object_set_blocksize(os, lr->lr_foid, 1483 lr->lrz_blocksize, lr->lrz_ibshift, tx)); 1484 1485 VERIFY3U(0, ==, dmu_bonus_hold(os, lr->lr_foid, FTAG, &db)); 1486 bbt = ztest_bt_bonus(db); 1487 dmu_buf_will_dirty(db, tx); 1488 ztest_bt_generate(bbt, os, lr->lr_foid, -1ULL, lr->lr_gen, txg, txg); 1489 dmu_buf_rele(db, FTAG); 1490 1491 VERIFY3U(0, ==, zap_add(os, lr->lr_doid, name, sizeof (uint64_t), 1, 1492 &lr->lr_foid, tx)); 1493 1494 (void) ztest_log_create(zd, tx, lr); 1495 1496 dmu_tx_commit(tx); 1497 1498 return (0); 1499} 1500 1501static int 1502ztest_replay_remove(ztest_ds_t *zd, lr_remove_t *lr, boolean_t byteswap) 1503{ 1504 char *name = (void *)(lr + 1); /* name follows lr */ 1505 objset_t *os = zd->zd_os; 1506 dmu_object_info_t doi; 1507 dmu_tx_t *tx; 1508 uint64_t object, txg; 1509 1510 if (byteswap) 1511 byteswap_uint64_array(lr, sizeof (*lr)); 1512 1513 ASSERT(lr->lr_doid == ZTEST_DIROBJ); 1514 ASSERT(name[0] != '\0'); 1515 1516 VERIFY3U(0, ==, 1517 zap_lookup(os, lr->lr_doid, name, sizeof (object), 1, &object)); 1518 ASSERT(object != 0); 1519 1520 ztest_object_lock(zd, object, RL_WRITER); 1521 1522 VERIFY3U(0, ==, dmu_object_info(os, object, &doi)); 1523 1524 tx = dmu_tx_create(os); 1525 1526 dmu_tx_hold_zap(tx, lr->lr_doid, B_FALSE, name); 1527 dmu_tx_hold_free(tx, object, 0, DMU_OBJECT_END); 1528 1529 txg = ztest_tx_assign(tx, TXG_WAIT, FTAG); 1530 if (txg == 0) { 1531 ztest_object_unlock(zd, object); 1532 return (ENOSPC); 1533 } 1534 1535 if (doi.doi_type == DMU_OT_ZAP_OTHER) { 1536 VERIFY3U(0, ==, zap_destroy(os, object, tx)); 1537 } else { 1538 VERIFY3U(0, ==, dmu_object_free(os, object, tx)); 1539 } 1540 1541 VERIFY3U(0, ==, zap_remove(os, lr->lr_doid, name, tx)); 1542 1543 (void) ztest_log_remove(zd, tx, lr, object); 1544 1545 dmu_tx_commit(tx); 1546 1547 ztest_object_unlock(zd, object); 1548 1549 return (0); 1550} 1551 1552static int 1553ztest_replay_write(ztest_ds_t *zd, lr_write_t *lr, boolean_t byteswap) 1554{ 1555 objset_t *os = zd->zd_os; 1556 void *data = lr + 1; /* data follows lr */ 1557 uint64_t offset, length; 1558 ztest_block_tag_t *bt = data; 1559 ztest_block_tag_t *bbt; 1560 uint64_t gen, txg, lrtxg, crtxg; 1561 dmu_object_info_t doi; 1562 dmu_tx_t *tx; 1563 dmu_buf_t *db; 1564 arc_buf_t *abuf = NULL; 1565 rl_t *rl; 1566 1567 if (byteswap) 1568 byteswap_uint64_array(lr, sizeof (*lr)); 1569 1570 offset = lr->lr_offset; 1571 length = lr->lr_length; 1572 1573 /* If it's a dmu_sync() block, write the whole block */ 1574 if (lr->lr_common.lrc_reclen == sizeof (lr_write_t)) { 1575 uint64_t blocksize = BP_GET_LSIZE(&lr->lr_blkptr); 1576 if (length < blocksize) { 1577 offset -= offset % blocksize; 1578 length = blocksize; 1579 } 1580 } 1581 1582 if (bt->bt_magic == BSWAP_64(BT_MAGIC)) 1583 byteswap_uint64_array(bt, sizeof (*bt)); 1584 1585 if (bt->bt_magic != BT_MAGIC) 1586 bt = NULL; 1587 1588 ztest_object_lock(zd, lr->lr_foid, RL_READER); 1589 rl = ztest_range_lock(zd, lr->lr_foid, offset, length, RL_WRITER); 1590 1591 VERIFY3U(0, ==, dmu_bonus_hold(os, lr->lr_foid, FTAG, &db)); 1592 1593 dmu_object_info_from_db(db, &doi); 1594 1595 bbt = ztest_bt_bonus(db); 1596 ASSERT3U(bbt->bt_magic, ==, BT_MAGIC); 1597 gen = bbt->bt_gen; 1598 crtxg = bbt->bt_crtxg; 1599 lrtxg = lr->lr_common.lrc_txg; 1600 1601 tx = dmu_tx_create(os); 1602 1603 dmu_tx_hold_write(tx, lr->lr_foid, offset, length); 1604 1605 if (ztest_random(8) == 0 && length == doi.doi_data_block_size && 1606 P2PHASE(offset, length) == 0) 1607 abuf = dmu_request_arcbuf(db, length); 1608 1609 txg = ztest_tx_assign(tx, TXG_WAIT, FTAG); 1610 if (txg == 0) { 1611 if (abuf != NULL) 1612 dmu_return_arcbuf(abuf); 1613 dmu_buf_rele(db, FTAG); 1614 ztest_range_unlock(rl); 1615 ztest_object_unlock(zd, lr->lr_foid); 1616 return (ENOSPC); 1617 } 1618 1619 if (bt != NULL) { 1620 /* 1621 * Usually, verify the old data before writing new data -- 1622 * but not always, because we also want to verify correct 1623 * behavior when the data was not recently read into cache. 1624 */ 1625 ASSERT(offset % doi.doi_data_block_size == 0); 1626 if (ztest_random(4) != 0) { 1627 int prefetch = ztest_random(2) ? 1628 DMU_READ_PREFETCH : DMU_READ_NO_PREFETCH; 1629 ztest_block_tag_t rbt; 1630 1631 VERIFY(dmu_read(os, lr->lr_foid, offset, 1632 sizeof (rbt), &rbt, prefetch) == 0); 1633 if (rbt.bt_magic == BT_MAGIC) { 1634 ztest_bt_verify(&rbt, os, lr->lr_foid, 1635 offset, gen, txg, crtxg); 1636 } 1637 } 1638 1639 /* 1640 * Writes can appear to be newer than the bonus buffer because 1641 * the ztest_get_data() callback does a dmu_read() of the 1642 * open-context data, which may be different than the data 1643 * as it was when the write was generated. 1644 */ 1645 if (zd->zd_zilog->zl_replay) { 1646 ztest_bt_verify(bt, os, lr->lr_foid, offset, 1647 MAX(gen, bt->bt_gen), MAX(txg, lrtxg), 1648 bt->bt_crtxg); 1649 } 1650 1651 /* 1652 * Set the bt's gen/txg to the bonus buffer's gen/txg 1653 * so that all of the usual ASSERTs will work. 1654 */ 1655 ztest_bt_generate(bt, os, lr->lr_foid, offset, gen, txg, crtxg); 1656 } 1657 1658 if (abuf == NULL) { 1659 dmu_write(os, lr->lr_foid, offset, length, data, tx); 1660 } else { 1661 bcopy(data, abuf->b_data, length); 1662 dmu_assign_arcbuf(db, offset, abuf, tx); 1663 } 1664 1665 (void) ztest_log_write(zd, tx, lr); 1666 1667 dmu_buf_rele(db, FTAG); 1668 1669 dmu_tx_commit(tx); 1670 1671 ztest_range_unlock(rl); 1672 ztest_object_unlock(zd, lr->lr_foid); 1673 1674 return (0); 1675} 1676 1677static int 1678ztest_replay_truncate(ztest_ds_t *zd, lr_truncate_t *lr, boolean_t byteswap) 1679{ 1680 objset_t *os = zd->zd_os; 1681 dmu_tx_t *tx; 1682 uint64_t txg; 1683 rl_t *rl; 1684 1685 if (byteswap) 1686 byteswap_uint64_array(lr, sizeof (*lr)); 1687 1688 ztest_object_lock(zd, lr->lr_foid, RL_READER); 1689 rl = ztest_range_lock(zd, lr->lr_foid, lr->lr_offset, lr->lr_length, 1690 RL_WRITER); 1691 1692 tx = dmu_tx_create(os); 1693 1694 dmu_tx_hold_free(tx, lr->lr_foid, lr->lr_offset, lr->lr_length); 1695 1696 txg = ztest_tx_assign(tx, TXG_WAIT, FTAG); 1697 if (txg == 0) { 1698 ztest_range_unlock(rl); 1699 ztest_object_unlock(zd, lr->lr_foid); 1700 return (ENOSPC); 1701 } 1702 1703 VERIFY(dmu_free_range(os, lr->lr_foid, lr->lr_offset, 1704 lr->lr_length, tx) == 0); 1705 1706 (void) ztest_log_truncate(zd, tx, lr); 1707 1708 dmu_tx_commit(tx); 1709 1710 ztest_range_unlock(rl); 1711 ztest_object_unlock(zd, lr->lr_foid); 1712 1713 return (0); 1714} 1715 1716static int 1717ztest_replay_setattr(ztest_ds_t *zd, lr_setattr_t *lr, boolean_t byteswap) 1718{ 1719 objset_t *os = zd->zd_os; 1720 dmu_tx_t *tx; 1721 dmu_buf_t *db; 1722 ztest_block_tag_t *bbt; 1723 uint64_t txg, lrtxg, crtxg; 1724 1725 if (byteswap) 1726 byteswap_uint64_array(lr, sizeof (*lr)); 1727 1728 ztest_object_lock(zd, lr->lr_foid, RL_WRITER); 1729 1730 VERIFY3U(0, ==, dmu_bonus_hold(os, lr->lr_foid, FTAG, &db)); 1731 1732 tx = dmu_tx_create(os); 1733 dmu_tx_hold_bonus(tx, lr->lr_foid); 1734 1735 txg = ztest_tx_assign(tx, TXG_WAIT, FTAG); 1736 if (txg == 0) { 1737 dmu_buf_rele(db, FTAG); 1738 ztest_object_unlock(zd, lr->lr_foid); 1739 return (ENOSPC); 1740 } 1741 1742 bbt = ztest_bt_bonus(db); 1743 ASSERT3U(bbt->bt_magic, ==, BT_MAGIC); 1744 crtxg = bbt->bt_crtxg; 1745 lrtxg = lr->lr_common.lrc_txg; 1746 1747 if (zd->zd_zilog->zl_replay) { 1748 ASSERT(lr->lr_size != 0); 1749 ASSERT(lr->lr_mode != 0); 1750 ASSERT(lrtxg != 0); 1751 } else { 1752 /* 1753 * Randomly change the size and increment the generation. 1754 */ 1755 lr->lr_size = (ztest_random(db->db_size / sizeof (*bbt)) + 1) * 1756 sizeof (*bbt); 1757 lr->lr_mode = bbt->bt_gen + 1; 1758 ASSERT(lrtxg == 0); 1759 } 1760 1761 /* 1762 * Verify that the current bonus buffer is not newer than our txg. 1763 */ 1764 ztest_bt_verify(bbt, os, lr->lr_foid, -1ULL, lr->lr_mode, 1765 MAX(txg, lrtxg), crtxg); 1766 1767 dmu_buf_will_dirty(db, tx); 1768 1769 ASSERT3U(lr->lr_size, >=, sizeof (*bbt)); 1770 ASSERT3U(lr->lr_size, <=, db->db_size); 1771 VERIFY0(dmu_set_bonus(db, lr->lr_size, tx)); 1772 bbt = ztest_bt_bonus(db); 1773 1774 ztest_bt_generate(bbt, os, lr->lr_foid, -1ULL, lr->lr_mode, txg, crtxg); 1775 1776 dmu_buf_rele(db, FTAG); 1777 1778 (void) ztest_log_setattr(zd, tx, lr); 1779 1780 dmu_tx_commit(tx); 1781 1782 ztest_object_unlock(zd, lr->lr_foid); 1783 1784 return (0); 1785} 1786 1787zil_replay_func_t *ztest_replay_vector[TX_MAX_TYPE] = { 1788 NULL, /* 0 no such transaction type */ 1789 ztest_replay_create, /* TX_CREATE */ 1790 NULL, /* TX_MKDIR */ 1791 NULL, /* TX_MKXATTR */ 1792 NULL, /* TX_SYMLINK */ 1793 ztest_replay_remove, /* TX_REMOVE */ 1794 NULL, /* TX_RMDIR */ 1795 NULL, /* TX_LINK */ 1796 NULL, /* TX_RENAME */ 1797 ztest_replay_write, /* TX_WRITE */ 1798 ztest_replay_truncate, /* TX_TRUNCATE */ 1799 ztest_replay_setattr, /* TX_SETATTR */ 1800 NULL, /* TX_ACL */ 1801 NULL, /* TX_CREATE_ACL */ 1802 NULL, /* TX_CREATE_ATTR */ 1803 NULL, /* TX_CREATE_ACL_ATTR */ 1804 NULL, /* TX_MKDIR_ACL */ 1805 NULL, /* TX_MKDIR_ATTR */ 1806 NULL, /* TX_MKDIR_ACL_ATTR */ 1807 NULL, /* TX_WRITE2 */ 1808}; 1809 1810/* 1811 * ZIL get_data callbacks 1812 */ 1813 1814static void 1815ztest_get_done(zgd_t *zgd, int error) 1816{ 1817 ztest_ds_t *zd = zgd->zgd_private; 1818 uint64_t object = zgd->zgd_rl->rl_object; 1819 1820 if (zgd->zgd_db) 1821 dmu_buf_rele(zgd->zgd_db, zgd); 1822 1823 ztest_range_unlock(zgd->zgd_rl); 1824 ztest_object_unlock(zd, object); 1825 1826 if (error == 0 && zgd->zgd_bp) 1827 zil_add_block(zgd->zgd_zilog, zgd->zgd_bp); 1828 1829 umem_free(zgd, sizeof (*zgd)); 1830} 1831 1832static int 1833ztest_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio) 1834{ 1835 ztest_ds_t *zd = arg; 1836 objset_t *os = zd->zd_os; 1837 uint64_t object = lr->lr_foid; 1838 uint64_t offset = lr->lr_offset; 1839 uint64_t size = lr->lr_length; 1840 blkptr_t *bp = &lr->lr_blkptr; 1841 uint64_t txg = lr->lr_common.lrc_txg; 1842 uint64_t crtxg; 1843 dmu_object_info_t doi; 1844 dmu_buf_t *db; 1845 zgd_t *zgd; 1846 int error; 1847 1848 ztest_object_lock(zd, object, RL_READER); 1849 error = dmu_bonus_hold(os, object, FTAG, &db); 1850 if (error) { 1851 ztest_object_unlock(zd, object); 1852 return (error); 1853 } 1854 1855 crtxg = ztest_bt_bonus(db)->bt_crtxg; 1856 1857 if (crtxg == 0 || crtxg > txg) { 1858 dmu_buf_rele(db, FTAG); 1859 ztest_object_unlock(zd, object); 1860 return (ENOENT); 1861 } 1862 1863 dmu_object_info_from_db(db, &doi); 1864 dmu_buf_rele(db, FTAG); 1865 db = NULL; 1866 1867 zgd = umem_zalloc(sizeof (*zgd), UMEM_NOFAIL); 1868 zgd->zgd_zilog = zd->zd_zilog; 1869 zgd->zgd_private = zd; 1870 1871 if (buf != NULL) { /* immediate write */ 1872 zgd->zgd_rl = ztest_range_lock(zd, object, offset, size, 1873 RL_READER); 1874 1875 error = dmu_read(os, object, offset, size, buf, 1876 DMU_READ_NO_PREFETCH); 1877 ASSERT(error == 0); 1878 } else { 1879 size = doi.doi_data_block_size; 1880 if (ISP2(size)) { 1881 offset = P2ALIGN(offset, size); 1882 } else { 1883 ASSERT(offset < size); 1884 offset = 0; 1885 } 1886 1887 zgd->zgd_rl = ztest_range_lock(zd, object, offset, size, 1888 RL_READER); 1889 1890 error = dmu_buf_hold(os, object, offset, zgd, &db, 1891 DMU_READ_NO_PREFETCH); 1892 1893 if (error == 0) { 1894 blkptr_t *obp = dmu_buf_get_blkptr(db); 1895 if (obp) { 1896 ASSERT(BP_IS_HOLE(bp)); 1897 *bp = *obp; 1898 } 1899 1900 zgd->zgd_db = db; 1901 zgd->zgd_bp = bp; 1902 1903 ASSERT(db->db_offset == offset); 1904 ASSERT(db->db_size == size); 1905 1906 error = dmu_sync(zio, lr->lr_common.lrc_txg, 1907 ztest_get_done, zgd); 1908 1909 if (error == 0) 1910 return (0); 1911 } 1912 } 1913 1914 ztest_get_done(zgd, error); 1915 1916 return (error); 1917} 1918 1919static void * 1920ztest_lr_alloc(size_t lrsize, char *name) 1921{ 1922 char *lr; 1923 size_t namesize = name ? strlen(name) + 1 : 0; 1924 1925 lr = umem_zalloc(lrsize + namesize, UMEM_NOFAIL); 1926 1927 if (name) 1928 bcopy(name, lr + lrsize, namesize); 1929 1930 return (lr); 1931} 1932 1933void 1934ztest_lr_free(void *lr, size_t lrsize, char *name) 1935{ 1936 size_t namesize = name ? strlen(name) + 1 : 0; 1937 1938 umem_free(lr, lrsize + namesize); 1939} 1940 1941/* 1942 * Lookup a bunch of objects. Returns the number of objects not found. 1943 */ 1944static int 1945ztest_lookup(ztest_ds_t *zd, ztest_od_t *od, int count) 1946{ 1947 int missing = 0; 1948 int error; 1949 1950 ASSERT(_mutex_held(&zd->zd_dirobj_lock)); 1951 1952 for (int i = 0; i < count; i++, od++) { 1953 od->od_object = 0; 1954 error = zap_lookup(zd->zd_os, od->od_dir, od->od_name, 1955 sizeof (uint64_t), 1, &od->od_object); 1956 if (error) { 1957 ASSERT(error == ENOENT); 1958 ASSERT(od->od_object == 0); 1959 missing++; 1960 } else { 1961 dmu_buf_t *db; 1962 ztest_block_tag_t *bbt; 1963 dmu_object_info_t doi; 1964 1965 ASSERT(od->od_object != 0); 1966 ASSERT(missing == 0); /* there should be no gaps */ 1967 1968 ztest_object_lock(zd, od->od_object, RL_READER); 1969 VERIFY3U(0, ==, dmu_bonus_hold(zd->zd_os, 1970 od->od_object, FTAG, &db)); 1971 dmu_object_info_from_db(db, &doi); 1972 bbt = ztest_bt_bonus(db); 1973 ASSERT3U(bbt->bt_magic, ==, BT_MAGIC); 1974 od->od_type = doi.doi_type; 1975 od->od_blocksize = doi.doi_data_block_size; 1976 od->od_gen = bbt->bt_gen; 1977 dmu_buf_rele(db, FTAG); 1978 ztest_object_unlock(zd, od->od_object); 1979 } 1980 } 1981 1982 return (missing); 1983} 1984 1985static int 1986ztest_create(ztest_ds_t *zd, ztest_od_t *od, int count) 1987{ 1988 int missing = 0; 1989 1990 ASSERT(_mutex_held(&zd->zd_dirobj_lock)); 1991 1992 for (int i = 0; i < count; i++, od++) { 1993 if (missing) { 1994 od->od_object = 0; 1995 missing++; 1996 continue; 1997 } 1998 1999 lr_create_t *lr = ztest_lr_alloc(sizeof (*lr), od->od_name); 2000 2001 lr->lr_doid = od->od_dir; 2002 lr->lr_foid = 0; /* 0 to allocate, > 0 to claim */ 2003 lr->lrz_type = od->od_crtype; 2004 lr->lrz_blocksize = od->od_crblocksize; 2005 lr->lrz_ibshift = ztest_random_ibshift(); 2006 lr->lrz_bonustype = DMU_OT_UINT64_OTHER; 2007 lr->lrz_bonuslen = dmu_bonus_max(); 2008 lr->lr_gen = od->od_crgen; 2009 lr->lr_crtime[0] = time(NULL); 2010 2011 if (ztest_replay_create(zd, lr, B_FALSE) != 0) { 2012 ASSERT(missing == 0); 2013 od->od_object = 0; 2014 missing++; 2015 } else { 2016 od->od_object = lr->lr_foid; 2017 od->od_type = od->od_crtype; 2018 od->od_blocksize = od->od_crblocksize; 2019 od->od_gen = od->od_crgen; 2020 ASSERT(od->od_object != 0); 2021 } 2022 2023 ztest_lr_free(lr, sizeof (*lr), od->od_name); 2024 } 2025 2026 return (missing); 2027} 2028 2029static int 2030ztest_remove(ztest_ds_t *zd, ztest_od_t *od, int count) 2031{ 2032 int missing = 0; 2033 int error; 2034 2035 ASSERT(_mutex_held(&zd->zd_dirobj_lock)); 2036 2037 od += count - 1; 2038 2039 for (int i = count - 1; i >= 0; i--, od--) { 2040 if (missing) { 2041 missing++; 2042 continue; 2043 } 2044 2045 /* 2046 * No object was found. 2047 */ 2048 if (od->od_object == 0) 2049 continue; 2050 2051 lr_remove_t *lr = ztest_lr_alloc(sizeof (*lr), od->od_name); 2052 2053 lr->lr_doid = od->od_dir; 2054 2055 if ((error = ztest_replay_remove(zd, lr, B_FALSE)) != 0) { 2056 ASSERT3U(error, ==, ENOSPC); 2057 missing++; 2058 } else { 2059 od->od_object = 0; 2060 } 2061 ztest_lr_free(lr, sizeof (*lr), od->od_name); 2062 } 2063 2064 return (missing); 2065} 2066 2067static int 2068ztest_write(ztest_ds_t *zd, uint64_t object, uint64_t offset, uint64_t size, 2069 void *data) 2070{ 2071 lr_write_t *lr; 2072 int error; 2073 2074 lr = ztest_lr_alloc(sizeof (*lr) + size, NULL); 2075 2076 lr->lr_foid = object; 2077 lr->lr_offset = offset; 2078 lr->lr_length = size; 2079 lr->lr_blkoff = 0; 2080 BP_ZERO(&lr->lr_blkptr); 2081 2082 bcopy(data, lr + 1, size); 2083 2084 error = ztest_replay_write(zd, lr, B_FALSE); 2085 2086 ztest_lr_free(lr, sizeof (*lr) + size, NULL); 2087 2088 return (error); 2089} 2090 2091static int 2092ztest_truncate(ztest_ds_t *zd, uint64_t object, uint64_t offset, uint64_t size) 2093{ 2094 lr_truncate_t *lr; 2095 int error; 2096 2097 lr = ztest_lr_alloc(sizeof (*lr), NULL); 2098 2099 lr->lr_foid = object; 2100 lr->lr_offset = offset; 2101 lr->lr_length = size; 2102 2103 error = ztest_replay_truncate(zd, lr, B_FALSE); 2104 2105 ztest_lr_free(lr, sizeof (*lr), NULL); 2106 2107 return (error); 2108} 2109 2110static int 2111ztest_setattr(ztest_ds_t *zd, uint64_t object) 2112{ 2113 lr_setattr_t *lr; 2114 int error; 2115 2116 lr = ztest_lr_alloc(sizeof (*lr), NULL); 2117 2118 lr->lr_foid = object; 2119 lr->lr_size = 0; 2120 lr->lr_mode = 0; 2121 2122 error = ztest_replay_setattr(zd, lr, B_FALSE); 2123 2124 ztest_lr_free(lr, sizeof (*lr), NULL); 2125 2126 return (error); 2127} 2128 2129static void 2130ztest_prealloc(ztest_ds_t *zd, uint64_t object, uint64_t offset, uint64_t size) 2131{ 2132 objset_t *os = zd->zd_os; 2133 dmu_tx_t *tx; 2134 uint64_t txg; 2135 rl_t *rl; 2136 2137 txg_wait_synced(dmu_objset_pool(os), 0); 2138 2139 ztest_object_lock(zd, object, RL_READER); 2140 rl = ztest_range_lock(zd, object, offset, size, RL_WRITER); 2141 2142 tx = dmu_tx_create(os); 2143 2144 dmu_tx_hold_write(tx, object, offset, size); 2145 2146 txg = ztest_tx_assign(tx, TXG_WAIT, FTAG); 2147 2148 if (txg != 0) { 2149 dmu_prealloc(os, object, offset, size, tx); 2150 dmu_tx_commit(tx); 2151 txg_wait_synced(dmu_objset_pool(os), txg); 2152 } else { 2153 (void) dmu_free_long_range(os, object, offset, size); 2154 } 2155 2156 ztest_range_unlock(rl); 2157 ztest_object_unlock(zd, object); 2158} 2159 2160static void 2161ztest_io(ztest_ds_t *zd, uint64_t object, uint64_t offset) 2162{ 2163 int err; 2164 ztest_block_tag_t wbt; 2165 dmu_object_info_t doi; 2166 enum ztest_io_type io_type; 2167 uint64_t blocksize; 2168 void *data; 2169 2170 VERIFY(dmu_object_info(zd->zd_os, object, &doi) == 0); 2171 blocksize = doi.doi_data_block_size; 2172 data = umem_alloc(blocksize, UMEM_NOFAIL); 2173 2174 /* 2175 * Pick an i/o type at random, biased toward writing block tags. 2176 */ 2177 io_type = ztest_random(ZTEST_IO_TYPES); 2178 if (ztest_random(2) == 0) 2179 io_type = ZTEST_IO_WRITE_TAG; 2180 2181 (void) rw_rdlock(&zd->zd_zilog_lock); 2182 2183 switch (io_type) { 2184 2185 case ZTEST_IO_WRITE_TAG: 2186 ztest_bt_generate(&wbt, zd->zd_os, object, offset, 0, 0, 0); 2187 (void) ztest_write(zd, object, offset, sizeof (wbt), &wbt); 2188 break; 2189 2190 case ZTEST_IO_WRITE_PATTERN: 2191 (void) memset(data, 'a' + (object + offset) % 5, blocksize); 2192 if (ztest_random(2) == 0) { 2193 /* 2194 * Induce fletcher2 collisions to ensure that 2195 * zio_ddt_collision() detects and resolves them 2196 * when using fletcher2-verify for deduplication. 2197 */ 2198 ((uint64_t *)data)[0] ^= 1ULL << 63; 2199 ((uint64_t *)data)[4] ^= 1ULL << 63; 2200 } 2201 (void) ztest_write(zd, object, offset, blocksize, data); 2202 break; 2203 2204 case ZTEST_IO_WRITE_ZEROES: 2205 bzero(data, blocksize); 2206 (void) ztest_write(zd, object, offset, blocksize, data); 2207 break; 2208 2209 case ZTEST_IO_TRUNCATE: 2210 (void) ztest_truncate(zd, object, offset, blocksize); 2211 break; 2212 2213 case ZTEST_IO_SETATTR: 2214 (void) ztest_setattr(zd, object); 2215 break; 2216 2217 case ZTEST_IO_REWRITE: 2218 (void) rw_rdlock(&ztest_name_lock); 2219 err = ztest_dsl_prop_set_uint64(zd->zd_name, 2220 ZFS_PROP_CHECKSUM, spa_dedup_checksum(ztest_spa), 2221 B_FALSE); 2222 VERIFY(err == 0 || err == ENOSPC); 2223 err = ztest_dsl_prop_set_uint64(zd->zd_name, 2224 ZFS_PROP_COMPRESSION, 2225 ztest_random_dsl_prop(ZFS_PROP_COMPRESSION), 2226 B_FALSE); 2227 VERIFY(err == 0 || err == ENOSPC); 2228 (void) rw_unlock(&ztest_name_lock); 2229 2230 VERIFY0(dmu_read(zd->zd_os, object, offset, blocksize, data, 2231 DMU_READ_NO_PREFETCH)); 2232 2233 (void) ztest_write(zd, object, offset, blocksize, data); 2234 break; 2235 } 2236 2237 (void) rw_unlock(&zd->zd_zilog_lock); 2238 2239 umem_free(data, blocksize); 2240} 2241 2242/* 2243 * Initialize an object description template. 2244 */ 2245static void 2246ztest_od_init(ztest_od_t *od, uint64_t id, char *tag, uint64_t index, 2247 dmu_object_type_t type, uint64_t blocksize, uint64_t gen) 2248{ 2249 od->od_dir = ZTEST_DIROBJ; 2250 od->od_object = 0; 2251 2252 od->od_crtype = type; 2253 od->od_crblocksize = blocksize ? blocksize : ztest_random_blocksize(); 2254 od->od_crgen = gen; 2255 2256 od->od_type = DMU_OT_NONE; 2257 od->od_blocksize = 0; 2258 od->od_gen = 0; 2259 2260 (void) snprintf(od->od_name, sizeof (od->od_name), "%s(%lld)[%llu]", 2261 tag, (int64_t)id, index); 2262} 2263 2264/* 2265 * Lookup or create the objects for a test using the od template. 2266 * If the objects do not all exist, or if 'remove' is specified, 2267 * remove any existing objects and create new ones. Otherwise, 2268 * use the existing objects. 2269 */ 2270static int 2271ztest_object_init(ztest_ds_t *zd, ztest_od_t *od, size_t size, boolean_t remove) 2272{ 2273 int count = size / sizeof (*od); 2274 int rv = 0; 2275 2276 VERIFY(mutex_lock(&zd->zd_dirobj_lock) == 0); 2277 if ((ztest_lookup(zd, od, count) != 0 || remove) && 2278 (ztest_remove(zd, od, count) != 0 || 2279 ztest_create(zd, od, count) != 0)) 2280 rv = -1; 2281 zd->zd_od = od; 2282 VERIFY(mutex_unlock(&zd->zd_dirobj_lock) == 0); 2283 2284 return (rv); 2285} 2286 2287/* ARGSUSED */ 2288void 2289ztest_zil_commit(ztest_ds_t *zd, uint64_t id) 2290{ 2291 zilog_t *zilog = zd->zd_zilog; 2292 2293 (void) rw_rdlock(&zd->zd_zilog_lock); 2294 2295 zil_commit(zilog, ztest_random(ZTEST_OBJECTS)); 2296 2297 /* 2298 * Remember the committed values in zd, which is in parent/child 2299 * shared memory. If we die, the next iteration of ztest_run() 2300 * will verify that the log really does contain this record. 2301 */ 2302 mutex_enter(&zilog->zl_lock); 2303 ASSERT(zd->zd_shared != NULL); 2304 ASSERT3U(zd->zd_shared->zd_seq, <=, zilog->zl_commit_lr_seq); 2305 zd->zd_shared->zd_seq = zilog->zl_commit_lr_seq; 2306 mutex_exit(&zilog->zl_lock); 2307 2308 (void) rw_unlock(&zd->zd_zilog_lock); 2309} 2310 2311/* 2312 * This function is designed to simulate the operations that occur during a 2313 * mount/unmount operation. We hold the dataset across these operations in an 2314 * attempt to expose any implicit assumptions about ZIL management. 2315 */ 2316/* ARGSUSED */ 2317void 2318ztest_zil_remount(ztest_ds_t *zd, uint64_t id) 2319{ 2320 objset_t *os = zd->zd_os; 2321 2322 /* 2323 * We grab the zd_dirobj_lock to ensure that no other thread is 2324 * updating the zil (i.e. adding in-memory log records) and the 2325 * zd_zilog_lock to block any I/O. 2326 */ 2327 VERIFY0(mutex_lock(&zd->zd_dirobj_lock)); 2328 (void) rw_wrlock(&zd->zd_zilog_lock); 2329 2330 /* zfsvfs_teardown() */ 2331 zil_close(zd->zd_zilog); 2332 2333 /* zfsvfs_setup() */ 2334 VERIFY(zil_open(os, ztest_get_data) == zd->zd_zilog); 2335 zil_replay(os, zd, ztest_replay_vector); 2336 2337 (void) rw_unlock(&zd->zd_zilog_lock); 2338 VERIFY(mutex_unlock(&zd->zd_dirobj_lock) == 0); 2339} 2340 2341/* 2342 * Verify that we can't destroy an active pool, create an existing pool, 2343 * or create a pool with a bad vdev spec. 2344 */ 2345/* ARGSUSED */ 2346void 2347ztest_spa_create_destroy(ztest_ds_t *zd, uint64_t id) 2348{ 2349 ztest_shared_opts_t *zo = &ztest_opts; 2350 spa_t *spa; 2351 nvlist_t *nvroot; 2352 2353 /* 2354 * Attempt to create using a bad file. 2355 */ 2356 nvroot = make_vdev_root("/dev/bogus", NULL, NULL, 0, 0, 0, 0, 0, 1); 2357 VERIFY3U(ENOENT, ==, 2358 spa_create("ztest_bad_file", nvroot, NULL, NULL)); 2359 nvlist_free(nvroot); 2360 2361 /* 2362 * Attempt to create using a bad mirror. 2363 */ 2364 nvroot = make_vdev_root("/dev/bogus", NULL, NULL, 0, 0, 0, 0, 2, 1); 2365 VERIFY3U(ENOENT, ==, 2366 spa_create("ztest_bad_mirror", nvroot, NULL, NULL)); 2367 nvlist_free(nvroot); 2368 2369 /* 2370 * Attempt to create an existing pool. It shouldn't matter 2371 * what's in the nvroot; we should fail with EEXIST. 2372 */ 2373 (void) rw_rdlock(&ztest_name_lock); 2374 nvroot = make_vdev_root("/dev/bogus", NULL, NULL, 0, 0, 0, 0, 0, 1); 2375 VERIFY3U(EEXIST, ==, spa_create(zo->zo_pool, nvroot, NULL, NULL)); 2376 nvlist_free(nvroot); 2377 VERIFY3U(0, ==, spa_open(zo->zo_pool, &spa, FTAG)); 2378 VERIFY3U(EBUSY, ==, spa_destroy(zo->zo_pool)); 2379 spa_close(spa, FTAG); 2380 2381 (void) rw_unlock(&ztest_name_lock); 2382} 2383 2384/* ARGSUSED */ 2385void 2386ztest_spa_upgrade(ztest_ds_t *zd, uint64_t id) 2387{ 2388 spa_t *spa; 2389 uint64_t initial_version = SPA_VERSION_INITIAL; 2390 uint64_t version, newversion; 2391 nvlist_t *nvroot, *props; 2392 char *name; 2393 2394 VERIFY0(mutex_lock(&ztest_vdev_lock)); 2395 name = kmem_asprintf("%s_upgrade", ztest_opts.zo_pool); 2396 2397 /* 2398 * Clean up from previous runs. 2399 */ 2400 (void) spa_destroy(name); 2401 2402 nvroot = make_vdev_root(NULL, NULL, name, ztest_opts.zo_vdev_size, 0, 2403 0, ztest_opts.zo_raidz, ztest_opts.zo_mirrors, 1); 2404 2405 /* 2406 * If we're configuring a RAIDZ device then make sure that the 2407 * the initial version is capable of supporting that feature. 2408 */ 2409 switch (ztest_opts.zo_raidz_parity) { 2410 case 0: 2411 case 1: 2412 initial_version = SPA_VERSION_INITIAL; 2413 break; 2414 case 2: 2415 initial_version = SPA_VERSION_RAIDZ2; 2416 break; 2417 case 3: 2418 initial_version = SPA_VERSION_RAIDZ3; 2419 break; 2420 } 2421 2422 /* 2423 * Create a pool with a spa version that can be upgraded. Pick 2424 * a value between initial_version and SPA_VERSION_BEFORE_FEATURES. 2425 */ 2426 do { 2427 version = ztest_random_spa_version(initial_version); 2428 } while (version > SPA_VERSION_BEFORE_FEATURES); 2429 2430 props = fnvlist_alloc(); 2431 fnvlist_add_uint64(props, 2432 zpool_prop_to_name(ZPOOL_PROP_VERSION), version); 2433 VERIFY0(spa_create(name, nvroot, props, NULL)); 2434 fnvlist_free(nvroot); 2435 fnvlist_free(props); 2436 2437 VERIFY0(spa_open(name, &spa, FTAG)); 2438 VERIFY3U(spa_version(spa), ==, version); 2439 newversion = ztest_random_spa_version(version + 1); 2440 2441 if (ztest_opts.zo_verbose >= 4) { 2442 (void) printf("upgrading spa version from %llu to %llu\n", 2443 (u_longlong_t)version, (u_longlong_t)newversion); 2444 } 2445 2446 spa_upgrade(spa, newversion); 2447 VERIFY3U(spa_version(spa), >, version); 2448 VERIFY3U(spa_version(spa), ==, fnvlist_lookup_uint64(spa->spa_config, 2449 zpool_prop_to_name(ZPOOL_PROP_VERSION))); 2450 spa_close(spa, FTAG); 2451 2452 strfree(name); 2453 VERIFY0(mutex_unlock(&ztest_vdev_lock)); 2454} 2455 2456static vdev_t * 2457vdev_lookup_by_path(vdev_t *vd, const char *path) 2458{ 2459 vdev_t *mvd; 2460 2461 if (vd->vdev_path != NULL && strcmp(path, vd->vdev_path) == 0) 2462 return (vd); 2463 2464 for (int c = 0; c < vd->vdev_children; c++) 2465 if ((mvd = vdev_lookup_by_path(vd->vdev_child[c], path)) != 2466 NULL) 2467 return (mvd); 2468 2469 return (NULL); 2470} 2471 2472/* 2473 * Find the first available hole which can be used as a top-level. 2474 */ 2475int 2476find_vdev_hole(spa_t *spa) 2477{ 2478 vdev_t *rvd = spa->spa_root_vdev; 2479 int c; 2480 2481 ASSERT(spa_config_held(spa, SCL_VDEV, RW_READER) == SCL_VDEV); 2482 2483 for (c = 0; c < rvd->vdev_children; c++) { 2484 vdev_t *cvd = rvd->vdev_child[c]; 2485 2486 if (cvd->vdev_ishole) 2487 break; 2488 } 2489 return (c); 2490} 2491 2492/* 2493 * Verify that vdev_add() works as expected. 2494 */ 2495/* ARGSUSED */ 2496void 2497ztest_vdev_add_remove(ztest_ds_t *zd, uint64_t id) 2498{ 2499 ztest_shared_t *zs = ztest_shared; 2500 spa_t *spa = ztest_spa; 2501 uint64_t leaves; 2502 uint64_t guid; 2503 nvlist_t *nvroot; 2504 int error; 2505 2506 VERIFY(mutex_lock(&ztest_vdev_lock) == 0); 2507 leaves = MAX(zs->zs_mirrors + zs->zs_splits, 1) * ztest_opts.zo_raidz; 2508 2509 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); 2510 2511 ztest_shared->zs_vdev_next_leaf = find_vdev_hole(spa) * leaves; 2512 2513 /* 2514 * If we have slogs then remove them 1/4 of the time. 2515 */ 2516 if (spa_has_slogs(spa) && ztest_random(4) == 0) { 2517 /* 2518 * Grab the guid from the head of the log class rotor. 2519 */ 2520 guid = spa_log_class(spa)->mc_rotor->mg_vd->vdev_guid; 2521 2522 spa_config_exit(spa, SCL_VDEV, FTAG); 2523 2524 /* 2525 * We have to grab the zs_name_lock as writer to 2526 * prevent a race between removing a slog (dmu_objset_find) 2527 * and destroying a dataset. Removing the slog will 2528 * grab a reference on the dataset which may cause 2529 * dmu_objset_destroy() to fail with EBUSY thus 2530 * leaving the dataset in an inconsistent state. 2531 */ 2532 VERIFY(rw_wrlock(&ztest_name_lock) == 0); 2533 error = spa_vdev_remove(spa, guid, B_FALSE); 2534 VERIFY(rw_unlock(&ztest_name_lock) == 0); 2535 2536 if (error && error != EEXIST) 2537 fatal(0, "spa_vdev_remove() = %d", error); 2538 } else { 2539 spa_config_exit(spa, SCL_VDEV, FTAG); 2540 2541 /* 2542 * Make 1/4 of the devices be log devices. 2543 */ 2544 nvroot = make_vdev_root(NULL, NULL, NULL, 2545 ztest_opts.zo_vdev_size, 0, 2546 ztest_random(4) == 0, ztest_opts.zo_raidz, 2547 zs->zs_mirrors, 1); 2548 2549 error = spa_vdev_add(spa, nvroot); 2550 nvlist_free(nvroot); 2551 2552 if (error == ENOSPC) 2553 ztest_record_enospc("spa_vdev_add"); 2554 else if (error != 0) 2555 fatal(0, "spa_vdev_add() = %d", error); 2556 } 2557 2558 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0); 2559} 2560 2561/* 2562 * Verify that adding/removing aux devices (l2arc, hot spare) works as expected. 2563 */ 2564/* ARGSUSED */ 2565void 2566ztest_vdev_aux_add_remove(ztest_ds_t *zd, uint64_t id) 2567{ 2568 ztest_shared_t *zs = ztest_shared; 2569 spa_t *spa = ztest_spa; 2570 vdev_t *rvd = spa->spa_root_vdev; 2571 spa_aux_vdev_t *sav; 2572 char *aux; 2573 uint64_t guid = 0; 2574 int error; 2575 2576 if (ztest_random(2) == 0) { 2577 sav = &spa->spa_spares; 2578 aux = ZPOOL_CONFIG_SPARES; 2579 } else { 2580 sav = &spa->spa_l2cache; 2581 aux = ZPOOL_CONFIG_L2CACHE; 2582 } 2583 2584 VERIFY(mutex_lock(&ztest_vdev_lock) == 0); 2585 2586 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); 2587 2588 if (sav->sav_count != 0 && ztest_random(4) == 0) { 2589 /* 2590 * Pick a random device to remove. 2591 */ 2592 guid = sav->sav_vdevs[ztest_random(sav->sav_count)]->vdev_guid; 2593 } else { 2594 /* 2595 * Find an unused device we can add. 2596 */ 2597 zs->zs_vdev_aux = 0; 2598 for (;;) { 2599 char path[MAXPATHLEN]; 2600 int c; 2601 (void) snprintf(path, sizeof (path), ztest_aux_template, 2602 ztest_opts.zo_dir, ztest_opts.zo_pool, aux, 2603 zs->zs_vdev_aux); 2604 for (c = 0; c < sav->sav_count; c++) 2605 if (strcmp(sav->sav_vdevs[c]->vdev_path, 2606 path) == 0) 2607 break; 2608 if (c == sav->sav_count && 2609 vdev_lookup_by_path(rvd, path) == NULL) 2610 break; 2611 zs->zs_vdev_aux++; 2612 } 2613 } 2614 2615 spa_config_exit(spa, SCL_VDEV, FTAG); 2616 2617 if (guid == 0) { 2618 /* 2619 * Add a new device. 2620 */ 2621 nvlist_t *nvroot = make_vdev_root(NULL, aux, NULL, 2622 (ztest_opts.zo_vdev_size * 5) / 4, 0, 0, 0, 0, 1); 2623 error = spa_vdev_add(spa, nvroot); 2624 if (error != 0) 2625 fatal(0, "spa_vdev_add(%p) = %d", nvroot, error); 2626 nvlist_free(nvroot); 2627 } else { 2628 /* 2629 * Remove an existing device. Sometimes, dirty its 2630 * vdev state first to make sure we handle removal 2631 * of devices that have pending state changes. 2632 */ 2633 if (ztest_random(2) == 0) 2634 (void) vdev_online(spa, guid, 0, NULL); 2635 2636 error = spa_vdev_remove(spa, guid, B_FALSE); 2637 if (error != 0 && error != EBUSY) 2638 fatal(0, "spa_vdev_remove(%llu) = %d", guid, error); 2639 } 2640 2641 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0); 2642} 2643 2644/* 2645 * split a pool if it has mirror tlvdevs 2646 */ 2647/* ARGSUSED */ 2648void 2649ztest_split_pool(ztest_ds_t *zd, uint64_t id) 2650{ 2651 ztest_shared_t *zs = ztest_shared; 2652 spa_t *spa = ztest_spa; 2653 vdev_t *rvd = spa->spa_root_vdev; 2654 nvlist_t *tree, **child, *config, *split, **schild; 2655 uint_t c, children, schildren = 0, lastlogid = 0; 2656 int error = 0; 2657 2658 VERIFY(mutex_lock(&ztest_vdev_lock) == 0); 2659 2660 /* ensure we have a useable config; mirrors of raidz aren't supported */ 2661 if (zs->zs_mirrors < 3 || ztest_opts.zo_raidz > 1) { 2662 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0); 2663 return; 2664 } 2665 2666 /* clean up the old pool, if any */ 2667 (void) spa_destroy("splitp"); 2668 2669 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); 2670 2671 /* generate a config from the existing config */ 2672 mutex_enter(&spa->spa_props_lock); 2673 VERIFY(nvlist_lookup_nvlist(spa->spa_config, ZPOOL_CONFIG_VDEV_TREE, 2674 &tree) == 0); 2675 mutex_exit(&spa->spa_props_lock); 2676 2677 VERIFY(nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child, 2678 &children) == 0); 2679 2680 schild = malloc(rvd->vdev_children * sizeof (nvlist_t *)); 2681 for (c = 0; c < children; c++) { 2682 vdev_t *tvd = rvd->vdev_child[c]; 2683 nvlist_t **mchild; 2684 uint_t mchildren; 2685 2686 if (tvd->vdev_islog || tvd->vdev_ops == &vdev_hole_ops) { 2687 VERIFY(nvlist_alloc(&schild[schildren], NV_UNIQUE_NAME, 2688 0) == 0); 2689 VERIFY(nvlist_add_string(schild[schildren], 2690 ZPOOL_CONFIG_TYPE, VDEV_TYPE_HOLE) == 0); 2691 VERIFY(nvlist_add_uint64(schild[schildren], 2692 ZPOOL_CONFIG_IS_HOLE, 1) == 0); 2693 if (lastlogid == 0) 2694 lastlogid = schildren; 2695 ++schildren; 2696 continue; 2697 } 2698 lastlogid = 0; 2699 VERIFY(nvlist_lookup_nvlist_array(child[c], 2700 ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0); 2701 VERIFY(nvlist_dup(mchild[0], &schild[schildren++], 0) == 0); 2702 } 2703 2704 /* OK, create a config that can be used to split */ 2705 VERIFY(nvlist_alloc(&split, NV_UNIQUE_NAME, 0) == 0); 2706 VERIFY(nvlist_add_string(split, ZPOOL_CONFIG_TYPE, 2707 VDEV_TYPE_ROOT) == 0); 2708 VERIFY(nvlist_add_nvlist_array(split, ZPOOL_CONFIG_CHILDREN, schild, 2709 lastlogid != 0 ? lastlogid : schildren) == 0); 2710 2711 VERIFY(nvlist_alloc(&config, NV_UNIQUE_NAME, 0) == 0); 2712 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, split) == 0); 2713 2714 for (c = 0; c < schildren; c++) 2715 nvlist_free(schild[c]); 2716 free(schild); 2717 nvlist_free(split); 2718 2719 spa_config_exit(spa, SCL_VDEV, FTAG); 2720 2721 (void) rw_wrlock(&ztest_name_lock); 2722 error = spa_vdev_split_mirror(spa, "splitp", config, NULL, B_FALSE); 2723 (void) rw_unlock(&ztest_name_lock); 2724 2725 nvlist_free(config); 2726 2727 if (error == 0) { 2728 (void) printf("successful split - results:\n"); 2729 mutex_enter(&spa_namespace_lock); 2730 show_pool_stats(spa); 2731 show_pool_stats(spa_lookup("splitp")); 2732 mutex_exit(&spa_namespace_lock); 2733 ++zs->zs_splits; 2734 --zs->zs_mirrors; 2735 } 2736 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0); 2737 2738} 2739 2740/* 2741 * Verify that we can attach and detach devices. 2742 */ 2743/* ARGSUSED */ 2744void 2745ztest_vdev_attach_detach(ztest_ds_t *zd, uint64_t id) 2746{ 2747 ztest_shared_t *zs = ztest_shared; 2748 spa_t *spa = ztest_spa; 2749 spa_aux_vdev_t *sav = &spa->spa_spares; 2750 vdev_t *rvd = spa->spa_root_vdev; 2751 vdev_t *oldvd, *newvd, *pvd; 2752 nvlist_t *root; 2753 uint64_t leaves; 2754 uint64_t leaf, top; 2755 uint64_t ashift = ztest_get_ashift(); 2756 uint64_t oldguid, pguid; 2757 uint64_t oldsize, newsize; 2758 char oldpath[MAXPATHLEN], newpath[MAXPATHLEN]; 2759 int replacing; 2760 int oldvd_has_siblings = B_FALSE; 2761 int newvd_is_spare = B_FALSE; 2762 int oldvd_is_log; 2763 int error, expected_error; 2764 2765 VERIFY(mutex_lock(&ztest_vdev_lock) == 0); 2766 leaves = MAX(zs->zs_mirrors, 1) * ztest_opts.zo_raidz; 2767 2768 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); 2769 2770 /* 2771 * Decide whether to do an attach or a replace. 2772 */ 2773 replacing = ztest_random(2); 2774 2775 /* 2776 * Pick a random top-level vdev. 2777 */ 2778 top = ztest_random_vdev_top(spa, B_TRUE); 2779 2780 /* 2781 * Pick a random leaf within it. 2782 */ 2783 leaf = ztest_random(leaves); 2784 2785 /* 2786 * Locate this vdev. 2787 */ 2788 oldvd = rvd->vdev_child[top]; 2789 if (zs->zs_mirrors >= 1) { 2790 ASSERT(oldvd->vdev_ops == &vdev_mirror_ops); 2791 ASSERT(oldvd->vdev_children >= zs->zs_mirrors); 2792 oldvd = oldvd->vdev_child[leaf / ztest_opts.zo_raidz]; 2793 } 2794 if (ztest_opts.zo_raidz > 1) { 2795 ASSERT(oldvd->vdev_ops == &vdev_raidz_ops); 2796 ASSERT(oldvd->vdev_children == ztest_opts.zo_raidz); 2797 oldvd = oldvd->vdev_child[leaf % ztest_opts.zo_raidz]; 2798 } 2799 2800 /* 2801 * If we're already doing an attach or replace, oldvd may be a 2802 * mirror vdev -- in which case, pick a random child. 2803 */ 2804 while (oldvd->vdev_children != 0) { 2805 oldvd_has_siblings = B_TRUE; 2806 ASSERT(oldvd->vdev_children >= 2); 2807 oldvd = oldvd->vdev_child[ztest_random(oldvd->vdev_children)]; 2808 } 2809 2810 oldguid = oldvd->vdev_guid; 2811 oldsize = vdev_get_min_asize(oldvd); 2812 oldvd_is_log = oldvd->vdev_top->vdev_islog; 2813 (void) strcpy(oldpath, oldvd->vdev_path); 2814 pvd = oldvd->vdev_parent; 2815 pguid = pvd->vdev_guid; 2816 2817 /* 2818 * If oldvd has siblings, then half of the time, detach it. 2819 */ 2820 if (oldvd_has_siblings && ztest_random(2) == 0) { 2821 spa_config_exit(spa, SCL_VDEV, FTAG); 2822 error = spa_vdev_detach(spa, oldguid, pguid, B_FALSE); 2823 if (error != 0 && error != ENODEV && error != EBUSY && 2824 error != ENOTSUP) 2825 fatal(0, "detach (%s) returned %d", oldpath, error); 2826 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0); 2827 return; 2828 } 2829 2830 /* 2831 * For the new vdev, choose with equal probability between the two 2832 * standard paths (ending in either 'a' or 'b') or a random hot spare. 2833 */ 2834 if (sav->sav_count != 0 && ztest_random(3) == 0) { 2835 newvd = sav->sav_vdevs[ztest_random(sav->sav_count)]; 2836 newvd_is_spare = B_TRUE; 2837 (void) strcpy(newpath, newvd->vdev_path); 2838 } else { 2839 (void) snprintf(newpath, sizeof (newpath), ztest_dev_template, 2840 ztest_opts.zo_dir, ztest_opts.zo_pool, 2841 top * leaves + leaf); 2842 if (ztest_random(2) == 0) 2843 newpath[strlen(newpath) - 1] = 'b'; 2844 newvd = vdev_lookup_by_path(rvd, newpath); 2845 } 2846 2847 if (newvd) { 2848 newsize = vdev_get_min_asize(newvd); 2849 } else { 2850 /* 2851 * Make newsize a little bigger or smaller than oldsize. 2852 * If it's smaller, the attach should fail. 2853 * If it's larger, and we're doing a replace, 2854 * we should get dynamic LUN growth when we're done. 2855 */ 2856 newsize = 10 * oldsize / (9 + ztest_random(3)); 2857 } 2858 2859 /* 2860 * If pvd is not a mirror or root, the attach should fail with ENOTSUP, 2861 * unless it's a replace; in that case any non-replacing parent is OK. 2862 * 2863 * If newvd is already part of the pool, it should fail with EBUSY. 2864 * 2865 * If newvd is too small, it should fail with EOVERFLOW. 2866 */ 2867 if (pvd->vdev_ops != &vdev_mirror_ops && 2868 pvd->vdev_ops != &vdev_root_ops && (!replacing || 2869 pvd->vdev_ops == &vdev_replacing_ops || 2870 pvd->vdev_ops == &vdev_spare_ops)) 2871 expected_error = ENOTSUP; 2872 else if (newvd_is_spare && (!replacing || oldvd_is_log)) 2873 expected_error = ENOTSUP; 2874 else if (newvd == oldvd) 2875 expected_error = replacing ? 0 : EBUSY; 2876 else if (vdev_lookup_by_path(rvd, newpath) != NULL) 2877 expected_error = EBUSY; 2878 else if (newsize < oldsize) 2879 expected_error = EOVERFLOW; 2880 else if (ashift > oldvd->vdev_top->vdev_ashift) 2881 expected_error = EDOM; 2882 else 2883 expected_error = 0; 2884 2885 spa_config_exit(spa, SCL_VDEV, FTAG); 2886 2887 /* 2888 * Build the nvlist describing newpath. 2889 */ 2890 root = make_vdev_root(newpath, NULL, NULL, newvd == NULL ? newsize : 0, 2891 ashift, 0, 0, 0, 1); 2892 2893 error = spa_vdev_attach(spa, oldguid, root, replacing); 2894 2895 nvlist_free(root); 2896 2897 /* 2898 * If our parent was the replacing vdev, but the replace completed, 2899 * then instead of failing with ENOTSUP we may either succeed, 2900 * fail with ENODEV, or fail with EOVERFLOW. 2901 */ 2902 if (expected_error == ENOTSUP && 2903 (error == 0 || error == ENODEV || error == EOVERFLOW)) 2904 expected_error = error; 2905 2906 /* 2907 * If someone grew the LUN, the replacement may be too small. 2908 */ 2909 if (error == EOVERFLOW || error == EBUSY) 2910 expected_error = error; 2911 2912 /* XXX workaround 6690467 */ 2913 if (error != expected_error && expected_error != EBUSY) { 2914 fatal(0, "attach (%s %llu, %s %llu, %d) " 2915 "returned %d, expected %d", 2916 oldpath, oldsize, newpath, 2917 newsize, replacing, error, expected_error); 2918 } 2919 2920 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0); 2921} 2922 2923/* 2924 * Callback function which expands the physical size of the vdev. 2925 */ 2926vdev_t * 2927grow_vdev(vdev_t *vd, void *arg) 2928{ 2929 spa_t *spa = vd->vdev_spa; 2930 size_t *newsize = arg; 2931 size_t fsize; 2932 int fd; 2933 2934 ASSERT(spa_config_held(spa, SCL_STATE, RW_READER) == SCL_STATE); 2935 ASSERT(vd->vdev_ops->vdev_op_leaf); 2936 2937 if ((fd = open(vd->vdev_path, O_RDWR)) == -1) 2938 return (vd); 2939 2940 fsize = lseek(fd, 0, SEEK_END); 2941 (void) ftruncate(fd, *newsize); 2942 2943 if (ztest_opts.zo_verbose >= 6) { 2944 (void) printf("%s grew from %lu to %lu bytes\n", 2945 vd->vdev_path, (ulong_t)fsize, (ulong_t)*newsize); 2946 } 2947 (void) close(fd); 2948 return (NULL); 2949} 2950 2951/* 2952 * Callback function which expands a given vdev by calling vdev_online(). 2953 */ 2954/* ARGSUSED */ 2955vdev_t * 2956online_vdev(vdev_t *vd, void *arg) 2957{ 2958 spa_t *spa = vd->vdev_spa; 2959 vdev_t *tvd = vd->vdev_top; 2960 uint64_t guid = vd->vdev_guid; 2961 uint64_t generation = spa->spa_config_generation + 1; 2962 vdev_state_t newstate = VDEV_STATE_UNKNOWN; 2963 int error; 2964 2965 ASSERT(spa_config_held(spa, SCL_STATE, RW_READER) == SCL_STATE); 2966 ASSERT(vd->vdev_ops->vdev_op_leaf); 2967 2968 /* Calling vdev_online will initialize the new metaslabs */ 2969 spa_config_exit(spa, SCL_STATE, spa); 2970 error = vdev_online(spa, guid, ZFS_ONLINE_EXPAND, &newstate); 2971 spa_config_enter(spa, SCL_STATE, spa, RW_READER); 2972 2973 /* 2974 * If vdev_online returned an error or the underlying vdev_open 2975 * failed then we abort the expand. The only way to know that 2976 * vdev_open fails is by checking the returned newstate. 2977 */ 2978 if (error || newstate != VDEV_STATE_HEALTHY) { 2979 if (ztest_opts.zo_verbose >= 5) { 2980 (void) printf("Unable to expand vdev, state %llu, " 2981 "error %d\n", (u_longlong_t)newstate, error); 2982 } 2983 return (vd); 2984 } 2985 ASSERT3U(newstate, ==, VDEV_STATE_HEALTHY); 2986 2987 /* 2988 * Since we dropped the lock we need to ensure that we're 2989 * still talking to the original vdev. It's possible this 2990 * vdev may have been detached/replaced while we were 2991 * trying to online it. 2992 */ 2993 if (generation != spa->spa_config_generation) { 2994 if (ztest_opts.zo_verbose >= 5) { 2995 (void) printf("vdev configuration has changed, " 2996 "guid %llu, state %llu, expected gen %llu, " 2997 "got gen %llu\n", 2998 (u_longlong_t)guid, 2999 (u_longlong_t)tvd->vdev_state, 3000 (u_longlong_t)generation, 3001 (u_longlong_t)spa->spa_config_generation); 3002 } 3003 return (vd); 3004 } 3005 return (NULL); 3006} 3007 3008/* 3009 * Traverse the vdev tree calling the supplied function. 3010 * We continue to walk the tree until we either have walked all 3011 * children or we receive a non-NULL return from the callback. 3012 * If a NULL callback is passed, then we just return back the first 3013 * leaf vdev we encounter. 3014 */ 3015vdev_t * 3016vdev_walk_tree(vdev_t *vd, vdev_t *(*func)(vdev_t *, void *), void *arg) 3017{ 3018 if (vd->vdev_ops->vdev_op_leaf) { 3019 if (func == NULL) 3020 return (vd); 3021 else 3022 return (func(vd, arg)); 3023 } 3024 3025 for (uint_t c = 0; c < vd->vdev_children; c++) { 3026 vdev_t *cvd = vd->vdev_child[c]; 3027 if ((cvd = vdev_walk_tree(cvd, func, arg)) != NULL) 3028 return (cvd); 3029 } 3030 return (NULL); 3031} 3032 3033/* 3034 * Verify that dynamic LUN growth works as expected. 3035 */ 3036/* ARGSUSED */ 3037void 3038ztest_vdev_LUN_growth(ztest_ds_t *zd, uint64_t id) 3039{ 3040 spa_t *spa = ztest_spa; 3041 vdev_t *vd, *tvd; 3042 metaslab_class_t *mc; 3043 metaslab_group_t *mg; 3044 size_t psize, newsize; 3045 uint64_t top; 3046 uint64_t old_class_space, new_class_space, old_ms_count, new_ms_count; 3047 3048 VERIFY(mutex_lock(&ztest_vdev_lock) == 0); 3049 spa_config_enter(spa, SCL_STATE, spa, RW_READER); 3050 3051 top = ztest_random_vdev_top(spa, B_TRUE); 3052 3053 tvd = spa->spa_root_vdev->vdev_child[top]; 3054 mg = tvd->vdev_mg; 3055 mc = mg->mg_class; 3056 old_ms_count = tvd->vdev_ms_count; 3057 old_class_space = metaslab_class_get_space(mc); 3058 3059 /* 3060 * Determine the size of the first leaf vdev associated with 3061 * our top-level device. 3062 */ 3063 vd = vdev_walk_tree(tvd, NULL, NULL); 3064 ASSERT3P(vd, !=, NULL); 3065 ASSERT(vd->vdev_ops->vdev_op_leaf); 3066 3067 psize = vd->vdev_psize; 3068 3069 /* 3070 * We only try to expand the vdev if it's healthy, less than 4x its 3071 * original size, and it has a valid psize. 3072 */ 3073 if (tvd->vdev_state != VDEV_STATE_HEALTHY || 3074 psize == 0 || psize >= 4 * ztest_opts.zo_vdev_size) { 3075 spa_config_exit(spa, SCL_STATE, spa); 3076 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0); 3077 return; 3078 } 3079 ASSERT(psize > 0); 3080 newsize = psize + psize / 8; 3081 ASSERT3U(newsize, >, psize); 3082 3083 if (ztest_opts.zo_verbose >= 6) { 3084 (void) printf("Expanding LUN %s from %lu to %lu\n", 3085 vd->vdev_path, (ulong_t)psize, (ulong_t)newsize); 3086 } 3087 3088 /* 3089 * Growing the vdev is a two step process: 3090 * 1). expand the physical size (i.e. relabel) 3091 * 2). online the vdev to create the new metaslabs 3092 */ 3093 if (vdev_walk_tree(tvd, grow_vdev, &newsize) != NULL || 3094 vdev_walk_tree(tvd, online_vdev, NULL) != NULL || 3095 tvd->vdev_state != VDEV_STATE_HEALTHY) { 3096 if (ztest_opts.zo_verbose >= 5) { 3097 (void) printf("Could not expand LUN because " 3098 "the vdev configuration changed.\n"); 3099 } 3100 spa_config_exit(spa, SCL_STATE, spa); 3101 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0); 3102 return; 3103 } 3104 3105 spa_config_exit(spa, SCL_STATE, spa); 3106 3107 /* 3108 * Expanding the LUN will update the config asynchronously, 3109 * thus we must wait for the async thread to complete any 3110 * pending tasks before proceeding. 3111 */ 3112 for (;;) { 3113 boolean_t done; 3114 mutex_enter(&spa->spa_async_lock); 3115 done = (spa->spa_async_thread == NULL && !spa->spa_async_tasks); 3116 mutex_exit(&spa->spa_async_lock); 3117 if (done) 3118 break; 3119 txg_wait_synced(spa_get_dsl(spa), 0); 3120 (void) poll(NULL, 0, 100); 3121 } 3122 3123 spa_config_enter(spa, SCL_STATE, spa, RW_READER); 3124 3125 tvd = spa->spa_root_vdev->vdev_child[top]; 3126 new_ms_count = tvd->vdev_ms_count; 3127 new_class_space = metaslab_class_get_space(mc); 3128 3129 if (tvd->vdev_mg != mg || mg->mg_class != mc) { 3130 if (ztest_opts.zo_verbose >= 5) { 3131 (void) printf("Could not verify LUN expansion due to " 3132 "intervening vdev offline or remove.\n"); 3133 } 3134 spa_config_exit(spa, SCL_STATE, spa); 3135 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0); 3136 return; 3137 } 3138 3139 /* 3140 * Make sure we were able to grow the vdev. 3141 */ 3142 if (new_ms_count <= old_ms_count) 3143 fatal(0, "LUN expansion failed: ms_count %llu <= %llu\n", 3144 old_ms_count, new_ms_count); 3145 3146 /* 3147 * Make sure we were able to grow the pool. 3148 */ 3149 if (new_class_space <= old_class_space) 3150 fatal(0, "LUN expansion failed: class_space %llu <= %llu\n", 3151 old_class_space, new_class_space); 3152 3153 if (ztest_opts.zo_verbose >= 5) { 3154 char oldnumbuf[6], newnumbuf[6]; 3155 3156 nicenum(old_class_space, oldnumbuf); 3157 nicenum(new_class_space, newnumbuf); 3158 (void) printf("%s grew from %s to %s\n", 3159 spa->spa_name, oldnumbuf, newnumbuf); 3160 } 3161 3162 spa_config_exit(spa, SCL_STATE, spa); 3163 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0); 3164} 3165 3166/* 3167 * Verify that dmu_objset_{create,destroy,open,close} work as expected. 3168 */ 3169/* ARGSUSED */ 3170static void 3171ztest_objset_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx) 3172{ 3173 /* 3174 * Create the objects common to all ztest datasets. 3175 */ 3176 VERIFY(zap_create_claim(os, ZTEST_DIROBJ, 3177 DMU_OT_ZAP_OTHER, DMU_OT_NONE, 0, tx) == 0); 3178} 3179 3180static int 3181ztest_dataset_create(char *dsname) 3182{ 3183 uint64_t zilset = ztest_random(100); 3184 int err = dmu_objset_create(dsname, DMU_OST_OTHER, 0, 3185 ztest_objset_create_cb, NULL); 3186 3187 if (err || zilset < 80) 3188 return (err); 3189 3190 if (ztest_opts.zo_verbose >= 6) 3191 (void) printf("Setting dataset %s to sync always\n", dsname); 3192 return (ztest_dsl_prop_set_uint64(dsname, ZFS_PROP_SYNC, 3193 ZFS_SYNC_ALWAYS, B_FALSE)); 3194} 3195 3196/* ARGSUSED */ 3197static int 3198ztest_objset_destroy_cb(const char *name, void *arg) 3199{ 3200 objset_t *os; 3201 dmu_object_info_t doi; 3202 int error; 3203 3204 /* 3205 * Verify that the dataset contains a directory object. 3206 */ 3207 VERIFY0(dmu_objset_own(name, DMU_OST_OTHER, B_TRUE, FTAG, &os)); 3208 error = dmu_object_info(os, ZTEST_DIROBJ, &doi); 3209 if (error != ENOENT) { 3210 /* We could have crashed in the middle of destroying it */ 3211 ASSERT0(error); 3212 ASSERT3U(doi.doi_type, ==, DMU_OT_ZAP_OTHER); 3213 ASSERT3S(doi.doi_physical_blocks_512, >=, 0); 3214 } 3215 dmu_objset_disown(os, FTAG); 3216 3217 /* 3218 * Destroy the dataset. 3219 */ 3220 if (strchr(name, '@') != NULL) { 3221 VERIFY0(dsl_destroy_snapshot(name, B_FALSE)); 3222 } else { 3223 VERIFY0(dsl_destroy_head(name)); 3224 } 3225 return (0); 3226} 3227 3228static boolean_t 3229ztest_snapshot_create(char *osname, uint64_t id) 3230{ 3231 char snapname[ZFS_MAX_DATASET_NAME_LEN]; 3232 int error; 3233 3234 (void) snprintf(snapname, sizeof (snapname), "%llu", (u_longlong_t)id); 3235 3236 error = dmu_objset_snapshot_one(osname, snapname); 3237 if (error == ENOSPC) { 3238 ztest_record_enospc(FTAG); 3239 return (B_FALSE); 3240 } 3241 if (error != 0 && error != EEXIST) { 3242 fatal(0, "ztest_snapshot_create(%s@%s) = %d", osname, 3243 snapname, error); 3244 } 3245 return (B_TRUE); 3246} 3247 3248static boolean_t 3249ztest_snapshot_destroy(char *osname, uint64_t id) 3250{ 3251 char snapname[ZFS_MAX_DATASET_NAME_LEN]; 3252 int error; 3253 3254 (void) snprintf(snapname, sizeof (snapname), "%s@%llu", osname, 3255 (u_longlong_t)id); 3256 3257 error = dsl_destroy_snapshot(snapname, B_FALSE); 3258 if (error != 0 && error != ENOENT) 3259 fatal(0, "ztest_snapshot_destroy(%s) = %d", snapname, error); 3260 return (B_TRUE); 3261} 3262 3263/* ARGSUSED */ 3264void 3265ztest_dmu_objset_create_destroy(ztest_ds_t *zd, uint64_t id) 3266{ 3267 ztest_ds_t zdtmp; 3268 int iters; 3269 int error; 3270 objset_t *os, *os2; 3271 char name[ZFS_MAX_DATASET_NAME_LEN]; 3272 zilog_t *zilog; 3273 3274 (void) rw_rdlock(&ztest_name_lock); 3275 3276 (void) snprintf(name, sizeof (name), "%s/temp_%llu", 3277 ztest_opts.zo_pool, (u_longlong_t)id); 3278 3279 /* 3280 * If this dataset exists from a previous run, process its replay log 3281 * half of the time. If we don't replay it, then dmu_objset_destroy() 3282 * (invoked from ztest_objset_destroy_cb()) should just throw it away. 3283 */ 3284 if (ztest_random(2) == 0 && 3285 dmu_objset_own(name, DMU_OST_OTHER, B_FALSE, FTAG, &os) == 0) { 3286 ztest_zd_init(&zdtmp, NULL, os); 3287 zil_replay(os, &zdtmp, ztest_replay_vector); 3288 ztest_zd_fini(&zdtmp); 3289 dmu_objset_disown(os, FTAG); 3290 } 3291 3292 /* 3293 * There may be an old instance of the dataset we're about to 3294 * create lying around from a previous run. If so, destroy it 3295 * and all of its snapshots. 3296 */ 3297 (void) dmu_objset_find(name, ztest_objset_destroy_cb, NULL, 3298 DS_FIND_CHILDREN | DS_FIND_SNAPSHOTS); 3299 3300 /* 3301 * Verify that the destroyed dataset is no longer in the namespace. 3302 */ 3303 VERIFY3U(ENOENT, ==, dmu_objset_own(name, DMU_OST_OTHER, B_TRUE, 3304 FTAG, &os)); 3305 3306 /* 3307 * Verify that we can create a new dataset. 3308 */ 3309 error = ztest_dataset_create(name); 3310 if (error) { 3311 if (error == ENOSPC) { 3312 ztest_record_enospc(FTAG); 3313 (void) rw_unlock(&ztest_name_lock); 3314 return; 3315 } 3316 fatal(0, "dmu_objset_create(%s) = %d", name, error); 3317 } 3318 3319 VERIFY0(dmu_objset_own(name, DMU_OST_OTHER, B_FALSE, FTAG, &os)); 3320 3321 ztest_zd_init(&zdtmp, NULL, os); 3322 3323 /* 3324 * Open the intent log for it. 3325 */ 3326 zilog = zil_open(os, ztest_get_data); 3327 3328 /* 3329 * Put some objects in there, do a little I/O to them, 3330 * and randomly take a couple of snapshots along the way. 3331 */ 3332 iters = ztest_random(5); 3333 for (int i = 0; i < iters; i++) { 3334 ztest_dmu_object_alloc_free(&zdtmp, id); 3335 if (ztest_random(iters) == 0) 3336 (void) ztest_snapshot_create(name, i); 3337 } 3338 3339 /* 3340 * Verify that we cannot create an existing dataset. 3341 */ 3342 VERIFY3U(EEXIST, ==, 3343 dmu_objset_create(name, DMU_OST_OTHER, 0, NULL, NULL)); 3344 3345 /* 3346 * Verify that we can hold an objset that is also owned. 3347 */ 3348 VERIFY3U(0, ==, dmu_objset_hold(name, FTAG, &os2)); 3349 dmu_objset_rele(os2, FTAG); 3350 3351 /* 3352 * Verify that we cannot own an objset that is already owned. 3353 */ 3354 VERIFY3U(EBUSY, ==, 3355 dmu_objset_own(name, DMU_OST_OTHER, B_FALSE, FTAG, &os2)); 3356 3357 zil_close(zilog); 3358 dmu_objset_disown(os, FTAG); 3359 ztest_zd_fini(&zdtmp); 3360 3361 (void) rw_unlock(&ztest_name_lock); 3362} 3363 3364/* 3365 * Verify that dmu_snapshot_{create,destroy,open,close} work as expected. 3366 */ 3367void 3368ztest_dmu_snapshot_create_destroy(ztest_ds_t *zd, uint64_t id) 3369{ 3370 (void) rw_rdlock(&ztest_name_lock); 3371 (void) ztest_snapshot_destroy(zd->zd_name, id); 3372 (void) ztest_snapshot_create(zd->zd_name, id); 3373 (void) rw_unlock(&ztest_name_lock); 3374} 3375 3376/* 3377 * Cleanup non-standard snapshots and clones. 3378 */ 3379void 3380ztest_dsl_dataset_cleanup(char *osname, uint64_t id) 3381{ 3382 char snap1name[ZFS_MAX_DATASET_NAME_LEN]; 3383 char clone1name[ZFS_MAX_DATASET_NAME_LEN]; 3384 char snap2name[ZFS_MAX_DATASET_NAME_LEN]; 3385 char clone2name[ZFS_MAX_DATASET_NAME_LEN]; 3386 char snap3name[ZFS_MAX_DATASET_NAME_LEN]; 3387 int error; 3388 3389 (void) snprintf(snap1name, sizeof (snap1name), 3390 "%s@s1_%llu", osname, id); 3391 (void) snprintf(clone1name, sizeof (clone1name), 3392 "%s/c1_%llu", osname, id); 3393 (void) snprintf(snap2name, sizeof (snap2name), 3394 "%s@s2_%llu", clone1name, id); 3395 (void) snprintf(clone2name, sizeof (clone2name), 3396 "%s/c2_%llu", osname, id); 3397 (void) snprintf(snap3name, sizeof (snap3name), 3398 "%s@s3_%llu", clone1name, id); 3399 3400 error = dsl_destroy_head(clone2name); 3401 if (error && error != ENOENT) 3402 fatal(0, "dsl_destroy_head(%s) = %d", clone2name, error); 3403 error = dsl_destroy_snapshot(snap3name, B_FALSE); 3404 if (error && error != ENOENT) 3405 fatal(0, "dsl_destroy_snapshot(%s) = %d", snap3name, error); 3406 error = dsl_destroy_snapshot(snap2name, B_FALSE); 3407 if (error && error != ENOENT) 3408 fatal(0, "dsl_destroy_snapshot(%s) = %d", snap2name, error); 3409 error = dsl_destroy_head(clone1name); 3410 if (error && error != ENOENT) 3411 fatal(0, "dsl_destroy_head(%s) = %d", clone1name, error); 3412 error = dsl_destroy_snapshot(snap1name, B_FALSE); 3413 if (error && error != ENOENT) 3414 fatal(0, "dsl_destroy_snapshot(%s) = %d", snap1name, error); 3415} 3416 3417/* 3418 * Verify dsl_dataset_promote handles EBUSY 3419 */ 3420void 3421ztest_dsl_dataset_promote_busy(ztest_ds_t *zd, uint64_t id) 3422{ 3423 objset_t *os; 3424 char snap1name[ZFS_MAX_DATASET_NAME_LEN]; 3425 char clone1name[ZFS_MAX_DATASET_NAME_LEN]; 3426 char snap2name[ZFS_MAX_DATASET_NAME_LEN]; 3427 char clone2name[ZFS_MAX_DATASET_NAME_LEN]; 3428 char snap3name[ZFS_MAX_DATASET_NAME_LEN]; 3429 char *osname = zd->zd_name; 3430 int error; 3431 3432 (void) rw_rdlock(&ztest_name_lock); 3433 3434 ztest_dsl_dataset_cleanup(osname, id); 3435 3436 (void) snprintf(snap1name, sizeof (snap1name), 3437 "%s@s1_%llu", osname, id); 3438 (void) snprintf(clone1name, sizeof (clone1name), 3439 "%s/c1_%llu", osname, id); 3440 (void) snprintf(snap2name, sizeof (snap2name), 3441 "%s@s2_%llu", clone1name, id); 3442 (void) snprintf(clone2name, sizeof (clone2name), 3443 "%s/c2_%llu", osname, id); 3444 (void) snprintf(snap3name, sizeof (snap3name), 3445 "%s@s3_%llu", clone1name, id); 3446 3447 error = dmu_objset_snapshot_one(osname, strchr(snap1name, '@') + 1); 3448 if (error && error != EEXIST) { 3449 if (error == ENOSPC) { 3450 ztest_record_enospc(FTAG); 3451 goto out; 3452 } 3453 fatal(0, "dmu_take_snapshot(%s) = %d", snap1name, error); 3454 } 3455 3456 error = dmu_objset_clone(clone1name, snap1name); 3457 if (error) { 3458 if (error == ENOSPC) { 3459 ztest_record_enospc(FTAG); 3460 goto out; 3461 } 3462 fatal(0, "dmu_objset_create(%s) = %d", clone1name, error); 3463 } 3464 3465 error = dmu_objset_snapshot_one(clone1name, strchr(snap2name, '@') + 1); 3466 if (error && error != EEXIST) { 3467 if (error == ENOSPC) { 3468 ztest_record_enospc(FTAG); 3469 goto out; 3470 } 3471 fatal(0, "dmu_open_snapshot(%s) = %d", snap2name, error); 3472 } 3473 3474 error = dmu_objset_snapshot_one(clone1name, strchr(snap3name, '@') + 1); 3475 if (error && error != EEXIST) { 3476 if (error == ENOSPC) { 3477 ztest_record_enospc(FTAG); 3478 goto out; 3479 } 3480 fatal(0, "dmu_open_snapshot(%s) = %d", snap3name, error); 3481 } 3482 3483 error = dmu_objset_clone(clone2name, snap3name); 3484 if (error) { 3485 if (error == ENOSPC) { 3486 ztest_record_enospc(FTAG); 3487 goto out; 3488 } 3489 fatal(0, "dmu_objset_create(%s) = %d", clone2name, error); 3490 } 3491 3492 error = dmu_objset_own(snap2name, DMU_OST_ANY, B_TRUE, FTAG, &os); 3493 if (error) 3494 fatal(0, "dmu_objset_own(%s) = %d", snap2name, error); 3495 error = dsl_dataset_promote(clone2name, NULL); 3496 if (error == ENOSPC) { 3497 dmu_objset_disown(os, FTAG); 3498 ztest_record_enospc(FTAG); 3499 goto out; 3500 } 3501 if (error != EBUSY) 3502 fatal(0, "dsl_dataset_promote(%s), %d, not EBUSY", clone2name, 3503 error); 3504 dmu_objset_disown(os, FTAG); 3505 3506out: 3507 ztest_dsl_dataset_cleanup(osname, id); 3508 3509 (void) rw_unlock(&ztest_name_lock); 3510} 3511 3512/* 3513 * Verify that dmu_object_{alloc,free} work as expected. 3514 */ 3515void 3516ztest_dmu_object_alloc_free(ztest_ds_t *zd, uint64_t id) 3517{ 3518 ztest_od_t od[4]; 3519 int batchsize = sizeof (od) / sizeof (od[0]); 3520 3521 for (int b = 0; b < batchsize; b++) 3522 ztest_od_init(&od[b], id, FTAG, b, DMU_OT_UINT64_OTHER, 0, 0); 3523 3524 /* 3525 * Destroy the previous batch of objects, create a new batch, 3526 * and do some I/O on the new objects. 3527 */ 3528 if (ztest_object_init(zd, od, sizeof (od), B_TRUE) != 0) 3529 return; 3530 3531 while (ztest_random(4 * batchsize) != 0) 3532 ztest_io(zd, od[ztest_random(batchsize)].od_object, 3533 ztest_random(ZTEST_RANGE_LOCKS) << SPA_MAXBLOCKSHIFT); 3534} 3535 3536/* 3537 * Verify that dmu_{read,write} work as expected. 3538 */ 3539void 3540ztest_dmu_read_write(ztest_ds_t *zd, uint64_t id) 3541{ 3542 objset_t *os = zd->zd_os; 3543 ztest_od_t od[2]; 3544 dmu_tx_t *tx; 3545 int i, freeit, error; 3546 uint64_t n, s, txg; 3547 bufwad_t *packbuf, *bigbuf, *pack, *bigH, *bigT; 3548 uint64_t packobj, packoff, packsize, bigobj, bigoff, bigsize; 3549 uint64_t chunksize = (1000 + ztest_random(1000)) * sizeof (uint64_t); 3550 uint64_t regions = 997; 3551 uint64_t stride = 123456789ULL; 3552 uint64_t width = 40; 3553 int free_percent = 5; 3554 3555 /* 3556 * This test uses two objects, packobj and bigobj, that are always 3557 * updated together (i.e. in the same tx) so that their contents are 3558 * in sync and can be compared. Their contents relate to each other 3559 * in a simple way: packobj is a dense array of 'bufwad' structures, 3560 * while bigobj is a sparse array of the same bufwads. Specifically, 3561 * for any index n, there are three bufwads that should be identical: 3562 * 3563 * packobj, at offset n * sizeof (bufwad_t) 3564 * bigobj, at the head of the nth chunk 3565 * bigobj, at the tail of the nth chunk 3566 * 3567 * The chunk size is arbitrary. It doesn't have to be a power of two, 3568 * and it doesn't have any relation to the object blocksize. 3569 * The only requirement is that it can hold at least two bufwads. 3570 * 3571 * Normally, we write the bufwad to each of these locations. 3572 * However, free_percent of the time we instead write zeroes to 3573 * packobj and perform a dmu_free_range() on bigobj. By comparing 3574 * bigobj to packobj, we can verify that the DMU is correctly 3575 * tracking which parts of an object are allocated and free, 3576 * and that the contents of the allocated blocks are correct. 3577 */ 3578 3579 /* 3580 * Read the directory info. If it's the first time, set things up. 3581 */ 3582 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_UINT64_OTHER, 0, chunksize); 3583 ztest_od_init(&od[1], id, FTAG, 1, DMU_OT_UINT64_OTHER, 0, chunksize); 3584 3585 if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0) 3586 return; 3587 3588 bigobj = od[0].od_object; 3589 packobj = od[1].od_object; 3590 chunksize = od[0].od_gen; 3591 ASSERT(chunksize == od[1].od_gen); 3592 3593 /* 3594 * Prefetch a random chunk of the big object. 3595 * Our aim here is to get some async reads in flight 3596 * for blocks that we may free below; the DMU should 3597 * handle this race correctly. 3598 */ 3599 n = ztest_random(regions) * stride + ztest_random(width); 3600 s = 1 + ztest_random(2 * width - 1); 3601 dmu_prefetch(os, bigobj, 0, n * chunksize, s * chunksize, 3602 ZIO_PRIORITY_SYNC_READ); 3603 3604 /* 3605 * Pick a random index and compute the offsets into packobj and bigobj. 3606 */ 3607 n = ztest_random(regions) * stride + ztest_random(width); 3608 s = 1 + ztest_random(width - 1); 3609 3610 packoff = n * sizeof (bufwad_t); 3611 packsize = s * sizeof (bufwad_t); 3612 3613 bigoff = n * chunksize; 3614 bigsize = s * chunksize; 3615 3616 packbuf = umem_alloc(packsize, UMEM_NOFAIL); 3617 bigbuf = umem_alloc(bigsize, UMEM_NOFAIL); 3618 3619 /* 3620 * free_percent of the time, free a range of bigobj rather than 3621 * overwriting it. 3622 */ 3623 freeit = (ztest_random(100) < free_percent); 3624 3625 /* 3626 * Read the current contents of our objects. 3627 */ 3628 error = dmu_read(os, packobj, packoff, packsize, packbuf, 3629 DMU_READ_PREFETCH); 3630 ASSERT0(error); 3631 error = dmu_read(os, bigobj, bigoff, bigsize, bigbuf, 3632 DMU_READ_PREFETCH); 3633 ASSERT0(error); 3634 3635 /* 3636 * Get a tx for the mods to both packobj and bigobj. 3637 */ 3638 tx = dmu_tx_create(os); 3639 3640 dmu_tx_hold_write(tx, packobj, packoff, packsize); 3641 3642 if (freeit) 3643 dmu_tx_hold_free(tx, bigobj, bigoff, bigsize); 3644 else 3645 dmu_tx_hold_write(tx, bigobj, bigoff, bigsize); 3646 3647 /* This accounts for setting the checksum/compression. */ 3648 dmu_tx_hold_bonus(tx, bigobj); 3649 3650 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG); 3651 if (txg == 0) { 3652 umem_free(packbuf, packsize); 3653 umem_free(bigbuf, bigsize); 3654 return; 3655 } 3656 3657 enum zio_checksum cksum; 3658 do { 3659 cksum = (enum zio_checksum) 3660 ztest_random_dsl_prop(ZFS_PROP_CHECKSUM); 3661 } while (cksum >= ZIO_CHECKSUM_LEGACY_FUNCTIONS); 3662 dmu_object_set_checksum(os, bigobj, cksum, tx); 3663 3664 enum zio_compress comp; 3665 do { 3666 comp = (enum zio_compress) 3667 ztest_random_dsl_prop(ZFS_PROP_COMPRESSION); 3668 } while (comp >= ZIO_COMPRESS_LEGACY_FUNCTIONS); 3669 dmu_object_set_compress(os, bigobj, comp, tx); 3670 3671 /* 3672 * For each index from n to n + s, verify that the existing bufwad 3673 * in packobj matches the bufwads at the head and tail of the 3674 * corresponding chunk in bigobj. Then update all three bufwads 3675 * with the new values we want to write out. 3676 */ 3677 for (i = 0; i < s; i++) { 3678 /* LINTED */ 3679 pack = (bufwad_t *)((char *)packbuf + i * sizeof (bufwad_t)); 3680 /* LINTED */ 3681 bigH = (bufwad_t *)((char *)bigbuf + i * chunksize); 3682 /* LINTED */ 3683 bigT = (bufwad_t *)((char *)bigH + chunksize) - 1; 3684 3685 ASSERT((uintptr_t)bigH - (uintptr_t)bigbuf < bigsize); 3686 ASSERT((uintptr_t)bigT - (uintptr_t)bigbuf < bigsize); 3687 3688 if (pack->bw_txg > txg) 3689 fatal(0, "future leak: got %llx, open txg is %llx", 3690 pack->bw_txg, txg); 3691 3692 if (pack->bw_data != 0 && pack->bw_index != n + i) 3693 fatal(0, "wrong index: got %llx, wanted %llx+%llx", 3694 pack->bw_index, n, i); 3695 3696 if (bcmp(pack, bigH, sizeof (bufwad_t)) != 0) 3697 fatal(0, "pack/bigH mismatch in %p/%p", pack, bigH); 3698 3699 if (bcmp(pack, bigT, sizeof (bufwad_t)) != 0) 3700 fatal(0, "pack/bigT mismatch in %p/%p", pack, bigT); 3701 3702 if (freeit) { 3703 bzero(pack, sizeof (bufwad_t)); 3704 } else { 3705 pack->bw_index = n + i; 3706 pack->bw_txg = txg; 3707 pack->bw_data = 1 + ztest_random(-2ULL); 3708 } 3709 *bigH = *pack; 3710 *bigT = *pack; 3711 } 3712 3713 /* 3714 * We've verified all the old bufwads, and made new ones. 3715 * Now write them out. 3716 */ 3717 dmu_write(os, packobj, packoff, packsize, packbuf, tx); 3718 3719 if (freeit) { 3720 if (ztest_opts.zo_verbose >= 7) { 3721 (void) printf("freeing offset %llx size %llx" 3722 " txg %llx\n", 3723 (u_longlong_t)bigoff, 3724 (u_longlong_t)bigsize, 3725 (u_longlong_t)txg); 3726 } 3727 VERIFY(0 == dmu_free_range(os, bigobj, bigoff, bigsize, tx)); 3728 } else { 3729 if (ztest_opts.zo_verbose >= 7) { 3730 (void) printf("writing offset %llx size %llx" 3731 " txg %llx\n", 3732 (u_longlong_t)bigoff, 3733 (u_longlong_t)bigsize, 3734 (u_longlong_t)txg); 3735 } 3736 dmu_write(os, bigobj, bigoff, bigsize, bigbuf, tx); 3737 } 3738 3739 dmu_tx_commit(tx); 3740 3741 /* 3742 * Sanity check the stuff we just wrote. 3743 */ 3744 { 3745 void *packcheck = umem_alloc(packsize, UMEM_NOFAIL); 3746 void *bigcheck = umem_alloc(bigsize, UMEM_NOFAIL); 3747 3748 VERIFY(0 == dmu_read(os, packobj, packoff, 3749 packsize, packcheck, DMU_READ_PREFETCH)); 3750 VERIFY(0 == dmu_read(os, bigobj, bigoff, 3751 bigsize, bigcheck, DMU_READ_PREFETCH)); 3752 3753 ASSERT(bcmp(packbuf, packcheck, packsize) == 0); 3754 ASSERT(bcmp(bigbuf, bigcheck, bigsize) == 0); 3755 3756 umem_free(packcheck, packsize); 3757 umem_free(bigcheck, bigsize); 3758 } 3759 3760 umem_free(packbuf, packsize); 3761 umem_free(bigbuf, bigsize); 3762} 3763 3764void 3765compare_and_update_pbbufs(uint64_t s, bufwad_t *packbuf, bufwad_t *bigbuf, 3766 uint64_t bigsize, uint64_t n, uint64_t chunksize, uint64_t txg) 3767{ 3768 uint64_t i; 3769 bufwad_t *pack; 3770 bufwad_t *bigH; 3771 bufwad_t *bigT; 3772 3773 /* 3774 * For each index from n to n + s, verify that the existing bufwad 3775 * in packobj matches the bufwads at the head and tail of the 3776 * corresponding chunk in bigobj. Then update all three bufwads 3777 * with the new values we want to write out. 3778 */ 3779 for (i = 0; i < s; i++) { 3780 /* LINTED */ 3781 pack = (bufwad_t *)((char *)packbuf + i * sizeof (bufwad_t)); 3782 /* LINTED */ 3783 bigH = (bufwad_t *)((char *)bigbuf + i * chunksize); 3784 /* LINTED */ 3785 bigT = (bufwad_t *)((char *)bigH + chunksize) - 1; 3786 3787 ASSERT((uintptr_t)bigH - (uintptr_t)bigbuf < bigsize); 3788 ASSERT((uintptr_t)bigT - (uintptr_t)bigbuf < bigsize); 3789 3790 if (pack->bw_txg > txg) 3791 fatal(0, "future leak: got %llx, open txg is %llx", 3792 pack->bw_txg, txg); 3793 3794 if (pack->bw_data != 0 && pack->bw_index != n + i) 3795 fatal(0, "wrong index: got %llx, wanted %llx+%llx", 3796 pack->bw_index, n, i); 3797 3798 if (bcmp(pack, bigH, sizeof (bufwad_t)) != 0) 3799 fatal(0, "pack/bigH mismatch in %p/%p", pack, bigH); 3800 3801 if (bcmp(pack, bigT, sizeof (bufwad_t)) != 0) 3802 fatal(0, "pack/bigT mismatch in %p/%p", pack, bigT); 3803 3804 pack->bw_index = n + i; 3805 pack->bw_txg = txg; 3806 pack->bw_data = 1 + ztest_random(-2ULL); 3807 3808 *bigH = *pack; 3809 *bigT = *pack; 3810 } 3811} 3812 3813void 3814ztest_dmu_read_write_zcopy(ztest_ds_t *zd, uint64_t id) 3815{ 3816 objset_t *os = zd->zd_os; 3817 ztest_od_t od[2]; 3818 dmu_tx_t *tx; 3819 uint64_t i; 3820 int error; 3821 uint64_t n, s, txg; 3822 bufwad_t *packbuf, *bigbuf; 3823 uint64_t packobj, packoff, packsize, bigobj, bigoff, bigsize; 3824 uint64_t blocksize = ztest_random_blocksize(); 3825 uint64_t chunksize = blocksize; 3826 uint64_t regions = 997; 3827 uint64_t stride = 123456789ULL; 3828 uint64_t width = 9; 3829 dmu_buf_t *bonus_db; 3830 arc_buf_t **bigbuf_arcbufs; 3831 dmu_object_info_t doi; 3832 3833 /* 3834 * This test uses two objects, packobj and bigobj, that are always 3835 * updated together (i.e. in the same tx) so that their contents are 3836 * in sync and can be compared. Their contents relate to each other 3837 * in a simple way: packobj is a dense array of 'bufwad' structures, 3838 * while bigobj is a sparse array of the same bufwads. Specifically, 3839 * for any index n, there are three bufwads that should be identical: 3840 * 3841 * packobj, at offset n * sizeof (bufwad_t) 3842 * bigobj, at the head of the nth chunk 3843 * bigobj, at the tail of the nth chunk 3844 * 3845 * The chunk size is set equal to bigobj block size so that 3846 * dmu_assign_arcbuf() can be tested for object updates. 3847 */ 3848 3849 /* 3850 * Read the directory info. If it's the first time, set things up. 3851 */ 3852 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_UINT64_OTHER, blocksize, 0); 3853 ztest_od_init(&od[1], id, FTAG, 1, DMU_OT_UINT64_OTHER, 0, chunksize); 3854 3855 if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0) 3856 return; 3857 3858 bigobj = od[0].od_object; 3859 packobj = od[1].od_object; 3860 blocksize = od[0].od_blocksize; 3861 chunksize = blocksize; 3862 ASSERT(chunksize == od[1].od_gen); 3863 3864 VERIFY(dmu_object_info(os, bigobj, &doi) == 0); 3865 VERIFY(ISP2(doi.doi_data_block_size)); 3866 VERIFY(chunksize == doi.doi_data_block_size); 3867 VERIFY(chunksize >= 2 * sizeof (bufwad_t)); 3868 3869 /* 3870 * Pick a random index and compute the offsets into packobj and bigobj. 3871 */ 3872 n = ztest_random(regions) * stride + ztest_random(width); 3873 s = 1 + ztest_random(width - 1); 3874 3875 packoff = n * sizeof (bufwad_t); 3876 packsize = s * sizeof (bufwad_t); 3877 3878 bigoff = n * chunksize; 3879 bigsize = s * chunksize; 3880 3881 packbuf = umem_zalloc(packsize, UMEM_NOFAIL); 3882 bigbuf = umem_zalloc(bigsize, UMEM_NOFAIL); 3883 3884 VERIFY3U(0, ==, dmu_bonus_hold(os, bigobj, FTAG, &bonus_db)); 3885 3886 bigbuf_arcbufs = umem_zalloc(2 * s * sizeof (arc_buf_t *), UMEM_NOFAIL); 3887 3888 /* 3889 * Iteration 0 test zcopy for DB_UNCACHED dbufs. 3890 * Iteration 1 test zcopy to already referenced dbufs. 3891 * Iteration 2 test zcopy to dirty dbuf in the same txg. 3892 * Iteration 3 test zcopy to dbuf dirty in previous txg. 3893 * Iteration 4 test zcopy when dbuf is no longer dirty. 3894 * Iteration 5 test zcopy when it can't be done. 3895 * Iteration 6 one more zcopy write. 3896 */ 3897 for (i = 0; i < 7; i++) { 3898 uint64_t j; 3899 uint64_t off; 3900 3901 /* 3902 * In iteration 5 (i == 5) use arcbufs 3903 * that don't match bigobj blksz to test 3904 * dmu_assign_arcbuf() when it can't directly 3905 * assign an arcbuf to a dbuf. 3906 */ 3907 for (j = 0; j < s; j++) { 3908 if (i != 5) { 3909 bigbuf_arcbufs[j] = 3910 dmu_request_arcbuf(bonus_db, chunksize); 3911 } else { 3912 bigbuf_arcbufs[2 * j] = 3913 dmu_request_arcbuf(bonus_db, chunksize / 2); 3914 bigbuf_arcbufs[2 * j + 1] = 3915 dmu_request_arcbuf(bonus_db, chunksize / 2); 3916 } 3917 } 3918 3919 /* 3920 * Get a tx for the mods to both packobj and bigobj. 3921 */ 3922 tx = dmu_tx_create(os); 3923 3924 dmu_tx_hold_write(tx, packobj, packoff, packsize); 3925 dmu_tx_hold_write(tx, bigobj, bigoff, bigsize); 3926 3927 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG); 3928 if (txg == 0) { 3929 umem_free(packbuf, packsize); 3930 umem_free(bigbuf, bigsize); 3931 for (j = 0; j < s; j++) { 3932 if (i != 5) { 3933 dmu_return_arcbuf(bigbuf_arcbufs[j]); 3934 } else { 3935 dmu_return_arcbuf( 3936 bigbuf_arcbufs[2 * j]); 3937 dmu_return_arcbuf( 3938 bigbuf_arcbufs[2 * j + 1]); 3939 } 3940 } 3941 umem_free(bigbuf_arcbufs, 2 * s * sizeof (arc_buf_t *)); 3942 dmu_buf_rele(bonus_db, FTAG); 3943 return; 3944 } 3945 3946 /* 3947 * 50% of the time don't read objects in the 1st iteration to 3948 * test dmu_assign_arcbuf() for the case when there're no 3949 * existing dbufs for the specified offsets. 3950 */ 3951 if (i != 0 || ztest_random(2) != 0) { 3952 error = dmu_read(os, packobj, packoff, 3953 packsize, packbuf, DMU_READ_PREFETCH); 3954 ASSERT0(error); 3955 error = dmu_read(os, bigobj, bigoff, bigsize, 3956 bigbuf, DMU_READ_PREFETCH); 3957 ASSERT0(error); 3958 } 3959 compare_and_update_pbbufs(s, packbuf, bigbuf, bigsize, 3960 n, chunksize, txg); 3961 3962 /* 3963 * We've verified all the old bufwads, and made new ones. 3964 * Now write them out. 3965 */ 3966 dmu_write(os, packobj, packoff, packsize, packbuf, tx); 3967 if (ztest_opts.zo_verbose >= 7) { 3968 (void) printf("writing offset %llx size %llx" 3969 " txg %llx\n", 3970 (u_longlong_t)bigoff, 3971 (u_longlong_t)bigsize, 3972 (u_longlong_t)txg); 3973 } 3974 for (off = bigoff, j = 0; j < s; j++, off += chunksize) { 3975 dmu_buf_t *dbt; 3976 if (i != 5) { 3977 bcopy((caddr_t)bigbuf + (off - bigoff), 3978 bigbuf_arcbufs[j]->b_data, chunksize); 3979 } else { 3980 bcopy((caddr_t)bigbuf + (off - bigoff), 3981 bigbuf_arcbufs[2 * j]->b_data, 3982 chunksize / 2); 3983 bcopy((caddr_t)bigbuf + (off - bigoff) + 3984 chunksize / 2, 3985 bigbuf_arcbufs[2 * j + 1]->b_data, 3986 chunksize / 2); 3987 } 3988 3989 if (i == 1) { 3990 VERIFY(dmu_buf_hold(os, bigobj, off, 3991 FTAG, &dbt, DMU_READ_NO_PREFETCH) == 0); 3992 } 3993 if (i != 5) { 3994 dmu_assign_arcbuf(bonus_db, off, 3995 bigbuf_arcbufs[j], tx); 3996 } else { 3997 dmu_assign_arcbuf(bonus_db, off, 3998 bigbuf_arcbufs[2 * j], tx); 3999 dmu_assign_arcbuf(bonus_db, 4000 off + chunksize / 2, 4001 bigbuf_arcbufs[2 * j + 1], tx); 4002 } 4003 if (i == 1) { 4004 dmu_buf_rele(dbt, FTAG); 4005 } 4006 } 4007 dmu_tx_commit(tx); 4008 4009 /* 4010 * Sanity check the stuff we just wrote. 4011 */ 4012 { 4013 void *packcheck = umem_alloc(packsize, UMEM_NOFAIL); 4014 void *bigcheck = umem_alloc(bigsize, UMEM_NOFAIL); 4015 4016 VERIFY(0 == dmu_read(os, packobj, packoff, 4017 packsize, packcheck, DMU_READ_PREFETCH)); 4018 VERIFY(0 == dmu_read(os, bigobj, bigoff, 4019 bigsize, bigcheck, DMU_READ_PREFETCH)); 4020 4021 ASSERT(bcmp(packbuf, packcheck, packsize) == 0); 4022 ASSERT(bcmp(bigbuf, bigcheck, bigsize) == 0); 4023 4024 umem_free(packcheck, packsize); 4025 umem_free(bigcheck, bigsize); 4026 } 4027 if (i == 2) { 4028 txg_wait_open(dmu_objset_pool(os), 0); 4029 } else if (i == 3) { 4030 txg_wait_synced(dmu_objset_pool(os), 0); 4031 } 4032 } 4033 4034 dmu_buf_rele(bonus_db, FTAG); 4035 umem_free(packbuf, packsize); 4036 umem_free(bigbuf, bigsize); 4037 umem_free(bigbuf_arcbufs, 2 * s * sizeof (arc_buf_t *)); 4038} 4039 4040/* ARGSUSED */ 4041void 4042ztest_dmu_write_parallel(ztest_ds_t *zd, uint64_t id) 4043{ 4044 ztest_od_t od[1]; 4045 uint64_t offset = (1ULL << (ztest_random(20) + 43)) + 4046 (ztest_random(ZTEST_RANGE_LOCKS) << SPA_MAXBLOCKSHIFT); 4047 4048 /* 4049 * Have multiple threads write to large offsets in an object 4050 * to verify that parallel writes to an object -- even to the 4051 * same blocks within the object -- doesn't cause any trouble. 4052 */ 4053 ztest_od_init(&od[0], ID_PARALLEL, FTAG, 0, DMU_OT_UINT64_OTHER, 0, 0); 4054 4055 if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0) 4056 return; 4057 4058 while (ztest_random(10) != 0) 4059 ztest_io(zd, od[0].od_object, offset); 4060} 4061 4062void 4063ztest_dmu_prealloc(ztest_ds_t *zd, uint64_t id) 4064{ 4065 ztest_od_t od[1]; 4066 uint64_t offset = (1ULL << (ztest_random(4) + SPA_MAXBLOCKSHIFT)) + 4067 (ztest_random(ZTEST_RANGE_LOCKS) << SPA_MAXBLOCKSHIFT); 4068 uint64_t count = ztest_random(20) + 1; 4069 uint64_t blocksize = ztest_random_blocksize(); 4070 void *data; 4071 4072 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_UINT64_OTHER, blocksize, 0); 4073 4074 if (ztest_object_init(zd, od, sizeof (od), !ztest_random(2)) != 0) 4075 return; 4076 4077 if (ztest_truncate(zd, od[0].od_object, offset, count * blocksize) != 0) 4078 return; 4079 4080 ztest_prealloc(zd, od[0].od_object, offset, count * blocksize); 4081 4082 data = umem_zalloc(blocksize, UMEM_NOFAIL); 4083 4084 while (ztest_random(count) != 0) { 4085 uint64_t randoff = offset + (ztest_random(count) * blocksize); 4086 if (ztest_write(zd, od[0].od_object, randoff, blocksize, 4087 data) != 0) 4088 break; 4089 while (ztest_random(4) != 0) 4090 ztest_io(zd, od[0].od_object, randoff); 4091 } 4092 4093 umem_free(data, blocksize); 4094} 4095 4096/* 4097 * Verify that zap_{create,destroy,add,remove,update} work as expected. 4098 */ 4099#define ZTEST_ZAP_MIN_INTS 1 4100#define ZTEST_ZAP_MAX_INTS 4 4101#define ZTEST_ZAP_MAX_PROPS 1000 4102 4103void 4104ztest_zap(ztest_ds_t *zd, uint64_t id) 4105{ 4106 objset_t *os = zd->zd_os; 4107 ztest_od_t od[1]; 4108 uint64_t object; 4109 uint64_t txg, last_txg; 4110 uint64_t value[ZTEST_ZAP_MAX_INTS]; 4111 uint64_t zl_ints, zl_intsize, prop; 4112 int i, ints; 4113 dmu_tx_t *tx; 4114 char propname[100], txgname[100]; 4115 int error; 4116 char *hc[2] = { "s.acl.h", ".s.open.h.hyLZlg" }; 4117 4118 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_ZAP_OTHER, 0, 0); 4119 4120 if (ztest_object_init(zd, od, sizeof (od), !ztest_random(2)) != 0) 4121 return; 4122 4123 object = od[0].od_object; 4124 4125 /* 4126 * Generate a known hash collision, and verify that 4127 * we can lookup and remove both entries. 4128 */ 4129 tx = dmu_tx_create(os); 4130 dmu_tx_hold_zap(tx, object, B_TRUE, NULL); 4131 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG); 4132 if (txg == 0) 4133 return; 4134 for (i = 0; i < 2; i++) { 4135 value[i] = i; 4136 VERIFY3U(0, ==, zap_add(os, object, hc[i], sizeof (uint64_t), 4137 1, &value[i], tx)); 4138 } 4139 for (i = 0; i < 2; i++) { 4140 VERIFY3U(EEXIST, ==, zap_add(os, object, hc[i], 4141 sizeof (uint64_t), 1, &value[i], tx)); 4142 VERIFY3U(0, ==, 4143 zap_length(os, object, hc[i], &zl_intsize, &zl_ints)); 4144 ASSERT3U(zl_intsize, ==, sizeof (uint64_t)); 4145 ASSERT3U(zl_ints, ==, 1); 4146 } 4147 for (i = 0; i < 2; i++) { 4148 VERIFY3U(0, ==, zap_remove(os, object, hc[i], tx)); 4149 } 4150 dmu_tx_commit(tx); 4151 4152 /* 4153 * Generate a buch of random entries. 4154 */ 4155 ints = MAX(ZTEST_ZAP_MIN_INTS, object % ZTEST_ZAP_MAX_INTS); 4156 4157 prop = ztest_random(ZTEST_ZAP_MAX_PROPS); 4158 (void) sprintf(propname, "prop_%llu", (u_longlong_t)prop); 4159 (void) sprintf(txgname, "txg_%llu", (u_longlong_t)prop); 4160 bzero(value, sizeof (value)); 4161 last_txg = 0; 4162 4163 /* 4164 * If these zap entries already exist, validate their contents. 4165 */ 4166 error = zap_length(os, object, txgname, &zl_intsize, &zl_ints); 4167 if (error == 0) { 4168 ASSERT3U(zl_intsize, ==, sizeof (uint64_t)); 4169 ASSERT3U(zl_ints, ==, 1); 4170 4171 VERIFY(zap_lookup(os, object, txgname, zl_intsize, 4172 zl_ints, &last_txg) == 0); 4173 4174 VERIFY(zap_length(os, object, propname, &zl_intsize, 4175 &zl_ints) == 0); 4176 4177 ASSERT3U(zl_intsize, ==, sizeof (uint64_t)); 4178 ASSERT3U(zl_ints, ==, ints); 4179 4180 VERIFY(zap_lookup(os, object, propname, zl_intsize, 4181 zl_ints, value) == 0); 4182 4183 for (i = 0; i < ints; i++) { 4184 ASSERT3U(value[i], ==, last_txg + object + i); 4185 } 4186 } else { 4187 ASSERT3U(error, ==, ENOENT); 4188 } 4189 4190 /* 4191 * Atomically update two entries in our zap object. 4192 * The first is named txg_%llu, and contains the txg 4193 * in which the property was last updated. The second 4194 * is named prop_%llu, and the nth element of its value 4195 * should be txg + object + n. 4196 */ 4197 tx = dmu_tx_create(os); 4198 dmu_tx_hold_zap(tx, object, B_TRUE, NULL); 4199 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG); 4200 if (txg == 0) 4201 return; 4202 4203 if (last_txg > txg) 4204 fatal(0, "zap future leak: old %llu new %llu", last_txg, txg); 4205 4206 for (i = 0; i < ints; i++) 4207 value[i] = txg + object + i; 4208 4209 VERIFY3U(0, ==, zap_update(os, object, txgname, sizeof (uint64_t), 4210 1, &txg, tx)); 4211 VERIFY3U(0, ==, zap_update(os, object, propname, sizeof (uint64_t), 4212 ints, value, tx)); 4213 4214 dmu_tx_commit(tx); 4215 4216 /* 4217 * Remove a random pair of entries. 4218 */ 4219 prop = ztest_random(ZTEST_ZAP_MAX_PROPS); 4220 (void) sprintf(propname, "prop_%llu", (u_longlong_t)prop); 4221 (void) sprintf(txgname, "txg_%llu", (u_longlong_t)prop); 4222 4223 error = zap_length(os, object, txgname, &zl_intsize, &zl_ints); 4224 4225 if (error == ENOENT) 4226 return; 4227 4228 ASSERT0(error); 4229 4230 tx = dmu_tx_create(os); 4231 dmu_tx_hold_zap(tx, object, B_TRUE, NULL); 4232 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG); 4233 if (txg == 0) 4234 return; 4235 VERIFY3U(0, ==, zap_remove(os, object, txgname, tx)); 4236 VERIFY3U(0, ==, zap_remove(os, object, propname, tx)); 4237 dmu_tx_commit(tx); 4238} 4239 4240/* 4241 * Testcase to test the upgrading of a microzap to fatzap. 4242 */ 4243void 4244ztest_fzap(ztest_ds_t *zd, uint64_t id) 4245{ 4246 objset_t *os = zd->zd_os; 4247 ztest_od_t od[1]; 4248 uint64_t object, txg; 4249 4250 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_ZAP_OTHER, 0, 0); 4251 4252 if (ztest_object_init(zd, od, sizeof (od), !ztest_random(2)) != 0) 4253 return; 4254 4255 object = od[0].od_object; 4256 4257 /* 4258 * Add entries to this ZAP and make sure it spills over 4259 * and gets upgraded to a fatzap. Also, since we are adding 4260 * 2050 entries we should see ptrtbl growth and leaf-block split. 4261 */ 4262 for (int i = 0; i < 2050; i++) { 4263 char name[ZFS_MAX_DATASET_NAME_LEN]; 4264 uint64_t value = i; 4265 dmu_tx_t *tx; 4266 int error; 4267 4268 (void) snprintf(name, sizeof (name), "fzap-%llu-%llu", 4269 id, value); 4270 4271 tx = dmu_tx_create(os); 4272 dmu_tx_hold_zap(tx, object, B_TRUE, name); 4273 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG); 4274 if (txg == 0) 4275 return; 4276 error = zap_add(os, object, name, sizeof (uint64_t), 1, 4277 &value, tx); 4278 ASSERT(error == 0 || error == EEXIST); 4279 dmu_tx_commit(tx); 4280 } 4281} 4282 4283/* ARGSUSED */ 4284void 4285ztest_zap_parallel(ztest_ds_t *zd, uint64_t id) 4286{ 4287 objset_t *os = zd->zd_os; 4288 ztest_od_t od[1]; 4289 uint64_t txg, object, count, wsize, wc, zl_wsize, zl_wc; 4290 dmu_tx_t *tx; 4291 int i, namelen, error; 4292 int micro = ztest_random(2); 4293 char name[20], string_value[20]; 4294 void *data; 4295 4296 ztest_od_init(&od[0], ID_PARALLEL, FTAG, micro, DMU_OT_ZAP_OTHER, 0, 0); 4297 4298 if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0) 4299 return; 4300 4301 object = od[0].od_object; 4302 4303 /* 4304 * Generate a random name of the form 'xxx.....' where each 4305 * x is a random printable character and the dots are dots. 4306 * There are 94 such characters, and the name length goes from 4307 * 6 to 20, so there are 94^3 * 15 = 12,458,760 possible names. 4308 */ 4309 namelen = ztest_random(sizeof (name) - 5) + 5 + 1; 4310 4311 for (i = 0; i < 3; i++) 4312 name[i] = '!' + ztest_random('~' - '!' + 1); 4313 for (; i < namelen - 1; i++) 4314 name[i] = '.'; 4315 name[i] = '\0'; 4316 4317 if ((namelen & 1) || micro) { 4318 wsize = sizeof (txg); 4319 wc = 1; 4320 data = &txg; 4321 } else { 4322 wsize = 1; 4323 wc = namelen; 4324 data = string_value; 4325 } 4326 4327 count = -1ULL; 4328 VERIFY0(zap_count(os, object, &count)); 4329 ASSERT(count != -1ULL); 4330 4331 /* 4332 * Select an operation: length, lookup, add, update, remove. 4333 */ 4334 i = ztest_random(5); 4335 4336 if (i >= 2) { 4337 tx = dmu_tx_create(os); 4338 dmu_tx_hold_zap(tx, object, B_TRUE, NULL); 4339 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG); 4340 if (txg == 0) 4341 return; 4342 bcopy(name, string_value, namelen); 4343 } else { 4344 tx = NULL; 4345 txg = 0; 4346 bzero(string_value, namelen); 4347 } 4348 4349 switch (i) { 4350 4351 case 0: 4352 error = zap_length(os, object, name, &zl_wsize, &zl_wc); 4353 if (error == 0) { 4354 ASSERT3U(wsize, ==, zl_wsize); 4355 ASSERT3U(wc, ==, zl_wc); 4356 } else { 4357 ASSERT3U(error, ==, ENOENT); 4358 } 4359 break; 4360 4361 case 1: 4362 error = zap_lookup(os, object, name, wsize, wc, data); 4363 if (error == 0) { 4364 if (data == string_value && 4365 bcmp(name, data, namelen) != 0) 4366 fatal(0, "name '%s' != val '%s' len %d", 4367 name, data, namelen); 4368 } else { 4369 ASSERT3U(error, ==, ENOENT); 4370 } 4371 break; 4372 4373 case 2: 4374 error = zap_add(os, object, name, wsize, wc, data, tx); 4375 ASSERT(error == 0 || error == EEXIST); 4376 break; 4377 4378 case 3: 4379 VERIFY(zap_update(os, object, name, wsize, wc, data, tx) == 0); 4380 break; 4381 4382 case 4: 4383 error = zap_remove(os, object, name, tx); 4384 ASSERT(error == 0 || error == ENOENT); 4385 break; 4386 } 4387 4388 if (tx != NULL) 4389 dmu_tx_commit(tx); 4390} 4391 4392/* 4393 * Commit callback data. 4394 */ 4395typedef struct ztest_cb_data { 4396 list_node_t zcd_node; 4397 uint64_t zcd_txg; 4398 int zcd_expected_err; 4399 boolean_t zcd_added; 4400 boolean_t zcd_called; 4401 spa_t *zcd_spa; 4402} ztest_cb_data_t; 4403 4404/* This is the actual commit callback function */ 4405static void 4406ztest_commit_callback(void *arg, int error) 4407{ 4408 ztest_cb_data_t *data = arg; 4409 uint64_t synced_txg; 4410 4411 VERIFY(data != NULL); 4412 VERIFY3S(data->zcd_expected_err, ==, error); 4413 VERIFY(!data->zcd_called); 4414 4415 synced_txg = spa_last_synced_txg(data->zcd_spa); 4416 if (data->zcd_txg > synced_txg) 4417 fatal(0, "commit callback of txg %" PRIu64 " called prematurely" 4418 ", last synced txg = %" PRIu64 "\n", data->zcd_txg, 4419 synced_txg); 4420 4421 data->zcd_called = B_TRUE; 4422 4423 if (error == ECANCELED) { 4424 ASSERT0(data->zcd_txg); 4425 ASSERT(!data->zcd_added); 4426 4427 /* 4428 * The private callback data should be destroyed here, but 4429 * since we are going to check the zcd_called field after 4430 * dmu_tx_abort(), we will destroy it there. 4431 */ 4432 return; 4433 } 4434 4435 /* Was this callback added to the global callback list? */ 4436 if (!data->zcd_added) 4437 goto out; 4438 4439 ASSERT3U(data->zcd_txg, !=, 0); 4440 4441 /* Remove our callback from the list */ 4442 (void) mutex_lock(&zcl.zcl_callbacks_lock); 4443 list_remove(&zcl.zcl_callbacks, data); 4444 (void) mutex_unlock(&zcl.zcl_callbacks_lock); 4445 4446out: 4447 umem_free(data, sizeof (ztest_cb_data_t)); 4448} 4449 4450/* Allocate and initialize callback data structure */ 4451static ztest_cb_data_t * 4452ztest_create_cb_data(objset_t *os, uint64_t txg) 4453{ 4454 ztest_cb_data_t *cb_data; 4455 4456 cb_data = umem_zalloc(sizeof (ztest_cb_data_t), UMEM_NOFAIL); 4457 4458 cb_data->zcd_txg = txg; 4459 cb_data->zcd_spa = dmu_objset_spa(os); 4460 4461 return (cb_data); 4462} 4463 4464/* 4465 * If a number of txgs equal to this threshold have been created after a commit 4466 * callback has been registered but not called, then we assume there is an 4467 * implementation bug. 4468 */ 4469#define ZTEST_COMMIT_CALLBACK_THRESH (TXG_CONCURRENT_STATES + 2) 4470 4471/* 4472 * Commit callback test. 4473 */ 4474void 4475ztest_dmu_commit_callbacks(ztest_ds_t *zd, uint64_t id) 4476{ 4477 objset_t *os = zd->zd_os; 4478 ztest_od_t od[1]; 4479 dmu_tx_t *tx; 4480 ztest_cb_data_t *cb_data[3], *tmp_cb; 4481 uint64_t old_txg, txg; 4482 int i, error; 4483 4484 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_UINT64_OTHER, 0, 0); 4485 4486 if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0) 4487 return; 4488 4489 tx = dmu_tx_create(os); 4490 4491 cb_data[0] = ztest_create_cb_data(os, 0); 4492 dmu_tx_callback_register(tx, ztest_commit_callback, cb_data[0]); 4493 4494 dmu_tx_hold_write(tx, od[0].od_object, 0, sizeof (uint64_t)); 4495 4496 /* Every once in a while, abort the transaction on purpose */ 4497 if (ztest_random(100) == 0) 4498 error = -1; 4499 4500 if (!error) 4501 error = dmu_tx_assign(tx, TXG_NOWAIT); 4502 4503 txg = error ? 0 : dmu_tx_get_txg(tx); 4504 4505 cb_data[0]->zcd_txg = txg; 4506 cb_data[1] = ztest_create_cb_data(os, txg); 4507 dmu_tx_callback_register(tx, ztest_commit_callback, cb_data[1]); 4508 4509 if (error) { 4510 /* 4511 * It's not a strict requirement to call the registered 4512 * callbacks from inside dmu_tx_abort(), but that's what 4513 * it's supposed to happen in the current implementation 4514 * so we will check for that. 4515 */ 4516 for (i = 0; i < 2; i++) { 4517 cb_data[i]->zcd_expected_err = ECANCELED; 4518 VERIFY(!cb_data[i]->zcd_called); 4519 } 4520 4521 dmu_tx_abort(tx); 4522 4523 for (i = 0; i < 2; i++) { 4524 VERIFY(cb_data[i]->zcd_called); 4525 umem_free(cb_data[i], sizeof (ztest_cb_data_t)); 4526 } 4527 4528 return; 4529 } 4530 4531 cb_data[2] = ztest_create_cb_data(os, txg); 4532 dmu_tx_callback_register(tx, ztest_commit_callback, cb_data[2]); 4533 4534 /* 4535 * Read existing data to make sure there isn't a future leak. 4536 */ 4537 VERIFY(0 == dmu_read(os, od[0].od_object, 0, sizeof (uint64_t), 4538 &old_txg, DMU_READ_PREFETCH)); 4539 4540 if (old_txg > txg) 4541 fatal(0, "future leak: got %" PRIu64 ", open txg is %" PRIu64, 4542 old_txg, txg); 4543 4544 dmu_write(os, od[0].od_object, 0, sizeof (uint64_t), &txg, tx); 4545 4546 (void) mutex_lock(&zcl.zcl_callbacks_lock); 4547 4548 /* 4549 * Since commit callbacks don't have any ordering requirement and since 4550 * it is theoretically possible for a commit callback to be called 4551 * after an arbitrary amount of time has elapsed since its txg has been 4552 * synced, it is difficult to reliably determine whether a commit 4553 * callback hasn't been called due to high load or due to a flawed 4554 * implementation. 4555 * 4556 * In practice, we will assume that if after a certain number of txgs a 4557 * commit callback hasn't been called, then most likely there's an 4558 * implementation bug.. 4559 */ 4560 tmp_cb = list_head(&zcl.zcl_callbacks); 4561 if (tmp_cb != NULL && 4562 (txg - ZTEST_COMMIT_CALLBACK_THRESH) > tmp_cb->zcd_txg) { 4563 fatal(0, "Commit callback threshold exceeded, oldest txg: %" 4564 PRIu64 ", open txg: %" PRIu64 "\n", tmp_cb->zcd_txg, txg); 4565 } 4566 4567 /* 4568 * Let's find the place to insert our callbacks. 4569 * 4570 * Even though the list is ordered by txg, it is possible for the 4571 * insertion point to not be the end because our txg may already be 4572 * quiescing at this point and other callbacks in the open txg 4573 * (from other objsets) may have sneaked in. 4574 */ 4575 tmp_cb = list_tail(&zcl.zcl_callbacks); 4576 while (tmp_cb != NULL && tmp_cb->zcd_txg > txg) 4577 tmp_cb = list_prev(&zcl.zcl_callbacks, tmp_cb); 4578 4579 /* Add the 3 callbacks to the list */ 4580 for (i = 0; i < 3; i++) { 4581 if (tmp_cb == NULL) 4582 list_insert_head(&zcl.zcl_callbacks, cb_data[i]); 4583 else 4584 list_insert_after(&zcl.zcl_callbacks, tmp_cb, 4585 cb_data[i]); 4586 4587 cb_data[i]->zcd_added = B_TRUE; 4588 VERIFY(!cb_data[i]->zcd_called); 4589 4590 tmp_cb = cb_data[i]; 4591 } 4592 4593 (void) mutex_unlock(&zcl.zcl_callbacks_lock); 4594 4595 dmu_tx_commit(tx); 4596} 4597 4598/* ARGSUSED */ 4599void 4600ztest_dsl_prop_get_set(ztest_ds_t *zd, uint64_t id) 4601{ 4602 zfs_prop_t proplist[] = { 4603 ZFS_PROP_CHECKSUM, 4604 ZFS_PROP_COMPRESSION, 4605 ZFS_PROP_COPIES, 4606 ZFS_PROP_DEDUP 4607 }; 4608 4609 (void) rw_rdlock(&ztest_name_lock); 4610 4611 for (int p = 0; p < sizeof (proplist) / sizeof (proplist[0]); p++) 4612 (void) ztest_dsl_prop_set_uint64(zd->zd_name, proplist[p], 4613 ztest_random_dsl_prop(proplist[p]), (int)ztest_random(2)); 4614 4615 (void) rw_unlock(&ztest_name_lock); 4616} 4617 4618/* ARGSUSED */ 4619void 4620ztest_spa_prop_get_set(ztest_ds_t *zd, uint64_t id) 4621{ 4622 nvlist_t *props = NULL; 4623 4624 (void) rw_rdlock(&ztest_name_lock); 4625 4626 (void) ztest_spa_prop_set_uint64(ZPOOL_PROP_DEDUPDITTO, 4627 ZIO_DEDUPDITTO_MIN + ztest_random(ZIO_DEDUPDITTO_MIN)); 4628 4629 VERIFY0(spa_prop_get(ztest_spa, &props)); 4630 4631 if (ztest_opts.zo_verbose >= 6) 4632 dump_nvlist(props, 4); 4633 4634 nvlist_free(props); 4635 4636 (void) rw_unlock(&ztest_name_lock); 4637} 4638 4639static int 4640user_release_one(const char *snapname, const char *holdname) 4641{ 4642 nvlist_t *snaps, *holds; 4643 int error; 4644 4645 snaps = fnvlist_alloc(); 4646 holds = fnvlist_alloc(); 4647 fnvlist_add_boolean(holds, holdname); 4648 fnvlist_add_nvlist(snaps, snapname, holds); 4649 fnvlist_free(holds); 4650 error = dsl_dataset_user_release(snaps, NULL); 4651 fnvlist_free(snaps); 4652 return (error); 4653} 4654 4655/* 4656 * Test snapshot hold/release and deferred destroy. 4657 */ 4658void 4659ztest_dmu_snapshot_hold(ztest_ds_t *zd, uint64_t id) 4660{ 4661 int error; 4662 objset_t *os = zd->zd_os; 4663 objset_t *origin; 4664 char snapname[100]; 4665 char fullname[100]; 4666 char clonename[100]; 4667 char tag[100]; 4668 char osname[ZFS_MAX_DATASET_NAME_LEN]; 4669 nvlist_t *holds; 4670 4671 (void) rw_rdlock(&ztest_name_lock); 4672 4673 dmu_objset_name(os, osname); 4674 4675 (void) snprintf(snapname, sizeof (snapname), "sh1_%llu", id); 4676 (void) snprintf(fullname, sizeof (fullname), "%s@%s", osname, snapname); 4677 (void) snprintf(clonename, sizeof (clonename), 4678 "%s/ch1_%llu", osname, id); 4679 (void) snprintf(tag, sizeof (tag), "tag_%llu", id); 4680 4681 /* 4682 * Clean up from any previous run. 4683 */ 4684 error = dsl_destroy_head(clonename); 4685 if (error != ENOENT) 4686 ASSERT0(error); 4687 error = user_release_one(fullname, tag); 4688 if (error != ESRCH && error != ENOENT) 4689 ASSERT0(error); 4690 error = dsl_destroy_snapshot(fullname, B_FALSE); 4691 if (error != ENOENT) 4692 ASSERT0(error); 4693 4694 /* 4695 * Create snapshot, clone it, mark snap for deferred destroy, 4696 * destroy clone, verify snap was also destroyed. 4697 */ 4698 error = dmu_objset_snapshot_one(osname, snapname); 4699 if (error) { 4700 if (error == ENOSPC) { 4701 ztest_record_enospc("dmu_objset_snapshot"); 4702 goto out; 4703 } 4704 fatal(0, "dmu_objset_snapshot(%s) = %d", fullname, error); 4705 } 4706 4707 error = dmu_objset_clone(clonename, fullname); 4708 if (error) { 4709 if (error == ENOSPC) { 4710 ztest_record_enospc("dmu_objset_clone"); 4711 goto out; 4712 } 4713 fatal(0, "dmu_objset_clone(%s) = %d", clonename, error); 4714 } 4715 4716 error = dsl_destroy_snapshot(fullname, B_TRUE); 4717 if (error) { 4718 fatal(0, "dsl_destroy_snapshot(%s, B_TRUE) = %d", 4719 fullname, error); 4720 } 4721 4722 error = dsl_destroy_head(clonename); 4723 if (error) 4724 fatal(0, "dsl_destroy_head(%s) = %d", clonename, error); 4725 4726 error = dmu_objset_hold(fullname, FTAG, &origin); 4727 if (error != ENOENT) 4728 fatal(0, "dmu_objset_hold(%s) = %d", fullname, error); 4729 4730 /* 4731 * Create snapshot, add temporary hold, verify that we can't 4732 * destroy a held snapshot, mark for deferred destroy, 4733 * release hold, verify snapshot was destroyed. 4734 */ 4735 error = dmu_objset_snapshot_one(osname, snapname); 4736 if (error) { 4737 if (error == ENOSPC) { 4738 ztest_record_enospc("dmu_objset_snapshot"); 4739 goto out; 4740 } 4741 fatal(0, "dmu_objset_snapshot(%s) = %d", fullname, error); 4742 } 4743 4744 holds = fnvlist_alloc(); 4745 fnvlist_add_string(holds, fullname, tag); 4746 error = dsl_dataset_user_hold(holds, 0, NULL); 4747 fnvlist_free(holds); 4748 4749 if (error == ENOSPC) { 4750 ztest_record_enospc("dsl_dataset_user_hold"); 4751 goto out; 4752 } else if (error) { 4753 fatal(0, "dsl_dataset_user_hold(%s, %s) = %u", 4754 fullname, tag, error); 4755 } 4756 4757 error = dsl_destroy_snapshot(fullname, B_FALSE); 4758 if (error != EBUSY) { 4759 fatal(0, "dsl_destroy_snapshot(%s, B_FALSE) = %d", 4760 fullname, error); 4761 } 4762 4763 error = dsl_destroy_snapshot(fullname, B_TRUE); 4764 if (error) { 4765 fatal(0, "dsl_destroy_snapshot(%s, B_TRUE) = %d", 4766 fullname, error); 4767 } 4768 4769 error = user_release_one(fullname, tag); 4770 if (error) 4771 fatal(0, "user_release_one(%s, %s) = %d", fullname, tag, error); 4772 4773 VERIFY3U(dmu_objset_hold(fullname, FTAG, &origin), ==, ENOENT); 4774 4775out: 4776 (void) rw_unlock(&ztest_name_lock); 4777} 4778 4779/* 4780 * Inject random faults into the on-disk data. 4781 */ 4782/* ARGSUSED */ 4783void 4784ztest_fault_inject(ztest_ds_t *zd, uint64_t id) 4785{ 4786 ztest_shared_t *zs = ztest_shared; 4787 spa_t *spa = ztest_spa; 4788 int fd; 4789 uint64_t offset; 4790 uint64_t leaves; 4791 uint64_t bad = 0x1990c0ffeedecadeULL; 4792 uint64_t top, leaf; 4793 char path0[MAXPATHLEN]; 4794 char pathrand[MAXPATHLEN]; 4795 size_t fsize; 4796 int bshift = SPA_OLD_MAXBLOCKSHIFT + 2; /* don't scrog all labels */ 4797 int iters = 1000; 4798 int maxfaults; 4799 int mirror_save; 4800 vdev_t *vd0 = NULL; 4801 uint64_t guid0 = 0; 4802 boolean_t islog = B_FALSE; 4803 4804 VERIFY(mutex_lock(&ztest_vdev_lock) == 0); 4805 maxfaults = MAXFAULTS(); 4806 leaves = MAX(zs->zs_mirrors, 1) * ztest_opts.zo_raidz; 4807 mirror_save = zs->zs_mirrors; 4808 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0); 4809 4810 ASSERT(leaves >= 1); 4811 4812 /* 4813 * Grab the name lock as reader. There are some operations 4814 * which don't like to have their vdevs changed while 4815 * they are in progress (i.e. spa_change_guid). Those 4816 * operations will have grabbed the name lock as writer. 4817 */ 4818 (void) rw_rdlock(&ztest_name_lock); 4819 4820 /* 4821 * We need SCL_STATE here because we're going to look at vd0->vdev_tsd. 4822 */ 4823 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER); 4824 4825 if (ztest_random(2) == 0) { 4826 /* 4827 * Inject errors on a normal data device or slog device. 4828 */ 4829 top = ztest_random_vdev_top(spa, B_TRUE); 4830 leaf = ztest_random(leaves) + zs->zs_splits; 4831 4832 /* 4833 * Generate paths to the first leaf in this top-level vdev, 4834 * and to the random leaf we selected. We'll induce transient 4835 * write failures and random online/offline activity on leaf 0, 4836 * and we'll write random garbage to the randomly chosen leaf. 4837 */ 4838 (void) snprintf(path0, sizeof (path0), ztest_dev_template, 4839 ztest_opts.zo_dir, ztest_opts.zo_pool, 4840 top * leaves + zs->zs_splits); 4841 (void) snprintf(pathrand, sizeof (pathrand), ztest_dev_template, 4842 ztest_opts.zo_dir, ztest_opts.zo_pool, 4843 top * leaves + leaf); 4844 4845 vd0 = vdev_lookup_by_path(spa->spa_root_vdev, path0); 4846 if (vd0 != NULL && vd0->vdev_top->vdev_islog) 4847 islog = B_TRUE; 4848 4849 /* 4850 * If the top-level vdev needs to be resilvered 4851 * then we only allow faults on the device that is 4852 * resilvering. 4853 */ 4854 if (vd0 != NULL && maxfaults != 1 && 4855 (!vdev_resilver_needed(vd0->vdev_top, NULL, NULL) || 4856 vd0->vdev_resilver_txg != 0)) { 4857 /* 4858 * Make vd0 explicitly claim to be unreadable, 4859 * or unwriteable, or reach behind its back 4860 * and close the underlying fd. We can do this if 4861 * maxfaults == 0 because we'll fail and reexecute, 4862 * and we can do it if maxfaults >= 2 because we'll 4863 * have enough redundancy. If maxfaults == 1, the 4864 * combination of this with injection of random data 4865 * corruption below exceeds the pool's fault tolerance. 4866 */ 4867 vdev_file_t *vf = vd0->vdev_tsd; 4868 4869 if (vf != NULL && ztest_random(3) == 0) { 4870 (void) close(vf->vf_vnode->v_fd); 4871 vf->vf_vnode->v_fd = -1; 4872 } else if (ztest_random(2) == 0) { 4873 vd0->vdev_cant_read = B_TRUE; 4874 } else { 4875 vd0->vdev_cant_write = B_TRUE; 4876 } 4877 guid0 = vd0->vdev_guid; 4878 } 4879 } else { 4880 /* 4881 * Inject errors on an l2cache device. 4882 */ 4883 spa_aux_vdev_t *sav = &spa->spa_l2cache; 4884 4885 if (sav->sav_count == 0) { 4886 spa_config_exit(spa, SCL_STATE, FTAG); 4887 (void) rw_unlock(&ztest_name_lock); 4888 return; 4889 } 4890 vd0 = sav->sav_vdevs[ztest_random(sav->sav_count)]; 4891 guid0 = vd0->vdev_guid; 4892 (void) strcpy(path0, vd0->vdev_path); 4893 (void) strcpy(pathrand, vd0->vdev_path); 4894 4895 leaf = 0; 4896 leaves = 1; 4897 maxfaults = INT_MAX; /* no limit on cache devices */ 4898 } 4899 4900 spa_config_exit(spa, SCL_STATE, FTAG); 4901 (void) rw_unlock(&ztest_name_lock); 4902 4903 /* 4904 * If we can tolerate two or more faults, or we're dealing 4905 * with a slog, randomly online/offline vd0. 4906 */ 4907 if ((maxfaults >= 2 || islog) && guid0 != 0) { 4908 if (ztest_random(10) < 6) { 4909 int flags = (ztest_random(2) == 0 ? 4910 ZFS_OFFLINE_TEMPORARY : 0); 4911 4912 /* 4913 * We have to grab the zs_name_lock as writer to 4914 * prevent a race between offlining a slog and 4915 * destroying a dataset. Offlining the slog will 4916 * grab a reference on the dataset which may cause 4917 * dmu_objset_destroy() to fail with EBUSY thus 4918 * leaving the dataset in an inconsistent state. 4919 */ 4920 if (islog) 4921 (void) rw_wrlock(&ztest_name_lock); 4922 4923 VERIFY(vdev_offline(spa, guid0, flags) != EBUSY); 4924 4925 if (islog) 4926 (void) rw_unlock(&ztest_name_lock); 4927 } else { 4928 /* 4929 * Ideally we would like to be able to randomly 4930 * call vdev_[on|off]line without holding locks 4931 * to force unpredictable failures but the side 4932 * effects of vdev_[on|off]line prevent us from 4933 * doing so. We grab the ztest_vdev_lock here to 4934 * prevent a race between injection testing and 4935 * aux_vdev removal. 4936 */ 4937 VERIFY(mutex_lock(&ztest_vdev_lock) == 0); 4938 (void) vdev_online(spa, guid0, 0, NULL); 4939 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0); 4940 } 4941 } 4942 4943 if (maxfaults == 0) 4944 return; 4945 4946 /* 4947 * We have at least single-fault tolerance, so inject data corruption. 4948 */ 4949 fd = open(pathrand, O_RDWR); 4950 4951 if (fd == -1) /* we hit a gap in the device namespace */ 4952 return; 4953 4954 fsize = lseek(fd, 0, SEEK_END); 4955 4956 while (--iters != 0) { 4957 offset = ztest_random(fsize / (leaves << bshift)) * 4958 (leaves << bshift) + (leaf << bshift) + 4959 (ztest_random(1ULL << (bshift - 1)) & -8ULL); 4960 4961 if (offset >= fsize) 4962 continue; 4963 4964 VERIFY(mutex_lock(&ztest_vdev_lock) == 0); 4965 if (mirror_save != zs->zs_mirrors) { 4966 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0); 4967 (void) close(fd); 4968 return; 4969 } 4970 4971 if (pwrite(fd, &bad, sizeof (bad), offset) != sizeof (bad)) 4972 fatal(1, "can't inject bad word at 0x%llx in %s", 4973 offset, pathrand); 4974 4975 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0); 4976 4977 if (ztest_opts.zo_verbose >= 7) 4978 (void) printf("injected bad word into %s," 4979 " offset 0x%llx\n", pathrand, (u_longlong_t)offset); 4980 } 4981 4982 (void) close(fd); 4983} 4984 4985/* 4986 * Verify that DDT repair works as expected. 4987 */ 4988void 4989ztest_ddt_repair(ztest_ds_t *zd, uint64_t id) 4990{ 4991 ztest_shared_t *zs = ztest_shared; 4992 spa_t *spa = ztest_spa; 4993 objset_t *os = zd->zd_os; 4994 ztest_od_t od[1]; 4995 uint64_t object, blocksize, txg, pattern, psize; 4996 enum zio_checksum checksum = spa_dedup_checksum(spa); 4997 dmu_buf_t *db; 4998 dmu_tx_t *tx; 4999 void *buf; 5000 blkptr_t blk; 5001 int copies = 2 * ZIO_DEDUPDITTO_MIN; 5002 5003 blocksize = ztest_random_blocksize(); 5004 blocksize = MIN(blocksize, 2048); /* because we write so many */ 5005 5006 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_UINT64_OTHER, blocksize, 0); 5007 5008 if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0) 5009 return; 5010 5011 /* 5012 * Take the name lock as writer to prevent anyone else from changing 5013 * the pool and dataset properies we need to maintain during this test. 5014 */ 5015 (void) rw_wrlock(&ztest_name_lock); 5016 5017 if (ztest_dsl_prop_set_uint64(zd->zd_name, ZFS_PROP_DEDUP, checksum, 5018 B_FALSE) != 0 || 5019 ztest_dsl_prop_set_uint64(zd->zd_name, ZFS_PROP_COPIES, 1, 5020 B_FALSE) != 0) { 5021 (void) rw_unlock(&ztest_name_lock); 5022 return; 5023 } 5024 5025 object = od[0].od_object; 5026 blocksize = od[0].od_blocksize; 5027 pattern = zs->zs_guid ^ dmu_objset_fsid_guid(os); 5028 5029 ASSERT(object != 0); 5030 5031 tx = dmu_tx_create(os); 5032 dmu_tx_hold_write(tx, object, 0, copies * blocksize); 5033 txg = ztest_tx_assign(tx, TXG_WAIT, FTAG); 5034 if (txg == 0) { 5035 (void) rw_unlock(&ztest_name_lock); 5036 return; 5037 } 5038 5039 /* 5040 * Write all the copies of our block. 5041 */ 5042 for (int i = 0; i < copies; i++) { 5043 uint64_t offset = i * blocksize; 5044 int error = dmu_buf_hold(os, object, offset, FTAG, &db, 5045 DMU_READ_NO_PREFETCH); 5046 if (error != 0) { 5047 fatal(B_FALSE, "dmu_buf_hold(%p, %llu, %llu) = %u", 5048 os, (long long)object, (long long) offset, error); 5049 } 5050 ASSERT(db->db_offset == offset); 5051 ASSERT(db->db_size == blocksize); 5052 ASSERT(ztest_pattern_match(db->db_data, db->db_size, pattern) || 5053 ztest_pattern_match(db->db_data, db->db_size, 0ULL)); 5054 dmu_buf_will_fill(db, tx); 5055 ztest_pattern_set(db->db_data, db->db_size, pattern); 5056 dmu_buf_rele(db, FTAG); 5057 } 5058 5059 dmu_tx_commit(tx); 5060 txg_wait_synced(spa_get_dsl(spa), txg); 5061 5062 /* 5063 * Find out what block we got. 5064 */ 5065 VERIFY0(dmu_buf_hold(os, object, 0, FTAG, &db, 5066 DMU_READ_NO_PREFETCH)); 5067 blk = *((dmu_buf_impl_t *)db)->db_blkptr; 5068 dmu_buf_rele(db, FTAG); 5069 5070 /* 5071 * Damage the block. Dedup-ditto will save us when we read it later. 5072 */ 5073 psize = BP_GET_PSIZE(&blk); 5074 buf = zio_buf_alloc(psize); 5075 ztest_pattern_set(buf, psize, ~pattern); 5076 5077 (void) zio_wait(zio_rewrite(NULL, spa, 0, &blk, 5078 buf, psize, NULL, NULL, ZIO_PRIORITY_SYNC_WRITE, 5079 ZIO_FLAG_CANFAIL | ZIO_FLAG_INDUCE_DAMAGE, NULL)); 5080 5081 zio_buf_free(buf, psize); 5082 5083 (void) rw_unlock(&ztest_name_lock); 5084} 5085 5086/* 5087 * Scrub the pool. 5088 */ 5089/* ARGSUSED */ 5090void 5091ztest_scrub(ztest_ds_t *zd, uint64_t id) 5092{ 5093 spa_t *spa = ztest_spa; 5094 5095 (void) spa_scan(spa, POOL_SCAN_SCRUB); 5096 (void) poll(NULL, 0, 100); /* wait a moment, then force a restart */ 5097 (void) spa_scan(spa, POOL_SCAN_SCRUB); 5098} 5099 5100/* 5101 * Change the guid for the pool. 5102 */ 5103/* ARGSUSED */ 5104void 5105ztest_reguid(ztest_ds_t *zd, uint64_t id) 5106{ 5107 spa_t *spa = ztest_spa; 5108 uint64_t orig, load; 5109 int error; 5110 5111 orig = spa_guid(spa); 5112 load = spa_load_guid(spa); 5113 5114 (void) rw_wrlock(&ztest_name_lock); 5115 error = spa_change_guid(spa); 5116 (void) rw_unlock(&ztest_name_lock); 5117 5118 if (error != 0) 5119 return; 5120 5121 if (ztest_opts.zo_verbose >= 4) { 5122 (void) printf("Changed guid old %llu -> %llu\n", 5123 (u_longlong_t)orig, (u_longlong_t)spa_guid(spa)); 5124 } 5125 5126 VERIFY3U(orig, !=, spa_guid(spa)); 5127 VERIFY3U(load, ==, spa_load_guid(spa)); 5128} 5129 5130/* 5131 * Rename the pool to a different name and then rename it back. 5132 */ 5133/* ARGSUSED */ 5134void 5135ztest_spa_rename(ztest_ds_t *zd, uint64_t id) 5136{ 5137 char *oldname, *newname; 5138 spa_t *spa; 5139 5140 (void) rw_wrlock(&ztest_name_lock); 5141 5142 oldname = ztest_opts.zo_pool; 5143 newname = umem_alloc(strlen(oldname) + 5, UMEM_NOFAIL); 5144 (void) strcpy(newname, oldname); 5145 (void) strcat(newname, "_tmp"); 5146 5147 /* 5148 * Do the rename 5149 */ 5150 VERIFY3U(0, ==, spa_rename(oldname, newname)); 5151 5152 /* 5153 * Try to open it under the old name, which shouldn't exist 5154 */ 5155 VERIFY3U(ENOENT, ==, spa_open(oldname, &spa, FTAG)); 5156 5157 /* 5158 * Open it under the new name and make sure it's still the same spa_t. 5159 */ 5160 VERIFY3U(0, ==, spa_open(newname, &spa, FTAG)); 5161 5162 ASSERT(spa == ztest_spa); 5163 spa_close(spa, FTAG); 5164 5165 /* 5166 * Rename it back to the original 5167 */ 5168 VERIFY3U(0, ==, spa_rename(newname, oldname)); 5169 5170 /* 5171 * Make sure it can still be opened 5172 */ 5173 VERIFY3U(0, ==, spa_open(oldname, &spa, FTAG)); 5174 5175 ASSERT(spa == ztest_spa); 5176 spa_close(spa, FTAG); 5177 5178 umem_free(newname, strlen(newname) + 1); 5179 5180 (void) rw_unlock(&ztest_name_lock); 5181} 5182 5183/* 5184 * Verify pool integrity by running zdb. 5185 */ 5186static void 5187ztest_run_zdb(char *pool) 5188{ 5189 int status; 5190 char zdb[MAXPATHLEN + MAXNAMELEN + 20]; 5191 char zbuf[1024]; 5192 char *bin; 5193 char *ztest; 5194 char *isa; 5195 int isalen; 5196 FILE *fp; 5197 5198 strlcpy(zdb, "/usr/bin/ztest", sizeof(zdb)); 5199 5200 /* zdb lives in /usr/sbin, while ztest lives in /usr/bin */ 5201 bin = strstr(zdb, "/usr/bin/"); 5202 ztest = strstr(bin, "/ztest"); 5203 isa = bin + 8; 5204 isalen = ztest - isa; 5205 isa = strdup(isa); 5206 /* LINTED */ 5207 (void) sprintf(bin, 5208 "/usr/sbin%.*s/zdb -bcc%s%s -d -U %s %s", 5209 isalen, 5210 isa, 5211 ztest_opts.zo_verbose >= 3 ? "s" : "", 5212 ztest_opts.zo_verbose >= 4 ? "v" : "", 5213 spa_config_path, 5214 pool); 5215 free(isa); 5216 5217 if (ztest_opts.zo_verbose >= 5) 5218 (void) printf("Executing %s\n", strstr(zdb, "zdb ")); 5219 5220 fp = popen(zdb, "r"); 5221 assert(fp != NULL); 5222 5223 while (fgets(zbuf, sizeof (zbuf), fp) != NULL) 5224 if (ztest_opts.zo_verbose >= 3) 5225 (void) printf("%s", zbuf); 5226 5227 status = pclose(fp); 5228 5229 if (status == 0) 5230 return; 5231 5232 ztest_dump_core = 0; 5233 if (WIFEXITED(status)) 5234 fatal(0, "'%s' exit code %d", zdb, WEXITSTATUS(status)); 5235 else 5236 fatal(0, "'%s' died with signal %d", zdb, WTERMSIG(status)); 5237} 5238 5239static void 5240ztest_walk_pool_directory(char *header) 5241{ 5242 spa_t *spa = NULL; 5243 5244 if (ztest_opts.zo_verbose >= 6) 5245 (void) printf("%s\n", header); 5246 5247 mutex_enter(&spa_namespace_lock); 5248 while ((spa = spa_next(spa)) != NULL) 5249 if (ztest_opts.zo_verbose >= 6) 5250 (void) printf("\t%s\n", spa_name(spa)); 5251 mutex_exit(&spa_namespace_lock); 5252} 5253 5254static void 5255ztest_spa_import_export(char *oldname, char *newname) 5256{ 5257 nvlist_t *config, *newconfig; 5258 uint64_t pool_guid; 5259 spa_t *spa; 5260 int error; 5261 5262 if (ztest_opts.zo_verbose >= 4) { 5263 (void) printf("import/export: old = %s, new = %s\n", 5264 oldname, newname); 5265 } 5266 5267 /* 5268 * Clean up from previous runs. 5269 */ 5270 (void) spa_destroy(newname); 5271 5272 /* 5273 * Get the pool's configuration and guid. 5274 */ 5275 VERIFY3U(0, ==, spa_open(oldname, &spa, FTAG)); 5276 5277 /* 5278 * Kick off a scrub to tickle scrub/export races. 5279 */ 5280 if (ztest_random(2) == 0) 5281 (void) spa_scan(spa, POOL_SCAN_SCRUB); 5282 5283 pool_guid = spa_guid(spa); 5284 spa_close(spa, FTAG); 5285 5286 ztest_walk_pool_directory("pools before export"); 5287 5288 /* 5289 * Export it. 5290 */ 5291 VERIFY3U(0, ==, spa_export(oldname, &config, B_FALSE, B_FALSE)); 5292 5293 ztest_walk_pool_directory("pools after export"); 5294 5295 /* 5296 * Try to import it. 5297 */ 5298 newconfig = spa_tryimport(config); 5299 ASSERT(newconfig != NULL); 5300 nvlist_free(newconfig); 5301 5302 /* 5303 * Import it under the new name. 5304 */ 5305 error = spa_import(newname, config, NULL, 0); 5306 if (error != 0) { 5307 dump_nvlist(config, 0); 5308 fatal(B_FALSE, "couldn't import pool %s as %s: error %u", 5309 oldname, newname, error); 5310 } 5311 5312 ztest_walk_pool_directory("pools after import"); 5313 5314 /* 5315 * Try to import it again -- should fail with EEXIST. 5316 */ 5317 VERIFY3U(EEXIST, ==, spa_import(newname, config, NULL, 0)); 5318 5319 /* 5320 * Try to import it under a different name -- should fail with EEXIST. 5321 */ 5322 VERIFY3U(EEXIST, ==, spa_import(oldname, config, NULL, 0)); 5323 5324 /* 5325 * Verify that the pool is no longer visible under the old name. 5326 */ 5327 VERIFY3U(ENOENT, ==, spa_open(oldname, &spa, FTAG)); 5328 5329 /* 5330 * Verify that we can open and close the pool using the new name. 5331 */ 5332 VERIFY3U(0, ==, spa_open(newname, &spa, FTAG)); 5333 ASSERT(pool_guid == spa_guid(spa)); 5334 spa_close(spa, FTAG); 5335 5336 nvlist_free(config); 5337} 5338 5339static void 5340ztest_resume(spa_t *spa) 5341{ 5342 if (spa_suspended(spa) && ztest_opts.zo_verbose >= 6) 5343 (void) printf("resuming from suspended state\n"); 5344 spa_vdev_state_enter(spa, SCL_NONE); 5345 vdev_clear(spa, NULL); 5346 (void) spa_vdev_state_exit(spa, NULL, 0); 5347 (void) zio_resume(spa); 5348} 5349 5350static void * 5351ztest_resume_thread(void *arg) 5352{ 5353 spa_t *spa = arg; 5354 5355 while (!ztest_exiting) { 5356 if (spa_suspended(spa)) 5357 ztest_resume(spa); 5358 (void) poll(NULL, 0, 100); 5359 5360 /* 5361 * Periodically change the zfs_compressed_arc_enabled setting. 5362 */ 5363 if (ztest_random(10) == 0) 5364 zfs_compressed_arc_enabled = ztest_random(2); 5365 } 5366 return (NULL); 5367} 5368 5369static void * 5370ztest_deadman_thread(void *arg) 5371{ 5372 ztest_shared_t *zs = arg; 5373 spa_t *spa = ztest_spa; 5374 hrtime_t delta, total = 0; 5375 5376 for (;;) { 5377 delta = zs->zs_thread_stop - zs->zs_thread_start + 5378 MSEC2NSEC(zfs_deadman_synctime_ms); 5379 5380 (void) poll(NULL, 0, (int)NSEC2MSEC(delta)); 5381 5382 /* 5383 * If the pool is suspended then fail immediately. Otherwise, 5384 * check to see if the pool is making any progress. If 5385 * vdev_deadman() discovers that there hasn't been any recent 5386 * I/Os then it will end up aborting the tests. 5387 */ 5388 if (spa_suspended(spa) || spa->spa_root_vdev == NULL) { 5389 fatal(0, "aborting test after %llu seconds because " 5390 "pool has transitioned to a suspended state.", 5391 zfs_deadman_synctime_ms / 1000); 5392 return (NULL); 5393 } 5394 vdev_deadman(spa->spa_root_vdev); 5395 5396 total += zfs_deadman_synctime_ms/1000; 5397 (void) printf("ztest has been running for %lld seconds\n", 5398 total); 5399 } 5400} 5401 5402static void 5403ztest_execute(int test, ztest_info_t *zi, uint64_t id) 5404{ 5405 ztest_ds_t *zd = &ztest_ds[id % ztest_opts.zo_datasets]; 5406 ztest_shared_callstate_t *zc = ZTEST_GET_SHARED_CALLSTATE(test); 5407 hrtime_t functime = gethrtime(); 5408 5409 for (int i = 0; i < zi->zi_iters; i++) 5410 zi->zi_func(zd, id); 5411 5412 functime = gethrtime() - functime; 5413 5414 atomic_add_64(&zc->zc_count, 1); 5415 atomic_add_64(&zc->zc_time, functime); 5416 5417 if (ztest_opts.zo_verbose >= 4) { 5418 Dl_info dli; 5419 (void) dladdr((void *)zi->zi_func, &dli); 5420 (void) printf("%6.2f sec in %s\n", 5421 (double)functime / NANOSEC, dli.dli_sname); 5422 } 5423} 5424 5425static void * 5426ztest_thread(void *arg) 5427{ 5428 int rand; 5429 uint64_t id = (uintptr_t)arg; 5430 ztest_shared_t *zs = ztest_shared; 5431 uint64_t call_next; 5432 hrtime_t now; 5433 ztest_info_t *zi; 5434 ztest_shared_callstate_t *zc; 5435 5436 while ((now = gethrtime()) < zs->zs_thread_stop) { 5437 /* 5438 * See if it's time to force a crash. 5439 */ 5440 if (now > zs->zs_thread_kill) 5441 ztest_kill(zs); 5442 5443 /* 5444 * If we're getting ENOSPC with some regularity, stop. 5445 */ 5446 if (zs->zs_enospc_count > 10) 5447 break; 5448 5449 /* 5450 * Pick a random function to execute. 5451 */ 5452 rand = ztest_random(ZTEST_FUNCS); 5453 zi = &ztest_info[rand]; 5454 zc = ZTEST_GET_SHARED_CALLSTATE(rand); 5455 call_next = zc->zc_next; 5456 5457 if (now >= call_next && 5458 atomic_cas_64(&zc->zc_next, call_next, call_next + 5459 ztest_random(2 * zi->zi_interval[0] + 1)) == call_next) { 5460 ztest_execute(rand, zi, id); 5461 } 5462 } 5463 5464 return (NULL); 5465} 5466 5467static void 5468ztest_dataset_name(char *dsname, char *pool, int d) 5469{ 5470 (void) snprintf(dsname, ZFS_MAX_DATASET_NAME_LEN, "%s/ds_%d", pool, d); 5471} 5472 5473static void 5474ztest_dataset_destroy(int d) 5475{ 5476 char name[ZFS_MAX_DATASET_NAME_LEN]; 5477 5478 ztest_dataset_name(name, ztest_opts.zo_pool, d); 5479 5480 if (ztest_opts.zo_verbose >= 3) 5481 (void) printf("Destroying %s to free up space\n", name); 5482 5483 /* 5484 * Cleanup any non-standard clones and snapshots. In general, 5485 * ztest thread t operates on dataset (t % zopt_datasets), 5486 * so there may be more than one thing to clean up. 5487 */ 5488 for (int t = d; t < ztest_opts.zo_threads; 5489 t += ztest_opts.zo_datasets) { 5490 ztest_dsl_dataset_cleanup(name, t); 5491 } 5492 5493 (void) dmu_objset_find(name, ztest_objset_destroy_cb, NULL, 5494 DS_FIND_SNAPSHOTS | DS_FIND_CHILDREN); 5495} 5496 5497static void 5498ztest_dataset_dirobj_verify(ztest_ds_t *zd) 5499{ 5500 uint64_t usedobjs, dirobjs, scratch; 5501 5502 /* 5503 * ZTEST_DIROBJ is the object directory for the entire dataset. 5504 * Therefore, the number of objects in use should equal the 5505 * number of ZTEST_DIROBJ entries, +1 for ZTEST_DIROBJ itself. 5506 * If not, we have an object leak. 5507 * 5508 * Note that we can only check this in ztest_dataset_open(), 5509 * when the open-context and syncing-context values agree. 5510 * That's because zap_count() returns the open-context value, 5511 * while dmu_objset_space() returns the rootbp fill count. 5512 */ 5513 VERIFY3U(0, ==, zap_count(zd->zd_os, ZTEST_DIROBJ, &dirobjs)); 5514 dmu_objset_space(zd->zd_os, &scratch, &scratch, &usedobjs, &scratch); 5515 ASSERT3U(dirobjs + 1, ==, usedobjs); 5516} 5517 5518static int 5519ztest_dataset_open(int d) 5520{ 5521 ztest_ds_t *zd = &ztest_ds[d]; 5522 uint64_t committed_seq = ZTEST_GET_SHARED_DS(d)->zd_seq; 5523 objset_t *os; 5524 zilog_t *zilog; 5525 char name[ZFS_MAX_DATASET_NAME_LEN]; 5526 int error; 5527 5528 ztest_dataset_name(name, ztest_opts.zo_pool, d); 5529 5530 (void) rw_rdlock(&ztest_name_lock); 5531 5532 error = ztest_dataset_create(name); 5533 if (error == ENOSPC) { 5534 (void) rw_unlock(&ztest_name_lock); 5535 ztest_record_enospc(FTAG); 5536 return (error); 5537 } 5538 ASSERT(error == 0 || error == EEXIST); 5539 5540 VERIFY0(dmu_objset_own(name, DMU_OST_OTHER, B_FALSE, zd, &os)); 5541 (void) rw_unlock(&ztest_name_lock); 5542 5543 ztest_zd_init(zd, ZTEST_GET_SHARED_DS(d), os); 5544 5545 zilog = zd->zd_zilog; 5546 5547 if (zilog->zl_header->zh_claim_lr_seq != 0 && 5548 zilog->zl_header->zh_claim_lr_seq < committed_seq) 5549 fatal(0, "missing log records: claimed %llu < committed %llu", 5550 zilog->zl_header->zh_claim_lr_seq, committed_seq); 5551 5552 ztest_dataset_dirobj_verify(zd); 5553 5554 zil_replay(os, zd, ztest_replay_vector); 5555 5556 ztest_dataset_dirobj_verify(zd); 5557 5558 if (ztest_opts.zo_verbose >= 6) 5559 (void) printf("%s replay %llu blocks, %llu records, seq %llu\n", 5560 zd->zd_name, 5561 (u_longlong_t)zilog->zl_parse_blk_count, 5562 (u_longlong_t)zilog->zl_parse_lr_count, 5563 (u_longlong_t)zilog->zl_replaying_seq); 5564 5565 zilog = zil_open(os, ztest_get_data); 5566 5567 if (zilog->zl_replaying_seq != 0 && 5568 zilog->zl_replaying_seq < committed_seq) 5569 fatal(0, "missing log records: replayed %llu < committed %llu", 5570 zilog->zl_replaying_seq, committed_seq); 5571 5572 return (0); 5573} 5574 5575static void 5576ztest_dataset_close(int d) 5577{ 5578 ztest_ds_t *zd = &ztest_ds[d]; 5579 5580 zil_close(zd->zd_zilog); 5581 dmu_objset_disown(zd->zd_os, zd); 5582 5583 ztest_zd_fini(zd); 5584} 5585 5586/* 5587 * Kick off threads to run tests on all datasets in parallel. 5588 */ 5589static void 5590ztest_run(ztest_shared_t *zs) 5591{ 5592 thread_t *tid; 5593 spa_t *spa; 5594 objset_t *os; 5595 thread_t resume_tid; 5596 int error; 5597 5598 ztest_exiting = B_FALSE; 5599 5600 /* 5601 * Initialize parent/child shared state. 5602 */ 5603 VERIFY(_mutex_init(&ztest_vdev_lock, USYNC_THREAD, NULL) == 0); 5604 VERIFY(rwlock_init(&ztest_name_lock, USYNC_THREAD, NULL) == 0); 5605 5606 zs->zs_thread_start = gethrtime(); 5607 zs->zs_thread_stop = 5608 zs->zs_thread_start + ztest_opts.zo_passtime * NANOSEC; 5609 zs->zs_thread_stop = MIN(zs->zs_thread_stop, zs->zs_proc_stop); 5610 zs->zs_thread_kill = zs->zs_thread_stop; 5611 if (ztest_random(100) < ztest_opts.zo_killrate) { 5612 zs->zs_thread_kill -= 5613 ztest_random(ztest_opts.zo_passtime * NANOSEC); 5614 } 5615 5616 (void) _mutex_init(&zcl.zcl_callbacks_lock, USYNC_THREAD, NULL); 5617 5618 list_create(&zcl.zcl_callbacks, sizeof (ztest_cb_data_t), 5619 offsetof(ztest_cb_data_t, zcd_node)); 5620 5621 /* 5622 * Open our pool. 5623 */ 5624 kernel_init(FREAD | FWRITE); 5625 VERIFY0(spa_open(ztest_opts.zo_pool, &spa, FTAG)); 5626 spa->spa_debug = B_TRUE; 5627 metaslab_preload_limit = ztest_random(20) + 1; 5628 ztest_spa = spa; 5629 5630 VERIFY0(dmu_objset_own(ztest_opts.zo_pool, 5631 DMU_OST_ANY, B_TRUE, FTAG, &os)); 5632 zs->zs_guid = dmu_objset_fsid_guid(os); 5633 dmu_objset_disown(os, FTAG); 5634 5635 spa->spa_dedup_ditto = 2 * ZIO_DEDUPDITTO_MIN; 5636 5637 /* 5638 * We don't expect the pool to suspend unless maxfaults == 0, 5639 * in which case ztest_fault_inject() temporarily takes away 5640 * the only valid replica. 5641 */ 5642 if (MAXFAULTS() == 0) 5643 spa->spa_failmode = ZIO_FAILURE_MODE_WAIT; 5644 else 5645 spa->spa_failmode = ZIO_FAILURE_MODE_PANIC; 5646 5647 /* 5648 * Create a thread to periodically resume suspended I/O. 5649 */ 5650 VERIFY(thr_create(0, 0, ztest_resume_thread, spa, THR_BOUND, 5651 &resume_tid) == 0); 5652 5653 /* 5654 * Create a deadman thread to abort() if we hang. 5655 */ 5656 VERIFY(thr_create(0, 0, ztest_deadman_thread, zs, THR_BOUND, 5657 NULL) == 0); 5658 5659 /* 5660 * Verify that we can safely inquire about about any object, 5661 * whether it's allocated or not. To make it interesting, 5662 * we probe a 5-wide window around each power of two. 5663 * This hits all edge cases, including zero and the max. 5664 */ 5665 for (int t = 0; t < 64; t++) { 5666 for (int d = -5; d <= 5; d++) { 5667 error = dmu_object_info(spa->spa_meta_objset, 5668 (1ULL << t) + d, NULL); 5669 ASSERT(error == 0 || error == ENOENT || 5670 error == EINVAL); 5671 } 5672 } 5673 5674 /* 5675 * If we got any ENOSPC errors on the previous run, destroy something. 5676 */ 5677 if (zs->zs_enospc_count != 0) { 5678 int d = ztest_random(ztest_opts.zo_datasets); 5679 ztest_dataset_destroy(d); 5680 } 5681 zs->zs_enospc_count = 0; 5682 5683 tid = umem_zalloc(ztest_opts.zo_threads * sizeof (thread_t), 5684 UMEM_NOFAIL); 5685 5686 if (ztest_opts.zo_verbose >= 4) 5687 (void) printf("starting main threads...\n"); 5688 5689 /* 5690 * Kick off all the tests that run in parallel. 5691 */ 5692 for (int t = 0; t < ztest_opts.zo_threads; t++) { 5693 if (t < ztest_opts.zo_datasets && 5694 ztest_dataset_open(t) != 0) 5695 return; 5696 VERIFY(thr_create(0, 0, ztest_thread, (void *)(uintptr_t)t, 5697 THR_BOUND, &tid[t]) == 0); 5698 } 5699 5700 /* 5701 * Wait for all of the tests to complete. We go in reverse order 5702 * so we don't close datasets while threads are still using them. 5703 */ 5704 for (int t = ztest_opts.zo_threads - 1; t >= 0; t--) { 5705 VERIFY(thr_join(tid[t], NULL, NULL) == 0); 5706 if (t < ztest_opts.zo_datasets) 5707 ztest_dataset_close(t); 5708 } 5709 5710 txg_wait_synced(spa_get_dsl(spa), 0); 5711 5712 zs->zs_alloc = metaslab_class_get_alloc(spa_normal_class(spa)); 5713 zs->zs_space = metaslab_class_get_space(spa_normal_class(spa)); 5714 zfs_dbgmsg_print(FTAG); 5715 5716 umem_free(tid, ztest_opts.zo_threads * sizeof (thread_t)); 5717 5718 /* Kill the resume thread */ 5719 ztest_exiting = B_TRUE; 5720 VERIFY(thr_join(resume_tid, NULL, NULL) == 0); 5721 ztest_resume(spa); 5722 5723 /* 5724 * Right before closing the pool, kick off a bunch of async I/O; 5725 * spa_close() should wait for it to complete. 5726 */ 5727 for (uint64_t object = 1; object < 50; object++) { 5728 dmu_prefetch(spa->spa_meta_objset, object, 0, 0, 1ULL << 20, 5729 ZIO_PRIORITY_SYNC_READ); 5730 } 5731 5732 spa_close(spa, FTAG); 5733 5734 /* 5735 * Verify that we can loop over all pools. 5736 */ 5737 mutex_enter(&spa_namespace_lock); 5738 for (spa = spa_next(NULL); spa != NULL; spa = spa_next(spa)) 5739 if (ztest_opts.zo_verbose > 3) 5740 (void) printf("spa_next: found %s\n", spa_name(spa)); 5741 mutex_exit(&spa_namespace_lock); 5742 5743 /* 5744 * Verify that we can export the pool and reimport it under a 5745 * different name. 5746 */ 5747 if (ztest_random(2) == 0) { 5748 char name[ZFS_MAX_DATASET_NAME_LEN]; 5749 (void) snprintf(name, sizeof (name), "%s_import", 5750 ztest_opts.zo_pool); 5751 ztest_spa_import_export(ztest_opts.zo_pool, name); 5752 ztest_spa_import_export(name, ztest_opts.zo_pool); 5753 } 5754 5755 kernel_fini(); 5756 5757 list_destroy(&zcl.zcl_callbacks); 5758 5759 (void) _mutex_destroy(&zcl.zcl_callbacks_lock); 5760 5761 (void) rwlock_destroy(&ztest_name_lock); 5762 (void) _mutex_destroy(&ztest_vdev_lock); 5763} 5764 5765static void 5766ztest_freeze(void) 5767{ 5768 ztest_ds_t *zd = &ztest_ds[0]; 5769 spa_t *spa; 5770 int numloops = 0; 5771 5772 if (ztest_opts.zo_verbose >= 3) 5773 (void) printf("testing spa_freeze()...\n"); 5774 5775 kernel_init(FREAD | FWRITE); 5776 VERIFY3U(0, ==, spa_open(ztest_opts.zo_pool, &spa, FTAG)); 5777 VERIFY3U(0, ==, ztest_dataset_open(0)); 5778 spa->spa_debug = B_TRUE; 5779 ztest_spa = spa; 5780 5781 /* 5782 * Force the first log block to be transactionally allocated. 5783 * We have to do this before we freeze the pool -- otherwise 5784 * the log chain won't be anchored. 5785 */ 5786 while (BP_IS_HOLE(&zd->zd_zilog->zl_header->zh_log)) { 5787 ztest_dmu_object_alloc_free(zd, 0); 5788 zil_commit(zd->zd_zilog, 0); 5789 } 5790 5791 txg_wait_synced(spa_get_dsl(spa), 0); 5792 5793 /* 5794 * Freeze the pool. This stops spa_sync() from doing anything, 5795 * so that the only way to record changes from now on is the ZIL. 5796 */ 5797 spa_freeze(spa); 5798 5799 /* 5800 * Because it is hard to predict how much space a write will actually 5801 * require beforehand, we leave ourselves some fudge space to write over 5802 * capacity. 5803 */ 5804 uint64_t capacity = metaslab_class_get_space(spa_normal_class(spa)) / 2; 5805 5806 /* 5807 * Run tests that generate log records but don't alter the pool config 5808 * or depend on DSL sync tasks (snapshots, objset create/destroy, etc). 5809 * We do a txg_wait_synced() after each iteration to force the txg 5810 * to increase well beyond the last synced value in the uberblock. 5811 * The ZIL should be OK with that. 5812 * 5813 * Run a random number of times less than zo_maxloops and ensure we do 5814 * not run out of space on the pool. 5815 */ 5816 while (ztest_random(10) != 0 && 5817 numloops++ < ztest_opts.zo_maxloops && 5818 metaslab_class_get_alloc(spa_normal_class(spa)) < capacity) { 5819 ztest_od_t od; 5820 ztest_od_init(&od, 0, FTAG, 0, DMU_OT_UINT64_OTHER, 0, 0); 5821 VERIFY0(ztest_object_init(zd, &od, sizeof (od), B_FALSE)); 5822 ztest_io(zd, od.od_object, 5823 ztest_random(ZTEST_RANGE_LOCKS) << SPA_MAXBLOCKSHIFT); 5824 txg_wait_synced(spa_get_dsl(spa), 0); 5825 } 5826 5827 /* 5828 * Commit all of the changes we just generated. 5829 */ 5830 zil_commit(zd->zd_zilog, 0); 5831 txg_wait_synced(spa_get_dsl(spa), 0); 5832 5833 /* 5834 * Close our dataset and close the pool. 5835 */ 5836 ztest_dataset_close(0); 5837 spa_close(spa, FTAG); 5838 kernel_fini(); 5839 5840 /* 5841 * Open and close the pool and dataset to induce log replay. 5842 */ 5843 kernel_init(FREAD | FWRITE); 5844 VERIFY3U(0, ==, spa_open(ztest_opts.zo_pool, &spa, FTAG)); 5845 ASSERT(spa_freeze_txg(spa) == UINT64_MAX); 5846 VERIFY3U(0, ==, ztest_dataset_open(0)); 5847 ztest_dataset_close(0); 5848 5849 spa->spa_debug = B_TRUE; 5850 ztest_spa = spa; 5851 txg_wait_synced(spa_get_dsl(spa), 0); 5852 ztest_reguid(NULL, 0); 5853 5854 spa_close(spa, FTAG); 5855 kernel_fini(); 5856} 5857 5858void 5859print_time(hrtime_t t, char *timebuf) 5860{ 5861 hrtime_t s = t / NANOSEC; 5862 hrtime_t m = s / 60; 5863 hrtime_t h = m / 60; 5864 hrtime_t d = h / 24; 5865 5866 s -= m * 60; 5867 m -= h * 60; 5868 h -= d * 24; 5869 5870 timebuf[0] = '\0'; 5871 5872 if (d) 5873 (void) sprintf(timebuf, 5874 "%llud%02lluh%02llum%02llus", d, h, m, s); 5875 else if (h) 5876 (void) sprintf(timebuf, "%lluh%02llum%02llus", h, m, s); 5877 else if (m) 5878 (void) sprintf(timebuf, "%llum%02llus", m, s); 5879 else 5880 (void) sprintf(timebuf, "%llus", s); 5881} 5882 5883static nvlist_t * 5884make_random_props() 5885{ 5886 nvlist_t *props; 5887 5888 VERIFY(nvlist_alloc(&props, NV_UNIQUE_NAME, 0) == 0); 5889 if (ztest_random(2) == 0) 5890 return (props); 5891 VERIFY(nvlist_add_uint64(props, "autoreplace", 1) == 0); 5892 5893 return (props); 5894} 5895 5896/* 5897 * Create a storage pool with the given name and initial vdev size. 5898 * Then test spa_freeze() functionality. 5899 */ 5900static void 5901ztest_init(ztest_shared_t *zs) 5902{ 5903 spa_t *spa; 5904 nvlist_t *nvroot, *props; 5905 5906 VERIFY(_mutex_init(&ztest_vdev_lock, USYNC_THREAD, NULL) == 0); 5907 VERIFY(rwlock_init(&ztest_name_lock, USYNC_THREAD, NULL) == 0); 5908 5909 kernel_init(FREAD | FWRITE); 5910 5911 /* 5912 * Create the storage pool. 5913 */ 5914 (void) spa_destroy(ztest_opts.zo_pool); 5915 ztest_shared->zs_vdev_next_leaf = 0; 5916 zs->zs_splits = 0; 5917 zs->zs_mirrors = ztest_opts.zo_mirrors; 5918 nvroot = make_vdev_root(NULL, NULL, NULL, ztest_opts.zo_vdev_size, 0, 5919 0, ztest_opts.zo_raidz, zs->zs_mirrors, 1); 5920 props = make_random_props(); 5921 for (int i = 0; i < SPA_FEATURES; i++) { 5922 char buf[1024]; 5923 (void) snprintf(buf, sizeof (buf), "feature@%s", 5924 spa_feature_table[i].fi_uname); 5925 VERIFY3U(0, ==, nvlist_add_uint64(props, buf, 0)); 5926 } 5927 VERIFY3U(0, ==, spa_create(ztest_opts.zo_pool, nvroot, props, NULL)); 5928 nvlist_free(nvroot); 5929 nvlist_free(props); 5930 5931 VERIFY3U(0, ==, spa_open(ztest_opts.zo_pool, &spa, FTAG)); 5932 zs->zs_metaslab_sz = 5933 1ULL << spa->spa_root_vdev->vdev_child[0]->vdev_ms_shift; 5934 5935 spa_close(spa, FTAG); 5936 5937 kernel_fini(); 5938 5939 ztest_run_zdb(ztest_opts.zo_pool); 5940 5941 ztest_freeze(); 5942 5943 ztest_run_zdb(ztest_opts.zo_pool); 5944 5945 (void) rwlock_destroy(&ztest_name_lock); 5946 (void) _mutex_destroy(&ztest_vdev_lock); 5947} 5948 5949static void 5950setup_data_fd(void) 5951{ 5952 static char ztest_name_data[] = "/tmp/ztest.data.XXXXXX"; 5953 5954 ztest_fd_data = mkstemp(ztest_name_data); 5955 ASSERT3S(ztest_fd_data, >=, 0); 5956 (void) unlink(ztest_name_data); 5957} 5958 5959 5960static int 5961shared_data_size(ztest_shared_hdr_t *hdr) 5962{ 5963 int size; 5964 5965 size = hdr->zh_hdr_size; 5966 size += hdr->zh_opts_size; 5967 size += hdr->zh_size; 5968 size += hdr->zh_stats_size * hdr->zh_stats_count; 5969 size += hdr->zh_ds_size * hdr->zh_ds_count; 5970 5971 return (size); 5972} 5973 5974static void 5975setup_hdr(void) 5976{ 5977 int size; 5978 ztest_shared_hdr_t *hdr; 5979 5980 hdr = (void *)mmap(0, P2ROUNDUP(sizeof (*hdr), getpagesize()), 5981 PROT_READ | PROT_WRITE, MAP_SHARED, ztest_fd_data, 0); 5982 ASSERT(hdr != MAP_FAILED); 5983 5984 VERIFY3U(0, ==, ftruncate(ztest_fd_data, sizeof (ztest_shared_hdr_t))); 5985 5986 hdr->zh_hdr_size = sizeof (ztest_shared_hdr_t); 5987 hdr->zh_opts_size = sizeof (ztest_shared_opts_t); 5988 hdr->zh_size = sizeof (ztest_shared_t); 5989 hdr->zh_stats_size = sizeof (ztest_shared_callstate_t); 5990 hdr->zh_stats_count = ZTEST_FUNCS; 5991 hdr->zh_ds_size = sizeof (ztest_shared_ds_t); 5992 hdr->zh_ds_count = ztest_opts.zo_datasets; 5993 5994 size = shared_data_size(hdr); 5995 VERIFY3U(0, ==, ftruncate(ztest_fd_data, size)); 5996 5997 (void) munmap((caddr_t)hdr, P2ROUNDUP(sizeof (*hdr), getpagesize())); 5998} 5999 6000static void 6001setup_data(void) 6002{ 6003 int size, offset; 6004 ztest_shared_hdr_t *hdr; 6005 uint8_t *buf; 6006 6007 hdr = (void *)mmap(0, P2ROUNDUP(sizeof (*hdr), getpagesize()), 6008 PROT_READ, MAP_SHARED, ztest_fd_data, 0); 6009 ASSERT(hdr != MAP_FAILED); 6010 6011 size = shared_data_size(hdr); 6012 6013 (void) munmap((caddr_t)hdr, P2ROUNDUP(sizeof (*hdr), getpagesize())); 6014 hdr = ztest_shared_hdr = (void *)mmap(0, P2ROUNDUP(size, getpagesize()), 6015 PROT_READ | PROT_WRITE, MAP_SHARED, ztest_fd_data, 0); 6016 ASSERT(hdr != MAP_FAILED); 6017 buf = (uint8_t *)hdr; 6018 6019 offset = hdr->zh_hdr_size; 6020 ztest_shared_opts = (void *)&buf[offset]; 6021 offset += hdr->zh_opts_size; 6022 ztest_shared = (void *)&buf[offset]; 6023 offset += hdr->zh_size; 6024 ztest_shared_callstate = (void *)&buf[offset]; 6025 offset += hdr->zh_stats_size * hdr->zh_stats_count; 6026 ztest_shared_ds = (void *)&buf[offset]; 6027} 6028 6029static boolean_t 6030exec_child(char *cmd, char *libpath, boolean_t ignorekill, int *statusp) 6031{ 6032 pid_t pid; 6033 int status; 6034 char *cmdbuf = NULL; 6035 6036 pid = fork(); 6037 6038 if (cmd == NULL) { 6039 cmdbuf = umem_alloc(MAXPATHLEN, UMEM_NOFAIL); 6040 (void) strlcpy(cmdbuf, getexecname(), MAXPATHLEN); 6041 cmd = cmdbuf; 6042 } 6043 6044 if (pid == -1) 6045 fatal(1, "fork failed"); 6046 6047 if (pid == 0) { /* child */ 6048 char *emptyargv[2] = { cmd, NULL }; 6049 char fd_data_str[12]; 6050 6051 struct rlimit rl = { 1024, 1024 }; 6052 (void) setrlimit(RLIMIT_NOFILE, &rl); 6053 6054 (void) close(ztest_fd_rand); 6055 VERIFY3U(11, >=, 6056 snprintf(fd_data_str, 12, "%d", ztest_fd_data)); 6057 VERIFY0(setenv("ZTEST_FD_DATA", fd_data_str, 1)); 6058 6059 (void) enable_extended_FILE_stdio(-1, -1); 6060 if (libpath != NULL) 6061 VERIFY(0 == setenv("LD_LIBRARY_PATH", libpath, 1)); 6062#ifdef illumos 6063 (void) execv(cmd, emptyargv); 6064#else 6065 (void) execvp(cmd, emptyargv); 6066#endif 6067 ztest_dump_core = B_FALSE; 6068 fatal(B_TRUE, "exec failed: %s", cmd); 6069 } 6070 6071 if (cmdbuf != NULL) { 6072 umem_free(cmdbuf, MAXPATHLEN); 6073 cmd = NULL; 6074 } 6075 6076 while (waitpid(pid, &status, 0) != pid) 6077 continue; 6078 if (statusp != NULL) 6079 *statusp = status; 6080 6081 if (WIFEXITED(status)) { 6082 if (WEXITSTATUS(status) != 0) { 6083 (void) fprintf(stderr, "child exited with code %d\n", 6084 WEXITSTATUS(status)); 6085 exit(2); 6086 } 6087 return (B_FALSE); 6088 } else if (WIFSIGNALED(status)) { 6089 if (!ignorekill || WTERMSIG(status) != SIGKILL) { 6090 (void) fprintf(stderr, "child died with signal %d\n", 6091 WTERMSIG(status)); 6092 exit(3); 6093 } 6094 return (B_TRUE); 6095 } else { 6096 (void) fprintf(stderr, "something strange happened to child\n"); 6097 exit(4); 6098 /* NOTREACHED */ 6099 } 6100} 6101 6102static void 6103ztest_run_init(void) 6104{ 6105 ztest_shared_t *zs = ztest_shared; 6106 6107 ASSERT(ztest_opts.zo_init != 0); 6108 6109 /* 6110 * Blow away any existing copy of zpool.cache 6111 */ 6112 (void) remove(spa_config_path); 6113 6114 /* 6115 * Create and initialize our storage pool. 6116 */ 6117 for (int i = 1; i <= ztest_opts.zo_init; i++) { 6118 bzero(zs, sizeof (ztest_shared_t)); 6119 if (ztest_opts.zo_verbose >= 3 && 6120 ztest_opts.zo_init != 1) { 6121 (void) printf("ztest_init(), pass %d\n", i); 6122 } 6123 ztest_init(zs); 6124 } 6125} 6126 6127int 6128main(int argc, char **argv) 6129{ 6130 int kills = 0; 6131 int iters = 0; 6132 int older = 0; 6133 int newer = 0; 6134 ztest_shared_t *zs; 6135 ztest_info_t *zi; 6136 ztest_shared_callstate_t *zc; 6137 char timebuf[100]; 6138 char numbuf[6]; 6139 spa_t *spa; 6140 char *cmd; 6141 boolean_t hasalt; 6142 char *fd_data_str = getenv("ZTEST_FD_DATA"); 6143 6144 (void) setvbuf(stdout, NULL, _IOLBF, 0); 6145 6146 dprintf_setup(&argc, argv); 6147 zfs_deadman_synctime_ms = 300000; 6148 6149 ztest_fd_rand = open("/dev/urandom", O_RDONLY); 6150 ASSERT3S(ztest_fd_rand, >=, 0); 6151 6152 if (!fd_data_str) { 6153 process_options(argc, argv); 6154 6155 setup_data_fd(); 6156 setup_hdr(); 6157 setup_data(); 6158 bcopy(&ztest_opts, ztest_shared_opts, 6159 sizeof (*ztest_shared_opts)); 6160 } else { 6161 ztest_fd_data = atoi(fd_data_str); 6162 setup_data(); 6163 bcopy(ztest_shared_opts, &ztest_opts, sizeof (ztest_opts)); 6164 } 6165 ASSERT3U(ztest_opts.zo_datasets, ==, ztest_shared_hdr->zh_ds_count); 6166 6167 /* Override location of zpool.cache */ 6168 VERIFY3U(asprintf((char **)&spa_config_path, "%s/zpool.cache", 6169 ztest_opts.zo_dir), !=, -1); 6170 6171 ztest_ds = umem_alloc(ztest_opts.zo_datasets * sizeof (ztest_ds_t), 6172 UMEM_NOFAIL); 6173 zs = ztest_shared; 6174 6175 if (fd_data_str) { 6176 metaslab_gang_bang = ztest_opts.zo_metaslab_gang_bang; 6177 metaslab_df_alloc_threshold = 6178 zs->zs_metaslab_df_alloc_threshold; 6179 6180 if (zs->zs_do_init) 6181 ztest_run_init(); 6182 else 6183 ztest_run(zs); 6184 exit(0); 6185 } 6186 6187 hasalt = (strlen(ztest_opts.zo_alt_ztest) != 0); 6188 6189 if (ztest_opts.zo_verbose >= 1) { 6190 (void) printf("%llu vdevs, %d datasets, %d threads," 6191 " %llu seconds...\n", 6192 (u_longlong_t)ztest_opts.zo_vdevs, 6193 ztest_opts.zo_datasets, 6194 ztest_opts.zo_threads, 6195 (u_longlong_t)ztest_opts.zo_time); 6196 } 6197 6198 cmd = umem_alloc(MAXNAMELEN, UMEM_NOFAIL); 6199 (void) strlcpy(cmd, getexecname(), MAXNAMELEN); 6200 6201 zs->zs_do_init = B_TRUE; 6202 if (strlen(ztest_opts.zo_alt_ztest) != 0) { 6203 if (ztest_opts.zo_verbose >= 1) { 6204 (void) printf("Executing older ztest for " 6205 "initialization: %s\n", ztest_opts.zo_alt_ztest); 6206 } 6207 VERIFY(!exec_child(ztest_opts.zo_alt_ztest, 6208 ztest_opts.zo_alt_libpath, B_FALSE, NULL)); 6209 } else { 6210 VERIFY(!exec_child(NULL, NULL, B_FALSE, NULL)); 6211 } 6212 zs->zs_do_init = B_FALSE; 6213 6214 zs->zs_proc_start = gethrtime(); 6215 zs->zs_proc_stop = zs->zs_proc_start + ztest_opts.zo_time * NANOSEC; 6216 6217 for (int f = 0; f < ZTEST_FUNCS; f++) { 6218 zi = &ztest_info[f]; 6219 zc = ZTEST_GET_SHARED_CALLSTATE(f); 6220 if (zs->zs_proc_start + zi->zi_interval[0] > zs->zs_proc_stop) 6221 zc->zc_next = UINT64_MAX; 6222 else 6223 zc->zc_next = zs->zs_proc_start + 6224 ztest_random(2 * zi->zi_interval[0] + 1); 6225 } 6226 6227 /* 6228 * Run the tests in a loop. These tests include fault injection 6229 * to verify that self-healing data works, and forced crashes 6230 * to verify that we never lose on-disk consistency. 6231 */ 6232 while (gethrtime() < zs->zs_proc_stop) { 6233 int status; 6234 boolean_t killed; 6235 6236 /* 6237 * Initialize the workload counters for each function. 6238 */ 6239 for (int f = 0; f < ZTEST_FUNCS; f++) { 6240 zc = ZTEST_GET_SHARED_CALLSTATE(f); 6241 zc->zc_count = 0; 6242 zc->zc_time = 0; 6243 } 6244 6245 /* Set the allocation switch size */ 6246 zs->zs_metaslab_df_alloc_threshold = 6247 ztest_random(zs->zs_metaslab_sz / 4) + 1; 6248 6249 if (!hasalt || ztest_random(2) == 0) { 6250 if (hasalt && ztest_opts.zo_verbose >= 1) { 6251 (void) printf("Executing newer ztest: %s\n", 6252 cmd); 6253 } 6254 newer++; 6255 killed = exec_child(cmd, NULL, B_TRUE, &status); 6256 } else { 6257 if (hasalt && ztest_opts.zo_verbose >= 1) { 6258 (void) printf("Executing older ztest: %s\n", 6259 ztest_opts.zo_alt_ztest); 6260 } 6261 older++; 6262 killed = exec_child(ztest_opts.zo_alt_ztest, 6263 ztest_opts.zo_alt_libpath, B_TRUE, &status); 6264 } 6265 6266 if (killed) 6267 kills++; 6268 iters++; 6269 6270 if (ztest_opts.zo_verbose >= 1) { 6271 hrtime_t now = gethrtime(); 6272 6273 now = MIN(now, zs->zs_proc_stop); 6274 print_time(zs->zs_proc_stop - now, timebuf); 6275 nicenum(zs->zs_space, numbuf); 6276 6277 (void) printf("Pass %3d, %8s, %3llu ENOSPC, " 6278 "%4.1f%% of %5s used, %3.0f%% done, %8s to go\n", 6279 iters, 6280 WIFEXITED(status) ? "Complete" : "SIGKILL", 6281 (u_longlong_t)zs->zs_enospc_count, 6282 100.0 * zs->zs_alloc / zs->zs_space, 6283 numbuf, 6284 100.0 * (now - zs->zs_proc_start) / 6285 (ztest_opts.zo_time * NANOSEC), timebuf); 6286 } 6287 6288 if (ztest_opts.zo_verbose >= 2) { 6289 (void) printf("\nWorkload summary:\n\n"); 6290 (void) printf("%7s %9s %s\n", 6291 "Calls", "Time", "Function"); 6292 (void) printf("%7s %9s %s\n", 6293 "-----", "----", "--------"); 6294 for (int f = 0; f < ZTEST_FUNCS; f++) { 6295 Dl_info dli; 6296 6297 zi = &ztest_info[f]; 6298 zc = ZTEST_GET_SHARED_CALLSTATE(f); 6299 print_time(zc->zc_time, timebuf); 6300 (void) dladdr((void *)zi->zi_func, &dli); 6301 (void) printf("%7llu %9s %s\n", 6302 (u_longlong_t)zc->zc_count, timebuf, 6303 dli.dli_sname); 6304 } 6305 (void) printf("\n"); 6306 } 6307 6308 /* 6309 * It's possible that we killed a child during a rename test, 6310 * in which case we'll have a 'ztest_tmp' pool lying around 6311 * instead of 'ztest'. Do a blind rename in case this happened. 6312 */ 6313 kernel_init(FREAD); 6314 if (spa_open(ztest_opts.zo_pool, &spa, FTAG) == 0) { 6315 spa_close(spa, FTAG); 6316 } else { 6317 char tmpname[ZFS_MAX_DATASET_NAME_LEN]; 6318 kernel_fini(); 6319 kernel_init(FREAD | FWRITE); 6320 (void) snprintf(tmpname, sizeof (tmpname), "%s_tmp", 6321 ztest_opts.zo_pool); 6322 (void) spa_rename(tmpname, ztest_opts.zo_pool); 6323 } 6324 kernel_fini(); 6325 6326 ztest_run_zdb(ztest_opts.zo_pool); 6327 } 6328 6329 if (ztest_opts.zo_verbose >= 1) { 6330 if (hasalt) { 6331 (void) printf("%d runs of older ztest: %s\n", older, 6332 ztest_opts.zo_alt_ztest); 6333 (void) printf("%d runs of newer ztest: %s\n", newer, 6334 cmd); 6335 } 6336 (void) printf("%d killed, %d completed, %.0f%% kill rate\n", 6337 kills, iters - kills, (100.0 * kills) / MAX(1, iters)); 6338 } 6339 6340 umem_free(cmd, MAXNAMELEN); 6341 6342 return (0); 6343} 6344