/* ztest.c revision 254074 */
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012 by Delphix. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
 * Copyright (c) 2012 Martin Matuska <mm@FreeBSD.org>.  All rights reserved.
 * Copyright (c) 2013 Steven Hartland. All rights reserved.
 */

/*
 * The objective of this program is to provide a DMU/ZAP/SPA stress test
 * that runs entirely in userland, is easy to use, and easy to extend.
 *
 * The overall design of the ztest program is as follows:
 *
 * (1) For each major functional area (e.g. adding vdevs to a pool,
 *     creating and destroying datasets, reading and writing objects, etc)
 *     we have a simple routine to test that functionality.  These
 *     individual routines do not have to do anything "stressful".
 *
 * (2) We turn these simple functionality tests into a stress test by
 *     running them all in parallel, with as many threads as desired,
 *     and spread across as many datasets, objects, and vdevs as desired.
 *
 * (3) While all this is happening, we inject faults into the pool to
 *     verify that self-healing data really works.
 *
 * (4) Every time we open a dataset, we change its checksum and compression
 *     functions.  Thus even individual objects vary from block to block
 *     in which checksum they use and whether they're compressed.
 *
 * (5) To verify that we never lose on-disk consistency after a crash,
 *     we run the entire test in a child of the main process.
 *     At random times, the child self-immolates with a SIGKILL.
 *     This is the software equivalent of pulling the power cord.
 *     The parent then runs the test again, using the existing
 *     storage pool, as many times as desired.  If backwards compatibility
 *     testing is enabled ztest will sometimes run the "older" version
 *     of ztest after a SIGKILL.
 *
 * (6) To verify that we don't have future leaks or temporal incursions,
 *     many of the functional tests record the transaction group number
 *     as part of their data.  When reading old data, they verify that
 *     the transaction group number is less than the current, open txg.
 *     If you add a new test, please do this if applicable.
 *
 * When run with no arguments, ztest runs for about five minutes and
 * produces no output if successful.  To get a little bit of information,
 * specify -V.  To get more information, specify -VV, and so on.
 *
 * To turn this into an overnight stress test, use -T to specify run time.
 *
 * You can ask for more vdevs [-v], datasets [-d], or threads [-t]
 * to increase the pool capacity, fanout, and overall stress level.
 *
 * Use the -k option to set the desired frequency of kills.
 *
 * When ztest invokes itself it passes all relevant information through a
 * temporary file which is mmap-ed in the child process.  This allows shared
 * memory to survive the exec syscall.  The ztest_shared_hdr_t struct is always
 * stored at offset 0 of this file and contains information on the size and
 * number of shared structures in the file.  The information stored in this file
 * must remain backwards compatible with older versions of ztest so that
 * ztest can invoke them during backwards compatibility testing (-B).
 */

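/*
 * For illustration only -- a minimal sketch (with hypothetical names, not
 * ztest's actual setup code) of the mmap-over-exec technique described
 * above: a MAP_SHARED mapping of a temporary file carries state across
 * exec(), since the file contents outlive the address space.
 *
 *	int fd = mkstemp(tmpname);
 *	(void) ftruncate(fd, sizeof (ztest_shared_hdr_t));
 *	ztest_shared_hdr_t *hdr = mmap(NULL, sizeof (*hdr),
 *	    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	hdr->zh_hdr_size = sizeof (*hdr);	(visible to the exec'd child)
 */
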
#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/dmu.h>
#include <sys/txg.h>
#include <sys/dbuf.h>
#include <sys/zap.h>
#include <sys/dmu_objset.h>
#include <sys/poll.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/wait.h>
#include <sys/mman.h>
#include <sys/resource.h>
#include <sys/zio.h>
#include <sys/zil.h>
#include <sys/zil_impl.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_file.h>
#include <sys/spa_impl.h>
#include <sys/metaslab_impl.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_destroy.h>
#include <sys/dsl_scan.h>
#include <sys/zio_checksum.h>
#include <sys/refcount.h>
#include <sys/zfeature.h>
#include <sys/dsl_userhold.h>
#include <stdio.h>
#include <stdio_ext.h>
#include <stdlib.h>
#include <unistd.h>
#include <signal.h>
#include <umem.h>
#include <dlfcn.h>
#include <ctype.h>
#include <math.h>
#include <errno.h>
#include <sys/fs/zfs.h>
#include <libnvpair.h>

static int ztest_fd_data = -1;
static int ztest_fd_rand = -1;

typedef struct ztest_shared_hdr {
	uint64_t	zh_hdr_size;
	uint64_t	zh_opts_size;
	uint64_t	zh_size;
	uint64_t	zh_stats_size;
	uint64_t	zh_stats_count;
	uint64_t	zh_ds_size;
	uint64_t	zh_ds_count;
} ztest_shared_hdr_t;

static ztest_shared_hdr_t *ztest_shared_hdr;

typedef struct ztest_shared_opts {
	char zo_pool[MAXNAMELEN];
	char zo_dir[MAXNAMELEN];
	char zo_alt_ztest[MAXNAMELEN];
	char zo_alt_libpath[MAXNAMELEN];
	uint64_t zo_vdevs;
	uint64_t zo_vdevtime;
	size_t zo_vdev_size;
	int zo_ashift;
	int zo_mirrors;
	int zo_raidz;
	int zo_raidz_parity;
	int zo_datasets;
	int zo_threads;
	uint64_t zo_passtime;
	uint64_t zo_killrate;
	int zo_verbose;
	int zo_init;
	uint64_t zo_time;
	uint64_t zo_maxloops;
	uint64_t zo_metaslab_gang_bang;
} ztest_shared_opts_t;

static const ztest_shared_opts_t ztest_opts_defaults = {
	.zo_pool = { 'z', 't', 'e', 's', 't', '\0' },
	.zo_dir = { '/', 't', 'm', 'p', '\0' },
	.zo_alt_ztest = { '\0' },
	.zo_alt_libpath = { '\0' },
	.zo_vdevs = 5,
	.zo_ashift = SPA_MINBLOCKSHIFT,
	.zo_mirrors = 2,
	.zo_raidz = 4,
	.zo_raidz_parity = 1,
	.zo_vdev_size = SPA_MINDEVSIZE,
	.zo_datasets = 7,
	.zo_threads = 23,
	.zo_passtime = 60,		/* 60 seconds */
	.zo_killrate = 70,		/* 70% kill rate */
	.zo_verbose = 0,
	.zo_init = 1,
	.zo_time = 300,			/* 5 minutes */
	.zo_maxloops = 50,		/* max loops during spa_freeze() */
	.zo_metaslab_gang_bang = 32 << 10
};

extern uint64_t metaslab_gang_bang;
extern uint64_t metaslab_df_alloc_threshold;
extern uint64_t zfs_deadman_synctime;

static ztest_shared_opts_t *ztest_shared_opts;
static ztest_shared_opts_t ztest_opts;

typedef struct ztest_shared_ds {
	uint64_t	zd_seq;
} ztest_shared_ds_t;

static ztest_shared_ds_t *ztest_shared_ds;
#define	ZTEST_GET_SHARED_DS(d) (&ztest_shared_ds[d])

#define	BT_MAGIC	0x123456789abcdefULL
#define	MAXFAULTS() \
	(MAX(zs->zs_mirrors, 1) * (ztest_opts.zo_raidz_parity + 1) - 1)

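/*
 * With the defaults above (2-way mirrors, single-parity raidz),
 * MAXFAULTS() works out to MAX(2, 1) * (1 + 1) - 1 = 3, i.e. up to
 * three simultaneous faults can be tolerated during fault injection.
 */
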
enum ztest_io_type {
	ZTEST_IO_WRITE_TAG,
	ZTEST_IO_WRITE_PATTERN,
	ZTEST_IO_WRITE_ZEROES,
	ZTEST_IO_TRUNCATE,
	ZTEST_IO_SETATTR,
	ZTEST_IO_REWRITE,
	ZTEST_IO_TYPES
};

typedef struct ztest_block_tag {
	uint64_t	bt_magic;
	uint64_t	bt_objset;
	uint64_t	bt_object;
	uint64_t	bt_offset;
	uint64_t	bt_gen;
	uint64_t	bt_txg;
	uint64_t	bt_crtxg;
} ztest_block_tag_t;

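/*
 * Block tags are the mechanism behind design point (6) above: writers
 * embed the current txg (bt_txg) and generation in the data, and readers
 * later assert via ztest_bt_verify() that on-disk tags never claim a txg
 * from the future.
 */
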
typedef struct bufwad {
	uint64_t	bw_index;
	uint64_t	bw_txg;
	uint64_t	bw_data;
} bufwad_t;

/*
 * XXX -- fix zfs range locks to be generic so we can use them here.
 */
typedef enum {
	RL_READER,
	RL_WRITER,
	RL_APPEND
} rl_type_t;

typedef struct rll {
	void		*rll_writer;
	int		rll_readers;
	mutex_t		rll_lock;
	cond_t		rll_cv;
} rll_t;

typedef struct rl {
	uint64_t	rl_object;
	uint64_t	rl_offset;
	uint64_t	rl_size;
	rll_t		*rl_lock;
} rl_t;

#define	ZTEST_RANGE_LOCKS	64
#define	ZTEST_OBJECT_LOCKS	64

/*
 * Object descriptor.  Used as a template for object lookup/create/remove.
 */
typedef struct ztest_od {
	uint64_t	od_dir;
	uint64_t	od_object;
	dmu_object_type_t od_type;
	dmu_object_type_t od_crtype;
	uint64_t	od_blocksize;
	uint64_t	od_crblocksize;
	uint64_t	od_gen;
	uint64_t	od_crgen;
	char		od_name[MAXNAMELEN];
} ztest_od_t;

/*
 * Per-dataset state.
 */
typedef struct ztest_ds {
	ztest_shared_ds_t *zd_shared;
	objset_t	*zd_os;
	rwlock_t	zd_zilog_lock;
	zilog_t		*zd_zilog;
	ztest_od_t	*zd_od;		/* debugging aid */
	char		zd_name[MAXNAMELEN];
	mutex_t		zd_dirobj_lock;
	rll_t		zd_object_lock[ZTEST_OBJECT_LOCKS];
	rll_t		zd_range_lock[ZTEST_RANGE_LOCKS];
} ztest_ds_t;

/*
 * Per-iteration state.
 */
typedef void ztest_func_t(ztest_ds_t *zd, uint64_t id);

typedef struct ztest_info {
	ztest_func_t	*zi_func;	/* test function */
	uint64_t	zi_iters;	/* iterations per execution */
	uint64_t	*zi_interval;	/* execute every <interval> seconds */
} ztest_info_t;

typedef struct ztest_shared_callstate {
	uint64_t	zc_count;	/* per-pass count */
	uint64_t	zc_time;	/* per-pass time */
	uint64_t	zc_next;	/* next time to call this function */
} ztest_shared_callstate_t;

static ztest_shared_callstate_t *ztest_shared_callstate;
#define	ZTEST_GET_SHARED_CALLSTATE(c) (&ztest_shared_callstate[c])

/*
 * Note: these aren't static because we want dladdr() to work.
 */
ztest_func_t ztest_dmu_read_write;
ztest_func_t ztest_dmu_write_parallel;
ztest_func_t ztest_dmu_object_alloc_free;
ztest_func_t ztest_dmu_commit_callbacks;
ztest_func_t ztest_zap;
ztest_func_t ztest_zap_parallel;
ztest_func_t ztest_zil_commit;
ztest_func_t ztest_zil_remount;
ztest_func_t ztest_dmu_read_write_zcopy;
ztest_func_t ztest_dmu_objset_create_destroy;
ztest_func_t ztest_dmu_prealloc;
ztest_func_t ztest_fzap;
ztest_func_t ztest_dmu_snapshot_create_destroy;
ztest_func_t ztest_dsl_prop_get_set;
ztest_func_t ztest_spa_prop_get_set;
ztest_func_t ztest_spa_create_destroy;
ztest_func_t ztest_fault_inject;
ztest_func_t ztest_ddt_repair;
ztest_func_t ztest_dmu_snapshot_hold;
ztest_func_t ztest_spa_rename;
ztest_func_t ztest_scrub;
ztest_func_t ztest_dsl_dataset_promote_busy;
ztest_func_t ztest_vdev_attach_detach;
ztest_func_t ztest_vdev_LUN_growth;
ztest_func_t ztest_vdev_add_remove;
ztest_func_t ztest_vdev_aux_add_remove;
ztest_func_t ztest_split_pool;
ztest_func_t ztest_reguid;
ztest_func_t ztest_spa_upgrade;

uint64_t zopt_always = 0ULL * NANOSEC;		/* all the time */
uint64_t zopt_incessant = 1ULL * NANOSEC / 10;	/* every 1/10 second */
uint64_t zopt_often = 1ULL * NANOSEC;		/* every second */
uint64_t zopt_sometimes = 10ULL * NANOSEC;	/* every 10 seconds */
uint64_t zopt_rarely = 60ULL * NANOSEC;		/* every 60 seconds */

ztest_info_t ztest_info[] = {
	{ ztest_dmu_read_write,			1,	&zopt_always	},
	{ ztest_dmu_write_parallel,		10,	&zopt_always	},
	{ ztest_dmu_object_alloc_free,		1,	&zopt_always	},
	{ ztest_dmu_commit_callbacks,		1,	&zopt_always	},
	{ ztest_zap,				30,	&zopt_always	},
	{ ztest_zap_parallel,			100,	&zopt_always	},
	{ ztest_split_pool,			1,	&zopt_always	},
	{ ztest_zil_commit,			1,	&zopt_incessant	},
	{ ztest_zil_remount,			1,	&zopt_sometimes	},
	{ ztest_dmu_read_write_zcopy,		1,	&zopt_often	},
	{ ztest_dmu_objset_create_destroy,	1,	&zopt_often	},
	{ ztest_dsl_prop_get_set,		1,	&zopt_often	},
	{ ztest_spa_prop_get_set,		1,	&zopt_sometimes	},
#if 0
	{ ztest_dmu_prealloc,			1,	&zopt_sometimes	},
#endif
	{ ztest_fzap,				1,	&zopt_sometimes	},
	{ ztest_dmu_snapshot_create_destroy,	1,	&zopt_sometimes	},
	{ ztest_spa_create_destroy,		1,	&zopt_sometimes	},
	{ ztest_fault_inject,			1,	&zopt_sometimes	},
	{ ztest_ddt_repair,			1,	&zopt_sometimes	},
	{ ztest_dmu_snapshot_hold,		1,	&zopt_sometimes	},
	{ ztest_reguid,				1,	&zopt_rarely	},
	{ ztest_spa_rename,			1,	&zopt_rarely	},
	{ ztest_scrub,				1,	&zopt_rarely	},
	{ ztest_spa_upgrade,			1,	&zopt_rarely	},
	{ ztest_dsl_dataset_promote_busy,	1,	&zopt_rarely	},
	{ ztest_vdev_attach_detach,		1,	&zopt_sometimes	},
	{ ztest_vdev_LUN_growth,		1,	&zopt_rarely	},
	{ ztest_vdev_add_remove,		1,
	    &ztest_opts.zo_vdevtime	},
	{ ztest_vdev_aux_add_remove,		1,
	    &ztest_opts.zo_vdevtime	},
};

#define	ZTEST_FUNCS	(sizeof (ztest_info) / sizeof (ztest_info_t))

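/*
 * Despite the "seconds" wording in ztest_info_t, the zopt_* intervals
 * above are nanosecond values (multiples of NANOSEC), compared against
 * hrtime_t timestamps and recorded in zc_next as the next eligible call
 * time; a &zopt_rarely entry therefore fires roughly once per minute.
 */
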
/*
 * The following struct is used to hold a list of uncalled commit callbacks.
 * The callbacks are ordered by txg number.
 */
typedef struct ztest_cb_list {
	mutex_t	zcl_callbacks_lock;
	list_t	zcl_callbacks;
} ztest_cb_list_t;

/*
 * Stuff we need to share writably between parent and child.
 */
typedef struct ztest_shared {
	boolean_t	zs_do_init;
	hrtime_t	zs_proc_start;
	hrtime_t	zs_proc_stop;
	hrtime_t	zs_thread_start;
	hrtime_t	zs_thread_stop;
	hrtime_t	zs_thread_kill;
	uint64_t	zs_enospc_count;
	uint64_t	zs_vdev_next_leaf;
	uint64_t	zs_vdev_aux;
	uint64_t	zs_alloc;
	uint64_t	zs_space;
	uint64_t	zs_splits;
	uint64_t	zs_mirrors;
	uint64_t	zs_metaslab_sz;
	uint64_t	zs_metaslab_df_alloc_threshold;
	uint64_t	zs_guid;
} ztest_shared_t;

#define	ID_PARALLEL	-1ULL

static char ztest_dev_template[] = "%s/%s.%llua";
static char ztest_aux_template[] = "%s/%s.%s.%llu";
ztest_shared_t *ztest_shared;

static spa_t *ztest_spa = NULL;
static ztest_ds_t *ztest_ds;

static mutex_t ztest_vdev_lock;

/*
 * The ztest_name_lock protects the pool and dataset namespace used by
 * the individual tests.  To modify the namespace, consumers must grab
 * this lock as writer.  Grabbing the lock as reader will ensure that the
 * namespace does not change while the lock is held.
 */
static rwlock_t ztest_name_lock;

static boolean_t ztest_dump_core = B_TRUE;
static boolean_t ztest_exiting;

/* Global commit callback list */
static ztest_cb_list_t zcl;

enum ztest_object {
	ZTEST_META_DNODE = 0,
	ZTEST_DIROBJ,
	ZTEST_OBJECTS
};

static void usage(boolean_t) __NORETURN;

/*
 * These libumem hooks provide a reasonable set of defaults for the allocator's
 * debugging facilities.
 */
const char *
_umem_debug_init()
{
	return ("default,verbose");	/* $UMEM_DEBUG setting */
}

const char *
_umem_logging_init(void)
{
	return ("fail,contents");	/* $UMEM_LOGGING setting */
}

#define	FATAL_MSG_SZ	1024

char *fatal_msg;

static void
fatal(int do_perror, char *message, ...)
{
	va_list args;
	int save_errno = errno;
	char buf[FATAL_MSG_SZ];

	(void) fflush(stdout);

	va_start(args, message);
	(void) sprintf(buf, "ztest: ");
	/* LINTED */
	(void) vsprintf(buf + strlen(buf), message, args);
	va_end(args);
	if (do_perror) {
		(void) snprintf(buf + strlen(buf), FATAL_MSG_SZ - strlen(buf),
		    ": %s", strerror(save_errno));
	}
	(void) fprintf(stderr, "%s\n", buf);
	fatal_msg = buf;			/* to ease debugging */
	if (ztest_dump_core)
		abort();
	exit(3);
}

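/*
 * Callers pass do_perror != 0 when errno is meaningful, e.g.
 *
 *	if (fd == -1)
 *		fatal(1, "can't open %s", path);
 *
 * which appends ": <strerror(errno)>" to the message before aborting.
 */
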
static int
str2shift(const char *buf)
{
	const char *ends = "BKMGTPEZ";
	int i;

	if (buf[0] == '\0')
		return (0);
	for (i = 0; i < strlen(ends); i++) {
		if (toupper(buf[0]) == ends[i])
			break;
	}
	if (i == strlen(ends)) {
		(void) fprintf(stderr, "ztest: invalid bytes suffix: %s\n",
		    buf);
		usage(B_FALSE);
	}
	if (buf[1] == '\0' || (toupper(buf[1]) == 'B' && buf[2] == '\0')) {
		return (10*i);
	}
	(void) fprintf(stderr, "ztest: invalid bytes suffix: %s\n", buf);
	usage(B_FALSE);
	/* NOTREACHED */
}

static uint64_t
nicenumtoull(const char *buf)
{
	char *end;
	uint64_t val;

	val = strtoull(buf, &end, 0);
	if (end == buf) {
		(void) fprintf(stderr, "ztest: bad numeric value: %s\n", buf);
		usage(B_FALSE);
	} else if (end[0] == '.') {
		double fval = strtod(buf, &end);
		fval *= pow(2, str2shift(end));
		if (fval > UINT64_MAX) {
			(void) fprintf(stderr, "ztest: value too large: %s\n",
			    buf);
			usage(B_FALSE);
		}
		val = (uint64_t)fval;
	} else {
		int shift = str2shift(end);
		if (shift >= 64 || (val << shift) >> shift != val) {
			(void) fprintf(stderr, "ztest: value too large: %s\n",
			    buf);
			usage(B_FALSE);
		}
		val <<= shift;
	}
	return (val);
}

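/*
 * Examples: "8K" parses as 8 << 10 = 8192; "1.5G" takes the strtod()
 * path and yields (uint64_t)(1.5 * 2^30) = 1610612736.  A bare "B"
 * suffix contributes shift 0, so "128" and "128b" are equivalent.
 */
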
static void
usage(boolean_t requested)
{
	const ztest_shared_opts_t *zo = &ztest_opts_defaults;

	char nice_vdev_size[10];
	char nice_gang_bang[10];
	FILE *fp = requested ? stdout : stderr;

	nicenum(zo->zo_vdev_size, nice_vdev_size);
	nicenum(zo->zo_metaslab_gang_bang, nice_gang_bang);

	(void) fprintf(fp, "Usage: %s\n"
	    "\t[-v vdevs (default: %llu)]\n"
	    "\t[-s size_of_each_vdev (default: %s)]\n"
	    "\t[-a alignment_shift (default: %d)] use 0 for random\n"
	    "\t[-m mirror_copies (default: %d)]\n"
	    "\t[-r raidz_disks (default: %d)]\n"
	    "\t[-R raidz_parity (default: %d)]\n"
	    "\t[-d datasets (default: %d)]\n"
	    "\t[-t threads (default: %d)]\n"
	    "\t[-g gang_block_threshold (default: %s)]\n"
	    "\t[-i init_count (default: %d)] initialize pool i times\n"
	    "\t[-k kill_percentage (default: %llu%%)]\n"
	    "\t[-p pool_name (default: %s)]\n"
	    "\t[-f dir (default: %s)] file directory for vdev files\n"
	    "\t[-V] verbose (use multiple times for ever more blather)\n"
	    "\t[-E] use existing pool instead of creating new one\n"
	    "\t[-T time (default: %llu sec)] total run time\n"
	    "\t[-F freezeloops (default: %llu)] max loops in spa_freeze()\n"
	    "\t[-P passtime (default: %llu sec)] time per pass\n"
	    "\t[-B alt_ztest (default: <none>)] alternate ztest path\n"
	    "\t[-h] (print help)\n"
	    "",
	    zo->zo_pool,
	    (u_longlong_t)zo->zo_vdevs,			/* -v */
	    nice_vdev_size,				/* -s */
	    zo->zo_ashift,				/* -a */
	    zo->zo_mirrors,				/* -m */
	    zo->zo_raidz,				/* -r */
	    zo->zo_raidz_parity,			/* -R */
	    zo->zo_datasets,				/* -d */
	    zo->zo_threads,				/* -t */
	    nice_gang_bang,				/* -g */
	    zo->zo_init,				/* -i */
	    (u_longlong_t)zo->zo_killrate,		/* -k */
	    zo->zo_pool,				/* -p */
	    zo->zo_dir,					/* -f */
	    (u_longlong_t)zo->zo_time,			/* -T */
	    (u_longlong_t)zo->zo_maxloops,		/* -F */
	    (u_longlong_t)zo->zo_passtime);
	exit(requested ? 0 : 1);
}

static void
process_options(int argc, char **argv)
{
	char *path;
	ztest_shared_opts_t *zo = &ztest_opts;

	int opt;
	uint64_t value;
	char altdir[MAXNAMELEN] = { 0 };

	bcopy(&ztest_opts_defaults, zo, sizeof (*zo));

	while ((opt = getopt(argc, argv,
	    "v:s:a:m:r:R:d:t:g:i:k:p:f:VET:P:hF:B:")) != EOF) {
		value = 0;
		switch (opt) {
		case 'v':
		case 's':
		case 'a':
		case 'm':
		case 'r':
		case 'R':
		case 'd':
		case 't':
		case 'g':
		case 'i':
		case 'k':
		case 'T':
		case 'P':
		case 'F':
			value = nicenumtoull(optarg);
		}
		switch (opt) {
		case 'v':
			zo->zo_vdevs = value;
			break;
		case 's':
			zo->zo_vdev_size = MAX(SPA_MINDEVSIZE, value);
			break;
		case 'a':
			zo->zo_ashift = value;
			break;
		case 'm':
			zo->zo_mirrors = value;
			break;
		case 'r':
			zo->zo_raidz = MAX(1, value);
			break;
		case 'R':
			zo->zo_raidz_parity = MIN(MAX(value, 1), 3);
			break;
		case 'd':
			zo->zo_datasets = MAX(1, value);
			break;
		case 't':
			zo->zo_threads = MAX(1, value);
			break;
		case 'g':
			zo->zo_metaslab_gang_bang = MAX(SPA_MINBLOCKSIZE << 1,
			    value);
			break;
		case 'i':
			zo->zo_init = value;
			break;
		case 'k':
			zo->zo_killrate = value;
			break;
		case 'p':
			(void) strlcpy(zo->zo_pool, optarg,
			    sizeof (zo->zo_pool));
			break;
		case 'f':
			path = realpath(optarg, NULL);
			if (path == NULL) {
				(void) fprintf(stderr, "error: %s: %s\n",
				    optarg, strerror(errno));
				usage(B_FALSE);
			} else {
				(void) strlcpy(zo->zo_dir, path,
				    sizeof (zo->zo_dir));
			}
			break;
		case 'V':
			zo->zo_verbose++;
			break;
		case 'E':
			zo->zo_init = 0;
			break;
		case 'T':
			zo->zo_time = value;
			break;
		case 'P':
			zo->zo_passtime = MAX(1, value);
			break;
		case 'F':
			zo->zo_maxloops = MAX(1, value);
			break;
		case 'B':
			(void) strlcpy(altdir, optarg, sizeof (altdir));
			break;
		case 'h':
			usage(B_TRUE);
			break;
		case '?':
		default:
			usage(B_FALSE);
			break;
		}
	}

	zo->zo_raidz_parity = MIN(zo->zo_raidz_parity, zo->zo_raidz - 1);

	zo->zo_vdevtime =
	    (zo->zo_vdevs > 0 ? zo->zo_time * NANOSEC / zo->zo_vdevs :
	    UINT64_MAX >> 2);

	if (strlen(altdir) > 0) {
		char *cmd;
		char *realaltdir;
		char *bin;
		char *ztest;
		char *isa;
		int isalen;

		cmd = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);
		realaltdir = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);

		VERIFY(NULL != realpath(getexecname(), cmd));
		if (0 != access(altdir, F_OK)) {
			ztest_dump_core = B_FALSE;
			fatal(B_TRUE, "invalid alternate ztest path: %s",
			    altdir);
		}
		VERIFY(NULL != realpath(altdir, realaltdir));

		/*
		 * 'cmd' should be of the form "<anything>/usr/bin/<isa>/ztest".
		 * We want to extract <isa> to determine if we should use
		 * 32 or 64 bit binaries.
		 */
		bin = strstr(cmd, "/usr/bin/");
		ztest = strstr(bin, "/ztest");
		isa = bin + 9;
		isalen = ztest - isa;
		(void) snprintf(zo->zo_alt_ztest, sizeof (zo->zo_alt_ztest),
		    "%s/usr/bin/%.*s/ztest", realaltdir, isalen, isa);
		(void) snprintf(zo->zo_alt_libpath, sizeof (zo->zo_alt_libpath),
		    "%s/usr/lib/%.*s", realaltdir, isalen, isa);

		if (0 != access(zo->zo_alt_ztest, X_OK)) {
			ztest_dump_core = B_FALSE;
			fatal(B_TRUE, "invalid alternate ztest: %s",
			    zo->zo_alt_ztest);
		} else if (0 != access(zo->zo_alt_libpath, X_OK)) {
			ztest_dump_core = B_FALSE;
			fatal(B_TRUE, "invalid alternate lib directory %s",
			    zo->zo_alt_libpath);
		}

		umem_free(cmd, MAXPATHLEN);
		umem_free(realaltdir, MAXPATHLEN);
	}
}

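/*
 * Example of the ISA extraction above: if the running binary resolves to
 * "/usr/bin/amd64/ztest" and -B names "/altroot", then isa is "amd64",
 * zo_alt_ztest becomes "/altroot/usr/bin/amd64/ztest" and zo_alt_libpath
 * becomes "/altroot/usr/lib/amd64".
 */
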
static void
ztest_kill(ztest_shared_t *zs)
{
	zs->zs_alloc = metaslab_class_get_alloc(spa_normal_class(ztest_spa));
	zs->zs_space = metaslab_class_get_space(spa_normal_class(ztest_spa));
	(void) kill(getpid(), SIGKILL);
}

static uint64_t
ztest_random(uint64_t range)
{
	uint64_t r;

	ASSERT3S(ztest_fd_rand, >=, 0);

	if (range == 0)
		return (0);

	if (read(ztest_fd_rand, &r, sizeof (r)) != sizeof (r))
		fatal(1, "short read from /dev/urandom");

	return (r % range);
}

/* ARGSUSED */
static void
ztest_record_enospc(const char *s)
{
	ztest_shared->zs_enospc_count++;
}

static uint64_t
ztest_get_ashift(void)
{
	if (ztest_opts.zo_ashift == 0)
		return (SPA_MINBLOCKSHIFT + ztest_random(3));
	return (ztest_opts.zo_ashift);
}

static nvlist_t *
make_vdev_file(char *path, char *aux, char *pool, size_t size, uint64_t ashift)
{
	char pathbuf[MAXPATHLEN];
	uint64_t vdev;
	nvlist_t *file;

	if (ashift == 0)
		ashift = ztest_get_ashift();

	if (path == NULL) {
		path = pathbuf;

		if (aux != NULL) {
			vdev = ztest_shared->zs_vdev_aux;
			(void) snprintf(path, sizeof (pathbuf),
			    ztest_aux_template, ztest_opts.zo_dir,
			    pool == NULL ? ztest_opts.zo_pool : pool,
			    aux, vdev);
		} else {
			vdev = ztest_shared->zs_vdev_next_leaf++;
			(void) snprintf(path, sizeof (pathbuf),
			    ztest_dev_template, ztest_opts.zo_dir,
			    pool == NULL ? ztest_opts.zo_pool : pool, vdev);
		}
	}

	if (size != 0) {
		int fd = open(path, O_RDWR | O_CREAT | O_TRUNC, 0666);
		if (fd == -1)
			fatal(1, "can't open %s", path);
		if (ftruncate(fd, size) != 0)
			fatal(1, "can't ftruncate %s", path);
		(void) close(fd);
	}

	VERIFY(nvlist_alloc(&file, NV_UNIQUE_NAME, 0) == 0);
	VERIFY(nvlist_add_string(file, ZPOOL_CONFIG_TYPE, VDEV_TYPE_FILE) == 0);
	VERIFY(nvlist_add_string(file, ZPOOL_CONFIG_PATH, path) == 0);
	VERIFY(nvlist_add_uint64(file, ZPOOL_CONFIG_ASHIFT, ashift) == 0);

	return (file);
}

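/*
 * With the default options, ztest_dev_template ("%s/%s.%llua") expands
 * to leaf vdev paths like "/tmp/ztest.0a", "/tmp/ztest.1a", ...; aux
 * vdevs use ztest_aux_template, e.g. "/tmp/ztest.spare.0".
 */
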
static nvlist_t *
make_vdev_raidz(char *path, char *aux, char *pool, size_t size,
    uint64_t ashift, int r)
{
	nvlist_t *raidz, **child;
	int c;

	if (r < 2)
		return (make_vdev_file(path, aux, pool, size, ashift));
	child = umem_alloc(r * sizeof (nvlist_t *), UMEM_NOFAIL);

	for (c = 0; c < r; c++)
		child[c] = make_vdev_file(path, aux, pool, size, ashift);

	VERIFY(nvlist_alloc(&raidz, NV_UNIQUE_NAME, 0) == 0);
	VERIFY(nvlist_add_string(raidz, ZPOOL_CONFIG_TYPE,
	    VDEV_TYPE_RAIDZ) == 0);
	VERIFY(nvlist_add_uint64(raidz, ZPOOL_CONFIG_NPARITY,
	    ztest_opts.zo_raidz_parity) == 0);
	VERIFY(nvlist_add_nvlist_array(raidz, ZPOOL_CONFIG_CHILDREN,
	    child, r) == 0);

	for (c = 0; c < r; c++)
		nvlist_free(child[c]);

	umem_free(child, r * sizeof (nvlist_t *));

	return (raidz);
}

static nvlist_t *
make_vdev_mirror(char *path, char *aux, char *pool, size_t size,
    uint64_t ashift, int r, int m)
{
	nvlist_t *mirror, **child;
	int c;

	if (m < 1)
		return (make_vdev_raidz(path, aux, pool, size, ashift, r));

	child = umem_alloc(m * sizeof (nvlist_t *), UMEM_NOFAIL);

	for (c = 0; c < m; c++)
		child[c] = make_vdev_raidz(path, aux, pool, size, ashift, r);

	VERIFY(nvlist_alloc(&mirror, NV_UNIQUE_NAME, 0) == 0);
	VERIFY(nvlist_add_string(mirror, ZPOOL_CONFIG_TYPE,
	    VDEV_TYPE_MIRROR) == 0);
	VERIFY(nvlist_add_nvlist_array(mirror, ZPOOL_CONFIG_CHILDREN,
	    child, m) == 0);

	for (c = 0; c < m; c++)
		nvlist_free(child[c]);

	umem_free(child, m * sizeof (nvlist_t *));

	return (mirror);
}

static nvlist_t *
make_vdev_root(char *path, char *aux, char *pool, size_t size, uint64_t ashift,
    int log, int r, int m, int t)
{
	nvlist_t *root, **child;
	int c;

	ASSERT(t > 0);

	child = umem_alloc(t * sizeof (nvlist_t *), UMEM_NOFAIL);

	for (c = 0; c < t; c++) {
		child[c] = make_vdev_mirror(path, aux, pool, size, ashift,
		    r, m);
		VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
		    log) == 0);
	}

	VERIFY(nvlist_alloc(&root, NV_UNIQUE_NAME, 0) == 0);
	VERIFY(nvlist_add_string(root, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT) == 0);
	VERIFY(nvlist_add_nvlist_array(root, aux ? aux : ZPOOL_CONFIG_CHILDREN,
	    child, t) == 0);

	for (c = 0; c < t; c++)
		nvlist_free(child[c]);

	umem_free(child, t * sizeof (nvlist_t *));

	return (root);
}

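/*
 * These three builders nest: the root holds t top-level vdevs, each an
 * m-way mirror of r-disk raidz groups (either level collapses to its
 * child when m < 1 or r < 2).  With the defaults (r = 4, m = 2),
 * make_vdev_root(NULL, NULL, NULL, size, 0, 0, 4, 2, 1) yields one
 * top-level 2-way mirror whose sides are 4-disk raidz1 vdevs -- 8 files.
 */
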
/*
 * Find a random spa version.  Returns a version in the range
 * [initial_version, SPA_VERSION_FEATURES].
 */
static uint64_t
ztest_random_spa_version(uint64_t initial_version)
{
	uint64_t version = initial_version;

	if (version <= SPA_VERSION_BEFORE_FEATURES) {
		version = version +
		    ztest_random(SPA_VERSION_BEFORE_FEATURES - version + 2);
	}

	if (version > SPA_VERSION_BEFORE_FEATURES)
		version = SPA_VERSION_FEATURES;

	ASSERT(SPA_VERSION_IS_SUPPORTED(version));
	return (version);
}

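/*
 * For example, with SPA_VERSION_BEFORE_FEATURES == 28 and
 * initial_version == 28, ztest_random(2) picks 28 or 29; the latter is
 * then mapped to SPA_VERSION_FEATURES (5000), so both legacy and
 * feature-flag pools get exercised.
 */
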
static int
ztest_random_blocksize(void)
{
	return (1 << (SPA_MINBLOCKSHIFT +
	    ztest_random(SPA_MAXBLOCKSHIFT - SPA_MINBLOCKSHIFT + 1)));
}

static int
ztest_random_ibshift(void)
{
	return (DN_MIN_INDBLKSHIFT +
	    ztest_random(DN_MAX_INDBLKSHIFT - DN_MIN_INDBLKSHIFT + 1));
}

static uint64_t
ztest_random_vdev_top(spa_t *spa, boolean_t log_ok)
{
	uint64_t top;
	vdev_t *rvd = spa->spa_root_vdev;
	vdev_t *tvd;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);

	do {
		top = ztest_random(rvd->vdev_children);
		tvd = rvd->vdev_child[top];
	} while (tvd->vdev_ishole || (tvd->vdev_islog && !log_ok) ||
	    tvd->vdev_mg == NULL || tvd->vdev_mg->mg_class == NULL);

	return (top);
}

static uint64_t
ztest_random_dsl_prop(zfs_prop_t prop)
{
	uint64_t value;

	do {
		value = zfs_prop_random_value(prop, ztest_random(-1ULL));
	} while (prop == ZFS_PROP_CHECKSUM && value == ZIO_CHECKSUM_OFF);

	return (value);
}

static int
ztest_dsl_prop_set_uint64(char *osname, zfs_prop_t prop, uint64_t value,
    boolean_t inherit)
{
	const char *propname = zfs_prop_to_name(prop);
	const char *valname;
	char setpoint[MAXPATHLEN];
	uint64_t curval;
	int error;

	error = dsl_prop_set_int(osname, propname,
	    (inherit ? ZPROP_SRC_NONE : ZPROP_SRC_LOCAL), value);

	if (error == ENOSPC) {
		ztest_record_enospc(FTAG);
		return (error);
	}
	ASSERT0(error);

	VERIFY0(dsl_prop_get_integer(osname, propname, &curval, setpoint));

	if (ztest_opts.zo_verbose >= 6) {
		VERIFY(zfs_prop_index_to_string(prop, curval, &valname) == 0);
		(void) printf("%s %s = %s at '%s'\n",
		    osname, propname, valname, setpoint);
	}

	return (error);
}

static int
ztest_spa_prop_set_uint64(zpool_prop_t prop, uint64_t value)
{
	spa_t *spa = ztest_spa;
	nvlist_t *props = NULL;
	int error;

	VERIFY(nvlist_alloc(&props, NV_UNIQUE_NAME, 0) == 0);
	VERIFY(nvlist_add_uint64(props, zpool_prop_to_name(prop), value) == 0);

	error = spa_prop_set(spa, props);

	nvlist_free(props);

	if (error == ENOSPC) {
		ztest_record_enospc(FTAG);
		return (error);
	}
	ASSERT0(error);

	return (error);
}

static void
ztest_rll_init(rll_t *rll)
{
	rll->rll_writer = NULL;
	rll->rll_readers = 0;
	VERIFY(_mutex_init(&rll->rll_lock, USYNC_THREAD, NULL) == 0);
	VERIFY(cond_init(&rll->rll_cv, USYNC_THREAD, NULL) == 0);
}

static void
ztest_rll_destroy(rll_t *rll)
{
	ASSERT(rll->rll_writer == NULL);
	ASSERT(rll->rll_readers == 0);
	VERIFY(_mutex_destroy(&rll->rll_lock) == 0);
	VERIFY(cond_destroy(&rll->rll_cv) == 0);
}

static void
ztest_rll_lock(rll_t *rll, rl_type_t type)
{
	VERIFY(mutex_lock(&rll->rll_lock) == 0);

	if (type == RL_READER) {
		while (rll->rll_writer != NULL)
			(void) cond_wait(&rll->rll_cv, &rll->rll_lock);
		rll->rll_readers++;
	} else {
		while (rll->rll_writer != NULL || rll->rll_readers)
			(void) cond_wait(&rll->rll_cv, &rll->rll_lock);
		rll->rll_writer = curthread;
	}

	VERIFY(mutex_unlock(&rll->rll_lock) == 0);
}

static void
ztest_rll_unlock(rll_t *rll)
{
	VERIFY(mutex_lock(&rll->rll_lock) == 0);

	if (rll->rll_writer) {
		ASSERT(rll->rll_readers == 0);
		rll->rll_writer = NULL;
	} else {
		ASSERT(rll->rll_readers != 0);
		ASSERT(rll->rll_writer == NULL);
		rll->rll_readers--;
	}

	if (rll->rll_writer == NULL && rll->rll_readers == 0)
		VERIFY(cond_broadcast(&rll->rll_cv) == 0);

	VERIFY(mutex_unlock(&rll->rll_lock) == 0);
}

static void
ztest_object_lock(ztest_ds_t *zd, uint64_t object, rl_type_t type)
{
	rll_t *rll = &zd->zd_object_lock[object & (ZTEST_OBJECT_LOCKS - 1)];

	ztest_rll_lock(rll, type);
}

static void
ztest_object_unlock(ztest_ds_t *zd, uint64_t object)
{
	rll_t *rll = &zd->zd_object_lock[object & (ZTEST_OBJECT_LOCKS - 1)];

	ztest_rll_unlock(rll);
}

static rl_t *
ztest_range_lock(ztest_ds_t *zd, uint64_t object, uint64_t offset,
    uint64_t size, rl_type_t type)
{
	uint64_t hash = object ^ (offset % (ZTEST_RANGE_LOCKS + 1));
	rll_t *rll = &zd->zd_range_lock[hash & (ZTEST_RANGE_LOCKS - 1)];
	rl_t *rl;

	rl = umem_alloc(sizeof (*rl), UMEM_NOFAIL);
	rl->rl_object = object;
	rl->rl_offset = offset;
	rl->rl_size = size;
	rl->rl_lock = rll;

	ztest_rll_lock(rll, type);

	return (rl);
}

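/*
 * Note that range "locks" here are only hashed approximations: the
 * object/offset pair maps onto one of ZTEST_RANGE_LOCKS (64) buckets,
 * so two unrelated ranges may share a lock.  That is fine for a stress
 * test -- it only adds serialization, never removes it.
 */
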
static void
ztest_range_unlock(rl_t *rl)
{
	rll_t *rll = rl->rl_lock;

	ztest_rll_unlock(rll);

	umem_free(rl, sizeof (*rl));
}

static void
ztest_zd_init(ztest_ds_t *zd, ztest_shared_ds_t *szd, objset_t *os)
{
	zd->zd_os = os;
	zd->zd_zilog = dmu_objset_zil(os);
	zd->zd_shared = szd;
	dmu_objset_name(os, zd->zd_name);

	if (zd->zd_shared != NULL)
		zd->zd_shared->zd_seq = 0;

	VERIFY(rwlock_init(&zd->zd_zilog_lock, USYNC_THREAD, NULL) == 0);
	VERIFY(_mutex_init(&zd->zd_dirobj_lock, USYNC_THREAD, NULL) == 0);

	for (int l = 0; l < ZTEST_OBJECT_LOCKS; l++)
		ztest_rll_init(&zd->zd_object_lock[l]);

	for (int l = 0; l < ZTEST_RANGE_LOCKS; l++)
		ztest_rll_init(&zd->zd_range_lock[l]);
}

static void
ztest_zd_fini(ztest_ds_t *zd)
{
	VERIFY(_mutex_destroy(&zd->zd_dirobj_lock) == 0);

	for (int l = 0; l < ZTEST_OBJECT_LOCKS; l++)
		ztest_rll_destroy(&zd->zd_object_lock[l]);

	for (int l = 0; l < ZTEST_RANGE_LOCKS; l++)
		ztest_rll_destroy(&zd->zd_range_lock[l]);
}

#define	TXG_MIGHTWAIT	(ztest_random(10) == 0 ? TXG_NOWAIT : TXG_WAIT)

static uint64_t
ztest_tx_assign(dmu_tx_t *tx, uint64_t txg_how, const char *tag)
{
	uint64_t txg;
	int error;

	/*
	 * Attempt to assign tx to some transaction group.
	 */
	error = dmu_tx_assign(tx, txg_how);
	if (error) {
		if (error == ERESTART) {
			ASSERT(txg_how == TXG_NOWAIT);
			dmu_tx_wait(tx);
		} else {
			ASSERT3U(error, ==, ENOSPC);
			ztest_record_enospc(tag);
		}
		dmu_tx_abort(tx);
		return (0);
	}
	txg = dmu_tx_get_txg(tx);
	ASSERT(txg != 0);
	return (txg);
}

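/*
 * ztest_tx_assign() returns 0 on failure after aborting the tx, so the
 * idiomatic caller pattern throughout this file is:
 *
 *	tx = dmu_tx_create(os);
 *	dmu_tx_hold_write(tx, object, offset, size);
 *	txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
 *	if (txg == 0)
 *		return (ENOSPC);	(tx already aborted)
 *	... modify ...
 *	dmu_tx_commit(tx);
 */
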
static void
ztest_pattern_set(void *buf, uint64_t size, uint64_t value)
{
	uint64_t *ip = buf;
	uint64_t *ip_end = (uint64_t *)((uintptr_t)buf + (uintptr_t)size);

	while (ip < ip_end)
		*ip++ = value;
}

static boolean_t
ztest_pattern_match(void *buf, uint64_t size, uint64_t value)
{
	uint64_t *ip = buf;
	uint64_t *ip_end = (uint64_t *)((uintptr_t)buf + (uintptr_t)size);
	uint64_t diff = 0;

	while (ip < ip_end)
		diff |= (value - *ip++);

	return (diff == 0);
}

static void
ztest_bt_generate(ztest_block_tag_t *bt, objset_t *os, uint64_t object,
    uint64_t offset, uint64_t gen, uint64_t txg, uint64_t crtxg)
{
	bt->bt_magic = BT_MAGIC;
	bt->bt_objset = dmu_objset_id(os);
	bt->bt_object = object;
	bt->bt_offset = offset;
	bt->bt_gen = gen;
	bt->bt_txg = txg;
	bt->bt_crtxg = crtxg;
}

static void
ztest_bt_verify(ztest_block_tag_t *bt, objset_t *os, uint64_t object,
    uint64_t offset, uint64_t gen, uint64_t txg, uint64_t crtxg)
{
	ASSERT(bt->bt_magic == BT_MAGIC);
	ASSERT(bt->bt_objset == dmu_objset_id(os));
	ASSERT(bt->bt_object == object);
	ASSERT(bt->bt_offset == offset);
	ASSERT(bt->bt_gen <= gen);
	ASSERT(bt->bt_txg <= txg);
	ASSERT(bt->bt_crtxg == crtxg);
}

static ztest_block_tag_t *
ztest_bt_bonus(dmu_buf_t *db)
{
	dmu_object_info_t doi;
	ztest_block_tag_t *bt;

	dmu_object_info_from_db(db, &doi);
	ASSERT3U(doi.doi_bonus_size, <=, db->db_size);
	ASSERT3U(doi.doi_bonus_size, >=, sizeof (*bt));
	bt = (void *)((char *)db->db_data + doi.doi_bonus_size - sizeof (*bt));

	return (bt);
}

/*
 * ZIL logging ops
 */

#define	lrz_type	lr_mode
#define	lrz_blocksize	lr_uid
#define	lrz_ibshift	lr_gid
#define	lrz_bonustype	lr_rdev
#define	lrz_bonuslen	lr_crtime[1]

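/*
 * The lrz_* aliases above repurpose otherwise-unused lr_create_t fields
 * (mode, uid, gid, rdev, crtime[1]) to smuggle the DMU object type,
 * blocksize, indirect block shift, bonus type and bonus length through
 * a standard TX_CREATE log record.
 */
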
static void
ztest_log_create(ztest_ds_t *zd, dmu_tx_t *tx, lr_create_t *lr)
{
	char *name = (void *)(lr + 1);		/* name follows lr */
	size_t namesize = strlen(name) + 1;
	itx_t *itx;

	if (zil_replaying(zd->zd_zilog, tx))
		return;

	itx = zil_itx_create(TX_CREATE, sizeof (*lr) + namesize);
	bcopy(&lr->lr_common + 1, &itx->itx_lr + 1,
	    sizeof (*lr) + namesize - sizeof (lr_t));

	zil_itx_assign(zd->zd_zilog, itx, tx);
}

static void
ztest_log_remove(ztest_ds_t *zd, dmu_tx_t *tx, lr_remove_t *lr, uint64_t object)
{
	char *name = (void *)(lr + 1);		/* name follows lr */
	size_t namesize = strlen(name) + 1;
	itx_t *itx;

	if (zil_replaying(zd->zd_zilog, tx))
		return;

	itx = zil_itx_create(TX_REMOVE, sizeof (*lr) + namesize);
	bcopy(&lr->lr_common + 1, &itx->itx_lr + 1,
	    sizeof (*lr) + namesize - sizeof (lr_t));

	itx->itx_oid = object;
	zil_itx_assign(zd->zd_zilog, itx, tx);
}

static void
ztest_log_write(ztest_ds_t *zd, dmu_tx_t *tx, lr_write_t *lr)
{
	itx_t *itx;
	itx_wr_state_t write_state = ztest_random(WR_NUM_STATES);

	if (zil_replaying(zd->zd_zilog, tx))
		return;

	if (lr->lr_length > ZIL_MAX_LOG_DATA)
		write_state = WR_INDIRECT;

	itx = zil_itx_create(TX_WRITE,
	    sizeof (*lr) + (write_state == WR_COPIED ? lr->lr_length : 0));

	if (write_state == WR_COPIED &&
	    dmu_read(zd->zd_os, lr->lr_foid, lr->lr_offset, lr->lr_length,
	    ((lr_write_t *)&itx->itx_lr) + 1, DMU_READ_NO_PREFETCH) != 0) {
		zil_itx_destroy(itx);
		itx = zil_itx_create(TX_WRITE, sizeof (*lr));
		write_state = WR_NEED_COPY;
	}
	itx->itx_private = zd;
	itx->itx_wr_state = write_state;
	itx->itx_sync = (ztest_random(8) == 0);
	itx->itx_sod += (write_state == WR_NEED_COPY ? lr->lr_length : 0);

	bcopy(&lr->lr_common + 1, &itx->itx_lr + 1,
	    sizeof (*lr) - sizeof (lr_t));

	zil_itx_assign(zd->zd_zilog, itx, tx);
}

static void
ztest_log_truncate(ztest_ds_t *zd, dmu_tx_t *tx, lr_truncate_t *lr)
{
	itx_t *itx;

	if (zil_replaying(zd->zd_zilog, tx))
		return;

	itx = zil_itx_create(TX_TRUNCATE, sizeof (*lr));
	bcopy(&lr->lr_common + 1, &itx->itx_lr + 1,
	    sizeof (*lr) - sizeof (lr_t));

	itx->itx_sync = B_FALSE;
	zil_itx_assign(zd->zd_zilog, itx, tx);
}

static void
ztest_log_setattr(ztest_ds_t *zd, dmu_tx_t *tx, lr_setattr_t *lr)
{
	itx_t *itx;

	if (zil_replaying(zd->zd_zilog, tx))
		return;

	itx = zil_itx_create(TX_SETATTR, sizeof (*lr));
	bcopy(&lr->lr_common + 1, &itx->itx_lr + 1,
	    sizeof (*lr) - sizeof (lr_t));

	itx->itx_sync = B_FALSE;
	zil_itx_assign(zd->zd_zilog, itx, tx);
}

/*
 * ZIL replay ops
 */
static int
ztest_replay_create(ztest_ds_t *zd, lr_create_t *lr, boolean_t byteswap)
{
	char *name = (void *)(lr + 1);		/* name follows lr */
	objset_t *os = zd->zd_os;
	ztest_block_tag_t *bbt;
	dmu_buf_t *db;
	dmu_tx_t *tx;
	uint64_t txg;
	int error = 0;

	if (byteswap)
		byteswap_uint64_array(lr, sizeof (*lr));

	ASSERT(lr->lr_doid == ZTEST_DIROBJ);
	ASSERT(name[0] != '\0');

	tx = dmu_tx_create(os);

	dmu_tx_hold_zap(tx, lr->lr_doid, B_TRUE, name);

	if (lr->lrz_type == DMU_OT_ZAP_OTHER) {
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
	} else {
		dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);
	}

	txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
	if (txg == 0)
		return (ENOSPC);

	ASSERT(dmu_objset_zil(os)->zl_replay == !!lr->lr_foid);

	if (lr->lrz_type == DMU_OT_ZAP_OTHER) {
		if (lr->lr_foid == 0) {
			lr->lr_foid = zap_create(os,
			    lr->lrz_type, lr->lrz_bonustype,
			    lr->lrz_bonuslen, tx);
		} else {
			error = zap_create_claim(os, lr->lr_foid,
			    lr->lrz_type, lr->lrz_bonustype,
			    lr->lrz_bonuslen, tx);
		}
	} else {
		if (lr->lr_foid == 0) {
			lr->lr_foid = dmu_object_alloc(os,
			    lr->lrz_type, 0, lr->lrz_bonustype,
			    lr->lrz_bonuslen, tx);
		} else {
			error = dmu_object_claim(os, lr->lr_foid,
			    lr->lrz_type, 0, lr->lrz_bonustype,
			    lr->lrz_bonuslen, tx);
		}
	}

	if (error) {
		ASSERT3U(error, ==, EEXIST);
		ASSERT(zd->zd_zilog->zl_replay);
		dmu_tx_commit(tx);
		return (error);
	}

	ASSERT(lr->lr_foid != 0);

	if (lr->lrz_type != DMU_OT_ZAP_OTHER)
		VERIFY3U(0, ==, dmu_object_set_blocksize(os, lr->lr_foid,
		    lr->lrz_blocksize, lr->lrz_ibshift, tx));

	VERIFY3U(0, ==, dmu_bonus_hold(os, lr->lr_foid, FTAG, &db));
	bbt = ztest_bt_bonus(db);
	dmu_buf_will_dirty(db, tx);
	ztest_bt_generate(bbt, os, lr->lr_foid, -1ULL, lr->lr_gen, txg, txg);
	dmu_buf_rele(db, FTAG);

	VERIFY3U(0, ==, zap_add(os, lr->lr_doid, name, sizeof (uint64_t), 1,
	    &lr->lr_foid, tx));

	(void) ztest_log_create(zd, tx, lr);

	dmu_tx_commit(tx);

	return (0);
}

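/*
 * Note the dual use of ztest_replay_create(): called with lr_foid == 0
 * (from ztest_create()) it allocates a fresh object; called during ZIL
 * replay with a nonzero lr_foid it must claim exactly that object id,
 * which is why the zl_replay assertion above ties the two together.
 */
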
static int
ztest_replay_remove(ztest_ds_t *zd, lr_remove_t *lr, boolean_t byteswap)
{
	char *name = (void *)(lr + 1);		/* name follows lr */
	objset_t *os = zd->zd_os;
	dmu_object_info_t doi;
	dmu_tx_t *tx;
	uint64_t object, txg;

	if (byteswap)
		byteswap_uint64_array(lr, sizeof (*lr));

	ASSERT(lr->lr_doid == ZTEST_DIROBJ);
	ASSERT(name[0] != '\0');

	VERIFY3U(0, ==,
	    zap_lookup(os, lr->lr_doid, name, sizeof (object), 1, &object));
	ASSERT(object != 0);

	ztest_object_lock(zd, object, RL_WRITER);

	VERIFY3U(0, ==, dmu_object_info(os, object, &doi));

	tx = dmu_tx_create(os);

	dmu_tx_hold_zap(tx, lr->lr_doid, B_FALSE, name);
	dmu_tx_hold_free(tx, object, 0, DMU_OBJECT_END);

	txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
	if (txg == 0) {
		ztest_object_unlock(zd, object);
		return (ENOSPC);
	}

	if (doi.doi_type == DMU_OT_ZAP_OTHER) {
		VERIFY3U(0, ==, zap_destroy(os, object, tx));
	} else {
		VERIFY3U(0, ==, dmu_object_free(os, object, tx));
	}

	VERIFY3U(0, ==, zap_remove(os, lr->lr_doid, name, tx));

	(void) ztest_log_remove(zd, tx, lr, object);

	dmu_tx_commit(tx);

	ztest_object_unlock(zd, object);

	return (0);
}

static int
ztest_replay_write(ztest_ds_t *zd, lr_write_t *lr, boolean_t byteswap)
{
	objset_t *os = zd->zd_os;
	void *data = lr + 1;			/* data follows lr */
	uint64_t offset, length;
	ztest_block_tag_t *bt = data;
	ztest_block_tag_t *bbt;
	uint64_t gen, txg, lrtxg, crtxg;
	dmu_object_info_t doi;
	dmu_tx_t *tx;
	dmu_buf_t *db;
	arc_buf_t *abuf = NULL;
	rl_t *rl;

	if (byteswap)
		byteswap_uint64_array(lr, sizeof (*lr));

	offset = lr->lr_offset;
	length = lr->lr_length;

	/* If it's a dmu_sync() block, write the whole block */
	if (lr->lr_common.lrc_reclen == sizeof (lr_write_t)) {
		uint64_t blocksize = BP_GET_LSIZE(&lr->lr_blkptr);
		if (length < blocksize) {
			offset -= offset % blocksize;
			length = blocksize;
		}
	}

	if (bt->bt_magic == BSWAP_64(BT_MAGIC))
		byteswap_uint64_array(bt, sizeof (*bt));

	if (bt->bt_magic != BT_MAGIC)
		bt = NULL;

	ztest_object_lock(zd, lr->lr_foid, RL_READER);
	rl = ztest_range_lock(zd, lr->lr_foid, offset, length, RL_WRITER);

	VERIFY3U(0, ==, dmu_bonus_hold(os, lr->lr_foid, FTAG, &db));

	dmu_object_info_from_db(db, &doi);

	bbt = ztest_bt_bonus(db);
	ASSERT3U(bbt->bt_magic, ==, BT_MAGIC);
	gen = bbt->bt_gen;
	crtxg = bbt->bt_crtxg;
	lrtxg = lr->lr_common.lrc_txg;

	tx = dmu_tx_create(os);

	dmu_tx_hold_write(tx, lr->lr_foid, offset, length);

	if (ztest_random(8) == 0 && length == doi.doi_data_block_size &&
	    P2PHASE(offset, length) == 0)
		abuf = dmu_request_arcbuf(db, length);

	txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
	if (txg == 0) {
		if (abuf != NULL)
			dmu_return_arcbuf(abuf);
		dmu_buf_rele(db, FTAG);
		ztest_range_unlock(rl);
		ztest_object_unlock(zd, lr->lr_foid);
		return (ENOSPC);
	}

	if (bt != NULL) {
		/*
		 * Usually, verify the old data before writing new data --
		 * but not always, because we also want to verify correct
		 * behavior when the data was not recently read into cache.
		 */
		ASSERT(offset % doi.doi_data_block_size == 0);
		if (ztest_random(4) != 0) {
			int prefetch = ztest_random(2) ?
			    DMU_READ_PREFETCH : DMU_READ_NO_PREFETCH;
			ztest_block_tag_t rbt;

			VERIFY(dmu_read(os, lr->lr_foid, offset,
			    sizeof (rbt), &rbt, prefetch) == 0);
			if (rbt.bt_magic == BT_MAGIC) {
				ztest_bt_verify(&rbt, os, lr->lr_foid,
				    offset, gen, txg, crtxg);
			}
		}

		/*
		 * Writes can appear to be newer than the bonus buffer because
		 * the ztest_get_data() callback does a dmu_read() of the
		 * open-context data, which may be different than the data
		 * as it was when the write was generated.
		 */
		if (zd->zd_zilog->zl_replay) {
			ztest_bt_verify(bt, os, lr->lr_foid, offset,
			    MAX(gen, bt->bt_gen), MAX(txg, lrtxg),
			    bt->bt_crtxg);
		}

		/*
		 * Set the bt's gen/txg to the bonus buffer's gen/txg
		 * so that all of the usual ASSERTs will work.
		 */
		ztest_bt_generate(bt, os, lr->lr_foid, offset, gen, txg, crtxg);
	}

	if (abuf == NULL) {
		dmu_write(os, lr->lr_foid, offset, length, data, tx);
	} else {
		bcopy(data, abuf->b_data, length);
		dmu_assign_arcbuf(db, offset, abuf, tx);
	}

	(void) ztest_log_write(zd, tx, lr);

	dmu_buf_rele(db, FTAG);

	dmu_tx_commit(tx);

	ztest_range_unlock(rl);
	ztest_object_unlock(zd, lr->lr_foid);

	return (0);
}

static int
ztest_replay_truncate(ztest_ds_t *zd, lr_truncate_t *lr, boolean_t byteswap)
{
	objset_t *os = zd->zd_os;
	dmu_tx_t *tx;
	uint64_t txg;
	rl_t *rl;

	if (byteswap)
		byteswap_uint64_array(lr, sizeof (*lr));

	ztest_object_lock(zd, lr->lr_foid, RL_READER);
	rl = ztest_range_lock(zd, lr->lr_foid, lr->lr_offset, lr->lr_length,
	    RL_WRITER);

	tx = dmu_tx_create(os);

	dmu_tx_hold_free(tx, lr->lr_foid, lr->lr_offset, lr->lr_length);

	txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
	if (txg == 0) {
		ztest_range_unlock(rl);
		ztest_object_unlock(zd, lr->lr_foid);
		return (ENOSPC);
	}

	VERIFY(dmu_free_range(os, lr->lr_foid, lr->lr_offset,
	    lr->lr_length, tx) == 0);

	(void) ztest_log_truncate(zd, tx, lr);

	dmu_tx_commit(tx);

	ztest_range_unlock(rl);
	ztest_object_unlock(zd, lr->lr_foid);

	return (0);
}

static int
ztest_replay_setattr(ztest_ds_t *zd, lr_setattr_t *lr, boolean_t byteswap)
{
	objset_t *os = zd->zd_os;
	dmu_tx_t *tx;
	dmu_buf_t *db;
	ztest_block_tag_t *bbt;
	uint64_t txg, lrtxg, crtxg;

	if (byteswap)
		byteswap_uint64_array(lr, sizeof (*lr));

	ztest_object_lock(zd, lr->lr_foid, RL_WRITER);

	VERIFY3U(0, ==, dmu_bonus_hold(os, lr->lr_foid, FTAG, &db));

	tx = dmu_tx_create(os);
	dmu_tx_hold_bonus(tx, lr->lr_foid);

	txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
	if (txg == 0) {
		dmu_buf_rele(db, FTAG);
		ztest_object_unlock(zd, lr->lr_foid);
		return (ENOSPC);
	}

	bbt = ztest_bt_bonus(db);
	ASSERT3U(bbt->bt_magic, ==, BT_MAGIC);
	crtxg = bbt->bt_crtxg;
	lrtxg = lr->lr_common.lrc_txg;

	if (zd->zd_zilog->zl_replay) {
		ASSERT(lr->lr_size != 0);
		ASSERT(lr->lr_mode != 0);
		ASSERT(lrtxg != 0);
	} else {
		/*
		 * Randomly change the size and increment the generation.
		 */
		lr->lr_size = (ztest_random(db->db_size / sizeof (*bbt)) + 1) *
		    sizeof (*bbt);
		lr->lr_mode = bbt->bt_gen + 1;
		ASSERT(lrtxg == 0);
	}

	/*
	 * Verify that the current bonus buffer is not newer than our txg.
	 */
	ztest_bt_verify(bbt, os, lr->lr_foid, -1ULL, lr->lr_mode,
	    MAX(txg, lrtxg), crtxg);

	dmu_buf_will_dirty(db, tx);

	ASSERT3U(lr->lr_size, >=, sizeof (*bbt));
	ASSERT3U(lr->lr_size, <=, db->db_size);
	VERIFY0(dmu_set_bonus(db, lr->lr_size, tx));
	bbt = ztest_bt_bonus(db);

	ztest_bt_generate(bbt, os, lr->lr_foid, -1ULL, lr->lr_mode, txg, crtxg);

	dmu_buf_rele(db, FTAG);

	(void) ztest_log_setattr(zd, tx, lr);

	dmu_tx_commit(tx);

	ztest_object_unlock(zd, lr->lr_foid);

	return (0);
}

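/*
 * Only the transaction types ztest actually generates (TX_CREATE,
 * TX_REMOVE, TX_WRITE, TX_TRUNCATE, TX_SETATTR) get replay handlers in
 * the vector below; the remaining slots stay NULL.
 */
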
zil_replay_func_t *ztest_replay_vector[TX_MAX_TYPE] = {
	NULL,			/* 0 no such transaction type */
	ztest_replay_create,	/* TX_CREATE */
	NULL,			/* TX_MKDIR */
	NULL,			/* TX_MKXATTR */
	NULL,			/* TX_SYMLINK */
	ztest_replay_remove,	/* TX_REMOVE */
	NULL,			/* TX_RMDIR */
	NULL,			/* TX_LINK */
	NULL,			/* TX_RENAME */
	ztest_replay_write,	/* TX_WRITE */
	ztest_replay_truncate,	/* TX_TRUNCATE */
	ztest_replay_setattr,	/* TX_SETATTR */
	NULL,			/* TX_ACL */
	NULL,			/* TX_CREATE_ACL */
	NULL,			/* TX_CREATE_ATTR */
	NULL,			/* TX_CREATE_ACL_ATTR */
	NULL,			/* TX_MKDIR_ACL */
	NULL,			/* TX_MKDIR_ATTR */
	NULL,			/* TX_MKDIR_ACL_ATTR */
	NULL,			/* TX_WRITE2 */
};

/*
 * ZIL get_data callbacks
 */

static void
ztest_get_done(zgd_t *zgd, int error)
{
	ztest_ds_t *zd = zgd->zgd_private;
	uint64_t object = zgd->zgd_rl->rl_object;

	if (zgd->zgd_db)
		dmu_buf_rele(zgd->zgd_db, zgd);

	ztest_range_unlock(zgd->zgd_rl);
	ztest_object_unlock(zd, object);

	if (error == 0 && zgd->zgd_bp)
		zil_add_block(zgd->zgd_zilog, zgd->zgd_bp);

	umem_free(zgd, sizeof (*zgd));
}

static int
ztest_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
{
	ztest_ds_t *zd = arg;
	objset_t *os = zd->zd_os;
	uint64_t object = lr->lr_foid;
	uint64_t offset = lr->lr_offset;
	uint64_t size = lr->lr_length;
	blkptr_t *bp = &lr->lr_blkptr;
	uint64_t txg = lr->lr_common.lrc_txg;
	uint64_t crtxg;
	dmu_object_info_t doi;
	dmu_buf_t *db;
	zgd_t *zgd;
	int error;

	ztest_object_lock(zd, object, RL_READER);
	error = dmu_bonus_hold(os, object, FTAG, &db);
	if (error) {
		ztest_object_unlock(zd, object);
		return (error);
	}

	crtxg = ztest_bt_bonus(db)->bt_crtxg;

	if (crtxg == 0 || crtxg > txg) {
		dmu_buf_rele(db, FTAG);
		ztest_object_unlock(zd, object);
		return (ENOENT);
	}

	dmu_object_info_from_db(db, &doi);
	dmu_buf_rele(db, FTAG);
	db = NULL;

	zgd = umem_zalloc(sizeof (*zgd), UMEM_NOFAIL);
	zgd->zgd_zilog = zd->zd_zilog;
	zgd->zgd_private = zd;

	if (buf != NULL) {	/* immediate write */
		zgd->zgd_rl = ztest_range_lock(zd, object, offset, size,
		    RL_READER);

		error = dmu_read(os, object, offset, size, buf,
		    DMU_READ_NO_PREFETCH);
		ASSERT(error == 0);
	} else {
		size = doi.doi_data_block_size;
		if (ISP2(size)) {
			offset = P2ALIGN(offset, size);
		} else {
			ASSERT(offset < size);
			offset = 0;
		}

		zgd->zgd_rl = ztest_range_lock(zd, object, offset, size,
		    RL_READER);

		error = dmu_buf_hold(os, object, offset, zgd, &db,
		    DMU_READ_NO_PREFETCH);

		if (error == 0) {
			blkptr_t *obp = dmu_buf_get_blkptr(db);
			if (obp) {
				ASSERT(BP_IS_HOLE(bp));
				*bp = *obp;
			}

			zgd->zgd_db = db;
			zgd->zgd_bp = bp;

			ASSERT(db->db_offset == offset);
			ASSERT(db->db_size == size);

			error = dmu_sync(zio, lr->lr_common.lrc_txg,
			    ztest_get_done, zgd);

			if (error == 0)
				return (0);
		}
	}

	ztest_get_done(zgd, error);

	return (error);
}

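/*
 * ztest_get_data() covers the two ZIL cases: when the ZIL hands us a
 * buffer (buf != NULL) the record is logged by value, so we just
 * dmu_read() into it; otherwise the block is written in place via
 * dmu_sync() and the log record points at it, with ztest_get_done()
 * releasing the locks once the sync write completes.
 */
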
static void *
ztest_lr_alloc(size_t lrsize, char *name)
{
	char *lr;
	size_t namesize = name ? strlen(name) + 1 : 0;

	lr = umem_zalloc(lrsize + namesize, UMEM_NOFAIL);

	if (name)
		bcopy(name, lr + lrsize, namesize);

	return (lr);
}

void
ztest_lr_free(void *lr, size_t lrsize, char *name)
{
	size_t namesize = name ? strlen(name) + 1 : 0;

	umem_free(lr, lrsize + namesize);
}

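/*
 * ztest_lr_alloc() co-allocates the fixed record and its trailing name,
 * matching the "(void *)(lr + 1)" layout that the log and replay
 * functions above assume for TX_CREATE and TX_REMOVE records.
 */
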
/*
 * Lookup a bunch of objects.  Returns the number of objects not found.
 */
static int
ztest_lookup(ztest_ds_t *zd, ztest_od_t *od, int count)
{
	int missing = 0;
	int error;

	ASSERT(_mutex_held(&zd->zd_dirobj_lock));

	for (int i = 0; i < count; i++, od++) {
		od->od_object = 0;
		error = zap_lookup(zd->zd_os, od->od_dir, od->od_name,
		    sizeof (uint64_t), 1, &od->od_object);
		if (error) {
			ASSERT(error == ENOENT);
			ASSERT(od->od_object == 0);
			missing++;
		} else {
			dmu_buf_t *db;
			ztest_block_tag_t *bbt;
			dmu_object_info_t doi;

			ASSERT(od->od_object != 0);
			ASSERT(missing == 0);	/* there should be no gaps */

			ztest_object_lock(zd, od->od_object, RL_READER);
			VERIFY3U(0, ==, dmu_bonus_hold(zd->zd_os,
			    od->od_object, FTAG, &db));
			dmu_object_info_from_db(db, &doi);
			bbt = ztest_bt_bonus(db);
			ASSERT3U(bbt->bt_magic, ==, BT_MAGIC);
			od->od_type = doi.doi_type;
			od->od_blocksize = doi.doi_data_block_size;
			od->od_gen = bbt->bt_gen;
			dmu_buf_rele(db, FTAG);
			ztest_object_unlock(zd, od->od_object);
		}
	}

	return (missing);
}

static int
ztest_create(ztest_ds_t *zd, ztest_od_t *od, int count)
{
	int missing = 0;

	ASSERT(_mutex_held(&zd->zd_dirobj_lock));

	for (int i = 0; i < count; i++, od++) {
		if (missing) {
			od->od_object = 0;
			missing++;
			continue;
		}

		lr_create_t *lr = ztest_lr_alloc(sizeof (*lr), od->od_name);

		lr->lr_doid = od->od_dir;
		lr->lr_foid = 0;	/* 0 to allocate, > 0 to claim */
		lr->lrz_type = od->od_crtype;
		lr->lrz_blocksize = od->od_crblocksize;
		lr->lrz_ibshift = ztest_random_ibshift();
		lr->lrz_bonustype = DMU_OT_UINT64_OTHER;
		lr->lrz_bonuslen = dmu_bonus_max();
		lr->lr_gen = od->od_crgen;
		lr->lr_crtime[0] = time(NULL);

		if (ztest_replay_create(zd, lr, B_FALSE) != 0) {
			ASSERT(missing == 0);
			od->od_object = 0;
			missing++;
		} else {
			od->od_object = lr->lr_foid;
			od->od_type = od->od_crtype;
			od->od_blocksize = od->od_crblocksize;
			od->od_gen = od->od_crgen;
			ASSERT(od->od_object != 0);
		}

		ztest_lr_free(lr, sizeof (*lr), od->od_name);
	}

	return (missing);
}

static int
ztest_remove(ztest_ds_t *zd, ztest_od_t *od, int count)
{
	int missing = 0;
	int error;

	ASSERT(_mutex_held(&zd->zd_dirobj_lock));

	od += count - 1;

	for (int i = count - 1; i >= 0; i--, od--) {
		if (missing) {
			missing++;
			continue;
		}

		/*
		 * No object was found.
		 */
		if (od->od_object == 0)
			continue;

		lr_remove_t *lr = ztest_lr_alloc(sizeof (*lr), od->od_name);

		lr->lr_doid = od->od_dir;

		if ((error = ztest_replay_remove(zd, lr, B_FALSE)) != 0) {
			ASSERT3U(error, ==, ENOSPC);
			missing++;
		} else {
			od->od_object = 0;
		}
		ztest_lr_free(lr, sizeof (*lr), od->od_name);
	}

	return (missing);
}

static int
ztest_write(ztest_ds_t *zd, uint64_t object, uint64_t offset, uint64_t size,
    void *data)
{
	lr_write_t *lr;
	int error;

	lr = ztest_lr_alloc(sizeof (*lr) + size, NULL);

	lr->lr_foid = object;
	lr->lr_offset = offset;
	lr->lr_length = size;
	lr->lr_blkoff = 0;
	BP_ZERO(&lr->lr_blkptr);

	bcopy(data, lr + 1, size);

	error = ztest_replay_write(zd, lr, B_FALSE);

	ztest_lr_free(lr, sizeof (*lr) + size, NULL);

	return (error);
}

static int
ztest_truncate(ztest_ds_t *zd, uint64_t object, uint64_t offset, uint64_t size)
{
	lr_truncate_t *lr;
	int error;

	lr = ztest_lr_alloc(sizeof (*lr), NULL);

	lr->lr_foid = object;
	lr->lr_offset = offset;
	lr->lr_length = size;

	error = ztest_replay_truncate(zd, lr, B_FALSE);

	ztest_lr_free(lr, sizeof (*lr), NULL);

	return (error);
}

static int
ztest_setattr(ztest_ds_t *zd, uint64_t object)
{
	lr_setattr_t *lr;
	int error;

	lr = ztest_lr_alloc(sizeof (*lr), NULL);

	lr->lr_foid = object;
	lr->lr_size = 0;
	lr->lr_mode = 0;

	error = ztest_replay_setattr(zd, lr, B_FALSE);

	ztest_lr_free(lr, sizeof (*lr), NULL);

	return (error);
}

static void
ztest_prealloc(ztest_ds_t *zd, uint64_t object, uint64_t offset, uint64_t size)
{
	objset_t *os = zd->zd_os;
	dmu_tx_t *tx;
	uint64_t txg;
	rl_t *rl;

	txg_wait_synced(dmu_objset_pool(os), 0);

	ztest_object_lock(zd, object, RL_READER);
	rl = ztest_range_lock(zd, object, offset, size, RL_WRITER);

	tx = dmu_tx_create(os);

	dmu_tx_hold_write(tx, object, offset, size);

	txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);

	if (txg != 0) {
		dmu_prealloc(os, object, offset, size, tx);
		dmu_tx_commit(tx);
		txg_wait_synced(dmu_objset_pool(os), txg);
	} else {
		(void) dmu_free_long_range(os, object, offset, size);
	}

	ztest_range_unlock(rl);
	ztest_object_unlock(zd, object);
}

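/*
 * ztest_io() below applies a single random op to one object; the extra
 * coin flip biases roughly half of all calls to ZTEST_IO_WRITE_TAG so
 * that verifiable block tags stay plentiful on disk.
 */
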
2155 */ 2156 io_type = ztest_random(ZTEST_IO_TYPES); 2157 if (ztest_random(2) == 0) 2158 io_type = ZTEST_IO_WRITE_TAG; 2159 2160 (void) rw_rdlock(&zd->zd_zilog_lock); 2161 2162 switch (io_type) { 2163 2164 case ZTEST_IO_WRITE_TAG: 2165 ztest_bt_generate(&wbt, zd->zd_os, object, offset, 0, 0, 0); 2166 (void) ztest_write(zd, object, offset, sizeof (wbt), &wbt); 2167 break; 2168 2169 case ZTEST_IO_WRITE_PATTERN: 2170 (void) memset(data, 'a' + (object + offset) % 5, blocksize); 2171 if (ztest_random(2) == 0) { 2172 /* 2173 * Induce fletcher2 collisions to ensure that 2174 * zio_ddt_collision() detects and resolves them 2175 * when using fletcher2-verify for deduplication. 2176 */ 2177 ((uint64_t *)data)[0] ^= 1ULL << 63; 2178 ((uint64_t *)data)[4] ^= 1ULL << 63; 2179 } 2180 (void) ztest_write(zd, object, offset, blocksize, data); 2181 break; 2182 2183 case ZTEST_IO_WRITE_ZEROES: 2184 bzero(data, blocksize); 2185 (void) ztest_write(zd, object, offset, blocksize, data); 2186 break; 2187 2188 case ZTEST_IO_TRUNCATE: 2189 (void) ztest_truncate(zd, object, offset, blocksize); 2190 break; 2191 2192 case ZTEST_IO_SETATTR: 2193 (void) ztest_setattr(zd, object); 2194 break; 2195 2196 case ZTEST_IO_REWRITE: 2197 (void) rw_rdlock(&ztest_name_lock); 2198 err = ztest_dsl_prop_set_uint64(zd->zd_name, 2199 ZFS_PROP_CHECKSUM, spa_dedup_checksum(ztest_spa), 2200 B_FALSE); 2201 VERIFY(err == 0 || err == ENOSPC); 2202 err = ztest_dsl_prop_set_uint64(zd->zd_name, 2203 ZFS_PROP_COMPRESSION, 2204 ztest_random_dsl_prop(ZFS_PROP_COMPRESSION), 2205 B_FALSE); 2206 VERIFY(err == 0 || err == ENOSPC); 2207 (void) rw_unlock(&ztest_name_lock); 2208 2209 VERIFY0(dmu_read(zd->zd_os, object, offset, blocksize, data, 2210 DMU_READ_NO_PREFETCH)); 2211 2212 (void) ztest_write(zd, object, offset, blocksize, data); 2213 break; 2214 } 2215 2216 (void) rw_unlock(&zd->zd_zilog_lock); 2217 2218 umem_free(data, blocksize); 2219} 2220 2221/* 2222 * Initialize an object description template. 2223 */ 2224static void 2225ztest_od_init(ztest_od_t *od, uint64_t id, char *tag, uint64_t index, 2226 dmu_object_type_t type, uint64_t blocksize, uint64_t gen) 2227{ 2228 od->od_dir = ZTEST_DIROBJ; 2229 od->od_object = 0; 2230 2231 od->od_crtype = type; 2232 od->od_crblocksize = blocksize ? blocksize : ztest_random_blocksize(); 2233 od->od_crgen = gen; 2234 2235 od->od_type = DMU_OT_NONE; 2236 od->od_blocksize = 0; 2237 od->od_gen = 0; 2238 2239 (void) snprintf(od->od_name, sizeof (od->od_name), "%s(%lld)[%llu]", 2240 tag, (int64_t)id, index); 2241} 2242 2243/* 2244 * Lookup or create the objects for a test using the od template. 2245 * If the objects do not all exist, or if 'remove' is specified, 2246 * remove any existing objects and create new ones. Otherwise, 2247 * use the existing objects. 
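* Returns 0 on success, or -1 if the objects could not all be (re)created (e.g. on ENOSPC); on success zd->zd_od is left pointing at the templates.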
2248 */ 2249static int 2250ztest_object_init(ztest_ds_t *zd, ztest_od_t *od, size_t size, boolean_t remove) 2251{ 2252 int count = size / sizeof (*od); 2253 int rv = 0; 2254 2255 VERIFY(mutex_lock(&zd->zd_dirobj_lock) == 0); 2256 if ((ztest_lookup(zd, od, count) != 0 || remove) && 2257 (ztest_remove(zd, od, count) != 0 || 2258 ztest_create(zd, od, count) != 0)) 2259 rv = -1; 2260 zd->zd_od = od; 2261 VERIFY(mutex_unlock(&zd->zd_dirobj_lock) == 0); 2262 2263 return (rv); 2264} 2265 2266/* ARGSUSED */ 2267void 2268ztest_zil_commit(ztest_ds_t *zd, uint64_t id) 2269{ 2270 zilog_t *zilog = zd->zd_zilog; 2271 2272 (void) rw_rdlock(&zd->zd_zilog_lock); 2273 2274 zil_commit(zilog, ztest_random(ZTEST_OBJECTS)); 2275 2276 /* 2277 * Remember the committed values in zd, which is in parent/child 2278 * shared memory. If we die, the next iteration of ztest_run() 2279 * will verify that the log really does contain this record. 2280 */ 2281 mutex_enter(&zilog->zl_lock); 2282 ASSERT(zd->zd_shared != NULL); 2283 ASSERT3U(zd->zd_shared->zd_seq, <=, zilog->zl_commit_lr_seq); 2284 zd->zd_shared->zd_seq = zilog->zl_commit_lr_seq; 2285 mutex_exit(&zilog->zl_lock); 2286 2287 (void) rw_unlock(&zd->zd_zilog_lock); 2288} 2289 2290/* 2291 * This function is designed to simulate the operations that occur during a 2292 * mount/unmount operation. We hold the dataset across these operations in an 2293 * attempt to expose any implicit assumptions about ZIL management. 2294 */ 2295/* ARGSUSED */ 2296void 2297ztest_zil_remount(ztest_ds_t *zd, uint64_t id) 2298{ 2299 objset_t *os = zd->zd_os; 2300 2301 /* 2302 * We grab the zd_dirobj_lock to ensure that no other thread is 2303 * updating the zil (i.e. adding in-memory log records) and the 2304 * zd_zilog_lock to block any I/O. 2305 */ 2306 VERIFY0(mutex_lock(&zd->zd_dirobj_lock)); 2307 (void) rw_wrlock(&zd->zd_zilog_lock); 2308 2309 /* zfsvfs_teardown() */ 2310 zil_close(zd->zd_zilog); 2311 2312 /* zfsvfs_setup() */ 2313 VERIFY(zil_open(os, ztest_get_data) == zd->zd_zilog); 2314 zil_replay(os, zd, ztest_replay_vector); 2315 2316 (void) rw_unlock(&zd->zd_zilog_lock); 2317 VERIFY(mutex_unlock(&zd->zd_dirobj_lock) == 0); 2318} 2319 2320/* 2321 * Verify that we can't destroy an active pool, create an existing pool, 2322 * or create a pool with a bad vdev spec. 2323 */ 2324/* ARGSUSED */ 2325void 2326ztest_spa_create_destroy(ztest_ds_t *zd, uint64_t id) 2327{ 2328 ztest_shared_opts_t *zo = &ztest_opts; 2329 spa_t *spa; 2330 nvlist_t *nvroot; 2331 2332 /* 2333 * Attempt to create using a bad file. 2334 */ 2335 nvroot = make_vdev_root("/dev/bogus", NULL, NULL, 0, 0, 0, 0, 0, 1); 2336 VERIFY3U(ENOENT, ==, 2337 spa_create("ztest_bad_file", nvroot, NULL, NULL)); 2338 nvlist_free(nvroot); 2339 2340 /* 2341 * Attempt to create using a bad mirror. 2342 */ 2343 nvroot = make_vdev_root("/dev/bogus", NULL, NULL, 0, 0, 0, 0, 2, 1); 2344 VERIFY3U(ENOENT, ==, 2345 spa_create("ztest_bad_mirror", nvroot, NULL, NULL)); 2346 nvlist_free(nvroot); 2347 2348 /* 2349 * Attempt to create an existing pool. It shouldn't matter 2350 * what's in the nvroot; we should fail with EEXIST. 
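* (ztest_name_lock is held as reader throughout, so no concurrent test can rename or destroy the pool while we poke at it.)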
*/ 2352 (void) rw_rdlock(&ztest_name_lock); 2353 nvroot = make_vdev_root("/dev/bogus", NULL, NULL, 0, 0, 0, 0, 0, 1); 2354 VERIFY3U(EEXIST, ==, spa_create(zo->zo_pool, nvroot, NULL, NULL)); 2355 nvlist_free(nvroot); 2356 VERIFY3U(0, ==, spa_open(zo->zo_pool, &spa, FTAG)); 2357 VERIFY3U(EBUSY, ==, spa_destroy(zo->zo_pool)); 2358 spa_close(spa, FTAG); 2359 2360 (void) rw_unlock(&ztest_name_lock); 2361} 2362 2363/* ARGSUSED */ 2364void 2365ztest_spa_upgrade(ztest_ds_t *zd, uint64_t id) 2366{ 2367 spa_t *spa; 2368 uint64_t initial_version = SPA_VERSION_INITIAL; 2369 uint64_t version, newversion; 2370 nvlist_t *nvroot, *props; 2371 char *name; 2372 2373 VERIFY0(mutex_lock(&ztest_vdev_lock)); 2374 name = kmem_asprintf("%s_upgrade", ztest_opts.zo_pool); 2375 2376 /* 2377 * Clean up from previous runs. 2378 */ 2379 (void) spa_destroy(name); 2380 2381 nvroot = make_vdev_root(NULL, NULL, name, ztest_opts.zo_vdev_size, 0, 2382 0, ztest_opts.zo_raidz, ztest_opts.zo_mirrors, 1); 2383 2384 /* 2385 * If we're configuring a RAIDZ device then make sure that 2386 * the initial version is capable of supporting that feature. 2387 */ 2388 switch (ztest_opts.zo_raidz_parity) { 2389 case 0: 2390 case 1: 2391 initial_version = SPA_VERSION_INITIAL; 2392 break; 2393 case 2: 2394 initial_version = SPA_VERSION_RAIDZ2; 2395 break; 2396 case 3: 2397 initial_version = SPA_VERSION_RAIDZ3; 2398 break; 2399 } 2400 2401 /* 2402 * Create a pool with a spa version that can be upgraded. Pick 2403 * a value between initial_version and SPA_VERSION_BEFORE_FEATURES. 2404 */ 2405 do { 2406 version = ztest_random_spa_version(initial_version); 2407 } while (version > SPA_VERSION_BEFORE_FEATURES); 2408 2409 props = fnvlist_alloc(); 2410 fnvlist_add_uint64(props, 2411 zpool_prop_to_name(ZPOOL_PROP_VERSION), version); 2412 VERIFY0(spa_create(name, nvroot, props, NULL)); 2413 fnvlist_free(nvroot); 2414 fnvlist_free(props); 2415 2416 VERIFY0(spa_open(name, &spa, FTAG)); 2417 VERIFY3U(spa_version(spa), ==, version); 2418 newversion = ztest_random_spa_version(version + 1); 2419 2420 if (ztest_opts.zo_verbose >= 4) { 2421 (void) printf("upgrading spa version from %llu to %llu\n", 2422 (u_longlong_t)version, (u_longlong_t)newversion); 2423 } 2424 2425 spa_upgrade(spa, newversion); 2426 VERIFY3U(spa_version(spa), >, version); 2427 VERIFY3U(spa_version(spa), ==, fnvlist_lookup_uint64(spa->spa_config, 2428 zpool_prop_to_name(ZPOOL_PROP_VERSION))); 2429 spa_close(spa, FTAG); 2430 2431 strfree(name); 2432 VERIFY0(mutex_unlock(&ztest_vdev_lock)); 2433} 2434 2435static vdev_t * 2436vdev_lookup_by_path(vdev_t *vd, const char *path) 2437{ 2438 vdev_t *mvd; 2439 2440 if (vd->vdev_path != NULL && strcmp(path, vd->vdev_path) == 0) 2441 return (vd); 2442 2443 for (int c = 0; c < vd->vdev_children; c++) 2444 if ((mvd = vdev_lookup_by_path(vd->vdev_child[c], path)) != 2445 NULL) 2446 return (mvd); 2447 2448 return (NULL); 2449} 2450 2451/* 2452 * Find the first available hole which can be used as a top-level. 2453 */ 2454int 2455find_vdev_hole(spa_t *spa) 2456{ 2457 vdev_t *rvd = spa->spa_root_vdev; 2458 int c; 2459 2460 ASSERT(spa_config_held(spa, SCL_VDEV, RW_READER) == SCL_VDEV); 2461 2462 for (c = 0; c < rvd->vdev_children; c++) { 2463 vdev_t *cvd = rvd->vdev_child[c]; 2464 2465 if (cvd->vdev_ishole) 2466 break; 2467 } 2468 return (c); 2469} 2470 2471/* 2472 * Verify that vdev_add() works as expected.
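* (and, when the pool has slogs, that removing one via spa_vdev_remove() works too -- we take that path about 1/4 of the time instead of adding)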
2473 */ 2474/* ARGSUSED */ 2475void 2476ztest_vdev_add_remove(ztest_ds_t *zd, uint64_t id) 2477{ 2478 ztest_shared_t *zs = ztest_shared; 2479 spa_t *spa = ztest_spa; 2480 uint64_t leaves; 2481 uint64_t guid; 2482 nvlist_t *nvroot; 2483 int error; 2484 2485 VERIFY(mutex_lock(&ztest_vdev_lock) == 0); 2486 leaves = MAX(zs->zs_mirrors + zs->zs_splits, 1) * ztest_opts.zo_raidz; 2487 2488 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); 2489 2490 ztest_shared->zs_vdev_next_leaf = find_vdev_hole(spa) * leaves; 2491 2492 /* 2493 * If we have slogs then remove them 1/4 of the time. 2494 */ 2495 if (spa_has_slogs(spa) && ztest_random(4) == 0) { 2496 /* 2497 * Grab the guid from the head of the log class rotor. 2498 */ 2499 guid = spa_log_class(spa)->mc_rotor->mg_vd->vdev_guid; 2500 2501 spa_config_exit(spa, SCL_VDEV, FTAG); 2502 2503 /* 2504 * We have to grab the zs_name_lock as writer to 2505 * prevent a race between removing a slog (dmu_objset_find) 2506 * and destroying a dataset. Removing the slog will 2507 * grab a reference on the dataset which may cause 2508 * dmu_objset_destroy() to fail with EBUSY thus 2509 * leaving the dataset in an inconsistent state. 2510 */ 2511 VERIFY(rw_wrlock(&ztest_name_lock) == 0); 2512 error = spa_vdev_remove(spa, guid, B_FALSE); 2513 VERIFY(rw_unlock(&ztest_name_lock) == 0); 2514 2515 if (error && error != EEXIST) 2516 fatal(0, "spa_vdev_remove() = %d", error); 2517 } else { 2518 spa_config_exit(spa, SCL_VDEV, FTAG); 2519 2520 /* 2521 * Make 1/4 of the devices be log devices. 2522 */ 2523 nvroot = make_vdev_root(NULL, NULL, NULL, 2524 ztest_opts.zo_vdev_size, 0, 2525 ztest_random(4) == 0, ztest_opts.zo_raidz, 2526 zs->zs_mirrors, 1); 2527 2528 error = spa_vdev_add(spa, nvroot); 2529 nvlist_free(nvroot); 2530 2531 if (error == ENOSPC) 2532 ztest_record_enospc("spa_vdev_add"); 2533 else if (error != 0) 2534 fatal(0, "spa_vdev_add() = %d", error); 2535 } 2536 2537 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0); 2538} 2539 2540/* 2541 * Verify that adding/removing aux devices (l2arc, hot spare) works as expected. 2542 */ 2543/* ARGSUSED */ 2544void 2545ztest_vdev_aux_add_remove(ztest_ds_t *zd, uint64_t id) 2546{ 2547 ztest_shared_t *zs = ztest_shared; 2548 spa_t *spa = ztest_spa; 2549 vdev_t *rvd = spa->spa_root_vdev; 2550 spa_aux_vdev_t *sav; 2551 char *aux; 2552 uint64_t guid = 0; 2553 int error; 2554 2555 if (ztest_random(2) == 0) { 2556 sav = &spa->spa_spares; 2557 aux = ZPOOL_CONFIG_SPARES; 2558 } else { 2559 sav = &spa->spa_l2cache; 2560 aux = ZPOOL_CONFIG_L2CACHE; 2561 } 2562 2563 VERIFY(mutex_lock(&ztest_vdev_lock) == 0); 2564 2565 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); 2566 2567 if (sav->sav_count != 0 && ztest_random(4) == 0) { 2568 /* 2569 * Pick a random device to remove. 2570 */ 2571 guid = sav->sav_vdevs[ztest_random(sav->sav_count)]->vdev_guid; 2572 } else { 2573 /* 2574 * Find an unused device we can add. 2575 */ 2576 zs->zs_vdev_aux = 0; 2577 for (;;) { 2578 char path[MAXPATHLEN]; 2579 int c; 2580 (void) snprintf(path, sizeof (path), ztest_aux_template, 2581 ztest_opts.zo_dir, ztest_opts.zo_pool, aux, 2582 zs->zs_vdev_aux); 2583 for (c = 0; c < sav->sav_count; c++) 2584 if (strcmp(sav->sav_vdevs[c]->vdev_path, 2585 path) == 0) 2586 break; 2587 if (c == sav->sav_count && 2588 vdev_lookup_by_path(rvd, path) == NULL) 2589 break; 2590 zs->zs_vdev_aux++; 2591 } 2592 } 2593 2594 spa_config_exit(spa, SCL_VDEV, FTAG); 2595 2596 if (guid == 0) { 2597 /* 2598 * Add a new device. 
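* The aux device is sized a bit larger than the normal vdevs (the 5/4 factor below), presumably so a spare is never too small to stand in for one of them.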
*/ 2600 nvlist_t *nvroot = make_vdev_root(NULL, aux, NULL, 2601 (ztest_opts.zo_vdev_size * 5) / 4, 0, 0, 0, 0, 1); 2602 error = spa_vdev_add(spa, nvroot); 2603 if (error != 0) 2604 fatal(0, "spa_vdev_add(%p) = %d", nvroot, error); 2605 nvlist_free(nvroot); 2606 } else { 2607 /* 2608 * Remove an existing device. Sometimes, dirty its 2609 * vdev state first to make sure we handle removal 2610 * of devices that have pending state changes. 2611 */ 2612 if (ztest_random(2) == 0) 2613 (void) vdev_online(spa, guid, 0, NULL); 2614 2615 error = spa_vdev_remove(spa, guid, B_FALSE); 2616 if (error != 0 && error != EBUSY) 2617 fatal(0, "spa_vdev_remove(%llu) = %d", guid, error); 2618 } 2619 2620 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0); 2621} 2622 2623/* 2624 * split a pool if it has mirror tlvdevs 2625 */ 2626/* ARGSUSED */ 2627void 2628ztest_split_pool(ztest_ds_t *zd, uint64_t id) 2629{ 2630 ztest_shared_t *zs = ztest_shared; 2631 spa_t *spa = ztest_spa; 2632 vdev_t *rvd = spa->spa_root_vdev; 2633 nvlist_t *tree, **child, *config, *split, **schild; 2634 uint_t c, children, schildren = 0, lastlogid = 0; 2635 int error = 0; 2636 2637 VERIFY(mutex_lock(&ztest_vdev_lock) == 0); 2638 2639 /* ensure we have a usable config; mirrors of raidz aren't supported */ 2640 if (zs->zs_mirrors < 3 || ztest_opts.zo_raidz > 1) { 2641 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0); 2642 return; 2643 } 2644 2645 /* clean up the old pool, if any */ 2646 (void) spa_destroy("splitp"); 2647 2648 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); 2649 2650 /* generate a config from the existing config */ 2651 mutex_enter(&spa->spa_props_lock); 2652 VERIFY(nvlist_lookup_nvlist(spa->spa_config, ZPOOL_CONFIG_VDEV_TREE, 2653 &tree) == 0); 2654 mutex_exit(&spa->spa_props_lock); 2655 2656 VERIFY(nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child, 2657 &children) == 0); 2658 2659 schild = malloc(rvd->vdev_children * sizeof (nvlist_t *)); 2660 for (c = 0; c < children; c++) { 2661 vdev_t *tvd = rvd->vdev_child[c]; 2662 nvlist_t **mchild; 2663 uint_t mchildren; 2664 2665 if (tvd->vdev_islog || tvd->vdev_ops == &vdev_hole_ops) { 2666 VERIFY(nvlist_alloc(&schild[schildren], NV_UNIQUE_NAME, 2667 0) == 0); 2668 VERIFY(nvlist_add_string(schild[schildren], 2669 ZPOOL_CONFIG_TYPE, VDEV_TYPE_HOLE) == 0); 2670 VERIFY(nvlist_add_uint64(schild[schildren], 2671 ZPOOL_CONFIG_IS_HOLE, 1) == 0); 2672 if (lastlogid == 0) 2673 lastlogid = schildren; 2674 ++schildren; 2675 continue; 2676 } 2677 lastlogid = 0; 2678 VERIFY(nvlist_lookup_nvlist_array(child[c], 2679 ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0); 2680 VERIFY(nvlist_dup(mchild[0], &schild[schildren++], 0) == 0); 2681 } 2682 2683 /* OK, create a config that can be used to split */ 2684 VERIFY(nvlist_alloc(&split, NV_UNIQUE_NAME, 0) == 0); 2685 VERIFY(nvlist_add_string(split, ZPOOL_CONFIG_TYPE, 2686 VDEV_TYPE_ROOT) == 0); 2687 VERIFY(nvlist_add_nvlist_array(split, ZPOOL_CONFIG_CHILDREN, schild, 2688 lastlogid != 0 ?
lastlogid : schildren) == 0); 2689 2690 VERIFY(nvlist_alloc(&config, NV_UNIQUE_NAME, 0) == 0); 2691 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, split) == 0); 2692 2693 for (c = 0; c < schildren; c++) 2694 nvlist_free(schild[c]); 2695 free(schild); 2696 nvlist_free(split); 2697 2698 spa_config_exit(spa, SCL_VDEV, FTAG); 2699 2700 (void) rw_wrlock(&ztest_name_lock); 2701 error = spa_vdev_split_mirror(spa, "splitp", config, NULL, B_FALSE); 2702 (void) rw_unlock(&ztest_name_lock); 2703 2704 nvlist_free(config); 2705 2706 if (error == 0) { 2707 (void) printf("successful split - results:\n"); 2708 mutex_enter(&spa_namespace_lock); 2709 show_pool_stats(spa); 2710 show_pool_stats(spa_lookup("splitp")); 2711 mutex_exit(&spa_namespace_lock); 2712 ++zs->zs_splits; 2713 --zs->zs_mirrors; 2714 } 2715 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0); 2716 2717} 2718 2719/* 2720 * Verify that we can attach and detach devices. 2721 */ 2722/* ARGSUSED */ 2723void 2724ztest_vdev_attach_detach(ztest_ds_t *zd, uint64_t id) 2725{ 2726 ztest_shared_t *zs = ztest_shared; 2727 spa_t *spa = ztest_spa; 2728 spa_aux_vdev_t *sav = &spa->spa_spares; 2729 vdev_t *rvd = spa->spa_root_vdev; 2730 vdev_t *oldvd, *newvd, *pvd; 2731 nvlist_t *root; 2732 uint64_t leaves; 2733 uint64_t leaf, top; 2734 uint64_t ashift = ztest_get_ashift(); 2735 uint64_t oldguid, pguid; 2736 size_t oldsize, newsize; 2737 char oldpath[MAXPATHLEN], newpath[MAXPATHLEN]; 2738 int replacing; 2739 int oldvd_has_siblings = B_FALSE; 2740 int newvd_is_spare = B_FALSE; 2741 int oldvd_is_log; 2742 int error, expected_error; 2743 2744 VERIFY(mutex_lock(&ztest_vdev_lock) == 0); 2745 leaves = MAX(zs->zs_mirrors, 1) * ztest_opts.zo_raidz; 2746 2747 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); 2748 2749 /* 2750 * Decide whether to do an attach or a replace. 2751 */ 2752 replacing = ztest_random(2); 2753 2754 /* 2755 * Pick a random top-level vdev. 2756 */ 2757 top = ztest_random_vdev_top(spa, B_TRUE); 2758 2759 /* 2760 * Pick a random leaf within it. 2761 */ 2762 leaf = ztest_random(leaves); 2763 2764 /* 2765 * Locate this vdev. 2766 */ 2767 oldvd = rvd->vdev_child[top]; 2768 if (zs->zs_mirrors >= 1) { 2769 ASSERT(oldvd->vdev_ops == &vdev_mirror_ops); 2770 ASSERT(oldvd->vdev_children >= zs->zs_mirrors); 2771 oldvd = oldvd->vdev_child[leaf / ztest_opts.zo_raidz]; 2772 } 2773 if (ztest_opts.zo_raidz > 1) { 2774 ASSERT(oldvd->vdev_ops == &vdev_raidz_ops); 2775 ASSERT(oldvd->vdev_children == ztest_opts.zo_raidz); 2776 oldvd = oldvd->vdev_child[leaf % ztest_opts.zo_raidz]; 2777 } 2778 2779 /* 2780 * If we're already doing an attach or replace, oldvd may be a 2781 * mirror vdev -- in which case, pick a random child. 2782 */ 2783 while (oldvd->vdev_children != 0) { 2784 oldvd_has_siblings = B_TRUE; 2785 ASSERT(oldvd->vdev_children >= 2); 2786 oldvd = oldvd->vdev_child[ztest_random(oldvd->vdev_children)]; 2787 } 2788 2789 oldguid = oldvd->vdev_guid; 2790 oldsize = vdev_get_min_asize(oldvd); 2791 oldvd_is_log = oldvd->vdev_top->vdev_islog; 2792 (void) strcpy(oldpath, oldvd->vdev_path); 2793 pvd = oldvd->vdev_parent; 2794 pguid = pvd->vdev_guid; 2795 2796 /* 2797 * If oldvd has siblings, then half of the time, detach it. 
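* (ENODEV, EBUSY and ENOTSUP are tolerated results of the detach; any other error is fatal.)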
2798 */ 2799 if (oldvd_has_siblings && ztest_random(2) == 0) { 2800 spa_config_exit(spa, SCL_VDEV, FTAG); 2801 error = spa_vdev_detach(spa, oldguid, pguid, B_FALSE); 2802 if (error != 0 && error != ENODEV && error != EBUSY && 2803 error != ENOTSUP) 2804 fatal(0, "detach (%s) returned %d", oldpath, error); 2805 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0); 2806 return; 2807 } 2808 2809 /* 2810 * For the new vdev, choose with equal probability between the two 2811 * standard paths (ending in either 'a' or 'b') or a random hot spare. 2812 */ 2813 if (sav->sav_count != 0 && ztest_random(3) == 0) { 2814 newvd = sav->sav_vdevs[ztest_random(sav->sav_count)]; 2815 newvd_is_spare = B_TRUE; 2816 (void) strcpy(newpath, newvd->vdev_path); 2817 } else { 2818 (void) snprintf(newpath, sizeof (newpath), ztest_dev_template, 2819 ztest_opts.zo_dir, ztest_opts.zo_pool, 2820 top * leaves + leaf); 2821 if (ztest_random(2) == 0) 2822 newpath[strlen(newpath) - 1] = 'b'; 2823 newvd = vdev_lookup_by_path(rvd, newpath); 2824 } 2825 2826 if (newvd) { 2827 newsize = vdev_get_min_asize(newvd); 2828 } else { 2829 /* 2830 * Make newsize a little bigger or smaller than oldsize. 2831 * If it's smaller, the attach should fail. 2832 * If it's larger, and we're doing a replace, 2833 * we should get dynamic LUN growth when we're done. 2834 */ 2835 newsize = 10 * oldsize / (9 + ztest_random(3)); 2836 } 2837 2838 /* 2839 * If pvd is not a mirror or root, the attach should fail with ENOTSUP, 2840 * unless it's a replace; in that case any non-replacing parent is OK. 2841 * 2842 * If newvd is already part of the pool, it should fail with EBUSY. 2843 * 2844 * If newvd is too small, it should fail with EOVERFLOW. 2845 */ 2846 if (pvd->vdev_ops != &vdev_mirror_ops && 2847 pvd->vdev_ops != &vdev_root_ops && (!replacing || 2848 pvd->vdev_ops == &vdev_replacing_ops || 2849 pvd->vdev_ops == &vdev_spare_ops)) 2850 expected_error = ENOTSUP; 2851 else if (newvd_is_spare && (!replacing || oldvd_is_log)) 2852 expected_error = ENOTSUP; 2853 else if (newvd == oldvd) 2854 expected_error = replacing ? 0 : EBUSY; 2855 else if (vdev_lookup_by_path(rvd, newpath) != NULL) 2856 expected_error = EBUSY; 2857 else if (newsize < oldsize) 2858 expected_error = EOVERFLOW; 2859 else if (ashift > oldvd->vdev_top->vdev_ashift) 2860 expected_error = EDOM; 2861 else 2862 expected_error = 0; 2863 2864 spa_config_exit(spa, SCL_VDEV, FTAG); 2865 2866 /* 2867 * Build the nvlist describing newpath. 2868 */ 2869 root = make_vdev_root(newpath, NULL, NULL, newvd == NULL ? newsize : 0, 2870 ashift, 0, 0, 0, 1); 2871 2872 error = spa_vdev_attach(spa, oldguid, root, replacing); 2873 2874 nvlist_free(root); 2875 2876 /* 2877 * If our parent was the replacing vdev, but the replace completed, 2878 * then instead of failing with ENOTSUP we may either succeed, 2879 * fail with ENODEV, or fail with EOVERFLOW. 2880 */ 2881 if (expected_error == ENOTSUP && 2882 (error == 0 || error == ENODEV || error == EOVERFLOW)) 2883 expected_error = error; 2884 2885 /* 2886 * If someone grew the LUN, the replacement may be too small. 
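* (In that case EOVERFLOW -- or EBUSY -- simply becomes the expected error.)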
2887 */ 2888 if (error == EOVERFLOW || error == EBUSY) 2889 expected_error = error; 2890 2891 /* XXX workaround 6690467 */ 2892 if (error != expected_error && expected_error != EBUSY) { 2893 fatal(0, "attach (%s %llu, %s %llu, %d) " 2894 "returned %d, expected %d", 2895 oldpath, (longlong_t)oldsize, newpath, 2896 (longlong_t)newsize, replacing, error, expected_error); 2897 } 2898 2899 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0); 2900} 2901 2902/* 2903 * Callback function which expands the physical size of the vdev. 2904 */ 2905vdev_t * 2906grow_vdev(vdev_t *vd, void *arg) 2907{ 2908 spa_t *spa = vd->vdev_spa; 2909 size_t *newsize = arg; 2910 size_t fsize; 2911 int fd; 2912 2913 ASSERT(spa_config_held(spa, SCL_STATE, RW_READER) == SCL_STATE); 2914 ASSERT(vd->vdev_ops->vdev_op_leaf); 2915 2916 if ((fd = open(vd->vdev_path, O_RDWR)) == -1) 2917 return (vd); 2918 2919 fsize = lseek(fd, 0, SEEK_END); 2920 (void) ftruncate(fd, *newsize); 2921 2922 if (ztest_opts.zo_verbose >= 6) { 2923 (void) printf("%s grew from %lu to %lu bytes\n", 2924 vd->vdev_path, (ulong_t)fsize, (ulong_t)*newsize); 2925 } 2926 (void) close(fd); 2927 return (NULL); 2928} 2929 2930/* 2931 * Callback function which expands a given vdev by calling vdev_online(). 2932 */ 2933/* ARGSUSED */ 2934vdev_t * 2935online_vdev(vdev_t *vd, void *arg) 2936{ 2937 spa_t *spa = vd->vdev_spa; 2938 vdev_t *tvd = vd->vdev_top; 2939 uint64_t guid = vd->vdev_guid; 2940 uint64_t generation = spa->spa_config_generation + 1; 2941 vdev_state_t newstate = VDEV_STATE_UNKNOWN; 2942 int error; 2943 2944 ASSERT(spa_config_held(spa, SCL_STATE, RW_READER) == SCL_STATE); 2945 ASSERT(vd->vdev_ops->vdev_op_leaf); 2946 2947 /* Calling vdev_online will initialize the new metaslabs */ 2948 spa_config_exit(spa, SCL_STATE, spa); 2949 error = vdev_online(spa, guid, ZFS_ONLINE_EXPAND, &newstate); 2950 spa_config_enter(spa, SCL_STATE, spa, RW_READER); 2951 2952 /* 2953 * If vdev_online returned an error or the underlying vdev_open 2954 * failed then we abort the expand. The only way to know that 2955 * vdev_open fails is by checking the returned newstate. 2956 */ 2957 if (error || newstate != VDEV_STATE_HEALTHY) { 2958 if (ztest_opts.zo_verbose >= 5) { 2959 (void) printf("Unable to expand vdev, state %llu, " 2960 "error %d\n", (u_longlong_t)newstate, error); 2961 } 2962 return (vd); 2963 } 2964 ASSERT3U(newstate, ==, VDEV_STATE_HEALTHY); 2965 2966 /* 2967 * Since we dropped the lock we need to ensure that we're 2968 * still talking to the original vdev. It's possible this 2969 * vdev may have been detached/replaced while we were 2970 * trying to online it. 2971 */ 2972 if (generation != spa->spa_config_generation) { 2973 if (ztest_opts.zo_verbose >= 5) { 2974 (void) printf("vdev configuration has changed, " 2975 "guid %llu, state %llu, expected gen %llu, " 2976 "got gen %llu\n", 2977 (u_longlong_t)guid, 2978 (u_longlong_t)tvd->vdev_state, 2979 (u_longlong_t)generation, 2980 (u_longlong_t)spa->spa_config_generation); 2981 } 2982 return (vd); 2983 } 2984 return (NULL); 2985} 2986 2987/* 2988 * Traverse the vdev tree calling the supplied function. 2989 * We continue to walk the tree until we either have walked all 2990 * children or we receive a non-NULL return from the callback. 2991 * If a NULL callback is passed, then we just return back the first 2992 * leaf vdev we encounter. 
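* (The walk is depth-first, left to right, so a NULL callback yields the leftmost leaf.)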
2993 */ 2994vdev_t * 2995vdev_walk_tree(vdev_t *vd, vdev_t *(*func)(vdev_t *, void *), void *arg) 2996{ 2997 if (vd->vdev_ops->vdev_op_leaf) { 2998 if (func == NULL) 2999 return (vd); 3000 else 3001 return (func(vd, arg)); 3002 } 3003 3004 for (uint_t c = 0; c < vd->vdev_children; c++) { 3005 vdev_t *cvd = vd->vdev_child[c]; 3006 if ((cvd = vdev_walk_tree(cvd, func, arg)) != NULL) 3007 return (cvd); 3008 } 3009 return (NULL); 3010} 3011 3012/* 3013 * Verify that dynamic LUN growth works as expected. 3014 */ 3015/* ARGSUSED */ 3016void 3017ztest_vdev_LUN_growth(ztest_ds_t *zd, uint64_t id) 3018{ 3019 spa_t *spa = ztest_spa; 3020 vdev_t *vd, *tvd; 3021 metaslab_class_t *mc; 3022 metaslab_group_t *mg; 3023 size_t psize, newsize; 3024 uint64_t top; 3025 uint64_t old_class_space, new_class_space, old_ms_count, new_ms_count; 3026 3027 VERIFY(mutex_lock(&ztest_vdev_lock) == 0); 3028 spa_config_enter(spa, SCL_STATE, spa, RW_READER); 3029 3030 top = ztest_random_vdev_top(spa, B_TRUE); 3031 3032 tvd = spa->spa_root_vdev->vdev_child[top]; 3033 mg = tvd->vdev_mg; 3034 mc = mg->mg_class; 3035 old_ms_count = tvd->vdev_ms_count; 3036 old_class_space = metaslab_class_get_space(mc); 3037 3038 /* 3039 * Determine the size of the first leaf vdev associated with 3040 * our top-level device. 3041 */ 3042 vd = vdev_walk_tree(tvd, NULL, NULL); 3043 ASSERT3P(vd, !=, NULL); 3044 ASSERT(vd->vdev_ops->vdev_op_leaf); 3045 3046 psize = vd->vdev_psize; 3047 3048 /* 3049 * We only try to expand the vdev if it's healthy, less than 4x its 3050 * original size, and it has a valid psize. 3051 */ 3052 if (tvd->vdev_state != VDEV_STATE_HEALTHY || 3053 psize == 0 || psize >= 4 * ztest_opts.zo_vdev_size) { 3054 spa_config_exit(spa, SCL_STATE, spa); 3055 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0); 3056 return; 3057 } 3058 ASSERT(psize > 0); 3059 newsize = psize + psize / 8; 3060 ASSERT3U(newsize, >, psize); 3061 3062 if (ztest_opts.zo_verbose >= 6) { 3063 (void) printf("Expanding LUN %s from %lu to %lu\n", 3064 vd->vdev_path, (ulong_t)psize, (ulong_t)newsize); 3065 } 3066 3067 /* 3068 * Growing the vdev is a two step process: 3069 * 1). expand the physical size (i.e. relabel) 3070 * 2). online the vdev to create the new metaslabs 3071 */ 3072 if (vdev_walk_tree(tvd, grow_vdev, &newsize) != NULL || 3073 vdev_walk_tree(tvd, online_vdev, NULL) != NULL || 3074 tvd->vdev_state != VDEV_STATE_HEALTHY) { 3075 if (ztest_opts.zo_verbose >= 5) { 3076 (void) printf("Could not expand LUN because " 3077 "the vdev configuration changed.\n"); 3078 } 3079 spa_config_exit(spa, SCL_STATE, spa); 3080 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0); 3081 return; 3082 } 3083 3084 spa_config_exit(spa, SCL_STATE, spa); 3085 3086 /* 3087 * Expanding the LUN will update the config asynchronously, 3088 * thus we must wait for the async thread to complete any 3089 * pending tasks before proceeding. 
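* (Concretely, the loop below syncs a txg and polls every 100ms until spa_async_thread has exited and no async tasks remain.)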
3090 */ 3091 for (;;) { 3092 boolean_t done; 3093 mutex_enter(&spa->spa_async_lock); 3094 done = (spa->spa_async_thread == NULL && !spa->spa_async_tasks); 3095 mutex_exit(&spa->spa_async_lock); 3096 if (done) 3097 break; 3098 txg_wait_synced(spa_get_dsl(spa), 0); 3099 (void) poll(NULL, 0, 100); 3100 } 3101 3102 spa_config_enter(spa, SCL_STATE, spa, RW_READER); 3103 3104 tvd = spa->spa_root_vdev->vdev_child[top]; 3105 new_ms_count = tvd->vdev_ms_count; 3106 new_class_space = metaslab_class_get_space(mc); 3107 3108 if (tvd->vdev_mg != mg || mg->mg_class != mc) { 3109 if (ztest_opts.zo_verbose >= 5) { 3110 (void) printf("Could not verify LUN expansion due to " 3111 "intervening vdev offline or remove.\n"); 3112 } 3113 spa_config_exit(spa, SCL_STATE, spa); 3114 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0); 3115 return; 3116 } 3117 3118 /* 3119 * Make sure we were able to grow the vdev. 3120 */ 3121 if (new_ms_count <= old_ms_count) 3122 fatal(0, "LUN expansion failed: ms_count %llu <= %llu\n", 3123 old_ms_count, new_ms_count); 3124 3125 /* 3126 * Make sure we were able to grow the pool. 3127 */ 3128 if (new_class_space <= old_class_space) 3129 fatal(0, "LUN expansion failed: class_space %llu <= %llu\n", 3130 old_class_space, new_class_space); 3131 3132 if (ztest_opts.zo_verbose >= 5) { 3133 char oldnumbuf[6], newnumbuf[6]; 3134 3135 nicenum(old_class_space, oldnumbuf); 3136 nicenum(new_class_space, newnumbuf); 3137 (void) printf("%s grew from %s to %s\n", 3138 spa->spa_name, oldnumbuf, newnumbuf); 3139 } 3140 3141 spa_config_exit(spa, SCL_STATE, spa); 3142 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0); 3143} 3144 3145/* 3146 * Verify that dmu_objset_{create,destroy,open,close} work as expected. 3147 */ 3148/* ARGSUSED */ 3149static void 3150ztest_objset_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx) 3151{ 3152 /* 3153 * Create the objects common to all ztest datasets. 3154 */ 3155 VERIFY(zap_create_claim(os, ZTEST_DIROBJ, 3156 DMU_OT_ZAP_OTHER, DMU_OT_NONE, 0, tx) == 0); 3157} 3158 3159static int 3160ztest_dataset_create(char *dsname) 3161{ 3162 uint64_t zilset = ztest_random(100); 3163 int err = dmu_objset_create(dsname, DMU_OST_OTHER, 0, 3164 ztest_objset_create_cb, NULL); 3165 3166 if (err || zilset < 80) 3167 return (err); 3168 3169 if (ztest_opts.zo_verbose >= 6) 3170 (void) printf("Setting dataset %s to sync always\n", dsname); 3171 return (ztest_dsl_prop_set_uint64(dsname, ZFS_PROP_SYNC, 3172 ZFS_SYNC_ALWAYS, B_FALSE)); 3173} 3174 3175/* ARGSUSED */ 3176static int 3177ztest_objset_destroy_cb(const char *name, void *arg) 3178{ 3179 objset_t *os; 3180 dmu_object_info_t doi; 3181 int error; 3182 3183 /* 3184 * Verify that the dataset contains a directory object. 3185 */ 3186 VERIFY0(dmu_objset_own(name, DMU_OST_OTHER, B_TRUE, FTAG, &os)); 3187 error = dmu_object_info(os, ZTEST_DIROBJ, &doi); 3188 if (error != ENOENT) { 3189 /* We could have crashed in the middle of destroying it */ 3190 ASSERT0(error); 3191 ASSERT3U(doi.doi_type, ==, DMU_OT_ZAP_OTHER); 3192 ASSERT3S(doi.doi_physical_blocks_512, >=, 0); 3193 } 3194 dmu_objset_disown(os, FTAG); 3195 3196 /* 3197 * Destroy the dataset. 
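* Names containing '@' are snapshots and go through dsl_destroy_snapshot(); everything else is a head dataset and goes through dsl_destroy_head().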
3198 */ 3199 if (strchr(name, '@') != NULL) { 3200 VERIFY0(dsl_destroy_snapshot(name, B_FALSE)); 3201 } else { 3202 VERIFY0(dsl_destroy_head(name)); 3203 } 3204 return (0); 3205} 3206 3207static boolean_t 3208ztest_snapshot_create(char *osname, uint64_t id) 3209{ 3210 char snapname[MAXNAMELEN]; 3211 int error; 3212 3213 (void) snprintf(snapname, sizeof (snapname), "%llu", (u_longlong_t)id); 3214 3215 error = dmu_objset_snapshot_one(osname, snapname); 3216 if (error == ENOSPC) { 3217 ztest_record_enospc(FTAG); 3218 return (B_FALSE); 3219 } 3220 if (error != 0 && error != EEXIST) { 3221 fatal(0, "ztest_snapshot_create(%s@%s) = %d", osname, 3222 snapname, error); 3223 } 3224 return (B_TRUE); 3225} 3226 3227static boolean_t 3228ztest_snapshot_destroy(char *osname, uint64_t id) 3229{ 3230 char snapname[MAXNAMELEN]; 3231 int error; 3232 3233 (void) snprintf(snapname, MAXNAMELEN, "%s@%llu", osname, 3234 (u_longlong_t)id); 3235 3236 error = dsl_destroy_snapshot(snapname, B_FALSE); 3237 if (error != 0 && error != ENOENT) 3238 fatal(0, "ztest_snapshot_destroy(%s) = %d", snapname, error); 3239 return (B_TRUE); 3240} 3241 3242/* ARGSUSED */ 3243void 3244ztest_dmu_objset_create_destroy(ztest_ds_t *zd, uint64_t id) 3245{ 3246 ztest_ds_t zdtmp; 3247 int iters; 3248 int error; 3249 objset_t *os, *os2; 3250 char name[MAXNAMELEN]; 3251 zilog_t *zilog; 3252 3253 (void) rw_rdlock(&ztest_name_lock); 3254 3255 (void) snprintf(name, MAXNAMELEN, "%s/temp_%llu", 3256 ztest_opts.zo_pool, (u_longlong_t)id); 3257 3258 /* 3259 * If this dataset exists from a previous run, process its replay log 3260 * half of the time. If we don't replay it, then dmu_objset_destroy() 3261 * (invoked from ztest_objset_destroy_cb()) should just throw it away. 3262 */ 3263 if (ztest_random(2) == 0 && 3264 dmu_objset_own(name, DMU_OST_OTHER, B_FALSE, FTAG, &os) == 0) { 3265 ztest_zd_init(&zdtmp, NULL, os); 3266 zil_replay(os, &zdtmp, ztest_replay_vector); 3267 ztest_zd_fini(&zdtmp); 3268 dmu_objset_disown(os, FTAG); 3269 } 3270 3271 /* 3272 * There may be an old instance of the dataset we're about to 3273 * create lying around from a previous run. If so, destroy it 3274 * and all of its snapshots. 3275 */ 3276 (void) dmu_objset_find(name, ztest_objset_destroy_cb, NULL, 3277 DS_FIND_CHILDREN | DS_FIND_SNAPSHOTS); 3278 3279 /* 3280 * Verify that the destroyed dataset is no longer in the namespace. 3281 */ 3282 VERIFY3U(ENOENT, ==, dmu_objset_own(name, DMU_OST_OTHER, B_TRUE, 3283 FTAG, &os)); 3284 3285 /* 3286 * Verify that we can create a new dataset. 3287 */ 3288 error = ztest_dataset_create(name); 3289 if (error) { 3290 if (error == ENOSPC) { 3291 ztest_record_enospc(FTAG); 3292 (void) rw_unlock(&ztest_name_lock); 3293 return; 3294 } 3295 fatal(0, "dmu_objset_create(%s) = %d", name, error); 3296 } 3297 3298 VERIFY0(dmu_objset_own(name, DMU_OST_OTHER, B_FALSE, FTAG, &os)); 3299 3300 ztest_zd_init(&zdtmp, NULL, os); 3301 3302 /* 3303 * Open the intent log for it. 3304 */ 3305 zilog = zil_open(os, ztest_get_data); 3306 3307 /* 3308 * Put some objects in there, do a little I/O to them, 3309 * and randomly take a couple of snapshots along the way. 3310 */ 3311 iters = ztest_random(5); 3312 for (int i = 0; i < iters; i++) { 3313 ztest_dmu_object_alloc_free(&zdtmp, id); 3314 if (ztest_random(iters) == 0) 3315 (void) ztest_snapshot_create(name, i); 3316 } 3317 3318 /* 3319 * Verify that we cannot create an existing dataset. 
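* (EEXIST is the only acceptable result here.)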
3320 */ 3321 VERIFY3U(EEXIST, ==, 3322 dmu_objset_create(name, DMU_OST_OTHER, 0, NULL, NULL)); 3323 3324 /* 3325 * Verify that we can hold an objset that is also owned. 3326 */ 3327 VERIFY3U(0, ==, dmu_objset_hold(name, FTAG, &os2)); 3328 dmu_objset_rele(os2, FTAG); 3329 3330 /* 3331 * Verify that we cannot own an objset that is already owned. 3332 */ 3333 VERIFY3U(EBUSY, ==, 3334 dmu_objset_own(name, DMU_OST_OTHER, B_FALSE, FTAG, &os2)); 3335 3336 zil_close(zilog); 3337 dmu_objset_disown(os, FTAG); 3338 ztest_zd_fini(&zdtmp); 3339 3340 (void) rw_unlock(&ztest_name_lock); 3341} 3342 3343/* 3344 * Verify that dmu_snapshot_{create,destroy,open,close} work as expected. 3345 */ 3346void 3347ztest_dmu_snapshot_create_destroy(ztest_ds_t *zd, uint64_t id) 3348{ 3349 (void) rw_rdlock(&ztest_name_lock); 3350 (void) ztest_snapshot_destroy(zd->zd_name, id); 3351 (void) ztest_snapshot_create(zd->zd_name, id); 3352 (void) rw_unlock(&ztest_name_lock); 3353} 3354 3355/* 3356 * Cleanup non-standard snapshots and clones. 3357 */ 3358void 3359ztest_dsl_dataset_cleanup(char *osname, uint64_t id) 3360{ 3361 char snap1name[MAXNAMELEN]; 3362 char clone1name[MAXNAMELEN]; 3363 char snap2name[MAXNAMELEN]; 3364 char clone2name[MAXNAMELEN]; 3365 char snap3name[MAXNAMELEN]; 3366 int error; 3367 3368 (void) snprintf(snap1name, MAXNAMELEN, "%s@s1_%llu", osname, id); 3369 (void) snprintf(clone1name, MAXNAMELEN, "%s/c1_%llu", osname, id); 3370 (void) snprintf(snap2name, MAXNAMELEN, "%s@s2_%llu", clone1name, id); 3371 (void) snprintf(clone2name, MAXNAMELEN, "%s/c2_%llu", osname, id); 3372 (void) snprintf(snap3name, MAXNAMELEN, "%s@s3_%llu", clone1name, id); 3373 3374 error = dsl_destroy_head(clone2name); 3375 if (error && error != ENOENT) 3376 fatal(0, "dsl_destroy_head(%s) = %d", clone2name, error); 3377 error = dsl_destroy_snapshot(snap3name, B_FALSE); 3378 if (error && error != ENOENT) 3379 fatal(0, "dsl_destroy_snapshot(%s) = %d", snap3name, error); 3380 error = dsl_destroy_snapshot(snap2name, B_FALSE); 3381 if (error && error != ENOENT) 3382 fatal(0, "dsl_destroy_snapshot(%s) = %d", snap2name, error); 3383 error = dsl_destroy_head(clone1name); 3384 if (error && error != ENOENT) 3385 fatal(0, "dsl_destroy_head(%s) = %d", clone1name, error); 3386 error = dsl_destroy_snapshot(snap1name, B_FALSE); 3387 if (error && error != ENOENT) 3388 fatal(0, "dsl_destroy_snapshot(%s) = %d", snap1name, error); 3389} 3390 3391/* 3392 * Verify dsl_dataset_promote handles EBUSY 3393 */ 3394void 3395ztest_dsl_dataset_promote_busy(ztest_ds_t *zd, uint64_t id) 3396{ 3397 objset_t *os; 3398 char snap1name[MAXNAMELEN]; 3399 char clone1name[MAXNAMELEN]; 3400 char snap2name[MAXNAMELEN]; 3401 char clone2name[MAXNAMELEN]; 3402 char snap3name[MAXNAMELEN]; 3403 char *osname = zd->zd_name; 3404 int error; 3405 3406 (void) rw_rdlock(&ztest_name_lock); 3407 3408 ztest_dsl_dataset_cleanup(osname, id); 3409 3410 (void) snprintf(snap1name, MAXNAMELEN, "%s@s1_%llu", osname, id); 3411 (void) snprintf(clone1name, MAXNAMELEN, "%s/c1_%llu", osname, id); 3412 (void) snprintf(snap2name, MAXNAMELEN, "%s@s2_%llu", clone1name, id); 3413 (void) snprintf(clone2name, MAXNAMELEN, "%s/c2_%llu", osname, id); 3414 (void) snprintf(snap3name, MAXNAMELEN, "%s@s3_%llu", clone1name, id); 3415 3416 error = dmu_objset_snapshot_one(osname, strchr(snap1name, '@') + 1); 3417 if (error && error != EEXIST) { 3418 if (error == ENOSPC) { 3419 ztest_record_enospc(FTAG); 3420 goto out; 3421 } 3422 fatal(0, "dmu_take_snapshot(%s) = %d", snap1name, error); 3423 } 3424 3425 error = 
dmu_objset_clone(clone1name, snap1name); 3426 if (error) { 3427 if (error == ENOSPC) { 3428 ztest_record_enospc(FTAG); 3429 goto out; 3430 } 3431 fatal(0, "dmu_objset_create(%s) = %d", clone1name, error); 3432 } 3433 3434 error = dmu_objset_snapshot_one(clone1name, strchr(snap2name, '@') + 1); 3435 if (error && error != EEXIST) { 3436 if (error == ENOSPC) { 3437 ztest_record_enospc(FTAG); 3438 goto out; 3439 } 3440 fatal(0, "dmu_open_snapshot(%s) = %d", snap2name, error); 3441 } 3442 3443 error = dmu_objset_snapshot_one(clone1name, strchr(snap3name, '@') + 1); 3444 if (error && error != EEXIST) { 3445 if (error == ENOSPC) { 3446 ztest_record_enospc(FTAG); 3447 goto out; 3448 } 3449 fatal(0, "dmu_open_snapshot(%s) = %d", snap3name, error); 3450 } 3451 3452 error = dmu_objset_clone(clone2name, snap3name); 3453 if (error) { 3454 if (error == ENOSPC) { 3455 ztest_record_enospc(FTAG); 3456 goto out; 3457 } 3458 fatal(0, "dmu_objset_create(%s) = %d", clone2name, error); 3459 } 3460 3461 error = dmu_objset_own(snap2name, DMU_OST_ANY, B_TRUE, FTAG, &os); 3462 if (error) 3463 fatal(0, "dmu_objset_own(%s) = %d", snap2name, error); 3464 error = dsl_dataset_promote(clone2name, NULL); 3465 if (error != EBUSY) 3466 fatal(0, "dsl_dataset_promote(%s), %d, not EBUSY", clone2name, 3467 error); 3468 dmu_objset_disown(os, FTAG); 3469 3470out: 3471 ztest_dsl_dataset_cleanup(osname, id); 3472 3473 (void) rw_unlock(&ztest_name_lock); 3474} 3475 3476/* 3477 * Verify that dmu_object_{alloc,free} work as expected. 3478 */ 3479void 3480ztest_dmu_object_alloc_free(ztest_ds_t *zd, uint64_t id) 3481{ 3482 ztest_od_t od[4]; 3483 int batchsize = sizeof (od) / sizeof (od[0]); 3484 3485 for (int b = 0; b < batchsize; b++) 3486 ztest_od_init(&od[b], id, FTAG, b, DMU_OT_UINT64_OTHER, 0, 0); 3487 3488 /* 3489 * Destroy the previous batch of objects, create a new batch, 3490 * and do some I/O on the new objects. 3491 */ 3492 if (ztest_object_init(zd, od, sizeof (od), B_TRUE) != 0) 3493 return; 3494 3495 while (ztest_random(4 * batchsize) != 0) 3496 ztest_io(zd, od[ztest_random(batchsize)].od_object, 3497 ztest_random(ZTEST_RANGE_LOCKS) << SPA_MAXBLOCKSHIFT); 3498} 3499 3500/* 3501 * Verify that dmu_{read,write} work as expected. 3502 */ 3503void 3504ztest_dmu_read_write(ztest_ds_t *zd, uint64_t id) 3505{ 3506 objset_t *os = zd->zd_os; 3507 ztest_od_t od[2]; 3508 dmu_tx_t *tx; 3509 int i, freeit, error; 3510 uint64_t n, s, txg; 3511 bufwad_t *packbuf, *bigbuf, *pack, *bigH, *bigT; 3512 uint64_t packobj, packoff, packsize, bigobj, bigoff, bigsize; 3513 uint64_t chunksize = (1000 + ztest_random(1000)) * sizeof (uint64_t); 3514 uint64_t regions = 997; 3515 uint64_t stride = 123456789ULL; 3516 uint64_t width = 40; 3517 int free_percent = 5; 3518 3519 /* 3520 * This test uses two objects, packobj and bigobj, that are always 3521 * updated together (i.e. in the same tx) so that their contents are 3522 * in sync and can be compared. Their contents relate to each other 3523 * in a simple way: packobj is a dense array of 'bufwad' structures, 3524 * while bigobj is a sparse array of the same bufwads. Specifically, 3525 * for any index n, there are three bufwads that should be identical: 3526 * 3527 * packobj, at offset n * sizeof (bufwad_t) 3528 * bigobj, at the head of the nth chunk 3529 * bigobj, at the tail of the nth chunk 3530 * 3531 * The chunk size is arbitrary. It doesn't have to be a power of two, 3532 * and it doesn't have any relation to the object blocksize. 
3533 * The only requirement is that it can hold at least two bufwads. 3534 * 3535 * Normally, we write the bufwad to each of these locations. 3536 * However, free_percent of the time we instead write zeroes to 3537 * packobj and perform a dmu_free_range() on bigobj. By comparing 3538 * bigobj to packobj, we can verify that the DMU is correctly 3539 * tracking which parts of an object are allocated and free, 3540 * and that the contents of the allocated blocks are correct. 3541 */ 3542 3543 /* 3544 * Read the directory info. If it's the first time, set things up. 3545 */ 3546 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_UINT64_OTHER, 0, chunksize); 3547 ztest_od_init(&od[1], id, FTAG, 1, DMU_OT_UINT64_OTHER, 0, chunksize); 3548 3549 if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0) 3550 return; 3551 3552 bigobj = od[0].od_object; 3553 packobj = od[1].od_object; 3554 chunksize = od[0].od_gen; 3555 ASSERT(chunksize == od[1].od_gen); 3556 3557 /* 3558 * Prefetch a random chunk of the big object. 3559 * Our aim here is to get some async reads in flight 3560 * for blocks that we may free below; the DMU should 3561 * handle this race correctly. 3562 */ 3563 n = ztest_random(regions) * stride + ztest_random(width); 3564 s = 1 + ztest_random(2 * width - 1); 3565 dmu_prefetch(os, bigobj, n * chunksize, s * chunksize); 3566 3567 /* 3568 * Pick a random index and compute the offsets into packobj and bigobj. 3569 */ 3570 n = ztest_random(regions) * stride + ztest_random(width); 3571 s = 1 + ztest_random(width - 1); 3572 3573 packoff = n * sizeof (bufwad_t); 3574 packsize = s * sizeof (bufwad_t); 3575 3576 bigoff = n * chunksize; 3577 bigsize = s * chunksize; 3578 3579 packbuf = umem_alloc(packsize, UMEM_NOFAIL); 3580 bigbuf = umem_alloc(bigsize, UMEM_NOFAIL); 3581 3582 /* 3583 * free_percent of the time, free a range of bigobj rather than 3584 * overwriting it. 3585 */ 3586 freeit = (ztest_random(100) < free_percent); 3587 3588 /* 3589 * Read the current contents of our objects. 3590 */ 3591 error = dmu_read(os, packobj, packoff, packsize, packbuf, 3592 DMU_READ_PREFETCH); 3593 ASSERT0(error); 3594 error = dmu_read(os, bigobj, bigoff, bigsize, bigbuf, 3595 DMU_READ_PREFETCH); 3596 ASSERT0(error); 3597 3598 /* 3599 * Get a tx for the mods to both packobj and bigobj. 3600 */ 3601 tx = dmu_tx_create(os); 3602 3603 dmu_tx_hold_write(tx, packobj, packoff, packsize); 3604 3605 if (freeit) 3606 dmu_tx_hold_free(tx, bigobj, bigoff, bigsize); 3607 else 3608 dmu_tx_hold_write(tx, bigobj, bigoff, bigsize); 3609 3610 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG); 3611 if (txg == 0) { 3612 umem_free(packbuf, packsize); 3613 umem_free(bigbuf, bigsize); 3614 return; 3615 } 3616 3617 dmu_object_set_checksum(os, bigobj, 3618 (enum zio_checksum)ztest_random_dsl_prop(ZFS_PROP_CHECKSUM), tx); 3619 3620 dmu_object_set_compress(os, bigobj, 3621 (enum zio_compress)ztest_random_dsl_prop(ZFS_PROP_COMPRESSION), tx); 3622 3623 /* 3624 * For each index from n to n + s, verify that the existing bufwad 3625 * in packobj matches the bufwads at the head and tail of the 3626 * corresponding chunk in bigobj. Then update all three bufwads 3627 * with the new values we want to write out. 
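* As a sketch, for each index i of the s bufwads read in, the three
* copies compared below are located by (this is the same pointer
* arithmetic the verification loop uses):
*
*	pack = (bufwad_t *)((char *)packbuf + i * sizeof (bufwad_t));
*	bigH = (bufwad_t *)((char *)bigbuf + i * chunksize);
*	bigT = (bufwad_t *)((char *)bigH + chunksize) - 1;
*
* and all three must compare equal with bcmp(). A bufwad whose bw_txg
* exceeds the currently open txg is a future leak and is fatal.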
3628 */ 3629 for (i = 0; i < s; i++) { 3630 /* LINTED */ 3631 pack = (bufwad_t *)((char *)packbuf + i * sizeof (bufwad_t)); 3632 /* LINTED */ 3633 bigH = (bufwad_t *)((char *)bigbuf + i * chunksize); 3634 /* LINTED */ 3635 bigT = (bufwad_t *)((char *)bigH + chunksize) - 1; 3636 3637 ASSERT((uintptr_t)bigH - (uintptr_t)bigbuf < bigsize); 3638 ASSERT((uintptr_t)bigT - (uintptr_t)bigbuf < bigsize); 3639 3640 if (pack->bw_txg > txg) 3641 fatal(0, "future leak: got %llx, open txg is %llx", 3642 pack->bw_txg, txg); 3643 3644 if (pack->bw_data != 0 && pack->bw_index != n + i) 3645 fatal(0, "wrong index: got %llx, wanted %llx+%llx", 3646 pack->bw_index, n, i); 3647 3648 if (bcmp(pack, bigH, sizeof (bufwad_t)) != 0) 3649 fatal(0, "pack/bigH mismatch in %p/%p", pack, bigH); 3650 3651 if (bcmp(pack, bigT, sizeof (bufwad_t)) != 0) 3652 fatal(0, "pack/bigT mismatch in %p/%p", pack, bigT); 3653 3654 if (freeit) { 3655 bzero(pack, sizeof (bufwad_t)); 3656 } else { 3657 pack->bw_index = n + i; 3658 pack->bw_txg = txg; 3659 pack->bw_data = 1 + ztest_random(-2ULL); 3660 } 3661 *bigH = *pack; 3662 *bigT = *pack; 3663 } 3664 3665 /* 3666 * We've verified all the old bufwads, and made new ones. 3667 * Now write them out. 3668 */ 3669 dmu_write(os, packobj, packoff, packsize, packbuf, tx); 3670 3671 if (freeit) { 3672 if (ztest_opts.zo_verbose >= 7) { 3673 (void) printf("freeing offset %llx size %llx" 3674 " txg %llx\n", 3675 (u_longlong_t)bigoff, 3676 (u_longlong_t)bigsize, 3677 (u_longlong_t)txg); 3678 } 3679 VERIFY(0 == dmu_free_range(os, bigobj, bigoff, bigsize, tx)); 3680 } else { 3681 if (ztest_opts.zo_verbose >= 7) { 3682 (void) printf("writing offset %llx size %llx" 3683 " txg %llx\n", 3684 (u_longlong_t)bigoff, 3685 (u_longlong_t)bigsize, 3686 (u_longlong_t)txg); 3687 } 3688 dmu_write(os, bigobj, bigoff, bigsize, bigbuf, tx); 3689 } 3690 3691 dmu_tx_commit(tx); 3692 3693 /* 3694 * Sanity check the stuff we just wrote. 3695 */ 3696 { 3697 void *packcheck = umem_alloc(packsize, UMEM_NOFAIL); 3698 void *bigcheck = umem_alloc(bigsize, UMEM_NOFAIL); 3699 3700 VERIFY(0 == dmu_read(os, packobj, packoff, 3701 packsize, packcheck, DMU_READ_PREFETCH)); 3702 VERIFY(0 == dmu_read(os, bigobj, bigoff, 3703 bigsize, bigcheck, DMU_READ_PREFETCH)); 3704 3705 ASSERT(bcmp(packbuf, packcheck, packsize) == 0); 3706 ASSERT(bcmp(bigbuf, bigcheck, bigsize) == 0); 3707 3708 umem_free(packcheck, packsize); 3709 umem_free(bigcheck, bigsize); 3710 } 3711 3712 umem_free(packbuf, packsize); 3713 umem_free(bigbuf, bigsize); 3714} 3715 3716void 3717compare_and_update_pbbufs(uint64_t s, bufwad_t *packbuf, bufwad_t *bigbuf, 3718 uint64_t bigsize, uint64_t n, uint64_t chunksize, uint64_t txg) 3719{ 3720 uint64_t i; 3721 bufwad_t *pack; 3722 bufwad_t *bigH; 3723 bufwad_t *bigT; 3724 3725 /* 3726 * For each index from n to n + s, verify that the existing bufwad 3727 * in packobj matches the bufwads at the head and tail of the 3728 * corresponding chunk in bigobj. Then update all three bufwads 3729 * with the new values we want to write out. 
3730 */ 3731 for (i = 0; i < s; i++) { 3732 /* LINTED */ 3733 pack = (bufwad_t *)((char *)packbuf + i * sizeof (bufwad_t)); 3734 /* LINTED */ 3735 bigH = (bufwad_t *)((char *)bigbuf + i * chunksize); 3736 /* LINTED */ 3737 bigT = (bufwad_t *)((char *)bigH + chunksize) - 1; 3738 3739 ASSERT((uintptr_t)bigH - (uintptr_t)bigbuf < bigsize); 3740 ASSERT((uintptr_t)bigT - (uintptr_t)bigbuf < bigsize); 3741 3742 if (pack->bw_txg > txg) 3743 fatal(0, "future leak: got %llx, open txg is %llx", 3744 pack->bw_txg, txg); 3745 3746 if (pack->bw_data != 0 && pack->bw_index != n + i) 3747 fatal(0, "wrong index: got %llx, wanted %llx+%llx", 3748 pack->bw_index, n, i); 3749 3750 if (bcmp(pack, bigH, sizeof (bufwad_t)) != 0) 3751 fatal(0, "pack/bigH mismatch in %p/%p", pack, bigH); 3752 3753 if (bcmp(pack, bigT, sizeof (bufwad_t)) != 0) 3754 fatal(0, "pack/bigT mismatch in %p/%p", pack, bigT); 3755 3756 pack->bw_index = n + i; 3757 pack->bw_txg = txg; 3758 pack->bw_data = 1 + ztest_random(-2ULL); 3759 3760 *bigH = *pack; 3761 *bigT = *pack; 3762 } 3763} 3764 3765void 3766ztest_dmu_read_write_zcopy(ztest_ds_t *zd, uint64_t id) 3767{ 3768 objset_t *os = zd->zd_os; 3769 ztest_od_t od[2]; 3770 dmu_tx_t *tx; 3771 uint64_t i; 3772 int error; 3773 uint64_t n, s, txg; 3774 bufwad_t *packbuf, *bigbuf; 3775 uint64_t packobj, packoff, packsize, bigobj, bigoff, bigsize; 3776 uint64_t blocksize = ztest_random_blocksize(); 3777 uint64_t chunksize = blocksize; 3778 uint64_t regions = 997; 3779 uint64_t stride = 123456789ULL; 3780 uint64_t width = 9; 3781 dmu_buf_t *bonus_db; 3782 arc_buf_t **bigbuf_arcbufs; 3783 dmu_object_info_t doi; 3784 3785 /* 3786 * This test uses two objects, packobj and bigobj, that are always 3787 * updated together (i.e. in the same tx) so that their contents are 3788 * in sync and can be compared. Their contents relate to each other 3789 * in a simple way: packobj is a dense array of 'bufwad' structures, 3790 * while bigobj is a sparse array of the same bufwads. Specifically, 3791 * for any index n, there are three bufwads that should be identical: 3792 * 3793 * packobj, at offset n * sizeof (bufwad_t) 3794 * bigobj, at the head of the nth chunk 3795 * bigobj, at the tail of the nth chunk 3796 * 3797 * The chunk size is set equal to bigobj block size so that 3798 * dmu_assign_arcbuf() can be tested for object updates. 3799 */ 3800 3801 /* 3802 * Read the directory info. If it's the first time, set things up. 3803 */ 3804 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_UINT64_OTHER, blocksize, 0); 3805 ztest_od_init(&od[1], id, FTAG, 1, DMU_OT_UINT64_OTHER, 0, chunksize); 3806 3807 if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0) 3808 return; 3809 3810 bigobj = od[0].od_object; 3811 packobj = od[1].od_object; 3812 blocksize = od[0].od_blocksize; 3813 chunksize = blocksize; 3814 ASSERT(chunksize == od[1].od_gen); 3815 3816 VERIFY(dmu_object_info(os, bigobj, &doi) == 0); 3817 VERIFY(ISP2(doi.doi_data_block_size)); 3818 VERIFY(chunksize == doi.doi_data_block_size); 3819 VERIFY(chunksize >= 2 * sizeof (bufwad_t)); 3820 3821 /* 3822 * Pick a random index and compute the offsets into packobj and bigobj. 
3823 */ 3824 n = ztest_random(regions) * stride + ztest_random(width); 3825 s = 1 + ztest_random(width - 1); 3826 3827 packoff = n * sizeof (bufwad_t); 3828 packsize = s * sizeof (bufwad_t); 3829 3830 bigoff = n * chunksize; 3831 bigsize = s * chunksize; 3832 3833 packbuf = umem_zalloc(packsize, UMEM_NOFAIL); 3834 bigbuf = umem_zalloc(bigsize, UMEM_NOFAIL); 3835 3836 VERIFY3U(0, ==, dmu_bonus_hold(os, bigobj, FTAG, &bonus_db)); 3837 3838 bigbuf_arcbufs = umem_zalloc(2 * s * sizeof (arc_buf_t *), UMEM_NOFAIL); 3839 3840 /* 3841 * Iteration 0 test zcopy for DB_UNCACHED dbufs. 3842 * Iteration 1 test zcopy to already referenced dbufs. 3843 * Iteration 2 test zcopy to dirty dbuf in the same txg. 3844 * Iteration 3 test zcopy to dbuf dirty in previous txg. 3845 * Iteration 4 test zcopy when dbuf is no longer dirty. 3846 * Iteration 5 test zcopy when it can't be done. 3847 * Iteration 6 one more zcopy write. 3848 */ 3849 for (i = 0; i < 7; i++) { 3850 uint64_t j; 3851 uint64_t off; 3852 3853 /* 3854 * In iteration 5 (i == 5) use arcbufs 3855 * that don't match bigobj blksz to test 3856 * dmu_assign_arcbuf() when it can't directly 3857 * assign an arcbuf to a dbuf. 3858 */ 3859 for (j = 0; j < s; j++) { 3860 if (i != 5) { 3861 bigbuf_arcbufs[j] = 3862 dmu_request_arcbuf(bonus_db, chunksize); 3863 } else { 3864 bigbuf_arcbufs[2 * j] = 3865 dmu_request_arcbuf(bonus_db, chunksize / 2); 3866 bigbuf_arcbufs[2 * j + 1] = 3867 dmu_request_arcbuf(bonus_db, chunksize / 2); 3868 } 3869 } 3870 3871 /* 3872 * Get a tx for the mods to both packobj and bigobj. 3873 */ 3874 tx = dmu_tx_create(os); 3875 3876 dmu_tx_hold_write(tx, packobj, packoff, packsize); 3877 dmu_tx_hold_write(tx, bigobj, bigoff, bigsize); 3878 3879 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG); 3880 if (txg == 0) { 3881 umem_free(packbuf, packsize); 3882 umem_free(bigbuf, bigsize); 3883 for (j = 0; j < s; j++) { 3884 if (i != 5) { 3885 dmu_return_arcbuf(bigbuf_arcbufs[j]); 3886 } else { 3887 dmu_return_arcbuf( 3888 bigbuf_arcbufs[2 * j]); 3889 dmu_return_arcbuf( 3890 bigbuf_arcbufs[2 * j + 1]); 3891 } 3892 } 3893 umem_free(bigbuf_arcbufs, 2 * s * sizeof (arc_buf_t *)); 3894 dmu_buf_rele(bonus_db, FTAG); 3895 return; 3896 } 3897 3898 /* 3899 * 50% of the time don't read objects in the 1st iteration to 3900 * test dmu_assign_arcbuf() for the case when there're no 3901 * existing dbufs for the specified offsets. 3902 */ 3903 if (i != 0 || ztest_random(2) != 0) { 3904 error = dmu_read(os, packobj, packoff, 3905 packsize, packbuf, DMU_READ_PREFETCH); 3906 ASSERT0(error); 3907 error = dmu_read(os, bigobj, bigoff, bigsize, 3908 bigbuf, DMU_READ_PREFETCH); 3909 ASSERT0(error); 3910 } 3911 compare_and_update_pbbufs(s, packbuf, bigbuf, bigsize, 3912 n, chunksize, txg); 3913 3914 /* 3915 * We've verified all the old bufwads, and made new ones. 3916 * Now write them out. 
*/ 3918 dmu_write(os, packobj, packoff, packsize, packbuf, tx); 3919 if (ztest_opts.zo_verbose >= 7) { 3920 (void) printf("writing offset %llx size %llx" 3921 " txg %llx\n", 3922 (u_longlong_t)bigoff, 3923 (u_longlong_t)bigsize, 3924 (u_longlong_t)txg); 3925 } 3926 for (off = bigoff, j = 0; j < s; j++, off += chunksize) { 3927 dmu_buf_t *dbt; 3928 if (i != 5) { 3929 bcopy((caddr_t)bigbuf + (off - bigoff), 3930 bigbuf_arcbufs[j]->b_data, chunksize); 3931 } else { 3932 bcopy((caddr_t)bigbuf + (off - bigoff), 3933 bigbuf_arcbufs[2 * j]->b_data, 3934 chunksize / 2); 3935 bcopy((caddr_t)bigbuf + (off - bigoff) + 3936 chunksize / 2, 3937 bigbuf_arcbufs[2 * j + 1]->b_data, 3938 chunksize / 2); 3939 } 3940 3941 if (i == 1) { 3942 VERIFY(dmu_buf_hold(os, bigobj, off, 3943 FTAG, &dbt, DMU_READ_NO_PREFETCH) == 0); 3944 } 3945 if (i != 5) { 3946 dmu_assign_arcbuf(bonus_db, off, 3947 bigbuf_arcbufs[j], tx); 3948 } else { 3949 dmu_assign_arcbuf(bonus_db, off, 3950 bigbuf_arcbufs[2 * j], tx); 3951 dmu_assign_arcbuf(bonus_db, 3952 off + chunksize / 2, 3953 bigbuf_arcbufs[2 * j + 1], tx); 3954 } 3955 if (i == 1) { 3956 dmu_buf_rele(dbt, FTAG); 3957 } 3958 } 3959 dmu_tx_commit(tx); 3960 3961 /* 3962 * Sanity check the stuff we just wrote. 3963 */ 3964 { 3965 void *packcheck = umem_alloc(packsize, UMEM_NOFAIL); 3966 void *bigcheck = umem_alloc(bigsize, UMEM_NOFAIL); 3967 3968 VERIFY(0 == dmu_read(os, packobj, packoff, 3969 packsize, packcheck, DMU_READ_PREFETCH)); 3970 VERIFY(0 == dmu_read(os, bigobj, bigoff, 3971 bigsize, bigcheck, DMU_READ_PREFETCH)); 3972 3973 ASSERT(bcmp(packbuf, packcheck, packsize) == 0); 3974 ASSERT(bcmp(bigbuf, bigcheck, bigsize) == 0); 3975 3976 umem_free(packcheck, packsize); 3977 umem_free(bigcheck, bigsize); 3978 } 3979 if (i == 2) { 3980 txg_wait_open(dmu_objset_pool(os), 0); 3981 } else if (i == 3) { 3982 txg_wait_synced(dmu_objset_pool(os), 0); 3983 } 3984 } 3985 3986 dmu_buf_rele(bonus_db, FTAG); 3987 umem_free(packbuf, packsize); 3988 umem_free(bigbuf, bigsize); 3989 umem_free(bigbuf_arcbufs, 2 * s * sizeof (arc_buf_t *)); 3990} 3991 3992/* ARGSUSED */ 3993void 3994ztest_dmu_write_parallel(ztest_ds_t *zd, uint64_t id) 3995{ 3996 ztest_od_t od[1]; 3997 uint64_t offset = (1ULL << (ztest_random(20) + 43)) + 3998 (ztest_random(ZTEST_RANGE_LOCKS) << SPA_MAXBLOCKSHIFT); 3999 4000 /* 4001 * Have multiple threads write to large offsets in an object 4002 * to verify that parallel writes to an object -- even to the 4003 * same blocks within the object -- don't cause any trouble.
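* (Every thread initializes its template with ID_PARALLEL, so they all operate on the same object.)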
*/ 4005 ztest_od_init(&od[0], ID_PARALLEL, FTAG, 0, DMU_OT_UINT64_OTHER, 0, 0); 4006 4007 if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0) 4008 return; 4009 4010 while (ztest_random(10) != 0) 4011 ztest_io(zd, od[0].od_object, offset); 4012} 4013 4014void 4015 ztest_dmu_prealloc(ztest_ds_t *zd, uint64_t id) 4016{ 4017 ztest_od_t od[1]; 4018 uint64_t offset = (1ULL << (ztest_random(4) + SPA_MAXBLOCKSHIFT)) + 4019 (ztest_random(ZTEST_RANGE_LOCKS) << SPA_MAXBLOCKSHIFT); 4020 uint64_t count = ztest_random(20) + 1; 4021 uint64_t blocksize = ztest_random_blocksize(); 4022 void *data; 4023 4024 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_UINT64_OTHER, blocksize, 0); 4025 4026 if (ztest_object_init(zd, od, sizeof (od), !ztest_random(2)) != 0) 4027 return; 4028 4029 if (ztest_truncate(zd, od[0].od_object, offset, count * blocksize) != 0) 4030 return; 4031 4032 ztest_prealloc(zd, od[0].od_object, offset, count * blocksize); 4033 4034 data = umem_zalloc(blocksize, UMEM_NOFAIL); 4035 4036 while (ztest_random(count) != 0) { 4037 uint64_t randoff = offset + (ztest_random(count) * blocksize); 4038 if (ztest_write(zd, od[0].od_object, randoff, blocksize, 4039 data) != 0) 4040 break; 4041 while (ztest_random(4) != 0) 4042 ztest_io(zd, od[0].od_object, randoff); 4043 } 4044 4045 umem_free(data, blocksize); 4046} 4047 4048/* 4049 * Verify that zap_{create,destroy,add,remove,update} work as expected. 4050 */ 4051#define ZTEST_ZAP_MIN_INTS 1 4052#define ZTEST_ZAP_MAX_INTS 4 4053#define ZTEST_ZAP_MAX_PROPS 1000 4054 4055void 4056ztest_zap(ztest_ds_t *zd, uint64_t id) 4057{ 4058 objset_t *os = zd->zd_os; 4059 ztest_od_t od[1]; 4060 uint64_t object; 4061 uint64_t txg, last_txg; 4062 uint64_t value[ZTEST_ZAP_MAX_INTS]; 4063 uint64_t zl_ints, zl_intsize, prop; 4064 int i, ints; 4065 dmu_tx_t *tx; 4066 char propname[100], txgname[100]; 4067 int error; 4068 char *hc[2] = { "s.acl.h", ".s.open.h.hyLZlg" }; 4069 4070 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_ZAP_OTHER, 0, 0); 4071 4072 if (ztest_object_init(zd, od, sizeof (od), !ztest_random(2)) != 0) 4073 return; 4074 4075 object = od[0].od_object; 4076 4077 /* 4078 * Generate a known hash collision, and verify that 4079 * we can lookup and remove both entries. 4080 */ 4081 tx = dmu_tx_create(os); 4082 dmu_tx_hold_zap(tx, object, B_TRUE, NULL); 4083 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG); 4084 if (txg == 0) 4085 return; 4086 for (i = 0; i < 2; i++) { 4087 value[i] = i; 4088 VERIFY3U(0, ==, zap_add(os, object, hc[i], sizeof (uint64_t), 4089 1, &value[i], tx)); 4090 } 4091 for (i = 0; i < 2; i++) { 4092 VERIFY3U(EEXIST, ==, zap_add(os, object, hc[i], 4093 sizeof (uint64_t), 1, &value[i], tx)); 4094 VERIFY3U(0, ==, 4095 zap_length(os, object, hc[i], &zl_intsize, &zl_ints)); 4096 ASSERT3U(zl_intsize, ==, sizeof (uint64_t)); 4097 ASSERT3U(zl_ints, ==, 1); 4098 } 4099 for (i = 0; i < 2; i++) { 4100 VERIFY3U(0, ==, zap_remove(os, object, hc[i], tx)); 4101 } 4102 dmu_tx_commit(tx); 4103 4104 /* 4105 * Generate a bunch of random entries. 4106 */ 4107 ints = MAX(ZTEST_ZAP_MIN_INTS, object % ZTEST_ZAP_MAX_INTS); 4108 4109 prop = ztest_random(ZTEST_ZAP_MAX_PROPS); 4110 (void) sprintf(propname, "prop_%llu", (u_longlong_t)prop); 4111 (void) sprintf(txgname, "txg_%llu", (u_longlong_t)prop); 4112 bzero(value, sizeof (value)); 4113 last_txg = 0; 4114 4115 /* 4116 * If these zap entries already exist, validate their contents.
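* (Element i of the prop entry must equal last_txg + object + i, which is exactly what the atomic update further below writes.)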
4117 */ 4118 error = zap_length(os, object, txgname, &zl_intsize, &zl_ints); 4119 if (error == 0) { 4120 ASSERT3U(zl_intsize, ==, sizeof (uint64_t)); 4121 ASSERT3U(zl_ints, ==, 1); 4122 4123 VERIFY(zap_lookup(os, object, txgname, zl_intsize, 4124 zl_ints, &last_txg) == 0); 4125 4126 VERIFY(zap_length(os, object, propname, &zl_intsize, 4127 &zl_ints) == 0); 4128 4129 ASSERT3U(zl_intsize, ==, sizeof (uint64_t)); 4130 ASSERT3U(zl_ints, ==, ints); 4131 4132 VERIFY(zap_lookup(os, object, propname, zl_intsize, 4133 zl_ints, value) == 0); 4134 4135 for (i = 0; i < ints; i++) { 4136 ASSERT3U(value[i], ==, last_txg + object + i); 4137 } 4138 } else { 4139 ASSERT3U(error, ==, ENOENT); 4140 } 4141 4142 /* 4143 * Atomically update two entries in our zap object. 4144 * The first is named txg_%llu, and contains the txg 4145 * in which the property was last updated. The second 4146 * is named prop_%llu, and the nth element of its value 4147 * should be txg + object + n. 4148 */ 4149 tx = dmu_tx_create(os); 4150 dmu_tx_hold_zap(tx, object, B_TRUE, NULL); 4151 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG); 4152 if (txg == 0) 4153 return; 4154 4155 if (last_txg > txg) 4156 fatal(0, "zap future leak: old %llu new %llu", last_txg, txg); 4157 4158 for (i = 0; i < ints; i++) 4159 value[i] = txg + object + i; 4160 4161 VERIFY3U(0, ==, zap_update(os, object, txgname, sizeof (uint64_t), 4162 1, &txg, tx)); 4163 VERIFY3U(0, ==, zap_update(os, object, propname, sizeof (uint64_t), 4164 ints, value, tx)); 4165 4166 dmu_tx_commit(tx); 4167 4168 /* 4169 * Remove a random pair of entries. 4170 */ 4171 prop = ztest_random(ZTEST_ZAP_MAX_PROPS); 4172 (void) sprintf(propname, "prop_%llu", (u_longlong_t)prop); 4173 (void) sprintf(txgname, "txg_%llu", (u_longlong_t)prop); 4174 4175 error = zap_length(os, object, txgname, &zl_intsize, &zl_ints); 4176 4177 if (error == ENOENT) 4178 return; 4179 4180 ASSERT0(error); 4181 4182 tx = dmu_tx_create(os); 4183 dmu_tx_hold_zap(tx, object, B_TRUE, NULL); 4184 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG); 4185 if (txg == 0) 4186 return; 4187 VERIFY3U(0, ==, zap_remove(os, object, txgname, tx)); 4188 VERIFY3U(0, ==, zap_remove(os, object, propname, tx)); 4189 dmu_tx_commit(tx); 4190} 4191 4192/* 4193 * Test the upgrade of a microzap to a fatzap. 4194 */ 4195void 4196ztest_fzap(ztest_ds_t *zd, uint64_t id) 4197{ 4198 objset_t *os = zd->zd_os; 4199 ztest_od_t od[1]; 4200 uint64_t object, txg; 4201 4202 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_ZAP_OTHER, 0, 0); 4203 4204 if (ztest_object_init(zd, od, sizeof (od), !ztest_random(2)) != 0) 4205 return; 4206 4207 object = od[0].od_object; 4208 4209 /* 4210 * Add entries to this ZAP and make sure it spills over 4211 * and gets upgraded to a fatzap. Also, since we are adding 4212 * 2050 entries we should see ptrtbl growth and leaf-block splits.
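 *
 * (Background, stated as an assumption about the ZAP on-disk format:
 * a microzap packs its entries into a single block and only supports
 * single-uint64 values with short names, so adding thousands of
 * entries is guaranteed to push it through the fatzap upgrade path,
 * and then through pointer-table growth and leaf splits as it fills.)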
4213 */ 4214 for (int i = 0; i < 2050; i++) { 4215 char name[MAXNAMELEN]; 4216 uint64_t value = i; 4217 dmu_tx_t *tx; 4218 int error; 4219 4220 (void) snprintf(name, sizeof (name), "fzap-%llu-%llu", 4221 id, value); 4222 4223 tx = dmu_tx_create(os); 4224 dmu_tx_hold_zap(tx, object, B_TRUE, name); 4225 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG); 4226 if (txg == 0) 4227 return; 4228 error = zap_add(os, object, name, sizeof (uint64_t), 1, 4229 &value, tx); 4230 ASSERT(error == 0 || error == EEXIST); 4231 dmu_tx_commit(tx); 4232 } 4233} 4234 4235/* ARGSUSED */ 4236void 4237ztest_zap_parallel(ztest_ds_t *zd, uint64_t id) 4238{ 4239 objset_t *os = zd->zd_os; 4240 ztest_od_t od[1]; 4241 uint64_t txg, object, count, wsize, wc, zl_wsize, zl_wc; 4242 dmu_tx_t *tx; 4243 int i, namelen, error; 4244 int micro = ztest_random(2); 4245 char name[20], string_value[20]; 4246 void *data; 4247 4248 ztest_od_init(&od[0], ID_PARALLEL, FTAG, micro, DMU_OT_ZAP_OTHER, 0, 0); 4249 4250 if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0) 4251 return; 4252 4253 object = od[0].od_object; 4254 4255 /* 4256 * Generate a random name of the form 'xxx.....' where each 4257 * x is a random printable character and the dots are dots. 4258 * There are 94 such characters, and the name length goes from 4259 * 6 to 20, so there are 94^3 * 15 = 12,458,760 possible names. 4260 */ 4261 namelen = ztest_random(sizeof (name) - 5) + 5 + 1; 4262 4263 for (i = 0; i < 3; i++) 4264 name[i] = '!' + ztest_random('~' - '!' + 1); 4265 for (; i < namelen - 1; i++) 4266 name[i] = '.'; 4267 name[i] = '\0'; 4268 4269 if ((namelen & 1) || micro) { 4270 wsize = sizeof (txg); 4271 wc = 1; 4272 data = &txg; 4273 } else { 4274 wsize = 1; 4275 wc = namelen; 4276 data = string_value; 4277 } 4278 4279 count = -1ULL; 4280 VERIFY0(zap_count(os, object, &count)); 4281 ASSERT(count != -1ULL); 4282 4283 /* 4284 * Select an operation: length, lookup, add, update, remove. 4285 */ 4286 i = ztest_random(5); 4287 4288 if (i >= 2) { 4289 tx = dmu_tx_create(os); 4290 dmu_tx_hold_zap(tx, object, B_TRUE, NULL); 4291 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG); 4292 if (txg == 0) 4293 return; 4294 bcopy(name, string_value, namelen); 4295 } else { 4296 tx = NULL; 4297 txg = 0; 4298 bzero(string_value, namelen); 4299 } 4300 4301 switch (i) { 4302 4303 case 0: 4304 error = zap_length(os, object, name, &zl_wsize, &zl_wc); 4305 if (error == 0) { 4306 ASSERT3U(wsize, ==, zl_wsize); 4307 ASSERT3U(wc, ==, zl_wc); 4308 } else { 4309 ASSERT3U(error, ==, ENOENT); 4310 } 4311 break; 4312 4313 case 1: 4314 error = zap_lookup(os, object, name, wsize, wc, data); 4315 if (error == 0) { 4316 if (data == string_value && 4317 bcmp(name, data, namelen) != 0) 4318 fatal(0, "name '%s' != val '%s' len %d", 4319 name, data, namelen); 4320 } else { 4321 ASSERT3U(error, ==, ENOENT); 4322 } 4323 break; 4324 4325 case 2: 4326 error = zap_add(os, object, name, wsize, wc, data, tx); 4327 ASSERT(error == 0 || error == EEXIST); 4328 break; 4329 4330 case 3: 4331 VERIFY(zap_update(os, object, name, wsize, wc, data, tx) == 0); 4332 break; 4333 4334 case 4: 4335 error = zap_remove(os, object, name, tx); 4336 ASSERT(error == 0 || error == ENOENT); 4337 break; 4338 } 4339 4340 if (tx != NULL) 4341 dmu_tx_commit(tx); 4342} 4343 4344/* 4345 * Commit callback data. 
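 *
 * One of these records is allocated per registered callback. The test
 * threads it onto zcl.zcl_callbacks (kept sorted by txg), and
 * ztest_commit_callback() below verifies that each callback fires
 * exactly once, with the expected error, and never before its txg
 * has been synced.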
4346 */ 4347typedef struct ztest_cb_data { 4348 list_node_t zcd_node; 4349 uint64_t zcd_txg; 4350 int zcd_expected_err; 4351 boolean_t zcd_added; 4352 boolean_t zcd_called; 4353 spa_t *zcd_spa; 4354} ztest_cb_data_t; 4355 4356/* This is the actual commit callback function */ 4357static void 4358ztest_commit_callback(void *arg, int error) 4359{ 4360 ztest_cb_data_t *data = arg; 4361 uint64_t synced_txg; 4362 4363 VERIFY(data != NULL); 4364 VERIFY3S(data->zcd_expected_err, ==, error); 4365 VERIFY(!data->zcd_called); 4366 4367 synced_txg = spa_last_synced_txg(data->zcd_spa); 4368 if (data->zcd_txg > synced_txg) 4369 fatal(0, "commit callback of txg %" PRIu64 " called prematurely" 4370 ", last synced txg = %" PRIu64 "\n", data->zcd_txg, 4371 synced_txg); 4372 4373 data->zcd_called = B_TRUE; 4374 4375 if (error == ECANCELED) { 4376 ASSERT0(data->zcd_txg); 4377 ASSERT(!data->zcd_added); 4378 4379 /* 4380 * The private callback data should be destroyed here, but 4381 * since we are going to check the zcd_called field after 4382 * dmu_tx_abort(), we will destroy it there. 4383 */ 4384 return; 4385 } 4386 4387 /* Was this callback added to the global callback list? */ 4388 if (!data->zcd_added) 4389 goto out; 4390 4391 ASSERT3U(data->zcd_txg, !=, 0); 4392 4393 /* Remove our callback from the list */ 4394 (void) mutex_lock(&zcl.zcl_callbacks_lock); 4395 list_remove(&zcl.zcl_callbacks, data); 4396 (void) mutex_unlock(&zcl.zcl_callbacks_lock); 4397 4398out: 4399 umem_free(data, sizeof (ztest_cb_data_t)); 4400} 4401 4402/* Allocate and initialize callback data structure */ 4403static ztest_cb_data_t * 4404ztest_create_cb_data(objset_t *os, uint64_t txg) 4405{ 4406 ztest_cb_data_t *cb_data; 4407 4408 cb_data = umem_zalloc(sizeof (ztest_cb_data_t), UMEM_NOFAIL); 4409 4410 cb_data->zcd_txg = txg; 4411 cb_data->zcd_spa = dmu_objset_spa(os); 4412 4413 return (cb_data); 4414} 4415 4416/* 4417 * If a number of txgs equal to this threshold have been created after a commit 4418 * callback has been registered but not called, then we assume there is an 4419 * implementation bug. 4420 */ 4421#define ZTEST_COMMIT_CALLBACK_THRESH (TXG_CONCURRENT_STATES + 2) 4422 4423/* 4424 * Commit callback test. 4425 */ 4426void 4427ztest_dmu_commit_callbacks(ztest_ds_t *zd, uint64_t id) 4428{ 4429 objset_t *os = zd->zd_os; 4430 ztest_od_t od[1]; 4431 dmu_tx_t *tx; 4432 ztest_cb_data_t *cb_data[3], *tmp_cb; 4433 uint64_t old_txg, txg; 4434 int i, error = 0; 4435 4436 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_UINT64_OTHER, 0, 0); 4437 4438 if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0) 4439 return; 4440 4441 tx = dmu_tx_create(os); 4442 4443 cb_data[0] = ztest_create_cb_data(os, 0); 4444 dmu_tx_callback_register(tx, ztest_commit_callback, cb_data[0]); 4445 4446 dmu_tx_hold_write(tx, od[0].od_object, 0, sizeof (uint64_t)); 4447 4448 /* Every once in a while, abort the transaction on purpose */ 4449 if (ztest_random(100) == 0) 4450 error = -1; 4451 4452 if (!error) 4453 error = dmu_tx_assign(tx, TXG_NOWAIT); 4454 4455 txg = error ? 0 : dmu_tx_get_txg(tx); 4456 4457 cb_data[0]->zcd_txg = txg; 4458 cb_data[1] = ztest_create_cb_data(os, txg); 4459 dmu_tx_callback_register(tx, ztest_commit_callback, cb_data[1]); 4460 4461 if (error) { 4462 /* 4463 * It's not a strict requirement to call the registered 4464 * callbacks from inside dmu_tx_abort(), but that's what 4465 * is supposed to happen in the current implementation, 4466 * so we will check for that.
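 *
 * The call chain being exercised, as implemented at this revision
 * (an implementation detail, not a documented guarantee):
 *
 *	dmu_tx_abort(tx)
 *	  -> dmu_tx_do_callbacks(&tx->tx_callbacks, ECANCELED)
 *	    -> ztest_commit_callback(cb_data[i], ECANCELED)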
4467 */ 4468 for (i = 0; i < 2; i++) { 4469 cb_data[i]->zcd_expected_err = ECANCELED; 4470 VERIFY(!cb_data[i]->zcd_called); 4471 } 4472 4473 dmu_tx_abort(tx); 4474 4475 for (i = 0; i < 2; i++) { 4476 VERIFY(cb_data[i]->zcd_called); 4477 umem_free(cb_data[i], sizeof (ztest_cb_data_t)); 4478 } 4479 4480 return; 4481 } 4482 4483 cb_data[2] = ztest_create_cb_data(os, txg); 4484 dmu_tx_callback_register(tx, ztest_commit_callback, cb_data[2]); 4485 4486 /* 4487 * Read existing data to make sure there isn't a future leak. 4488 */ 4489 VERIFY(0 == dmu_read(os, od[0].od_object, 0, sizeof (uint64_t), 4490 &old_txg, DMU_READ_PREFETCH)); 4491 4492 if (old_txg > txg) 4493 fatal(0, "future leak: got %" PRIu64 ", open txg is %" PRIu64, 4494 old_txg, txg); 4495 4496 dmu_write(os, od[0].od_object, 0, sizeof (uint64_t), &txg, tx); 4497 4498 (void) mutex_lock(&zcl.zcl_callbacks_lock); 4499 4500 /* 4501 * Since commit callbacks don't have any ordering requirement and since 4502 * it is theoretically possible for a commit callback to be called 4503 * after an arbitrary amount of time has elapsed since its txg has been 4504 * synced, it is difficult to reliably determine whether a commit 4505 * callback hasn't been called due to high load or due to a flawed 4506 * implementation. 4507 * 4508 * In practice, we will assume that if after a certain number of txgs a 4509 * commit callback hasn't been called, then most likely there's an 4510 * implementation bug. 4511 */ 4512 tmp_cb = list_head(&zcl.zcl_callbacks); 4513 if (tmp_cb != NULL && 4514 (txg - ZTEST_COMMIT_CALLBACK_THRESH) > tmp_cb->zcd_txg) { 4515 fatal(0, "Commit callback threshold exceeded, oldest txg: %" 4516 PRIu64 ", open txg: %" PRIu64 "\n", tmp_cb->zcd_txg, txg); 4517 } 4518 4519 /* 4520 * Let's find the place to insert our callbacks. 4521 * 4522 * Even though the list is ordered by txg, it is possible for the 4523 * insertion point to not be the end because our txg may already be 4524 * quiescing at this point and other callbacks in the open txg 4525 * (from other objsets) may have sneaked in.
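 *
 * The backwards scan below therefore starts at the tail and walks
 * toward the head until it finds a callback with zcd_txg <= txg,
 * then inserts after it (or at the head if none is found), keeping
 * the list sorted by txg.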
4526 */ 4527 tmp_cb = list_tail(&zcl.zcl_callbacks); 4528 while (tmp_cb != NULL && tmp_cb->zcd_txg > txg) 4529 tmp_cb = list_prev(&zcl.zcl_callbacks, tmp_cb); 4530 4531 /* Add the 3 callbacks to the list */ 4532 for (i = 0; i < 3; i++) { 4533 if (tmp_cb == NULL) 4534 list_insert_head(&zcl.zcl_callbacks, cb_data[i]); 4535 else 4536 list_insert_after(&zcl.zcl_callbacks, tmp_cb, 4537 cb_data[i]); 4538 4539 cb_data[i]->zcd_added = B_TRUE; 4540 VERIFY(!cb_data[i]->zcd_called); 4541 4542 tmp_cb = cb_data[i]; 4543 } 4544 4545 (void) mutex_unlock(&zcl.zcl_callbacks_lock); 4546 4547 dmu_tx_commit(tx); 4548} 4549 4550/* ARGSUSED */ 4551void 4552ztest_dsl_prop_get_set(ztest_ds_t *zd, uint64_t id) 4553{ 4554 zfs_prop_t proplist[] = { 4555 ZFS_PROP_CHECKSUM, 4556 ZFS_PROP_COMPRESSION, 4557 ZFS_PROP_COPIES, 4558 ZFS_PROP_DEDUP 4559 }; 4560 4561 (void) rw_rdlock(&ztest_name_lock); 4562 4563 for (int p = 0; p < sizeof (proplist) / sizeof (proplist[0]); p++) 4564 (void) ztest_dsl_prop_set_uint64(zd->zd_name, proplist[p], 4565 ztest_random_dsl_prop(proplist[p]), (int)ztest_random(2)); 4566 4567 (void) rw_unlock(&ztest_name_lock); 4568} 4569 4570/* ARGSUSED */ 4571void 4572ztest_spa_prop_get_set(ztest_ds_t *zd, uint64_t id) 4573{ 4574 nvlist_t *props = NULL; 4575 4576 (void) rw_rdlock(&ztest_name_lock); 4577 4578 (void) ztest_spa_prop_set_uint64(ZPOOL_PROP_DEDUPDITTO, 4579 ZIO_DEDUPDITTO_MIN + ztest_random(ZIO_DEDUPDITTO_MIN)); 4580 4581 VERIFY0(spa_prop_get(ztest_spa, &props)); 4582 4583 if (ztest_opts.zo_verbose >= 6) 4584 dump_nvlist(props, 4); 4585 4586 nvlist_free(props); 4587 4588 (void) rw_unlock(&ztest_name_lock); 4589} 4590 4591static int 4592user_release_one(const char *snapname, const char *holdname) 4593{ 4594 nvlist_t *snaps, *holds; 4595 int error; 4596 4597 snaps = fnvlist_alloc(); 4598 holds = fnvlist_alloc(); 4599 fnvlist_add_boolean(holds, holdname); 4600 fnvlist_add_nvlist(snaps, snapname, holds); 4601 fnvlist_free(holds); 4602 error = dsl_dataset_user_release(snaps, NULL); 4603 fnvlist_free(snaps); 4604 return (error); 4605} 4606 4607/* 4608 * Test snapshot hold/release and deferred destroy. 4609 */ 4610void 4611ztest_dmu_snapshot_hold(ztest_ds_t *zd, uint64_t id) 4612{ 4613 int error; 4614 objset_t *os = zd->zd_os; 4615 objset_t *origin; 4616 char snapname[100]; 4617 char fullname[100]; 4618 char clonename[100]; 4619 char tag[100]; 4620 char osname[MAXNAMELEN]; 4621 nvlist_t *holds; 4622 4623 (void) rw_rdlock(&ztest_name_lock); 4624 4625 dmu_objset_name(os, osname); 4626 4627 (void) snprintf(snapname, sizeof (snapname), "sh1_%llu", id); 4628 (void) snprintf(fullname, sizeof (fullname), "%s@%s", osname, snapname); 4629 (void) snprintf(clonename, sizeof (clonename), 4630 "%s/ch1_%llu", osname, id); 4631 (void) snprintf(tag, sizeof (tag), "tag_%llu", id); 4632 4633 /* 4634 * Clean up from any previous run. 4635 */ 4636 error = dsl_destroy_head(clonename); 4637 if (error != ENOENT) 4638 ASSERT0(error); 4639 error = user_release_one(fullname, tag); 4640 if (error != ESRCH && error != ENOENT) 4641 ASSERT0(error); 4642 error = dsl_destroy_snapshot(fullname, B_FALSE); 4643 if (error != ENOENT) 4644 ASSERT0(error); 4645 4646 /* 4647 * Create snapshot, clone it, mark snap for deferred destroy, 4648 * destroy clone, verify snap was also destroyed. 
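 *
 * (Deferred-destroy semantics assumed here: dsl_destroy_snapshot()
 * with defer == B_TRUE only marks the snapshot, which is actually
 * reclaimed once its last clone or user hold goes away -- that is why
 * destroying the clone below must make the snapshot disappear too.)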
4649 */ 4650 error = dmu_objset_snapshot_one(osname, snapname); 4651 if (error) { 4652 if (error == ENOSPC) { 4653 ztest_record_enospc("dmu_objset_snapshot"); 4654 goto out; 4655 } 4656 fatal(0, "dmu_objset_snapshot(%s) = %d", fullname, error); 4657 } 4658 4659 error = dmu_objset_clone(clonename, fullname); 4660 if (error) { 4661 if (error == ENOSPC) { 4662 ztest_record_enospc("dmu_objset_clone"); 4663 goto out; 4664 } 4665 fatal(0, "dmu_objset_clone(%s) = %d", clonename, error); 4666 } 4667 4668 error = dsl_destroy_snapshot(fullname, B_TRUE); 4669 if (error) { 4670 fatal(0, "dsl_destroy_snapshot(%s, B_TRUE) = %d", 4671 fullname, error); 4672 } 4673 4674 error = dsl_destroy_head(clonename); 4675 if (error) 4676 fatal(0, "dsl_destroy_head(%s) = %d", clonename, error); 4677 4678 error = dmu_objset_hold(fullname, FTAG, &origin); 4679 if (error != ENOENT) 4680 fatal(0, "dmu_objset_hold(%s) = %d", fullname, error); 4681 4682 /* 4683 * Create snapshot, add temporary hold, verify that we can't 4684 * destroy a held snapshot, mark for deferred destroy, 4685 * release hold, verify snapshot was destroyed. 4686 */ 4687 error = dmu_objset_snapshot_one(osname, snapname); 4688 if (error) { 4689 if (error == ENOSPC) { 4690 ztest_record_enospc("dmu_objset_snapshot"); 4691 goto out; 4692 } 4693 fatal(0, "dmu_objset_snapshot(%s) = %d", fullname, error); 4694 } 4695 4696 holds = fnvlist_alloc(); 4697 fnvlist_add_string(holds, fullname, tag); 4698 error = dsl_dataset_user_hold(holds, 0, NULL); 4699 fnvlist_free(holds); 4700 4701 if (error) 4702 fatal(0, "dsl_dataset_user_hold(%s, %s) = %d", fullname, tag, error); 4703 4704 error = dsl_destroy_snapshot(fullname, B_FALSE); 4705 if (error != EBUSY) { 4706 fatal(0, "dsl_destroy_snapshot(%s, B_FALSE) = %d", 4707 fullname, error); 4708 } 4709 4710 error = dsl_destroy_snapshot(fullname, B_TRUE); 4711 if (error) { 4712 fatal(0, "dsl_destroy_snapshot(%s, B_TRUE) = %d", 4713 fullname, error); 4714 } 4715 4716 error = user_release_one(fullname, tag); 4717 if (error) 4718 fatal(0, "user_release_one(%s, %s) = %d", fullname, tag, error); 4719 4720 VERIFY3U(dmu_objset_hold(fullname, FTAG, &origin), ==, ENOENT); 4721 4722out: 4723 (void) rw_unlock(&ztest_name_lock); 4724} 4725 4726/* 4727 * Inject random faults into the on-disk data. 4728 */ 4729/* ARGSUSED */ 4730void 4731ztest_fault_inject(ztest_ds_t *zd, uint64_t id) 4732{ 4733 ztest_shared_t *zs = ztest_shared; 4734 spa_t *spa = ztest_spa; 4735 int fd; 4736 uint64_t offset; 4737 uint64_t leaves; 4738 uint64_t bad = 0x1990c0ffeedecadeULL; 4739 uint64_t top, leaf; 4740 char path0[MAXPATHLEN]; 4741 char pathrand[MAXPATHLEN]; 4742 size_t fsize; 4743 int bshift = SPA_MAXBLOCKSHIFT + 2; /* don't scrog all labels */ 4744 int iters = 1000; 4745 int maxfaults; 4746 int mirror_save; 4747 vdev_t *vd0 = NULL; 4748 uint64_t guid0 = 0; 4749 boolean_t islog = B_FALSE; 4750 4751 VERIFY(mutex_lock(&ztest_vdev_lock) == 0); 4752 maxfaults = MAXFAULTS(); 4753 leaves = MAX(zs->zs_mirrors, 1) * ztest_opts.zo_raidz; 4754 mirror_save = zs->zs_mirrors; 4755 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0); 4756 4757 ASSERT(leaves >= 1); 4758 4759 /* 4760 * Grab the name lock as reader. There are some operations 4761 * which don't like to have their vdevs changed while 4762 * they are in progress (e.g. spa_change_guid). Those 4763 * operations will have grabbed the name lock as writer. 4764 */ 4765 (void) rw_rdlock(&ztest_name_lock); 4766 4767 /* 4768 * We need SCL_STATE here because we're going to look at vd0->vdev_tsd.
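 *
 * (Assumption about the locking contract, mirroring the note above:
 * holding SCL_STATE as reader is what makes it safe to dereference
 * vdev_tsd below, since vdev state changes take these locks as
 * writer. It does not block I/O, which is the point -- the injected
 * faults are meant to race with real work.)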
4769 */ 4770 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER); 4771 4772 if (ztest_random(2) == 0) { 4773 /* 4774 * Inject errors on a normal data device or slog device. 4775 */ 4776 top = ztest_random_vdev_top(spa, B_TRUE); 4777 leaf = ztest_random(leaves) + zs->zs_splits; 4778 4779 /* 4780 * Generate paths to the first leaf in this top-level vdev, 4781 * and to the random leaf we selected. We'll induce transient 4782 * write failures and random online/offline activity on leaf 0, 4783 * and we'll write random garbage to the randomly chosen leaf. 4784 */ 4785 (void) snprintf(path0, sizeof (path0), ztest_dev_template, 4786 ztest_opts.zo_dir, ztest_opts.zo_pool, 4787 top * leaves + zs->zs_splits); 4788 (void) snprintf(pathrand, sizeof (pathrand), ztest_dev_template, 4789 ztest_opts.zo_dir, ztest_opts.zo_pool, 4790 top * leaves + leaf); 4791 4792 vd0 = vdev_lookup_by_path(spa->spa_root_vdev, path0); 4793 if (vd0 != NULL && vd0->vdev_top->vdev_islog) 4794 islog = B_TRUE; 4795 4796 /* 4797 * If the top-level vdev needs to be resilvered 4798 * then we only allow faults on the device that is 4799 * resilvering. 4800 */ 4801 if (vd0 != NULL && maxfaults != 1 && 4802 (!vdev_resilver_needed(vd0->vdev_top, NULL, NULL) || 4803 vd0->vdev_resilvering)) { 4804 /* 4805 * Make vd0 explicitly claim to be unreadable, 4806 * or unwriteable, or reach behind its back 4807 * and close the underlying fd. We can do this if 4808 * maxfaults == 0 because we'll fail and reexecute, 4809 * and we can do it if maxfaults >= 2 because we'll 4810 * have enough redundancy. If maxfaults == 1, the 4811 * combination of this with injection of random data 4812 * corruption below exceeds the pool's fault tolerance. 4813 */ 4814 vdev_file_t *vf = vd0->vdev_tsd; 4815 4816 if (vf != NULL && ztest_random(3) == 0) { 4817 (void) close(vf->vf_vnode->v_fd); 4818 vf->vf_vnode->v_fd = -1; 4819 } else if (ztest_random(2) == 0) { 4820 vd0->vdev_cant_read = B_TRUE; 4821 } else { 4822 vd0->vdev_cant_write = B_TRUE; 4823 } 4824 guid0 = vd0->vdev_guid; 4825 } 4826 } else { 4827 /* 4828 * Inject errors on an l2cache device. 4829 */ 4830 spa_aux_vdev_t *sav = &spa->spa_l2cache; 4831 4832 if (sav->sav_count == 0) { 4833 spa_config_exit(spa, SCL_STATE, FTAG); 4834 (void) rw_unlock(&ztest_name_lock); 4835 return; 4836 } 4837 vd0 = sav->sav_vdevs[ztest_random(sav->sav_count)]; 4838 guid0 = vd0->vdev_guid; 4839 (void) strcpy(path0, vd0->vdev_path); 4840 (void) strcpy(pathrand, vd0->vdev_path); 4841 4842 leaf = 0; 4843 leaves = 1; 4844 maxfaults = INT_MAX; /* no limit on cache devices */ 4845 } 4846 4847 spa_config_exit(spa, SCL_STATE, FTAG); 4848 (void) rw_unlock(&ztest_name_lock); 4849 4850 /* 4851 * If we can tolerate two or more faults, or we're dealing 4852 * with a slog, randomly online/offline vd0. 4853 */ 4854 if ((maxfaults >= 2 || islog) && guid0 != 0) { 4855 if (ztest_random(10) < 6) { 4856 int flags = (ztest_random(2) == 0 ? 4857 ZFS_OFFLINE_TEMPORARY : 0); 4858 4859 /* 4860 * We have to grab the zs_name_lock as writer to 4861 * prevent a race between offlining a slog and 4862 * destroying a dataset. Offlining the slog will 4863 * grab a reference on the dataset which may cause 4864 * dmu_objset_destroy() to fail with EBUSY thus 4865 * leaving the dataset in an inconsistent state. 
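 *
 * (ZFS_OFFLINE_TEMPORARY, chosen at random above, asks that the
 * offline state not persist across export/import; either flavor is
 * acceptable here, as long as the offline doesn't fail with EBUSY.)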
4866 */ 4867 if (islog) 4868 (void) rw_wrlock(&ztest_name_lock); 4869 4870 VERIFY(vdev_offline(spa, guid0, flags) != EBUSY); 4871 4872 if (islog) 4873 (void) rw_unlock(&ztest_name_lock); 4874 } else { 4875 /* 4876 * Ideally we would like to be able to randomly 4877 * call vdev_[on|off]line without holding locks 4878 * to force unpredictable failures but the side 4879 * effects of vdev_[on|off]line prevent us from 4880 * doing so. We grab the ztest_vdev_lock here to 4881 * prevent a race between injection testing and 4882 * aux_vdev removal. 4883 */ 4884 VERIFY(mutex_lock(&ztest_vdev_lock) == 0); 4885 (void) vdev_online(spa, guid0, 0, NULL); 4886 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0); 4887 } 4888 } 4889 4890 if (maxfaults == 0) 4891 return; 4892 4893 /* 4894 * We have at least single-fault tolerance, so inject data corruption. 4895 */ 4896 fd = open(pathrand, O_RDWR); 4897 4898 if (fd == -1) /* we hit a gap in the device namespace */ 4899 return; 4900 4901 fsize = lseek(fd, 0, SEEK_END); 4902 4903 while (--iters != 0) { 4904 offset = ztest_random(fsize / (leaves << bshift)) * 4905 (leaves << bshift) + (leaf << bshift) + 4906 (ztest_random(1ULL << (bshift - 1)) & -8ULL); 4907 4908 if (offset >= fsize) 4909 continue; 4910 4911 VERIFY(mutex_lock(&ztest_vdev_lock) == 0); 4912 if (mirror_save != zs->zs_mirrors) { 4913 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0); 4914 (void) close(fd); 4915 return; 4916 } 4917 4918 if (pwrite(fd, &bad, sizeof (bad), offset) != sizeof (bad)) 4919 fatal(1, "can't inject bad word at 0x%llx in %s", 4920 offset, pathrand); 4921 4922 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0); 4923 4924 if (ztest_opts.zo_verbose >= 7) 4925 (void) printf("injected bad word into %s," 4926 " offset 0x%llx\n", pathrand, (u_longlong_t)offset); 4927 } 4928 4929 (void) close(fd); 4930} 4931 4932/* 4933 * Verify that DDT repair works as expected. 4934 */ 4935void 4936ztest_ddt_repair(ztest_ds_t *zd, uint64_t id) 4937{ 4938 ztest_shared_t *zs = ztest_shared; 4939 spa_t *spa = ztest_spa; 4940 objset_t *os = zd->zd_os; 4941 ztest_od_t od[1]; 4942 uint64_t object, blocksize, txg, pattern, psize; 4943 enum zio_checksum checksum = spa_dedup_checksum(spa); 4944 dmu_buf_t *db; 4945 dmu_tx_t *tx; 4946 void *buf; 4947 blkptr_t blk; 4948 int copies = 2 * ZIO_DEDUPDITTO_MIN; 4949 4950 blocksize = ztest_random_blocksize(); 4951 blocksize = MIN(blocksize, 2048); /* because we write so many */ 4952 4953 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_UINT64_OTHER, blocksize, 0); 4954 4955 if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0) 4956 return; 4957 4958 /* 4959 * Take the name lock as writer to prevent anyone else from changing 4960 * the pool and dataset properties we need to maintain during this test. 4961 */ 4962 (void) rw_wrlock(&ztest_name_lock); 4963 4964 if (ztest_dsl_prop_set_uint64(zd->zd_name, ZFS_PROP_DEDUP, checksum, 4965 B_FALSE) != 0 || 4966 ztest_dsl_prop_set_uint64(zd->zd_name, ZFS_PROP_COPIES, 1, 4967 B_FALSE) != 0) { 4968 (void) rw_unlock(&ztest_name_lock); 4969 return; 4970 } 4971 4972 object = od[0].od_object; 4973 blocksize = od[0].od_blocksize; 4974 pattern = zs->zs_guid ^ dmu_objset_fsid_guid(os); 4975 4976 ASSERT(object != 0); 4977 4978 tx = dmu_tx_create(os); 4979 dmu_tx_hold_write(tx, object, 0, copies * blocksize); 4980 txg = ztest_tx_assign(tx, TXG_WAIT, FTAG); 4981 if (txg == 0) { 4982 (void) rw_unlock(&ztest_name_lock); 4983 return; 4984 } 4985 4986 /* 4987 * Write all the copies of our block.
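 *
 * With copies = 2 * ZIO_DEDUPDITTO_MIN identical blocks, and the
 * pool's dedupditto threshold set to that same value in ztest_run(),
 * the DDT should write extra ditto copies once the refcount crosses
 * the threshold -- those are what the damage inflicted below gets
 * repaired from on the subsequent read.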
4988 */ 4989 for (int i = 0; i < copies; i++) { 4990 uint64_t offset = i * blocksize; 4991 int error = dmu_buf_hold(os, object, offset, FTAG, &db, 4992 DMU_READ_NO_PREFETCH); 4993 if (error != 0) { 4994 fatal(B_FALSE, "dmu_buf_hold(%p, %llu, %llu) = %u", 4995 os, (long long)object, (long long) offset, error); 4996 } 4997 ASSERT(db->db_offset == offset); 4998 ASSERT(db->db_size == blocksize); 4999 ASSERT(ztest_pattern_match(db->db_data, db->db_size, pattern) || 5000 ztest_pattern_match(db->db_data, db->db_size, 0ULL)); 5001 dmu_buf_will_fill(db, tx); 5002 ztest_pattern_set(db->db_data, db->db_size, pattern); 5003 dmu_buf_rele(db, FTAG); 5004 } 5005 5006 dmu_tx_commit(tx); 5007 txg_wait_synced(spa_get_dsl(spa), txg); 5008 5009 /* 5010 * Find out what block we got. 5011 */ 5012 VERIFY0(dmu_buf_hold(os, object, 0, FTAG, &db, 5013 DMU_READ_NO_PREFETCH)); 5014 blk = *((dmu_buf_impl_t *)db)->db_blkptr; 5015 dmu_buf_rele(db, FTAG); 5016 5017 /* 5018 * Damage the block. Dedup-ditto will save us when we read it later. 5019 */ 5020 psize = BP_GET_PSIZE(&blk); 5021 buf = zio_buf_alloc(psize); 5022 ztest_pattern_set(buf, psize, ~pattern); 5023 5024 (void) zio_wait(zio_rewrite(NULL, spa, 0, &blk, 5025 buf, psize, NULL, NULL, ZIO_PRIORITY_SYNC_WRITE, 5026 ZIO_FLAG_CANFAIL | ZIO_FLAG_INDUCE_DAMAGE, NULL)); 5027 5028 zio_buf_free(buf, psize); 5029 5030 (void) rw_unlock(&ztest_name_lock); 5031} 5032 5033/* 5034 * Scrub the pool. 5035 */ 5036/* ARGSUSED */ 5037void 5038ztest_scrub(ztest_ds_t *zd, uint64_t id) 5039{ 5040 spa_t *spa = ztest_spa; 5041 5042 (void) spa_scan(spa, POOL_SCAN_SCRUB); 5043 (void) poll(NULL, 0, 100); /* wait a moment, then force a restart */ 5044 (void) spa_scan(spa, POOL_SCAN_SCRUB); 5045} 5046 5047/* 5048 * Change the guid for the pool. 5049 */ 5050/* ARGSUSED */ 5051void 5052ztest_reguid(ztest_ds_t *zd, uint64_t id) 5053{ 5054 spa_t *spa = ztest_spa; 5055 uint64_t orig, load; 5056 int error; 5057 5058 orig = spa_guid(spa); 5059 load = spa_load_guid(spa); 5060 5061 (void) rw_wrlock(&ztest_name_lock); 5062 error = spa_change_guid(spa); 5063 (void) rw_unlock(&ztest_name_lock); 5064 5065 if (error != 0) 5066 return; 5067 5068 if (ztest_opts.zo_verbose >= 4) { 5069 (void) printf("Changed guid old %llu -> %llu\n", 5070 (u_longlong_t)orig, (u_longlong_t)spa_guid(spa)); 5071 } 5072 5073 VERIFY3U(orig, !=, spa_guid(spa)); 5074 VERIFY3U(load, ==, spa_load_guid(spa)); 5075} 5076 5077/* 5078 * Rename the pool to a different name and then rename it back. 5079 */ 5080/* ARGSUSED */ 5081void 5082ztest_spa_rename(ztest_ds_t *zd, uint64_t id) 5083{ 5084 char *oldname, *newname; 5085 spa_t *spa; 5086 5087 (void) rw_wrlock(&ztest_name_lock); 5088 5089 oldname = ztest_opts.zo_pool; 5090 newname = umem_alloc(strlen(oldname) + 5, UMEM_NOFAIL); 5091 (void) strcpy(newname, oldname); 5092 (void) strcat(newname, "_tmp"); 5093 5094 /* 5095 * Do the rename 5096 */ 5097 VERIFY3U(0, ==, spa_rename(oldname, newname)); 5098 5099 /* 5100 * Try to open it under the old name, which shouldn't exist 5101 */ 5102 VERIFY3U(ENOENT, ==, spa_open(oldname, &spa, FTAG)); 5103 5104 /* 5105 * Open it under the new name and make sure it's still the same spa_t. 
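 *
 * (spa_rename() keeps the same spa_t and just re-registers it under
 * the new name, so pointer equality with ztest_spa is the meaningful
 * check here.)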
5106 */ 5107 VERIFY3U(0, ==, spa_open(newname, &spa, FTAG)); 5108 5109 ASSERT(spa == ztest_spa); 5110 spa_close(spa, FTAG); 5111 5112 /* 5113 * Rename it back to the original 5114 */ 5115 VERIFY3U(0, ==, spa_rename(newname, oldname)); 5116 5117 /* 5118 * Make sure it can still be opened 5119 */ 5120 VERIFY3U(0, ==, spa_open(oldname, &spa, FTAG)); 5121 5122 ASSERT(spa == ztest_spa); 5123 spa_close(spa, FTAG); 5124 5125 umem_free(newname, strlen(newname) + 1); 5126 5127 (void) rw_unlock(&ztest_name_lock); 5128} 5129 5130/* 5131 * Verify pool integrity by running zdb. 5132 */ 5133static void 5134ztest_run_zdb(char *pool) 5135{ 5136 int status; 5137 char zdb[MAXPATHLEN + MAXNAMELEN + 20]; 5138 char zbuf[1024]; 5139 char *bin; 5140 char *ztest; 5141 char *isa; 5142 int isalen; 5143 FILE *fp; 5144 5145 strlcpy(zdb, "/usr/bin/ztest", sizeof(zdb)); 5146 5147 /* zdb lives in /usr/sbin, while ztest lives in /usr/bin */ 5148 bin = strstr(zdb, "/usr/bin/"); 5149 ztest = strstr(bin, "/ztest"); 5150 isa = bin + 8; 5151 isalen = ztest - isa; 5152 isa = strdup(isa); 5153 /* LINTED */ 5154 (void) sprintf(bin, 5155 "/usr/sbin%.*s/zdb -bcc%s%s -U %s %s", 5156 isalen, 5157 isa, 5158 ztest_opts.zo_verbose >= 3 ? "s" : "", 5159 ztest_opts.zo_verbose >= 4 ? "v" : "", 5160 spa_config_path, 5161 pool); 5162 free(isa); 5163 5164 if (ztest_opts.zo_verbose >= 5) 5165 (void) printf("Executing %s\n", strstr(zdb, "zdb ")); 5166 5167 fp = popen(zdb, "r"); 5168 assert(fp != NULL); 5169 5170 while (fgets(zbuf, sizeof (zbuf), fp) != NULL) 5171 if (ztest_opts.zo_verbose >= 3) 5172 (void) printf("%s", zbuf); 5173 5174 status = pclose(fp); 5175 5176 if (status == 0) 5177 return; 5178 5179 ztest_dump_core = 0; 5180 if (WIFEXITED(status)) 5181 fatal(0, "'%s' exit code %d", zdb, WEXITSTATUS(status)); 5182 else 5183 fatal(0, "'%s' died with signal %d", zdb, WTERMSIG(status)); 5184} 5185 5186static void 5187ztest_walk_pool_directory(char *header) 5188{ 5189 spa_t *spa = NULL; 5190 5191 if (ztest_opts.zo_verbose >= 6) 5192 (void) printf("%s\n", header); 5193 5194 mutex_enter(&spa_namespace_lock); 5195 while ((spa = spa_next(spa)) != NULL) 5196 if (ztest_opts.zo_verbose >= 6) 5197 (void) printf("\t%s\n", spa_name(spa)); 5198 mutex_exit(&spa_namespace_lock); 5199} 5200 5201static void 5202ztest_spa_import_export(char *oldname, char *newname) 5203{ 5204 nvlist_t *config, *newconfig; 5205 uint64_t pool_guid; 5206 spa_t *spa; 5207 int error; 5208 5209 if (ztest_opts.zo_verbose >= 4) { 5210 (void) printf("import/export: old = %s, new = %s\n", 5211 oldname, newname); 5212 } 5213 5214 /* 5215 * Clean up from previous runs. 5216 */ 5217 (void) spa_destroy(newname); 5218 5219 /* 5220 * Get the pool's configuration and guid. 5221 */ 5222 VERIFY3U(0, ==, spa_open(oldname, &spa, FTAG)); 5223 5224 /* 5225 * Kick off a scrub to tickle scrub/export races. 5226 */ 5227 if (ztest_random(2) == 0) 5228 (void) spa_scan(spa, POOL_SCAN_SCRUB); 5229 5230 pool_guid = spa_guid(spa); 5231 spa_close(spa, FTAG); 5232 5233 ztest_walk_pool_directory("pools before export"); 5234 5235 /* 5236 * Export it. 5237 */ 5238 VERIFY3U(0, ==, spa_export(oldname, &config, B_FALSE, B_FALSE)); 5239 5240 ztest_walk_pool_directory("pools after export"); 5241 5242 /* 5243 * Try to import it. 5244 */ 5245 newconfig = spa_tryimport(config); 5246 ASSERT(newconfig != NULL); 5247 nvlist_free(newconfig); 5248 5249 /* 5250 * Import it under the new name. 
5251 */ 5252 error = spa_import(newname, config, NULL, 0); 5253 if (error != 0) { 5254 dump_nvlist(config, 0); 5255 fatal(B_FALSE, "couldn't import pool %s as %s: error %u", 5256 oldname, newname, error); 5257 } 5258 5259 ztest_walk_pool_directory("pools after import"); 5260 5261 /* 5262 * Try to import it again -- should fail with EEXIST. 5263 */ 5264 VERIFY3U(EEXIST, ==, spa_import(newname, config, NULL, 0)); 5265 5266 /* 5267 * Try to import it under a different name -- should fail with EEXIST. 5268 */ 5269 VERIFY3U(EEXIST, ==, spa_import(oldname, config, NULL, 0)); 5270 5271 /* 5272 * Verify that the pool is no longer visible under the old name. 5273 */ 5274 VERIFY3U(ENOENT, ==, spa_open(oldname, &spa, FTAG)); 5275 5276 /* 5277 * Verify that we can open and close the pool using the new name. 5278 */ 5279 VERIFY3U(0, ==, spa_open(newname, &spa, FTAG)); 5280 ASSERT(pool_guid == spa_guid(spa)); 5281 spa_close(spa, FTAG); 5282 5283 nvlist_free(config); 5284} 5285 5286static void 5287ztest_resume(spa_t *spa) 5288{ 5289 if (spa_suspended(spa) && ztest_opts.zo_verbose >= 6) 5290 (void) printf("resuming from suspended state\n"); 5291 spa_vdev_state_enter(spa, SCL_NONE); 5292 vdev_clear(spa, NULL); 5293 (void) spa_vdev_state_exit(spa, NULL, 0); 5294 (void) zio_resume(spa); 5295} 5296 5297static void * 5298ztest_resume_thread(void *arg) 5299{ 5300 spa_t *spa = arg; 5301 5302 while (!ztest_exiting) { 5303 if (spa_suspended(spa)) 5304 ztest_resume(spa); 5305 (void) poll(NULL, 0, 100); 5306 } 5307 return (NULL); 5308} 5309 5310static void * 5311ztest_deadman_thread(void *arg) 5312{ 5313 ztest_shared_t *zs = arg; 5314 spa_t *spa = ztest_spa; 5315 hrtime_t delta, total = 0; 5316 5317 for (;;) { 5318 delta = (zs->zs_thread_stop - zs->zs_thread_start) / 5319 NANOSEC + zfs_deadman_synctime; 5320 5321 (void) poll(NULL, 0, (int)(1000 * delta)); 5322 5323 /* 5324 * If the pool is suspended then fail immediately. Otherwise, 5325 * check to see if the pool is making any progress. If 5326 * vdev_deadman() discovers that there hasn't been any recent 5327 * I/Os then it will end up aborting the tests. 5328 */ 5329 if (spa_suspended(spa)) { 5330 fatal(0, "aborting test after %llu seconds because " 5331 "pool has transitioned to a suspended state.", 5332 zfs_deadman_synctime); 5333 return (NULL); 5334 } 5335 vdev_deadman(spa->spa_root_vdev); 5336 5337 total += zfs_deadman_synctime; 5338 (void) printf("ztest has been running for %lld seconds\n", 5339 total); 5340 } 5341} 5342 5343static void 5344ztest_execute(int test, ztest_info_t *zi, uint64_t id) 5345{ 5346 ztest_ds_t *zd = &ztest_ds[id % ztest_opts.zo_datasets]; 5347 ztest_shared_callstate_t *zc = ZTEST_GET_SHARED_CALLSTATE(test); 5348 hrtime_t functime = gethrtime(); 5349 5350 for (int i = 0; i < zi->zi_iters; i++) 5351 zi->zi_func(zd, id); 5352 5353 functime = gethrtime() - functime; 5354 5355 atomic_add_64(&zc->zc_count, 1); 5356 atomic_add_64(&zc->zc_time, functime); 5357 5358 if (ztest_opts.zo_verbose >= 4) { 5359 Dl_info dli; 5360 (void) dladdr((void *)zi->zi_func, &dli); 5361 (void) printf("%6.2f sec in %s\n", 5362 (double)functime / NANOSEC, dli.dli_sname); 5363 } 5364} 5365 5366static void * 5367ztest_thread(void *arg) 5368{ 5369 int rand; 5370 uint64_t id = (uintptr_t)arg; 5371 ztest_shared_t *zs = ztest_shared; 5372 uint64_t call_next; 5373 hrtime_t now; 5374 ztest_info_t *zi; 5375 ztest_shared_callstate_t *zc; 5376 5377 while ((now = gethrtime()) < zs->zs_thread_stop) { 5378 /* 5379 * See if it's time to force a crash. 
5380 */ 5381 if (now > zs->zs_thread_kill) 5382 ztest_kill(zs); 5383 5384 /* 5385 * If we're getting ENOSPC with some regularity, stop. 5386 */ 5387 if (zs->zs_enospc_count > 10) 5388 break; 5389 5390 /* 5391 * Pick a random function to execute. 5392 */ 5393 rand = ztest_random(ZTEST_FUNCS); 5394 zi = &ztest_info[rand]; 5395 zc = ZTEST_GET_SHARED_CALLSTATE(rand); 5396 call_next = zc->zc_next; 5397 5398 if (now >= call_next && 5399 atomic_cas_64(&zc->zc_next, call_next, call_next + 5400 ztest_random(2 * zi->zi_interval[0] + 1)) == call_next) { 5401 ztest_execute(rand, zi, id); 5402 } 5403 } 5404 5405 return (NULL); 5406} 5407 5408static void 5409ztest_dataset_name(char *dsname, char *pool, int d) 5410{ 5411 (void) snprintf(dsname, MAXNAMELEN, "%s/ds_%d", pool, d); 5412} 5413 5414static void 5415ztest_dataset_destroy(int d) 5416{ 5417 char name[MAXNAMELEN]; 5418 5419 ztest_dataset_name(name, ztest_opts.zo_pool, d); 5420 5421 if (ztest_opts.zo_verbose >= 3) 5422 (void) printf("Destroying %s to free up space\n", name); 5423 5424 /* 5425 * Clean up any non-standard clones and snapshots. In general, 5426 * ztest thread t operates on dataset (t % zo_datasets), 5427 * so there may be more than one thing to clean up. 5428 */ 5429 for (int t = d; t < ztest_opts.zo_threads; 5430 t += ztest_opts.zo_datasets) { 5431 ztest_dsl_dataset_cleanup(name, t); 5432 } 5433 5434 (void) dmu_objset_find(name, ztest_objset_destroy_cb, NULL, 5435 DS_FIND_SNAPSHOTS | DS_FIND_CHILDREN); 5436} 5437 5438static void 5439ztest_dataset_dirobj_verify(ztest_ds_t *zd) 5440{ 5441 uint64_t usedobjs, dirobjs, scratch; 5442 5443 /* 5444 * ZTEST_DIROBJ is the object directory for the entire dataset. 5445 * Therefore, the number of objects in use should equal the 5446 * number of ZTEST_DIROBJ entries, +1 for ZTEST_DIROBJ itself. 5447 * If not, we have an object leak. 5448 * 5449 * Note that we can only check this in ztest_dataset_open(), 5450 * when the open-context and syncing-context values agree. 5451 * That's because zap_count() returns the open-context value, 5452 * while dmu_objset_space() returns the rootbp fill count.
5453 */ 5454 VERIFY3U(0, ==, zap_count(zd->zd_os, ZTEST_DIROBJ, &dirobjs)); 5455 dmu_objset_space(zd->zd_os, &scratch, &scratch, &usedobjs, &scratch); 5456 ASSERT3U(dirobjs + 1, ==, usedobjs); 5457} 5458 5459static int 5460ztest_dataset_open(int d) 5461{ 5462 ztest_ds_t *zd = &ztest_ds[d]; 5463 uint64_t committed_seq = ZTEST_GET_SHARED_DS(d)->zd_seq; 5464 objset_t *os; 5465 zilog_t *zilog; 5466 char name[MAXNAMELEN]; 5467 int error; 5468 5469 ztest_dataset_name(name, ztest_opts.zo_pool, d); 5470 5471 (void) rw_rdlock(&ztest_name_lock); 5472 5473 error = ztest_dataset_create(name); 5474 if (error == ENOSPC) { 5475 (void) rw_unlock(&ztest_name_lock); 5476 ztest_record_enospc(FTAG); 5477 return (error); 5478 } 5479 ASSERT(error == 0 || error == EEXIST); 5480 5481 VERIFY0(dmu_objset_own(name, DMU_OST_OTHER, B_FALSE, zd, &os)); 5482 (void) rw_unlock(&ztest_name_lock); 5483 5484 ztest_zd_init(zd, ZTEST_GET_SHARED_DS(d), os); 5485 5486 zilog = zd->zd_zilog; 5487 5488 if (zilog->zl_header->zh_claim_lr_seq != 0 && 5489 zilog->zl_header->zh_claim_lr_seq < committed_seq) 5490 fatal(0, "missing log records: claimed %llu < committed %llu", 5491 zilog->zl_header->zh_claim_lr_seq, committed_seq); 5492 5493 ztest_dataset_dirobj_verify(zd); 5494 5495 zil_replay(os, zd, ztest_replay_vector); 5496 5497 ztest_dataset_dirobj_verify(zd); 5498 5499 if (ztest_opts.zo_verbose >= 6) 5500 (void) printf("%s replay %llu blocks, %llu records, seq %llu\n", 5501 zd->zd_name, 5502 (u_longlong_t)zilog->zl_parse_blk_count, 5503 (u_longlong_t)zilog->zl_parse_lr_count, 5504 (u_longlong_t)zilog->zl_replaying_seq); 5505 5506 zilog = zil_open(os, ztest_get_data); 5507 5508 if (zilog->zl_replaying_seq != 0 && 5509 zilog->zl_replaying_seq < committed_seq) 5510 fatal(0, "missing log records: replayed %llu < committed %llu", 5511 zilog->zl_replaying_seq, committed_seq); 5512 5513 return (0); 5514} 5515 5516static void 5517ztest_dataset_close(int d) 5518{ 5519 ztest_ds_t *zd = &ztest_ds[d]; 5520 5521 zil_close(zd->zd_zilog); 5522 dmu_objset_disown(zd->zd_os, zd); 5523 5524 ztest_zd_fini(zd); 5525} 5526 5527/* 5528 * Kick off threads to run tests on all datasets in parallel. 5529 */ 5530static void 5531ztest_run(ztest_shared_t *zs) 5532{ 5533 thread_t *tid; 5534 spa_t *spa; 5535 objset_t *os; 5536 thread_t resume_tid; 5537 int error; 5538 5539 ztest_exiting = B_FALSE; 5540 5541 /* 5542 * Initialize parent/child shared state. 5543 */ 5544 VERIFY(_mutex_init(&ztest_vdev_lock, USYNC_THREAD, NULL) == 0); 5545 VERIFY(rwlock_init(&ztest_name_lock, USYNC_THREAD, NULL) == 0); 5546 5547 zs->zs_thread_start = gethrtime(); 5548 zs->zs_thread_stop = 5549 zs->zs_thread_start + ztest_opts.zo_passtime * NANOSEC; 5550 zs->zs_thread_stop = MIN(zs->zs_thread_stop, zs->zs_proc_stop); 5551 zs->zs_thread_kill = zs->zs_thread_stop; 5552 if (ztest_random(100) < ztest_opts.zo_killrate) { 5553 zs->zs_thread_kill -= 5554 ztest_random(ztest_opts.zo_passtime * NANOSEC); 5555 } 5556 5557 (void) _mutex_init(&zcl.zcl_callbacks_lock, USYNC_THREAD, NULL); 5558 5559 list_create(&zcl.zcl_callbacks, sizeof (ztest_cb_data_t), 5560 offsetof(ztest_cb_data_t, zcd_node)); 5561 5562 /* 5563 * Open our pool. 
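 *
 * (kernel_init()/kernel_fini() come from libzpool and stand in for
 * kernel context in userland; FREAD | FWRITE opens the pool
 * read-write.)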
5564 */ 5565 kernel_init(FREAD | FWRITE); 5566 VERIFY0(spa_open(ztest_opts.zo_pool, &spa, FTAG)); 5567 spa->spa_debug = B_TRUE; 5568 ztest_spa = spa; 5569 5570 VERIFY0(dmu_objset_own(ztest_opts.zo_pool, 5571 DMU_OST_ANY, B_TRUE, FTAG, &os)); 5572 zs->zs_guid = dmu_objset_fsid_guid(os); 5573 dmu_objset_disown(os, FTAG); 5574 5575 spa->spa_dedup_ditto = 2 * ZIO_DEDUPDITTO_MIN; 5576 5577 /* 5578 * We don't expect the pool to suspend unless maxfaults == 0, 5579 * in which case ztest_fault_inject() temporarily takes away 5580 * the only valid replica. 5581 */ 5582 if (MAXFAULTS() == 0) 5583 spa->spa_failmode = ZIO_FAILURE_MODE_WAIT; 5584 else 5585 spa->spa_failmode = ZIO_FAILURE_MODE_PANIC; 5586 5587 /* 5588 * Create a thread to periodically resume suspended I/O. 5589 */ 5590 VERIFY(thr_create(0, 0, ztest_resume_thread, spa, THR_BOUND, 5591 &resume_tid) == 0); 5592 5593 /* 5594 * Create a deadman thread to abort() if we hang. 5595 */ 5596 VERIFY(thr_create(0, 0, ztest_deadman_thread, zs, THR_BOUND, 5597 NULL) == 0); 5598 5599 /* 5600 * Verify that we can safely inquire about any object, 5601 * whether it's allocated or not. To make it interesting, 5602 * we probe a 5-wide window around each power of two. 5603 * This hits all edge cases, including zero and the max. 5604 */ 5605 for (int t = 0; t < 64; t++) { 5606 for (int d = -5; d <= 5; d++) { 5607 error = dmu_object_info(spa->spa_meta_objset, 5608 (1ULL << t) + d, NULL); 5609 ASSERT(error == 0 || error == ENOENT || 5610 error == EINVAL); 5611 } 5612 } 5613 5614 /* 5615 * If we got any ENOSPC errors on the previous run, destroy something. 5616 */ 5617 if (zs->zs_enospc_count != 0) { 5618 int d = ztest_random(ztest_opts.zo_datasets); 5619 ztest_dataset_destroy(d); 5620 } 5621 zs->zs_enospc_count = 0; 5622 5623 tid = umem_zalloc(ztest_opts.zo_threads * sizeof (thread_t), 5624 UMEM_NOFAIL); 5625 5626 if (ztest_opts.zo_verbose >= 4) 5627 (void) printf("starting main threads...\n"); 5628 5629 /* 5630 * Kick off all the tests that run in parallel. 5631 */ 5632 for (int t = 0; t < ztest_opts.zo_threads; t++) { 5633 if (t < ztest_opts.zo_datasets && 5634 ztest_dataset_open(t) != 0) 5635 return; 5636 VERIFY(thr_create(0, 0, ztest_thread, (void *)(uintptr_t)t, 5637 THR_BOUND, &tid[t]) == 0); 5638 } 5639 5640 /* 5641 * Wait for all of the tests to complete. We go in reverse order 5642 * so we don't close datasets while threads are still using them. 5643 */ 5644 for (int t = ztest_opts.zo_threads - 1; t >= 0; t--) { 5645 VERIFY(thr_join(tid[t], NULL, NULL) == 0); 5646 if (t < ztest_opts.zo_datasets) 5647 ztest_dataset_close(t); 5648 } 5649 5650 txg_wait_synced(spa_get_dsl(spa), 0); 5651 5652 zs->zs_alloc = metaslab_class_get_alloc(spa_normal_class(spa)); 5653 zs->zs_space = metaslab_class_get_space(spa_normal_class(spa)); 5654 5655 umem_free(tid, ztest_opts.zo_threads * sizeof (thread_t)); 5656 5657 /* Kill the resume thread */ 5658 ztest_exiting = B_TRUE; 5659 VERIFY(thr_join(resume_tid, NULL, NULL) == 0); 5660 ztest_resume(spa); 5661 5662 /* 5663 * Right before closing the pool, kick off a bunch of async I/O; 5664 * spa_close() should wait for it to complete. 5665 */ 5666 for (uint64_t object = 1; object < 50; object++) 5667 dmu_prefetch(spa->spa_meta_objset, object, 0, 1ULL << 20); 5668 5669 spa_close(spa, FTAG); 5670 5671 /* 5672 * Verify that we can loop over all pools.
5673 */ 5674 mutex_enter(&spa_namespace_lock); 5675 for (spa = spa_next(NULL); spa != NULL; spa = spa_next(spa)) 5676 if (ztest_opts.zo_verbose > 3) 5677 (void) printf("spa_next: found %s\n", spa_name(spa)); 5678 mutex_exit(&spa_namespace_lock); 5679 5680 /* 5681 * Verify that we can export the pool and reimport it under a 5682 * different name. 5683 */ 5684 if (ztest_random(2) == 0) { 5685 char name[MAXNAMELEN]; 5686 (void) snprintf(name, MAXNAMELEN, "%s_import", 5687 ztest_opts.zo_pool); 5688 ztest_spa_import_export(ztest_opts.zo_pool, name); 5689 ztest_spa_import_export(name, ztest_opts.zo_pool); 5690 } 5691 5692 kernel_fini(); 5693 5694 list_destroy(&zcl.zcl_callbacks); 5695 5696 (void) _mutex_destroy(&zcl.zcl_callbacks_lock); 5697 5698 (void) rwlock_destroy(&ztest_name_lock); 5699 (void) _mutex_destroy(&ztest_vdev_lock); 5700} 5701 5702static void 5703ztest_freeze(void) 5704{ 5705 ztest_ds_t *zd = &ztest_ds[0]; 5706 spa_t *spa; 5707 int numloops = 0; 5708 5709 if (ztest_opts.zo_verbose >= 3) 5710 (void) printf("testing spa_freeze()...\n"); 5711 5712 kernel_init(FREAD | FWRITE); 5713 VERIFY3U(0, ==, spa_open(ztest_opts.zo_pool, &spa, FTAG)); 5714 VERIFY3U(0, ==, ztest_dataset_open(0)); 5715 spa->spa_debug = B_TRUE; 5716 ztest_spa = spa; 5717 5718 /* 5719 * Force the first log block to be transactionally allocated. 5720 * We have to do this before we freeze the pool -- otherwise 5721 * the log chain won't be anchored. 5722 */ 5723 while (BP_IS_HOLE(&zd->zd_zilog->zl_header->zh_log)) { 5724 ztest_dmu_object_alloc_free(zd, 0); 5725 zil_commit(zd->zd_zilog, 0); 5726 } 5727 5728 txg_wait_synced(spa_get_dsl(spa), 0); 5729 5730 /* 5731 * Freeze the pool. This stops spa_sync() from doing anything, 5732 * so that the only way to record changes from now on is the ZIL. 5733 */ 5734 spa_freeze(spa); 5735 5736 /* 5737 * Run tests that generate log records but don't alter the pool config 5738 * or depend on DSL sync tasks (snapshots, objset create/destroy, etc). 5739 * We do a txg_wait_synced() after each iteration to force the txg 5740 * to increase well beyond the last synced value in the uberblock. 5741 * The ZIL should be OK with that. 5742 */ 5743 while (ztest_random(10) != 0 && 5744 numloops++ < ztest_opts.zo_maxloops) { 5745 ztest_dmu_write_parallel(zd, 0); 5746 ztest_dmu_object_alloc_free(zd, 0); 5747 txg_wait_synced(spa_get_dsl(spa), 0); 5748 } 5749 5750 /* 5751 * Commit all of the changes we just generated. 5752 */ 5753 zil_commit(zd->zd_zilog, 0); 5754 txg_wait_synced(spa_get_dsl(spa), 0); 5755 5756 /* 5757 * Close our dataset and close the pool. 5758 */ 5759 ztest_dataset_close(0); 5760 spa_close(spa, FTAG); 5761 kernel_fini(); 5762 5763 /* 5764 * Open and close the pool and dataset to induce log replay. 
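 *
 * Because the pool was frozen, none of the changes above ever reached
 * a txg sync; reopening the dataset forces zil_replay() (called from
 * ztest_dataset_open()) to reconstruct them from the intent log,
 * which is exactly what this exercises.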
5765 */ 5766 kernel_init(FREAD | FWRITE); 5767 VERIFY3U(0, ==, spa_open(ztest_opts.zo_pool, &spa, FTAG)); 5768 ASSERT(spa_freeze_txg(spa) == UINT64_MAX); 5769 VERIFY3U(0, ==, ztest_dataset_open(0)); 5770 ztest_dataset_close(0); 5771 5772 spa->spa_debug = B_TRUE; 5773 ztest_spa = spa; 5774 txg_wait_synced(spa_get_dsl(spa), 0); 5775 ztest_reguid(NULL, 0); 5776 5777 spa_close(spa, FTAG); 5778 kernel_fini(); 5779} 5780 5781void 5782print_time(hrtime_t t, char *timebuf) 5783{ 5784 hrtime_t s = t / NANOSEC; 5785 hrtime_t m = s / 60; 5786 hrtime_t h = m / 60; 5787 hrtime_t d = h / 24; 5788 5789 s -= m * 60; 5790 m -= h * 60; 5791 h -= d * 24; 5792 5793 timebuf[0] = '\0'; 5794 5795 if (d) 5796 (void) sprintf(timebuf, 5797 "%llud%02lluh%02llum%02llus", d, h, m, s); 5798 else if (h) 5799 (void) sprintf(timebuf, "%lluh%02llum%02llus", h, m, s); 5800 else if (m) 5801 (void) sprintf(timebuf, "%llum%02llus", m, s); 5802 else 5803 (void) sprintf(timebuf, "%llus", s); 5804} 5805 5806static nvlist_t * 5807make_random_props() 5808{ 5809 nvlist_t *props; 5810 5811 VERIFY(nvlist_alloc(&props, NV_UNIQUE_NAME, 0) == 0); 5812 if (ztest_random(2) == 0) 5813 return (props); 5814 VERIFY(nvlist_add_uint64(props, "autoreplace", 1) == 0); 5815 5816 return (props); 5817} 5818 5819/* 5820 * Create a storage pool with the given name and initial vdev size. 5821 * Then test spa_freeze() functionality. 5822 */ 5823static void 5824ztest_init(ztest_shared_t *zs) 5825{ 5826 spa_t *spa; 5827 nvlist_t *nvroot, *props; 5828 5829 VERIFY(_mutex_init(&ztest_vdev_lock, USYNC_THREAD, NULL) == 0); 5830 VERIFY(rwlock_init(&ztest_name_lock, USYNC_THREAD, NULL) == 0); 5831 5832 kernel_init(FREAD | FWRITE); 5833 5834 /* 5835 * Create the storage pool. 5836 */ 5837 (void) spa_destroy(ztest_opts.zo_pool); 5838 ztest_shared->zs_vdev_next_leaf = 0; 5839 zs->zs_splits = 0; 5840 zs->zs_mirrors = ztest_opts.zo_mirrors; 5841 nvroot = make_vdev_root(NULL, NULL, NULL, ztest_opts.zo_vdev_size, 0, 5842 0, ztest_opts.zo_raidz, zs->zs_mirrors, 1); 5843 props = make_random_props(); 5844 for (int i = 0; i < SPA_FEATURES; i++) { 5845 char buf[1024]; 5846 (void) snprintf(buf, sizeof (buf), "feature@%s", 5847 spa_feature_table[i].fi_uname); 5848 VERIFY3U(0, ==, nvlist_add_uint64(props, buf, 0)); 5849 } 5850 VERIFY3U(0, ==, spa_create(ztest_opts.zo_pool, nvroot, props, NULL)); 5851 nvlist_free(nvroot); 5852 5853 VERIFY3U(0, ==, spa_open(ztest_opts.zo_pool, &spa, FTAG)); 5854 zs->zs_metaslab_sz = 5855 1ULL << spa->spa_root_vdev->vdev_child[0]->vdev_ms_shift; 5856 5857 spa_close(spa, FTAG); 5858 5859 kernel_fini(); 5860 5861 ztest_run_zdb(ztest_opts.zo_pool); 5862 5863 ztest_freeze(); 5864 5865 ztest_run_zdb(ztest_opts.zo_pool); 5866 5867 (void) rwlock_destroy(&ztest_name_lock); 5868 (void) _mutex_destroy(&ztest_vdev_lock); 5869} 5870 5871static void 5872setup_data_fd(void) 5873{ 5874 static char ztest_name_data[] = "/tmp/ztest.data.XXXXXX"; 5875 5876 ztest_fd_data = mkstemp(ztest_name_data); 5877 ASSERT3S(ztest_fd_data, >=, 0); 5878 (void) unlink(ztest_name_data); 5879} 5880 5881 5882static int 5883shared_data_size(ztest_shared_hdr_t *hdr) 5884{ 5885 int size; 5886 5887 size = hdr->zh_hdr_size; 5888 size += hdr->zh_opts_size; 5889 size += hdr->zh_size; 5890 size += hdr->zh_stats_size * hdr->zh_stats_count; 5891 size += hdr->zh_ds_size * hdr->zh_ds_count; 5892 5893 return (size); 5894} 5895 5896static void 5897setup_hdr(void) 5898{ 5899 int size; 5900 ztest_shared_hdr_t *hdr; 5901 5902 hdr = (void *)mmap(0, P2ROUNDUP(sizeof (*hdr), getpagesize()), 5903 
PROT_READ | PROT_WRITE, MAP_SHARED, ztest_fd_data, 0); 5904 ASSERT(hdr != MAP_FAILED); 5905 5906 VERIFY3U(0, ==, ftruncate(ztest_fd_data, sizeof (ztest_shared_hdr_t))); 5907 5908 hdr->zh_hdr_size = sizeof (ztest_shared_hdr_t); 5909 hdr->zh_opts_size = sizeof (ztest_shared_opts_t); 5910 hdr->zh_size = sizeof (ztest_shared_t); 5911 hdr->zh_stats_size = sizeof (ztest_shared_callstate_t); 5912 hdr->zh_stats_count = ZTEST_FUNCS; 5913 hdr->zh_ds_size = sizeof (ztest_shared_ds_t); 5914 hdr->zh_ds_count = ztest_opts.zo_datasets; 5915 5916 size = shared_data_size(hdr); 5917 VERIFY3U(0, ==, ftruncate(ztest_fd_data, size)); 5918 5919 (void) munmap((caddr_t)hdr, P2ROUNDUP(sizeof (*hdr), getpagesize())); 5920} 5921 5922static void 5923setup_data(void) 5924{ 5925 int size, offset; 5926 ztest_shared_hdr_t *hdr; 5927 uint8_t *buf; 5928 5929 hdr = (void *)mmap(0, P2ROUNDUP(sizeof (*hdr), getpagesize()), 5930 PROT_READ, MAP_SHARED, ztest_fd_data, 0); 5931 ASSERT(hdr != MAP_FAILED); 5932 5933 size = shared_data_size(hdr); 5934 5935 (void) munmap((caddr_t)hdr, P2ROUNDUP(sizeof (*hdr), getpagesize())); 5936 hdr = ztest_shared_hdr = (void *)mmap(0, P2ROUNDUP(size, getpagesize()), 5937 PROT_READ | PROT_WRITE, MAP_SHARED, ztest_fd_data, 0); 5938 ASSERT(hdr != MAP_FAILED); 5939 buf = (uint8_t *)hdr; 5940 5941 offset = hdr->zh_hdr_size; 5942 ztest_shared_opts = (void *)&buf[offset]; 5943 offset += hdr->zh_opts_size; 5944 ztest_shared = (void *)&buf[offset]; 5945 offset += hdr->zh_size; 5946 ztest_shared_callstate = (void *)&buf[offset]; 5947 offset += hdr->zh_stats_size * hdr->zh_stats_count; 5948 ztest_shared_ds = (void *)&buf[offset]; 5949} 5950 5951static boolean_t 5952exec_child(char *cmd, char *libpath, boolean_t ignorekill, int *statusp) 5953{ 5954 pid_t pid; 5955 int status; 5956 char *cmdbuf = NULL; 5957 5958 pid = fork(); 5959 5960 if (cmd == NULL) { 5961 cmdbuf = umem_alloc(MAXPATHLEN, UMEM_NOFAIL); 5962 (void) strlcpy(cmdbuf, getexecname(), MAXPATHLEN); 5963 cmd = cmdbuf; 5964 } 5965 5966 if (pid == -1) 5967 fatal(1, "fork failed"); 5968 5969 if (pid == 0) { /* child */ 5970 char *emptyargv[2] = { cmd, NULL }; 5971 char fd_data_str[12]; 5972 5973 struct rlimit rl = { 1024, 1024 }; 5974 (void) setrlimit(RLIMIT_NOFILE, &rl); 5975 5976 (void) close(ztest_fd_rand); 5977 VERIFY3U(11, >=, 5978 snprintf(fd_data_str, 12, "%d", ztest_fd_data)); 5979 VERIFY0(setenv("ZTEST_FD_DATA", fd_data_str, 1)); 5980 5981 (void) enable_extended_FILE_stdio(-1, -1); 5982 if (libpath != NULL) 5983 VERIFY(0 == setenv("LD_LIBRARY_PATH", libpath, 1)); 5984#ifdef illumos 5985 (void) execv(cmd, emptyargv); 5986#else 5987 (void) execvp(cmd, emptyargv); 5988#endif 5989 ztest_dump_core = B_FALSE; 5990 fatal(B_TRUE, "exec failed: %s", cmd); 5991 } 5992 5993 if (cmdbuf != NULL) { 5994 umem_free(cmdbuf, MAXPATHLEN); 5995 cmd = NULL; 5996 } 5997 5998 while (waitpid(pid, &status, 0) != pid) 5999 continue; 6000 if (statusp != NULL) 6001 *statusp = status; 6002 6003 if (WIFEXITED(status)) { 6004 if (WEXITSTATUS(status) != 0) { 6005 (void) fprintf(stderr, "child exited with code %d\n", 6006 WEXITSTATUS(status)); 6007 exit(2); 6008 } 6009 return (B_FALSE); 6010 } else if (WIFSIGNALED(status)) { 6011 if (!ignorekill || WTERMSIG(status) != SIGKILL) { 6012 (void) fprintf(stderr, "child died with signal %d\n", 6013 WTERMSIG(status)); 6014 exit(3); 6015 } 6016 return (B_TRUE); 6017 } else { 6018 (void) fprintf(stderr, "something strange happened to child\n"); 6019 exit(4); 6020 /* NOTREACHED */ 6021 } 6022} 6023 6024static void 
6025ztest_run_init(void) 6026{ 6027 ztest_shared_t *zs = ztest_shared; 6028 6029 ASSERT(ztest_opts.zo_init != 0); 6030 6031 /* 6032 * Blow away any existing copy of zpool.cache 6033 */ 6034 (void) remove(spa_config_path); 6035 6036 /* 6037 * Create and initialize our storage pool. 6038 */ 6039 for (int i = 1; i <= ztest_opts.zo_init; i++) { 6040 bzero(zs, sizeof (ztest_shared_t)); 6041 if (ztest_opts.zo_verbose >= 3 && 6042 ztest_opts.zo_init != 1) { 6043 (void) printf("ztest_init(), pass %d\n", i); 6044 } 6045 ztest_init(zs); 6046 } 6047} 6048 6049int 6050main(int argc, char **argv) 6051{ 6052 int kills = 0; 6053 int iters = 0; 6054 int older = 0; 6055 int newer = 0; 6056 ztest_shared_t *zs; 6057 ztest_info_t *zi; 6058 ztest_shared_callstate_t *zc; 6059 char timebuf[100]; 6060 char numbuf[6]; 6061 spa_t *spa; 6062 char *cmd; 6063 boolean_t hasalt; 6064 char *fd_data_str = getenv("ZTEST_FD_DATA"); 6065 6066 (void) setvbuf(stdout, NULL, _IOLBF, 0); 6067 6068 dprintf_setup(&argc, argv); 6069 zfs_deadman_synctime = 300; 6070 6071 ztest_fd_rand = open("/dev/urandom", O_RDONLY); 6072 ASSERT3S(ztest_fd_rand, >=, 0); 6073 6074 if (!fd_data_str) { 6075 process_options(argc, argv); 6076 6077 setup_data_fd(); 6078 setup_hdr(); 6079 setup_data(); 6080 bcopy(&ztest_opts, ztest_shared_opts, 6081 sizeof (*ztest_shared_opts)); 6082 } else { 6083 ztest_fd_data = atoi(fd_data_str); 6084 setup_data(); 6085 bcopy(ztest_shared_opts, &ztest_opts, sizeof (ztest_opts)); 6086 } 6087 ASSERT3U(ztest_opts.zo_datasets, ==, ztest_shared_hdr->zh_ds_count); 6088 6089 /* Override location of zpool.cache */ 6090 VERIFY3U(asprintf((char **)&spa_config_path, "%s/zpool.cache", 6091 ztest_opts.zo_dir), !=, -1); 6092 6093 ztest_ds = umem_alloc(ztest_opts.zo_datasets * sizeof (ztest_ds_t), 6094 UMEM_NOFAIL); 6095 zs = ztest_shared; 6096 6097 if (fd_data_str) { 6098 metaslab_gang_bang = ztest_opts.zo_metaslab_gang_bang; 6099 metaslab_df_alloc_threshold = 6100 zs->zs_metaslab_df_alloc_threshold; 6101 6102 if (zs->zs_do_init) 6103 ztest_run_init(); 6104 else 6105 ztest_run(zs); 6106 exit(0); 6107 } 6108 6109 hasalt = (strlen(ztest_opts.zo_alt_ztest) != 0); 6110 6111 if (ztest_opts.zo_verbose >= 1) { 6112 (void) printf("%llu vdevs, %d datasets, %d threads," 6113 " %llu seconds...\n", 6114 (u_longlong_t)ztest_opts.zo_vdevs, 6115 ztest_opts.zo_datasets, 6116 ztest_opts.zo_threads, 6117 (u_longlong_t)ztest_opts.zo_time); 6118 } 6119 6120 cmd = umem_alloc(MAXNAMELEN, UMEM_NOFAIL); 6121 (void) strlcpy(cmd, getexecname(), MAXNAMELEN); 6122 6123 zs->zs_do_init = B_TRUE; 6124 if (strlen(ztest_opts.zo_alt_ztest) != 0) { 6125 if (ztest_opts.zo_verbose >= 1) { 6126 (void) printf("Executing older ztest for " 6127 "initialization: %s\n", ztest_opts.zo_alt_ztest); 6128 } 6129 VERIFY(!exec_child(ztest_opts.zo_alt_ztest, 6130 ztest_opts.zo_alt_libpath, B_FALSE, NULL)); 6131 } else { 6132 VERIFY(!exec_child(NULL, NULL, B_FALSE, NULL)); 6133 } 6134 zs->zs_do_init = B_FALSE; 6135 6136 zs->zs_proc_start = gethrtime(); 6137 zs->zs_proc_stop = zs->zs_proc_start + ztest_opts.zo_time * NANOSEC; 6138 6139 for (int f = 0; f < ZTEST_FUNCS; f++) { 6140 zi = &ztest_info[f]; 6141 zc = ZTEST_GET_SHARED_CALLSTATE(f); 6142 if (zs->zs_proc_start + zi->zi_interval[0] > zs->zs_proc_stop) 6143 zc->zc_next = UINT64_MAX; 6144 else 6145 zc->zc_next = zs->zs_proc_start + 6146 ztest_random(2 * zi->zi_interval[0] + 1); 6147 } 6148 6149 /* 6150 * Run the tests in a loop. 
These tests include fault injection 6151 * to verify that self-healing data works, and forced crashes 6152 * to verify that we never lose on-disk consistency. 6153 */ 6154 while (gethrtime() < zs->zs_proc_stop) { 6155 int status; 6156 boolean_t killed; 6157 6158 /* 6159 * Initialize the workload counters for each function. 6160 */ 6161 for (int f = 0; f < ZTEST_FUNCS; f++) { 6162 zc = ZTEST_GET_SHARED_CALLSTATE(f); 6163 zc->zc_count = 0; 6164 zc->zc_time = 0; 6165 } 6166 6167 /* Set the allocation switch size */ 6168 zs->zs_metaslab_df_alloc_threshold = 6169 ztest_random(zs->zs_metaslab_sz / 4) + 1; 6170 6171 if (!hasalt || ztest_random(2) == 0) { 6172 if (hasalt && ztest_opts.zo_verbose >= 1) { 6173 (void) printf("Executing newer ztest: %s\n", 6174 cmd); 6175 } 6176 newer++; 6177 killed = exec_child(cmd, NULL, B_TRUE, &status); 6178 } else { 6179 if (hasalt && ztest_opts.zo_verbose >= 1) { 6180 (void) printf("Executing older ztest: %s\n", 6181 ztest_opts.zo_alt_ztest); 6182 } 6183 older++; 6184 killed = exec_child(ztest_opts.zo_alt_ztest, 6185 ztest_opts.zo_alt_libpath, B_TRUE, &status); 6186 } 6187 6188 if (killed) 6189 kills++; 6190 iters++; 6191 6192 if (ztest_opts.zo_verbose >= 1) { 6193 hrtime_t now = gethrtime(); 6194 6195 now = MIN(now, zs->zs_proc_stop); 6196 print_time(zs->zs_proc_stop - now, timebuf); 6197 nicenum(zs->zs_space, numbuf); 6198 6199 (void) printf("Pass %3d, %8s, %3llu ENOSPC, " 6200 "%4.1f%% of %5s used, %3.0f%% done, %8s to go\n", 6201 iters, 6202 WIFEXITED(status) ? "Complete" : "SIGKILL", 6203 (u_longlong_t)zs->zs_enospc_count, 6204 100.0 * zs->zs_alloc / zs->zs_space, 6205 numbuf, 6206 100.0 * (now - zs->zs_proc_start) / 6207 (ztest_opts.zo_time * NANOSEC), timebuf); 6208 } 6209 6210 if (ztest_opts.zo_verbose >= 2) { 6211 (void) printf("\nWorkload summary:\n\n"); 6212 (void) printf("%7s %9s %s\n", 6213 "Calls", "Time", "Function"); 6214 (void) printf("%7s %9s %s\n", 6215 "-----", "----", "--------"); 6216 for (int f = 0; f < ZTEST_FUNCS; f++) { 6217 Dl_info dli; 6218 6219 zi = &ztest_info[f]; 6220 zc = ZTEST_GET_SHARED_CALLSTATE(f); 6221 print_time(zc->zc_time, timebuf); 6222 (void) dladdr((void *)zi->zi_func, &dli); 6223 (void) printf("%7llu %9s %s\n", 6224 (u_longlong_t)zc->zc_count, timebuf, 6225 dli.dli_sname); 6226 } 6227 (void) printf("\n"); 6228 } 6229 6230 /* 6231 * It's possible that we killed a child during a rename test, 6232 * in which case we'll have a 'ztest_tmp' pool lying around 6233 * instead of 'ztest'. Do a blind rename in case this happened. 6234 */ 6235 kernel_init(FREAD); 6236 if (spa_open(ztest_opts.zo_pool, &spa, FTAG) == 0) { 6237 spa_close(spa, FTAG); 6238 } else { 6239 char tmpname[MAXNAMELEN]; 6240 kernel_fini(); 6241 kernel_init(FREAD | FWRITE); 6242 (void) snprintf(tmpname, sizeof (tmpname), "%s_tmp", 6243 ztest_opts.zo_pool); 6244 (void) spa_rename(tmpname, ztest_opts.zo_pool); 6245 } 6246 kernel_fini(); 6247 6248 ztest_run_zdb(ztest_opts.zo_pool); 6249 } 6250 6251 if (ztest_opts.zo_verbose >= 1) { 6252 if (hasalt) { 6253 (void) printf("%d runs of older ztest: %s\n", older, 6254 ztest_opts.zo_alt_ztest); 6255 (void) printf("%d runs of newer ztest: %s\n", newer, 6256 cmd); 6257 } 6258 (void) printf("%d killed, %d completed, %.0f%% kill rate\n", 6259 kills, iters - kills, (100.0 * kills) / MAX(1, iters)); 6260 } 6261 6262 umem_free(cmd, MAXNAMELEN); 6263 6264 return (0); 6265} 6266