ztest.c revision 168676
1153838Sdfr/*
2153838Sdfr * CDDL HEADER START
3153838Sdfr *
4178828Sdfr * The contents of this file are subject to the terms of the
5178828Sdfr * Common Development and Distribution License (the "License").
6178828Sdfr * You may not use this file except in compliance with the License.
7178828Sdfr *
8153838Sdfr * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9153838Sdfr * or http://www.opensolaris.org/os/licensing.
10178828Sdfr * See the License for the specific language governing permissions
11153838Sdfr * and limitations under the License.
12178828Sdfr *
13178828Sdfr * When distributing Covered Code, include this CDDL HEADER in each
14178828Sdfr * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15178828Sdfr * If applicable, add the following below this CDDL HEADER, with the
16178828Sdfr * fields enclosed by brackets "[]" replaced with your own identifying
17178828Sdfr * information: Portions Copyright [yyyy] [name of copyright owner]
18178828Sdfr *
19178828Sdfr * CDDL HEADER END
20153838Sdfr */
21178828Sdfr/*
22178828Sdfr * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
23178828Sdfr * Use is subject to license terms.
24178828Sdfr */
25178828Sdfr
26178828Sdfr#pragma ident	"%Z%%M%	%I%	%E% SMI"
27178828Sdfr
28153838Sdfr/*
29178828Sdfr * The objective of this program is to provide a DMU/ZAP/SPA stress test
30178828Sdfr * that runs entirely in userland, is easy to use, and easy to extend.
31153838Sdfr *
32178828Sdfr * The overall design of the ztest program is as follows:
33178828Sdfr *
34153838Sdfr * (1) For each major functional area (e.g. adding vdevs to a pool,
35153838Sdfr *     creating and destroying datasets, reading and writing objects, etc)
36178828Sdfr *     we have a simple routine to test that functionality.  These
37178828Sdfr *     individual routines do not have to do anything "stressful".
38153838Sdfr *
39178828Sdfr * (2) We turn these simple functionality tests into a stress test by
40178828Sdfr *     running them all in parallel, with as many threads as desired,
41178828Sdfr *     and spread across as many datasets, objects, and vdevs as desired.
42178828Sdfr *
43181344Sdfr * (3) While all this is happening, we inject faults into the pool to
44178828Sdfr *     verify that self-healing data really works.
45178828Sdfr *
46178828Sdfr * (4) Every time we open a dataset, we change its checksum and compression
47178828Sdfr *     functions.  Thus even individual objects vary from block to block
48178828Sdfr *     in which checksum they use and whether they're compressed.
49178828Sdfr *
50178828Sdfr * (5) To verify that we never lose on-disk consistency after a crash,
51178828Sdfr *     we run the entire test in a child of the main process.
52178828Sdfr *     At random times, the child self-immolates with a SIGKILL.
53178828Sdfr *     This is the software equivalent of pulling the power cord.
54153838Sdfr *     The parent then runs the test again, using the existing
55178828Sdfr *     storage pool, as many times as desired.
56178828Sdfr *
57178828Sdfr * (6) To verify that we don't have future leaks or temporal incursions,
58178828Sdfr *     many of the functional tests record the transaction group number
59153838Sdfr *     as part of their data.  When reading old data, they verify that
60178828Sdfr *     the transaction group number is less than the current, open txg.
61178828Sdfr *     If you add a new test, please do this if applicable.
62178828Sdfr *
63153838Sdfr * When run with no arguments, ztest runs for about five minutes and
64153838Sdfr * produces no output if successful.  To get a little bit of information,
65153838Sdfr * specify -V.  To get more information, specify -VV, and so on.
66153838Sdfr *
67153838Sdfr * To turn this into an overnight stress test, use -T to specify run time.
68153838Sdfr *
69153838Sdfr * You can ask more more vdevs [-v], datasets [-d], or threads [-t]
70153838Sdfr * to increase the pool capacity, fanout, and overall stress level.
71153838Sdfr *
72153838Sdfr * The -N(okill) option will suppress kills, so each child runs to completion.
73153838Sdfr * This can be useful when you're trying to distinguish temporal incursions
74153838Sdfr * from plain old race conditions.
75153838Sdfr */
76153838Sdfr
77153838Sdfr#include <sys/zfs_context.h>
78153838Sdfr#include <sys/spa.h>
79153838Sdfr#include <sys/dmu.h>
80153838Sdfr#include <sys/txg.h>
81153838Sdfr#include <sys/zap.h>
82153838Sdfr#include <sys/dmu_traverse.h>
83153838Sdfr#include <sys/dmu_objset.h>
84153838Sdfr#include <sys/poll.h>
85153838Sdfr#include <sys/stat.h>
86153838Sdfr#include <sys/time.h>
87153838Sdfr#include <sys/wait.h>
88153838Sdfr#include <sys/mman.h>
89153838Sdfr#include <sys/resource.h>
90153838Sdfr#include <sys/zio.h>
91153838Sdfr#include <sys/zio_checksum.h>
92153838Sdfr#include <sys/zio_compress.h>
93153838Sdfr#include <sys/zil.h>
94153838Sdfr#include <sys/vdev_impl.h>
95153838Sdfr#include <sys/spa_impl.h>
96153838Sdfr#include <sys/dsl_prop.h>
97153838Sdfr#include <sys/refcount.h>
98153838Sdfr#include <stdio.h>
99153838Sdfr#include <stdio_ext.h>
100153838Sdfr#include <stdlib.h>
101153838Sdfr#include <unistd.h>
102153838Sdfr#include <signal.h>
103153838Sdfr#include <umem.h>
104153838Sdfr#include <dlfcn.h>
105153838Sdfr#include <ctype.h>
106153838Sdfr#include <math.h>
107153838Sdfr#include <errno.h>
108153838Sdfr#include <sys/fs/zfs.h>
109153838Sdfr
110static char cmdname[] = "ztest";
111static char *zopt_pool = cmdname;
112static char *progname;
113
114static uint64_t zopt_vdevs = 5;
115static uint64_t zopt_vdevtime;
116static int zopt_ashift = SPA_MINBLOCKSHIFT;
117static int zopt_mirrors = 2;
118static int zopt_raidz = 4;
119static int zopt_raidz_parity = 1;
120static size_t zopt_vdev_size = SPA_MINDEVSIZE;
121static int zopt_datasets = 7;
122static int zopt_threads = 23;
123static uint64_t zopt_passtime = 60;	/* 60 seconds */
124static uint64_t zopt_killrate = 70;	/* 70% kill rate */
125static int zopt_verbose = 0;
126static int zopt_init = 1;
127static char *zopt_dir = "/tmp";
128static uint64_t zopt_time = 300;	/* 5 minutes */
129static int zopt_maxfaults;
130
131typedef struct ztest_args {
132	char		*za_pool;
133	objset_t	*za_os;
134	zilog_t		*za_zilog;
135	thread_t	za_thread;
136	uint64_t	za_instance;
137	uint64_t	za_random;
138	uint64_t	za_diroff;
139	uint64_t	za_diroff_shared;
140	uint64_t	za_zil_seq;
141	hrtime_t	za_start;
142	hrtime_t	za_stop;
143	hrtime_t	za_kill;
144	traverse_handle_t *za_th;
145} ztest_args_t;
146
147typedef void ztest_func_t(ztest_args_t *);
148
149/*
150 * Note: these aren't static because we want dladdr() to work.
151 */
152ztest_func_t ztest_dmu_read_write;
153ztest_func_t ztest_dmu_write_parallel;
154ztest_func_t ztest_dmu_object_alloc_free;
155ztest_func_t ztest_zap;
156ztest_func_t ztest_zap_parallel;
157ztest_func_t ztest_traverse;
158ztest_func_t ztest_dsl_prop_get_set;
159ztest_func_t ztest_dmu_objset_create_destroy;
160ztest_func_t ztest_dmu_snapshot_create_destroy;
161ztest_func_t ztest_spa_create_destroy;
162ztest_func_t ztest_fault_inject;
163ztest_func_t ztest_vdev_attach_detach;
164ztest_func_t ztest_vdev_LUN_growth;
165ztest_func_t ztest_vdev_add_remove;
166ztest_func_t ztest_scrub;
167ztest_func_t ztest_spa_rename;
168
169typedef struct ztest_info {
170	ztest_func_t	*zi_func;	/* test function */
171	uint64_t	*zi_interval;	/* execute every <interval> seconds */
172	uint64_t	zi_calls;	/* per-pass count */
173	uint64_t	zi_call_time;	/* per-pass time */
174	uint64_t	zi_call_total;	/* cumulative total */
175	uint64_t	zi_call_target;	/* target cumulative total */
176} ztest_info_t;
177
178uint64_t zopt_always = 0;		/* all the time */
179uint64_t zopt_often = 1;		/* every second */
180uint64_t zopt_sometimes = 10;		/* every 10 seconds */
181uint64_t zopt_rarely = 60;		/* every 60 seconds */
182
183ztest_info_t ztest_info[] = {
184	{ ztest_dmu_read_write,			&zopt_always	},
185	{ ztest_dmu_write_parallel,		&zopt_always	},
186	{ ztest_dmu_object_alloc_free,		&zopt_always	},
187	{ ztest_zap,				&zopt_always	},
188	{ ztest_zap_parallel,			&zopt_always	},
189	{ ztest_traverse,			&zopt_often	},
190	{ ztest_dsl_prop_get_set,		&zopt_sometimes	},
191	{ ztest_dmu_objset_create_destroy,	&zopt_sometimes	},
192	{ ztest_dmu_snapshot_create_destroy,	&zopt_rarely	},
193	{ ztest_spa_create_destroy,		&zopt_sometimes	},
194	{ ztest_fault_inject,			&zopt_sometimes	},
195	{ ztest_spa_rename,			&zopt_rarely	},
196	{ ztest_vdev_attach_detach,		&zopt_rarely	},
197	{ ztest_vdev_LUN_growth,		&zopt_rarely	},
198	{ ztest_vdev_add_remove,		&zopt_vdevtime	},
199	{ ztest_scrub,				&zopt_vdevtime	},
200};
201
202#define	ZTEST_FUNCS	(sizeof (ztest_info) / sizeof (ztest_info_t))
203
204#define	ZTEST_SYNC_LOCKS	16
205
206/*
207 * Stuff we need to share writably between parent and child.
208 */
209typedef struct ztest_shared {
210	mutex_t		zs_vdev_lock;
211	rwlock_t	zs_name_lock;
212	uint64_t	zs_vdev_primaries;
213	uint64_t	zs_enospc_count;
214	hrtime_t	zs_start_time;
215	hrtime_t	zs_stop_time;
216	uint64_t	zs_alloc;
217	uint64_t	zs_space;
218	uint64_t	zs_txg;
219	ztest_info_t	zs_info[ZTEST_FUNCS];
220	mutex_t		zs_sync_lock[ZTEST_SYNC_LOCKS];
221	uint64_t	zs_seq[ZTEST_SYNC_LOCKS];
222} ztest_shared_t;
223
224typedef struct ztest_block_tag {
225	uint64_t	bt_objset;
226	uint64_t	bt_object;
227	uint64_t	bt_offset;
228	uint64_t	bt_txg;
229	uint64_t	bt_thread;
230	uint64_t	bt_seq;
231} ztest_block_tag_t;
232
233static char ztest_dev_template[] = "%s/%s.%llua";
234static ztest_shared_t *ztest_shared;
235
236static int ztest_random_fd;
237static int ztest_dump_core = 1;
238
239extern uint64_t zio_gang_bang;
240extern uint16_t zio_zil_fail_shift;
241
242#define	ZTEST_DIROBJ		1
243#define	ZTEST_MICROZAP_OBJ	2
244#define	ZTEST_FATZAP_OBJ	3
245
246#define	ZTEST_DIROBJ_BLOCKSIZE	(1 << 10)
247#define	ZTEST_DIRSIZE		256
248
249static void usage(boolean_t) __NORETURN;
250
251/*
252 * These libumem hooks provide a reasonable set of defaults for the allocator's
253 * debugging facilities.
254 */
255const char *
256_umem_debug_init()
257{
258	return ("default,verbose"); /* $UMEM_DEBUG setting */
259}
260
261const char *
262_umem_logging_init(void)
263{
264	return ("fail,contents"); /* $UMEM_LOGGING setting */
265}
266
267#define	FATAL_MSG_SZ	1024
268
269char *fatal_msg;
270
271static void
272fatal(int do_perror, char *message, ...)
273{
274	va_list args;
275	int save_errno = errno;
276	char buf[FATAL_MSG_SZ];
277
278	(void) fflush(stdout);
279
280	va_start(args, message);
281	(void) sprintf(buf, "ztest: ");
282	/* LINTED */
283	(void) vsprintf(buf + strlen(buf), message, args);
284	va_end(args);
285	if (do_perror) {
286		(void) snprintf(buf + strlen(buf), FATAL_MSG_SZ - strlen(buf),
287		    ": %s", strerror(save_errno));
288	}
289	(void) fprintf(stderr, "%s\n", buf);
290	fatal_msg = buf;			/* to ease debugging */
291	if (ztest_dump_core)
292		abort();
293	exit(3);
294}
295
296static int
297str2shift(const char *buf)
298{
299	const char *ends = "BKMGTPEZ";
300	int i;
301
302	if (buf[0] == '\0')
303		return (0);
304	for (i = 0; i < strlen(ends); i++) {
305		if (toupper(buf[0]) == ends[i])
306			break;
307	}
308	if (i == strlen(ends)) {
309		(void) fprintf(stderr, "ztest: invalid bytes suffix: %s\n",
310		    buf);
311		usage(B_FALSE);
312	}
313	if (buf[1] == '\0' || (toupper(buf[1]) == 'B' && buf[2] == '\0')) {
314		return (10*i);
315	}
316	(void) fprintf(stderr, "ztest: invalid bytes suffix: %s\n", buf);
317	usage(B_FALSE);
318	/* NOTREACHED */
319}
320
321static uint64_t
322nicenumtoull(const char *buf)
323{
324	char *end;
325	uint64_t val;
326
327	val = strtoull(buf, &end, 0);
328	if (end == buf) {
329		(void) fprintf(stderr, "ztest: bad numeric value: %s\n", buf);
330		usage(B_FALSE);
331	} else if (end[0] == '.') {
332		double fval = strtod(buf, &end);
333		fval *= pow(2, str2shift(end));
334		if (fval > UINT64_MAX) {
335			(void) fprintf(stderr, "ztest: value too large: %s\n",
336			    buf);
337			usage(B_FALSE);
338		}
339		val = (uint64_t)fval;
340	} else {
341		int shift = str2shift(end);
342		if (shift >= 64 || (val << shift) >> shift != val) {
343			(void) fprintf(stderr, "ztest: value too large: %s\n",
344			    buf);
345			usage(B_FALSE);
346		}
347		val <<= shift;
348	}
349	return (val);
350}
351
352static void
353usage(boolean_t requested)
354{
355	char nice_vdev_size[10];
356	char nice_gang_bang[10];
357	FILE *fp = requested ? stdout : stderr;
358
359	nicenum(zopt_vdev_size, nice_vdev_size);
360	nicenum(zio_gang_bang, nice_gang_bang);
361
362	(void) fprintf(fp, "Usage: %s\n"
363	    "\t[-v vdevs (default: %llu)]\n"
364	    "\t[-s size_of_each_vdev (default: %s)]\n"
365	    "\t[-a alignment_shift (default: %d) (use 0 for random)]\n"
366	    "\t[-m mirror_copies (default: %d)]\n"
367	    "\t[-r raidz_disks (default: %d)]\n"
368	    "\t[-R raidz_parity (default: %d)]\n"
369	    "\t[-d datasets (default: %d)]\n"
370	    "\t[-t threads (default: %d)]\n"
371	    "\t[-g gang_block_threshold (default: %s)]\n"
372	    "\t[-i initialize pool i times (default: %d)]\n"
373	    "\t[-k kill percentage (default: %llu%%)]\n"
374	    "\t[-p pool_name (default: %s)]\n"
375	    "\t[-f file directory for vdev files (default: %s)]\n"
376	    "\t[-V(erbose)] (use multiple times for ever more blather)\n"
377	    "\t[-E(xisting)] (use existing pool instead of creating new one)\n"
378	    "\t[-T time] total run time (default: %llu sec)\n"
379	    "\t[-P passtime] time per pass (default: %llu sec)\n"
380	    "\t[-z zil failure rate (default: fail every 2^%llu allocs)]\n"
381	    "\t[-h] (print help)\n"
382	    "",
383	    cmdname,
384	    (u_longlong_t)zopt_vdevs,		/* -v */
385	    nice_vdev_size,			/* -s */
386	    zopt_ashift,			/* -a */
387	    zopt_mirrors,			/* -m */
388	    zopt_raidz,				/* -r */
389	    zopt_raidz_parity,			/* -R */
390	    zopt_datasets,			/* -d */
391	    zopt_threads,			/* -t */
392	    nice_gang_bang,			/* -g */
393	    zopt_init,				/* -i */
394	    (u_longlong_t)zopt_killrate,	/* -k */
395	    zopt_pool,				/* -p */
396	    zopt_dir,				/* -f */
397	    (u_longlong_t)zopt_time,		/* -T */
398	    (u_longlong_t)zopt_passtime,	/* -P */
399	    (u_longlong_t)zio_zil_fail_shift);	/* -z */
400	exit(requested ? 0 : 1);
401}
402
403static uint64_t
404ztest_random(uint64_t range)
405{
406	uint64_t r;
407
408	if (range == 0)
409		return (0);
410
411	if (read(ztest_random_fd, &r, sizeof (r)) != sizeof (r))
412		fatal(1, "short read from /dev/urandom");
413
414	return (r % range);
415}
416
417static void
418ztest_record_enospc(char *s)
419{
420	dprintf("ENOSPC doing: %s\n", s ? s : "<unknown>");
421	ztest_shared->zs_enospc_count++;
422}
423
424static void
425process_options(int argc, char **argv)
426{
427	int opt;
428	uint64_t value;
429
430	/* Remember program name. */
431	progname = argv[0];
432
433	/* By default, test gang blocks for blocks 32K and greater */
434	zio_gang_bang = 32 << 10;
435
436	/* Default value, fail every 32nd allocation */
437	zio_zil_fail_shift = 5;
438
439	while ((opt = getopt(argc, argv,
440	    "v:s:a:m:r:R:d:t:g:i:k:p:f:VET:P:z:h")) != EOF) {
441		value = 0;
442		switch (opt) {
443		    case 'v':
444		    case 's':
445		    case 'a':
446		    case 'm':
447		    case 'r':
448		    case 'R':
449		    case 'd':
450		    case 't':
451		    case 'g':
452		    case 'i':
453		    case 'k':
454		    case 'T':
455		    case 'P':
456		    case 'z':
457			value = nicenumtoull(optarg);
458		}
459		switch (opt) {
460		    case 'v':
461			zopt_vdevs = value;
462			break;
463		    case 's':
464			zopt_vdev_size = MAX(SPA_MINDEVSIZE, value);
465			break;
466		    case 'a':
467			zopt_ashift = value;
468			break;
469		    case 'm':
470			zopt_mirrors = value;
471			break;
472		    case 'r':
473			zopt_raidz = MAX(1, value);
474			break;
475		    case 'R':
476			zopt_raidz_parity = MIN(MAX(value, 1), 2);
477			break;
478		    case 'd':
479			zopt_datasets = MAX(1, value);
480			break;
481		    case 't':
482			zopt_threads = MAX(1, value);
483			break;
484		    case 'g':
485			zio_gang_bang = MAX(SPA_MINBLOCKSIZE << 1, value);
486			break;
487		    case 'i':
488			zopt_init = value;
489			break;
490		    case 'k':
491			zopt_killrate = value;
492			break;
493		    case 'p':
494			zopt_pool = strdup(optarg);
495			break;
496		    case 'f':
497			zopt_dir = strdup(optarg);
498			break;
499		    case 'V':
500			zopt_verbose++;
501			break;
502		    case 'E':
503			zopt_init = 0;
504			break;
505		    case 'T':
506			zopt_time = value;
507			break;
508		    case 'P':
509			zopt_passtime = MAX(1, value);
510			break;
511		    case 'z':
512			zio_zil_fail_shift = MIN(value, 16);
513			break;
514		    case 'h':
515			usage(B_TRUE);
516			break;
517		    case '?':
518		    default:
519			usage(B_FALSE);
520			break;
521		}
522	}
523
524	zopt_raidz_parity = MIN(zopt_raidz_parity, zopt_raidz - 1);
525
526	zopt_vdevtime = (zopt_vdevs > 0 ? zopt_time / zopt_vdevs : UINT64_MAX);
527	zopt_maxfaults = MAX(zopt_mirrors, 1) * (zopt_raidz_parity + 1) - 1;
528}
529
530static uint64_t
531ztest_get_ashift(void)
532{
533	if (zopt_ashift == 0)
534		return (SPA_MINBLOCKSHIFT + ztest_random(3));
535	return (zopt_ashift);
536}
537
538static nvlist_t *
539make_vdev_file(size_t size)
540{
541	char dev_name[MAXPATHLEN];
542	uint64_t vdev;
543	uint64_t ashift = ztest_get_ashift();
544	int fd;
545	nvlist_t *file;
546
547	if (size == 0) {
548		(void) snprintf(dev_name, sizeof (dev_name), "%s",
549		    "/dev/bogus");
550	} else {
551		vdev = ztest_shared->zs_vdev_primaries++;
552		(void) sprintf(dev_name, ztest_dev_template,
553		    zopt_dir, zopt_pool, vdev);
554
555		fd = open(dev_name, O_RDWR | O_CREAT | O_TRUNC, 0666);
556		if (fd == -1)
557			fatal(1, "can't open %s", dev_name);
558		if (ftruncate(fd, size) != 0)
559			fatal(1, "can't ftruncate %s", dev_name);
560		(void) close(fd);
561	}
562
563	VERIFY(nvlist_alloc(&file, NV_UNIQUE_NAME, 0) == 0);
564	VERIFY(nvlist_add_string(file, ZPOOL_CONFIG_TYPE, VDEV_TYPE_FILE) == 0);
565	VERIFY(nvlist_add_string(file, ZPOOL_CONFIG_PATH, dev_name) == 0);
566	VERIFY(nvlist_add_uint64(file, ZPOOL_CONFIG_ASHIFT, ashift) == 0);
567
568	return (file);
569}
570
571static nvlist_t *
572make_vdev_raidz(size_t size, int r)
573{
574	nvlist_t *raidz, **child;
575	int c;
576
577	if (r < 2)
578		return (make_vdev_file(size));
579
580	child = umem_alloc(r * sizeof (nvlist_t *), UMEM_NOFAIL);
581
582	for (c = 0; c < r; c++)
583		child[c] = make_vdev_file(size);
584
585	VERIFY(nvlist_alloc(&raidz, NV_UNIQUE_NAME, 0) == 0);
586	VERIFY(nvlist_add_string(raidz, ZPOOL_CONFIG_TYPE,
587	    VDEV_TYPE_RAIDZ) == 0);
588	VERIFY(nvlist_add_uint64(raidz, ZPOOL_CONFIG_NPARITY,
589	    zopt_raidz_parity) == 0);
590	VERIFY(nvlist_add_nvlist_array(raidz, ZPOOL_CONFIG_CHILDREN,
591	    child, r) == 0);
592
593	for (c = 0; c < r; c++)
594		nvlist_free(child[c]);
595
596	umem_free(child, r * sizeof (nvlist_t *));
597
598	return (raidz);
599}
600
601static nvlist_t *
602make_vdev_mirror(size_t size, int r, int m)
603{
604	nvlist_t *mirror, **child;
605	int c;
606
607	if (m < 1)
608		return (make_vdev_raidz(size, r));
609
610	child = umem_alloc(m * sizeof (nvlist_t *), UMEM_NOFAIL);
611
612	for (c = 0; c < m; c++)
613		child[c] = make_vdev_raidz(size, r);
614
615	VERIFY(nvlist_alloc(&mirror, NV_UNIQUE_NAME, 0) == 0);
616	VERIFY(nvlist_add_string(mirror, ZPOOL_CONFIG_TYPE,
617	    VDEV_TYPE_MIRROR) == 0);
618	VERIFY(nvlist_add_nvlist_array(mirror, ZPOOL_CONFIG_CHILDREN,
619	    child, m) == 0);
620
621	for (c = 0; c < m; c++)
622		nvlist_free(child[c]);
623
624	umem_free(child, m * sizeof (nvlist_t *));
625
626	return (mirror);
627}
628
629static nvlist_t *
630make_vdev_root(size_t size, int r, int m, int t)
631{
632	nvlist_t *root, **child;
633	int c;
634
635	ASSERT(t > 0);
636
637	child = umem_alloc(t * sizeof (nvlist_t *), UMEM_NOFAIL);
638
639	for (c = 0; c < t; c++)
640		child[c] = make_vdev_mirror(size, r, m);
641
642	VERIFY(nvlist_alloc(&root, NV_UNIQUE_NAME, 0) == 0);
643	VERIFY(nvlist_add_string(root, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT) == 0);
644	VERIFY(nvlist_add_nvlist_array(root, ZPOOL_CONFIG_CHILDREN,
645	    child, t) == 0);
646
647	for (c = 0; c < t; c++)
648		nvlist_free(child[c]);
649
650	umem_free(child, t * sizeof (nvlist_t *));
651
652	return (root);
653}
654
655static void
656ztest_set_random_blocksize(objset_t *os, uint64_t object, dmu_tx_t *tx)
657{
658	int bs = SPA_MINBLOCKSHIFT +
659	    ztest_random(SPA_MAXBLOCKSHIFT - SPA_MINBLOCKSHIFT + 1);
660	int ibs = DN_MIN_INDBLKSHIFT +
661	    ztest_random(DN_MAX_INDBLKSHIFT - DN_MIN_INDBLKSHIFT + 1);
662	int error;
663
664	error = dmu_object_set_blocksize(os, object, 1ULL << bs, ibs, tx);
665	if (error) {
666		char osname[300];
667		dmu_objset_name(os, osname);
668		fatal(0, "dmu_object_set_blocksize('%s', %llu, %d, %d) = %d",
669		    osname, object, 1 << bs, ibs, error);
670	}
671}
672
673static uint8_t
674ztest_random_checksum(void)
675{
676	uint8_t checksum;
677
678	do {
679		checksum = ztest_random(ZIO_CHECKSUM_FUNCTIONS);
680	} while (zio_checksum_table[checksum].ci_zbt);
681
682	if (checksum == ZIO_CHECKSUM_OFF)
683		checksum = ZIO_CHECKSUM_ON;
684
685	return (checksum);
686}
687
688static uint8_t
689ztest_random_compress(void)
690{
691	return ((uint8_t)ztest_random(ZIO_COMPRESS_FUNCTIONS));
692}
693
694typedef struct ztest_replay {
695	objset_t	*zr_os;
696	uint64_t	zr_assign;
697} ztest_replay_t;
698
699static int
700ztest_replay_create(ztest_replay_t *zr, lr_create_t *lr, boolean_t byteswap)
701{
702	objset_t *os = zr->zr_os;
703	dmu_tx_t *tx;
704	int error;
705
706	if (byteswap)
707		byteswap_uint64_array(lr, sizeof (*lr));
708
709	tx = dmu_tx_create(os);
710	dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);
711	error = dmu_tx_assign(tx, zr->zr_assign);
712	if (error) {
713		dmu_tx_abort(tx);
714		return (error);
715	}
716
717	error = dmu_object_claim(os, lr->lr_doid, lr->lr_mode, 0,
718	    DMU_OT_NONE, 0, tx);
719	ASSERT3U(error, ==, 0);
720	dmu_tx_commit(tx);
721
722	if (zopt_verbose >= 5) {
723		char osname[MAXNAMELEN];
724		dmu_objset_name(os, osname);
725		(void) printf("replay create of %s object %llu"
726		    " in txg %llu = %d\n",
727		    osname, (u_longlong_t)lr->lr_doid,
728		    (u_longlong_t)zr->zr_assign, error);
729	}
730
731	return (error);
732}
733
734static int
735ztest_replay_remove(ztest_replay_t *zr, lr_remove_t *lr, boolean_t byteswap)
736{
737	objset_t *os = zr->zr_os;
738	dmu_tx_t *tx;
739	int error;
740
741	if (byteswap)
742		byteswap_uint64_array(lr, sizeof (*lr));
743
744	tx = dmu_tx_create(os);
745	dmu_tx_hold_free(tx, lr->lr_doid, 0, DMU_OBJECT_END);
746	error = dmu_tx_assign(tx, zr->zr_assign);
747	if (error) {
748		dmu_tx_abort(tx);
749		return (error);
750	}
751
752	error = dmu_object_free(os, lr->lr_doid, tx);
753	dmu_tx_commit(tx);
754
755	return (error);
756}
757
758zil_replay_func_t *ztest_replay_vector[TX_MAX_TYPE] = {
759	NULL,			/* 0 no such transaction type */
760	ztest_replay_create,	/* TX_CREATE */
761	NULL,			/* TX_MKDIR */
762	NULL,			/* TX_MKXATTR */
763	NULL,			/* TX_SYMLINK */
764	ztest_replay_remove,	/* TX_REMOVE */
765	NULL,			/* TX_RMDIR */
766	NULL,			/* TX_LINK */
767	NULL,			/* TX_RENAME */
768	NULL,			/* TX_WRITE */
769	NULL,			/* TX_TRUNCATE */
770	NULL,			/* TX_SETATTR */
771	NULL,			/* TX_ACL */
772};
773
774/*
775 * Verify that we can't destroy an active pool, create an existing pool,
776 * or create a pool with a bad vdev spec.
777 */
778void
779ztest_spa_create_destroy(ztest_args_t *za)
780{
781	int error;
782	spa_t *spa;
783	nvlist_t *nvroot;
784
785	/*
786	 * Attempt to create using a bad file.
787	 */
788	nvroot = make_vdev_root(0, 0, 0, 1);
789	error = spa_create("ztest_bad_file", nvroot, NULL);
790	nvlist_free(nvroot);
791	if (error != ENOENT)
792		fatal(0, "spa_create(bad_file) = %d", error);
793
794	/*
795	 * Attempt to create using a bad mirror.
796	 */
797	nvroot = make_vdev_root(0, 0, 2, 1);
798	error = spa_create("ztest_bad_mirror", nvroot, NULL);
799	nvlist_free(nvroot);
800	if (error != ENOENT)
801		fatal(0, "spa_create(bad_mirror) = %d", error);
802
803	/*
804	 * Attempt to create an existing pool.  It shouldn't matter
805	 * what's in the nvroot; we should fail with EEXIST.
806	 */
807	(void) rw_rdlock(&ztest_shared->zs_name_lock);
808	nvroot = make_vdev_root(0, 0, 0, 1);
809	error = spa_create(za->za_pool, nvroot, NULL);
810	nvlist_free(nvroot);
811	if (error != EEXIST)
812		fatal(0, "spa_create(whatever) = %d", error);
813
814	error = spa_open(za->za_pool, &spa, FTAG);
815	if (error)
816		fatal(0, "spa_open() = %d", error);
817
818	error = spa_destroy(za->za_pool);
819	if (error != EBUSY)
820		fatal(0, "spa_destroy() = %d", error);
821
822	spa_close(spa, FTAG);
823	(void) rw_unlock(&ztest_shared->zs_name_lock);
824}
825
826/*
827 * Verify that vdev_add() works as expected.
828 */
829void
830ztest_vdev_add_remove(ztest_args_t *za)
831{
832	spa_t *spa = dmu_objset_spa(za->za_os);
833	uint64_t leaves = MAX(zopt_mirrors, 1) * zopt_raidz;
834	nvlist_t *nvroot;
835	int error;
836
837	if (zopt_verbose >= 6)
838		(void) printf("adding vdev\n");
839
840	(void) mutex_lock(&ztest_shared->zs_vdev_lock);
841
842	spa_config_enter(spa, RW_READER, FTAG);
843
844	ztest_shared->zs_vdev_primaries =
845	    spa->spa_root_vdev->vdev_children * leaves;
846
847	spa_config_exit(spa, FTAG);
848
849	nvroot = make_vdev_root(zopt_vdev_size, zopt_raidz, zopt_mirrors, 1);
850	error = spa_vdev_add(spa, nvroot);
851	nvlist_free(nvroot);
852
853	(void) mutex_unlock(&ztest_shared->zs_vdev_lock);
854
855	if (error == ENOSPC)
856		ztest_record_enospc("spa_vdev_add");
857	else if (error != 0)
858		fatal(0, "spa_vdev_add() = %d", error);
859
860	if (zopt_verbose >= 6)
861		(void) printf("spa_vdev_add = %d, as expected\n", error);
862}
863
864static vdev_t *
865vdev_lookup_by_path(vdev_t *vd, const char *path)
866{
867	int c;
868	vdev_t *mvd;
869
870	if (vd->vdev_path != NULL) {
871		if (vd->vdev_wholedisk == 1) {
872			/*
873			 * For whole disks, the internal path has 's0', but the
874			 * path passed in by the user doesn't.
875			 */
876			if (strlen(path) == strlen(vd->vdev_path) - 2 &&
877			    strncmp(path, vd->vdev_path, strlen(path)) == 0)
878				return (vd);
879		} else if (strcmp(path, vd->vdev_path) == 0) {
880			return (vd);
881		}
882	}
883
884	for (c = 0; c < vd->vdev_children; c++)
885		if ((mvd = vdev_lookup_by_path(vd->vdev_child[c], path)) !=
886		    NULL)
887			return (mvd);
888
889	return (NULL);
890}
891
892/*
893 * Verify that we can attach and detach devices.
894 */
895void
896ztest_vdev_attach_detach(ztest_args_t *za)
897{
898	spa_t *spa = dmu_objset_spa(za->za_os);
899	vdev_t *rvd = spa->spa_root_vdev;
900	vdev_t *oldvd, *newvd, *pvd;
901	nvlist_t *root, *file;
902	uint64_t leaves = MAX(zopt_mirrors, 1) * zopt_raidz;
903	uint64_t leaf, top;
904	uint64_t ashift = ztest_get_ashift();
905	size_t oldsize, newsize;
906	char oldpath[MAXPATHLEN], newpath[MAXPATHLEN];
907	int replacing;
908	int error, expected_error;
909	int fd;
910
911	(void) mutex_lock(&ztest_shared->zs_vdev_lock);
912
913	spa_config_enter(spa, RW_READER, FTAG);
914
915	/*
916	 * Decide whether to do an attach or a replace.
917	 */
918	replacing = ztest_random(2);
919
920	/*
921	 * Pick a random top-level vdev.
922	 */
923	top = ztest_random(rvd->vdev_children);
924
925	/*
926	 * Pick a random leaf within it.
927	 */
928	leaf = ztest_random(leaves);
929
930	/*
931	 * Generate the path to this leaf.  The filename will end with 'a'.
932	 * We'll alternate replacements with a filename that ends with 'b'.
933	 */
934	(void) snprintf(oldpath, sizeof (oldpath),
935	    ztest_dev_template, zopt_dir, zopt_pool, top * leaves + leaf);
936
937	bcopy(oldpath, newpath, MAXPATHLEN);
938
939	/*
940	 * If the 'a' file isn't part of the pool, the 'b' file must be.
941	 */
942	if (vdev_lookup_by_path(rvd, oldpath) == NULL)
943		oldpath[strlen(oldpath) - 1] = 'b';
944	else
945		newpath[strlen(newpath) - 1] = 'b';
946
947	/*
948	 * Now oldpath represents something that's already in the pool,
949	 * and newpath is the thing we'll try to attach.
950	 */
951	oldvd = vdev_lookup_by_path(rvd, oldpath);
952	newvd = vdev_lookup_by_path(rvd, newpath);
953	ASSERT(oldvd != NULL);
954	pvd = oldvd->vdev_parent;
955
956	/*
957	 * Make newsize a little bigger or smaller than oldsize.
958	 * If it's smaller, the attach should fail.
959	 * If it's larger, and we're doing a replace,
960	 * we should get dynamic LUN growth when we're done.
961	 */
962	oldsize = vdev_get_rsize(oldvd);
963	newsize = 10 * oldsize / (9 + ztest_random(3));
964
965	/*
966	 * If pvd is not a mirror or root, the attach should fail with ENOTSUP,
967	 * unless it's a replace; in that case any non-replacing parent is OK.
968	 *
969	 * If newvd is already part of the pool, it should fail with EBUSY.
970	 *
971	 * If newvd is too small, it should fail with EOVERFLOW.
972	 */
973	if (newvd != NULL)
974		expected_error = EBUSY;
975	else if (pvd->vdev_ops != &vdev_mirror_ops &&
976	    pvd->vdev_ops != &vdev_root_ops &&
977	    (!replacing || pvd->vdev_ops == &vdev_replacing_ops))
978		expected_error = ENOTSUP;
979	else if (newsize < oldsize)
980		expected_error = EOVERFLOW;
981	else if (ashift > oldvd->vdev_top->vdev_ashift)
982		expected_error = EDOM;
983	else
984		expected_error = 0;
985
986	/*
987	 * If newvd isn't already part of the pool, create it.
988	 */
989	if (newvd == NULL) {
990		fd = open(newpath, O_RDWR | O_CREAT | O_TRUNC, 0666);
991		if (fd == -1)
992			fatal(1, "can't open %s", newpath);
993		if (ftruncate(fd, newsize) != 0)
994			fatal(1, "can't ftruncate %s", newpath);
995		(void) close(fd);
996	}
997
998	spa_config_exit(spa, FTAG);
999
1000	/*
1001	 * Build the nvlist describing newpath.
1002	 */
1003	VERIFY(nvlist_alloc(&file, NV_UNIQUE_NAME, 0) == 0);
1004	VERIFY(nvlist_add_string(file, ZPOOL_CONFIG_TYPE, VDEV_TYPE_FILE) == 0);
1005	VERIFY(nvlist_add_string(file, ZPOOL_CONFIG_PATH, newpath) == 0);
1006	VERIFY(nvlist_add_uint64(file, ZPOOL_CONFIG_ASHIFT, ashift) == 0);
1007
1008	VERIFY(nvlist_alloc(&root, NV_UNIQUE_NAME, 0) == 0);
1009	VERIFY(nvlist_add_string(root, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT) == 0);
1010	VERIFY(nvlist_add_nvlist_array(root, ZPOOL_CONFIG_CHILDREN,
1011	    &file, 1) == 0);
1012
1013	error = spa_vdev_attach(spa, oldvd->vdev_guid, root, replacing);
1014
1015	nvlist_free(file);
1016	nvlist_free(root);
1017
1018	/*
1019	 * If our parent was the replacing vdev, but the replace completed,
1020	 * then instead of failing with ENOTSUP we may either succeed,
1021	 * fail with ENODEV, or fail with EOVERFLOW.
1022	 */
1023	if (expected_error == ENOTSUP &&
1024	    (error == 0 || error == ENODEV || error == EOVERFLOW))
1025		expected_error = error;
1026
1027	/*
1028	 * If someone grew the LUN, the replacement may be too small.
1029	 */
1030	if (error == EOVERFLOW)
1031		expected_error = error;
1032
1033	if (error != expected_error) {
1034		fatal(0, "attach (%s, %s, %d) returned %d, expected %d",
1035		    oldpath, newpath, replacing, error, expected_error);
1036	}
1037
1038	(void) mutex_unlock(&ztest_shared->zs_vdev_lock);
1039}
1040
1041/*
1042 * Verify that dynamic LUN growth works as expected.
1043 */
1044/* ARGSUSED */
1045void
1046ztest_vdev_LUN_growth(ztest_args_t *za)
1047{
1048	spa_t *spa = dmu_objset_spa(za->za_os);
1049	char dev_name[MAXPATHLEN];
1050	uint64_t leaves = MAX(zopt_mirrors, 1) * zopt_raidz;
1051	uint64_t vdev;
1052	size_t fsize;
1053	int fd;
1054
1055	(void) mutex_lock(&ztest_shared->zs_vdev_lock);
1056
1057	/*
1058	 * Pick a random leaf vdev.
1059	 */
1060	spa_config_enter(spa, RW_READER, FTAG);
1061	vdev = ztest_random(spa->spa_root_vdev->vdev_children * leaves);
1062	spa_config_exit(spa, FTAG);
1063
1064	(void) sprintf(dev_name, ztest_dev_template, zopt_dir, zopt_pool, vdev);
1065
1066	if ((fd = open(dev_name, O_RDWR)) != -1) {
1067		/*
1068		 * Determine the size.
1069		 */
1070		fsize = lseek(fd, 0, SEEK_END);
1071
1072		/*
1073		 * If it's less than 2x the original size, grow by around 3%.
1074		 */
1075		if (fsize < 2 * zopt_vdev_size) {
1076			size_t newsize = fsize + ztest_random(fsize / 32);
1077			(void) ftruncate(fd, newsize);
1078			if (zopt_verbose >= 6) {
1079				(void) printf("%s grew from %lu to %lu bytes\n",
1080				    dev_name, (ulong_t)fsize, (ulong_t)newsize);
1081			}
1082		}
1083		(void) close(fd);
1084	}
1085
1086	(void) mutex_unlock(&ztest_shared->zs_vdev_lock);
1087}
1088
1089/* ARGSUSED */
1090static void
1091ztest_create_cb(objset_t *os, void *arg, dmu_tx_t *tx)
1092{
1093	/*
1094	 * Create the directory object.
1095	 */
1096	VERIFY(dmu_object_claim(os, ZTEST_DIROBJ,
1097	    DMU_OT_UINT64_OTHER, ZTEST_DIROBJ_BLOCKSIZE,
1098	    DMU_OT_UINT64_OTHER, sizeof (ztest_block_tag_t), tx) == 0);
1099
1100	VERIFY(zap_create_claim(os, ZTEST_MICROZAP_OBJ,
1101	    DMU_OT_ZAP_OTHER, DMU_OT_NONE, 0, tx) == 0);
1102
1103	VERIFY(zap_create_claim(os, ZTEST_FATZAP_OBJ,
1104	    DMU_OT_ZAP_OTHER, DMU_OT_NONE, 0, tx) == 0);
1105}
1106
1107/* ARGSUSED */
1108static int
1109ztest_destroy_cb(char *name, void *arg)
1110{
1111	objset_t *os;
1112	dmu_object_info_t doi;
1113	int error;
1114
1115	/*
1116	 * Verify that the dataset contains a directory object.
1117	 */
1118	error = dmu_objset_open(name, DMU_OST_OTHER,
1119	    DS_MODE_STANDARD | DS_MODE_READONLY, &os);
1120	ASSERT3U(error, ==, 0);
1121	error = dmu_object_info(os, ZTEST_DIROBJ, &doi);
1122	if (error != ENOENT) {
1123		/* We could have crashed in the middle of destroying it */
1124		ASSERT3U(error, ==, 0);
1125		ASSERT3U(doi.doi_type, ==, DMU_OT_UINT64_OTHER);
1126		ASSERT3S(doi.doi_physical_blks, >=, 0);
1127	}
1128	dmu_objset_close(os);
1129
1130	/*
1131	 * Destroy the dataset.
1132	 */
1133	error = dmu_objset_destroy(name);
1134	ASSERT3U(error, ==, 0);
1135	return (0);
1136}
1137
1138/*
1139 * Verify that dmu_objset_{create,destroy,open,close} work as expected.
1140 */
1141static uint64_t
1142ztest_log_create(zilog_t *zilog, dmu_tx_t *tx, uint64_t object, int mode)
1143{
1144	itx_t *itx;
1145	lr_create_t *lr;
1146	size_t namesize;
1147	char name[24];
1148
1149	(void) sprintf(name, "ZOBJ_%llu", (u_longlong_t)object);
1150	namesize = strlen(name) + 1;
1151
1152	itx = zil_itx_create(TX_CREATE, sizeof (*lr) + namesize +
1153	    ztest_random(ZIL_MAX_BLKSZ));
1154	lr = (lr_create_t *)&itx->itx_lr;
1155	bzero(lr + 1, lr->lr_common.lrc_reclen - sizeof (*lr));
1156	lr->lr_doid = object;
1157	lr->lr_foid = 0;
1158	lr->lr_mode = mode;
1159	lr->lr_uid = 0;
1160	lr->lr_gid = 0;
1161	lr->lr_gen = dmu_tx_get_txg(tx);
1162	lr->lr_crtime[0] = time(NULL);
1163	lr->lr_crtime[1] = 0;
1164	lr->lr_rdev = 0;
1165	bcopy(name, (char *)(lr + 1), namesize);
1166
1167	return (zil_itx_assign(zilog, itx, tx));
1168}
1169
1170void
1171ztest_dmu_objset_create_destroy(ztest_args_t *za)
1172{
1173	int error;
1174	objset_t *os;
1175	char name[100];
1176	int mode, basemode, expected_error;
1177	zilog_t *zilog;
1178	uint64_t seq;
1179	uint64_t objects;
1180	ztest_replay_t zr;
1181
1182	(void) rw_rdlock(&ztest_shared->zs_name_lock);
1183	(void) snprintf(name, 100, "%s/%s_temp_%llu", za->za_pool, za->za_pool,
1184	    (u_longlong_t)za->za_instance);
1185
1186	basemode = DS_MODE_LEVEL(za->za_instance);
1187	if (basemode == DS_MODE_NONE)
1188		basemode++;
1189
1190	/*
1191	 * If this dataset exists from a previous run, process its replay log
1192	 * half of the time.  If we don't replay it, then dmu_objset_destroy()
1193	 * (invoked from ztest_destroy_cb() below) should just throw it away.
1194	 */
1195	if (ztest_random(2) == 0 &&
1196	    dmu_objset_open(name, DMU_OST_OTHER, DS_MODE_PRIMARY, &os) == 0) {
1197		zr.zr_os = os;
1198		zil_replay(os, &zr, &zr.zr_assign, ztest_replay_vector);
1199		dmu_objset_close(os);
1200	}
1201
1202	/*
1203	 * There may be an old instance of the dataset we're about to
1204	 * create lying around from a previous run.  If so, destroy it
1205	 * and all of its snapshots.
1206	 */
1207	(void) dmu_objset_find(name, ztest_destroy_cb, NULL,
1208	    DS_FIND_CHILDREN | DS_FIND_SNAPSHOTS);
1209
1210	/*
1211	 * Verify that the destroyed dataset is no longer in the namespace.
1212	 */
1213	error = dmu_objset_open(name, DMU_OST_OTHER, basemode, &os);
1214	if (error != ENOENT)
1215		fatal(1, "dmu_objset_open(%s) found destroyed dataset %p",
1216		    name, os);
1217
1218	/*
1219	 * Verify that we can create a new dataset.
1220	 */
1221	error = dmu_objset_create(name, DMU_OST_OTHER, NULL, ztest_create_cb,
1222	    NULL);
1223	if (error) {
1224		if (error == ENOSPC) {
1225			ztest_record_enospc("dmu_objset_create");
1226			(void) rw_unlock(&ztest_shared->zs_name_lock);
1227			return;
1228		}
1229		fatal(0, "dmu_objset_create(%s) = %d", name, error);
1230	}
1231
1232	error = dmu_objset_open(name, DMU_OST_OTHER, basemode, &os);
1233	if (error) {
1234		fatal(0, "dmu_objset_open(%s) = %d", name, error);
1235	}
1236
1237	/*
1238	 * Open the intent log for it.
1239	 */
1240	zilog = zil_open(os, NULL);
1241
1242	/*
1243	 * Put a random number of objects in there.
1244	 */
1245	objects = ztest_random(20);
1246	seq = 0;
1247	while (objects-- != 0) {
1248		uint64_t object;
1249		dmu_tx_t *tx = dmu_tx_create(os);
1250		dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, sizeof (name));
1251		error = dmu_tx_assign(tx, TXG_WAIT);
1252		if (error) {
1253			dmu_tx_abort(tx);
1254		} else {
1255			object = dmu_object_alloc(os, DMU_OT_UINT64_OTHER, 0,
1256			    DMU_OT_NONE, 0, tx);
1257			ztest_set_random_blocksize(os, object, tx);
1258			seq = ztest_log_create(zilog, tx, object,
1259			    DMU_OT_UINT64_OTHER);
1260			dmu_write(os, object, 0, sizeof (name), name, tx);
1261			dmu_tx_commit(tx);
1262		}
1263		if (ztest_random(5) == 0) {
1264			zil_commit(zilog, seq, object);
1265		}
1266		if (ztest_random(100) == 0) {
1267			error = zil_suspend(zilog);
1268			if (error == 0) {
1269				zil_resume(zilog);
1270			}
1271		}
1272	}
1273
1274	/*
1275	 * Verify that we cannot create an existing dataset.
1276	 */
1277	error = dmu_objset_create(name, DMU_OST_OTHER, NULL, NULL, NULL);
1278	if (error != EEXIST)
1279		fatal(0, "created existing dataset, error = %d", error);
1280
1281	/*
1282	 * Verify that multiple dataset opens are allowed, but only when
1283	 * the new access mode is compatible with the base mode.
1284	 * We use a mixture of typed and typeless opens, and when the
1285	 * open succeeds, verify that the discovered type is correct.
1286	 */
1287	for (mode = DS_MODE_STANDARD; mode < DS_MODE_LEVELS; mode++) {
1288		objset_t *os2;
1289		error = dmu_objset_open(name, DMU_OST_OTHER, mode, &os2);
1290		expected_error = (basemode + mode < DS_MODE_LEVELS) ? 0 : EBUSY;
1291		if (error != expected_error)
1292			fatal(0, "dmu_objset_open('%s') = %d, expected %d",
1293			    name, error, expected_error);
1294		if (error == 0)
1295			dmu_objset_close(os2);
1296	}
1297
1298	zil_close(zilog);
1299	dmu_objset_close(os);
1300
1301	error = dmu_objset_destroy(name);
1302	if (error)
1303		fatal(0, "dmu_objset_destroy(%s) = %d", name, error);
1304
1305	(void) rw_unlock(&ztest_shared->zs_name_lock);
1306}
1307
1308/*
1309 * Verify that dmu_snapshot_{create,destroy,open,close} work as expected.
1310 */
1311void
1312ztest_dmu_snapshot_create_destroy(ztest_args_t *za)
1313{
1314	int error;
1315	objset_t *os = za->za_os;
1316	char snapname[100];
1317	char osname[MAXNAMELEN];
1318
1319	(void) rw_rdlock(&ztest_shared->zs_name_lock);
1320	dmu_objset_name(os, osname);
1321	(void) snprintf(snapname, 100, "%s@%llu", osname,
1322	    (u_longlong_t)za->za_instance);
1323
1324	error = dmu_objset_destroy(snapname);
1325	if (error != 0 && error != ENOENT)
1326		fatal(0, "dmu_objset_destroy() = %d", error);
1327	error = dmu_objset_snapshot(osname, strchr(snapname, '@')+1, FALSE);
1328	if (error == ENOSPC)
1329		ztest_record_enospc("dmu_take_snapshot");
1330	else if (error != 0 && error != EEXIST)
1331		fatal(0, "dmu_take_snapshot() = %d", error);
1332	(void) rw_unlock(&ztest_shared->zs_name_lock);
1333}
1334
1335#define	ZTEST_TRAVERSE_BLOCKS	1000
1336
1337static int
1338ztest_blk_cb(traverse_blk_cache_t *bc, spa_t *spa, void *arg)
1339{
1340	ztest_args_t *za = arg;
1341	zbookmark_t *zb = &bc->bc_bookmark;
1342	blkptr_t *bp = &bc->bc_blkptr;
1343	dnode_phys_t *dnp = bc->bc_dnode;
1344	traverse_handle_t *th = za->za_th;
1345	uint64_t size = BP_GET_LSIZE(bp);
1346
1347	/*
1348	 * Level -1 indicates the objset_phys_t or something in its intent log.
1349	 */
1350	if (zb->zb_level == -1) {
1351		if (BP_GET_TYPE(bp) == DMU_OT_OBJSET) {
1352			ASSERT3U(zb->zb_object, ==, 0);
1353			ASSERT3U(zb->zb_blkid, ==, 0);
1354			ASSERT3U(size, ==, sizeof (objset_phys_t));
1355			za->za_zil_seq = 0;
1356		} else if (BP_GET_TYPE(bp) == DMU_OT_INTENT_LOG) {
1357			ASSERT3U(zb->zb_object, ==, 0);
1358			ASSERT3U(zb->zb_blkid, >, za->za_zil_seq);
1359			za->za_zil_seq = zb->zb_blkid;
1360		} else {
1361			ASSERT3U(zb->zb_object, !=, 0);	/* lr_write_t */
1362		}
1363
1364		return (0);
1365	}
1366
1367	ASSERT(dnp != NULL);
1368
1369	if (bc->bc_errno)
1370		return (ERESTART);
1371
1372	/*
1373	 * Once in a while, abort the traverse.   We only do this to odd
1374	 * instance numbers to ensure that even ones can run to completion.
1375	 */
1376	if ((za->za_instance & 1) && ztest_random(10000) == 0)
1377		return (EINTR);
1378
1379	if (bp->blk_birth == 0) {
1380		ASSERT(th->th_advance & ADVANCE_HOLES);
1381		return (0);
1382	}
1383
1384	if (zb->zb_level == 0 && !(th->th_advance & ADVANCE_DATA) &&
1385	    bc == &th->th_cache[ZB_DN_CACHE][0]) {
1386		ASSERT(bc->bc_data == NULL);
1387		return (0);
1388	}
1389
1390	ASSERT(bc->bc_data != NULL);
1391
1392	/*
1393	 * This is an expensive question, so don't ask it too often.
1394	 */
1395	if (((za->za_random ^ th->th_callbacks) & 0xff) == 0) {
1396		void *xbuf = umem_alloc(size, UMEM_NOFAIL);
1397		if (arc_tryread(spa, bp, xbuf) == 0) {
1398			ASSERT(bcmp(bc->bc_data, xbuf, size) == 0);
1399		}
1400		umem_free(xbuf, size);
1401	}
1402
1403	if (zb->zb_level > 0) {
1404		ASSERT3U(size, ==, 1ULL << dnp->dn_indblkshift);
1405		return (0);
1406	}
1407
1408	ASSERT(zb->zb_level == 0);
1409	ASSERT3U(size, ==, dnp->dn_datablkszsec << DEV_BSHIFT);
1410
1411	return (0);
1412}
1413
1414/*
1415 * Verify that live pool traversal works.
1416 */
1417void
1418ztest_traverse(ztest_args_t *za)
1419{
1420	spa_t *spa = dmu_objset_spa(za->za_os);
1421	traverse_handle_t *th = za->za_th;
1422	int rc, advance;
1423	uint64_t cbstart, cblimit;
1424
1425	if (th == NULL) {
1426		advance = 0;
1427
1428		if (ztest_random(2) == 0)
1429			advance |= ADVANCE_PRE;
1430
1431		if (ztest_random(2) == 0)
1432			advance |= ADVANCE_PRUNE;
1433
1434		if (ztest_random(2) == 0)
1435			advance |= ADVANCE_DATA;
1436
1437		if (ztest_random(2) == 0)
1438			advance |= ADVANCE_HOLES;
1439
1440		if (ztest_random(2) == 0)
1441			advance |= ADVANCE_ZIL;
1442
1443		th = za->za_th = traverse_init(spa, ztest_blk_cb, za, advance,
1444		    ZIO_FLAG_CANFAIL);
1445
1446		traverse_add_pool(th, 0, -1ULL);
1447	}
1448
1449	advance = th->th_advance;
1450	cbstart = th->th_callbacks;
1451	cblimit = cbstart + ((advance & ADVANCE_DATA) ? 100 : 1000);
1452
1453	while ((rc = traverse_more(th)) == EAGAIN && th->th_callbacks < cblimit)
1454		continue;
1455
1456	if (zopt_verbose >= 5)
1457		(void) printf("traverse %s%s%s%s %llu blocks to "
1458		    "<%llu, %llu, %lld, %llx>%s\n",
1459		    (advance & ADVANCE_PRE) ? "pre" : "post",
1460		    (advance & ADVANCE_PRUNE) ? "|prune" : "",
1461		    (advance & ADVANCE_DATA) ? "|data" : "",
1462		    (advance & ADVANCE_HOLES) ? "|holes" : "",
1463		    (u_longlong_t)(th->th_callbacks - cbstart),
1464		    (u_longlong_t)th->th_lastcb.zb_objset,
1465		    (u_longlong_t)th->th_lastcb.zb_object,
1466		    (u_longlong_t)th->th_lastcb.zb_level,
1467		    (u_longlong_t)th->th_lastcb.zb_blkid,
1468		    rc == 0 ? " [done]" :
1469		    rc == EINTR ? " [aborted]" :
1470		    rc == EAGAIN ? "" :
1471		    strerror(rc));
1472
1473	if (rc != EAGAIN) {
1474		if (rc != 0 && rc != EINTR)
1475			fatal(0, "traverse_more(%p) = %d", th, rc);
1476		traverse_fini(th);
1477		za->za_th = NULL;
1478	}
1479}
1480
1481/*
1482 * Verify that dmu_object_{alloc,free} work as expected.
1483 */
1484void
1485ztest_dmu_object_alloc_free(ztest_args_t *za)
1486{
1487	objset_t *os = za->za_os;
1488	dmu_buf_t *db;
1489	dmu_tx_t *tx;
1490	uint64_t batchobj, object, batchsize, endoff, temp;
1491	int b, c, error, bonuslen;
1492	dmu_object_info_t doi;
1493	char osname[MAXNAMELEN];
1494
1495	dmu_objset_name(os, osname);
1496
1497	endoff = -8ULL;
1498	batchsize = 2;
1499
1500	/*
1501	 * Create a batch object if necessary, and record it in the directory.
1502	 */
1503	VERIFY(0 == dmu_read(os, ZTEST_DIROBJ, za->za_diroff,
1504	    sizeof (uint64_t), &batchobj));
1505	if (batchobj == 0) {
1506		tx = dmu_tx_create(os);
1507		dmu_tx_hold_write(tx, ZTEST_DIROBJ, za->za_diroff,
1508		    sizeof (uint64_t));
1509		dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);
1510		error = dmu_tx_assign(tx, TXG_WAIT);
1511		if (error) {
1512			ztest_record_enospc("create a batch object");
1513			dmu_tx_abort(tx);
1514			return;
1515		}
1516		batchobj = dmu_object_alloc(os, DMU_OT_UINT64_OTHER, 0,
1517		    DMU_OT_NONE, 0, tx);
1518		ztest_set_random_blocksize(os, batchobj, tx);
1519		dmu_write(os, ZTEST_DIROBJ, za->za_diroff,
1520		    sizeof (uint64_t), &batchobj, tx);
1521		dmu_tx_commit(tx);
1522	}
1523
1524	/*
1525	 * Destroy the previous batch of objects.
1526	 */
1527	for (b = 0; b < batchsize; b++) {
1528		VERIFY(0 == dmu_read(os, batchobj, b * sizeof (uint64_t),
1529		    sizeof (uint64_t), &object));
1530		if (object == 0)
1531			continue;
1532		/*
1533		 * Read and validate contents.
1534		 * We expect the nth byte of the bonus buffer to be n.
1535		 */
1536		VERIFY(0 == dmu_bonus_hold(os, object, FTAG, &db));
1537
1538		dmu_object_info_from_db(db, &doi);
1539		ASSERT(doi.doi_type == DMU_OT_UINT64_OTHER);
1540		ASSERT(doi.doi_bonus_type == DMU_OT_PLAIN_OTHER);
1541		ASSERT3S(doi.doi_physical_blks, >=, 0);
1542
1543		bonuslen = db->db_size;
1544
1545		for (c = 0; c < bonuslen; c++) {
1546			if (((uint8_t *)db->db_data)[c] !=
1547			    (uint8_t)(c + bonuslen)) {
1548				fatal(0,
1549				    "bad bonus: %s, obj %llu, off %d: %u != %u",
1550				    osname, object, c,
1551				    ((uint8_t *)db->db_data)[c],
1552				    (uint8_t)(c + bonuslen));
1553			}
1554		}
1555
1556		dmu_buf_rele(db, FTAG);
1557
1558		/*
1559		 * We expect the word at endoff to be our object number.
1560		 */
1561		VERIFY(0 == dmu_read(os, object, endoff,
1562		    sizeof (uint64_t), &temp));
1563
1564		if (temp != object) {
1565			fatal(0, "bad data in %s, got %llu, expected %llu",
1566			    osname, temp, object);
1567		}
1568
1569		/*
1570		 * Destroy old object and clear batch entry.
1571		 */
1572		tx = dmu_tx_create(os);
1573		dmu_tx_hold_write(tx, batchobj,
1574		    b * sizeof (uint64_t), sizeof (uint64_t));
1575		dmu_tx_hold_free(tx, object, 0, DMU_OBJECT_END);
1576		error = dmu_tx_assign(tx, TXG_WAIT);
1577		if (error) {
1578			ztest_record_enospc("free object");
1579			dmu_tx_abort(tx);
1580			return;
1581		}
1582		error = dmu_object_free(os, object, tx);
1583		if (error) {
1584			fatal(0, "dmu_object_free('%s', %llu) = %d",
1585			    osname, object, error);
1586		}
1587		object = 0;
1588
1589		dmu_object_set_checksum(os, batchobj,
1590		    ztest_random_checksum(), tx);
1591		dmu_object_set_compress(os, batchobj,
1592		    ztest_random_compress(), tx);
1593
1594		dmu_write(os, batchobj, b * sizeof (uint64_t),
1595		    sizeof (uint64_t), &object, tx);
1596
1597		dmu_tx_commit(tx);
1598	}
1599
1600	/*
1601	 * Before creating the new batch of objects, generate a bunch of churn.
1602	 */
1603	for (b = ztest_random(100); b > 0; b--) {
1604		tx = dmu_tx_create(os);
1605		dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);
1606		error = dmu_tx_assign(tx, TXG_WAIT);
1607		if (error) {
1608			ztest_record_enospc("churn objects");
1609			dmu_tx_abort(tx);
1610			return;
1611		}
1612		object = dmu_object_alloc(os, DMU_OT_UINT64_OTHER, 0,
1613		    DMU_OT_NONE, 0, tx);
1614		ztest_set_random_blocksize(os, object, tx);
1615		error = dmu_object_free(os, object, tx);
1616		if (error) {
1617			fatal(0, "dmu_object_free('%s', %llu) = %d",
1618			    osname, object, error);
1619		}
1620		dmu_tx_commit(tx);
1621	}
1622
1623	/*
1624	 * Create a new batch of objects with randomly chosen
1625	 * blocksizes and record them in the batch directory.
1626	 */
1627	for (b = 0; b < batchsize; b++) {
1628		uint32_t va_blksize;
1629		u_longlong_t va_nblocks;
1630
1631		tx = dmu_tx_create(os);
1632		dmu_tx_hold_write(tx, batchobj, b * sizeof (uint64_t),
1633		    sizeof (uint64_t));
1634		dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);
1635		dmu_tx_hold_write(tx, DMU_NEW_OBJECT, endoff,
1636		    sizeof (uint64_t));
1637		error = dmu_tx_assign(tx, TXG_WAIT);
1638		if (error) {
1639			ztest_record_enospc("create batchobj");
1640			dmu_tx_abort(tx);
1641			return;
1642		}
1643		bonuslen = (int)ztest_random(dmu_bonus_max()) + 1;
1644
1645		object = dmu_object_alloc(os, DMU_OT_UINT64_OTHER, 0,
1646		    DMU_OT_PLAIN_OTHER, bonuslen, tx);
1647
1648		ztest_set_random_blocksize(os, object, tx);
1649
1650		dmu_object_set_checksum(os, object,
1651		    ztest_random_checksum(), tx);
1652		dmu_object_set_compress(os, object,
1653		    ztest_random_compress(), tx);
1654
1655		dmu_write(os, batchobj, b * sizeof (uint64_t),
1656		    sizeof (uint64_t), &object, tx);
1657
1658		/*
1659		 * Write to both the bonus buffer and the regular data.
1660		 */
1661		VERIFY(0 == dmu_bonus_hold(os, object, FTAG, &db));
1662		ASSERT3U(bonuslen, ==, db->db_size);
1663
1664		dmu_object_size_from_db(db, &va_blksize, &va_nblocks);
1665		ASSERT3S(va_nblocks, >=, 0);
1666
1667		dmu_buf_will_dirty(db, tx);
1668
1669		/*
1670		 * See comments above regarding the contents of
1671		 * the bonus buffer and the word at endoff.
1672		 */
1673		for (c = 0; c < db->db_size; c++)
1674			((uint8_t *)db->db_data)[c] = (uint8_t)(c + bonuslen);
1675
1676		dmu_buf_rele(db, FTAG);
1677
1678		/*
1679		 * Write to a large offset to increase indirection.
1680		 */
1681		dmu_write(os, object, endoff, sizeof (uint64_t), &object, tx);
1682
1683		dmu_tx_commit(tx);
1684	}
1685}
1686
1687/*
1688 * Verify that dmu_{read,write} work as expected.
1689 */
1690typedef struct bufwad {
1691	uint64_t	bw_index;
1692	uint64_t	bw_txg;
1693	uint64_t	bw_data;
1694} bufwad_t;
1695
1696typedef struct dmu_read_write_dir {
1697	uint64_t	dd_packobj;
1698	uint64_t	dd_bigobj;
1699	uint64_t	dd_chunk;
1700} dmu_read_write_dir_t;
1701
1702void
1703ztest_dmu_read_write(ztest_args_t *za)
1704{
1705	objset_t *os = za->za_os;
1706	dmu_read_write_dir_t dd;
1707	dmu_tx_t *tx;
1708	int i, freeit, error;
1709	uint64_t n, s, txg;
1710	bufwad_t *packbuf, *bigbuf, *pack, *bigH, *bigT;
1711	uint64_t packoff, packsize, bigoff, bigsize;
1712	uint64_t regions = 997;
1713	uint64_t stride = 123456789ULL;
1714	uint64_t width = 40;
1715	int free_percent = 5;
1716
1717	/*
1718	 * This test uses two objects, packobj and bigobj, that are always
1719	 * updated together (i.e. in the same tx) so that their contents are
1720	 * in sync and can be compared.  Their contents relate to each other
1721	 * in a simple way: packobj is a dense array of 'bufwad' structures,
1722	 * while bigobj is a sparse array of the same bufwads.  Specifically,
1723	 * for any index n, there are three bufwads that should be identical:
1724	 *
1725	 *	packobj, at offset n * sizeof (bufwad_t)
1726	 *	bigobj, at the head of the nth chunk
1727	 *	bigobj, at the tail of the nth chunk
1728	 *
1729	 * The chunk size is arbitrary. It doesn't have to be a power of two,
1730	 * and it doesn't have any relation to the object blocksize.
1731	 * The only requirement is that it can hold at least two bufwads.
1732	 *
1733	 * Normally, we write the bufwad to each of these locations.
1734	 * However, free_percent of the time we instead write zeroes to
1735	 * packobj and perform a dmu_free_range() on bigobj.  By comparing
1736	 * bigobj to packobj, we can verify that the DMU is correctly
1737	 * tracking which parts of an object are allocated and free,
1738	 * and that the contents of the allocated blocks are correct.
1739	 */
1740
1741	/*
1742	 * Read the directory info.  If it's the first time, set things up.
1743	 */
1744	VERIFY(0 == dmu_read(os, ZTEST_DIROBJ, za->za_diroff,
1745	    sizeof (dd), &dd));
1746	if (dd.dd_chunk == 0) {
1747		ASSERT(dd.dd_packobj == 0);
1748		ASSERT(dd.dd_bigobj == 0);
1749		tx = dmu_tx_create(os);
1750		dmu_tx_hold_write(tx, ZTEST_DIROBJ, za->za_diroff, sizeof (dd));
1751		dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);
1752		error = dmu_tx_assign(tx, TXG_WAIT);
1753		if (error) {
1754			ztest_record_enospc("create r/w directory");
1755			dmu_tx_abort(tx);
1756			return;
1757		}
1758
1759		dd.dd_packobj = dmu_object_alloc(os, DMU_OT_UINT64_OTHER, 0,
1760		    DMU_OT_NONE, 0, tx);
1761		dd.dd_bigobj = dmu_object_alloc(os, DMU_OT_UINT64_OTHER, 0,
1762		    DMU_OT_NONE, 0, tx);
1763		dd.dd_chunk = (1000 + ztest_random(1000)) * sizeof (uint64_t);
1764
1765		ztest_set_random_blocksize(os, dd.dd_packobj, tx);
1766		ztest_set_random_blocksize(os, dd.dd_bigobj, tx);
1767
1768		dmu_write(os, ZTEST_DIROBJ, za->za_diroff, sizeof (dd), &dd,
1769		    tx);
1770		dmu_tx_commit(tx);
1771	}
1772
1773	/*
1774	 * Prefetch a random chunk of the big object.
1775	 * Our aim here is to get some async reads in flight
1776	 * for blocks that we may free below; the DMU should
1777	 * handle this race correctly.
1778	 */
1779	n = ztest_random(regions) * stride + ztest_random(width);
1780	s = 1 + ztest_random(2 * width - 1);
1781	dmu_prefetch(os, dd.dd_bigobj, n * dd.dd_chunk, s * dd.dd_chunk);
1782
1783	/*
1784	 * Pick a random index and compute the offsets into packobj and bigobj.
1785	 */
1786	n = ztest_random(regions) * stride + ztest_random(width);
1787	s = 1 + ztest_random(width - 1);
1788
1789	packoff = n * sizeof (bufwad_t);
1790	packsize = s * sizeof (bufwad_t);
1791
1792	bigoff = n * dd.dd_chunk;
1793	bigsize = s * dd.dd_chunk;
1794
1795	packbuf = umem_alloc(packsize, UMEM_NOFAIL);
1796	bigbuf = umem_alloc(bigsize, UMEM_NOFAIL);
1797
1798	/*
1799	 * free_percent of the time, free a range of bigobj rather than
1800	 * overwriting it.
1801	 */
1802	freeit = (ztest_random(100) < free_percent);
1803
1804	/*
1805	 * Read the current contents of our objects.
1806	 */
1807	error = dmu_read(os, dd.dd_packobj, packoff, packsize, packbuf);
1808	ASSERT3U(error, ==, 0);
1809	error = dmu_read(os, dd.dd_bigobj, bigoff, bigsize, bigbuf);
1810	ASSERT3U(error, ==, 0);
1811
1812	/*
1813	 * Get a tx for the mods to both packobj and bigobj.
1814	 */
1815	tx = dmu_tx_create(os);
1816
1817	dmu_tx_hold_write(tx, dd.dd_packobj, packoff, packsize);
1818
1819	if (freeit)
1820		dmu_tx_hold_free(tx, dd.dd_bigobj, bigoff, bigsize);
1821	else
1822		dmu_tx_hold_write(tx, dd.dd_bigobj, bigoff, bigsize);
1823
1824	error = dmu_tx_assign(tx, TXG_WAIT);
1825
1826	if (error) {
1827		ztest_record_enospc("dmu r/w range");
1828		dmu_tx_abort(tx);
1829		umem_free(packbuf, packsize);
1830		umem_free(bigbuf, bigsize);
1831		return;
1832	}
1833
1834	txg = dmu_tx_get_txg(tx);
1835
1836	/*
1837	 * For each index from n to n + s, verify that the existing bufwad
1838	 * in packobj matches the bufwads at the head and tail of the
1839	 * corresponding chunk in bigobj.  Then update all three bufwads
1840	 * with the new values we want to write out.
1841	 */
1842	for (i = 0; i < s; i++) {
1843		/* LINTED */
1844		pack = (bufwad_t *)((char *)packbuf + i * sizeof (bufwad_t));
1845		/* LINTED */
1846		bigH = (bufwad_t *)((char *)bigbuf + i * dd.dd_chunk);
1847		/* LINTED */
1848		bigT = (bufwad_t *)((char *)bigH + dd.dd_chunk) - 1;
1849
1850		ASSERT((uintptr_t)bigH - (uintptr_t)bigbuf < bigsize);
1851		ASSERT((uintptr_t)bigT - (uintptr_t)bigbuf < bigsize);
1852
1853		if (pack->bw_txg > txg)
1854			fatal(0, "future leak: got %llx, open txg is %llx",
1855			    pack->bw_txg, txg);
1856
1857		if (pack->bw_data != 0 && pack->bw_index != n + i)
1858			fatal(0, "wrong index: got %llx, wanted %llx+%llx",
1859			    pack->bw_index, n, i);
1860
1861		if (bcmp(pack, bigH, sizeof (bufwad_t)) != 0)
1862			fatal(0, "pack/bigH mismatch in %p/%p", pack, bigH);
1863
1864		if (bcmp(pack, bigT, sizeof (bufwad_t)) != 0)
1865			fatal(0, "pack/bigT mismatch in %p/%p", pack, bigT);
1866
1867		if (freeit) {
1868			bzero(pack, sizeof (bufwad_t));
1869		} else {
1870			pack->bw_index = n + i;
1871			pack->bw_txg = txg;
1872			pack->bw_data = 1 + ztest_random(-2ULL);
1873		}
1874		*bigH = *pack;
1875		*bigT = *pack;
1876	}
1877
1878	/*
1879	 * We've verified all the old bufwads, and made new ones.
1880	 * Now write them out.
1881	 */
1882	dmu_write(os, dd.dd_packobj, packoff, packsize, packbuf, tx);
1883
1884	if (freeit) {
1885		if (zopt_verbose >= 6) {
1886			(void) printf("freeing offset %llx size %llx"
1887			    " txg %llx\n",
1888			    (u_longlong_t)bigoff,
1889			    (u_longlong_t)bigsize,
1890			    (u_longlong_t)txg);
1891		}
1892		VERIFY(0 == dmu_free_range(os, dd.dd_bigobj, bigoff,
1893		    bigsize, tx));
1894	} else {
1895		if (zopt_verbose >= 6) {
1896			(void) printf("writing offset %llx size %llx"
1897			    " txg %llx\n",
1898			    (u_longlong_t)bigoff,
1899			    (u_longlong_t)bigsize,
1900			    (u_longlong_t)txg);
1901		}
1902		dmu_write(os, dd.dd_bigobj, bigoff, bigsize, bigbuf, tx);
1903	}
1904
1905	dmu_tx_commit(tx);
1906
1907	/*
1908	 * Sanity check the stuff we just wrote.
1909	 */
1910	{
1911		void *packcheck = umem_alloc(packsize, UMEM_NOFAIL);
1912		void *bigcheck = umem_alloc(bigsize, UMEM_NOFAIL);
1913
1914		VERIFY(0 == dmu_read(os, dd.dd_packobj, packoff,
1915		    packsize, packcheck));
1916		VERIFY(0 == dmu_read(os, dd.dd_bigobj, bigoff,
1917		    bigsize, bigcheck));
1918
1919		ASSERT(bcmp(packbuf, packcheck, packsize) == 0);
1920		ASSERT(bcmp(bigbuf, bigcheck, bigsize) == 0);
1921
1922		umem_free(packcheck, packsize);
1923		umem_free(bigcheck, bigsize);
1924	}
1925
1926	umem_free(packbuf, packsize);
1927	umem_free(bigbuf, bigsize);
1928}
1929
1930void
1931ztest_dmu_check_future_leak(objset_t *os, uint64_t txg)
1932{
1933	dmu_buf_t *db;
1934	ztest_block_tag_t rbt;
1935
1936	if (zopt_verbose >= 3) {
1937		char osname[MAXNAMELEN];
1938		dmu_objset_name(os, osname);
1939		(void) printf("checking %s for future leaks in txg %lld...\n",
1940		    osname, (u_longlong_t)txg);
1941	}
1942
1943	/*
1944	 * Make sure that, if there is a write record in the bonus buffer
1945	 * of the ZTEST_DIROBJ, the txg for this record is <= the
1946	 * last synced txg of the pool.
1947	 */
1948
1949	VERIFY(0 == dmu_bonus_hold(os, ZTEST_DIROBJ, FTAG, &db));
1950	ASSERT3U(db->db_size, ==, sizeof (rbt));
1951	bcopy(db->db_data, &rbt, db->db_size);
1952	if (rbt.bt_objset != 0) {
1953		ASSERT3U(rbt.bt_objset, ==, dmu_objset_id(os));
1954		ASSERT3U(rbt.bt_object, ==, ZTEST_DIROBJ);
1955		ASSERT3U(rbt.bt_offset, ==, -1ULL);
1956		if (rbt.bt_txg > txg) {
1957			fatal(0,
1958			    "future leak: got %llx, last synced txg is %llx",
1959			    rbt.bt_txg, txg);
1960		}
1961	}
1962	dmu_buf_rele(db, FTAG);
1963}
1964
1965void
1966ztest_dmu_write_parallel(ztest_args_t *za)
1967{
1968	objset_t *os = za->za_os;
1969	dmu_tx_t *tx;
1970	dmu_buf_t *db;
1971	int i, b, error, do_free, bs;
1972	uint64_t off, txg_how, txg;
1973	mutex_t *lp;
1974	char osname[MAXNAMELEN];
1975	char iobuf[SPA_MAXBLOCKSIZE];
1976	ztest_block_tag_t rbt, wbt;
1977
1978	dmu_objset_name(os, osname);
1979	bs = ZTEST_DIROBJ_BLOCKSIZE;
1980
1981	/*
1982	 * Have multiple threads write to large offsets in ZTEST_DIROBJ
1983	 * to verify that writing to the same object in parallel
1984	 * doesn't cause any trouble.
1985	 * Also do parallel writes to the bonus buffer on occasion.
1986	 */
1987	for (i = 0; i < 50; i++) {
1988		b = ztest_random(ZTEST_SYNC_LOCKS);
1989		lp = &ztest_shared->zs_sync_lock[b];
1990
1991		do_free = (ztest_random(4) == 0);
1992
1993		off = za->za_diroff_shared + ((uint64_t)b << SPA_MAXBLOCKSHIFT);
1994
1995		if (ztest_random(4) == 0) {
1996			/*
1997			 * Do the bonus buffer instead of a regular block.
1998			 */
1999			do_free = 0;
2000			off = -1ULL;
2001		}
2002
2003		tx = dmu_tx_create(os);
2004
2005		if (off == -1ULL)
2006			dmu_tx_hold_bonus(tx, ZTEST_DIROBJ);
2007		else if (do_free)
2008			dmu_tx_hold_free(tx, ZTEST_DIROBJ, off, bs);
2009		else
2010			dmu_tx_hold_write(tx, ZTEST_DIROBJ, off, bs);
2011
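		/*
		 * Randomly alternate between TXG_WAIT and TXG_NOWAIT so
		 * both dmu_tx_assign() paths get exercised; a TXG_NOWAIT
		 * assignment may fail with ERESTART, in which case we
		 * wait for the open txg and retry with a fresh tx.
		 */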
2012		txg_how = ztest_random(2) == 0 ? TXG_WAIT : TXG_NOWAIT;
2013		error = dmu_tx_assign(tx, txg_how);
2014		if (error) {
2015			if (error == ERESTART) {
2016				ASSERT(txg_how == TXG_NOWAIT);
2017				dmu_tx_wait(tx);
2018				dmu_tx_abort(tx);
2019				continue;
2020			}
2021			dmu_tx_abort(tx);
2022			ztest_record_enospc("dmu write parallel");
2023			return;
2024		}
2025		txg = dmu_tx_get_txg(tx);
2026
2027		if (do_free) {
2028			(void) mutex_lock(lp);
2029			VERIFY(0 == dmu_free_range(os, ZTEST_DIROBJ, off,
2030			    bs, tx));
2031			(void) mutex_unlock(lp);
2032			dmu_tx_commit(tx);
2033			continue;
2034		}
2035
2036		wbt.bt_objset = dmu_objset_id(os);
2037		wbt.bt_object = ZTEST_DIROBJ;
2038		wbt.bt_offset = off;
2039		wbt.bt_txg = txg;
2040		wbt.bt_thread = za->za_instance;
2041
2042		if (off == -1ULL) {
2043			wbt.bt_seq = 0;
2044			VERIFY(0 == dmu_bonus_hold(os, ZTEST_DIROBJ,
2045			    FTAG, &db));
2046			ASSERT3U(db->db_size, ==, sizeof (wbt));
2047			bcopy(db->db_data, &rbt, db->db_size);
2048			if (rbt.bt_objset != 0) {
2049				ASSERT3U(rbt.bt_objset, ==, wbt.bt_objset);
2050				ASSERT3U(rbt.bt_object, ==, wbt.bt_object);
2051				ASSERT3U(rbt.bt_offset, ==, wbt.bt_offset);
2052				ASSERT3U(rbt.bt_txg, <=, wbt.bt_txg);
2053			}
2054			dmu_buf_will_dirty(db, tx);
2055			bcopy(&wbt, db->db_data, db->db_size);
2056			dmu_buf_rele(db, FTAG);
2057			dmu_tx_commit(tx);
2058			continue;
2059		}
2060
2061		(void) mutex_lock(lp);
2062
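		/*
		 * Take the next sequence number for this lock while still
		 * holding lp, so that sequence order matches write order;
		 * the dmu_sync() verification below relies on this.
		 */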
2063		wbt.bt_seq = ztest_shared->zs_seq[b]++;
2064
2065		dmu_write(os, ZTEST_DIROBJ, off, sizeof (wbt), &wbt, tx);
2066
2067		(void) mutex_unlock(lp);
2068
2069		if (ztest_random(100) == 0)
2070			(void) poll(NULL, 0, 1); /* open dn_notxholds window */
2071
2072		dmu_tx_commit(tx);
2073
2074		if (ztest_random(1000) == 0)
2075			txg_wait_synced(dmu_objset_pool(os), txg);
2076
2077		if (ztest_random(2) == 0) {
2078			blkptr_t blk = { 0 };
2079			uint64_t blkoff;
2080			zbookmark_t zb;
2081
2082			(void) mutex_lock(lp);
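			/*
			 * Round the offset down to a block boundary so that
			 * dmu_buf_hold() references the dbuf containing our
			 * most recent write.
			 */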
2083			blkoff = P2ALIGN_TYPED(off, bs, uint64_t);
2084			error = dmu_buf_hold(os,
2085			    ZTEST_DIROBJ, blkoff, FTAG, &db);
2086			if (error) {
2087				dprintf("dmu_buf_hold(%s, %d, %llx) = %d\n",
2088				    osname, ZTEST_DIROBJ, blkoff, error);
2089				(void) mutex_unlock(lp);
2090				continue;
2091			}
2092			blkoff = off - blkoff;
2093			error = dmu_sync(NULL, db, &blk, txg, NULL, NULL);
2094			dmu_buf_rele(db, FTAG);
2095			(void) mutex_unlock(lp);
2096			if (error) {
2097				dprintf("dmu_sync(%s, %d, %llx) = %d\n",
2098				    osname, ZTEST_DIROBJ, off, error);
2099				continue;
2100			}
2101
2102			if (blk.blk_birth == 0)	{	/* concurrent free */
2103				continue;
2104			}
2105			txg_suspend(dmu_objset_pool(os));
2106
2107			ASSERT(blk.blk_fill == 1);
2108			ASSERT3U(BP_GET_TYPE(&blk), ==, DMU_OT_UINT64_OTHER);
2109			ASSERT3U(BP_GET_LEVEL(&blk), ==, 0);
2110			ASSERT3U(BP_GET_LSIZE(&blk), ==, bs);
2111
2112			/*
2113			 * Read the block that dmu_sync() returned to
2114			 * make sure its contents match what we wrote.
2115			 * We do this while still txg_suspend()ed to ensure
2116			 * that the block can't be reused before we read it.
2117			 */
2118			zb.zb_objset = dmu_objset_id(os);
2119			zb.zb_object = ZTEST_DIROBJ;
2120			zb.zb_level = 0;
2121			zb.zb_blkid = off / bs;
2122			error = zio_wait(zio_read(NULL, dmu_objset_spa(os),
2123			    &blk, iobuf, bs, NULL, NULL,
2124			    ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_MUSTSUCCEED, &zb));
2125			ASSERT(error == 0);
2126
2127			txg_resume(dmu_objset_pool(os));
2128
2129			bcopy(&iobuf[blkoff], &rbt, sizeof (rbt));
2130
2131			if (rbt.bt_objset == 0)		/* concurrent free */
2132				continue;
2133
2134			ASSERT3U(rbt.bt_objset, ==, wbt.bt_objset);
2135			ASSERT3U(rbt.bt_object, ==, wbt.bt_object);
2136			ASSERT3U(rbt.bt_offset, ==, wbt.bt_offset);
2137
2138			/*
2139			 * The semantics of dmu_sync() are that we always
2140			 * push the most recent version of the data,
2141			 * so in the face of concurrent updates we may
2142			 * see a newer version of the block.  That's OK.
2143			 */
2144			ASSERT3U(rbt.bt_txg, >=, wbt.bt_txg);
2145			if (rbt.bt_thread == wbt.bt_thread)
2146				ASSERT3U(rbt.bt_seq, ==, wbt.bt_seq);
2147			else
2148				ASSERT3U(rbt.bt_seq, >, wbt.bt_seq);
2149		}
2150	}
2151}
2152
2153/*
2154 * Verify that zap_{create,destroy,add,remove,update} work as expected.
2155 */
2156#define	ZTEST_ZAP_MIN_INTS	1
2157#define	ZTEST_ZAP_MAX_INTS	4
2158#define	ZTEST_ZAP_MAX_PROPS	1000
2159
2160void
2161ztest_zap(ztest_args_t *za)
2162{
2163	objset_t *os = za->za_os;
2164	uint64_t object;
2165	uint64_t txg, last_txg;
2166	uint64_t value[ZTEST_ZAP_MAX_INTS];
2167	uint64_t zl_ints, zl_intsize, prop;
2168	int i, ints;
2169	int iters = 100;
2170	dmu_tx_t *tx;
2171	char propname[100], txgname[100];
2172	int error;
2173	char osname[MAXNAMELEN];
2174	char *hc[2] = { "s.acl.h", ".s.open.h.hyLZlg" };
2175
2176	dmu_objset_name(os, osname);
2177
2178	/*
2179	 * Create a new object if necessary, and record it in the directory.
2180	 */
2181	VERIFY(0 == dmu_read(os, ZTEST_DIROBJ, za->za_diroff,
2182	    sizeof (uint64_t), &object));
2183
2184	if (object == 0) {
2185		tx = dmu_tx_create(os);
2186		dmu_tx_hold_write(tx, ZTEST_DIROBJ, za->za_diroff,
2187		    sizeof (uint64_t));
2188		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, TRUE, NULL);
2189		error = dmu_tx_assign(tx, TXG_WAIT);
2190		if (error) {
2191			ztest_record_enospc("create zap test obj");
2192			dmu_tx_abort(tx);
2193			return;
2194		}
2195		object = zap_create(os, DMU_OT_ZAP_OTHER, DMU_OT_NONE, 0, tx);
2196		/*
2197		 * zap_create() returns the new object number, not an error
2198		 * code, so the ASSERT below is the only check we need here.
2199		 */
2200		ASSERT(object != 0);
2201		dmu_write(os, ZTEST_DIROBJ, za->za_diroff,
2202		    sizeof (uint64_t), &object, tx);
2203		/*
2204		 * Generate a known hash collision, and verify that
2205		 * we can look up and remove both entries.
2206		 */
2207		for (i = 0; i < 2; i++) {
2208			value[i] = i;
2209			error = zap_add(os, object, hc[i], sizeof (uint64_t),
2210			    1, &value[i], tx);
2211			ASSERT3U(error, ==, 0);
2212		}
2213		for (i = 0; i < 2; i++) {
2214			error = zap_add(os, object, hc[i], sizeof (uint64_t),
2215			    1, &value[i], tx);
2216			ASSERT3U(error, ==, EEXIST);
2217			error = zap_length(os, object, hc[i],
2218			    &zl_intsize, &zl_ints);
2219			ASSERT3U(error, ==, 0);
2220			ASSERT3U(zl_intsize, ==, sizeof (uint64_t));
2221			ASSERT3U(zl_ints, ==, 1);
2222		}
2223		for (i = 0; i < 2; i++) {
2224			error = zap_remove(os, object, hc[i], tx);
2225			ASSERT3U(error, ==, 0);
2226		}
2227
2228		dmu_tx_commit(tx);
2229	}
2230
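	/*
	 * Derive the number of integers per property value from the
	 * object number, so the expected value size is stable and can
	 * be verified against zap_length() below.
	 */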
2231	ints = MAX(ZTEST_ZAP_MIN_INTS, object % ZTEST_ZAP_MAX_INTS);
2232
2233	while (--iters >= 0) {
2234		prop = ztest_random(ZTEST_ZAP_MAX_PROPS);
2235		(void) sprintf(propname, "prop_%llu", (u_longlong_t)prop);
2236		(void) sprintf(txgname, "txg_%llu", (u_longlong_t)prop);
2237		bzero(value, sizeof (value));
2238		last_txg = 0;
2239
2240		/*
2241		 * If these zap entries already exist, validate their contents.
2242		 */
2243		error = zap_length(os, object, txgname, &zl_intsize, &zl_ints);
2244		if (error == 0) {
2245			ASSERT3U(zl_intsize, ==, sizeof (uint64_t));
2246			ASSERT3U(zl_ints, ==, 1);
2247
2248			error = zap_lookup(os, object, txgname, zl_intsize,
2249			    zl_ints, &last_txg);
2250
2251			ASSERT3U(error, ==, 0);
2252
2253			error = zap_length(os, object, propname, &zl_intsize,
2254			    &zl_ints);
2255
2256			ASSERT3U(error, ==, 0);
2257			ASSERT3U(zl_intsize, ==, sizeof (uint64_t));
2258			ASSERT3U(zl_ints, ==, ints);
2259
2260			error = zap_lookup(os, object, propname, zl_intsize,
2261			    zl_ints, value);
2262
2263			ASSERT3U(error, ==, 0);
2264
2265			for (i = 0; i < ints; i++) {
2266				ASSERT3U(value[i], ==, last_txg + object + i);
2267			}
2268		} else {
2269			ASSERT3U(error, ==, ENOENT);
2270		}
2271
2272		/*
2273		 * Atomically update two entries in our zap object.
2274		 * The first is named txg_%llu, and contains the txg
2275		 * in which the property was last updated.  The second
2276		 * is named prop_%llu, and the nth element of its value
2277		 * should be txg + object + n.
2278		 */
2279		tx = dmu_tx_create(os);
2280		dmu_tx_hold_zap(tx, object, TRUE, NULL);
2281		error = dmu_tx_assign(tx, TXG_WAIT);
2282		if (error) {
2283			ztest_record_enospc("create zap entry");
2284			dmu_tx_abort(tx);
2285			return;
2286		}
2287		txg = dmu_tx_get_txg(tx);
2288
2289		if (last_txg > txg)
2290			fatal(0, "zap future leak: old %llu new %llu",
2291			    last_txg, txg);
2292
2293		for (i = 0; i < ints; i++)
2294			value[i] = txg + object + i;
2295
2296		error = zap_update(os, object, txgname, sizeof (uint64_t),
2297		    1, &txg, tx);
2298		if (error)
2299			fatal(0, "zap_update('%s', %llu, '%s') = %d",
2300			    osname, object, txgname, error);
2301
2302		error = zap_update(os, object, propname, sizeof (uint64_t),
2303		    ints, value, tx);
2304		if (error)
2305			fatal(0, "zap_update('%s', %llu, '%s') = %d",
2306			    osname, object, propname, error);
2307
2308		dmu_tx_commit(tx);
2309
2310		/*
2311		 * Remove a random pair of entries.
2312		 */
2313		prop = ztest_random(ZTEST_ZAP_MAX_PROPS);
2314		(void) sprintf(propname, "prop_%llu", (u_longlong_t)prop);
2315		(void) sprintf(txgname, "txg_%llu", (u_longlong_t)prop);
2316
2317		error = zap_length(os, object, txgname, &zl_intsize, &zl_ints);
2318
2319		if (error == ENOENT)
2320			continue;
2321
2322		ASSERT3U(error, ==, 0);
2323
2324		tx = dmu_tx_create(os);
2325		dmu_tx_hold_zap(tx, object, TRUE, NULL);
2326		error = dmu_tx_assign(tx, TXG_WAIT);
2327		if (error) {
2328			ztest_record_enospc("remove zap entry");
2329			dmu_tx_abort(tx);
2330			return;
2331		}
2332		error = zap_remove(os, object, txgname, tx);
2333		if (error)
2334			fatal(0, "zap_remove('%s', %llu, '%s') = %d",
2335			    osname, object, txgname, error);
2336
2337		error = zap_remove(os, object, propname, tx);
2338		if (error)
2339			fatal(0, "zap_remove('%s', %llu, '%s') = %d",
2340			    osname, object, propname, error);
2341
2342		dmu_tx_commit(tx);
2343	}
2344
2345	/*
2346	 * Once in a while, destroy the object.
2347	 */
2348	if (ztest_random(100) != 0)
2349		return;
2350
2351	tx = dmu_tx_create(os);
2352	dmu_tx_hold_write(tx, ZTEST_DIROBJ, za->za_diroff, sizeof (uint64_t));
2353	dmu_tx_hold_free(tx, object, 0, DMU_OBJECT_END);
2354	error = dmu_tx_assign(tx, TXG_WAIT);
2355	if (error) {
2356		ztest_record_enospc("destroy zap object");
2357		dmu_tx_abort(tx);
2358		return;
2359	}
2360	error = zap_destroy(os, object, tx);
2361	if (error)
2362		fatal(0, "zap_destroy('%s', %llu) = %d",
2363		    osname, object, error);
2364	object = 0;
2365	dmu_write(os, ZTEST_DIROBJ, za->za_diroff, sizeof (uint64_t),
2366	    &object, tx);
2367	dmu_tx_commit(tx);
2368}
2369
2370void
2371ztest_zap_parallel(ztest_args_t *za)
2372{
2373	objset_t *os = za->za_os;
2374	uint64_t txg, object, count, wsize, wc, zl_wsize, zl_wc;
2375	int iters = 100;
2376	dmu_tx_t *tx;
2377	int i, namelen, error;
2378	char name[20], string_value[20];
2379	void *data;
2380
2381	while (--iters >= 0) {
2382		/*
2383		 * Generate a random name of the form 'xxx.....' where each
2384		 * x is a random printable character and the dots are dots.
2385		 * There are 94 such characters, and the name length goes from
2386		 * 6 to 20, so there are 94^3 * 15 = 12,458,760 possible names.
2387		 */
2388		namelen = ztest_random(sizeof (name) - 5) + 5 + 1;
2389
2390		for (i = 0; i < 3; i++)
2391			name[i] = '!' + ztest_random('~' - '!' + 1);
2392		for (; i < namelen - 1; i++)
2393			name[i] = '.';
2394		name[i] = '\0';
2395
2396		if (ztest_random(2) == 0)
2397			object = ZTEST_MICROZAP_OBJ;
2398		else
2399			object = ZTEST_FATZAP_OBJ;
2400
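		/*
		 * Pick an encoding for the value: microzap entries and
		 * odd-length names store the txg as a single uint64_t,
		 * everything else stores the name itself as a byte array.
		 */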
2401		if ((namelen & 1) || object == ZTEST_MICROZAP_OBJ) {
2402			wsize = sizeof (txg);
2403			wc = 1;
2404			data = &txg;
2405		} else {
2406			wsize = 1;
2407			wc = namelen;
2408			data = string_value;
2409		}
2410
2411		count = -1ULL;
2412		VERIFY(zap_count(os, object, &count) == 0);
2413		ASSERT(count != -1ULL);
2414
2415		/*
2416		 * Select an operation: length, lookup, add, update, remove.
2417		 */
2418		i = ztest_random(5);
2419
2420		if (i >= 2) {
2421			tx = dmu_tx_create(os);
2422			dmu_tx_hold_zap(tx, object, TRUE, NULL);
2423			error = dmu_tx_assign(tx, TXG_WAIT);
2424			if (error) {
2425				ztest_record_enospc("zap parallel");
2426				dmu_tx_abort(tx);
2427				return;
2428			}
2429			txg = dmu_tx_get_txg(tx);
2430			bcopy(name, string_value, namelen);
2431		} else {
2432			tx = NULL;
2433			txg = 0;
2434			bzero(string_value, namelen);
2435		}
2436
2437		switch (i) {
2438
2439		case 0:
2440			error = zap_length(os, object, name, &zl_wsize, &zl_wc);
2441			if (error == 0) {
2442				ASSERT3U(wsize, ==, zl_wsize);
2443				ASSERT3U(wc, ==, zl_wc);
2444			} else {
2445				ASSERT3U(error, ==, ENOENT);
2446			}
2447			break;
2448
2449		case 1:
2450			error = zap_lookup(os, object, name, wsize, wc, data);
2451			if (error == 0) {
2452				if (data == string_value &&
2453				    bcmp(name, data, namelen) != 0)
2454					fatal(0, "name '%s' != val '%s' len %d",
2455					    name, data, namelen);
2456			} else {
2457				ASSERT3U(error, ==, ENOENT);
2458			}
2459			break;
2460
2461		case 2:
2462			error = zap_add(os, object, name, wsize, wc, data, tx);
2463			ASSERT(error == 0 || error == EEXIST);
2464			break;
2465
2466		case 3:
2467			VERIFY(zap_update(os, object, name, wsize, wc,
2468			    data, tx) == 0);
2469			break;
2470
2471		case 4:
2472			error = zap_remove(os, object, name, tx);
2473			ASSERT(error == 0 || error == ENOENT);
2474			break;
2475		}
2476
2477		if (tx != NULL)
2478			dmu_tx_commit(tx);
2479	}
2480}
2481
2482void
2483ztest_dsl_prop_get_set(ztest_args_t *za)
2484{
2485	objset_t *os = za->za_os;
2486	int i, inherit;
2487	uint64_t value;
2488	const char *prop, *valname;
2489	char setpoint[MAXPATHLEN];
2490	char osname[MAXNAMELEN];
2491	int error;
2492
2493	(void) rw_rdlock(&ztest_shared->zs_name_lock);
2494
2495	dmu_objset_name(os, osname);
2496
2497	for (i = 0; i < 2; i++) {
2498		if (i == 0) {
2499			prop = "checksum";
2500			value = ztest_random_checksum();
2501			inherit = (value == ZIO_CHECKSUM_INHERIT);
2502		} else {
2503			prop = "compression";
2504			value = ztest_random_compress();
2505			inherit = (value == ZIO_COMPRESS_INHERIT);
2506		}
2507
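		/*
		 * When the random value is the INHERIT sentinel, pass a
		 * count of zero to dsl_prop_set(), which should clear the
		 * local setting so the property is inherited.
		 */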
2508		error = dsl_prop_set(osname, prop, sizeof (value),
2509		    !inherit, &value);
2510
2511		if (error == ENOSPC) {
2512			ztest_record_enospc("dsl_prop_set");
2513			break;
2514		}
2515
2516		ASSERT3U(error, ==, 0);
2517
2518		VERIFY3U(dsl_prop_get(osname, prop, sizeof (value),
2519		    1, &value, setpoint), ==, 0);
2520
2521		if (i == 0)
2522			valname = zio_checksum_table[value].ci_name;
2523		else
2524			valname = zio_compress_table[value].ci_name;
2525
2526		if (zopt_verbose >= 6) {
2527			(void) printf("%s %s = %s for '%s'\n",
2528			    osname, prop, valname, setpoint);
2529		}
2530	}
2531
2532	(void) rw_unlock(&ztest_shared->zs_name_lock);
2533}
2534
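/*
 * Recursively arm fault injection on vd and all of its children.
 * Only leaf vdevs (those with a path) record the fault settings.
 */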
2535static void
2536ztest_error_setup(vdev_t *vd, int mode, int mask, uint64_t arg)
2537{
2538	int c;
2539
2540	for (c = 0; c < vd->vdev_children; c++)
2541		ztest_error_setup(vd->vdev_child[c], mode, mask, arg);
2542
2543	if (vd->vdev_path != NULL) {
2544		vd->vdev_fault_mode = mode;
2545		vd->vdev_fault_mask = mask;
2546		vd->vdev_fault_arg = arg;
2547	}
2548}
2549
2550/*
2551 * Inject random faults into the on-disk data.
2552 */
2553void
2554ztest_fault_inject(ztest_args_t *za)
2555{
2556	int fd;
2557	uint64_t offset;
2558	uint64_t leaves = MAX(zopt_mirrors, 1) * zopt_raidz;
2559	uint64_t bad = 0x1990c0ffeedecadeULL;
2560	uint64_t top, leaf;
2561	char path0[MAXPATHLEN];
2562	char pathrand[MAXPATHLEN];
2563	size_t fsize;
2564	spa_t *spa = dmu_objset_spa(za->za_os);
2565	int bshift = SPA_MAXBLOCKSHIFT + 2;	/* don't scrog all labels */
2566	int iters = 1000;
2567	vdev_t *vd0;
2568	uint64_t guid0 = 0;
2569
2570	/*
2571	 * We can't inject faults when we have no fault tolerance.
2572	 */
2573	if (zopt_maxfaults == 0)
2574		return;
2575
2576	ASSERT(leaves >= 2);
2577
2578	/*
2579	 * Pick a random top-level vdev.
2580	 */
2581	spa_config_enter(spa, RW_READER, FTAG);
2582	top = ztest_random(spa->spa_root_vdev->vdev_children);
2583	spa_config_exit(spa, FTAG);
2584
2585	/*
2586	 * Pick a random leaf.
2587	 */
2588	leaf = ztest_random(leaves);
2589
2590	/*
2591	 * Generate paths to the first leaf in this top-level vdev,
2592	 * and to the random leaf we selected.  We'll induce transient
2593	 * I/O errors and random online/offline activity on leaf 0,
2594	 * and we'll write random garbage to the randomly chosen leaf.
2595	 */
2596	(void) snprintf(path0, sizeof (path0),
2597	    ztest_dev_template, zopt_dir, zopt_pool, top * leaves + 0);
2598	(void) snprintf(pathrand, sizeof (pathrand),
2599	    ztest_dev_template, zopt_dir, zopt_pool, top * leaves + leaf);
2600
2601	dprintf("damaging %s and %s\n", path0, pathrand);
2602
2603	spa_config_enter(spa, RW_READER, FTAG);
2604
2605	/*
2606	 * If we can tolerate two or more faults, make vd0 fail randomly.
2607	 */
2608	vd0 = vdev_lookup_by_path(spa->spa_root_vdev, path0);
2609	if (vd0 != NULL && zopt_maxfaults >= 2) {
2610		guid0 = vd0->vdev_guid;
2611		ztest_error_setup(vd0, VDEV_FAULT_COUNT,
2612		    (1U << ZIO_TYPE_READ) | (1U << ZIO_TYPE_WRITE), 100);
2613	}
2614
2615	spa_config_exit(spa, FTAG);
2616
2617	/*
2618	 * If we can tolerate two or more faults, randomly online/offline vd0.
2619	 */
2620	if (zopt_maxfaults >= 2 && guid0 != 0) {
2621		if (ztest_random(10) < 6)
2622			(void) vdev_offline(spa, guid0, B_TRUE);
2623		else
2624			(void) vdev_online(spa, guid0);
2625	}
2626
2627	/*
2628	 * We have at least single-fault tolerance, so inject data corruption.
2629	 */
2630	fd = open(pathrand, O_RDWR);
2631
2632	if (fd == -1)	/* we hit a gap in the device namespace */
2633		return;
2634
2635	fsize = lseek(fd, 0, SEEK_END);
2636
2637	while (--iters != 0) {
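		/*
		 * Pick a random 8-byte-aligned offset to damage; anything
		 * that lands past the end of the file is simply skipped.
		 */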
2638		offset = ztest_random(fsize / (leaves << bshift)) *
2639		    (leaves << bshift) + (leaf << bshift) +
2640		    (ztest_random(1ULL << (bshift - 1)) & -8ULL);
2641
2642		if (offset >= fsize)
2643			continue;
2644
2645		if (zopt_verbose >= 6)
2646			(void) printf("injecting bad word into %s,"
2647			    " offset 0x%llx\n", pathrand, (u_longlong_t)offset);
2648
2649		if (pwrite(fd, &bad, sizeof (bad), offset) != sizeof (bad))
2650			fatal(1, "can't inject bad word at 0x%llx in %s",
2651			    offset, pathrand);
2652	}
2653
2654	(void) close(fd);
2655}
2656
2657/*
2658 * Scrub the pool.
2659 */
2660void
2661ztest_scrub(ztest_args_t *za)
2662{
2663	spa_t *spa = dmu_objset_spa(za->za_os);
2664
2665	(void) spa_scrub(spa, POOL_SCRUB_EVERYTHING, B_FALSE);
2666	(void) poll(NULL, 0, 1000); /* wait a second, then force a restart */
2667	(void) spa_scrub(spa, POOL_SCRUB_EVERYTHING, B_FALSE);
2668}
2669
2670/*
2671 * Rename the pool to a different name and then rename it back.
2672 */
2673void
2674ztest_spa_rename(ztest_args_t *za)
2675{
2676	char *oldname, *newname;
2677	int error;
2678	spa_t *spa;
2679
2680	(void) rw_wrlock(&ztest_shared->zs_name_lock);
2681
2682	oldname = za->za_pool;
2683	newname = umem_alloc(strlen(oldname) + 5, UMEM_NOFAIL);
2684	(void) strcpy(newname, oldname);
2685	(void) strcat(newname, "_tmp");
2686
2687	/*
2688	 * Do the rename
2689	 */
2690	error = spa_rename(oldname, newname);
2691	if (error)
2692		fatal(0, "spa_rename('%s', '%s') = %d", oldname,
2693		    newname, error);
2694
2695	/*
2696	 * Try to open it under the old name, which shouldn't exist
2697	 */
2698	error = spa_open(oldname, &spa, FTAG);
2699	if (error != ENOENT)
2700		fatal(0, "spa_open('%s') = %d", oldname, error);
2701
2702	/*
2703	 * Open it under the new name and make sure it's still the same spa_t.
2704	 */
2705	error = spa_open(newname, &spa, FTAG);
2706	if (error != 0)
2707		fatal(0, "spa_open('%s') = %d", newname, error);
2708
2709	ASSERT(spa == dmu_objset_spa(za->za_os));
2710	spa_close(spa, FTAG);
2711
2712	/*
2713	 * Rename it back to the original
2714	 */
2715	error = spa_rename(newname, oldname);
2716	if (error)
2717		fatal(0, "spa_rename('%s', '%s') = %d", newname,
2718		    oldname, error);
2719
2720	/*
2721	 * Make sure it can still be opened
2722	 */
2723	error = spa_open(oldname, &spa, FTAG);
2724	if (error != 0)
2725		fatal(0, "spa_open('%s') = %d", oldname, error);
2726
2727	ASSERT(spa == dmu_objset_spa(za->za_os));
2728	spa_close(spa, FTAG);
2729
2730	umem_free(newname, strlen(newname) + 1);
2731
2732	(void) rw_unlock(&ztest_shared->zs_name_lock);
2733}
2734
2735
2736/*
2737 * Completely obliterate one disk.
2738 */
2739static void
2740ztest_obliterate_one_disk(uint64_t vdev)
2741{
2742	int fd;
2743	char dev_name[MAXPATHLEN], copy_name[MAXPATHLEN];
2744	size_t fsize;
2745
2746	if (zopt_maxfaults < 2)
2747		return;
2748
2749	(void) sprintf(dev_name, ztest_dev_template, zopt_dir, zopt_pool, vdev);
2750	(void) snprintf(copy_name, MAXPATHLEN, "%s.old", dev_name);
2751
2752	fd = open(dev_name, O_RDWR);
2753
2754	if (fd == -1)
2755		fatal(1, "can't open %s", dev_name);
2756
2757	/*
2758	 * Determine the size.
2759	 */
2760	fsize = lseek(fd, 0, SEEK_END);
2761
2762	(void) close(fd);
2763
2764	/*
2765	 * Rename the old device to dev_name.old (useful for debugging).
2766	 */
2767	VERIFY(rename(dev_name, copy_name) == 0);
2768
2769	/*
2770	 * Create a new one.
2771	 */
2772	VERIFY((fd = open(dev_name, O_RDWR | O_CREAT | O_TRUNC, 0666)) >= 0);
2773	VERIFY(ftruncate(fd, fsize) == 0);
2774	(void) close(fd);
2775}
2776
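/*
 * Replace the given vdev's file with a brand-new copy of itself (an
 * in-place attach).  A few benign errors are tolerated for devices
 * that can't be replaced at the moment.
 */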
2777static void
2778ztest_replace_one_disk(spa_t *spa, uint64_t vdev)
2779{
2780	char dev_name[MAXPATHLEN];
2781	nvlist_t *file, *root;
2782	int error;
2783	uint64_t guid;
2784	uint64_t ashift = ztest_get_ashift();
2785	vdev_t *vd;
2786
2787	(void) sprintf(dev_name, ztest_dev_template, zopt_dir, zopt_pool, vdev);
2788
2789	/*
2790	 * Build the nvlist describing dev_name.
2791	 */
2792	VERIFY(nvlist_alloc(&file, NV_UNIQUE_NAME, 0) == 0);
2793	VERIFY(nvlist_add_string(file, ZPOOL_CONFIG_TYPE, VDEV_TYPE_FILE) == 0);
2794	VERIFY(nvlist_add_string(file, ZPOOL_CONFIG_PATH, dev_name) == 0);
2795	VERIFY(nvlist_add_uint64(file, ZPOOL_CONFIG_ASHIFT, ashift) == 0);
2796
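	/*
	 * Wrap the file vdev in a root nvlist with a single child, which
	 * is the form spa_vdev_attach() expects for the new device.
	 */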
2797	VERIFY(nvlist_alloc(&root, NV_UNIQUE_NAME, 0) == 0);
2798	VERIFY(nvlist_add_string(root, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT) == 0);
2799	VERIFY(nvlist_add_nvlist_array(root, ZPOOL_CONFIG_CHILDREN,
2800	    &file, 1) == 0);
2801
2802	spa_config_enter(spa, RW_READER, FTAG);
2803	if ((vd = vdev_lookup_by_path(spa->spa_root_vdev, dev_name)) == NULL)
2804		guid = 0;
2805	else
2806		guid = vd->vdev_guid;
2807	spa_config_exit(spa, FTAG);
2808	error = spa_vdev_attach(spa, guid, root, B_TRUE);
2809	if (error != 0 &&
2810	    error != EBUSY &&
2811	    error != ENOTSUP &&
2812	    error != ENODEV &&
2813	    error != EDOM)
2814		fatal(0, "spa_vdev_attach(in-place) = %d", error);
2815
2816	nvlist_free(file);
2817	nvlist_free(root);
2818}
2819
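/*
 * Run zdb on the pool and treat any non-zero exit status as fatal.
 */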
2820static void
2821ztest_verify_blocks(char *pool)
2822{
2823	int status;
2824	char zdb[MAXPATHLEN + MAXNAMELEN + 20];
2825	char zbuf[1024];
2826	char *bin;
2827	FILE *fp;
2828
2829	if (realpath(progname, zdb) == NULL)
2830		assert(!"realpath() failed");
2831
2832	/* zdb lives in /usr/sbin, while ztest lives in /usr/bin */
2833	bin = strstr(zdb, "/usr/bin/");
2834	if (bin == NULL)
2835		bin = zdb;
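	/*
	 * Overwrite our own path from "/usr/bin/..." onward with the zdb
	 * command line, presumably so that any leading prefix (such as a
	 * chroot or proto area) carries over to the zdb we invoke.
	 */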
2836	/* LINTED */
2837	(void) sprintf(bin, "/usr/sbin/zdb -bc%s%s -U -O %s %s",
2838	    zopt_verbose >= 3 ? "s" : "",
2839	    zopt_verbose >= 4 ? "v" : "",
2840	    ztest_random(2) == 0 ? "pre" : "post", pool);
2841
2842	if (zopt_verbose >= 5)
2843		(void) printf("Executing %s\n", strstr(zdb, "zdb "));
2844
2845	fp = popen(zdb, "r");
2846	assert(fp != NULL);
2847
2848	while (fgets(zbuf, sizeof (zbuf), fp) != NULL)
2849		if (zopt_verbose >= 3)
2850			(void) printf("%s", zbuf);
2851
2852	status = pclose(fp);
2853
2854	if (status == 0)
2855		return;
2856
2857	ztest_dump_core = 0;
2858	if (WIFEXITED(status))
2859		fatal(0, "'%s' exit code %d", zdb, WEXITSTATUS(status));
2860	else
2861		fatal(0, "'%s' died with signal %d", zdb, WTERMSIG(status));
2862}
2863
2864static void
2865ztest_walk_pool_directory(char *header)
2866{
2867	spa_t *spa = NULL;
2868
2869	if (zopt_verbose >= 6)
2870		(void) printf("%s\n", header);
2871
2872	mutex_enter(&spa_namespace_lock);
2873	while ((spa = spa_next(spa)) != NULL)
2874		if (zopt_verbose >= 6)
2875			(void) printf("\t%s\n", spa_name(spa));
2876	mutex_exit(&spa_namespace_lock);
2877}
2878
2879static void
2880ztest_spa_import_export(char *oldname, char *newname)
2881{
2882	nvlist_t *config;
2883	uint64_t pool_guid;
2884	spa_t *spa;
2885	int error;
2886
2887	if (zopt_verbose >= 4) {
2888		(void) printf("import/export: old = %s, new = %s\n",
2889		    oldname, newname);
2890	}
2891
2892	/*
2893	 * Clean up from previous runs.
2894	 */
2895	(void) spa_destroy(newname);
2896
2897	/*
2898	 * Get the pool's configuration and guid.
2899	 */
2900	error = spa_open(oldname, &spa, FTAG);
2901	if (error)
2902		fatal(0, "spa_open('%s') = %d", oldname, error);
2903
2904	pool_guid = spa_guid(spa);
2905	spa_close(spa, FTAG);
2906
2907	ztest_walk_pool_directory("pools before export");
2908
2909	/*
2910	 * Export it.
2911	 */
2912	error = spa_export(oldname, &config);
2913	if (error)
2914		fatal(0, "spa_export('%s') = %d", oldname, error);
2915
2916	ztest_walk_pool_directory("pools after export");
2917
2918	/*
2919	 * Import it under the new name.
2920	 */
2921	error = spa_import(newname, config, NULL);
2922	if (error)
2923		fatal(0, "spa_import('%s') = %d", newname, error);
2924
2925	ztest_walk_pool_directory("pools after import");
2926
2927	/*
2928	 * Try to import it again -- should fail with EEXIST.
2929	 */
2930	error = spa_import(newname, config, NULL);
2931	if (error != EEXIST)
2932		fatal(0, "spa_import('%s') twice", newname);
2933
2934	/*
2935	 * Try to import it under a different name -- should fail with EEXIST.
2936	 */
2937	error = spa_import(oldname, config, NULL);
2938	if (error != EEXIST)
2939		fatal(0, "spa_import('%s') under multiple names", newname);
2940
2941	/*
2942	 * Verify that the pool is no longer visible under the old name.
2943	 */
2944	error = spa_open(oldname, &spa, FTAG);
2945	if (error != ENOENT)
2946		fatal(0, "spa_open('%s') = %d", oldname, error);
2947
2948	/*
2949	 * Verify that we can open and close the pool using the new name.
2950	 */
2951	error = spa_open(newname, &spa, FTAG);
2952	if (error)
2953		fatal(0, "spa_open('%s') = %d", newname, error);
2954	ASSERT(pool_guid == spa_guid(spa));
2955	spa_close(spa, FTAG);
2956
2957	nvlist_free(config);
2958}
2959
2960static void *
2961ztest_thread(void *arg)
2962{
2963	ztest_args_t *za = arg;
2964	ztest_shared_t *zs = ztest_shared;
2965	hrtime_t now, functime;
2966	ztest_info_t *zi;
2967	int f;
2968
2969	while ((now = gethrtime()) < za->za_stop) {
2970		/*
2971		 * See if it's time to force a crash.
2972		 */
2973		if (now > za->za_kill) {
2974			dmu_tx_t *tx;
2975			uint64_t txg;
2976
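			/*
			 * Record the currently open txg in shared memory so
			 * the next pass can check for future leaks, wait for
			 * that txg to sync, then SIGKILL ourselves to
			 * simulate a crash.
			 */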
2977			mutex_enter(&spa_namespace_lock);
2978			tx = dmu_tx_create(za->za_os);
2979			VERIFY(0 == dmu_tx_assign(tx, TXG_NOWAIT));
2980			txg = dmu_tx_get_txg(tx);
2981			dmu_tx_commit(tx);
2982			zs->zs_txg = txg;
2983			if (zopt_verbose >= 3)
2984				(void) printf(
2985				    "killing process after txg %lld\n",
2986				    (u_longlong_t)txg);
2987			txg_wait_synced(dmu_objset_pool(za->za_os), txg);
2988			zs->zs_alloc = spa_get_alloc(dmu_objset_spa(za->za_os));
2989			zs->zs_space = spa_get_space(dmu_objset_spa(za->za_os));
2990			(void) kill(getpid(), SIGKILL);
2991		}
2992
2993		/*
2994		 * Pick a random function.
2995		 */
2996		f = ztest_random(ZTEST_FUNCS);
2997		zi = &zs->zs_info[f];
2998
2999		/*
3000		 * Decide whether to call it, based on the requested frequency.
3001		 */
3002		if (zi->zi_call_target == 0 ||
3003		    (double)zi->zi_call_total / zi->zi_call_target >
3004		    (double)(now - zs->zs_start_time) / (zopt_time * NANOSEC))
3005			continue;
3006
3007		atomic_add_64(&zi->zi_calls, 1);
3008		atomic_add_64(&zi->zi_call_total, 1);
3009
3010		za->za_diroff = (za->za_instance * ZTEST_FUNCS + f) *
3011		    ZTEST_DIRSIZE;
3012		za->za_diroff_shared = (1ULL << 63);
3013
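		/*
		 * Exercise the parallel writer on every pass, before
		 * calling the randomly selected function.
		 */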
3014		ztest_dmu_write_parallel(za);
3015
3016		zi->zi_func(za);
3017
3018		functime = gethrtime() - now;
3019
3020		atomic_add_64(&zi->zi_call_time, functime);
3021
3022		if (zopt_verbose >= 4) {
3023			Dl_info dli;
3024			(void) dladdr((void *)zi->zi_func, &dli);
3025			(void) printf("%6.2f sec in %s\n",
3026			    (double)functime / NANOSEC, dli.dli_sname);
3027		}
3028
3029		/*
3030		 * If we're getting ENOSPC with some regularity, stop.
3031		 */
3032		if (zs->zs_enospc_count > 10)
3033			break;
3034	}
3035
3036	return (NULL);
3037}
3038
3039/*
3040 * Kick off threads to run tests on all datasets in parallel.
3041 */
3042static void
3043ztest_run(char *pool)
3044{
3045	int t, d, error;
3046	ztest_shared_t *zs = ztest_shared;
3047	ztest_args_t *za;
3048	spa_t *spa;
3049	char name[100];
3050
3051	(void) _mutex_init(&zs->zs_vdev_lock, USYNC_THREAD, NULL);
3052	(void) rwlock_init(&zs->zs_name_lock, USYNC_THREAD, NULL);
3053
3054	for (t = 0; t < ZTEST_SYNC_LOCKS; t++)
3055		(void) _mutex_init(&zs->zs_sync_lock[t], USYNC_THREAD, NULL);
3056
3057	/*
3058	 * Destroy one disk before we even start.
3059	 * It's mirrored, so everything should work just fine.
3060	 * This makes us exercise fault handling very early in spa_load().
3061	 */
3062	ztest_obliterate_one_disk(0);
3063
3064	/*
3065	 * Verify that the sum of the sizes of all blocks in the pool
3066	 * equals the SPA's allocated space total.
3067	 */
3068	ztest_verify_blocks(pool);
3069
3070	/*
3071	 * Kick off a replacement of the disk we just obliterated.
3072	 */
3073	kernel_init(FREAD | FWRITE);
3074	error = spa_open(pool, &spa, FTAG);
3075	if (error)
3076		fatal(0, "spa_open(%s) = %d", pool, error);
3077	ztest_replace_one_disk(spa, 0);
3078	if (zopt_verbose >= 5)
3079		show_pool_stats(spa);
3080	spa_close(spa, FTAG);
3081	kernel_fini();
3082
3083	kernel_init(FREAD | FWRITE);
3084
3085	/*
3086	 * Verify that we can export the pool and reimport it under a
3087	 * different name.
3088	 */
3089	if (ztest_random(2) == 0) {
3090		(void) snprintf(name, 100, "%s_import", pool);
3091		ztest_spa_import_export(pool, name);
3092		ztest_spa_import_export(name, pool);
3093	}
3094
3095	/*
3096	 * Verify that we can loop over all pools.
3097	 */
3098	mutex_enter(&spa_namespace_lock);
3099	for (spa = spa_next(NULL); spa != NULL; spa = spa_next(spa)) {
3100		if (zopt_verbose > 3) {
3101			(void) printf("spa_next: found %s\n", spa_name(spa));
3102		}
3103	}
3104	mutex_exit(&spa_namespace_lock);
3105
3106	/*
3107	 * Open our pool.
3108	 */
3109	error = spa_open(pool, &spa, FTAG);
3110	if (error)
3111		fatal(0, "spa_open() = %d", error);
3112
3113	/*
3114	 * Verify that we can safely inquire about any object,
3115	 * whether it's allocated or not.  To make it interesting,
3116	 * we probe a window of +/- 5 around each power of two.
3117	 * This hits all edge cases, including zero and the max.
3118	 */
3119	for (t = 0; t < 64; t++) {
3120		for (d = -5; d <= 5; d++) {
3121			error = dmu_object_info(spa->spa_meta_objset,
3122			    (1ULL << t) + d, NULL);
3123			ASSERT(error == 0 || error == ENOENT ||
3124			    error == EINVAL);
3125		}
3126	}
3127
3128	/*
3129	 * Now kick off all the tests that run in parallel.
3130	 */
3131	zs->zs_enospc_count = 0;
3132
3133	za = umem_zalloc(zopt_threads * sizeof (ztest_args_t), UMEM_NOFAIL);
3134
3135	if (zopt_verbose >= 4)
3136		(void) printf("starting main threads...\n");
3137
3138	za[0].za_start = gethrtime();
3139	za[0].za_stop = za[0].za_start + zopt_passtime * NANOSEC;
3140	za[0].za_stop = MIN(za[0].za_stop, zs->zs_stop_time);
3141	za[0].za_kill = za[0].za_stop;
3142	if (ztest_random(100) < zopt_killrate)
3143		za[0].za_kill -= ztest_random(zopt_passtime * NANOSEC);
3144
3145	for (t = 0; t < zopt_threads; t++) {
3146		d = t % zopt_datasets;
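		/*
		 * The first zopt_datasets threads each create (or reopen)
		 * their own dataset; the remaining threads share those
		 * datasets round-robin.
		 */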
3147		if (t < zopt_datasets) {
3148			ztest_replay_t zr;
3149			int test_future = FALSE;
3150			(void) rw_rdlock(&ztest_shared->zs_name_lock);
3151			(void) snprintf(name, 100, "%s/%s_%d", pool, pool, d);
3152			error = dmu_objset_create(name, DMU_OST_OTHER, NULL,
3153			    ztest_create_cb, NULL);
3154			if (error == EEXIST) {
3155				test_future = TRUE;
3156			} else if (error != 0) {
3157				if (error == ENOSPC) {
3158					zs->zs_enospc_count++;
3159					(void) rw_unlock(
3160					    &ztest_shared->zs_name_lock);
3161					break;
3162				}
3163				fatal(0, "dmu_objset_create(%s) = %d",
3164				    name, error);
3165			}
3166			error = dmu_objset_open(name, DMU_OST_OTHER,
3167			    DS_MODE_STANDARD, &za[d].za_os);
3168			if (error)
3169				fatal(0, "dmu_objset_open('%s') = %d",
3170				    name, error);
3171			(void) rw_unlock(&ztest_shared->zs_name_lock);
3172			if (test_future && ztest_shared->zs_txg > 0)
3173				ztest_dmu_check_future_leak(za[d].za_os,
3174				    ztest_shared->zs_txg);
3175			zr.zr_os = za[d].za_os;
3176			zil_replay(zr.zr_os, &zr, &zr.zr_assign,
3177			    ztest_replay_vector);
3178			za[d].za_zilog = zil_open(za[d].za_os, NULL);
3179		}
3180		za[t].za_pool = spa_strdup(pool);
3181		za[t].za_os = za[d].za_os;
3182		za[t].za_zilog = za[d].za_zilog;
3183		za[t].za_instance = t;
3184		za[t].za_random = ztest_random(-1ULL);
3185		za[t].za_start = za[0].za_start;
3186		za[t].za_stop = za[0].za_stop;
3187		za[t].za_kill = za[0].za_kill;
3188
3189		error = thr_create(0, 0, ztest_thread, &za[t], THR_BOUND,
3190		    &za[t].za_thread);
3191		if (error)
3192			fatal(0, "can't create thread %d: error %d",
3193			    t, error);
3194	}
3195	ztest_shared->zs_txg = 0;
3196
3197	while (--t >= 0) {
3198		error = thr_join(za[t].za_thread, NULL, NULL);
3199		if (error)
3200			fatal(0, "thr_join(%d) = %d", t, error);
3201		if (za[t].za_th)
3202			traverse_fini(za[t].za_th);
3203		if (t < zopt_datasets) {
3204			zil_close(za[t].za_zilog);
3205			dmu_objset_close(za[t].za_os);
3206		}
3207		spa_strfree(za[t].za_pool);
3208	}
3209
3210	umem_free(za, zopt_threads * sizeof (ztest_args_t));
3211
3212	if (zopt_verbose >= 3)
3213		show_pool_stats(spa);
3214
3215	txg_wait_synced(spa_get_dsl(spa), 0);
3216
3217	zs->zs_alloc = spa_get_alloc(spa);
3218	zs->zs_space = spa_get_space(spa);
3219
3220	/*
3221	 * Did we have out-of-space errors?  If so, destroy a random objset.
3222	 */
3223	if (zs->zs_enospc_count != 0) {
3224		(void) rw_rdlock(&ztest_shared->zs_name_lock);
3225		(void) snprintf(name, 100, "%s/%s_%d", pool, pool,
3226		    (int)ztest_random(zopt_datasets));
3227		if (zopt_verbose >= 3)
3228			(void) printf("Destroying %s to free up space\n", name);
3229		(void) dmu_objset_find(name, ztest_destroy_cb, NULL,
3230		    DS_FIND_SNAPSHOTS | DS_FIND_CHILDREN);
3231		(void) rw_unlock(&ztest_shared->zs_name_lock);
3232	}
3233
3234	txg_wait_synced(spa_get_dsl(spa), 0);
3235
3236	/*
3237	 * Right before closing the pool, kick off a bunch of async I/O;
3238	 * spa_close() should wait for it to complete.
3239	 */
3240	for (t = 1; t < 50; t++)
3241		dmu_prefetch(spa->spa_meta_objset, t, 0, 1 << 15);
3242
3243	spa_close(spa, FTAG);
3244
3245	kernel_fini();
3246}
3247
3248void
3249print_time(hrtime_t t, char *timebuf)
3250{
3251	hrtime_t s = t / NANOSEC;
3252	hrtime_t m = s / 60;
3253	hrtime_t h = m / 60;
3254	hrtime_t d = h / 24;
3255
3256	s -= m * 60;
3257	m -= h * 60;
3258	h -= d * 24;
3259
3260	timebuf[0] = '\0';
3261
3262	if (d)
3263		(void) sprintf(timebuf,
3264		    "%llud%02lluh%02llum%02llus", d, h, m, s);
3265	else if (h)
3266		(void) sprintf(timebuf, "%lluh%02llum%02llus", h, m, s);
3267	else if (m)
3268		(void) sprintf(timebuf, "%llum%02llus", m, s);
3269	else
3270		(void) sprintf(timebuf, "%llus", s);
3271}
3272
3273/*
3274 * Create a storage pool with the given name and initial vdev size.
3275	 * The datasets themselves are created later, by ztest_run().
3276 */
3277static void
3278ztest_init(char *pool)
3279{
3280	spa_t *spa;
3281	int error;
3282	nvlist_t *nvroot;
3283
3284	kernel_init(FREAD | FWRITE);
3285
3286	/*
3287	 * Create the storage pool.
3288	 */
3289	(void) spa_destroy(pool);
3290	ztest_shared->zs_vdev_primaries = 0;
3291	nvroot = make_vdev_root(zopt_vdev_size, zopt_raidz, zopt_mirrors, 1);
3292	error = spa_create(pool, nvroot, NULL);
3293	nvlist_free(nvroot);
3294
3295	if (error)
3296		fatal(0, "spa_create() = %d", error);
3297	error = spa_open(pool, &spa, FTAG);
3298	if (error)
3299		fatal(0, "spa_open() = %d", error);
3300
3301	if (zopt_verbose >= 3)
3302		show_pool_stats(spa);
3303
3304	spa_close(spa, FTAG);
3305
3306	kernel_fini();
3307}
3308
3309int
3310main(int argc, char **argv)
3311{
3312	int kills = 0;
3313	int iters = 0;
3314	int i, f;
3315	ztest_shared_t *zs;
3316	ztest_info_t *zi;
3317	char timebuf[100];
3318	char numbuf[6];
3319
3320	(void) setvbuf(stdout, NULL, _IOLBF, 0);
3321
3322	/* Override location of zpool.cache */
3323	spa_config_dir = "/tmp";
3324
3325	ztest_random_fd = open("/dev/urandom", O_RDONLY);
3326
3327	process_options(argc, argv);
3328
3329	argc -= optind;
3330	argv += optind;
3331
3332	dprintf_setup(&argc, argv);
3333
3334	/*
3335	 * Blow away any existing copy of zpool.cache
3336	 */
3337	if (zopt_init != 0)
3338		(void) remove("/tmp/zpool.cache");
3339
3340	zs = ztest_shared = (void *)mmap(0,
3341	    P2ROUNDUP(sizeof (ztest_shared_t), getpagesize()),
3342	    PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANON, -1, 0);
3343
3344	if (zopt_verbose >= 1) {
3345		(void) printf("%llu vdevs, %d datasets, %d threads,"
3346		    " %llu seconds...\n",
3347		    (u_longlong_t)zopt_vdevs, zopt_datasets, zopt_threads,
3348		    (u_longlong_t)zopt_time);
3349	}
3350
3351	/*
3352	 * Create and initialize our storage pool.
3353	 */
3354	for (i = 1; i <= zopt_init; i++) {
3355		bzero(zs, sizeof (ztest_shared_t));
3356		if (zopt_verbose >= 3 && zopt_init != 1)
3357			(void) printf("ztest_init(), pass %d\n", i);
3358		ztest_init(zopt_pool);
3359	}
3360
3361	/*
3362	 * Initialize the call targets for each function.
3363	 */
3364	for (f = 0; f < ZTEST_FUNCS; f++) {
3365		zi = &zs->zs_info[f];
3366
3367		*zi = ztest_info[f];
3368
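		/*
		 * zi_interval is the desired number of seconds between
		 * calls; an interval of zero means "as often as possible".
		 */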
3369		if (*zi->zi_interval == 0)
3370			zi->zi_call_target = UINT64_MAX;
3371		else
3372			zi->zi_call_target = zopt_time / *zi->zi_interval;
3373	}
3374
3375	zs->zs_start_time = gethrtime();
3376	zs->zs_stop_time = zs->zs_start_time + zopt_time * NANOSEC;
3377
3378	/*
3379	 * Run the tests in a loop.  These tests include fault injection
3380	 * to verify that self-healing data works, and forced crashes
3381	 * to verify that we never lose on-disk consistency.
3382	 */
3383	while (gethrtime() < zs->zs_stop_time) {
3384		int status;
3385		pid_t pid;
3386		char *tmp;
3387
3388		/*
3389		 * Initialize the workload counters for each function.
3390		 */
3391		for (f = 0; f < ZTEST_FUNCS; f++) {
3392			zi = &zs->zs_info[f];
3393			zi->zi_calls = 0;
3394			zi->zi_call_time = 0;
3395		}
3396
3397		pid = fork();
3398
3399		if (pid == -1)
3400			fatal(1, "fork failed");
3401
3402		if (pid == 0) {	/* child */
3403			struct rlimit rl = { 1024, 1024 };
3404			(void) setrlimit(RLIMIT_NOFILE, &rl);
3405			(void) enable_extended_FILE_stdio(-1, -1);
3406			ztest_run(zopt_pool);
3407			exit(0);
3408		}
3409
3410		while (waitpid(pid, &status, 0) != pid)
3411			continue;
3412
3413		if (WIFEXITED(status)) {
3414			if (WEXITSTATUS(status) != 0) {
3415				(void) fprintf(stderr,
3416				    "child exited with code %d\n",
3417				    WEXITSTATUS(status));
3418				exit(2);
3419			}
3420		} else if (WIFSIGNALED(status)) {
3421			if (WTERMSIG(status) != SIGKILL) {
3422				(void) fprintf(stderr,
3423				    "child died with signal %d\n",
3424				    WTERMSIG(status));
3425				exit(3);
3426			}
3427			kills++;
3428		} else {
3429			(void) fprintf(stderr, "something strange happened "
3430			    "to child\n");
3431			exit(4);
3432		}
3433
3434		iters++;
3435
3436		if (zopt_verbose >= 1) {
3437			hrtime_t now = gethrtime();
3438
3439			now = MIN(now, zs->zs_stop_time);
3440			print_time(zs->zs_stop_time - now, timebuf);
3441			nicenum(zs->zs_space, numbuf);
3442
3443			(void) printf("Pass %3d, %8s, %3llu ENOSPC, "
3444			    "%4.1f%% of %5s used, %3.0f%% done, %8s to go\n",
3445			    iters,
3446			    WIFEXITED(status) ? "Complete" : "SIGKILL",
3447			    (u_longlong_t)zs->zs_enospc_count,
3448			    100.0 * zs->zs_alloc / zs->zs_space,
3449			    numbuf,
3450			    100.0 * (now - zs->zs_start_time) /
3451			    (zopt_time * NANOSEC), timebuf);
3452		}
3453
3454		if (zopt_verbose >= 2) {
3455			(void) printf("\nWorkload summary:\n\n");
3456			(void) printf("%7s %9s   %s\n",
3457			    "Calls", "Time", "Function");
3458			(void) printf("%7s %9s   %s\n",
3459			    "-----", "----", "--------");
3460			for (f = 0; f < ZTEST_FUNCS; f++) {
3461				Dl_info dli;
3462
3463				zi = &zs->zs_info[f];
3464				print_time(zi->zi_call_time, timebuf);
3465				(void) dladdr((void *)zi->zi_func, &dli);
3466				(void) printf("%7llu %9s   %s\n",
3467				    (u_longlong_t)zi->zi_calls, timebuf,
3468				    dli.dli_sname);
3469			}
3470			(void) printf("\n");
3471		}
3472
3473		/*
3474		 * It's possible that we killed a child during a rename test, in
3475		 * which case we'll have a 'ztest_tmp' pool lying around instead
3476		 * of 'ztest'.  Do a blind rename in case this happened.
3477		 */
3478		tmp = umem_alloc(strlen(zopt_pool) + 5, UMEM_NOFAIL);
3479		(void) strcpy(tmp, zopt_pool);
3480		(void) strcat(tmp, "_tmp");
3481		kernel_init(FREAD | FWRITE);
3482		(void) spa_rename(tmp, zopt_pool);
3483		kernel_fini();
3484		umem_free(tmp, strlen(tmp) + 1);
3485	}
3486
3487	ztest_verify_blocks(zopt_pool);
3488
3489	if (zopt_verbose >= 1) {
3490		(void) printf("%d killed, %d completed, %.0f%% kill rate\n",
3491		    kills, iters - kills, (100.0 * kills) / MAX(1, iters));
3492	}
3493
3494	return (0);
3495}
3496