1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21/*
22 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
23 * Copyright (c) 2011, 2021 by Delphix. All rights reserved.
24 * Copyright 2016 Gary Mills
25 * Copyright (c) 2017, 2019, Datto Inc. All rights reserved.
26 * Copyright (c) 2015, Nexenta Systems, Inc. All rights reserved.
27 * Copyright 2019 Joyent, Inc.
28 */
29
30#include <sys/dsl_scan.h>
31#include <sys/dsl_pool.h>
32#include <sys/dsl_dataset.h>
33#include <sys/dsl_prop.h>
34#include <sys/dsl_dir.h>
35#include <sys/dsl_synctask.h>
36#include <sys/dnode.h>
37#include <sys/dmu_tx.h>
38#include <sys/dmu_objset.h>
39#include <sys/arc.h>
40#include <sys/zap.h>
41#include <sys/zio.h>
42#include <sys/zfs_context.h>
43#include <sys/fs/zfs.h>
44#include <sys/zfs_znode.h>
45#include <sys/spa_impl.h>
46#include <sys/vdev_impl.h>
47#include <sys/zil_impl.h>
48#include <sys/zio_checksum.h>
49#include <sys/ddt.h>
50#include <sys/sa.h>
51#include <sys/sa_impl.h>
52#include <sys/zfeature.h>
53#include <sys/abd.h>
54#include <sys/range_tree.h>
55#ifdef _KERNEL
56#include <sys/zfs_vfsops.h>
57#endif
58
59/*
60 * Grand theory statement on scan queue sorting
61 *
62 * Scanning is implemented by recursively traversing all indirection levels
63 * in an object and reading all blocks referenced from said objects. This
64 * results in us approximately traversing the object from lowest logical
65 * offset to the highest. For best performance, we would want the logical
66 * blocks to be physically contiguous. However, this is frequently not the
67 * case with pools given the allocation patterns of copy-on-write filesystems.
68 * So instead, we put the I/Os into a reordering queue and issue them in a
69 * way that will most benefit physical disks (LBA-order).
70 *
71 * Queue management:
72 *
73 * Ideally, we would want to scan all metadata and queue up all block I/O
74 * prior to starting to issue it, because that allows us to do an optimal
75 * sorting job. This can however consume large amounts of memory. Therefore
76 * we continuously monitor the size of the queues and constrain them to 5%
77 * (zfs_scan_mem_lim_fact) of physmem. If the queues grow larger than this
78 * limit, we clear out a few of the largest extents at the head of the queues
79 * to make room for more scanning. Hopefully, these extents will be fairly
80 * large and contiguous, allowing us to approach sequential I/O throughput
81 * even without a fully sorted tree.
82 *
83 * Metadata scanning takes place in dsl_scan_visit(), which is called from
84 * dsl_scan_sync() every spa_sync(). If we have either fully scanned all
85 * metadata on the pool, or we need to make room in memory because our
86 * queues are too large, dsl_scan_visit() is postponed and
87 * scan_io_queues_run() is called from dsl_scan_sync() instead. This implies
88 * that metadata scanning and queued I/O issuing are mutually exclusive. This
89 * allows us to provide maximum sequential I/O throughput for the majority of
90 * I/Os issued, since sequential I/O performance is significantly negatively
91 * impacted if it is interleaved with random I/O.
92 *
93 * Implementation Notes
94 *
95 * One side effect of the queued scanning algorithm is that the scanning code
96 * needs to be notified whenever a block is freed. This is needed to allow
97 * the scanning code to remove these I/Os from the issuing queue. Additionally,
98 * we do not attempt to queue gang blocks to be issued sequentially since this
99 * is very hard to do and would have an extremely limited performance benefit.
100 * Instead, we simply issue gang I/Os as soon as we find them using the legacy
101 * algorithm.
102 *
103 * Backwards compatibility
104 *
105 * This new algorithm is backwards compatible with the legacy on-disk data
106 * structures (and therefore does not require a new feature flag).
107 * Periodically during scanning (see zfs_scan_checkpoint_intval), the scan
108 * will stop scanning metadata (in logical order) and wait for all outstanding
109 * sorted I/O to complete. Once this is done, we write out a checkpoint
110 * bookmark, indicating that we have scanned everything logically before it.
111 * If the pool is imported on a machine without the new sorting algorithm,
112 * the scan simply resumes from the last checkpoint using the legacy algorithm.
113 */
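/*
 * Purely illustrative sketch (see dsl_scan_sync() for the real logic) of
 * the per-spa_sync() decision described above:
 *
 *	if (all metadata has been scanned || queues exceed the memory limit)
 *		scan_io_queues_run();	(issue the queued, sorted I/O)
 *	else
 *		dsl_scan_visit();	(continue scanning metadata)
 */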
114
115typedef int (scan_cb_t)(dsl_pool_t *, const blkptr_t *,
116    const zbookmark_phys_t *);
117
118static scan_cb_t dsl_scan_scrub_cb;
119
120static int scan_ds_queue_compare(const void *a, const void *b);
121static int scan_prefetch_queue_compare(const void *a, const void *b);
122static void scan_ds_queue_clear(dsl_scan_t *scn);
123static void scan_ds_prefetch_queue_clear(dsl_scan_t *scn);
124static boolean_t scan_ds_queue_contains(dsl_scan_t *scn, uint64_t dsobj,
125    uint64_t *txg);
126static void scan_ds_queue_insert(dsl_scan_t *scn, uint64_t dsobj, uint64_t txg);
127static void scan_ds_queue_remove(dsl_scan_t *scn, uint64_t dsobj);
128static void scan_ds_queue_sync(dsl_scan_t *scn, dmu_tx_t *tx);
129static uint64_t dsl_scan_count_data_disks(vdev_t *vd);
130
131extern int zfs_vdev_async_write_active_min_dirty_percent;
132
133/*
134 * By default zfs will check to ensure it is not over the hard memory
135 * limit before each txg. If finer-grained control of this is needed,
136 * this value can be set to 1 to enable checking before scanning each
137 * block.
138 */
139int zfs_scan_strict_mem_lim = B_FALSE;
140
141/*
142 * Maximum number of in-flight scan bytes per leaf vdev. We attempt
143 * to strike a balance here between keeping the vdev queues full of I/Os
144 * at all times and not overflowing the queues, which would cause long
145 * latency and therefore long txg sync times. No matter what, we will not
146 * overload the drives with I/O, since that is protected by
147 * zfs_vdev_scrub_max_active.
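 *
 * As a purely illustrative example: with the default 4 MiB limit and a
 * pool of 10 data disks, dsl_scan_init() below computes roughly 40 MiB
 * of pool-wide in-flight scan I/O (scn_maxinflight_bytes).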
148 */
149unsigned long zfs_scan_vdev_limit = 4 << 20;
150
151int zfs_scan_issue_strategy = 0;
152int zfs_scan_legacy = B_FALSE; /* don't queue & sort zios, go direct */
153unsigned long zfs_scan_max_ext_gap = 2 << 20; /* in bytes */
154
155/*
156 * fill_weight is non-tunable at runtime, so we copy it at module init from
157 * zfs_scan_fill_weight. Runtime adjustments to zfs_scan_fill_weight would
158 * break queue sorting.
159 */
160int zfs_scan_fill_weight = 3;
161static uint64_t fill_weight;
162
163/* See dsl_scan_should_clear() for details on the memory limit tunables */
164uint64_t zfs_scan_mem_lim_min = 16 << 20;	/* bytes */
165uint64_t zfs_scan_mem_lim_soft_max = 128 << 20;	/* bytes */
166int zfs_scan_mem_lim_fact = 20;		/* fraction of physmem */
167int zfs_scan_mem_lim_soft_fact = 20;	/* fraction of mem lim above */
168
169int zfs_scrub_min_time_ms = 1000; /* min millisecs to scrub per txg */
170int zfs_obsolete_min_time_ms = 500; /* min millisecs to obsolete per txg */
171int zfs_free_min_time_ms = 1000; /* min millisecs to free per txg */
172int zfs_resilver_min_time_ms = 3000; /* min millisecs to resilver per txg */
173int zfs_scan_checkpoint_intval = 7200; /* in seconds */
174int zfs_scan_suspend_progress = 0; /* set to prevent scans from progressing */
175int zfs_no_scrub_io = B_FALSE; /* set to disable scrub i/o */
176int zfs_no_scrub_prefetch = B_FALSE; /* set to disable scrub prefetch */
177enum ddt_class zfs_scrub_ddt_class_max = DDT_CLASS_DUPLICATE;
178/* max number of blocks to free in a single TXG */
179unsigned long zfs_async_block_max_blocks = ULONG_MAX;
180/* max number of dedup blocks to free in a single TXG */
181unsigned long zfs_max_async_dedup_frees = 100000;
182
183int zfs_resilver_disable_defer = 0; /* set to disable resilver deferring */
184
185/*
186 * We wait a few txgs after importing a pool to begin scanning so that
187 * the import / mounting code isn't held up by scrub / resilver IO.
188 * Unfortunately, it is a bit difficult to determine exactly how long
189 * this will take since userspace will trigger fs mounts asynchronously
190 * and the kernel will create zvol minors asynchronously. As a result,
191 * the value provided here is a bit arbitrary, but represents a
192 * reasonable estimate of how many txgs it will take to finish fully
193 * importing a pool.
194 */
195#define	SCAN_IMPORT_WAIT_TXGS 		5
196
197#define	DSL_SCAN_IS_SCRUB_RESILVER(scn) \
198	((scn)->scn_phys.scn_func == POOL_SCAN_SCRUB || \
199	(scn)->scn_phys.scn_func == POOL_SCAN_RESILVER)
200
201/*
202 * Enable/disable the processing of the free_bpobj object.
203 */
204int zfs_free_bpobj_enabled = 1;
205
206/* the order has to match pool_scan_func_t */
207static scan_cb_t *scan_funcs[POOL_SCAN_FUNCS] = {
208	NULL,
209	dsl_scan_scrub_cb,	/* POOL_SCAN_SCRUB */
210	dsl_scan_scrub_cb,	/* POOL_SCAN_RESILVER */
211};
212
213/* In core node for the scn->scn_queue. Represents a dataset to be scanned */
214typedef struct {
215	uint64_t	sds_dsobj;
216	uint64_t	sds_txg;
217	avl_node_t	sds_node;
218} scan_ds_t;
219
220/*
221 * This controls what conditions are placed on dsl_scan_sync_state():
222 * SYNC_OPTIONAL) write out scn_phys iff scn_bytes_pending == 0
223 * SYNC_MANDATORY) write out scn_phys always. scn_bytes_pending must be 0.
224 * SYNC_CACHED) if scn_bytes_pending == 0, write out scn_phys. Otherwise
225 *	write out the scn_phys_cached version.
226 * See dsl_scan_sync_state for details.
227 */
228typedef enum {
229	SYNC_OPTIONAL,
230	SYNC_MANDATORY,
231	SYNC_CACHED
232} state_sync_type_t;
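/*
 * For orientation: dsl_scan_setup_sync() and dsl_scan_cancel_sync() below
 * sync with SYNC_MANDATORY, while the scrub pause/resume synctask uses
 * SYNC_CACHED.
 */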
233
234/*
235 * This struct represents the minimum information needed to reconstruct a
236 * zio for sequential scanning. This is useful because many of these will
237 * accumulate in the sequential IO queues before being issued, so saving
238 * memory matters here.
239 */
240typedef struct scan_io {
241	/* fields from blkptr_t */
242	uint64_t		sio_blk_prop;
243	uint64_t		sio_phys_birth;
244	uint64_t		sio_birth;
245	zio_cksum_t		sio_cksum;
246	uint32_t		sio_nr_dvas;
247
248	/* fields from zio_t */
249	uint32_t		sio_flags;
250	zbookmark_phys_t	sio_zb;
251
252	/* members for queue sorting */
253	union {
254		avl_node_t	sio_addr_node; /* link into issuing queue */
255		list_node_t	sio_list_node; /* link for issuing to disk */
256	} sio_nodes;
257
258	/*
259	 * There may be up to SPA_DVAS_PER_BP DVAs here from the bp,
260	 * depending on how many were in the original bp. Only the
261	 * first DVA is really used for sorting and issuing purposes.
262	 * The other DVAs (if provided) simply exist so that the zio
263	 * layer can find additional copies to repair from in the
264	 * event of an error. This array must go at the end of the
265	 * struct so that it can hold a variable number of elements.
266	 */
267	dva_t			sio_dva[0];
268} scan_io_t;
269
270#define	SIO_SET_OFFSET(sio, x)		DVA_SET_OFFSET(&(sio)->sio_dva[0], x)
271#define	SIO_SET_ASIZE(sio, x)		DVA_SET_ASIZE(&(sio)->sio_dva[0], x)
272#define	SIO_GET_OFFSET(sio)		DVA_GET_OFFSET(&(sio)->sio_dva[0])
273#define	SIO_GET_ASIZE(sio)		DVA_GET_ASIZE(&(sio)->sio_dva[0])
274#define	SIO_GET_END_OFFSET(sio)		\
275	(SIO_GET_OFFSET(sio) + SIO_GET_ASIZE(sio))
276#define	SIO_GET_MUSED(sio)		\
277	(sizeof (scan_io_t) + ((sio)->sio_nr_dvas * sizeof (dva_t)))
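/*
 * Rough, illustrative sizing only (assuming a typical 64-bit kernel and
 * ignoring allocator padding): sizeof (scan_io_t) is on the order of 120
 * bytes, so SIO_GET_MUSED() comes to roughly 136 bytes for a single-DVA
 * block and roughly 168 bytes for a three-DVA block (a dva_t is 16 bytes).
 */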
278
279struct dsl_scan_io_queue {
280	dsl_scan_t	*q_scn; /* associated dsl_scan_t */
281	vdev_t		*q_vd; /* top-level vdev that this queue represents */
282
283	/* trees used for sorting I/Os and extents of I/Os */
284	range_tree_t	*q_exts_by_addr;
285	zfs_btree_t		q_exts_by_size;
286	avl_tree_t	q_sios_by_addr;
287	uint64_t	q_sio_memused;
288
289	/* members for zio rate limiting */
290	uint64_t	q_maxinflight_bytes;
291	uint64_t	q_inflight_bytes;
292	kcondvar_t	q_zio_cv; /* used under vd->vdev_scan_io_queue_lock */
293
294	/* per txg statistics */
295	uint64_t	q_total_seg_size_this_txg;
296	uint64_t	q_segs_this_txg;
297	uint64_t	q_total_zio_size_this_txg;
298	uint64_t	q_zios_this_txg;
299};
300
301/* private data for dsl_scan_prefetch_cb() */
302typedef struct scan_prefetch_ctx {
303	zfs_refcount_t spc_refcnt;	/* refcount for memory management */
304	dsl_scan_t *spc_scn;		/* dsl_scan_t for the pool */
305	boolean_t spc_root;		/* is this prefetch for an objset? */
306	uint8_t spc_indblkshift;	/* dn_indblkshift of current dnode */
307	uint16_t spc_datablkszsec;	/* dn_datablkszsec of current dnode */
308} scan_prefetch_ctx_t;
309
310/* private data for dsl_scan_prefetch() */
311typedef struct scan_prefetch_issue_ctx {
312	avl_node_t spic_avl_node;	/* link into scn->scn_prefetch_queue */
313	scan_prefetch_ctx_t *spic_spc;	/* spc for the callback */
314	blkptr_t spic_bp;		/* bp to prefetch */
315	zbookmark_phys_t spic_zb;	/* bookmark to prefetch */
316} scan_prefetch_issue_ctx_t;
317
318static void scan_exec_io(dsl_pool_t *dp, const blkptr_t *bp, int zio_flags,
319    const zbookmark_phys_t *zb, dsl_scan_io_queue_t *queue);
320static void scan_io_queue_insert_impl(dsl_scan_io_queue_t *queue,
321    scan_io_t *sio);
322
323static dsl_scan_io_queue_t *scan_io_queue_create(vdev_t *vd);
324static void scan_io_queues_destroy(dsl_scan_t *scn);
325
326static kmem_cache_t *sio_cache[SPA_DVAS_PER_BP];
327
328/* sio->sio_nr_dvas must be set so we know which cache to free from */
329static void
330sio_free(scan_io_t *sio)
331{
332	ASSERT3U(sio->sio_nr_dvas, >, 0);
333	ASSERT3U(sio->sio_nr_dvas, <=, SPA_DVAS_PER_BP);
334
335	kmem_cache_free(sio_cache[sio->sio_nr_dvas - 1], sio);
336}
337
338/* It is up to the caller to set sio->sio_nr_dvas for freeing */
339static scan_io_t *
340sio_alloc(unsigned short nr_dvas)
341{
342	ASSERT3U(nr_dvas, >, 0);
343	ASSERT3U(nr_dvas, <=, SPA_DVAS_PER_BP);
344
345	return (kmem_cache_alloc(sio_cache[nr_dvas - 1], KM_SLEEP));
346}
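/*
 * Illustrative pairing of sio_alloc()/sio_free() with bp2sio() (defined
 * below). This is only a sketch of how a block pointer becomes a queued
 * sio, not a copy of the real call sites:
 *
 *	scan_io_t *sio = sio_alloc(BP_GET_NDVAS(bp));
 *	bp2sio(bp, sio, dva_i);
 *	...
 *	sio_free(sio);
 */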
347
348void
349scan_init(void)
350{
351	/*
352	 * This is used in ext_size_compare() to weight segments
353	 * based on how sparse they are. This cannot be changed
354	 * mid-scan, and the tree comparison functions don't currently
355	 * have a mechanism for passing additional context to the
356	 * compare callback. Thus we store this value globally and
357	 * only allow it to be set at module initialization time.
358	 */
359	fill_weight = zfs_scan_fill_weight;
360
361	for (int i = 0; i < SPA_DVAS_PER_BP; i++) {
362		char name[36];
363
364		(void) snprintf(name, sizeof (name), "sio_cache_%d", i);
365		sio_cache[i] = kmem_cache_create(name,
366		    (sizeof (scan_io_t) + ((i + 1) * sizeof (dva_t))),
367		    0, NULL, NULL, NULL, NULL, NULL, 0);
368	}
369}
370
371void
372scan_fini(void)
373{
374	for (int i = 0; i < SPA_DVAS_PER_BP; i++) {
375		kmem_cache_destroy(sio_cache[i]);
376	}
377}
378
379static inline boolean_t
380dsl_scan_is_running(const dsl_scan_t *scn)
381{
382	return (scn->scn_phys.scn_state == DSS_SCANNING);
383}
384
385boolean_t
386dsl_scan_resilvering(dsl_pool_t *dp)
387{
388	return (dsl_scan_is_running(dp->dp_scan) &&
389	    dp->dp_scan->scn_phys.scn_func == POOL_SCAN_RESILVER);
390}
391
392static inline void
393sio2bp(const scan_io_t *sio, blkptr_t *bp)
394{
395	bzero(bp, sizeof (*bp));
396	bp->blk_prop = sio->sio_blk_prop;
397	bp->blk_phys_birth = sio->sio_phys_birth;
398	bp->blk_birth = sio->sio_birth;
399	bp->blk_fill = 1;	/* we always only work with data pointers */
400	bp->blk_cksum = sio->sio_cksum;
401
402	ASSERT3U(sio->sio_nr_dvas, >, 0);
403	ASSERT3U(sio->sio_nr_dvas, <=, SPA_DVAS_PER_BP);
404
405	bcopy(sio->sio_dva, bp->blk_dva, sio->sio_nr_dvas * sizeof (dva_t));
406}
407
408static inline void
409bp2sio(const blkptr_t *bp, scan_io_t *sio, int dva_i)
410{
411	sio->sio_blk_prop = bp->blk_prop;
412	sio->sio_phys_birth = bp->blk_phys_birth;
413	sio->sio_birth = bp->blk_birth;
414	sio->sio_cksum = bp->blk_cksum;
415	sio->sio_nr_dvas = BP_GET_NDVAS(bp);
416
417	/*
418	 * Copy the DVAs to the sio. We need all copies of the block so
419	 * that the self healing code can use the alternate copies if the
420	 * first is corrupted. We want the DVA at index dva_i to be first
421	 * in the sio since this is the primary one that we want to issue.
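	 *
	 * For example (illustrative): with BP_GET_NDVAS(bp) == 3 and
	 * dva_i == 1, the loop below copies the DVAs in the order 1, 2, 0,
	 * so DVA 1 drives sorting and issue while DVAs 2 and 0 remain
	 * available as repair sources.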
422	 */
423	for (int i = 0, j = dva_i; i < sio->sio_nr_dvas; i++, j++) {
424		sio->sio_dva[i] = bp->blk_dva[j % sio->sio_nr_dvas];
425	}
426}
427
428int
429dsl_scan_init(dsl_pool_t *dp, uint64_t txg)
430{
431	int err;
432	dsl_scan_t *scn;
433	spa_t *spa = dp->dp_spa;
434	uint64_t f;
435
436	scn = dp->dp_scan = kmem_zalloc(sizeof (dsl_scan_t), KM_SLEEP);
437	scn->scn_dp = dp;
438
439	/*
440	 * It's possible that we're resuming a scan after a reboot so
441	 * make sure that the scan_async_destroying flag is initialized
442	 * appropriately.
443	 */
444	ASSERT(!scn->scn_async_destroying);
445	scn->scn_async_destroying = spa_feature_is_active(dp->dp_spa,
446	    SPA_FEATURE_ASYNC_DESTROY);
447
448	/*
449	 * Calculate the max number of in-flight bytes for pool-wide
450	 * scanning operations (minimum 1MB). Limits for the issuing
451	 * phase are done per top-level vdev and are handled separately.
452	 */
453	scn->scn_maxinflight_bytes = MAX(zfs_scan_vdev_limit *
454	    dsl_scan_count_data_disks(spa->spa_root_vdev), 1ULL << 20);
455
456	avl_create(&scn->scn_queue, scan_ds_queue_compare, sizeof (scan_ds_t),
457	    offsetof(scan_ds_t, sds_node));
458	avl_create(&scn->scn_prefetch_queue, scan_prefetch_queue_compare,
459	    sizeof (scan_prefetch_issue_ctx_t),
460	    offsetof(scan_prefetch_issue_ctx_t, spic_avl_node));
461
462	err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
463	    "scrub_func", sizeof (uint64_t), 1, &f);
464	if (err == 0) {
465		/*
466		 * There was an old-style scrub in progress.  Restart a
467		 * new-style scrub from the beginning.
468		 */
469		scn->scn_restart_txg = txg;
470		zfs_dbgmsg("old-style scrub was in progress; "
471		    "restarting new-style scrub in txg %llu",
472		    (longlong_t)scn->scn_restart_txg);
473
474		/*
475		 * Load the queue obj from the old location so that it
476		 * can be freed by dsl_scan_done().
477		 */
478		(void) zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
479		    "scrub_queue", sizeof (uint64_t), 1,
480		    &scn->scn_phys.scn_queue_obj);
481	} else {
482		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
483		    DMU_POOL_SCAN, sizeof (uint64_t), SCAN_PHYS_NUMINTS,
484		    &scn->scn_phys);
485		/*
486		 * Detect if the pool contains the signature of #2094.  If it
487		 * does, properly update the scn->scn_phys structure and notify
488		 * the administrator by setting an errata for the pool.
489		 */
490		if (err == EOVERFLOW) {
491			uint64_t zaptmp[SCAN_PHYS_NUMINTS + 1];
492			VERIFY3S(SCAN_PHYS_NUMINTS, ==, 24);
493			VERIFY3S(offsetof(dsl_scan_phys_t, scn_flags), ==,
494			    (23 * sizeof (uint64_t)));
495
496			err = zap_lookup(dp->dp_meta_objset,
497			    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SCAN,
498			    sizeof (uint64_t), SCAN_PHYS_NUMINTS + 1, &zaptmp);
499			if (err == 0) {
500				uint64_t overflow = zaptmp[SCAN_PHYS_NUMINTS];
501
502				if (overflow & ~DSL_SCAN_FLAGS_MASK ||
503				    scn->scn_async_destroying) {
504					spa->spa_errata =
505					    ZPOOL_ERRATA_ZOL_2094_ASYNC_DESTROY;
506					return (EOVERFLOW);
507				}
508
509				bcopy(zaptmp, &scn->scn_phys,
510				    SCAN_PHYS_NUMINTS * sizeof (uint64_t));
511				scn->scn_phys.scn_flags = overflow;
512
513				/* Required scrub already in progress. */
514				if (scn->scn_phys.scn_state == DSS_FINISHED ||
515				    scn->scn_phys.scn_state == DSS_CANCELED)
516					spa->spa_errata =
517					    ZPOOL_ERRATA_ZOL_2094_SCRUB;
518			}
519		}
520
521		if (err == ENOENT)
522			return (0);
523		else if (err)
524			return (err);
525
526		/*
527		 * We might be restarting after a reboot, so jump the issued
528		 * counter to how far we've scanned. We know we're consistent
529		 * up to here.
530		 */
531		scn->scn_issued_before_pass = scn->scn_phys.scn_examined;
532
533		if (dsl_scan_is_running(scn) &&
534		    spa_prev_software_version(dp->dp_spa) < SPA_VERSION_SCAN) {
535			/*
536			 * A new-type scrub was in progress on an old
537			 * pool, and the pool was accessed by old
538			 * software.  Restart from the beginning, since
539			 * the old software may have changed the pool in
540			 * the meantime.
541			 */
542			scn->scn_restart_txg = txg;
543			zfs_dbgmsg("new-style scrub was modified "
544			    "by old software; restarting in txg %llu",
545			    (longlong_t)scn->scn_restart_txg);
546		} else if (dsl_scan_resilvering(dp)) {
547			/*
548			 * If a resilver is in progress and there are already
549			 * errors, restart it instead of finishing this scan and
550			 * then restarting it. If there haven't been any errors
551			 * then remember that the incore DTL is valid.
552			 */
553			if (scn->scn_phys.scn_errors > 0) {
554				scn->scn_restart_txg = txg;
555				zfs_dbgmsg("resilver can't excise DTL_MISSING "
556				    "when finished; restarting in txg %llu",
557				    (u_longlong_t)scn->scn_restart_txg);
558			} else {
559				/* it's safe to excise DTL when finished */
560				spa->spa_scrub_started = B_TRUE;
561			}
562		}
563	}
564
565	bcopy(&scn->scn_phys, &scn->scn_phys_cached, sizeof (scn->scn_phys));
566
567	/* reload the queue into the in-core state */
568	if (scn->scn_phys.scn_queue_obj != 0) {
569		zap_cursor_t zc;
570		zap_attribute_t za;
571
572		for (zap_cursor_init(&zc, dp->dp_meta_objset,
573		    scn->scn_phys.scn_queue_obj);
574		    zap_cursor_retrieve(&zc, &za) == 0;
575		    (void) zap_cursor_advance(&zc)) {
576			scan_ds_queue_insert(scn,
577			    zfs_strtonum(za.za_name, NULL),
578			    za.za_first_integer);
579		}
580		zap_cursor_fini(&zc);
581	}
582
583	spa_scan_stat_init(spa);
584	return (0);
585}
586
587void
588dsl_scan_fini(dsl_pool_t *dp)
589{
590	if (dp->dp_scan != NULL) {
591		dsl_scan_t *scn = dp->dp_scan;
592
593		if (scn->scn_taskq != NULL)
594			taskq_destroy(scn->scn_taskq);
595
596		scan_ds_queue_clear(scn);
597		avl_destroy(&scn->scn_queue);
598		scan_ds_prefetch_queue_clear(scn);
599		avl_destroy(&scn->scn_prefetch_queue);
600
601		kmem_free(dp->dp_scan, sizeof (dsl_scan_t));
602		dp->dp_scan = NULL;
603	}
604}
605
606static boolean_t
607dsl_scan_restarting(dsl_scan_t *scn, dmu_tx_t *tx)
608{
609	return (scn->scn_restart_txg != 0 &&
610	    scn->scn_restart_txg <= tx->tx_txg);
611}
612
613boolean_t
614dsl_scan_resilver_scheduled(dsl_pool_t *dp)
615{
616	return ((dp->dp_scan && dp->dp_scan->scn_restart_txg != 0) ||
617	    (spa_async_tasks(dp->dp_spa) & SPA_ASYNC_RESILVER));
618}
619
620boolean_t
621dsl_scan_scrubbing(const dsl_pool_t *dp)
622{
623	dsl_scan_phys_t *scn_phys = &dp->dp_scan->scn_phys;
624
625	return (scn_phys->scn_state == DSS_SCANNING &&
626	    scn_phys->scn_func == POOL_SCAN_SCRUB);
627}
628
629boolean_t
630dsl_scan_is_paused_scrub(const dsl_scan_t *scn)
631{
632	return (dsl_scan_scrubbing(scn->scn_dp) &&
633	    scn->scn_phys.scn_flags & DSF_SCRUB_PAUSED);
634}
635
636/*
637 * Writes out a persistent dsl_scan_phys_t record to the pool directory.
638 * Because we may be running the block sorting algorithm, we do not always
639 * want to write out the record, only when it is "safe" to do so. This safety
640 * condition is achieved by making sure that the sorting queues are empty
641 * (scn_bytes_pending == 0). When this condition is not true, the sync'd state
642 * is inconsistent with how much actual scanning progress has been made. The
643 * kind of sync to be performed is specified by the sync_type argument. If the
644 * sync is optional, we only sync if the queues are empty. If the sync is
645 * mandatory, we do a hard ASSERT to make sure that the queues are empty. The
646 * third possible state is a "cached" sync. This is done in response to:
647 * 1) The dataset that was in the last sync'd dsl_scan_phys_t having been
648 *	destroyed, so we wouldn't be able to restart scanning from it.
649 * 2) The snapshot that was in the last sync'd dsl_scan_phys_t having been
650 *	superseded by a newer snapshot.
651 * 3) The dataset that was in the last sync'd dsl_scan_phys_t having been
652 *	swapped with its clone.
653 * In all cases, a cached sync simply rewrites the last record we've written,
654 * just slightly modified. For the modifications that are performed to the
655 * last written dsl_scan_phys_t, see dsl_scan_ds_destroyed,
656 * dsl_scan_ds_snapshotted and dsl_scan_ds_clone_swapped.
657 */
658static void
659dsl_scan_sync_state(dsl_scan_t *scn, dmu_tx_t *tx, state_sync_type_t sync_type)
660{
661	int i;
662	spa_t *spa = scn->scn_dp->dp_spa;
663
664	ASSERT(sync_type != SYNC_MANDATORY || scn->scn_bytes_pending == 0);
665	if (scn->scn_bytes_pending == 0) {
666		for (i = 0; i < spa->spa_root_vdev->vdev_children; i++) {
667			vdev_t *vd = spa->spa_root_vdev->vdev_child[i];
668			dsl_scan_io_queue_t *q = vd->vdev_scan_io_queue;
669
670			if (q == NULL)
671				continue;
672
673			mutex_enter(&vd->vdev_scan_io_queue_lock);
674			ASSERT3P(avl_first(&q->q_sios_by_addr), ==, NULL);
675			ASSERT3P(zfs_btree_first(&q->q_exts_by_size, NULL), ==,
676			    NULL);
677			ASSERT3P(range_tree_first(q->q_exts_by_addr), ==, NULL);
678			mutex_exit(&vd->vdev_scan_io_queue_lock);
679		}
680
681		if (scn->scn_phys.scn_queue_obj != 0)
682			scan_ds_queue_sync(scn, tx);
683		VERIFY0(zap_update(scn->scn_dp->dp_meta_objset,
684		    DMU_POOL_DIRECTORY_OBJECT,
685		    DMU_POOL_SCAN, sizeof (uint64_t), SCAN_PHYS_NUMINTS,
686		    &scn->scn_phys, tx));
687		bcopy(&scn->scn_phys, &scn->scn_phys_cached,
688		    sizeof (scn->scn_phys));
689
690		if (scn->scn_checkpointing)
691			zfs_dbgmsg("finish scan checkpoint");
692
693		scn->scn_checkpointing = B_FALSE;
694		scn->scn_last_checkpoint = ddi_get_lbolt();
695	} else if (sync_type == SYNC_CACHED) {
696		VERIFY0(zap_update(scn->scn_dp->dp_meta_objset,
697		    DMU_POOL_DIRECTORY_OBJECT,
698		    DMU_POOL_SCAN, sizeof (uint64_t), SCAN_PHYS_NUMINTS,
699		    &scn->scn_phys_cached, tx));
700	}
701}
702
703/* ARGSUSED */
704int
705dsl_scan_setup_check(void *arg, dmu_tx_t *tx)
706{
707	dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;
708	vdev_t *rvd = scn->scn_dp->dp_spa->spa_root_vdev;
709
710	if (dsl_scan_is_running(scn) || vdev_rebuild_active(rvd))
711		return (SET_ERROR(EBUSY));
712
713	return (0);
714}
715
716void
717dsl_scan_setup_sync(void *arg, dmu_tx_t *tx)
718{
719	dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;
720	pool_scan_func_t *funcp = arg;
721	dmu_object_type_t ot = 0;
722	dsl_pool_t *dp = scn->scn_dp;
723	spa_t *spa = dp->dp_spa;
724
725	ASSERT(!dsl_scan_is_running(scn));
726	ASSERT(*funcp > POOL_SCAN_NONE && *funcp < POOL_SCAN_FUNCS);
727	bzero(&scn->scn_phys, sizeof (scn->scn_phys));
728	scn->scn_phys.scn_func = *funcp;
729	scn->scn_phys.scn_state = DSS_SCANNING;
730	scn->scn_phys.scn_min_txg = 0;
731	scn->scn_phys.scn_max_txg = tx->tx_txg;
732	scn->scn_phys.scn_ddt_class_max = DDT_CLASSES - 1; /* the entire DDT */
733	scn->scn_phys.scn_start_time = gethrestime_sec();
734	scn->scn_phys.scn_errors = 0;
735	scn->scn_phys.scn_to_examine = spa->spa_root_vdev->vdev_stat.vs_alloc;
736	scn->scn_issued_before_pass = 0;
737	scn->scn_restart_txg = 0;
738	scn->scn_done_txg = 0;
739	scn->scn_last_checkpoint = 0;
740	scn->scn_checkpointing = B_FALSE;
741	spa_scan_stat_init(spa);
742
743	if (DSL_SCAN_IS_SCRUB_RESILVER(scn)) {
744		scn->scn_phys.scn_ddt_class_max = zfs_scrub_ddt_class_max;
745
746		/* rewrite all disk labels */
747		vdev_config_dirty(spa->spa_root_vdev);
748
749		if (vdev_resilver_needed(spa->spa_root_vdev,
750		    &scn->scn_phys.scn_min_txg, &scn->scn_phys.scn_max_txg)) {
751			nvlist_t *aux = fnvlist_alloc();
752			fnvlist_add_string(aux, ZFS_EV_RESILVER_TYPE,
753			    "healing");
754			spa_event_notify(spa, NULL, aux,
755			    ESC_ZFS_RESILVER_START);
756			nvlist_free(aux);
757		} else {
758			spa_event_notify(spa, NULL, NULL, ESC_ZFS_SCRUB_START);
759		}
760
761		spa->spa_scrub_started = B_TRUE;
762		/*
763		 * If this is an incremental scrub, limit the DDT scrub phase
764		 * to just the auto-ditto class (for correctness); the rest
765		 * of the scrub should go faster using top-down pruning.
766		 */
767		if (scn->scn_phys.scn_min_txg > TXG_INITIAL)
768			scn->scn_phys.scn_ddt_class_max = DDT_CLASS_DITTO;
769
770		/*
771		 * When starting a resilver, clear any existing rebuild state.
772		 * This is required to prevent stale rebuild status from
773		 * being reported when a rebuild is run, then a resilver and
774		 * finally a scrub, in which case only the scrub status
775		 * should be reported by 'zpool status'.
776		 */
777		if (scn->scn_phys.scn_func == POOL_SCAN_RESILVER) {
778			vdev_t *rvd = spa->spa_root_vdev;
779			for (uint64_t i = 0; i < rvd->vdev_children; i++) {
780				vdev_t *vd = rvd->vdev_child[i];
781				vdev_rebuild_clear_sync(
782				    (void *)(uintptr_t)vd->vdev_id, tx);
783			}
784		}
785	}
786
787	/* back to the generic stuff */
788
789	if (dp->dp_blkstats == NULL) {
790		dp->dp_blkstats =
791		    vmem_alloc(sizeof (zfs_all_blkstats_t), KM_SLEEP);
792		mutex_init(&dp->dp_blkstats->zab_lock, NULL,
793		    MUTEX_DEFAULT, NULL);
794	}
795	bzero(&dp->dp_blkstats->zab_type, sizeof (dp->dp_blkstats->zab_type));
796
797	if (spa_version(spa) < SPA_VERSION_DSL_SCRUB)
798		ot = DMU_OT_ZAP_OTHER;
799
800	scn->scn_phys.scn_queue_obj = zap_create(dp->dp_meta_objset,
801	    ot ? ot : DMU_OT_SCAN_QUEUE, DMU_OT_NONE, 0, tx);
802
803	bcopy(&scn->scn_phys, &scn->scn_phys_cached, sizeof (scn->scn_phys));
804
805	dsl_scan_sync_state(scn, tx, SYNC_MANDATORY);
806
807	spa_history_log_internal(spa, "scan setup", tx,
808	    "func=%u mintxg=%llu maxtxg=%llu",
809	    *funcp, (u_longlong_t)scn->scn_phys.scn_min_txg,
810	    (u_longlong_t)scn->scn_phys.scn_max_txg);
811}
812
813/*
814 * Called by the ZFS_IOC_POOL_SCAN ioctl to start a scrub or resilver.
815 * Can also be called to resume a paused scrub.
816 */
817int
818dsl_scan(dsl_pool_t *dp, pool_scan_func_t func)
819{
820	spa_t *spa = dp->dp_spa;
821	dsl_scan_t *scn = dp->dp_scan;
822
823	/*
824	 * Purge all vdev caches and probe all devices.  We do this here
825	 * rather than in sync context because this requires a writer lock
826	 * on the spa_config lock, which we can't do from sync context.  The
827	 * spa_scrub_reopen flag indicates that vdev_open() should not
828	 * attempt to start another scrub.
829	 */
830	spa_vdev_state_enter(spa, SCL_NONE);
831	spa->spa_scrub_reopen = B_TRUE;
832	vdev_reopen(spa->spa_root_vdev);
833	spa->spa_scrub_reopen = B_FALSE;
834	(void) spa_vdev_state_exit(spa, NULL, 0);
835
836	if (func == POOL_SCAN_RESILVER) {
837		dsl_scan_restart_resilver(spa->spa_dsl_pool, 0);
838		return (0);
839	}
840
841	if (func == POOL_SCAN_SCRUB && dsl_scan_is_paused_scrub(scn)) {
842		/* got scrub start cmd, resume paused scrub */
843		int err = dsl_scrub_set_pause_resume(scn->scn_dp,
844		    POOL_SCRUB_NORMAL);
845		if (err == 0) {
846			spa_event_notify(spa, NULL, NULL, ESC_ZFS_SCRUB_RESUME);
847			return (SET_ERROR(ECANCELED));
848		}
849
850		return (SET_ERROR(err));
851	}
852
853	return (dsl_sync_task(spa_name(spa), dsl_scan_setup_check,
854	    dsl_scan_setup_sync, &func, 0, ZFS_SPACE_CHECK_EXTRA_RESERVED));
855}
856
857/* ARGSUSED */
858static void
859dsl_scan_done(dsl_scan_t *scn, boolean_t complete, dmu_tx_t *tx)
860{
861	static const char *old_names[] = {
862		"scrub_bookmark",
863		"scrub_ddt_bookmark",
864		"scrub_ddt_class_max",
865		"scrub_queue",
866		"scrub_min_txg",
867		"scrub_max_txg",
868		"scrub_func",
869		"scrub_errors",
870		NULL
871	};
872
873	dsl_pool_t *dp = scn->scn_dp;
874	spa_t *spa = dp->dp_spa;
875	int i;
876
877	/* Remove any remnants of an old-style scrub. */
878	for (i = 0; old_names[i]; i++) {
879		(void) zap_remove(dp->dp_meta_objset,
880		    DMU_POOL_DIRECTORY_OBJECT, old_names[i], tx);
881	}
882
883	if (scn->scn_phys.scn_queue_obj != 0) {
884		VERIFY0(dmu_object_free(dp->dp_meta_objset,
885		    scn->scn_phys.scn_queue_obj, tx));
886		scn->scn_phys.scn_queue_obj = 0;
887	}
888	scan_ds_queue_clear(scn);
889	scan_ds_prefetch_queue_clear(scn);
890
891	scn->scn_phys.scn_flags &= ~DSF_SCRUB_PAUSED;
892
893	/*
894	 * If we were "restarted" from a stopped state, don't bother
895	 * with anything else.
896	 */
897	if (!dsl_scan_is_running(scn)) {
898		ASSERT(!scn->scn_is_sorted);
899		return;
900	}
901
902	if (scn->scn_is_sorted) {
903		scan_io_queues_destroy(scn);
904		scn->scn_is_sorted = B_FALSE;
905
906		if (scn->scn_taskq != NULL) {
907			taskq_destroy(scn->scn_taskq);
908			scn->scn_taskq = NULL;
909		}
910	}
911
912	scn->scn_phys.scn_state = complete ? DSS_FINISHED : DSS_CANCELED;
913
914	spa_notify_waiters(spa);
915
916	if (dsl_scan_restarting(scn, tx))
917		spa_history_log_internal(spa, "scan aborted, restarting", tx,
918		    "errors=%llu", (u_longlong_t)spa_get_errlog_size(spa));
919	else if (!complete)
920		spa_history_log_internal(spa, "scan cancelled", tx,
921		    "errors=%llu", (u_longlong_t)spa_get_errlog_size(spa));
922	else
923		spa_history_log_internal(spa, "scan done", tx,
924		    "errors=%llu", (u_longlong_t)spa_get_errlog_size(spa));
925
926	if (DSL_SCAN_IS_SCRUB_RESILVER(scn)) {
927		spa->spa_scrub_active = B_FALSE;
928
929		/*
930		 * If the scrub/resilver completed, update all DTLs to
931		 * reflect this.  Whether it succeeded or not, vacate
932		 * all temporary scrub DTLs.
933		 *
934		 * As the scrub does not currently support traversing
935		 * data that have been freed but are part of a checkpoint,
936		 * we don't mark the scrub as done in the DTLs as faults
937		 * may still exist in those vdevs.
938		 */
939		if (complete &&
940		    !spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) {
941			vdev_dtl_reassess(spa->spa_root_vdev, tx->tx_txg,
942			    scn->scn_phys.scn_max_txg, B_TRUE, B_FALSE);
943
944			if (scn->scn_phys.scn_min_txg) {
945				nvlist_t *aux = fnvlist_alloc();
946				fnvlist_add_string(aux, ZFS_EV_RESILVER_TYPE,
947				    "healing");
948				spa_event_notify(spa, NULL, aux,
949				    ESC_ZFS_RESILVER_FINISH);
950				nvlist_free(aux);
951			} else {
952				spa_event_notify(spa, NULL, NULL,
953				    ESC_ZFS_SCRUB_FINISH);
954			}
955		} else {
956			vdev_dtl_reassess(spa->spa_root_vdev, tx->tx_txg,
957			    0, B_TRUE, B_FALSE);
958		}
959		spa_errlog_rotate(spa);
960
961		/*
962		 * Don't clear flag until after vdev_dtl_reassess to ensure that
963		 * DTL_MISSING will get updated when possible.
964		 */
965		spa->spa_scrub_started = B_FALSE;
966
967		/*
968		 * We may have finished replacing a device.
969		 * Let the async thread assess this and handle the detach.
970		 */
971		spa_async_request(spa, SPA_ASYNC_RESILVER_DONE);
972
973		/*
974		 * Clear any resilver_deferred flags in the config.
975		 * If there are drives that need resilvering, kick
976		 * off an asynchronous request to start resilver.
977		 * vdev_clear_resilver_deferred() may update the config
978		 * before the resilver can restart. In the event of
979		 * a crash during this period, the spa loading code
980		 * will find the drives that need to be resilvered
981		 * and start the resilver then.
982		 */
983		if (spa_feature_is_enabled(spa, SPA_FEATURE_RESILVER_DEFER) &&
984		    vdev_clear_resilver_deferred(spa->spa_root_vdev, tx)) {
985			spa_history_log_internal(spa,
986			    "starting deferred resilver", tx, "errors=%llu",
987			    (u_longlong_t)spa_get_errlog_size(spa));
988			spa_async_request(spa, SPA_ASYNC_RESILVER);
989		}
990
991		/* Clear recent error events (i.e. duplicate events tracking) */
992		if (complete)
993			zfs_ereport_clear(spa, NULL);
994	}
995
996	scn->scn_phys.scn_end_time = gethrestime_sec();
997
998	if (spa->spa_errata == ZPOOL_ERRATA_ZOL_2094_SCRUB)
999		spa->spa_errata = 0;
1000
1001	ASSERT(!dsl_scan_is_running(scn));
1002}
1003
1004/* ARGSUSED */
1005static int
1006dsl_scan_cancel_check(void *arg, dmu_tx_t *tx)
1007{
1008	dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;
1009
1010	if (!dsl_scan_is_running(scn))
1011		return (SET_ERROR(ENOENT));
1012	return (0);
1013}
1014
1015/* ARGSUSED */
1016static void
1017dsl_scan_cancel_sync(void *arg, dmu_tx_t *tx)
1018{
1019	dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;
1020
1021	dsl_scan_done(scn, B_FALSE, tx);
1022	dsl_scan_sync_state(scn, tx, SYNC_MANDATORY);
1023	spa_event_notify(scn->scn_dp->dp_spa, NULL, NULL, ESC_ZFS_SCRUB_ABORT);
1024}
1025
1026int
1027dsl_scan_cancel(dsl_pool_t *dp)
1028{
1029	return (dsl_sync_task(spa_name(dp->dp_spa), dsl_scan_cancel_check,
1030	    dsl_scan_cancel_sync, NULL, 3, ZFS_SPACE_CHECK_RESERVED));
1031}
1032
1033static int
1034dsl_scrub_pause_resume_check(void *arg, dmu_tx_t *tx)
1035{
1036	pool_scrub_cmd_t *cmd = arg;
1037	dsl_pool_t *dp = dmu_tx_pool(tx);
1038	dsl_scan_t *scn = dp->dp_scan;
1039
1040	if (*cmd == POOL_SCRUB_PAUSE) {
1041		/* can't pause a scrub when there is no in-progress scrub */
1042		if (!dsl_scan_scrubbing(dp))
1043			return (SET_ERROR(ENOENT));
1044
1045		/* can't pause a paused scrub */
1046		if (dsl_scan_is_paused_scrub(scn))
1047			return (SET_ERROR(EBUSY));
1048	} else if (*cmd != POOL_SCRUB_NORMAL) {
1049		return (SET_ERROR(ENOTSUP));
1050	}
1051
1052	return (0);
1053}
1054
1055static void
1056dsl_scrub_pause_resume_sync(void *arg, dmu_tx_t *tx)
1057{
1058	pool_scrub_cmd_t *cmd = arg;
1059	dsl_pool_t *dp = dmu_tx_pool(tx);
1060	spa_t *spa = dp->dp_spa;
1061	dsl_scan_t *scn = dp->dp_scan;
1062
1063	if (*cmd == POOL_SCRUB_PAUSE) {
1064		/* the pause/resume check has verified a scrub is in progress */
1065		spa->spa_scan_pass_scrub_pause = gethrestime_sec();
1066		scn->scn_phys.scn_flags |= DSF_SCRUB_PAUSED;
1067		scn->scn_phys_cached.scn_flags |= DSF_SCRUB_PAUSED;
1068		dsl_scan_sync_state(scn, tx, SYNC_CACHED);
1069		spa_event_notify(spa, NULL, NULL, ESC_ZFS_SCRUB_PAUSED);
1070		spa_notify_waiters(spa);
1071	} else {
1072		ASSERT3U(*cmd, ==, POOL_SCRUB_NORMAL);
1073		if (dsl_scan_is_paused_scrub(scn)) {
1074			/*
1075			 * We need to keep track of how much time we spend
1076			 * paused per pass so that we can adjust the scrub rate
1077			 * shown in the output of 'zpool status'.
1078			 */
1079			spa->spa_scan_pass_scrub_spent_paused +=
1080			    gethrestime_sec() - spa->spa_scan_pass_scrub_pause;
1081			spa->spa_scan_pass_scrub_pause = 0;
1082			scn->scn_phys.scn_flags &= ~DSF_SCRUB_PAUSED;
1083			scn->scn_phys_cached.scn_flags &= ~DSF_SCRUB_PAUSED;
1084			dsl_scan_sync_state(scn, tx, SYNC_CACHED);
1085		}
1086	}
1087}
1088
1089/*
1090 * Set scrub pause/resume state if it makes sense to do so
1091 */
1092int
1093dsl_scrub_set_pause_resume(const dsl_pool_t *dp, pool_scrub_cmd_t cmd)
1094{
1095	return (dsl_sync_task(spa_name(dp->dp_spa),
1096	    dsl_scrub_pause_resume_check, dsl_scrub_pause_resume_sync, &cmd, 3,
1097	    ZFS_SPACE_CHECK_RESERVED));
1098}
1099
1101/* start a new scan, or restart an existing one. */
1102void
1103dsl_scan_restart_resilver(dsl_pool_t *dp, uint64_t txg)
1104{
1105	if (txg == 0) {
1106		dmu_tx_t *tx;
1107		tx = dmu_tx_create_dd(dp->dp_mos_dir);
1108		VERIFY(0 == dmu_tx_assign(tx, TXG_WAIT));
1109
1110		txg = dmu_tx_get_txg(tx);
1111		dp->dp_scan->scn_restart_txg = txg;
1112		dmu_tx_commit(tx);
1113	} else {
1114		dp->dp_scan->scn_restart_txg = txg;
1115	}
1116	zfs_dbgmsg("restarting resilver txg=%llu", (longlong_t)txg);
1117}
1118
1119void
1120dsl_free(dsl_pool_t *dp, uint64_t txg, const blkptr_t *bp)
1121{
1122	zio_free(dp->dp_spa, txg, bp);
1123}
1124
1125void
1126dsl_free_sync(zio_t *pio, dsl_pool_t *dp, uint64_t txg, const blkptr_t *bpp)
1127{
1128	ASSERT(dsl_pool_sync_context(dp));
1129	zio_nowait(zio_free_sync(pio, dp->dp_spa, txg, bpp, pio->io_flags));
1130}
1131
1132static int
1133scan_ds_queue_compare(const void *a, const void *b)
1134{
1135	const scan_ds_t *sds_a = a, *sds_b = b;
1136
1137	if (sds_a->sds_dsobj < sds_b->sds_dsobj)
1138		return (-1);
1139	if (sds_a->sds_dsobj == sds_b->sds_dsobj)
1140		return (0);
1141	return (1);
1142}
1143
1144static void
1145scan_ds_queue_clear(dsl_scan_t *scn)
1146{
1147	void *cookie = NULL;
1148	scan_ds_t *sds;
1149	while ((sds = avl_destroy_nodes(&scn->scn_queue, &cookie)) != NULL) {
1150		kmem_free(sds, sizeof (*sds));
1151	}
1152}
1153
1154static boolean_t
1155scan_ds_queue_contains(dsl_scan_t *scn, uint64_t dsobj, uint64_t *txg)
1156{
1157	scan_ds_t srch, *sds;
1158
1159	srch.sds_dsobj = dsobj;
1160	sds = avl_find(&scn->scn_queue, &srch, NULL);
1161	if (sds != NULL && txg != NULL)
1162		*txg = sds->sds_txg;
1163	return (sds != NULL);
1164}
1165
1166static void
1167scan_ds_queue_insert(dsl_scan_t *scn, uint64_t dsobj, uint64_t txg)
1168{
1169	scan_ds_t *sds;
1170	avl_index_t where;
1171
1172	sds = kmem_zalloc(sizeof (*sds), KM_SLEEP);
1173	sds->sds_dsobj = dsobj;
1174	sds->sds_txg = txg;
1175
1176	VERIFY3P(avl_find(&scn->scn_queue, sds, &where), ==, NULL);
1177	avl_insert(&scn->scn_queue, sds, where);
1178}
1179
1180static void
1181scan_ds_queue_remove(dsl_scan_t *scn, uint64_t dsobj)
1182{
1183	scan_ds_t srch, *sds;
1184
1185	srch.sds_dsobj = dsobj;
1186
1187	sds = avl_find(&scn->scn_queue, &srch, NULL);
1188	VERIFY(sds != NULL);
1189	avl_remove(&scn->scn_queue, sds);
1190	kmem_free(sds, sizeof (*sds));
1191}
1192
1193static void
1194scan_ds_queue_sync(dsl_scan_t *scn, dmu_tx_t *tx)
1195{
1196	dsl_pool_t *dp = scn->scn_dp;
1197	spa_t *spa = dp->dp_spa;
1198	dmu_object_type_t ot = (spa_version(spa) >= SPA_VERSION_DSL_SCRUB) ?
1199	    DMU_OT_SCAN_QUEUE : DMU_OT_ZAP_OTHER;
1200
1201	ASSERT0(scn->scn_bytes_pending);
1202	ASSERT(scn->scn_phys.scn_queue_obj != 0);
1203
1204	VERIFY0(dmu_object_free(dp->dp_meta_objset,
1205	    scn->scn_phys.scn_queue_obj, tx));
1206	scn->scn_phys.scn_queue_obj = zap_create(dp->dp_meta_objset, ot,
1207	    DMU_OT_NONE, 0, tx);
1208	for (scan_ds_t *sds = avl_first(&scn->scn_queue);
1209	    sds != NULL; sds = AVL_NEXT(&scn->scn_queue, sds)) {
1210		VERIFY0(zap_add_int_key(dp->dp_meta_objset,
1211		    scn->scn_phys.scn_queue_obj, sds->sds_dsobj,
1212		    sds->sds_txg, tx));
1213	}
1214}
1215
1216/*
1217 * Computes the memory limit state that we're currently in. A sorted scan
1218 * needs quite a bit of memory to hold the sorting queue, so we need to
1219 * reasonably constrain the size so it doesn't impact overall system
1220 * performance. We compute two limits:
1221 * 1) Hard memory limit: if the amount of memory used by the sorting
1222 *	queues on a pool gets above this value, we stop the metadata
1223 *	scanning portion and start issuing the queued up and sorted
1224 *	I/Os to reduce memory usage.
1225 *	This limit is calculated as a fraction of physmem (by default 5%).
1226 *	We constrain the lower bound of the hard limit to an absolute
1227 *	minimum of zfs_scan_mem_lim_min (default: 16 MiB). We also constrain
1228 *	the upper bound to 5% of the total pool size - no chance we'll
1229 *	ever need that much memory, but just to keep the value in check.
1230 * 2) Soft memory limit: once we hit the hard memory limit, we start
1231 *	issuing I/O to reduce queue memory usage, but we don't want to
1232 *	completely empty out the queues, since we might be able to find I/Os
1233 *	that will fill in the gaps of our non-sequential IOs at some point
1234 *	in the future. So once the amount of memory used drops below the
1235 *	soft limit, we stop issuing I/O from the queues and go back to
1236 *	scanning metadata.
1237 *
1238 *	This limit is calculated by subtracting a fraction of the hard
1239 *	limit from the hard limit. By default this fraction is 5%, so
1240 *	the soft limit is 95% of the hard limit. We cap the size of the
1241 *	difference between the hard and soft limits at an absolute
1242 *	maximum of zfs_scan_mem_lim_soft_max (default: 128 MiB) - this is
1243 *	sufficient to not cause too frequent switching between the
1244 *	metadata scan and I/O issue (even at 2k recordsize, 128 MiB's
1245 *	worth of queues is about 1.2 GiB of on-pool data, so scanning
1246 *	that should take at least a decent fraction of a second).
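 *
 *	As a purely illustrative example: on a system with 32 GiB of
 *	physmem and the default tunables, the hard limit is
 *	MAX(32 GiB / 20, 16 MiB) = ~1.6 GiB, further capped at 5% of the
 *	allocated pool space, and the soft limit is the hard limit minus
 *	MIN(hard / 20, 128 MiB), i.e. roughly 1.6 GiB - 82 MiB.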
1247 */
1248static boolean_t
1249dsl_scan_should_clear(dsl_scan_t *scn)
1250{
1251	spa_t *spa = scn->scn_dp->dp_spa;
1252	vdev_t *rvd = scn->scn_dp->dp_spa->spa_root_vdev;
1253	uint64_t alloc, mlim_hard, mlim_soft, mused;
1254
1255	alloc = metaslab_class_get_alloc(spa_normal_class(spa));
1256	alloc += metaslab_class_get_alloc(spa_special_class(spa));
1257	alloc += metaslab_class_get_alloc(spa_dedup_class(spa));
1258
1259	mlim_hard = MAX((physmem / zfs_scan_mem_lim_fact) * PAGESIZE,
1260	    zfs_scan_mem_lim_min);
1261	mlim_hard = MIN(mlim_hard, alloc / 20);
1262	mlim_soft = mlim_hard - MIN(mlim_hard / zfs_scan_mem_lim_soft_fact,
1263	    zfs_scan_mem_lim_soft_max);
1264	mused = 0;
1265	for (uint64_t i = 0; i < rvd->vdev_children; i++) {
1266		vdev_t *tvd = rvd->vdev_child[i];
1267		dsl_scan_io_queue_t *queue;
1268
1269		mutex_enter(&tvd->vdev_scan_io_queue_lock);
1270		queue = tvd->vdev_scan_io_queue;
1271		if (queue != NULL) {
1272			/* # extents in exts_by_size = # in exts_by_addr */
1273			mused += zfs_btree_numnodes(&queue->q_exts_by_size) *
1274			    sizeof (range_seg_gap_t) + queue->q_sio_memused;
1275		}
1276		mutex_exit(&tvd->vdev_scan_io_queue_lock);
1277	}
1278
1279	dprintf("current scan memory usage: %llu bytes\n", (longlong_t)mused);
1280
1281	if (mused == 0)
1282		ASSERT0(scn->scn_bytes_pending);
1283
1284	/*
1285	 * If we are above our hard limit, we need to clear out memory.
1286	 * If we are below our soft limit, we need to accumulate sequential IOs.
1287	 * Otherwise, we should keep doing whatever we are currently doing.
1288	 */
1289	if (mused >= mlim_hard)
1290		return (B_TRUE);
1291	else if (mused < mlim_soft)
1292		return (B_FALSE);
1293	else
1294		return (scn->scn_clearing);
1295}
1296
1297static boolean_t
1298dsl_scan_check_suspend(dsl_scan_t *scn, const zbookmark_phys_t *zb)
1299{
1300	/* we never skip user/group accounting objects */
1301	if (zb && (int64_t)zb->zb_object < 0)
1302		return (B_FALSE);
1303
1304	if (scn->scn_suspending)
1305		return (B_TRUE); /* we're already suspending */
1306
1307	if (!ZB_IS_ZERO(&scn->scn_phys.scn_bookmark))
1308		return (B_FALSE); /* we're resuming */
1309
1310	/* We only know how to resume from level-0 and objset blocks. */
1311	if (zb && (zb->zb_level != 0 && zb->zb_level != ZB_ROOT_LEVEL))
1312		return (B_FALSE);
1313
1314	/*
1315	 * We suspend if:
1316	 *  - we have scanned for at least the minimum time (default 1 sec
1317	 *    for scrub, 3 sec for resilver), and either we have sufficient
1318	 *    dirty data that we are starting to write more quickly
1319	 *    (default 30%), someone is explicitly waiting for this txg
1320	 *    to complete, or we have used up all of the time in the txg
1321	 *    timeout (default 5 sec).
1322	 *  or
1323	 *  - the spa is shutting down because this pool is being exported
1324	 *    or the machine is rebooting.
1325	 *  or
1326	 *  - the scan queue has reached its memory use limit
1327	 */
1328	uint64_t curr_time_ns = gethrtime();
1329	uint64_t scan_time_ns = curr_time_ns - scn->scn_sync_start_time;
1330	uint64_t sync_time_ns = curr_time_ns -
1331	    scn->scn_dp->dp_spa->spa_sync_starttime;
1332	int dirty_pct = scn->scn_dp->dp_dirty_total * 100 / zfs_dirty_data_max;
1333	int mintime = (scn->scn_phys.scn_func == POOL_SCAN_RESILVER) ?
1334	    zfs_resilver_min_time_ms : zfs_scrub_min_time_ms;
1335
1336	if ((NSEC2MSEC(scan_time_ns) > mintime &&
1337	    (dirty_pct >= zfs_vdev_async_write_active_min_dirty_percent ||
1338	    txg_sync_waiting(scn->scn_dp) ||
1339	    NSEC2SEC(sync_time_ns) >= zfs_txg_timeout)) ||
1340	    spa_shutting_down(scn->scn_dp->dp_spa) ||
1341	    (zfs_scan_strict_mem_lim && dsl_scan_should_clear(scn))) {
1342		if (zb && zb->zb_level == ZB_ROOT_LEVEL) {
1343			dprintf("suspending at first available bookmark "
1344			    "%llx/%llx/%llx/%llx\n",
1345			    (longlong_t)zb->zb_objset,
1346			    (longlong_t)zb->zb_object,
1347			    (longlong_t)zb->zb_level,
1348			    (longlong_t)zb->zb_blkid);
1349			SET_BOOKMARK(&scn->scn_phys.scn_bookmark,
1350			    zb->zb_objset, 0, 0, 0);
1351		} else if (zb != NULL) {
1352			dprintf("suspending at bookmark %llx/%llx/%llx/%llx\n",
1353			    (longlong_t)zb->zb_objset,
1354			    (longlong_t)zb->zb_object,
1355			    (longlong_t)zb->zb_level,
1356			    (longlong_t)zb->zb_blkid);
1357			scn->scn_phys.scn_bookmark = *zb;
1358		} else {
1359#ifdef ZFS_DEBUG
1360			dsl_scan_phys_t *scnp = &scn->scn_phys;
1361			dprintf("suspending at DDT bookmark "
1362			    "%llx/%llx/%llx/%llx\n",
1363			    (longlong_t)scnp->scn_ddt_bookmark.ddb_class,
1364			    (longlong_t)scnp->scn_ddt_bookmark.ddb_type,
1365			    (longlong_t)scnp->scn_ddt_bookmark.ddb_checksum,
1366			    (longlong_t)scnp->scn_ddt_bookmark.ddb_cursor);
1367#endif
1368		}
1369		scn->scn_suspending = B_TRUE;
1370		return (B_TRUE);
1371	}
1372	return (B_FALSE);
1373}
1374
1375typedef struct zil_scan_arg {
1376	dsl_pool_t	*zsa_dp;
1377	zil_header_t	*zsa_zh;
1378} zil_scan_arg_t;
1379
1380/* ARGSUSED */
1381static int
1382dsl_scan_zil_block(zilog_t *zilog, const blkptr_t *bp, void *arg,
1383    uint64_t claim_txg)
1384{
1385	zil_scan_arg_t *zsa = arg;
1386	dsl_pool_t *dp = zsa->zsa_dp;
1387	dsl_scan_t *scn = dp->dp_scan;
1388	zil_header_t *zh = zsa->zsa_zh;
1389	zbookmark_phys_t zb;
1390
1391	ASSERT(!BP_IS_REDACTED(bp));
1392	if (BP_IS_HOLE(bp) || bp->blk_birth <= scn->scn_phys.scn_cur_min_txg)
1393		return (0);
1394
1395	/*
1396	 * One block ("stubby") may have been allocated a long time ago; we
1397	 * want to visit that one because it has been allocated
1398	 * (on-disk) even if it hasn't been claimed (even though for
1399	 * scrub there's nothing to do to it).
1400	 */
1401	if (claim_txg == 0 && bp->blk_birth >= spa_min_claim_txg(dp->dp_spa))
1402		return (0);
1403
1404	SET_BOOKMARK(&zb, zh->zh_log.blk_cksum.zc_word[ZIL_ZC_OBJSET],
1405	    ZB_ZIL_OBJECT, ZB_ZIL_LEVEL, bp->blk_cksum.zc_word[ZIL_ZC_SEQ]);
1406
1407	VERIFY(0 == scan_funcs[scn->scn_phys.scn_func](dp, bp, &zb));
1408	return (0);
1409}
1410
1411/* ARGSUSED */
1412static int
1413dsl_scan_zil_record(zilog_t *zilog, const lr_t *lrc, void *arg,
1414    uint64_t claim_txg)
1415{
1416	if (lrc->lrc_txtype == TX_WRITE) {
1417		zil_scan_arg_t *zsa = arg;
1418		dsl_pool_t *dp = zsa->zsa_dp;
1419		dsl_scan_t *scn = dp->dp_scan;
1420		zil_header_t *zh = zsa->zsa_zh;
1421		const lr_write_t *lr = (const lr_write_t *)lrc;
1422		const blkptr_t *bp = &lr->lr_blkptr;
1423		zbookmark_phys_t zb;
1424
1425		ASSERT(!BP_IS_REDACTED(bp));
1426		if (BP_IS_HOLE(bp) ||
1427		    bp->blk_birth <= scn->scn_phys.scn_cur_min_txg)
1428			return (0);
1429
1430		/*
1431		 * birth can be < claim_txg if this record's txg is
1432		 * already txg sync'ed (but this log block contains
1433		 * other records that are not synced)
1434		 */
1435		if (claim_txg == 0 || bp->blk_birth < claim_txg)
1436			return (0);
1437
1438		SET_BOOKMARK(&zb, zh->zh_log.blk_cksum.zc_word[ZIL_ZC_OBJSET],
1439		    lr->lr_foid, ZB_ZIL_LEVEL,
1440		    lr->lr_offset / BP_GET_LSIZE(bp));
1441
1442		VERIFY(0 == scan_funcs[scn->scn_phys.scn_func](dp, bp, &zb));
1443	}
1444	return (0);
1445}
1446
1447static void
1448dsl_scan_zil(dsl_pool_t *dp, zil_header_t *zh)
1449{
1450	uint64_t claim_txg = zh->zh_claim_txg;
1451	zil_scan_arg_t zsa = { dp, zh };
1452	zilog_t *zilog;
1453
1454	ASSERT(spa_writeable(dp->dp_spa));
1455
1456	/*
1457	 * We only want to visit blocks that have been claimed but not yet
1458	 * replayed (or, in read-only mode, blocks that *would* be claimed).
1459	 */
1460	if (claim_txg == 0)
1461		return;
1462
1463	zilog = zil_alloc(dp->dp_meta_objset, zh);
1464
1465	(void) zil_parse(zilog, dsl_scan_zil_block, dsl_scan_zil_record, &zsa,
1466	    claim_txg, B_FALSE);
1467
1468	zil_free(zilog);
1469}
1470
1471/*
1472 * We compare scan_prefetch_issue_ctx_t's based on their bookmarks. The idea
1473 * here is to sort the AVL tree by the order each block will be needed.
1474 */
1475static int
1476scan_prefetch_queue_compare(const void *a, const void *b)
1477{
1478	const scan_prefetch_issue_ctx_t *spic_a = a, *spic_b = b;
1479	const scan_prefetch_ctx_t *spc_a = spic_a->spic_spc;
1480	const scan_prefetch_ctx_t *spc_b = spic_b->spic_spc;
1481
1482	return (zbookmark_compare(spc_a->spc_datablkszsec,
1483	    spc_a->spc_indblkshift, spc_b->spc_datablkszsec,
1484	    spc_b->spc_indblkshift, &spic_a->spic_zb, &spic_b->spic_zb));
1485}
1486
1487static void
1488scan_prefetch_ctx_rele(scan_prefetch_ctx_t *spc, void *tag)
1489{
1490	if (zfs_refcount_remove(&spc->spc_refcnt, tag) == 0) {
1491		zfs_refcount_destroy(&spc->spc_refcnt);
1492		kmem_free(spc, sizeof (scan_prefetch_ctx_t));
1493	}
1494}
1495
1496static scan_prefetch_ctx_t *
1497scan_prefetch_ctx_create(dsl_scan_t *scn, dnode_phys_t *dnp, void *tag)
1498{
1499	scan_prefetch_ctx_t *spc;
1500
1501	spc = kmem_alloc(sizeof (scan_prefetch_ctx_t), KM_SLEEP);
1502	zfs_refcount_create(&spc->spc_refcnt);
1503	zfs_refcount_add(&spc->spc_refcnt, tag);
1504	spc->spc_scn = scn;
1505	if (dnp != NULL) {
1506		spc->spc_datablkszsec = dnp->dn_datablkszsec;
1507		spc->spc_indblkshift = dnp->dn_indblkshift;
1508		spc->spc_root = B_FALSE;
1509	} else {
1510		spc->spc_datablkszsec = 0;
1511		spc->spc_indblkshift = 0;
1512		spc->spc_root = B_TRUE;
1513	}
1514
1515	return (spc);
1516}
1517
1518static void
1519scan_prefetch_ctx_add_ref(scan_prefetch_ctx_t *spc, void *tag)
1520{
1521	zfs_refcount_add(&spc->spc_refcnt, tag);
1522}
1523
1524static void
1525scan_ds_prefetch_queue_clear(dsl_scan_t *scn)
1526{
1527	spa_t *spa = scn->scn_dp->dp_spa;
1528	void *cookie = NULL;
1529	scan_prefetch_issue_ctx_t *spic = NULL;
1530
1531	mutex_enter(&spa->spa_scrub_lock);
1532	while ((spic = avl_destroy_nodes(&scn->scn_prefetch_queue,
1533	    &cookie)) != NULL) {
1534		scan_prefetch_ctx_rele(spic->spic_spc, scn);
1535		kmem_free(spic, sizeof (scan_prefetch_issue_ctx_t));
1536	}
1537	mutex_exit(&spa->spa_scrub_lock);
1538}
1539
1540static boolean_t
1541dsl_scan_check_prefetch_resume(scan_prefetch_ctx_t *spc,
1542    const zbookmark_phys_t *zb)
1543{
1544	zbookmark_phys_t *last_zb = &spc->spc_scn->scn_prefetch_bookmark;
1545	dnode_phys_t tmp_dnp;
1546	dnode_phys_t *dnp = (spc->spc_root) ? NULL : &tmp_dnp;
1547
1548	if (zb->zb_objset != last_zb->zb_objset)
1549		return (B_TRUE);
1550	if ((int64_t)zb->zb_object < 0)
1551		return (B_FALSE);
1552
1553	tmp_dnp.dn_datablkszsec = spc->spc_datablkszsec;
1554	tmp_dnp.dn_indblkshift = spc->spc_indblkshift;
1555
1556	if (zbookmark_subtree_completed(dnp, zb, last_zb))
1557		return (B_TRUE);
1558
1559	return (B_FALSE);
1560}
1561
1562static void
1563dsl_scan_prefetch(scan_prefetch_ctx_t *spc, blkptr_t *bp, zbookmark_phys_t *zb)
1564{
1565	avl_index_t idx;
1566	dsl_scan_t *scn = spc->spc_scn;
1567	spa_t *spa = scn->scn_dp->dp_spa;
1568	scan_prefetch_issue_ctx_t *spic;
1569
1570	if (zfs_no_scrub_prefetch || BP_IS_REDACTED(bp))
1571		return;
1572
1573	if (BP_IS_HOLE(bp) || bp->blk_birth <= scn->scn_phys.scn_cur_min_txg ||
1574	    (BP_GET_LEVEL(bp) == 0 && BP_GET_TYPE(bp) != DMU_OT_DNODE &&
1575	    BP_GET_TYPE(bp) != DMU_OT_OBJSET))
1576		return;
1577
1578	if (dsl_scan_check_prefetch_resume(spc, zb))
1579		return;
1580
1581	scan_prefetch_ctx_add_ref(spc, scn);
1582	spic = kmem_alloc(sizeof (scan_prefetch_issue_ctx_t), KM_SLEEP);
1583	spic->spic_spc = spc;
1584	spic->spic_bp = *bp;
1585	spic->spic_zb = *zb;
1586
1587	/*
1588	 * Add the IO to the queue of blocks to prefetch. This allows us to
1589	 * prioritize the blocks that the main traversal thread will need
1590	 * first.
1591	 */
1592	mutex_enter(&spa->spa_scrub_lock);
1593	if (avl_find(&scn->scn_prefetch_queue, spic, &idx) != NULL) {
1594		/* this block is already queued for prefetch */
1595		kmem_free(spic, sizeof (scan_prefetch_issue_ctx_t));
1596		scan_prefetch_ctx_rele(spc, scn);
1597		mutex_exit(&spa->spa_scrub_lock);
1598		return;
1599	}
1600
1601	avl_insert(&scn->scn_prefetch_queue, spic, idx);
1602	cv_broadcast(&spa->spa_scrub_io_cv);
1603	mutex_exit(&spa->spa_scrub_lock);
1604}
1605
1606static void
1607dsl_scan_prefetch_dnode(dsl_scan_t *scn, dnode_phys_t *dnp,
1608    uint64_t objset, uint64_t object)
1609{
1610	int i;
1611	zbookmark_phys_t zb;
1612	scan_prefetch_ctx_t *spc;
1613
1614	if (dnp->dn_nblkptr == 0 && !(dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR))
1615		return;
1616
1617	SET_BOOKMARK(&zb, objset, object, 0, 0);
1618
1619	spc = scan_prefetch_ctx_create(scn, dnp, FTAG);
1620
1621	for (i = 0; i < dnp->dn_nblkptr; i++) {
1622		zb.zb_level = BP_GET_LEVEL(&dnp->dn_blkptr[i]);
1623		zb.zb_blkid = i;
1624		dsl_scan_prefetch(spc, &dnp->dn_blkptr[i], &zb);
1625	}
1626
1627	if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
1628		zb.zb_level = 0;
1629		zb.zb_blkid = DMU_SPILL_BLKID;
1630		dsl_scan_prefetch(spc, DN_SPILL_BLKPTR(dnp), &zb);
1631	}
1632
1633	scan_prefetch_ctx_rele(spc, FTAG);
1634}
1635
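/*
 * arc_read() completion callback for prefetch I/Os.  Accounts for the
 * finished I/O against spa_scrub_inflight and, for indirect, dnode and
 * objset blocks, queues prefetches for the blocks they reference.
 */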
1636static void
1637dsl_scan_prefetch_cb(zio_t *zio, const zbookmark_phys_t *zb, const blkptr_t *bp,
1638    arc_buf_t *buf, void *private)
1639{
1640	scan_prefetch_ctx_t *spc = private;
1641	dsl_scan_t *scn = spc->spc_scn;
1642	spa_t *spa = scn->scn_dp->dp_spa;
1643
1644	/* broadcast that the IO has completed for rate limiting purposes */
1645	mutex_enter(&spa->spa_scrub_lock);
1646	ASSERT3U(spa->spa_scrub_inflight, >=, BP_GET_PSIZE(bp));
1647	spa->spa_scrub_inflight -= BP_GET_PSIZE(bp);
1648	cv_broadcast(&spa->spa_scrub_io_cv);
1649	mutex_exit(&spa->spa_scrub_lock);
1650
1651	/* if there was an error or we are done prefetching, just cleanup */
1652	if (buf == NULL || scn->scn_prefetch_stop)
1653		goto out;
1654
1655	if (BP_GET_LEVEL(bp) > 0) {
1656		int i;
1657		blkptr_t *cbp;
1658		int epb = BP_GET_LSIZE(bp) >> SPA_BLKPTRSHIFT;
1659		zbookmark_phys_t czb;
1660
1661		for (i = 0, cbp = buf->b_data; i < epb; i++, cbp++) {
1662			SET_BOOKMARK(&czb, zb->zb_objset, zb->zb_object,
1663			    zb->zb_level - 1, zb->zb_blkid * epb + i);
1664			dsl_scan_prefetch(spc, cbp, &czb);
1665		}
1666	} else if (BP_GET_TYPE(bp) == DMU_OT_DNODE) {
1667		dnode_phys_t *cdnp;
1668		int i;
1669		int epb = BP_GET_LSIZE(bp) >> DNODE_SHIFT;
1670
1671		for (i = 0, cdnp = buf->b_data; i < epb;
1672		    i += cdnp->dn_extra_slots + 1,
1673		    cdnp += cdnp->dn_extra_slots + 1) {
1674			dsl_scan_prefetch_dnode(scn, cdnp,
1675			    zb->zb_objset, zb->zb_blkid * epb + i);
1676		}
1677	} else if (BP_GET_TYPE(bp) == DMU_OT_OBJSET) {
1678		objset_phys_t *osp = buf->b_data;
1679
1680		dsl_scan_prefetch_dnode(scn, &osp->os_meta_dnode,
1681		    zb->zb_objset, DMU_META_DNODE_OBJECT);
1682
1683		if (OBJSET_BUF_HAS_USERUSED(buf)) {
1684			dsl_scan_prefetch_dnode(scn,
1685			    &osp->os_groupused_dnode, zb->zb_objset,
1686			    DMU_GROUPUSED_OBJECT);
1687			dsl_scan_prefetch_dnode(scn,
1688			    &osp->os_userused_dnode, zb->zb_objset,
1689			    DMU_USERUSED_OBJECT);
1690		}
1691	}
1692
1693out:
1694	if (buf != NULL)
1695		arc_buf_destroy(buf, private);
1696	scan_prefetch_ctx_rele(spc, scn);
1697}
1698
1699/* ARGSUSED */
1700static void
1701dsl_scan_prefetch_thread(void *arg)
1702{
1703	dsl_scan_t *scn = arg;
1704	spa_t *spa = scn->scn_dp->dp_spa;
1705	scan_prefetch_issue_ctx_t *spic;
1706
1707	/* loop until we are told to stop */
1708	while (!scn->scn_prefetch_stop) {
1709		arc_flags_t flags = ARC_FLAG_NOWAIT |
1710		    ARC_FLAG_PRESCIENT_PREFETCH | ARC_FLAG_PREFETCH;
1711		int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SCAN_THREAD;
1712
1713		mutex_enter(&spa->spa_scrub_lock);
1714
1715		/*
1716		 * Wait until we have an IO to issue and are not above our
1717		 * maximum in flight limit.
1718		 */
1719		while (!scn->scn_prefetch_stop &&
1720		    (avl_numnodes(&scn->scn_prefetch_queue) == 0 ||
1721		    spa->spa_scrub_inflight >= scn->scn_maxinflight_bytes)) {
1722			cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
1723		}
1724
1725		/* recheck if we should stop since we waited for the cv */
1726		if (scn->scn_prefetch_stop) {
1727			mutex_exit(&spa->spa_scrub_lock);
1728			break;
1729		}
1730
1731		/* remove the prefetch IO from the tree */
1732		spic = avl_first(&scn->scn_prefetch_queue);
1733		spa->spa_scrub_inflight += BP_GET_PSIZE(&spic->spic_bp);
1734		avl_remove(&scn->scn_prefetch_queue, spic);
1735
1736		mutex_exit(&spa->spa_scrub_lock);
1737
1738		if (BP_IS_PROTECTED(&spic->spic_bp)) {
1739			ASSERT(BP_GET_TYPE(&spic->spic_bp) == DMU_OT_DNODE ||
1740			    BP_GET_TYPE(&spic->spic_bp) == DMU_OT_OBJSET);
1741			ASSERT3U(BP_GET_LEVEL(&spic->spic_bp), ==, 0);
1742			zio_flags |= ZIO_FLAG_RAW;
1743		}
1744
1745		/* issue the prefetch asynchronously */
1746		(void) arc_read(scn->scn_zio_root, scn->scn_dp->dp_spa,
1747		    &spic->spic_bp, dsl_scan_prefetch_cb, spic->spic_spc,
1748		    ZIO_PRIORITY_SCRUB, zio_flags, &flags, &spic->spic_zb);
1749
1750		kmem_free(spic, sizeof (scan_prefetch_issue_ctx_t));
1751	}
1752
1753	ASSERT(scn->scn_prefetch_stop);
1754
1755	/* free any prefetches we didn't get to complete */
1756	mutex_enter(&spa->spa_scrub_lock);
1757	while ((spic = avl_first(&scn->scn_prefetch_queue)) != NULL) {
1758		avl_remove(&scn->scn_prefetch_queue, spic);
1759		scan_prefetch_ctx_rele(spic->spic_spc, scn);
1760		kmem_free(spic, sizeof (scan_prefetch_issue_ctx_t));
1761	}
1762	ASSERT0(avl_numnodes(&scn->scn_prefetch_queue));
1763	mutex_exit(&spa->spa_scrub_lock);
1764}
1765
1766static boolean_t
1767dsl_scan_check_resume(dsl_scan_t *scn, const dnode_phys_t *dnp,
1768    const zbookmark_phys_t *zb)
1769{
1770	/*
1771	 * We never skip over user/group accounting objects (obj<0)
1772	 */
1773	if (!ZB_IS_ZERO(&scn->scn_phys.scn_bookmark) &&
1774	    (int64_t)zb->zb_object >= 0) {
1775		/*
1776		 * If we already visited this bp & everything below (in
1777		 * a prior txg sync), don't bother doing it again.
1778		 */
1779		if (zbookmark_subtree_completed(dnp, zb,
1780		    &scn->scn_phys.scn_bookmark))
1781			return (B_TRUE);
1782
1783		/*
1784		 * If we found the block we're trying to resume from, or
1785		 * we went past it to a different object, zero it out to
1786		 * indicate that it's OK to start checking for suspending
1787		 * again.
1788		 */
1789		if (bcmp(zb, &scn->scn_phys.scn_bookmark, sizeof (*zb)) == 0 ||
1790		    zb->zb_object > scn->scn_phys.scn_bookmark.zb_object) {
1791			dprintf("resuming at %llx/%llx/%llx/%llx\n",
1792			    (longlong_t)zb->zb_objset,
1793			    (longlong_t)zb->zb_object,
1794			    (longlong_t)zb->zb_level,
1795			    (longlong_t)zb->zb_blkid);
1796			bzero(&scn->scn_phys.scn_bookmark, sizeof (*zb));
1797		}
1798	}
1799	return (B_FALSE);
1800}
1801
1802static void dsl_scan_visitbp(blkptr_t *bp, const zbookmark_phys_t *zb,
1803    dnode_phys_t *dnp, dsl_dataset_t *ds, dsl_scan_t *scn,
1804    dmu_objset_type_t ostype, dmu_tx_t *tx);
1805inline __attribute__((always_inline)) static void dsl_scan_visitdnode(
1806    dsl_scan_t *, dsl_dataset_t *ds, dmu_objset_type_t ostype,
1807    dnode_phys_t *dnp, uint64_t object, dmu_tx_t *tx);
1808
1809/*
1810 * Recursively visit the blocks referenced by this block pointer
1811 * (indirect, dnode and objset blocks).  Return nonzero on i/o error.
1812 */
1813inline __attribute__((always_inline)) static int
1814dsl_scan_recurse(dsl_scan_t *scn, dsl_dataset_t *ds, dmu_objset_type_t ostype,
1815    dnode_phys_t *dnp, const blkptr_t *bp,
1816    const zbookmark_phys_t *zb, dmu_tx_t *tx)
1817{
1818	dsl_pool_t *dp = scn->scn_dp;
1819	int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SCAN_THREAD;
1820	int err;
1821
1822	ASSERT(!BP_IS_REDACTED(bp));
1823
1824	if (BP_GET_LEVEL(bp) > 0) {
1825		arc_flags_t flags = ARC_FLAG_WAIT;
1826		int i;
1827		blkptr_t *cbp;
1828		int epb = BP_GET_LSIZE(bp) >> SPA_BLKPTRSHIFT;
1829		arc_buf_t *buf;
1830
1831		err = arc_read(NULL, dp->dp_spa, bp, arc_getbuf_func, &buf,
1832		    ZIO_PRIORITY_SCRUB, zio_flags, &flags, zb);
1833		if (err) {
1834			scn->scn_phys.scn_errors++;
1835			return (err);
1836		}
1837		for (i = 0, cbp = buf->b_data; i < epb; i++, cbp++) {
1838			zbookmark_phys_t czb;
1839
1840			SET_BOOKMARK(&czb, zb->zb_objset, zb->zb_object,
1841			    zb->zb_level - 1,
1842			    zb->zb_blkid * epb + i);
1843			dsl_scan_visitbp(cbp, &czb, dnp,
1844			    ds, scn, ostype, tx);
1845		}
1846		arc_buf_destroy(buf, &buf);
1847	} else if (BP_GET_TYPE(bp) == DMU_OT_DNODE) {
1848		arc_flags_t flags = ARC_FLAG_WAIT;
1849		dnode_phys_t *cdnp;
1850		int i;
1851		int epb = BP_GET_LSIZE(bp) >> DNODE_SHIFT;
1852		arc_buf_t *buf;
1853
1854		if (BP_IS_PROTECTED(bp)) {
1855			ASSERT3U(BP_GET_COMPRESS(bp), ==, ZIO_COMPRESS_OFF);
1856			zio_flags |= ZIO_FLAG_RAW;
1857		}
1858
1859		err = arc_read(NULL, dp->dp_spa, bp, arc_getbuf_func, &buf,
1860		    ZIO_PRIORITY_SCRUB, zio_flags, &flags, zb);
1861		if (err) {
1862			scn->scn_phys.scn_errors++;
1863			return (err);
1864		}
1865		for (i = 0, cdnp = buf->b_data; i < epb;
1866		    i += cdnp->dn_extra_slots + 1,
1867		    cdnp += cdnp->dn_extra_slots + 1) {
1868			dsl_scan_visitdnode(scn, ds, ostype,
1869			    cdnp, zb->zb_blkid * epb + i, tx);
1870		}
1871
1872		arc_buf_destroy(buf, &buf);
1873	} else if (BP_GET_TYPE(bp) == DMU_OT_OBJSET) {
1874		arc_flags_t flags = ARC_FLAG_WAIT;
1875		objset_phys_t *osp;
1876		arc_buf_t *buf;
1877
1878		err = arc_read(NULL, dp->dp_spa, bp, arc_getbuf_func, &buf,
1879		    ZIO_PRIORITY_SCRUB, zio_flags, &flags, zb);
1880		if (err) {
1881			scn->scn_phys.scn_errors++;
1882			return (err);
1883		}
1884
1885		osp = buf->b_data;
1886
1887		dsl_scan_visitdnode(scn, ds, osp->os_type,
1888		    &osp->os_meta_dnode, DMU_META_DNODE_OBJECT, tx);
1889
1890		if (OBJSET_BUF_HAS_USERUSED(buf)) {
1891			/*
1892			 * We also always visit user/group/project accounting
1893			 * objects, and never skip them, even if we are
1894			 * suspending. This is necessary so that the
1895			 * space deltas from this txg get integrated.
1896			 */
1897			if (OBJSET_BUF_HAS_PROJECTUSED(buf))
1898				dsl_scan_visitdnode(scn, ds, osp->os_type,
1899				    &osp->os_projectused_dnode,
1900				    DMU_PROJECTUSED_OBJECT, tx);
1901			dsl_scan_visitdnode(scn, ds, osp->os_type,
1902			    &osp->os_groupused_dnode,
1903			    DMU_GROUPUSED_OBJECT, tx);
1904			dsl_scan_visitdnode(scn, ds, osp->os_type,
1905			    &osp->os_userused_dnode,
1906			    DMU_USERUSED_OBJECT, tx);
1907		}
1908		arc_buf_destroy(buf, &buf);
1909	}
1910
1911	return (0);
1912}
1913
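/*
 * Visit each of a dnode's block pointers (and its spill block, if any),
 * descending into each via dsl_scan_visitbp().
 */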
1914inline __attribute__((always_inline)) static void
1915dsl_scan_visitdnode(dsl_scan_t *scn, dsl_dataset_t *ds,
1916    dmu_objset_type_t ostype, dnode_phys_t *dnp,
1917    uint64_t object, dmu_tx_t *tx)
1918{
1919	int j;
1920
1921	for (j = 0; j < dnp->dn_nblkptr; j++) {
1922		zbookmark_phys_t czb;
1923
1924		SET_BOOKMARK(&czb, ds ? ds->ds_object : 0, object,
1925		    dnp->dn_nlevels - 1, j);
1926		dsl_scan_visitbp(&dnp->dn_blkptr[j],
1927		    &czb, dnp, ds, scn, ostype, tx);
1928	}
1929
1930	if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
1931		zbookmark_phys_t czb;
1932		SET_BOOKMARK(&czb, ds ? ds->ds_object : 0, object,
1933		    0, DMU_SPILL_BLKID);
1934		dsl_scan_visitbp(DN_SPILL_BLKPTR(dnp),
1935		    &czb, dnp, ds, scn, ostype, tx);
1936	}
1937}
1938
1939/*
1940 * The arguments are in this order because mdb can only print the
1941 * first 5; we want them to be useful.
1942 */
1943static void
1944dsl_scan_visitbp(blkptr_t *bp, const zbookmark_phys_t *zb,
1945    dnode_phys_t *dnp, dsl_dataset_t *ds, dsl_scan_t *scn,
1946    dmu_objset_type_t ostype, dmu_tx_t *tx)
1947{
1948	dsl_pool_t *dp = scn->scn_dp;
1949	blkptr_t *bp_toread = NULL;
1950
1951	if (dsl_scan_check_suspend(scn, zb))
1952		return;
1953
1954	if (dsl_scan_check_resume(scn, dnp, zb))
1955		return;
1956
1957	scn->scn_visited_this_txg++;
1958
1959	/*
1960	 * This debugging is commented out to conserve stack space.  This
1961	 * function is called recursively and the debugging adds several
1962	 * bytes to the stack for each call.  It can be commented back in
1963	 * if required to debug an issue in dsl_scan_visitbp().
1964	 *
1965	 * dprintf_bp(bp,
1966	 *     "visiting ds=%p/%llu zb=%llx/%llx/%llx/%llx bp=%p",
1967	 *     ds, ds ? ds->ds_object : 0,
1968	 *     zb->zb_objset, zb->zb_object, zb->zb_level, zb->zb_blkid,
1969	 *     bp);
1970	 */
1971
1972	if (BP_IS_HOLE(bp)) {
1973		scn->scn_holes_this_txg++;
1974		return;
1975	}
1976
1977	if (BP_IS_REDACTED(bp)) {
1978		ASSERT(dsl_dataset_feature_is_active(ds,
1979		    SPA_FEATURE_REDACTED_DATASETS));
1980		return;
1981	}
1982
1983	if (bp->blk_birth <= scn->scn_phys.scn_cur_min_txg) {
1984		scn->scn_lt_min_this_txg++;
1985		return;
1986	}
1987
1988	bp_toread = kmem_alloc(sizeof (blkptr_t), KM_SLEEP);
1989	*bp_toread = *bp;
1990
1991	if (dsl_scan_recurse(scn, ds, ostype, dnp, bp_toread, zb, tx) != 0)
1992		goto out;
1993
1994	/*
1995	 * If dsl_scan_ddt() has already visited this block, it will have
1996	 * already done any translations or scrubbing, so don't call the
1997	 * callback again.
1998	 */
1999	if (ddt_class_contains(dp->dp_spa,
2000	    scn->scn_phys.scn_ddt_class_max, bp)) {
2001		scn->scn_ddt_contained_this_txg++;
2002		goto out;
2003	}
2004
2005	/*
2006	 * If this block is from the future (after cur_max_txg), then we
2007	 * are doing this on behalf of a deleted snapshot, and we will
2008	 * revisit the future block on the next pass of this dataset.
2009	 * Don't scan it now unless we need to because something
2010	 * under it was modified.
2011	 */
2012	if (BP_PHYSICAL_BIRTH(bp) > scn->scn_phys.scn_cur_max_txg) {
2013		scn->scn_gt_max_this_txg++;
2014		goto out;
2015	}
2016
2017	scan_funcs[scn->scn_phys.scn_func](dp, bp, zb);
2018
2019out:
2020	kmem_free(bp_toread, sizeof (blkptr_t));
2021}
2022
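/*
 * Scan a single root block pointer (the MOS rootbp when ds is NULL,
 * otherwise the dataset's rootbp): set the prefetch resume bookmark,
 * kick off prefetching from the root, and visit the block tree.
 */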
2023static void
2024dsl_scan_visit_rootbp(dsl_scan_t *scn, dsl_dataset_t *ds, blkptr_t *bp,
2025    dmu_tx_t *tx)
2026{
2027	zbookmark_phys_t zb;
2028	scan_prefetch_ctx_t *spc;
2029
2030	SET_BOOKMARK(&zb, ds ? ds->ds_object : DMU_META_OBJSET,
2031	    ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);
2032
2033	if (ZB_IS_ZERO(&scn->scn_phys.scn_bookmark)) {
2034		SET_BOOKMARK(&scn->scn_prefetch_bookmark,
2035		    zb.zb_objset, 0, 0, 0);
2036	} else {
2037		scn->scn_prefetch_bookmark = scn->scn_phys.scn_bookmark;
2038	}
2039
2040	scn->scn_objsets_visited_this_txg++;
2041
2042	spc = scan_prefetch_ctx_create(scn, NULL, FTAG);
2043	dsl_scan_prefetch(spc, bp, &zb);
2044	scan_prefetch_ctx_rele(spc, FTAG);
2045
2046	dsl_scan_visitbp(bp, &zb, NULL, ds, scn, DMU_OST_NONE, tx);
2047
2048	dprintf_ds(ds, "finished scan%s", "");
2049}
2050
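/*
 * Adjust a dsl_scan_phys_t when the dataset it is currently traversing
 * is destroyed: snapshots are replaced by the next snapshot in line,
 * while heads have their bookmark reset to ZB_DESTROYED_OBJSET.
 */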
2051static void
2052ds_destroyed_scn_phys(dsl_dataset_t *ds, dsl_scan_phys_t *scn_phys)
2053{
2054	if (scn_phys->scn_bookmark.zb_objset == ds->ds_object) {
2055		if (ds->ds_is_snapshot) {
2056			/*
2057			 * Note:
2058			 *  - scn_cur_{min,max}_txg stays the same.
2059			 *  - Setting the flag is not really necessary if
2060			 *    scn_cur_max_txg == scn_max_txg, because there
2061			 *    is nothing after this snapshot that we care
2062			 *    about.  However, we set it anyway and then
2063			 *    ignore it when we retraverse it in
2064			 *    dsl_scan_visitds().
2065			 */
2066			scn_phys->scn_bookmark.zb_objset =
2067			    dsl_dataset_phys(ds)->ds_next_snap_obj;
2068			zfs_dbgmsg("destroying ds %llu; currently traversing; "
2069			    "reset zb_objset to %llu",
2070			    (u_longlong_t)ds->ds_object,
2071			    (u_longlong_t)dsl_dataset_phys(ds)->
2072			    ds_next_snap_obj);
2073			scn_phys->scn_flags |= DSF_VISIT_DS_AGAIN;
2074		} else {
2075			SET_BOOKMARK(&scn_phys->scn_bookmark,
2076			    ZB_DESTROYED_OBJSET, 0, 0, 0);
2077			zfs_dbgmsg("destroying ds %llu; currently traversing; "
2078			    "reset bookmark to -1,0,0,0",
2079			    (u_longlong_t)ds->ds_object);
2080		}
2081	}
2082}
2083
2084/*
2085 * Invoked when a dataset is destroyed. We need to make sure that:
2086 *
2087 * 1) If it is the dataset currently being scanned, we write a new
2088 *	dsl_scan_phys_t and mark the objset reference in it as
2089 *	destroyed.
2090 * 2) Remove it from the work queue, if it was present.
2091 *
2092 * If the dataset was actually a snapshot, instead of marking the dataset
2093 * as destroyed, we instead substitute the next snapshot in line.
2094 */
2095void
2096dsl_scan_ds_destroyed(dsl_dataset_t *ds, dmu_tx_t *tx)
2097{
2098	dsl_pool_t *dp = ds->ds_dir->dd_pool;
2099	dsl_scan_t *scn = dp->dp_scan;
2100	uint64_t mintxg;
2101
2102	if (!dsl_scan_is_running(scn))
2103		return;
2104
2105	ds_destroyed_scn_phys(ds, &scn->scn_phys);
2106	ds_destroyed_scn_phys(ds, &scn->scn_phys_cached);
2107
2108	if (scan_ds_queue_contains(scn, ds->ds_object, &mintxg)) {
2109		scan_ds_queue_remove(scn, ds->ds_object);
2110		if (ds->ds_is_snapshot)
2111			scan_ds_queue_insert(scn,
2112			    dsl_dataset_phys(ds)->ds_next_snap_obj, mintxg);
2113	}
2114
2115	if (zap_lookup_int_key(dp->dp_meta_objset, scn->scn_phys.scn_queue_obj,
2116	    ds->ds_object, &mintxg) == 0) {
2117		ASSERT3U(dsl_dataset_phys(ds)->ds_num_children, <=, 1);
2118		VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
2119		    scn->scn_phys.scn_queue_obj, ds->ds_object, tx));
2120		if (ds->ds_is_snapshot) {
2121			/*
2122			 * We keep the same mintxg; it could be >
2123			 * ds_creation_txg if the previous snapshot was
2124			 * deleted too.
2125			 */
2126			VERIFY(zap_add_int_key(dp->dp_meta_objset,
2127			    scn->scn_phys.scn_queue_obj,
2128			    dsl_dataset_phys(ds)->ds_next_snap_obj,
2129			    mintxg, tx) == 0);
2130			zfs_dbgmsg("destroying ds %llu; in queue; "
2131			    "replacing with %llu",
2132			    (u_longlong_t)ds->ds_object,
2133			    (u_longlong_t)dsl_dataset_phys(ds)->
2134			    ds_next_snap_obj);
2135		} else {
2136			zfs_dbgmsg("destroying ds %llu; in queue; removing",
2137			    (u_longlong_t)ds->ds_object);
2138		}
2139	}
2140
2141	/*
2142	 * dsl_scan_sync() should be called after this, and should sync
2143	 * out our changed state, but just to be safe, do it here.
2144	 */
2145	dsl_scan_sync_state(scn, tx, SYNC_CACHED);
2146}
2147
2148static void
2149ds_snapshotted_bookmark(dsl_dataset_t *ds, zbookmark_phys_t *scn_bookmark)
2150{
2151	if (scn_bookmark->zb_objset == ds->ds_object) {
2152		scn_bookmark->zb_objset =
2153		    dsl_dataset_phys(ds)->ds_prev_snap_obj;
2154		zfs_dbgmsg("snapshotting ds %llu; currently traversing; "
2155		    "reset zb_objset to %llu",
2156		    (u_longlong_t)ds->ds_object,
2157		    (u_longlong_t)dsl_dataset_phys(ds)->ds_prev_snap_obj);
2158	}
2159}
2160
2161/*
2162 * Called when a dataset is snapshotted. If we were currently traversing
2163 * this snapshot, we reset our bookmark to point at the newly created
2164 * snapshot. We also modify our work queue to remove the old snapshot and
2165 * replace it with the new one.
2166 */
2167void
2168dsl_scan_ds_snapshotted(dsl_dataset_t *ds, dmu_tx_t *tx)
2169{
2170	dsl_pool_t *dp = ds->ds_dir->dd_pool;
2171	dsl_scan_t *scn = dp->dp_scan;
2172	uint64_t mintxg;
2173
2174	if (!dsl_scan_is_running(scn))
2175		return;
2176
2177	ASSERT(dsl_dataset_phys(ds)->ds_prev_snap_obj != 0);
2178
2179	ds_snapshotted_bookmark(ds, &scn->scn_phys.scn_bookmark);
2180	ds_snapshotted_bookmark(ds, &scn->scn_phys_cached.scn_bookmark);
2181
2182	if (scan_ds_queue_contains(scn, ds->ds_object, &mintxg)) {
2183		scan_ds_queue_remove(scn, ds->ds_object);
2184		scan_ds_queue_insert(scn,
2185		    dsl_dataset_phys(ds)->ds_prev_snap_obj, mintxg);
2186	}
2187
2188	if (zap_lookup_int_key(dp->dp_meta_objset, scn->scn_phys.scn_queue_obj,
2189	    ds->ds_object, &mintxg) == 0) {
2190		VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
2191		    scn->scn_phys.scn_queue_obj, ds->ds_object, tx));
2192		VERIFY(zap_add_int_key(dp->dp_meta_objset,
2193		    scn->scn_phys.scn_queue_obj,
2194		    dsl_dataset_phys(ds)->ds_prev_snap_obj, mintxg, tx) == 0);
2195		zfs_dbgmsg("snapshotting ds %llu; in queue; "
2196		    "replacing with %llu",
2197		    (u_longlong_t)ds->ds_object,
2198		    (u_longlong_t)dsl_dataset_phys(ds)->ds_prev_snap_obj);
2199	}
2200
2201	dsl_scan_sync_state(scn, tx, SYNC_CACHED);
2202}
2203
2204static void
2205ds_clone_swapped_bookmark(dsl_dataset_t *ds1, dsl_dataset_t *ds2,
2206    zbookmark_phys_t *scn_bookmark)
2207{
2208	if (scn_bookmark->zb_objset == ds1->ds_object) {
2209		scn_bookmark->zb_objset = ds2->ds_object;
2210		zfs_dbgmsg("clone_swap ds %llu; currently traversing; "
2211		    "reset zb_objset to %llu",
2212		    (u_longlong_t)ds1->ds_object,
2213		    (u_longlong_t)ds2->ds_object);
2214	} else if (scn_bookmark->zb_objset == ds2->ds_object) {
2215		scn_bookmark->zb_objset = ds1->ds_object;
2216		zfs_dbgmsg("clone_swap ds %llu; currently traversing; "
2217		    "reset zb_objset to %llu",
2218		    (u_longlong_t)ds2->ds_object,
2219		    (u_longlong_t)ds1->ds_object);
2220	}
2221}
2222
2223/*
2224 * Called when an origin dataset and its clone are swapped.  If we were
2225 * currently traversing the dataset, we need to switch to traversing the
2226 * newly promoted clone.
2227 */
2228void
2229dsl_scan_ds_clone_swapped(dsl_dataset_t *ds1, dsl_dataset_t *ds2, dmu_tx_t *tx)
2230{
2231	dsl_pool_t *dp = ds1->ds_dir->dd_pool;
2232	dsl_scan_t *scn = dp->dp_scan;
2233	uint64_t mintxg1, mintxg2;
2234	boolean_t ds1_queued, ds2_queued;
2235
2236	if (!dsl_scan_is_running(scn))
2237		return;
2238
2239	ds_clone_swapped_bookmark(ds1, ds2, &scn->scn_phys.scn_bookmark);
2240	ds_clone_swapped_bookmark(ds1, ds2, &scn->scn_phys_cached.scn_bookmark);
2241
2242	/*
2243	 * Handle the in-memory scan queue.
2244	 */
2245	ds1_queued = scan_ds_queue_contains(scn, ds1->ds_object, &mintxg1);
2246	ds2_queued = scan_ds_queue_contains(scn, ds2->ds_object, &mintxg2);
2247
2248	/* Sanity checking. */
2249	if (ds1_queued) {
2250		ASSERT3U(mintxg1, ==, dsl_dataset_phys(ds1)->ds_prev_snap_txg);
2251		ASSERT3U(mintxg1, ==, dsl_dataset_phys(ds2)->ds_prev_snap_txg);
2252	}
2253	if (ds2_queued) {
2254		ASSERT3U(mintxg2, ==, dsl_dataset_phys(ds1)->ds_prev_snap_txg);
2255		ASSERT3U(mintxg2, ==, dsl_dataset_phys(ds2)->ds_prev_snap_txg);
2256	}
2257
2258	if (ds1_queued && ds2_queued) {
2259		/*
2260		 * If both are queued, we don't need to do anything.
2261		 * The swapping code below would not handle this case correctly,
2262		 * since we can't insert ds2 if it is already there. That's
2263		 * because scan_ds_queue_insert() prohibits a duplicate insert
2264		 * and panics.
2265		 */
2266	} else if (ds1_queued) {
2267		scan_ds_queue_remove(scn, ds1->ds_object);
2268		scan_ds_queue_insert(scn, ds2->ds_object, mintxg1);
2269	} else if (ds2_queued) {
2270		scan_ds_queue_remove(scn, ds2->ds_object);
2271		scan_ds_queue_insert(scn, ds1->ds_object, mintxg2);
2272	}
2273
2274	/*
2275	 * Handle the on-disk scan queue.
2276	 * The on-disk state is an out-of-date version of the in-memory state,
2277	 * so the in-memory and on-disk values for ds1_queued and ds2_queued may
2278	 * be different. Therefore we need to apply the swap logic to the
2279	 * on-disk state independently of the in-memory state.
2280	 */
2281	ds1_queued = zap_lookup_int_key(dp->dp_meta_objset,
2282	    scn->scn_phys.scn_queue_obj, ds1->ds_object, &mintxg1) == 0;
2283	ds2_queued = zap_lookup_int_key(dp->dp_meta_objset,
2284	    scn->scn_phys.scn_queue_obj, ds2->ds_object, &mintxg2) == 0;
2285
2286	/* Sanity checking. */
2287	if (ds1_queued) {
2288		ASSERT3U(mintxg1, ==, dsl_dataset_phys(ds1)->ds_prev_snap_txg);
2289		ASSERT3U(mintxg1, ==, dsl_dataset_phys(ds2)->ds_prev_snap_txg);
2290	}
2291	if (ds2_queued) {
2292		ASSERT3U(mintxg2, ==, dsl_dataset_phys(ds1)->ds_prev_snap_txg);
2293		ASSERT3U(mintxg2, ==, dsl_dataset_phys(ds2)->ds_prev_snap_txg);
2294	}
2295
2296	if (ds1_queued && ds2_queued) {
2297		/*
2298		 * If both are queued, we don't need to do anything.
2299		 * Alternatively, we could check for EEXIST from
2300		 * zap_add_int_key() and back out to the original state, but
2301		 * that would be more work than checking for this case upfront.
2302		 */
2303	} else if (ds1_queued) {
2304		VERIFY3S(0, ==, zap_remove_int(dp->dp_meta_objset,
2305		    scn->scn_phys.scn_queue_obj, ds1->ds_object, tx));
2306		VERIFY3S(0, ==, zap_add_int_key(dp->dp_meta_objset,
2307		    scn->scn_phys.scn_queue_obj, ds2->ds_object, mintxg1, tx));
2308		zfs_dbgmsg("clone_swap ds %llu; in queue; "
2309		    "replacing with %llu",
2310		    (u_longlong_t)ds1->ds_object,
2311		    (u_longlong_t)ds2->ds_object);
2312	} else if (ds2_queued) {
2313		VERIFY3S(0, ==, zap_remove_int(dp->dp_meta_objset,
2314		    scn->scn_phys.scn_queue_obj, ds2->ds_object, tx));
2315		VERIFY3S(0, ==, zap_add_int_key(dp->dp_meta_objset,
2316		    scn->scn_phys.scn_queue_obj, ds1->ds_object, mintxg2, tx));
2317		zfs_dbgmsg("clone_swap ds %llu; in queue; "
2318		    "replacing with %llu",
2319		    (u_longlong_t)ds2->ds_object,
2320		    (u_longlong_t)ds1->ds_object);
2321	}
2322
2323	dsl_scan_sync_state(scn, tx, SYNC_CACHED);
2324}
2325
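/*
 * dmu_objset_find_dp() callback: if this dataset is a clone of the origin
 * snapshot named by 'arg', walk back to its oldest snapshot that branches
 * directly off the origin and enqueue that dataset for scanning.
 */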
2326/* ARGSUSED */
2327static int
2328enqueue_clones_cb(dsl_pool_t *dp, dsl_dataset_t *hds, void *arg)
2329{
2330	uint64_t originobj = *(uint64_t *)arg;
2331	dsl_dataset_t *ds;
2332	int err;
2333	dsl_scan_t *scn = dp->dp_scan;
2334
2335	if (dsl_dir_phys(hds->ds_dir)->dd_origin_obj != originobj)
2336		return (0);
2337
2338	err = dsl_dataset_hold_obj(dp, hds->ds_object, FTAG, &ds);
2339	if (err)
2340		return (err);
2341
2342	while (dsl_dataset_phys(ds)->ds_prev_snap_obj != originobj) {
2343		dsl_dataset_t *prev;
2344		err = dsl_dataset_hold_obj(dp,
2345		    dsl_dataset_phys(ds)->ds_prev_snap_obj, FTAG, &prev);
2346
2347		dsl_dataset_rele(ds, FTAG);
2348		if (err)
2349			return (err);
2350		ds = prev;
2351	}
2352	scan_ds_queue_insert(scn, ds->ds_object,
2353	    dsl_dataset_phys(ds)->ds_prev_snap_txg);
2354	dsl_dataset_rele(ds, FTAG);
2355	return (0);
2356}
2357
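/*
 * Scan one dataset: traverse its ZIL (for non-snapshot heads), visit its
 * block tree from the rootbp, and then enqueue its next snapshot and any
 * clones so the scan continues down the dataset hierarchy.
 */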
2358static void
2359dsl_scan_visitds(dsl_scan_t *scn, uint64_t dsobj, dmu_tx_t *tx)
2360{
2361	dsl_pool_t *dp = scn->scn_dp;
2362	dsl_dataset_t *ds;
2363
2364	VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));
2365
2366	if (scn->scn_phys.scn_cur_min_txg >=
2367	    scn->scn_phys.scn_max_txg) {
2368		/*
2369		 * This can happen if this snapshot was created after the
2370		 * scan started, and we already completed a previous snapshot
2371		 * that was created after the scan started.  This snapshot
2372		 * only references blocks with:
2373		 *
2374		 *	birth < our ds_creation_txg
2375		 *	cur_min_txg is no less than ds_creation_txg.
2376		 *	We have already visited these blocks.
2377		 * or
2378		 *	birth > scn_max_txg
2379		 *	The scan requested not to visit these blocks.
2380		 *
2381		 * Subsequent snapshots (and clones) can reference our
2382		 * blocks, or blocks with even higher birth times.
2383		 * Therefore we do not need to visit them either,
2384		 * so we do not add them to the work queue.
2385		 *
2386		 * Note that checking for cur_min_txg >= cur_max_txg
2387		 * is not sufficient, because in that case we may need to
2388		 * visit subsequent snapshots.  This happens when min_txg > 0,
2389		 * which raises cur_min_txg.  In this case we will visit
2390		 * this dataset but skip all of its blocks, because the
2391		 * rootbp's birth time is < cur_min_txg.  Then we will
2392		 * add the next snapshots/clones to the work queue.
2393		 */
2394		char *dsname = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP);
2395		dsl_dataset_name(ds, dsname);
2396		zfs_dbgmsg("scanning dataset %llu (%s) is unnecessary because "
2397		    "cur_min_txg (%llu) >= max_txg (%llu)",
2398		    (longlong_t)dsobj, dsname,
2399		    (longlong_t)scn->scn_phys.scn_cur_min_txg,
2400		    (longlong_t)scn->scn_phys.scn_max_txg);
2401		kmem_free(dsname, ZFS_MAX_DATASET_NAME_LEN);
2402
2403		goto out;
2404	}
2405
2406	/*
2407	 * Only the ZIL in the head (non-snapshot) is valid. Even though
2408	 * snapshots can have ZIL block pointers (which may be the same
2409	 * BP as in the head), they must be ignored. In addition, $ORIGIN
2410 * doesn't have an objset (i.e. its ds_bp is a hole) so we don't
2411 * need to look for a ZIL in it either. So we traverse the ZIL here,
2412 * rather than in dsl_scan_recurse(), because the regular snapshot
2413	 * block-sharing rules don't apply to it.
2414	 */
2415	if (!dsl_dataset_is_snapshot(ds) &&
2416	    (dp->dp_origin_snap == NULL ||
2417	    ds->ds_dir != dp->dp_origin_snap->ds_dir)) {
2418		objset_t *os;
2419		if (dmu_objset_from_ds(ds, &os) != 0) {
2420			goto out;
2421		}
2422		dsl_scan_zil(dp, &os->os_zil_header);
2423	}
2424
2425	/*
2426	 * Iterate over the bps in this ds.
2427	 */
2428	dmu_buf_will_dirty(ds->ds_dbuf, tx);
2429	rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
2430	dsl_scan_visit_rootbp(scn, ds, &dsl_dataset_phys(ds)->ds_bp, tx);
2431	rrw_exit(&ds->ds_bp_rwlock, FTAG);
2432
2433	char *dsname = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP);
2434	dsl_dataset_name(ds, dsname);
2435	zfs_dbgmsg("scanned dataset %llu (%s) with min=%llu max=%llu; "
2436	    "suspending=%u",
2437	    (longlong_t)dsobj, dsname,
2438	    (longlong_t)scn->scn_phys.scn_cur_min_txg,
2439	    (longlong_t)scn->scn_phys.scn_cur_max_txg,
2440	    (int)scn->scn_suspending);
2441	kmem_free(dsname, ZFS_MAX_DATASET_NAME_LEN);
2442
2443	if (scn->scn_suspending)
2444		goto out;
2445
2446	/*
2447	 * We've finished this pass over this dataset.
2448	 */
2449
2450	/*
2451	 * If we did not completely visit this dataset, do another pass.
2452	 */
2453	if (scn->scn_phys.scn_flags & DSF_VISIT_DS_AGAIN) {
2454		zfs_dbgmsg("incomplete pass; visiting again");
2455		scn->scn_phys.scn_flags &= ~DSF_VISIT_DS_AGAIN;
2456		scan_ds_queue_insert(scn, ds->ds_object,
2457		    scn->scn_phys.scn_cur_max_txg);
2458		goto out;
2459	}
2460
2461	/*
2462	 * Add descendant datasets to work queue.
2463	 */
2464	if (dsl_dataset_phys(ds)->ds_next_snap_obj != 0) {
2465		scan_ds_queue_insert(scn,
2466		    dsl_dataset_phys(ds)->ds_next_snap_obj,
2467		    dsl_dataset_phys(ds)->ds_creation_txg);
2468	}
2469	if (dsl_dataset_phys(ds)->ds_num_children > 1) {
2470		boolean_t usenext = B_FALSE;
2471		if (dsl_dataset_phys(ds)->ds_next_clones_obj != 0) {
2472			uint64_t count;
2473			/*
2474			 * A bug in a previous version of the code could
2475			 * cause upgrade_clones_cb() to not set
2476			 * ds_next_snap_obj when it should, leading to a
2477			 * missing entry.  Therefore we can only use the
2478			 * next_clones_obj when its count is correct.
2479			 */
2480			int err = zap_count(dp->dp_meta_objset,
2481			    dsl_dataset_phys(ds)->ds_next_clones_obj, &count);
2482			if (err == 0 &&
2483			    count == dsl_dataset_phys(ds)->ds_num_children - 1)
2484				usenext = B_TRUE;
2485		}
2486
2487		if (usenext) {
2488			zap_cursor_t zc;
2489			zap_attribute_t za;
2490			for (zap_cursor_init(&zc, dp->dp_meta_objset,
2491			    dsl_dataset_phys(ds)->ds_next_clones_obj);
2492			    zap_cursor_retrieve(&zc, &za) == 0;
2493			    (void) zap_cursor_advance(&zc)) {
2494				scan_ds_queue_insert(scn,
2495				    zfs_strtonum(za.za_name, NULL),
2496				    dsl_dataset_phys(ds)->ds_creation_txg);
2497			}
2498			zap_cursor_fini(&zc);
2499		} else {
2500			VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
2501			    enqueue_clones_cb, &ds->ds_object,
2502			    DS_FIND_CHILDREN));
2503		}
2504	}
2505
2506out:
2507	dsl_dataset_rele(ds, FTAG);
2508}
2509
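/*
 * dmu_objset_find_dp() callback: walk back to the oldest snapshot in this
 * dataset's lineage and enqueue it for scanning.  Clones are skipped here;
 * they are enqueued when their origin is visited.
 */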
2510/* ARGSUSED */
2511static int
2512enqueue_cb(dsl_pool_t *dp, dsl_dataset_t *hds, void *arg)
2513{
2514	dsl_dataset_t *ds;
2515	int err;
2516	dsl_scan_t *scn = dp->dp_scan;
2517
2518	err = dsl_dataset_hold_obj(dp, hds->ds_object, FTAG, &ds);
2519	if (err)
2520		return (err);
2521
2522	while (dsl_dataset_phys(ds)->ds_prev_snap_obj != 0) {
2523		dsl_dataset_t *prev;
2524		err = dsl_dataset_hold_obj(dp,
2525		    dsl_dataset_phys(ds)->ds_prev_snap_obj, FTAG, &prev);
2526		if (err) {
2527			dsl_dataset_rele(ds, FTAG);
2528			return (err);
2529		}
2530
2531		/*
2532		 * If this is a clone, we don't need to worry about it for now.
2533		 */
2534		if (dsl_dataset_phys(prev)->ds_next_snap_obj != ds->ds_object) {
2535			dsl_dataset_rele(ds, FTAG);
2536			dsl_dataset_rele(prev, FTAG);
2537			return (0);
2538		}
2539		dsl_dataset_rele(ds, FTAG);
2540		ds = prev;
2541	}
2542
2543	scan_ds_queue_insert(scn, ds->ds_object,
2544	    dsl_dataset_phys(ds)->ds_prev_snap_txg);
2545	dsl_dataset_rele(ds, FTAG);
2546	return (0);
2547}
2548
2549/* ARGSUSED */
2550void
2551dsl_scan_ddt_entry(dsl_scan_t *scn, enum zio_checksum checksum,
2552    ddt_entry_t *dde, dmu_tx_t *tx)
2553{
2554	const ddt_key_t *ddk = &dde->dde_key;
2555	ddt_phys_t *ddp = dde->dde_phys;
2556	blkptr_t bp;
2557	zbookmark_phys_t zb = { 0 };
2558	int p;
2559
2560	if (!dsl_scan_is_running(scn))
2561		return;
2562
2563	/*
2564	 * This function is special because it is the only thing
2565	 * that can add scan_io_t's to the vdev scan queues from
2566	 * outside dsl_scan_sync(). For the most part this is ok
2567	 * as long as it is called from within syncing context.
2568	 * However, dsl_scan_sync() expects that no new sio's will
2569	 * be added between when all the work for a scan is done
2570	 * and the next txg when the scan is actually marked as
2571	 * completed. This check ensures we do not issue new sio's
2572	 * during this period.
2573	 */
2574	if (scn->scn_done_txg != 0)
2575		return;
2576
2577	for (p = 0; p < DDT_PHYS_TYPES; p++, ddp++) {
2578		if (ddp->ddp_phys_birth == 0 ||
2579		    ddp->ddp_phys_birth > scn->scn_phys.scn_max_txg)
2580			continue;
2581		ddt_bp_create(checksum, ddk, ddp, &bp);
2582
2583		scn->scn_visited_this_txg++;
2584		scan_funcs[scn->scn_phys.scn_func](scn->scn_dp, &bp, &zb);
2585	}
2586}
2587
2588/*
2589 * Scrub/dedup interaction.
2590 *
2591 * If there are N references to a deduped block, we don't want to scrub it
2592 * N times -- ideally, we should scrub it exactly once.
2593 *
2594 * We leverage the fact that the dde's replication class (enum ddt_class)
2595 * is ordered from highest replication class (DDT_CLASS_DITTO) to lowest
2596 * (DDT_CLASS_UNIQUE) so that we may walk the DDT in that order.
2597 *
2598 * To prevent excess scrubbing, the scrub begins by walking the DDT
2599 * to find all blocks with refcnt > 1, and scrubs each of these once.
2600 * Since there are two replication classes which contain blocks with
2601 * refcnt > 1, we scrub the highest replication class (DDT_CLASS_DITTO) first.
2602 * Finally the top-down scrub begins, only visiting blocks with refcnt == 1.
2603 *
2604 * There would be nothing more to say if a block's refcnt couldn't change
2605 * during a scrub, but of course it can so we must account for changes
2606 * in a block's replication class.
2607 *
2608 * Here's an example of what can occur:
2609 *
2610 * If a block has refcnt > 1 during the DDT scrub phase, but has refcnt == 1
2611 * when visited during the top-down scrub phase, it will be scrubbed twice.
2612 * This negates our scrub optimization, but is otherwise harmless.
2613 *
2614 * If a block has refcnt == 1 during the DDT scrub phase, but has refcnt > 1
2615 * on each visit during the top-down scrub phase, it will never be scrubbed.
2616 * To catch this, ddt_sync_entry() notifies the scrub code whenever a block's
2617 * reference class transitions to a higher level (i.e. DDT_CLASS_UNIQUE to
2618 * DDT_CLASS_DUPLICATE); if it transitions from refcnt == 1 to refcnt > 1
2619 * while a scrub is in progress, it scrubs the block right then.
2620 */
2621static void
2622dsl_scan_ddt(dsl_scan_t *scn, dmu_tx_t *tx)
2623{
2624	ddt_bookmark_t *ddb = &scn->scn_phys.scn_ddt_bookmark;
2625	ddt_entry_t dde;
2626	int error;
2627	uint64_t n = 0;
2628
2629	bzero(&dde, sizeof (ddt_entry_t));
2630
2631	while ((error = ddt_walk(scn->scn_dp->dp_spa, ddb, &dde)) == 0) {
2632		ddt_t *ddt;
2633
2634		if (ddb->ddb_class > scn->scn_phys.scn_ddt_class_max)
2635			break;
2636		dprintf("visiting ddb=%llu/%llu/%llu/%llx\n",
2637		    (longlong_t)ddb->ddb_class,
2638		    (longlong_t)ddb->ddb_type,
2639		    (longlong_t)ddb->ddb_checksum,
2640		    (longlong_t)ddb->ddb_cursor);
2641
2642		/* There should be no pending changes to the dedup table */
2643		ddt = scn->scn_dp->dp_spa->spa_ddt[ddb->ddb_checksum];
2644		ASSERT(avl_first(&ddt->ddt_tree) == NULL);
2645
2646		dsl_scan_ddt_entry(scn, ddb->ddb_checksum, &dde, tx);
2647		n++;
2648
2649		if (dsl_scan_check_suspend(scn, NULL))
2650			break;
2651	}
2652
2653	zfs_dbgmsg("scanned %llu ddt entries with class_max = %u; "
2654	    "suspending=%u", (longlong_t)n,
2655	    (int)scn->scn_phys.scn_ddt_class_max, (int)scn->scn_suspending);
2656
2657	ASSERT(error == 0 || error == ENOENT);
2658	ASSERT(error != ENOENT ||
2659	    ddb->ddb_class > scn->scn_phys.scn_ddt_class_max);
2660}
2661
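/*
 * Return the highest txg this dataset needs to be scanned to: a snapshot
 * is capped at its creation txg, otherwise use the scan-wide maximum.
 */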
2662static uint64_t
2663dsl_scan_ds_maxtxg(dsl_dataset_t *ds)
2664{
2665	uint64_t smt = ds->ds_dir->dd_pool->dp_scan->scn_phys.scn_max_txg;
2666	if (ds->ds_is_snapshot)
2667		return (MIN(smt, dsl_dataset_phys(ds)->ds_creation_txg));
2668	return (smt);
2669}
2670
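/*
 * One pass of the metadata traversal: finish the DDT phase if it is still
 * active, scan the MOS and $ORIGIN, resume any dataset we were suspended
 * in, and then drain the in-memory dataset work queue.
 */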
2671static void
2672dsl_scan_visit(dsl_scan_t *scn, dmu_tx_t *tx)
2673{
2674	scan_ds_t *sds;
2675	dsl_pool_t *dp = scn->scn_dp;
2676
2677	if (scn->scn_phys.scn_ddt_bookmark.ddb_class <=
2678	    scn->scn_phys.scn_ddt_class_max) {
2679		scn->scn_phys.scn_cur_min_txg = scn->scn_phys.scn_min_txg;
2680		scn->scn_phys.scn_cur_max_txg = scn->scn_phys.scn_max_txg;
2681		dsl_scan_ddt(scn, tx);
2682		if (scn->scn_suspending)
2683			return;
2684	}
2685
2686	if (scn->scn_phys.scn_bookmark.zb_objset == DMU_META_OBJSET) {
2687		/* First do the MOS & ORIGIN */
2688
2689		scn->scn_phys.scn_cur_min_txg = scn->scn_phys.scn_min_txg;
2690		scn->scn_phys.scn_cur_max_txg = scn->scn_phys.scn_max_txg;
2691		dsl_scan_visit_rootbp(scn, NULL,
2692		    &dp->dp_meta_rootbp, tx);
2693		spa_set_rootblkptr(dp->dp_spa, &dp->dp_meta_rootbp);
2694		if (scn->scn_suspending)
2695			return;
2696
2697		if (spa_version(dp->dp_spa) < SPA_VERSION_DSL_SCRUB) {
2698			VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
2699			    enqueue_cb, NULL, DS_FIND_CHILDREN));
2700		} else {
2701			dsl_scan_visitds(scn,
2702			    dp->dp_origin_snap->ds_object, tx);
2703		}
2704		ASSERT(!scn->scn_suspending);
2705	} else if (scn->scn_phys.scn_bookmark.zb_objset !=
2706	    ZB_DESTROYED_OBJSET) {
2707		uint64_t dsobj = scn->scn_phys.scn_bookmark.zb_objset;
2708		/*
2709		 * If we were suspended, continue from here. Note if the
2710		 * ds we were suspended on was deleted, the zb_objset may
2711		 * be -1, so we will skip this and find a new objset
2712		 * below.
2713		 */
2714		dsl_scan_visitds(scn, dsobj, tx);
2715		if (scn->scn_suspending)
2716			return;
2717	}
2718
2719	/*
2720	 * In case we suspended right at the end of the ds, zero the
2721	 * bookmark so we don't think that we're still trying to resume.
2722	 */
2723	bzero(&scn->scn_phys.scn_bookmark, sizeof (zbookmark_phys_t));
2724
2725	/*
2726	 * Keep pulling things out of the dataset avl queue. Updates to the
2727	 * persistent zap-object-as-queue happen only at checkpoints.
2728	 */
2729	while ((sds = avl_first(&scn->scn_queue)) != NULL) {
2730		dsl_dataset_t *ds;
2731		uint64_t dsobj = sds->sds_dsobj;
2732		uint64_t txg = sds->sds_txg;
2733
2734		/* dequeue and free the ds from the queue */
2735		scan_ds_queue_remove(scn, dsobj);
2736		sds = NULL;
2737
2738		/* set up min / max txg */
2739		VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));
2740		if (txg != 0) {
2741			scn->scn_phys.scn_cur_min_txg =
2742			    MAX(scn->scn_phys.scn_min_txg, txg);
2743		} else {
2744			scn->scn_phys.scn_cur_min_txg =
2745			    MAX(scn->scn_phys.scn_min_txg,
2746			    dsl_dataset_phys(ds)->ds_prev_snap_txg);
2747		}
2748		scn->scn_phys.scn_cur_max_txg = dsl_scan_ds_maxtxg(ds);
2749		dsl_dataset_rele(ds, FTAG);
2750
2751		dsl_scan_visitds(scn, dsobj, tx);
2752		if (scn->scn_suspending)
2753			return;
2754	}
2755
2756	/* No more objsets to fetch, we're done */
2757	scn->scn_phys.scn_bookmark.zb_objset = ZB_DESTROYED_OBJSET;
2758	ASSERT0(scn->scn_suspending);
2759}
2760
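/*
 * Count the data disks in the pool: for every top-level vdev that is not a
 * log, spare or l2cache device, add its disk count minus its parity.
 */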
2761static uint64_t
2762dsl_scan_count_data_disks(vdev_t *rvd)
2763{
2764	uint64_t i, leaves = 0;
2765
2766	for (i = 0; i < rvd->vdev_children; i++) {
2767		vdev_t *vd = rvd->vdev_child[i];
2768		if (vd->vdev_islog || vd->vdev_isspare || vd->vdev_isl2cache)
2769			continue;
2770		leaves += vdev_get_ndisks(vd) - vdev_get_nparity(vd);
2771	}
2772	return (leaves);
2773}
2774
2775static void
2776scan_io_queues_update_zio_stats(dsl_scan_io_queue_t *q, const blkptr_t *bp)
2777{
2778	int i;
2779	uint64_t cur_size = 0;
2780
2781	for (i = 0; i < BP_GET_NDVAS(bp); i++) {
2782		cur_size += DVA_GET_ASIZE(&bp->blk_dva[i]);
2783	}
2784
2785	q->q_total_zio_size_this_txg += cur_size;
2786	q->q_zios_this_txg++;
2787}
2788
2789static void
2790scan_io_queues_update_seg_stats(dsl_scan_io_queue_t *q, uint64_t start,
2791    uint64_t end)
2792{
2793	q->q_total_seg_size_this_txg += end - start;
2794	q->q_segs_this_txg++;
2795}
2796
2797static boolean_t
2798scan_io_queue_check_suspend(dsl_scan_t *scn)
2799{
2800	/* See comment in dsl_scan_check_suspend() */
2801	uint64_t curr_time_ns = gethrtime();
2802	uint64_t scan_time_ns = curr_time_ns - scn->scn_sync_start_time;
2803	uint64_t sync_time_ns = curr_time_ns -
2804	    scn->scn_dp->dp_spa->spa_sync_starttime;
2805	int dirty_pct = scn->scn_dp->dp_dirty_total * 100 / zfs_dirty_data_max;
2806	int mintime = (scn->scn_phys.scn_func == POOL_SCAN_RESILVER) ?
2807	    zfs_resilver_min_time_ms : zfs_scrub_min_time_ms;
2808
2809	return ((NSEC2MSEC(scan_time_ns) > mintime &&
2810	    (dirty_pct >= zfs_vdev_async_write_active_min_dirty_percent ||
2811	    txg_sync_waiting(scn->scn_dp) ||
2812	    NSEC2SEC(sync_time_ns) >= zfs_txg_timeout)) ||
2813	    spa_shutting_down(scn->scn_dp->dp_spa));
2814}
2815
2816/*
2817 * Given a list of scan_io_t's in io_list, this issues the I/Os out to
2818 * disk. This consumes the io_list and frees the scan_io_t's. This is
2819 * called when emptying queues, either when we're up against the memory
2820 * limit or when we have finished scanning. Returns B_TRUE if we stopped
2821 * processing the list before we finished. Any sios that were not issued
2822 * will remain in the io_list.
2823 */
2824static boolean_t
2825scan_io_queue_issue(dsl_scan_io_queue_t *queue, list_t *io_list)
2826{
2827	dsl_scan_t *scn = queue->q_scn;
2828	scan_io_t *sio;
2829	int64_t bytes_issued = 0;
2830	boolean_t suspended = B_FALSE;
2831
2832	while ((sio = list_head(io_list)) != NULL) {
2833		blkptr_t bp;
2834
2835		if (scan_io_queue_check_suspend(scn)) {
2836			suspended = B_TRUE;
2837			break;
2838		}
2839
2840		sio2bp(sio, &bp);
2841		bytes_issued += SIO_GET_ASIZE(sio);
2842		scan_exec_io(scn->scn_dp, &bp, sio->sio_flags,
2843		    &sio->sio_zb, queue);
2844		(void) list_remove_head(io_list);
2845		scan_io_queues_update_zio_stats(queue, &bp);
2846		sio_free(sio);
2847	}
2848
2849	atomic_add_64(&scn->scn_bytes_pending, -bytes_issued);
2850
2851	return (suspended);
2852}
2853
2854/*
2855 * This function removes sios from an IO queue which reside within a given
2856 * range_seg_t and inserts them (in offset order) into a list. Note that
2857 * we only ever return a maximum of 32 sios at once. If there are more sios
2858 * in this segment that did not make it onto the list, we return B_TRUE;
2859 * otherwise we return B_FALSE.
2860 */
2861static boolean_t
2862scan_io_queue_gather(dsl_scan_io_queue_t *queue, range_seg_t *rs, list_t *list)
2863{
2864	scan_io_t *srch_sio, *sio, *next_sio;
2865	avl_index_t idx;
2866	uint_t num_sios = 0;
2867	int64_t bytes_issued = 0;
2868
2869	ASSERT(rs != NULL);
2870	ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock));
2871
2872	srch_sio = sio_alloc(1);
2873	srch_sio->sio_nr_dvas = 1;
2874	SIO_SET_OFFSET(srch_sio, rs_get_start(rs, queue->q_exts_by_addr));
2875
2876	/*
2877	 * The exact start of the extent might not contain any matching zios,
2878	 * so if that's the case, examine the next one in the tree.
2879	 */
2880	sio = avl_find(&queue->q_sios_by_addr, srch_sio, &idx);
2881	sio_free(srch_sio);
2882
2883	if (sio == NULL)
2884		sio = avl_nearest(&queue->q_sios_by_addr, idx, AVL_AFTER);
2885
2886	while (sio != NULL && SIO_GET_OFFSET(sio) < rs_get_end(rs,
2887	    queue->q_exts_by_addr) && num_sios <= 32) {
2888		ASSERT3U(SIO_GET_OFFSET(sio), >=, rs_get_start(rs,
2889		    queue->q_exts_by_addr));
2890		ASSERT3U(SIO_GET_END_OFFSET(sio), <=, rs_get_end(rs,
2891		    queue->q_exts_by_addr));
2892
2893		next_sio = AVL_NEXT(&queue->q_sios_by_addr, sio);
2894		avl_remove(&queue->q_sios_by_addr, sio);
2895		queue->q_sio_memused -= SIO_GET_MUSED(sio);
2896
2897		bytes_issued += SIO_GET_ASIZE(sio);
2898		num_sios++;
2899		list_insert_tail(list, sio);
2900		sio = next_sio;
2901	}
2902
2903	/*
2904	 * We limit the number of sios we process at once to 32 to avoid
2905	 * biting off more than we can chew. If we didn't take everything
2906	 * in the segment we update it to reflect the work we were able to
2907	 * complete. Otherwise, we remove it from the range tree entirely.
2908	 */
2909	if (sio != NULL && SIO_GET_OFFSET(sio) < rs_get_end(rs,
2910	    queue->q_exts_by_addr)) {
2911		range_tree_adjust_fill(queue->q_exts_by_addr, rs,
2912		    -bytes_issued);
2913		range_tree_resize_segment(queue->q_exts_by_addr, rs,
2914		    SIO_GET_OFFSET(sio), rs_get_end(rs,
2915		    queue->q_exts_by_addr) - SIO_GET_OFFSET(sio));
2916
2917		return (B_TRUE);
2918	} else {
2919		uint64_t rstart = rs_get_start(rs, queue->q_exts_by_addr);
2920		uint64_t rend = rs_get_end(rs, queue->q_exts_by_addr);
2921		range_tree_remove(queue->q_exts_by_addr, rstart, rend - rstart);
2922		return (B_FALSE);
2923	}
2924}
2925
2926/*
2927 * This is called from the queue emptying thread and selects the next
2928 * extent from which we are to issue I/Os. The behavior of this function
2929 * depends on the state of the scan, the current memory consumption and
2930 * whether or not we are performing a scan shutdown.
2931 * 1) We select extents in an elevator algorithm (LBA-order) if the scan
2932 * 	needs to perform a checkpoint.
2933 * 2) We select the largest available extent if we are up against the
2934 * 	memory limit.
2935 * 3) Otherwise we don't select any extents.
2936 */
2937static range_seg_t *
2938scan_io_queue_fetch_ext(dsl_scan_io_queue_t *queue)
2939{
2940	dsl_scan_t *scn = queue->q_scn;
2941	range_tree_t *rt = queue->q_exts_by_addr;
2942
2943	ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock));
2944	ASSERT(scn->scn_is_sorted);
2945
2946	/* handle tunable overrides */
2947	if (scn->scn_checkpointing || scn->scn_clearing) {
2948		if (zfs_scan_issue_strategy == 1) {
2949			return (range_tree_first(rt));
2950		} else if (zfs_scan_issue_strategy == 2) {
2951			/*
2952			 * We need to get the original entry in the by_addr
2953			 * tree so we can modify it.
2954			 */
2955			range_seg_t *size_rs =
2956			    zfs_btree_first(&queue->q_exts_by_size, NULL);
2957			if (size_rs == NULL)
2958				return (NULL);
2959			uint64_t start = rs_get_start(size_rs, rt);
2960			uint64_t size = rs_get_end(size_rs, rt) - start;
2961			range_seg_t *addr_rs = range_tree_find(rt, start,
2962			    size);
2963			ASSERT3P(addr_rs, !=, NULL);
2964			ASSERT3U(rs_get_start(size_rs, rt), ==,
2965			    rs_get_start(addr_rs, rt));
2966			ASSERT3U(rs_get_end(size_rs, rt), ==,
2967			    rs_get_end(addr_rs, rt));
2968			return (addr_rs);
2969		}
2970	}
2971
2972	/*
2973	 * During normal clearing, we want to issue our largest segments
2974	 * first, keeping IO as sequential as possible, and leaving the
2975	 * smaller extents for later with the hope that they might eventually
2976	 * grow to larger sequential segments. However, when the scan is
2977	 * checkpointing, no new extents will be added to the sorting queue,
2978	 * so the way we are sorted now is as good as it will ever get.
2979	 * In this case, we instead switch to issuing extents in LBA order.
2980	 */
2981	if (scn->scn_checkpointing) {
2982		return (range_tree_first(rt));
2983	} else if (scn->scn_clearing) {
2984		/*
2985		 * We need to get the original entry in the by_addr
2986		 * tree so we can modify it.
2987		 */
2988		range_seg_t *size_rs = zfs_btree_first(&queue->q_exts_by_size,
2989		    NULL);
2990		if (size_rs == NULL)
2991			return (NULL);
2992		uint64_t start = rs_get_start(size_rs, rt);
2993		uint64_t size = rs_get_end(size_rs, rt) - start;
2994		range_seg_t *addr_rs = range_tree_find(rt, start, size);
2995		ASSERT3P(addr_rs, !=, NULL);
2996		ASSERT3U(rs_get_start(size_rs, rt), ==, rs_get_start(addr_rs,
2997		    rt));
2998		ASSERT3U(rs_get_end(size_rs, rt), ==, rs_get_end(addr_rs, rt));
2999		return (addr_rs);
3000	} else {
3001		return (NULL);
3002	}
3003}
3004
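/*
 * Taskq callback that drains one top-level vdev's scan I/O queue: fetch an
 * extent, gather its sios, drop the queue lock while issuing them, and
 * repeat until the queue is empty or we must suspend for the checkpoint.
 */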
3005static void
3006scan_io_queues_run_one(void *arg)
3007{
3008	dsl_scan_io_queue_t *queue = arg;
3009	kmutex_t *q_lock = &queue->q_vd->vdev_scan_io_queue_lock;
3010	boolean_t suspended = B_FALSE;
3011	range_seg_t *rs = NULL;
3012	scan_io_t *sio = NULL;
3013	list_t sio_list;
3014
3015	ASSERT(queue->q_scn->scn_is_sorted);
3016
3017	list_create(&sio_list, sizeof (scan_io_t),
3018	    offsetof(scan_io_t, sio_nodes.sio_list_node));
3019	mutex_enter(q_lock);
3020
3021	/* Calculate maximum in-flight bytes for this vdev. */
3022	queue->q_maxinflight_bytes = MAX(1, zfs_scan_vdev_limit *
3023	    (vdev_get_ndisks(queue->q_vd) - vdev_get_nparity(queue->q_vd)));
3024
3025	/* reset per-queue scan statistics for this txg */
3026	queue->q_total_seg_size_this_txg = 0;
3027	queue->q_segs_this_txg = 0;
3028	queue->q_total_zio_size_this_txg = 0;
3029	queue->q_zios_this_txg = 0;
3030
3031	/* loop until we run out of time or sios */
3032	while ((rs = scan_io_queue_fetch_ext(queue)) != NULL) {
3033		uint64_t seg_start = 0, seg_end = 0;
3034		boolean_t more_left = B_TRUE;
3035
3036		ASSERT(list_is_empty(&sio_list));
3037
3038		/* loop while we still have sios left to process in this rs */
3039		while (more_left) {
3040			scan_io_t *first_sio, *last_sio;
3041
3042			/*
3043			 * We have selected which extent needs to be
3044			 * processed next. Gather up the corresponding sios.
3045			 */
3046			more_left = scan_io_queue_gather(queue, rs, &sio_list);
3047			ASSERT(!list_is_empty(&sio_list));
3048			first_sio = list_head(&sio_list);
3049			last_sio = list_tail(&sio_list);
3050
3051			seg_end = SIO_GET_END_OFFSET(last_sio);
3052			if (seg_start == 0)
3053				seg_start = SIO_GET_OFFSET(first_sio);
3054
3055			/*
3056			 * Issuing sios can take a long time so drop the
3057			 * queue lock. The sio queue won't be updated by
3058			 * other threads since we're in syncing context so
3059			 * we can be sure that our trees will remain exactly
3060			 * as we left them.
3061			 */
3062			mutex_exit(q_lock);
3063			suspended = scan_io_queue_issue(queue, &sio_list);
3064			mutex_enter(q_lock);
3065
3066			if (suspended)
3067				break;
3068		}
3069
3070		/* update statistics for debugging purposes */
3071		scan_io_queues_update_seg_stats(queue, seg_start, seg_end);
3072
3073		if (suspended)
3074			break;
3075	}
3076
3077	/*
3078	 * If we were suspended in the middle of processing,
3079	 * requeue any unfinished sios and exit.
3080	 */
3081	while ((sio = list_head(&sio_list)) != NULL) {
3082		list_remove(&sio_list, sio);
3083		scan_io_queue_insert_impl(queue, sio);
3084	}
3085
3086	mutex_exit(q_lock);
3087	list_destroy(&sio_list);
3088}
3089
3090/*
3091 * Performs an emptying run on all scan queues in the pool. This just
3092 * punches out one thread per top-level vdev, each of which processes
3093 * only that vdev's scan queue. We can parallelize the I/O here because
3094 * we know that each queue's I/Os only affect its own top-level vdev.
3095 *
3096 * This function waits for the queue runs to complete, and must be
3097 * called from dsl_scan_sync (or in general, syncing context).
3098 */
3099static void
3100scan_io_queues_run(dsl_scan_t *scn)
3101{
3102	spa_t *spa = scn->scn_dp->dp_spa;
3103
3104	ASSERT(scn->scn_is_sorted);
3105	ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
3106
3107	if (scn->scn_bytes_pending == 0)
3108		return;
3109
3110	if (scn->scn_taskq == NULL) {
3111		int nthreads = spa->spa_root_vdev->vdev_children;
3112
3113		/*
3114		 * We need to make this taskq *always* execute as many
3115		 * threads in parallel as we have top-level vdevs and no
3116		 * less, otherwise strange serialization of the calls to
3117		 * scan_io_queues_run_one can occur during spa_sync runs
3118		 * and that significantly impacts performance.
3119		 */
3120		scn->scn_taskq = taskq_create("dsl_scan_iss", nthreads,
3121		    minclsyspri, nthreads, nthreads, TASKQ_PREPOPULATE);
3122	}
3123
3124	for (uint64_t i = 0; i < spa->spa_root_vdev->vdev_children; i++) {
3125		vdev_t *vd = spa->spa_root_vdev->vdev_child[i];
3126
3127		mutex_enter(&vd->vdev_scan_io_queue_lock);
3128		if (vd->vdev_scan_io_queue != NULL) {
3129			VERIFY(taskq_dispatch(scn->scn_taskq,
3130			    scan_io_queues_run_one, vd->vdev_scan_io_queue,
3131			    TQ_SLEEP) != TASKQID_INVALID);
3132		}
3133		mutex_exit(&vd->vdev_scan_io_queue_lock);
3134	}
3135
3136	/*
3137	 * Wait for the queues to finish issuing their IOs for this run
3138	 * before we return. There may still be IOs in flight at this
3139	 * point.
3140	 */
3141	taskq_wait(scn->scn_taskq);
3142}
3143
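/*
 * Decide whether async destroy/free processing should stop for this txg,
 * based on the per-txg block and dedup-free limits, elapsed time, and
 * whether the txg sync or pool shutdown is waiting on us.
 */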
3144static boolean_t
3145dsl_scan_async_block_should_pause(dsl_scan_t *scn)
3146{
3147	uint64_t elapsed_nanosecs;
3148
3149	if (zfs_recover)
3150		return (B_FALSE);
3151
3152	if (zfs_async_block_max_blocks != 0 &&
3153	    scn->scn_visited_this_txg >= zfs_async_block_max_blocks) {
3154		return (B_TRUE);
3155	}
3156
3157	if (zfs_max_async_dedup_frees != 0 &&
3158	    scn->scn_dedup_frees_this_txg >= zfs_max_async_dedup_frees) {
3159		return (B_TRUE);
3160	}
3161
3162	elapsed_nanosecs = gethrtime() - scn->scn_sync_start_time;
3163	return (elapsed_nanosecs / NANOSEC > zfs_txg_timeout ||
3164	    (NSEC2MSEC(elapsed_nanosecs) > scn->scn_async_block_min_time_ms &&
3165	    txg_sync_waiting(scn->scn_dp)) ||
3166	    spa_shutting_down(scn->scn_dp->dp_spa));
3167}
3168
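/*
 * bptree/bpobj iteration callback that frees one block, charges the freed
 * space to dp_free_dir, and may pause with ERESTART when the async block
 * limits are hit.
 */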
3169static int
3170dsl_scan_free_block_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
3171{
3172	dsl_scan_t *scn = arg;
3173
3174	if (!scn->scn_is_bptree ||
3175	    (BP_GET_LEVEL(bp) == 0 && BP_GET_TYPE(bp) != DMU_OT_OBJSET)) {
3176		if (dsl_scan_async_block_should_pause(scn))
3177			return (SET_ERROR(ERESTART));
3178	}
3179
3180	zio_nowait(zio_free_sync(scn->scn_zio_root, scn->scn_dp->dp_spa,
3181	    dmu_tx_get_txg(tx), bp, 0));
3182	dsl_dir_diduse_space(tx->tx_pool->dp_free_dir, DD_USED_HEAD,
3183	    -bp_get_dsize_sync(scn->scn_dp->dp_spa, bp),
3184	    -BP_GET_PSIZE(bp), -BP_GET_UCSIZE(bp), tx);
3185	scn->scn_visited_this_txg++;
3186	if (BP_GET_DEDUP(bp))
3187		scn->scn_dedup_frees_this_txg++;
3188	return (0);
3189}
3190
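/*
 * Roll the per-queue segment and zio counters from every top-level vdev up
 * into the pool-wide per-txg averages and counts kept on the dsl_scan_t.
 */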
3191static void
3192dsl_scan_update_stats(dsl_scan_t *scn)
3193{
3194	spa_t *spa = scn->scn_dp->dp_spa;
3195	uint64_t i;
3196	uint64_t seg_size_total = 0, zio_size_total = 0;
3197	uint64_t seg_count_total = 0, zio_count_total = 0;
3198
3199	for (i = 0; i < spa->spa_root_vdev->vdev_children; i++) {
3200		vdev_t *vd = spa->spa_root_vdev->vdev_child[i];
3201		dsl_scan_io_queue_t *queue = vd->vdev_scan_io_queue;
3202
3203		if (queue == NULL)
3204			continue;
3205
3206		seg_size_total += queue->q_total_seg_size_this_txg;
3207		zio_size_total += queue->q_total_zio_size_this_txg;
3208		seg_count_total += queue->q_segs_this_txg;
3209		zio_count_total += queue->q_zios_this_txg;
3210	}
3211
3212	if (seg_count_total == 0 || zio_count_total == 0) {
3213		scn->scn_avg_seg_size_this_txg = 0;
3214		scn->scn_avg_zio_size_this_txg = 0;
3215		scn->scn_segs_this_txg = 0;
3216		scn->scn_zios_this_txg = 0;
3217		return;
3218	}
3219
3220	scn->scn_avg_seg_size_this_txg = seg_size_total / seg_count_total;
3221	scn->scn_avg_zio_size_this_txg = zio_size_total / zio_count_total;
3222	scn->scn_segs_this_txg = seg_count_total;
3223	scn->scn_zios_this_txg = zio_count_total;
3224}
3225
3226static int
3227bpobj_dsl_scan_free_block_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed,
3228    dmu_tx_t *tx)
3229{
3230	ASSERT(!bp_freed);
3231	return (dsl_scan_free_block_cb(arg, bp, tx));
3232}
3233
3234static int
3235dsl_scan_obsolete_block_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed,
3236    dmu_tx_t *tx)
3237{
3238	ASSERT(!bp_freed);
3239	dsl_scan_t *scn = arg;
3240	const dva_t *dva = &bp->blk_dva[0];
3241
3242	if (dsl_scan_async_block_should_pause(scn))
3243		return (SET_ERROR(ERESTART));
3244
3245	spa_vdev_indirect_mark_obsolete(scn->scn_dp->dp_spa,
3246	    DVA_GET_VDEV(dva), DVA_GET_OFFSET(dva),
3247	    DVA_GET_ASIZE(dva), tx);
3248	scn->scn_visited_this_txg++;
3249	return (0);
3250}
3251
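/*
 * Returns B_TRUE if a scrub/resilver or async destroy is making progress,
 * or if there is still freed space or pending livelist (clone) deletions
 * left to process.
 */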
3252boolean_t
3253dsl_scan_active(dsl_scan_t *scn)
3254{
3255	spa_t *spa = scn->scn_dp->dp_spa;
3256	uint64_t used = 0, comp, uncomp;
3257	boolean_t clones_left;
3258
3259	if (spa->spa_load_state != SPA_LOAD_NONE)
3260		return (B_FALSE);
3261	if (spa_shutting_down(spa))
3262		return (B_FALSE);
3263	if ((dsl_scan_is_running(scn) && !dsl_scan_is_paused_scrub(scn)) ||
3264	    (scn->scn_async_destroying && !scn->scn_async_stalled))
3265		return (B_TRUE);
3266
3267	if (spa_version(scn->scn_dp->dp_spa) >= SPA_VERSION_DEADLISTS) {
3268		(void) bpobj_space(&scn->scn_dp->dp_free_bpobj,
3269		    &used, &comp, &uncomp);
3270	}
3271	clones_left = spa_livelist_delete_check(spa);
3272	return ((used != 0) || (clones_left));
3273}
3274
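/*
 * Returns B_TRUE if any concrete leaf vdev under 'vd' (including 'vd'
 * itself) has not deferred its resilver, meaning this top-level vdev
 * still needs resilver I/O issued.
 */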
3275static boolean_t
3276dsl_scan_check_deferred(vdev_t *vd)
3277{
3278	boolean_t need_resilver = B_FALSE;
3279
3280	for (int c = 0; c < vd->vdev_children; c++) {
3281		need_resilver |=
3282		    dsl_scan_check_deferred(vd->vdev_child[c]);
3283	}
3284
3285	if (!vdev_is_concrete(vd) || vd->vdev_aux ||
3286	    !vd->vdev_ops->vdev_op_leaf)
3287		return (need_resilver);
3288
3289	if (!vd->vdev_resilver_deferred)
3290		need_resilver = B_TRUE;
3291
3292	return (need_resilver);
3293}
3294
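/*
 * Decide whether a resilver I/O needs to be issued for the block copy
 * described by dva, based on the top-level vdev's DTLs and whether any
 * leaf below it is actively (not deferred) resilvering.
 */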
3295static boolean_t
3296dsl_scan_need_resilver(spa_t *spa, const dva_t *dva, size_t psize,
3297    uint64_t phys_birth)
3298{
3299	vdev_t *vd;
3300
3301	vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva));
3302
3303	if (vd->vdev_ops == &vdev_indirect_ops) {
3304		/*
3305		 * The indirect vdev can point to multiple
3306		 * vdevs.  For simplicity, always create
3307		 * the resilver zio_t. zio_vdev_io_start()
3308		 * will bypass the child resilver i/o's if
3309		 * they are on vdevs that don't have DTL's.
3310		 */
3311		return (B_TRUE);
3312	}
3313
3314	if (DVA_GET_GANG(dva)) {
3315		/*
3316		 * Gang members may be spread across multiple
3317		 * vdevs, so the best estimate we have is the
3318		 * scrub range, which has already been checked.
3319		 * XXX -- it would be better to change our
3320		 * allocation policy to ensure that all
3321		 * gang members reside on the same vdev.
3322		 */
3323		return (B_TRUE);
3324	}
3325
3326	/*
3327	 * Check if the top-level vdev must resilver this offset.
3328	 * When the offset does not intersect with a dirty leaf DTL
3329	 * then it may be possible to skip the resilver IO.  The psize
3330	 * is provided instead of asize to simplify the check for RAIDZ.
3331	 */
3332	if (!vdev_dtl_need_resilver(vd, dva, psize, phys_birth))
3333		return (B_FALSE);
3334
3335	/*
3336	 * Check that this top-level vdev has a device under it which
3337	 * is resilvering and is not deferred.
3338	 */
3339	if (!dsl_scan_check_deferred(vd))
3340		return (B_FALSE);
3341
3342	return (B_TRUE);
3343}
3344
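/*
 * Perform this txg's share of async destroy work: free blocks from the
 * free bpobj, traverse destroyed datasets recorded in the bptree (retiring
 * the async_destroy feature once the bptree is empty), transfer any
 * leftover dp_free_dir space to the leak dir when zfs_free_leak_on_eio is
 * set, and process the obsolete bpobj for indirect vdevs. Returns ERESTART
 * if there is more work than fits in this txg.
 */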
3345static int
3346dsl_process_async_destroys(dsl_pool_t *dp, dmu_tx_t *tx)
3347{
3348	dsl_scan_t *scn = dp->dp_scan;
3349	spa_t *spa = dp->dp_spa;
3350	int err = 0;
3351
3352	if (spa_suspend_async_destroy(spa))
3353		return (0);
3354
3355	if (zfs_free_bpobj_enabled &&
3356	    spa_version(spa) >= SPA_VERSION_DEADLISTS) {
3357		scn->scn_is_bptree = B_FALSE;
3358		scn->scn_async_block_min_time_ms = zfs_free_min_time_ms;
3359		scn->scn_zio_root = zio_root(spa, NULL,
3360		    NULL, ZIO_FLAG_MUSTSUCCEED);
3361		err = bpobj_iterate(&dp->dp_free_bpobj,
3362		    bpobj_dsl_scan_free_block_cb, scn, tx);
3363		VERIFY0(zio_wait(scn->scn_zio_root));
3364		scn->scn_zio_root = NULL;
3365
3366		if (err != 0 && err != ERESTART)
3367			zfs_panic_recover("error %u from bpobj_iterate()", err);
3368	}
3369
3370	if (err == 0 && spa_feature_is_active(spa, SPA_FEATURE_ASYNC_DESTROY)) {
3371		ASSERT(scn->scn_async_destroying);
3372		scn->scn_is_bptree = B_TRUE;
3373		scn->scn_zio_root = zio_root(spa, NULL,
3374		    NULL, ZIO_FLAG_MUSTSUCCEED);
3375		err = bptree_iterate(dp->dp_meta_objset,
3376		    dp->dp_bptree_obj, B_TRUE, dsl_scan_free_block_cb, scn, tx);
3377		VERIFY0(zio_wait(scn->scn_zio_root));
3378		scn->scn_zio_root = NULL;
3379
3380		if (err == EIO || err == ECKSUM) {
3381			err = 0;
3382		} else if (err != 0 && err != ERESTART) {
3383			zfs_panic_recover("error %u from "
3384			    "traverse_dataset_destroyed()", err);
3385		}
3386
3387		if (bptree_is_empty(dp->dp_meta_objset, dp->dp_bptree_obj)) {
3388			/* finished; deactivate async destroy feature */
3389			spa_feature_decr(spa, SPA_FEATURE_ASYNC_DESTROY, tx);
3390			ASSERT(!spa_feature_is_active(spa,
3391			    SPA_FEATURE_ASYNC_DESTROY));
3392			VERIFY0(zap_remove(dp->dp_meta_objset,
3393			    DMU_POOL_DIRECTORY_OBJECT,
3394			    DMU_POOL_BPTREE_OBJ, tx));
3395			VERIFY0(bptree_free(dp->dp_meta_objset,
3396			    dp->dp_bptree_obj, tx));
3397			dp->dp_bptree_obj = 0;
3398			scn->scn_async_destroying = B_FALSE;
3399			scn->scn_async_stalled = B_FALSE;
3400		} else {
3401			/*
3402			 * If we didn't make progress, mark the async
3403			 * destroy as stalled, so that we will not initiate
3404			 * a spa_sync() on its behalf.  Note that we only
3405			 * check this if we are not finished, because if the
3406			 * bptree had no blocks for us to visit, we can
3407			 * finish without "making progress".
3408			 */
3409			scn->scn_async_stalled =
3410			    (scn->scn_visited_this_txg == 0);
3411		}
3412	}
3413	if (scn->scn_visited_this_txg) {
3414		zfs_dbgmsg("freed %llu blocks in %llums from "
3415		    "free_bpobj/bptree txg %llu; err=%u",
3416		    (longlong_t)scn->scn_visited_this_txg,
3417		    (longlong_t)
3418		    NSEC2MSEC(gethrtime() - scn->scn_sync_start_time),
3419		    (longlong_t)tx->tx_txg, err);
3420		scn->scn_visited_this_txg = 0;
3421		scn->scn_dedup_frees_this_txg = 0;
3422
3423		/*
3424		 * Write out changes to the DDT that may be required as a
3425		 * result of the blocks freed.  This ensures that the DDT
3426		 * is clean when a scrub/resilver runs.
3427		 */
3428		ddt_sync(spa, tx->tx_txg);
3429	}
3430	if (err != 0)
3431		return (err);
3432	if (dp->dp_free_dir != NULL && !scn->scn_async_destroying &&
3433	    zfs_free_leak_on_eio &&
3434	    (dsl_dir_phys(dp->dp_free_dir)->dd_used_bytes != 0 ||
3435	    dsl_dir_phys(dp->dp_free_dir)->dd_compressed_bytes != 0 ||
3436	    dsl_dir_phys(dp->dp_free_dir)->dd_uncompressed_bytes != 0)) {
3437		/*
3438		 * We have finished background destroying, but there is still
3439		 * some space left in the dp_free_dir. Transfer this leaked
3440		 * space to the dp_leak_dir.
3441		 */
3442		if (dp->dp_leak_dir == NULL) {
3443			rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG);
3444			(void) dsl_dir_create_sync(dp, dp->dp_root_dir,
3445			    LEAK_DIR_NAME, tx);
3446			VERIFY0(dsl_pool_open_special_dir(dp,
3447			    LEAK_DIR_NAME, &dp->dp_leak_dir));
3448			rrw_exit(&dp->dp_config_rwlock, FTAG);
3449		}
3450		dsl_dir_diduse_space(dp->dp_leak_dir, DD_USED_HEAD,
3451		    dsl_dir_phys(dp->dp_free_dir)->dd_used_bytes,
3452		    dsl_dir_phys(dp->dp_free_dir)->dd_compressed_bytes,
3453		    dsl_dir_phys(dp->dp_free_dir)->dd_uncompressed_bytes, tx);
3454		dsl_dir_diduse_space(dp->dp_free_dir, DD_USED_HEAD,
3455		    -dsl_dir_phys(dp->dp_free_dir)->dd_used_bytes,
3456		    -dsl_dir_phys(dp->dp_free_dir)->dd_compressed_bytes,
3457		    -dsl_dir_phys(dp->dp_free_dir)->dd_uncompressed_bytes, tx);
3458	}
3459
3460	if (dp->dp_free_dir != NULL && !scn->scn_async_destroying &&
3461	    !spa_livelist_delete_check(spa)) {
3462		/* finished; verify that space accounting went to zero */
3463		ASSERT0(dsl_dir_phys(dp->dp_free_dir)->dd_used_bytes);
3464		ASSERT0(dsl_dir_phys(dp->dp_free_dir)->dd_compressed_bytes);
3465		ASSERT0(dsl_dir_phys(dp->dp_free_dir)->dd_uncompressed_bytes);
3466	}
3467
3468	spa_notify_waiters(spa);
3469
3470	EQUIV(bpobj_is_open(&dp->dp_obsolete_bpobj),
3471	    0 == zap_contains(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
3472	    DMU_POOL_OBSOLETE_BPOBJ));
3473	if (err == 0 && bpobj_is_open(&dp->dp_obsolete_bpobj)) {
3474		ASSERT(spa_feature_is_active(dp->dp_spa,
3475		    SPA_FEATURE_OBSOLETE_COUNTS));
3476
3477		scn->scn_is_bptree = B_FALSE;
3478		scn->scn_async_block_min_time_ms = zfs_obsolete_min_time_ms;
3479		err = bpobj_iterate(&dp->dp_obsolete_bpobj,
3480		    dsl_scan_obsolete_block_cb, scn, tx);
3481		if (err != 0 && err != ERESTART)
3482			zfs_panic_recover("error %u from bpobj_iterate()", err);
3483
3484		if (bpobj_is_empty(&dp->dp_obsolete_bpobj))
3485			dsl_pool_destroy_obsolete_bpobj(dp, tx);
3486	}
3487	return (0);
3488}
3489
3490/*
3491 * This is the primary entry point for scans that is called from syncing
3492 * context. Scans must happen entirely during syncing context so that we
3493 * can guarantee that blocks we are currently scanning will not change out
3494 * from under us. While a scan is active, this function controls how quickly
3495 * transaction groups proceed, instead of the normal handling provided by
3496 * txg_sync_thread().
3497 */
3498void
3499dsl_scan_sync(dsl_pool_t *dp, dmu_tx_t *tx)
3500{
3501	int err = 0;
3502	dsl_scan_t *scn = dp->dp_scan;
3503	spa_t *spa = dp->dp_spa;
3504	state_sync_type_t sync_type = SYNC_OPTIONAL;
3505
3506	if (spa->spa_resilver_deferred &&
3507	    !spa_feature_is_active(dp->dp_spa, SPA_FEATURE_RESILVER_DEFER))
3508		spa_feature_incr(spa, SPA_FEATURE_RESILVER_DEFER, tx);
3509
3510	/*
3511	 * Check for scn_restart_txg before checking spa_load_state, so
3512	 * that we can restart an old-style scan while the pool is being
3513	 * imported (see dsl_scan_init). We also restart scans if there
3514	 * is a deferred resilver and the user has manually disabled
3515	 * deferred resilvers via the tunable.
3516	 */
3517	if (dsl_scan_restarting(scn, tx) ||
3518	    (spa->spa_resilver_deferred && zfs_resilver_disable_defer)) {
3519		pool_scan_func_t func = POOL_SCAN_SCRUB;
3520		dsl_scan_done(scn, B_FALSE, tx);
3521		if (vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL))
3522			func = POOL_SCAN_RESILVER;
3523		zfs_dbgmsg("restarting scan func=%u txg=%llu",
3524		    func, (longlong_t)tx->tx_txg);
3525		dsl_scan_setup_sync(&func, tx);
3526	}
3527
3528	/*
3529	 * Only process scans in sync pass 1.
3530	 */
3531	if (spa_sync_pass(spa) > 1)
3532		return;
3533
3534	/*
3535	 * If the spa is shutting down, then stop scanning. This will
3536	 * ensure that the scan does not dirty any new data during the
3537	 * shutdown phase.
3538	 */
3539	if (spa_shutting_down(spa))
3540		return;
3541
3542	/*
3543	 * If the scan is inactive due to a stalled async destroy, try again.
3544	 */
3545	if (!scn->scn_async_stalled && !dsl_scan_active(scn))
3546		return;
3547
3548	/* reset scan statistics */
3549	scn->scn_visited_this_txg = 0;
3550	scn->scn_dedup_frees_this_txg = 0;
3551	scn->scn_holes_this_txg = 0;
3552	scn->scn_lt_min_this_txg = 0;
3553	scn->scn_gt_max_this_txg = 0;
3554	scn->scn_ddt_contained_this_txg = 0;
3555	scn->scn_objsets_visited_this_txg = 0;
3556	scn->scn_avg_seg_size_this_txg = 0;
3557	scn->scn_segs_this_txg = 0;
3558	scn->scn_avg_zio_size_this_txg = 0;
3559	scn->scn_zios_this_txg = 0;
3560	scn->scn_suspending = B_FALSE;
3561	scn->scn_sync_start_time = gethrtime();
3562	spa->spa_scrub_active = B_TRUE;
3563
3564	/*
3565	 * First process the async destroys.  If we suspend, don't do
3566	 * any scrubbing or resilvering.  This ensures that there are no
3567	 * async destroys while we are scanning, so the scan code doesn't
3568	 * have to worry about traversing them.  It is also faster to free the
3569	 * blocks than to scrub them.
3570	 */
3571	err = dsl_process_async_destroys(dp, tx);
3572	if (err != 0)
3573		return;
3574
3575	if (!dsl_scan_is_running(scn) || dsl_scan_is_paused_scrub(scn))
3576		return;
3577
3578	/*
3579	 * Wait a few txgs after importing to begin scanning so that
3580	 * we can get the pool imported quickly.
3581	 */
3582	if (spa->spa_syncing_txg < spa->spa_first_txg + SCAN_IMPORT_WAIT_TXGS)
3583		return;
3584
3585	/*
3586	 * zfs_scan_suspend_progress can be set to disable scan progress.
3587	 * We don't want to spin the txg_sync thread, so we add a delay
3588	 * here to simulate the time spent doing a scan. This is mostly
3589	 * useful for testing and debugging.
3590	 */
3591	if (zfs_scan_suspend_progress) {
3592		uint64_t scan_time_ns = gethrtime() - scn->scn_sync_start_time;
3593		int mintime = (scn->scn_phys.scn_func == POOL_SCAN_RESILVER) ?
3594		    zfs_resilver_min_time_ms : zfs_scrub_min_time_ms;
3595
3596		while (zfs_scan_suspend_progress &&
3597		    !txg_sync_waiting(scn->scn_dp) &&
3598		    !spa_shutting_down(scn->scn_dp->dp_spa) &&
3599		    NSEC2MSEC(scan_time_ns) < mintime) {
3600			delay(hz);
3601			scan_time_ns = gethrtime() - scn->scn_sync_start_time;
3602		}
3603		return;
3604	}
3605
3606	/*
3607	 * It is possible to switch from unsorted to sorted at any time,
3608	 * but afterwards the scan will remain sorted unless reloaded from
3609	 * a checkpoint after a reboot.
3610	 */
3611	if (!zfs_scan_legacy) {
3612		scn->scn_is_sorted = B_TRUE;
3613		if (scn->scn_last_checkpoint == 0)
3614			scn->scn_last_checkpoint = ddi_get_lbolt();
3615	}
3616
3617	/*
3618	 * For sorted scans, determine what kind of work we will be doing
3619	 * this txg based on our memory limitations and whether or not we
3620	 * need to perform a checkpoint.
3621	 */
3622	if (scn->scn_is_sorted) {
3623		/*
3624		 * If we are over our checkpoint interval, set scn_clearing
3625		 * so that we can begin checkpointing immediately. The
3626		 * checkpoint allows us to save a consistent bookmark
3627		 * representing how much data we have scrubbed so far.
3628		 * Otherwise, use the memory limit to determine if we should
3629		 * scan for metadata or start issuing scrub IOs. We accumulate
3630		 * metadata until we hit our hard memory limit at which point
3631		 * we issue scrub IOs until we are at our soft memory limit.
3632		 */
3633		if (scn->scn_checkpointing ||
3634		    ddi_get_lbolt() - scn->scn_last_checkpoint >
3635		    SEC_TO_TICK(zfs_scan_checkpoint_intval)) {
3636			if (!scn->scn_checkpointing)
3637				zfs_dbgmsg("begin scan checkpoint");
3638
3639			scn->scn_checkpointing = B_TRUE;
3640			scn->scn_clearing = B_TRUE;
3641		} else {
3642			boolean_t should_clear = dsl_scan_should_clear(scn);
3643			if (should_clear && !scn->scn_clearing) {
3644				zfs_dbgmsg("begin scan clearing");
3645				scn->scn_clearing = B_TRUE;
3646			} else if (!should_clear && scn->scn_clearing) {
3647				zfs_dbgmsg("finish scan clearing");
3648				scn->scn_clearing = B_FALSE;
3649			}
3650		}
3651	} else {
3652		ASSERT0(scn->scn_checkpointing);
3653		ASSERT0(scn->scn_clearing);
3654	}
3655
3656	if (!scn->scn_clearing && scn->scn_done_txg == 0) {
3657		/* Need to scan metadata for more blocks to scrub */
3658		dsl_scan_phys_t *scnp = &scn->scn_phys;
3659		taskqid_t prefetch_tqid;
3660
3661		/*
3662		 * Recalculate the max number of in-flight bytes for pool-wide
3663		 * scanning operations (minimum 1MB). Limits for the issuing
3664		 * phase are done per top-level vdev and are handled separately.
3665		 */
3666		scn->scn_maxinflight_bytes = MAX(zfs_scan_vdev_limit *
3667		    dsl_scan_count_data_disks(spa->spa_root_vdev), 1ULL << 20);
3668
3669		if (scnp->scn_ddt_bookmark.ddb_class <=
3670		    scnp->scn_ddt_class_max) {
3671			ASSERT(ZB_IS_ZERO(&scnp->scn_bookmark));
3672			zfs_dbgmsg("doing scan sync txg %llu; "
3673			    "ddt bm=%llu/%llu/%llu/%llx",
3674			    (longlong_t)tx->tx_txg,
3675			    (longlong_t)scnp->scn_ddt_bookmark.ddb_class,
3676			    (longlong_t)scnp->scn_ddt_bookmark.ddb_type,
3677			    (longlong_t)scnp->scn_ddt_bookmark.ddb_checksum,
3678			    (longlong_t)scnp->scn_ddt_bookmark.ddb_cursor);
3679		} else {
3680			zfs_dbgmsg("doing scan sync txg %llu; "
3681			    "bm=%llu/%llu/%llu/%llu",
3682			    (longlong_t)tx->tx_txg,
3683			    (longlong_t)scnp->scn_bookmark.zb_objset,
3684			    (longlong_t)scnp->scn_bookmark.zb_object,
3685			    (longlong_t)scnp->scn_bookmark.zb_level,
3686			    (longlong_t)scnp->scn_bookmark.zb_blkid);
3687		}
3688
3689		scn->scn_zio_root = zio_root(dp->dp_spa, NULL,
3690		    NULL, ZIO_FLAG_CANFAIL);
3691
3692		scn->scn_prefetch_stop = B_FALSE;
3693		prefetch_tqid = taskq_dispatch(dp->dp_sync_taskq,
3694		    dsl_scan_prefetch_thread, scn, TQ_SLEEP);
3695		ASSERT(prefetch_tqid != TASKQID_INVALID);
3696
3697		dsl_pool_config_enter(dp, FTAG);
3698		dsl_scan_visit(scn, tx);
3699		dsl_pool_config_exit(dp, FTAG);
3700
3701		mutex_enter(&dp->dp_spa->spa_scrub_lock);
3702		scn->scn_prefetch_stop = B_TRUE;
3703		cv_broadcast(&spa->spa_scrub_io_cv);
3704		mutex_exit(&dp->dp_spa->spa_scrub_lock);
3705
3706		taskq_wait_id(dp->dp_sync_taskq, prefetch_tqid);
3707		(void) zio_wait(scn->scn_zio_root);
3708		scn->scn_zio_root = NULL;
3709
3710		zfs_dbgmsg("scan visited %llu blocks in %llums "
3711		    "(%llu os's, %llu holes, %llu < mintxg, "
3712		    "%llu in ddt, %llu > maxtxg)",
3713		    (longlong_t)scn->scn_visited_this_txg,
3714		    (longlong_t)NSEC2MSEC(gethrtime() -
3715		    scn->scn_sync_start_time),
3716		    (longlong_t)scn->scn_objsets_visited_this_txg,
3717		    (longlong_t)scn->scn_holes_this_txg,
3718		    (longlong_t)scn->scn_lt_min_this_txg,
3719		    (longlong_t)scn->scn_ddt_contained_this_txg,
3720		    (longlong_t)scn->scn_gt_max_this_txg);
3721
3722		if (!scn->scn_suspending) {
3723			ASSERT0(avl_numnodes(&scn->scn_queue));
3724			scn->scn_done_txg = tx->tx_txg + 1;
3725			if (scn->scn_is_sorted) {
3726				scn->scn_checkpointing = B_TRUE;
3727				scn->scn_clearing = B_TRUE;
3728			}
3729			zfs_dbgmsg("scan complete txg %llu",
3730			    (longlong_t)tx->tx_txg);
3731		}
3732	} else if (scn->scn_is_sorted && scn->scn_bytes_pending != 0) {
3733		ASSERT(scn->scn_clearing);
3734
3735		/* need to issue scrubbing IOs from per-vdev queues */
3736		scn->scn_zio_root = zio_root(dp->dp_spa, NULL,
3737		    NULL, ZIO_FLAG_CANFAIL);
3738		scan_io_queues_run(scn);
3739		(void) zio_wait(scn->scn_zio_root);
3740		scn->scn_zio_root = NULL;
3741
3742		/* calculate and dprintf the current memory usage */
3743		(void) dsl_scan_should_clear(scn);
3744		dsl_scan_update_stats(scn);
3745
3746		zfs_dbgmsg("scan issued %llu blocks (%llu segs) in %llums "
3747		    "(avg_block_size = %llu, avg_seg_size = %llu)",
3748		    (longlong_t)scn->scn_zios_this_txg,
3749		    (longlong_t)scn->scn_segs_this_txg,
3750		    (longlong_t)NSEC2MSEC(gethrtime() -
3751		    scn->scn_sync_start_time),
3752		    (longlong_t)scn->scn_avg_zio_size_this_txg,
3753		    (longlong_t)scn->scn_avg_seg_size_this_txg);
3754	} else if (scn->scn_done_txg != 0 && scn->scn_done_txg <= tx->tx_txg) {
3755		/* Finished with everything. Mark the scrub as complete */
3756		zfs_dbgmsg("scan issuing complete txg %llu",
3757		    (longlong_t)tx->tx_txg);
3758		ASSERT3U(scn->scn_done_txg, !=, 0);
3759		ASSERT0(spa->spa_scrub_inflight);
3760		ASSERT0(scn->scn_bytes_pending);
3761		dsl_scan_done(scn, B_TRUE, tx);
3762		sync_type = SYNC_MANDATORY;
3763	}
3764
3765	dsl_scan_sync_state(scn, tx, sync_type);
3766}
3767
3768static void
3769count_block(dsl_scan_t *scn, zfs_all_blkstats_t *zab, const blkptr_t *bp)
3770{
3771	int i;
3772
3773	/*
3774	 * Don't count embedded bp's, since we already did the work of
3775	 * scanning these when we scanned the containing block.
3776	 */
3777	if (BP_IS_EMBEDDED(bp))
3778		return;
3779
3780	/*
3781	 * Update the spa's stats on how many bytes we have issued.
3782	 * Sequential scrubs create a zio for each DVA of the bp. Each
3783	 * of these will include all DVAs for repair purposes, but the
3784	 * zio code will only try the first one unless there is an issue.
3785	 * Therefore, we should only count the first DVA for these IOs.
3786	 */
3787	if (scn->scn_is_sorted) {
3788		atomic_add_64(&scn->scn_dp->dp_spa->spa_scan_pass_issued,
3789		    DVA_GET_ASIZE(&bp->blk_dva[0]));
3790	} else {
3791		spa_t *spa = scn->scn_dp->dp_spa;
3792
3793		for (i = 0; i < BP_GET_NDVAS(bp); i++) {
3794			atomic_add_64(&spa->spa_scan_pass_issued,
3795			    DVA_GET_ASIZE(&bp->blk_dva[i]));
3796		}
3797	}
3798
3799	/*
3800	 * If we resume after a reboot, zab will be NULL; don't record
3801	 * incomplete stats in that case.
3802	 */
3803	if (zab == NULL)
3804		return;
3805
3806	mutex_enter(&zab->zab_lock);
3807
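	/*
	 * Accumulate into four histogram cells: (level, type),
	 * (level, DMU_OT_TOTAL), (DN_MAX_LEVELS, type) and the
	 * (DN_MAX_LEVELS, DMU_OT_TOTAL) grand total.
	 */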
3808	for (i = 0; i < 4; i++) {
3809		int l = (i < 2) ? BP_GET_LEVEL(bp) : DN_MAX_LEVELS;
3810		int t = (i & 1) ? BP_GET_TYPE(bp) : DMU_OT_TOTAL;
3811
3812		if (t & DMU_OT_NEWTYPE)
3813			t = DMU_OT_OTHER;
3814		zfs_blkstat_t *zb = &zab->zab_type[l][t];
3815		int equal;
3816
3817		zb->zb_count++;
3818		zb->zb_asize += BP_GET_ASIZE(bp);
3819		zb->zb_lsize += BP_GET_LSIZE(bp);
3820		zb->zb_psize += BP_GET_PSIZE(bp);
3821		zb->zb_gangs += BP_COUNT_GANG(bp);
3822
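		/* Count ditto copies of this bp that landed on the same vdev. */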
3823		switch (BP_GET_NDVAS(bp)) {
3824		case 2:
3825			if (DVA_GET_VDEV(&bp->blk_dva[0]) ==
3826			    DVA_GET_VDEV(&bp->blk_dva[1]))
3827				zb->zb_ditto_2_of_2_samevdev++;
3828			break;
3829		case 3:
3830			equal = (DVA_GET_VDEV(&bp->blk_dva[0]) ==
3831			    DVA_GET_VDEV(&bp->blk_dva[1])) +
3832			    (DVA_GET_VDEV(&bp->blk_dva[0]) ==
3833			    DVA_GET_VDEV(&bp->blk_dva[2])) +
3834			    (DVA_GET_VDEV(&bp->blk_dva[1]) ==
3835			    DVA_GET_VDEV(&bp->blk_dva[2]));
3836			if (equal == 1)
3837				zb->zb_ditto_2_of_3_samevdev++;
3838			else if (equal == 3)
3839				zb->zb_ditto_3_of_3_samevdev++;
3840			break;
3841		}
3842	}
3843
3844	mutex_exit(&zab->zab_lock);
3845}
3846
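/*
 * Low-level insertion into a scan queue: adds the scan_io_t to the
 * by-address AVL tree and merges its range into q_exts_by_addr. If the
 * block is already queued, the duplicate is dropped and its contribution
 * to scn_bytes_pending is reversed.
 */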
3847static void
3848scan_io_queue_insert_impl(dsl_scan_io_queue_t *queue, scan_io_t *sio)
3849{
3850	avl_index_t idx;
3851	int64_t asize = SIO_GET_ASIZE(sio);
3852	dsl_scan_t *scn = queue->q_scn;
3853
3854	ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock));
3855
3856	if (avl_find(&queue->q_sios_by_addr, sio, &idx) != NULL) {
3857		/* block is already scheduled for reading */
3858		atomic_add_64(&scn->scn_bytes_pending, -asize);
3859		sio_free(sio);
3860		return;
3861	}
3862	avl_insert(&queue->q_sios_by_addr, sio, idx);
3863	queue->q_sio_memused += SIO_GET_MUSED(sio);
3864	range_tree_add(queue->q_exts_by_addr, SIO_GET_OFFSET(sio), asize);
3865}
3866
3867/*
3868 * Given all the info we got from our metadata scanning process, we
3869 * construct a scan_io_t and insert it into the scan sorting queue. The
3870 * I/O must already be suitable for us to process. This is controlled
3871 * by dsl_scan_enqueue().
3872 */
3873static void
3874scan_io_queue_insert(dsl_scan_io_queue_t *queue, const blkptr_t *bp, int dva_i,
3875    int zio_flags, const zbookmark_phys_t *zb)
3876{
3877	dsl_scan_t *scn = queue->q_scn;
3878	scan_io_t *sio = sio_alloc(BP_GET_NDVAS(bp));
3879
3880	ASSERT0(BP_IS_GANG(bp));
3881	ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock));
3882
3883	bp2sio(bp, sio, dva_i);
3884	sio->sio_flags = zio_flags;
3885	sio->sio_zb = *zb;
3886
3887	/*
3888	 * Increment the bytes pending counter now so that we can't
3889	 * get an integer underflow in case the worker processes the
3890	 * zio before we get to incrementing this counter.
3891	 */
3892	atomic_add_64(&scn->scn_bytes_pending, SIO_GET_ASIZE(sio));
3893
3894	scan_io_queue_insert_impl(queue, sio);
3895}
3896
3897/*
3898 * Given a set of I/O parameters as discovered by the metadata traversal
3899 * process, attempts to place the I/O into the sorted queues (if allowed),
3900 * or immediately executes the I/O.
3901 */
3902static void
3903dsl_scan_enqueue(dsl_pool_t *dp, const blkptr_t *bp, int zio_flags,
3904    const zbookmark_phys_t *zb)
3905{
3906	spa_t *spa = dp->dp_spa;
3907
3908	ASSERT(!BP_IS_EMBEDDED(bp));
3909
3910	/*
3911	 * Gang blocks are hard to issue sequentially, so we just issue them
3912	 * here immediately instead of queuing them.
3913	 */
3914	if (!dp->dp_scan->scn_is_sorted || BP_IS_GANG(bp)) {
3915		scan_exec_io(dp, bp, zio_flags, zb, NULL);
3916		return;
3917	}
3918
3919	for (int i = 0; i < BP_GET_NDVAS(bp); i++) {
3920		dva_t dva;
3921		vdev_t *vdev;
3922
3923		dva = bp->blk_dva[i];
3924		vdev = vdev_lookup_top(spa, DVA_GET_VDEV(&dva));
3925		ASSERT(vdev != NULL);
3926
3927		mutex_enter(&vdev->vdev_scan_io_queue_lock);
3928		if (vdev->vdev_scan_io_queue == NULL)
3929			vdev->vdev_scan_io_queue = scan_io_queue_create(vdev);
3930		ASSERT(dp->dp_scan != NULL);
3931		scan_io_queue_insert(vdev->vdev_scan_io_queue, bp,
3932		    i, zio_flags, zb);
3933		mutex_exit(&vdev->vdev_scan_io_queue_lock);
3934	}
3935}
3936
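/*
 * Scrub/resilver callback invoked for every block pointer visited by the
 * scan. Blocks outside the scan's txg range are only counted; otherwise
 * the examined-byte counters are updated and, if I/O is needed, the block
 * is handed to dsl_scan_enqueue().
 */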
3937static int
3938dsl_scan_scrub_cb(dsl_pool_t *dp,
3939    const blkptr_t *bp, const zbookmark_phys_t *zb)
3940{
3941	dsl_scan_t *scn = dp->dp_scan;
3942	spa_t *spa = dp->dp_spa;
3943	uint64_t phys_birth = BP_PHYSICAL_BIRTH(bp);
3944	size_t psize = BP_GET_PSIZE(bp);
3945	boolean_t needs_io = B_FALSE;
3946	int zio_flags = ZIO_FLAG_SCAN_THREAD | ZIO_FLAG_RAW | ZIO_FLAG_CANFAIL;
3947
3949	if (phys_birth <= scn->scn_phys.scn_min_txg ||
3950	    phys_birth >= scn->scn_phys.scn_max_txg) {
3951		count_block(scn, dp->dp_blkstats, bp);
3952		return (0);
3953	}
3954
3955	/* Embedded BP's have phys_birth==0, so we reject them above. */
3956	ASSERT(!BP_IS_EMBEDDED(bp));
3957
3958	ASSERT(DSL_SCAN_IS_SCRUB_RESILVER(scn));
3959	if (scn->scn_phys.scn_func == POOL_SCAN_SCRUB) {
3960		zio_flags |= ZIO_FLAG_SCRUB;
3961		needs_io = B_TRUE;
3962	} else {
3963		ASSERT3U(scn->scn_phys.scn_func, ==, POOL_SCAN_RESILVER);
3964		zio_flags |= ZIO_FLAG_RESILVER;
3965		needs_io = B_FALSE;
3966	}
3967
3968	/* If it's an intent log block, failure is expected. */
3969	if (zb->zb_level == ZB_ZIL_LEVEL)
3970		zio_flags |= ZIO_FLAG_SPECULATIVE;
3971
3972	for (int d = 0; d < BP_GET_NDVAS(bp); d++) {
3973		const dva_t *dva = &bp->blk_dva[d];
3974
3975		/*
3976		 * Keep track of how much data we've examined so that
3977		 * zpool(8) status can make useful progress reports.
3978		 */
3979		scn->scn_phys.scn_examined += DVA_GET_ASIZE(dva);
3980		spa->spa_scan_pass_exam += DVA_GET_ASIZE(dva);
3981
3982		/* if it's a resilver, this may not be in the target range */
3983		if (!needs_io)
3984			needs_io = dsl_scan_need_resilver(spa, dva, psize,
3985			    phys_birth);
3986	}
3987
3988	if (needs_io && !zfs_no_scrub_io) {
3989		dsl_scan_enqueue(dp, bp, zio_flags, zb);
3990	} else {
3991		count_block(scn, dp->dp_blkstats, bp);
3992	}
3993
3994	/* do not relocate this block */
3995	return (0);
3996}
3997
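/*
 * Completion callback for scrub/resilver reads issued by scan_exec_io():
 * releases the in-flight byte accounting (global or per-queue), wakes any
 * throttled issuers and records verification errors.
 */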
3998static void
3999dsl_scan_scrub_done(zio_t *zio)
4000{
4001	spa_t *spa = zio->io_spa;
4002	blkptr_t *bp = zio->io_bp;
4003	dsl_scan_io_queue_t *queue = zio->io_private;
4004
4005	abd_free(zio->io_abd);
4006
4007	if (queue == NULL) {
4008		mutex_enter(&spa->spa_scrub_lock);
4009		ASSERT3U(spa->spa_scrub_inflight, >=, BP_GET_PSIZE(bp));
4010		spa->spa_scrub_inflight -= BP_GET_PSIZE(bp);
4011		cv_broadcast(&spa->spa_scrub_io_cv);
4012		mutex_exit(&spa->spa_scrub_lock);
4013	} else {
4014		mutex_enter(&queue->q_vd->vdev_scan_io_queue_lock);
4015		ASSERT3U(queue->q_inflight_bytes, >=, BP_GET_PSIZE(bp));
4016		queue->q_inflight_bytes -= BP_GET_PSIZE(bp);
4017		cv_broadcast(&queue->q_zio_cv);
4018		mutex_exit(&queue->q_vd->vdev_scan_io_queue_lock);
4019	}
4020
4021	if (zio->io_error && (zio->io_error != ECKSUM ||
4022	    !(zio->io_flags & ZIO_FLAG_SPECULATIVE))) {
4023		atomic_inc_64(&spa->spa_dsl_pool->dp_scan->scn_phys.scn_errors);
4024	}
4025}
4026
4027/*
4028 * Given a scanning zio's information, executes the zio. The zio need
4029 * not be sortable; this function simply executes the zio, no matter
4030 * what it is. The optional queue argument allows the caller to request
4031 * per-top-level-vdev I/O rate limiting instead of the legacy global
4032 * limiting.
4033 */
4034static void
4035scan_exec_io(dsl_pool_t *dp, const blkptr_t *bp, int zio_flags,
4036    const zbookmark_phys_t *zb, dsl_scan_io_queue_t *queue)
4037{
4038	spa_t *spa = dp->dp_spa;
4039	dsl_scan_t *scn = dp->dp_scan;
4040	size_t size = BP_GET_PSIZE(bp);
4041	abd_t *data = abd_alloc_for_io(size, B_FALSE);
4042
4043	if (queue == NULL) {
4044		ASSERT3U(scn->scn_maxinflight_bytes, >, 0);
4045		mutex_enter(&spa->spa_scrub_lock);
4046		while (spa->spa_scrub_inflight >= scn->scn_maxinflight_bytes)
4047			cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
4048		spa->spa_scrub_inflight += BP_GET_PSIZE(bp);
4049		mutex_exit(&spa->spa_scrub_lock);
4050	} else {
4051		kmutex_t *q_lock = &queue->q_vd->vdev_scan_io_queue_lock;
4052
4053		ASSERT3U(queue->q_maxinflight_bytes, >, 0);
4054		mutex_enter(q_lock);
4055		while (queue->q_inflight_bytes >= queue->q_maxinflight_bytes)
4056			cv_wait(&queue->q_zio_cv, q_lock);
4057		queue->q_inflight_bytes += BP_GET_PSIZE(bp);
4058		mutex_exit(q_lock);
4059	}
4060
4061	count_block(scn, dp->dp_blkstats, bp);
4062	zio_nowait(zio_read(scn->scn_zio_root, spa, bp, data, size,
4063	    dsl_scan_scrub_done, queue, ZIO_PRIORITY_SCRUB, zio_flags, zb));
4064}
4065
4066/*
4067 * This is the primary extent sorting algorithm. We balance two parameters:
4068 * 1) how many bytes of I/O are in an extent
4069 * 2) how well the extent is filled with I/O (as a fraction of its total size)
4070 * Since we allow extents to have gaps between their constituent I/Os, it's
4071 * possible to have a fairly large extent that contains the same amount of
4072 * I/O bytes as a much smaller extent, which just packs the I/O more tightly.
4073 * The algorithm sorts based on a score calculated from the extent's size,
4074 * the relative fill volume (in %) and a "fill weight" parameter that controls
4075 * the split between whether we prefer larger extents or more well populated
4076 * extents:
4077 *
4078 * SCORE = FILL_IN_BYTES + (FILL_IN_PERCENT * FILL_IN_BYTES * FILL_WEIGHT)
4079 *
4080 * Example:
4081 * 1) assume extsz = 64 MiB
4082 * 2) assume fill = 32 MiB (extent is half full)
4083 * 3) assume fill_weight = 3
4084 * 4)	SCORE = 32M + (((32M * 100) / 64M) * 3 * 32M) / 100
4085 *	SCORE = 32M + (50 * 3 * 32M) / 100
4086 *	SCORE = 32M + (4800M / 100)
4087 *	SCORE = 32M + 48M
4088 *	         ^     ^
4089 *	         |     +--- final total relative fill-based score
4090 *	         +--------- final total fill-based score
4091 *	SCORE = 80M
4092 *
4093 * As can be seen, at fill_weight=3, the algorithm is slightly biased towards
4094 * extents that are more completely filled (in a 3:2 ratio) vs just larger.
4095 * Note that as an optimization, we replace multiplication and division by
4096 * 100 with bitshifting by 7 (which effectively multiplies and divides by 128).
4097 */
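/*
 * For reference, the same example worked through the bitshifted form used
 * by ext_size_compare() below (still assuming fill_weight = 3):
 *	(32M << 7) / 64M = 64		(fill fraction scaled by 128)
 *	(64 * 3 * 32M) >> 7 = 48M	(relative fill-based score)
 *	SCORE = 32M + 48M = 80M
 */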
4098static int
4099ext_size_compare(const void *x, const void *y)
4100{
4101	const range_seg_gap_t *rsa = x, *rsb = y;
4102
4103	uint64_t sa = rsa->rs_end - rsa->rs_start;
4104	uint64_t sb = rsb->rs_end - rsb->rs_start;
4105	uint64_t score_a, score_b;
4106
4107	score_a = rsa->rs_fill + ((((rsa->rs_fill << 7) / sa) *
4108	    fill_weight * rsa->rs_fill) >> 7);
4109	score_b = rsb->rs_fill + ((((rsb->rs_fill << 7) / sb) *
4110	    fill_weight * rsb->rs_fill) >> 7);
4111
4112	if (score_a > score_b)
4113		return (-1);
4114	if (score_a == score_b) {
4115		if (rsa->rs_start < rsb->rs_start)
4116			return (-1);
4117		if (rsa->rs_start == rsb->rs_start)
4118			return (0);
4119		return (1);
4120	}
4121	return (1);
4122}
4123
4124/*
4125 * Comparator for the q_sios_by_addr tree. Sorting is simply performed
4126 * based on LBA-order (from lowest to highest).
4127 */
4128static int
4129sio_addr_compare(const void *x, const void *y)
4130{
4131	const scan_io_t *a = x, *b = y;
4132
4133	return (TREE_CMP(SIO_GET_OFFSET(a), SIO_GET_OFFSET(b)));
4134}
4135
4136/* I/O queues are created on demand. */
4137static dsl_scan_io_queue_t *
4138scan_io_queue_create(vdev_t *vd)
4139{
4140	dsl_scan_t *scn = vd->vdev_spa->spa_dsl_pool->dp_scan;
4141	dsl_scan_io_queue_t *q = kmem_zalloc(sizeof (*q), KM_SLEEP);
4142
4143	q->q_scn = scn;
4144	q->q_vd = vd;
4145	q->q_sio_memused = 0;
4146	cv_init(&q->q_zio_cv, NULL, CV_DEFAULT, NULL);
4147	q->q_exts_by_addr = range_tree_create_impl(&rt_btree_ops, RANGE_SEG_GAP,
4148	    &q->q_exts_by_size, 0, 0, ext_size_compare, zfs_scan_max_ext_gap);
4149	avl_create(&q->q_sios_by_addr, sio_addr_compare,
4150	    sizeof (scan_io_t), offsetof(scan_io_t, sio_nodes.sio_addr_node));
4151
4152	return (q);
4153}
4154
4155/*
4156 * Destroys a scan queue and all segments and scan_io_t's contained in it.
4157 * No further execution of I/O occurs; anything pending in the queue is
4158 * simply freed without being executed.
4159 */
4160void
4161dsl_scan_io_queue_destroy(dsl_scan_io_queue_t *queue)
4162{
4163	dsl_scan_t *scn = queue->q_scn;
4164	scan_io_t *sio;
4165	void *cookie = NULL;
4166	int64_t bytes_dequeued = 0;
4167
4168	ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock));
4169
4170	while ((sio = avl_destroy_nodes(&queue->q_sios_by_addr, &cookie)) !=
4171	    NULL) {
4172		ASSERT(range_tree_contains(queue->q_exts_by_addr,
4173		    SIO_GET_OFFSET(sio), SIO_GET_ASIZE(sio)));
4174		bytes_dequeued += SIO_GET_ASIZE(sio);
4175		queue->q_sio_memused -= SIO_GET_MUSED(sio);
4176		sio_free(sio);
4177	}
4178
4179	ASSERT0(queue->q_sio_memused);
4180	atomic_add_64(&scn->scn_bytes_pending, -bytes_dequeued);
4181	range_tree_vacate(queue->q_exts_by_addr, NULL, queue);
4182	range_tree_destroy(queue->q_exts_by_addr);
4183	avl_destroy(&queue->q_sios_by_addr);
4184	cv_destroy(&queue->q_zio_cv);
4185
4186	kmem_free(queue, sizeof (*queue));
4187}
4188
4189/*
4190 * Properly transfers a dsl_scan_io_queue_t from `svd' to `tvd'. This is
4191 * called on behalf of vdev_top_transfer when creating or destroying
4192 * a mirror vdev due to zpool attach/detach.
4193 */
4194void
4195dsl_scan_io_queue_vdev_xfer(vdev_t *svd, vdev_t *tvd)
4196{
4197	mutex_enter(&svd->vdev_scan_io_queue_lock);
4198	mutex_enter(&tvd->vdev_scan_io_queue_lock);
4199
4200	VERIFY3P(tvd->vdev_scan_io_queue, ==, NULL);
4201	tvd->vdev_scan_io_queue = svd->vdev_scan_io_queue;
4202	svd->vdev_scan_io_queue = NULL;
4203	if (tvd->vdev_scan_io_queue != NULL)
4204		tvd->vdev_scan_io_queue->q_vd = tvd;
4205
4206	mutex_exit(&tvd->vdev_scan_io_queue_lock);
4207	mutex_exit(&svd->vdev_scan_io_queue_lock);
4208}
4209
4210static void
4211scan_io_queues_destroy(dsl_scan_t *scn)
4212{
4213	vdev_t *rvd = scn->scn_dp->dp_spa->spa_root_vdev;
4214
4215	for (uint64_t i = 0; i < rvd->vdev_children; i++) {
4216		vdev_t *tvd = rvd->vdev_child[i];
4217
4218		mutex_enter(&tvd->vdev_scan_io_queue_lock);
4219		if (tvd->vdev_scan_io_queue != NULL)
4220			dsl_scan_io_queue_destroy(tvd->vdev_scan_io_queue);
4221		tvd->vdev_scan_io_queue = NULL;
4222		mutex_exit(&tvd->vdev_scan_io_queue_lock);
4223	}
4224}
4225
4226static void
4227dsl_scan_freed_dva(spa_t *spa, const blkptr_t *bp, int dva_i)
4228{
4229	dsl_pool_t *dp = spa->spa_dsl_pool;
4230	dsl_scan_t *scn = dp->dp_scan;
4231	vdev_t *vdev;
4232	kmutex_t *q_lock;
4233	dsl_scan_io_queue_t *queue;
4234	scan_io_t *srch_sio, *sio;
4235	avl_index_t idx;
4236	uint64_t start, size;
4237
4238	vdev = vdev_lookup_top(spa, DVA_GET_VDEV(&bp->blk_dva[dva_i]));
4239	ASSERT(vdev != NULL);
4240	q_lock = &vdev->vdev_scan_io_queue_lock;
4241	queue = vdev->vdev_scan_io_queue;
4242
4243	mutex_enter(q_lock);
4244	if (queue == NULL) {
4245		mutex_exit(q_lock);
4246		return;
4247	}
4248
4249	srch_sio = sio_alloc(BP_GET_NDVAS(bp));
4250	bp2sio(bp, srch_sio, dva_i);
4251	start = SIO_GET_OFFSET(srch_sio);
4252	size = SIO_GET_ASIZE(srch_sio);
4253
4254	/*
4255	 * We can find the zio in two states:
4256	 * 1) Cold, just sitting in the queue of zio's to be issued at
4257	 *	some point in the future. In this case, all we do is
4258	 *	remove the zio from the q_sios_by_addr tree, decrement
4259	 *	its data volume from the containing range_seg_t and
4260	 *	resort the q_exts_by_size tree to reflect that the
4261	 *	range_seg_t has lost some of its 'fill'. We don't shorten
4262	 *	the range_seg_t - this is usually rare enough not to be
4263	 *	worth the extra hassle of trying to keep track of precise
4264	 *	extent boundaries.
4265	 * 2) Hot, where the zio is currently in-flight in
4266	 *	dsl_scan_issue_ios. In this case, we can't simply
4267	 *	reach in and stop the in-flight zio's, so we instead
4268	 *	block the caller. Eventually, dsl_scan_issue_ios will
4269	 *	be done with issuing the zio's it gathered and will
4270	 *	signal us.
4271	 */
4272	sio = avl_find(&queue->q_sios_by_addr, srch_sio, &idx);
4273	sio_free(srch_sio);
4274
4275	if (sio != NULL) {
4276		int64_t asize = SIO_GET_ASIZE(sio);
4277		blkptr_t tmpbp;
4278
4279		/* Got it while it was cold in the queue */
4280		ASSERT3U(start, ==, SIO_GET_OFFSET(sio));
4281		ASSERT3U(size, ==, asize);
4282		avl_remove(&queue->q_sios_by_addr, sio);
4283		queue->q_sio_memused -= SIO_GET_MUSED(sio);
4284
4285		ASSERT(range_tree_contains(queue->q_exts_by_addr, start, size));
4286		range_tree_remove_fill(queue->q_exts_by_addr, start, size);
4287
4288		/*
4289		 * We only update scn_bytes_pending in the cold path,
4290		 * otherwise it will already have been accounted for as
4291		 * part of the zio's execution.
4292		 */
4293		atomic_add_64(&scn->scn_bytes_pending, -asize);
4294
4295		/* count the block as though we issued it */
4296		sio2bp(sio, &tmpbp);
4297		count_block(scn, dp->dp_blkstats, &tmpbp);
4298
4299		sio_free(sio);
4300	}
4301	mutex_exit(q_lock);
4302}
4303
4304/*
4305 * Callback invoked when a zio_free() zio is executing. This needs to be
4306 * intercepted to prevent the zio from deallocating a particular portion
4307 * of disk space that could then be reallocated and overwritten while we
4308 * still have it queued up for processing.
4309 */
4310void
4311dsl_scan_freed(spa_t *spa, const blkptr_t *bp)
4312{
4313	dsl_pool_t *dp = spa->spa_dsl_pool;
4314	dsl_scan_t *scn = dp->dp_scan;
4315
4316	ASSERT(!BP_IS_EMBEDDED(bp));
4317	ASSERT(scn != NULL);
4318	if (!dsl_scan_is_running(scn))
4319		return;
4320
4321	for (int i = 0; i < BP_GET_NDVAS(bp); i++)
4322		dsl_scan_freed_dva(spa, bp, i);
4323}
4324
4325/*
4326 * Check whether a vdev needs resilvering (non-empty DTL); if so and a
4327 * resilver has not already started, start one. Otherwise, only restart if the
4328 * max txg in the DTL range is greater than the max txg in the current scan.
4329 * If the DTL max is less than the scan max, the vdev has not missed any new
4330 * data since the resilver started, so a restart is not needed.
4331 */
4332void
4333dsl_scan_assess_vdev(dsl_pool_t *dp, vdev_t *vd)
4334{
4335	uint64_t min, max;
4336
4337	if (!vdev_resilver_needed(vd, &min, &max))
4338		return;
4339
4340	if (!dsl_scan_resilvering(dp)) {
4341		spa_async_request(dp->dp_spa, SPA_ASYNC_RESILVER);
4342		return;
4343	}
4344
4345	if (max <= dp->dp_scan->scn_phys.scn_max_txg)
4346		return;
4347
4348	/* restart is needed, check if it can be deferred */
4349	if (spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_RESILVER_DEFER))
4350		vdev_defer_resilver(vd);
4351	else
4352		spa_async_request(dp->dp_spa, SPA_ASYNC_RESILVER);
4353}
4354
4355/* BEGIN CSTYLED */
4356ZFS_MODULE_PARAM(zfs, zfs_, scan_vdev_limit, ULONG, ZMOD_RW,
4357	"Max bytes in flight per leaf vdev for scrubs and resilvers");
4358
4359ZFS_MODULE_PARAM(zfs, zfs_, scrub_min_time_ms, INT, ZMOD_RW,
4360	"Min millisecs to scrub per txg");
4361
4362ZFS_MODULE_PARAM(zfs, zfs_, obsolete_min_time_ms, INT, ZMOD_RW,
4363	"Min millisecs to obsolete per txg");
4364
4365ZFS_MODULE_PARAM(zfs, zfs_, free_min_time_ms, INT, ZMOD_RW,
4366	"Min millisecs to free per txg");
4367
4368ZFS_MODULE_PARAM(zfs, zfs_, resilver_min_time_ms, INT, ZMOD_RW,
4369	"Min millisecs to resilver per txg");
4370
4371ZFS_MODULE_PARAM(zfs, zfs_, scan_suspend_progress, INT, ZMOD_RW,
4372	"Set to prevent scans from progressing");
4373
4374ZFS_MODULE_PARAM(zfs, zfs_, no_scrub_io, INT, ZMOD_RW,
4375	"Set to disable scrub I/O");
4376
4377ZFS_MODULE_PARAM(zfs, zfs_, no_scrub_prefetch, INT, ZMOD_RW,
4378	"Set to disable scrub prefetching");
4379
4380ZFS_MODULE_PARAM(zfs, zfs_, async_block_max_blocks, ULONG, ZMOD_RW,
4381	"Max number of blocks freed in one txg");
4382
4383ZFS_MODULE_PARAM(zfs, zfs_, max_async_dedup_frees, ULONG, ZMOD_RW,
4384	"Max number of dedup blocks freed in one txg");
4385
4386ZFS_MODULE_PARAM(zfs, zfs_, free_bpobj_enabled, INT, ZMOD_RW,
4387	"Enable processing of the free_bpobj");
4388
4389ZFS_MODULE_PARAM(zfs, zfs_, scan_mem_lim_fact, INT, ZMOD_RW,
4390	"Fraction of RAM for scan hard limit");
4391
4392ZFS_MODULE_PARAM(zfs, zfs_, scan_issue_strategy, INT, ZMOD_RW,
4393	"IO issuing strategy during scrubbing. "
4394	"0 = default, 1 = LBA, 2 = size");
4395
4396ZFS_MODULE_PARAM(zfs, zfs_, scan_legacy, INT, ZMOD_RW,
4397	"Scrub using legacy non-sequential method");
4398
4399ZFS_MODULE_PARAM(zfs, zfs_, scan_checkpoint_intval, INT, ZMOD_RW,
4400	"Scan progress on-disk checkpointing interval");
4401
4402ZFS_MODULE_PARAM(zfs, zfs_, scan_max_ext_gap, ULONG, ZMOD_RW,
4403	"Max gap in bytes between sequential scrub / resilver I/Os");
4404
4405ZFS_MODULE_PARAM(zfs, zfs_, scan_mem_lim_soft_fact, INT, ZMOD_RW,
4406	"Fraction of hard limit used as soft limit");
4407
4408ZFS_MODULE_PARAM(zfs, zfs_, scan_strict_mem_lim, INT, ZMOD_RW,
4409	"Tunable to attempt to reduce lock contention");
4410
4411ZFS_MODULE_PARAM(zfs, zfs_, scan_fill_weight, INT, ZMOD_RW,
4412	"Tunable to adjust bias towards more filled segments during scans");
4413
4414ZFS_MODULE_PARAM(zfs, zfs_, resilver_disable_defer, INT, ZMOD_RW,
4415	"Process all resilvers immediately");
4416/* END CSTYLED */
4417