1/*
2 * CDDL HEADER START
3 *
4 * This file and its contents are supplied under the terms of the
5 * Common Development and Distribution License ("CDDL"), version 1.0.
6 * You may only use this file in accordance with the terms of version
7 * 1.0 of the CDDL.
8 *
9 * A full copy of the text of the CDDL should have accompanied this
10 * source.  A copy of the CDDL is also available via the Internet at
11 * http://www.illumos.org/license/CDDL.
12 *
13 * CDDL HEADER END
14 */
15
16/*
17 * Copyright (c) 2014, 2017 by Delphix. All rights reserved.
18 * Copyright (c) 2019, loli10K <ezomori.nozomu@gmail.com>. All rights reserved.
19 * Copyright (c) 2014, 2020 by Delphix. All rights reserved.
20 */
21
22#include <sys/zfs_context.h>
23#include <sys/spa.h>
24#include <sys/spa_impl.h>
25#include <sys/vdev_impl.h>
26#include <sys/fs/zfs.h>
27#include <sys/zio.h>
28#include <sys/zio_checksum.h>
29#include <sys/metaslab.h>
30#include <sys/dmu.h>
31#include <sys/vdev_indirect_mapping.h>
32#include <sys/dmu_tx.h>
33#include <sys/dsl_synctask.h>
34#include <sys/zap.h>
35#include <sys/abd.h>
36#include <sys/zthr.h>
37
38/*
39 * An indirect vdev corresponds to a vdev that has been removed.  Since
40 * we cannot rewrite block pointers of snapshots, etc., we keep a
41 * mapping from old location on the removed device to the new location
42 * on another device in the pool and use this mapping whenever we need
43 * to access the DVA.  Unfortunately, this mapping did not respect
44 * logical block boundaries when it was first created, and so a DVA on
45 * this indirect vdev may be "split" into multiple sections that each
46 * map to a different location.  As a consequence, not all DVAs can be
47 * translated to an equivalent new DVA.  Instead we must provide a
48 * "vdev_remap" operation that executes a callback on each contiguous
49 * segment of the new location.  This function is used in multiple ways:
50 *
51 *  - I/Os to this vdev use the callback to determine where the
52 *    data is now located, and issue child I/Os for each segment's new
53 *    location.
54 *
55 *  - frees and claims to this vdev use the callback to free or claim
56 *    each mapped segment.  (Note that we don't actually need to claim
57 *    log blocks on indirect vdevs, because we don't allocate to
58 *    removing vdevs.  However, zdb uses zio_claim() for its leak
59 *    detection.)
60 */
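
/*
 * For example, suppose a 0x6000-byte DVA at offset 0x10000 on the removed
 * vdev is covered by two mapping entries (hypothetical values):
 *
 *	src 0x10000, size 0x4000  ->  vdev 3, offset 0x80000
 *	src 0x14000, size 0x8000  ->  vdev 5, offset 0x20000
 *
 * The DVA cannot be rewritten as a single new DVA.  Instead, the vdev_remap
 * operation invokes its callback once per contiguous segment of the new
 * location:
 *
 *	func(split_offset 0x0000, vdev 3, offset 0x80000, size 0x4000, arg)
 *	func(split_offset 0x4000, vdev 5, offset 0x20000, size 0x2000, arg)
 *
 * (only the first 0x2000 bytes of the second entry are covered by this
 * DVA, which ends inside that entry).
 */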
61
62/*
63 * "Big theory statement" for how we mark blocks obsolete.
64 *
65 * When a block on an indirect vdev is freed or remapped, a section of
66 * that vdev's mapping may no longer be referenced (aka "obsolete").  We
67 * keep track of how much of each mapping entry is obsolete.  When
68 * an entry becomes completely obsolete, we can remove it, thus reducing
69 * the memory used by the mapping.  The complete picture of obsolescence
70 * is given by the following data structures, described below:
71 *  - the entry-specific obsolete count
72 *  - the vdev-specific obsolete spacemap
73 *  - the pool-specific obsolete bpobj
74 *
75 * == On disk data structures used ==
76 *
77 * We track the obsolete space for the pool using several objects.  Each
78 * of these objects is created on demand and freed when no longer
79 * needed, and is assumed to be empty if it does not exist.
80 * SPA_FEATURE_OBSOLETE_COUNTS includes the count of these objects.
81 *
82 *  - Each vic_mapping_object (associated with an indirect vdev) can
83 *    have a vimp_counts_object.  This is an array of uint32_t's
84 *    with the same number of entries as the vic_mapping_object.  When
85 *    the mapping is condensed, entries from the vic_obsolete_sm_object
86 *    (see below) are folded into the counts.  Therefore, each
87 *    obsolete_counts entry tells us the number of bytes in the
88 *    corresponding mapping entry that were not referenced when the
89 *    mapping was last condensed.
90 *
91 *  - Each indirect or removing vdev can have a vic_obsolete_sm_object.
92 *    This is a space map containing an alloc entry for every DVA that
93 *    has been obsoleted since the last time this indirect vdev was
94 *    condensed.  We use this object in order to improve performance
95 *    when marking a DVA as obsolete.  Instead of modifying an arbitrary
96 *    offset of the vimp_counts_object, we only need to append an entry
97 *    to the end of this object.  When a DVA becomes obsolete, it is
98 *    added to the obsolete space map.  This happens when the DVA is
99 *    freed, remapped and not referenced by a snapshot, or the last
100 *    snapshot referencing it is destroyed.
101 *
102 *  - Each dataset can have a ds_remap_deadlist object.  This is a
103 *    deadlist object containing all blocks that were remapped in this
104 *    dataset but referenced in a previous snapshot.  Blocks can *only*
105 *    appear on this list if they were remapped (dsl_dataset_block_remapped);
106 *    blocks that were killed in a head dataset are put on the normal
107 *    ds_deadlist and marked obsolete when they are freed.
108 *
109 *  - The pool can have a dp_obsolete_bpobj.  This is a list of blocks
110 *    in the pool that need to be marked obsolete.  When a snapshot is
111 *    destroyed, we move some of the ds_remap_deadlist to the obsolete
112 *    bpobj (see dsl_destroy_snapshot_handle_remaps()).  We then
113 *    asynchronously process the obsolete bpobj, moving its entries to
114 *    the specific vdevs' obsolete space maps.
115 *
116 * == Summary of how we mark blocks as obsolete ==
117 *
118 * - When freeing a block: if any DVA is on an indirect vdev, append to
119 *   vic_obsolete_sm_object.
120 * - When remapping a block, add dva to ds_remap_deadlist (if prev snap
121 *   references; otherwise append to vic_obsolete_sm_object).
122 * - When freeing a snapshot: move parts of ds_remap_deadlist to
123 *   dp_obsolete_bpobj (same algorithm as ds_deadlist).
124 * - When syncing the spa: process dp_obsolete_bpobj, moving ranges to
125 *   individual vdev's vic_obsolete_sm_object.
126 */
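
/*
 * As a concrete (hypothetical) example of the flow above: a block with a
 * DVA on indirect vdev 2 is freed while no snapshot references it, so an
 * <offset, size> record is appended to vdev 2's vic_obsolete_sm_object in
 * that txg.  If instead the block had been remapped while an older
 * snapshot still referenced it, it would first land on the dataset's
 * ds_remap_deadlist; destroying that snapshot would move it to
 * dp_obsolete_bpobj, and a later spa sync would drain it into vdev 2's
 * obsolete space map.  Either way, the bytes are only folded into the
 * per-entry obsolete counts the next time vdev 2's mapping is condensed.
 */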
127
128/*
129 * "Big theory statement" for how we condense indirect vdevs.
130 *
131 * Condensing an indirect vdev's mapping is the process of determining
132 * the precise counts of obsolete space for each mapping entry (by
133 * integrating the obsolete spacemap into the obsolete counts) and
134 * writing out a new mapping that contains only referenced entries.
135 *
136 * We condense a vdev when we expect the mapping to shrink (see
137 * vdev_indirect_should_condense()), but only perform one condense at a
138 * time to limit the memory usage.  In addition, we use a separate
139 * open-context thread (spa_condense_indirect_thread) to incrementally
140 * create the new mapping object in a way that minimizes the impact on
141 * the rest of the system.
142 *
143 * == Generating a new mapping ==
144 *
145 * To generate a new mapping, we follow these steps:
146 *
147 * 1. Save the old obsolete space map and create a new mapping object
148 *    (see spa_condense_indirect_start_sync()).  This initializes the
149 *    spa_condensing_indirect_phys with the "previous obsolete space map",
150 *    which is now read only.  Newly obsolete DVAs will be added to a
151 *    new (initially empty) obsolete space map, and will not be
152 *    considered as part of this condense operation.
153 *
154 * 2. Construct in memory the precise counts of obsolete space for each
155 *    mapping entry, by incorporating the obsolete space map into the
156 *    counts.  (See vdev_indirect_mapping_load_obsolete_{counts,spacemap}().)
157 *
158 * 3. Iterate through each mapping entry, writing to the new mapping any
159 *    entries that are not completely obsolete (i.e. which don't have
160 *    obsolete count == mapping length).  (See
161 *    spa_condense_indirect_generate_new_mapping().)
162 *
163 * 4. Destroy the old mapping object and switch over to the new one
164 *    (spa_condense_indirect_complete_sync).
165 *
166 * == Restarting from failure ==
167 *
168 * To restart the condense when we import/open the pool, we must start
169 * at the 2nd step above: reconstruct the precise counts in memory,
170 * based on the space map + counts.  Then in the 3rd step, we start
171 * iterating where we left off: at vimp_max_offset of the new mapping
172 * object.
173 */
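
/*
 * Sketched with the helpers used later in this file (variable names are
 * illustrative), the restart path for steps 2 and 3 is roughly:
 *
 *	counts = vdev_indirect_mapping_load_obsolete_counts(old_mapping);
 *	vdev_indirect_mapping_load_obsolete_spacemap(old_mapping, counts,
 *	    prev_obsolete_sm);
 *	max_offset = vdev_indirect_mapping_max_offset(new_mapping);
 *	entry = vdev_indirect_mapping_entry_for_offset_or_next(old_mapping,
 *	    max_offset);
 *	start_index = (entry == NULL) ? UINT64_MAX :
 *	    entry - old_mapping->vim_entries;
 *
 * i.e. we resume iterating the old mapping at the first entry whose
 * source range has not yet been written to the new mapping.
 */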
174
175static int zfs_condense_indirect_vdevs_enable = B_TRUE;
176
177/*
178 * Condense if at least this percent of the bytes in the mapping is
179 * obsolete.  With the default of 25%, the amount of space mapped
180 * will be reduced to 1% of its original size after at most 16
181 * condenses.  Higher values will condense less often (causing less
182 * i/o); lower values will reduce the mapping size more quickly.
183 */
184static uint_t zfs_condense_indirect_obsolete_pct = 25;
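
/*
 * The "1% after at most 16 condenses" figure above follows from each
 * condense removing at least the obsolete fraction of the mapping: with
 * the 25% threshold the mapping shrinks to at most 75% of its previous
 * size per condense, and 0.75^16 ~= 0.01.
 */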
185
186/*
187 * Condense if the obsolete space map takes up more than this amount of
188 * space on disk (logically).  This limits the amount of disk space
189 * consumed by the obsolete space map; the default of 1GB is small enough
190 * that we typically don't mind "wasting" it.
191 */
192static uint64_t zfs_condense_max_obsolete_bytes = 1024 * 1024 * 1024;
193
194/*
195 * Don't bother condensing if the mapping uses less than this amount of
196 * memory.  The default of 128KB is considered a "trivial" amount of
197 * memory and not worth reducing.
198 */
199static uint64_t zfs_condense_min_mapping_bytes = 128 * 1024;
200
201/*
202 * This is used by the test suite so that it can ensure that certain
203 * actions happen while in the middle of a condense (which might otherwise
204 * complete too quickly).  If used to reduce the performance impact of
205 * condensing in production, a maximum value of 1 should be sufficient.
206 */
207static uint_t zfs_condense_indirect_commit_entry_delay_ms = 0;
208
209/*
210 * If an indirect split block contains more than this many possible unique
211 * combinations when being reconstructed, consider it too computationally
 * expensive to check them all.  Instead, try at most this many
 * randomly-selected combinations each time the block is accessed.  This
 * allows all segment copies to participate fairly in the reconstruction
 * when all combinations cannot be checked and prevents repeated use of
 * one bad copy.
216 */
217uint_t zfs_reconstruct_indirect_combinations_max = 4096;
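
/*
 * The number of possible combinations is the product, over all split
 * segments, of the number of unique copies of that segment.  For example,
 * a block split into three segments that each landed on a 2-way mirror
 * with distinct copies has up to 2 * 2 * 2 = 8 combinations to try, while
 * thirteen such segments (2^13 = 8192) would exceed the default limit
 * above and fall back to random sampling.
 */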
218
219/*
220 * Enable to simulate damaged segments and validate reconstruction.  This
221 * is intentionally not exposed as a module parameter.
222 */
223unsigned long zfs_reconstruct_indirect_damage_fraction = 0;
224
225/*
226 * The indirect_child_t represents the vdev that we will read from, when we
227 * need to read all copies of the data (e.g. for scrub or reconstruction).
228 * For plain (non-mirror) top-level vdevs (i.e. is_vdev is not a mirror),
229 * ic_vdev is the same as is_vdev.  However, for mirror top-level vdevs,
230 * ic_vdev is a child of the mirror.
231 */
232typedef struct indirect_child {
233	abd_t *ic_data;
234	vdev_t *ic_vdev;
235
236	/*
	 * ic_duplicate is NULL when the ic_data contents are unique; when it
	 * is determined to be a duplicate, it references the primary child.
239	 */
240	struct indirect_child *ic_duplicate;
241	list_node_t ic_node; /* node on is_unique_child */
242	int ic_error; /* set when a child does not contain the data */
243} indirect_child_t;
244
245/*
246 * The indirect_split_t represents one mapped segment of an i/o to the
247 * indirect vdev. For non-split (contiguously-mapped) blocks, there will be
248 * only one indirect_split_t, with is_split_offset==0 and is_size==io_size.
249 * For split blocks, there will be several of these.
250 */
251typedef struct indirect_split {
252	list_node_t is_node; /* link on iv_splits */
253
254	/*
255	 * is_split_offset is the offset into the i/o.
256	 * This is the sum of the previous splits' is_size's.
257	 */
258	uint64_t is_split_offset;
259
260	vdev_t *is_vdev; /* top-level vdev */
261	uint64_t is_target_offset; /* offset on is_vdev */
262	uint64_t is_size;
263	int is_children; /* number of entries in is_child[] */
264	int is_unique_children; /* number of entries in is_unique_child */
265	list_t is_unique_child;
266
267	/*
268	 * is_good_child is the child that we are currently using to
269	 * attempt reconstruction.
270	 */
271	indirect_child_t *is_good_child;
272
273	indirect_child_t is_child[];
274} indirect_split_t;
275
276/*
277 * The indirect_vsd_t is associated with each i/o to the indirect vdev.
278 * It is the "Vdev-Specific Data" in the zio_t's io_vsd.
279 */
280typedef struct indirect_vsd {
281	boolean_t iv_split_block;
282	boolean_t iv_reconstruct;
283	uint64_t iv_unique_combinations;
284	uint64_t iv_attempts;
285	uint64_t iv_attempts_max;
286
287	list_t iv_splits; /* list of indirect_split_t's */
288} indirect_vsd_t;
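
/*
 * For a hypothetical 32K (0x8000-byte) read split across two segments,
 * the structures above nest roughly as follows:
 *
 *	indirect_vsd_t (zio->io_vsd)
 *	    iv_splits:
 *		indirect_split_t  is_split_offset 0x0000, is_size 0x5000
 *		    is_child[0..1]: one indirect_child_t per mirror child
 *		indirect_split_t  is_split_offset 0x5000, is_size 0x3000
 *		    is_child[0]: single child (plain top-level vdev)
 *
 * The is_size fields of all splits sum to the original io_size.
 */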
289
290static void
291vdev_indirect_map_free(zio_t *zio)
292{
293	indirect_vsd_t *iv = zio->io_vsd;
294
295	indirect_split_t *is;
296	while ((is = list_remove_head(&iv->iv_splits)) != NULL) {
297		for (int c = 0; c < is->is_children; c++) {
298			indirect_child_t *ic = &is->is_child[c];
299			if (ic->ic_data != NULL)
300				abd_free(ic->ic_data);
301		}
302
303		indirect_child_t *ic;
304		while ((ic = list_remove_head(&is->is_unique_child)) != NULL)
305			;
306
307		list_destroy(&is->is_unique_child);
308
309		kmem_free(is,
310		    offsetof(indirect_split_t, is_child[is->is_children]));
311	}
312	kmem_free(iv, sizeof (*iv));
313}
314
315static const zio_vsd_ops_t vdev_indirect_vsd_ops = {
316	.vsd_free = vdev_indirect_map_free,
317};
318
319/*
320 * Mark the given offset and size as being obsolete.
321 */
322void
323vdev_indirect_mark_obsolete(vdev_t *vd, uint64_t offset, uint64_t size)
324{
325	spa_t *spa = vd->vdev_spa;
326
327	ASSERT3U(vd->vdev_indirect_config.vic_mapping_object, !=, 0);
328	ASSERT(vd->vdev_removing || vd->vdev_ops == &vdev_indirect_ops);
329	ASSERT(size > 0);
330	VERIFY(vdev_indirect_mapping_entry_for_offset(
331	    vd->vdev_indirect_mapping, offset) != NULL);
332
333	if (spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS)) {
334		mutex_enter(&vd->vdev_obsolete_lock);
335		range_tree_add(vd->vdev_obsolete_segments, offset, size);
336		mutex_exit(&vd->vdev_obsolete_lock);
337		vdev_dirty(vd, 0, NULL, spa_syncing_txg(spa));
338	}
339}
340
341/*
342 * Mark the DVA vdev_id:offset:size as being obsolete in the given tx. This
343 * wrapper is provided because the DMU does not know about vdev_t's and
344 * cannot directly call vdev_indirect_mark_obsolete.
345 */
346void
347spa_vdev_indirect_mark_obsolete(spa_t *spa, uint64_t vdev_id, uint64_t offset,
348    uint64_t size, dmu_tx_t *tx)
349{
350	vdev_t *vd = vdev_lookup_top(spa, vdev_id);
351	ASSERT(dmu_tx_is_syncing(tx));
352
353	/* The DMU can only remap indirect vdevs. */
354	ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
355	vdev_indirect_mark_obsolete(vd, offset, size);
356}
357
358static spa_condensing_indirect_t *
359spa_condensing_indirect_create(spa_t *spa)
360{
361	spa_condensing_indirect_phys_t *scip =
362	    &spa->spa_condensing_indirect_phys;
363	spa_condensing_indirect_t *sci = kmem_zalloc(sizeof (*sci), KM_SLEEP);
364	objset_t *mos = spa->spa_meta_objset;
365
366	for (int i = 0; i < TXG_SIZE; i++) {
367		list_create(&sci->sci_new_mapping_entries[i],
368		    sizeof (vdev_indirect_mapping_entry_t),
369		    offsetof(vdev_indirect_mapping_entry_t, vime_node));
370	}
371
372	sci->sci_new_mapping =
373	    vdev_indirect_mapping_open(mos, scip->scip_next_mapping_object);
374
375	return (sci);
376}
377
378static void
379spa_condensing_indirect_destroy(spa_condensing_indirect_t *sci)
380{
381	for (int i = 0; i < TXG_SIZE; i++)
382		list_destroy(&sci->sci_new_mapping_entries[i]);
383
384	if (sci->sci_new_mapping != NULL)
385		vdev_indirect_mapping_close(sci->sci_new_mapping);
386
387	kmem_free(sci, sizeof (*sci));
388}
389
390boolean_t
391vdev_indirect_should_condense(vdev_t *vd)
392{
393	vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
394	spa_t *spa = vd->vdev_spa;
395
396	ASSERT(dsl_pool_sync_context(spa->spa_dsl_pool));
397
398	if (!zfs_condense_indirect_vdevs_enable)
399		return (B_FALSE);
400
401	/*
402	 * We can only condense one indirect vdev at a time.
403	 */
404	if (spa->spa_condensing_indirect != NULL)
405		return (B_FALSE);
406
407	if (spa_shutting_down(spa))
408		return (B_FALSE);
409
410	/*
411	 * The mapping object size must not change while we are
412	 * condensing, so we can only condense indirect vdevs
413	 * (not vdevs that are still in the middle of being removed).
414	 */
415	if (vd->vdev_ops != &vdev_indirect_ops)
416		return (B_FALSE);
417
418	/*
419	 * If nothing new has been marked obsolete, there is no
420	 * point in condensing.
421	 */
422	uint64_t obsolete_sm_obj __maybe_unused;
423	ASSERT0(vdev_obsolete_sm_object(vd, &obsolete_sm_obj));
424	if (vd->vdev_obsolete_sm == NULL) {
425		ASSERT0(obsolete_sm_obj);
426		return (B_FALSE);
427	}
428
429	ASSERT(vd->vdev_obsolete_sm != NULL);
430
431	ASSERT3U(obsolete_sm_obj, ==, space_map_object(vd->vdev_obsolete_sm));
432
433	uint64_t bytes_mapped = vdev_indirect_mapping_bytes_mapped(vim);
434	uint64_t bytes_obsolete = space_map_allocated(vd->vdev_obsolete_sm);
435	uint64_t mapping_size = vdev_indirect_mapping_size(vim);
436	uint64_t obsolete_sm_size = space_map_length(vd->vdev_obsolete_sm);
437
438	ASSERT3U(bytes_obsolete, <=, bytes_mapped);
439
440	/*
441	 * If a high percentage of the bytes that are mapped have become
442	 * obsolete, condense (unless the mapping is already small enough).
443	 * This has a good chance of reducing the amount of memory used
444	 * by the mapping.
445	 */
446	if (bytes_obsolete * 100 / bytes_mapped >=
447	    zfs_condense_indirect_obsolete_pct &&
448	    mapping_size > zfs_condense_min_mapping_bytes) {
449		zfs_dbgmsg("should condense vdev %llu because obsolete "
450		    "spacemap covers %d%% of %lluMB mapping",
451		    (u_longlong_t)vd->vdev_id,
452		    (int)(bytes_obsolete * 100 / bytes_mapped),
453		    (u_longlong_t)bytes_mapped / 1024 / 1024);
454		return (B_TRUE);
455	}
456
457	/*
458	 * If the obsolete space map takes up too much space on disk,
459	 * condense in order to free up this disk space.
460	 */
461	if (obsolete_sm_size >= zfs_condense_max_obsolete_bytes) {
462		zfs_dbgmsg("should condense vdev %llu because obsolete sm "
463		    "length %lluMB >= max size %lluMB",
464		    (u_longlong_t)vd->vdev_id,
465		    (u_longlong_t)obsolete_sm_size / 1024 / 1024,
466		    (u_longlong_t)zfs_condense_max_obsolete_bytes /
467		    1024 / 1024);
468		return (B_TRUE);
469	}
470
471	return (B_FALSE);
472}
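
/*
 * For example (hypothetical numbers): a vdev with 10GB mapped and 3GB
 * obsolete gives 3 * 100 / 10 = 30%, which meets the default 25%
 * threshold, so it is condensed provided its in-core mapping exceeds
 * zfs_condense_min_mapping_bytes.  A vdev with only 1GB (10%) obsolete
 * would still be condensed if its obsolete space map had grown past
 * zfs_condense_max_obsolete_bytes on disk.
 */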
473
474/*
475 * This sync task completes (finishes) a condense, deleting the old
476 * mapping and replacing it with the new one.
477 */
478static void
479spa_condense_indirect_complete_sync(void *arg, dmu_tx_t *tx)
480{
481	spa_condensing_indirect_t *sci = arg;
482	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
483	spa_condensing_indirect_phys_t *scip =
484	    &spa->spa_condensing_indirect_phys;
485	vdev_t *vd = vdev_lookup_top(spa, scip->scip_vdev);
486	vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
487	objset_t *mos = spa->spa_meta_objset;
488	vdev_indirect_mapping_t *old_mapping = vd->vdev_indirect_mapping;
489	uint64_t old_count = vdev_indirect_mapping_num_entries(old_mapping);
490	uint64_t new_count =
491	    vdev_indirect_mapping_num_entries(sci->sci_new_mapping);
492
493	ASSERT(dmu_tx_is_syncing(tx));
494	ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
495	ASSERT3P(sci, ==, spa->spa_condensing_indirect);
496	for (int i = 0; i < TXG_SIZE; i++) {
497		ASSERT(list_is_empty(&sci->sci_new_mapping_entries[i]));
498	}
499	ASSERT(vic->vic_mapping_object != 0);
500	ASSERT3U(vd->vdev_id, ==, scip->scip_vdev);
501	ASSERT(scip->scip_next_mapping_object != 0);
502	ASSERT(scip->scip_prev_obsolete_sm_object != 0);
503
504	/*
505	 * Reset vdev_indirect_mapping to refer to the new object.
506	 */
507	rw_enter(&vd->vdev_indirect_rwlock, RW_WRITER);
508	vdev_indirect_mapping_close(vd->vdev_indirect_mapping);
509	vd->vdev_indirect_mapping = sci->sci_new_mapping;
510	rw_exit(&vd->vdev_indirect_rwlock);
511
512	sci->sci_new_mapping = NULL;
513	vdev_indirect_mapping_free(mos, vic->vic_mapping_object, tx);
514	vic->vic_mapping_object = scip->scip_next_mapping_object;
515	scip->scip_next_mapping_object = 0;
516
517	space_map_free_obj(mos, scip->scip_prev_obsolete_sm_object, tx);
518	spa_feature_decr(spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
519	scip->scip_prev_obsolete_sm_object = 0;
520
521	scip->scip_vdev = 0;
522
523	VERIFY0(zap_remove(mos, DMU_POOL_DIRECTORY_OBJECT,
524	    DMU_POOL_CONDENSING_INDIRECT, tx));
525	spa_condensing_indirect_destroy(spa->spa_condensing_indirect);
526	spa->spa_condensing_indirect = NULL;
527
528	zfs_dbgmsg("finished condense of vdev %llu in txg %llu: "
529	    "new mapping object %llu has %llu entries "
530	    "(was %llu entries)",
531	    (u_longlong_t)vd->vdev_id, (u_longlong_t)dmu_tx_get_txg(tx),
532	    (u_longlong_t)vic->vic_mapping_object,
533	    (u_longlong_t)new_count, (u_longlong_t)old_count);
534
535	vdev_config_dirty(spa->spa_root_vdev);
536}
537
538/*
539 * This sync task appends entries to the new mapping object.
540 */
541static void
542spa_condense_indirect_commit_sync(void *arg, dmu_tx_t *tx)
543{
544	spa_condensing_indirect_t *sci = arg;
545	uint64_t txg = dmu_tx_get_txg(tx);
546	spa_t *spa __maybe_unused = dmu_tx_pool(tx)->dp_spa;
547
548	ASSERT(dmu_tx_is_syncing(tx));
549	ASSERT3P(sci, ==, spa->spa_condensing_indirect);
550
551	vdev_indirect_mapping_add_entries(sci->sci_new_mapping,
552	    &sci->sci_new_mapping_entries[txg & TXG_MASK], tx);
553	ASSERT(list_is_empty(&sci->sci_new_mapping_entries[txg & TXG_MASK]));
554}
555
556/*
557 * Open-context function to add one entry to the new mapping.  The new
558 * entry will be remembered and written from syncing context.
559 */
560static void
561spa_condense_indirect_commit_entry(spa_t *spa,
562    vdev_indirect_mapping_entry_phys_t *vimep, uint32_t count)
563{
564	spa_condensing_indirect_t *sci = spa->spa_condensing_indirect;
565
566	ASSERT3U(count, <, DVA_GET_ASIZE(&vimep->vimep_dst));
567
568	dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
569	dmu_tx_hold_space(tx, sizeof (*vimep) + sizeof (count));
570	VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
571	int txgoff = dmu_tx_get_txg(tx) & TXG_MASK;
572
573	/*
574	 * If we are the first entry committed this txg, kick off the sync
575	 * task to write to the MOS on our behalf.
576	 */
577	if (list_is_empty(&sci->sci_new_mapping_entries[txgoff])) {
578		dsl_sync_task_nowait(dmu_tx_pool(tx),
579		    spa_condense_indirect_commit_sync, sci, tx);
580	}
581
582	vdev_indirect_mapping_entry_t *vime =
583	    kmem_alloc(sizeof (*vime), KM_SLEEP);
584	vime->vime_mapping = *vimep;
585	vime->vime_obsolete_count = count;
586	list_insert_tail(&sci->sci_new_mapping_entries[txgoff], vime);
587
588	dmu_tx_commit(tx);
589}
590
591static void
592spa_condense_indirect_generate_new_mapping(vdev_t *vd,
593    uint32_t *obsolete_counts, uint64_t start_index, zthr_t *zthr)
594{
595	spa_t *spa = vd->vdev_spa;
596	uint64_t mapi = start_index;
597	vdev_indirect_mapping_t *old_mapping = vd->vdev_indirect_mapping;
598	uint64_t old_num_entries =
599	    vdev_indirect_mapping_num_entries(old_mapping);
600
601	ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
602	ASSERT3U(vd->vdev_id, ==, spa->spa_condensing_indirect_phys.scip_vdev);
603
604	zfs_dbgmsg("starting condense of vdev %llu from index %llu",
605	    (u_longlong_t)vd->vdev_id,
606	    (u_longlong_t)mapi);
607
608	while (mapi < old_num_entries) {
609
610		if (zthr_iscancelled(zthr)) {
611			zfs_dbgmsg("pausing condense of vdev %llu "
612			    "at index %llu", (u_longlong_t)vd->vdev_id,
613			    (u_longlong_t)mapi);
614			break;
615		}
616
617		vdev_indirect_mapping_entry_phys_t *entry =
618		    &old_mapping->vim_entries[mapi];
619		uint64_t entry_size = DVA_GET_ASIZE(&entry->vimep_dst);
620		ASSERT3U(obsolete_counts[mapi], <=, entry_size);
621		if (obsolete_counts[mapi] < entry_size) {
622			spa_condense_indirect_commit_entry(spa, entry,
623			    obsolete_counts[mapi]);
624
625			/*
626			 * This delay may be requested for testing, debugging,
627			 * or performance reasons.
628			 */
629			hrtime_t now = gethrtime();
630			hrtime_t sleep_until = now + MSEC2NSEC(
631			    zfs_condense_indirect_commit_entry_delay_ms);
632			zfs_sleep_until(sleep_until);
633		}
634
635		mapi++;
636	}
637}
638
639static boolean_t
640spa_condense_indirect_thread_check(void *arg, zthr_t *zthr)
641{
642	(void) zthr;
643	spa_t *spa = arg;
644
645	return (spa->spa_condensing_indirect != NULL);
646}
647
648static void
649spa_condense_indirect_thread(void *arg, zthr_t *zthr)
650{
651	spa_t *spa = arg;
652	vdev_t *vd;
653
654	ASSERT3P(spa->spa_condensing_indirect, !=, NULL);
655	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
656	vd = vdev_lookup_top(spa, spa->spa_condensing_indirect_phys.scip_vdev);
657	ASSERT3P(vd, !=, NULL);
658	spa_config_exit(spa, SCL_VDEV, FTAG);
659
660	spa_condensing_indirect_t *sci = spa->spa_condensing_indirect;
661	spa_condensing_indirect_phys_t *scip =
662	    &spa->spa_condensing_indirect_phys;
663	uint32_t *counts;
664	uint64_t start_index;
665	vdev_indirect_mapping_t *old_mapping = vd->vdev_indirect_mapping;
666	space_map_t *prev_obsolete_sm = NULL;
667
668	ASSERT3U(vd->vdev_id, ==, scip->scip_vdev);
669	ASSERT(scip->scip_next_mapping_object != 0);
670	ASSERT(scip->scip_prev_obsolete_sm_object != 0);
671	ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
672
673	for (int i = 0; i < TXG_SIZE; i++) {
674		/*
675		 * The list must start out empty in order for the
676		 * _commit_sync() sync task to be properly registered
677		 * on the first call to _commit_entry(); so it's wise
678		 * to double check and ensure we actually are starting
679		 * with empty lists.
680		 */
681		ASSERT(list_is_empty(&sci->sci_new_mapping_entries[i]));
682	}
683
684	VERIFY0(space_map_open(&prev_obsolete_sm, spa->spa_meta_objset,
685	    scip->scip_prev_obsolete_sm_object, 0, vd->vdev_asize, 0));
686	counts = vdev_indirect_mapping_load_obsolete_counts(old_mapping);
687	if (prev_obsolete_sm != NULL) {
688		vdev_indirect_mapping_load_obsolete_spacemap(old_mapping,
689		    counts, prev_obsolete_sm);
690	}
691	space_map_close(prev_obsolete_sm);
692
693	/*
694	 * Generate new mapping.  Determine what index to continue from
695	 * based on the max offset that we've already written in the
696	 * new mapping.
697	 */
698	uint64_t max_offset =
699	    vdev_indirect_mapping_max_offset(sci->sci_new_mapping);
700	if (max_offset == 0) {
701		/* We haven't written anything to the new mapping yet. */
702		start_index = 0;
703	} else {
704		/*
705		 * Pick up from where we left off. _entry_for_offset()
706		 * returns a pointer into the vim_entries array. If
707		 * max_offset is greater than any of the mappings
		 * contained in the table, NULL will be returned and
709		 * that indicates we've exhausted our iteration of the
710		 * old_mapping.
711		 */
712
713		vdev_indirect_mapping_entry_phys_t *entry =
714		    vdev_indirect_mapping_entry_for_offset_or_next(old_mapping,
715		    max_offset);
716
717		if (entry == NULL) {
718			/*
719			 * We've already written the whole new mapping.
720			 * This special value will cause us to skip the
721			 * generate_new_mapping step and just do the sync
722			 * task to complete the condense.
723			 */
724			start_index = UINT64_MAX;
725		} else {
726			start_index = entry - old_mapping->vim_entries;
727			ASSERT3U(start_index, <,
728			    vdev_indirect_mapping_num_entries(old_mapping));
729		}
730	}
731
732	spa_condense_indirect_generate_new_mapping(vd, counts,
733	    start_index, zthr);
734
735	vdev_indirect_mapping_free_obsolete_counts(old_mapping, counts);
736
737	/*
738	 * If the zthr has received a cancellation signal while running
739	 * in generate_new_mapping() or at any point after that, then bail
740	 * early. We don't want to complete the condense if the spa is
741	 * shutting down.
742	 */
743	if (zthr_iscancelled(zthr))
744		return;
745
746	VERIFY0(dsl_sync_task(spa_name(spa), NULL,
747	    spa_condense_indirect_complete_sync, sci, 0,
748	    ZFS_SPACE_CHECK_EXTRA_RESERVED));
749}
750
751/*
752 * Sync task to begin the condensing process.
753 */
754void
755spa_condense_indirect_start_sync(vdev_t *vd, dmu_tx_t *tx)
756{
757	spa_t *spa = vd->vdev_spa;
758	spa_condensing_indirect_phys_t *scip =
759	    &spa->spa_condensing_indirect_phys;
760
761	ASSERT0(scip->scip_next_mapping_object);
762	ASSERT0(scip->scip_prev_obsolete_sm_object);
763	ASSERT0(scip->scip_vdev);
764	ASSERT(dmu_tx_is_syncing(tx));
765	ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
766	ASSERT(spa_feature_is_active(spa, SPA_FEATURE_OBSOLETE_COUNTS));
767	ASSERT(vdev_indirect_mapping_num_entries(vd->vdev_indirect_mapping));
768
769	uint64_t obsolete_sm_obj;
770	VERIFY0(vdev_obsolete_sm_object(vd, &obsolete_sm_obj));
771	ASSERT3U(obsolete_sm_obj, !=, 0);
772
773	scip->scip_vdev = vd->vdev_id;
774	scip->scip_next_mapping_object =
775	    vdev_indirect_mapping_alloc(spa->spa_meta_objset, tx);
776
777	scip->scip_prev_obsolete_sm_object = obsolete_sm_obj;
778
779	/*
780	 * We don't need to allocate a new space map object, since
781	 * vdev_indirect_sync_obsolete will allocate one when needed.
782	 */
783	space_map_close(vd->vdev_obsolete_sm);
784	vd->vdev_obsolete_sm = NULL;
785	VERIFY0(zap_remove(spa->spa_meta_objset, vd->vdev_top_zap,
786	    VDEV_TOP_ZAP_INDIRECT_OBSOLETE_SM, tx));
787
788	VERIFY0(zap_add(spa->spa_dsl_pool->dp_meta_objset,
789	    DMU_POOL_DIRECTORY_OBJECT,
790	    DMU_POOL_CONDENSING_INDIRECT, sizeof (uint64_t),
791	    sizeof (*scip) / sizeof (uint64_t), scip, tx));
792
793	ASSERT3P(spa->spa_condensing_indirect, ==, NULL);
794	spa->spa_condensing_indirect = spa_condensing_indirect_create(spa);
795
796	zfs_dbgmsg("starting condense of vdev %llu in txg %llu: "
797	    "posm=%llu nm=%llu",
798	    (u_longlong_t)vd->vdev_id, (u_longlong_t)dmu_tx_get_txg(tx),
799	    (u_longlong_t)scip->scip_prev_obsolete_sm_object,
800	    (u_longlong_t)scip->scip_next_mapping_object);
801
802	zthr_wakeup(spa->spa_condense_zthr);
803}
804
805/*
806 * Sync to the given vdev's obsolete space map any segments that are no longer
807 * referenced as of the given txg.
808 *
809 * If the obsolete space map doesn't exist yet, create and open it.
810 */
811void
812vdev_indirect_sync_obsolete(vdev_t *vd, dmu_tx_t *tx)
813{
814	spa_t *spa = vd->vdev_spa;
815	vdev_indirect_config_t *vic __maybe_unused = &vd->vdev_indirect_config;
816
817	ASSERT3U(vic->vic_mapping_object, !=, 0);
818	ASSERT(range_tree_space(vd->vdev_obsolete_segments) > 0);
819	ASSERT(vd->vdev_removing || vd->vdev_ops == &vdev_indirect_ops);
820	ASSERT(spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS));
821
822	uint64_t obsolete_sm_object;
823	VERIFY0(vdev_obsolete_sm_object(vd, &obsolete_sm_object));
824	if (obsolete_sm_object == 0) {
825		obsolete_sm_object = space_map_alloc(spa->spa_meta_objset,
826		    zfs_vdev_standard_sm_blksz, tx);
827
828		ASSERT(vd->vdev_top_zap != 0);
829		VERIFY0(zap_add(vd->vdev_spa->spa_meta_objset, vd->vdev_top_zap,
830		    VDEV_TOP_ZAP_INDIRECT_OBSOLETE_SM,
831		    sizeof (obsolete_sm_object), 1, &obsolete_sm_object, tx));
832		ASSERT0(vdev_obsolete_sm_object(vd, &obsolete_sm_object));
833		ASSERT3U(obsolete_sm_object, !=, 0);
834
835		spa_feature_incr(spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
836		VERIFY0(space_map_open(&vd->vdev_obsolete_sm,
837		    spa->spa_meta_objset, obsolete_sm_object,
838		    0, vd->vdev_asize, 0));
839	}
840
841	ASSERT(vd->vdev_obsolete_sm != NULL);
842	ASSERT3U(obsolete_sm_object, ==,
843	    space_map_object(vd->vdev_obsolete_sm));
844
845	space_map_write(vd->vdev_obsolete_sm,
846	    vd->vdev_obsolete_segments, SM_ALLOC, SM_NO_VDEVID, tx);
847	range_tree_vacate(vd->vdev_obsolete_segments, NULL, NULL);
848}
849
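/*
 * Load any in-progress condense state from the MOS at pool open/import.
 * If the DMU_POOL_CONDENSING_INDIRECT entry exists, a condense was
 * interrupted; for writeable pools the condense zthr will pick it up and
 * resume from where it left off.
 */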
850int
851spa_condense_init(spa_t *spa)
852{
853	int error = zap_lookup(spa->spa_meta_objset,
854	    DMU_POOL_DIRECTORY_OBJECT,
855	    DMU_POOL_CONDENSING_INDIRECT, sizeof (uint64_t),
856	    sizeof (spa->spa_condensing_indirect_phys) / sizeof (uint64_t),
857	    &spa->spa_condensing_indirect_phys);
858	if (error == 0) {
859		if (spa_writeable(spa)) {
860			spa->spa_condensing_indirect =
861			    spa_condensing_indirect_create(spa);
862		}
863		return (0);
864	} else if (error == ENOENT) {
865		return (0);
866	} else {
867		return (error);
868	}
869}
870
871void
872spa_condense_fini(spa_t *spa)
873{
874	if (spa->spa_condensing_indirect != NULL) {
875		spa_condensing_indirect_destroy(spa->spa_condensing_indirect);
876		spa->spa_condensing_indirect = NULL;
877	}
878}
879
880void
881spa_start_indirect_condensing_thread(spa_t *spa)
882{
883	ASSERT3P(spa->spa_condense_zthr, ==, NULL);
884	spa->spa_condense_zthr = zthr_create("z_indirect_condense",
885	    spa_condense_indirect_thread_check,
886	    spa_condense_indirect_thread, spa, minclsyspri);
887}
888
889/*
890 * Gets the obsolete spacemap object from the vdev's ZAP.  On success sm_obj
891 * will contain either the obsolete spacemap object or zero if none exists.
892 * All other errors are returned to the caller.
893 */
894int
895vdev_obsolete_sm_object(vdev_t *vd, uint64_t *sm_obj)
896{
897	ASSERT0(spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER));
898
899	if (vd->vdev_top_zap == 0) {
900		*sm_obj = 0;
901		return (0);
902	}
903
904	int error = zap_lookup(vd->vdev_spa->spa_meta_objset, vd->vdev_top_zap,
905	    VDEV_TOP_ZAP_INDIRECT_OBSOLETE_SM, sizeof (uint64_t), 1, sm_obj);
906	if (error == ENOENT) {
907		*sm_obj = 0;
908		error = 0;
909	}
910
911	return (error);
912}
913
914/*
 * Gets the "obsolete counts are precise" entry from the vdev's ZAP.
 * On success, are_precise will be set to reflect whether the counts are
 * precise.
917 * All other errors are returned to the caller.
918 */
919int
920vdev_obsolete_counts_are_precise(vdev_t *vd, boolean_t *are_precise)
921{
922	ASSERT0(spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER));
923
924	if (vd->vdev_top_zap == 0) {
925		*are_precise = B_FALSE;
926		return (0);
927	}
928
929	uint64_t val = 0;
930	int error = zap_lookup(vd->vdev_spa->spa_meta_objset, vd->vdev_top_zap,
931	    VDEV_TOP_ZAP_OBSOLETE_COUNTS_ARE_PRECISE, sizeof (val), 1, &val);
932	if (error == 0) {
933		*are_precise = (val != 0);
934	} else if (error == ENOENT) {
935		*are_precise = B_FALSE;
936		error = 0;
937	}
938
939	return (error);
940}
941
942static void
943vdev_indirect_close(vdev_t *vd)
944{
945	(void) vd;
946}
947
948static int
949vdev_indirect_open(vdev_t *vd, uint64_t *psize, uint64_t *max_psize,
950    uint64_t *logical_ashift, uint64_t *physical_ashift)
951{
952	*psize = *max_psize = vd->vdev_asize +
953	    VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE;
954	*logical_ashift = vd->vdev_ashift;
955	*physical_ashift = vd->vdev_physical_ashift;
956	return (0);
957}
958
959typedef struct remap_segment {
960	vdev_t *rs_vd;
961	uint64_t rs_offset;
962	uint64_t rs_asize;
963	uint64_t rs_split_offset;
964	list_node_t rs_node;
965} remap_segment_t;
966
967static remap_segment_t *
968rs_alloc(vdev_t *vd, uint64_t offset, uint64_t asize, uint64_t split_offset)
969{
970	remap_segment_t *rs = kmem_alloc(sizeof (remap_segment_t), KM_SLEEP);
971	rs->rs_vd = vd;
972	rs->rs_offset = offset;
973	rs->rs_asize = asize;
974	rs->rs_split_offset = split_offset;
975	return (rs);
976}
977
978/*
979 * Given an indirect vdev and an extent on that vdev, it duplicates the
980 * physical entries of the indirect mapping that correspond to the extent
981 * to a new array and returns a pointer to it. In addition, copied_entries
982 * is populated with the number of mapping entries that were duplicated.
983 *
984 * Note that the function assumes that the caller holds vdev_indirect_rwlock.
985 * This ensures that the mapping won't change due to condensing as we
986 * copy over its contents.
987 *
988 * Finally, since we are doing an allocation, it is up to the caller to
989 * free the array allocated in this function.
990 */
991static vdev_indirect_mapping_entry_phys_t *
992vdev_indirect_mapping_duplicate_adjacent_entries(vdev_t *vd, uint64_t offset,
993    uint64_t asize, uint64_t *copied_entries)
994{
995	vdev_indirect_mapping_entry_phys_t *duplicate_mappings = NULL;
996	vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
997	uint64_t entries = 0;
998
999	ASSERT(RW_READ_HELD(&vd->vdev_indirect_rwlock));
1000
1001	vdev_indirect_mapping_entry_phys_t *first_mapping =
1002	    vdev_indirect_mapping_entry_for_offset(vim, offset);
1003	ASSERT3P(first_mapping, !=, NULL);
1004
1005	vdev_indirect_mapping_entry_phys_t *m = first_mapping;
1006	while (asize > 0) {
1007		uint64_t size = DVA_GET_ASIZE(&m->vimep_dst);
1008
1009		ASSERT3U(offset, >=, DVA_MAPPING_GET_SRC_OFFSET(m));
1010		ASSERT3U(offset, <, DVA_MAPPING_GET_SRC_OFFSET(m) + size);
1011
1012		uint64_t inner_offset = offset - DVA_MAPPING_GET_SRC_OFFSET(m);
1013		uint64_t inner_size = MIN(asize, size - inner_offset);
1014
1015		offset += inner_size;
1016		asize -= inner_size;
1017		entries++;
1018		m++;
1019	}
1020
1021	size_t copy_length = entries * sizeof (*first_mapping);
1022	duplicate_mappings = kmem_alloc(copy_length, KM_SLEEP);
1023	memcpy(duplicate_mappings, first_mapping, copy_length);
1024	*copied_entries = entries;
1025
1026	return (duplicate_mappings);
1027}
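
/*
 * As a hypothetical example of the walk above: for offset 0x13000 and
 * asize 0x3000, where the first matching entry covers src 0x10000 for
 * 0x4000 bytes and the next covers src 0x14000 for 0x8000 bytes:
 *
 *	entry 0: inner_offset = 0x3000, inner_size = MIN(0x3000, 0x1000)
 *	    = 0x1000; remaining asize = 0x2000
 *	entry 1: inner_offset = 0x0000, inner_size = MIN(0x2000, 0x8000)
 *	    = 0x2000; remaining asize = 0
 *
 * Two entries are therefore copied and *copied_entries is set to 2.
 */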
1028
1029/*
1030 * Goes through the relevant indirect mappings until it hits a concrete vdev
1031 * and issues the callback. On the way to the concrete vdev, if any other
1032 * indirect vdevs are encountered, then the callback will also be called on
1033 * each of those indirect vdevs. For example, if the segment is mapped to
1034 * segment A on indirect vdev 1, and then segment A on indirect vdev 1 is
1035 * mapped to segment B on concrete vdev 2, then the callback will be called on
1036 * both vdev 1 and vdev 2.
1037 *
1038 * While the callback passed to vdev_indirect_remap() is called on every vdev
1039 * the function encounters, certain callbacks only care about concrete vdevs.
1040 * These types of callbacks should return immediately and explicitly when they
1041 * are called on an indirect vdev.
1042 *
1043 * Because there is a possibility that a DVA section in the indirect device
1044 * has been split into multiple sections in our mapping, we keep track
1045 * of the relevant contiguous segments of the new location (remap_segment_t)
1046 * in a stack. This way we can call the callback for each of the new sections
 * created by a single section of the indirect device. Note, though, that in
 * this scenario the callbacks in each split block won't occur in order of
 * offset, so callers should not make any assumptions about that ordering.
1050 *
1051 * For callbacks that don't handle split blocks and immediately return when
1052 * they encounter them (as is the case for remap_blkptr_cb), the caller can
1053 * assume that its callback will be applied from the first indirect vdev
1054 * encountered to the last one and then the concrete vdev, in that order.
1055 */
1056static void
1057vdev_indirect_remap(vdev_t *vd, uint64_t offset, uint64_t asize,
1058    void (*func)(uint64_t, vdev_t *, uint64_t, uint64_t, void *), void *arg)
1059{
1060	list_t stack;
1061	spa_t *spa = vd->vdev_spa;
1062
1063	list_create(&stack, sizeof (remap_segment_t),
1064	    offsetof(remap_segment_t, rs_node));
1065
1066	for (remap_segment_t *rs = rs_alloc(vd, offset, asize, 0);
1067	    rs != NULL; rs = list_remove_head(&stack)) {
1068		vdev_t *v = rs->rs_vd;
1069		uint64_t num_entries = 0;
1070
1071		ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
1072		ASSERT(rs->rs_asize > 0);
1073
1074		/*
1075		 * Note: As this function can be called from open context
1076		 * (e.g. zio_read()), we need the following rwlock to
1077		 * prevent the mapping from being changed by condensing.
1078		 *
1079		 * So we grab the lock and we make a copy of the entries
1080		 * that are relevant to the extent that we are working on.
1081		 * Once that is done, we drop the lock and iterate over
		 * our copy of the mapping. Once we are done with the
		 * remap segment and we free it, we also free our copy
1084		 * of the indirect mapping entries that are relevant to it.
1085		 *
1086		 * This way we don't need to wait until the function is
1087		 * finished with a segment, to condense it. In addition, we
1088		 * don't need a recursive rwlock for the case that a call to
1089		 * vdev_indirect_remap() needs to call itself (through the
1090		 * codepath of its callback) for the same vdev in the middle
1091		 * of its execution.
1092		 */
1093		rw_enter(&v->vdev_indirect_rwlock, RW_READER);
1094		ASSERT3P(v->vdev_indirect_mapping, !=, NULL);
1095
1096		vdev_indirect_mapping_entry_phys_t *mapping =
1097		    vdev_indirect_mapping_duplicate_adjacent_entries(v,
1098		    rs->rs_offset, rs->rs_asize, &num_entries);
1099		ASSERT3P(mapping, !=, NULL);
1100		ASSERT3U(num_entries, >, 0);
1101		rw_exit(&v->vdev_indirect_rwlock);
1102
1103		for (uint64_t i = 0; i < num_entries; i++) {
1104			/*
1105			 * Note: the vdev_indirect_mapping can not change
1106			 * while we are running.  It only changes while the
1107			 * removal is in progress, and then only from syncing
1108			 * context. While a removal is in progress, this
1109			 * function is only called for frees, which also only
1110			 * happen from syncing context.
1111			 */
1112			vdev_indirect_mapping_entry_phys_t *m = &mapping[i];
1113
1114			ASSERT3P(m, !=, NULL);
1115			ASSERT3U(rs->rs_asize, >, 0);
1116
1117			uint64_t size = DVA_GET_ASIZE(&m->vimep_dst);
1118			uint64_t dst_offset = DVA_GET_OFFSET(&m->vimep_dst);
1119			uint64_t dst_vdev = DVA_GET_VDEV(&m->vimep_dst);
1120
1121			ASSERT3U(rs->rs_offset, >=,
1122			    DVA_MAPPING_GET_SRC_OFFSET(m));
1123			ASSERT3U(rs->rs_offset, <,
1124			    DVA_MAPPING_GET_SRC_OFFSET(m) + size);
1125			ASSERT3U(dst_vdev, !=, v->vdev_id);
1126
1127			uint64_t inner_offset = rs->rs_offset -
1128			    DVA_MAPPING_GET_SRC_OFFSET(m);
1129			uint64_t inner_size =
1130			    MIN(rs->rs_asize, size - inner_offset);
1131
1132			vdev_t *dst_v = vdev_lookup_top(spa, dst_vdev);
1133			ASSERT3P(dst_v, !=, NULL);
1134
1135			if (dst_v->vdev_ops == &vdev_indirect_ops) {
1136				list_insert_head(&stack,
1137				    rs_alloc(dst_v, dst_offset + inner_offset,
1138				    inner_size, rs->rs_split_offset));
1139
1140			}
1141
1142			if ((zfs_flags & ZFS_DEBUG_INDIRECT_REMAP) &&
1143			    IS_P2ALIGNED(inner_size, 2 * SPA_MINBLOCKSIZE)) {
1144				/*
				 * Note: This clause exists solely for
1146				 * testing purposes. We use it to ensure that
1147				 * split blocks work and that the callbacks
1148				 * using them yield the same result if issued
1149				 * in reverse order.
1150				 */
1151				uint64_t inner_half = inner_size / 2;
1152
1153				func(rs->rs_split_offset + inner_half, dst_v,
1154				    dst_offset + inner_offset + inner_half,
1155				    inner_half, arg);
1156
1157				func(rs->rs_split_offset, dst_v,
1158				    dst_offset + inner_offset,
1159				    inner_half, arg);
1160			} else {
1161				func(rs->rs_split_offset, dst_v,
1162				    dst_offset + inner_offset,
1163				    inner_size, arg);
1164			}
1165
1166			rs->rs_offset += inner_size;
1167			rs->rs_asize -= inner_size;
1168			rs->rs_split_offset += inner_size;
1169		}
1170		VERIFY0(rs->rs_asize);
1171
1172		kmem_free(mapping, num_entries * sizeof (*mapping));
1173		kmem_free(rs, sizeof (remap_segment_t));
1174	}
1175	list_destroy(&stack);
1176}
1177
1178static void
1179vdev_indirect_child_io_done(zio_t *zio)
1180{
1181	zio_t *pio = zio->io_private;
1182
1183	mutex_enter(&pio->io_lock);
1184	pio->io_error = zio_worst_error(pio->io_error, zio->io_error);
1185	mutex_exit(&pio->io_lock);
1186
1187	abd_free(zio->io_abd);
1188}
1189
1190/*
1191 * This is a callback for vdev_indirect_remap() which allocates an
1192 * indirect_split_t for each split segment and adds it to iv_splits.
1193 */
1194static void
1195vdev_indirect_gather_splits(uint64_t split_offset, vdev_t *vd, uint64_t offset,
1196    uint64_t size, void *arg)
1197{
1198	zio_t *zio = arg;
1199	indirect_vsd_t *iv = zio->io_vsd;
1200
1201	ASSERT3P(vd, !=, NULL);
1202
1203	if (vd->vdev_ops == &vdev_indirect_ops)
1204		return;
1205
1206	int n = 1;
1207	if (vd->vdev_ops == &vdev_mirror_ops)
1208		n = vd->vdev_children;
1209
1210	indirect_split_t *is =
1211	    kmem_zalloc(offsetof(indirect_split_t, is_child[n]), KM_SLEEP);
1212
1213	is->is_children = n;
1214	is->is_size = size;
1215	is->is_split_offset = split_offset;
1216	is->is_target_offset = offset;
1217	is->is_vdev = vd;
1218	list_create(&is->is_unique_child, sizeof (indirect_child_t),
1219	    offsetof(indirect_child_t, ic_node));
1220
1221	/*
1222	 * Note that we only consider multiple copies of the data for
1223	 * *mirror* vdevs.  We don't for "replacing" or "spare" vdevs, even
1224	 * though they use the same ops as mirror, because there's only one
1225	 * "good" copy under the replacing/spare.
1226	 */
1227	if (vd->vdev_ops == &vdev_mirror_ops) {
1228		for (int i = 0; i < n; i++) {
1229			is->is_child[i].ic_vdev = vd->vdev_child[i];
1230			list_link_init(&is->is_child[i].ic_node);
1231		}
1232	} else {
1233		is->is_child[0].ic_vdev = vd;
1234	}
1235
1236	list_insert_tail(&iv->iv_splits, is);
1237}
1238
1239static void
1240vdev_indirect_read_split_done(zio_t *zio)
1241{
1242	indirect_child_t *ic = zio->io_private;
1243
1244	if (zio->io_error != 0) {
1245		/*
1246		 * Clear ic_data to indicate that we do not have data for this
1247		 * child.
1248		 */
1249		abd_free(ic->ic_data);
1250		ic->ic_data = NULL;
1251	}
1252}
1253
1254/*
1255 * Issue reads for all copies (mirror children) of all splits.
1256 */
1257static void
1258vdev_indirect_read_all(zio_t *zio)
1259{
1260	indirect_vsd_t *iv = zio->io_vsd;
1261
1262	ASSERT3U(zio->io_type, ==, ZIO_TYPE_READ);
1263
1264	for (indirect_split_t *is = list_head(&iv->iv_splits);
1265	    is != NULL; is = list_next(&iv->iv_splits, is)) {
1266		for (int i = 0; i < is->is_children; i++) {
1267			indirect_child_t *ic = &is->is_child[i];
1268
1269			if (!vdev_readable(ic->ic_vdev))
1270				continue;
1271
1272			/*
1273			 * If a child is missing the data, set ic_error. Used
1274			 * in vdev_indirect_repair(). We perform the read
1275			 * nevertheless which provides the opportunity to
1276			 * reconstruct the split block if at all possible.
1277			 */
1278			if (vdev_dtl_contains(ic->ic_vdev, DTL_MISSING,
1279			    zio->io_txg, 1))
1280				ic->ic_error = SET_ERROR(ESTALE);
1281
1282			ic->ic_data = abd_alloc_sametype(zio->io_abd,
1283			    is->is_size);
1284			ic->ic_duplicate = NULL;
1285
1286			zio_nowait(zio_vdev_child_io(zio, NULL,
1287			    ic->ic_vdev, is->is_target_offset, ic->ic_data,
1288			    is->is_size, zio->io_type, zio->io_priority, 0,
1289			    vdev_indirect_read_split_done, ic));
1290		}
1291	}
1292	iv->iv_reconstruct = B_TRUE;
1293}
1294
1295static void
1296vdev_indirect_io_start(zio_t *zio)
1297{
1298	spa_t *spa __maybe_unused = zio->io_spa;
1299	indirect_vsd_t *iv = kmem_zalloc(sizeof (*iv), KM_SLEEP);
1300	list_create(&iv->iv_splits,
1301	    sizeof (indirect_split_t), offsetof(indirect_split_t, is_node));
1302
1303	zio->io_vsd = iv;
1304	zio->io_vsd_ops = &vdev_indirect_vsd_ops;
1305
1306	ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
1307	if (zio->io_type != ZIO_TYPE_READ) {
1308		ASSERT3U(zio->io_type, ==, ZIO_TYPE_WRITE);
1309		/*
1310		 * Note: this code can handle other kinds of writes,
1311		 * but we don't expect them.
1312		 */
1313		ASSERT((zio->io_flags & (ZIO_FLAG_SELF_HEAL |
1314		    ZIO_FLAG_RESILVER | ZIO_FLAG_INDUCE_DAMAGE)) != 0);
1315	}
1316
1317	vdev_indirect_remap(zio->io_vd, zio->io_offset, zio->io_size,
1318	    vdev_indirect_gather_splits, zio);
1319
1320	indirect_split_t *first = list_head(&iv->iv_splits);
1321	ASSERT3P(first, !=, NULL);
1322	if (first->is_size == zio->io_size) {
1323		/*
1324		 * This is not a split block; we are pointing to the entire
1325		 * data, which will checksum the same as the original data.
1326		 * Pass the BP down so that the child i/o can verify the
1327		 * checksum, and try a different location if available
1328		 * (e.g. on a mirror).
1329		 *
1330		 * While this special case could be handled the same as the
1331		 * general (split block) case, doing it this way ensures
1332		 * that the vast majority of blocks on indirect vdevs
1333		 * (which are not split) are handled identically to blocks
1334		 * on non-indirect vdevs.  This allows us to be less strict
1335		 * about performance in the general (but rare) case.
1336		 */
1337		ASSERT0(first->is_split_offset);
1338		ASSERT3P(list_next(&iv->iv_splits, first), ==, NULL);
1339		zio_nowait(zio_vdev_child_io(zio, zio->io_bp,
1340		    first->is_vdev, first->is_target_offset,
1341		    abd_get_offset(zio->io_abd, 0),
1342		    zio->io_size, zio->io_type, zio->io_priority, 0,
1343		    vdev_indirect_child_io_done, zio));
1344	} else {
1345		iv->iv_split_block = B_TRUE;
1346		if (zio->io_type == ZIO_TYPE_READ &&
1347		    zio->io_flags & (ZIO_FLAG_SCRUB | ZIO_FLAG_RESILVER)) {
1348			/*
1349			 * Read all copies.  Note that for simplicity,
1350			 * we don't bother consulting the DTL in the
1351			 * resilver case.
1352			 */
1353			vdev_indirect_read_all(zio);
1354		} else {
1355			/*
1356			 * If this is a read zio, we read one copy of each
1357			 * split segment, from the top-level vdev.  Since
1358			 * we don't know the checksum of each split
1359			 * individually, the child zio can't ensure that
1360			 * we get the right data. E.g. if it's a mirror,
1361			 * it will just read from a random (healthy) leaf
1362			 * vdev. We have to verify the checksum in
1363			 * vdev_indirect_io_done().
1364			 *
1365			 * For write zios, the vdev code will ensure we write
1366			 * to all children.
1367			 */
1368			for (indirect_split_t *is = list_head(&iv->iv_splits);
1369			    is != NULL; is = list_next(&iv->iv_splits, is)) {
1370				zio_nowait(zio_vdev_child_io(zio, NULL,
1371				    is->is_vdev, is->is_target_offset,
1372				    abd_get_offset_size(zio->io_abd,
1373				    is->is_split_offset, is->is_size),
1374				    is->is_size, zio->io_type,
1375				    zio->io_priority, 0,
1376				    vdev_indirect_child_io_done, zio));
1377			}
1378
1379		}
1380	}
1381
1382	zio_execute(zio);
1383}
1384
1385/*
1386 * Report a checksum error for a child.
1387 */
1388static void
1389vdev_indirect_checksum_error(zio_t *zio,
1390    indirect_split_t *is, indirect_child_t *ic)
1391{
1392	vdev_t *vd = ic->ic_vdev;
1393
1394	if (zio->io_flags & ZIO_FLAG_SPECULATIVE)
1395		return;
1396
1397	mutex_enter(&vd->vdev_stat_lock);
1398	vd->vdev_stat.vs_checksum_errors++;
1399	mutex_exit(&vd->vdev_stat_lock);
1400
1401	zio_bad_cksum_t zbc = { 0 };
1402	abd_t *bad_abd = ic->ic_data;
1403	abd_t *good_abd = is->is_good_child->ic_data;
1404	(void) zfs_ereport_post_checksum(zio->io_spa, vd, NULL, zio,
1405	    is->is_target_offset, is->is_size, good_abd, bad_abd, &zbc);
1406}
1407
1408/*
1409 * Issue repair i/os for any incorrect copies.  We do this by comparing
1410 * each split segment's correct data (is_good_child's ic_data) with each
1411 * other copy of the data.  If they differ, then we overwrite the bad data
1412 * with the good copy.  The DTL is checked in vdev_indirect_read_all() and
1413 * if a vdev is missing a copy of the data we set ic_error and the read is
1414 * performed. This provides the opportunity to reconstruct the split block
1415 * if at all possible. ic_error is checked here and if set it suppresses
 * incrementing the checksum counter. Aside from this, DTLs are not checked,
1417 * which simplifies this code and also issues the optimal number of writes
1418 * (based on which copies actually read bad data, as opposed to which we
1419 * think might be wrong).  For the same reason, we always use
1420 * ZIO_FLAG_SELF_HEAL, to bypass the DTL check in zio_vdev_io_start().
1421 */
1422static void
1423vdev_indirect_repair(zio_t *zio)
1424{
1425	indirect_vsd_t *iv = zio->io_vsd;
1426
1427	if (!spa_writeable(zio->io_spa))
1428		return;
1429
1430	for (indirect_split_t *is = list_head(&iv->iv_splits);
1431	    is != NULL; is = list_next(&iv->iv_splits, is)) {
1432		for (int c = 0; c < is->is_children; c++) {
1433			indirect_child_t *ic = &is->is_child[c];
1434			if (ic == is->is_good_child)
1435				continue;
1436			if (ic->ic_data == NULL)
1437				continue;
1438			if (ic->ic_duplicate == is->is_good_child)
1439				continue;
1440
1441			zio_nowait(zio_vdev_child_io(zio, NULL,
1442			    ic->ic_vdev, is->is_target_offset,
1443			    is->is_good_child->ic_data, is->is_size,
1444			    ZIO_TYPE_WRITE, ZIO_PRIORITY_ASYNC_WRITE,
1445			    ZIO_FLAG_IO_REPAIR | ZIO_FLAG_SELF_HEAL,
1446			    NULL, NULL));
1447
1448			/*
1449			 * If ic_error is set the current child does not have
1450			 * a copy of the data, so suppress incrementing the
1451			 * checksum counter.
1452			 */
1453			if (ic->ic_error == ESTALE)
1454				continue;
1455
1456			vdev_indirect_checksum_error(zio, is, ic);
1457		}
1458	}
1459}
1460
1461/*
1462 * Report checksum errors on all children that we read from.
1463 */
1464static void
1465vdev_indirect_all_checksum_errors(zio_t *zio)
1466{
1467	indirect_vsd_t *iv = zio->io_vsd;
1468
1469	if (zio->io_flags & ZIO_FLAG_SPECULATIVE)
1470		return;
1471
1472	for (indirect_split_t *is = list_head(&iv->iv_splits);
1473	    is != NULL; is = list_next(&iv->iv_splits, is)) {
1474		for (int c = 0; c < is->is_children; c++) {
1475			indirect_child_t *ic = &is->is_child[c];
1476
1477			if (ic->ic_data == NULL)
1478				continue;
1479
1480			vdev_t *vd = ic->ic_vdev;
1481
1482			mutex_enter(&vd->vdev_stat_lock);
1483			vd->vdev_stat.vs_checksum_errors++;
1484			mutex_exit(&vd->vdev_stat_lock);
1485			(void) zfs_ereport_post_checksum(zio->io_spa, vd,
1486			    NULL, zio, is->is_target_offset, is->is_size,
1487			    NULL, NULL, NULL);
1488		}
1489	}
1490}
1491
1492/*
 * Copy data from all the splits to the main zio, then validate the checksum.
 * If the checksum is successfully validated, return success.
1495 */
1496static int
1497vdev_indirect_splits_checksum_validate(indirect_vsd_t *iv, zio_t *zio)
1498{
1499	zio_bad_cksum_t zbc;
1500
1501	for (indirect_split_t *is = list_head(&iv->iv_splits);
1502	    is != NULL; is = list_next(&iv->iv_splits, is)) {
1503
1504		ASSERT3P(is->is_good_child->ic_data, !=, NULL);
1505		ASSERT3P(is->is_good_child->ic_duplicate, ==, NULL);
1506
1507		abd_copy_off(zio->io_abd, is->is_good_child->ic_data,
1508		    is->is_split_offset, 0, is->is_size);
1509	}
1510
1511	return (zio_checksum_error(zio, &zbc));
1512}
1513
1514/*
1515 * There are relatively few possible combinations making it feasible to
1516 * deterministically check them all.  We do this by setting the good_child
1517 * to the next unique split version.  If we reach the end of the list then
1518 * "carry over" to the next unique split version (like counting in base
1519 * is_unique_children, but each digit can have a different base).
1520 */
1521static int
1522vdev_indirect_splits_enumerate_all(indirect_vsd_t *iv, zio_t *zio)
1523{
1524	boolean_t more = B_TRUE;
1525
1526	iv->iv_attempts = 0;
1527
1528	for (indirect_split_t *is = list_head(&iv->iv_splits);
1529	    is != NULL; is = list_next(&iv->iv_splits, is))
1530		is->is_good_child = list_head(&is->is_unique_child);
1531
1532	while (more == B_TRUE) {
1533		iv->iv_attempts++;
1534		more = B_FALSE;
1535
1536		if (vdev_indirect_splits_checksum_validate(iv, zio) == 0)
1537			return (0);
1538
1539		for (indirect_split_t *is = list_head(&iv->iv_splits);
1540		    is != NULL; is = list_next(&iv->iv_splits, is)) {
1541			is->is_good_child = list_next(&is->is_unique_child,
1542			    is->is_good_child);
1543			if (is->is_good_child != NULL) {
1544				more = B_TRUE;
1545				break;
1546			}
1547
1548			is->is_good_child = list_head(&is->is_unique_child);
1549		}
1550	}
1551
1552	ASSERT3S(iv->iv_attempts, <=, iv->iv_unique_combinations);
1553
1554	return (SET_ERROR(ECKSUM));
1555}
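
/*
 * For instance, with three splits having 2, 3, and 2 unique copies
 * respectively, the loop above walks the good-child tuples in the order
 * (0,0,0), (1,0,0), (0,1,0), (1,1,0), (0,2,0), (1,2,0), (0,0,1), ...,
 * (1,2,1): the first split is the least-significant "digit", and at most
 * 2 * 3 * 2 = 12 combinations (iv_unique_combinations) are attempted.
 */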
1556
1557/*
1558 * There are too many combinations to try all of them in a reasonable amount
1559 * of time.  So try a fixed number of random combinations from the unique
1560 * split versions, after which we'll consider the block unrecoverable.
1561 */
1562static int
1563vdev_indirect_splits_enumerate_randomly(indirect_vsd_t *iv, zio_t *zio)
1564{
1565	iv->iv_attempts = 0;
1566
1567	while (iv->iv_attempts < iv->iv_attempts_max) {
1568		iv->iv_attempts++;
1569
1570		for (indirect_split_t *is = list_head(&iv->iv_splits);
1571		    is != NULL; is = list_next(&iv->iv_splits, is)) {
1572			indirect_child_t *ic = list_head(&is->is_unique_child);
1573			int children = is->is_unique_children;
1574
			for (int i = random_in_range(children); i > 0; i--)
				ic = list_next(&is->is_unique_child, ic);

			ASSERT3P(ic, !=, NULL);
			is->is_good_child = ic;
		}

		if (vdev_indirect_splits_checksum_validate(iv, zio) == 0)
			return (0);
	}

	return (SET_ERROR(ECKSUM));
}

/*
 * This is a validation function for reconstruction.  It randomly selects
 * a good combination, if one can be found, and then it intentionally
 * damages all other segment copies by zeroing them.  This forces the
 * reconstruction algorithm to locate the one remaining known good copy.
 */
static int
vdev_indirect_splits_damage(indirect_vsd_t *iv, zio_t *zio)
{
	int error;

	/* Presume all the copies are unique for initial selection. */
	for (indirect_split_t *is = list_head(&iv->iv_splits);
	    is != NULL; is = list_next(&iv->iv_splits, is)) {
		is->is_unique_children = 0;

		for (int i = 0; i < is->is_children; i++) {
			indirect_child_t *ic = &is->is_child[i];
			if (ic->ic_data != NULL) {
				is->is_unique_children++;
				list_insert_tail(&is->is_unique_child, ic);
			}
		}

		if (list_is_empty(&is->is_unique_child)) {
			error = SET_ERROR(EIO);
			goto out;
		}
	}

	/*
	 * Set each is_good_child to a randomly-selected child which
	 * is known to contain validated data.
	 */
	error = vdev_indirect_splits_enumerate_randomly(iv, zio);
	if (error)
		goto out;

	/*
	 * Damage all but the known good copy of each split by zeroing it.
	 * This leaves at most two unique versions per indirect_split_t
	 * (the good copy and the zeroed copy), and both may need to be
	 * checked in order to reconstruct the block.  Set
	 * iv->iv_attempts_max such that all unique combinations will be
	 * enumerated, but limit the damage to at most 12 indirect splits.
	 */
	iv->iv_attempts_max = 1;

	for (indirect_split_t *is = list_head(&iv->iv_splits);
	    is != NULL; is = list_next(&iv->iv_splits, is)) {
		for (int c = 0; c < is->is_children; c++) {
			indirect_child_t *ic = &is->is_child[c];

			if (ic == is->is_good_child)
				continue;
			if (ic->ic_data == NULL)
				continue;

			abd_zero(ic->ic_data, abd_get_size(ic->ic_data));
		}

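		/*
		 * Each damaged split now has two unique versions (the good
		 * copy and the zeroed copies), so the number of combinations
		 * to enumerate doubles for every split we damage.
		 */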
		iv->iv_attempts_max *= 2;
		if (iv->iv_attempts_max >= (1ULL << 12)) {
			iv->iv_attempts_max = UINT64_MAX;
			break;
		}
	}

out:
	/* Empty the unique children lists so they can be reconstructed. */
	for (indirect_split_t *is = list_head(&iv->iv_splits);
	    is != NULL; is = list_next(&iv->iv_splits, is)) {
		indirect_child_t *ic;
		while ((ic = list_remove_head(&is->is_unique_child)) != NULL)
			;

		is->is_unique_children = 0;
	}

	return (error);
}

/*
 * This function is called when we have read all copies of the data and need
 * to try to find a combination of copies that gives us the right checksum.
 *
 * If we pointed to any mirror vdevs, this effectively does the job of the
 * mirror.  The mirror vdev code can't do its own job because we don't know
 * the checksum of each split segment individually.
 *
 * We have to try every unique combination of copies of split segments, until
 * we find one that checksums correctly.  Duplicate segment copies are first
 * identified and later skipped during reconstruction.  This optimization
 * reduces the search space and ensures that of the remaining combinations
 * at most one is correct.
 *
 * When the total number of combinations is small they can all be checked.
 * For example, if we have 3 segments in the split, and each points to a
 * 2-way mirror with unique copies, we will have the following pieces of data:
 *
 *       |     mirror child
 * split |     [0]        [1]
 * ======|=====================
 *   A   |  data_A_0   data_A_1
 *   B   |  data_B_0   data_B_1
 *   C   |  data_C_0   data_C_1
 *
 * We will try the following (mirror children)^(number of splits) (2^3=8)
 * combinations, which is similar to bitwise-little-endian counting in
 * binary.  In general each "digit" corresponds to a split segment, and the
 * base of each digit is is_children, which can be different for each
 * digit.
 *
 * "low bit"        "high bit"
 *        v                 v
 * data_A_0 data_B_0 data_C_0
 * data_A_1 data_B_0 data_C_0
 * data_A_0 data_B_1 data_C_0
 * data_A_1 data_B_1 data_C_0
 * data_A_0 data_B_0 data_C_1
 * data_A_1 data_B_0 data_C_1
 * data_A_0 data_B_1 data_C_1
 * data_A_1 data_B_1 data_C_1
 *
 * Note that the split segments may be on the same or different top-level
 * vdevs. In either case, we may need to try lots of combinations (see
 * zfs_reconstruct_indirect_combinations_max).  This ensures that if a mirror
 * has small silent errors on all of its children, we can still reconstruct
 * the correct data, as long as those errors are at sufficiently-separated
 * offsets (specifically, separated by the largest block size - default of
 * 128KB, but up to 16MB).
 */
static void
vdev_indirect_reconstruct_io_done(zio_t *zio)
{
	indirect_vsd_t *iv = zio->io_vsd;
	boolean_t known_good = B_FALSE;
	int error;

	iv->iv_unique_combinations = 1;
	iv->iv_attempts_max = UINT64_MAX;

	if (zfs_reconstruct_indirect_combinations_max > 0)
		iv->iv_attempts_max = zfs_reconstruct_indirect_combinations_max;

	/*
	 * If zfs_reconstruct_indirect_damage_fraction is nonzero, then on
	 * average one out of every that many blocks is deliberately damaged
	 * in order to validate reconstruction of split segments with damaged
	 * copies.  known_good is set when reconstruction is known to be
	 * possible.
	 */
	if (zfs_reconstruct_indirect_damage_fraction != 0 &&
	    random_in_range(zfs_reconstruct_indirect_damage_fraction) == 0)
		known_good = (vdev_indirect_splits_damage(iv, zio) == 0);

	/*
	 * Determine the unique children for a split segment and add them
	 * to the is_unique_child list.  By restricting reconstruction
	 * to these children, only unique combinations will be considered.
	 * This can vastly reduce the search space when there are a large
	 * number of indirect splits.
	 */
	for (indirect_split_t *is = list_head(&iv->iv_splits);
	    is != NULL; is = list_next(&iv->iv_splits, is)) {
		is->is_unique_children = 0;

		for (int i = 0; i < is->is_children; i++) {
			indirect_child_t *ic_i = &is->is_child[i];

			if (ic_i->ic_data == NULL ||
			    ic_i->ic_duplicate != NULL)
				continue;

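			/*
			 * Mark any later identical copy as a duplicate of
			 * ic_i; duplicates are skipped during reconstruction.
			 */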
			for (int j = i + 1; j < is->is_children; j++) {
				indirect_child_t *ic_j = &is->is_child[j];

				if (ic_j->ic_data == NULL ||
				    ic_j->ic_duplicate != NULL)
					continue;

				if (abd_cmp(ic_i->ic_data, ic_j->ic_data) == 0)
					ic_j->ic_duplicate = ic_i;
			}

			is->is_unique_children++;
			list_insert_tail(&is->is_unique_child, ic_i);
		}

		/* Reconstruction is impossible, no valid children */
		EQUIV(list_is_empty(&is->is_unique_child),
		    is->is_unique_children == 0);
		if (list_is_empty(&is->is_unique_child)) {
			zio->io_error = EIO;
			vdev_indirect_all_checksum_errors(zio);
			zio_checksum_verified(zio);
			return;
		}

		iv->iv_unique_combinations *= is->is_unique_children;
	}

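	/*
	 * Exhaustively check every unique combination when their number is
	 * within iv_attempts_max; otherwise try random combinations until
	 * the limit is reached.
	 */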
	if (iv->iv_unique_combinations <= iv->iv_attempts_max)
		error = vdev_indirect_splits_enumerate_all(iv, zio);
	else
		error = vdev_indirect_splits_enumerate_randomly(iv, zio);

	if (error != 0) {
		/* All attempted combinations failed. */
		ASSERT3B(known_good, ==, B_FALSE);
		zio->io_error = error;
		vdev_indirect_all_checksum_errors(zio);
	} else {
		/*
		 * The checksum has been successfully validated.  Issue
		 * repair I/Os to any copies of splits which don't match
		 * the validated version.
		 */
		ASSERT0(vdev_indirect_splits_checksum_validate(iv, zio));
		vdev_indirect_repair(zio);
		zio_checksum_verified(zio);
	}
}

static void
vdev_indirect_io_done(zio_t *zio)
{
	indirect_vsd_t *iv = zio->io_vsd;

	if (iv->iv_reconstruct) {
		/*
		 * We have read all copies of the data (e.g. from mirrors),
		 * either because this was a scrub/resilver, or because the
		 * one-copy read didn't checksum correctly.
		 */
		vdev_indirect_reconstruct_io_done(zio);
		return;
	}

	if (!iv->iv_split_block) {
		/*
		 * This was not a split block, so we passed the BP down,
		 * and the checksum was handled by the (one) child zio.
		 */
		return;
	}

	zio_bad_cksum_t zbc;
	int ret = zio_checksum_error(zio, &zbc);
	if (ret == 0) {
		zio_checksum_verified(zio);
		return;
	}

	/*
	 * The checksum didn't match.  Read all copies of all splits, and
	 * then we will try to reconstruct.  The next time
	 * vdev_indirect_io_done() is called, iv_reconstruct will be set.
	 */
	vdev_indirect_read_all(zio);

	zio_vdev_io_redone(zio);
}

vdev_ops_t vdev_indirect_ops = {
	.vdev_op_init = NULL,
	.vdev_op_fini = NULL,
	.vdev_op_open = vdev_indirect_open,
	.vdev_op_close = vdev_indirect_close,
	.vdev_op_asize = vdev_default_asize,
	.vdev_op_min_asize = vdev_default_min_asize,
	.vdev_op_min_alloc = NULL,
	.vdev_op_io_start = vdev_indirect_io_start,
	.vdev_op_io_done = vdev_indirect_io_done,
	.vdev_op_state_change = NULL,
	.vdev_op_need_resilver = NULL,
	.vdev_op_hold = NULL,
	.vdev_op_rele = NULL,
	.vdev_op_remap = vdev_indirect_remap,
	.vdev_op_xlate = NULL,
	.vdev_op_rebuild_asize = NULL,
	.vdev_op_metaslab_init = NULL,
	.vdev_op_config_generate = NULL,
	.vdev_op_nparity = NULL,
	.vdev_op_ndisks = NULL,
	.vdev_op_type = VDEV_TYPE_INDIRECT,	/* name of this vdev type */
	.vdev_op_leaf = B_FALSE			/* leaf vdev */
};

EXPORT_SYMBOL(spa_condense_fini);
EXPORT_SYMBOL(spa_start_indirect_condensing_thread);
EXPORT_SYMBOL(spa_condense_indirect_start_sync);
EXPORT_SYMBOL(spa_condense_init);
EXPORT_SYMBOL(spa_vdev_indirect_mark_obsolete);
EXPORT_SYMBOL(vdev_indirect_mark_obsolete);
EXPORT_SYMBOL(vdev_indirect_should_condense);
EXPORT_SYMBOL(vdev_indirect_sync_obsolete);
EXPORT_SYMBOL(vdev_obsolete_counts_are_precise);
EXPORT_SYMBOL(vdev_obsolete_sm_object);

/* BEGIN CSTYLED */
ZFS_MODULE_PARAM(zfs_condense, zfs_condense_, indirect_vdevs_enable, INT,
	ZMOD_RW, "Whether to attempt condensing indirect vdev mappings");

ZFS_MODULE_PARAM(zfs_condense, zfs_condense_, indirect_obsolete_pct, UINT,
	ZMOD_RW,
	"Minimum obsolete percent of bytes in the mapping "
	"to attempt condensing");

ZFS_MODULE_PARAM(zfs_condense, zfs_condense_, min_mapping_bytes, U64, ZMOD_RW,
	"Don't bother condensing if the mapping uses less than this amount of "
	"memory");

ZFS_MODULE_PARAM(zfs_condense, zfs_condense_, max_obsolete_bytes, U64,
	ZMOD_RW,
	"Minimum size obsolete spacemap to attempt condensing");

ZFS_MODULE_PARAM(zfs_condense, zfs_condense_, indirect_commit_entry_delay_ms,
	UINT, ZMOD_RW,
	"Used by tests to ensure certain actions happen in the middle of a "
	"condense. A maximum value of 1 should be sufficient.");

ZFS_MODULE_PARAM(zfs_reconstruct, zfs_reconstruct_, indirect_combinations_max,
	UINT, ZMOD_RW,
	"Maximum number of combinations when reconstructing split segments");
/* END CSTYLED */