/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2018 Intel Corporation.
 * Copyright (c) 2020 by Lawrence Livermore National Security, LLC.
 */

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_draid.h>
#include <sys/vdev_raidz.h>
#include <sys/vdev_rebuild.h>
#include <sys/abd.h>
#include <sys/zio.h>
#include <sys/nvpair.h>
#include <sys/zio_checksum.h>
#include <sys/fs/zfs.h>
#include <sys/fm/fs/zfs.h>
#include <zfs_fletcher.h>

#ifdef ZFS_DEBUG
#include <sys/vdev.h>	/* For vdev_xlate() in vdev_draid_io_verify() */
#endif

/*
 * dRAID is a distributed spare implementation for ZFS. A dRAID vdev is
 * comprised of multiple raidz redundancy groups which are spread over the
 * dRAID children. To ensure an even distribution, and avoid hot spots, a
 * permutation mapping is applied to the order of the dRAID children.
 * This mixing effectively distributes the parity columns evenly over all
 * of the disks in the dRAID.
 *
 * This is beneficial because it means that when resilvering all of the
 * disks can participate, thereby increasing the available IOPS and
 * bandwidth. Furthermore, by reserving a small fraction of each child's
 * total capacity, virtual distributed spare disks can be created. These
 * spares similarly benefit from the performance gains of spanning all of
 * the children. As a consequence, resilvering to a distributed spare can
 * substantially reduce the time required to restore full parity to a
 * pool with a failed disk.
 *
 * === dRAID group layout ===
 *
 * First, let's define a "row" in the configuration to be a 16M chunk from
 * each physical drive at the same offset. This is the minimum allowable
 * size since it must be possible to store a full 16M block when there is
 * only a single data column. Next, we define a "group" to be a set of
 * sequential disks containing both the parity and data columns. We allow
 * groups to span multiple rows in order to align any group size to any
 * number of physical drives. Finally, a "slice" is comprised of the rows
 * which contain the target number of groups. The permutation mappings
 * are applied in a round robin fashion to each slice.
 *
 * Given D+P drives in a group (including parity drives) and C-S physical
 * drives (not including the spare drives), we can distribute the groups
 * across R rows without remainder by choosing the number of groups such
 * that a slice contains LCM(D+P, C-S) disk slots; i.e.
 * ngroups = LCM(D+P, C-S) / (D+P).
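 * For example, with D+P = 9 and C-S = 12 there are LCM(9, 12) = 36 disk
 * slots per slice, which yields 36 / 9 = 4 groups spread over
 * 36 / 12 = 3 rows, matching the configuration shown below.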
 *
 * In the example below, there are C=14 physical drives in the configuration
 * with S=2 drives worth of spare capacity. Each group has a width of 9
 * which includes D=8 data and P=1 parity drive. There are 4 groups and
 * 3 rows per slice.  Each group has a size of 144M (16M * 9) and each
 * slice a size of 576M (144M * 4). When allocating from a dRAID each
 * group is filled before moving on to the next as shown in slice0 below.
 *
 *             data disks (8 data + 1 parity)          spares (2)
 *     +===+===+===+===+===+===+===+===+===+===+===+===+===+===+
 *  ^  | 2 | 6 | 1 | 11| 4 | 0 | 7 | 10| 8 | 9 | 13| 5 | 12| 3 | device map 0
 *  |  +===+===+===+===+===+===+===+===+===+===+===+===+===+===+
 *  |  |              group 0              |  group 1..|       |
 *  |  +-----------------------------------+-----------+-------|
 *  |  | 0   1   2   3   4   5   6   7   8 | 36  37  38|       |  r
 *  |  | 9   10  11  12  13  14  15  16  17| 45  46  47|       |  o
 *  |  | 18  19  20  21  22  23  24  25  26| 54  55  56|       |  w
 *     | 27  28  29  30  31  32  33  34  35| 63  64  65|       |  0
 *  s  +-----------------------+-----------------------+-------+
 *  l  |       ..group 1       |        group 2..      |       |
 *  i  +-----------------------+-----------------------+-------+
 *  c  | 39  40  41  42  43  44| 72  73  74  75  76  77|       |  r
 *  e  | 48  49  50  51  52  53| 81  82  83  84  85  86|       |  o
 *  0  | 57  58  59  60  61  62| 90  91  92  93  94  95|       |  w
 *     | 66  67  68  69  70  71| 99 100 101 102 103 104|       |  1
 *  |  +-----------+-----------+-----------------------+-------+
 *  |  |..group 2  |            group 3                |       |
 *  |  +-----------+-----------+-----------------------+-------+
 *  |  | 78  79  80|108 109 110 111 112 113 114 115 116|       |  r
 *  |  | 87  88  89|117 118 119 120 121 122 123 124 125|       |  o
 *  |  | 96  97  98|126 127 128 129 130 131 132 133 134|       |  w
 *  v  |105 106 107|135 136 137 138 139 140 141 142 143|       |  2
 *     +===+===+===+===+===+===+===+===+===+===+===+===+===+===+
 *     | 9 | 11| 12| 2 | 4 | 1 | 3 | 0 | 10| 13| 8 | 5 | 6 | 7 | device map 1
 *  s  +===+===+===+===+===+===+===+===+===+===+===+===+===+===+
 *  l  |              group 4              |  group 5..|       | row 3
 *  i  +-----------------------+-----------+-----------+-------|
 *  c  |       ..group 5       |        group 6..      |       | row 4
 *  e  +-----------+-----------+-----------------------+-------+
 *  1  |..group 6  |            group 7                |       | row 5
 *     +===+===+===+===+===+===+===+===+===+===+===+===+===+===+
 *     | 3 | 5 | 10| 8 | 6 | 11| 12| 0 | 2 | 4 | 7 | 1 | 9 | 13| device map 2
 *  s  +===+===+===+===+===+===+===+===+===+===+===+===+===+===+
 *  l  |              group 8              |  group 9..|       | row 6
 *  i  +-----------------------------------+-----------+-------|
 *  c  |       ..group 9       |        group 10..     |       | row 7
 *  e  +-----------------------+-----------------------+-------+
 *  2  |..group 10 |            group 11               |       | row 8
 *     +-----------+-----------------------------------+-------+
 *
 * This layout has several advantages over requiring that each row contain
 * a whole number of groups.
 *
 * 1. The group count is not a relevant parameter when defining a dRAID
 *    layout. Only the group width is needed, and *all* groups will have
 *    the desired size.
 *
 * 2. All possible group widths (<= physical disk count) can be supported.
 *
 * 3. The logic within vdev_draid.c is simplified when the group width is
 *    the same for all groups (although some of the logic around computing
 *    permutation numbers and drive offsets is more complicated).
 *
 * N.B. The following array describes all valid dRAID permutation maps.
 * Each row is used to generate a permutation map for a different number
 * of children from a unique seed. The seeds were generated and carefully
 * evaluated by the 'draid' utility in order to provide balanced mappings.
 * In addition to the seed, a checksum of the in-memory mapping is stored
 * for verification.
 *
 * The imbalance ratio of a given failure (e.g. 5 disks wide, child 3 failed,
 * with a given permutation map) is the ratio of the amounts of I/O that will
 * be sent to the most and least busy disks when resilvering. The average
 * imbalance ratio (of a given number of disks and permutation map) is the
 * average of the ratios of all possible single and double disk failures.
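 * For example, if a given failure causes the busiest disk to receive 110
 * sectors of rebuild I/O for every 100 sectors sent to the least busy
 * disk, the imbalance ratio of that failure is 1.10.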
 *
 * In order to achieve a low imbalance ratio the number of permutations in
 * the mapping must be significantly larger than the number of children.
 * For dRAID the number of permutations has been limited to 512 to minimize
 * the map size. This does result in a gradually increasing imbalance ratio
 * as seen in the table below. Increasing the number of permutations for
 * larger child counts would reduce the imbalance ratio. However, in practice
 * when there are a large number of children each child is responsible for
 * fewer total IOs so it's less of a concern.
 *
 * Note these values are hard coded and must never be changed.  Existing
 * pools depend on the same mapping always being generated in order to
 * read and write from the correct locations.  Any change would make
 * existing pools completely inaccessible.
 */
static const draid_map_t draid_maps[VDEV_DRAID_MAX_MAPS] = {
	{   2, 256, 0x89ef3dabbcc7de37, 0x00000000433d433d },	/* 1.000 */
	{   3, 256, 0x89a57f3de98121b4, 0x00000000bcd8b7b5 },	/* 1.000 */
	{   4, 256, 0xc9ea9ec82340c885, 0x00000001819d7c69 },	/* 1.000 */
	{   5, 256, 0xf46733b7f4d47dfd, 0x00000002a1648d74 },	/* 1.010 */
	{   6, 256, 0x88c3c62d8585b362, 0x00000003d3b0c2c4 },	/* 1.031 */
	{   7, 256, 0x3a65d809b4d1b9d5, 0x000000055c4183ee },	/* 1.043 */
	{   8, 256, 0xe98930e3c5d2e90a, 0x00000006edfb0329 },	/* 1.059 */
	{   9, 256, 0x5a5430036b982ccb, 0x00000008ceaf6934 },	/* 1.056 */
	{  10, 256, 0x92bf389e9eadac74, 0x0000000b26668c09 },	/* 1.072 */
	{  11, 256, 0x74ccebf1dcf3ae80, 0x0000000dd691358c },	/* 1.083 */
	{  12, 256, 0x8847e41a1a9f5671, 0x00000010a0c63c8e },	/* 1.097 */
	{  13, 256, 0x7481b56debf0e637, 0x0000001424121fe4 },	/* 1.100 */
	{  14, 256, 0x559b8c44065f8967, 0x00000016ab2ff079 },	/* 1.121 */
	{  15, 256, 0x34c49545a2ee7f01, 0x0000001a6028efd6 },	/* 1.103 */
	{  16, 256, 0xb85f4fa81a7698f7, 0x0000001e95ff5e66 },	/* 1.111 */
	{  17, 256, 0x6353e47b7e47aba0, 0x00000021a81fa0fe },	/* 1.133 */
	{  18, 256, 0xaa549746b1cbb81c, 0x00000026f02494c9 },	/* 1.131 */
	{  19, 256, 0x892e343f2f31d690, 0x00000029eb392835 },	/* 1.130 */
	{  20, 256, 0x76914824db98cc3f, 0x0000003004f31a7c },	/* 1.141 */
	{  21, 256, 0x4b3cbabf9cfb1d0f, 0x00000036363a2408 },	/* 1.139 */
	{  22, 256, 0xf45c77abb4f035d4, 0x00000038dd0f3e84 },	/* 1.150 */
	{  23, 256, 0x5e18bd7f3fd4baf4, 0x0000003f0660391f },	/* 1.174 */
	{  24, 256, 0xa7b3a4d285d6503b, 0x000000443dfc9ff6 },	/* 1.168 */
	{  25, 256, 0x56ac7dd967521f5a, 0x0000004b03a87eb7 },	/* 1.180 */
	{  26, 256, 0x3a42dfda4eb880f7, 0x000000522c719bba },	/* 1.226 */
	{  27, 256, 0xd200d2fc6b54bf60, 0x0000005760b4fdf5 },	/* 1.228 */
	{  28, 256, 0xc52605bbd486c546, 0x0000005e00d8f74c },	/* 1.217 */
	{  29, 256, 0xc761779e63cd762f, 0x00000067be3cd85c },	/* 1.239 */
	{  30, 256, 0xca577b1e07f85ca5, 0x0000006f5517f3e4 },	/* 1.238 */
	{  31, 256, 0xfd50a593c518b3d4, 0x0000007370e7778f },	/* 1.273 */
	{  32, 512, 0xc6c87ba5b042650b, 0x000000f7eb08a156 },	/* 1.191 */
	{  33, 512, 0xc3880d0c9d458304, 0x0000010734b5d160 },	/* 1.199 */
	{  34, 512, 0xe920927e4d8b2c97, 0x00000118c1edbce0 },	/* 1.195 */
	{  35, 512, 0x8da7fcda87bde316, 0x0000012a3e9f9110 },	/* 1.201 */
	{  36, 512, 0xcf09937491514a29, 0x0000013bd6a24bef },	/* 1.194 */
	{  37, 512, 0x9b5abbf345cbd7cc, 0x0000014b9d90fac3 },	/* 1.237 */
	{  38, 512, 0x506312a44668d6a9, 0x0000015e1b5f6148 },	/* 1.242 */
	{  39, 512, 0x71659ede62b4755f, 0x00000173ef029bcd },	/* 1.231 */
	{  40, 512, 0xa7fde73fb74cf2d7, 0x000001866fb72748 },	/* 1.233 */
	{  41, 512, 0x19e8b461a1dea1d3, 0x000001a046f76b23 },	/* 1.271 */
	{  42, 512, 0x031c9b868cc3e976, 0x000001afa64c49d3 },	/* 1.263 */
	{  43, 512, 0xbaa5125faa781854, 0x000001c76789e278 },	/* 1.270 */
	{  44, 512, 0x4ed55052550d721b, 0x000001d800ccd8eb },	/* 1.281 */
	{  45, 512, 0x0fd63ddbdff90677, 0x000001f08ad59ed2 },	/* 1.282 */
	{  46, 512, 0x36d66546de7fdd6f, 0x000002016f09574b },	/* 1.286 */
	{  47, 512, 0x99f997e7eafb69d7, 0x0000021e42e47cb6 },	/* 1.329 */
	{  48, 512, 0xbecd9c2571312c5d, 0x000002320fe2872b },	/* 1.286 */
	{  49, 512, 0xd97371329e488a32, 0x0000024cd73f2ca7 },	/* 1.322 */
	{  50, 512, 0x30e9b136670749ee, 0x000002681c83b0e0 },	/* 1.335 */
	{  51, 512, 0x11ad6bc8f47aaeb4, 0x0000027e9261b5d5 },	/* 1.305 */
	{  52, 512, 0x68e445300af432c1, 0x0000029aa0eb7dbf },	/* 1.330 */
	{  53, 512, 0x910fb561657ea98c, 0x000002b3dca04853 },	/* 1.365 */
	{  54, 512, 0xd619693d8ce5e7a5, 0x000002cc280e9c97 },	/* 1.334 */
	{  55, 512, 0x24e281f564dbb60a, 0x000002e9fa842713 },	/* 1.364 */
	{  56, 512, 0x947a7d3bdaab44c5, 0x000003046680f72e },	/* 1.374 */
	{  57, 512, 0x2d44fec9c093e0de, 0x00000324198ba810 },	/* 1.363 */
	{  58, 512, 0x87743c272d29bb4c, 0x0000033ec48c9ac9 },	/* 1.401 */
	{  59, 512, 0x96aa3b6f67f5d923, 0x0000034faead902c },	/* 1.392 */
	{  60, 512, 0x94a4f1faf520b0d3, 0x0000037d713ab005 },	/* 1.360 */
	{  61, 512, 0xb13ed3a272f711a2, 0x00000397368f3cbd },	/* 1.396 */
	{  62, 512, 0x3b1b11805fa4a64a, 0x000003b8a5e2840c },	/* 1.453 */
	{  63, 512, 0x4c74caad9172ba71, 0x000003d4be280290 },	/* 1.437 */
	{  64, 512, 0x035ff643923dd29e, 0x000003fad6c355e1 },	/* 1.402 */
	{  65, 512, 0x768e9171b11abd3c, 0x0000040eb07fed20 },	/* 1.459 */
	{  66, 512, 0x75880e6f78a13ddd, 0x000004433d6acf14 },	/* 1.423 */
	{  67, 512, 0x910b9714f698a877, 0x00000451ea65d5db },	/* 1.447 */
	{  68, 512, 0x87f5db6f9fdcf5c7, 0x000004732169e3f7 },	/* 1.450 */
	{  69, 512, 0x836d4968fbaa3706, 0x000004954068a380 },	/* 1.455 */
	{  70, 512, 0xc567d73a036421ab, 0x000004bd7cb7bd3d },	/* 1.463 */
	{  71, 512, 0x619df40f240b8fed, 0x000004e376c2e972 },	/* 1.463 */
	{  72, 512, 0x42763a680d5bed8e, 0x000005084275c680 },	/* 1.452 */
	{  73, 512, 0x5866f064b3230431, 0x0000052906f2c9ab },	/* 1.498 */
	{  74, 512, 0x9fa08548b1621a44, 0x0000054708019247 },	/* 1.526 */
	{  75, 512, 0xb6053078ce0fc303, 0x00000572cc5c72b0 },	/* 1.491 */
	{  76, 512, 0x4a7aad7bf3890923, 0x0000058e987bc8e9 },	/* 1.470 */
	{  77, 512, 0xe165613fd75b5a53, 0x000005c20473a211 },	/* 1.527 */
	{  78, 512, 0x3ff154ac878163a6, 0x000005d659194bf3 },	/* 1.509 */
	{  79, 512, 0x24b93ade0aa8a532, 0x0000060a201c4f8e },	/* 1.569 */
	{  80, 512, 0xc18e2d14cd9bb554, 0x0000062c55cfe48c },	/* 1.555 */
	{  81, 512, 0x98cc78302feb58b6, 0x0000066656a07194 },	/* 1.509 */
	{  82, 512, 0xc6c5fd5a2abc0543, 0x0000067cff94fbf8 },	/* 1.596 */
	{  83, 512, 0xa7962f514acbba21, 0x000006ab7b5afa2e },	/* 1.568 */
	{  84, 512, 0xba02545069ddc6dc, 0x000006d19861364f },	/* 1.541 */
	{  85, 512, 0x447c73192c35073e, 0x000006fce315ce35 },	/* 1.623 */
	{  86, 512, 0x48beef9e2d42b0c2, 0x00000720a8e38b6b },	/* 1.620 */
	{  87, 512, 0x4874cf98541a35e0, 0x00000758382a2273 },	/* 1.597 */
	{  88, 512, 0xad4cf8333a31127a, 0x00000781e1651b1b },	/* 1.575 */
	{  89, 512, 0x47ae4859d57888c1, 0x000007b27edbe5bc },	/* 1.627 */
	{  90, 512, 0x06f7723cfe5d1891, 0x000007dc2a96d8eb },	/* 1.596 */
	{  91, 512, 0xd4e44218d660576d, 0x0000080ac46f02d5 },	/* 1.622 */
	{  92, 512, 0x7066702b0d5be1f2, 0x00000832c96d154e },	/* 1.695 */
	{  93, 512, 0x011209b4f9e11fb9, 0x0000085eefda104c },	/* 1.605 */
	{  94, 512, 0x47ffba30a0b35708, 0x00000899badc32dc },	/* 1.625 */
	{  95, 512, 0x1a95a6ac4538aaa8, 0x000008b6b69a42b2 },	/* 1.687 */
	{  96, 512, 0xbda2b239bb2008eb, 0x000008f22d2de38a },	/* 1.621 */
	{  97, 512, 0x7ffa0bea90355c6c, 0x0000092e5b23b816 },	/* 1.699 */
	{  98, 512, 0x1d56ba34be426795, 0x0000094f482e5d1b },	/* 1.688 */
	{  99, 512, 0x0aa89d45c502e93d, 0x00000977d94a98ce },	/* 1.642 */
	{ 100, 512, 0x54369449f6857774, 0x000009c06c9b34cc },	/* 1.683 */
	{ 101, 512, 0xf7d4dd8445b46765, 0x000009e5dc542259 },	/* 1.755 */
	{ 102, 512, 0xfa8866312f169469, 0x00000a16b54eae93 },	/* 1.692 */
	{ 103, 512, 0xd8a5aea08aef3ff9, 0x00000a381d2cbfe7 },	/* 1.747 */
	{ 104, 512, 0x66bcd2c3d5f9ef0e, 0x00000a8191817be7 },	/* 1.751 */
	{ 105, 512, 0x3fb13a47a012ec81, 0x00000ab562b9a254 },	/* 1.751 */
	{ 106, 512, 0x43100f01c9e5e3ca, 0x00000aeee84c185f },	/* 1.726 */
	{ 107, 512, 0xca09c50ccee2d054, 0x00000b1c359c047d },	/* 1.788 */
	{ 108, 512, 0xd7176732ac503f9b, 0x00000b578bc52a73 },	/* 1.740 */
	{ 109, 512, 0xed206e51f8d9422d, 0x00000b8083e0d960 },	/* 1.780 */
	{ 110, 512, 0x17ead5dc6ba0dcd6, 0x00000bcfb1a32ca8 },	/* 1.836 */
	{ 111, 512, 0x5f1dc21e38a969eb, 0x00000c0171becdd6 },	/* 1.778 */
	{ 112, 512, 0xddaa973de33ec528, 0x00000c3edaba4b95 },	/* 1.831 */
	{ 113, 512, 0x2a5eccd7735a3630, 0x00000c630664e7df },	/* 1.825 */
	{ 114, 512, 0xafcccee5c0b71446, 0x00000cb65392f6e4 },	/* 1.826 */
	{ 115, 512, 0x8fa30c5e7b147e27, 0x00000cd4db391e55 },	/* 1.843 */
	{ 116, 512, 0x5afe0711fdfafd82, 0x00000d08cb4ec35d },	/* 1.826 */
	{ 117, 512, 0x533a6090238afd4c, 0x00000d336f115d1b },	/* 1.803 */
	{ 118, 512, 0x90cf11b595e39a84, 0x00000d8e041c2048 },	/* 1.857 */
	{ 119, 512, 0x0d61a3b809444009, 0x00000dcb798afe35 },	/* 1.877 */
	{ 120, 512, 0x7f34da0f54b0d114, 0x00000df3922664e1 },	/* 1.849 */
	{ 121, 512, 0xa52258d5b72f6551, 0x00000e4d37a9872d },	/* 1.867 */
	{ 122, 512, 0xc1de54d7672878db, 0x00000e6583a94cf6 },	/* 1.978 */
	{ 123, 512, 0x1d03354316a414ab, 0x00000ebffc50308d },	/* 1.947 */
	{ 124, 512, 0xcebdcc377665412c, 0x00000edee1997cea },	/* 1.865 */
	{ 125, 512, 0x4ddd4c04b1a12344, 0x00000f21d64b373f },	/* 1.881 */
	{ 126, 512, 0x64fc8f94e3973658, 0x00000f8f87a8896b },	/* 1.882 */
	{ 127, 512, 0x68765f78034a334e, 0x00000fb8fe62197e },	/* 1.867 */
	{ 128, 512, 0xaf36b871a303e816, 0x00000fec6f3afb1e },	/* 1.972 */
	{ 129, 512, 0x2a4cbf73866c3a28, 0x00001027febfe4e5 },	/* 1.896 */
	{ 130, 512, 0x9cb128aacdcd3b2f, 0x0000106aa8ac569d },	/* 1.965 */
	{ 131, 512, 0x5511d41c55869124, 0x000010bbd755ddf1 },	/* 1.963 */
	{ 132, 512, 0x42f92461937f284a, 0x000010fb8bceb3b5 },	/* 1.925 */
	{ 133, 512, 0xe2d89a1cf6f1f287, 0x0000114cf5331e34 },	/* 1.862 */
	{ 134, 512, 0xdc631a038956200e, 0x0000116428d2adc5 },	/* 2.042 */
	{ 135, 512, 0xb2e5ac222cd236be, 0x000011ca88e4d4d2 },	/* 1.935 */
	{ 136, 512, 0xbc7d8236655d88e7, 0x000011e39cb94e66 },	/* 2.005 */
	{ 137, 512, 0x073e02d88d2d8e75, 0x0000123136c7933c },	/* 2.041 */
	{ 138, 512, 0x3ddb9c3873166be0, 0x00001280e4ec6d52 },	/* 1.997 */
	{ 139, 512, 0x7d3b1a845420e1b5, 0x000012c2e7cd6a44 },	/* 1.996 */
	{ 140, 512, 0x60102308aa7b2a6c, 0x000012fc490e6c7d },	/* 2.053 */
	{ 141, 512, 0xdb22bb2f9eb894aa, 0x00001343f5a85a1a },	/* 1.971 */
	{ 142, 512, 0xd853f879a13b1606, 0x000013bb7d5f9048 },	/* 2.018 */
	{ 143, 512, 0x001620a03f804b1d, 0x000013e74cc794fd },	/* 1.961 */
	{ 144, 512, 0xfdb52dda76fbf667, 0x00001442d2f22480 },	/* 2.046 */
	{ 145, 512, 0xa9160110f66e24ff, 0x0000144b899f9dbb },	/* 1.968 */
	{ 146, 512, 0x77306a30379ae03b, 0x000014cb98eb1f81 },	/* 2.143 */
	{ 147, 512, 0x14f5985d2752319d, 0x000014feab821fc9 },	/* 2.064 */
	{ 148, 512, 0xa4b8ff11de7863f8, 0x0000154a0e60b9c9 },	/* 2.023 */
	{ 149, 512, 0x44b345426455c1b3, 0x000015999c3c569c },	/* 2.136 */
	{ 150, 512, 0x272677826049b46c, 0x000015c9697f4b92 },	/* 2.063 */
	{ 151, 512, 0x2f9216e2cd74fe40, 0x0000162b1f7bbd39 },	/* 1.974 */
	{ 152, 512, 0x706ae3e763ad8771, 0x00001661371c55e1 },	/* 2.210 */
	{ 153, 512, 0xf7fd345307c2480e, 0x000016e251f28b6a },	/* 2.006 */
	{ 154, 512, 0x6e94e3d26b3139eb, 0x000016f2429bb8c6 },	/* 2.193 */
	{ 155, 512, 0x5458bbfbb781fcba, 0x0000173efdeca1b9 },	/* 2.163 */
	{ 156, 512, 0xa80e2afeccd93b33, 0x000017bfdcb78adc },	/* 2.046 */
	{ 157, 512, 0x1e4ccbb22796cf9d, 0x00001826fdcc39c9 },	/* 2.084 */
	{ 158, 512, 0x8fba4b676aaa3663, 0x00001841a1379480 },	/* 2.264 */
	{ 159, 512, 0xf82b843814b315fa, 0x000018886e19b8a3 },	/* 2.074 */
	{ 160, 512, 0x7f21e920ecf753a3, 0x0000191812ca0ea7 },	/* 2.282 */
	{ 161, 512, 0x48bb8ea2c4caa620, 0x0000192f310faccf },	/* 2.148 */
	{ 162, 512, 0x5cdb652b4952c91b, 0x0000199e1d7437c7 },	/* 2.355 */
	{ 163, 512, 0x6ac1ba6f78c06cd4, 0x000019cd11f82c70 },	/* 2.164 */
	{ 164, 512, 0x9faf5f9ca2669a56, 0x00001a18d5431f6a },	/* 2.393 */
	{ 165, 512, 0xaa57e9383eb01194, 0x00001a9e7d253d85 },	/* 2.178 */
	{ 166, 512, 0x896967bf495c34d2, 0x00001afb8319b9fc },	/* 2.334 */
	{ 167, 512, 0xdfad5f05de225f1b, 0x00001b3a59c3093b },	/* 2.266 */
	{ 168, 512, 0xfd299a99f9f2abdd, 0x00001bb6f1a10799 },	/* 2.304 */
	{ 169, 512, 0xdda239e798fe9fd4, 0x00001bfae0c9692d },	/* 2.218 */
	{ 170, 512, 0x5fca670414a32c3e, 0x00001c22129dbcff },	/* 2.377 */
	{ 171, 512, 0x1bb8934314b087de, 0x00001c955db36cd0 },	/* 2.155 */
	{ 172, 512, 0xd96394b4b082200d, 0x00001cfc8619b7e6 },	/* 2.404 */
	{ 173, 512, 0xb612a7735b1c8cbc, 0x00001d303acdd585 },	/* 2.205 */
	{ 174, 512, 0x28e7430fe5875fe1, 0x00001d7ed5b3697d },	/* 2.359 */
	{ 175, 512, 0x5038e89efdd981b9, 0x00001dc40ec35c59 },	/* 2.158 */
	{ 176, 512, 0x075fd78f1d14db7c, 0x00001e31c83b4a2b },	/* 2.614 */
	{ 177, 512, 0xc50fafdb5021be15, 0x00001e7cdac82fbc },	/* 2.239 */
	{ 178, 512, 0xe6dc7572ce7b91c7, 0x00001edd8bb454fc },	/* 2.493 */
	{ 179, 512, 0x21f7843e7beda537, 0x00001f3a8e019d6c },	/* 2.327 */
	{ 180, 512, 0xc83385e20b43ec82, 0x00001f70735ec137 },	/* 2.231 */
	{ 181, 512, 0xca818217dddb21fd, 0x0000201ca44c5a3c },	/* 2.237 */
	{ 182, 512, 0xe6035defea48f933, 0x00002038e3346658 },	/* 2.691 */
	{ 183, 512, 0x47262a4f953dac5a, 0x000020c2e554314e },	/* 2.170 */
	{ 184, 512, 0xe24c7246260873ea, 0x000021197e618d64 },	/* 2.600 */
	{ 185, 512, 0xeef6b57c9b58e9e1, 0x0000217ea48ecddc },	/* 2.391 */
	{ 186, 512, 0x2becd3346e386142, 0x000021c496d4a5f9 },	/* 2.677 */
	{ 187, 512, 0x63c6207bdf3b40a3, 0x0000220e0f2eec0c },	/* 2.410 */
	{ 188, 512, 0x3056ce8989767d4b, 0x0000228eb76cd137 },	/* 2.776 */
	{ 189, 512, 0x91af61c307cee780, 0x000022e17e2ea501 },	/* 2.266 */
	{ 190, 512, 0xda359da225f6d54f, 0x00002358a2debc19 },	/* 2.717 */
	{ 191, 512, 0x0a5f7a2a55607ba0, 0x0000238a79dac18c },	/* 2.474 */
	{ 192, 512, 0x27bb75bf5224638a, 0x00002403a58e2351 },	/* 2.673 */
	{ 193, 512, 0x1ebfdb94630f5d0f, 0x00002492a10cb339 },	/* 2.420 */
	{ 194, 512, 0x6eae5e51d9c5f6fb, 0x000024ce4bf98715 },	/* 2.898 */
	{ 195, 512, 0x08d903b4daedc2e0, 0x0000250d1e15886c },	/* 2.363 */
	{ 196, 512, 0xc722a2f7fa7cd686, 0x0000258a99ed0c9e },	/* 2.747 */
	{ 197, 512, 0x8f71faf0e54e361d, 0x000025dee11976f5 },	/* 2.531 */
	{ 198, 512, 0x87f64695c91a54e7, 0x0000264e00a43da0 },	/* 2.707 */
	{ 199, 512, 0xc719cbac2c336b92, 0x000026d327277ac1 },	/* 2.315 */
	{ 200, 512, 0xe7e647afaf771ade, 0x000027523a5c44bf },	/* 3.012 */
	{ 201, 512, 0x12d4b5c38ce8c946, 0x0000273898432545 },	/* 2.378 */
	{ 202, 512, 0xf2e0cd4067bdc94a, 0x000027e47bb2c935 },	/* 2.969 */
	{ 203, 512, 0x21b79f14d6d947d3, 0x0000281e64977f0d },	/* 2.594 */
	{ 204, 512, 0x515093f952f18cd6, 0x0000289691a473fd },	/* 2.763 */
	{ 205, 512, 0xd47b160a1b1022c8, 0x00002903e8b52411 },	/* 2.457 */
	{ 206, 512, 0xc02fc96684715a16, 0x0000297515608601 },	/* 3.057 */
	{ 207, 512, 0xef51e68efba72ed0, 0x000029ef73604804 },	/* 2.590 */
	{ 208, 512, 0x9e3be6e5448b4f33, 0x00002a2846ed074b },	/* 3.047 */
	{ 209, 512, 0x81d446c6d5fec063, 0x00002a92ca693455 },	/* 2.676 */
	{ 210, 512, 0xff215de8224e57d5, 0x00002b2271fe3729 },	/* 2.993 */
	{ 211, 512, 0xe2524d9ba8f69796, 0x00002b64b99c3ba2 },	/* 2.457 */
	{ 212, 512, 0xf6b28e26097b7e4b, 0x00002bd768b6e068 },	/* 3.182 */
	{ 213, 512, 0x893a487f30ce1644, 0x00002c67f722b4b2 },	/* 2.563 */
	{ 214, 512, 0x386566c3fc9871df, 0x00002cc1cf8b4037 },	/* 3.025 */
	{ 215, 512, 0x1e0ed78edf1f558a, 0x00002d3948d36c7f },	/* 2.730 */
	{ 216, 512, 0xe3bc20c31e61f113, 0x00002d6d6b12e025 },	/* 3.036 */
	{ 217, 512, 0xd6c3ad2e23021882, 0x00002deff7572241 },	/* 2.722 */
	{ 218, 512, 0xb4a9f95cf0f69c5a, 0x00002e67d537aa36 },	/* 3.356 */
	{ 219, 512, 0x6e98ed6f6c38e82f, 0x00002e9720626789 },	/* 2.697 */
	{ 220, 512, 0x2e01edba33fddac7, 0x00002f407c6b0198 },	/* 2.979 */
	{ 221, 512, 0x559d02e1f5f57ccc, 0x00002fb6a5ab4f24 },	/* 2.858 */
	{ 222, 512, 0xac18f5a916adcd8e, 0x0000304ae1c5c57e },	/* 3.258 */
	{ 223, 512, 0x15789fbaddb86f4b, 0x0000306f6e019c78 },	/* 2.693 */
	{ 224, 512, 0xf4a9c36d5bc4c408, 0x000030da40434213 },	/* 3.259 */
	{ 225, 512, 0xf640f90fd2727f44, 0x00003189ed37b90c },	/* 2.733 */
	{ 226, 512, 0xb5313d390d61884a, 0x000031e152616b37 },	/* 3.235 */
	{ 227, 512, 0x4bae6b3ce9160939, 0x0000321f40aeac42 },	/* 2.983 */
	{ 228, 512, 0x838c34480f1a66a1, 0x000032f389c0f78e },	/* 3.308 */
	{ 229, 512, 0xb1c4a52c8e3d6060, 0x0000330062a40284 },	/* 2.715 */
	{ 230, 512, 0xe0f1110c6d0ed822, 0x0000338be435644f },	/* 3.540 */
	{ 231, 512, 0x9f1a8ccdcea68d4b, 0x000034045a4e97e1 },	/* 2.779 */
	{ 232, 512, 0x3261ed62223f3099, 0x000034702cfc401c },	/* 3.084 */
	{ 233, 512, 0xf2191e2311022d65, 0x00003509dd19c9fc },	/* 2.987 */
	{ 234, 512, 0xf102a395c2033abc, 0x000035654dc96fae },	/* 3.341 */
	{ 235, 512, 0x11fe378f027906b6, 0x000035b5193b0264 },	/* 2.793 */
	{ 236, 512, 0xf777f2c026b337aa, 0x000036704f5d9297 },	/* 3.518 */
	{ 237, 512, 0x1b04e9c2ee143f32, 0x000036dfbb7af218 },	/* 2.962 */
	{ 238, 512, 0x2fcec95266f9352c, 0x00003785c8df24a9 },	/* 3.196 */
	{ 239, 512, 0xfe2b0e47e427dd85, 0x000037cbdf5da729 },	/* 2.914 */
	{ 240, 512, 0x72b49bf2225f6c6d, 0x0000382227c15855 },	/* 3.408 */
	{ 241, 512, 0x50486b43df7df9c7, 0x0000389b88be6453 },	/* 2.903 */
	{ 242, 512, 0x5192a3e53181c8ab, 0x000038ddf3d67263 },	/* 3.778 */
	{ 243, 512, 0xe9f5d8365296fd5e, 0x0000399f1c6c9e9c },	/* 3.026 */
	{ 244, 512, 0xc740263f0301efa8, 0x00003a147146512d },	/* 3.347 */
	{ 245, 512, 0x23cd0f2b5671e67d, 0x00003ab10bcc0d9d },	/* 3.212 */
	{ 246, 512, 0x002ccc7e5cd41390, 0x00003ad6cd14a6c0 },	/* 3.482 */
	{ 247, 512, 0x9aafb3c02544b31b, 0x00003b8cb8779fb0 },	/* 3.146 */
	{ 248, 512, 0x72ba07a78b121999, 0x00003c24142a5a3f },	/* 3.626 */
	{ 249, 512, 0x3d784aa58edfc7b4, 0x00003cd084817d99 },	/* 2.952 */
	{ 250, 512, 0xaab750424d8004af, 0x00003d506a8e098e },	/* 3.463 */
	{ 251, 512, 0x84403fcf8e6b5ca2, 0x00003d4c54c2aec4 },	/* 3.131 */
	{ 252, 512, 0x71eb7455ec98e207, 0x00003e655715cf2c },	/* 3.538 */
	{ 253, 512, 0xd752b4f19301595b, 0x00003ecd7b2ca5ac },	/* 2.974 */
	{ 254, 512, 0xc4674129750499de, 0x00003e99e86d3e95 },	/* 3.843 */
	{ 255, 512, 0x9772baff5cd12ef5, 0x00003f895c019841 },	/* 3.088 */
};
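/*
 * For reference, the fields of each draid_map_t entry above are
 * dm_children, dm_nperms, dm_seed and dm_checksum; e.g. the
 * { 11, 256, ... } entry is the 256 permutation map used for an 11 child
 * dRAID, whose average imbalance ratio is 1.083.
 */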

/*
 * Verify the map is valid. Each device index must appear exactly
 * once in every row, and the permutation array checksum must match.
 */
static int
verify_perms(uint8_t *perms, uint64_t children, uint64_t nperms,
    uint64_t checksum)
{
	int countssz = sizeof (uint16_t) * children;
	uint16_t *counts = kmem_zalloc(countssz, KM_SLEEP);

	for (int i = 0; i < nperms; i++) {
		for (int j = 0; j < children; j++) {
			uint8_t val = perms[(i * children) + j];

			if (val >= children || counts[val] != i) {
				kmem_free(counts, countssz);
				return (EINVAL);
			}

			counts[val]++;
		}
	}

	if (checksum != 0) {
		int permssz = sizeof (uint8_t) * children * nperms;
		zio_cksum_t cksum;

		fletcher_4_native_varsize(perms, permssz, &cksum);

		if (checksum != cksum.zc_word[0]) {
			kmem_free(counts, countssz);
			return (ECKSUM);
		}
	}

	kmem_free(counts, countssz);

	return (0);
}

/*
 * Generate the permutation array for the draid_map_t.  These maps control
 * the placement of all data in a dRAID.  Therefore it's critical that the
 * seed always generates the same mapping.  We provide our own pseudo-random
 * number generator for this purpose.
 */
int
vdev_draid_generate_perms(const draid_map_t *map, uint8_t **permsp)
{
	VERIFY3U(map->dm_children, >=, VDEV_DRAID_MIN_CHILDREN);
	VERIFY3U(map->dm_children, <=, VDEV_DRAID_MAX_CHILDREN);
	VERIFY3U(map->dm_seed, !=, 0);
	VERIFY3U(map->dm_nperms, !=, 0);
	VERIFY3P(map->dm_perms, ==, NULL);

#ifdef _KERNEL
	/*
	 * The kernel code always provides both a map_seed and checksum.
	 * Only the tests/zfs-tests/cmd/draid/draid.c utility will provide
	 * a zero checksum when generating new candidate maps.
	 */
	VERIFY3U(map->dm_checksum, !=, 0);
#endif
	uint64_t children = map->dm_children;
	uint64_t nperms = map->dm_nperms;
	int rowsz = sizeof (uint8_t) * children;
	int permssz = rowsz * nperms;
	uint8_t *perms;

	/* Allocate the permutation array */
	perms = vmem_alloc(permssz, KM_SLEEP);

	/* Setup an initial row with a known pattern */
	uint8_t *initial_row = kmem_alloc(rowsz, KM_SLEEP);
	for (int i = 0; i < children; i++)
		initial_row[i] = i;

	uint64_t draid_seed[2] = { VDEV_DRAID_SEED, map->dm_seed };
	uint8_t *current_row, *previous_row = initial_row;

	/*
	 * Perform a Fisher-Yates shuffle of each row using the previous
	 * row as the starting point.  An initial_row with known pattern
	 * is used as the input for the first row.
	 */
	for (int i = 0; i < nperms; i++) {
		current_row = &perms[i * children];
		memcpy(current_row, previous_row, rowsz);

		for (int j = children - 1; j > 0; j--) {
			uint64_t k = vdev_draid_rand(draid_seed) % (j + 1);
			uint8_t val = current_row[j];
			current_row[j] = current_row[k];
			current_row[k] = val;
		}

		previous_row = current_row;
	}

	kmem_free(initial_row, rowsz);

	int error = verify_perms(perms, children, nperms, map->dm_checksum);
	if (error) {
		vmem_free(perms, permssz);
		return (error);
	}

	*permsp = perms;

	return (0);
}
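
#if 0
/*
 * Illustrative sketch (not compiled): generating the permutations for an
 * 11 child dRAID using the fixed maps above.  The caller owns the
 * returned array and releases it with vmem_free().
 */
static void
vdev_draid_perms_example(void)
{
	const draid_map_t *map;
	uint8_t *perms;

	VERIFY0(vdev_draid_lookup_map(11, &map));
	VERIFY0(vdev_draid_generate_perms(map, &perms));

	/* perms holds dm_nperms rows of dm_children device indices. */
	vmem_free(perms, map->dm_children * map->dm_nperms);
}
#endif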

/*
 * Lookup the fixed draid_map_t for the requested number of children.
 */
int
vdev_draid_lookup_map(uint64_t children, const draid_map_t **mapp)
{
	for (int i = 0; i < VDEV_DRAID_MAX_MAPS; i++) {
		if (draid_maps[i].dm_children == children) {
			*mapp = &draid_maps[i];
			return (0);
		}
	}

	return (ENOENT);
}

/*
 * Lookup the permutation array and iteration id for the provided offset.
 */
static void
vdev_draid_get_perm(vdev_draid_config_t *vdc, uint64_t pindex,
    uint8_t **base, uint64_t *iter)
{
	uint64_t ncols = vdc->vdc_children;
	uint64_t poff = pindex % (vdc->vdc_nperms * ncols);

	*base = vdc->vdc_perms + (poff / ncols) * ncols;
	*iter = poff % ncols;
}

static inline uint64_t
vdev_draid_permute_id(vdev_draid_config_t *vdc,
    uint8_t *base, uint64_t iter, uint64_t index)
{
	return ((base[index] + iter) % vdc->vdc_children);
}
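
/*
 * Illustrative example: with vdc_children = 5, base row { 2, 0, 4, 1, 3 }
 * and iter = 2, index 0 maps to device (2 + 2) % 5 = 4.  Each of the
 * vdc_children iterations derived from a base permutation simply shifts
 * the mapped device ids by iter modulo the child count.
 */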

/*
 * Return the asize, which is the psize rounded up to a full group width;
 * i.e. this is the psize to asize conversion for dRAID.
 */
static uint64_t
vdev_draid_asize(vdev_t *vd, uint64_t psize)
{
	vdev_draid_config_t *vdc = vd->vdev_tsd;
	uint64_t ashift = vd->vdev_ashift;

	ASSERT3P(vd->vdev_ops, ==, &vdev_draid_ops);

	uint64_t rows = ((psize - 1) / (vdc->vdc_ndata << ashift)) + 1;
	uint64_t asize = (rows * vdc->vdc_groupwidth) << ashift;

	ASSERT3U(asize, !=, 0);
	ASSERT3U(asize % (vdc->vdc_groupwidth), ==, 0);

	return (asize);
}
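
/*
 * Worked example (illustrative): with ashift = 12 (4K sectors), 8 data
 * disks and a group width of 9, a 32K psize fills exactly one row of
 * data sectors, so vdev_draid_asize() returns 1 * 9 * 4K = 36K.
 */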

/*
 * Deflate the asize to the psize; this includes stripping parity.
 */
uint64_t
vdev_draid_asize_to_psize(vdev_t *vd, uint64_t asize)
{
	vdev_draid_config_t *vdc = vd->vdev_tsd;

	ASSERT0(asize % vdc->vdc_groupwidth);

	return ((asize / vdc->vdc_groupwidth) * vdc->vdc_ndata);
}

/*
 * Convert a logical offset to the corresponding group number.
 */
static uint64_t
vdev_draid_offset_to_group(vdev_t *vd, uint64_t offset)
{
	vdev_draid_config_t *vdc = vd->vdev_tsd;

	ASSERT3P(vd->vdev_ops, ==, &vdev_draid_ops);

	return (offset / vdc->vdc_groupsz);
}

/*
 * Convert a group number to the logical starting offset for that group.
 */
static uint64_t
vdev_draid_group_to_offset(vdev_t *vd, uint64_t group)
{
	vdev_draid_config_t *vdc = vd->vdev_tsd;

	ASSERT3P(vd->vdev_ops, ==, &vdev_draid_ops);

	return (group * vdc->vdc_groupsz);
}

/*
 * Full stripe writes.  When writing, all columns (D+P) are required.  Parity
 * is calculated over all the columns, including empty zero filled sectors,
 * and each is written to disk.  While only the data columns are needed for
 * a normal read, all of the columns are required for reconstruction when
 * performing a sequential resilver.
 *
 * For "big columns" it's sufficient to map the correct range of the zio ABD.
 * Partial columns require allocating a gang ABD in order to zero fill the
 * empty sectors.  When the column is empty a zero filled sector must be
 * mapped.  In all cases the data ABDs must be the same size as the parity
 * ABDs (e.g. rc->rc_size == parity_size).
 */
static void
vdev_draid_map_alloc_write(zio_t *zio, uint64_t abd_offset, raidz_row_t *rr)
{
	uint64_t skip_size = 1ULL << zio->io_vd->vdev_top->vdev_ashift;
	uint64_t parity_size = rr->rr_col[0].rc_size;
	uint64_t abd_off = abd_offset;

	ASSERT3U(zio->io_type, ==, ZIO_TYPE_WRITE);
	ASSERT3U(parity_size, ==, abd_get_size(rr->rr_col[0].rc_abd));

	for (uint64_t c = rr->rr_firstdatacol; c < rr->rr_cols; c++) {
		raidz_col_t *rc = &rr->rr_col[c];

		if (rc->rc_size == 0) {
			/* empty data column (small write), add a skip sector */
			ASSERT3U(skip_size, ==, parity_size);
			rc->rc_abd = abd_get_zeros(skip_size);
		} else if (rc->rc_size == parity_size) {
			/* this is a "big column" */
			rc->rc_abd = abd_get_offset_struct(&rc->rc_abdstruct,
			    zio->io_abd, abd_off, rc->rc_size);
		} else {
			/* short data column, add a skip sector */
			ASSERT3U(rc->rc_size + skip_size, ==, parity_size);
			rc->rc_abd = abd_alloc_gang();
			abd_gang_add(rc->rc_abd, abd_get_offset_size(
			    zio->io_abd, abd_off, rc->rc_size), B_TRUE);
			abd_gang_add(rc->rc_abd, abd_get_zeros(skip_size),
			    B_TRUE);
		}

		ASSERT3U(abd_get_size(rc->rc_abd), ==, parity_size);

		abd_off += rc->rc_size;
		rc->rc_size = parity_size;
	}

	IMPLY(abd_offset != 0, abd_off == zio->io_size);
}

/*
 * Scrub/resilver reads.  In order to store the contents of the skip sectors
 * an additional ABD is allocated.  The columns are handled in the same way
 * as a full stripe write except instead of using the zero ABD the newly
 * allocated skip ABD is used to back the skip sectors.  In all cases the
 * data ABD must be the same size as the parity ABDs.
 */
static void
vdev_draid_map_alloc_scrub(zio_t *zio, uint64_t abd_offset, raidz_row_t *rr)
{
	uint64_t skip_size = 1ULL << zio->io_vd->vdev_top->vdev_ashift;
	uint64_t parity_size = rr->rr_col[0].rc_size;
	uint64_t abd_off = abd_offset;
	uint64_t skip_off = 0;

	ASSERT3U(zio->io_type, ==, ZIO_TYPE_READ);
	ASSERT3P(rr->rr_abd_empty, ==, NULL);

	if (rr->rr_nempty > 0) {
		rr->rr_abd_empty = abd_alloc_linear(rr->rr_nempty * skip_size,
		    B_FALSE);
	}

	for (uint64_t c = rr->rr_firstdatacol; c < rr->rr_cols; c++) {
		raidz_col_t *rc = &rr->rr_col[c];

		if (rc->rc_size == 0) {
			/* empty data column (small read), add a skip sector */
			ASSERT3U(skip_size, ==, parity_size);
			ASSERT3U(rr->rr_nempty, !=, 0);
			rc->rc_abd = abd_get_offset_size(rr->rr_abd_empty,
			    skip_off, skip_size);
			skip_off += skip_size;
		} else if (rc->rc_size == parity_size) {
			/* this is a "big column" */
			rc->rc_abd = abd_get_offset_struct(&rc->rc_abdstruct,
			    zio->io_abd, abd_off, rc->rc_size);
		} else {
			/* short data column, add a skip sector */
			ASSERT3U(rc->rc_size + skip_size, ==, parity_size);
			ASSERT3U(rr->rr_nempty, !=, 0);
			rc->rc_abd = abd_alloc_gang();
			abd_gang_add(rc->rc_abd, abd_get_offset_size(
			    zio->io_abd, abd_off, rc->rc_size), B_TRUE);
			abd_gang_add(rc->rc_abd, abd_get_offset_size(
			    rr->rr_abd_empty, skip_off, skip_size), B_TRUE);
			skip_off += skip_size;
		}

		uint64_t abd_size = abd_get_size(rc->rc_abd);
		ASSERT3U(abd_size, ==, abd_get_size(rr->rr_col[0].rc_abd));

		/*
		 * Increase rc_size so the skip ABD is included in subsequent
		 * parity calculations.
		 */
		abd_off += rc->rc_size;
		rc->rc_size = abd_size;
	}

	IMPLY(abd_offset != 0, abd_off == zio->io_size);
	ASSERT3U(skip_off, ==, rr->rr_nempty * skip_size);
}

/*
 * Normal reads.  In this common case only the columns containing data
 * are read into the zio ABDs.  Neither the parity columns nor the empty
 * skip sectors are read unless the checksum fails verification, in which
 * case vdev_raidz_read_all() will call vdev_draid_map_alloc_empty() to
 * expand the raid map in order to allow reconstruction using the parity
 * data and skip sectors.
 */
static void
vdev_draid_map_alloc_read(zio_t *zio, uint64_t abd_offset, raidz_row_t *rr)
{
	uint64_t abd_off = abd_offset;

	ASSERT3U(zio->io_type, ==, ZIO_TYPE_READ);

	for (uint64_t c = rr->rr_firstdatacol; c < rr->rr_cols; c++) {
		raidz_col_t *rc = &rr->rr_col[c];

		if (rc->rc_size > 0) {
			rc->rc_abd = abd_get_offset_struct(&rc->rc_abdstruct,
			    zio->io_abd, abd_off, rc->rc_size);
			abd_off += rc->rc_size;
		}
	}

	IMPLY(abd_offset != 0, abd_off == zio->io_size);
}

/*
 * Converts a normal "read" raidz_row_t to a "scrub" raidz_row_t. The key
 * difference is that an ABD is allocated to back skip sectors so they may
 * be read into memory, verified, and repaired if needed.
 */
void
vdev_draid_map_alloc_empty(zio_t *zio, raidz_row_t *rr)
{
	uint64_t skip_size = 1ULL << zio->io_vd->vdev_top->vdev_ashift;
	uint64_t parity_size = rr->rr_col[0].rc_size;
	uint64_t skip_off = 0;

	ASSERT3U(zio->io_type, ==, ZIO_TYPE_READ);
	ASSERT3P(rr->rr_abd_empty, ==, NULL);

	if (rr->rr_nempty > 0) {
		rr->rr_abd_empty = abd_alloc_linear(rr->rr_nempty * skip_size,
		    B_FALSE);
	}

	for (uint64_t c = rr->rr_firstdatacol; c < rr->rr_cols; c++) {
		raidz_col_t *rc = &rr->rr_col[c];

		if (rc->rc_size == 0) {
			/* empty data column (small read), add a skip sector */
			ASSERT3U(skip_size, ==, parity_size);
			ASSERT3U(rr->rr_nempty, !=, 0);
			ASSERT3P(rc->rc_abd, ==, NULL);
			rc->rc_abd = abd_get_offset_size(rr->rr_abd_empty,
			    skip_off, skip_size);
			skip_off += skip_size;
		} else if (rc->rc_size == parity_size) {
			/* this is a "big column", nothing to add */
			ASSERT3P(rc->rc_abd, !=, NULL);
		} else {
			/*
			 * short data column, add a skip sector and clear
			 * rc_tried to force the entire column to be re-read
			 * thereby including the missing skip sector data
			 * which is needed for reconstruction.
			 */
			ASSERT3U(rc->rc_size + skip_size, ==, parity_size);
			ASSERT3U(rr->rr_nempty, !=, 0);
			ASSERT3P(rc->rc_abd, !=, NULL);
			ASSERT(!abd_is_gang(rc->rc_abd));
			abd_t *read_abd = rc->rc_abd;
			rc->rc_abd = abd_alloc_gang();
			abd_gang_add(rc->rc_abd, read_abd, B_TRUE);
			abd_gang_add(rc->rc_abd, abd_get_offset_size(
			    rr->rr_abd_empty, skip_off, skip_size), B_TRUE);
			skip_off += skip_size;
			rc->rc_tried = 0;
		}

		/*
		 * Increase rc_size so the empty ABD is included in subsequent
		 * parity calculations.
		 */
		rc->rc_size = parity_size;
	}

	ASSERT3U(skip_off, ==, rr->rr_nempty * skip_size);
}

/*
 * Given a logical address within a dRAID configuration, return the physical
 * address on the first drive in the group that this address maps to
 * (at position 'start' in permutation number 'perm').
 */
static uint64_t
vdev_draid_logical_to_physical(vdev_t *vd, uint64_t logical_offset,
    uint64_t *perm, uint64_t *start)
{
	vdev_draid_config_t *vdc = vd->vdev_tsd;

	/* b_offset is the dRAID (parent) sector offset. */
	uint64_t ashift = vd->vdev_top->vdev_ashift;
	uint64_t b_offset = logical_offset >> ashift;

	/*
	 * The height of a row in units of the vdev's minimum sector size.
	 * This is the amount of data written to each disk of each group
	 * in a given permutation.
	 */
	uint64_t rowheight_sectors = VDEV_DRAID_ROWHEIGHT >> ashift;

	/*
	 * We cycle through a disk permutation every groupsz * ngroups chunk
	 * of address space. Note that ngroups * groupsz must be a multiple
	 * of the number of data drives (ndisks) in order to guarantee
	 * alignment. So, for example, if our row height is 16MB, our group
	 * width is 10, and there are 13 data drives in the draid, then
	 * ngroups will be 13, we will change permutation every 2.08GB and
	 * each disk will have 160MB of data per chunk.
	 */
	uint64_t groupwidth = vdc->vdc_groupwidth;
	uint64_t ngroups = vdc->vdc_ngroups;
	uint64_t ndisks = vdc->vdc_ndisks;

	/*
	 * groupstart is where the group this IO will land in "starts" in
	 * the permutation array.
	 */
	uint64_t group = logical_offset / vdc->vdc_groupsz;
	uint64_t groupstart = (group * groupwidth) % ndisks;
	ASSERT3U(groupstart + groupwidth, <=, ndisks + groupstart);
	*start = groupstart;

	/* b_offset is the sector offset within a group chunk */
	b_offset = b_offset % (rowheight_sectors * groupwidth);
	ASSERT0(b_offset % groupwidth);

	/*
	 * Find the starting byte offset on each child vdev:
	 * - within a permutation there are ngroups groups spread over the
	 *   rows, where each row covers a slice portion of the disk
	 * - each permutation has (groupwidth * ngroups) / ndisks rows
	 * - so each permutation covers rows * slice portion of the disk
	 * - so we need to find the row where this IO group target begins
	 */
	*perm = group / ngroups;
	uint64_t row = (*perm * ((groupwidth * ngroups) / ndisks)) +
	    (((group % ngroups) * groupwidth) / ndisks);
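	/*
	 * Worked example (illustrative): with groupwidth = 10 and
	 * ndisks = 13 (so ngroups = 13), group 1 begins at groupstart = 10
	 * of permutation 0 and wraps from row 0 onto row 1; group 2 then
	 * begins at groupstart = (20 % 13) = 7 on row (2 * 10) / 13 = 1.
	 */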

	return (((rowheight_sectors * row) +
	    (b_offset / groupwidth)) << ashift);
}

static uint64_t
vdev_draid_map_alloc_row(zio_t *zio, raidz_row_t **rrp, uint64_t io_offset,
    uint64_t abd_offset, uint64_t abd_size)
{
	vdev_t *vd = zio->io_vd;
	vdev_draid_config_t *vdc = vd->vdev_tsd;
	uint64_t ashift = vd->vdev_top->vdev_ashift;
	uint64_t io_size = abd_size;
	uint64_t io_asize = vdev_draid_asize(vd, io_size);
	uint64_t group = vdev_draid_offset_to_group(vd, io_offset);
	uint64_t start_offset = vdev_draid_group_to_offset(vd, group + 1);

	/*
	 * Limit the io_size to the space remaining in the group.  A second
	 * row in the raidz_map_t is created for the remainder.
	 */
	if (io_offset + io_asize > start_offset) {
		io_size = vdev_draid_asize_to_psize(vd,
		    start_offset - io_offset);
	}

	/*
	 * At most a block may span the logical end of one group and the start
	 * of the next group. Therefore, at the end of a group the io_size must
	 * span the group width evenly and the remainder must be aligned to the
	 * start of the next group.
	 */
	IMPLY(abd_offset == 0 && io_size < zio->io_size,
	    (io_asize >> ashift) % vdc->vdc_groupwidth == 0);
	IMPLY(abd_offset != 0,
	    vdev_draid_group_to_offset(vd, group) == io_offset);

	/* Lookup starting byte offset on each child vdev */
	uint64_t groupstart, perm;
	uint64_t physical_offset = vdev_draid_logical_to_physical(vd,
	    io_offset, &perm, &groupstart);

	/*
	 * If there are fewer than groupwidth drives available after the
	 * group start, the group is going to wrap onto the next row. 'wrap'
	 * is the group disk number that starts on the next row.
	 */
	uint64_t ndisks = vdc->vdc_ndisks;
	uint64_t groupwidth = vdc->vdc_groupwidth;
	uint64_t wrap = groupwidth;

	if (groupstart + groupwidth > ndisks)
		wrap = ndisks - groupstart;

	/* The io size in units of the vdev's minimum sector size. */
	const uint64_t psize = io_size >> ashift;

	/*
	 * "Quotient": The number of data sectors for this stripe on all but
	 * the "big column" child vdevs that also contain "remainder" data.
	 */
	uint64_t q = psize / vdc->vdc_ndata;

	/*
	 * "Remainder": The number of partial stripe data sectors in this I/O.
	 * This will add a sector to some, but not all, child vdevs.
	 */
	uint64_t r = psize - q * vdc->vdc_ndata;

	/* The number of "big columns" - those which contain remainder data. */
	uint64_t bc = (r == 0 ? 0 : r + vdc->vdc_nparity);
	ASSERT3U(bc, <, groupwidth);

	/* The total number of data and parity sectors for this I/O. */
	uint64_t tot = psize + (vdc->vdc_nparity * (q + (r == 0 ? 0 : 1)));
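
	/*
	 * Worked example (illustrative): with 8 data + 1 parity disks
	 * (groupwidth = 9) and psize = 5 sectors: q = 0 and r = 5, so
	 * there are bc = 6 big columns (1 parity + 5 data) of one sector
	 * each, tot = 6, and the remaining groupwidth - bc = 3 columns
	 * are empty skip sectors.
	 */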

	raidz_row_t *rr;
	rr = kmem_alloc(offsetof(raidz_row_t, rr_col[groupwidth]), KM_SLEEP);
	rr->rr_cols = groupwidth;
	rr->rr_scols = groupwidth;
	rr->rr_bigcols = bc;
	rr->rr_missingdata = 0;
	rr->rr_missingparity = 0;
	rr->rr_firstdatacol = vdc->vdc_nparity;
	rr->rr_abd_empty = NULL;
#ifdef ZFS_DEBUG
	rr->rr_offset = io_offset;
	rr->rr_size = io_size;
#endif
	*rrp = rr;

	uint8_t *base;
	uint64_t iter, asize = 0;
	vdev_draid_get_perm(vdc, perm, &base, &iter);
	for (uint64_t i = 0; i < groupwidth; i++) {
		raidz_col_t *rc = &rr->rr_col[i];
		uint64_t c = (groupstart + i) % ndisks;

		/* increment the offset if we wrap to the next row */
		if (i == wrap)
			physical_offset += VDEV_DRAID_ROWHEIGHT;

		rc->rc_devidx = vdev_draid_permute_id(vdc, base, iter, c);
		rc->rc_offset = physical_offset;
		rc->rc_abd = NULL;
		rc->rc_orig_data = NULL;
		rc->rc_error = 0;
		rc->rc_tried = 0;
		rc->rc_skipped = 0;
		rc->rc_force_repair = 0;
		rc->rc_allow_repair = 1;
		rc->rc_need_orig_restore = B_FALSE;

		if (q == 0 && i >= bc)
			rc->rc_size = 0;
		else if (i < bc)
			rc->rc_size = (q + 1) << ashift;
		else
			rc->rc_size = q << ashift;

		asize += rc->rc_size;
	}

	ASSERT3U(asize, ==, tot << ashift);
	rr->rr_nempty = roundup(tot, groupwidth) - tot;
	IMPLY(bc > 0, rr->rr_nempty == groupwidth - bc);

	/* Allocate buffers for the parity columns */
	for (uint64_t c = 0; c < rr->rr_firstdatacol; c++) {
		raidz_col_t *rc = &rr->rr_col[c];
		rc->rc_abd = abd_alloc_linear(rc->rc_size, B_FALSE);
	}

	/*
	 * Map buffers for data columns and allocate/map buffers for skip
	 * sectors.  There are three distinct cases for dRAID which are
	 * required to support sequential rebuild.
	 */
	if (zio->io_type == ZIO_TYPE_WRITE) {
		vdev_draid_map_alloc_write(zio, abd_offset, rr);
	} else if ((rr->rr_nempty > 0) &&
	    (zio->io_flags & (ZIO_FLAG_SCRUB | ZIO_FLAG_RESILVER))) {
		vdev_draid_map_alloc_scrub(zio, abd_offset, rr);
	} else {
		ASSERT3U(zio->io_type, ==, ZIO_TYPE_READ);
		vdev_draid_map_alloc_read(zio, abd_offset, rr);
	}

	return (io_size);
}

/*
 * Allocate the raidz mapping to be applied to the dRAID I/O.  The parity
 * calculations for dRAID are identical to raidz however there are a few
 * differences in the layout.
 *
 * - dRAID always allocates a full stripe width. Any extra sectors due
 *   to this padding are zero filled and written to disk. They will be
 *   read back during a scrub or repair operation since they are included
 *   in the parity calculation. This property enables sequential
 *   resilvering.
 *
 * - When the block at the logical offset spans redundancy groups then two
 *   rows are allocated in the raidz_map_t. One row resides at the end of
 *   the first group and the other at the start of the following group.
 */
static raidz_map_t *
vdev_draid_map_alloc(zio_t *zio)
{
	raidz_row_t *rr[2];
	uint64_t abd_offset = 0;
	uint64_t abd_size = zio->io_size;
	uint64_t io_offset = zio->io_offset;
	uint64_t size;
	int nrows = 1;

	size = vdev_draid_map_alloc_row(zio, &rr[0], io_offset,
	    abd_offset, abd_size);
	if (size < abd_size) {
		vdev_t *vd = zio->io_vd;

		io_offset += vdev_draid_asize(vd, size);
		abd_offset += size;
		abd_size -= size;
		nrows++;

		ASSERT3U(io_offset, ==, vdev_draid_group_to_offset(
		    vd, vdev_draid_offset_to_group(vd, io_offset)));
		ASSERT3U(abd_offset, <, zio->io_size);
		ASSERT3U(abd_size, !=, 0);

		size = vdev_draid_map_alloc_row(zio, &rr[1],
		    io_offset, abd_offset, abd_size);
		VERIFY3U(size, ==, abd_size);
	}

	raidz_map_t *rm;
	rm = kmem_zalloc(offsetof(raidz_map_t, rm_row[nrows]), KM_SLEEP);
	rm->rm_ops = vdev_raidz_math_get_ops();
	rm->rm_nrows = nrows;
	rm->rm_row[0] = rr[0];
	if (nrows == 2)
		rm->rm_row[1] = rr[1];

	return (rm);
}

/*
 * Given an offset into a dRAID return the next group width aligned offset
 * which can be used to start an allocation.
 */
static uint64_t
vdev_draid_get_astart(vdev_t *vd, const uint64_t start)
{
	vdev_draid_config_t *vdc = vd->vdev_tsd;

	ASSERT3P(vd->vdev_ops, ==, &vdev_draid_ops);

	return (roundup(start, vdc->vdc_groupwidth << vd->vdev_ashift));
}

/*
 * Allocatable space for dRAID is (children - nspares) * sizeof(smallest child)
 * rounded down to the last full slice.  So each child must provide at least
 * 1 / (children - nspares) of its asize.
 */
static uint64_t
vdev_draid_min_asize(vdev_t *vd)
{
	vdev_draid_config_t *vdc = vd->vdev_tsd;

	ASSERT3P(vd->vdev_ops, ==, &vdev_draid_ops);

	return (VDEV_DRAID_REFLOW_RESERVE +
	    (vd->vdev_min_asize + vdc->vdc_ndisks - 1) / (vdc->vdc_ndisks));
}

/*
 * When using dRAID the minimum allocation size is determined by the number
 * of data disks in the redundancy group.  Full stripes are always used.
 */
static uint64_t
vdev_draid_min_alloc(vdev_t *vd)
{
	vdev_draid_config_t *vdc = vd->vdev_tsd;

	ASSERT3P(vd->vdev_ops, ==, &vdev_draid_ops);

	return (vdc->vdc_ndata << vd->vdev_ashift);
}

/*
 * Returns true if the txg range does not exist on any leaf vdev.
 *
 * A dRAID spare does not fit into the DTL model. While it has child vdevs
 * there is no redundancy among them, and the effective child vdev is
 * determined by offset. Essentially we do a vdev_dtl_reassess() on the
 * fly by replacing a dRAID spare with the child vdev under the offset.
 * Note that it is a recursive process because the child vdev can be
 * another dRAID spare and so on.
 */
boolean_t
vdev_draid_missing(vdev_t *vd, uint64_t physical_offset, uint64_t txg,
    uint64_t size)
{
	if (vd->vdev_ops == &vdev_spare_ops ||
	    vd->vdev_ops == &vdev_replacing_ops) {
		/*
		 * Check all of the readable children; if any child
		 * contains the txg range then the data is not missing.
		 */
		for (int c = 0; c < vd->vdev_children; c++) {
			vdev_t *cvd = vd->vdev_child[c];

			if (!vdev_readable(cvd))
				continue;

			if (!vdev_draid_missing(cvd, physical_offset,
			    txg, size))
				return (B_FALSE);
		}

		return (B_TRUE);
	}

	if (vd->vdev_ops == &vdev_draid_spare_ops) {
		/*
		 * When sequentially resilvering we don't have a proper
		 * txg range so instead we must presume all txgs are
		 * missing on this vdev until the resilver completes.
		 */
		if (vd->vdev_rebuild_txg != 0)
			return (B_TRUE);

		/*
		 * DTL_MISSING is set for all prior txgs when a resilver
		 * is started in spa_vdev_attach().
		 */
		if (vdev_dtl_contains(vd, DTL_MISSING, txg, size))
			return (B_TRUE);

		/*
		 * Consult the DTL on the relevant vdev. Either a vdev
		 * leaf or spare/replace mirror child may be returned so
		 * we must recursively call vdev_draid_missing().
		 */
		vd = vdev_draid_spare_get_child(vd, physical_offset);
		if (vd == NULL)
			return (B_TRUE);

		return (vdev_draid_missing(vd, physical_offset,
		    txg, size));
	}

	return (vdev_dtl_contains(vd, DTL_MISSING, txg, size));
}

/*
 * Returns true if the txg is only partially replicated on the leaf vdevs.
 */
static boolean_t
vdev_draid_partial(vdev_t *vd, uint64_t physical_offset, uint64_t txg,
    uint64_t size)
{
	if (vd->vdev_ops == &vdev_spare_ops ||
	    vd->vdev_ops == &vdev_replacing_ops) {
		/*
		 * Check all of the readable children; if any child is
		 * missing the txg range then it is partially replicated.
		 */
		for (int c = 0; c < vd->vdev_children; c++) {
			vdev_t *cvd = vd->vdev_child[c];

			if (!vdev_readable(cvd))
				continue;

			if (vdev_draid_partial(cvd, physical_offset, txg, size))
				return (B_TRUE);
		}

		return (B_FALSE);
	}

	if (vd->vdev_ops == &vdev_draid_spare_ops) {
		/*
		 * When sequentially resilvering we don't have a proper
		 * txg range so instead we must presume all txgs are
		 * missing on this vdev until the resilver completes.
		 */
		if (vd->vdev_rebuild_txg != 0)
			return (B_TRUE);

		/*
		 * DTL_MISSING is set for all prior txgs when a resilver
		 * is started in spa_vdev_attach().
		 */
		if (vdev_dtl_contains(vd, DTL_MISSING, txg, size))
			return (B_TRUE);

		/*
		 * Consult the DTL on the relevant vdev. Either a vdev
		 * leaf or spare/replace mirror child may be returned so
		 * we must recursively call vdev_draid_partial().
		 */
		vd = vdev_draid_spare_get_child(vd, physical_offset);
		if (vd == NULL)
			return (B_TRUE);

		return (vdev_draid_partial(vd, physical_offset, txg, size));
	}

	return (vdev_dtl_contains(vd, DTL_MISSING, txg, size));
}

/*
 * Determine if the vdev is readable at the given offset.
 */
boolean_t
vdev_draid_readable(vdev_t *vd, uint64_t physical_offset)
{
	if (vd->vdev_ops == &vdev_draid_spare_ops) {
		vd = vdev_draid_spare_get_child(vd, physical_offset);
		if (vd == NULL)
			return (B_FALSE);
	}

	if (vd->vdev_ops == &vdev_spare_ops ||
	    vd->vdev_ops == &vdev_replacing_ops) {

		for (int c = 0; c < vd->vdev_children; c++) {
			vdev_t *cvd = vd->vdev_child[c];

			if (!vdev_readable(cvd))
				continue;

			if (vdev_draid_readable(cvd, physical_offset))
				return (B_TRUE);
		}

		return (B_FALSE);
	}

	return (vdev_readable(vd));
}

/*
 * Returns the first distributed spare found under the provided vdev tree.
 */
static vdev_t *
vdev_draid_find_spare(vdev_t *vd)
{
	if (vd->vdev_ops == &vdev_draid_spare_ops)
		return (vd);

	for (int c = 0; c < vd->vdev_children; c++) {
		vdev_t *svd = vdev_draid_find_spare(vd->vdev_child[c]);
		if (svd != NULL)
			return (svd);
	}

	return (NULL);
}

/*
 * Returns B_TRUE if the passed in vdev is currently "faulted".
 * Faulted, in this context, means that the vdev represents a
 * replacing or sparing vdev tree.
 */
static boolean_t
vdev_draid_faulted(vdev_t *vd, uint64_t physical_offset)
{
	if (vd->vdev_ops == &vdev_draid_spare_ops) {
		vd = vdev_draid_spare_get_child(vd, physical_offset);
		if (vd == NULL)
			return (B_FALSE);

		/*
		 * After resolving the distributed spare to a leaf vdev
		 * check the parent to determine if it's "faulted".
		 */
		vd = vd->vdev_parent;
	}

	return (vd->vdev_ops == &vdev_replacing_ops ||
	    vd->vdev_ops == &vdev_spare_ops);
}

/*
 * Determine if the dRAID block at the logical offset is degraded.
 * Used by sequential resilver.
 */
static boolean_t
vdev_draid_group_degraded(vdev_t *vd, uint64_t offset)
{
	vdev_draid_config_t *vdc = vd->vdev_tsd;

	ASSERT3P(vd->vdev_ops, ==, &vdev_draid_ops);
	ASSERT3U(vdev_draid_get_astart(vd, offset), ==, offset);

	uint64_t groupstart, perm;
	uint64_t physical_offset = vdev_draid_logical_to_physical(vd,
	    offset, &perm, &groupstart);

	uint8_t *base;
	uint64_t iter;
	vdev_draid_get_perm(vdc, perm, &base, &iter);

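	/*
	 * Walk the columns of this group.  For example, with hypothetical
	 * values of groupstart = 10, groupwidth = 4, and ndisks = 12, the
	 * loop below visits logical columns 10, 11, 0, and 1, mapping each
	 * through the permutation to a physical child vdev.
	 */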
	for (uint64_t i = 0; i < vdc->vdc_groupwidth; i++) {
		uint64_t c = (groupstart + i) % vdc->vdc_ndisks;
		uint64_t cid = vdev_draid_permute_id(vdc, base, iter, c);
		vdev_t *cvd = vd->vdev_child[cid];

		/* Group contains a faulted vdev. */
		if (vdev_draid_faulted(cvd, physical_offset))
			return (B_TRUE);

		/*
		 * Always check groups with active distributed spares
		 * because any vdev failure in the pool will affect them.
		 */
		if (vdev_draid_find_spare(cvd) != NULL)
			return (B_TRUE);
	}

	return (B_FALSE);
}

/*
 * Determine if the txg is missing.  Used by healing resilver.
 */
static boolean_t
vdev_draid_group_missing(vdev_t *vd, uint64_t offset, uint64_t txg,
    uint64_t size)
{
	vdev_draid_config_t *vdc = vd->vdev_tsd;

	ASSERT3P(vd->vdev_ops, ==, &vdev_draid_ops);
	ASSERT3U(vdev_draid_get_astart(vd, offset), ==, offset);

	uint64_t groupstart, perm;
	uint64_t physical_offset = vdev_draid_logical_to_physical(vd,
	    offset, &perm, &groupstart);

	uint8_t *base;
	uint64_t iter;
	vdev_draid_get_perm(vdc, perm, &base, &iter);

	for (uint64_t i = 0; i < vdc->vdc_groupwidth; i++) {
		uint64_t c = (groupstart + i) % vdc->vdc_ndisks;
		uint64_t cid = vdev_draid_permute_id(vdc, base, iter, c);
		vdev_t *cvd = vd->vdev_child[cid];

		/* Transaction group is known to be partially replicated. */
		if (vdev_draid_partial(cvd, physical_offset, txg, size))
			return (B_TRUE);

		/*
		 * Always check groups with active distributed spares
		 * because any vdev failure in the pool will affect them.
		 */
		if (vdev_draid_find_spare(cvd) != NULL)
			return (B_TRUE);
	}

	return (B_FALSE);
}

/*
 * Find the smallest child asize and largest sector size to calculate the
 * available capacity.  Distributed spares are ignored since their capacity
 * is also based on the minimum child size in the top-level dRAID.
 */
static void
vdev_draid_calculate_asize(vdev_t *vd, uint64_t *asizep, uint64_t *max_asizep,
    uint64_t *logical_ashiftp, uint64_t *physical_ashiftp)
{
	uint64_t logical_ashift = 0, physical_ashift = 0;
	uint64_t asize = 0, max_asize = 0;

	ASSERT3P(vd->vdev_ops, ==, &vdev_draid_ops);

	for (int c = 0; c < vd->vdev_children; c++) {
		vdev_t *cvd = vd->vdev_child[c];

		if (cvd->vdev_ops == &vdev_draid_spare_ops)
			continue;

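		/*
		 * Note: asize and max_asize start at zero, so on the first
		 * pass the unsigned wrap of 0 - 1 to UINT64_MAX makes MIN()
		 * select the child's value.
		 */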
		asize = MIN(asize - 1, cvd->vdev_asize - 1) + 1;
		max_asize = MIN(max_asize - 1, cvd->vdev_max_asize - 1) + 1;
		logical_ashift = MAX(logical_ashift, cvd->vdev_ashift);
		physical_ashift = MAX(physical_ashift,
		    cvd->vdev_physical_ashift);
	}

	*asizep = asize;
	*max_asizep = max_asize;
	*logical_ashiftp = logical_ashift;
	*physical_ashiftp = physical_ashift;
}

/*
 * Open spare vdevs.
 */
static boolean_t
vdev_draid_open_spares(vdev_t *vd)
{
	return (vd->vdev_ops == &vdev_draid_spare_ops ||
	    vd->vdev_ops == &vdev_replacing_ops ||
	    vd->vdev_ops == &vdev_spare_ops);
}

/*
 * Open all children, excluding spares.
 */
static boolean_t
vdev_draid_open_children(vdev_t *vd)
{
	return (!vdev_draid_open_spares(vd));
}

/*
 * Open a top-level dRAID vdev.
 */
static int
vdev_draid_open(vdev_t *vd, uint64_t *asize, uint64_t *max_asize,
    uint64_t *logical_ashift, uint64_t *physical_ashift)
{
	vdev_draid_config_t *vdc = vd->vdev_tsd;
	uint64_t nparity = vdc->vdc_nparity;
	int open_errors = 0;

	if (nparity > VDEV_DRAID_MAXPARITY ||
	    vd->vdev_children < nparity + 1) {
		vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
		return (SET_ERROR(EINVAL));
	}

	/*
	 * First open the normal children then the distributed spares.  This
	 * ordering is important to ensure the distributed spares calculate
	 * the correct psize in the event that the dRAID vdevs were expanded.
	 */
	vdev_open_children_subset(vd, vdev_draid_open_children);
	vdev_open_children_subset(vd, vdev_draid_open_spares);

	/* Verify enough of the children are available to continue. */
	for (int c = 0; c < vd->vdev_children; c++) {
		if (vd->vdev_child[c]->vdev_open_error != 0) {
			if ((++open_errors) > nparity) {
				vd->vdev_stat.vs_aux = VDEV_AUX_NO_REPLICAS;
				return (SET_ERROR(ENXIO));
			}
		}
	}

	/*
	 * Allocatable capacity is the sum of the space on all children less
	 * the capacity reserved for distributed spares, rounded down to the
	 * last full row and then to the last full group.  An additional 32MB
	 * of scratch space is reserved at the end of each child for use by
	 * the dRAID expansion feature.
	 */
	uint64_t child_asize, child_max_asize;
	vdev_draid_calculate_asize(vd, &child_asize, &child_max_asize,
	    logical_ashift, physical_ashift);

	/*
	 * Should be unreachable since the minimum child size is 64MB, but
	 * we want to make sure an underflow absolutely cannot occur here.
	 */
	if (child_asize < VDEV_DRAID_REFLOW_RESERVE ||
	    child_max_asize < VDEV_DRAID_REFLOW_RESERVE) {
		return (SET_ERROR(ENXIO));
	}

	child_asize = ((child_asize - VDEV_DRAID_REFLOW_RESERVE) /
	    VDEV_DRAID_ROWHEIGHT) * VDEV_DRAID_ROWHEIGHT;
	child_max_asize = ((child_max_asize - VDEV_DRAID_REFLOW_RESERVE) /
	    VDEV_DRAID_ROWHEIGHT) * VDEV_DRAID_ROWHEIGHT;

	*asize = (((child_asize * vdc->vdc_ndisks) / vdc->vdc_groupsz) *
	    vdc->vdc_groupsz);
	*max_asize = (((child_max_asize * vdc->vdc_ndisks) / vdc->vdc_groupsz) *
	    vdc->vdc_groupsz);

	return (0);
}
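
/*
 * Worked example with hypothetical numbers: a 970M child_asize less the
 * 32M reserve leaves 938M, which rounds down to 928M (58 full 16M rows).
 * Given vdc_ndisks = 12 and vdc_groupsz = 144M (a 9-wide group), the
 * total 928M * 12 = 11136M rounds down to 77 full groups, or 11088M of
 * allocatable capacity.
 */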

/*
 * Close a top-level dRAID vdev.
 */
static void
vdev_draid_close(vdev_t *vd)
{
	for (int c = 0; c < vd->vdev_children; c++) {
		if (vd->vdev_child[c] != NULL)
			vdev_close(vd->vdev_child[c]);
	}
}

/*
 * Return the maximum asize for a rebuild zio in the provided range
 * given the following constraints.  A dRAID chunk may not:
 *
 * - Exceed the maximum allowed block size (SPA_MAXBLOCKSIZE), or
 * - Span dRAID redundancy groups.
 */
static uint64_t
vdev_draid_rebuild_asize(vdev_t *vd, uint64_t start, uint64_t asize,
    uint64_t max_segment)
{
	vdev_draid_config_t *vdc = vd->vdev_tsd;

	ASSERT3P(vd->vdev_ops, ==, &vdev_draid_ops);

	uint64_t ashift = vd->vdev_ashift;
	uint64_t ndata = vdc->vdc_ndata;
	uint64_t psize = MIN(P2ROUNDUP(max_segment * ndata, 1 << ashift),
	    SPA_MAXBLOCKSIZE);

	ASSERT3U(vdev_draid_get_astart(vd, start), ==, start);
	ASSERT3U(asize % (vdc->vdc_groupwidth << ashift), ==, 0);

	/* Chunks must evenly span all data columns in the group. */
	psize = (((psize >> ashift) / ndata) * ndata) << ashift;
	uint64_t chunk_size = MIN(asize, vdev_psize_to_asize(vd, psize));

	/* Reduce the chunk size to the group space remaining. */
	uint64_t group = vdev_draid_offset_to_group(vd, start);
	uint64_t left = vdev_draid_group_to_offset(vd, group + 1) - start;
	chunk_size = MIN(chunk_size, left);

	ASSERT3U(chunk_size % (vdc->vdc_groupwidth << ashift), ==, 0);
	ASSERT3U(vdev_draid_offset_to_group(vd, start), ==,
	    vdev_draid_offset_to_group(vd, start + chunk_size - 1));

	return (chunk_size);
}
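
/*
 * For example, assuming ashift = 12, ndata = 8, and a 1M max_segment:
 * psize = MIN(P2ROUNDUP(1M * 8, 4K), SPA_MAXBLOCKSIZE) = 8M, which is
 * already a multiple of the 8 data columns, so the chunk is capped at
 * the asize of an 8M block and by the space remaining in the group.
 */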

/*
 * Align the start of the metaslab to the group width and slightly reduce
 * its size to a multiple of the group width.  Since full stripe writes
 * are required by dRAID, this space is unallocatable.  Furthermore,
 * aligning the metaslab start is important for vdev initialize and TRIM,
 * both of which operate on metaslab boundaries which vdev_xlate() expects
 * to be aligned.
 */
static void
vdev_draid_metaslab_init(vdev_t *vd, uint64_t *ms_start, uint64_t *ms_size)
{
	vdev_draid_config_t *vdc = vd->vdev_tsd;

	ASSERT3P(vd->vdev_ops, ==, &vdev_draid_ops);

	uint64_t sz = vdc->vdc_groupwidth << vd->vdev_ashift;
	uint64_t astart = vdev_draid_get_astart(vd, *ms_start);
	uint64_t asize = ((*ms_size - (astart - *ms_start)) / sz) * sz;

	*ms_start = astart;
	*ms_size = asize;

	ASSERT0(*ms_start % sz);
	ASSERT0(*ms_size % sz);
}
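
/*
 * For example, with a hypothetical groupwidth of 10 and an ashift of 12
 * (sz = 40K), a metaslab starting mid-stripe would be advanced to the
 * next allocation-aligned offset and its size rounded down to a
 * multiple of 40K.
 */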

/*
 * Add virtual dRAID spares to the list of valid spares.  To accomplish
 * this the existing array must be freed and reallocated with the
 * additional entries.
 */
int
vdev_draid_spare_create(nvlist_t *nvroot, vdev_t *vd, uint64_t *ndraidp,
    uint64_t next_vdev_id)
{
	uint64_t draid_nspares = 0;
	uint64_t ndraid = 0;
	int error;

	for (uint64_t i = 0; i < vd->vdev_children; i++) {
		vdev_t *cvd = vd->vdev_child[i];

		if (cvd->vdev_ops == &vdev_draid_ops) {
			vdev_draid_config_t *vdc = cvd->vdev_tsd;
			draid_nspares += vdc->vdc_nspares;
			ndraid++;
		}
	}

	if (draid_nspares == 0) {
		*ndraidp = ndraid;
		return (0);
	}

	nvlist_t **old_spares, **new_spares;
	uint_t old_nspares;
	error = nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    &old_spares, &old_nspares);
	if (error)
		old_nspares = 0;

	/* Allocate memory and copy the existing spares. */
	new_spares = kmem_alloc(sizeof (nvlist_t *) *
	    (draid_nspares + old_nspares), KM_SLEEP);
	for (uint_t i = 0; i < old_nspares; i++)
		new_spares[i] = fnvlist_dup(old_spares[i]);

	/* Add new distributed spares to ZPOOL_CONFIG_SPARES. */
	uint64_t n = old_nspares;
	for (uint64_t vdev_id = 0; vdev_id < vd->vdev_children; vdev_id++) {
		vdev_t *cvd = vd->vdev_child[vdev_id];
		char path[64];

		if (cvd->vdev_ops != &vdev_draid_ops)
			continue;

		vdev_draid_config_t *vdc = cvd->vdev_tsd;
		uint64_t nspares = vdc->vdc_nspares;
		uint64_t nparity = vdc->vdc_nparity;

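		/*
		 * The spare name has the form "draid<parity>-<vdev>-<spare>",
		 * e.g. a hypothetical "draid2-3-0" for parity 2, top-level
		 * vdev id 3, spare id 0.
		 */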
		for (uint64_t spare_id = 0; spare_id < nspares; spare_id++) {
			bzero(path, sizeof (path));
			(void) snprintf(path, sizeof (path) - 1,
			    "%s%llu-%llu-%llu", VDEV_TYPE_DRAID,
			    (u_longlong_t)nparity,
			    (u_longlong_t)next_vdev_id + vdev_id,
			    (u_longlong_t)spare_id);

			nvlist_t *spare = fnvlist_alloc();
			fnvlist_add_string(spare, ZPOOL_CONFIG_PATH, path);
			fnvlist_add_string(spare, ZPOOL_CONFIG_TYPE,
			    VDEV_TYPE_DRAID_SPARE);
			fnvlist_add_uint64(spare, ZPOOL_CONFIG_TOP_GUID,
			    cvd->vdev_guid);
			fnvlist_add_uint64(spare, ZPOOL_CONFIG_SPARE_ID,
			    spare_id);
			fnvlist_add_uint64(spare, ZPOOL_CONFIG_IS_LOG, 0);
			fnvlist_add_uint64(spare, ZPOOL_CONFIG_IS_SPARE, 1);
			fnvlist_add_uint64(spare, ZPOOL_CONFIG_WHOLE_DISK, 1);
			fnvlist_add_uint64(spare, ZPOOL_CONFIG_ASHIFT,
			    cvd->vdev_ashift);

			new_spares[n] = spare;
			n++;
		}
	}

	if (n > 0) {
		(void) nvlist_remove_all(nvroot, ZPOOL_CONFIG_SPARES);
		fnvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
		    new_spares, n);
	}

	for (int i = 0; i < n; i++)
		nvlist_free(new_spares[i]);

	kmem_free(new_spares, sizeof (*new_spares) * n);
	*ndraidp = ndraid;

	return (0);
}

/*
 * Determine if any portion of the provided block resides on a child vdev
 * with a dirty DTL and therefore needs to be resilvered.
 */
static boolean_t
vdev_draid_need_resilver(vdev_t *vd, const dva_t *dva, size_t psize,
    uint64_t phys_birth)
{
	uint64_t offset = DVA_GET_OFFSET(dva);
	uint64_t asize = vdev_draid_asize(vd, psize);

	if (phys_birth == TXG_UNKNOWN) {
		/*
		 * Sequential resilver.  There is no meaningful phys_birth
		 * for this block; we can only determine if the block resides
		 * in a degraded group, in which case it must be resilvered.
		 */
		return (vdev_draid_group_degraded(vd, offset));
	} else {
		/*
		 * Healing resilver.  TXGs not in DTL_PARTIAL are intact,
		 * as are blocks in non-degraded groups.
		 */
		if (!vdev_dtl_contains(vd, DTL_PARTIAL, phys_birth, 1))
			return (B_FALSE);

		if (vdev_draid_group_missing(vd, offset, phys_birth, 1))
			return (B_TRUE);

		/* The block may span groups in which case check both. */
		if (vdev_draid_offset_to_group(vd, offset) !=
		    vdev_draid_offset_to_group(vd, offset + asize - 1)) {
			if (vdev_draid_group_missing(vd,
			    offset + asize, phys_birth, 1))
				return (B_TRUE);
		}

		return (B_FALSE);
	}
}

static boolean_t
vdev_draid_rebuilding(vdev_t *vd)
{
	if (vd->vdev_ops->vdev_op_leaf && vd->vdev_rebuild_txg)
		return (B_TRUE);

	for (int i = 0; i < vd->vdev_children; i++) {
		if (vdev_draid_rebuilding(vd->vdev_child[i])) {
			return (B_TRUE);
		}
	}

	return (B_FALSE);
}

static void
vdev_draid_io_verify(vdev_t *vd, raidz_row_t *rr, int col)
{
#ifdef ZFS_DEBUG
	range_seg64_t logical_rs, physical_rs, remain_rs;
	logical_rs.rs_start = rr->rr_offset;
	logical_rs.rs_end = logical_rs.rs_start +
	    vdev_draid_asize(vd, rr->rr_size);

	raidz_col_t *rc = &rr->rr_col[col];
	vdev_t *cvd = vd->vdev_child[rc->rc_devidx];

	vdev_xlate(cvd, &logical_rs, &physical_rs, &remain_rs);
	ASSERT(vdev_xlate_is_empty(&remain_rs));
	ASSERT3U(rc->rc_offset, ==, physical_rs.rs_start);
	ASSERT3U(rc->rc_offset, <, physical_rs.rs_end);
	ASSERT3U(rc->rc_offset + rc->rc_size, ==, physical_rs.rs_end);
#endif
}

/*
 * For write operations:
 * 1. Generate the parity data
 * 2. Create child zio write operations to each column's vdev, for both
 *    data and parity.  A gang ABD is allocated by vdev_draid_map_alloc()
 *    if a skip sector needs to be added to a column.
 */
static void
vdev_draid_io_start_write(zio_t *zio, raidz_row_t *rr)
{
	vdev_t *vd = zio->io_vd;
	raidz_map_t *rm = zio->io_vsd;

	vdev_raidz_generate_parity_row(rm, rr);

	for (int c = 0; c < rr->rr_cols; c++) {
		raidz_col_t *rc = &rr->rr_col[c];

		/*
		 * Empty columns are zero filled and included in the parity
		 * calculation and therefore must be written.
		 */
		ASSERT3U(rc->rc_size, !=, 0);

		/* Verify physical to logical translation */
		vdev_draid_io_verify(vd, rr, c);

		zio_nowait(zio_vdev_child_io(zio, NULL,
		    vd->vdev_child[rc->rc_devidx], rc->rc_offset,
		    rc->rc_abd, rc->rc_size, zio->io_type, zio->io_priority,
		    0, vdev_raidz_child_done, rc));
	}
}

/*
 * For read operations:
 * 1. The vdev_draid_map_alloc() function will create a minimal raidz
 *    mapping for the read based on the zio->io_flags.  There are two
 *    possible mappings: 1) a normal read, or 2) a scrub/resilver.
 * 2. Create the zio read operations.  This will include all parity
 *    columns and skip sectors for a scrub/resilver.
 */
static void
vdev_draid_io_start_read(zio_t *zio, raidz_row_t *rr)
{
	vdev_t *vd = zio->io_vd;

	/* Sequential rebuild must do IO at redundancy group boundaries. */
	IMPLY(zio->io_priority == ZIO_PRIORITY_REBUILD, rr->rr_nempty == 0);

	/*
	 * Iterate over the columns in reverse order so that we hit the parity
	 * last.  Any errors along the way will force us to read the parity.
	 * For scrub/resilver IOs which verify skip sectors, a gang ABD will
	 * have been allocated to store them and rc->rc_size is increased.
	 */
	for (int c = rr->rr_cols - 1; c >= 0; c--) {
		raidz_col_t *rc = &rr->rr_col[c];
		vdev_t *cvd = vd->vdev_child[rc->rc_devidx];

		if (!vdev_draid_readable(cvd, rc->rc_offset)) {
			if (c >= rr->rr_firstdatacol)
				rr->rr_missingdata++;
			else
				rr->rr_missingparity++;
			rc->rc_error = SET_ERROR(ENXIO);
			rc->rc_tried = 1;
			rc->rc_skipped = 1;
			continue;
		}

		if (vdev_draid_missing(cvd, rc->rc_offset, zio->io_txg, 1)) {
			if (c >= rr->rr_firstdatacol)
				rr->rr_missingdata++;
			else
				rr->rr_missingparity++;
			rc->rc_error = SET_ERROR(ESTALE);
			rc->rc_skipped = 1;
			continue;
		}

		/*
		 * Empty columns may be read during vdev_draid_io_done().
		 * Only skip them after the readable and missing checks
		 * verify they are available.
		 */
		if (rc->rc_size == 0) {
			rc->rc_skipped = 1;
			continue;
		}

		if (zio->io_flags & ZIO_FLAG_RESILVER) {
			vdev_t *svd;

			/*
			 * Sequential rebuilds need to always consider the data
			 * on the child being rebuilt to be stale.  This is
			 * important when all columns are available to aid
			 * known reconstruction in identifying which columns
			 * contain incorrect data.
			 *
			 * Furthermore, all repairs need to be constrained to
			 * the devices being rebuilt because without a checksum
			 * we cannot verify the data is actually correct and
			 * performing an incorrect repair could result in
			 * locking in damage and making the data unrecoverable.
			 */
			if (zio->io_priority == ZIO_PRIORITY_REBUILD) {
				if (vdev_draid_rebuilding(cvd)) {
					if (c >= rr->rr_firstdatacol)
						rr->rr_missingdata++;
					else
						rr->rr_missingparity++;
					rc->rc_error = SET_ERROR(ESTALE);
					rc->rc_skipped = 1;
					rc->rc_allow_repair = 1;
					continue;
				} else {
					rc->rc_allow_repair = 0;
				}
			} else {
				rc->rc_allow_repair = 1;
			}

			/*
			 * If this child is a distributed spare then the
			 * offset might reside on the vdev being replaced,
			 * in which case this data must be written to the
			 * new device.  Failure to do so would result in
			 * checksum errors when the old device is detached
			 * and the pool is scrubbed.
			 */
			if ((svd = vdev_draid_find_spare(cvd)) != NULL) {
				svd = vdev_draid_spare_get_child(svd,
				    rc->rc_offset);
				if (svd && (svd->vdev_ops == &vdev_spare_ops ||
				    svd->vdev_ops == &vdev_replacing_ops)) {
					rc->rc_force_repair = 1;

					if (vdev_draid_rebuilding(svd))
						rc->rc_allow_repair = 1;
				}
			}

			/*
			 * Always issue a repair IO to this child when it's
			 * a spare or replacing vdev with an active rebuild.
			 */
			if ((cvd->vdev_ops == &vdev_spare_ops ||
			    cvd->vdev_ops == &vdev_replacing_ops) &&
			    vdev_draid_rebuilding(cvd)) {
				rc->rc_force_repair = 1;
				rc->rc_allow_repair = 1;
			}
		}
	}

	/*
	 * If either a parity or data column is missing, a repair
	 * may be attempted by vdev_draid_io_done().  Expand the raid map
	 * to read in empty columns which are needed along with the parity
	 * during reconstruction.
	 */
	if ((rr->rr_missingdata > 0 || rr->rr_missingparity > 0) &&
	    rr->rr_nempty > 0 && rr->rr_abd_empty == NULL) {
		vdev_draid_map_alloc_empty(zio, rr);
	}

	for (int c = rr->rr_cols - 1; c >= 0; c--) {
		raidz_col_t *rc = &rr->rr_col[c];
		vdev_t *cvd = vd->vdev_child[rc->rc_devidx];

		if (rc->rc_error || rc->rc_size == 0)
			continue;

		if (c >= rr->rr_firstdatacol || rr->rr_missingdata > 0 ||
		    (zio->io_flags & (ZIO_FLAG_SCRUB | ZIO_FLAG_RESILVER))) {
			zio_nowait(zio_vdev_child_io(zio, NULL, cvd,
			    rc->rc_offset, rc->rc_abd, rc->rc_size,
			    zio->io_type, zio->io_priority, 0,
			    vdev_raidz_child_done, rc));
		}
	}
}

/*
 * Start an IO operation to a dRAID vdev.
 */
static void
vdev_draid_io_start(zio_t *zio)
{
	vdev_t *vd __maybe_unused = zio->io_vd;

	ASSERT3P(vd->vdev_ops, ==, &vdev_draid_ops);
	ASSERT3U(zio->io_offset, ==, vdev_draid_get_astart(vd, zio->io_offset));

	raidz_map_t *rm = vdev_draid_map_alloc(zio);
	zio->io_vsd = rm;
	zio->io_vsd_ops = &vdev_raidz_vsd_ops;

	if (zio->io_type == ZIO_TYPE_WRITE) {
		for (int i = 0; i < rm->rm_nrows; i++) {
			vdev_draid_io_start_write(zio, rm->rm_row[i]);
		}
	} else {
		ASSERT(zio->io_type == ZIO_TYPE_READ);

		for (int i = 0; i < rm->rm_nrows; i++) {
			vdev_draid_io_start_read(zio, rm->rm_row[i]);
		}
	}

	zio_execute(zio);
}

/*
 * Complete an IO operation on a dRAID vdev.  The raidz logic can be applied
 * to dRAID since the layout is fully described by the raidz_map_t.
 */
static void
vdev_draid_io_done(zio_t *zio)
{
	vdev_raidz_io_done(zio);
}

static void
vdev_draid_state_change(vdev_t *vd, int faulted, int degraded)
{
	vdev_draid_config_t *vdc = vd->vdev_tsd;
	ASSERT(vd->vdev_ops == &vdev_draid_ops);

	if (faulted > vdc->vdc_nparity)
		vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_NO_REPLICAS);
	else if (degraded + faulted != 0)
		vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED, VDEV_AUX_NONE);
	else
		vdev_set_state(vd, B_FALSE, VDEV_STATE_HEALTHY, VDEV_AUX_NONE);
}

static void
vdev_draid_xlate(vdev_t *cvd, const range_seg64_t *logical_rs,
    range_seg64_t *physical_rs, range_seg64_t *remain_rs)
{
	vdev_t *raidvd = cvd->vdev_parent;
	ASSERT(raidvd->vdev_ops == &vdev_draid_ops);

	vdev_draid_config_t *vdc = raidvd->vdev_tsd;
	uint64_t ashift = raidvd->vdev_top->vdev_ashift;

	/* Make sure the offsets are block-aligned */
	ASSERT0(logical_rs->rs_start % (1 << ashift));
	ASSERT0(logical_rs->rs_end % (1 << ashift));

	uint64_t logical_start = logical_rs->rs_start;
	uint64_t logical_end = logical_rs->rs_end;

	/*
	 * Unaligned ranges must be skipped. All metaslabs are correctly
	 * aligned so this should not happen, but this case is handled in
	 * case it's needed by future callers.
	 */
	uint64_t astart = vdev_draid_get_astart(raidvd, logical_start);
	if (astart != logical_start) {
		physical_rs->rs_start = logical_start;
		physical_rs->rs_end = logical_start;
		remain_rs->rs_start = MIN(astart, logical_end);
		remain_rs->rs_end = logical_end;
		return;
	}

	/*
	 * Unlike with mirrors and raidz a dRAID logical range can map
	 * to multiple non-contiguous physical ranges. This is handled by
	 * limiting the size of the logical range to a single group and
	 * setting the remain argument such that it describes the remaining
	 * unmapped logical range. This is stricter than absolutely
	 * necessary but helps simplify the logic below.
	 */
	uint64_t group = vdev_draid_offset_to_group(raidvd, logical_start);
	uint64_t nextstart = vdev_draid_group_to_offset(raidvd, group + 1);
	if (logical_end > nextstart)
		logical_end = nextstart;

	/* Find the starting offset for each vdev in the group */
	uint64_t perm, groupstart;
	uint64_t start = vdev_draid_logical_to_physical(raidvd,
	    logical_start, &perm, &groupstart);
	uint64_t end = start;

	uint8_t *base;
	uint64_t iter, id;
	vdev_draid_get_perm(vdc, perm, &base, &iter);

	/*
	 * Check if the passed child falls within the group.  If it does
	 * update the start and end to reflect the physical range.
	 * Otherwise, leave them unmodified, which will result in an empty
	 * (zero-length) physical range being returned.
	 */
	for (uint64_t i = 0; i < vdc->vdc_groupwidth; i++) {
		uint64_t c = (groupstart + i) % vdc->vdc_ndisks;

		if (c == 0 && i != 0) {
			/* the group wrapped, increment the start */
			start += VDEV_DRAID_ROWHEIGHT;
			end = start;
		}

		id = vdev_draid_permute_id(vdc, base, iter, c);
		if (id == cvd->vdev_id) {
			uint64_t b_size = (logical_end >> ashift) -
			    (logical_start >> ashift);
			ASSERT3U(b_size, >, 0);
			end = start + ((((b_size - 1) /
			    vdc->vdc_groupwidth) + 1) << ashift);
			break;
		}
	}
	physical_rs->rs_start = start;
	physical_rs->rs_end = end;

	/*
	 * Only top-level vdevs are allowed to set remain_rs because
	 * when .vdev_op_xlate() is called for their children the full
	 * logical range is not provided by vdev_xlate().
	 */
	remain_rs->rs_start = logical_end;
	remain_rs->rs_end = logical_rs->rs_end;

	ASSERT3U(physical_rs->rs_start, <=, logical_start);
	ASSERT3U(physical_rs->rs_end - physical_rs->rs_start, <=,
	    logical_end - logical_start);
}
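
/*
 * For example, assuming ashift = 12 and groupwidth = 10: an 80K logical
 * range (20 blocks) maps to (((20 - 1) / 10) + 1) = 2 blocks, i.e. 8K,
 * on each child in the group; a child outside the group is returned an
 * empty (zero-length) physical range.
 */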

/*
 * Add dRAID specific fields to the config nvlist.
 */
static void
vdev_draid_config_generate(vdev_t *vd, nvlist_t *nv)
{
	ASSERT3P(vd->vdev_ops, ==, &vdev_draid_ops);
	vdev_draid_config_t *vdc = vd->vdev_tsd;

	fnvlist_add_uint64(nv, ZPOOL_CONFIG_NPARITY, vdc->vdc_nparity);
	fnvlist_add_uint64(nv, ZPOOL_CONFIG_DRAID_NDATA, vdc->vdc_ndata);
	fnvlist_add_uint64(nv, ZPOOL_CONFIG_DRAID_NSPARES, vdc->vdc_nspares);
	fnvlist_add_uint64(nv, ZPOOL_CONFIG_DRAID_NGROUPS, vdc->vdc_ngroups);
}

/*
 * Initialize private dRAID specific fields from the nvlist.
 */
static int
vdev_draid_init(spa_t *spa, nvlist_t *nv, void **tsd)
{
	uint64_t ndata, nparity, nspares, ngroups;
	int error;

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DRAID_NDATA, &ndata))
		return (SET_ERROR(EINVAL));

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY, &nparity) ||
	    nparity == 0 || nparity > VDEV_DRAID_MAXPARITY) {
		return (SET_ERROR(EINVAL));
	}

	uint_t children;
	nvlist_t **child;
	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0 || children == 0 ||
	    children > VDEV_DRAID_MAX_CHILDREN) {
		return (SET_ERROR(EINVAL));
	}

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DRAID_NSPARES, &nspares) ||
	    nspares > 100 || nspares > (children - (ndata + nparity))) {
		return (SET_ERROR(EINVAL));
	}

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DRAID_NGROUPS, &ngroups) ||
	    ngroups == 0 || ngroups > VDEV_DRAID_MAX_CHILDREN) {
		return (SET_ERROR(EINVAL));
	}

	/*
	 * Validate the minimum number of children exist per group for the
	 * specified parity level (draid1 >= 2, draid2 >= 3, draid3 >= 4).
	 */
	if (children < (ndata + nparity + nspares))
		return (SET_ERROR(EINVAL));

	/*
	 * Create the dRAID configuration using the pool nvlist configuration
	 * and the fixed mapping for the correct number of children.
	 */
	vdev_draid_config_t *vdc;
	const draid_map_t *map;

	error = vdev_draid_lookup_map(children, &map);
	if (error)
		return (SET_ERROR(EINVAL));

	vdc = kmem_zalloc(sizeof (*vdc), KM_SLEEP);
	vdc->vdc_ndata = ndata;
	vdc->vdc_nparity = nparity;
	vdc->vdc_nspares = nspares;
	vdc->vdc_children = children;
	vdc->vdc_ngroups = ngroups;
	vdc->vdc_nperms = map->dm_nperms;

	error = vdev_draid_generate_perms(map, &vdc->vdc_perms);
	if (error) {
		kmem_free(vdc, sizeof (*vdc));
		return (SET_ERROR(EINVAL));
	}

	/*
	 * Derived constants.
	 */
	vdc->vdc_groupwidth = vdc->vdc_ndata + vdc->vdc_nparity;
	vdc->vdc_ndisks = vdc->vdc_children - vdc->vdc_nspares;
	vdc->vdc_groupsz = vdc->vdc_groupwidth * VDEV_DRAID_ROWHEIGHT;
	vdc->vdc_devslicesz = (vdc->vdc_groupsz * vdc->vdc_ngroups) /
	    vdc->vdc_ndisks;
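
	/*
	 * For example, a hypothetical layout with ndata = 4, nparity = 2,
	 * children = 12, and nspares = 2 gives vdc_groupwidth = 6 and
	 * vdc_ndisks = 10, so (assuming the 16M row height) vdc_groupsz =
	 * 6 * 16M = 96M.  With ngroups = 30, groupwidth * ngroups divides
	 * evenly by ndisks and vdc_devslicesz = (96M * 30) / 10 = 288M.
	 */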

	ASSERT3U(vdc->vdc_groupwidth, >=, 2);
	ASSERT3U(vdc->vdc_groupwidth, <=, vdc->vdc_ndisks);
	ASSERT3U(vdc->vdc_groupsz, >=, 2 * VDEV_DRAID_ROWHEIGHT);
	ASSERT3U(vdc->vdc_devslicesz, >=, VDEV_DRAID_ROWHEIGHT);
	ASSERT3U(vdc->vdc_devslicesz % VDEV_DRAID_ROWHEIGHT, ==, 0);
	ASSERT3U((vdc->vdc_groupwidth * vdc->vdc_ngroups) %
	    vdc->vdc_ndisks, ==, 0);

	*tsd = vdc;

	return (0);
}

static void
vdev_draid_fini(vdev_t *vd)
{
	vdev_draid_config_t *vdc = vd->vdev_tsd;

	vmem_free(vdc->vdc_perms, sizeof (uint8_t) *
	    vdc->vdc_children * vdc->vdc_nperms);
	kmem_free(vdc, sizeof (*vdc));
}

static uint64_t
vdev_draid_nparity(vdev_t *vd)
{
	vdev_draid_config_t *vdc = vd->vdev_tsd;

	return (vdc->vdc_nparity);
}

static uint64_t
vdev_draid_ndisks(vdev_t *vd)
{
	vdev_draid_config_t *vdc = vd->vdev_tsd;

	return (vdc->vdc_ndisks);
}

vdev_ops_t vdev_draid_ops = {
	.vdev_op_init = vdev_draid_init,
	.vdev_op_fini = vdev_draid_fini,
	.vdev_op_open = vdev_draid_open,
	.vdev_op_close = vdev_draid_close,
	.vdev_op_asize = vdev_draid_asize,
	.vdev_op_min_asize = vdev_draid_min_asize,
	.vdev_op_min_alloc = vdev_draid_min_alloc,
	.vdev_op_io_start = vdev_draid_io_start,
	.vdev_op_io_done = vdev_draid_io_done,
	.vdev_op_state_change = vdev_draid_state_change,
	.vdev_op_need_resilver = vdev_draid_need_resilver,
	.vdev_op_hold = NULL,
	.vdev_op_rele = NULL,
	.vdev_op_remap = NULL,
	.vdev_op_xlate = vdev_draid_xlate,
	.vdev_op_rebuild_asize = vdev_draid_rebuild_asize,
	.vdev_op_metaslab_init = vdev_draid_metaslab_init,
	.vdev_op_config_generate = vdev_draid_config_generate,
	.vdev_op_nparity = vdev_draid_nparity,
	.vdev_op_ndisks = vdev_draid_ndisks,
	.vdev_op_type = VDEV_TYPE_DRAID,
	.vdev_op_leaf = B_FALSE,
};


/*
 * A dRAID distributed spare is a virtual leaf vdev which is included in the
 * parent dRAID configuration.  The last N columns of the dRAID permutation
 * table are used to determine on which dRAID children a specific offset
 * should be written.  These spare leaf vdevs can only be used to replace
 * faulted children in the same dRAID configuration.
 */

/*
 * Distributed spare state.  All fields are set when the distributed spare is
 * first opened and are immutable.
 */
typedef struct {
	vdev_t *vds_draid_vdev;		/* top-level parent dRAID vdev */
	uint64_t vds_top_guid;		/* top-level parent dRAID guid */
	uint64_t vds_spare_id;		/* spare id (0 - vdc->vdc_nspares-1) */
} vdev_draid_spare_t;

/*
 * Returns the parent dRAID vdev to which the distributed spare belongs.
 * This may be safely called even when the vdev is not open.
 */
vdev_t *
vdev_draid_spare_get_parent(vdev_t *vd)
{
	vdev_draid_spare_t *vds = vd->vdev_tsd;

	ASSERT3P(vd->vdev_ops, ==, &vdev_draid_spare_ops);

	if (vds->vds_draid_vdev != NULL)
		return (vds->vds_draid_vdev);

	return (vdev_lookup_by_guid(vd->vdev_spa->spa_root_vdev,
	    vds->vds_top_guid));
}

/*
 * A dRAID spare is active when it's the child of a vdev using the
 * vdev_spare_ops, vdev_replacing_ops or vdev_draid_ops.
 */
static boolean_t
vdev_draid_spare_is_active(vdev_t *vd)
{
	vdev_t *pvd = vd->vdev_parent;

	if (pvd != NULL && (pvd->vdev_ops == &vdev_spare_ops ||
	    pvd->vdev_ops == &vdev_replacing_ops ||
	    pvd->vdev_ops == &vdev_draid_ops)) {
		return (B_TRUE);
	} else {
		return (B_FALSE);
	}
}

/*
 * Given a dRAID distributed spare vdev, returns the physical child vdev
 * on which the provided offset resides.  This may involve recursing through
 * multiple layers of distributed spares.  Note that offset is relative to
 * this vdev.
 */
vdev_t *
vdev_draid_spare_get_child(vdev_t *vd, uint64_t physical_offset)
{
	vdev_draid_spare_t *vds = vd->vdev_tsd;

	ASSERT3P(vd->vdev_ops, ==, &vdev_draid_spare_ops);

	/* The vdev is closed */
	if (vds->vds_draid_vdev == NULL)
		return (NULL);

	vdev_t *tvd = vds->vds_draid_vdev;
	vdev_draid_config_t *vdc = tvd->vdev_tsd;

	ASSERT3P(tvd->vdev_ops, ==, &vdev_draid_ops);
	ASSERT3U(vds->vds_spare_id, <, vdc->vdc_nspares);

	uint8_t *base;
	uint64_t iter;
	uint64_t perm = physical_offset / vdc->vdc_devslicesz;

	vdev_draid_get_perm(vdc, perm, &base, &iter);

	uint64_t cid = vdev_draid_permute_id(vdc, base, iter,
	    (tvd->vdev_children - 1) - vds->vds_spare_id);
	vdev_t *cvd = tvd->vdev_child[cid];

	if (cvd->vdev_ops == &vdev_draid_spare_ops)
		return (vdev_draid_spare_get_child(cvd, physical_offset));

	return (cvd);
}
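
/*
 * For example, a hypothetical spare with vds_spare_id = 0 in a 12-child
 * dRAID always resolves an offset through logical column 11 (the last
 * column), permuted for the slice containing that offset.
 */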

/* ARGSUSED */
static void
vdev_draid_spare_close(vdev_t *vd)
{
	vdev_draid_spare_t *vds = vd->vdev_tsd;
	vds->vds_draid_vdev = NULL;
}

/*
 * Opening a dRAID spare device is done by looking up the associated dRAID
 * top-level vdev guid from the spare configuration.
 */
static int
vdev_draid_spare_open(vdev_t *vd, uint64_t *psize, uint64_t *max_psize,
    uint64_t *logical_ashift, uint64_t *physical_ashift)
{
	vdev_draid_spare_t *vds = vd->vdev_tsd;
	vdev_t *rvd = vd->vdev_spa->spa_root_vdev;
	uint64_t asize, max_asize;

	vdev_t *tvd = vdev_lookup_by_guid(rvd, vds->vds_top_guid);
	if (tvd == NULL) {
		/*
		 * When spa_vdev_add() is labeling new spares the
		 * associated dRAID is not attached to the root vdev
		 * nor does this spare have a parent.  Simulate a valid
		 * device in order to allow the label to be initialized
		 * and the distributed spare added to the configuration.
		 */
		if (vd->vdev_parent == NULL) {
			*psize = *max_psize = SPA_MINDEVSIZE;
			*logical_ashift = *physical_ashift = ASHIFT_MIN;
			return (0);
		}

		return (SET_ERROR(EINVAL));
	}

	vdev_draid_config_t *vdc = tvd->vdev_tsd;
	if (tvd->vdev_ops != &vdev_draid_ops || vdc == NULL)
		return (SET_ERROR(EINVAL));

	if (vds->vds_spare_id >= vdc->vdc_nspares)
		return (SET_ERROR(EINVAL));

	/*
	 * Neither tvd->vdev_asize nor tvd->vdev_max_asize can be used here
	 * because the caller may be vdev_draid_open() in which case the
	 * values are stale as they haven't yet been updated by vdev_open().
	 * To avoid this, always recalculate the dRAID asize and max_asize.
	 */
	vdev_draid_calculate_asize(tvd, &asize, &max_asize,
	    logical_ashift, physical_ashift);

	*psize = asize + VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE;
	*max_psize = max_asize + VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE;

	vds->vds_draid_vdev = tvd;

	return (0);
}

/*
 * Completed distributed spare IO.  Store the result in the parent zio
 * as if it had performed the operation itself.  Only the first error is
 * preserved if there are multiple errors.
 */
static void
vdev_draid_spare_child_done(zio_t *zio)
{
	zio_t *pio = zio->io_private;

	/*
	 * IOs are issued to non-writable vdevs in order to keep their
	 * DTLs accurate.  However, we don't want to propagate the
	 * error into the distributed spare's DTL.  When resilvering
	 * vdev_draid_need_resilver() will consult the relevant DTL
	 * to determine if the data is missing and must be repaired.
	 */
	if (!vdev_writeable(zio->io_vd))
		return;

	if (pio->io_error == 0)
		pio->io_error = zio->io_error;
}

/*
 * Returns a valid label nvlist for the distributed spare vdev.  This is
 * used to bypass the IO pipeline to avoid the complexity of constructing
 * a complete label with valid checksum to return when read.
 */
nvlist_t *
vdev_draid_read_config_spare(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;
	spa_aux_vdev_t *sav = &spa->spa_spares;
	uint64_t guid = vd->vdev_guid;

	nvlist_t *nv = fnvlist_alloc();
	fnvlist_add_uint64(nv, ZPOOL_CONFIG_IS_SPARE, 1);
	fnvlist_add_uint64(nv, ZPOOL_CONFIG_CREATE_TXG, vd->vdev_crtxg);
	fnvlist_add_uint64(nv, ZPOOL_CONFIG_VERSION, spa_version(spa));
	fnvlist_add_string(nv, ZPOOL_CONFIG_POOL_NAME, spa_name(spa));
	fnvlist_add_uint64(nv, ZPOOL_CONFIG_POOL_GUID, spa_guid(spa));
	fnvlist_add_uint64(nv, ZPOOL_CONFIG_POOL_TXG, spa->spa_config_txg);
	fnvlist_add_uint64(nv, ZPOOL_CONFIG_TOP_GUID, vd->vdev_top->vdev_guid);
	fnvlist_add_uint64(nv, ZPOOL_CONFIG_POOL_STATE,
	    vdev_draid_spare_is_active(vd) ?
	    POOL_STATE_ACTIVE : POOL_STATE_SPARE);

	/* Use the guid from the matching entry in the spa's spare list. */
	for (int i = 0; i < sav->sav_count; i++) {
		if (sav->sav_vdevs[i]->vdev_ops == &vdev_draid_spare_ops &&
		    strcmp(sav->sav_vdevs[i]->vdev_path, vd->vdev_path) == 0) {
			guid = sav->sav_vdevs[i]->vdev_guid;
			break;
		}
	}

	fnvlist_add_uint64(nv, ZPOOL_CONFIG_GUID, guid);

	return (nv);
}

/*
 * Handle any ioctl requested of the distributed spare.  Only flushes
 * are supported, in which case all children must be flushed.
 */
static int
vdev_draid_spare_ioctl(zio_t *zio)
{
	vdev_t *vd = zio->io_vd;
	int error = 0;

	if (zio->io_cmd == DKIOCFLUSHWRITECACHE) {
		for (int c = 0; c < vd->vdev_children; c++) {
			zio_nowait(zio_vdev_child_io(zio, NULL,
			    vd->vdev_child[c], zio->io_offset, zio->io_abd,
			    zio->io_size, zio->io_type, zio->io_priority, 0,
			    vdev_draid_spare_child_done, zio));
		}
	} else {
		error = SET_ERROR(ENOTSUP);
	}

	return (error);
}

/*
 * Initiate an IO to the distributed spare.  For normal IOs this entails
 * using the zio->io_offset and permutation table to calculate which child
 * dRAID vdev is responsible for the data, then passing the zio along to
 * that child to perform the actual IO.  The label ranges are not stored on
 * disk and require
 * some special handling which is described below.
 */
static void
vdev_draid_spare_io_start(zio_t *zio)
{
	vdev_t *cvd = NULL, *vd = zio->io_vd;
	vdev_draid_spare_t *vds = vd->vdev_tsd;
	uint64_t offset = zio->io_offset - VDEV_LABEL_START_SIZE;

	/*
	 * If the vdev is closed, it's likely in the REMOVED or FAULTED state.
	 * Nothing to be done here but return failure.
	 */
	if (vds == NULL) {
		zio->io_error = ENXIO;
		zio_interrupt(zio);
		return;
	}

	switch (zio->io_type) {
	case ZIO_TYPE_IOCTL:
		zio->io_error = vdev_draid_spare_ioctl(zio);
		break;

	case ZIO_TYPE_WRITE:
		if (VDEV_OFFSET_IS_LABEL(vd, zio->io_offset)) {
			/*
			 * Accept probe IOs and config writers to simulate the
			 * existence of an on disk label.  vdev_label_sync(),
			 * vdev_uberblock_sync() and vdev_copy_uberblocks()
			 * skip the distributed spares.  This only leaves
			 * vdev_label_init(), which is allowed to succeed to
			 * avoid adding special cases to the function.
			 */
			if (zio->io_flags & ZIO_FLAG_PROBE ||
			    zio->io_flags & ZIO_FLAG_CONFIG_WRITER) {
				zio->io_error = 0;
			} else {
				zio->io_error = SET_ERROR(EIO);
			}
		} else {
			cvd = vdev_draid_spare_get_child(vd, offset);

			if (cvd == NULL) {
				zio->io_error = SET_ERROR(ENXIO);
			} else {
				zio_nowait(zio_vdev_child_io(zio, NULL, cvd,
				    offset, zio->io_abd, zio->io_size,
				    zio->io_type, zio->io_priority, 0,
				    vdev_draid_spare_child_done, zio));
			}
		}
		break;

	case ZIO_TYPE_READ:
		if (VDEV_OFFSET_IS_LABEL(vd, zio->io_offset)) {
			/*
			 * Accept probe IOs to simulate the existence of a
			 * label.  vdev_label_read_config() bypasses the
			 * pipeline to read the label configuration and
			 * vdev_uberblock_load() skips distributed spares
			 * when attempting to locate the best uberblock.
			 */
			if (zio->io_flags & ZIO_FLAG_PROBE) {
				zio->io_error = 0;
			} else {
				zio->io_error = SET_ERROR(EIO);
			}
		} else {
			cvd = vdev_draid_spare_get_child(vd, offset);

			if (cvd == NULL || !vdev_readable(cvd)) {
				zio->io_error = SET_ERROR(ENXIO);
			} else {
				zio_nowait(zio_vdev_child_io(zio, NULL, cvd,
				    offset, zio->io_abd, zio->io_size,
				    zio->io_type, zio->io_priority, 0,
				    vdev_draid_spare_child_done, zio));
			}
		}
		break;

	case ZIO_TYPE_TRIM:
		/* The vdev label ranges are never trimmed */
		ASSERT0(VDEV_OFFSET_IS_LABEL(vd, zio->io_offset));

		cvd = vdev_draid_spare_get_child(vd, offset);

		if (cvd == NULL || !cvd->vdev_has_trim) {
			zio->io_error = SET_ERROR(ENXIO);
		} else {
			zio_nowait(zio_vdev_child_io(zio, NULL, cvd,
			    offset, zio->io_abd, zio->io_size,
			    zio->io_type, zio->io_priority, 0,
			    vdev_draid_spare_child_done, zio));
		}
		break;

	default:
		zio->io_error = SET_ERROR(ENOTSUP);
		break;
	}

	zio_execute(zio);
}

/* ARGSUSED */
static void
vdev_draid_spare_io_done(zio_t *zio)
{
}

/*
 * Lookup the full spare config in spa->spa_spares.sav_config and
 * return the top_guid and spare_id for the named spare.
 */
static int
vdev_draid_spare_lookup(spa_t *spa, nvlist_t *nv, uint64_t *top_guidp,
    uint64_t *spare_idp)
{
	nvlist_t **spares;
	uint_t nspares;
	int error;

	if ((spa->spa_spares.sav_config == NULL) ||
	    (nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
	    ZPOOL_CONFIG_SPARES, &spares, &nspares) != 0)) {
		return (SET_ERROR(ENOENT));
	}

	char *spare_name;
	error = nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &spare_name);
	if (error != 0)
		return (SET_ERROR(EINVAL));

	for (int i = 0; i < nspares; i++) {
		nvlist_t *spare = spares[i];
		uint64_t top_guid, spare_id;
		char *type, *path;

		/* Skip non-distributed spares */
		error = nvlist_lookup_string(spare, ZPOOL_CONFIG_TYPE, &type);
		if (error != 0 || strcmp(type, VDEV_TYPE_DRAID_SPARE) != 0)
			continue;

		/* Skip spares with the wrong name */
		error = nvlist_lookup_string(spare, ZPOOL_CONFIG_PATH, &path);
		if (error != 0 || strcmp(path, spare_name) != 0)
			continue;

		/* Found the matching spare */
		error = nvlist_lookup_uint64(spare,
		    ZPOOL_CONFIG_TOP_GUID, &top_guid);
		if (error == 0) {
			error = nvlist_lookup_uint64(spare,
			    ZPOOL_CONFIG_SPARE_ID, &spare_id);
		}

		if (error != 0) {
			return (SET_ERROR(EINVAL));
		} else {
			*top_guidp = top_guid;
			*spare_idp = spare_id;
			return (0);
		}
	}

	return (SET_ERROR(ENOENT));
}

/*
 * Initialize private dRAID spare specific fields from the nvlist.
 */
static int
vdev_draid_spare_init(spa_t *spa, nvlist_t *nv, void **tsd)
{
	vdev_draid_spare_t *vds;
	uint64_t top_guid = 0;
	uint64_t spare_id;

	/*
	 * In the normal case check the list of spares stored in the spa
 * to look up the top_guid and spare_id for the provided spare config.
 * When creating a new pool or adding vdevs the spare list is not
 * yet populated and the values are provided in the passed config.
 */
	if (vdev_draid_spare_lookup(spa, nv, &top_guid, &spare_id) != 0) {
		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_TOP_GUID,
		    &top_guid) != 0)
			return (SET_ERROR(EINVAL));

		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_SPARE_ID,
		    &spare_id) != 0)
			return (SET_ERROR(EINVAL));
	}

	vds = kmem_alloc(sizeof (vdev_draid_spare_t), KM_SLEEP);
	vds->vds_draid_vdev = NULL;
	vds->vds_top_guid = top_guid;
	vds->vds_spare_id = spare_id;

	*tsd = vds;

	return (0);
}

static void
vdev_draid_spare_fini(vdev_t *vd)
{
	kmem_free(vd->vdev_tsd, sizeof (vdev_draid_spare_t));
}

static void
vdev_draid_spare_config_generate(vdev_t *vd, nvlist_t *nv)
{
	vdev_draid_spare_t *vds = vd->vdev_tsd;

	ASSERT3P(vd->vdev_ops, ==, &vdev_draid_spare_ops);

	fnvlist_add_uint64(nv, ZPOOL_CONFIG_TOP_GUID, vds->vds_top_guid);
	fnvlist_add_uint64(nv, ZPOOL_CONFIG_SPARE_ID, vds->vds_spare_id);
}

vdev_ops_t vdev_draid_spare_ops = {
	.vdev_op_init = vdev_draid_spare_init,
	.vdev_op_fini = vdev_draid_spare_fini,
	.vdev_op_open = vdev_draid_spare_open,
	.vdev_op_close = vdev_draid_spare_close,
	.vdev_op_asize = vdev_default_asize,
	.vdev_op_min_asize = vdev_default_min_asize,
	.vdev_op_min_alloc = NULL,
	.vdev_op_io_start = vdev_draid_spare_io_start,
	.vdev_op_io_done = vdev_draid_spare_io_done,
	.vdev_op_state_change = NULL,
	.vdev_op_need_resilver = NULL,
	.vdev_op_hold = NULL,
	.vdev_op_rele = NULL,
	.vdev_op_remap = NULL,
	.vdev_op_xlate = vdev_default_xlate,
	.vdev_op_rebuild_asize = NULL,
	.vdev_op_metaslab_init = NULL,
	.vdev_op_config_generate = vdev_draid_spare_config_generate,
	.vdev_op_nparity = NULL,
	.vdev_op_ndisks = NULL,
	.vdev_op_type = VDEV_TYPE_DRAID_SPARE,
	.vdev_op_leaf = B_TRUE,
};
