vdev.c (240415 → 240868)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2012 by Delphix. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/vdev_impl.h>
#include <sys/uberblock_impl.h>
#include <sys/metaslab.h>
#include <sys/metaslab_impl.h>
#include <sys/space_map.h>
#include <sys/zio.h>
#include <sys/zap.h>
#include <sys/fs/zfs.h>
#include <sys/arc.h>
#include <sys/zil.h>
#include <sys/dsl_scan.h>
#include <sys/trim_map.h>

SYSCTL_DECL(_vfs_zfs);
SYSCTL_NODE(_vfs_zfs, OID_AUTO, vdev, CTLFLAG_RW, 0, "ZFS VDEV");

/*
 * Virtual device management.
 */

static vdev_ops_t *vdev_ops_table[] = {
	&vdev_root_ops,
	&vdev_raidz_ops,
	&vdev_mirror_ops,
	&vdev_replacing_ops,
	&vdev_spare_ops,
#ifdef _KERNEL
	&vdev_geom_ops,
#else
	&vdev_disk_ops,
#endif
	&vdev_file_ops,
	&vdev_missing_ops,
	&vdev_hole_ops,
	NULL
};


/*
 * Given a vdev type, return the appropriate ops vector.
 */
static vdev_ops_t *
vdev_getops(const char *type)
{
	vdev_ops_t *ops, **opspp;

	for (opspp = vdev_ops_table; (ops = *opspp) != NULL; opspp++)
		if (strcmp(ops->vdev_op_type, type) == 0)
			break;

	return (ops);
}

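/*
 * Example (illustrative, not part of the original source): the nvlist
 * type string maps directly to an ops vector.  A config with type
 * "mirror" resolves to &vdev_mirror_ops; an unknown string falls off
 * the end of vdev_ops_table and returns NULL, which vdev_alloc()
 * treats as EINVAL:
 *
 *	vdev_ops_t *ops = vdev_getops(VDEV_TYPE_MIRROR);
 *	ASSERT(ops == &vdev_mirror_ops);
 */
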
/*
 * Default asize function: return the MAX of psize with the asize of
 * all children.  This is what's used by anything other than RAID-Z.
 */
uint64_t
vdev_default_asize(vdev_t *vd, uint64_t psize)
{
	uint64_t asize = P2ROUNDUP(psize, 1ULL << vd->vdev_top->vdev_ashift);
	uint64_t csize;

	for (int c = 0; c < vd->vdev_children; c++) {
		csize = vdev_psize_to_asize(vd->vdev_child[c], psize);
		asize = MAX(asize, csize);
	}

	return (asize);
}

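/*
 * Worked example (illustrative): with a top-level ashift of 12 (4 KB
 * sectors), a 5000-byte psize rounds up to P2ROUNDUP(5000, 4096) = 8192
 * bytes before the children are consulted; a mirror then returns the
 * largest of that value and each child's own asize for the same psize.
 */
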
/*
 * Get the minimum allocatable size.  We define the allocatable size as
 * the vdev's asize rounded to the nearest metaslab.  This allows us to
 * replace or attach devices which don't have the same physical size but
 * can still satisfy the same number of allocations.
 */
uint64_t
vdev_get_min_asize(vdev_t *vd)
{
	vdev_t *pvd = vd->vdev_parent;

	/*
	 * If our parent is NULL (inactive spare or cache) or is the root,
	 * just return our own asize.
	 */
	if (pvd == NULL)
		return (vd->vdev_asize);

	/*
	 * The top-level vdev just returns the allocatable size rounded
	 * to the nearest metaslab.
	 */
	if (vd == vd->vdev_top)
		return (P2ALIGN(vd->vdev_asize, 1ULL << vd->vdev_ms_shift));

	/*
	 * The allocatable space for a raidz vdev is N * sizeof(smallest child),
	 * so each child must provide at least 1/Nth of its asize.
	 */
	if (pvd->vdev_ops == &vdev_raidz_ops)
		return (pvd->vdev_min_asize / pvd->vdev_children);

	return (pvd->vdev_min_asize);
}

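/*
 * Worked example (illustrative): a 6-disk raidz top-level vdev whose
 * min_asize is 6 TB asks each leaf for 6 TB / 6 = 1 TB, so a 1 TB
 * replacement is accepted even if the other children are larger.  A
 * child of a plain mirror simply inherits the parent's min_asize.
 */
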
void
vdev_set_min_asize(vdev_t *vd)
{
	vd->vdev_min_asize = vdev_get_min_asize(vd);

	for (int c = 0; c < vd->vdev_children; c++)
		vdev_set_min_asize(vd->vdev_child[c]);
}

vdev_t *
vdev_lookup_top(spa_t *spa, uint64_t vdev)
{
	vdev_t *rvd = spa->spa_root_vdev;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);

	if (vdev < rvd->vdev_children) {
		ASSERT(rvd->vdev_child[vdev] != NULL);
		return (rvd->vdev_child[vdev]);
	}

	return (NULL);
}

vdev_t *
vdev_lookup_by_guid(vdev_t *vd, uint64_t guid)
{
	vdev_t *mvd;

	if (vd->vdev_guid == guid)
		return (vd);

	for (int c = 0; c < vd->vdev_children; c++)
		if ((mvd = vdev_lookup_by_guid(vd->vdev_child[c], guid)) !=
		    NULL)
			return (mvd);

	return (NULL);
}

void
vdev_add_child(vdev_t *pvd, vdev_t *cvd)
{
	size_t oldsize, newsize;
	uint64_t id = cvd->vdev_id;
	vdev_t **newchild;

	ASSERT(spa_config_held(cvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);
	ASSERT(cvd->vdev_parent == NULL);

	cvd->vdev_parent = pvd;

	if (pvd == NULL)
		return;

	ASSERT(id >= pvd->vdev_children || pvd->vdev_child[id] == NULL);

	oldsize = pvd->vdev_children * sizeof (vdev_t *);
	pvd->vdev_children = MAX(pvd->vdev_children, id + 1);
	newsize = pvd->vdev_children * sizeof (vdev_t *);

	newchild = kmem_zalloc(newsize, KM_SLEEP);
	if (pvd->vdev_child != NULL) {
		bcopy(pvd->vdev_child, newchild, oldsize);
		kmem_free(pvd->vdev_child, oldsize);
	}

	pvd->vdev_child = newchild;
	pvd->vdev_child[id] = cvd;

	cvd->vdev_top = (pvd->vdev_top ? pvd->vdev_top : cvd);
	ASSERT(cvd->vdev_top->vdev_parent->vdev_parent == NULL);

	/*
	 * Walk up all ancestors to update guid sum.
	 */
	for (; pvd != NULL; pvd = pvd->vdev_parent)
		pvd->vdev_guid_sum += cvd->vdev_guid_sum;
}

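/*
 * Invariant sketch (illustrative): vdev_guid_sum at any node equals its
 * own guid plus the guid_sums of its children, so adding a child with
 * guid_sum G bumps every ancestor's sum by exactly G.  For example, a
 * leaf with guid 7 under a mirror with guid 5 gives the mirror a
 * guid_sum of 12; the uberblock's guid-sum check at import compares
 * against this value to detect missing devices.
 */
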
void
vdev_remove_child(vdev_t *pvd, vdev_t *cvd)
{
	int c;
	uint_t id = cvd->vdev_id;

	ASSERT(cvd->vdev_parent == pvd);

	if (pvd == NULL)
		return;

	ASSERT(id < pvd->vdev_children);
	ASSERT(pvd->vdev_child[id] == cvd);

	pvd->vdev_child[id] = NULL;
	cvd->vdev_parent = NULL;

	for (c = 0; c < pvd->vdev_children; c++)
		if (pvd->vdev_child[c])
			break;

	if (c == pvd->vdev_children) {
		kmem_free(pvd->vdev_child, c * sizeof (vdev_t *));
		pvd->vdev_child = NULL;
		pvd->vdev_children = 0;
	}

	/*
	 * Walk up all ancestors to update guid sum.
	 */
	for (; pvd != NULL; pvd = pvd->vdev_parent)
		pvd->vdev_guid_sum -= cvd->vdev_guid_sum;
}

/*
 * Remove any holes in the child array.
 */
void
vdev_compact_children(vdev_t *pvd)
{
	vdev_t **newchild, *cvd;
	int oldc = pvd->vdev_children;
	int newc;

	ASSERT(spa_config_held(pvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	for (int c = newc = 0; c < oldc; c++)
		if (pvd->vdev_child[c])
			newc++;

	newchild = kmem_alloc(newc * sizeof (vdev_t *), KM_SLEEP);

	for (int c = newc = 0; c < oldc; c++) {
		if ((cvd = pvd->vdev_child[c]) != NULL) {
			newchild[newc] = cvd;
			cvd->vdev_id = newc++;
		}
	}

	kmem_free(pvd->vdev_child, oldc * sizeof (vdev_t *));
	pvd->vdev_child = newchild;
	pvd->vdev_children = newc;
}

/*
 * Allocate and minimally initialize a vdev_t.
 */
vdev_t *
vdev_alloc_common(spa_t *spa, uint_t id, uint64_t guid, vdev_ops_t *ops)
{
	vdev_t *vd;

	vd = kmem_zalloc(sizeof (vdev_t), KM_SLEEP);

	if (spa->spa_root_vdev == NULL) {
		ASSERT(ops == &vdev_root_ops);
		spa->spa_root_vdev = vd;
		spa->spa_load_guid = spa_generate_guid(NULL);
	}

	if (guid == 0 && ops != &vdev_hole_ops) {
		if (spa->spa_root_vdev == vd) {
			/*
			 * The root vdev's guid will also be the pool guid,
			 * which must be unique among all pools.
			 */
			guid = spa_generate_guid(NULL);
		} else {
			/*
			 * Any other vdev's guid must be unique within the pool.
			 */
			guid = spa_generate_guid(spa);
		}
		ASSERT(!spa_guid_exists(spa_guid(spa), guid));
	}

	vd->vdev_spa = spa;
	vd->vdev_id = id;
	vd->vdev_guid = guid;
	vd->vdev_guid_sum = guid;
	vd->vdev_ops = ops;
	vd->vdev_state = VDEV_STATE_CLOSED;
	vd->vdev_ishole = (ops == &vdev_hole_ops);

	mutex_init(&vd->vdev_dtl_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&vd->vdev_stat_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&vd->vdev_probe_lock, NULL, MUTEX_DEFAULT, NULL);
	for (int t = 0; t < DTL_TYPES; t++) {
		space_map_create(&vd->vdev_dtl[t], 0, -1ULL, 0,
		    &vd->vdev_dtl_lock);
	}
	txg_list_create(&vd->vdev_ms_list,
	    offsetof(struct metaslab, ms_txg_node));
	txg_list_create(&vd->vdev_dtl_list,
	    offsetof(struct vdev, vdev_dtl_node));
	vd->vdev_stat.vs_timestamp = gethrtime();
	vdev_queue_init(vd);
	vdev_cache_init(vd);

	return (vd);
}

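/*
 * Lifecycle note (illustrative): every vdev_t is created through
 * vdev_alloc_common() and released through vdev_free() below, which
 * undoes the mutexes, DTL space maps, txg lists, queue, and cache set
 * up here.  The first allocation for a pool must be the root vdev,
 * which anchors spa->spa_root_vdev for everything that follows.
 */
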
/*
 * Allocate a new vdev.  The 'alloctype' is used to control whether we are
 * creating a new vdev or loading an existing one - the behavior is slightly
 * different for each case.
 */
int
vdev_alloc(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent, uint_t id,
    int alloctype)
{
	vdev_ops_t *ops;
	char *type;
	uint64_t guid = 0, islog, nparity;
	vdev_t *vd;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
		return (EINVAL);

	if ((ops = vdev_getops(type)) == NULL)
		return (EINVAL);

	/*
	 * If this is a load, get the vdev guid from the nvlist.
	 * Otherwise, vdev_alloc_common() will generate one for us.
	 */
	if (alloctype == VDEV_ALLOC_LOAD) {
		uint64_t label_id;

		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID, &label_id) ||
		    label_id != id)
			return (EINVAL);

		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
			return (EINVAL);
	} else if (alloctype == VDEV_ALLOC_SPARE) {
		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
			return (EINVAL);
	} else if (alloctype == VDEV_ALLOC_L2CACHE) {
		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
			return (EINVAL);
	} else if (alloctype == VDEV_ALLOC_ROOTPOOL) {
		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
			return (EINVAL);
	}

	/*
	 * The first allocated vdev must be of type 'root'.
	 */
	if (ops != &vdev_root_ops && spa->spa_root_vdev == NULL)
		return (EINVAL);

	/*
	 * Determine whether we're a log vdev.
	 */
	islog = 0;
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG, &islog);
	if (islog && spa_version(spa) < SPA_VERSION_SLOGS)
		return (ENOTSUP);

	if (ops == &vdev_hole_ops && spa_version(spa) < SPA_VERSION_HOLES)
		return (ENOTSUP);

	/*
	 * Set the nparity property for RAID-Z vdevs.
	 */
	nparity = -1ULL;
	if (ops == &vdev_raidz_ops) {
		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
		    &nparity) == 0) {
			if (nparity == 0 || nparity > VDEV_RAIDZ_MAXPARITY)
				return (EINVAL);
			/*
			 * Previous versions could only support 1 or 2 parity
			 * devices.
			 */
			if (nparity > 1 &&
			    spa_version(spa) < SPA_VERSION_RAIDZ2)
				return (ENOTSUP);
			if (nparity > 2 &&
			    spa_version(spa) < SPA_VERSION_RAIDZ3)
				return (ENOTSUP);
		} else {
			/*
			 * We require the parity to be specified for SPAs that
			 * support multiple parity levels.
			 */
			if (spa_version(spa) >= SPA_VERSION_RAIDZ2)
				return (EINVAL);
			/*
			 * Otherwise, we default to 1 parity device for RAID-Z.
			 */
			nparity = 1;
		}
	} else {
		nparity = 0;
	}
	ASSERT(nparity != -1ULL);

	vd = vdev_alloc_common(spa, id, guid, ops);

	vd->vdev_islog = islog;
	vd->vdev_nparity = nparity;

	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &vd->vdev_path) == 0)
		vd->vdev_path = spa_strdup(vd->vdev_path);
	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &vd->vdev_devid) == 0)
		vd->vdev_devid = spa_strdup(vd->vdev_devid);
	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PHYS_PATH,
	    &vd->vdev_physpath) == 0)
		vd->vdev_physpath = spa_strdup(vd->vdev_physpath);
	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_FRU, &vd->vdev_fru) == 0)
		vd->vdev_fru = spa_strdup(vd->vdev_fru);

	/*
	 * Set the whole_disk property.  If it's not specified, leave the value
	 * as -1.
	 */
	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
	    &vd->vdev_wholedisk) != 0)
		vd->vdev_wholedisk = -1ULL;

	/*
	 * Look for the 'not present' flag.  This will only be set if the device
	 * was not present at the time of import.
	 */
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
	    &vd->vdev_not_present);

	/*
	 * Get the alignment requirement.
	 */
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ASHIFT, &vd->vdev_ashift);

	/*
	 * Retrieve the vdev creation time.
	 */
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_CREATE_TXG,
	    &vd->vdev_crtxg);

	/*
	 * If we're a top-level vdev, try to load the allocation parameters.
	 */
	if (parent && !parent->vdev_parent &&
	    (alloctype == VDEV_ALLOC_LOAD || alloctype == VDEV_ALLOC_SPLIT)) {
		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_METASLAB_ARRAY,
		    &vd->vdev_ms_array);
		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_METASLAB_SHIFT,
		    &vd->vdev_ms_shift);
		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ASIZE,
		    &vd->vdev_asize);
		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVING,
		    &vd->vdev_removing);
	}

	if (parent && !parent->vdev_parent && alloctype != VDEV_ALLOC_ATTACH) {
		ASSERT(alloctype == VDEV_ALLOC_LOAD ||
		    alloctype == VDEV_ALLOC_ADD ||
		    alloctype == VDEV_ALLOC_SPLIT ||
		    alloctype == VDEV_ALLOC_ROOTPOOL);
		vd->vdev_mg = metaslab_group_create(islog ?
		    spa_log_class(spa) : spa_normal_class(spa), vd);
	}

	/*
	 * If we're a leaf vdev, try to load the DTL object and other state.
	 */
	if (vd->vdev_ops->vdev_op_leaf &&
	    (alloctype == VDEV_ALLOC_LOAD || alloctype == VDEV_ALLOC_L2CACHE ||
	    alloctype == VDEV_ALLOC_ROOTPOOL)) {
		if (alloctype == VDEV_ALLOC_LOAD) {
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DTL,
			    &vd->vdev_dtl_smo.smo_object);
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_UNSPARE,
			    &vd->vdev_unspare);
		}

		if (alloctype == VDEV_ALLOC_ROOTPOOL) {
			uint64_t spare = 0;

			if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE,
			    &spare) == 0 && spare)
				spa_spare_add(vd);
		}

		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE,
		    &vd->vdev_offline);

		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_RESILVERING,
		    &vd->vdev_resilvering);

		/*
		 * When importing a pool, we want to ignore the persistent
		 * fault state, as the diagnosis made on another system may
		 * not be valid in the current context.  Local vdevs will
		 * remain in the faulted state.
		 */
		if (spa_load_state(spa) == SPA_LOAD_OPEN) {
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED,
			    &vd->vdev_faulted);
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DEGRADED,
			    &vd->vdev_degraded);
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED,
			    &vd->vdev_removed);

			if (vd->vdev_faulted || vd->vdev_degraded) {
				char *aux;

				vd->vdev_label_aux = VDEV_AUX_ERR_EXCEEDED;
				if (nvlist_lookup_string(nv,
				    ZPOOL_CONFIG_AUX_STATE, &aux) == 0 &&
				    strcmp(aux, "external") == 0)
					vd->vdev_label_aux = VDEV_AUX_EXTERNAL;
			}
		}
	}

	/*
	 * Add ourselves to the parent's list of children.
	 */
	vdev_add_child(parent, vd);

	*vdp = vd;

	return (0);
}

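/*
 * Example (illustrative): the nvlist for a two-way mirror being added
 * to a pool carries at least ZPOOL_CONFIG_TYPE = "mirror" plus a
 * ZPOOL_CONFIG_CHILDREN array of two "disk" entries, each with its own
 * ZPOOL_CONFIG_PATH.  With alloctype == VDEV_ALLOC_ADD no guid is
 * required in the nvlist; vdev_alloc_common() generates one.
 */
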
void
vdev_free(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;

	/*
	 * vdev_free() implies closing the vdev first.  This is simpler than
	 * trying to ensure complicated semantics for all callers.
	 */
	vdev_close(vd);

	ASSERT(!list_link_active(&vd->vdev_config_dirty_node));
	ASSERT(!list_link_active(&vd->vdev_state_dirty_node));

	/*
	 * Free all children.
	 */
	for (int c = 0; c < vd->vdev_children; c++)
		vdev_free(vd->vdev_child[c]);

	ASSERT(vd->vdev_child == NULL);
	ASSERT(vd->vdev_guid_sum == vd->vdev_guid);

	/*
	 * Discard allocation state.
	 */
	if (vd->vdev_mg != NULL) {
		vdev_metaslab_fini(vd);
		metaslab_group_destroy(vd->vdev_mg);
	}

	ASSERT0(vd->vdev_stat.vs_space);
	ASSERT0(vd->vdev_stat.vs_dspace);
	ASSERT0(vd->vdev_stat.vs_alloc);

	/*
	 * Remove this vdev from its parent's child list.
	 */
	vdev_remove_child(vd->vdev_parent, vd);

	ASSERT(vd->vdev_parent == NULL);

	/*
	 * Clean up vdev structure.
	 */
	vdev_queue_fini(vd);
	vdev_cache_fini(vd);

	if (vd->vdev_path)
		spa_strfree(vd->vdev_path);
	if (vd->vdev_devid)
		spa_strfree(vd->vdev_devid);
	if (vd->vdev_physpath)
		spa_strfree(vd->vdev_physpath);
	if (vd->vdev_fru)
		spa_strfree(vd->vdev_fru);

	if (vd->vdev_isspare)
		spa_spare_remove(vd);
	if (vd->vdev_isl2cache)
		spa_l2cache_remove(vd);

	txg_list_destroy(&vd->vdev_ms_list);
	txg_list_destroy(&vd->vdev_dtl_list);

	mutex_enter(&vd->vdev_dtl_lock);
	for (int t = 0; t < DTL_TYPES; t++) {
		space_map_unload(&vd->vdev_dtl[t]);
		space_map_destroy(&vd->vdev_dtl[t]);
	}
	mutex_exit(&vd->vdev_dtl_lock);

	mutex_destroy(&vd->vdev_dtl_lock);
	mutex_destroy(&vd->vdev_stat_lock);
	mutex_destroy(&vd->vdev_probe_lock);

	if (vd == spa->spa_root_vdev)
		spa->spa_root_vdev = NULL;

	kmem_free(vd, sizeof (vdev_t));
}

/*
 * Transfer top-level vdev state from svd to tvd.
 */
static void
vdev_top_transfer(vdev_t *svd, vdev_t *tvd)
{
	spa_t *spa = svd->vdev_spa;
	metaslab_t *msp;
	vdev_t *vd;
	int t;

	ASSERT(tvd == tvd->vdev_top);

	tvd->vdev_ms_array = svd->vdev_ms_array;
	tvd->vdev_ms_shift = svd->vdev_ms_shift;
	tvd->vdev_ms_count = svd->vdev_ms_count;

	svd->vdev_ms_array = 0;
	svd->vdev_ms_shift = 0;
	svd->vdev_ms_count = 0;

	if (tvd->vdev_mg)
		ASSERT3P(tvd->vdev_mg, ==, svd->vdev_mg);
	tvd->vdev_mg = svd->vdev_mg;
	tvd->vdev_ms = svd->vdev_ms;

	svd->vdev_mg = NULL;
	svd->vdev_ms = NULL;

	if (tvd->vdev_mg != NULL)
		tvd->vdev_mg->mg_vd = tvd;

	tvd->vdev_stat.vs_alloc = svd->vdev_stat.vs_alloc;
	tvd->vdev_stat.vs_space = svd->vdev_stat.vs_space;
	tvd->vdev_stat.vs_dspace = svd->vdev_stat.vs_dspace;

	svd->vdev_stat.vs_alloc = 0;
	svd->vdev_stat.vs_space = 0;
	svd->vdev_stat.vs_dspace = 0;

	for (t = 0; t < TXG_SIZE; t++) {
		while ((msp = txg_list_remove(&svd->vdev_ms_list, t)) != NULL)
			(void) txg_list_add(&tvd->vdev_ms_list, msp, t);
		while ((vd = txg_list_remove(&svd->vdev_dtl_list, t)) != NULL)
			(void) txg_list_add(&tvd->vdev_dtl_list, vd, t);
		if (txg_list_remove_this(&spa->spa_vdev_txg_list, svd, t))
			(void) txg_list_add(&spa->spa_vdev_txg_list, tvd, t);
	}

	if (list_link_active(&svd->vdev_config_dirty_node)) {
		vdev_config_clean(svd);
		vdev_config_dirty(tvd);
	}

	if (list_link_active(&svd->vdev_state_dirty_node)) {
		vdev_state_clean(svd);
		vdev_state_dirty(tvd);
	}

	tvd->vdev_deflate_ratio = svd->vdev_deflate_ratio;
	svd->vdev_deflate_ratio = 0;

	tvd->vdev_islog = svd->vdev_islog;
	svd->vdev_islog = 0;
}

static void
vdev_top_update(vdev_t *tvd, vdev_t *vd)
{
	if (vd == NULL)
		return;

	vd->vdev_top = tvd;

	for (int c = 0; c < vd->vdev_children; c++)
		vdev_top_update(tvd, vd->vdev_child[c]);
}

/*
 * Add a mirror/replacing vdev above an existing vdev.
 */
vdev_t *
vdev_add_parent(vdev_t *cvd, vdev_ops_t *ops)
{
	spa_t *spa = cvd->vdev_spa;
	vdev_t *pvd = cvd->vdev_parent;
	vdev_t *mvd;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	mvd = vdev_alloc_common(spa, cvd->vdev_id, 0, ops);

	mvd->vdev_asize = cvd->vdev_asize;
	mvd->vdev_min_asize = cvd->vdev_min_asize;
	mvd->vdev_max_asize = cvd->vdev_max_asize;
	mvd->vdev_ashift = cvd->vdev_ashift;
	mvd->vdev_state = cvd->vdev_state;
	mvd->vdev_crtxg = cvd->vdev_crtxg;

	vdev_remove_child(pvd, cvd);
	vdev_add_child(pvd, mvd);
	cvd->vdev_id = mvd->vdev_children;
	vdev_add_child(mvd, cvd);
	vdev_top_update(cvd->vdev_top, cvd->vdev_top);

	if (mvd == mvd->vdev_top)
		vdev_top_transfer(cvd, mvd);

	return (mvd);
}

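/*
 * Usage sketch (illustrative): 'zpool attach' interposes a mirror above
 * a single disk, and 'zpool replace' interposes a replacing vdev, e.g.
 *
 *	mvd = vdev_add_parent(cvd, &vdev_replacing_ops);
 *
 * after which cvd becomes child 0 of mvd and the incoming device is
 * attached as child 1.
 */
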
/*
 * Remove a 1-way mirror/replacing vdev from the tree.
 */
void
vdev_remove_parent(vdev_t *cvd)
{
	vdev_t *mvd = cvd->vdev_parent;
	vdev_t *pvd = mvd->vdev_parent;

	ASSERT(spa_config_held(cvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	ASSERT(mvd->vdev_children == 1);
	ASSERT(mvd->vdev_ops == &vdev_mirror_ops ||
	    mvd->vdev_ops == &vdev_replacing_ops ||
	    mvd->vdev_ops == &vdev_spare_ops);
	cvd->vdev_ashift = mvd->vdev_ashift;

	vdev_remove_child(mvd, cvd);
	vdev_remove_child(pvd, mvd);

	/*
	 * If cvd will replace mvd as a top-level vdev, preserve mvd's guid.
	 * Otherwise, we could have detached an offline device, and when we
	 * go to import the pool we'll think we have two top-level vdevs,
	 * instead of a different version of the same top-level vdev.
	 */
	if (mvd->vdev_top == mvd) {
		uint64_t guid_delta = mvd->vdev_guid - cvd->vdev_guid;
		cvd->vdev_orig_guid = cvd->vdev_guid;
		cvd->vdev_guid += guid_delta;
		cvd->vdev_guid_sum += guid_delta;
	}
	cvd->vdev_id = mvd->vdev_id;
	vdev_add_child(pvd, cvd);
	vdev_top_update(cvd->vdev_top, cvd->vdev_top);

	if (cvd == cvd->vdev_top)
		vdev_top_transfer(mvd, cvd);

	ASSERT(mvd->vdev_children == 0);
	vdev_free(mvd);
}

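/*
 * Worked example (illustrative): if the disappearing parent mvd has
 * guid 100 and the surviving child cvd has guid 40, guid_delta is 60;
 * cvd's guid becomes 100 and its guid_sum also rises by 60, so the
 * surviving top-level vdev is indistinguishable (by guid) from the one
 * recorded in the on-disk labels.
 */
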
int
vdev_metaslab_init(vdev_t *vd, uint64_t txg)
{
	spa_t *spa = vd->vdev_spa;
	objset_t *mos = spa->spa_meta_objset;
	uint64_t m;
	uint64_t oldc = vd->vdev_ms_count;
	uint64_t newc = vd->vdev_asize >> vd->vdev_ms_shift;
	metaslab_t **mspp;
	int error;

	ASSERT(txg == 0 || spa_config_held(spa, SCL_ALLOC, RW_WRITER));

	/*
	 * This vdev is not being allocated from yet or is a hole.
	 */
	if (vd->vdev_ms_shift == 0)
		return (0);

	ASSERT(!vd->vdev_ishole);

	/*
	 * Compute the raidz-deflation ratio.  Note, we hard-code
	 * in 128k (1 << 17) because it is the current "typical" blocksize.
	 * Even if SPA_MAXBLOCKSIZE changes, this algorithm must never change,
	 * or we will inconsistently account for existing bp's.
	 */
	vd->vdev_deflate_ratio = (1 << 17) /
	    (vdev_psize_to_asize(vd, 1 << 17) >> SPA_MINBLOCKSHIFT);

	ASSERT(oldc <= newc);

	mspp = kmem_zalloc(newc * sizeof (*mspp), KM_SLEEP);

	if (oldc != 0) {
		bcopy(vd->vdev_ms, mspp, oldc * sizeof (*mspp));
		kmem_free(vd->vdev_ms, oldc * sizeof (*mspp));
	}

	vd->vdev_ms = mspp;
	vd->vdev_ms_count = newc;

	for (m = oldc; m < newc; m++) {
		space_map_obj_t smo = { 0, 0, 0 };
		if (txg == 0) {
			uint64_t object = 0;
			error = dmu_read(mos, vd->vdev_ms_array,
			    m * sizeof (uint64_t), sizeof (uint64_t), &object,
			    DMU_READ_PREFETCH);
			if (error)
				return (error);
			if (object != 0) {
				dmu_buf_t *db;
				error = dmu_bonus_hold(mos, object, FTAG, &db);
				if (error)
					return (error);
				ASSERT3U(db->db_size, >=, sizeof (smo));
				bcopy(db->db_data, &smo, sizeof (smo));
				ASSERT3U(smo.smo_object, ==, object);
				dmu_buf_rele(db, FTAG);
			}
		}
		vd->vdev_ms[m] = metaslab_init(vd->vdev_mg, &smo,
		    m << vd->vdev_ms_shift, 1ULL << vd->vdev_ms_shift, txg);
	}

	if (txg == 0)
		spa_config_enter(spa, SCL_ALLOC, FTAG, RW_WRITER);

	/*
	 * If the vdev is being removed we don't activate
	 * the metaslabs since we want to ensure that no new
	 * allocations are performed on this device.
	 */
	if (oldc == 0 && !vd->vdev_removing)
		metaslab_group_activate(vd->vdev_mg);

	if (txg == 0)
		spa_config_exit(spa, SCL_ALLOC, FTAG);

	return (0);
}

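/*
 * Worked example (illustrative): a top-level vdev with asize = 1 TiB
 * (2^40) and ms_shift = 34 (16 GiB metaslabs) gets newc = 2^40 >> 34 =
 * 64 metaslabs; growing the device only ever appends metaslabs, since
 * oldc <= newc is asserted above.
 */
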
void
vdev_metaslab_fini(vdev_t *vd)
{
	uint64_t m;
	uint64_t count = vd->vdev_ms_count;

	if (vd->vdev_ms != NULL) {
		metaslab_group_passivate(vd->vdev_mg);
		for (m = 0; m < count; m++)
			if (vd->vdev_ms[m] != NULL)
				metaslab_fini(vd->vdev_ms[m]);
		kmem_free(vd->vdev_ms, count * sizeof (metaslab_t *));
		vd->vdev_ms = NULL;
	}
}

typedef struct vdev_probe_stats {
	boolean_t	vps_readable;
	boolean_t	vps_writeable;
	int		vps_flags;
} vdev_probe_stats_t;

static void
vdev_probe_done(zio_t *zio)
{
	spa_t *spa = zio->io_spa;
	vdev_t *vd = zio->io_vd;
	vdev_probe_stats_t *vps = zio->io_private;

	ASSERT(vd->vdev_probe_zio != NULL);

	if (zio->io_type == ZIO_TYPE_READ) {
		if (zio->io_error == 0)
			vps->vps_readable = 1;
		if (zio->io_error == 0 && spa_writeable(spa)) {
			zio_nowait(zio_write_phys(vd->vdev_probe_zio, vd,
			    zio->io_offset, zio->io_size, zio->io_data,
			    ZIO_CHECKSUM_OFF, vdev_probe_done, vps,
			    ZIO_PRIORITY_SYNC_WRITE, vps->vps_flags, B_TRUE));
		} else {
			zio_buf_free(zio->io_data, zio->io_size);
		}
	} else if (zio->io_type == ZIO_TYPE_WRITE) {
		if (zio->io_error == 0)
			vps->vps_writeable = 1;
		zio_buf_free(zio->io_data, zio->io_size);
	} else if (zio->io_type == ZIO_TYPE_NULL) {
		zio_t *pio;

		vd->vdev_cant_read |= !vps->vps_readable;
		vd->vdev_cant_write |= !vps->vps_writeable;

		if (vdev_readable(vd) &&
		    (vdev_writeable(vd) || !spa_writeable(spa))) {
			zio->io_error = 0;
		} else {
			ASSERT(zio->io_error != 0);
			zfs_ereport_post(FM_EREPORT_ZFS_PROBE_FAILURE,
			    spa, vd, NULL, 0, 0);
			zio->io_error = ENXIO;
		}

		mutex_enter(&vd->vdev_probe_lock);
		ASSERT(vd->vdev_probe_zio == zio);
		vd->vdev_probe_zio = NULL;
		mutex_exit(&vd->vdev_probe_lock);

		while ((pio = zio_walk_parents(zio)) != NULL)
			if (!vdev_accessible(vd, pio))
				pio->io_error = ENXIO;

		kmem_free(vps, sizeof (*vps));
	}
}

/*
 * Determine whether this device is accessible by reading and writing
 * to several known locations: the pad regions of each vdev label
 * but the first (which we leave alone in case it contains a VTOC).
 */
zio_t *
vdev_probe(vdev_t *vd, zio_t *zio)
{
	spa_t *spa = vd->vdev_spa;
	vdev_probe_stats_t *vps = NULL;
	zio_t *pio;

	ASSERT(vd->vdev_ops->vdev_op_leaf);

	/*
	 * Don't probe the probe.
	 */
	if (zio && (zio->io_flags & ZIO_FLAG_PROBE))
		return (NULL);

	/*
	 * To prevent 'probe storms' when a device fails, we create
	 * just one probe i/o at a time.  All zios that want to probe
	 * this vdev will become parents of the probe io.
	 */
	mutex_enter(&vd->vdev_probe_lock);

	if ((pio = vd->vdev_probe_zio) == NULL) {
		vps = kmem_zalloc(sizeof (*vps), KM_SLEEP);

		vps->vps_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_PROBE |
		    ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_AGGREGATE |
		    ZIO_FLAG_TRYHARD;

		if (spa_config_held(spa, SCL_ZIO, RW_WRITER)) {
			/*
			 * vdev_cant_read and vdev_cant_write can only
			 * transition from TRUE to FALSE when we have the
			 * SCL_ZIO lock as writer; otherwise they can only
			 * transition from FALSE to TRUE.  This ensures that
			 * any zio looking at these values can assume that
			 * failures persist for the life of the I/O.  That's
			 * important because when a device has intermittent
			 * connectivity problems, we want to ensure that
			 * they're ascribed to the device (ENXIO) and not
			 * the zio (EIO).
			 *
			 * Since we hold SCL_ZIO as writer here, clear both
			 * values so the probe can reevaluate from first
			 * principles.
			 */
			vps->vps_flags |= ZIO_FLAG_CONFIG_WRITER;
			vd->vdev_cant_read = B_FALSE;
			vd->vdev_cant_write = B_FALSE;
		}

		vd->vdev_probe_zio = pio = zio_null(NULL, spa, vd,
		    vdev_probe_done, vps,
		    vps->vps_flags | ZIO_FLAG_DONT_PROPAGATE);

		/*
		 * We can't change the vdev state in this context, so we
		 * kick off an async task to do it on our behalf.
		 */
		if (zio != NULL) {
			vd->vdev_probe_wanted = B_TRUE;
			spa_async_request(spa, SPA_ASYNC_PROBE);
		}
	}

	if (zio != NULL)
		zio_add_child(zio, pio);

	mutex_exit(&vd->vdev_probe_lock);

	if (vps == NULL) {
		ASSERT(zio != NULL);
		return (NULL);
	}

	for (int l = 1; l < VDEV_LABELS; l++) {
		zio_nowait(zio_read_phys(pio, vd,
		    vdev_label_offset(vd->vdev_psize, l,
		    offsetof(vdev_label_t, vl_pad2)),
		    VDEV_PAD_SIZE, zio_buf_alloc(VDEV_PAD_SIZE),
		    ZIO_CHECKSUM_OFF, vdev_probe_done, vps,
		    ZIO_PRIORITY_SYNC_READ, vps->vps_flags, B_TRUE));
	}

	if (zio == NULL)
		return (pio);

	zio_nowait(pio);
	return (NULL);
}

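/*
 * Flow sketch (illustrative): the first failing zio to call
 * vdev_probe() creates the shared probe (a null zio with three label
 * pad reads hung off it, since VDEV_LABELS is 4 and label 0 is
 * skipped); any zio that fails while the probe is in flight merely
 * becomes another parent of the same pio, so a burst of errors still
 * costs one probe.  vdev_probe_done() then settles cant_read and
 * cant_write and propagates ENXIO to every waiting parent if the
 * device proved inaccessible.
 */
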
static void
vdev_open_child(void *arg)
{
	vdev_t *vd = arg;

	vd->vdev_open_thread = curthread;
	vd->vdev_open_error = vdev_open(vd);
	vd->vdev_open_thread = NULL;
}

boolean_t
vdev_uses_zvols(vdev_t *vd)
{
	if (vd->vdev_path && strncmp(vd->vdev_path, ZVOL_DIR,
	    strlen(ZVOL_DIR)) == 0)
		return (B_TRUE);
	for (int c = 0; c < vd->vdev_children; c++)
		if (vdev_uses_zvols(vd->vdev_child[c]))
			return (B_TRUE);
	return (B_FALSE);
}

void
vdev_open_children(vdev_t *vd)
{
	taskq_t *tq;
	int children = vd->vdev_children;

	/*
	 * In order to handle pools on top of zvols, do the opens
	 * in a single thread so that the same thread holds the
	 * spa_namespace_lock.  (The "B_TRUE ||" below deliberately
	 * forces this serial path, leaving the taskq path unused.)
	 */
	if (B_TRUE || vdev_uses_zvols(vd)) {
		for (int c = 0; c < children; c++)
			vd->vdev_child[c]->vdev_open_error =
			    vdev_open(vd->vdev_child[c]);
		return;
	}
	tq = taskq_create("vdev_open", children, minclsyspri,
	    children, children, TASKQ_PREPOPULATE);

	for (int c = 0; c < children; c++)
		VERIFY(taskq_dispatch(tq, vdev_open_child, vd->vdev_child[c],
		    TQ_SLEEP) != 0);

	taskq_destroy(tq);
}

/*
 * Prepare a virtual device for access.
 */
int
vdev_open(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;
	int error;
	uint64_t osize = 0;
	uint64_t max_osize = 0;
	uint64_t asize, max_asize, psize;
	uint64_t ashift = 0;

	ASSERT(vd->vdev_open_thread == curthread ||
	    spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
	ASSERT(vd->vdev_state == VDEV_STATE_CLOSED ||
	    vd->vdev_state == VDEV_STATE_CANT_OPEN ||
	    vd->vdev_state == VDEV_STATE_OFFLINE);

	vd->vdev_stat.vs_aux = VDEV_AUX_NONE;
	vd->vdev_cant_read = B_FALSE;
	vd->vdev_cant_write = B_FALSE;
	vd->vdev_min_asize = vdev_get_min_asize(vd);

	/*
	 * If this vdev is not removed, check its fault status.  If it's
	 * faulted, bail out of the open.
	 */
	if (!vd->vdev_removed && vd->vdev_faulted) {
		ASSERT(vd->vdev_children == 0);
		ASSERT(vd->vdev_label_aux == VDEV_AUX_ERR_EXCEEDED ||
		    vd->vdev_label_aux == VDEV_AUX_EXTERNAL);
		vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED,
		    vd->vdev_label_aux);
		return (ENXIO);
	} else if (vd->vdev_offline) {
		ASSERT(vd->vdev_children == 0);
		vdev_set_state(vd, B_TRUE, VDEV_STATE_OFFLINE, VDEV_AUX_NONE);
		return (ENXIO);
	}

	error = vd->vdev_ops->vdev_op_open(vd, &osize, &max_osize, &ashift);

	/*
	 * Reset the vdev_reopening flag so that we actually close
	 * the vdev on error.
	 */
	vd->vdev_reopening = B_FALSE;
	if (zio_injection_enabled && error == 0)
		error = zio_handle_device_injection(vd, NULL, ENXIO);

	if (error) {
		if (vd->vdev_removed &&
		    vd->vdev_stat.vs_aux != VDEV_AUX_OPEN_FAILED)
			vd->vdev_removed = B_FALSE;

		vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    vd->vdev_stat.vs_aux);
		return (error);
	}

	vd->vdev_removed = B_FALSE;

	/*
	 * Recheck the faulted flag now that we have confirmed that
	 * the vdev is accessible.  If we're faulted, bail.
	 */
	if (vd->vdev_faulted) {
		ASSERT(vd->vdev_children == 0);
		ASSERT(vd->vdev_label_aux == VDEV_AUX_ERR_EXCEEDED ||
		    vd->vdev_label_aux == VDEV_AUX_EXTERNAL);
		vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED,
		    vd->vdev_label_aux);
		return (ENXIO);
	}

	if (vd->vdev_degraded) {
		ASSERT(vd->vdev_children == 0);
		vdev_set_state(vd, B_TRUE, VDEV_STATE_DEGRADED,
		    VDEV_AUX_ERR_EXCEEDED);
	} else {
		vdev_set_state(vd, B_TRUE, VDEV_STATE_HEALTHY, 0);
	}

	/*
	 * For hole or missing vdevs we just return success.
	 */
	if (vd->vdev_ishole || vd->vdev_ops == &vdev_missing_ops)
		return (0);

996 ZIO_FLAG_TRYHARD;
997
998 if (spa_config_held(spa, SCL_ZIO, RW_WRITER)) {
999 /*
1000 * vdev_cant_read and vdev_cant_write can only
1001 * transition from TRUE to FALSE when we have the
1002 * SCL_ZIO lock as writer; otherwise they can only
1003 * transition from FALSE to TRUE. This ensures that
1004 * any zio looking at these values can assume that
1005 * failures persist for the life of the I/O. That's
1006 * important because when a device has intermittent
1007 * connectivity problems, we want to ensure that
1008 * they're ascribed to the device (ENXIO) and not
1009 * the zio (EIO).
1010 *
1011 * Since we hold SCL_ZIO as writer here, clear both
1012 * values so the probe can reevaluate from first
1013 * principles.
1014 */
1015 vps->vps_flags |= ZIO_FLAG_CONFIG_WRITER;
1016 vd->vdev_cant_read = B_FALSE;
1017 vd->vdev_cant_write = B_FALSE;
1018 }
1019
1020 vd->vdev_probe_zio = pio = zio_null(NULL, spa, vd,
1021 vdev_probe_done, vps,
1022 vps->vps_flags | ZIO_FLAG_DONT_PROPAGATE);
1023
1024 /*
1025 * We can't change the vdev state in this context, so we
1026 * kick off an async task to do it on our behalf.
1027 */
1028 if (zio != NULL) {
1029 vd->vdev_probe_wanted = B_TRUE;
1030 spa_async_request(spa, SPA_ASYNC_PROBE);
1031 }
1032 }
1033
1034 if (zio != NULL)
1035 zio_add_child(zio, pio);
1036
1037 mutex_exit(&vd->vdev_probe_lock);
1038
1039 if (vps == NULL) {
1040 ASSERT(zio != NULL);
1041 return (NULL);
1042 }
1043
1044 for (int l = 1; l < VDEV_LABELS; l++) {
1045 zio_nowait(zio_read_phys(pio, vd,
1046 vdev_label_offset(vd->vdev_psize, l,
1047 offsetof(vdev_label_t, vl_pad2)),
1048 VDEV_PAD_SIZE, zio_buf_alloc(VDEV_PAD_SIZE),
1049 ZIO_CHECKSUM_OFF, vdev_probe_done, vps,
1050 ZIO_PRIORITY_SYNC_READ, vps->vps_flags, B_TRUE));
1051 }
1052
1053 if (zio == NULL)
1054 return (pio);
1055
1056 zio_nowait(pio);
1057 return (NULL);
1058}
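/*
 * Usage note (editor's summary): vdev_probe() is called in two modes in
 * this file. With zio == NULL -- as in vdev_open() below -- the returned
 * probe zio is handed to zio_wait() for a synchronous verdict. With a
 * failing child zio, that zio is attached as a parent of the shared probe
 * and picks up the probe's verdict via zio_walk_parents() in
 * vdev_probe_done() above.
 */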
1059
1060static void
1061vdev_open_child(void *arg)
1062{
1063 vdev_t *vd = arg;
1064
1065 vd->vdev_open_thread = curthread;
1066 vd->vdev_open_error = vdev_open(vd);
1067 vd->vdev_open_thread = NULL;
1068}
1069
1070boolean_t
1071vdev_uses_zvols(vdev_t *vd)
1072{
1073 if (vd->vdev_path && strncmp(vd->vdev_path, ZVOL_DIR,
1074 strlen(ZVOL_DIR)) == 0)
1075 return (B_TRUE);
1076 for (int c = 0; c < vd->vdev_children; c++)
1077 if (vdev_uses_zvols(vd->vdev_child[c]))
1078 return (B_TRUE);
1079 return (B_FALSE);
1080}
1081
1082void
1083vdev_open_children(vdev_t *vd)
1084{
1085 taskq_t *tq;
1086 int children = vd->vdev_children;
1087
1088 /*
1089 * in order to handle pools on top of zvols, do the opens
1090 * in a single thread so that the same thread holds the
1091 * spa_namespace_lock
1092 */
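	/*
	 * Note that the B_TRUE below short-circuits the zvol check, so
	 * the serial path is always taken here and the taskq-based
	 * parallel open that follows is effectively disabled.
	 */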
1093 if (B_TRUE || vdev_uses_zvols(vd)) {
1094 for (int c = 0; c < children; c++)
1095 vd->vdev_child[c]->vdev_open_error =
1096 vdev_open(vd->vdev_child[c]);
1097 return;
1098 }
1099 tq = taskq_create("vdev_open", children, minclsyspri,
1100 children, children, TASKQ_PREPOPULATE);
1101
1102 for (int c = 0; c < children; c++)
1103 VERIFY(taskq_dispatch(tq, vdev_open_child, vd->vdev_child[c],
1104 TQ_SLEEP) != 0);
1105
1106 taskq_destroy(tq);
1107}
1108
1109/*
1110 * Prepare a virtual device for access.
1111 */
1112int
1113vdev_open(vdev_t *vd)
1114{
1115 spa_t *spa = vd->vdev_spa;
1116 int error;
1117 uint64_t osize = 0;
1118 uint64_t max_osize = 0;
1119 uint64_t asize, max_asize, psize;
1120 uint64_t ashift = 0;
1121
1122 ASSERT(vd->vdev_open_thread == curthread ||
1123 spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
1124 ASSERT(vd->vdev_state == VDEV_STATE_CLOSED ||
1125 vd->vdev_state == VDEV_STATE_CANT_OPEN ||
1126 vd->vdev_state == VDEV_STATE_OFFLINE);
1127
1128 vd->vdev_stat.vs_aux = VDEV_AUX_NONE;
1129 vd->vdev_cant_read = B_FALSE;
1130 vd->vdev_cant_write = B_FALSE;
1131 vd->vdev_min_asize = vdev_get_min_asize(vd);
1132
1133 /*
1134 * If this vdev is not removed, check its fault status. If it's
1135 * faulted, bail out of the open.
1136 */
1137 if (!vd->vdev_removed && vd->vdev_faulted) {
1138 ASSERT(vd->vdev_children == 0);
1139 ASSERT(vd->vdev_label_aux == VDEV_AUX_ERR_EXCEEDED ||
1140 vd->vdev_label_aux == VDEV_AUX_EXTERNAL);
1141 vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED,
1142 vd->vdev_label_aux);
1143 return (ENXIO);
1144 } else if (vd->vdev_offline) {
1145 ASSERT(vd->vdev_children == 0);
1146 vdev_set_state(vd, B_TRUE, VDEV_STATE_OFFLINE, VDEV_AUX_NONE);
1147 return (ENXIO);
1148 }
1149
1150 error = vd->vdev_ops->vdev_op_open(vd, &osize, &max_osize, &ashift);
1151
1152 /*
1153 * Reset the vdev_reopening flag so that we actually close
1154 * the vdev on error.
1155 */
1156 vd->vdev_reopening = B_FALSE;
1157 if (zio_injection_enabled && error == 0)
1158 error = zio_handle_device_injection(vd, NULL, ENXIO);
1159
1160 if (error) {
1161 if (vd->vdev_removed &&
1162 vd->vdev_stat.vs_aux != VDEV_AUX_OPEN_FAILED)
1163 vd->vdev_removed = B_FALSE;
1164
1165 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
1166 vd->vdev_stat.vs_aux);
1167 return (error);
1168 }
1169
1170 vd->vdev_removed = B_FALSE;
1171
1172 /*
1173 * Recheck the faulted flag now that we have confirmed that
1174 * the vdev is accessible. If we're faulted, bail.
1175 */
1176 if (vd->vdev_faulted) {
1177 ASSERT(vd->vdev_children == 0);
1178 ASSERT(vd->vdev_label_aux == VDEV_AUX_ERR_EXCEEDED ||
1179 vd->vdev_label_aux == VDEV_AUX_EXTERNAL);
1180 vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED,
1181 vd->vdev_label_aux);
1182 return (ENXIO);
1183 }
1184
1185 if (vd->vdev_degraded) {
1186 ASSERT(vd->vdev_children == 0);
1187 vdev_set_state(vd, B_TRUE, VDEV_STATE_DEGRADED,
1188 VDEV_AUX_ERR_EXCEEDED);
1189 } else {
1190 vdev_set_state(vd, B_TRUE, VDEV_STATE_HEALTHY, 0);
1191 }
1192
1193 /*
1194 * For hole or missing vdevs we just return success.
1195 */
1196 if (vd->vdev_ishole || vd->vdev_ops == &vdev_missing_ops)
1197 return (0);
1198
1199 if (vd->vdev_ops->vdev_op_leaf) {
1200 vd->vdev_notrim = B_FALSE;
1201 trim_map_create(vd);
1202 }
1203
1204 for (int c = 0; c < vd->vdev_children; c++) {
1205 if (vd->vdev_child[c]->vdev_state != VDEV_STATE_HEALTHY) {
1206 vdev_set_state(vd, B_TRUE, VDEV_STATE_DEGRADED,
1207 VDEV_AUX_NONE);
1208 break;
1209 }
1210 }
1211
1212 osize = P2ALIGN(osize, (uint64_t)sizeof (vdev_label_t));
1213 max_osize = P2ALIGN(max_osize, (uint64_t)sizeof (vdev_label_t));
1214
1215 if (vd->vdev_children == 0) {
1216 if (osize < SPA_MINDEVSIZE) {
1217 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
1218 VDEV_AUX_TOO_SMALL);
1219 return (EOVERFLOW);
1220 }
1221 psize = osize;
1222 asize = osize - (VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE);
1223 max_asize = max_osize - (VDEV_LABEL_START_SIZE +
1224 VDEV_LABEL_END_SIZE);
1225 } else {
1226 if (vd->vdev_parent != NULL && osize < SPA_MINDEVSIZE -
1227 (VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE)) {
1228 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
1229 VDEV_AUX_TOO_SMALL);
1230 return (EOVERFLOW);
1231 }
1232 psize = 0;
1233 asize = osize;
1234 max_asize = max_osize;
1235 }
1236
1237 vd->vdev_psize = psize;
1238
1239 /*
1240 * Make sure the allocatable size hasn't shrunk.
1241 */
1242 if (asize < vd->vdev_min_asize) {
1243 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
1244 VDEV_AUX_BAD_LABEL);
1245 return (EINVAL);
1246 }
1247
1248 if (vd->vdev_asize == 0) {
1249 /*
1250 * This is the first-ever open, so use the computed values.
1251 * For testing purposes, a higher ashift can be requested.
1252 */
1253 vd->vdev_asize = asize;
1254 vd->vdev_max_asize = max_asize;
1255 vd->vdev_ashift = MAX(ashift, vd->vdev_ashift);
1256 } else {
1257 /*
1258 * Make sure the alignment requirement hasn't increased.
1259 */
1260 if (ashift > vd->vdev_top->vdev_ashift) {
1261 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
1262 VDEV_AUX_BAD_LABEL);
1263 return (EINVAL);
1264 }
1265 vd->vdev_max_asize = max_asize;
1266 }
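		/*
		 * Hedged example: a replacement disk reporting 4k sectors
		 * (ashift 12) cannot back a top-level vdev labeled with
		 * ashift 9, because existing allocations assume the
		 * smaller alignment; such an open fails above with EINVAL
		 * and VDEV_AUX_BAD_LABEL.
		 */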
1267
1268 /*
1269 * If all children are healthy and the asize has increased,
1270 * then we've experienced dynamic LUN growth. If automatic
1271 * expansion is enabled then use the additional space.
1272 */
1273 if (vd->vdev_state == VDEV_STATE_HEALTHY && asize > vd->vdev_asize &&
1274 (vd->vdev_expanding || spa->spa_autoexpand))
1275 vd->vdev_asize = asize;
1276
1277 vdev_set_min_asize(vd);
1278
1279 /*
1280 * Ensure we can issue some IO before declaring the
1281 * vdev open for business.
1282 */
1283 if (vd->vdev_ops->vdev_op_leaf &&
1284 (error = zio_wait(vdev_probe(vd, NULL))) != 0) {
1285 vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED,
1286 VDEV_AUX_ERR_EXCEEDED);
1287 return (error);
1288 }
1289
1290 /*
1291 * If a leaf vdev has a DTL, and seems healthy, then kick off a
1292 * resilver. But don't do this if we are doing a reopen for a scrub,
1293 * since this would just restart the scrub we are already doing.
1294 */
1295 if (vd->vdev_ops->vdev_op_leaf && !spa->spa_scrub_reopen &&
1296 vdev_resilver_needed(vd, NULL, NULL))
1297 spa_async_request(spa, SPA_ASYNC_RESILVER);
1298
1299 return (0);
1300}
1301
1302/*
1303 * Called once the vdevs are all opened, this routine validates the label
1304 * contents. This needs to be done before vdev_load() so that we don't
1305 * inadvertently do repair I/Os to the wrong device.
1306 *
1307 * If 'strict' is false ignore the spa guid check. This is necessary because
1308 * if the machine crashed during a re-guid the new guid might have been written
1309 * to all of the vdev labels, but not the cached config. The strict check
1310 * will be performed when the pool is opened again using the mos config.
1311 *
1312 * This function will only return failure if one of the vdevs indicates that it
1313 * has since been destroyed or exported. This is only possible if
1314 * /etc/zfs/zpool.cache was readonly at the time. Otherwise, the vdev state
1315 * will be updated but the function will return 0.
1316 */
1317int
1318vdev_validate(vdev_t *vd, boolean_t strict)
1319{
1320 spa_t *spa = vd->vdev_spa;
1321 nvlist_t *label;
1322 uint64_t guid = 0, top_guid;
1323 uint64_t state;
1324
1325 for (int c = 0; c < vd->vdev_children; c++)
1326 if (vdev_validate(vd->vdev_child[c], strict) != 0)
1327 return (EBADF);
1328
1329 /*
1330 * If the device has already failed, or was marked offline, don't do
1331 * any further validation. Otherwise, label I/O will fail and we will
1332 * overwrite the previous state.
1333 */
1334 if (vd->vdev_ops->vdev_op_leaf && vdev_readable(vd)) {
1335 uint64_t aux_guid = 0;
1336 nvlist_t *nvl;
1337 uint64_t txg = strict ? spa->spa_config_txg : -1ULL;
1338
1339 if ((label = vdev_label_read_config(vd, txg)) == NULL) {
1340 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
1341 VDEV_AUX_BAD_LABEL);
1342 return (0);
1343 }
1344
1345 /*
1346 * Determine if this vdev has been split off into another
1347 * pool. If so, then refuse to open it.
1348 */
1349 if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_SPLIT_GUID,
1350 &aux_guid) == 0 && aux_guid == spa_guid(spa)) {
1351 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
1352 VDEV_AUX_SPLIT_POOL);
1353 nvlist_free(label);
1354 return (0);
1355 }
1356
1357 if (strict && (nvlist_lookup_uint64(label,
1358 ZPOOL_CONFIG_POOL_GUID, &guid) != 0 ||
1359 guid != spa_guid(spa))) {
1360 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
1361 VDEV_AUX_CORRUPT_DATA);
1362 nvlist_free(label);
1363 return (0);
1364 }
1365
1366 if (nvlist_lookup_nvlist(label, ZPOOL_CONFIG_VDEV_TREE, &nvl)
1367 != 0 || nvlist_lookup_uint64(nvl, ZPOOL_CONFIG_ORIG_GUID,
1368 &aux_guid) != 0)
1369 aux_guid = 0;
1370
1371 /*
1372 * If this vdev just became a top-level vdev because its
1373 * sibling was detached, it will have adopted the parent's
1374 * vdev guid -- but the label may or may not be on disk yet.
1375 * Fortunately, either version of the label will have the
1376 * same top guid, so if we're a top-level vdev, we can
1377 * safely compare to that instead.
1378 *
1379 * If we split this vdev off instead, then we also check the
1380 * original pool's guid. We don't want to consider the vdev
1381 * corrupt if it is partway through a split operation.
1382 */
1383 if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID,
1384 &guid) != 0 ||
1385 nvlist_lookup_uint64(label, ZPOOL_CONFIG_TOP_GUID,
1386 &top_guid) != 0 ||
1387 ((vd->vdev_guid != guid && vd->vdev_guid != aux_guid) &&
1388 (vd->vdev_guid != top_guid || vd != vd->vdev_top))) {
1389 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
1390 VDEV_AUX_CORRUPT_DATA);
1391 nvlist_free(label);
1392 return (0);
1393 }
1394
1395 if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE,
1396 &state) != 0) {
1397 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
1398 VDEV_AUX_CORRUPT_DATA);
1399 nvlist_free(label);
1400 return (0);
1401 }
1402
1403 nvlist_free(label);
1404
1405 /*
1406 * If this is a verbatim import, no need to check the
1407 * state of the pool.
1408 */
1409 if (!(spa->spa_import_flags & ZFS_IMPORT_VERBATIM) &&
1410 spa_load_state(spa) == SPA_LOAD_OPEN &&
1411 state != POOL_STATE_ACTIVE)
1412 return (EBADF);
1413
1414 /*
1415 * If we were able to open and validate a vdev that was
1416 * previously marked permanently unavailable, clear that state
1417 * now.
1418 */
1419 if (vd->vdev_not_present)
1420 vd->vdev_not_present = 0;
1421 }
1422
1423 return (0);
1424}
1425
1426/*
1427 * Close a virtual device.
1428 */
1429void
1430vdev_close(vdev_t *vd)
1431{
1432 spa_t *spa = vd->vdev_spa;
1433 vdev_t *pvd = vd->vdev_parent;
1434
1435 ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
1436
1437 /*
1438 * If our parent is reopening, then we are as well, unless we are
1439 * going offline.
1440 */
1441 if (pvd != NULL && pvd->vdev_reopening)
1442 vd->vdev_reopening = (pvd->vdev_reopening && !vd->vdev_offline);
1443
1444 vd->vdev_ops->vdev_op_close(vd);
1445
1446 vdev_cache_purge(vd);
1447
1448 if (vd->vdev_ops->vdev_op_leaf)
1449 trim_map_destroy(vd);
1450
1442 /*
1443 * We record the previous state before we close it, so that if we are
1444 * doing a reopen(), we don't generate FMA ereports if we notice that
1445 * it's still faulted.
1446 */
1447 vd->vdev_prevstate = vd->vdev_state;
1448
1449 if (vd->vdev_offline)
1450 vd->vdev_state = VDEV_STATE_OFFLINE;
1451 else
1452 vd->vdev_state = VDEV_STATE_CLOSED;
1453 vd->vdev_stat.vs_aux = VDEV_AUX_NONE;
1454}
1455
1456void
1457vdev_hold(vdev_t *vd)
1458{
1459 spa_t *spa = vd->vdev_spa;
1460
1461 ASSERT(spa_is_root(spa));
1462 if (spa->spa_state == POOL_STATE_UNINITIALIZED)
1463 return;
1464
1465 for (int c = 0; c < vd->vdev_children; c++)
1466 vdev_hold(vd->vdev_child[c]);
1467
1468 if (vd->vdev_ops->vdev_op_leaf)
1469 vd->vdev_ops->vdev_op_hold(vd);
1470}
1471
1472void
1473vdev_rele(vdev_t *vd)
1474{
1475 spa_t *spa = vd->vdev_spa;
1476
1477 ASSERT(spa_is_root(spa));
1478 for (int c = 0; c < vd->vdev_children; c++)
1479 vdev_rele(vd->vdev_child[c]);
1480
1481 if (vd->vdev_ops->vdev_op_leaf)
1482 vd->vdev_ops->vdev_op_rele(vd);
1483}
1484
1485/*
1486 * Reopen all interior vdevs and any unopened leaves. We don't actually
1487 * reopen leaf vdevs which had previously been opened as they might deadlock
1488 * on the spa_config_lock. Instead we only obtain the leaf's physical size.
1489 * If the leaf has never been opened then open it, as usual.
1490 */
1491void
1492vdev_reopen(vdev_t *vd)
1493{
1494 spa_t *spa = vd->vdev_spa;
1495
1496 ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
1497
1498 /* set the reopening flag unless we're taking the vdev offline */
1499 vd->vdev_reopening = !vd->vdev_offline;
1500 vdev_close(vd);
1501 (void) vdev_open(vd);
1502
1503 /*
1504 * Call vdev_validate() here to make sure we have the same device.
1505 * Otherwise, a device with an invalid label could be successfully
1506 * opened in response to vdev_reopen().
1507 */
1508 if (vd->vdev_aux) {
1509 (void) vdev_validate_aux(vd);
1510 if (vdev_readable(vd) && vdev_writeable(vd) &&
1511 vd->vdev_aux == &spa->spa_l2cache &&
1512 !l2arc_vdev_present(vd))
1513 l2arc_add_vdev(spa, vd);
1514 } else {
1515 (void) vdev_validate(vd, spa_last_synced_txg(spa));
1516 }
1517
1518 /*
1519 * Reassess parent vdev's health.
1520 */
1521 vdev_propagate_state(vd);
1522}
1523
1524int
1525vdev_create(vdev_t *vd, uint64_t txg, boolean_t isreplacing)
1526{
1527 int error;
1528
1529 /*
1530 * Normally, partial opens (e.g. of a mirror) are allowed.
1531 * For a create, however, we want to fail the request if
1532 * there are any components we can't open.
1533 */
1534 error = vdev_open(vd);
1535
1536 if (error || vd->vdev_state != VDEV_STATE_HEALTHY) {
1537 vdev_close(vd);
1538 return (error ? error : ENXIO);
1539 }
1540
1541 /*
1542 * Recursively initialize all labels.
1543 */
1544 if ((error = vdev_label_init(vd, txg, isreplacing ?
1545 VDEV_LABEL_REPLACE : VDEV_LABEL_CREATE)) != 0) {
1546 vdev_close(vd);
1547 return (error);
1548 }
1549
1550 return (0);
1551}
1552
1553void
1554vdev_metaslab_set_size(vdev_t *vd)
1555{
1556 /*
1557 * Aim for roughly 200 metaslabs per vdev.
1558 */
1559 vd->vdev_ms_shift = highbit(vd->vdev_asize / 200);
1560 vd->vdev_ms_shift = MAX(vd->vdev_ms_shift, SPA_MAXBLOCKSHIFT);
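	/*
	 * Hedged example (hypothetical 2 TB vdev): 2 TB / 200 is about
	 * 10.99 GB, whose highest set bit is bit 33, so highbit() yields
	 * ms_shift = 34: 16 GB metaslabs, 128 per vdev. "Roughly 200"
	 * thus lands between 100 and 200 in practice, and the MAX()
	 * above additionally keeps each metaslab at least one
	 * maximum-sized block wide.
	 */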
1561}
1562
1563void
1564vdev_dirty(vdev_t *vd, int flags, void *arg, uint64_t txg)
1565{
1566 ASSERT(vd == vd->vdev_top);
1567 ASSERT(!vd->vdev_ishole);
1568 ASSERT(ISP2(flags));
1569 ASSERT(spa_writeable(vd->vdev_spa));
1570
1571 if (flags & VDD_METASLAB)
1572 (void) txg_list_add(&vd->vdev_ms_list, arg, txg);
1573
1574 if (flags & VDD_DTL)
1575 (void) txg_list_add(&vd->vdev_dtl_list, arg, txg);
1576
1577 (void) txg_list_add(&vd->vdev_spa->spa_vdev_txg_list, vd, txg);
1578}
1579
1580/*
1581 * DTLs.
1582 *
1583 * A vdev's DTL (dirty time log) is the set of transaction groups for which
1584 * the vdev has less than perfect replication. There are four kinds of DTL:
1585 *
1586 * DTL_MISSING: txgs for which the vdev has no valid copies of the data
1587 *
1588 * DTL_PARTIAL: txgs for which data is available, but not fully replicated
1589 *
1590 * DTL_SCRUB: the txgs that could not be repaired by the last scrub; upon
1591 * scrub completion, DTL_SCRUB replaces DTL_MISSING in the range of
1592 * txgs that was scrubbed.
1593 *
1594 * DTL_OUTAGE: txgs which cannot currently be read, whether due to
1595 * persistent errors or just some device being offline.
1596 * Unlike the other three, the DTL_OUTAGE map is not generally
1597 * maintained; it's only computed when needed, typically to
1598 * determine whether a device can be detached.
1599 *
1600 * For leaf vdevs, DTL_MISSING and DTL_PARTIAL are identical: the device
1601 * either has the data or it doesn't.
1602 *
1603 * For interior vdevs such as mirror and RAID-Z the picture is more complex.
1604 * A vdev's DTL_PARTIAL is the union of its children's DTL_PARTIALs, because
1605 * if any child is less than fully replicated, then so is its parent.
1606 * A vdev's DTL_MISSING is a modified union of its children's DTL_MISSINGs,
1607 * comprising only those txgs which appear in more than 'maxfaults' children;
1608 * those are the txgs we don't have enough replication to read. For example,
1609 * double-parity RAID-Z can tolerate up to two missing devices (maxfaults == 2);
1610 * thus, its DTL_MISSING consists of the set of txgs that appear in more than
1611 * two child DTL_MISSING maps.
1612 *
1613 * It should be clear from the above that to compute the DTLs and outage maps
1614 * for all vdevs, it suffices to know just the leaf vdevs' DTL_MISSING maps.
1615 * Therefore, that is all we keep on disk. When loading the pool, or after
1616 * a configuration change, we generate all other DTLs from first principles.
1617 */
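/*
 * Hedged standalone sketch (editor's illustration, not part of the
 * original file): the same minref computation as vdev_dtl_reassess()
 * below, using plain bitmaps over 16 txgs instead of space maps. For
 * raidz2, minref = nparity + 1 = 3, so only txgs missing from three or
 * more children are missing at the parent:
 *
 *	uint16_t child_missing[5] = { 0x00f0, 0x0030, 0x0010, 0, 0 };
 *	int minref = 2 + 1;
 *	uint16_t parent_missing = 0;
 *
 *	for (int txg = 0; txg < 16; txg++) {
 *		int refs = 0;
 *		for (int c = 0; c < 5; c++)
 *			if (child_missing[c] & (1 << txg))
 *				refs++;
 *		if (refs >= minref)
 *			parent_missing |= (1 << txg);
 *	}
 *
 * Here only txg 4 is missing on three children, so parent_missing ends
 * up 0x0010; the real code reaches the same answer with
 * space_map_ref_add_map() and space_map_ref_generate_map().
 */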
1618void
1619vdev_dtl_dirty(vdev_t *vd, vdev_dtl_type_t t, uint64_t txg, uint64_t size)
1620{
1621 space_map_t *sm = &vd->vdev_dtl[t];
1622
1623 ASSERT(t < DTL_TYPES);
1624 ASSERT(vd != vd->vdev_spa->spa_root_vdev);
1625 ASSERT(spa_writeable(vd->vdev_spa));
1626
1627 mutex_enter(sm->sm_lock);
1628 if (!space_map_contains(sm, txg, size))
1629 space_map_add(sm, txg, size);
1630 mutex_exit(sm->sm_lock);
1631}
1632
1633boolean_t
1634vdev_dtl_contains(vdev_t *vd, vdev_dtl_type_t t, uint64_t txg, uint64_t size)
1635{
1636 space_map_t *sm = &vd->vdev_dtl[t];
1637 boolean_t dirty = B_FALSE;
1638
1639 ASSERT(t < DTL_TYPES);
1640 ASSERT(vd != vd->vdev_spa->spa_root_vdev);
1641
1642 mutex_enter(sm->sm_lock);
1643 if (sm->sm_space != 0)
1644 dirty = space_map_contains(sm, txg, size);
1645 mutex_exit(sm->sm_lock);
1646
1647 return (dirty);
1648}
1649
1650boolean_t
1651vdev_dtl_empty(vdev_t *vd, vdev_dtl_type_t t)
1652{
1653 space_map_t *sm = &vd->vdev_dtl[t];
1654 boolean_t empty;
1655
1656 mutex_enter(sm->sm_lock);
1657 empty = (sm->sm_space == 0);
1658 mutex_exit(sm->sm_lock);
1659
1660 return (empty);
1661}
1662
1663/*
1664 * Reassess DTLs after a config change or scrub completion.
1665 */
1666void
1667vdev_dtl_reassess(vdev_t *vd, uint64_t txg, uint64_t scrub_txg, int scrub_done)
1668{
1669 spa_t *spa = vd->vdev_spa;
1670 avl_tree_t reftree;
1671 int minref;
1672
1673 ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
1674
1675 for (int c = 0; c < vd->vdev_children; c++)
1676 vdev_dtl_reassess(vd->vdev_child[c], txg,
1677 scrub_txg, scrub_done);
1678
1679 if (vd == spa->spa_root_vdev || vd->vdev_ishole || vd->vdev_aux)
1680 return;
1681
1682 if (vd->vdev_ops->vdev_op_leaf) {
1683 dsl_scan_t *scn = spa->spa_dsl_pool->dp_scan;
1684
1685 mutex_enter(&vd->vdev_dtl_lock);
1686 if (scrub_txg != 0 &&
1687 (spa->spa_scrub_started ||
1688 (scn && scn->scn_phys.scn_errors == 0))) {
1689 /*
1690 * We completed a scrub up to scrub_txg. If we
1691 * did it without rebooting, then the scrub dtl
1692 * will be valid, so excise the old region and
1693 * fold in the scrub dtl. Otherwise, if there
1694 * was an error, leave the dtl as-is.
1695 *
1696 * There's a little trick here: to excise the beginning
1697 * of the DTL_MISSING map, we put it into a reference
1698 * tree and then add a segment with refcnt -1 that
1699 * covers the range [0, scrub_txg). This means
1700 * that each txg in that range has refcnt -1 or 0.
1701 * We then add DTL_SCRUB with a refcnt of 2, so that
1702 * entries in the range [0, scrub_txg) will have a
1703 * positive refcnt -- either 1 or 2. We then convert
1704 * the reference tree into the new DTL_MISSING map.
1705 */
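			/*
			 * Hedged walk-through with made-up numbers: say
			 * DTL_MISSING covers txgs [10, 50) and scrub_txg
			 * is 30. After the three adds below, refcnts are:
			 * [10, 30) scrubbed clean: 1 - 1 = 0 (excised);
			 * [10, 30) still in DTL_SCRUB: 1 - 1 + 2 = 2;
			 * [30, 50): 1. Generating with minref 1 keeps
			 * exactly the positive ranges, so only unrepaired
			 * and post-scrub_txg txgs survive as DTL_MISSING.
			 */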
1706 space_map_ref_create(&reftree);
1707 space_map_ref_add_map(&reftree,
1708 &vd->vdev_dtl[DTL_MISSING], 1);
1709 space_map_ref_add_seg(&reftree, 0, scrub_txg, -1);
1710 space_map_ref_add_map(&reftree,
1711 &vd->vdev_dtl[DTL_SCRUB], 2);
1712 space_map_ref_generate_map(&reftree,
1713 &vd->vdev_dtl[DTL_MISSING], 1);
1714 space_map_ref_destroy(&reftree);
1715 }
1716 space_map_vacate(&vd->vdev_dtl[DTL_PARTIAL], NULL, NULL);
1717 space_map_walk(&vd->vdev_dtl[DTL_MISSING],
1718 space_map_add, &vd->vdev_dtl[DTL_PARTIAL]);
1719 if (scrub_done)
1720 space_map_vacate(&vd->vdev_dtl[DTL_SCRUB], NULL, NULL);
1721 space_map_vacate(&vd->vdev_dtl[DTL_OUTAGE], NULL, NULL);
1722 if (!vdev_readable(vd))
1723 space_map_add(&vd->vdev_dtl[DTL_OUTAGE], 0, -1ULL);
1724 else
1725 space_map_walk(&vd->vdev_dtl[DTL_MISSING],
1726 space_map_add, &vd->vdev_dtl[DTL_OUTAGE]);
1727 mutex_exit(&vd->vdev_dtl_lock);
1728
1729 if (txg != 0)
1730 vdev_dirty(vd->vdev_top, VDD_DTL, vd, txg);
1731 return;
1732 }
1733
1734 mutex_enter(&vd->vdev_dtl_lock);
1735 for (int t = 0; t < DTL_TYPES; t++) {
1736 /* account for child's outage in parent's missing map */
1737 int s = (t == DTL_MISSING) ? DTL_OUTAGE : t;
1738 if (t == DTL_SCRUB)
1739 continue; /* leaf vdevs only */
1740 if (t == DTL_PARTIAL)
1741 minref = 1; /* i.e. non-zero */
1742 else if (vd->vdev_nparity != 0)
1743 minref = vd->vdev_nparity + 1; /* RAID-Z */
1744 else
1745 minref = vd->vdev_children; /* any kind of mirror */
1746 space_map_ref_create(&reftree);
1747 for (int c = 0; c < vd->vdev_children; c++) {
1748 vdev_t *cvd = vd->vdev_child[c];
1749 mutex_enter(&cvd->vdev_dtl_lock);
1750 space_map_ref_add_map(&reftree, &cvd->vdev_dtl[s], 1);
1751 mutex_exit(&cvd->vdev_dtl_lock);
1752 }
1753 space_map_ref_generate_map(&reftree, &vd->vdev_dtl[t], minref);
1754 space_map_ref_destroy(&reftree);
1755 }
1756 mutex_exit(&vd->vdev_dtl_lock);
1757}
1758
1759static int
1760vdev_dtl_load(vdev_t *vd)
1761{
1762 spa_t *spa = vd->vdev_spa;
1763 space_map_obj_t *smo = &vd->vdev_dtl_smo;
1764 objset_t *mos = spa->spa_meta_objset;
1765 dmu_buf_t *db;
1766 int error;
1767
1768 ASSERT(vd->vdev_children == 0);
1769
1770 if (smo->smo_object == 0)
1771 return (0);
1772
1773 ASSERT(!vd->vdev_ishole);
1774
1775 if ((error = dmu_bonus_hold(mos, smo->smo_object, FTAG, &db)) != 0)
1776 return (error);
1777
1778 ASSERT3U(db->db_size, >=, sizeof (*smo));
1779 bcopy(db->db_data, smo, sizeof (*smo));
1780 dmu_buf_rele(db, FTAG);
1781
1782 mutex_enter(&vd->vdev_dtl_lock);
1783 error = space_map_load(&vd->vdev_dtl[DTL_MISSING],
1784 NULL, SM_ALLOC, smo, mos);
1785 mutex_exit(&vd->vdev_dtl_lock);
1786
1787 return (error);
1788}
1789
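/*
 * Editor's summary: write the leaf's DTL_MISSING map out to the MOS for
 * this txg. A detached vdev instead frees its space map object. Otherwise
 * the map is copied into a private space_map under a local lock, so the
 * on-disk sync never runs while vdev_dtl_lock is held, and the bonus
 * buffer is then rewritten with the updated space_map_obj_t.
 */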
1790void
1791vdev_dtl_sync(vdev_t *vd, uint64_t txg)
1792{
1793 spa_t *spa = vd->vdev_spa;
1794 space_map_obj_t *smo = &vd->vdev_dtl_smo;
1795 space_map_t *sm = &vd->vdev_dtl[DTL_MISSING];
1796 objset_t *mos = spa->spa_meta_objset;
1797 space_map_t smsync;
1798 kmutex_t smlock;
1799 dmu_buf_t *db;
1800 dmu_tx_t *tx;
1801
1802 ASSERT(!vd->vdev_ishole);
1803
1804 tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
1805
1806 if (vd->vdev_detached) {
1807 if (smo->smo_object != 0) {
1808 int err = dmu_object_free(mos, smo->smo_object, tx);
1809 ASSERT0(err);
1810 smo->smo_object = 0;
1811 }
1812 dmu_tx_commit(tx);
1813 return;
1814 }
1815
1816 if (smo->smo_object == 0) {
1817 ASSERT(smo->smo_objsize == 0);
1818 ASSERT(smo->smo_alloc == 0);
1819 smo->smo_object = dmu_object_alloc(mos,
1820 DMU_OT_SPACE_MAP, 1 << SPACE_MAP_BLOCKSHIFT,
1821 DMU_OT_SPACE_MAP_HEADER, sizeof (*smo), tx);
1822 ASSERT(smo->smo_object != 0);
1823 vdev_config_dirty(vd->vdev_top);
1824 }
1825
1826 mutex_init(&smlock, NULL, MUTEX_DEFAULT, NULL);
1827
1828 space_map_create(&smsync, sm->sm_start, sm->sm_size, sm->sm_shift,
1829 &smlock);
1830
1831 mutex_enter(&smlock);
1832
1833 mutex_enter(&vd->vdev_dtl_lock);
1834 space_map_walk(sm, space_map_add, &smsync);
1835 mutex_exit(&vd->vdev_dtl_lock);
1836
1837 space_map_truncate(smo, mos, tx);
1838 space_map_sync(&smsync, SM_ALLOC, smo, mos, tx);
1839
1840 space_map_destroy(&smsync);
1841
1842 mutex_exit(&smlock);
1843 mutex_destroy(&smlock);
1844
1845 VERIFY(0 == dmu_bonus_hold(mos, smo->smo_object, FTAG, &db));
1846 dmu_buf_will_dirty(db, tx);
1847 ASSERT3U(db->db_size, >=, sizeof (*smo));
1848 bcopy(smo, db->db_data, sizeof (*smo));
1849 dmu_buf_rele(db, FTAG);
1850
1851 dmu_tx_commit(tx);
1852}
1853
1854/*
1855 * Determine whether the specified vdev can be offlined/detached/removed
1856 * without losing data.
1857 */
1858boolean_t
1859vdev_dtl_required(vdev_t *vd)
1860{
1861 spa_t *spa = vd->vdev_spa;
1862 vdev_t *tvd = vd->vdev_top;
1863 uint8_t cant_read = vd->vdev_cant_read;
1864 boolean_t required;
1865
1866 ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
1867
1868 if (vd == spa->spa_root_vdev || vd == tvd)
1869 return (B_TRUE);
1870
1871 /*
1872 * Temporarily mark the device as unreadable, and then determine
1873 * whether this results in any DTL outages in the top-level vdev.
1874 * If not, we can safely offline/detach/remove the device.
1875 */
1876 vd->vdev_cant_read = B_TRUE;
1877 vdev_dtl_reassess(tvd, 0, 0, B_FALSE);
1878 required = !vdev_dtl_empty(tvd, DTL_OUTAGE);
1879 vd->vdev_cant_read = cant_read;
1880 vdev_dtl_reassess(tvd, 0, 0, B_FALSE);
1881
1882 if (!required && zio_injection_enabled)
1883 required = !!zio_handle_device_injection(vd, NULL, ECHILD);
1884
1885 return (required);
1886}
1887
1888/*
1889 * Determine if resilver is needed, and if so the txg range.
1890 */
1891boolean_t
1892vdev_resilver_needed(vdev_t *vd, uint64_t *minp, uint64_t *maxp)
1893{
1894 boolean_t needed = B_FALSE;
1895 uint64_t thismin = UINT64_MAX;
1896 uint64_t thismax = 0;
1897
1898 if (vd->vdev_children == 0) {
1899 mutex_enter(&vd->vdev_dtl_lock);
1900 if (vd->vdev_dtl[DTL_MISSING].sm_space != 0 &&
1901 vdev_writeable(vd)) {
1902 space_seg_t *ss;
1903
1904 ss = avl_first(&vd->vdev_dtl[DTL_MISSING].sm_root);
1905 thismin = ss->ss_start - 1;
1906 ss = avl_last(&vd->vdev_dtl[DTL_MISSING].sm_root);
1907 thismax = ss->ss_end;
1908 needed = B_TRUE;
1909 }
1910 mutex_exit(&vd->vdev_dtl_lock);
1911 } else {
1912 for (int c = 0; c < vd->vdev_children; c++) {
1913 vdev_t *cvd = vd->vdev_child[c];
1914 uint64_t cmin, cmax;
1915
1916 if (vdev_resilver_needed(cvd, &cmin, &cmax)) {
1917 thismin = MIN(thismin, cmin);
1918 thismax = MAX(thismax, cmax);
1919 needed = B_TRUE;
1920 }
1921 }
1922 }
1923
1924 if (needed && minp) {
1925 *minp = thismin;
1926 *maxp = thismax;
1927 }
1928 return (needed);
1929}
1930
1931void
1932vdev_load(vdev_t *vd)
1933{
1934 /*
1935 * Recursively load all children.
1936 */
1937 for (int c = 0; c < vd->vdev_children; c++)
1938 vdev_load(vd->vdev_child[c]);
1939
1940 /*
1941 * If this is a top-level vdev, initialize its metaslabs.
1942 */
1943 if (vd == vd->vdev_top && !vd->vdev_ishole &&
1944 (vd->vdev_ashift == 0 || vd->vdev_asize == 0 ||
1945 vdev_metaslab_init(vd, 0) != 0))
1946 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
1947 VDEV_AUX_CORRUPT_DATA);
1948
1949 /*
1950 * If this is a leaf vdev, load its DTL.
1951 */
1952 if (vd->vdev_ops->vdev_op_leaf && vdev_dtl_load(vd) != 0)
1953 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
1954 VDEV_AUX_CORRUPT_DATA);
1955}
1956
1957/*
1958 * The special vdev case is used for hot spares and l2cache devices. Its
1959 * sole purpose is to set the vdev state for the associated vdev. To do this,
1960 * we make sure that we can open the underlying device, then try to read the
1961 * label, and make sure that the label is sane and that it hasn't been
1962 * repurposed to another pool.
1963 */
1964int
1965vdev_validate_aux(vdev_t *vd)
1966{
1967 nvlist_t *label;
1968 uint64_t guid, version;
1969 uint64_t state;
1970
1971 if (!vdev_readable(vd))
1972 return (0);
1973
1974 if ((label = vdev_label_read_config(vd, -1ULL)) == NULL) {
1975 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
1976 VDEV_AUX_CORRUPT_DATA);
1977 return (-1);
1978 }
1979
1980 if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_VERSION, &version) != 0 ||
1981 !SPA_VERSION_IS_SUPPORTED(version) ||
1982 nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID, &guid) != 0 ||
1983 guid != vd->vdev_guid ||
1984 nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE, &state) != 0) {
1985 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
1986 VDEV_AUX_CORRUPT_DATA);
1987 nvlist_free(label);
1988 return (-1);
1989 }
1990
1991 /*
1992 * We don't actually check the pool state here. If it's in fact in
1993 * use by another pool, we update this fact on the fly when requested.
1994 */
1995 nvlist_free(label);
1996 return (0);
1997}
1998
1999void
2000vdev_remove(vdev_t *vd, uint64_t txg)
2001{
2002 spa_t *spa = vd->vdev_spa;
2003 objset_t *mos = spa->spa_meta_objset;
2004 dmu_tx_t *tx;
2005
2006 tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg);
2007
2008 if (vd->vdev_dtl_smo.smo_object) {
2009 ASSERT0(vd->vdev_dtl_smo.smo_alloc);
2010 (void) dmu_object_free(mos, vd->vdev_dtl_smo.smo_object, tx);
2011 vd->vdev_dtl_smo.smo_object = 0;
2012 }
2013
2014 if (vd->vdev_ms != NULL) {
2015 for (int m = 0; m < vd->vdev_ms_count; m++) {
2016 metaslab_t *msp = vd->vdev_ms[m];
2017
2018 if (msp == NULL || msp->ms_smo.smo_object == 0)
2019 continue;
2020
2021 ASSERT0(msp->ms_smo.smo_alloc);
2022 (void) dmu_object_free(mos, msp->ms_smo.smo_object, tx);
2023 msp->ms_smo.smo_object = 0;
2024 }
2025 }
2026
2027 if (vd->vdev_ms_array) {
2028 (void) dmu_object_free(mos, vd->vdev_ms_array, tx);
2029 vd->vdev_ms_array = 0;
2030 vd->vdev_ms_shift = 0;
2031 }
2032 dmu_tx_commit(tx);
2033}
2034
2035void
2036vdev_sync_done(vdev_t *vd, uint64_t txg)
2037{
2038 metaslab_t *msp;
2039 boolean_t reassess = !txg_list_empty(&vd->vdev_ms_list, TXG_CLEAN(txg));
2040
2041 ASSERT(!vd->vdev_ishole);
2042
2043 while ((msp = txg_list_remove(&vd->vdev_ms_list, TXG_CLEAN(txg))) != NULL)
2044 metaslab_sync_done(msp, txg);
2045
2046 if (reassess)
2047 metaslab_sync_reassess(vd->vdev_mg);
2048}
2049
2050void
2051vdev_sync(vdev_t *vd, uint64_t txg)
2052{
2053 spa_t *spa = vd->vdev_spa;
2054 vdev_t *lvd;
2055 metaslab_t *msp;
2056 dmu_tx_t *tx;
2057
2058 ASSERT(!vd->vdev_ishole);
2059
2060 if (vd->vdev_ms_array == 0 && vd->vdev_ms_shift != 0) {
2061 ASSERT(vd == vd->vdev_top);
2062 tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
2063 vd->vdev_ms_array = dmu_object_alloc(spa->spa_meta_objset,
2064 DMU_OT_OBJECT_ARRAY, 0, DMU_OT_NONE, 0, tx);
2065 ASSERT(vd->vdev_ms_array != 0);
2066 vdev_config_dirty(vd);
2067 dmu_tx_commit(tx);
2068 }
2069
2070 /*
2071 * Remove the metadata associated with this vdev once it's empty.
2072 */
2073 if (vd->vdev_stat.vs_alloc == 0 && vd->vdev_removing)
2074 vdev_remove(vd, txg);
2075
2076 while ((msp = txg_list_remove(&vd->vdev_ms_list, txg)) != NULL) {
2077 metaslab_sync(msp, txg);
2078 (void) txg_list_add(&vd->vdev_ms_list, msp, TXG_CLEAN(txg));
2079 }
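	/*
	 * Each metaslab synced above is queued on the TXG_CLEAN(txg)
	 * list so that vdev_sync_done() can run metaslab_sync_done() on
	 * it when the txg commits; the vdev itself is re-queued the same
	 * way below.
	 */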
2080
2081 while ((lvd = txg_list_remove(&vd->vdev_dtl_list, txg)) != NULL)
2082 vdev_dtl_sync(lvd, txg);
2083
2084 (void) txg_list_add(&spa->spa_vdev_txg_list, vd, TXG_CLEAN(txg));
2085}
2086
2087uint64_t
2088vdev_psize_to_asize(vdev_t *vd, uint64_t psize)
2089{
2090 return (vd->vdev_ops->vdev_op_asize(vd, psize));
2091}
2092
2093/*
2094 * Mark the given vdev faulted. A faulted vdev behaves as if the device could
2095 * not be opened, and no I/O is attempted.
2096 */
2097int
2098vdev_fault(spa_t *spa, uint64_t guid, vdev_aux_t aux)
2099{
2100 vdev_t *vd, *tvd;
2101
2102 spa_vdev_state_enter(spa, SCL_NONE);
2103
2104 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
2105 return (spa_vdev_state_exit(spa, NULL, ENODEV));
2106
2107 if (!vd->vdev_ops->vdev_op_leaf)
2108 return (spa_vdev_state_exit(spa, NULL, ENOTSUP));
2109
2110 tvd = vd->vdev_top;
2111
2112 /*
2113 * We don't directly use the aux state here, but if we do a
2114 * vdev_reopen(), we need this value to be present to remember why we
2115 * were faulted.
2116 */
2117 vd->vdev_label_aux = aux;
2118
2119 /*
2120 * Faulted state takes precedence over degraded.
2121 */
2122 vd->vdev_delayed_close = B_FALSE;
2123 vd->vdev_faulted = 1ULL;
2124 vd->vdev_degraded = 0ULL;
2125 vdev_set_state(vd, B_FALSE, VDEV_STATE_FAULTED, aux);
2126
2127 /*
2128 * If this device has the only valid copy of the data, then
2129 * back off and simply mark the vdev as degraded instead.
2130 */
2131 if (!tvd->vdev_islog && vd->vdev_aux == NULL && vdev_dtl_required(vd)) {
2132 vd->vdev_degraded = 1ULL;
2133 vd->vdev_faulted = 0ULL;
2134
2135 /*
2136 * If we reopen the device and it's not dead, only then do we
2137 * mark it degraded.
2138 */
2139 vdev_reopen(tvd);
2140
2141 if (vdev_readable(vd))
2142 vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED, aux);
2143 }
2144
2145 return (spa_vdev_state_exit(spa, vd, 0));
2146}
2147
2148/*
2149 * Mark the given vdev degraded. A degraded vdev is purely an indication to the
2150 * user that something is wrong. The vdev continues to operate as normal as far
2151 * as I/O is concerned.
2152 */
2153int
2154vdev_degrade(spa_t *spa, uint64_t guid, vdev_aux_t aux)
2155{
2156 vdev_t *vd;
2157
2158 spa_vdev_state_enter(spa, SCL_NONE);
2159
2160 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
2161 return (spa_vdev_state_exit(spa, NULL, ENODEV));
2162
2163 if (!vd->vdev_ops->vdev_op_leaf)
2164 return (spa_vdev_state_exit(spa, NULL, ENOTSUP));
2165
2166 /*
2167 * If the vdev is already faulted, then don't do anything.
2168 */
2169 if (vd->vdev_faulted || vd->vdev_degraded)
2170 return (spa_vdev_state_exit(spa, NULL, 0));
2171
2172 vd->vdev_degraded = 1ULL;
2173 if (!vdev_is_dead(vd))
2174 vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED,
2175 aux);
2176
2177 return (spa_vdev_state_exit(spa, vd, 0));
2178}
2179
2180/*
2181 * Online the given vdev. If 'unspare' is set, it implies two things. First,
2182 * any attached spare device should be detached when the device finishes
2183 * resilvering. Second, the online should be treated like a 'test' online case,
2184 * so no FMA events are generated if the device fails to open.
2185 */
2186int
2187vdev_online(spa_t *spa, uint64_t guid, uint64_t flags, vdev_state_t *newstate)
2188{
2189 vdev_t *vd, *tvd, *pvd, *rvd = spa->spa_root_vdev;
2190
2191 spa_vdev_state_enter(spa, SCL_NONE);
2192
2193 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
2194 return (spa_vdev_state_exit(spa, NULL, ENODEV));
2195
2196 if (!vd->vdev_ops->vdev_op_leaf)
2197 return (spa_vdev_state_exit(spa, NULL, ENOTSUP));
2198
2199 tvd = vd->vdev_top;
2200 vd->vdev_offline = B_FALSE;
2201 vd->vdev_tmpoffline = B_FALSE;
2202 vd->vdev_checkremove = !!(flags & ZFS_ONLINE_CHECKREMOVE);
2203 vd->vdev_forcefault = !!(flags & ZFS_ONLINE_FORCEFAULT);
2204
2205 /* XXX - L2ARC 1.0 does not support expansion */
2206 if (!vd->vdev_aux) {
2207 for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent)
2208 pvd->vdev_expanding = !!(flags & ZFS_ONLINE_EXPAND);
2209 }
2210
2211 vdev_reopen(tvd);
2212 vd->vdev_checkremove = vd->vdev_forcefault = B_FALSE;
2213
2214 if (!vd->vdev_aux) {
2215 for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent)
2216 pvd->vdev_expanding = B_FALSE;
2217 }
2218
2219 if (newstate)
2220 *newstate = vd->vdev_state;
2221 if ((flags & ZFS_ONLINE_UNSPARE) &&
2222 !vdev_is_dead(vd) && vd->vdev_parent &&
2223 vd->vdev_parent->vdev_ops == &vdev_spare_ops &&
2224 vd->vdev_parent->vdev_child[0] == vd)
2225 vd->vdev_unspare = B_TRUE;
2226
2227 if ((flags & ZFS_ONLINE_EXPAND) || spa->spa_autoexpand) {
2228
2229 /* XXX - L2ARC 1.0 does not support expansion */
2230 if (vd->vdev_aux)
2231 return (spa_vdev_state_exit(spa, vd, ENOTSUP));
2232 spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
2233 }
2234 return (spa_vdev_state_exit(spa, vd, 0));
2235}
2236
2237static int
2238vdev_offline_locked(spa_t *spa, uint64_t guid, uint64_t flags)
2239{
2240 vdev_t *vd, *tvd;
2241 int error = 0;
2242 uint64_t generation;
2243 metaslab_group_t *mg;
2244
2245top:
2246 spa_vdev_state_enter(spa, SCL_ALLOC);
2247
2248 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
2249 return (spa_vdev_state_exit(spa, NULL, ENODEV));
2250
2251 if (!vd->vdev_ops->vdev_op_leaf)
2252 return (spa_vdev_state_exit(spa, NULL, ENOTSUP));
2253
2254 tvd = vd->vdev_top;
2255 mg = tvd->vdev_mg;
2256 generation = spa->spa_config_generation + 1;
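	/*
	 * The config generation is sampled here so that, after the state
	 * lock is dropped and retaken around spa_offline_log() below, a
	 * concurrent configuration change can be detected and the whole
	 * operation retried from 'top'.
	 */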
2257
2258 /*
2259 * If the device isn't already offline, try to offline it.
2260 */
2261 if (!vd->vdev_offline) {
2262 /*
2263 * If this device has the only valid copy of some data,
2264 * don't allow it to be offlined. Log devices are always
2265 * expendable.
2266 */
2267 if (!tvd->vdev_islog && vd->vdev_aux == NULL &&
2268 vdev_dtl_required(vd))
2269 return (spa_vdev_state_exit(spa, NULL, EBUSY));
2270
2271 /*
2272 * If the top-level is a slog and it has had allocations
2273 * then proceed. We check that the vdev's metaslab group
2274 * is not NULL since it's possible that we may have just
2275 * added this vdev but not yet initialized its metaslabs.
2276 */
2277 if (tvd->vdev_islog && mg != NULL) {
2278 /*
2279 * Prevent any future allocations.
2280 */
2281 metaslab_group_passivate(mg);
2282 (void) spa_vdev_state_exit(spa, vd, 0);
2283
2284 error = spa_offline_log(spa);
2285
2286 spa_vdev_state_enter(spa, SCL_ALLOC);
2287
2288 /*
2289 * Check to see if the config has changed.
2290 */
2291 if (error || generation != spa->spa_config_generation) {
2292 metaslab_group_activate(mg);
2293 if (error)
2294 return (spa_vdev_state_exit(spa,
2295 vd, error));
2296 (void) spa_vdev_state_exit(spa, vd, 0);
2297 goto top;
2298 }
2299 ASSERT0(tvd->vdev_stat.vs_alloc);
2300 }
2301
2302 /*
2303 * Offline this device and reopen its top-level vdev.
2304 * If the top-level vdev is a log device then just offline
2305 * it. Otherwise, if this action results in the top-level
2306 * vdev becoming unusable, undo it and fail the request.
2307 */
2308 vd->vdev_offline = B_TRUE;
2309 vdev_reopen(tvd);
2310
2311 if (!tvd->vdev_islog && vd->vdev_aux == NULL &&
2312 vdev_is_dead(tvd)) {
2313 vd->vdev_offline = B_FALSE;
2314 vdev_reopen(tvd);
2315 return (spa_vdev_state_exit(spa, NULL, EBUSY));
2316 }
2317
2318 /*
2319 * Add the device back into the metaslab rotor so that
2320 * once we online the device it's open for business.
2321 */
2322 if (tvd->vdev_islog && mg != NULL)
2323 metaslab_group_activate(mg);
2324 }
2325
2326 vd->vdev_tmpoffline = !!(flags & ZFS_OFFLINE_TEMPORARY);
2327
2328 return (spa_vdev_state_exit(spa, vd, 0));
2329}
2330
2331int
2332vdev_offline(spa_t *spa, uint64_t guid, uint64_t flags)
2333{
2334 int error;
2335
2336 mutex_enter(&spa->spa_vdev_top_lock);
2337 error = vdev_offline_locked(spa, guid, flags);
2338 mutex_exit(&spa->spa_vdev_top_lock);
2339
2340 return (error);
2341}
2342
2343/*
2344 * Clear the error counts associated with this vdev. Unlike vdev_online() and
2345 * vdev_offline(), we assume the spa config is locked. We also clear all
2346 * children. If 'vd' is NULL, then the user wants to clear all vdevs.
2347 */
2348void
2349vdev_clear(spa_t *spa, vdev_t *vd)
2350{
2351 vdev_t *rvd = spa->spa_root_vdev;
2352
2353 ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
2354
2355 if (vd == NULL)
2356 vd = rvd;
2357
2358 vd->vdev_stat.vs_read_errors = 0;
2359 vd->vdev_stat.vs_write_errors = 0;
2360 vd->vdev_stat.vs_checksum_errors = 0;
2361
2362 for (int c = 0; c < vd->vdev_children; c++)
2363 vdev_clear(spa, vd->vdev_child[c]);
2364
2365 /*
2366 * If we're in the FAULTED state or have experienced failed I/O, then
2367 * clear the persistent state and attempt to reopen the device. We
2368 * also mark the vdev config dirty, so that the new faulted state is
2369 * written out to disk.
2370 */
2371 if (vd->vdev_faulted || vd->vdev_degraded ||
2372 !vdev_readable(vd) || !vdev_writeable(vd)) {
2373
2374 /*
2375 * When reopening in response to a clear event, it may be due to
2376 * a fmadm repair request. In this case, if the device is
2377 * still broken, we want to still post the ereport again.
2378 */
2379 vd->vdev_forcefault = B_TRUE;
2380
2381 vd->vdev_faulted = vd->vdev_degraded = 0ULL;
2382 vd->vdev_cant_read = B_FALSE;
2383 vd->vdev_cant_write = B_FALSE;
2384
2385 vdev_reopen(vd == rvd ? rvd : vd->vdev_top);
2386
2387 vd->vdev_forcefault = B_FALSE;
2388
2389 if (vd != rvd && vdev_writeable(vd->vdev_top))
2390 vdev_state_dirty(vd->vdev_top);
2391
2392 if (vd->vdev_aux == NULL && !vdev_is_dead(vd))
2393 spa_async_request(spa, SPA_ASYNC_RESILVER);
2394
2395 spa_event_notify(spa, vd, ESC_ZFS_VDEV_CLEAR);
2396 }
2397
2398 /*
2399 * When clearing a FMA-diagnosed fault, we always want to
2400 * unspare the device, as we assume that the original spare was
2401 * done in response to the FMA fault.
2402 */
2403 if (!vdev_is_dead(vd) && vd->vdev_parent != NULL &&
2404 vd->vdev_parent->vdev_ops == &vdev_spare_ops &&
2405 vd->vdev_parent->vdev_child[0] == vd)
2406 vd->vdev_unspare = B_TRUE;
2407}
2408
2409boolean_t
2410vdev_is_dead(vdev_t *vd)
2411{
2412 /*
2413 * Holes and missing devices are always considered "dead".
2414 * This simplifies the code since we don't have to check for
2415 * these types of devices in the various code paths.
2416 * Instead we rely on the fact that we skip over dead devices
2417 * before issuing I/O to them.
2418 */
2419 return (vd->vdev_state < VDEV_STATE_DEGRADED || vd->vdev_ishole ||
2420 vd->vdev_ops == &vdev_missing_ops);
2421}
2422
2423boolean_t
2424vdev_readable(vdev_t *vd)
2425{
2426 return (!vdev_is_dead(vd) && !vd->vdev_cant_read);
2427}
2428
2429boolean_t
2430vdev_writeable(vdev_t *vd)
2431{
2432 return (!vdev_is_dead(vd) && !vd->vdev_cant_write);
2433}
2434
2435boolean_t
2436vdev_allocatable(vdev_t *vd)
2437{
2438 uint64_t state = vd->vdev_state;
2439
2440 /*
2441 * We currently allow allocations from vdevs which may be in the
2442 * process of reopening (i.e. VDEV_STATE_CLOSED). If the device
2443 * fails to reopen then we'll catch it later when we're holding
2444 * the proper locks. Note that we have to get the vdev state
2445 * in a local variable because although it changes atomically,
2446 * we're asking two separate questions about it.
2447 */
2448 return (!(state < VDEV_STATE_DEGRADED && state != VDEV_STATE_CLOSED) &&
2449 !vd->vdev_cant_write && !vd->vdev_ishole);
2450}
2451
2452boolean_t
2453vdev_accessible(vdev_t *vd, zio_t *zio)
2454{
2455 ASSERT(zio->io_vd == vd);
2456
2457 if (vdev_is_dead(vd) || vd->vdev_remove_wanted)
2458 return (B_FALSE);
2459
2460 if (zio->io_type == ZIO_TYPE_READ)
2461 return (!vd->vdev_cant_read);
2462
2463 if (zio->io_type == ZIO_TYPE_WRITE)
2464 return (!vd->vdev_cant_write);
2465
2466 return (B_TRUE);
2467}
2468
2469/*
2470 * Get statistics for the given vdev.
2471 */
2472void
2473vdev_get_stats(vdev_t *vd, vdev_stat_t *vs)
2474{
2475 vdev_t *rvd = vd->vdev_spa->spa_root_vdev;
2476
2477 mutex_enter(&vd->vdev_stat_lock);
2478 bcopy(&vd->vdev_stat, vs, sizeof (*vs));
2479 vs->vs_timestamp = gethrtime() - vs->vs_timestamp;
2480 vs->vs_state = vd->vdev_state;
2481 vs->vs_rsize = vdev_get_min_asize(vd);
2482 if (vd->vdev_ops->vdev_op_leaf)
2483 vs->vs_rsize += VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE;
2484 vs->vs_esize = vd->vdev_max_asize - vd->vdev_asize;
2485 mutex_exit(&vd->vdev_stat_lock);
2486
2487 /*
2488 * If we're getting stats on the root vdev, aggregate the I/O counts
2489 * over all top-level vdevs (i.e. the direct children of the root).
2490 */
2491 if (vd == rvd) {
2492 for (int c = 0; c < rvd->vdev_children; c++) {
2493 vdev_t *cvd = rvd->vdev_child[c];
2494 vdev_stat_t *cvs = &cvd->vdev_stat;
2495
2496 mutex_enter(&vd->vdev_stat_lock);
2497 for (int t = 0; t < ZIO_TYPES; t++) {
2498 vs->vs_ops[t] += cvs->vs_ops[t];
2499 vs->vs_bytes[t] += cvs->vs_bytes[t];
2500 }
2501 cvs->vs_scan_removing = cvd->vdev_removing;
2502 mutex_exit(&vd->vdev_stat_lock);
2503 }
2504 }
2505}
2506
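/*
 * Reset the in-core space accounting for this vdev.
 */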
2507void
2508vdev_clear_stats(vdev_t *vd)
2509{
2510 mutex_enter(&vd->vdev_stat_lock);
2511 vd->vdev_stat.vs_space = 0;
2512 vd->vdev_stat.vs_dspace = 0;
2513 vd->vdev_stat.vs_alloc = 0;
2514 mutex_exit(&vd->vdev_stat_lock);
2515}
2516
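/*
 * Recursively zero the scan-processed counters in preparation for
 * a new scrub or resilver pass.
 */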
2517void
2518vdev_scan_stat_init(vdev_t *vd)
2519{
2520 vdev_stat_t *vs = &vd->vdev_stat;
2521
2522 for (int c = 0; c < vd->vdev_children; c++)
2523 vdev_scan_stat_init(vd->vdev_child[c]);
2524
2525 mutex_enter(&vd->vdev_stat_lock);
2526 vs->vs_scan_processed = 0;
2527 mutex_exit(&vd->vdev_stat_lock);
2528}
2529
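/*
 * Update I/O and error statistics for a completed zio, and record
 * failed writes in the appropriate DTLs.
 */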
2530void
2531vdev_stat_update(zio_t *zio, uint64_t psize)
2532{
2533 spa_t *spa = zio->io_spa;
2534 vdev_t *rvd = spa->spa_root_vdev;
2535 vdev_t *vd = zio->io_vd ? zio->io_vd : rvd;
2536 vdev_t *pvd;
2537 uint64_t txg = zio->io_txg;
2538 vdev_stat_t *vs = &vd->vdev_stat;
2539 zio_type_t type = zio->io_type;
2540 int flags = zio->io_flags;
2541
2542 /*
2543 * If this i/o is a gang leader, it didn't do any actual work.
2544 */
2545 if (zio->io_gang_tree)
2546 return;
2547
2548 if (zio->io_error == 0) {
2549 /*
2550 * If this is a root i/o, don't count it -- we've already
2551 * counted the top-level vdevs, and vdev_get_stats() will
2552 * aggregate them when asked. This reduces contention on
2553 * the root vdev_stat_lock and implicitly handles blocks
2554 * that compress away to holes, for which there is no i/o.
2555 * (Holes never create vdev children, so all the counters
2556 * remain zero, which is what we want.)
2557 *
2558 * Note: this only applies to successful i/o (io_error == 0)
2559 * because unlike i/o counts, errors are not additive.
2560 * When reading a ditto block, for example, failure of
2561 * one top-level vdev does not imply a root-level error.
2562 */
2563 if (vd == rvd)
2564 return;
2565
2566 ASSERT(vd == zio->io_vd);
2567
2568 if (flags & ZIO_FLAG_IO_BYPASS)
2569 return;
2570
2571 mutex_enter(&vd->vdev_stat_lock);
2572
2573 if (flags & ZIO_FLAG_IO_REPAIR) {
2574 if (flags & ZIO_FLAG_SCAN_THREAD) {
2575 dsl_scan_phys_t *scn_phys =
2576 &spa->spa_dsl_pool->dp_scan->scn_phys;
2577 uint64_t *processed = &scn_phys->scn_processed;
2578
2579 /* XXX cleanup? */
2580 if (vd->vdev_ops->vdev_op_leaf)
2581 atomic_add_64(processed, psize);
2582 vs->vs_scan_processed += psize;
2583 }
2584
2585 if (flags & ZIO_FLAG_SELF_HEAL)
2586 vs->vs_self_healed += psize;
2587 }
2588
2589 vs->vs_ops[type]++;
2590 vs->vs_bytes[type] += psize;
2591
2592 mutex_exit(&vd->vdev_stat_lock);
2593 return;
2594 }
2595
2596 if (flags & ZIO_FLAG_SPECULATIVE)
2597 return;
2598
2599 /*
2600 * If this is an I/O error that is going to be retried, then ignore the
2601 * error. Otherwise, the user may interpret B_FAILFAST I/O errors as
2602 * hard errors, when in reality they can happen for any number of
2603 * innocuous reasons (bus resets, MPxIO link failure, etc).
2604 */
2605 if (zio->io_error == EIO &&
2606 !(zio->io_flags & ZIO_FLAG_IO_RETRY))
2607 return;
2608
2609 /*
2610	 * Intent log writes won't propagate their error to the root
2611	 * I/O, so don't mark these types of failures as pool-level
2612 * errors.
2613 */
2614 if (zio->io_vd == NULL && (zio->io_flags & ZIO_FLAG_DONT_PROPAGATE))
2615 return;
2616
2617 mutex_enter(&vd->vdev_stat_lock);
2618 if (type == ZIO_TYPE_READ && !vdev_is_dead(vd)) {
2619 if (zio->io_error == ECKSUM)
2620 vs->vs_checksum_errors++;
2621 else
2622 vs->vs_read_errors++;
2623 }
2624 if (type == ZIO_TYPE_WRITE && !vdev_is_dead(vd))
2625 vs->vs_write_errors++;
2626 mutex_exit(&vd->vdev_stat_lock);
2627
2628 if (type == ZIO_TYPE_WRITE && txg != 0 &&
2629 (!(flags & ZIO_FLAG_IO_REPAIR) ||
2630 (flags & ZIO_FLAG_SCAN_THREAD) ||
2631 spa->spa_claiming)) {
2632 /*
2633 * This is either a normal write (not a repair), or it's
2634 * a repair induced by the scrub thread, or it's a repair
2635 * made by zil_claim() during spa_load() in the first txg.
2636 * In the normal case, we commit the DTL change in the same
2637 * txg as the block was born. In the scrub-induced repair
2638 * case, we know that scrubs run in first-pass syncing context,
2639 * so we commit the DTL change in spa_syncing_txg(spa).
2640 * In the zil_claim() case, we commit in spa_first_txg(spa).
2641 *
2642 * We currently do not make DTL entries for failed spontaneous
2643 * self-healing writes triggered by normal (non-scrubbing)
2644 * reads, because we have no transactional context in which to
2645 * do so -- and it's not clear that it'd be desirable anyway.
2646 */
2647 if (vd->vdev_ops->vdev_op_leaf) {
2648 uint64_t commit_txg = txg;
2649 if (flags & ZIO_FLAG_SCAN_THREAD) {
2650 ASSERT(flags & ZIO_FLAG_IO_REPAIR);
2651 ASSERT(spa_sync_pass(spa) == 1);
2652 vdev_dtl_dirty(vd, DTL_SCRUB, txg, 1);
2653 commit_txg = spa_syncing_txg(spa);
2654 } else if (spa->spa_claiming) {
2655 ASSERT(flags & ZIO_FLAG_IO_REPAIR);
2656 commit_txg = spa_first_txg(spa);
2657 }
2658 ASSERT(commit_txg >= spa_syncing_txg(spa));
2659 if (vdev_dtl_contains(vd, DTL_MISSING, txg, 1))
2660 return;
2661 for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent)
2662 vdev_dtl_dirty(pvd, DTL_PARTIAL, txg, 1);
2663 vdev_dirty(vd->vdev_top, VDD_DTL, vd, commit_txg);
2664 }
2665 if (vd != rvd)
2666 vdev_dtl_dirty(vd, DTL_MISSING, txg, 1);
2667 }
2668}
2669
2670/*
2671 * Update the in-core space usage stats for this vdev, its metaslab class,
2672 * and the root vdev.
2673 */
2674void
2675vdev_space_update(vdev_t *vd, int64_t alloc_delta, int64_t defer_delta,
2676 int64_t space_delta)
2677{
2678 int64_t dspace_delta = space_delta;
2679 spa_t *spa = vd->vdev_spa;
2680 vdev_t *rvd = spa->spa_root_vdev;
2681 metaslab_group_t *mg = vd->vdev_mg;
2682 metaslab_class_t *mc = mg ? mg->mg_class : NULL;
2683
2684 ASSERT(vd == vd->vdev_top);
2685
2686 /*
2687	 * Apply the inverse of the psize-to-asize (i.e. RAID-Z) space-expansion
2688	 * factor.  We must calculate this here and not at the root vdev
2689	 * because the root vdev's psize-to-asize is simply the max of its
2690	 * children's, thus not accurate enough for us.
2691 */
2692 ASSERT((dspace_delta & (SPA_MINBLOCKSIZE-1)) == 0);
2693 ASSERT(vd->vdev_deflate_ratio != 0 || vd->vdev_isl2cache);
2694 dspace_delta = (dspace_delta >> SPA_MINBLOCKSHIFT) *
2695 vd->vdev_deflate_ratio;
2696
2697 mutex_enter(&vd->vdev_stat_lock);
2698 vd->vdev_stat.vs_alloc += alloc_delta;
2699 vd->vdev_stat.vs_space += space_delta;
2700 vd->vdev_stat.vs_dspace += dspace_delta;
2701 mutex_exit(&vd->vdev_stat_lock);
2702
2703 if (mc == spa_normal_class(spa)) {
2704 mutex_enter(&rvd->vdev_stat_lock);
2705 rvd->vdev_stat.vs_alloc += alloc_delta;
2706 rvd->vdev_stat.vs_space += space_delta;
2707 rvd->vdev_stat.vs_dspace += dspace_delta;
2708 mutex_exit(&rvd->vdev_stat_lock);
2709 }
2710
2711 if (mc != NULL) {
2712 ASSERT(rvd == vd->vdev_parent);
2713 ASSERT(vd->vdev_ms_count != 0);
2714
2715 metaslab_class_space_update(mc,
2716 alloc_delta, defer_delta, space_delta, dspace_delta);
2717 }
2718}
2719
2720/*
2721 * Mark a top-level vdev's config as dirty, placing it on the dirty list
2722 * so that it will be written out next time the vdev configuration is synced.
2723 * If the root vdev is specified (vdev_top == NULL), dirty all top-level vdevs.
2724 */
2725void
2726vdev_config_dirty(vdev_t *vd)
2727{
2728 spa_t *spa = vd->vdev_spa;
2729 vdev_t *rvd = spa->spa_root_vdev;
2730 int c;
2731
2732 ASSERT(spa_writeable(spa));
2733
2734 /*
2735 * If this is an aux vdev (as with l2cache and spare devices), then we
2736 * update the vdev config manually and set the sync flag.
2737 */
2738 if (vd->vdev_aux != NULL) {
2739 spa_aux_vdev_t *sav = vd->vdev_aux;
2740 nvlist_t **aux;
2741 uint_t naux;
2742
2743 for (c = 0; c < sav->sav_count; c++) {
2744 if (sav->sav_vdevs[c] == vd)
2745 break;
2746 }
2747
2748 if (c == sav->sav_count) {
2749 /*
2750 * We're being removed. There's nothing more to do.
2751 */
2752 ASSERT(sav->sav_sync == B_TRUE);
2753 return;
2754 }
2755
2756 sav->sav_sync = B_TRUE;
2757
2758 if (nvlist_lookup_nvlist_array(sav->sav_config,
2759 ZPOOL_CONFIG_L2CACHE, &aux, &naux) != 0) {
2760 VERIFY(nvlist_lookup_nvlist_array(sav->sav_config,
2761 ZPOOL_CONFIG_SPARES, &aux, &naux) == 0);
2762 }
2763
2764 ASSERT(c < naux);
2765
2766 /*
2767		 * Setting the nvlist in the middle of the array is a little
2768		 * sketchy, but it will work.
2769 */
2770 nvlist_free(aux[c]);
2771 aux[c] = vdev_config_generate(spa, vd, B_TRUE, 0);
2772
2773 return;
2774 }
2775
2776 /*
2777 * The dirty list is protected by the SCL_CONFIG lock. The caller
2778 * must either hold SCL_CONFIG as writer, or must be the sync thread
2779 * (which holds SCL_CONFIG as reader). There's only one sync thread,
2780 * so this is sufficient to ensure mutual exclusion.
2781 */
2782 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_WRITER) ||
2783 (dsl_pool_sync_context(spa_get_dsl(spa)) &&
2784 spa_config_held(spa, SCL_CONFIG, RW_READER)));
2785
2786 if (vd == rvd) {
2787 for (c = 0; c < rvd->vdev_children; c++)
2788 vdev_config_dirty(rvd->vdev_child[c]);
2789 } else {
2790 ASSERT(vd == vd->vdev_top);
2791
2792 if (!list_link_active(&vd->vdev_config_dirty_node) &&
2793 !vd->vdev_ishole)
2794 list_insert_head(&spa->spa_config_dirty_list, vd);
2795 }
2796}
2797
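/*
 * Remove a top-level vdev from the dirty config list; the inverse of
 * vdev_config_dirty().
 */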
2798void
2799vdev_config_clean(vdev_t *vd)
2800{
2801 spa_t *spa = vd->vdev_spa;
2802
2803 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_WRITER) ||
2804 (dsl_pool_sync_context(spa_get_dsl(spa)) &&
2805 spa_config_held(spa, SCL_CONFIG, RW_READER)));
2806
2807 ASSERT(list_link_active(&vd->vdev_config_dirty_node));
2808 list_remove(&spa->spa_config_dirty_list, vd);
2809}
2810
2811/*
2812 * Mark a top-level vdev's state as dirty, so that the next pass of
2813 * spa_sync() can convert this into vdev_config_dirty(). We distinguish
2814 * the state changes from larger config changes because they require
2815 * much less locking, and are often needed for administrative actions.
2816 */
2817void
2818vdev_state_dirty(vdev_t *vd)
2819{
2820 spa_t *spa = vd->vdev_spa;
2821
2822 ASSERT(spa_writeable(spa));
2823 ASSERT(vd == vd->vdev_top);
2824
2825 /*
2826 * The state list is protected by the SCL_STATE lock. The caller
2827 * must either hold SCL_STATE as writer, or must be the sync thread
2828 * (which holds SCL_STATE as reader). There's only one sync thread,
2829 * so this is sufficient to ensure mutual exclusion.
2830 */
2831 ASSERT(spa_config_held(spa, SCL_STATE, RW_WRITER) ||
2832 (dsl_pool_sync_context(spa_get_dsl(spa)) &&
2833 spa_config_held(spa, SCL_STATE, RW_READER)));
2834
2835 if (!list_link_active(&vd->vdev_state_dirty_node) && !vd->vdev_ishole)
2836 list_insert_head(&spa->spa_state_dirty_list, vd);
2837}
2838
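/*
 * Remove a top-level vdev from the dirty state list; the inverse of
 * vdev_state_dirty().
 */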
2839void
2840vdev_state_clean(vdev_t *vd)
2841{
2842 spa_t *spa = vd->vdev_spa;
2843
2844 ASSERT(spa_config_held(spa, SCL_STATE, RW_WRITER) ||
2845 (dsl_pool_sync_context(spa_get_dsl(spa)) &&
2846 spa_config_held(spa, SCL_STATE, RW_READER)));
2847
2848 ASSERT(list_link_active(&vd->vdev_state_dirty_node));
2849 list_remove(&spa->spa_state_dirty_list, vd);
2850}
2851
2852/*
2853 * Propagate vdev state up from children to parent.
2854 */
2855void
2856vdev_propagate_state(vdev_t *vd)
2857{
2858 spa_t *spa = vd->vdev_spa;
2859 vdev_t *rvd = spa->spa_root_vdev;
2860 int degraded = 0, faulted = 0;
2861 int corrupted = 0;
2862 vdev_t *child;
2863
2864 if (vd->vdev_children > 0) {
2865 for (int c = 0; c < vd->vdev_children; c++) {
2866 child = vd->vdev_child[c];
2867
2868 /*
2869 * Don't factor holes into the decision.
2870 */
2871 if (child->vdev_ishole)
2872 continue;
2873
2874 if (!vdev_readable(child) ||
2875 (!vdev_writeable(child) && spa_writeable(spa))) {
2876 /*
2877 * Root special: if there is a top-level log
2878 * device, treat the root vdev as if it were
2879 * degraded.
2880 */
2881 if (child->vdev_islog && vd == rvd)
2882 degraded++;
2883 else
2884 faulted++;
2885 } else if (child->vdev_state <= VDEV_STATE_DEGRADED) {
2886 degraded++;
2887 }
2888
2889 if (child->vdev_stat.vs_aux == VDEV_AUX_CORRUPT_DATA)
2890 corrupted++;
2891 }
2892
2893 vd->vdev_ops->vdev_op_state_change(vd, faulted, degraded);
2894
2895 /*
2896 * Root special: if there is a top-level vdev that cannot be
2897 * opened due to corrupted metadata, then propagate the root
2898 * vdev's aux state as 'corrupt' rather than 'insufficient
2899 * replicas'.
2900 */
2901 if (corrupted && vd == rvd &&
2902 rvd->vdev_state == VDEV_STATE_CANT_OPEN)
2903 vdev_set_state(rvd, B_FALSE, VDEV_STATE_CANT_OPEN,
2904 VDEV_AUX_CORRUPT_DATA);
2905 }
2906
2907 if (vd->vdev_parent)
2908 vdev_propagate_state(vd->vdev_parent);
2909}
2910
2911/*
2912 * Set a vdev's state. If this is during an open, we don't update the parent
2913 * state, because we're in the process of opening children depth-first.
2914 * Otherwise, we propagate the change to the parent.
2915 *
2916 * If this routine places a device in a faulted state, an appropriate ereport is
2917 * generated.
2918 */
2919void
2920vdev_set_state(vdev_t *vd, boolean_t isopen, vdev_state_t state, vdev_aux_t aux)
2921{
2922 uint64_t save_state;
2923 spa_t *spa = vd->vdev_spa;
2924
2925 if (state == vd->vdev_state) {
2926 vd->vdev_stat.vs_aux = aux;
2927 return;
2928 }
2929
2930 save_state = vd->vdev_state;
2931
2932 vd->vdev_state = state;
2933 vd->vdev_stat.vs_aux = aux;
2934
2935 /*
2936 * If we are setting the vdev state to anything but an open state, then
2937 * always close the underlying device unless the device has requested
2938 * a delayed close (i.e. we're about to remove or fault the device).
2939 * Otherwise, we keep accessible but invalid devices open forever.
2940 * We don't call vdev_close() itself, because that implies some extra
2941 * checks (offline, etc) that we don't want here. This is limited to
2942 * leaf devices, because otherwise closing the device will affect other
2943 * children.
2944 */
2945 if (!vd->vdev_delayed_close && vdev_is_dead(vd) &&
2946 vd->vdev_ops->vdev_op_leaf)
2947 vd->vdev_ops->vdev_op_close(vd);
2948
2949 /*
2950 * If we have brought this vdev back into service, we need
2951 * to notify fmd so that it can gracefully repair any outstanding
2952 * cases due to a missing device. We do this in all cases, even those
2953 * that probably don't correlate to a repaired fault. This is sure to
2954 * catch all cases, and we let the zfs-retire agent sort it out. If
2955 * this is a transient state it's OK, as the retire agent will
2956 * double-check the state of the vdev before repairing it.
2957 */
2958 if (state == VDEV_STATE_HEALTHY && vd->vdev_ops->vdev_op_leaf &&
2959 vd->vdev_prevstate != state)
2960 zfs_post_state_change(spa, vd);
2961
2962 if (vd->vdev_removed &&
2963 state == VDEV_STATE_CANT_OPEN &&
2964 (aux == VDEV_AUX_OPEN_FAILED || vd->vdev_checkremove)) {
2965 /*
2966 * If the previous state is set to VDEV_STATE_REMOVED, then this
2967 * device was previously marked removed and someone attempted to
2968 * reopen it. If this failed due to a nonexistent device, then
2969		 * keep the device in the REMOVED state.  We also leave the state
2970		 * alone if this is one of our special test-online cases, which
2971		 * only attempt to online the device and shouldn't generate an
2972		 * FMA fault.
2973 */
2974 vd->vdev_state = VDEV_STATE_REMOVED;
2975 vd->vdev_stat.vs_aux = VDEV_AUX_NONE;
2976 } else if (state == VDEV_STATE_REMOVED) {
2977 vd->vdev_removed = B_TRUE;
2978 } else if (state == VDEV_STATE_CANT_OPEN) {
2979 /*
2980 * If we fail to open a vdev during an import or recovery, we
2981 * mark it as "not available", which signifies that it was
2982 * never there to begin with. Failure to open such a device
2983 * is not considered an error.
2984 */
2985 if ((spa_load_state(spa) == SPA_LOAD_IMPORT ||
2986 spa_load_state(spa) == SPA_LOAD_RECOVER) &&
2987 vd->vdev_ops->vdev_op_leaf)
2988 vd->vdev_not_present = 1;
2989
2990 /*
2991 * Post the appropriate ereport. If the 'prevstate' field is
2992 * set to something other than VDEV_STATE_UNKNOWN, it indicates
2993 * that this is part of a vdev_reopen(). In this case, we don't
2994 * want to post the ereport if the device was already in the
2995 * CANT_OPEN state beforehand.
2996 *
2997 * If the 'checkremove' flag is set, then this is an attempt to
2998 * online the device in response to an insertion event. If we
2999 * hit this case, then we have detected an insertion event for a
3000 * faulted or offline device that wasn't in the removed state.
3001 * In this scenario, we don't post an ereport because we are
3002 * about to replace the device, or attempt an online with
3003 * vdev_forcefault, which will generate the fault for us.
3004 */
3005 if ((vd->vdev_prevstate != state || vd->vdev_forcefault) &&
3006 !vd->vdev_not_present && !vd->vdev_checkremove &&
3007 vd != spa->spa_root_vdev) {
3008 const char *class;
3009
3010 switch (aux) {
3011 case VDEV_AUX_OPEN_FAILED:
3012 class = FM_EREPORT_ZFS_DEVICE_OPEN_FAILED;
3013 break;
3014 case VDEV_AUX_CORRUPT_DATA:
3015 class = FM_EREPORT_ZFS_DEVICE_CORRUPT_DATA;
3016 break;
3017 case VDEV_AUX_NO_REPLICAS:
3018 class = FM_EREPORT_ZFS_DEVICE_NO_REPLICAS;
3019 break;
3020 case VDEV_AUX_BAD_GUID_SUM:
3021 class = FM_EREPORT_ZFS_DEVICE_BAD_GUID_SUM;
3022 break;
3023 case VDEV_AUX_TOO_SMALL:
3024 class = FM_EREPORT_ZFS_DEVICE_TOO_SMALL;
3025 break;
3026 case VDEV_AUX_BAD_LABEL:
3027 class = FM_EREPORT_ZFS_DEVICE_BAD_LABEL;
3028 break;
3029 default:
3030 class = FM_EREPORT_ZFS_DEVICE_UNKNOWN;
3031 }
3032
3033 zfs_ereport_post(class, spa, vd, NULL, save_state, 0);
3034 }
3035
3036 /* Erase any notion of persistent removed state */
3037 vd->vdev_removed = B_FALSE;
3038 } else {
3039 vd->vdev_removed = B_FALSE;
3040 }
3041
3042 if (!isopen && vd->vdev_parent)
3043 vdev_propagate_state(vd->vdev_parent);
3044}
3045
3046/*
3047 * Check the vdev configuration to ensure that it's capable of supporting
3048 * a root pool.
3049 *
3050 * On Solaris, we do not support RAID-Z or partial configuration. In
3051 * addition, only a single top-level vdev is allowed and none of the
3052 * leaves can be wholedisks.
3053 *
3054 * For FreeBSD, we can boot from any configuration. There is a
3055 * limitation that the boot filesystem must be either uncompressed or
3056 * compressed with lzjb compression, but I'm not sure how to enforce
3057 * that here.
3058 */
3059boolean_t
3060vdev_is_bootable(vdev_t *vd)
3061{
3062#ifdef sun
3063 if (!vd->vdev_ops->vdev_op_leaf) {
3064 char *vdev_type = vd->vdev_ops->vdev_op_type;
3065
3066 if (strcmp(vdev_type, VDEV_TYPE_ROOT) == 0 &&
3067 vd->vdev_children > 1) {
3068 return (B_FALSE);
3069 } else if (strcmp(vdev_type, VDEV_TYPE_RAIDZ) == 0 ||
3070 strcmp(vdev_type, VDEV_TYPE_MISSING) == 0) {
3071 return (B_FALSE);
3072 }
3073 } else if (vd->vdev_wholedisk == 1) {
3074 return (B_FALSE);
3075 }
3076
3077 for (int c = 0; c < vd->vdev_children; c++) {
3078 if (!vdev_is_bootable(vd->vdev_child[c]))
3079 return (B_FALSE);
3080 }
3081#endif /* sun */
3082 return (B_TRUE);
3083}
3084
3085/*
3086 * Load the state from the original vdev tree (ovd) which
3087 * we've retrieved from the MOS config object. If the original
3088 * vdev was offline or faulted then we transfer that state to the
3089 * device in the current vdev tree (nvd).
3090 */
3091void
3092vdev_load_log_state(vdev_t *nvd, vdev_t *ovd)
3093{
3094 spa_t *spa = nvd->vdev_spa;
3095
3096 ASSERT(nvd->vdev_top->vdev_islog);
3097 ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
3098 ASSERT3U(nvd->vdev_guid, ==, ovd->vdev_guid);
3099
3100 for (int c = 0; c < nvd->vdev_children; c++)
3101 vdev_load_log_state(nvd->vdev_child[c], ovd->vdev_child[c]);
3102
3103 if (nvd->vdev_ops->vdev_op_leaf) {
3104 /*
3105 * Restore the persistent vdev state
3106 */
3107 nvd->vdev_offline = ovd->vdev_offline;
3108 nvd->vdev_faulted = ovd->vdev_faulted;
3109 nvd->vdev_degraded = ovd->vdev_degraded;
3110 nvd->vdev_removed = ovd->vdev_removed;
3111 }
3112}
3113
3114/*
3115 * Determine if a log device has valid content. If the vdev was
3116 * removed or faulted in the MOS config then we know that
3117 * the content on the log device has already been written to the pool.
3118 */
3119boolean_t
3120vdev_log_state_valid(vdev_t *vd)
3121{
3122 if (vd->vdev_ops->vdev_op_leaf && !vd->vdev_faulted &&
3123 !vd->vdev_removed)
3124 return (B_TRUE);
3125
3126 for (int c = 0; c < vd->vdev_children; c++)
3127 if (vdev_log_state_valid(vd->vdev_child[c]))
3128 return (B_TRUE);
3129
3130 return (B_FALSE);
3131}
3132
3133/*
3134 * Expand a vdev if possible.
3135 */
3136void
3137vdev_expand(vdev_t *vd, uint64_t txg)
3138{
3139 ASSERT(vd->vdev_top == vd);
3140 ASSERT(spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);
3141
3142 if ((vd->vdev_asize >> vd->vdev_ms_shift) > vd->vdev_ms_count) {
3143 VERIFY(vdev_metaslab_init(vd, txg) == 0);
3144 vdev_config_dirty(vd);
3145 }
3146}
3147
3148/*
3149 * Split a vdev.
3150 */
3151void
3152vdev_split(vdev_t *vd)
3153{
3154 vdev_t *cvd, *pvd = vd->vdev_parent;
3155
3156 vdev_remove_child(pvd, vd);
3157 vdev_compact_children(pvd);
3158
3159 cvd = pvd->vdev_child[0];
3160 if (pvd->vdev_children == 1) {
3161 vdev_remove_parent(cvd);
3162 cvd->vdev_splitting = B_TRUE;
3163 }
3164 vdev_propagate_state(cvd);
3165}
1451 /*
1452 * We record the previous state before we close it, so that if we are
1453 * doing a reopen(), we don't generate FMA ereports if we notice that
1454 * it's still faulted.
1455 */
1456 vd->vdev_prevstate = vd->vdev_state;
1457
1458 if (vd->vdev_offline)
1459 vd->vdev_state = VDEV_STATE_OFFLINE;
1460 else
1461 vd->vdev_state = VDEV_STATE_CLOSED;
1462 vd->vdev_stat.vs_aux = VDEV_AUX_NONE;
1463}
1464
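/*
 * Recursively hold all leaf devices of a root pool open; a no-op
 * until the pool has been initialized.
 */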
1465void
1466vdev_hold(vdev_t *vd)
1467{
1468 spa_t *spa = vd->vdev_spa;
1469
1470 ASSERT(spa_is_root(spa));
1471 if (spa->spa_state == POOL_STATE_UNINITIALIZED)
1472 return;
1473
1474 for (int c = 0; c < vd->vdev_children; c++)
1475 vdev_hold(vd->vdev_child[c]);
1476
1477 if (vd->vdev_ops->vdev_op_leaf)
1478 vd->vdev_ops->vdev_op_hold(vd);
1479}
1480
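/*
 * Release the holds taken by vdev_hold().
 */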
1481void
1482vdev_rele(vdev_t *vd)
1483{
1484 spa_t *spa = vd->vdev_spa;
1485
1486 ASSERT(spa_is_root(spa));
1487 for (int c = 0; c < vd->vdev_children; c++)
1488 vdev_rele(vd->vdev_child[c]);
1489
1490 if (vd->vdev_ops->vdev_op_leaf)
1491 vd->vdev_ops->vdev_op_rele(vd);
1492}
1493
1494/*
1495 * Reopen all interior vdevs and any unopened leaves. We don't actually
1496 * reopen leaf vdevs which had previously been opened, as they might deadlock
1497 * on the spa_config_lock. Instead we only obtain the leaf's physical size.
1498 * If the leaf has never been opened then open it, as usual.
1499 */
1500void
1501vdev_reopen(vdev_t *vd)
1502{
1503 spa_t *spa = vd->vdev_spa;
1504
1505 ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
1506
1507 /* set the reopening flag unless we're taking the vdev offline */
1508 vd->vdev_reopening = !vd->vdev_offline;
1509 vdev_close(vd);
1510 (void) vdev_open(vd);
1511
1512 /*
1513 * Call vdev_validate() here to make sure we have the same device.
1514 * Otherwise, a device with an invalid label could be successfully
1515 * opened in response to vdev_reopen().
1516 */
1517 if (vd->vdev_aux) {
1518 (void) vdev_validate_aux(vd);
1519 if (vdev_readable(vd) && vdev_writeable(vd) &&
1520 vd->vdev_aux == &spa->spa_l2cache &&
1521 !l2arc_vdev_present(vd))
1522 l2arc_add_vdev(spa, vd);
1523 } else {
1524 (void) vdev_validate(vd, spa_last_synced_txg(spa));
1525 }
1526
1527 /*
1528 * Reassess parent vdev's health.
1529 */
1530 vdev_propagate_state(vd);
1531}
1532
1533int
1534vdev_create(vdev_t *vd, uint64_t txg, boolean_t isreplacing)
1535{
1536 int error;
1537
1538 /*
1539 * Normally, partial opens (e.g. of a mirror) are allowed.
1540 * For a create, however, we want to fail the request if
1541 * there are any components we can't open.
1542 */
1543 error = vdev_open(vd);
1544
1545 if (error || vd->vdev_state != VDEV_STATE_HEALTHY) {
1546 vdev_close(vd);
1547 return (error ? error : ENXIO);
1548 }
1549
1550 /*
1551 * Recursively initialize all labels.
1552 */
1553 if ((error = vdev_label_init(vd, txg, isreplacing ?
1554 VDEV_LABEL_REPLACE : VDEV_LABEL_CREATE)) != 0) {
1555 vdev_close(vd);
1556 return (error);
1557 }
1558
1559 return (0);
1560}
1561
1562void
1563vdev_metaslab_set_size(vdev_t *vd)
1564{
1565 /*
1566	 * Aim for roughly 200 metaslabs per vdev, each no smaller than
1567	 * SPA_MAXBLOCKSIZE.
1567 */
1568 vd->vdev_ms_shift = highbit(vd->vdev_asize / 200);
1569 vd->vdev_ms_shift = MAX(vd->vdev_ms_shift, SPA_MAXBLOCKSHIFT);
1570}
1571
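/*
 * Mark a top-level vdev dirty for the given txg so that its metaslab
 * and/or DTL changes are written out during spa_sync().
 */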
1572void
1573vdev_dirty(vdev_t *vd, int flags, void *arg, uint64_t txg)
1574{
1575 ASSERT(vd == vd->vdev_top);
1576 ASSERT(!vd->vdev_ishole);
1577 ASSERT(ISP2(flags));
1578 ASSERT(spa_writeable(vd->vdev_spa));
1579
1580 if (flags & VDD_METASLAB)
1581 (void) txg_list_add(&vd->vdev_ms_list, arg, txg);
1582
1583 if (flags & VDD_DTL)
1584 (void) txg_list_add(&vd->vdev_dtl_list, arg, txg);
1585
1586 (void) txg_list_add(&vd->vdev_spa->spa_vdev_txg_list, vd, txg);
1587}
1588
1589/*
1590 * DTLs.
1591 *
1592 * A vdev's DTL (dirty time log) is the set of transaction groups for which
1593 * the vdev has less than perfect replication. There are four kinds of DTL:
1594 *
1595 * DTL_MISSING: txgs for which the vdev has no valid copies of the data
1596 *
1597 * DTL_PARTIAL: txgs for which data is available, but not fully replicated
1598 *
1599 * DTL_SCRUB: the txgs that could not be repaired by the last scrub; upon
1600 * scrub completion, DTL_SCRUB replaces DTL_MISSING in the range of
1601 * txgs that was scrubbed.
1602 *
1603 * DTL_OUTAGE: txgs which cannot currently be read, whether due to
1604 * persistent errors or just some device being offline.
1605 * Unlike the other three, the DTL_OUTAGE map is not generally
1606 * maintained; it's only computed when needed, typically to
1607 * determine whether a device can be detached.
1608 *
1609 * For leaf vdevs, DTL_MISSING and DTL_PARTIAL are identical: the device
1610 * either has the data or it doesn't.
1611 *
1612 * For interior vdevs such as mirror and RAID-Z the picture is more complex.
1613 * A vdev's DTL_PARTIAL is the union of its children's DTL_PARTIALs, because
1614 * if any child is less than fully replicated, then so is its parent.
1615 * A vdev's DTL_MISSING is a modified union of its children's DTL_MISSINGs,
1616 * comprising only those txgs which appear in more than 'maxfaults' children;
1617 * those are the txgs we don't have enough replication to read. For example,
1618 * double-parity RAID-Z can tolerate up to two missing devices (maxfaults == 2);
1619 * thus, its DTL_MISSING consists of the set of txgs that appear in more than
1620 * two child DTL_MISSING maps.
1621 *
1622 * It should be clear from the above that to compute the DTLs and outage maps
1623 * for all vdevs, it suffices to know just the leaf vdevs' DTL_MISSING maps.
1624 * Therefore, that is all we keep on disk. When loading the pool, or after
1625 * a configuration change, we generate all other DTLs from first principles.
1626 */
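/*
 * Add the txg range [txg, txg + size) to the given DTL.
 */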
1627void
1628vdev_dtl_dirty(vdev_t *vd, vdev_dtl_type_t t, uint64_t txg, uint64_t size)
1629{
1630 space_map_t *sm = &vd->vdev_dtl[t];
1631
1632 ASSERT(t < DTL_TYPES);
1633 ASSERT(vd != vd->vdev_spa->spa_root_vdev);
1634 ASSERT(spa_writeable(vd->vdev_spa));
1635
1636 mutex_enter(sm->sm_lock);
1637 if (!space_map_contains(sm, txg, size))
1638 space_map_add(sm, txg, size);
1639 mutex_exit(sm->sm_lock);
1640}
1641
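/*
 * Determine whether the given DTL already covers the txg range.
 */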
1642boolean_t
1643vdev_dtl_contains(vdev_t *vd, vdev_dtl_type_t t, uint64_t txg, uint64_t size)
1644{
1645 space_map_t *sm = &vd->vdev_dtl[t];
1646 boolean_t dirty = B_FALSE;
1647
1648 ASSERT(t < DTL_TYPES);
1649 ASSERT(vd != vd->vdev_spa->spa_root_vdev);
1650
1651 mutex_enter(sm->sm_lock);
1652 if (sm->sm_space != 0)
1653 dirty = space_map_contains(sm, txg, size);
1654 mutex_exit(sm->sm_lock);
1655
1656 return (dirty);
1657}
1658
1659boolean_t
1660vdev_dtl_empty(vdev_t *vd, vdev_dtl_type_t t)
1661{
1662 space_map_t *sm = &vd->vdev_dtl[t];
1663 boolean_t empty;
1664
1665 mutex_enter(sm->sm_lock);
1666 empty = (sm->sm_space == 0);
1667 mutex_exit(sm->sm_lock);
1668
1669 return (empty);
1670}
1671
1672/*
1673 * Reassess DTLs after a config change or scrub completion.
1674 */
1675void
1676vdev_dtl_reassess(vdev_t *vd, uint64_t txg, uint64_t scrub_txg, int scrub_done)
1677{
1678 spa_t *spa = vd->vdev_spa;
1679 avl_tree_t reftree;
1680 int minref;
1681
1682 ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
1683
1684 for (int c = 0; c < vd->vdev_children; c++)
1685 vdev_dtl_reassess(vd->vdev_child[c], txg,
1686 scrub_txg, scrub_done);
1687
1688 if (vd == spa->spa_root_vdev || vd->vdev_ishole || vd->vdev_aux)
1689 return;
1690
1691 if (vd->vdev_ops->vdev_op_leaf) {
1692 dsl_scan_t *scn = spa->spa_dsl_pool->dp_scan;
1693
1694 mutex_enter(&vd->vdev_dtl_lock);
1695 if (scrub_txg != 0 &&
1696 (spa->spa_scrub_started ||
1697 (scn && scn->scn_phys.scn_errors == 0))) {
1698 /*
1699 * We completed a scrub up to scrub_txg. If we
1700 * did it without rebooting, then the scrub dtl
1701 * will be valid, so excise the old region and
1702 * fold in the scrub dtl. Otherwise, leave the
1703 * dtl as-is if there was an error.
1704 *
1705			 * There's a little trick here: to excise the beginning
1706 * of the DTL_MISSING map, we put it into a reference
1707 * tree and then add a segment with refcnt -1 that
1708 * covers the range [0, scrub_txg). This means
1709 * that each txg in that range has refcnt -1 or 0.
1710 * We then add DTL_SCRUB with a refcnt of 2, so that
1711 * entries in the range [0, scrub_txg) will have a
1712 * positive refcnt -- either 1 or 2. We then convert
1713 * the reference tree into the new DTL_MISSING map.
1714 */
1715 space_map_ref_create(&reftree);
1716 space_map_ref_add_map(&reftree,
1717 &vd->vdev_dtl[DTL_MISSING], 1);
1718 space_map_ref_add_seg(&reftree, 0, scrub_txg, -1);
1719 space_map_ref_add_map(&reftree,
1720 &vd->vdev_dtl[DTL_SCRUB], 2);
1721 space_map_ref_generate_map(&reftree,
1722 &vd->vdev_dtl[DTL_MISSING], 1);
1723 space_map_ref_destroy(&reftree);
1724 }
1725 space_map_vacate(&vd->vdev_dtl[DTL_PARTIAL], NULL, NULL);
1726 space_map_walk(&vd->vdev_dtl[DTL_MISSING],
1727 space_map_add, &vd->vdev_dtl[DTL_PARTIAL]);
1728 if (scrub_done)
1729 space_map_vacate(&vd->vdev_dtl[DTL_SCRUB], NULL, NULL);
1730 space_map_vacate(&vd->vdev_dtl[DTL_OUTAGE], NULL, NULL);
1731 if (!vdev_readable(vd))
1732 space_map_add(&vd->vdev_dtl[DTL_OUTAGE], 0, -1ULL);
1733 else
1734 space_map_walk(&vd->vdev_dtl[DTL_MISSING],
1735 space_map_add, &vd->vdev_dtl[DTL_OUTAGE]);
1736 mutex_exit(&vd->vdev_dtl_lock);
1737
1738 if (txg != 0)
1739 vdev_dirty(vd->vdev_top, VDD_DTL, vd, txg);
1740 return;
1741 }
1742
1743 mutex_enter(&vd->vdev_dtl_lock);
1744 for (int t = 0; t < DTL_TYPES; t++) {
1745 /* account for child's outage in parent's missing map */
1746		int s = (t == DTL_MISSING) ? DTL_OUTAGE : t;
1747 if (t == DTL_SCRUB)
1748 continue; /* leaf vdevs only */
1749 if (t == DTL_PARTIAL)
1750 minref = 1; /* i.e. non-zero */
1751 else if (vd->vdev_nparity != 0)
1752 minref = vd->vdev_nparity + 1; /* RAID-Z */
1753 else
1754 minref = vd->vdev_children; /* any kind of mirror */
1755 space_map_ref_create(&reftree);
1756 for (int c = 0; c < vd->vdev_children; c++) {
1757 vdev_t *cvd = vd->vdev_child[c];
1758 mutex_enter(&cvd->vdev_dtl_lock);
1759 space_map_ref_add_map(&reftree, &cvd->vdev_dtl[s], 1);
1760 mutex_exit(&cvd->vdev_dtl_lock);
1761 }
1762 space_map_ref_generate_map(&reftree, &vd->vdev_dtl[t], minref);
1763 space_map_ref_destroy(&reftree);
1764 }
1765 mutex_exit(&vd->vdev_dtl_lock);
1766}
1767
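/*
 * Load a leaf vdev's DTL_MISSING map from its space map object
 * in the MOS.
 */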
1768static int
1769vdev_dtl_load(vdev_t *vd)
1770{
1771 spa_t *spa = vd->vdev_spa;
1772 space_map_obj_t *smo = &vd->vdev_dtl_smo;
1773 objset_t *mos = spa->spa_meta_objset;
1774 dmu_buf_t *db;
1775 int error;
1776
1777 ASSERT(vd->vdev_children == 0);
1778
1779 if (smo->smo_object == 0)
1780 return (0);
1781
1782 ASSERT(!vd->vdev_ishole);
1783
1784 if ((error = dmu_bonus_hold(mos, smo->smo_object, FTAG, &db)) != 0)
1785 return (error);
1786
1787 ASSERT3U(db->db_size, >=, sizeof (*smo));
1788 bcopy(db->db_data, smo, sizeof (*smo));
1789 dmu_buf_rele(db, FTAG);
1790
1791 mutex_enter(&vd->vdev_dtl_lock);
1792 error = space_map_load(&vd->vdev_dtl[DTL_MISSING],
1793 NULL, SM_ALLOC, smo, mos);
1794 mutex_exit(&vd->vdev_dtl_lock);
1795
1796 return (error);
1797}
1798
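/*
 * Write the vdev's DTL_MISSING map out to the MOS for the given
 * txg, creating or freeing the backing space map object as needed.
 */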
1799void
1800vdev_dtl_sync(vdev_t *vd, uint64_t txg)
1801{
1802 spa_t *spa = vd->vdev_spa;
1803 space_map_obj_t *smo = &vd->vdev_dtl_smo;
1804 space_map_t *sm = &vd->vdev_dtl[DTL_MISSING];
1805 objset_t *mos = spa->spa_meta_objset;
1806 space_map_t smsync;
1807 kmutex_t smlock;
1808 dmu_buf_t *db;
1809 dmu_tx_t *tx;
1810
1811 ASSERT(!vd->vdev_ishole);
1812
1813 tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
1814
1815 if (vd->vdev_detached) {
1816 if (smo->smo_object != 0) {
1817 int err = dmu_object_free(mos, smo->smo_object, tx);
1818 ASSERT0(err);
1819 smo->smo_object = 0;
1820 }
1821 dmu_tx_commit(tx);
1822 return;
1823 }
1824
1825 if (smo->smo_object == 0) {
1826 ASSERT(smo->smo_objsize == 0);
1827 ASSERT(smo->smo_alloc == 0);
1828 smo->smo_object = dmu_object_alloc(mos,
1829 DMU_OT_SPACE_MAP, 1 << SPACE_MAP_BLOCKSHIFT,
1830 DMU_OT_SPACE_MAP_HEADER, sizeof (*smo), tx);
1831 ASSERT(smo->smo_object != 0);
1832 vdev_config_dirty(vd->vdev_top);
1833 }
1834
1835 mutex_init(&smlock, NULL, MUTEX_DEFAULT, NULL);
1836
1837 space_map_create(&smsync, sm->sm_start, sm->sm_size, sm->sm_shift,
1838 &smlock);
1839
1840 mutex_enter(&smlock);
1841
1842 mutex_enter(&vd->vdev_dtl_lock);
1843 space_map_walk(sm, space_map_add, &smsync);
1844 mutex_exit(&vd->vdev_dtl_lock);
1845
1846 space_map_truncate(smo, mos, tx);
1847 space_map_sync(&smsync, SM_ALLOC, smo, mos, tx);
1848
1849 space_map_destroy(&smsync);
1850
1851 mutex_exit(&smlock);
1852 mutex_destroy(&smlock);
1853
1854 VERIFY(0 == dmu_bonus_hold(mos, smo->smo_object, FTAG, &db));
1855 dmu_buf_will_dirty(db, tx);
1856 ASSERT3U(db->db_size, >=, sizeof (*smo));
1857 bcopy(smo, db->db_data, sizeof (*smo));
1858 dmu_buf_rele(db, FTAG);
1859
1860 dmu_tx_commit(tx);
1861}
1862
1863/*
1864 * Determine whether the specified vdev can be offlined/detached/removed
1865 * without losing data.
1866 */
1867boolean_t
1868vdev_dtl_required(vdev_t *vd)
1869{
1870 spa_t *spa = vd->vdev_spa;
1871 vdev_t *tvd = vd->vdev_top;
1872 uint8_t cant_read = vd->vdev_cant_read;
1873 boolean_t required;
1874
1875 ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
1876
1877 if (vd == spa->spa_root_vdev || vd == tvd)
1878 return (B_TRUE);
1879
1880 /*
1881 * Temporarily mark the device as unreadable, and then determine
1882 * whether this results in any DTL outages in the top-level vdev.
1883 * If not, we can safely offline/detach/remove the device.
1884 */
1885 vd->vdev_cant_read = B_TRUE;
1886 vdev_dtl_reassess(tvd, 0, 0, B_FALSE);
1887 required = !vdev_dtl_empty(tvd, DTL_OUTAGE);
1888 vd->vdev_cant_read = cant_read;
1889 vdev_dtl_reassess(tvd, 0, 0, B_FALSE);
1890
1891 if (!required && zio_injection_enabled)
1892 required = !!zio_handle_device_injection(vd, NULL, ECHILD);
1893
1894 return (required);
1895}
1896
1897/*
1898 * Determine if resilver is needed, and if so the txg range.
1899 */
1900boolean_t
1901vdev_resilver_needed(vdev_t *vd, uint64_t *minp, uint64_t *maxp)
1902{
1903 boolean_t needed = B_FALSE;
1904 uint64_t thismin = UINT64_MAX;
1905 uint64_t thismax = 0;
1906
1907 if (vd->vdev_children == 0) {
1908 mutex_enter(&vd->vdev_dtl_lock);
1909 if (vd->vdev_dtl[DTL_MISSING].sm_space != 0 &&
1910 vdev_writeable(vd)) {
1911 space_seg_t *ss;
1912
1913 ss = avl_first(&vd->vdev_dtl[DTL_MISSING].sm_root);
1914 thismin = ss->ss_start - 1;
1915 ss = avl_last(&vd->vdev_dtl[DTL_MISSING].sm_root);
1916 thismax = ss->ss_end;
1917 needed = B_TRUE;
1918 }
1919 mutex_exit(&vd->vdev_dtl_lock);
1920 } else {
1921 for (int c = 0; c < vd->vdev_children; c++) {
1922 vdev_t *cvd = vd->vdev_child[c];
1923 uint64_t cmin, cmax;
1924
1925 if (vdev_resilver_needed(cvd, &cmin, &cmax)) {
1926 thismin = MIN(thismin, cmin);
1927 thismax = MAX(thismax, cmax);
1928 needed = B_TRUE;
1929 }
1930 }
1931 }
1932
1933 if (needed && minp) {
1934 *minp = thismin;
1935 *maxp = thismax;
1936 }
1937 return (needed);
1938}
1939
1940void
1941vdev_load(vdev_t *vd)
1942{
1943 /*
1944 * Recursively load all children.
1945 */
1946 for (int c = 0; c < vd->vdev_children; c++)
1947 vdev_load(vd->vdev_child[c]);
1948
1949 /*
1950 * If this is a top-level vdev, initialize its metaslabs.
1951 */
1952 if (vd == vd->vdev_top && !vd->vdev_ishole &&
1953 (vd->vdev_ashift == 0 || vd->vdev_asize == 0 ||
1954 vdev_metaslab_init(vd, 0) != 0))
1955 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
1956 VDEV_AUX_CORRUPT_DATA);
1957
1958 /*
1959 * If this is a leaf vdev, load its DTL.
1960 */
1961 if (vd->vdev_ops->vdev_op_leaf && vdev_dtl_load(vd) != 0)
1962 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
1963 VDEV_AUX_CORRUPT_DATA);
1964}
1965
1966/*
1967 * The special vdev case is used for hot spares and l2cache devices. Its
1968 * sole purpose is to set the vdev state for the associated vdev.  To do this,
1969 * we make sure that we can open the underlying device, then try to read the
1970 * label, and make sure that the label is sane and that it hasn't been
1971 * repurposed to another pool.
1972 */
1973int
1974vdev_validate_aux(vdev_t *vd)
1975{
1976 nvlist_t *label;
1977 uint64_t guid, version;
1978 uint64_t state;
1979
1980 if (!vdev_readable(vd))
1981 return (0);
1982
1983 if ((label = vdev_label_read_config(vd, -1ULL)) == NULL) {
1984 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
1985 VDEV_AUX_CORRUPT_DATA);
1986 return (-1);
1987 }
1988
1989 if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_VERSION, &version) != 0 ||
1990 !SPA_VERSION_IS_SUPPORTED(version) ||
1991 nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID, &guid) != 0 ||
1992 guid != vd->vdev_guid ||
1993 nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE, &state) != 0) {
1994 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
1995 VDEV_AUX_CORRUPT_DATA);
1996 nvlist_free(label);
1997 return (-1);
1998 }
1999
2000 /*
2001 * We don't actually check the pool state here. If it's in fact in
2002 * use by another pool, we update this fact on the fly when requested.
2003 */
2004 nvlist_free(label);
2005 return (0);
2006}
2007
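/*
 * Free the on-disk space map and metaslab state of a (now empty)
 * top-level vdev that is being removed from the pool.
 */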
2008void
2009vdev_remove(vdev_t *vd, uint64_t txg)
2010{
2011 spa_t *spa = vd->vdev_spa;
2012 objset_t *mos = spa->spa_meta_objset;
2013 dmu_tx_t *tx;
2014
2015 tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg);
2016
2017 if (vd->vdev_dtl_smo.smo_object) {
2018 ASSERT0(vd->vdev_dtl_smo.smo_alloc);
2019 (void) dmu_object_free(mos, vd->vdev_dtl_smo.smo_object, tx);
2020 vd->vdev_dtl_smo.smo_object = 0;
2021 }
2022
2023 if (vd->vdev_ms != NULL) {
2024 for (int m = 0; m < vd->vdev_ms_count; m++) {
2025 metaslab_t *msp = vd->vdev_ms[m];
2026
2027 if (msp == NULL || msp->ms_smo.smo_object == 0)
2028 continue;
2029
2030 ASSERT0(msp->ms_smo.smo_alloc);
2031 (void) dmu_object_free(mos, msp->ms_smo.smo_object, tx);
2032 msp->ms_smo.smo_object = 0;
2033 }
2034 }
2035
2036 if (vd->vdev_ms_array) {
2037 (void) dmu_object_free(mos, vd->vdev_ms_array, tx);
2038 vd->vdev_ms_array = 0;
2039 vd->vdev_ms_shift = 0;
2040 }
2041 dmu_tx_commit(tx);
2042}
2043
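/*
 * Finish syncing this vdev for the txg: process the metaslabs that
 * became clean in this txg, and reassess the metaslab group if there
 * were any.
 */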
2044void
2045vdev_sync_done(vdev_t *vd, uint64_t txg)
2046{
2047 metaslab_t *msp;
2048 boolean_t reassess = !txg_list_empty(&vd->vdev_ms_list, TXG_CLEAN(txg));
2049
2050 ASSERT(!vd->vdev_ishole);
2051
2052 while (msp = txg_list_remove(&vd->vdev_ms_list, TXG_CLEAN(txg)))
2053 metaslab_sync_done(msp, txg);
2054
2055 if (reassess)
2056 metaslab_sync_reassess(vd->vdev_mg);
2057}
2058
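/*
 * Write out this top-level vdev's dirty metaslabs and DTLs for the
 * given txg, allocating its metaslab array object on first use.
 */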
2059void
2060vdev_sync(vdev_t *vd, uint64_t txg)
2061{
2062 spa_t *spa = vd->vdev_spa;
2063 vdev_t *lvd;
2064 metaslab_t *msp;
2065 dmu_tx_t *tx;
2066
2067 ASSERT(!vd->vdev_ishole);
2068
2069 if (vd->vdev_ms_array == 0 && vd->vdev_ms_shift != 0) {
2070 ASSERT(vd == vd->vdev_top);
2071 tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
2072 vd->vdev_ms_array = dmu_object_alloc(spa->spa_meta_objset,
2073 DMU_OT_OBJECT_ARRAY, 0, DMU_OT_NONE, 0, tx);
2074 ASSERT(vd->vdev_ms_array != 0);
2075 vdev_config_dirty(vd);
2076 dmu_tx_commit(tx);
2077 }
2078
2079 /*
2080 * Remove the metadata associated with this vdev once it's empty.
2081 */
2082 if (vd->vdev_stat.vs_alloc == 0 && vd->vdev_removing)
2083 vdev_remove(vd, txg);
2084
2085 while ((msp = txg_list_remove(&vd->vdev_ms_list, txg)) != NULL) {
2086 metaslab_sync(msp, txg);
2087 (void) txg_list_add(&vd->vdev_ms_list, msp, TXG_CLEAN(txg));
2088 }
2089
2090 while ((lvd = txg_list_remove(&vd->vdev_dtl_list, txg)) != NULL)
2091 vdev_dtl_sync(lvd, txg);
2092
2093 (void) txg_list_add(&spa->spa_vdev_txg_list, vd, TXG_CLEAN(txg));
2094}
2095
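/*
 * Convert a physical I/O size into its allocated size on this vdev,
 * which for RAID-Z includes the parity overhead.
 */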
2096uint64_t
2097vdev_psize_to_asize(vdev_t *vd, uint64_t psize)
2098{
2099 return (vd->vdev_ops->vdev_op_asize(vd, psize));
2100}
2101
2102/*
2103 * Mark the given vdev faulted. A faulted vdev behaves as if the device could
2104 * not be opened, and no I/O is attempted.
2105 */
2106int
2107vdev_fault(spa_t *spa, uint64_t guid, vdev_aux_t aux)
2108{
2109 vdev_t *vd, *tvd;
2110
2111 spa_vdev_state_enter(spa, SCL_NONE);
2112
2113 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
2114 return (spa_vdev_state_exit(spa, NULL, ENODEV));
2115
2116 if (!vd->vdev_ops->vdev_op_leaf)
2117 return (spa_vdev_state_exit(spa, NULL, ENOTSUP));
2118
2119 tvd = vd->vdev_top;
2120
2121 /*
2122 * We don't directly use the aux state here, but if we do a
2123 * vdev_reopen(), we need this value to be present to remember why we
2124 * were faulted.
2125 */
2126 vd->vdev_label_aux = aux;
2127
2128 /*
2129 * Faulted state takes precedence over degraded.
2130 */
2131 vd->vdev_delayed_close = B_FALSE;
2132 vd->vdev_faulted = 1ULL;
2133 vd->vdev_degraded = 0ULL;
2134 vdev_set_state(vd, B_FALSE, VDEV_STATE_FAULTED, aux);
2135
2136 /*
2137 * If this device has the only valid copy of the data, then
2138 * back off and simply mark the vdev as degraded instead.
2139 */
2140 if (!tvd->vdev_islog && vd->vdev_aux == NULL && vdev_dtl_required(vd)) {
2141 vd->vdev_degraded = 1ULL;
2142 vd->vdev_faulted = 0ULL;
2143
2144 /*
2145 * If we reopen the device and it's not dead, only then do we
2146 * mark it degraded.
2147 */
2148 vdev_reopen(tvd);
2149
2150 if (vdev_readable(vd))
2151 vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED, aux);
2152 }
2153
2154 return (spa_vdev_state_exit(spa, vd, 0));
2155}
2156
2157/*
2158 * Mark the given vdev degraded. A degraded vdev is purely an indication to the
2159 * user that something is wrong. The vdev continues to operate as normal as far
2160 * as I/O is concerned.
2161 */
2162int
2163vdev_degrade(spa_t *spa, uint64_t guid, vdev_aux_t aux)
2164{
2165 vdev_t *vd;
2166
2167 spa_vdev_state_enter(spa, SCL_NONE);
2168
2169 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
2170 return (spa_vdev_state_exit(spa, NULL, ENODEV));
2171
2172 if (!vd->vdev_ops->vdev_op_leaf)
2173 return (spa_vdev_state_exit(spa, NULL, ENOTSUP));
2174
2175 /*
2176 * If the vdev is already faulted, then don't do anything.
2177 */
2178 if (vd->vdev_faulted || vd->vdev_degraded)
2179 return (spa_vdev_state_exit(spa, NULL, 0));
2180
2181 vd->vdev_degraded = 1ULL;
2182 if (!vdev_is_dead(vd))
2183 vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED,
2184 aux);
2185
2186 return (spa_vdev_state_exit(spa, vd, 0));
2187}
2188
2189/*
2190 * Online the given vdev. If 'unspare' is set, it implies two things. First,
2191 * any attached spare device should be detached when the device finishes
2192 * resilvering. Second, the online should be treated like a 'test' online case,
2193 * so no FMA events are generated if the device fails to open.
2194 */
2195int
2196vdev_online(spa_t *spa, uint64_t guid, uint64_t flags, vdev_state_t *newstate)
2197{
2198 vdev_t *vd, *tvd, *pvd, *rvd = spa->spa_root_vdev;
2199
2200 spa_vdev_state_enter(spa, SCL_NONE);
2201
2202 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
2203 return (spa_vdev_state_exit(spa, NULL, ENODEV));
2204
2205 if (!vd->vdev_ops->vdev_op_leaf)
2206 return (spa_vdev_state_exit(spa, NULL, ENOTSUP));
2207
2208 tvd = vd->vdev_top;
2209 vd->vdev_offline = B_FALSE;
2210 vd->vdev_tmpoffline = B_FALSE;
2211 vd->vdev_checkremove = !!(flags & ZFS_ONLINE_CHECKREMOVE);
2212 vd->vdev_forcefault = !!(flags & ZFS_ONLINE_FORCEFAULT);
2213
2214 /* XXX - L2ARC 1.0 does not support expansion */
2215 if (!vd->vdev_aux) {
2216 for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent)
2217 pvd->vdev_expanding = !!(flags & ZFS_ONLINE_EXPAND);
2218 }
2219
2220 vdev_reopen(tvd);
2221 vd->vdev_checkremove = vd->vdev_forcefault = B_FALSE;
2222
2223 if (!vd->vdev_aux) {
2224 for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent)
2225 pvd->vdev_expanding = B_FALSE;
2226 }
2227
2228 if (newstate)
2229 *newstate = vd->vdev_state;
2230 if ((flags & ZFS_ONLINE_UNSPARE) &&
2231 !vdev_is_dead(vd) && vd->vdev_parent &&
2232 vd->vdev_parent->vdev_ops == &vdev_spare_ops &&
2233 vd->vdev_parent->vdev_child[0] == vd)
2234 vd->vdev_unspare = B_TRUE;
2235
2236 if ((flags & ZFS_ONLINE_EXPAND) || spa->spa_autoexpand) {
2237
2238 /* XXX - L2ARC 1.0 does not support expansion */
2239 if (vd->vdev_aux)
2240 return (spa_vdev_state_exit(spa, vd, ENOTSUP));
2241 spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
2242 }
2243 return (spa_vdev_state_exit(spa, vd, 0));
2244}
2245
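/*
 * Guts of vdev_offline(); the caller is expected to hold
 * spa_vdev_top_lock, which serializes top-level vdev reconfiguration.
 */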
2246static int
2247vdev_offline_locked(spa_t *spa, uint64_t guid, uint64_t flags)
2248{
2249 vdev_t *vd, *tvd;
2250 int error = 0;
2251 uint64_t generation;
2252 metaslab_group_t *mg;
2253
2254top:
2255 spa_vdev_state_enter(spa, SCL_ALLOC);
2256
2257 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
2258 return (spa_vdev_state_exit(spa, NULL, ENODEV));
2259
2260 if (!vd->vdev_ops->vdev_op_leaf)
2261 return (spa_vdev_state_exit(spa, NULL, ENOTSUP));
2262
2263 tvd = vd->vdev_top;
2264 mg = tvd->vdev_mg;
2265 generation = spa->spa_config_generation + 1;
2266
2267 /*
2268 * If the device isn't already offline, try to offline it.
2269 */
2270 if (!vd->vdev_offline) {
2271 /*
2272 * If this device has the only valid copy of some data,
2273 * don't allow it to be offlined. Log devices are always
2274 * expendable.
2275 */
2276 if (!tvd->vdev_islog && vd->vdev_aux == NULL &&
2277 vdev_dtl_required(vd))
2278 return (spa_vdev_state_exit(spa, NULL, EBUSY));
2279
2280 /*
2281 * If the top-level is a slog and it has had allocations
2282 * then proceed. We check that the vdev's metaslab group
2283 * is not NULL since it's possible that we may have just
2284 * added this vdev but not yet initialized its metaslabs.
2285 */
2286 if (tvd->vdev_islog && mg != NULL) {
2287 /*
2288 * Prevent any future allocations.
2289 */
2290 metaslab_group_passivate(mg);
2291 (void) spa_vdev_state_exit(spa, vd, 0);
2292
2293 error = spa_offline_log(spa);
2294
2295 spa_vdev_state_enter(spa, SCL_ALLOC);
2296
2297 /*
2298 * Check to see if the config has changed.
2299 */
2300 if (error || generation != spa->spa_config_generation) {
2301 metaslab_group_activate(mg);
2302 if (error)
2303 return (spa_vdev_state_exit(spa,
2304 vd, error));
2305 (void) spa_vdev_state_exit(spa, vd, 0);
2306 goto top;
2307 }
2308 ASSERT0(tvd->vdev_stat.vs_alloc);
2309 }
2310
2311 /*
2312 * Offline this device and reopen its top-level vdev.
2313 * If the top-level vdev is a log device then just offline
2314 * it. Otherwise, if this action results in the top-level
2315 * vdev becoming unusable, undo it and fail the request.
2316 */
2317 vd->vdev_offline = B_TRUE;
2318 vdev_reopen(tvd);
2319
2320 if (!tvd->vdev_islog && vd->vdev_aux == NULL &&
2321 vdev_is_dead(tvd)) {
2322 vd->vdev_offline = B_FALSE;
2323 vdev_reopen(tvd);
2324 return (spa_vdev_state_exit(spa, NULL, EBUSY));
2325 }
2326
2327 /*
2328 * Add the device back into the metaslab rotor so that
2329 * once we online the device it's open for business.
2330 */
2331 if (tvd->vdev_islog && mg != NULL)
2332 metaslab_group_activate(mg);
2333 }
2334
2335 vd->vdev_tmpoffline = !!(flags & ZFS_OFFLINE_TEMPORARY);
2336
2337 return (spa_vdev_state_exit(spa, vd, 0));
2338}
2339
2340int
2341vdev_offline(spa_t *spa, uint64_t guid, uint64_t flags)
2342{
2343 int error;
2344
2345 mutex_enter(&spa->spa_vdev_top_lock);
2346 error = vdev_offline_locked(spa, guid, flags);
2347 mutex_exit(&spa->spa_vdev_top_lock);
2348
2349 return (error);
2350}
2351
2352/*
2353 * Clear the error counts associated with this vdev. Unlike vdev_online() and
2354 * vdev_offline(), we assume the spa config is locked. We also clear all
2355 * children. If 'vd' is NULL, then the user wants to clear all vdevs.
2356 */
2357void
2358vdev_clear(spa_t *spa, vdev_t *vd)
2359{
2360 vdev_t *rvd = spa->spa_root_vdev;
2361
2362 ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
2363
2364 if (vd == NULL)
2365 vd = rvd;
2366
2367 vd->vdev_stat.vs_read_errors = 0;
2368 vd->vdev_stat.vs_write_errors = 0;
2369 vd->vdev_stat.vs_checksum_errors = 0;
2370
2371 for (int c = 0; c < vd->vdev_children; c++)
2372 vdev_clear(spa, vd->vdev_child[c]);
2373
2374 /*
2375 * If we're in the FAULTED state or have experienced failed I/O, then
2376 * clear the persistent state and attempt to reopen the device. We
2377 * also mark the vdev config dirty, so that the new faulted state is
2378 * written out to disk.
2379 */
2380 if (vd->vdev_faulted || vd->vdev_degraded ||
2381 !vdev_readable(vd) || !vdev_writeable(vd)) {
2382
2383 /*
2384		 * When reopening in response to a clear event, it may be due to
2385		 * a fmadm repair request.  In this case, if the device is
2386		 * still broken, we still want to post the ereport again.
2387 */
2388 vd->vdev_forcefault = B_TRUE;
2389
2390 vd->vdev_faulted = vd->vdev_degraded = 0ULL;
2391 vd->vdev_cant_read = B_FALSE;
2392 vd->vdev_cant_write = B_FALSE;
2393
2394 vdev_reopen(vd == rvd ? rvd : vd->vdev_top);
2395
2396 vd->vdev_forcefault = B_FALSE;
2397
2398 if (vd != rvd && vdev_writeable(vd->vdev_top))
2399 vdev_state_dirty(vd->vdev_top);
2400
2401 if (vd->vdev_aux == NULL && !vdev_is_dead(vd))
2402 spa_async_request(spa, SPA_ASYNC_RESILVER);
2403
2404 spa_event_notify(spa, vd, ESC_ZFS_VDEV_CLEAR);
2405 }
2406
2407 /*
2408 * When clearing a FMA-diagnosed fault, we always want to
2409	 * unspare the device, as we assume that the original sparing was
2410 * done in response to the FMA fault.
2411 */
2412 if (!vdev_is_dead(vd) && vd->vdev_parent != NULL &&
2413 vd->vdev_parent->vdev_ops == &vdev_spare_ops &&
2414 vd->vdev_parent->vdev_child[0] == vd)
2415 vd->vdev_unspare = B_TRUE;
2416}
2417
2418boolean_t
2419vdev_is_dead(vdev_t *vd)
2420{
2421 /*
2422 * Holes and missing devices are always considered "dead".
2423 * This simplifies the code since we don't have to check for
2424 * these types of devices in the various code paths.
2425 * Instead we rely on the fact that we skip over dead devices
2426 * before issuing I/O to them.
2427 */
2428 return (vd->vdev_state < VDEV_STATE_DEGRADED || vd->vdev_ishole ||
2429 vd->vdev_ops == &vdev_missing_ops);
2430}
2431
2432boolean_t
2433vdev_readable(vdev_t *vd)
2434{
2435 return (!vdev_is_dead(vd) && !vd->vdev_cant_read);
2436}
2437
2438boolean_t
2439vdev_writeable(vdev_t *vd)
2440{
2441 return (!vdev_is_dead(vd) && !vd->vdev_cant_write);
2442}
2443
2444boolean_t
2445vdev_allocatable(vdev_t *vd)
2446{
2447 uint64_t state = vd->vdev_state;
2448
2449 /*
2450 * We currently allow allocations from vdevs which may be in the
2451 * process of reopening (i.e. VDEV_STATE_CLOSED). If the device
2452 * fails to reopen then we'll catch it later when we're holding
2453 * the proper locks. Note that we have to get the vdev state
2454 * in a local variable because although it changes atomically,
2455 * we're asking two separate questions about it.
2456 */
2457 return (!(state < VDEV_STATE_DEGRADED && state != VDEV_STATE_CLOSED) &&
2458 !vd->vdev_cant_write && !vd->vdev_ishole);
2459}
2460
2461boolean_t
2462vdev_accessible(vdev_t *vd, zio_t *zio)
2463{
2464 ASSERT(zio->io_vd == vd);
2465
2466 if (vdev_is_dead(vd) || vd->vdev_remove_wanted)
2467 return (B_FALSE);
2468
2469 if (zio->io_type == ZIO_TYPE_READ)
2470 return (!vd->vdev_cant_read);
2471
2472 if (zio->io_type == ZIO_TYPE_WRITE)
2473 return (!vd->vdev_cant_write);
2474
2475 return (B_TRUE);
2476}
2477
2478/*
2479 * Get statistics for the given vdev.
2480 */
2481void
2482vdev_get_stats(vdev_t *vd, vdev_stat_t *vs)
2483{
2484 vdev_t *rvd = vd->vdev_spa->spa_root_vdev;
2485
2486 mutex_enter(&vd->vdev_stat_lock);
2487 bcopy(&vd->vdev_stat, vs, sizeof (*vs));
2488 vs->vs_timestamp = gethrtime() - vs->vs_timestamp;
2489 vs->vs_state = vd->vdev_state;
2490 vs->vs_rsize = vdev_get_min_asize(vd);
2491 if (vd->vdev_ops->vdev_op_leaf)
2492 vs->vs_rsize += VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE;
2493 vs->vs_esize = vd->vdev_max_asize - vd->vdev_asize;
2494 mutex_exit(&vd->vdev_stat_lock);
2495
2496 /*
2497 * If we're getting stats on the root vdev, aggregate the I/O counts
2498 * over all top-level vdevs (i.e. the direct children of the root).
2499 */
2500 if (vd == rvd) {
2501 for (int c = 0; c < rvd->vdev_children; c++) {
2502 vdev_t *cvd = rvd->vdev_child[c];
2503 vdev_stat_t *cvs = &cvd->vdev_stat;
2504
2505 mutex_enter(&vd->vdev_stat_lock);
2506 for (int t = 0; t < ZIO_TYPES; t++) {
2507 vs->vs_ops[t] += cvs->vs_ops[t];
2508 vs->vs_bytes[t] += cvs->vs_bytes[t];
2509 }
2510 cvs->vs_scan_removing = cvd->vdev_removing;
2511 mutex_exit(&vd->vdev_stat_lock);
2512 }
2513 }
2514}
2515
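/*
 * Reset the in-core space accounting for this vdev.
 */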
2516void
2517vdev_clear_stats(vdev_t *vd)
2518{
2519 mutex_enter(&vd->vdev_stat_lock);
2520 vd->vdev_stat.vs_space = 0;
2521 vd->vdev_stat.vs_dspace = 0;
2522 vd->vdev_stat.vs_alloc = 0;
2523 mutex_exit(&vd->vdev_stat_lock);
2524}
2525
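/*
 * Recursively zero the scan-processed counters in preparation for
 * a new scrub or resilver pass.
 */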
2526void
2527vdev_scan_stat_init(vdev_t *vd)
2528{
2529 vdev_stat_t *vs = &vd->vdev_stat;
2530
2531 for (int c = 0; c < vd->vdev_children; c++)
2532 vdev_scan_stat_init(vd->vdev_child[c]);
2533
2534 mutex_enter(&vd->vdev_stat_lock);
2535 vs->vs_scan_processed = 0;
2536 mutex_exit(&vd->vdev_stat_lock);
2537}
2538
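/*
 * Update I/O and error statistics for a completed zio, and record
 * failed writes in the appropriate DTLs.
 */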
2539void
2540vdev_stat_update(zio_t *zio, uint64_t psize)
2541{
2542 spa_t *spa = zio->io_spa;
2543 vdev_t *rvd = spa->spa_root_vdev;
2544 vdev_t *vd = zio->io_vd ? zio->io_vd : rvd;
2545 vdev_t *pvd;
2546 uint64_t txg = zio->io_txg;
2547 vdev_stat_t *vs = &vd->vdev_stat;
2548 zio_type_t type = zio->io_type;
2549 int flags = zio->io_flags;
2550
2551 /*
2552 * If this i/o is a gang leader, it didn't do any actual work.
2553 */
2554 if (zio->io_gang_tree)
2555 return;
2556
2557 if (zio->io_error == 0) {
2558 /*
2559 * If this is a root i/o, don't count it -- we've already
2560 * counted the top-level vdevs, and vdev_get_stats() will
2561 * aggregate them when asked. This reduces contention on
2562 * the root vdev_stat_lock and implicitly handles blocks
2563 * that compress away to holes, for which there is no i/o.
2564 * (Holes never create vdev children, so all the counters
2565 * remain zero, which is what we want.)
2566 *
2567 * Note: this only applies to successful i/o (io_error == 0)
2568 * because unlike i/o counts, errors are not additive.
2569 * When reading a ditto block, for example, failure of
2570 * one top-level vdev does not imply a root-level error.
2571 */
2572 if (vd == rvd)
2573 return;
2574
2575 ASSERT(vd == zio->io_vd);
2576
2577 if (flags & ZIO_FLAG_IO_BYPASS)
2578 return;
2579
2580 mutex_enter(&vd->vdev_stat_lock);
2581
2582 if (flags & ZIO_FLAG_IO_REPAIR) {
2583 if (flags & ZIO_FLAG_SCAN_THREAD) {
2584 dsl_scan_phys_t *scn_phys =
2585 &spa->spa_dsl_pool->dp_scan->scn_phys;
2586 uint64_t *processed = &scn_phys->scn_processed;
2587
2588 /* XXX cleanup? */
2589 if (vd->vdev_ops->vdev_op_leaf)
2590 atomic_add_64(processed, psize);
2591 vs->vs_scan_processed += psize;
2592 }
2593
2594 if (flags & ZIO_FLAG_SELF_HEAL)
2595 vs->vs_self_healed += psize;
2596 }
2597
2598 vs->vs_ops[type]++;
2599 vs->vs_bytes[type] += psize;
2600
2601 mutex_exit(&vd->vdev_stat_lock);
2602 return;
2603 }
2604
2605 if (flags & ZIO_FLAG_SPECULATIVE)
2606 return;
2607
2608 /*
2609 * If this is an I/O error that is going to be retried, then ignore the
2610 * error. Otherwise, the user may interpret B_FAILFAST I/O errors as
2611 * hard errors, when in reality they can happen for any number of
2612 * innocuous reasons (bus resets, MPxIO link failure, etc).
2613 */
2614 if (zio->io_error == EIO &&
2615 !(zio->io_flags & ZIO_FLAG_IO_RETRY))
2616 return;
2617
2618 /*
2619	 * Intent log writes won't propagate their error to the root
2620	 * I/O, so don't mark these types of failures as pool-level
2621 * errors.
2622 */
2623 if (zio->io_vd == NULL && (zio->io_flags & ZIO_FLAG_DONT_PROPAGATE))
2624 return;
2625
2626 mutex_enter(&vd->vdev_stat_lock);
2627 if (type == ZIO_TYPE_READ && !vdev_is_dead(vd)) {
2628 if (zio->io_error == ECKSUM)
2629 vs->vs_checksum_errors++;
2630 else
2631 vs->vs_read_errors++;
2632 }
2633 if (type == ZIO_TYPE_WRITE && !vdev_is_dead(vd))
2634 vs->vs_write_errors++;
2635 mutex_exit(&vd->vdev_stat_lock);
2636
2637 if (type == ZIO_TYPE_WRITE && txg != 0 &&
2638 (!(flags & ZIO_FLAG_IO_REPAIR) ||
2639 (flags & ZIO_FLAG_SCAN_THREAD) ||
2640 spa->spa_claiming)) {
2641 /*
2642 * This is either a normal write (not a repair), or it's
2643 * a repair induced by the scrub thread, or it's a repair
2644 * made by zil_claim() during spa_load() in the first txg.
2645 * In the normal case, we commit the DTL change in the same
2646 * txg as the block was born. In the scrub-induced repair
2647 * case, we know that scrubs run in first-pass syncing context,
2648 * so we commit the DTL change in spa_syncing_txg(spa).
2649 * In the zil_claim() case, we commit in spa_first_txg(spa).
2650 *
2651 * We currently do not make DTL entries for failed spontaneous
2652 * self-healing writes triggered by normal (non-scrubbing)
2653 * reads, because we have no transactional context in which to
2654 * do so -- and it's not clear that it'd be desirable anyway.
2655 */
2656 if (vd->vdev_ops->vdev_op_leaf) {
2657 uint64_t commit_txg = txg;
2658 if (flags & ZIO_FLAG_SCAN_THREAD) {
2659 ASSERT(flags & ZIO_FLAG_IO_REPAIR);
2660 ASSERT(spa_sync_pass(spa) == 1);
2661 vdev_dtl_dirty(vd, DTL_SCRUB, txg, 1);
2662 commit_txg = spa_syncing_txg(spa);
2663 } else if (spa->spa_claiming) {
2664 ASSERT(flags & ZIO_FLAG_IO_REPAIR);
2665 commit_txg = spa_first_txg(spa);
2666 }
2667 ASSERT(commit_txg >= spa_syncing_txg(spa));
2668 if (vdev_dtl_contains(vd, DTL_MISSING, txg, 1))
2669 return;
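			/*
			 * Record the damage in every ancestor's partial
			 * DTL; vd itself is marked DTL_MISSING below.
			 */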
2670 for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent)
2671 vdev_dtl_dirty(pvd, DTL_PARTIAL, txg, 1);
2672 vdev_dirty(vd->vdev_top, VDD_DTL, vd, commit_txg);
2673 }
2674 if (vd != rvd)
2675 vdev_dtl_dirty(vd, DTL_MISSING, txg, 1);
2676 }
2677}
2678
2679/*
2680 * Update the in-core space usage stats for this vdev, its metaslab class,
2681 * and the root vdev.
2682 */
2683void
2684vdev_space_update(vdev_t *vd, int64_t alloc_delta, int64_t defer_delta,
2685 int64_t space_delta)
2686{
2687 int64_t dspace_delta = space_delta;
2688 spa_t *spa = vd->vdev_spa;
2689 vdev_t *rvd = spa->spa_root_vdev;
2690 metaslab_group_t *mg = vd->vdev_mg;
2691 metaslab_class_t *mc = mg ? mg->mg_class : NULL;
2692
2693 ASSERT(vd == vd->vdev_top);
2694
2695 /*
2696 * Apply the inverse of the psize-to-asize (ie. RAID-Z) space-expansion
2697 * factor. We must calculate this here and not at the root vdev
2698 * because the root vdev's psize-to-asize is simply the max of its
2699	 * children's, thus not accurate enough for us.
2700 */
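	/*
	 * Worked example (illustrative numbers, assuming the usual
	 * 512-byte SPA_MINBLOCKSIZE units): a plain disk has no
	 * expansion, so its vdev_deflate_ratio is 512 and dspace_delta
	 * equals space_delta.  A RAID-Z vdev that expands a 128K block
	 * by roughly 4/3 has a ratio near 384, so dspace_delta comes
	 * out around 3/4 of space_delta.
	 */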
2701 ASSERT((dspace_delta & (SPA_MINBLOCKSIZE-1)) == 0);
2702 ASSERT(vd->vdev_deflate_ratio != 0 || vd->vdev_isl2cache);
2703 dspace_delta = (dspace_delta >> SPA_MINBLOCKSHIFT) *
2704 vd->vdev_deflate_ratio;
2705
2706 mutex_enter(&vd->vdev_stat_lock);
2707 vd->vdev_stat.vs_alloc += alloc_delta;
2708 vd->vdev_stat.vs_space += space_delta;
2709 vd->vdev_stat.vs_dspace += dspace_delta;
2710 mutex_exit(&vd->vdev_stat_lock);
2711
2712 if (mc == spa_normal_class(spa)) {
2713 mutex_enter(&rvd->vdev_stat_lock);
2714 rvd->vdev_stat.vs_alloc += alloc_delta;
2715 rvd->vdev_stat.vs_space += space_delta;
2716 rvd->vdev_stat.vs_dspace += dspace_delta;
2717 mutex_exit(&rvd->vdev_stat_lock);
2718 }
2719
2720 if (mc != NULL) {
2721 ASSERT(rvd == vd->vdev_parent);
2722 ASSERT(vd->vdev_ms_count != 0);
2723
2724 metaslab_class_space_update(mc,
2725 alloc_delta, defer_delta, space_delta, dspace_delta);
2726 }
2727}
2728
2729/*
2730 * Mark a top-level vdev's config as dirty, placing it on the dirty list
2731 * so that it will be written out next time the vdev configuration is synced.
2732 * If the root vdev is specified (vdev_top == NULL), dirty all top-level vdevs.
2733 */
2734void
2735vdev_config_dirty(vdev_t *vd)
2736{
2737 spa_t *spa = vd->vdev_spa;
2738 vdev_t *rvd = spa->spa_root_vdev;
2739 int c;
2740
2741 ASSERT(spa_writeable(spa));
2742
2743 /*
2744 * If this is an aux vdev (as with l2cache and spare devices), then we
2745 * update the vdev config manually and set the sync flag.
2746 */
2747 if (vd->vdev_aux != NULL) {
2748 spa_aux_vdev_t *sav = vd->vdev_aux;
2749 nvlist_t **aux;
2750 uint_t naux;
2751
2752 for (c = 0; c < sav->sav_count; c++) {
2753 if (sav->sav_vdevs[c] == vd)
2754 break;
2755 }
2756
2757 if (c == sav->sav_count) {
2758 /*
2759 * We're being removed. There's nothing more to do.
2760 */
2761 ASSERT(sav->sav_sync == B_TRUE);
2762 return;
2763 }
2764
2765 sav->sav_sync = B_TRUE;
2766
2767 if (nvlist_lookup_nvlist_array(sav->sav_config,
2768 ZPOOL_CONFIG_L2CACHE, &aux, &naux) != 0) {
2769 VERIFY(nvlist_lookup_nvlist_array(sav->sav_config,
2770 ZPOOL_CONFIG_SPARES, &aux, &naux) == 0);
2771 }
2772
2773 ASSERT(c < naux);
2774
2775 /*
2776	 * Setting the nvlist in the middle of the array is a little
2777	 * sketchy, but it will work.
2778 */
2779 nvlist_free(aux[c]);
2780 aux[c] = vdev_config_generate(spa, vd, B_TRUE, 0);
2781
2782 return;
2783 }
2784
2785 /*
2786 * The dirty list is protected by the SCL_CONFIG lock. The caller
2787 * must either hold SCL_CONFIG as writer, or must be the sync thread
2788 * (which holds SCL_CONFIG as reader). There's only one sync thread,
2789 * so this is sufficient to ensure mutual exclusion.
2790 */
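	/*
	 * A non-sync-thread caller would typically bracket this call as
	 * follows (sketch only, not a code path in this file):
	 *
	 *	spa_config_enter(spa, SCL_CONFIG, FTAG, RW_WRITER);
	 *	vdev_config_dirty(vd);
	 *	spa_config_exit(spa, SCL_CONFIG, FTAG);
	 */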
2791 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_WRITER) ||
2792 (dsl_pool_sync_context(spa_get_dsl(spa)) &&
2793 spa_config_held(spa, SCL_CONFIG, RW_READER)));
2794
2795 if (vd == rvd) {
2796 for (c = 0; c < rvd->vdev_children; c++)
2797 vdev_config_dirty(rvd->vdev_child[c]);
2798 } else {
2799 ASSERT(vd == vd->vdev_top);
2800
2801 if (!list_link_active(&vd->vdev_config_dirty_node) &&
2802 !vd->vdev_ishole)
2803 list_insert_head(&spa->spa_config_dirty_list, vd);
2804 }
2805}
2806
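/*
 * Remove a top-level vdev from the dirty config list. The same
 * SCL_CONFIG locking rules as vdev_config_dirty() apply.
 */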
2807void
2808vdev_config_clean(vdev_t *vd)
2809{
2810 spa_t *spa = vd->vdev_spa;
2811
2812 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_WRITER) ||
2813 (dsl_pool_sync_context(spa_get_dsl(spa)) &&
2814 spa_config_held(spa, SCL_CONFIG, RW_READER)));
2815
2816 ASSERT(list_link_active(&vd->vdev_config_dirty_node));
2817 list_remove(&spa->spa_config_dirty_list, vd);
2818}
2819
2820/*
2821 * Mark a top-level vdev's state as dirty, so that the next pass of
2822 * spa_sync() can convert this into vdev_config_dirty(). We distinguish
2823 * the state changes from larger config changes because they require
2824 * much less locking, and are often needed for administrative actions.
2825 */
2826void
2827vdev_state_dirty(vdev_t *vd)
2828{
2829 spa_t *spa = vd->vdev_spa;
2830
2831 ASSERT(spa_writeable(spa));
2832 ASSERT(vd == vd->vdev_top);
2833
2834 /*
2835 * The state list is protected by the SCL_STATE lock. The caller
2836 * must either hold SCL_STATE as writer, or must be the sync thread
2837 * (which holds SCL_STATE as reader). There's only one sync thread,
2838 * so this is sufficient to ensure mutual exclusion.
2839 */
2840 ASSERT(spa_config_held(spa, SCL_STATE, RW_WRITER) ||
2841 (dsl_pool_sync_context(spa_get_dsl(spa)) &&
2842 spa_config_held(spa, SCL_STATE, RW_READER)));
2843
2844 if (!list_link_active(&vd->vdev_state_dirty_node) && !vd->vdev_ishole)
2845 list_insert_head(&spa->spa_state_dirty_list, vd);
2846}
2847
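/*
 * Remove a top-level vdev from the dirty state list, under the same
 * SCL_STATE locking rules as vdev_state_dirty().
 */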
2848void
2849vdev_state_clean(vdev_t *vd)
2850{
2851 spa_t *spa = vd->vdev_spa;
2852
2853 ASSERT(spa_config_held(spa, SCL_STATE, RW_WRITER) ||
2854 (dsl_pool_sync_context(spa_get_dsl(spa)) &&
2855 spa_config_held(spa, SCL_STATE, RW_READER)));
2856
2857 ASSERT(list_link_active(&vd->vdev_state_dirty_node));
2858 list_remove(&spa->spa_state_dirty_list, vd);
2859}
2860
2861/*
2862 * Propagate vdev state up from children to parent.
2863 */
2864void
2865vdev_propagate_state(vdev_t *vd)
2866{
2867 spa_t *spa = vd->vdev_spa;
2868 vdev_t *rvd = spa->spa_root_vdev;
2869 int degraded = 0, faulted = 0;
2870 int corrupted = 0;
2871 vdev_t *child;
2872
2873 if (vd->vdev_children > 0) {
2874 for (int c = 0; c < vd->vdev_children; c++) {
2875 child = vd->vdev_child[c];
2876
2877 /*
2878 * Don't factor holes into the decision.
2879 */
2880 if (child->vdev_ishole)
2881 continue;
2882
2883 if (!vdev_readable(child) ||
2884 (!vdev_writeable(child) && spa_writeable(spa))) {
2885 /*
2886 * Root special: if there is a top-level log
2887 * device, treat the root vdev as if it were
2888 * degraded.
2889 */
2890 if (child->vdev_islog && vd == rvd)
2891 degraded++;
2892 else
2893 faulted++;
2894 } else if (child->vdev_state <= VDEV_STATE_DEGRADED) {
2895 degraded++;
2896 }
2897
2898 if (child->vdev_stat.vs_aux == VDEV_AUX_CORRUPT_DATA)
2899 corrupted++;
2900 }
2901
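		/*
		 * Let the vdev type (mirror, raidz, root, ...) decide what
		 * the aggregate faulted/degraded counts mean for this vdev.
		 */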
2902 vd->vdev_ops->vdev_op_state_change(vd, faulted, degraded);
2903
2904 /*
2905 * Root special: if there is a top-level vdev that cannot be
2906 * opened due to corrupted metadata, then propagate the root
2907 * vdev's aux state as 'corrupt' rather than 'insufficient
2908 * replicas'.
2909 */
2910 if (corrupted && vd == rvd &&
2911 rvd->vdev_state == VDEV_STATE_CANT_OPEN)
2912 vdev_set_state(rvd, B_FALSE, VDEV_STATE_CANT_OPEN,
2913 VDEV_AUX_CORRUPT_DATA);
2914 }
2915
2916 if (vd->vdev_parent)
2917 vdev_propagate_state(vd->vdev_parent);
2918}
2919
2920/*
2921 * Set a vdev's state. If this is during an open, we don't update the parent
2922 * state, because we're in the process of opening children depth-first.
2923 * Otherwise, we propagate the change to the parent.
2924 *
2925 * If this routine places a device in a faulted state, an appropriate ereport is
2926 * generated.
2927 */
2928void
2929vdev_set_state(vdev_t *vd, boolean_t isopen, vdev_state_t state, vdev_aux_t aux)
2930{
2931 uint64_t save_state;
2932 spa_t *spa = vd->vdev_spa;
2933
2934 if (state == vd->vdev_state) {
2935 vd->vdev_stat.vs_aux = aux;
2936 return;
2937 }
2938
2939 save_state = vd->vdev_state;
2940
2941 vd->vdev_state = state;
2942 vd->vdev_stat.vs_aux = aux;
2943
2944 /*
2945 * If we are setting the vdev state to anything but an open state, then
2946 * always close the underlying device unless the device has requested
2947 * a delayed close (i.e. we're about to remove or fault the device).
2948 * Otherwise, we keep accessible but invalid devices open forever.
2949 * We don't call vdev_close() itself, because that implies some extra
2950	 * checks (offline, etc.) that we don't want here. This is limited to
2951 * leaf devices, because otherwise closing the device will affect other
2952 * children.
2953 */
2954 if (!vd->vdev_delayed_close && vdev_is_dead(vd) &&
2955 vd->vdev_ops->vdev_op_leaf)
2956 vd->vdev_ops->vdev_op_close(vd);
2957
2958 /*
2959 * If we have brought this vdev back into service, we need
2960 * to notify fmd so that it can gracefully repair any outstanding
2961 * cases due to a missing device. We do this in all cases, even those
2962 * that probably don't correlate to a repaired fault. This is sure to
2963 * catch all cases, and we let the zfs-retire agent sort it out. If
2964 * this is a transient state it's OK, as the retire agent will
2965 * double-check the state of the vdev before repairing it.
2966 */
2967 if (state == VDEV_STATE_HEALTHY && vd->vdev_ops->vdev_op_leaf &&
2968 vd->vdev_prevstate != state)
2969 zfs_post_state_change(spa, vd);
2970
2971 if (vd->vdev_removed &&
2972 state == VDEV_STATE_CANT_OPEN &&
2973 (aux == VDEV_AUX_OPEN_FAILED || vd->vdev_checkremove)) {
2974 /*
2975 * If the previous state is set to VDEV_STATE_REMOVED, then this
2976 * device was previously marked removed and someone attempted to
2977 * reopen it. If this failed due to a nonexistent device, then
2978	 * keep the device in the REMOVED state. We also allow this if
2979 * it is one of our special test online cases, which is only
2980 * attempting to online the device and shouldn't generate an FMA
2981 * fault.
2982 */
2983 vd->vdev_state = VDEV_STATE_REMOVED;
2984 vd->vdev_stat.vs_aux = VDEV_AUX_NONE;
2985 } else if (state == VDEV_STATE_REMOVED) {
2986 vd->vdev_removed = B_TRUE;
2987 } else if (state == VDEV_STATE_CANT_OPEN) {
2988 /*
2989 * If we fail to open a vdev during an import or recovery, we
2990 * mark it as "not available", which signifies that it was
2991 * never there to begin with. Failure to open such a device
2992 * is not considered an error.
2993 */
2994 if ((spa_load_state(spa) == SPA_LOAD_IMPORT ||
2995 spa_load_state(spa) == SPA_LOAD_RECOVER) &&
2996 vd->vdev_ops->vdev_op_leaf)
2997 vd->vdev_not_present = 1;
2998
2999 /*
3000 * Post the appropriate ereport. If the 'prevstate' field is
3001 * set to something other than VDEV_STATE_UNKNOWN, it indicates
3002 * that this is part of a vdev_reopen(). In this case, we don't
3003 * want to post the ereport if the device was already in the
3004 * CANT_OPEN state beforehand.
3005 *
3006 * If the 'checkremove' flag is set, then this is an attempt to
3007 * online the device in response to an insertion event. If we
3008 * hit this case, then we have detected an insertion event for a
3009 * faulted or offline device that wasn't in the removed state.
3010 * In this scenario, we don't post an ereport because we are
3011 * about to replace the device, or attempt an online with
3012 * vdev_forcefault, which will generate the fault for us.
3013 */
3014 if ((vd->vdev_prevstate != state || vd->vdev_forcefault) &&
3015 !vd->vdev_not_present && !vd->vdev_checkremove &&
3016 vd != spa->spa_root_vdev) {
3017 const char *class;
3018
3019 switch (aux) {
3020 case VDEV_AUX_OPEN_FAILED:
3021 class = FM_EREPORT_ZFS_DEVICE_OPEN_FAILED;
3022 break;
3023 case VDEV_AUX_CORRUPT_DATA:
3024 class = FM_EREPORT_ZFS_DEVICE_CORRUPT_DATA;
3025 break;
3026 case VDEV_AUX_NO_REPLICAS:
3027 class = FM_EREPORT_ZFS_DEVICE_NO_REPLICAS;
3028 break;
3029 case VDEV_AUX_BAD_GUID_SUM:
3030 class = FM_EREPORT_ZFS_DEVICE_BAD_GUID_SUM;
3031 break;
3032 case VDEV_AUX_TOO_SMALL:
3033 class = FM_EREPORT_ZFS_DEVICE_TOO_SMALL;
3034 break;
3035 case VDEV_AUX_BAD_LABEL:
3036 class = FM_EREPORT_ZFS_DEVICE_BAD_LABEL;
3037 break;
3038 default:
3039 class = FM_EREPORT_ZFS_DEVICE_UNKNOWN;
3040 }
3041
3042 zfs_ereport_post(class, spa, vd, NULL, save_state, 0);
3043 }
3044
3045 /* Erase any notion of persistent removed state */
3046 vd->vdev_removed = B_FALSE;
3047 } else {
3048 vd->vdev_removed = B_FALSE;
3049 }
3050
3051 if (!isopen && vd->vdev_parent)
3052 vdev_propagate_state(vd->vdev_parent);
3053}
3054
3055/*
3056 * Check the vdev configuration to ensure that it's capable of supporting
3057 * a root pool.
3058 *
3059 * On Solaris, we do not support RAID-Z or partial configuration. In
3060 * addition, only a single top-level vdev is allowed and none of the
3061 * leaves can be wholedisks.
3062 *
3063 * For FreeBSD, we can boot from any configuration. There is a
3064 * limitation that the boot filesystem must be either uncompressed or
3065	 * compressed with lzjb compression, but I'm not sure how to enforce
3066 * that here.
3067 */
3068boolean_t
3069vdev_is_bootable(vdev_t *vd)
3070{
3071#ifdef sun
3072 if (!vd->vdev_ops->vdev_op_leaf) {
3073 char *vdev_type = vd->vdev_ops->vdev_op_type;
3074
3075 if (strcmp(vdev_type, VDEV_TYPE_ROOT) == 0 &&
3076 vd->vdev_children > 1) {
3077 return (B_FALSE);
3078 } else if (strcmp(vdev_type, VDEV_TYPE_RAIDZ) == 0 ||
3079 strcmp(vdev_type, VDEV_TYPE_MISSING) == 0) {
3080 return (B_FALSE);
3081 }
3082 } else if (vd->vdev_wholedisk == 1) {
3083 return (B_FALSE);
3084 }
3085
3086 for (int c = 0; c < vd->vdev_children; c++) {
3087 if (!vdev_is_bootable(vd->vdev_child[c]))
3088 return (B_FALSE);
3089 }
3090#endif /* sun */
3091 return (B_TRUE);
3092}
3093
3094/*
3095 * Load the state from the original vdev tree (ovd) which
3096 * we've retrieved from the MOS config object. If the original
3097 * vdev was offline or faulted then we transfer that state to the
3098 * device in the current vdev tree (nvd).
3099 */
3100void
3101vdev_load_log_state(vdev_t *nvd, vdev_t *ovd)
3102{
3103 spa_t *spa = nvd->vdev_spa;
3104
3105 ASSERT(nvd->vdev_top->vdev_islog);
3106 ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
3107 ASSERT3U(nvd->vdev_guid, ==, ovd->vdev_guid);
3108
3109 for (int c = 0; c < nvd->vdev_children; c++)
3110 vdev_load_log_state(nvd->vdev_child[c], ovd->vdev_child[c]);
3111
3112 if (nvd->vdev_ops->vdev_op_leaf) {
3113 /*
3114 * Restore the persistent vdev state
3115 */
3116 nvd->vdev_offline = ovd->vdev_offline;
3117 nvd->vdev_faulted = ovd->vdev_faulted;
3118 nvd->vdev_degraded = ovd->vdev_degraded;
3119 nvd->vdev_removed = ovd->vdev_removed;
3120 }
3121}
3122
3123/*
3124 * Determine if a log device has valid content. If the vdev was
3125 * removed or faulted in the MOS config then we know that
3126 * the content on the log device has already been written to the pool.
3127 */
3128boolean_t
3129vdev_log_state_valid(vdev_t *vd)
3130{
3131 if (vd->vdev_ops->vdev_op_leaf && !vd->vdev_faulted &&
3132 !vd->vdev_removed)
3133 return (B_TRUE);
3134
3135 for (int c = 0; c < vd->vdev_children; c++)
3136 if (vdev_log_state_valid(vd->vdev_child[c]))
3137 return (B_TRUE);
3138
3139 return (B_FALSE);
3140}
3141
3142/*
3143 * Expand a vdev if possible.
3144 */
3145void
3146vdev_expand(vdev_t *vd, uint64_t txg)
3147{
3148 ASSERT(vd->vdev_top == vd);
3149 ASSERT(spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);
3150
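	/*
	 * If the device now covers at least one more metaslab than is
	 * currently initialized (asize >> ms_shift exceeds ms_count),
	 * initialize the new metaslabs and dirty the config so the
	 * change is synced out.
	 */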
3151 if ((vd->vdev_asize >> vd->vdev_ms_shift) > vd->vdev_ms_count) {
3152 VERIFY(vdev_metaslab_init(vd, txg) == 0);
3153 vdev_config_dirty(vd);
3154 }
3155}
3156
3157/*
3158 * Split a vdev: detach it from its parent in preparation for a pool split.
3159 */
3160void
3161vdev_split(vdev_t *vd)
3162{
3163 vdev_t *cvd, *pvd = vd->vdev_parent;
3164
3165 vdev_remove_child(pvd, vd);
3166 vdev_compact_children(pvd);
3167
3168 cvd = pvd->vdev_child[0];
3169 if (pvd->vdev_children == 1) {
3170 vdev_remove_parent(cvd);
3171 cvd->vdev_splitting = B_TRUE;
3172 }
3173 vdev_propagate_state(cvd);
3174}