vdev.c revision 339139
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2018 by Delphix. All rights reserved.
 * Copyright 2017 Nexenta Systems, Inc.
 * Copyright 2013 Martin Matuska <mm@FreeBSD.org>. All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 * Copyright 2016 Toomas Soome <tsoome@me.com>
 * Copyright 2017 Joyent, Inc.
 */

#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/bpobj.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_dir.h>
#include <sys/vdev_impl.h>
#include <sys/uberblock_impl.h>
#include <sys/metaslab.h>
#include <sys/metaslab_impl.h>
#include <sys/space_map.h>
#include <sys/space_reftree.h>
#include <sys/zio.h>
#include <sys/zap.h>
#include <sys/fs/zfs.h>
#include <sys/arc.h>
#include <sys/zil.h>
#include <sys/dsl_scan.h>
#include <sys/abd.h>
#include <sys/trim_map.h>
#include <sys/vdev_initialize.h>

SYSCTL_DECL(_vfs_zfs);
SYSCTL_NODE(_vfs_zfs, OID_AUTO, vdev, CTLFLAG_RW, 0, "ZFS VDEV");

/*
 * Virtual device management.
 */

/*
 * The limit for ZFS to automatically increase a top-level vdev's ashift
 * from logical ashift to physical ashift.
 *
 * Example: one or more 512B emulation child vdevs
 *		child->vdev_ashift = 9 (512 bytes)
 *		child->vdev_physical_ashift = 12 (4096 bytes)
 *		zfs_max_auto_ashift = 11 (2048 bytes)
 *		zfs_min_auto_ashift = 9 (512 bytes)
 *
 * On pool creation or the addition of a new top-level vdev, ZFS will
 * increase the ashift of the top-level vdev to 2048 as limited by
 * zfs_max_auto_ashift.
 *
 * Example: one or more 512B emulation child vdevs
 *		child->vdev_ashift = 9 (512 bytes)
 *		child->vdev_physical_ashift = 12 (4096 bytes)
 *		zfs_max_auto_ashift = 13 (8192 bytes)
 *		zfs_min_auto_ashift = 9 (512 bytes)
 *
 * On pool creation or the addition of a new top-level vdev, ZFS will
 * increase the ashift of the top-level vdev to 4096 to match the
 * max vdev_physical_ashift.
 *
 * Example: one or more 512B emulation child vdevs
 *		child->vdev_ashift = 9 (512 bytes)
 *		child->vdev_physical_ashift = 9 (512 bytes)
 *		zfs_max_auto_ashift = 13 (8192 bytes)
 *		zfs_min_auto_ashift = 12 (4096 bytes)
 *
 * On pool creation or the addition of a new top-level vdev, ZFS will
 * increase the ashift of the top-level vdev to 4096 to match the
 * zfs_min_auto_ashift.
 */
static uint64_t zfs_max_auto_ashift = SPA_MAXASHIFT;
static uint64_t zfs_min_auto_ashift = SPA_MINASHIFT;

static int
sysctl_vfs_zfs_max_auto_ashift(SYSCTL_HANDLER_ARGS)
{
	uint64_t val;
	int err;

	val = zfs_max_auto_ashift;
	err = sysctl_handle_64(oidp, &val, 0, req);
	if (err != 0 || req->newptr == NULL)
		return (err);

	if (val > SPA_MAXASHIFT || val < zfs_min_auto_ashift)
		return (EINVAL);

	zfs_max_auto_ashift = val;

	return (0);
}
SYSCTL_PROC(_vfs_zfs, OID_AUTO, max_auto_ashift,
    CTLTYPE_U64 | CTLFLAG_MPSAFE | CTLFLAG_RW, 0, sizeof(uint64_t),
    sysctl_vfs_zfs_max_auto_ashift, "QU",
    "Max ashift used when optimizing for logical -> physical sector size on "
    "new top-level vdevs.");

static int
sysctl_vfs_zfs_min_auto_ashift(SYSCTL_HANDLER_ARGS)
{
	uint64_t val;
	int err;

	val = zfs_min_auto_ashift;
	err = sysctl_handle_64(oidp, &val, 0, req);
	if (err != 0 || req->newptr == NULL)
		return (err);

	if (val < SPA_MINASHIFT || val > zfs_max_auto_ashift)
		return (EINVAL);

	zfs_min_auto_ashift = val;

	return (0);
}
SYSCTL_PROC(_vfs_zfs, OID_AUTO, min_auto_ashift,
    CTLTYPE_U64 | CTLFLAG_MPSAFE | CTLFLAG_RW, 0, sizeof(uint64_t),
    sysctl_vfs_zfs_min_auto_ashift, "QU",
    "Min ashift used when creating new top-level vdevs.");

static vdev_ops_t *vdev_ops_table[] = {
	&vdev_root_ops,
	&vdev_raidz_ops,
	&vdev_mirror_ops,
	&vdev_replacing_ops,
	&vdev_spare_ops,
#ifdef _KERNEL
	&vdev_geom_ops,
#else
	&vdev_disk_ops,
#endif
	&vdev_file_ops,
	&vdev_missing_ops,
	&vdev_hole_ops,
	&vdev_indirect_ops,
	NULL
};


/* target number of metaslabs per top-level vdev */
int vdev_max_ms_count = 200;
SYSCTL_INT(_vfs_zfs_vdev, OID_AUTO, max_ms_count, CTLFLAG_RDTUN,
    &vdev_max_ms_count, 0,
    "Maximum number of metaslabs per top-level vdev");

/* minimum number of metaslabs per top-level vdev */
int vdev_min_ms_count = 16;
SYSCTL_INT(_vfs_zfs_vdev, OID_AUTO, min_ms_count, CTLFLAG_RDTUN,
    &vdev_min_ms_count, 0,
    "Minimum number of metaslabs per top-level vdev");

/* practical upper limit of total metaslabs per top-level vdev */
int vdev_ms_count_limit = 1ULL << 17;

/* lower limit for metaslab size (512M) */
int vdev_default_ms_shift = 29;
SYSCTL_INT(_vfs_zfs_vdev, OID_AUTO, default_ms_shift, CTLFLAG_RDTUN,
    &vdev_default_ms_shift, 0,
    "Shift between vdev size and number of metaslabs");

/* upper limit for metaslab size (256G) */
int vdev_max_ms_shift = 38;

boolean_t vdev_validate_skip = B_FALSE;

/*
 * Since the DTL space map of a vdev is not expected to have a lot of
 * entries, we default its block size to 4K.
 */
int vdev_dtl_sm_blksz = (1 << 12);
SYSCTL_INT(_vfs_zfs, OID_AUTO, dtl_sm_blksz, CTLFLAG_RDTUN,
    &vdev_dtl_sm_blksz, 0,
    "Block size for DTL space map.  Power of 2 and greater than 4096.");

/*
 * vdev-wide space maps that have lots of entries written to them at
 * the end of each transaction can benefit from a higher I/O bandwidth
 * (e.g. vdev_obsolete_sm), thus we default their block size to 128K.
 */
int vdev_standard_sm_blksz = (1 << 17);
SYSCTL_INT(_vfs_zfs, OID_AUTO, standard_sm_blksz, CTLFLAG_RDTUN,
    &vdev_standard_sm_blksz, 0,
    "Block size for standard space map.  Power of 2 and greater than 4096.");

/*PRINTFLIKE2*/
void
vdev_dbgmsg(vdev_t *vd, const char *fmt, ...)
{
	va_list adx;
	char buf[256];

	va_start(adx, fmt);
	(void) vsnprintf(buf, sizeof (buf), fmt, adx);
	va_end(adx);

	if (vd->vdev_path != NULL) {
		zfs_dbgmsg("%s vdev '%s': %s", vd->vdev_ops->vdev_op_type,
		    vd->vdev_path, buf);
	} else {
		zfs_dbgmsg("%s-%llu vdev (guid %llu): %s",
		    vd->vdev_ops->vdev_op_type,
		    (u_longlong_t)vd->vdev_id,
		    (u_longlong_t)vd->vdev_guid, buf);
	}
}

void
vdev_dbgmsg_print_tree(vdev_t *vd, int indent)
{
	char state[20];

	if (vd->vdev_ishole || vd->vdev_ops == &vdev_missing_ops) {
		zfs_dbgmsg("%*svdev %u: %s", indent, "", vd->vdev_id,
		    vd->vdev_ops->vdev_op_type);
		return;
	}

	switch (vd->vdev_state) {
	case VDEV_STATE_UNKNOWN:
		(void) snprintf(state, sizeof (state), "unknown");
		break;
	case VDEV_STATE_CLOSED:
		(void) snprintf(state, sizeof (state), "closed");
		break;
	case VDEV_STATE_OFFLINE:
		(void) snprintf(state, sizeof (state), "offline");
		break;
	case VDEV_STATE_REMOVED:
		(void) snprintf(state, sizeof (state), "removed");
		break;
	case VDEV_STATE_CANT_OPEN:
		(void) snprintf(state, sizeof (state), "can't open");
		break;
	case VDEV_STATE_FAULTED:
		(void) snprintf(state, sizeof (state), "faulted");
		break;
	case VDEV_STATE_DEGRADED:
		(void) snprintf(state, sizeof (state), "degraded");
		break;
	case VDEV_STATE_HEALTHY:
		(void) snprintf(state, sizeof (state), "healthy");
		break;
	default:
		(void) snprintf(state, sizeof (state), "<state %u>",
		    (uint_t)vd->vdev_state);
	}

	zfs_dbgmsg("%*svdev %u: %s%s, guid: %llu, path: %s, %s", indent,
	    "", (int)vd->vdev_id, vd->vdev_ops->vdev_op_type,
	    vd->vdev_islog ? " (log)" : "",
	    (u_longlong_t)vd->vdev_guid,
	    vd->vdev_path ? vd->vdev_path : "N/A", state);

	for (uint64_t i = 0; i < vd->vdev_children; i++)
		vdev_dbgmsg_print_tree(vd->vdev_child[i], indent + 2);
}

/*
 * Given a vdev type, return the appropriate ops vector.
 */
static vdev_ops_t *
vdev_getops(const char *type)
{
	vdev_ops_t *ops, **opspp;

	for (opspp = vdev_ops_table; (ops = *opspp) != NULL; opspp++)
		if (strcmp(ops->vdev_op_type, type) == 0)
			break;

	return (ops);
}

/* ARGSUSED */
void
vdev_default_xlate(vdev_t *vd, const range_seg_t *in, range_seg_t *res)
{
	res->rs_start = in->rs_start;
	res->rs_end = in->rs_end;
}

/*
 * Default asize function: return the MAX of psize with the asize of
 * all children.  This is what's used by anything other than RAID-Z.
 */
uint64_t
vdev_default_asize(vdev_t *vd, uint64_t psize)
{
	uint64_t asize = P2ROUNDUP(psize, 1ULL << vd->vdev_top->vdev_ashift);
	uint64_t csize;

	for (int c = 0; c < vd->vdev_children; c++) {
		csize = vdev_psize_to_asize(vd->vdev_child[c], psize);
		asize = MAX(asize, csize);
	}

	return (asize);
}

/*
 * Get the minimum allocatable size. We define the allocatable size as
 * the vdev's asize rounded to the nearest metaslab. This allows us to
 * replace or attach devices which don't have the same physical size but
 * can still satisfy the same number of allocations.
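 *
 * As a rough illustration (not part of this file's logic; the helper and
 * its numbers are hypothetical), the rounding and the RAID-Z share
 * described above and in vdev_get_min_asize() below come down to simple
 * integer arithmetic:
 *
 *     // Sketch only: asize is a top-level vdev's asize, metaslabs are
 *     // (1 << ms_shift) bytes; mirrors the P2ALIGN rounding and the
 *     // per-child ceiling division used for raidz parents.
 *     static uint64_t
 *     example_min_asize(uint64_t asize, uint64_t ms_shift,
 *         uint64_t raidz_children)
 *     {
 *             uint64_t top = asize & ~((1ULL << ms_shift) - 1);
 *             if (raidz_children > 1)
 *                     return ((top + raidz_children - 1) / raidz_children);
 *             return (top);
 *     }
 *
 * For example, an asize of 1 TiB with 512 MiB metaslabs rounds down to a
 * multiple of 512 MiB; under a 4-way raidz parent each child must then
 * provide at least a quarter of that, rounded up.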
330 */ 331uint64_t 332vdev_get_min_asize(vdev_t *vd) 333{ 334 vdev_t *pvd = vd->vdev_parent; 335 336 /* 337 * If our parent is NULL (inactive spare or cache) or is the root, 338 * just return our own asize. 339 */ 340 if (pvd == NULL) 341 return (vd->vdev_asize); 342 343 /* 344 * The top-level vdev just returns the allocatable size rounded 345 * to the nearest metaslab. 346 */ 347 if (vd == vd->vdev_top) 348 return (P2ALIGN(vd->vdev_asize, 1ULL << vd->vdev_ms_shift)); 349 350 /* 351 * The allocatable space for a raidz vdev is N * sizeof(smallest child), 352 * so each child must provide at least 1/Nth of its asize. 353 */ 354 if (pvd->vdev_ops == &vdev_raidz_ops) 355 return ((pvd->vdev_min_asize + pvd->vdev_children - 1) / 356 pvd->vdev_children); 357 358 return (pvd->vdev_min_asize); 359} 360 361void 362vdev_set_min_asize(vdev_t *vd) 363{ 364 vd->vdev_min_asize = vdev_get_min_asize(vd); 365 366 for (int c = 0; c < vd->vdev_children; c++) 367 vdev_set_min_asize(vd->vdev_child[c]); 368} 369 370vdev_t * 371vdev_lookup_top(spa_t *spa, uint64_t vdev) 372{ 373 vdev_t *rvd = spa->spa_root_vdev; 374 375 ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0); 376 377 if (vdev < rvd->vdev_children) { 378 ASSERT(rvd->vdev_child[vdev] != NULL); 379 return (rvd->vdev_child[vdev]); 380 } 381 382 return (NULL); 383} 384 385vdev_t * 386vdev_lookup_by_guid(vdev_t *vd, uint64_t guid) 387{ 388 vdev_t *mvd; 389 390 if (vd->vdev_guid == guid) 391 return (vd); 392 393 for (int c = 0; c < vd->vdev_children; c++) 394 if ((mvd = vdev_lookup_by_guid(vd->vdev_child[c], guid)) != 395 NULL) 396 return (mvd); 397 398 return (NULL); 399} 400 401static int 402vdev_count_leaves_impl(vdev_t *vd) 403{ 404 int n = 0; 405 406 if (vd->vdev_ops->vdev_op_leaf) 407 return (1); 408 409 for (int c = 0; c < vd->vdev_children; c++) 410 n += vdev_count_leaves_impl(vd->vdev_child[c]); 411 412 return (n); 413} 414 415int 416vdev_count_leaves(spa_t *spa) 417{ 418 return (vdev_count_leaves_impl(spa->spa_root_vdev)); 419} 420 421void 422vdev_add_child(vdev_t *pvd, vdev_t *cvd) 423{ 424 size_t oldsize, newsize; 425 uint64_t id = cvd->vdev_id; 426 vdev_t **newchild; 427 spa_t *spa = cvd->vdev_spa; 428 429 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL); 430 ASSERT(cvd->vdev_parent == NULL); 431 432 cvd->vdev_parent = pvd; 433 434 if (pvd == NULL) 435 return; 436 437 ASSERT(id >= pvd->vdev_children || pvd->vdev_child[id] == NULL); 438 439 oldsize = pvd->vdev_children * sizeof (vdev_t *); 440 pvd->vdev_children = MAX(pvd->vdev_children, id + 1); 441 newsize = pvd->vdev_children * sizeof (vdev_t *); 442 443 newchild = kmem_zalloc(newsize, KM_SLEEP); 444 if (pvd->vdev_child != NULL) { 445 bcopy(pvd->vdev_child, newchild, oldsize); 446 kmem_free(pvd->vdev_child, oldsize); 447 } 448 449 pvd->vdev_child = newchild; 450 pvd->vdev_child[id] = cvd; 451 452 cvd->vdev_top = (pvd->vdev_top ? pvd->vdev_top: cvd); 453 ASSERT(cvd->vdev_top->vdev_parent->vdev_parent == NULL); 454 455 /* 456 * Walk up all ancestors to update guid sum. 
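 *
 * The invariant being maintained (a sketch, not code used by this file)
 * is that each vdev's vdev_guid_sum equals the sum of the guids in its
 * subtree, itself included, so adding a child means crediting the
 * child's entire subtree sum to every ancestor:
 *
 *     // Hypothetical checker: recompute the subtree sum bottom-up and
 *     // compare it with the cached vdev_guid_sum.
 *     static uint64_t
 *     example_guid_sum(vdev_t *vd)
 *     {
 *             uint64_t sum = vd->vdev_guid;
 *             for (int c = 0; c < vd->vdev_children; c++)
 *                     sum += example_guid_sum(vd->vdev_child[c]);
 *             ASSERT3U(sum, ==, vd->vdev_guid_sum);
 *             return (sum);
 *     }
 *
 * vdev_remove_child() below performs the mirror-image walk, subtracting
 * the departing child's subtree sum from every ancestor.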
457 */ 458 for (; pvd != NULL; pvd = pvd->vdev_parent) 459 pvd->vdev_guid_sum += cvd->vdev_guid_sum; 460} 461 462void 463vdev_remove_child(vdev_t *pvd, vdev_t *cvd) 464{ 465 int c; 466 uint_t id = cvd->vdev_id; 467 468 ASSERT(cvd->vdev_parent == pvd); 469 470 if (pvd == NULL) 471 return; 472 473 ASSERT(id < pvd->vdev_children); 474 ASSERT(pvd->vdev_child[id] == cvd); 475 476 pvd->vdev_child[id] = NULL; 477 cvd->vdev_parent = NULL; 478 479 for (c = 0; c < pvd->vdev_children; c++) 480 if (pvd->vdev_child[c]) 481 break; 482 483 if (c == pvd->vdev_children) { 484 kmem_free(pvd->vdev_child, c * sizeof (vdev_t *)); 485 pvd->vdev_child = NULL; 486 pvd->vdev_children = 0; 487 } 488 489 /* 490 * Walk up all ancestors to update guid sum. 491 */ 492 for (; pvd != NULL; pvd = pvd->vdev_parent) 493 pvd->vdev_guid_sum -= cvd->vdev_guid_sum; 494} 495 496/* 497 * Remove any holes in the child array. 498 */ 499void 500vdev_compact_children(vdev_t *pvd) 501{ 502 vdev_t **newchild, *cvd; 503 int oldc = pvd->vdev_children; 504 int newc; 505 506 ASSERT(spa_config_held(pvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL); 507 508 for (int c = newc = 0; c < oldc; c++) 509 if (pvd->vdev_child[c]) 510 newc++; 511 512 newchild = kmem_alloc(newc * sizeof (vdev_t *), KM_SLEEP); 513 514 for (int c = newc = 0; c < oldc; c++) { 515 if ((cvd = pvd->vdev_child[c]) != NULL) { 516 newchild[newc] = cvd; 517 cvd->vdev_id = newc++; 518 } 519 } 520 521 kmem_free(pvd->vdev_child, oldc * sizeof (vdev_t *)); 522 pvd->vdev_child = newchild; 523 pvd->vdev_children = newc; 524} 525 526/* 527 * Allocate and minimally initialize a vdev_t. 528 */ 529vdev_t * 530vdev_alloc_common(spa_t *spa, uint_t id, uint64_t guid, vdev_ops_t *ops) 531{ 532 vdev_t *vd; 533 vdev_indirect_config_t *vic; 534 535 vd = kmem_zalloc(sizeof (vdev_t), KM_SLEEP); 536 vic = &vd->vdev_indirect_config; 537 538 if (spa->spa_root_vdev == NULL) { 539 ASSERT(ops == &vdev_root_ops); 540 spa->spa_root_vdev = vd; 541 spa->spa_load_guid = spa_generate_guid(NULL); 542 } 543 544 if (guid == 0 && ops != &vdev_hole_ops) { 545 if (spa->spa_root_vdev == vd) { 546 /* 547 * The root vdev's guid will also be the pool guid, 548 * which must be unique among all pools. 549 */ 550 guid = spa_generate_guid(NULL); 551 } else { 552 /* 553 * Any other vdev's guid must be unique within the pool. 
554 */ 555 guid = spa_generate_guid(spa); 556 } 557 ASSERT(!spa_guid_exists(spa_guid(spa), guid)); 558 } 559 560 vd->vdev_spa = spa; 561 vd->vdev_id = id; 562 vd->vdev_guid = guid; 563 vd->vdev_guid_sum = guid; 564 vd->vdev_ops = ops; 565 vd->vdev_state = VDEV_STATE_CLOSED; 566 vd->vdev_ishole = (ops == &vdev_hole_ops); 567 vic->vic_prev_indirect_vdev = UINT64_MAX; 568 569 rw_init(&vd->vdev_indirect_rwlock, NULL, RW_DEFAULT, NULL); 570 mutex_init(&vd->vdev_obsolete_lock, NULL, MUTEX_DEFAULT, NULL); 571 vd->vdev_obsolete_segments = range_tree_create(NULL, NULL); 572 573 mutex_init(&vd->vdev_dtl_lock, NULL, MUTEX_DEFAULT, NULL); 574 mutex_init(&vd->vdev_stat_lock, NULL, MUTEX_DEFAULT, NULL); 575 mutex_init(&vd->vdev_probe_lock, NULL, MUTEX_DEFAULT, NULL); 576 mutex_init(&vd->vdev_queue_lock, NULL, MUTEX_DEFAULT, NULL); 577 mutex_init(&vd->vdev_scan_io_queue_lock, NULL, MUTEX_DEFAULT, NULL); 578 mutex_init(&vd->vdev_initialize_lock, NULL, MUTEX_DEFAULT, NULL); 579 mutex_init(&vd->vdev_initialize_io_lock, NULL, MUTEX_DEFAULT, NULL); 580 cv_init(&vd->vdev_initialize_cv, NULL, CV_DEFAULT, NULL); 581 cv_init(&vd->vdev_initialize_io_cv, NULL, CV_DEFAULT, NULL); 582 583 for (int t = 0; t < DTL_TYPES; t++) { 584 vd->vdev_dtl[t] = range_tree_create(NULL, NULL); 585 } 586 txg_list_create(&vd->vdev_ms_list, spa, 587 offsetof(struct metaslab, ms_txg_node)); 588 txg_list_create(&vd->vdev_dtl_list, spa, 589 offsetof(struct vdev, vdev_dtl_node)); 590 vd->vdev_stat.vs_timestamp = gethrtime(); 591 vdev_queue_init(vd); 592 vdev_cache_init(vd); 593 594 return (vd); 595} 596 597/* 598 * Allocate a new vdev. The 'alloctype' is used to control whether we are 599 * creating a new vdev or loading an existing one - the behavior is slightly 600 * different for each case. 601 */ 602int 603vdev_alloc(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent, uint_t id, 604 int alloctype) 605{ 606 vdev_ops_t *ops; 607 char *type; 608 uint64_t guid = 0, islog, nparity; 609 vdev_t *vd; 610 vdev_indirect_config_t *vic; 611 612 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL); 613 614 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0) 615 return (SET_ERROR(EINVAL)); 616 617 if ((ops = vdev_getops(type)) == NULL) 618 return (SET_ERROR(EINVAL)); 619 620 /* 621 * If this is a load, get the vdev guid from the nvlist. 622 * Otherwise, vdev_alloc_common() will generate one for us. 623 */ 624 if (alloctype == VDEV_ALLOC_LOAD) { 625 uint64_t label_id; 626 627 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID, &label_id) || 628 label_id != id) 629 return (SET_ERROR(EINVAL)); 630 631 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0) 632 return (SET_ERROR(EINVAL)); 633 } else if (alloctype == VDEV_ALLOC_SPARE) { 634 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0) 635 return (SET_ERROR(EINVAL)); 636 } else if (alloctype == VDEV_ALLOC_L2CACHE) { 637 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0) 638 return (SET_ERROR(EINVAL)); 639 } else if (alloctype == VDEV_ALLOC_ROOTPOOL) { 640 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0) 641 return (SET_ERROR(EINVAL)); 642 } 643 644 /* 645 * The first allocated vdev must be of type 'root'. 646 */ 647 if (ops != &vdev_root_ops && spa->spa_root_vdev == NULL) 648 return (SET_ERROR(EINVAL)); 649 650 /* 651 * Determine whether we're a log vdev. 
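 *
 * ZPOOL_CONFIG_IS_LOG is optional in the config, so the lookup below is
 * the usual "preset a default, overwrite if present" nvlist idiom.  A
 * minimal sketch of the same pattern, outside this function:
 *
 *     // Sketch only; "nv" is any vdev config nvlist.
 *     uint64_t is_log = 0;
 *     (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG, &is_log);
 *     if (is_log != 0) {
 *             // treat this vdev as a separate intent log device
 *     }
 *
 * The return value is deliberately ignored: a missing pair simply leaves
 * the preset default in place.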
652 */ 653 islog = 0; 654 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG, &islog); 655 if (islog && spa_version(spa) < SPA_VERSION_SLOGS) 656 return (SET_ERROR(ENOTSUP)); 657 658 if (ops == &vdev_hole_ops && spa_version(spa) < SPA_VERSION_HOLES) 659 return (SET_ERROR(ENOTSUP)); 660 661 /* 662 * Set the nparity property for RAID-Z vdevs. 663 */ 664 nparity = -1ULL; 665 if (ops == &vdev_raidz_ops) { 666 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY, 667 &nparity) == 0) { 668 if (nparity == 0 || nparity > VDEV_RAIDZ_MAXPARITY) 669 return (SET_ERROR(EINVAL)); 670 /* 671 * Previous versions could only support 1 or 2 parity 672 * device. 673 */ 674 if (nparity > 1 && 675 spa_version(spa) < SPA_VERSION_RAIDZ2) 676 return (SET_ERROR(ENOTSUP)); 677 if (nparity > 2 && 678 spa_version(spa) < SPA_VERSION_RAIDZ3) 679 return (SET_ERROR(ENOTSUP)); 680 } else { 681 /* 682 * We require the parity to be specified for SPAs that 683 * support multiple parity levels. 684 */ 685 if (spa_version(spa) >= SPA_VERSION_RAIDZ2) 686 return (SET_ERROR(EINVAL)); 687 /* 688 * Otherwise, we default to 1 parity device for RAID-Z. 689 */ 690 nparity = 1; 691 } 692 } else { 693 nparity = 0; 694 } 695 ASSERT(nparity != -1ULL); 696 697 vd = vdev_alloc_common(spa, id, guid, ops); 698 vic = &vd->vdev_indirect_config; 699 700 vd->vdev_islog = islog; 701 vd->vdev_nparity = nparity; 702 703 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &vd->vdev_path) == 0) 704 vd->vdev_path = spa_strdup(vd->vdev_path); 705 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &vd->vdev_devid) == 0) 706 vd->vdev_devid = spa_strdup(vd->vdev_devid); 707 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PHYS_PATH, 708 &vd->vdev_physpath) == 0) 709 vd->vdev_physpath = spa_strdup(vd->vdev_physpath); 710 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_FRU, &vd->vdev_fru) == 0) 711 vd->vdev_fru = spa_strdup(vd->vdev_fru); 712 713 /* 714 * Set the whole_disk property. If it's not specified, leave the value 715 * as -1. 716 */ 717 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK, 718 &vd->vdev_wholedisk) != 0) 719 vd->vdev_wholedisk = -1ULL; 720 721 ASSERT0(vic->vic_mapping_object); 722 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_INDIRECT_OBJECT, 723 &vic->vic_mapping_object); 724 ASSERT0(vic->vic_births_object); 725 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_INDIRECT_BIRTHS, 726 &vic->vic_births_object); 727 ASSERT3U(vic->vic_prev_indirect_vdev, ==, UINT64_MAX); 728 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_PREV_INDIRECT_VDEV, 729 &vic->vic_prev_indirect_vdev); 730 731 /* 732 * Look for the 'not present' flag. This will only be set if the device 733 * was not present at the time of import. 734 */ 735 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, 736 &vd->vdev_not_present); 737 738 /* 739 * Get the alignment requirement. 740 */ 741 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ASHIFT, &vd->vdev_ashift); 742 743 /* 744 * Retrieve the vdev creation time. 745 */ 746 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_CREATE_TXG, 747 &vd->vdev_crtxg); 748 749 /* 750 * If we're a top-level vdev, try to load the allocation parameters. 
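 *
 * For a rough sense of scale (illustrative numbers only): a top-level
 * vdev carves its asize into metaslabs of (1ULL << vdev_ms_shift) bytes,
 * so the metaslab count is simply a shift:
 *
 *     // Sketch only: with ms_shift = 34 (16 GiB metaslabs), a 2 TiB
 *     // top-level vdev gets 2 TiB >> 34 = 128 metaslabs, which falls
 *     // between vdev_min_ms_count and vdev_max_ms_count above.
 *     uint64_t asize = 2ULL << 40;
 *     uint64_t ms_shift = 34;
 *     uint64_t ms_count = asize >> ms_shift;	// 128
 *
 * How ms_shift is chosen for new vdevs lives outside this function; here
 * it is simply read back from the label for an existing pool.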
751 */ 752 if (parent && !parent->vdev_parent && 753 (alloctype == VDEV_ALLOC_LOAD || alloctype == VDEV_ALLOC_SPLIT)) { 754 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_METASLAB_ARRAY, 755 &vd->vdev_ms_array); 756 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_METASLAB_SHIFT, 757 &vd->vdev_ms_shift); 758 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ASIZE, 759 &vd->vdev_asize); 760 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVING, 761 &vd->vdev_removing); 762 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_VDEV_TOP_ZAP, 763 &vd->vdev_top_zap); 764 } else { 765 ASSERT0(vd->vdev_top_zap); 766 } 767 768 if (parent && !parent->vdev_parent && alloctype != VDEV_ALLOC_ATTACH) { 769 ASSERT(alloctype == VDEV_ALLOC_LOAD || 770 alloctype == VDEV_ALLOC_ADD || 771 alloctype == VDEV_ALLOC_SPLIT || 772 alloctype == VDEV_ALLOC_ROOTPOOL); 773 vd->vdev_mg = metaslab_group_create(islog ? 774 spa_log_class(spa) : spa_normal_class(spa), vd, 775 spa->spa_alloc_count); 776 } 777 778 if (vd->vdev_ops->vdev_op_leaf && 779 (alloctype == VDEV_ALLOC_LOAD || alloctype == VDEV_ALLOC_SPLIT)) { 780 (void) nvlist_lookup_uint64(nv, 781 ZPOOL_CONFIG_VDEV_LEAF_ZAP, &vd->vdev_leaf_zap); 782 } else { 783 ASSERT0(vd->vdev_leaf_zap); 784 } 785 786 /* 787 * If we're a leaf vdev, try to load the DTL object and other state. 788 */ 789 790 if (vd->vdev_ops->vdev_op_leaf && 791 (alloctype == VDEV_ALLOC_LOAD || alloctype == VDEV_ALLOC_L2CACHE || 792 alloctype == VDEV_ALLOC_ROOTPOOL)) { 793 if (alloctype == VDEV_ALLOC_LOAD) { 794 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DTL, 795 &vd->vdev_dtl_object); 796 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_UNSPARE, 797 &vd->vdev_unspare); 798 } 799 800 if (alloctype == VDEV_ALLOC_ROOTPOOL) { 801 uint64_t spare = 0; 802 803 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE, 804 &spare) == 0 && spare) 805 spa_spare_add(vd); 806 } 807 808 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, 809 &vd->vdev_offline); 810 811 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_RESILVER_TXG, 812 &vd->vdev_resilver_txg); 813 814 /* 815 * When importing a pool, we want to ignore the persistent fault 816 * state, as the diagnosis made on another system may not be 817 * valid in the current context. Local vdevs will 818 * remain in the faulted state. 819 */ 820 if (spa_load_state(spa) == SPA_LOAD_OPEN) { 821 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, 822 &vd->vdev_faulted); 823 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DEGRADED, 824 &vd->vdev_degraded); 825 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, 826 &vd->vdev_removed); 827 828 if (vd->vdev_faulted || vd->vdev_degraded) { 829 char *aux; 830 831 vd->vdev_label_aux = 832 VDEV_AUX_ERR_EXCEEDED; 833 if (nvlist_lookup_string(nv, 834 ZPOOL_CONFIG_AUX_STATE, &aux) == 0 && 835 strcmp(aux, "external") == 0) 836 vd->vdev_label_aux = VDEV_AUX_EXTERNAL; 837 } 838 } 839 } 840 841 /* 842 * Add ourselves to the parent's list of children. 843 */ 844 vdev_add_child(parent, vd); 845 846 *vdp = vd; 847 848 return (0); 849} 850 851void 852vdev_free(vdev_t *vd) 853{ 854 spa_t *spa = vd->vdev_spa; 855 ASSERT3P(vd->vdev_initialize_thread, ==, NULL); 856 857 /* 858 * Scan queues are normally destroyed at the end of a scan. If the 859 * queue exists here, that implies the vdev is being removed while 860 * the scan is still running. 
861 */ 862 if (vd->vdev_scan_io_queue != NULL) { 863 mutex_enter(&vd->vdev_scan_io_queue_lock); 864 dsl_scan_io_queue_destroy(vd->vdev_scan_io_queue); 865 vd->vdev_scan_io_queue = NULL; 866 mutex_exit(&vd->vdev_scan_io_queue_lock); 867 } 868 869 /* 870 * vdev_free() implies closing the vdev first. This is simpler than 871 * trying to ensure complicated semantics for all callers. 872 */ 873 vdev_close(vd); 874 875 ASSERT(!list_link_active(&vd->vdev_config_dirty_node)); 876 ASSERT(!list_link_active(&vd->vdev_state_dirty_node)); 877 878 /* 879 * Free all children. 880 */ 881 for (int c = 0; c < vd->vdev_children; c++) 882 vdev_free(vd->vdev_child[c]); 883 884 ASSERT(vd->vdev_child == NULL); 885 ASSERT(vd->vdev_guid_sum == vd->vdev_guid); 886 ASSERT(vd->vdev_initialize_thread == NULL); 887 888 /* 889 * Discard allocation state. 890 */ 891 if (vd->vdev_mg != NULL) { 892 vdev_metaslab_fini(vd); 893 metaslab_group_destroy(vd->vdev_mg); 894 } 895 896 ASSERT0(vd->vdev_stat.vs_space); 897 ASSERT0(vd->vdev_stat.vs_dspace); 898 ASSERT0(vd->vdev_stat.vs_alloc); 899 900 /* 901 * Remove this vdev from its parent's child list. 902 */ 903 vdev_remove_child(vd->vdev_parent, vd); 904 905 ASSERT(vd->vdev_parent == NULL); 906 907 /* 908 * Clean up vdev structure. 909 */ 910 vdev_queue_fini(vd); 911 vdev_cache_fini(vd); 912 913 if (vd->vdev_path) 914 spa_strfree(vd->vdev_path); 915 if (vd->vdev_devid) 916 spa_strfree(vd->vdev_devid); 917 if (vd->vdev_physpath) 918 spa_strfree(vd->vdev_physpath); 919 if (vd->vdev_fru) 920 spa_strfree(vd->vdev_fru); 921 922 if (vd->vdev_isspare) 923 spa_spare_remove(vd); 924 if (vd->vdev_isl2cache) 925 spa_l2cache_remove(vd); 926 927 txg_list_destroy(&vd->vdev_ms_list); 928 txg_list_destroy(&vd->vdev_dtl_list); 929 930 mutex_enter(&vd->vdev_dtl_lock); 931 space_map_close(vd->vdev_dtl_sm); 932 for (int t = 0; t < DTL_TYPES; t++) { 933 range_tree_vacate(vd->vdev_dtl[t], NULL, NULL); 934 range_tree_destroy(vd->vdev_dtl[t]); 935 } 936 mutex_exit(&vd->vdev_dtl_lock); 937 938 EQUIV(vd->vdev_indirect_births != NULL, 939 vd->vdev_indirect_mapping != NULL); 940 if (vd->vdev_indirect_births != NULL) { 941 vdev_indirect_mapping_close(vd->vdev_indirect_mapping); 942 vdev_indirect_births_close(vd->vdev_indirect_births); 943 } 944 945 if (vd->vdev_obsolete_sm != NULL) { 946 ASSERT(vd->vdev_removing || 947 vd->vdev_ops == &vdev_indirect_ops); 948 space_map_close(vd->vdev_obsolete_sm); 949 vd->vdev_obsolete_sm = NULL; 950 } 951 range_tree_destroy(vd->vdev_obsolete_segments); 952 rw_destroy(&vd->vdev_indirect_rwlock); 953 mutex_destroy(&vd->vdev_obsolete_lock); 954 955 mutex_destroy(&vd->vdev_queue_lock); 956 mutex_destroy(&vd->vdev_dtl_lock); 957 mutex_destroy(&vd->vdev_stat_lock); 958 mutex_destroy(&vd->vdev_probe_lock); 959 mutex_destroy(&vd->vdev_scan_io_queue_lock); 960 mutex_destroy(&vd->vdev_initialize_lock); 961 mutex_destroy(&vd->vdev_initialize_io_lock); 962 cv_destroy(&vd->vdev_initialize_io_cv); 963 cv_destroy(&vd->vdev_initialize_cv); 964 965 if (vd == spa->spa_root_vdev) 966 spa->spa_root_vdev = NULL; 967 968 kmem_free(vd, sizeof (vdev_t)); 969} 970 971/* 972 * Transfer top-level vdev state from svd to tvd. 
973 */ 974static void 975vdev_top_transfer(vdev_t *svd, vdev_t *tvd) 976{ 977 spa_t *spa = svd->vdev_spa; 978 metaslab_t *msp; 979 vdev_t *vd; 980 int t; 981 982 ASSERT(tvd == tvd->vdev_top); 983 984 tvd->vdev_ms_array = svd->vdev_ms_array; 985 tvd->vdev_ms_shift = svd->vdev_ms_shift; 986 tvd->vdev_ms_count = svd->vdev_ms_count; 987 tvd->vdev_top_zap = svd->vdev_top_zap; 988 989 svd->vdev_ms_array = 0; 990 svd->vdev_ms_shift = 0; 991 svd->vdev_ms_count = 0; 992 svd->vdev_top_zap = 0; 993 994 if (tvd->vdev_mg) 995 ASSERT3P(tvd->vdev_mg, ==, svd->vdev_mg); 996 tvd->vdev_mg = svd->vdev_mg; 997 tvd->vdev_ms = svd->vdev_ms; 998 999 svd->vdev_mg = NULL; 1000 svd->vdev_ms = NULL; 1001 1002 if (tvd->vdev_mg != NULL) 1003 tvd->vdev_mg->mg_vd = tvd; 1004 1005 tvd->vdev_checkpoint_sm = svd->vdev_checkpoint_sm; 1006 svd->vdev_checkpoint_sm = NULL; 1007 1008 tvd->vdev_stat.vs_alloc = svd->vdev_stat.vs_alloc; 1009 tvd->vdev_stat.vs_space = svd->vdev_stat.vs_space; 1010 tvd->vdev_stat.vs_dspace = svd->vdev_stat.vs_dspace; 1011 1012 svd->vdev_stat.vs_alloc = 0; 1013 svd->vdev_stat.vs_space = 0; 1014 svd->vdev_stat.vs_dspace = 0; 1015 1016 /* 1017 * State which may be set on a top-level vdev that's in the 1018 * process of being removed. 1019 */ 1020 ASSERT0(tvd->vdev_indirect_config.vic_births_object); 1021 ASSERT0(tvd->vdev_indirect_config.vic_mapping_object); 1022 ASSERT3U(tvd->vdev_indirect_config.vic_prev_indirect_vdev, ==, -1ULL); 1023 ASSERT3P(tvd->vdev_indirect_mapping, ==, NULL); 1024 ASSERT3P(tvd->vdev_indirect_births, ==, NULL); 1025 ASSERT3P(tvd->vdev_obsolete_sm, ==, NULL); 1026 ASSERT0(tvd->vdev_removing); 1027 tvd->vdev_removing = svd->vdev_removing; 1028 tvd->vdev_indirect_config = svd->vdev_indirect_config; 1029 tvd->vdev_indirect_mapping = svd->vdev_indirect_mapping; 1030 tvd->vdev_indirect_births = svd->vdev_indirect_births; 1031 range_tree_swap(&svd->vdev_obsolete_segments, 1032 &tvd->vdev_obsolete_segments); 1033 tvd->vdev_obsolete_sm = svd->vdev_obsolete_sm; 1034 svd->vdev_indirect_config.vic_mapping_object = 0; 1035 svd->vdev_indirect_config.vic_births_object = 0; 1036 svd->vdev_indirect_config.vic_prev_indirect_vdev = -1ULL; 1037 svd->vdev_indirect_mapping = NULL; 1038 svd->vdev_indirect_births = NULL; 1039 svd->vdev_obsolete_sm = NULL; 1040 svd->vdev_removing = 0; 1041 1042 for (t = 0; t < TXG_SIZE; t++) { 1043 while ((msp = txg_list_remove(&svd->vdev_ms_list, t)) != NULL) 1044 (void) txg_list_add(&tvd->vdev_ms_list, msp, t); 1045 while ((vd = txg_list_remove(&svd->vdev_dtl_list, t)) != NULL) 1046 (void) txg_list_add(&tvd->vdev_dtl_list, vd, t); 1047 if (txg_list_remove_this(&spa->spa_vdev_txg_list, svd, t)) 1048 (void) txg_list_add(&spa->spa_vdev_txg_list, tvd, t); 1049 } 1050 1051 if (list_link_active(&svd->vdev_config_dirty_node)) { 1052 vdev_config_clean(svd); 1053 vdev_config_dirty(tvd); 1054 } 1055 1056 if (list_link_active(&svd->vdev_state_dirty_node)) { 1057 vdev_state_clean(svd); 1058 vdev_state_dirty(tvd); 1059 } 1060 1061 tvd->vdev_deflate_ratio = svd->vdev_deflate_ratio; 1062 svd->vdev_deflate_ratio = 0; 1063 1064 tvd->vdev_islog = svd->vdev_islog; 1065 svd->vdev_islog = 0; 1066 1067 dsl_scan_io_queue_vdev_xfer(svd, tvd); 1068} 1069 1070static void 1071vdev_top_update(vdev_t *tvd, vdev_t *vd) 1072{ 1073 if (vd == NULL) 1074 return; 1075 1076 vd->vdev_top = tvd; 1077 1078 for (int c = 0; c < vd->vdev_children; c++) 1079 vdev_top_update(tvd, vd->vdev_child[c]); 1080} 1081 1082/* 1083 * Add a mirror/replacing vdev above an existing vdev. 
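 *
 * This is the primitive behind attaching a mirror and behind sparing in
 * a replacement: the existing child keeps its data, and a new interior
 * vdev (mirror, replacing or spare) is spliced in between it and its old
 * parent.  A sketch of the caller's side, with hypothetical variables:
 *
 *     // cvd is an existing leaf; ops selects the interior vdev type.
 *     vdev_t *mvd = vdev_add_parent(cvd, &vdev_replacing_ops);
 *     // mvd now occupies cvd's old slot and cvd is mvd's first child;
 *     // the new device is subsequently added as mvd's second child.
 *
 * The interior vdev copies the child's size and ashift fields so that
 * allocation decisions made against the top-level vdev are unchanged.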
1084 */ 1085vdev_t * 1086vdev_add_parent(vdev_t *cvd, vdev_ops_t *ops) 1087{ 1088 spa_t *spa = cvd->vdev_spa; 1089 vdev_t *pvd = cvd->vdev_parent; 1090 vdev_t *mvd; 1091 1092 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL); 1093 1094 mvd = vdev_alloc_common(spa, cvd->vdev_id, 0, ops); 1095 1096 mvd->vdev_asize = cvd->vdev_asize; 1097 mvd->vdev_min_asize = cvd->vdev_min_asize; 1098 mvd->vdev_max_asize = cvd->vdev_max_asize; 1099 mvd->vdev_psize = cvd->vdev_psize; 1100 mvd->vdev_ashift = cvd->vdev_ashift; 1101 mvd->vdev_logical_ashift = cvd->vdev_logical_ashift; 1102 mvd->vdev_physical_ashift = cvd->vdev_physical_ashift; 1103 mvd->vdev_state = cvd->vdev_state; 1104 mvd->vdev_crtxg = cvd->vdev_crtxg; 1105 1106 vdev_remove_child(pvd, cvd); 1107 vdev_add_child(pvd, mvd); 1108 cvd->vdev_id = mvd->vdev_children; 1109 vdev_add_child(mvd, cvd); 1110 vdev_top_update(cvd->vdev_top, cvd->vdev_top); 1111 1112 if (mvd == mvd->vdev_top) 1113 vdev_top_transfer(cvd, mvd); 1114 1115 return (mvd); 1116} 1117 1118/* 1119 * Remove a 1-way mirror/replacing vdev from the tree. 1120 */ 1121void 1122vdev_remove_parent(vdev_t *cvd) 1123{ 1124 vdev_t *mvd = cvd->vdev_parent; 1125 vdev_t *pvd = mvd->vdev_parent; 1126 1127 ASSERT(spa_config_held(cvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL); 1128 1129 ASSERT(mvd->vdev_children == 1); 1130 ASSERT(mvd->vdev_ops == &vdev_mirror_ops || 1131 mvd->vdev_ops == &vdev_replacing_ops || 1132 mvd->vdev_ops == &vdev_spare_ops); 1133 cvd->vdev_ashift = mvd->vdev_ashift; 1134 cvd->vdev_logical_ashift = mvd->vdev_logical_ashift; 1135 cvd->vdev_physical_ashift = mvd->vdev_physical_ashift; 1136 1137 vdev_remove_child(mvd, cvd); 1138 vdev_remove_child(pvd, mvd); 1139 1140 /* 1141 * If cvd will replace mvd as a top-level vdev, preserve mvd's guid. 1142 * Otherwise, we could have detached an offline device, and when we 1143 * go to import the pool we'll think we have two top-level vdevs, 1144 * instead of a different version of the same top-level vdev. 1145 */ 1146 if (mvd->vdev_top == mvd) { 1147 uint64_t guid_delta = mvd->vdev_guid - cvd->vdev_guid; 1148 cvd->vdev_orig_guid = cvd->vdev_guid; 1149 cvd->vdev_guid += guid_delta; 1150 cvd->vdev_guid_sum += guid_delta; 1151 } 1152 cvd->vdev_id = mvd->vdev_id; 1153 vdev_add_child(pvd, cvd); 1154 vdev_top_update(cvd->vdev_top, cvd->vdev_top); 1155 1156 if (cvd == cvd->vdev_top) 1157 vdev_top_transfer(mvd, cvd); 1158 1159 ASSERT(mvd->vdev_children == 0); 1160 vdev_free(mvd); 1161} 1162 1163int 1164vdev_metaslab_init(vdev_t *vd, uint64_t txg) 1165{ 1166 spa_t *spa = vd->vdev_spa; 1167 objset_t *mos = spa->spa_meta_objset; 1168 uint64_t m; 1169 uint64_t oldc = vd->vdev_ms_count; 1170 uint64_t newc = vd->vdev_asize >> vd->vdev_ms_shift; 1171 metaslab_t **mspp; 1172 int error; 1173 1174 ASSERT(txg == 0 || spa_config_held(spa, SCL_ALLOC, RW_WRITER)); 1175 1176 /* 1177 * This vdev is not being allocated from yet or is a hole. 1178 */ 1179 if (vd->vdev_ms_shift == 0) 1180 return (0); 1181 1182 ASSERT(!vd->vdev_ishole); 1183 1184 ASSERT(oldc <= newc); 1185 1186 mspp = kmem_zalloc(newc * sizeof (*mspp), KM_SLEEP); 1187 1188 if (oldc != 0) { 1189 bcopy(vd->vdev_ms, mspp, oldc * sizeof (*mspp)); 1190 kmem_free(vd->vdev_ms, oldc * sizeof (*mspp)); 1191 } 1192 1193 vd->vdev_ms = mspp; 1194 vd->vdev_ms_count = newc; 1195 for (m = oldc; m < newc; m++) { 1196 uint64_t object = 0; 1197 1198 /* 1199 * vdev_ms_array may be 0 if we are creating the "fake" 1200 * metaslabs for an indirect vdev for zdb's leak detection. 1201 * See zdb_leak_init(). 
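 *
 * When it is non-zero, vdev_ms_array names a plain DMU object holding an
 * array of uint64_t space map object ids, one per metaslab, so metaslab
 * m's entry lives at byte offset m * sizeof (uint64_t).  A sketch of
 * reading one entry (this is what the dmu_read() below does):
 *
 *     // Sketch only; mos and ms_array come from the pool being loaded.
 *     uint64_t object = 0;
 *     int err = dmu_read(mos, ms_array, m * sizeof (uint64_t),
 *         sizeof (uint64_t), &object, DMU_READ_PREFETCH);
 *     // object == 0 means this metaslab has no space map on disk yet.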
1202 */ 1203 if (txg == 0 && vd->vdev_ms_array != 0) { 1204 error = dmu_read(mos, vd->vdev_ms_array, 1205 m * sizeof (uint64_t), sizeof (uint64_t), &object, 1206 DMU_READ_PREFETCH); 1207 if (error != 0) { 1208 vdev_dbgmsg(vd, "unable to read the metaslab " 1209 "array [error=%d]", error); 1210 return (error); 1211 } 1212 } 1213 1214 error = metaslab_init(vd->vdev_mg, m, object, txg, 1215 &(vd->vdev_ms[m])); 1216 if (error != 0) { 1217 vdev_dbgmsg(vd, "metaslab_init failed [error=%d]", 1218 error); 1219 return (error); 1220 } 1221 } 1222 1223 if (txg == 0) 1224 spa_config_enter(spa, SCL_ALLOC, FTAG, RW_WRITER); 1225 1226 /* 1227 * If the vdev is being removed we don't activate 1228 * the metaslabs since we want to ensure that no new 1229 * allocations are performed on this device. 1230 */ 1231 if (oldc == 0 && !vd->vdev_removing) 1232 metaslab_group_activate(vd->vdev_mg); 1233 1234 if (txg == 0) 1235 spa_config_exit(spa, SCL_ALLOC, FTAG); 1236 1237 return (0); 1238} 1239 1240void 1241vdev_metaslab_fini(vdev_t *vd) 1242{ 1243 if (vd->vdev_checkpoint_sm != NULL) { 1244 ASSERT(spa_feature_is_active(vd->vdev_spa, 1245 SPA_FEATURE_POOL_CHECKPOINT)); 1246 space_map_close(vd->vdev_checkpoint_sm); 1247 /* 1248 * Even though we close the space map, we need to set its 1249 * pointer to NULL. The reason is that vdev_metaslab_fini() 1250 * may be called multiple times for certain operations 1251 * (i.e. when destroying a pool) so we need to ensure that 1252 * this clause never executes twice. This logic is similar 1253 * to the one used for the vdev_ms clause below. 1254 */ 1255 vd->vdev_checkpoint_sm = NULL; 1256 } 1257 1258 if (vd->vdev_ms != NULL) { 1259 uint64_t count = vd->vdev_ms_count; 1260 1261 metaslab_group_passivate(vd->vdev_mg); 1262 for (uint64_t m = 0; m < count; m++) { 1263 metaslab_t *msp = vd->vdev_ms[m]; 1264 1265 if (msp != NULL) 1266 metaslab_fini(msp); 1267 } 1268 kmem_free(vd->vdev_ms, count * sizeof (metaslab_t *)); 1269 vd->vdev_ms = NULL; 1270 1271 vd->vdev_ms_count = 0; 1272 } 1273 ASSERT0(vd->vdev_ms_count); 1274} 1275 1276typedef struct vdev_probe_stats { 1277 boolean_t vps_readable; 1278 boolean_t vps_writeable; 1279 int vps_flags; 1280} vdev_probe_stats_t; 1281 1282static void 1283vdev_probe_done(zio_t *zio) 1284{ 1285 spa_t *spa = zio->io_spa; 1286 vdev_t *vd = zio->io_vd; 1287 vdev_probe_stats_t *vps = zio->io_private; 1288 1289 ASSERT(vd->vdev_probe_zio != NULL); 1290 1291 if (zio->io_type == ZIO_TYPE_READ) { 1292 if (zio->io_error == 0) 1293 vps->vps_readable = 1; 1294 if (zio->io_error == 0 && spa_writeable(spa)) { 1295 zio_nowait(zio_write_phys(vd->vdev_probe_zio, vd, 1296 zio->io_offset, zio->io_size, zio->io_abd, 1297 ZIO_CHECKSUM_OFF, vdev_probe_done, vps, 1298 ZIO_PRIORITY_SYNC_WRITE, vps->vps_flags, B_TRUE)); 1299 } else { 1300 abd_free(zio->io_abd); 1301 } 1302 } else if (zio->io_type == ZIO_TYPE_WRITE) { 1303 if (zio->io_error == 0) 1304 vps->vps_writeable = 1; 1305 abd_free(zio->io_abd); 1306 } else if (zio->io_type == ZIO_TYPE_NULL) { 1307 zio_t *pio; 1308 1309 vd->vdev_cant_read |= !vps->vps_readable; 1310 vd->vdev_cant_write |= !vps->vps_writeable; 1311 1312 if (vdev_readable(vd) && 1313 (vdev_writeable(vd) || !spa_writeable(spa))) { 1314 zio->io_error = 0; 1315 } else { 1316 ASSERT(zio->io_error != 0); 1317 vdev_dbgmsg(vd, "failed probe"); 1318 zfs_ereport_post(FM_EREPORT_ZFS_PROBE_FAILURE, 1319 spa, vd, NULL, 0, 0); 1320 zio->io_error = SET_ERROR(ENXIO); 1321 } 1322 1323 mutex_enter(&vd->vdev_probe_lock); 1324 ASSERT(vd->vdev_probe_zio == zio); 1325 
vd->vdev_probe_zio = NULL; 1326 mutex_exit(&vd->vdev_probe_lock); 1327 1328 zio_link_t *zl = NULL; 1329 while ((pio = zio_walk_parents(zio, &zl)) != NULL) 1330 if (!vdev_accessible(vd, pio)) 1331 pio->io_error = SET_ERROR(ENXIO); 1332 1333 kmem_free(vps, sizeof (*vps)); 1334 } 1335} 1336 1337/* 1338 * Determine whether this device is accessible. 1339 * 1340 * Read and write to several known locations: the pad regions of each 1341 * vdev label but the first, which we leave alone in case it contains 1342 * a VTOC. 1343 */ 1344zio_t * 1345vdev_probe(vdev_t *vd, zio_t *zio) 1346{ 1347 spa_t *spa = vd->vdev_spa; 1348 vdev_probe_stats_t *vps = NULL; 1349 zio_t *pio; 1350 1351 ASSERT(vd->vdev_ops->vdev_op_leaf); 1352 1353 /* 1354 * Don't probe the probe. 1355 */ 1356 if (zio && (zio->io_flags & ZIO_FLAG_PROBE)) 1357 return (NULL); 1358 1359 /* 1360 * To prevent 'probe storms' when a device fails, we create 1361 * just one probe i/o at a time. All zios that want to probe 1362 * this vdev will become parents of the probe io. 1363 */ 1364 mutex_enter(&vd->vdev_probe_lock); 1365 1366 if ((pio = vd->vdev_probe_zio) == NULL) { 1367 vps = kmem_zalloc(sizeof (*vps), KM_SLEEP); 1368 1369 vps->vps_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_PROBE | 1370 ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_AGGREGATE | 1371 ZIO_FLAG_TRYHARD; 1372 1373 if (spa_config_held(spa, SCL_ZIO, RW_WRITER)) { 1374 /* 1375 * vdev_cant_read and vdev_cant_write can only 1376 * transition from TRUE to FALSE when we have the 1377 * SCL_ZIO lock as writer; otherwise they can only 1378 * transition from FALSE to TRUE. This ensures that 1379 * any zio looking at these values can assume that 1380 * failures persist for the life of the I/O. That's 1381 * important because when a device has intermittent 1382 * connectivity problems, we want to ensure that 1383 * they're ascribed to the device (ENXIO) and not 1384 * the zio (EIO). 1385 * 1386 * Since we hold SCL_ZIO as writer here, clear both 1387 * values so the probe can reevaluate from first 1388 * principles. 1389 */ 1390 vps->vps_flags |= ZIO_FLAG_CONFIG_WRITER; 1391 vd->vdev_cant_read = B_FALSE; 1392 vd->vdev_cant_write = B_FALSE; 1393 } 1394 1395 vd->vdev_probe_zio = pio = zio_null(NULL, spa, vd, 1396 vdev_probe_done, vps, 1397 vps->vps_flags | ZIO_FLAG_DONT_PROPAGATE); 1398 1399 /* 1400 * We can't change the vdev state in this context, so we 1401 * kick off an async task to do it on our behalf. 
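 *
 * The overall shape, for anyone tracing a probe (a sketch of the flow
 * above and below, not additional code):
 *
 *     // The first caller under vdev_probe_lock creates the probe:
 *     //   vd->vdev_probe_zio = zio_null(..., vdev_probe_done, vps, ...);
 *     // Every caller, first or not, then simply becomes a parent of it:
 *     //   zio_add_child(zio, vd->vdev_probe_zio);
 *     // so a burst of failing I/Os shares one probe and one verdict.
 *
 * The verdict itself is delivered in vdev_probe_done() above, which
 * propagates ENXIO to any parent that can no longer access the vdev.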
1402 */ 1403 if (zio != NULL) { 1404 vd->vdev_probe_wanted = B_TRUE; 1405 spa_async_request(spa, SPA_ASYNC_PROBE); 1406 } 1407 } 1408 1409 if (zio != NULL) 1410 zio_add_child(zio, pio); 1411 1412 mutex_exit(&vd->vdev_probe_lock); 1413 1414 if (vps == NULL) { 1415 ASSERT(zio != NULL); 1416 return (NULL); 1417 } 1418 1419 for (int l = 1; l < VDEV_LABELS; l++) { 1420 zio_nowait(zio_read_phys(pio, vd, 1421 vdev_label_offset(vd->vdev_psize, l, 1422 offsetof(vdev_label_t, vl_pad2)), VDEV_PAD_SIZE, 1423 abd_alloc_for_io(VDEV_PAD_SIZE, B_TRUE), 1424 ZIO_CHECKSUM_OFF, vdev_probe_done, vps, 1425 ZIO_PRIORITY_SYNC_READ, vps->vps_flags, B_TRUE)); 1426 } 1427 1428 if (zio == NULL) 1429 return (pio); 1430 1431 zio_nowait(pio); 1432 return (NULL); 1433} 1434 1435static void 1436vdev_open_child(void *arg) 1437{ 1438 vdev_t *vd = arg; 1439 1440 vd->vdev_open_thread = curthread; 1441 vd->vdev_open_error = vdev_open(vd); 1442 vd->vdev_open_thread = NULL; 1443} 1444 1445boolean_t 1446vdev_uses_zvols(vdev_t *vd) 1447{ 1448 if (vd->vdev_path && strncmp(vd->vdev_path, ZVOL_DIR, 1449 strlen(ZVOL_DIR)) == 0) 1450 return (B_TRUE); 1451 for (int c = 0; c < vd->vdev_children; c++) 1452 if (vdev_uses_zvols(vd->vdev_child[c])) 1453 return (B_TRUE); 1454 return (B_FALSE); 1455} 1456 1457void 1458vdev_open_children(vdev_t *vd) 1459{ 1460 taskq_t *tq; 1461 int children = vd->vdev_children; 1462 1463 /* 1464 * in order to handle pools on top of zvols, do the opens 1465 * in a single thread so that the same thread holds the 1466 * spa_namespace_lock 1467 */ 1468 if (B_TRUE || vdev_uses_zvols(vd)) { 1469 for (int c = 0; c < children; c++) 1470 vd->vdev_child[c]->vdev_open_error = 1471 vdev_open(vd->vdev_child[c]); 1472 return; 1473 } 1474 tq = taskq_create("vdev_open", children, minclsyspri, 1475 children, children, TASKQ_PREPOPULATE); 1476 1477 for (int c = 0; c < children; c++) 1478 VERIFY(taskq_dispatch(tq, vdev_open_child, vd->vdev_child[c], 1479 TQ_SLEEP) != 0); 1480 1481 taskq_destroy(tq); 1482} 1483 1484/* 1485 * Compute the raidz-deflation ratio. Note, we hard-code 1486 * in 128k (1 << 17) because it is the "typical" blocksize. 1487 * Even though SPA_MAXBLOCKSIZE changed, this algorithm can not change, 1488 * otherwise it would inconsistently account for existing bp's. 1489 */ 1490static void 1491vdev_set_deflate_ratio(vdev_t *vd) 1492{ 1493 if (vd == vd->vdev_top && !vd->vdev_ishole && vd->vdev_ashift != 0) { 1494 vd->vdev_deflate_ratio = (1 << 17) / 1495 (vdev_psize_to_asize(vd, 1 << 17) >> SPA_MINBLOCKSHIFT); 1496 } 1497} 1498 1499/* 1500 * Prepare a virtual device for access. 1501 */ 1502int 1503vdev_open(vdev_t *vd) 1504{ 1505 spa_t *spa = vd->vdev_spa; 1506 int error; 1507 uint64_t osize = 0; 1508 uint64_t max_osize = 0; 1509 uint64_t asize, max_asize, psize; 1510 uint64_t logical_ashift = 0; 1511 uint64_t physical_ashift = 0; 1512 1513 ASSERT(vd->vdev_open_thread == curthread || 1514 spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL); 1515 ASSERT(vd->vdev_state == VDEV_STATE_CLOSED || 1516 vd->vdev_state == VDEV_STATE_CANT_OPEN || 1517 vd->vdev_state == VDEV_STATE_OFFLINE); 1518 1519 vd->vdev_stat.vs_aux = VDEV_AUX_NONE; 1520 vd->vdev_cant_read = B_FALSE; 1521 vd->vdev_cant_write = B_FALSE; 1522 vd->vdev_notrim = B_FALSE; 1523 vd->vdev_min_asize = vdev_get_min_asize(vd); 1524 1525 /* 1526 * If this vdev is not removed, check its fault status. If it's 1527 * faulted, bail out of the open. 
1528 */ 1529 if (!vd->vdev_removed && vd->vdev_faulted) { 1530 ASSERT(vd->vdev_children == 0); 1531 ASSERT(vd->vdev_label_aux == VDEV_AUX_ERR_EXCEEDED || 1532 vd->vdev_label_aux == VDEV_AUX_EXTERNAL); 1533 vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED, 1534 vd->vdev_label_aux); 1535 return (SET_ERROR(ENXIO)); 1536 } else if (vd->vdev_offline) { 1537 ASSERT(vd->vdev_children == 0); 1538 vdev_set_state(vd, B_TRUE, VDEV_STATE_OFFLINE, VDEV_AUX_NONE); 1539 return (SET_ERROR(ENXIO)); 1540 } 1541 1542 error = vd->vdev_ops->vdev_op_open(vd, &osize, &max_osize, 1543 &logical_ashift, &physical_ashift); 1544 1545 /* 1546 * Reset the vdev_reopening flag so that we actually close 1547 * the vdev on error. 1548 */ 1549 vd->vdev_reopening = B_FALSE; 1550 if (zio_injection_enabled && error == 0) 1551 error = zio_handle_device_injection(vd, NULL, ENXIO); 1552 1553 if (error) { 1554 if (vd->vdev_removed && 1555 vd->vdev_stat.vs_aux != VDEV_AUX_OPEN_FAILED) 1556 vd->vdev_removed = B_FALSE; 1557 1558 if (vd->vdev_stat.vs_aux == VDEV_AUX_CHILDREN_OFFLINE) { 1559 vdev_set_state(vd, B_TRUE, VDEV_STATE_OFFLINE, 1560 vd->vdev_stat.vs_aux); 1561 } else { 1562 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN, 1563 vd->vdev_stat.vs_aux); 1564 } 1565 return (error); 1566 } 1567 1568 vd->vdev_removed = B_FALSE; 1569 1570 /* 1571 * Recheck the faulted flag now that we have confirmed that 1572 * the vdev is accessible. If we're faulted, bail. 1573 */ 1574 if (vd->vdev_faulted) { 1575 ASSERT(vd->vdev_children == 0); 1576 ASSERT(vd->vdev_label_aux == VDEV_AUX_ERR_EXCEEDED || 1577 vd->vdev_label_aux == VDEV_AUX_EXTERNAL); 1578 vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED, 1579 vd->vdev_label_aux); 1580 return (SET_ERROR(ENXIO)); 1581 } 1582 1583 if (vd->vdev_degraded) { 1584 ASSERT(vd->vdev_children == 0); 1585 vdev_set_state(vd, B_TRUE, VDEV_STATE_DEGRADED, 1586 VDEV_AUX_ERR_EXCEEDED); 1587 } else { 1588 vdev_set_state(vd, B_TRUE, VDEV_STATE_HEALTHY, 0); 1589 } 1590 1591 /* 1592 * For hole or missing vdevs we just return success. 1593 */ 1594 if (vd->vdev_ishole || vd->vdev_ops == &vdev_missing_ops) 1595 return (0); 1596 1597 if (zfs_trim_enabled && !vd->vdev_notrim && vd->vdev_ops->vdev_op_leaf) 1598 trim_map_create(vd); 1599 1600 for (int c = 0; c < vd->vdev_children; c++) { 1601 if (vd->vdev_child[c]->vdev_state != VDEV_STATE_HEALTHY) { 1602 vdev_set_state(vd, B_TRUE, VDEV_STATE_DEGRADED, 1603 VDEV_AUX_NONE); 1604 break; 1605 } 1606 } 1607 1608 osize = P2ALIGN(osize, (uint64_t)sizeof (vdev_label_t)); 1609 max_osize = P2ALIGN(max_osize, (uint64_t)sizeof (vdev_label_t)); 1610 1611 if (vd->vdev_children == 0) { 1612 if (osize < SPA_MINDEVSIZE) { 1613 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN, 1614 VDEV_AUX_TOO_SMALL); 1615 return (SET_ERROR(EOVERFLOW)); 1616 } 1617 psize = osize; 1618 asize = osize - (VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE); 1619 max_asize = max_osize - (VDEV_LABEL_START_SIZE + 1620 VDEV_LABEL_END_SIZE); 1621 } else { 1622 if (vd->vdev_parent != NULL && osize < SPA_MINDEVSIZE - 1623 (VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE)) { 1624 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN, 1625 VDEV_AUX_TOO_SMALL); 1626 return (SET_ERROR(EOVERFLOW)); 1627 } 1628 psize = 0; 1629 asize = osize; 1630 max_asize = max_osize; 1631 } 1632 1633 vd->vdev_psize = psize; 1634 1635 /* 1636 * Make sure the allocatable size hasn't shrunk too much. 
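 *
 * "Too much" is relative to the vdev_min_asize computed earlier: the
 * label regions are carved off both ends of the device before the
 * comparison, so a replacement only has to cover the previously
 * committed, metaslab-aligned asize.  A sketch with symbolic sizes (the
 * constants come from the vdev label layout in vdev_impl.h):
 *
 *     // Sketch only; device_capacity is a hypothetical input.
 *     uint64_t osize = device_capacity;
 *     osize = P2ALIGN(osize, (uint64_t)sizeof (vdev_label_t));
 *     uint64_t asize = osize -
 *         (VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE);
 *     // the open fails with EINVAL if asize < vd->vdev_min_asize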
1637 */ 1638 if (asize < vd->vdev_min_asize) { 1639 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN, 1640 VDEV_AUX_BAD_LABEL); 1641 return (SET_ERROR(EINVAL)); 1642 } 1643 1644 vd->vdev_physical_ashift = 1645 MAX(physical_ashift, vd->vdev_physical_ashift); 1646 vd->vdev_logical_ashift = MAX(logical_ashift, vd->vdev_logical_ashift); 1647 vd->vdev_ashift = MAX(vd->vdev_logical_ashift, vd->vdev_ashift); 1648 1649 if (vd->vdev_logical_ashift > SPA_MAXASHIFT) { 1650 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN, 1651 VDEV_AUX_ASHIFT_TOO_BIG); 1652 return (EINVAL); 1653 } 1654 1655 if (vd->vdev_asize == 0) { 1656 /* 1657 * This is the first-ever open, so use the computed values. 1658 * For testing purposes, a higher ashift can be requested. 1659 */ 1660 vd->vdev_asize = asize; 1661 vd->vdev_max_asize = max_asize; 1662 } else { 1663 /* 1664 * Make sure the alignment requirement hasn't increased. 1665 */ 1666 if (vd->vdev_ashift > vd->vdev_top->vdev_ashift && 1667 vd->vdev_ops->vdev_op_leaf) { 1668 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN, 1669 VDEV_AUX_BAD_LABEL); 1670 return (EINVAL); 1671 } 1672 vd->vdev_max_asize = max_asize; 1673 } 1674 1675 /* 1676 * If all children are healthy we update asize if either: 1677 * The asize has increased, due to a device expansion caused by dynamic 1678 * LUN growth or vdev replacement, and automatic expansion is enabled; 1679 * making the additional space available. 1680 * 1681 * The asize has decreased, due to a device shrink usually caused by a 1682 * vdev replace with a smaller device. This ensures that calculations 1683 * based of max_asize and asize e.g. esize are always valid. It's safe 1684 * to do this as we've already validated that asize is greater than 1685 * vdev_min_asize. 1686 */ 1687 if (vd->vdev_state == VDEV_STATE_HEALTHY && 1688 ((asize > vd->vdev_asize && 1689 (vd->vdev_expanding || spa->spa_autoexpand)) || 1690 (asize < vd->vdev_asize))) 1691 vd->vdev_asize = asize; 1692 1693 vdev_set_min_asize(vd); 1694 1695 /* 1696 * Ensure we can issue some IO before declaring the 1697 * vdev open for business. 1698 */ 1699 if (vd->vdev_ops->vdev_op_leaf && 1700 (error = zio_wait(vdev_probe(vd, NULL))) != 0) { 1701 vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED, 1702 VDEV_AUX_ERR_EXCEEDED); 1703 return (error); 1704 } 1705 1706 /* 1707 * Track the min and max ashift values for normal data devices. 1708 */ 1709 if (vd->vdev_top == vd && vd->vdev_ashift != 0 && 1710 !vd->vdev_islog && vd->vdev_aux == NULL) { 1711 if (vd->vdev_ashift > spa->spa_max_ashift) 1712 spa->spa_max_ashift = vd->vdev_ashift; 1713 if (vd->vdev_ashift < spa->spa_min_ashift) 1714 spa->spa_min_ashift = vd->vdev_ashift; 1715 } 1716 1717 /* 1718 * If a leaf vdev has a DTL, and seems healthy, then kick off a 1719 * resilver. But don't do this if we are doing a reopen for a scrub, 1720 * since this would just restart the scrub we are already doing. 1721 */ 1722 if (vd->vdev_ops->vdev_op_leaf && !spa->spa_scrub_reopen && 1723 vdev_resilver_needed(vd, NULL, NULL)) 1724 spa_async_request(spa, SPA_ASYNC_RESILVER); 1725 1726 return (0); 1727} 1728 1729/* 1730 * Called once the vdevs are all opened, this routine validates the label 1731 * contents. This needs to be done before vdev_load() so that we don't 1732 * inadvertently do repair I/Os to the wrong device. 1733 * 1734 * This function will only return failure if one of the vdevs indicates that it 1735 * has since been destroyed or exported. This is only possible if 1736 * /etc/zfs/zpool.cache was readonly at the time. 
Otherwise, the vdev state 1737 * will be updated but the function will return 0. 1738 */ 1739int 1740vdev_validate(vdev_t *vd) 1741{ 1742 spa_t *spa = vd->vdev_spa; 1743 nvlist_t *label; 1744 uint64_t guid = 0, aux_guid = 0, top_guid; 1745 uint64_t state; 1746 nvlist_t *nvl; 1747 uint64_t txg; 1748 1749 if (vdev_validate_skip) 1750 return (0); 1751 1752 for (uint64_t c = 0; c < vd->vdev_children; c++) 1753 if (vdev_validate(vd->vdev_child[c]) != 0) 1754 return (SET_ERROR(EBADF)); 1755 1756 /* 1757 * If the device has already failed, or was marked offline, don't do 1758 * any further validation. Otherwise, label I/O will fail and we will 1759 * overwrite the previous state. 1760 */ 1761 if (!vd->vdev_ops->vdev_op_leaf || !vdev_readable(vd)) 1762 return (0); 1763 1764 /* 1765 * If we are performing an extreme rewind, we allow for a label that 1766 * was modified at a point after the current txg. 1767 * If config lock is not held do not check for the txg. spa_sync could 1768 * be updating the vdev's label before updating spa_last_synced_txg. 1769 */ 1770 if (spa->spa_extreme_rewind || spa_last_synced_txg(spa) == 0 || 1771 spa_config_held(spa, SCL_CONFIG, RW_WRITER) != SCL_CONFIG) 1772 txg = UINT64_MAX; 1773 else 1774 txg = spa_last_synced_txg(spa); 1775 1776 if ((label = vdev_label_read_config(vd, txg)) == NULL) { 1777 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN, 1778 VDEV_AUX_BAD_LABEL); 1779 vdev_dbgmsg(vd, "vdev_validate: failed reading config for " 1780 "txg %llu", (u_longlong_t)txg); 1781 return (0); 1782 } 1783 1784 /* 1785 * Determine if this vdev has been split off into another 1786 * pool. If so, then refuse to open it. 1787 */ 1788 if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_SPLIT_GUID, 1789 &aux_guid) == 0 && aux_guid == spa_guid(spa)) { 1790 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN, 1791 VDEV_AUX_SPLIT_POOL); 1792 nvlist_free(label); 1793 vdev_dbgmsg(vd, "vdev_validate: vdev split into other pool"); 1794 return (0); 1795 } 1796 1797 if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_GUID, &guid) != 0) { 1798 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN, 1799 VDEV_AUX_CORRUPT_DATA); 1800 nvlist_free(label); 1801 vdev_dbgmsg(vd, "vdev_validate: '%s' missing from label", 1802 ZPOOL_CONFIG_POOL_GUID); 1803 return (0); 1804 } 1805 1806 /* 1807 * If config is not trusted then ignore the spa guid check. This is 1808 * necessary because if the machine crashed during a re-guid the new 1809 * guid might have been written to all of the vdev labels, but not the 1810 * cached config. The check will be performed again once we have the 1811 * trusted config from the MOS. 
1812 */ 1813 if (spa->spa_trust_config && guid != spa_guid(spa)) { 1814 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN, 1815 VDEV_AUX_CORRUPT_DATA); 1816 nvlist_free(label); 1817 vdev_dbgmsg(vd, "vdev_validate: vdev label pool_guid doesn't " 1818 "match config (%llu != %llu)", (u_longlong_t)guid, 1819 (u_longlong_t)spa_guid(spa)); 1820 return (0); 1821 } 1822 1823 if (nvlist_lookup_nvlist(label, ZPOOL_CONFIG_VDEV_TREE, &nvl) 1824 != 0 || nvlist_lookup_uint64(nvl, ZPOOL_CONFIG_ORIG_GUID, 1825 &aux_guid) != 0) 1826 aux_guid = 0; 1827 1828 if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID, &guid) != 0) { 1829 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN, 1830 VDEV_AUX_CORRUPT_DATA); 1831 nvlist_free(label); 1832 vdev_dbgmsg(vd, "vdev_validate: '%s' missing from label", 1833 ZPOOL_CONFIG_GUID); 1834 return (0); 1835 } 1836 1837 if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_TOP_GUID, &top_guid) 1838 != 0) { 1839 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN, 1840 VDEV_AUX_CORRUPT_DATA); 1841 nvlist_free(label); 1842 vdev_dbgmsg(vd, "vdev_validate: '%s' missing from label", 1843 ZPOOL_CONFIG_TOP_GUID); 1844 return (0); 1845 } 1846 1847 /* 1848 * If this vdev just became a top-level vdev because its sibling was 1849 * detached, it will have adopted the parent's vdev guid -- but the 1850 * label may or may not be on disk yet. Fortunately, either version 1851 * of the label will have the same top guid, so if we're a top-level 1852 * vdev, we can safely compare to that instead. 1853 * However, if the config comes from a cachefile that failed to update 1854 * after the detach, a top-level vdev will appear as a non top-level 1855 * vdev in the config. Also relax the constraints if we perform an 1856 * extreme rewind. 1857 * 1858 * If we split this vdev off instead, then we also check the 1859 * original pool's guid. We don't want to consider the vdev 1860 * corrupt if it is partway through a split operation. 1861 */ 1862 if (vd->vdev_guid != guid && vd->vdev_guid != aux_guid) { 1863 boolean_t mismatch = B_FALSE; 1864 if (spa->spa_trust_config && !spa->spa_extreme_rewind) { 1865 if (vd != vd->vdev_top || vd->vdev_guid != top_guid) 1866 mismatch = B_TRUE; 1867 } else { 1868 if (vd->vdev_guid != top_guid && 1869 vd->vdev_top->vdev_guid != guid) 1870 mismatch = B_TRUE; 1871 } 1872 1873 if (mismatch) { 1874 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN, 1875 VDEV_AUX_CORRUPT_DATA); 1876 nvlist_free(label); 1877 vdev_dbgmsg(vd, "vdev_validate: config guid " 1878 "doesn't match label guid"); 1879 vdev_dbgmsg(vd, "CONFIG: guid %llu, top_guid %llu", 1880 (u_longlong_t)vd->vdev_guid, 1881 (u_longlong_t)vd->vdev_top->vdev_guid); 1882 vdev_dbgmsg(vd, "LABEL: guid %llu, top_guid %llu, " 1883 "aux_guid %llu", (u_longlong_t)guid, 1884 (u_longlong_t)top_guid, (u_longlong_t)aux_guid); 1885 return (0); 1886 } 1887 } 1888 1889 if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE, 1890 &state) != 0) { 1891 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN, 1892 VDEV_AUX_CORRUPT_DATA); 1893 nvlist_free(label); 1894 vdev_dbgmsg(vd, "vdev_validate: '%s' missing from label", 1895 ZPOOL_CONFIG_POOL_STATE); 1896 return (0); 1897 } 1898 1899 nvlist_free(label); 1900 1901 /* 1902 * If this is a verbatim import, no need to check the 1903 * state of the pool. 
1904 */ 1905 if (!(spa->spa_import_flags & ZFS_IMPORT_VERBATIM) && 1906 spa_load_state(spa) == SPA_LOAD_OPEN && 1907 state != POOL_STATE_ACTIVE) { 1908 vdev_dbgmsg(vd, "vdev_validate: invalid pool state (%llu) " 1909 "for spa %s", (u_longlong_t)state, spa->spa_name); 1910 return (SET_ERROR(EBADF)); 1911 } 1912 1913 /* 1914 * If we were able to open and validate a vdev that was 1915 * previously marked permanently unavailable, clear that state 1916 * now. 1917 */ 1918 if (vd->vdev_not_present) 1919 vd->vdev_not_present = 0; 1920 1921 return (0); 1922} 1923 1924static void 1925vdev_copy_path_impl(vdev_t *svd, vdev_t *dvd) 1926{ 1927 if (svd->vdev_path != NULL && dvd->vdev_path != NULL) { 1928 if (strcmp(svd->vdev_path, dvd->vdev_path) != 0) { 1929 zfs_dbgmsg("vdev_copy_path: vdev %llu: path changed " 1930 "from '%s' to '%s'", (u_longlong_t)dvd->vdev_guid, 1931 dvd->vdev_path, svd->vdev_path); 1932 spa_strfree(dvd->vdev_path); 1933 dvd->vdev_path = spa_strdup(svd->vdev_path); 1934 } 1935 } else if (svd->vdev_path != NULL) { 1936 dvd->vdev_path = spa_strdup(svd->vdev_path); 1937 zfs_dbgmsg("vdev_copy_path: vdev %llu: path set to '%s'", 1938 (u_longlong_t)dvd->vdev_guid, dvd->vdev_path); 1939 } 1940} 1941 1942/* 1943 * Recursively copy vdev paths from one vdev to another. Source and destination 1944 * vdev trees must have same geometry otherwise return error. Intended to copy 1945 * paths from userland config into MOS config. 1946 */ 1947int 1948vdev_copy_path_strict(vdev_t *svd, vdev_t *dvd) 1949{ 1950 if ((svd->vdev_ops == &vdev_missing_ops) || 1951 (svd->vdev_ishole && dvd->vdev_ishole) || 1952 (dvd->vdev_ops == &vdev_indirect_ops)) 1953 return (0); 1954 1955 if (svd->vdev_ops != dvd->vdev_ops) { 1956 vdev_dbgmsg(svd, "vdev_copy_path: vdev type mismatch: %s != %s", 1957 svd->vdev_ops->vdev_op_type, dvd->vdev_ops->vdev_op_type); 1958 return (SET_ERROR(EINVAL)); 1959 } 1960 1961 if (svd->vdev_guid != dvd->vdev_guid) { 1962 vdev_dbgmsg(svd, "vdev_copy_path: guids mismatch (%llu != " 1963 "%llu)", (u_longlong_t)svd->vdev_guid, 1964 (u_longlong_t)dvd->vdev_guid); 1965 return (SET_ERROR(EINVAL)); 1966 } 1967 1968 if (svd->vdev_children != dvd->vdev_children) { 1969 vdev_dbgmsg(svd, "vdev_copy_path: children count mismatch: " 1970 "%llu != %llu", (u_longlong_t)svd->vdev_children, 1971 (u_longlong_t)dvd->vdev_children); 1972 return (SET_ERROR(EINVAL)); 1973 } 1974 1975 for (uint64_t i = 0; i < svd->vdev_children; i++) { 1976 int error = vdev_copy_path_strict(svd->vdev_child[i], 1977 dvd->vdev_child[i]); 1978 if (error != 0) 1979 return (error); 1980 } 1981 1982 if (svd->vdev_ops->vdev_op_leaf) 1983 vdev_copy_path_impl(svd, dvd); 1984 1985 return (0); 1986} 1987 1988static void 1989vdev_copy_path_search(vdev_t *stvd, vdev_t *dvd) 1990{ 1991 ASSERT(stvd->vdev_top == stvd); 1992 ASSERT3U(stvd->vdev_id, ==, dvd->vdev_top->vdev_id); 1993 1994 for (uint64_t i = 0; i < dvd->vdev_children; i++) { 1995 vdev_copy_path_search(stvd, dvd->vdev_child[i]); 1996 } 1997 1998 if (!dvd->vdev_ops->vdev_op_leaf || !vdev_is_concrete(dvd)) 1999 return; 2000 2001 /* 2002 * The idea here is that while a vdev can shift positions within 2003 * a top vdev (when replacing, attaching mirror, etc.) it cannot 2004 * step outside of it. 
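 * For example, a leaf that has been attached to a mirror or swapped in by a replace keeps its guid and stays under the same top-level vdev; only its position inside that top-level vdev changes (it gains a mirror or replacing parent), so a guid lookup rooted at the matching source top-level vdev is enough to find it.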
2005 */ 2006 vdev_t *vd = vdev_lookup_by_guid(stvd, dvd->vdev_guid); 2007 2008 if (vd == NULL || vd->vdev_ops != dvd->vdev_ops) 2009 return; 2010 2011 ASSERT(vd->vdev_ops->vdev_op_leaf); 2012 2013 vdev_copy_path_impl(vd, dvd); 2014} 2015 2016/* 2017 * Recursively copy vdev paths from one root vdev to another. Source and 2018 * destination vdev trees may differ in geometry. For each destination leaf 2019 * vdev, search a vdev with the same guid and top vdev id in the source. 2020 * Intended to copy paths from userland config into MOS config. 2021 */ 2022void 2023vdev_copy_path_relaxed(vdev_t *srvd, vdev_t *drvd) 2024{ 2025 uint64_t children = MIN(srvd->vdev_children, drvd->vdev_children); 2026 ASSERT(srvd->vdev_ops == &vdev_root_ops); 2027 ASSERT(drvd->vdev_ops == &vdev_root_ops); 2028 2029 for (uint64_t i = 0; i < children; i++) { 2030 vdev_copy_path_search(srvd->vdev_child[i], 2031 drvd->vdev_child[i]); 2032 } 2033} 2034 2035/* 2036 * Close a virtual device. 2037 */ 2038void 2039vdev_close(vdev_t *vd) 2040{ 2041 spa_t *spa = vd->vdev_spa; 2042 vdev_t *pvd = vd->vdev_parent; 2043 2044 ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL); 2045 2046 /* 2047 * If our parent is reopening, then we are as well, unless we are 2048 * going offline. 2049 */ 2050 if (pvd != NULL && pvd->vdev_reopening) 2051 vd->vdev_reopening = (pvd->vdev_reopening && !vd->vdev_offline); 2052 2053 vd->vdev_ops->vdev_op_close(vd); 2054 2055 vdev_cache_purge(vd); 2056 2057 if (vd->vdev_ops->vdev_op_leaf) 2058 trim_map_destroy(vd); 2059 2060 /* 2061 * We record the previous state before we close it, so that if we are 2062 * doing a reopen(), we don't generate FMA ereports if we notice that 2063 * it's still faulted. 2064 */ 2065 vd->vdev_prevstate = vd->vdev_state; 2066 2067 if (vd->vdev_offline) 2068 vd->vdev_state = VDEV_STATE_OFFLINE; 2069 else 2070 vd->vdev_state = VDEV_STATE_CLOSED; 2071 vd->vdev_stat.vs_aux = VDEV_AUX_NONE; 2072} 2073 2074void 2075vdev_hold(vdev_t *vd) 2076{ 2077 spa_t *spa = vd->vdev_spa; 2078 2079 ASSERT(spa_is_root(spa)); 2080 if (spa->spa_state == POOL_STATE_UNINITIALIZED) 2081 return; 2082 2083 for (int c = 0; c < vd->vdev_children; c++) 2084 vdev_hold(vd->vdev_child[c]); 2085 2086 if (vd->vdev_ops->vdev_op_leaf) 2087 vd->vdev_ops->vdev_op_hold(vd); 2088} 2089 2090void 2091vdev_rele(vdev_t *vd) 2092{ 2093 spa_t *spa = vd->vdev_spa; 2094 2095 ASSERT(spa_is_root(spa)); 2096 for (int c = 0; c < vd->vdev_children; c++) 2097 vdev_rele(vd->vdev_child[c]); 2098 2099 if (vd->vdev_ops->vdev_op_leaf) 2100 vd->vdev_ops->vdev_op_rele(vd); 2101} 2102 2103/* 2104 * Reopen all interior vdevs and any unopened leaves. We don't actually 2105 * reopen leaf vdevs which had previously been opened as they might deadlock 2106 * on the spa_config_lock. Instead we only obtain the leaf's physical size. 2107 * If the leaf has never been opened then open it, as usual. 2108 */ 2109void 2110vdev_reopen(vdev_t *vd) 2111{ 2112 spa_t *spa = vd->vdev_spa; 2113 2114 ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL); 2115 2116 /* set the reopening flag unless we're taking the vdev offline */ 2117 vd->vdev_reopening = !vd->vdev_offline; 2118 vdev_close(vd); 2119 (void) vdev_open(vd); 2120 2121 /* 2122 * Call vdev_validate() here to make sure we have the same device. 2123 * Otherwise, a device with an invalid label could be successfully 2124 * opened in response to vdev_reopen(). 
2125 */ 2126 if (vd->vdev_aux) { 2127 (void) vdev_validate_aux(vd); 2128 if (vdev_readable(vd) && vdev_writeable(vd) && 2129 vd->vdev_aux == &spa->spa_l2cache && 2130 !l2arc_vdev_present(vd)) 2131 l2arc_add_vdev(spa, vd); 2132 } else { 2133 (void) vdev_validate(vd); 2134 } 2135 2136 /* 2137 * Reassess parent vdev's health. 2138 */ 2139 vdev_propagate_state(vd); 2140} 2141 2142int 2143vdev_create(vdev_t *vd, uint64_t txg, boolean_t isreplacing) 2144{ 2145 int error; 2146 2147 /* 2148 * Normally, partial opens (e.g. of a mirror) are allowed. 2149 * For a create, however, we want to fail the request if 2150 * there are any components we can't open. 2151 */ 2152 error = vdev_open(vd); 2153 2154 if (error || vd->vdev_state != VDEV_STATE_HEALTHY) { 2155 vdev_close(vd); 2156 return (error ? error : ENXIO); 2157 } 2158 2159 /* 2160 * Recursively load DTLs and initialize all labels. 2161 */ 2162 if ((error = vdev_dtl_load(vd)) != 0 || 2163 (error = vdev_label_init(vd, txg, isreplacing ? 2164 VDEV_LABEL_REPLACE : VDEV_LABEL_CREATE)) != 0) { 2165 vdev_close(vd); 2166 return (error); 2167 } 2168 2169 return (0); 2170} 2171 2172void 2173vdev_metaslab_set_size(vdev_t *vd) 2174{ 2175 uint64_t asize = vd->vdev_asize; 2176 uint64_t ms_count = asize >> vdev_default_ms_shift; 2177 uint64_t ms_shift; 2178 2179 /* 2180 * There are two dimensions to the metaslab sizing calculation: 2181 * the size of the metaslab and the count of metaslabs per vdev. 2182 * In general, we aim for vdev_max_ms_count (200) metaslabs. The 2183 * ranges of the dimensions are as follows: 2184 * 2185 * 2^29 <= ms_size <= 2^38 2186 * 16 <= ms_count <= 131,072 2187 * 2188 * On the lower end of vdev sizes, we aim for metaslab sizes of 2189 * at least 512MB (2^29) to minimize fragmentation effects when 2190 * testing with smaller devices. However, the count constraint 2191 * of at least 16 metaslabs will override this minimum size goal. 2192 * 2193 * On the upper end of vdev sizes, we aim for a maximum metaslab 2194 * size of 256GB. However, we will cap the total count to 2^17 2195 * metaslabs to keep our memory footprint in check. 2196 * 2197 * The net effect of applying the above constraints is summarized below. 2198 * 2199 * vdev size metaslab count 2200 * -------------|----------------- 2201 * < 8GB ~16 2202 * 8GB - 100GB one per 512MB 2203 * 100GB - 50TB ~200 2204 * 50TB - 32PB one per 256GB 2205 * > 32PB ~131,072 2206 * ------------------------------- 2207 */ 2208 2209 if (ms_count < vdev_min_ms_count) 2210 ms_shift = highbit64(asize / vdev_min_ms_count); 2211 else if (ms_count > vdev_max_ms_count) 2212 ms_shift = highbit64(asize / vdev_max_ms_count); 2213 else 2214 ms_shift = vdev_default_ms_shift; 2215 2216 if (ms_shift < SPA_MAXBLOCKSHIFT) { 2217 ms_shift = SPA_MAXBLOCKSHIFT; 2218 } else if (ms_shift > vdev_max_ms_shift) { 2219 ms_shift = vdev_max_ms_shift; 2220 /* cap the total count to constrain memory footprint */ 2221 if ((asize >> ms_shift) > vdev_ms_count_limit) 2222 ms_shift = highbit64(asize / vdev_ms_count_limit); 2223 } 2224 2225 vd->vdev_ms_shift = ms_shift; 2226 ASSERT3U(vd->vdev_ms_shift, >=, SPA_MAXBLOCKSHIFT); 2227} 2228 2229/* 2230 * Maximize performance by inflating the configured ashift for top-level 2231 * vdevs to be as close to the physical ashift as possible while maintaining 2232 * administrator-defined limits and ensuring it doesn't go below the 2233 * logical ashift.
2234 */ 2235void 2236vdev_ashift_optimize(vdev_t *vd) 2237{ 2238 if (vd == vd->vdev_top) { 2239 if (vd->vdev_ashift < vd->vdev_physical_ashift) { 2240 vd->vdev_ashift = MIN( 2241 MAX(zfs_max_auto_ashift, vd->vdev_ashift), 2242 MAX(zfs_min_auto_ashift, vd->vdev_physical_ashift)); 2243 } else { 2244 /* 2245 * Unusual case where logical ashift > physical ashift 2246 * so we can't cap the calculated ashift based on max 2247 * ashift as that would cause failures. 2248 * We still check if we need to increase it to match 2249 * the min ashift. 2250 */ 2251 vd->vdev_ashift = MAX(zfs_min_auto_ashift, 2252 vd->vdev_ashift); 2253 } 2254 } 2255} 2256 2257void 2258vdev_dirty(vdev_t *vd, int flags, void *arg, uint64_t txg) 2259{ 2260 ASSERT(vd == vd->vdev_top); 2261 /* indirect vdevs don't have metaslabs or dtls */ 2262 ASSERT(vdev_is_concrete(vd) || flags == 0); 2263 ASSERT(ISP2(flags)); 2264 ASSERT(spa_writeable(vd->vdev_spa)); 2265 2266 if (flags & VDD_METASLAB) 2267 (void) txg_list_add(&vd->vdev_ms_list, arg, txg); 2268 2269 if (flags & VDD_DTL) 2270 (void) txg_list_add(&vd->vdev_dtl_list, arg, txg); 2271 2272 (void) txg_list_add(&vd->vdev_spa->spa_vdev_txg_list, vd, txg); 2273} 2274 2275void 2276vdev_dirty_leaves(vdev_t *vd, int flags, uint64_t txg) 2277{ 2278 for (int c = 0; c < vd->vdev_children; c++) 2279 vdev_dirty_leaves(vd->vdev_child[c], flags, txg); 2280 2281 if (vd->vdev_ops->vdev_op_leaf) 2282 vdev_dirty(vd->vdev_top, flags, vd, txg); 2283} 2284 2285/* 2286 * DTLs. 2287 * 2288 * A vdev's DTL (dirty time log) is the set of transaction groups for which 2289 * the vdev has less than perfect replication. There are four kinds of DTL: 2290 * 2291 * DTL_MISSING: txgs for which the vdev has no valid copies of the data 2292 * 2293 * DTL_PARTIAL: txgs for which data is available, but not fully replicated 2294 * 2295 * DTL_SCRUB: the txgs that could not be repaired by the last scrub; upon 2296 * scrub completion, DTL_SCRUB replaces DTL_MISSING in the range of 2297 * txgs that was scrubbed. 2298 * 2299 * DTL_OUTAGE: txgs which cannot currently be read, whether due to 2300 * persistent errors or just some device being offline. 2301 * Unlike the other three, the DTL_OUTAGE map is not generally 2302 * maintained; it's only computed when needed, typically to 2303 * determine whether a device can be detached. 2304 * 2305 * For leaf vdevs, DTL_MISSING and DTL_PARTIAL are identical: the device 2306 * either has the data or it doesn't. 2307 * 2308 * For interior vdevs such as mirror and RAID-Z, the picture is more complex. 2309 * A vdev's DTL_PARTIAL is the union of its children's DTL_PARTIALs, because 2310 * if any child is less than fully replicated, then so is its parent. 2311 * A vdev's DTL_MISSING is a modified union of its children's DTL_MISSINGs, 2312 * comprising only those txgs which appear in more than 'maxfaults' children; 2313 * those are the txgs we don't have enough replication to read. For example, 2314 * double-parity RAID-Z can tolerate up to two missing devices (maxfaults == 2); 2315 * thus, its DTL_MISSING consists of the set of txgs that appear in more than 2316 * two child DTL_MISSING maps. 2317 * 2318 * It should be clear from the above that to compute the DTLs and outage maps 2319 * for all vdevs, it suffices to know just the leaf vdevs' DTL_MISSING maps. 2320 * Therefore, that is all we keep on disk. When loading the pool, or after 2321 * a configuration change, we generate all other DTLs from first principles.
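 * To make the interior-vdev rule concrete with a simple case: a 2-way mirror has maxfaults == 1, so a txg lands in the mirror's DTL_MISSING only when both children are missing it (an unreadable child counts as missing everything); if just one child is missing the txg, it shows up only in the mirror's DTL_PARTIAL.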
2322 */ 2323void 2324vdev_dtl_dirty(vdev_t *vd, vdev_dtl_type_t t, uint64_t txg, uint64_t size) 2325{ 2326 range_tree_t *rt = vd->vdev_dtl[t]; 2327 2328 ASSERT(t < DTL_TYPES); 2329 ASSERT(vd != vd->vdev_spa->spa_root_vdev); 2330 ASSERT(spa_writeable(vd->vdev_spa)); 2331 2332 mutex_enter(&vd->vdev_dtl_lock); 2333 if (!range_tree_contains(rt, txg, size)) 2334 range_tree_add(rt, txg, size); 2335 mutex_exit(&vd->vdev_dtl_lock); 2336} 2337 2338boolean_t 2339vdev_dtl_contains(vdev_t *vd, vdev_dtl_type_t t, uint64_t txg, uint64_t size) 2340{ 2341 range_tree_t *rt = vd->vdev_dtl[t]; 2342 boolean_t dirty = B_FALSE; 2343 2344 ASSERT(t < DTL_TYPES); 2345 ASSERT(vd != vd->vdev_spa->spa_root_vdev); 2346 2347 /* 2348 * While we are loading the pool, the DTLs have not been loaded yet. 2349 * Ignore the DTLs and try all devices. This avoids a recursive 2350 * mutex enter on the vdev_dtl_lock, and also makes us try hard 2351 * when loading the pool (relying on the checksum to ensure that 2352 * we get the right data -- note that while loading, we are 2353 * only reading the MOS, which is always checksummed). 2354 */ 2355 if (vd->vdev_spa->spa_load_state != SPA_LOAD_NONE) 2356 return (B_FALSE); 2357 2358 mutex_enter(&vd->vdev_dtl_lock); 2359 if (!range_tree_is_empty(rt)) 2360 dirty = range_tree_contains(rt, txg, size); 2361 mutex_exit(&vd->vdev_dtl_lock); 2362 2363 return (dirty); 2364} 2365 2366boolean_t 2367vdev_dtl_empty(vdev_t *vd, vdev_dtl_type_t t) 2368{ 2369 range_tree_t *rt = vd->vdev_dtl[t]; 2370 boolean_t empty; 2371 2372 mutex_enter(&vd->vdev_dtl_lock); 2373 empty = range_tree_is_empty(rt); 2374 mutex_exit(&vd->vdev_dtl_lock); 2375 2376 return (empty); 2377} 2378 2379/* 2380 * Returns B_TRUE if the vdev determines the offset needs to be resilvered. 2381 */ 2382boolean_t 2383vdev_dtl_need_resilver(vdev_t *vd, uint64_t offset, size_t psize) 2384{ 2385 ASSERT(vd != vd->vdev_spa->spa_root_vdev); 2386 2387 if (vd->vdev_ops->vdev_op_need_resilver == NULL || 2388 vd->vdev_ops->vdev_op_leaf) 2389 return (B_TRUE); 2390 2391 return (vd->vdev_ops->vdev_op_need_resilver(vd, offset, psize)); 2392} 2393 2394/* 2395 * Returns the lowest txg in the DTL range. 2396 */ 2397static uint64_t 2398vdev_dtl_min(vdev_t *vd) 2399{ 2400 range_seg_t *rs; 2401 2402 ASSERT(MUTEX_HELD(&vd->vdev_dtl_lock)); 2403 ASSERT3U(range_tree_space(vd->vdev_dtl[DTL_MISSING]), !=, 0); 2404 ASSERT0(vd->vdev_children); 2405 2406 rs = avl_first(&vd->vdev_dtl[DTL_MISSING]->rt_root); 2407 return (rs->rs_start - 1); 2408} 2409 2410/* 2411 * Returns the highest txg in the DTL. 2412 */ 2413static uint64_t 2414vdev_dtl_max(vdev_t *vd) 2415{ 2416 range_seg_t *rs; 2417 2418 ASSERT(MUTEX_HELD(&vd->vdev_dtl_lock)); 2419 ASSERT3U(range_tree_space(vd->vdev_dtl[DTL_MISSING]), !=, 0); 2420 ASSERT0(vd->vdev_children); 2421 2422 rs = avl_last(&vd->vdev_dtl[DTL_MISSING]->rt_root); 2423 return (rs->rs_end); 2424} 2425 2426/* 2427 * Determine if a resilvering vdev should remove any DTL entries from 2428 * its range. If the vdev was resilvering for the entire duration of the 2429 * scan then it should excise that range from its DTLs. Otherwise, this 2430 * vdev is considered partially resilvered and should leave its DTL 2431 * entries intact. The comment in vdev_dtl_reassess() describes how we 2432 * excise the DTLs.
2433 */ 2434static boolean_t 2435vdev_dtl_should_excise(vdev_t *vd) 2436{ 2437 spa_t *spa = vd->vdev_spa; 2438 dsl_scan_t *scn = spa->spa_dsl_pool->dp_scan; 2439 2440 ASSERT0(scn->scn_phys.scn_errors); 2441 ASSERT0(vd->vdev_children); 2442 2443 if (vd->vdev_state < VDEV_STATE_DEGRADED) 2444 return (B_FALSE); 2445 2446 if (vd->vdev_resilver_txg == 0 || 2447 range_tree_is_empty(vd->vdev_dtl[DTL_MISSING])) 2448 return (B_TRUE); 2449 2450 /* 2451 * When a resilver is initiated, the scan will assign the scn_max_txg 2452 * value to the highest txg value that exists in all DTLs. If this 2453 * device's max DTL is not part of this scan (i.e. it is not in 2454 * the range (scn_min_txg, scn_max_txg]) then it is not eligible 2455 * for excision. 2456 */ 2457 if (vdev_dtl_max(vd) <= scn->scn_phys.scn_max_txg) { 2458 ASSERT3U(scn->scn_phys.scn_min_txg, <=, vdev_dtl_min(vd)); 2459 ASSERT3U(scn->scn_phys.scn_min_txg, <, vd->vdev_resilver_txg); 2460 ASSERT3U(vd->vdev_resilver_txg, <=, scn->scn_phys.scn_max_txg); 2461 return (B_TRUE); 2462 } 2463 return (B_FALSE); 2464} 2465 2466/* 2467 * Reassess DTLs after a config change or scrub completion. 2468 */ 2469void 2470vdev_dtl_reassess(vdev_t *vd, uint64_t txg, uint64_t scrub_txg, int scrub_done) 2471{ 2472 spa_t *spa = vd->vdev_spa; 2473 avl_tree_t reftree; 2474 int minref; 2475 2476 ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0); 2477 2478 for (int c = 0; c < vd->vdev_children; c++) 2479 vdev_dtl_reassess(vd->vdev_child[c], txg, 2480 scrub_txg, scrub_done); 2481 2482 if (vd == spa->spa_root_vdev || !vdev_is_concrete(vd) || vd->vdev_aux) 2483 return; 2484 2485 if (vd->vdev_ops->vdev_op_leaf) { 2486 dsl_scan_t *scn = spa->spa_dsl_pool->dp_scan; 2487 2488 mutex_enter(&vd->vdev_dtl_lock); 2489 2490 /* 2491 * If we've completed a scan cleanly then determine 2492 * if this vdev should remove any DTLs. We only want to 2493 * excise regions on vdevs that were available during 2494 * the entire duration of this scan. 2495 */ 2496 if (scrub_txg != 0 && 2497 (spa->spa_scrub_started || 2498 (scn != NULL && scn->scn_phys.scn_errors == 0)) && 2499 vdev_dtl_should_excise(vd)) { 2500 /* 2501 * We completed a scrub up to scrub_txg. If we 2502 * did it without rebooting, then the scrub dtl 2503 * will be valid, so excise the old region and 2504 * fold in the scrub dtl. Otherwise, leave the 2505 * dtl as-is if there was an error. 2506 * 2507 * There's a little trick here: to excise the beginning 2508 * of the DTL_MISSING map, we put it into a reference 2509 * tree and then add a segment with refcnt -1 that 2510 * covers the range [0, scrub_txg). This means 2511 * that each txg in that range has refcnt -1 or 0. 2512 * We then add DTL_SCRUB with a refcnt of 2, so that 2513 * entries in the range [0, scrub_txg) will have a 2514 * positive refcnt -- either 1 or 2. We then convert 2515 * the reference tree into the new DTL_MISSING map.
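 * A worked example with made-up txgs: suppose DTL_MISSING covers [90, 110), DTL_SCRUB covers [95, 100), and scrub_txg is 100. After the three additions, txgs in [90, 95) have refcnt 0, [95, 100) have refcnt 2, and [100, 110) have refcnt 1; generating the map with a minimum refcnt of 1 therefore drops the cleanly scrubbed [90, 95) and keeps [95, 110) as the new DTL_MISSING.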
2516 */ 2517 space_reftree_create(&reftree); 2518 space_reftree_add_map(&reftree, 2519 vd->vdev_dtl[DTL_MISSING], 1); 2520 space_reftree_add_seg(&reftree, 0, scrub_txg, -1); 2521 space_reftree_add_map(&reftree, 2522 vd->vdev_dtl[DTL_SCRUB], 2); 2523 space_reftree_generate_map(&reftree, 2524 vd->vdev_dtl[DTL_MISSING], 1); 2525 space_reftree_destroy(&reftree); 2526 } 2527 range_tree_vacate(vd->vdev_dtl[DTL_PARTIAL], NULL, NULL); 2528 range_tree_walk(vd->vdev_dtl[DTL_MISSING], 2529 range_tree_add, vd->vdev_dtl[DTL_PARTIAL]); 2530 if (scrub_done) 2531 range_tree_vacate(vd->vdev_dtl[DTL_SCRUB], NULL, NULL); 2532 range_tree_vacate(vd->vdev_dtl[DTL_OUTAGE], NULL, NULL); 2533 if (!vdev_readable(vd)) 2534 range_tree_add(vd->vdev_dtl[DTL_OUTAGE], 0, -1ULL); 2535 else 2536 range_tree_walk(vd->vdev_dtl[DTL_MISSING], 2537 range_tree_add, vd->vdev_dtl[DTL_OUTAGE]); 2538 2539 /* 2540 * If the vdev was resilvering and no longer has any 2541 * DTLs then reset its resilvering flag and dirty 2542 * the top level so that we persist the change. 2543 */ 2544 if (vd->vdev_resilver_txg != 0 && 2545 range_tree_is_empty(vd->vdev_dtl[DTL_MISSING]) && 2546 range_tree_is_empty(vd->vdev_dtl[DTL_OUTAGE])) { 2547 vd->vdev_resilver_txg = 0; 2548 vdev_config_dirty(vd->vdev_top); 2549 } 2550 2551 mutex_exit(&vd->vdev_dtl_lock); 2552 2553 if (txg != 0) 2554 vdev_dirty(vd->vdev_top, VDD_DTL, vd, txg); 2555 return; 2556 } 2557 2558 mutex_enter(&vd->vdev_dtl_lock); 2559 for (int t = 0; t < DTL_TYPES; t++) { 2560 /* account for child's outage in parent's missing map */ 2561 int s = (t == DTL_MISSING) ? DTL_OUTAGE: t; 2562 if (t == DTL_SCRUB) 2563 continue; /* leaf vdevs only */ 2564 if (t == DTL_PARTIAL) 2565 minref = 1; /* i.e. non-zero */ 2566 else if (vd->vdev_nparity != 0) 2567 minref = vd->vdev_nparity + 1; /* RAID-Z */ 2568 else 2569 minref = vd->vdev_children; /* any kind of mirror */ 2570 space_reftree_create(&reftree); 2571 for (int c = 0; c < vd->vdev_children; c++) { 2572 vdev_t *cvd = vd->vdev_child[c]; 2573 mutex_enter(&cvd->vdev_dtl_lock); 2574 space_reftree_add_map(&reftree, cvd->vdev_dtl[s], 1); 2575 mutex_exit(&cvd->vdev_dtl_lock); 2576 } 2577 space_reftree_generate_map(&reftree, vd->vdev_dtl[t], minref); 2578 space_reftree_destroy(&reftree); 2579 } 2580 mutex_exit(&vd->vdev_dtl_lock); 2581} 2582 2583int 2584vdev_dtl_load(vdev_t *vd) 2585{ 2586 spa_t *spa = vd->vdev_spa; 2587 objset_t *mos = spa->spa_meta_objset; 2588 int error = 0; 2589 2590 if (vd->vdev_ops->vdev_op_leaf && vd->vdev_dtl_object != 0) { 2591 ASSERT(vdev_is_concrete(vd)); 2592 2593 error = space_map_open(&vd->vdev_dtl_sm, mos, 2594 vd->vdev_dtl_object, 0, -1ULL, 0); 2595 if (error) 2596 return (error); 2597 ASSERT(vd->vdev_dtl_sm != NULL); 2598 2599 mutex_enter(&vd->vdev_dtl_lock); 2600 2601 /* 2602 * Now that we've opened the space_map we need to update 2603 * the in-core DTL. 
2604 */ 2605 space_map_update(vd->vdev_dtl_sm); 2606 2607 error = space_map_load(vd->vdev_dtl_sm, 2608 vd->vdev_dtl[DTL_MISSING], SM_ALLOC); 2609 mutex_exit(&vd->vdev_dtl_lock); 2610 2611 return (error); 2612 } 2613 2614 for (int c = 0; c < vd->vdev_children; c++) { 2615 error = vdev_dtl_load(vd->vdev_child[c]); 2616 if (error != 0) 2617 break; 2618 } 2619 2620 return (error); 2621} 2622 2623void 2624vdev_destroy_unlink_zap(vdev_t *vd, uint64_t zapobj, dmu_tx_t *tx) 2625{ 2626 spa_t *spa = vd->vdev_spa; 2627 2628 VERIFY0(zap_destroy(spa->spa_meta_objset, zapobj, tx)); 2629 VERIFY0(zap_remove_int(spa->spa_meta_objset, spa->spa_all_vdev_zaps, 2630 zapobj, tx)); 2631} 2632 2633uint64_t 2634vdev_create_link_zap(vdev_t *vd, dmu_tx_t *tx) 2635{ 2636 spa_t *spa = vd->vdev_spa; 2637 uint64_t zap = zap_create(spa->spa_meta_objset, DMU_OTN_ZAP_METADATA, 2638 DMU_OT_NONE, 0, tx); 2639 2640 ASSERT(zap != 0); 2641 VERIFY0(zap_add_int(spa->spa_meta_objset, spa->spa_all_vdev_zaps, 2642 zap, tx)); 2643 2644 return (zap); 2645} 2646 2647void 2648vdev_construct_zaps(vdev_t *vd, dmu_tx_t *tx) 2649{ 2650 if (vd->vdev_ops != &vdev_hole_ops && 2651 vd->vdev_ops != &vdev_missing_ops && 2652 vd->vdev_ops != &vdev_root_ops && 2653 !vd->vdev_top->vdev_removing) { 2654 if (vd->vdev_ops->vdev_op_leaf && vd->vdev_leaf_zap == 0) { 2655 vd->vdev_leaf_zap = vdev_create_link_zap(vd, tx); 2656 } 2657 if (vd == vd->vdev_top && vd->vdev_top_zap == 0) { 2658 vd->vdev_top_zap = vdev_create_link_zap(vd, tx); 2659 } 2660 } 2661 for (uint64_t i = 0; i < vd->vdev_children; i++) { 2662 vdev_construct_zaps(vd->vdev_child[i], tx); 2663 } 2664} 2665 2666void 2667vdev_dtl_sync(vdev_t *vd, uint64_t txg) 2668{ 2669 spa_t *spa = vd->vdev_spa; 2670 range_tree_t *rt = vd->vdev_dtl[DTL_MISSING]; 2671 objset_t *mos = spa->spa_meta_objset; 2672 range_tree_t *rtsync; 2673 dmu_tx_t *tx; 2674 uint64_t object = space_map_object(vd->vdev_dtl_sm); 2675 2676 ASSERT(vdev_is_concrete(vd)); 2677 ASSERT(vd->vdev_ops->vdev_op_leaf); 2678 2679 tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg); 2680 2681 if (vd->vdev_detached || vd->vdev_top->vdev_removing) { 2682 mutex_enter(&vd->vdev_dtl_lock); 2683 space_map_free(vd->vdev_dtl_sm, tx); 2684 space_map_close(vd->vdev_dtl_sm); 2685 vd->vdev_dtl_sm = NULL; 2686 mutex_exit(&vd->vdev_dtl_lock); 2687 2688 /* 2689 * We only destroy the leaf ZAP for detached leaves or for 2690 * removed log devices. Removed data devices handle leaf ZAP 2691 * cleanup later, once cancellation is no longer possible. 
2692 */ 2693 if (vd->vdev_leaf_zap != 0 && (vd->vdev_detached || 2694 vd->vdev_top->vdev_islog)) { 2695 vdev_destroy_unlink_zap(vd, vd->vdev_leaf_zap, tx); 2696 vd->vdev_leaf_zap = 0; 2697 } 2698 2699 dmu_tx_commit(tx); 2700 return; 2701 } 2702 2703 if (vd->vdev_dtl_sm == NULL) { 2704 uint64_t new_object; 2705 2706 new_object = space_map_alloc(mos, vdev_dtl_sm_blksz, tx); 2707 VERIFY3U(new_object, !=, 0); 2708 2709 VERIFY0(space_map_open(&vd->vdev_dtl_sm, mos, new_object, 2710 0, -1ULL, 0)); 2711 ASSERT(vd->vdev_dtl_sm != NULL); 2712 } 2713 2714 rtsync = range_tree_create(NULL, NULL); 2715 2716 mutex_enter(&vd->vdev_dtl_lock); 2717 range_tree_walk(rt, range_tree_add, rtsync); 2718 mutex_exit(&vd->vdev_dtl_lock); 2719 2720 space_map_truncate(vd->vdev_dtl_sm, vdev_dtl_sm_blksz, tx); 2721 space_map_write(vd->vdev_dtl_sm, rtsync, SM_ALLOC, SM_NO_VDEVID, tx); 2722 range_tree_vacate(rtsync, NULL, NULL); 2723 2724 range_tree_destroy(rtsync); 2725 2726 /* 2727 * If the object for the space map has changed then dirty 2728 * the top level so that we update the config. 2729 */ 2730 if (object != space_map_object(vd->vdev_dtl_sm)) { 2731 vdev_dbgmsg(vd, "txg %llu, spa %s, DTL old object %llu, " 2732 "new object %llu", (u_longlong_t)txg, spa_name(spa), 2733 (u_longlong_t)object, 2734 (u_longlong_t)space_map_object(vd->vdev_dtl_sm)); 2735 vdev_config_dirty(vd->vdev_top); 2736 } 2737 2738 dmu_tx_commit(tx); 2739 2740 mutex_enter(&vd->vdev_dtl_lock); 2741 space_map_update(vd->vdev_dtl_sm); 2742 mutex_exit(&vd->vdev_dtl_lock); 2743} 2744 2745/* 2746 * Determine whether the specified vdev can be offlined/detached/removed 2747 * without losing data. 2748 */ 2749boolean_t 2750vdev_dtl_required(vdev_t *vd) 2751{ 2752 spa_t *spa = vd->vdev_spa; 2753 vdev_t *tvd = vd->vdev_top; 2754 uint8_t cant_read = vd->vdev_cant_read; 2755 boolean_t required; 2756 2757 ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL); 2758 2759 if (vd == spa->spa_root_vdev || vd == tvd) 2760 return (B_TRUE); 2761 2762 /* 2763 * Temporarily mark the device as unreadable, and then determine 2764 * whether this results in any DTL outages in the top-level vdev. 2765 * If not, we can safely offline/detach/remove the device. 2766 */ 2767 vd->vdev_cant_read = B_TRUE; 2768 vdev_dtl_reassess(tvd, 0, 0, B_FALSE); 2769 required = !vdev_dtl_empty(tvd, DTL_OUTAGE); 2770 vd->vdev_cant_read = cant_read; 2771 vdev_dtl_reassess(tvd, 0, 0, B_FALSE); 2772 2773 if (!required && zio_injection_enabled) 2774 required = !!zio_handle_device_injection(vd, NULL, ECHILD); 2775 2776 return (required); 2777} 2778 2779/* 2780 * Determine if resilver is needed, and if so the txg range. 
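 * For a leaf this is simply the span of its DTL_MISSING (as reported by vdev_dtl_min() and vdev_dtl_max() above); for an interior vdev it is the combined span (minimum of the minimums, maximum of the maximums) of whichever children report that they need resilvering.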
2781 */ 2782boolean_t 2783vdev_resilver_needed(vdev_t *vd, uint64_t *minp, uint64_t *maxp) 2784{ 2785 boolean_t needed = B_FALSE; 2786 uint64_t thismin = UINT64_MAX; 2787 uint64_t thismax = 0; 2788 2789 if (vd->vdev_children == 0) { 2790 mutex_enter(&vd->vdev_dtl_lock); 2791 if (!range_tree_is_empty(vd->vdev_dtl[DTL_MISSING]) && 2792 vdev_writeable(vd)) { 2793 2794 thismin = vdev_dtl_min(vd); 2795 thismax = vdev_dtl_max(vd); 2796 needed = B_TRUE; 2797 } 2798 mutex_exit(&vd->vdev_dtl_lock); 2799 } else { 2800 for (int c = 0; c < vd->vdev_children; c++) { 2801 vdev_t *cvd = vd->vdev_child[c]; 2802 uint64_t cmin, cmax; 2803 2804 if (vdev_resilver_needed(cvd, &cmin, &cmax)) { 2805 thismin = MIN(thismin, cmin); 2806 thismax = MAX(thismax, cmax); 2807 needed = B_TRUE; 2808 } 2809 } 2810 } 2811 2812 if (needed && minp) { 2813 *minp = thismin; 2814 *maxp = thismax; 2815 } 2816 return (needed); 2817} 2818 2819/* 2820 * Gets the checkpoint space map object from the vdev's ZAP. 2821 * Returns the spacemap object, or 0 if it wasn't in the ZAP 2822 * or the ZAP doesn't exist yet. 2823 */ 2824int 2825vdev_checkpoint_sm_object(vdev_t *vd) 2826{ 2827 ASSERT0(spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER)); 2828 if (vd->vdev_top_zap == 0) { 2829 return (0); 2830 } 2831 2832 uint64_t sm_obj = 0; 2833 int err = zap_lookup(spa_meta_objset(vd->vdev_spa), vd->vdev_top_zap, 2834 VDEV_TOP_ZAP_POOL_CHECKPOINT_SM, sizeof (uint64_t), 1, &sm_obj); 2835 2836 ASSERT(err == 0 || err == ENOENT); 2837 2838 return (sm_obj); 2839} 2840 2841int 2842vdev_load(vdev_t *vd) 2843{ 2844 int error = 0; 2845 /* 2846 * Recursively load all children. 2847 */ 2848 for (int c = 0; c < vd->vdev_children; c++) { 2849 error = vdev_load(vd->vdev_child[c]); 2850 if (error != 0) { 2851 return (error); 2852 } 2853 } 2854 2855 vdev_set_deflate_ratio(vd); 2856 2857 /* 2858 * If this is a top-level vdev, initialize its metaslabs. 2859 */ 2860 if (vd == vd->vdev_top && vdev_is_concrete(vd)) { 2861 if (vd->vdev_ashift == 0 || vd->vdev_asize == 0) { 2862 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN, 2863 VDEV_AUX_CORRUPT_DATA); 2864 vdev_dbgmsg(vd, "vdev_load: invalid size. ashift=%llu, " 2865 "asize=%llu", (u_longlong_t)vd->vdev_ashift, 2866 (u_longlong_t)vd->vdev_asize); 2867 return (SET_ERROR(ENXIO)); 2868 } else if ((error = vdev_metaslab_init(vd, 0)) != 0) { 2869 vdev_dbgmsg(vd, "vdev_load: metaslab_init failed " 2870 "[error=%d]", error); 2871 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN, 2872 VDEV_AUX_CORRUPT_DATA); 2873 return (error); 2874 } 2875 2876 uint64_t checkpoint_sm_obj = vdev_checkpoint_sm_object(vd); 2877 if (checkpoint_sm_obj != 0) { 2878 objset_t *mos = spa_meta_objset(vd->vdev_spa); 2879 ASSERT(vd->vdev_asize != 0); 2880 ASSERT3P(vd->vdev_checkpoint_sm, ==, NULL); 2881 2882 if ((error = space_map_open(&vd->vdev_checkpoint_sm, 2883 mos, checkpoint_sm_obj, 0, vd->vdev_asize, 2884 vd->vdev_ashift))) { 2885 vdev_dbgmsg(vd, "vdev_load: space_map_open " 2886 "failed for checkpoint spacemap (obj %llu) " 2887 "[error=%d]", 2888 (u_longlong_t)checkpoint_sm_obj, error); 2889 return (error); 2890 } 2891 ASSERT3P(vd->vdev_checkpoint_sm, !=, NULL); 2892 space_map_update(vd->vdev_checkpoint_sm); 2893 2894 /* 2895 * Since the checkpoint_sm contains free entries 2896 * exclusively, we can use sm_alloc to indicate the 2897 * cumulative checkpointed space that has been freed.
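 * (Because the map records only frees, its sm_alloc total is expected to be non-positive; negating it below yields the amount of checkpointed space that has been freed on this vdev.)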
2898 */ 2899 vd->vdev_stat.vs_checkpoint_space = 2900 -vd->vdev_checkpoint_sm->sm_alloc; 2901 vd->vdev_spa->spa_checkpoint_info.sci_dspace += 2902 vd->vdev_stat.vs_checkpoint_space; 2903 } 2904 } 2905 2906 /* 2907 * If this is a leaf vdev, load its DTL. 2908 */ 2909 if (vd->vdev_ops->vdev_op_leaf && (error = vdev_dtl_load(vd)) != 0) { 2910 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN, 2911 VDEV_AUX_CORRUPT_DATA); 2912 vdev_dbgmsg(vd, "vdev_load: vdev_dtl_load failed " 2913 "[error=%d]", error); 2914 return (error); 2915 } 2916 2917 uint64_t obsolete_sm_object = vdev_obsolete_sm_object(vd); 2918 if (obsolete_sm_object != 0) { 2919 objset_t *mos = vd->vdev_spa->spa_meta_objset; 2920 ASSERT(vd->vdev_asize != 0); 2921 ASSERT3P(vd->vdev_obsolete_sm, ==, NULL); 2922 2923 if ((error = space_map_open(&vd->vdev_obsolete_sm, mos, 2924 obsolete_sm_object, 0, vd->vdev_asize, 0))) { 2925 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN, 2926 VDEV_AUX_CORRUPT_DATA); 2927 vdev_dbgmsg(vd, "vdev_load: space_map_open failed for " 2928 "obsolete spacemap (obj %llu) [error=%d]", 2929 (u_longlong_t)obsolete_sm_object, error); 2930 return (error); 2931 } 2932 space_map_update(vd->vdev_obsolete_sm); 2933 } 2934 2935 return (0); 2936} 2937 2938/* 2939 * The special vdev case is used for hot spares and l2cache devices. Its 2940 * sole purpose is to set the vdev state for the associated vdev. To do this, 2941 * we make sure that we can open the underlying device, then try to read the 2942 * label, and make sure that the label is sane and that it hasn't been 2943 * repurposed to another pool. 2944 */ 2945int 2946vdev_validate_aux(vdev_t *vd) 2947{ 2948 nvlist_t *label; 2949 uint64_t guid, version; 2950 uint64_t state; 2951 2952 if (!vdev_readable(vd)) 2953 return (0); 2954 2955 if ((label = vdev_label_read_config(vd, -1ULL)) == NULL) { 2956 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN, 2957 VDEV_AUX_CORRUPT_DATA); 2958 return (-1); 2959 } 2960 2961 if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_VERSION, &version) != 0 || 2962 !SPA_VERSION_IS_SUPPORTED(version) || 2963 nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID, &guid) != 0 || 2964 guid != vd->vdev_guid || 2965 nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE, &state) != 0) { 2966 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN, 2967 VDEV_AUX_CORRUPT_DATA); 2968 nvlist_free(label); 2969 return (-1); 2970 } 2971 2972 /* 2973 * We don't actually check the pool state here. If it's in fact in 2974 * use by another pool, we update this fact on the fly when requested. 2975 */ 2976 nvlist_free(label); 2977 return (0); 2978} 2979 2980/* 2981 * Free the objects used to store this vdev's spacemaps, and the array 2982 * that points to them.
2983 */ 2984void 2985vdev_destroy_spacemaps(vdev_t *vd, dmu_tx_t *tx) 2986{ 2987 if (vd->vdev_ms_array == 0) 2988 return; 2989 2990 objset_t *mos = vd->vdev_spa->spa_meta_objset; 2991 uint64_t array_count = vd->vdev_asize >> vd->vdev_ms_shift; 2992 size_t array_bytes = array_count * sizeof (uint64_t); 2993 uint64_t *smobj_array = kmem_alloc(array_bytes, KM_SLEEP); 2994 VERIFY0(dmu_read(mos, vd->vdev_ms_array, 0, 2995 array_bytes, smobj_array, 0)); 2996 2997 for (uint64_t i = 0; i < array_count; i++) { 2998 uint64_t smobj = smobj_array[i]; 2999 if (smobj == 0) 3000 continue; 3001 3002 space_map_free_obj(mos, smobj, tx); 3003 } 3004 3005 kmem_free(smobj_array, array_bytes); 3006 VERIFY0(dmu_object_free(mos, vd->vdev_ms_array, tx)); 3007 vd->vdev_ms_array = 0; 3008} 3009 3010static void 3011vdev_remove_empty(vdev_t *vd, uint64_t txg) 3012{ 3013 spa_t *spa = vd->vdev_spa; 3014 dmu_tx_t *tx; 3015 3016 ASSERT(vd == vd->vdev_top); 3017 ASSERT3U(txg, ==, spa_syncing_txg(spa)); 3018 3019 if (vd->vdev_ms != NULL) { 3020 metaslab_group_t *mg = vd->vdev_mg; 3021 3022 metaslab_group_histogram_verify(mg); 3023 metaslab_class_histogram_verify(mg->mg_class); 3024 3025 for (int m = 0; m < vd->vdev_ms_count; m++) { 3026 metaslab_t *msp = vd->vdev_ms[m]; 3027 3028 if (msp == NULL || msp->ms_sm == NULL) 3029 continue; 3030 3031 mutex_enter(&msp->ms_lock); 3032 /* 3033 * If the metaslab was not loaded when the vdev 3034 * was removed then the histogram accounting may 3035 * not be accurate. Update the histogram information 3036 * here so that we ensure that the metaslab group 3037 * and metaslab class are up-to-date. 3038 */ 3039 metaslab_group_histogram_remove(mg, msp); 3040 3041 VERIFY0(space_map_allocated(msp->ms_sm)); 3042 space_map_close(msp->ms_sm); 3043 msp->ms_sm = NULL; 3044 mutex_exit(&msp->ms_lock); 3045 } 3046 3047 if (vd->vdev_checkpoint_sm != NULL) { 3048 ASSERT(spa_has_checkpoint(spa)); 3049 space_map_close(vd->vdev_checkpoint_sm); 3050 vd->vdev_checkpoint_sm = NULL; 3051 } 3052 3053 metaslab_group_histogram_verify(mg); 3054 metaslab_class_histogram_verify(mg->mg_class); 3055 for (int i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++) 3056 ASSERT0(mg->mg_histogram[i]); 3057 } 3058 3059 tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg); 3060 vdev_destroy_spacemaps(vd, tx); 3061 3062 if (vd->vdev_islog && vd->vdev_top_zap != 0) { 3063 vdev_destroy_unlink_zap(vd, vd->vdev_top_zap, tx); 3064 vd->vdev_top_zap = 0; 3065 } 3066 dmu_tx_commit(tx); 3067} 3068 3069void 3070vdev_sync_done(vdev_t *vd, uint64_t txg) 3071{ 3072 metaslab_t *msp; 3073 boolean_t reassess = !txg_list_empty(&vd->vdev_ms_list, TXG_CLEAN(txg)); 3074 3075 ASSERT(vdev_is_concrete(vd)); 3076 3077 while ((msp = txg_list_remove(&vd->vdev_ms_list, TXG_CLEAN(txg))) 3078 != NULL) 3079 metaslab_sync_done(msp, txg); 3080 3081 if (reassess) 3082 metaslab_sync_reassess(vd->vdev_mg); 3083} 3084 3085void 3086vdev_sync(vdev_t *vd, uint64_t txg) 3087{ 3088 spa_t *spa = vd->vdev_spa; 3089 vdev_t *lvd; 3090 metaslab_t *msp; 3091 dmu_tx_t *tx; 3092 3093 if (range_tree_space(vd->vdev_obsolete_segments) > 0) { 3094 dmu_tx_t *tx; 3095 3096 ASSERT(vd->vdev_removing || 3097 vd->vdev_ops == &vdev_indirect_ops); 3098 3099 tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg); 3100 vdev_indirect_sync_obsolete(vd, tx); 3101 dmu_tx_commit(tx); 3102 3103 /* 3104 * If the vdev is indirect, it can't have dirty 3105 * metaslabs or DTLs. 
3106 */ 3107 if (vd->vdev_ops == &vdev_indirect_ops) { 3108 ASSERT(txg_list_empty(&vd->vdev_ms_list, txg)); 3109 ASSERT(txg_list_empty(&vd->vdev_dtl_list, txg)); 3110 return; 3111 } 3112 } 3113 3114 ASSERT(vdev_is_concrete(vd)); 3115 3116 if (vd->vdev_ms_array == 0 && vd->vdev_ms_shift != 0 && 3117 !vd->vdev_removing) { 3118 ASSERT(vd == vd->vdev_top); 3119 ASSERT0(vd->vdev_indirect_config.vic_mapping_object); 3120 tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg); 3121 vd->vdev_ms_array = dmu_object_alloc(spa->spa_meta_objset, 3122 DMU_OT_OBJECT_ARRAY, 0, DMU_OT_NONE, 0, tx); 3123 ASSERT(vd->vdev_ms_array != 0); 3124 vdev_config_dirty(vd); 3125 dmu_tx_commit(tx); 3126 } 3127 3128 while ((msp = txg_list_remove(&vd->vdev_ms_list, txg)) != NULL) { 3129 metaslab_sync(msp, txg); 3130 (void) txg_list_add(&vd->vdev_ms_list, msp, TXG_CLEAN(txg)); 3131 } 3132 3133 while ((lvd = txg_list_remove(&vd->vdev_dtl_list, txg)) != NULL) 3134 vdev_dtl_sync(lvd, txg); 3135 3136 /* 3137 * Remove the metadata associated with this vdev once it's empty. 3138 * Note that this is typically used for log/cache device removal; 3139 * we don't empty toplevel vdevs when removing them. But if 3140 * a toplevel happens to be emptied, this is not harmful. 3141 */ 3142 if (vd->vdev_stat.vs_alloc == 0 && vd->vdev_removing) { 3143 vdev_remove_empty(vd, txg); 3144 } 3145 3146 (void) txg_list_add(&spa->spa_vdev_txg_list, vd, TXG_CLEAN(txg)); 3147} 3148 3149uint64_t 3150vdev_psize_to_asize(vdev_t *vd, uint64_t psize) 3151{ 3152 return (vd->vdev_ops->vdev_op_asize(vd, psize)); 3153} 3154 3155/* 3156 * Mark the given vdev faulted. A faulted vdev behaves as if the device could 3157 * not be opened, and no I/O is attempted. 3158 */ 3159int 3160vdev_fault(spa_t *spa, uint64_t guid, vdev_aux_t aux) 3161{ 3162 vdev_t *vd, *tvd; 3163 3164 spa_vdev_state_enter(spa, SCL_NONE); 3165 3166 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL) 3167 return (spa_vdev_state_exit(spa, NULL, ENODEV)); 3168 3169 if (!vd->vdev_ops->vdev_op_leaf) 3170 return (spa_vdev_state_exit(spa, NULL, ENOTSUP)); 3171 3172 tvd = vd->vdev_top; 3173 3174 /* 3175 * We don't directly use the aux state here, but if we do a 3176 * vdev_reopen(), we need this value to be present to remember why we 3177 * were faulted. 3178 */ 3179 vd->vdev_label_aux = aux; 3180 3181 /* 3182 * Faulted state takes precedence over degraded. 3183 */ 3184 vd->vdev_delayed_close = B_FALSE; 3185 vd->vdev_faulted = 1ULL; 3186 vd->vdev_degraded = 0ULL; 3187 vdev_set_state(vd, B_FALSE, VDEV_STATE_FAULTED, aux); 3188 3189 /* 3190 * If this device has the only valid copy of the data, then 3191 * back off and simply mark the vdev as degraded instead. 3192 */ 3193 if (!tvd->vdev_islog && vd->vdev_aux == NULL && vdev_dtl_required(vd)) { 3194 vd->vdev_degraded = 1ULL; 3195 vd->vdev_faulted = 0ULL; 3196 3197 /* 3198 * If we reopen the device and it's not dead, only then do we 3199 * mark it degraded. 3200 */ 3201 vdev_reopen(tvd); 3202 3203 if (vdev_readable(vd)) 3204 vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED, aux); 3205 } 3206 3207 return (spa_vdev_state_exit(spa, vd, 0)); 3208} 3209 3210/* 3211 * Mark the given vdev degraded. A degraded vdev is purely an indication to the 3212 * user that something is wrong. The vdev continues to operate as normal as far 3213 * as I/O is concerned. 
3214 */ 3215int 3216vdev_degrade(spa_t *spa, uint64_t guid, vdev_aux_t aux) 3217{ 3218 vdev_t *vd; 3219 3220 spa_vdev_state_enter(spa, SCL_NONE); 3221 3222 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL) 3223 return (spa_vdev_state_exit(spa, NULL, ENODEV)); 3224 3225 if (!vd->vdev_ops->vdev_op_leaf) 3226 return (spa_vdev_state_exit(spa, NULL, ENOTSUP)); 3227 3228 /* 3229 * If the vdev is already faulted, then don't do anything. 3230 */ 3231 if (vd->vdev_faulted || vd->vdev_degraded) 3232 return (spa_vdev_state_exit(spa, NULL, 0)); 3233 3234 vd->vdev_degraded = 1ULL; 3235 if (!vdev_is_dead(vd)) 3236 vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED, 3237 aux); 3238 3239 return (spa_vdev_state_exit(spa, vd, 0)); 3240} 3241 3242/* 3243 * Online the given vdev. 3244 * 3245 * If 'ZFS_ONLINE_UNSPARE' is set, it implies two things. First, any attached 3246 * spare device should be detached when the device finishes resilvering. 3247 * Second, the online should be treated like a 'test' online case, so no FMA 3248 * events are generated if the device fails to open. 3249 */ 3250int 3251vdev_online(spa_t *spa, uint64_t guid, uint64_t flags, vdev_state_t *newstate) 3252{ 3253 vdev_t *vd, *tvd, *pvd, *rvd = spa->spa_root_vdev; 3254 boolean_t wasoffline; 3255 vdev_state_t oldstate; 3256 3257 spa_vdev_state_enter(spa, SCL_NONE); 3258 3259 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL) 3260 return (spa_vdev_state_exit(spa, NULL, ENODEV)); 3261 3262 if (!vd->vdev_ops->vdev_op_leaf) 3263 return (spa_vdev_state_exit(spa, NULL, ENOTSUP)); 3264 3265 wasoffline = (vd->vdev_offline || vd->vdev_tmpoffline); 3266 oldstate = vd->vdev_state; 3267 3268 tvd = vd->vdev_top; 3269 vd->vdev_offline = B_FALSE; 3270 vd->vdev_tmpoffline = B_FALSE; 3271 vd->vdev_checkremove = !!(flags & ZFS_ONLINE_CHECKREMOVE); 3272 vd->vdev_forcefault = !!(flags & ZFS_ONLINE_FORCEFAULT); 3273 3274 /* XXX - L2ARC 1.0 does not support expansion */ 3275 if (!vd->vdev_aux) { 3276 for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent) 3277 pvd->vdev_expanding = !!(flags & ZFS_ONLINE_EXPAND); 3278 } 3279 3280 vdev_reopen(tvd); 3281 vd->vdev_checkremove = vd->vdev_forcefault = B_FALSE; 3282 3283 if (!vd->vdev_aux) { 3284 for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent) 3285 pvd->vdev_expanding = B_FALSE; 3286 } 3287 3288 if (newstate) 3289 *newstate = vd->vdev_state; 3290 if ((flags & ZFS_ONLINE_UNSPARE) && 3291 !vdev_is_dead(vd) && vd->vdev_parent && 3292 vd->vdev_parent->vdev_ops == &vdev_spare_ops && 3293 vd->vdev_parent->vdev_child[0] == vd) 3294 vd->vdev_unspare = B_TRUE; 3295 3296 if ((flags & ZFS_ONLINE_EXPAND) || spa->spa_autoexpand) { 3297 3298 /* XXX - L2ARC 1.0 does not support expansion */ 3299 if (vd->vdev_aux) 3300 return (spa_vdev_state_exit(spa, vd, ENOTSUP)); 3301 spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE); 3302 } 3303 3304 /* Restart initializing if necessary */ 3305 mutex_enter(&vd->vdev_initialize_lock); 3306 if (vdev_writeable(vd) && 3307 vd->vdev_initialize_thread == NULL && 3308 vd->vdev_initialize_state == VDEV_INITIALIZE_ACTIVE) { 3309 (void) vdev_initialize(vd); 3310 } 3311 mutex_exit(&vd->vdev_initialize_lock); 3312 3313 if (wasoffline || 3314 (oldstate < VDEV_STATE_DEGRADED && 3315 vd->vdev_state >= VDEV_STATE_DEGRADED)) 3316 spa_event_notify(spa, vd, NULL, ESC_ZFS_VDEV_ONLINE); 3317 3318 return (spa_vdev_state_exit(spa, vd, 0)); 3319} 3320 3321static int 3322vdev_offline_locked(spa_t *spa, uint64_t guid, uint64_t flags) 3323{ 3324 vdev_t *vd, *tvd; 3325 int error = 0; 3326 uint64_t generation; 3327 
metaslab_group_t *mg; 3328 3329top: 3330 spa_vdev_state_enter(spa, SCL_ALLOC); 3331 3332 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL) 3333 return (spa_vdev_state_exit(spa, NULL, ENODEV)); 3334 3335 if (!vd->vdev_ops->vdev_op_leaf) 3336 return (spa_vdev_state_exit(spa, NULL, ENOTSUP)); 3337 3338 tvd = vd->vdev_top; 3339 mg = tvd->vdev_mg; 3340 generation = spa->spa_config_generation + 1; 3341 3342 /* 3343 * If the device isn't already offline, try to offline it. 3344 */ 3345 if (!vd->vdev_offline) { 3346 /* 3347 * If this device has the only valid copy of some data, 3348 * don't allow it to be offlined. Log devices are always 3349 * expendable. 3350 */ 3351 if (!tvd->vdev_islog && vd->vdev_aux == NULL && 3352 vdev_dtl_required(vd)) 3353 return (spa_vdev_state_exit(spa, NULL, EBUSY)); 3354 3355 /* 3356 * If the top-level is a slog and it has had allocations 3357 * then proceed. We check that the vdev's metaslab group 3358 * is not NULL since it's possible that we may have just 3359 * added this vdev but not yet initialized its metaslabs. 3360 */ 3361 if (tvd->vdev_islog && mg != NULL) { 3362 /* 3363 * Prevent any future allocations. 3364 */ 3365 metaslab_group_passivate(mg); 3366 (void) spa_vdev_state_exit(spa, vd, 0); 3367 3368 error = spa_reset_logs(spa); 3369 3370 /* 3371 * If the log device was successfully reset but has 3372 * checkpointed data, do not offline it. 3373 */ 3374 if (error == 0 && 3375 tvd->vdev_checkpoint_sm != NULL) { 3376 ASSERT3U(tvd->vdev_checkpoint_sm->sm_alloc, 3377 !=, 0); 3378 error = ZFS_ERR_CHECKPOINT_EXISTS; 3379 } 3380 3381 spa_vdev_state_enter(spa, SCL_ALLOC); 3382 3383 /* 3384 * Check to see if the config has changed. 3385 */ 3386 if (error || generation != spa->spa_config_generation) { 3387 metaslab_group_activate(mg); 3388 if (error) 3389 return (spa_vdev_state_exit(spa, 3390 vd, error)); 3391 (void) spa_vdev_state_exit(spa, vd, 0); 3392 goto top; 3393 } 3394 ASSERT0(tvd->vdev_stat.vs_alloc); 3395 } 3396 3397 /* 3398 * Offline this device and reopen its top-level vdev. 3399 * If the top-level vdev is a log device then just offline 3400 * it. Otherwise, if this action results in the top-level 3401 * vdev becoming unusable, undo it and fail the request. 3402 */ 3403 vd->vdev_offline = B_TRUE; 3404 vdev_reopen(tvd); 3405 3406 if (!tvd->vdev_islog && vd->vdev_aux == NULL && 3407 vdev_is_dead(tvd)) { 3408 vd->vdev_offline = B_FALSE; 3409 vdev_reopen(tvd); 3410 return (spa_vdev_state_exit(spa, NULL, EBUSY)); 3411 } 3412 3413 /* 3414 * Add the device back into the metaslab rotor so that 3415 * once we online the device it's open for business. 3416 */ 3417 if (tvd->vdev_islog && mg != NULL) 3418 metaslab_group_activate(mg); 3419 } 3420 3421 vd->vdev_tmpoffline = !!(flags & ZFS_OFFLINE_TEMPORARY); 3422 3423 return (spa_vdev_state_exit(spa, vd, 0)); 3424} 3425 3426int 3427vdev_offline(spa_t *spa, uint64_t guid, uint64_t flags) 3428{ 3429 int error; 3430 3431 mutex_enter(&spa->spa_vdev_top_lock); 3432 error = vdev_offline_locked(spa, guid, flags); 3433 mutex_exit(&spa->spa_vdev_top_lock); 3434 3435 return (error); 3436} 3437 3438/* 3439 * Clear the error counts associated with this vdev. Unlike vdev_online() and 3440 * vdev_offline(), we assume the spa config is locked. We also clear all 3441 * children. If 'vd' is NULL, then the user wants to clear all vdevs. 
3442 */ 3443void 3444vdev_clear(spa_t *spa, vdev_t *vd) 3445{ 3446 vdev_t *rvd = spa->spa_root_vdev; 3447 3448 ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL); 3449 3450 if (vd == NULL) 3451 vd = rvd; 3452 3453 vd->vdev_stat.vs_read_errors = 0; 3454 vd->vdev_stat.vs_write_errors = 0; 3455 vd->vdev_stat.vs_checksum_errors = 0; 3456 3457 for (int c = 0; c < vd->vdev_children; c++) 3458 vdev_clear(spa, vd->vdev_child[c]); 3459 3460 if (vd == rvd) { 3461 for (int c = 0; c < spa->spa_l2cache.sav_count; c++) 3462 vdev_clear(spa, spa->spa_l2cache.sav_vdevs[c]); 3463 3464 for (int c = 0; c < spa->spa_spares.sav_count; c++) 3465 vdev_clear(spa, spa->spa_spares.sav_vdevs[c]); 3466 } 3467 3468 /* 3469 * It makes no sense to "clear" an indirect vdev. 3470 */ 3471 if (!vdev_is_concrete(vd)) 3472 return; 3473 3474 /* 3475 * If we're in the FAULTED state or have experienced failed I/O, then 3476 * clear the persistent state and attempt to reopen the device. We 3477 * also mark the vdev config dirty, so that the new faulted state is 3478 * written out to disk. 3479 */ 3480 if (vd->vdev_faulted || vd->vdev_degraded || 3481 !vdev_readable(vd) || !vdev_writeable(vd)) { 3482 3483 /* 3484 * When reopening in response to a clear event, it may be due to 3485 * a fmadm repair request. In this case, if the device is 3486 * still broken, we still want to post the ereport again. 3487 */ 3488 vd->vdev_forcefault = B_TRUE; 3489 3490 vd->vdev_faulted = vd->vdev_degraded = 0ULL; 3491 vd->vdev_cant_read = B_FALSE; 3492 vd->vdev_cant_write = B_FALSE; 3493 3494 vdev_reopen(vd == rvd ? rvd : vd->vdev_top); 3495 3496 vd->vdev_forcefault = B_FALSE; 3497 3498 if (vd != rvd && vdev_writeable(vd->vdev_top)) 3499 vdev_state_dirty(vd->vdev_top); 3500 3501 if (vd->vdev_aux == NULL && !vdev_is_dead(vd)) 3502 spa_async_request(spa, SPA_ASYNC_RESILVER); 3503 3504 spa_event_notify(spa, vd, NULL, ESC_ZFS_VDEV_CLEAR); 3505 } 3506 3507 /* 3508 * When clearing a FMA-diagnosed fault, we always want to 3509 * unspare the device, as we assume that the original spare was 3510 * done in response to the FMA fault. 3511 */ 3512 if (!vdev_is_dead(vd) && vd->vdev_parent != NULL && 3513 vd->vdev_parent->vdev_ops == &vdev_spare_ops && 3514 vd->vdev_parent->vdev_child[0] == vd) 3515 vd->vdev_unspare = B_TRUE; 3516} 3517 3518boolean_t 3519vdev_is_dead(vdev_t *vd) 3520{ 3521 /* 3522 * Holes and missing devices are always considered "dead". 3523 * This simplifies the code since we don't have to check for 3524 * these types of devices in the various code paths. 3525 * Instead we rely on the fact that we skip over dead devices 3526 * before issuing I/O to them. 3527 */ 3528 return (vd->vdev_state < VDEV_STATE_DEGRADED || 3529 vd->vdev_ops == &vdev_hole_ops || 3530 vd->vdev_ops == &vdev_missing_ops); 3531} 3532 3533boolean_t 3534vdev_readable(vdev_t *vd) 3535{ 3536 return (!vdev_is_dead(vd) && !vd->vdev_cant_read); 3537} 3538 3539boolean_t 3540vdev_writeable(vdev_t *vd) 3541{ 3542 return (!vdev_is_dead(vd) && !vd->vdev_cant_write && 3543 vdev_is_concrete(vd)); 3544} 3545 3546boolean_t 3547vdev_allocatable(vdev_t *vd) 3548{ 3549 uint64_t state = vd->vdev_state; 3550 3551 /* 3552 * We currently allow allocations from vdevs which may be in the 3553 * process of reopening (i.e. VDEV_STATE_CLOSED). If the device 3554 * fails to reopen then we'll catch it later when we're holding 3555 * the proper locks.
Note that we have to get the vdev state 3556 * in a local variable because although it changes atomically, 3557 * we're asking two separate questions about it. 3558 */ 3559 return (!(state < VDEV_STATE_DEGRADED && state != VDEV_STATE_CLOSED) && 3560 !vd->vdev_cant_write && vdev_is_concrete(vd) && 3561 vd->vdev_mg->mg_initialized); 3562} 3563 3564boolean_t 3565vdev_accessible(vdev_t *vd, zio_t *zio) 3566{ 3567 ASSERT(zio->io_vd == vd); 3568 3569 if (vdev_is_dead(vd) || vd->vdev_remove_wanted) 3570 return (B_FALSE); 3571 3572 if (zio->io_type == ZIO_TYPE_READ) 3573 return (!vd->vdev_cant_read); 3574 3575 if (zio->io_type == ZIO_TYPE_WRITE) 3576 return (!vd->vdev_cant_write); 3577 3578 return (B_TRUE); 3579} 3580 3581boolean_t 3582vdev_is_spacemap_addressable(vdev_t *vd) 3583{ 3584 /* 3585 * Assuming 47 bits of the space map entry dedicated for the entry's 3586 * offset (see description in space_map.h), we calculate the maximum 3587 * address that can be described by a space map entry for the given 3588 * device. 3589 */ 3590 uint64_t shift = vd->vdev_ashift + 47; 3591 3592 if (shift >= 63) /* detect potential overflow */ 3593 return (B_TRUE); 3594 3595 return (vd->vdev_asize < (1ULL << shift)); 3596} 3597 3598/* 3599 * Get statistics for the given vdev. 3600 */ 3601void 3602vdev_get_stats(vdev_t *vd, vdev_stat_t *vs) 3603{ 3604 spa_t *spa = vd->vdev_spa; 3605 vdev_t *rvd = spa->spa_root_vdev; 3606 vdev_t *tvd = vd->vdev_top; 3607 3608 ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0); 3609 3610 mutex_enter(&vd->vdev_stat_lock); 3611 bcopy(&vd->vdev_stat, vs, sizeof (*vs)); 3612 vs->vs_timestamp = gethrtime() - vs->vs_timestamp; 3613 vs->vs_state = vd->vdev_state; 3614 vs->vs_rsize = vdev_get_min_asize(vd); 3615 if (vd->vdev_ops->vdev_op_leaf) { 3616 vs->vs_rsize += VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE; 3617 /* 3618 * Report initializing progress. Since we don't have the 3619 * initializing locks held, this is only an estimate (although a 3620 * fairly accurate one). 3621 */ 3622 vs->vs_initialize_bytes_done = vd->vdev_initialize_bytes_done; 3623 vs->vs_initialize_bytes_est = vd->vdev_initialize_bytes_est; 3624 vs->vs_initialize_state = vd->vdev_initialize_state; 3625 vs->vs_initialize_action_time = vd->vdev_initialize_action_time; 3626 } 3627 /* 3628 * Report expandable space on top-level, non-auxiliary devices only. 3629 * The expandable space is reported in terms of metaslab-sized units 3630 * since that determines how much space the pool can expand. 3631 */ 3632 if (vd->vdev_aux == NULL && tvd != NULL && vd->vdev_max_asize != 0) { 3633 vs->vs_esize = P2ALIGN(vd->vdev_max_asize - vd->vdev_asize - 3634 spa->spa_bootsize, 1ULL << tvd->vdev_ms_shift); 3635 } 3636 vs->vs_configured_ashift = vd->vdev_top != NULL 3637 ? vd->vdev_top->vdev_ashift : vd->vdev_ashift; 3638 vs->vs_logical_ashift = vd->vdev_logical_ashift; 3639 vs->vs_physical_ashift = vd->vdev_physical_ashift; 3640 if (vd->vdev_aux == NULL && vd == vd->vdev_top && 3641 vdev_is_concrete(vd)) { 3642 vs->vs_fragmentation = vd->vdev_mg->mg_fragmentation; 3643 } 3644 3645 /* 3646 * If we're getting stats on the root vdev, aggregate the I/O counts 3647 * over all top-level vdevs (i.e. the direct children of the root).
3648 */ 3649 if (vd == rvd) { 3650 for (int c = 0; c < rvd->vdev_children; c++) { 3651 vdev_t *cvd = rvd->vdev_child[c]; 3652 vdev_stat_t *cvs = &cvd->vdev_stat; 3653 3654 for (int t = 0; t < ZIO_TYPES; t++) { 3655 vs->vs_ops[t] += cvs->vs_ops[t]; 3656 vs->vs_bytes[t] += cvs->vs_bytes[t]; 3657 } 3658 cvs->vs_scan_removing = cvd->vdev_removing; 3659 } 3660 } 3661 mutex_exit(&vd->vdev_stat_lock); 3662} 3663 3664void 3665vdev_clear_stats(vdev_t *vd) 3666{ 3667 mutex_enter(&vd->vdev_stat_lock); 3668 vd->vdev_stat.vs_space = 0; 3669 vd->vdev_stat.vs_dspace = 0; 3670 vd->vdev_stat.vs_alloc = 0; 3671 mutex_exit(&vd->vdev_stat_lock); 3672} 3673 3674void 3675vdev_scan_stat_init(vdev_t *vd) 3676{ 3677 vdev_stat_t *vs = &vd->vdev_stat; 3678 3679 for (int c = 0; c < vd->vdev_children; c++) 3680 vdev_scan_stat_init(vd->vdev_child[c]); 3681 3682 mutex_enter(&vd->vdev_stat_lock); 3683 vs->vs_scan_processed = 0; 3684 mutex_exit(&vd->vdev_stat_lock); 3685} 3686 3687void 3688vdev_stat_update(zio_t *zio, uint64_t psize) 3689{ 3690 spa_t *spa = zio->io_spa; 3691 vdev_t *rvd = spa->spa_root_vdev; 3692 vdev_t *vd = zio->io_vd ? zio->io_vd : rvd; 3693 vdev_t *pvd; 3694 uint64_t txg = zio->io_txg; 3695 vdev_stat_t *vs = &vd->vdev_stat; 3696 zio_type_t type = zio->io_type; 3697 int flags = zio->io_flags; 3698 3699 /* 3700 * If this i/o is a gang leader, it didn't do any actual work. 3701 */ 3702 if (zio->io_gang_tree) 3703 return; 3704 3705 if (zio->io_error == 0) { 3706 /* 3707 * If this is a root i/o, don't count it -- we've already 3708 * counted the top-level vdevs, and vdev_get_stats() will 3709 * aggregate them when asked. This reduces contention on 3710 * the root vdev_stat_lock and implicitly handles blocks 3711 * that compress away to holes, for which there is no i/o. 3712 * (Holes never create vdev children, so all the counters 3713 * remain zero, which is what we want.) 3714 * 3715 * Note: this only applies to successful i/o (io_error == 0) 3716 * because unlike i/o counts, errors are not additive. 3717 * When reading a ditto block, for example, failure of 3718 * one top-level vdev does not imply a root-level error. 3719 */ 3720 if (vd == rvd) 3721 return; 3722 3723 ASSERT(vd == zio->io_vd); 3724 3725 if (flags & ZIO_FLAG_IO_BYPASS) 3726 return; 3727 3728 mutex_enter(&vd->vdev_stat_lock); 3729 3730 if (flags & ZIO_FLAG_IO_REPAIR) { 3731 if (flags & ZIO_FLAG_SCAN_THREAD) { 3732 dsl_scan_phys_t *scn_phys = 3733 &spa->spa_dsl_pool->dp_scan->scn_phys; 3734 uint64_t *processed = &scn_phys->scn_processed; 3735 3736 /* XXX cleanup? */ 3737 if (vd->vdev_ops->vdev_op_leaf) 3738 atomic_add_64(processed, psize); 3739 vs->vs_scan_processed += psize; 3740 } 3741 3742 if (flags & ZIO_FLAG_SELF_HEAL) 3743 vs->vs_self_healed += psize; 3744 } 3745 3746 vs->vs_ops[type]++; 3747 vs->vs_bytes[type] += psize; 3748 3749 mutex_exit(&vd->vdev_stat_lock); 3750 return; 3751 } 3752 3753 if (flags & ZIO_FLAG_SPECULATIVE) 3754 return; 3755 3756 /* 3757 * If this is an I/O error that is going to be retried, then ignore the 3758 * error. Otherwise, the user may interpret B_FAILFAST I/O errors as 3759 * hard errors, when in reality they can happen for any number of 3760 * innocuous reasons (bus resets, MPxIO link failure, etc). 3761 */ 3762 if (zio->io_error == EIO && 3763 !(zio->io_flags & ZIO_FLAG_IO_RETRY)) 3764 return; 3765 3766 /* 3767 * Intent log writes won't propagate their error to the root 3768 * I/O, so don't mark these types of failures as pool-level 3769 * errors.
3770	 */ 3771	if (zio->io_vd == NULL && (zio->io_flags & ZIO_FLAG_DONT_PROPAGATE)) 3772		return; 3773 3774	mutex_enter(&vd->vdev_stat_lock); 3775	if (type == ZIO_TYPE_READ && !vdev_is_dead(vd)) { 3776		if (zio->io_error == ECKSUM) 3777			vs->vs_checksum_errors++; 3778		else 3779			vs->vs_read_errors++; 3780	} 3781	if (type == ZIO_TYPE_WRITE && !vdev_is_dead(vd)) 3782		vs->vs_write_errors++; 3783	mutex_exit(&vd->vdev_stat_lock); 3784 3785	if (spa->spa_load_state == SPA_LOAD_NONE && 3786	    type == ZIO_TYPE_WRITE && txg != 0 && 3787	    (!(flags & ZIO_FLAG_IO_REPAIR) || 3788	    (flags & ZIO_FLAG_SCAN_THREAD) || 3789	    spa->spa_claiming)) { 3790		/* 3791		 * This is either a normal write (not a repair), or it's 3792		 * a repair induced by the scrub thread, or it's a repair 3793		 * made by zil_claim() during spa_load() in the first txg. 3794		 * In the normal case, we commit the DTL change in the same 3795		 * txg as the block was born. In the scrub-induced repair 3796		 * case, we know that scrubs run in first-pass syncing context, 3797		 * so we commit the DTL change in spa_syncing_txg(spa). 3798		 * In the zil_claim() case, we commit in spa_first_txg(spa). 3799		 * 3800		 * We currently do not make DTL entries for failed spontaneous 3801		 * self-healing writes triggered by normal (non-scrubbing) 3802		 * reads, because we have no transactional context in which to 3803		 * do so -- and it's not clear that it'd be desirable anyway. 3804		 */ 3805		if (vd->vdev_ops->vdev_op_leaf) { 3806			uint64_t commit_txg = txg; 3807			if (flags & ZIO_FLAG_SCAN_THREAD) { 3808				ASSERT(flags & ZIO_FLAG_IO_REPAIR); 3809				ASSERT(spa_sync_pass(spa) == 1); 3810				vdev_dtl_dirty(vd, DTL_SCRUB, txg, 1); 3811				commit_txg = spa_syncing_txg(spa); 3812			} else if (spa->spa_claiming) { 3813				ASSERT(flags & ZIO_FLAG_IO_REPAIR); 3814				commit_txg = spa_first_txg(spa); 3815			} 3816			ASSERT(commit_txg >= spa_syncing_txg(spa)); 3817			if (vdev_dtl_contains(vd, DTL_MISSING, txg, 1)) 3818				return; 3819			for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent) 3820				vdev_dtl_dirty(pvd, DTL_PARTIAL, txg, 1); 3821			vdev_dirty(vd->vdev_top, VDD_DTL, vd, commit_txg); 3822		} 3823		if (vd != rvd) 3824			vdev_dtl_dirty(vd, DTL_MISSING, txg, 1); 3825	} 3826} 3827 3828/* 3829 * Update the in-core space usage stats for this vdev, its metaslab class, 3830 * and the root vdev. 3831 */ 3832void 3833vdev_space_update(vdev_t *vd, int64_t alloc_delta, int64_t defer_delta, 3834    int64_t space_delta) 3835{ 3836	int64_t dspace_delta = space_delta; 3837	spa_t *spa = vd->vdev_spa; 3838	vdev_t *rvd = spa->spa_root_vdev; 3839	metaslab_group_t *mg = vd->vdev_mg; 3840	metaslab_class_t *mc = mg ? mg->mg_class : NULL; 3841 3842	ASSERT(vd == vd->vdev_top); 3843 3844	/* 3845	 * Apply the inverse of the psize-to-asize (i.e. RAID-Z) space-expansion 3846	 * factor. We must calculate this here and not at the root vdev 3847	 * because the root vdev's psize-to-asize is simply the max of its 3848	 * children's, thus not accurate enough for us.
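	 * (The delta is converted to SPA_MINBLOCKSIZE-sized units, which is why the alignment assertion below must hold, and is then scaled by this top-level vdev's vdev_deflate_ratio.)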
3849	 */ 3850	ASSERT((dspace_delta & (SPA_MINBLOCKSIZE-1)) == 0); 3851	ASSERT(vd->vdev_deflate_ratio != 0 || vd->vdev_isl2cache); 3852	dspace_delta = (dspace_delta >> SPA_MINBLOCKSHIFT) * 3853	    vd->vdev_deflate_ratio; 3854 3855	mutex_enter(&vd->vdev_stat_lock); 3856	vd->vdev_stat.vs_alloc += alloc_delta; 3857	vd->vdev_stat.vs_space += space_delta; 3858	vd->vdev_stat.vs_dspace += dspace_delta; 3859	mutex_exit(&vd->vdev_stat_lock); 3860 3861	if (mc == spa_normal_class(spa)) { 3862		mutex_enter(&rvd->vdev_stat_lock); 3863		rvd->vdev_stat.vs_alloc += alloc_delta; 3864		rvd->vdev_stat.vs_space += space_delta; 3865		rvd->vdev_stat.vs_dspace += dspace_delta; 3866		mutex_exit(&rvd->vdev_stat_lock); 3867	} 3868 3869	if (mc != NULL) { 3870		ASSERT(rvd == vd->vdev_parent); 3871		ASSERT(vd->vdev_ms_count != 0); 3872 3873		metaslab_class_space_update(mc, 3874		    alloc_delta, defer_delta, space_delta, dspace_delta); 3875	} 3876} 3877 3878/* 3879 * Mark a top-level vdev's config as dirty, placing it on the dirty list 3880 * so that it will be written out next time the vdev configuration is synced. 3881 * If the root vdev is specified (vdev_top == NULL), dirty all top-level vdevs. 3882 */ 3883void 3884vdev_config_dirty(vdev_t *vd) 3885{ 3886	spa_t *spa = vd->vdev_spa; 3887	vdev_t *rvd = spa->spa_root_vdev; 3888	int c; 3889 3890	ASSERT(spa_writeable(spa)); 3891 3892	/* 3893	 * If this is an aux vdev (as with l2cache and spare devices), then we 3894	 * update the vdev config manually and set the sync flag. 3895	 */ 3896	if (vd->vdev_aux != NULL) { 3897		spa_aux_vdev_t *sav = vd->vdev_aux; 3898		nvlist_t **aux; 3899		uint_t naux; 3900 3901		for (c = 0; c < sav->sav_count; c++) { 3902			if (sav->sav_vdevs[c] == vd) 3903				break; 3904		} 3905 3906		if (c == sav->sav_count) { 3907			/* 3908			 * We're being removed. There's nothing more to do. 3909			 */ 3910			ASSERT(sav->sav_sync == B_TRUE); 3911			return; 3912		} 3913 3914		sav->sav_sync = B_TRUE; 3915 3916		if (nvlist_lookup_nvlist_array(sav->sav_config, 3917		    ZPOOL_CONFIG_L2CACHE, &aux, &naux) != 0) { 3918			VERIFY(nvlist_lookup_nvlist_array(sav->sav_config, 3919			    ZPOOL_CONFIG_SPARES, &aux, &naux) == 0); 3920		} 3921 3922		ASSERT(c < naux); 3923 3924		/* 3925		 * Setting the nvlist in the middle of the array is a little 3926		 * sketchy, but it will work. 3927		 */ 3928		nvlist_free(aux[c]); 3929		aux[c] = vdev_config_generate(spa, vd, B_TRUE, 0); 3930 3931		return; 3932	} 3933 3934	/* 3935	 * The dirty list is protected by the SCL_CONFIG lock. The caller 3936	 * must either hold SCL_CONFIG as writer, or must be the sync thread 3937	 * (which holds SCL_CONFIG as reader). There's only one sync thread, 3938	 * so this is sufficient to ensure mutual exclusion.
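	 * (The assertion that follows checks exactly this: either SCL_CONFIG is held as writer, or we are in the DSL pool sync context holding SCL_CONFIG as reader.)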
3939 */ 3940 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_WRITER) || 3941 (dsl_pool_sync_context(spa_get_dsl(spa)) && 3942 spa_config_held(spa, SCL_CONFIG, RW_READER))); 3943 3944 if (vd == rvd) { 3945 for (c = 0; c < rvd->vdev_children; c++) 3946 vdev_config_dirty(rvd->vdev_child[c]); 3947 } else { 3948 ASSERT(vd == vd->vdev_top); 3949 3950 if (!list_link_active(&vd->vdev_config_dirty_node) && 3951 vdev_is_concrete(vd)) { 3952 list_insert_head(&spa->spa_config_dirty_list, vd); 3953 } 3954 } 3955} 3956 3957void 3958vdev_config_clean(vdev_t *vd) 3959{ 3960 spa_t *spa = vd->vdev_spa; 3961 3962 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_WRITER) || 3963 (dsl_pool_sync_context(spa_get_dsl(spa)) && 3964 spa_config_held(spa, SCL_CONFIG, RW_READER))); 3965 3966 ASSERT(list_link_active(&vd->vdev_config_dirty_node)); 3967 list_remove(&spa->spa_config_dirty_list, vd); 3968} 3969 3970/* 3971 * Mark a top-level vdev's state as dirty, so that the next pass of 3972 * spa_sync() can convert this into vdev_config_dirty(). We distinguish 3973 * the state changes from larger config changes because they require 3974 * much less locking, and are often needed for administrative actions. 3975 */ 3976void 3977vdev_state_dirty(vdev_t *vd) 3978{ 3979 spa_t *spa = vd->vdev_spa; 3980 3981 ASSERT(spa_writeable(spa)); 3982 ASSERT(vd == vd->vdev_top); 3983 3984 /* 3985 * The state list is protected by the SCL_STATE lock. The caller 3986 * must either hold SCL_STATE as writer, or must be the sync thread 3987 * (which holds SCL_STATE as reader). There's only one sync thread, 3988 * so this is sufficient to ensure mutual exclusion. 3989 */ 3990 ASSERT(spa_config_held(spa, SCL_STATE, RW_WRITER) || 3991 (dsl_pool_sync_context(spa_get_dsl(spa)) && 3992 spa_config_held(spa, SCL_STATE, RW_READER))); 3993 3994 if (!list_link_active(&vd->vdev_state_dirty_node) && 3995 vdev_is_concrete(vd)) 3996 list_insert_head(&spa->spa_state_dirty_list, vd); 3997} 3998 3999void 4000vdev_state_clean(vdev_t *vd) 4001{ 4002 spa_t *spa = vd->vdev_spa; 4003 4004 ASSERT(spa_config_held(spa, SCL_STATE, RW_WRITER) || 4005 (dsl_pool_sync_context(spa_get_dsl(spa)) && 4006 spa_config_held(spa, SCL_STATE, RW_READER))); 4007 4008 ASSERT(list_link_active(&vd->vdev_state_dirty_node)); 4009 list_remove(&spa->spa_state_dirty_list, vd); 4010} 4011 4012/* 4013 * Propagate vdev state up from children to parent. 4014 */ 4015void 4016vdev_propagate_state(vdev_t *vd) 4017{ 4018 spa_t *spa = vd->vdev_spa; 4019 vdev_t *rvd = spa->spa_root_vdev; 4020 int degraded = 0, faulted = 0; 4021 int corrupted = 0; 4022 vdev_t *child; 4023 4024 if (vd->vdev_children > 0) { 4025 for (int c = 0; c < vd->vdev_children; c++) { 4026 child = vd->vdev_child[c]; 4027 4028 /* 4029 * Don't factor holes or indirect vdevs into the 4030 * decision. 4031 */ 4032 if (!vdev_is_concrete(child)) 4033 continue; 4034 4035 if (!vdev_readable(child) || 4036 (!vdev_writeable(child) && spa_writeable(spa))) { 4037 /* 4038 * Root special: if there is a top-level log 4039 * device, treat the root vdev as if it were 4040 * degraded. 
4041 */ 4042 if (child->vdev_islog && vd == rvd) 4043 degraded++; 4044 else 4045 faulted++; 4046 } else if (child->vdev_state <= VDEV_STATE_DEGRADED) { 4047 degraded++; 4048 } 4049 4050 if (child->vdev_stat.vs_aux == VDEV_AUX_CORRUPT_DATA) 4051 corrupted++; 4052 } 4053 4054 vd->vdev_ops->vdev_op_state_change(vd, faulted, degraded); 4055 4056 /* 4057 * Root special: if there is a top-level vdev that cannot be 4058 * opened due to corrupted metadata, then propagate the root 4059 * vdev's aux state as 'corrupt' rather than 'insufficient 4060 * replicas'. 4061 */ 4062 if (corrupted && vd == rvd && 4063 rvd->vdev_state == VDEV_STATE_CANT_OPEN) 4064 vdev_set_state(rvd, B_FALSE, VDEV_STATE_CANT_OPEN, 4065 VDEV_AUX_CORRUPT_DATA); 4066 } 4067 4068 if (vd->vdev_parent) 4069 vdev_propagate_state(vd->vdev_parent); 4070} 4071 4072/* 4073 * Set a vdev's state. If this is during an open, we don't update the parent 4074 * state, because we're in the process of opening children depth-first. 4075 * Otherwise, we propagate the change to the parent. 4076 * 4077 * If this routine places a device in a faulted state, an appropriate ereport is 4078 * generated. 4079 */ 4080void 4081vdev_set_state(vdev_t *vd, boolean_t isopen, vdev_state_t state, vdev_aux_t aux) 4082{ 4083 uint64_t save_state; 4084 spa_t *spa = vd->vdev_spa; 4085 4086 if (state == vd->vdev_state) { 4087 vd->vdev_stat.vs_aux = aux; 4088 return; 4089 } 4090 4091 save_state = vd->vdev_state; 4092 4093 vd->vdev_state = state; 4094 vd->vdev_stat.vs_aux = aux; 4095 4096 /* 4097 * If we are setting the vdev state to anything but an open state, then 4098 * always close the underlying device unless the device has requested 4099 * a delayed close (i.e. we're about to remove or fault the device). 4100 * Otherwise, we keep accessible but invalid devices open forever. 4101 * We don't call vdev_close() itself, because that implies some extra 4102 * checks (offline, etc) that we don't want here. This is limited to 4103 * leaf devices, because otherwise closing the device will affect other 4104 * children. 4105 */ 4106 if (!vd->vdev_delayed_close && vdev_is_dead(vd) && 4107 vd->vdev_ops->vdev_op_leaf) 4108 vd->vdev_ops->vdev_op_close(vd); 4109 4110 if (vd->vdev_removed && 4111 state == VDEV_STATE_CANT_OPEN && 4112 (aux == VDEV_AUX_OPEN_FAILED || vd->vdev_checkremove)) { 4113 /* 4114 * If the previous state is set to VDEV_STATE_REMOVED, then this 4115 * device was previously marked removed and someone attempted to 4116 * reopen it. If this failed due to a nonexistent device, then 4117 * keep the device in the REMOVED state. We also let this be if 4118 * it is one of our special test online cases, which is only 4119 * attempting to online the device and shouldn't generate an FMA 4120 * fault. 4121 */ 4122 vd->vdev_state = VDEV_STATE_REMOVED; 4123 vd->vdev_stat.vs_aux = VDEV_AUX_NONE; 4124 } else if (state == VDEV_STATE_REMOVED) { 4125 vd->vdev_removed = B_TRUE; 4126 } else if (state == VDEV_STATE_CANT_OPEN) { 4127 /* 4128 * If we fail to open a vdev during an import or recovery, we 4129 * mark it as "not available", which signifies that it was 4130 * never there to begin with. Failure to open such a device 4131 * is not considered an error. 4132 */ 4133 if ((spa_load_state(spa) == SPA_LOAD_IMPORT || 4134 spa_load_state(spa) == SPA_LOAD_RECOVER) && 4135 vd->vdev_ops->vdev_op_leaf) 4136 vd->vdev_not_present = 1; 4137 4138 /* 4139 * Post the appropriate ereport. 
If the 'prevstate' field is 4140	 * set to something other than VDEV_STATE_UNKNOWN, it indicates 4141	 * that this is part of a vdev_reopen(). In this case, we don't 4142	 * want to post the ereport if the device was already in the 4143	 * CANT_OPEN state beforehand. 4144	 * 4145	 * If the 'checkremove' flag is set, then this is an attempt to 4146	 * online the device in response to an insertion event. If we 4147	 * hit this case, then we have detected an insertion event for a 4148	 * faulted or offline device that wasn't in the removed state. 4149	 * In this scenario, we don't post an ereport because we are 4150	 * about to replace the device, or attempt an online with 4151	 * vdev_forcefault, which will generate the fault for us. 4152	 */ 4153		if ((vd->vdev_prevstate != state || vd->vdev_forcefault) && 4154		    !vd->vdev_not_present && !vd->vdev_checkremove && 4155		    vd != spa->spa_root_vdev) { 4156			const char *class; 4157 4158			switch (aux) { 4159			case VDEV_AUX_OPEN_FAILED: 4160				class = FM_EREPORT_ZFS_DEVICE_OPEN_FAILED; 4161				break; 4162			case VDEV_AUX_CORRUPT_DATA: 4163				class = FM_EREPORT_ZFS_DEVICE_CORRUPT_DATA; 4164				break; 4165			case VDEV_AUX_NO_REPLICAS: 4166				class = FM_EREPORT_ZFS_DEVICE_NO_REPLICAS; 4167				break; 4168			case VDEV_AUX_BAD_GUID_SUM: 4169				class = FM_EREPORT_ZFS_DEVICE_BAD_GUID_SUM; 4170				break; 4171			case VDEV_AUX_TOO_SMALL: 4172				class = FM_EREPORT_ZFS_DEVICE_TOO_SMALL; 4173				break; 4174			case VDEV_AUX_BAD_LABEL: 4175				class = FM_EREPORT_ZFS_DEVICE_BAD_LABEL; 4176				break; 4177			default: 4178				class = FM_EREPORT_ZFS_DEVICE_UNKNOWN; 4179			} 4180 4181			zfs_ereport_post(class, spa, vd, NULL, save_state, 0); 4182		} 4183 4184		/* Erase any notion of persistent removed state */ 4185		vd->vdev_removed = B_FALSE; 4186	} else { 4187		vd->vdev_removed = B_FALSE; 4188	} 4189 4190	/* 4191	 * Notify the fmd of the state change. Be verbose and post 4192	 * notifications even for stuff that's not important; the fmd agent can 4193	 * sort it out. Don't emit state change events for non-leaf vdevs since 4194	 * they can't change state on their own. The FMD can check their state 4195	 * if it wants to when it sees that a leaf vdev had a state change. 4196	 */ 4197	if (vd->vdev_ops->vdev_op_leaf) 4198		zfs_post_state_change(spa, vd); 4199 4200	if (!isopen && vd->vdev_parent) 4201		vdev_propagate_state(vd->vdev_parent); 4202} 4203 4204boolean_t 4205vdev_children_are_offline(vdev_t *vd) 4206{ 4207	ASSERT(!vd->vdev_ops->vdev_op_leaf); 4208 4209	for (uint64_t i = 0; i < vd->vdev_children; i++) { 4210		if (vd->vdev_child[i]->vdev_state != VDEV_STATE_OFFLINE) 4211			return (B_FALSE); 4212	} 4213 4214	return (B_TRUE); 4215} 4216 4217/* 4218 * Check the vdev configuration to ensure that it's capable of supporting 4219 * a root pool. We do not support partial configuration. 4220 * In addition, only a single top-level vdev is allowed. 4221 * 4222 * FreeBSD does not have the above limitations.
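 * (Concretely, the illumos-only checks below reject a root vdev with more than one child and non-leaf vdevs of type VDEV_TYPE_MISSING or VDEV_TYPE_INDIRECT, applying the same test recursively to every child; on FreeBSD the function simply returns B_TRUE.)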
4223 */ 4224boolean_t 4225vdev_is_bootable(vdev_t *vd) 4226{ 4227#ifdef illumos 4228	if (!vd->vdev_ops->vdev_op_leaf) { 4229		char *vdev_type = vd->vdev_ops->vdev_op_type; 4230 4231		if (strcmp(vdev_type, VDEV_TYPE_ROOT) == 0 && 4232		    vd->vdev_children > 1) { 4233			return (B_FALSE); 4234		} else if (strcmp(vdev_type, VDEV_TYPE_MISSING) == 0 || 4235		    strcmp(vdev_type, VDEV_TYPE_INDIRECT) == 0) { 4236			return (B_FALSE); 4237		} 4238	} 4239 4240	for (int c = 0; c < vd->vdev_children; c++) { 4241		if (!vdev_is_bootable(vd->vdev_child[c])) 4242			return (B_FALSE); 4243	} 4244#endif	/* illumos */ 4245	return (B_TRUE); 4246} 4247 4248boolean_t 4249vdev_is_concrete(vdev_t *vd) 4250{ 4251	vdev_ops_t *ops = vd->vdev_ops; 4252	if (ops == &vdev_indirect_ops || ops == &vdev_hole_ops || 4253	    ops == &vdev_missing_ops || ops == &vdev_root_ops) { 4254		return (B_FALSE); 4255	} else { 4256		return (B_TRUE); 4257	} 4258} 4259 4260/* 4261 * Determine if a log device has valid content. If the vdev was 4262 * removed or faulted in the MOS config then we know that 4263 * the content on the log device has already been written to the pool. 4264 */ 4265boolean_t 4266vdev_log_state_valid(vdev_t *vd) 4267{ 4268	if (vd->vdev_ops->vdev_op_leaf && !vd->vdev_faulted && 4269	    !vd->vdev_removed) 4270		return (B_TRUE); 4271 4272	for (int c = 0; c < vd->vdev_children; c++) 4273		if (vdev_log_state_valid(vd->vdev_child[c])) 4274			return (B_TRUE); 4275 4276	return (B_FALSE); 4277} 4278 4279/* 4280 * Expand a vdev if possible. 4281 */ 4282void 4283vdev_expand(vdev_t *vd, uint64_t txg) 4284{ 4285	ASSERT(vd->vdev_top == vd); 4286	ASSERT(spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL); 4287	ASSERT(vdev_is_concrete(vd)); 4288 4289	vdev_set_deflate_ratio(vd); 4290 4291	if ((vd->vdev_asize >> vd->vdev_ms_shift) > vd->vdev_ms_count) { 4292		VERIFY(vdev_metaslab_init(vd, txg) == 0); 4293		vdev_config_dirty(vd); 4294	} 4295} 4296 4297/* 4298 * Split a vdev. 4299 */ 4300void 4301vdev_split(vdev_t *vd) 4302{ 4303	vdev_t *cvd, *pvd = vd->vdev_parent; 4304 4305	vdev_remove_child(pvd, vd); 4306	vdev_compact_children(pvd); 4307 4308	cvd = pvd->vdev_child[0]; 4309	if (pvd->vdev_children == 1) { 4310		vdev_remove_parent(cvd); 4311		cvd->vdev_splitting = B_TRUE; 4312	} 4313	vdev_propagate_state(cvd); 4314} 4315 4316void 4317vdev_deadman(vdev_t *vd) 4318{ 4319	for (int c = 0; c < vd->vdev_children; c++) { 4320		vdev_t *cvd = vd->vdev_child[c]; 4321 4322		vdev_deadman(cvd); 4323	} 4324 4325	if (vd->vdev_ops->vdev_op_leaf) { 4326		vdev_queue_t *vq = &vd->vdev_queue; 4327 4328		mutex_enter(&vq->vq_lock); 4329		if (avl_numnodes(&vq->vq_active_tree) > 0) { 4330			spa_t *spa = vd->vdev_spa; 4331			zio_t *fio; 4332			uint64_t delta; 4333 4334			/* 4335			 * Look at the head of all the pending queues; 4336			 * if any I/O has been outstanding for longer than 4337			 * the spa_deadman_synctime, we panic the system. 4338			 */ 4339			fio = avl_first(&vq->vq_active_tree); 4340			delta = gethrtime() - fio->io_timestamp; 4341			if (delta > spa_deadman_synctime(spa)) { 4342				vdev_dbgmsg(vd, "SLOW IO: zio timestamp " 4343				    "%lluns, delta %lluns, last io %lluns", 4344				    fio->io_timestamp, (u_longlong_t)delta, 4345				    vq->vq_io_complete_ts); 4346				fm_panic("I/O to pool '%s' appears to be " 4347				    "hung on vdev guid %llu at '%s'.", 4348				    spa_name(spa), 4349				    (long long unsigned int) vd->vdev_guid, 4350				    vd->vdev_path); 4351			} 4352		} 4353		mutex_exit(&vq->vq_lock); 4354	} 4355} 4356