1/* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22/* 23 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. 24 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
25 * Copyright (c) 2012 by Delphix. All rights reserved.
26 */ 27 28#include <sys/zfs_context.h> 29#include <sys/fm/fs/zfs.h> 30#include <sys/spa.h> 31#include <sys/spa_impl.h> 32#include <sys/dmu.h> 33#include <sys/dmu_tx.h> 34#include <sys/vdev_impl.h> 35#include <sys/uberblock_impl.h> 36#include <sys/metaslab.h> 37#include <sys/metaslab_impl.h> 38#include <sys/space_map.h> 39#include <sys/zio.h> 40#include <sys/zap.h> 41#include <sys/fs/zfs.h> 42#include <sys/arc.h> 43#include <sys/zil.h> 44#include <sys/dsl_scan.h> 45 46SYSCTL_DECL(_vfs_zfs); 47SYSCTL_NODE(_vfs_zfs, OID_AUTO, vdev, CTLFLAG_RW, 0, "ZFS VDEV"); 48 49/* 50 * Virtual device management. 51 */ 52 53static vdev_ops_t *vdev_ops_table[] = { 54 &vdev_root_ops, 55 &vdev_raidz_ops, 56 &vdev_mirror_ops, 57 &vdev_replacing_ops, 58 &vdev_spare_ops, 59#ifdef _KERNEL 60 &vdev_geom_ops, 61#else 62 &vdev_disk_ops, 63#endif 64 &vdev_file_ops, 65 &vdev_missing_ops, 66 &vdev_hole_ops, 67 NULL 68}; 69 70/* maximum scrub/resilver I/O queue per leaf vdev */ 71int zfs_scrub_limit = 10; 72 73TUNABLE_INT("vfs.zfs.scrub_limit", &zfs_scrub_limit); 74SYSCTL_INT(_vfs_zfs, OID_AUTO, scrub_limit, CTLFLAG_RDTUN, &zfs_scrub_limit, 0, 75 "Maximum scrub/resilver I/O queue"); 76 77/* 78 * Given a vdev type, return the appropriate ops vector. 79 */ 80static vdev_ops_t * 81vdev_getops(const char *type) 82{ 83 vdev_ops_t *ops, **opspp; 84 85 for (opspp = vdev_ops_table; (ops = *opspp) != NULL; opspp++) 86 if (strcmp(ops->vdev_op_type, type) == 0) 87 break; 88 89 return (ops); 90} 91 92/* 93 * Default asize function: return the MAX of psize with the asize of 94 * all children. This is what's used by anything other than RAID-Z. 95 */ 96uint64_t 97vdev_default_asize(vdev_t *vd, uint64_t psize) 98{ 99 uint64_t asize = P2ROUNDUP(psize, 1ULL << vd->vdev_top->vdev_ashift); 100 uint64_t csize; 101 102 for (int c = 0; c < vd->vdev_children; c++) { 103 csize = vdev_psize_to_asize(vd->vdev_child[c], psize); 104 asize = MAX(asize, csize); 105 } 106 107 return (asize); 108} 109 110/* 111 * Get the minimum allocatable size. We define the allocatable size as 112 * the vdev's asize rounded to the nearest metaslab. This allows us to 113 * replace or attach devices which don't have the same physical size but 114 * can still satisfy the same number of allocations. 115 */ 116uint64_t 117vdev_get_min_asize(vdev_t *vd) 118{ 119 vdev_t *pvd = vd->vdev_parent; 120 121 /*
122 * If our parent is NULL (inactive spare or cache) or is the root,
123 * just return our own asize. 124 */ 125 if (pvd == NULL) 126 return (vd->vdev_asize); 127 128 /* 129 * The top-level vdev just returns the allocatable size rounded 130 * to the nearest metaslab. 131 */ 132 if (vd == vd->vdev_top) 133 return (P2ALIGN(vd->vdev_asize, 1ULL << vd->vdev_ms_shift)); 134 135 /* 136 * The allocatable space for a raidz vdev is N * sizeof(smallest child), 137 * so each child must provide at least 1/Nth of its asize. 138 */ 139 if (pvd->vdev_ops == &vdev_raidz_ops) 140 return (pvd->vdev_min_asize / pvd->vdev_children); 141 142 return (pvd->vdev_min_asize); 143} 144 145void 146vdev_set_min_asize(vdev_t *vd) 147{ 148 vd->vdev_min_asize = vdev_get_min_asize(vd); 149 150 for (int c = 0; c < vd->vdev_children; c++) 151 vdev_set_min_asize(vd->vdev_child[c]); 152} 153 154vdev_t * 155vdev_lookup_top(spa_t *spa, uint64_t vdev) 156{ 157 vdev_t *rvd = spa->spa_root_vdev; 158 159 ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0); 160 161 if (vdev < rvd->vdev_children) { 162 ASSERT(rvd->vdev_child[vdev] != NULL); 163 return (rvd->vdev_child[vdev]); 164 } 165 166 return (NULL); 167} 168 169vdev_t * 170vdev_lookup_by_guid(vdev_t *vd, uint64_t guid) 171{ 172 vdev_t *mvd; 173 174 if (vd->vdev_guid == guid) 175 return (vd); 176 177 for (int c = 0; c < vd->vdev_children; c++) 178 if ((mvd = vdev_lookup_by_guid(vd->vdev_child[c], guid)) != 179 NULL) 180 return (mvd); 181 182 return (NULL); 183} 184 185void 186vdev_add_child(vdev_t *pvd, vdev_t *cvd) 187{ 188 size_t oldsize, newsize; 189 uint64_t id = cvd->vdev_id; 190 vdev_t **newchild; 191 192 ASSERT(spa_config_held(cvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL); 193 ASSERT(cvd->vdev_parent == NULL); 194 195 cvd->vdev_parent = pvd; 196 197 if (pvd == NULL) 198 return; 199 200 ASSERT(id >= pvd->vdev_children || pvd->vdev_child[id] == NULL); 201 202 oldsize = pvd->vdev_children * sizeof (vdev_t *); 203 pvd->vdev_children = MAX(pvd->vdev_children, id + 1); 204 newsize = pvd->vdev_children * sizeof (vdev_t *); 205 206 newchild = kmem_zalloc(newsize, KM_SLEEP); 207 if (pvd->vdev_child != NULL) { 208 bcopy(pvd->vdev_child, newchild, oldsize); 209 kmem_free(pvd->vdev_child, oldsize); 210 } 211 212 pvd->vdev_child = newchild; 213 pvd->vdev_child[id] = cvd; 214 215 cvd->vdev_top = (pvd->vdev_top ? pvd->vdev_top: cvd); 216 ASSERT(cvd->vdev_top->vdev_parent->vdev_parent == NULL); 217 218 /* 219 * Walk up all ancestors to update guid sum. 220 */ 221 for (; pvd != NULL; pvd = pvd->vdev_parent) 222 pvd->vdev_guid_sum += cvd->vdev_guid_sum; 223} 224 225void 226vdev_remove_child(vdev_t *pvd, vdev_t *cvd) 227{ 228 int c; 229 uint_t id = cvd->vdev_id; 230 231 ASSERT(cvd->vdev_parent == pvd); 232 233 if (pvd == NULL) 234 return; 235 236 ASSERT(id < pvd->vdev_children); 237 ASSERT(pvd->vdev_child[id] == cvd); 238 239 pvd->vdev_child[id] = NULL; 240 cvd->vdev_parent = NULL; 241 242 for (c = 0; c < pvd->vdev_children; c++) 243 if (pvd->vdev_child[c]) 244 break; 245 246 if (c == pvd->vdev_children) { 247 kmem_free(pvd->vdev_child, c * sizeof (vdev_t *)); 248 pvd->vdev_child = NULL; 249 pvd->vdev_children = 0; 250 } 251 252 /* 253 * Walk up all ancestors to update guid sum. 254 */ 255 for (; pvd != NULL; pvd = pvd->vdev_parent) 256 pvd->vdev_guid_sum -= cvd->vdev_guid_sum; 257} 258 259/* 260 * Remove any holes in the child array. 
261 */ 262void 263vdev_compact_children(vdev_t *pvd) 264{ 265 vdev_t **newchild, *cvd; 266 int oldc = pvd->vdev_children; 267 int newc; 268 269 ASSERT(spa_config_held(pvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL); 270 271 for (int c = newc = 0; c < oldc; c++) 272 if (pvd->vdev_child[c]) 273 newc++; 274 275 newchild = kmem_alloc(newc * sizeof (vdev_t *), KM_SLEEP); 276 277 for (int c = newc = 0; c < oldc; c++) { 278 if ((cvd = pvd->vdev_child[c]) != NULL) { 279 newchild[newc] = cvd; 280 cvd->vdev_id = newc++; 281 } 282 } 283 284 kmem_free(pvd->vdev_child, oldc * sizeof (vdev_t *)); 285 pvd->vdev_child = newchild; 286 pvd->vdev_children = newc; 287} 288 289/* 290 * Allocate and minimally initialize a vdev_t. 291 */ 292vdev_t * 293vdev_alloc_common(spa_t *spa, uint_t id, uint64_t guid, vdev_ops_t *ops) 294{ 295 vdev_t *vd; 296 297 vd = kmem_zalloc(sizeof (vdev_t), KM_SLEEP); 298 299 if (spa->spa_root_vdev == NULL) { 300 ASSERT(ops == &vdev_root_ops); 301 spa->spa_root_vdev = vd; 302 spa->spa_load_guid = spa_generate_guid(NULL); 303 } 304 305 if (guid == 0 && ops != &vdev_hole_ops) { 306 if (spa->spa_root_vdev == vd) { 307 /* 308 * The root vdev's guid will also be the pool guid, 309 * which must be unique among all pools. 310 */ 311 guid = spa_generate_guid(NULL); 312 } else { 313 /* 314 * Any other vdev's guid must be unique within the pool. 315 */ 316 guid = spa_generate_guid(spa); 317 } 318 ASSERT(!spa_guid_exists(spa_guid(spa), guid)); 319 } 320 321 vd->vdev_spa = spa; 322 vd->vdev_id = id; 323 vd->vdev_guid = guid; 324 vd->vdev_guid_sum = guid; 325 vd->vdev_ops = ops; 326 vd->vdev_state = VDEV_STATE_CLOSED; 327 vd->vdev_ishole = (ops == &vdev_hole_ops); 328 329 mutex_init(&vd->vdev_dtl_lock, NULL, MUTEX_DEFAULT, NULL); 330 mutex_init(&vd->vdev_stat_lock, NULL, MUTEX_DEFAULT, NULL); 331 mutex_init(&vd->vdev_probe_lock, NULL, MUTEX_DEFAULT, NULL); 332 for (int t = 0; t < DTL_TYPES; t++) { 333 space_map_create(&vd->vdev_dtl[t], 0, -1ULL, 0, 334 &vd->vdev_dtl_lock); 335 } 336 txg_list_create(&vd->vdev_ms_list, 337 offsetof(struct metaslab, ms_txg_node)); 338 txg_list_create(&vd->vdev_dtl_list, 339 offsetof(struct vdev, vdev_dtl_node)); 340 vd->vdev_stat.vs_timestamp = gethrtime(); 341 vdev_queue_init(vd); 342 vdev_cache_init(vd); 343 344 return (vd); 345} 346 347/* 348 * Allocate a new vdev. The 'alloctype' is used to control whether we are 349 * creating a new vdev or loading an existing one - the behavior is slightly 350 * different for each case. 351 */ 352int 353vdev_alloc(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent, uint_t id, 354 int alloctype) 355{ 356 vdev_ops_t *ops; 357 char *type; 358 uint64_t guid = 0, islog, nparity; 359 vdev_t *vd; 360 361 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL); 362 363 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0) 364 return (EINVAL); 365 366 if ((ops = vdev_getops(type)) == NULL) 367 return (EINVAL); 368 369 /* 370 * If this is a load, get the vdev guid from the nvlist. 371 * Otherwise, vdev_alloc_common() will generate one for us. 
372 */ 373 if (alloctype == VDEV_ALLOC_LOAD) { 374 uint64_t label_id; 375 376 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID, &label_id) || 377 label_id != id) 378 return (EINVAL); 379 380 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0) 381 return (EINVAL); 382 } else if (alloctype == VDEV_ALLOC_SPARE) { 383 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0) 384 return (EINVAL); 385 } else if (alloctype == VDEV_ALLOC_L2CACHE) { 386 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0) 387 return (EINVAL); 388 } else if (alloctype == VDEV_ALLOC_ROOTPOOL) { 389 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0) 390 return (EINVAL); 391 } 392 393 /* 394 * The first allocated vdev must be of type 'root'. 395 */ 396 if (ops != &vdev_root_ops && spa->spa_root_vdev == NULL) 397 return (EINVAL); 398 399 /* 400 * Determine whether we're a log vdev. 401 */ 402 islog = 0; 403 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG, &islog); 404 if (islog && spa_version(spa) < SPA_VERSION_SLOGS) 405 return (ENOTSUP); 406 407 if (ops == &vdev_hole_ops && spa_version(spa) < SPA_VERSION_HOLES) 408 return (ENOTSUP); 409 410 /* 411 * Set the nparity property for RAID-Z vdevs. 412 */ 413 nparity = -1ULL; 414 if (ops == &vdev_raidz_ops) { 415 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY, 416 &nparity) == 0) { 417 if (nparity == 0 || nparity > VDEV_RAIDZ_MAXPARITY) 418 return (EINVAL); 419 /* 420 * Previous versions could only support 1 or 2 parity 421 * device. 422 */ 423 if (nparity > 1 && 424 spa_version(spa) < SPA_VERSION_RAIDZ2) 425 return (ENOTSUP); 426 if (nparity > 2 && 427 spa_version(spa) < SPA_VERSION_RAIDZ3) 428 return (ENOTSUP); 429 } else { 430 /* 431 * We require the parity to be specified for SPAs that 432 * support multiple parity levels. 433 */ 434 if (spa_version(spa) >= SPA_VERSION_RAIDZ2) 435 return (EINVAL); 436 /* 437 * Otherwise, we default to 1 parity device for RAID-Z. 438 */ 439 nparity = 1; 440 } 441 } else { 442 nparity = 0; 443 } 444 ASSERT(nparity != -1ULL); 445 446 vd = vdev_alloc_common(spa, id, guid, ops); 447 448 vd->vdev_islog = islog; 449 vd->vdev_nparity = nparity; 450 451 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &vd->vdev_path) == 0) 452 vd->vdev_path = spa_strdup(vd->vdev_path); 453 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &vd->vdev_devid) == 0) 454 vd->vdev_devid = spa_strdup(vd->vdev_devid); 455 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PHYS_PATH, 456 &vd->vdev_physpath) == 0) 457 vd->vdev_physpath = spa_strdup(vd->vdev_physpath); 458 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_FRU, &vd->vdev_fru) == 0) 459 vd->vdev_fru = spa_strdup(vd->vdev_fru); 460 461 /* 462 * Set the whole_disk property. If it's not specified, leave the value 463 * as -1. 464 */ 465 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK, 466 &vd->vdev_wholedisk) != 0) 467 vd->vdev_wholedisk = -1ULL; 468 469 /* 470 * Look for the 'not present' flag. This will only be set if the device 471 * was not present at the time of import. 472 */ 473 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, 474 &vd->vdev_not_present); 475 476 /* 477 * Get the alignment requirement. 478 */ 479 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ASHIFT, &vd->vdev_ashift); 480 481 /* 482 * Retrieve the vdev creation time. 483 */ 484 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_CREATE_TXG, 485 &vd->vdev_crtxg); 486 487 /* 488 * If we're a top-level vdev, try to load the allocation parameters. 
489 */ 490 if (parent && !parent->vdev_parent && 491 (alloctype == VDEV_ALLOC_LOAD || alloctype == VDEV_ALLOC_SPLIT)) { 492 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_METASLAB_ARRAY, 493 &vd->vdev_ms_array); 494 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_METASLAB_SHIFT, 495 &vd->vdev_ms_shift); 496 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ASIZE, 497 &vd->vdev_asize); 498 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVING, 499 &vd->vdev_removing); 500 } 501 502 if (parent && !parent->vdev_parent && alloctype != VDEV_ALLOC_ATTACH) { 503 ASSERT(alloctype == VDEV_ALLOC_LOAD || 504 alloctype == VDEV_ALLOC_ADD || 505 alloctype == VDEV_ALLOC_SPLIT || 506 alloctype == VDEV_ALLOC_ROOTPOOL); 507 vd->vdev_mg = metaslab_group_create(islog ? 508 spa_log_class(spa) : spa_normal_class(spa), vd); 509 } 510 511 /* 512 * If we're a leaf vdev, try to load the DTL object and other state. 513 */ 514 if (vd->vdev_ops->vdev_op_leaf && 515 (alloctype == VDEV_ALLOC_LOAD || alloctype == VDEV_ALLOC_L2CACHE || 516 alloctype == VDEV_ALLOC_ROOTPOOL)) { 517 if (alloctype == VDEV_ALLOC_LOAD) { 518 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DTL, 519 &vd->vdev_dtl_smo.smo_object); 520 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_UNSPARE, 521 &vd->vdev_unspare); 522 } 523 524 if (alloctype == VDEV_ALLOC_ROOTPOOL) { 525 uint64_t spare = 0; 526 527 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE, 528 &spare) == 0 && spare) 529 spa_spare_add(vd); 530 } 531 532 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, 533 &vd->vdev_offline); 534 535 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_RESILVERING, 536 &vd->vdev_resilvering); 537 538 /* 539 * When importing a pool, we want to ignore the persistent fault 540 * state, as the diagnosis made on another system may not be 541 * valid in the current context. Local vdevs will 542 * remain in the faulted state. 543 */ 544 if (spa_load_state(spa) == SPA_LOAD_OPEN) { 545 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, 546 &vd->vdev_faulted); 547 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DEGRADED, 548 &vd->vdev_degraded); 549 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, 550 &vd->vdev_removed); 551 552 if (vd->vdev_faulted || vd->vdev_degraded) { 553 char *aux; 554 555 vd->vdev_label_aux = 556 VDEV_AUX_ERR_EXCEEDED; 557 if (nvlist_lookup_string(nv, 558 ZPOOL_CONFIG_AUX_STATE, &aux) == 0 && 559 strcmp(aux, "external") == 0) 560 vd->vdev_label_aux = VDEV_AUX_EXTERNAL; 561 } 562 } 563 } 564 565 /* 566 * Add ourselves to the parent's list of children. 567 */ 568 vdev_add_child(parent, vd); 569 570 *vdp = vd; 571 572 return (0); 573} 574 575void 576vdev_free(vdev_t *vd) 577{ 578 spa_t *spa = vd->vdev_spa; 579 580 /* 581 * vdev_free() implies closing the vdev first. This is simpler than 582 * trying to ensure complicated semantics for all callers. 583 */ 584 vdev_close(vd); 585 586 ASSERT(!list_link_active(&vd->vdev_config_dirty_node)); 587 ASSERT(!list_link_active(&vd->vdev_state_dirty_node)); 588 589 /* 590 * Free all children. 591 */ 592 for (int c = 0; c < vd->vdev_children; c++) 593 vdev_free(vd->vdev_child[c]); 594 595 ASSERT(vd->vdev_child == NULL); 596 ASSERT(vd->vdev_guid_sum == vd->vdev_guid); 597 598 /* 599 * Discard allocation state. 600 */ 601 if (vd->vdev_mg != NULL) { 602 vdev_metaslab_fini(vd); 603 metaslab_group_destroy(vd->vdev_mg); 604 } 605 606 ASSERT3U(vd->vdev_stat.vs_space, ==, 0); 607 ASSERT3U(vd->vdev_stat.vs_dspace, ==, 0); 608 ASSERT3U(vd->vdev_stat.vs_alloc, ==, 0); 609 610 /* 611 * Remove this vdev from its parent's child list. 
612 */ 613 vdev_remove_child(vd->vdev_parent, vd); 614 615 ASSERT(vd->vdev_parent == NULL); 616 617 /* 618 * Clean up vdev structure. 619 */ 620 vdev_queue_fini(vd); 621 vdev_cache_fini(vd); 622 623 if (vd->vdev_path) 624 spa_strfree(vd->vdev_path); 625 if (vd->vdev_devid) 626 spa_strfree(vd->vdev_devid); 627 if (vd->vdev_physpath) 628 spa_strfree(vd->vdev_physpath); 629 if (vd->vdev_fru) 630 spa_strfree(vd->vdev_fru); 631 632 if (vd->vdev_isspare) 633 spa_spare_remove(vd); 634 if (vd->vdev_isl2cache) 635 spa_l2cache_remove(vd); 636 637 txg_list_destroy(&vd->vdev_ms_list); 638 txg_list_destroy(&vd->vdev_dtl_list); 639 640 mutex_enter(&vd->vdev_dtl_lock); 641 for (int t = 0; t < DTL_TYPES; t++) { 642 space_map_unload(&vd->vdev_dtl[t]); 643 space_map_destroy(&vd->vdev_dtl[t]); 644 } 645 mutex_exit(&vd->vdev_dtl_lock); 646 647 mutex_destroy(&vd->vdev_dtl_lock); 648 mutex_destroy(&vd->vdev_stat_lock); 649 mutex_destroy(&vd->vdev_probe_lock); 650 651 if (vd == spa->spa_root_vdev) 652 spa->spa_root_vdev = NULL; 653 654 kmem_free(vd, sizeof (vdev_t)); 655} 656 657/* 658 * Transfer top-level vdev state from svd to tvd. 659 */ 660static void 661vdev_top_transfer(vdev_t *svd, vdev_t *tvd) 662{ 663 spa_t *spa = svd->vdev_spa; 664 metaslab_t *msp; 665 vdev_t *vd; 666 int t; 667 668 ASSERT(tvd == tvd->vdev_top); 669 670 tvd->vdev_ms_array = svd->vdev_ms_array; 671 tvd->vdev_ms_shift = svd->vdev_ms_shift; 672 tvd->vdev_ms_count = svd->vdev_ms_count; 673 674 svd->vdev_ms_array = 0; 675 svd->vdev_ms_shift = 0; 676 svd->vdev_ms_count = 0; 677 678 if (tvd->vdev_mg) 679 ASSERT3P(tvd->vdev_mg, ==, svd->vdev_mg); 680 tvd->vdev_mg = svd->vdev_mg; 681 tvd->vdev_ms = svd->vdev_ms; 682 683 svd->vdev_mg = NULL; 684 svd->vdev_ms = NULL; 685 686 if (tvd->vdev_mg != NULL) 687 tvd->vdev_mg->mg_vd = tvd; 688 689 tvd->vdev_stat.vs_alloc = svd->vdev_stat.vs_alloc; 690 tvd->vdev_stat.vs_space = svd->vdev_stat.vs_space; 691 tvd->vdev_stat.vs_dspace = svd->vdev_stat.vs_dspace; 692 693 svd->vdev_stat.vs_alloc = 0; 694 svd->vdev_stat.vs_space = 0; 695 svd->vdev_stat.vs_dspace = 0; 696 697 for (t = 0; t < TXG_SIZE; t++) { 698 while ((msp = txg_list_remove(&svd->vdev_ms_list, t)) != NULL) 699 (void) txg_list_add(&tvd->vdev_ms_list, msp, t); 700 while ((vd = txg_list_remove(&svd->vdev_dtl_list, t)) != NULL) 701 (void) txg_list_add(&tvd->vdev_dtl_list, vd, t); 702 if (txg_list_remove_this(&spa->spa_vdev_txg_list, svd, t)) 703 (void) txg_list_add(&spa->spa_vdev_txg_list, tvd, t); 704 } 705 706 if (list_link_active(&svd->vdev_config_dirty_node)) { 707 vdev_config_clean(svd); 708 vdev_config_dirty(tvd); 709 } 710 711 if (list_link_active(&svd->vdev_state_dirty_node)) { 712 vdev_state_clean(svd); 713 vdev_state_dirty(tvd); 714 } 715 716 tvd->vdev_deflate_ratio = svd->vdev_deflate_ratio; 717 svd->vdev_deflate_ratio = 0; 718 719 tvd->vdev_islog = svd->vdev_islog; 720 svd->vdev_islog = 0; 721} 722 723static void 724vdev_top_update(vdev_t *tvd, vdev_t *vd) 725{ 726 if (vd == NULL) 727 return; 728 729 vd->vdev_top = tvd; 730 731 for (int c = 0; c < vd->vdev_children; c++) 732 vdev_top_update(tvd, vd->vdev_child[c]); 733} 734 735/* 736 * Add a mirror/replacing vdev above an existing vdev. 
737 */ 738vdev_t * 739vdev_add_parent(vdev_t *cvd, vdev_ops_t *ops) 740{ 741 spa_t *spa = cvd->vdev_spa; 742 vdev_t *pvd = cvd->vdev_parent; 743 vdev_t *mvd; 744 745 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL); 746 747 mvd = vdev_alloc_common(spa, cvd->vdev_id, 0, ops); 748 749 mvd->vdev_asize = cvd->vdev_asize; 750 mvd->vdev_min_asize = cvd->vdev_min_asize;
751 mvd->vdev_max_asize = cvd->vdev_max_asize;
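/*
 * Note: the interposed parent takes on the child's max_asize here along
 * with asize and min_asize, so it presumably reports the same potential
 * expansion headroom as the device it now wraps.
 */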
752 mvd->vdev_ashift = cvd->vdev_ashift; 753 mvd->vdev_state = cvd->vdev_state; 754 mvd->vdev_crtxg = cvd->vdev_crtxg; 755 756 vdev_remove_child(pvd, cvd); 757 vdev_add_child(pvd, mvd); 758 cvd->vdev_id = mvd->vdev_children; 759 vdev_add_child(mvd, cvd); 760 vdev_top_update(cvd->vdev_top, cvd->vdev_top); 761 762 if (mvd == mvd->vdev_top) 763 vdev_top_transfer(cvd, mvd); 764 765 return (mvd); 766} 767 768/* 769 * Remove a 1-way mirror/replacing vdev from the tree. 770 */ 771void 772vdev_remove_parent(vdev_t *cvd) 773{ 774 vdev_t *mvd = cvd->vdev_parent; 775 vdev_t *pvd = mvd->vdev_parent; 776 777 ASSERT(spa_config_held(cvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL); 778 779 ASSERT(mvd->vdev_children == 1); 780 ASSERT(mvd->vdev_ops == &vdev_mirror_ops || 781 mvd->vdev_ops == &vdev_replacing_ops || 782 mvd->vdev_ops == &vdev_spare_ops); 783 cvd->vdev_ashift = mvd->vdev_ashift; 784 785 vdev_remove_child(mvd, cvd); 786 vdev_remove_child(pvd, mvd); 787 788 /* 789 * If cvd will replace mvd as a top-level vdev, preserve mvd's guid. 790 * Otherwise, we could have detached an offline device, and when we 791 * go to import the pool we'll think we have two top-level vdevs, 792 * instead of a different version of the same top-level vdev. 793 */ 794 if (mvd->vdev_top == mvd) { 795 uint64_t guid_delta = mvd->vdev_guid - cvd->vdev_guid; 796 cvd->vdev_orig_guid = cvd->vdev_guid; 797 cvd->vdev_guid += guid_delta; 798 cvd->vdev_guid_sum += guid_delta; 799 } 800 cvd->vdev_id = mvd->vdev_id; 801 vdev_add_child(pvd, cvd); 802 vdev_top_update(cvd->vdev_top, cvd->vdev_top); 803 804 if (cvd == cvd->vdev_top) 805 vdev_top_transfer(mvd, cvd); 806 807 ASSERT(mvd->vdev_children == 0); 808 vdev_free(mvd); 809} 810 811int 812vdev_metaslab_init(vdev_t *vd, uint64_t txg) 813{ 814 spa_t *spa = vd->vdev_spa; 815 objset_t *mos = spa->spa_meta_objset; 816 uint64_t m; 817 uint64_t oldc = vd->vdev_ms_count; 818 uint64_t newc = vd->vdev_asize >> vd->vdev_ms_shift; 819 metaslab_t **mspp; 820 int error; 821 822 ASSERT(txg == 0 || spa_config_held(spa, SCL_ALLOC, RW_WRITER)); 823 824 /* 825 * This vdev is not being allocated from yet or is a hole. 826 */ 827 if (vd->vdev_ms_shift == 0) 828 return (0); 829 830 ASSERT(!vd->vdev_ishole); 831 832 /* 833 * Compute the raidz-deflation ratio. Note, we hard-code 834 * in 128k (1 << 17) because it is the current "typical" blocksize. 835 * Even if SPA_MAXBLOCKSIZE changes, this algorithm must never change, 836 * or we will inconsistently account for existing bp's. 
837 */ 838 vd->vdev_deflate_ratio = (1 << 17) / 839 (vdev_psize_to_asize(vd, 1 << 17) >> SPA_MINBLOCKSHIFT); 840 841 ASSERT(oldc <= newc); 842 843 mspp = kmem_zalloc(newc * sizeof (*mspp), KM_SLEEP); 844 845 if (oldc != 0) { 846 bcopy(vd->vdev_ms, mspp, oldc * sizeof (*mspp)); 847 kmem_free(vd->vdev_ms, oldc * sizeof (*mspp)); 848 } 849 850 vd->vdev_ms = mspp; 851 vd->vdev_ms_count = newc; 852 853 for (m = oldc; m < newc; m++) { 854 space_map_obj_t smo = { 0, 0, 0 }; 855 if (txg == 0) { 856 uint64_t object = 0; 857 error = dmu_read(mos, vd->vdev_ms_array, 858 m * sizeof (uint64_t), sizeof (uint64_t), &object, 859 DMU_READ_PREFETCH); 860 if (error) 861 return (error); 862 if (object != 0) { 863 dmu_buf_t *db; 864 error = dmu_bonus_hold(mos, object, FTAG, &db); 865 if (error) 866 return (error); 867 ASSERT3U(db->db_size, >=, sizeof (smo)); 868 bcopy(db->db_data, &smo, sizeof (smo)); 869 ASSERT3U(smo.smo_object, ==, object); 870 dmu_buf_rele(db, FTAG); 871 } 872 } 873 vd->vdev_ms[m] = metaslab_init(vd->vdev_mg, &smo, 874 m << vd->vdev_ms_shift, 1ULL << vd->vdev_ms_shift, txg); 875 } 876 877 if (txg == 0) 878 spa_config_enter(spa, SCL_ALLOC, FTAG, RW_WRITER); 879 880 /* 881 * If the vdev is being removed we don't activate 882 * the metaslabs since we want to ensure that no new 883 * allocations are performed on this device. 884 */ 885 if (oldc == 0 && !vd->vdev_removing) 886 metaslab_group_activate(vd->vdev_mg); 887 888 if (txg == 0) 889 spa_config_exit(spa, SCL_ALLOC, FTAG); 890 891 return (0); 892} 893 894void 895vdev_metaslab_fini(vdev_t *vd) 896{ 897 uint64_t m; 898 uint64_t count = vd->vdev_ms_count; 899 900 if (vd->vdev_ms != NULL) { 901 metaslab_group_passivate(vd->vdev_mg); 902 for (m = 0; m < count; m++) 903 if (vd->vdev_ms[m] != NULL) 904 metaslab_fini(vd->vdev_ms[m]); 905 kmem_free(vd->vdev_ms, count * sizeof (metaslab_t *)); 906 vd->vdev_ms = NULL; 907 } 908} 909 910typedef struct vdev_probe_stats { 911 boolean_t vps_readable; 912 boolean_t vps_writeable; 913 int vps_flags; 914} vdev_probe_stats_t; 915 916static void 917vdev_probe_done(zio_t *zio) 918{ 919 spa_t *spa = zio->io_spa; 920 vdev_t *vd = zio->io_vd; 921 vdev_probe_stats_t *vps = zio->io_private; 922 923 ASSERT(vd->vdev_probe_zio != NULL); 924 925 if (zio->io_type == ZIO_TYPE_READ) { 926 if (zio->io_error == 0) 927 vps->vps_readable = 1; 928 if (zio->io_error == 0 && spa_writeable(spa)) { 929 zio_nowait(zio_write_phys(vd->vdev_probe_zio, vd, 930 zio->io_offset, zio->io_size, zio->io_data, 931 ZIO_CHECKSUM_OFF, vdev_probe_done, vps, 932 ZIO_PRIORITY_SYNC_WRITE, vps->vps_flags, B_TRUE)); 933 } else { 934 zio_buf_free(zio->io_data, zio->io_size); 935 } 936 } else if (zio->io_type == ZIO_TYPE_WRITE) { 937 if (zio->io_error == 0) 938 vps->vps_writeable = 1; 939 zio_buf_free(zio->io_data, zio->io_size); 940 } else if (zio->io_type == ZIO_TYPE_NULL) { 941 zio_t *pio; 942 943 vd->vdev_cant_read |= !vps->vps_readable; 944 vd->vdev_cant_write |= !vps->vps_writeable; 945 946 if (vdev_readable(vd) && 947 (vdev_writeable(vd) || !spa_writeable(spa))) { 948 zio->io_error = 0; 949 } else { 950 ASSERT(zio->io_error != 0); 951 zfs_ereport_post(FM_EREPORT_ZFS_PROBE_FAILURE, 952 spa, vd, NULL, 0, 0); 953 zio->io_error = ENXIO; 954 } 955 956 mutex_enter(&vd->vdev_probe_lock); 957 ASSERT(vd->vdev_probe_zio == zio); 958 vd->vdev_probe_zio = NULL; 959 mutex_exit(&vd->vdev_probe_lock); 960 961 while ((pio = zio_walk_parents(zio)) != NULL) 962 if (!vdev_accessible(vd, pio)) 963 pio->io_error = ENXIO; 964 965 kmem_free(vps, sizeof (*vps)); 966 } 
967} 968 969/* 970 * Determine whether this device is accessible by reading and writing 971 * to several known locations: the pad regions of each vdev label 972 * but the first (which we leave alone in case it contains a VTOC). 973 */ 974zio_t * 975vdev_probe(vdev_t *vd, zio_t *zio) 976{ 977 spa_t *spa = vd->vdev_spa; 978 vdev_probe_stats_t *vps = NULL; 979 zio_t *pio; 980 981 ASSERT(vd->vdev_ops->vdev_op_leaf); 982 983 /* 984 * Don't probe the probe. 985 */ 986 if (zio && (zio->io_flags & ZIO_FLAG_PROBE)) 987 return (NULL); 988 989 /* 990 * To prevent 'probe storms' when a device fails, we create 991 * just one probe i/o at a time. All zios that want to probe 992 * this vdev will become parents of the probe io. 993 */ 994 mutex_enter(&vd->vdev_probe_lock); 995 996 if ((pio = vd->vdev_probe_zio) == NULL) { 997 vps = kmem_zalloc(sizeof (*vps), KM_SLEEP); 998 999 vps->vps_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_PROBE | 1000 ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_AGGREGATE | 1001 ZIO_FLAG_TRYHARD; 1002 1003 if (spa_config_held(spa, SCL_ZIO, RW_WRITER)) { 1004 /* 1005 * vdev_cant_read and vdev_cant_write can only 1006 * transition from TRUE to FALSE when we have the 1007 * SCL_ZIO lock as writer; otherwise they can only 1008 * transition from FALSE to TRUE. This ensures that 1009 * any zio looking at these values can assume that 1010 * failures persist for the life of the I/O. That's 1011 * important because when a device has intermittent 1012 * connectivity problems, we want to ensure that 1013 * they're ascribed to the device (ENXIO) and not 1014 * the zio (EIO). 1015 * 1016 * Since we hold SCL_ZIO as writer here, clear both 1017 * values so the probe can reevaluate from first 1018 * principles. 1019 */ 1020 vps->vps_flags |= ZIO_FLAG_CONFIG_WRITER; 1021 vd->vdev_cant_read = B_FALSE; 1022 vd->vdev_cant_write = B_FALSE; 1023 } 1024 1025 vd->vdev_probe_zio = pio = zio_null(NULL, spa, vd, 1026 vdev_probe_done, vps, 1027 vps->vps_flags | ZIO_FLAG_DONT_PROPAGATE); 1028 1029 /* 1030 * We can't change the vdev state in this context, so we 1031 * kick off an async task to do it on our behalf. 
1032 */ 1033 if (zio != NULL) { 1034 vd->vdev_probe_wanted = B_TRUE; 1035 spa_async_request(spa, SPA_ASYNC_PROBE); 1036 } 1037 } 1038 1039 if (zio != NULL) 1040 zio_add_child(zio, pio); 1041 1042 mutex_exit(&vd->vdev_probe_lock); 1043 1044 if (vps == NULL) { 1045 ASSERT(zio != NULL); 1046 return (NULL); 1047 } 1048 1049 for (int l = 1; l < VDEV_LABELS; l++) { 1050 zio_nowait(zio_read_phys(pio, vd, 1051 vdev_label_offset(vd->vdev_psize, l, 1052 offsetof(vdev_label_t, vl_pad2)), 1053 VDEV_PAD_SIZE, zio_buf_alloc(VDEV_PAD_SIZE), 1054 ZIO_CHECKSUM_OFF, vdev_probe_done, vps, 1055 ZIO_PRIORITY_SYNC_READ, vps->vps_flags, B_TRUE)); 1056 } 1057 1058 if (zio == NULL) 1059 return (pio); 1060 1061 zio_nowait(pio); 1062 return (NULL); 1063} 1064 1065static void 1066vdev_open_child(void *arg) 1067{ 1068 vdev_t *vd = arg; 1069 1070 vd->vdev_open_thread = curthread; 1071 vd->vdev_open_error = vdev_open(vd); 1072 vd->vdev_open_thread = NULL; 1073} 1074 1075boolean_t 1076vdev_uses_zvols(vdev_t *vd) 1077{ 1078 if (vd->vdev_path && strncmp(vd->vdev_path, ZVOL_DIR, 1079 strlen(ZVOL_DIR)) == 0) 1080 return (B_TRUE); 1081 for (int c = 0; c < vd->vdev_children; c++) 1082 if (vdev_uses_zvols(vd->vdev_child[c])) 1083 return (B_TRUE); 1084 return (B_FALSE); 1085} 1086 1087void 1088vdev_open_children(vdev_t *vd) 1089{ 1090 taskq_t *tq; 1091 int children = vd->vdev_children; 1092 1093 /* 1094 * in order to handle pools on top of zvols, do the opens 1095 * in a single thread so that the same thread holds the 1096 * spa_namespace_lock 1097 */ 1098 if (B_TRUE || vdev_uses_zvols(vd)) { 1099 for (int c = 0; c < children; c++) 1100 vd->vdev_child[c]->vdev_open_error = 1101 vdev_open(vd->vdev_child[c]); 1102 return; 1103 } 1104 tq = taskq_create("vdev_open", children, minclsyspri, 1105 children, children, TASKQ_PREPOPULATE); 1106 1107 for (int c = 0; c < children; c++) 1108 VERIFY(taskq_dispatch(tq, vdev_open_child, vd->vdev_child[c], 1109 TQ_SLEEP) != 0); 1110 1111 taskq_destroy(tq); 1112} 1113 1114/* 1115 * Prepare a virtual device for access. 1116 */ 1117int 1118vdev_open(vdev_t *vd) 1119{ 1120 spa_t *spa = vd->vdev_spa; 1121 int error; 1122 uint64_t osize = 0;
1123 uint64_t max_osize = 0;
1124 uint64_t asize, max_asize, psize;
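/*
 * max_osize/max_asize appear to track the size the underlying device
 * could offer if fully used (e.g. after a LUN grow), as opposed to the
 * currently usable osize/asize; they are carried through the size
 * calculations below alongside the existing values.
 */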
1125 uint64_t ashift = 0; 1126 1127 ASSERT(vd->vdev_open_thread == curthread || 1128 spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL); 1129 ASSERT(vd->vdev_state == VDEV_STATE_CLOSED || 1130 vd->vdev_state == VDEV_STATE_CANT_OPEN || 1131 vd->vdev_state == VDEV_STATE_OFFLINE); 1132 1133 vd->vdev_stat.vs_aux = VDEV_AUX_NONE; 1134 vd->vdev_cant_read = B_FALSE; 1135 vd->vdev_cant_write = B_FALSE; 1136 vd->vdev_min_asize = vdev_get_min_asize(vd); 1137 1138 /* 1139 * If this vdev is not removed, check its fault status. If it's 1140 * faulted, bail out of the open. 1141 */ 1142 if (!vd->vdev_removed && vd->vdev_faulted) { 1143 ASSERT(vd->vdev_children == 0); 1144 ASSERT(vd->vdev_label_aux == VDEV_AUX_ERR_EXCEEDED || 1145 vd->vdev_label_aux == VDEV_AUX_EXTERNAL); 1146 vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED, 1147 vd->vdev_label_aux); 1148 return (ENXIO); 1149 } else if (vd->vdev_offline) { 1150 ASSERT(vd->vdev_children == 0); 1151 vdev_set_state(vd, B_TRUE, VDEV_STATE_OFFLINE, VDEV_AUX_NONE); 1152 return (ENXIO); 1153 } 1154
1155 error = vd->vdev_ops->vdev_op_open(vd, &osize, &max_osize, &ashift);
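/*
 * The open method now also fills in max_osize, i.e. the largest size the
 * leaf device could be expanded to; the exact semantics depend on the
 * particular vdev_ops implementation.
 */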
1156 1157 /* 1158 * Reset the vdev_reopening flag so that we actually close 1159 * the vdev on error. 1160 */ 1161 vd->vdev_reopening = B_FALSE; 1162 if (zio_injection_enabled && error == 0) 1163 error = zio_handle_device_injection(vd, NULL, ENXIO); 1164 1165 if (error) { 1166 if (vd->vdev_removed && 1167 vd->vdev_stat.vs_aux != VDEV_AUX_OPEN_FAILED) 1168 vd->vdev_removed = B_FALSE; 1169 1170 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN, 1171 vd->vdev_stat.vs_aux); 1172 return (error); 1173 } 1174 1175 vd->vdev_removed = B_FALSE; 1176 1177 /* 1178 * Recheck the faulted flag now that we have confirmed that 1179 * the vdev is accessible. If we're faulted, bail. 1180 */ 1181 if (vd->vdev_faulted) { 1182 ASSERT(vd->vdev_children == 0); 1183 ASSERT(vd->vdev_label_aux == VDEV_AUX_ERR_EXCEEDED || 1184 vd->vdev_label_aux == VDEV_AUX_EXTERNAL); 1185 vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED, 1186 vd->vdev_label_aux); 1187 return (ENXIO); 1188 } 1189 1190 if (vd->vdev_degraded) { 1191 ASSERT(vd->vdev_children == 0); 1192 vdev_set_state(vd, B_TRUE, VDEV_STATE_DEGRADED, 1193 VDEV_AUX_ERR_EXCEEDED); 1194 } else { 1195 vdev_set_state(vd, B_TRUE, VDEV_STATE_HEALTHY, 0); 1196 } 1197 1198 /* 1199 * For hole or missing vdevs we just return success. 1200 */ 1201 if (vd->vdev_ishole || vd->vdev_ops == &vdev_missing_ops) 1202 return (0); 1203 1204 for (int c = 0; c < vd->vdev_children; c++) { 1205 if (vd->vdev_child[c]->vdev_state != VDEV_STATE_HEALTHY) { 1206 vdev_set_state(vd, B_TRUE, VDEV_STATE_DEGRADED, 1207 VDEV_AUX_NONE); 1208 break; 1209 } 1210 } 1211 1212 osize = P2ALIGN(osize, (uint64_t)sizeof (vdev_label_t));
1213 max_osize = P2ALIGN(max_osize, (uint64_t)sizeof (vdev_label_t));
1214 1215 if (vd->vdev_children == 0) { 1216 if (osize < SPA_MINDEVSIZE) { 1217 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN, 1218 VDEV_AUX_TOO_SMALL); 1219 return (EOVERFLOW); 1220 } 1221 psize = osize; 1222 asize = osize - (VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE);
1223 max_asize = max_osize - (VDEV_LABEL_START_SIZE +
1224 VDEV_LABEL_END_SIZE);
1225 } else { 1226 if (vd->vdev_parent != NULL && osize < SPA_MINDEVSIZE - 1227 (VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE)) { 1228 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN, 1229 VDEV_AUX_TOO_SMALL); 1230 return (EOVERFLOW); 1231 } 1232 psize = 0; 1233 asize = osize;
1234 max_asize = max_osize;
1235 } 1236 1237 vd->vdev_psize = psize; 1238 1239 /* 1240 * Make sure the allocatable size hasn't shrunk. 1241 */ 1242 if (asize < vd->vdev_min_asize) { 1243 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN, 1244 VDEV_AUX_BAD_LABEL); 1245 return (EINVAL); 1246 } 1247 1248 if (vd->vdev_asize == 0) { 1249 /* 1250 * This is the first-ever open, so use the computed values. 1251 * For testing purposes, a higher ashift can be requested. 1252 */ 1253 vd->vdev_asize = asize;
1254 vd->vdev_max_asize = max_asize;
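/*
 * Recording vdev_max_asize on first open (and again below on reopen)
 * lets the rest of the system see how far the vdev could grow beyond
 * vdev_asize; this is presumably what expansion reporting is based on.
 */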
1255 vd->vdev_ashift = MAX(ashift, vd->vdev_ashift); 1256 } else { 1257 /* 1258 * Make sure the alignment requirement hasn't increased. 1259 */ 1260 if (ashift > vd->vdev_top->vdev_ashift) { 1261 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN, 1262 VDEV_AUX_BAD_LABEL); 1263 return (EINVAL); 1264 }
1265 vd->vdev_max_asize = max_asize;
1266 } 1267 1268 /* 1269 * If all children are healthy and the asize has increased, 1270 * then we've experienced dynamic LUN growth. If automatic 1271 * expansion is enabled then use the additional space. 1272 */ 1273 if (vd->vdev_state == VDEV_STATE_HEALTHY && asize > vd->vdev_asize && 1274 (vd->vdev_expanding || spa->spa_autoexpand)) 1275 vd->vdev_asize = asize; 1276 1277 vdev_set_min_asize(vd); 1278 1279 /* 1280 * Ensure we can issue some IO before declaring the 1281 * vdev open for business. 1282 */ 1283 if (vd->vdev_ops->vdev_op_leaf && 1284 (error = zio_wait(vdev_probe(vd, NULL))) != 0) { 1285 vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED, 1286 VDEV_AUX_ERR_EXCEEDED); 1287 return (error); 1288 } 1289 1290 /* 1291 * If a leaf vdev has a DTL, and seems healthy, then kick off a 1292 * resilver. But don't do this if we are doing a reopen for a scrub, 1293 * since this would just restart the scrub we are already doing. 1294 */ 1295 if (vd->vdev_ops->vdev_op_leaf && !spa->spa_scrub_reopen && 1296 vdev_resilver_needed(vd, NULL, NULL)) 1297 spa_async_request(spa, SPA_ASYNC_RESILVER); 1298 1299 return (0); 1300} 1301 1302/* 1303 * Called once the vdevs are all opened, this routine validates the label 1304 * contents. This needs to be done before vdev_load() so that we don't 1305 * inadvertently do repair I/Os to the wrong device. 1306 * 1307 * If 'strict' is false ignore the spa guid check. This is necessary because 1308 * if the machine crashed during a re-guid the new guid might have been written 1309 * to all of the vdev labels, but not the cached config. The strict check 1310 * will be performed when the pool is opened again using the mos config. 1311 * 1312 * This function will only return failure if one of the vdevs indicates that it 1313 * has since been destroyed or exported. This is only possible if 1314 * /etc/zfs/zpool.cache was readonly at the time. Otherwise, the vdev state 1315 * will be updated but the function will return 0. 1316 */ 1317int 1318vdev_validate(vdev_t *vd, boolean_t strict) 1319{ 1320 spa_t *spa = vd->vdev_spa; 1321 nvlist_t *label; 1322 uint64_t guid = 0, top_guid; 1323 uint64_t state; 1324 1325 for (int c = 0; c < vd->vdev_children; c++) 1326 if (vdev_validate(vd->vdev_child[c], strict) != 0) 1327 return (EBADF); 1328 1329 /* 1330 * If the device has already failed, or was marked offline, don't do 1331 * any further validation. Otherwise, label I/O will fail and we will 1332 * overwrite the previous state. 1333 */ 1334 if (vd->vdev_ops->vdev_op_leaf && vdev_readable(vd)) { 1335 uint64_t aux_guid = 0; 1336 nvlist_t *nvl; 1337 1338 if ((label = vdev_label_read_config(vd)) == NULL) { 1339 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN, 1340 VDEV_AUX_BAD_LABEL); 1341 return (0); 1342 } 1343 1344 /* 1345 * Determine if this vdev has been split off into another 1346 * pool. If so, then refuse to open it. 
1347 */ 1348 if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_SPLIT_GUID, 1349 &aux_guid) == 0 && aux_guid == spa_guid(spa)) { 1350 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN, 1351 VDEV_AUX_SPLIT_POOL); 1352 nvlist_free(label); 1353 return (0); 1354 } 1355 1356 if (strict && (nvlist_lookup_uint64(label, 1357 ZPOOL_CONFIG_POOL_GUID, &guid) != 0 || 1358 guid != spa_guid(spa))) { 1359 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN, 1360 VDEV_AUX_CORRUPT_DATA); 1361 nvlist_free(label); 1362 return (0); 1363 } 1364 1365 if (nvlist_lookup_nvlist(label, ZPOOL_CONFIG_VDEV_TREE, &nvl) 1366 != 0 || nvlist_lookup_uint64(nvl, ZPOOL_CONFIG_ORIG_GUID, 1367 &aux_guid) != 0) 1368 aux_guid = 0; 1369 1370 /* 1371 * If this vdev just became a top-level vdev because its 1372 * sibling was detached, it will have adopted the parent's 1373 * vdev guid -- but the label may or may not be on disk yet. 1374 * Fortunately, either version of the label will have the 1375 * same top guid, so if we're a top-level vdev, we can 1376 * safely compare to that instead. 1377 * 1378 * If we split this vdev off instead, then we also check the 1379 * original pool's guid. We don't want to consider the vdev 1380 * corrupt if it is partway through a split operation. 1381 */ 1382 if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID, 1383 &guid) != 0 || 1384 nvlist_lookup_uint64(label, ZPOOL_CONFIG_TOP_GUID, 1385 &top_guid) != 0 || 1386 ((vd->vdev_guid != guid && vd->vdev_guid != aux_guid) && 1387 (vd->vdev_guid != top_guid || vd != vd->vdev_top))) { 1388 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN, 1389 VDEV_AUX_CORRUPT_DATA); 1390 nvlist_free(label); 1391 return (0); 1392 } 1393 1394 if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE, 1395 &state) != 0) { 1396 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN, 1397 VDEV_AUX_CORRUPT_DATA); 1398 nvlist_free(label); 1399 return (0); 1400 } 1401 1402 nvlist_free(label); 1403 1404 /* 1405 * If this is a verbatim import, no need to check the 1406 * state of the pool. 1407 */ 1408 if (!(spa->spa_import_flags & ZFS_IMPORT_VERBATIM) && 1409 spa_load_state(spa) == SPA_LOAD_OPEN && 1410 state != POOL_STATE_ACTIVE) 1411 return (EBADF); 1412 1413 /* 1414 * If we were able to open and validate a vdev that was 1415 * previously marked permanently unavailable, clear that state 1416 * now. 1417 */ 1418 if (vd->vdev_not_present) 1419 vd->vdev_not_present = 0; 1420 } 1421 1422 return (0); 1423} 1424 1425/* 1426 * Close a virtual device. 1427 */ 1428void 1429vdev_close(vdev_t *vd) 1430{ 1431 spa_t *spa = vd->vdev_spa; 1432 vdev_t *pvd = vd->vdev_parent; 1433 1434 ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL); 1435 1436 /* 1437 * If our parent is reopening, then we are as well, unless we are 1438 * going offline. 1439 */ 1440 if (pvd != NULL && pvd->vdev_reopening) 1441 vd->vdev_reopening = (pvd->vdev_reopening && !vd->vdev_offline); 1442 1443 vd->vdev_ops->vdev_op_close(vd); 1444 1445 vdev_cache_purge(vd); 1446 1447 /* 1448 * We record the previous state before we close it, so that if we are 1449 * doing a reopen(), we don't generate FMA ereports if we notice that 1450 * it's still faulted. 
1451 */ 1452 vd->vdev_prevstate = vd->vdev_state; 1453 1454 if (vd->vdev_offline) 1455 vd->vdev_state = VDEV_STATE_OFFLINE; 1456 else 1457 vd->vdev_state = VDEV_STATE_CLOSED; 1458 vd->vdev_stat.vs_aux = VDEV_AUX_NONE; 1459} 1460 1461void 1462vdev_hold(vdev_t *vd) 1463{ 1464 spa_t *spa = vd->vdev_spa; 1465 1466 ASSERT(spa_is_root(spa)); 1467 if (spa->spa_state == POOL_STATE_UNINITIALIZED) 1468 return; 1469 1470 for (int c = 0; c < vd->vdev_children; c++) 1471 vdev_hold(vd->vdev_child[c]); 1472 1473 if (vd->vdev_ops->vdev_op_leaf) 1474 vd->vdev_ops->vdev_op_hold(vd); 1475} 1476 1477void 1478vdev_rele(vdev_t *vd) 1479{ 1480 spa_t *spa = vd->vdev_spa; 1481 1482 ASSERT(spa_is_root(spa)); 1483 for (int c = 0; c < vd->vdev_children; c++) 1484 vdev_rele(vd->vdev_child[c]); 1485 1486 if (vd->vdev_ops->vdev_op_leaf) 1487 vd->vdev_ops->vdev_op_rele(vd); 1488} 1489 1490/* 1491 * Reopen all interior vdevs and any unopened leaves. We don't actually 1492 * reopen leaf vdevs which had previously been opened as they might deadlock 1493 * on the spa_config_lock. Instead we only obtain the leaf's physical size. 1494 * If the leaf has never been opened then open it, as usual. 1495 */ 1496void 1497vdev_reopen(vdev_t *vd) 1498{ 1499 spa_t *spa = vd->vdev_spa; 1500 1501 ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL); 1502 1503 /* set the reopening flag unless we're taking the vdev offline */ 1504 vd->vdev_reopening = !vd->vdev_offline; 1505 vdev_close(vd); 1506 (void) vdev_open(vd); 1507 1508 /* 1509 * Call vdev_validate() here to make sure we have the same device. 1510 * Otherwise, a device with an invalid label could be successfully 1511 * opened in response to vdev_reopen(). 1512 */ 1513 if (vd->vdev_aux) { 1514 (void) vdev_validate_aux(vd); 1515 if (vdev_readable(vd) && vdev_writeable(vd) && 1516 vd->vdev_aux == &spa->spa_l2cache && 1517 !l2arc_vdev_present(vd)) 1518 l2arc_add_vdev(spa, vd); 1519 } else { 1520 (void) vdev_validate(vd, B_TRUE); 1521 } 1522 1523 /* 1524 * Reassess parent vdev's health. 1525 */ 1526 vdev_propagate_state(vd); 1527} 1528 1529int 1530vdev_create(vdev_t *vd, uint64_t txg, boolean_t isreplacing) 1531{ 1532 int error; 1533 1534 /* 1535 * Normally, partial opens (e.g. of a mirror) are allowed. 1536 * For a create, however, we want to fail the request if 1537 * there are any components we can't open. 1538 */ 1539 error = vdev_open(vd); 1540 1541 if (error || vd->vdev_state != VDEV_STATE_HEALTHY) { 1542 vdev_close(vd); 1543 return (error ? error : ENXIO); 1544 } 1545 1546 /* 1547 * Recursively initialize all labels. 1548 */ 1549 if ((error = vdev_label_init(vd, txg, isreplacing ? 1550 VDEV_LABEL_REPLACE : VDEV_LABEL_CREATE)) != 0) { 1551 vdev_close(vd); 1552 return (error); 1553 } 1554 1555 return (0); 1556} 1557 1558void 1559vdev_metaslab_set_size(vdev_t *vd) 1560{ 1561 /* 1562 * Aim for roughly 200 metaslabs per vdev. 1563 */ 1564 vd->vdev_ms_shift = highbit(vd->vdev_asize / 200); 1565 vd->vdev_ms_shift = MAX(vd->vdev_ms_shift, SPA_MAXBLOCKSHIFT); 1566} 1567 1568void 1569vdev_dirty(vdev_t *vd, int flags, void *arg, uint64_t txg) 1570{ 1571 ASSERT(vd == vd->vdev_top); 1572 ASSERT(!vd->vdev_ishole); 1573 ASSERT(ISP2(flags)); 1574 ASSERT(spa_writeable(vd->vdev_spa)); 1575 1576 if (flags & VDD_METASLAB) 1577 (void) txg_list_add(&vd->vdev_ms_list, arg, txg); 1578 1579 if (flags & VDD_DTL) 1580 (void) txg_list_add(&vd->vdev_dtl_list, arg, txg); 1581 1582 (void) txg_list_add(&vd->vdev_spa->spa_vdev_txg_list, vd, txg); 1583} 1584 1585/* 1586 * DTLs. 
1587 * 1588 * A vdev's DTL (dirty time log) is the set of transaction groups for which 1589 * the vdev has less than perfect replication. There are four kinds of DTL: 1590 * 1591 * DTL_MISSING: txgs for which the vdev has no valid copies of the data 1592 * 1593 * DTL_PARTIAL: txgs for which data is available, but not fully replicated 1594 * 1595 * DTL_SCRUB: the txgs that could not be repaired by the last scrub; upon 1596 * scrub completion, DTL_SCRUB replaces DTL_MISSING in the range of 1597 * txgs that was scrubbed. 1598 * 1599 * DTL_OUTAGE: txgs which cannot currently be read, whether due to 1600 * persistent errors or just some device being offline. 1601 * Unlike the other three, the DTL_OUTAGE map is not generally 1602 * maintained; it's only computed when needed, typically to 1603 * determine whether a device can be detached. 1604 * 1605 * For leaf vdevs, DTL_MISSING and DTL_PARTIAL are identical: the device 1606 * either has the data or it doesn't. 1607 * 1608 * For interior vdevs such as mirror and RAID-Z the picture is more complex. 1609 * A vdev's DTL_PARTIAL is the union of its children's DTL_PARTIALs, because 1610 * if any child is less than fully replicated, then so is its parent. 1611 * A vdev's DTL_MISSING is a modified union of its children's DTL_MISSINGs, 1612 * comprising only those txgs which appear in 'maxfaults' or more children; 1613 * those are the txgs we don't have enough replication to read. For example, 1614 * double-parity RAID-Z can tolerate up to two missing devices (maxfaults == 2); 1615 * thus, its DTL_MISSING consists of the set of txgs that appear in more than 1616 * two child DTL_MISSING maps. 1617 * 1618 * It should be clear from the above that to compute the DTLs and outage maps 1619 * for all vdevs, it suffices to know just the leaf vdevs' DTL_MISSING maps. 1620 * Therefore, that is all we keep on disk. When loading the pool, or after 1621 * a configuration change, we generate all other DTLs from first principles. 1622 */ 1623void 1624vdev_dtl_dirty(vdev_t *vd, vdev_dtl_type_t t, uint64_t txg, uint64_t size) 1625{ 1626 space_map_t *sm = &vd->vdev_dtl[t]; 1627 1628 ASSERT(t < DTL_TYPES); 1629 ASSERT(vd != vd->vdev_spa->spa_root_vdev); 1630 ASSERT(spa_writeable(vd->vdev_spa)); 1631 1632 mutex_enter(sm->sm_lock); 1633 if (!space_map_contains(sm, txg, size)) 1634 space_map_add(sm, txg, size); 1635 mutex_exit(sm->sm_lock); 1636} 1637 1638boolean_t 1639vdev_dtl_contains(vdev_t *vd, vdev_dtl_type_t t, uint64_t txg, uint64_t size) 1640{ 1641 space_map_t *sm = &vd->vdev_dtl[t]; 1642 boolean_t dirty = B_FALSE; 1643 1644 ASSERT(t < DTL_TYPES); 1645 ASSERT(vd != vd->vdev_spa->spa_root_vdev); 1646 1647 mutex_enter(sm->sm_lock); 1648 if (sm->sm_space != 0) 1649 dirty = space_map_contains(sm, txg, size); 1650 mutex_exit(sm->sm_lock); 1651 1652 return (dirty); 1653} 1654 1655boolean_t 1656vdev_dtl_empty(vdev_t *vd, vdev_dtl_type_t t) 1657{ 1658 space_map_t *sm = &vd->vdev_dtl[t]; 1659 boolean_t empty; 1660 1661 mutex_enter(sm->sm_lock); 1662 empty = (sm->sm_space == 0); 1663 mutex_exit(sm->sm_lock); 1664 1665 return (empty); 1666} 1667 1668/* 1669 * Reassess DTLs after a config change or scrub completion. 
1670 */ 1671void 1672vdev_dtl_reassess(vdev_t *vd, uint64_t txg, uint64_t scrub_txg, int scrub_done) 1673{ 1674 spa_t *spa = vd->vdev_spa; 1675 avl_tree_t reftree; 1676 int minref; 1677 1678 ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0); 1679 1680 for (int c = 0; c < vd->vdev_children; c++) 1681 vdev_dtl_reassess(vd->vdev_child[c], txg, 1682 scrub_txg, scrub_done); 1683 1684 if (vd == spa->spa_root_vdev || vd->vdev_ishole || vd->vdev_aux) 1685 return; 1686 1687 if (vd->vdev_ops->vdev_op_leaf) { 1688 dsl_scan_t *scn = spa->spa_dsl_pool->dp_scan; 1689 1690 mutex_enter(&vd->vdev_dtl_lock); 1691 if (scrub_txg != 0 && 1692 (spa->spa_scrub_started || 1693 (scn && scn->scn_phys.scn_errors == 0))) { 1694 /* 1695 * We completed a scrub up to scrub_txg. If we 1696 * did it without rebooting, then the scrub dtl 1697 * will be valid, so excise the old region and 1698 * fold in the scrub dtl. Otherwise, leave the 1699 * dtl as-is if there was an error. 1700 * 1701 * There's little trick here: to excise the beginning 1702 * of the DTL_MISSING map, we put it into a reference 1703 * tree and then add a segment with refcnt -1 that 1704 * covers the range [0, scrub_txg). This means 1705 * that each txg in that range has refcnt -1 or 0. 1706 * We then add DTL_SCRUB with a refcnt of 2, so that 1707 * entries in the range [0, scrub_txg) will have a 1708 * positive refcnt -- either 1 or 2. We then convert 1709 * the reference tree into the new DTL_MISSING map. 1710 */ 1711 space_map_ref_create(&reftree); 1712 space_map_ref_add_map(&reftree, 1713 &vd->vdev_dtl[DTL_MISSING], 1); 1714 space_map_ref_add_seg(&reftree, 0, scrub_txg, -1); 1715 space_map_ref_add_map(&reftree, 1716 &vd->vdev_dtl[DTL_SCRUB], 2); 1717 space_map_ref_generate_map(&reftree, 1718 &vd->vdev_dtl[DTL_MISSING], 1); 1719 space_map_ref_destroy(&reftree); 1720 } 1721 space_map_vacate(&vd->vdev_dtl[DTL_PARTIAL], NULL, NULL); 1722 space_map_walk(&vd->vdev_dtl[DTL_MISSING], 1723 space_map_add, &vd->vdev_dtl[DTL_PARTIAL]); 1724 if (scrub_done) 1725 space_map_vacate(&vd->vdev_dtl[DTL_SCRUB], NULL, NULL); 1726 space_map_vacate(&vd->vdev_dtl[DTL_OUTAGE], NULL, NULL); 1727 if (!vdev_readable(vd)) 1728 space_map_add(&vd->vdev_dtl[DTL_OUTAGE], 0, -1ULL); 1729 else 1730 space_map_walk(&vd->vdev_dtl[DTL_MISSING], 1731 space_map_add, &vd->vdev_dtl[DTL_OUTAGE]); 1732 mutex_exit(&vd->vdev_dtl_lock); 1733 1734 if (txg != 0) 1735 vdev_dirty(vd->vdev_top, VDD_DTL, vd, txg); 1736 return; 1737 } 1738 1739 mutex_enter(&vd->vdev_dtl_lock); 1740 for (int t = 0; t < DTL_TYPES; t++) { 1741 /* account for child's outage in parent's missing map */ 1742 int s = (t == DTL_MISSING) ? DTL_OUTAGE: t; 1743 if (t == DTL_SCRUB) 1744 continue; /* leaf vdevs only */ 1745 if (t == DTL_PARTIAL) 1746 minref = 1; /* i.e. 
non-zero */ 1747 else if (vd->vdev_nparity != 0) 1748 minref = vd->vdev_nparity + 1; /* RAID-Z */ 1749 else 1750 minref = vd->vdev_children; /* any kind of mirror */ 1751 space_map_ref_create(&reftree); 1752 for (int c = 0; c < vd->vdev_children; c++) { 1753 vdev_t *cvd = vd->vdev_child[c]; 1754 mutex_enter(&cvd->vdev_dtl_lock); 1755 space_map_ref_add_map(&reftree, &cvd->vdev_dtl[s], 1); 1756 mutex_exit(&cvd->vdev_dtl_lock); 1757 } 1758 space_map_ref_generate_map(&reftree, &vd->vdev_dtl[t], minref); 1759 space_map_ref_destroy(&reftree); 1760 } 1761 mutex_exit(&vd->vdev_dtl_lock); 1762} 1763 1764static int 1765vdev_dtl_load(vdev_t *vd) 1766{ 1767 spa_t *spa = vd->vdev_spa; 1768 space_map_obj_t *smo = &vd->vdev_dtl_smo; 1769 objset_t *mos = spa->spa_meta_objset; 1770 dmu_buf_t *db; 1771 int error; 1772 1773 ASSERT(vd->vdev_children == 0); 1774 1775 if (smo->smo_object == 0) 1776 return (0); 1777 1778 ASSERT(!vd->vdev_ishole); 1779 1780 if ((error = dmu_bonus_hold(mos, smo->smo_object, FTAG, &db)) != 0) 1781 return (error); 1782 1783 ASSERT3U(db->db_size, >=, sizeof (*smo)); 1784 bcopy(db->db_data, smo, sizeof (*smo)); 1785 dmu_buf_rele(db, FTAG); 1786 1787 mutex_enter(&vd->vdev_dtl_lock); 1788 error = space_map_load(&vd->vdev_dtl[DTL_MISSING], 1789 NULL, SM_ALLOC, smo, mos); 1790 mutex_exit(&vd->vdev_dtl_lock); 1791 1792 return (error); 1793} 1794 1795void 1796vdev_dtl_sync(vdev_t *vd, uint64_t txg) 1797{ 1798 spa_t *spa = vd->vdev_spa; 1799 space_map_obj_t *smo = &vd->vdev_dtl_smo; 1800 space_map_t *sm = &vd->vdev_dtl[DTL_MISSING]; 1801 objset_t *mos = spa->spa_meta_objset; 1802 space_map_t smsync; 1803 kmutex_t smlock; 1804 dmu_buf_t *db; 1805 dmu_tx_t *tx; 1806 1807 ASSERT(!vd->vdev_ishole); 1808 1809 tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg); 1810 1811 if (vd->vdev_detached) { 1812 if (smo->smo_object != 0) { 1813 int err = dmu_object_free(mos, smo->smo_object, tx); 1814 ASSERT3U(err, ==, 0); 1815 smo->smo_object = 0; 1816 } 1817 dmu_tx_commit(tx); 1818 return; 1819 } 1820 1821 if (smo->smo_object == 0) { 1822 ASSERT(smo->smo_objsize == 0); 1823 ASSERT(smo->smo_alloc == 0); 1824 smo->smo_object = dmu_object_alloc(mos, 1825 DMU_OT_SPACE_MAP, 1 << SPACE_MAP_BLOCKSHIFT, 1826 DMU_OT_SPACE_MAP_HEADER, sizeof (*smo), tx); 1827 ASSERT(smo->smo_object != 0); 1828 vdev_config_dirty(vd->vdev_top); 1829 } 1830 1831 mutex_init(&smlock, NULL, MUTEX_DEFAULT, NULL); 1832 1833 space_map_create(&smsync, sm->sm_start, sm->sm_size, sm->sm_shift, 1834 &smlock); 1835 1836 mutex_enter(&smlock); 1837 1838 mutex_enter(&vd->vdev_dtl_lock); 1839 space_map_walk(sm, space_map_add, &smsync); 1840 mutex_exit(&vd->vdev_dtl_lock); 1841 1842 space_map_truncate(smo, mos, tx); 1843 space_map_sync(&smsync, SM_ALLOC, smo, mos, tx); 1844 1845 space_map_destroy(&smsync); 1846 1847 mutex_exit(&smlock); 1848 mutex_destroy(&smlock); 1849 1850 VERIFY(0 == dmu_bonus_hold(mos, smo->smo_object, FTAG, &db)); 1851 dmu_buf_will_dirty(db, tx); 1852 ASSERT3U(db->db_size, >=, sizeof (*smo)); 1853 bcopy(smo, db->db_data, sizeof (*smo)); 1854 dmu_buf_rele(db, FTAG); 1855 1856 dmu_tx_commit(tx); 1857} 1858 1859/* 1860 * Determine whether the specified vdev can be offlined/detached/removed 1861 * without losing data. 
1862 */ 1863boolean_t 1864vdev_dtl_required(vdev_t *vd) 1865{ 1866 spa_t *spa = vd->vdev_spa; 1867 vdev_t *tvd = vd->vdev_top; 1868 uint8_t cant_read = vd->vdev_cant_read; 1869 boolean_t required; 1870 1871 ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL); 1872 1873 if (vd == spa->spa_root_vdev || vd == tvd) 1874 return (B_TRUE); 1875 1876 /* 1877 * Temporarily mark the device as unreadable, and then determine 1878 * whether this results in any DTL outages in the top-level vdev. 1879 * If not, we can safely offline/detach/remove the device. 1880 */ 1881 vd->vdev_cant_read = B_TRUE; 1882 vdev_dtl_reassess(tvd, 0, 0, B_FALSE); 1883 required = !vdev_dtl_empty(tvd, DTL_OUTAGE); 1884 vd->vdev_cant_read = cant_read; 1885 vdev_dtl_reassess(tvd, 0, 0, B_FALSE); 1886 1887 if (!required && zio_injection_enabled) 1888 required = !!zio_handle_device_injection(vd, NULL, ECHILD); 1889 1890 return (required); 1891} 1892 1893/* 1894 * Determine if resilver is needed, and if so the txg range. 1895 */ 1896boolean_t 1897vdev_resilver_needed(vdev_t *vd, uint64_t *minp, uint64_t *maxp) 1898{ 1899 boolean_t needed = B_FALSE; 1900 uint64_t thismin = UINT64_MAX; 1901 uint64_t thismax = 0; 1902 1903 if (vd->vdev_children == 0) { 1904 mutex_enter(&vd->vdev_dtl_lock); 1905 if (vd->vdev_dtl[DTL_MISSING].sm_space != 0 && 1906 vdev_writeable(vd)) { 1907 space_seg_t *ss; 1908 1909 ss = avl_first(&vd->vdev_dtl[DTL_MISSING].sm_root); 1910 thismin = ss->ss_start - 1; 1911 ss = avl_last(&vd->vdev_dtl[DTL_MISSING].sm_root); 1912 thismax = ss->ss_end; 1913 needed = B_TRUE; 1914 } 1915 mutex_exit(&vd->vdev_dtl_lock); 1916 } else { 1917 for (int c = 0; c < vd->vdev_children; c++) { 1918 vdev_t *cvd = vd->vdev_child[c]; 1919 uint64_t cmin, cmax; 1920 1921 if (vdev_resilver_needed(cvd, &cmin, &cmax)) { 1922 thismin = MIN(thismin, cmin); 1923 thismax = MAX(thismax, cmax); 1924 needed = B_TRUE; 1925 } 1926 } 1927 } 1928 1929 if (needed && minp) { 1930 *minp = thismin; 1931 *maxp = thismax; 1932 } 1933 return (needed); 1934} 1935 1936void 1937vdev_load(vdev_t *vd) 1938{ 1939 /* 1940 * Recursively load all children. 1941 */ 1942 for (int c = 0; c < vd->vdev_children; c++) 1943 vdev_load(vd->vdev_child[c]); 1944 1945 /* 1946 * If this is a top-level vdev, initialize its metaslabs. 1947 */ 1948 if (vd == vd->vdev_top && !vd->vdev_ishole && 1949 (vd->vdev_ashift == 0 || vd->vdev_asize == 0 || 1950 vdev_metaslab_init(vd, 0) != 0)) 1951 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN, 1952 VDEV_AUX_CORRUPT_DATA); 1953 1954 /* 1955 * If this is a leaf vdev, load its DTL. 1956 */ 1957 if (vd->vdev_ops->vdev_op_leaf && vdev_dtl_load(vd) != 0) 1958 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN, 1959 VDEV_AUX_CORRUPT_DATA); 1960} 1961 1962/* 1963 * The special vdev case is used for hot spares and l2cache devices. Its 1964 * sole purpose it to set the vdev state for the associated vdev. To do this, 1965 * we make sure that we can open the underlying device, then try to read the 1966 * label, and make sure that the label is sane and that it hasn't been 1967 * repurposed to another pool. 
1968 */ 1969int 1970vdev_validate_aux(vdev_t *vd) 1971{ 1972 nvlist_t *label; 1973 uint64_t guid, version; 1974 uint64_t state; 1975 1976 if (!vdev_readable(vd)) 1977 return (0); 1978 1979 if ((label = vdev_label_read_config(vd)) == NULL) { 1980 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN, 1981 VDEV_AUX_CORRUPT_DATA); 1982 return (-1); 1983 } 1984 1985 if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_VERSION, &version) != 0 || 1986 version > SPA_VERSION || 1987 nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID, &guid) != 0 || 1988 guid != vd->vdev_guid || 1989 nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE, &state) != 0) { 1990 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN, 1991 VDEV_AUX_CORRUPT_DATA); 1992 nvlist_free(label); 1993 return (-1); 1994 } 1995 1996 /* 1997 * We don't actually check the pool state here. If it's in fact in 1998 * use by another pool, we update this fact on the fly when requested. 1999 */ 2000 nvlist_free(label); 2001 return (0); 2002} 2003 2004void 2005vdev_remove(vdev_t *vd, uint64_t txg) 2006{ 2007 spa_t *spa = vd->vdev_spa; 2008 objset_t *mos = spa->spa_meta_objset; 2009 dmu_tx_t *tx; 2010 2011 tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg); 2012 2013 if (vd->vdev_dtl_smo.smo_object) { 2014 ASSERT3U(vd->vdev_dtl_smo.smo_alloc, ==, 0); 2015 (void) dmu_object_free(mos, vd->vdev_dtl_smo.smo_object, tx); 2016 vd->vdev_dtl_smo.smo_object = 0; 2017 } 2018 2019 if (vd->vdev_ms != NULL) { 2020 for (int m = 0; m < vd->vdev_ms_count; m++) { 2021 metaslab_t *msp = vd->vdev_ms[m]; 2022 2023 if (msp == NULL || msp->ms_smo.smo_object == 0) 2024 continue; 2025 2026 ASSERT3U(msp->ms_smo.smo_alloc, ==, 0); 2027 (void) dmu_object_free(mos, msp->ms_smo.smo_object, tx); 2028 msp->ms_smo.smo_object = 0; 2029 } 2030 } 2031 2032 if (vd->vdev_ms_array) { 2033 (void) dmu_object_free(mos, vd->vdev_ms_array, tx); 2034 vd->vdev_ms_array = 0; 2035 vd->vdev_ms_shift = 0; 2036 } 2037 dmu_tx_commit(tx); 2038} 2039 2040void 2041vdev_sync_done(vdev_t *vd, uint64_t txg) 2042{ 2043 metaslab_t *msp; 2044 boolean_t reassess = !txg_list_empty(&vd->vdev_ms_list, TXG_CLEAN(txg)); 2045 2046 ASSERT(!vd->vdev_ishole); 2047 2048 while (msp = txg_list_remove(&vd->vdev_ms_list, TXG_CLEAN(txg))) 2049 metaslab_sync_done(msp, txg); 2050 2051 if (reassess) 2052 metaslab_sync_reassess(vd->vdev_mg); 2053} 2054 2055void 2056vdev_sync(vdev_t *vd, uint64_t txg) 2057{ 2058 spa_t *spa = vd->vdev_spa; 2059 vdev_t *lvd; 2060 metaslab_t *msp; 2061 dmu_tx_t *tx; 2062 2063 ASSERT(!vd->vdev_ishole); 2064 2065 if (vd->vdev_ms_array == 0 && vd->vdev_ms_shift != 0) { 2066 ASSERT(vd == vd->vdev_top); 2067 tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg); 2068 vd->vdev_ms_array = dmu_object_alloc(spa->spa_meta_objset, 2069 DMU_OT_OBJECT_ARRAY, 0, DMU_OT_NONE, 0, tx); 2070 ASSERT(vd->vdev_ms_array != 0); 2071 vdev_config_dirty(vd); 2072 dmu_tx_commit(tx); 2073 } 2074 2075 /* 2076 * Remove the metadata associated with this vdev once it's empty. 
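	 * vdev_remove() frees the vdev's DTL space map object, each
	 * metaslab's space map object, and the metaslab object array
	 * itself, all within a single assigned transaction.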
2077 */ 2078 if (vd->vdev_stat.vs_alloc == 0 && vd->vdev_removing) 2079 vdev_remove(vd, txg); 2080 2081 while ((msp = txg_list_remove(&vd->vdev_ms_list, txg)) != NULL) { 2082 metaslab_sync(msp, txg); 2083 (void) txg_list_add(&vd->vdev_ms_list, msp, TXG_CLEAN(txg)); 2084 } 2085 2086 while ((lvd = txg_list_remove(&vd->vdev_dtl_list, txg)) != NULL) 2087 vdev_dtl_sync(lvd, txg); 2088 2089 (void) txg_list_add(&spa->spa_vdev_txg_list, vd, TXG_CLEAN(txg)); 2090} 2091 2092uint64_t 2093vdev_psize_to_asize(vdev_t *vd, uint64_t psize) 2094{ 2095 return (vd->vdev_ops->vdev_op_asize(vd, psize)); 2096} 2097 2098/* 2099 * Mark the given vdev faulted. A faulted vdev behaves as if the device could 2100 * not be opened, and no I/O is attempted. 2101 */ 2102int 2103vdev_fault(spa_t *spa, uint64_t guid, vdev_aux_t aux) 2104{ 2105 vdev_t *vd, *tvd; 2106 2107 spa_vdev_state_enter(spa, SCL_NONE); 2108 2109 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL) 2110 return (spa_vdev_state_exit(spa, NULL, ENODEV)); 2111 2112 if (!vd->vdev_ops->vdev_op_leaf) 2113 return (spa_vdev_state_exit(spa, NULL, ENOTSUP)); 2114 2115 tvd = vd->vdev_top; 2116 2117 /* 2118 * We don't directly use the aux state here, but if we do a 2119 * vdev_reopen(), we need this value to be present to remember why we 2120 * were faulted. 2121 */ 2122 vd->vdev_label_aux = aux; 2123 2124 /* 2125 * Faulted state takes precedence over degraded. 2126 */ 2127 vd->vdev_delayed_close = B_FALSE; 2128 vd->vdev_faulted = 1ULL; 2129 vd->vdev_degraded = 0ULL; 2130 vdev_set_state(vd, B_FALSE, VDEV_STATE_FAULTED, aux); 2131 2132 /* 2133 * If this device has the only valid copy of the data, then 2134 * back off and simply mark the vdev as degraded instead. 2135 */ 2136 if (!tvd->vdev_islog && vd->vdev_aux == NULL && vdev_dtl_required(vd)) { 2137 vd->vdev_degraded = 1ULL; 2138 vd->vdev_faulted = 0ULL; 2139 2140 /* 2141 * If we reopen the device and it's not dead, only then do we 2142 * mark it degraded. 2143 */ 2144 vdev_reopen(tvd); 2145 2146 if (vdev_readable(vd)) 2147 vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED, aux); 2148 } 2149 2150 return (spa_vdev_state_exit(spa, vd, 0)); 2151} 2152 2153/* 2154 * Mark the given vdev degraded. A degraded vdev is purely an indication to the 2155 * user that something is wrong. The vdev continues to operate as normal as far 2156 * as I/O is concerned. 2157 */ 2158int 2159vdev_degrade(spa_t *spa, uint64_t guid, vdev_aux_t aux) 2160{ 2161 vdev_t *vd; 2162 2163 spa_vdev_state_enter(spa, SCL_NONE); 2164 2165 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL) 2166 return (spa_vdev_state_exit(spa, NULL, ENODEV)); 2167 2168 if (!vd->vdev_ops->vdev_op_leaf) 2169 return (spa_vdev_state_exit(spa, NULL, ENOTSUP)); 2170 2171 /* 2172 * If the vdev is already faulted, then don't do anything. 2173 */ 2174 if (vd->vdev_faulted || vd->vdev_degraded) 2175 return (spa_vdev_state_exit(spa, NULL, 0)); 2176 2177 vd->vdev_degraded = 1ULL; 2178 if (!vdev_is_dead(vd)) 2179 vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED, 2180 aux); 2181 2182 return (spa_vdev_state_exit(spa, vd, 0)); 2183} 2184 2185/* 2186 * Online the given vdev. If 'unspare' is set, it implies two things. First, 2187 * any attached spare device should be detached when the device finishes 2188 * resilvering. Second, the online should be treated like a 'test' online case, 2189 * so no FMA events are generated if the device fails to open. 
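 * If ZFS_ONLINE_EXPAND is passed (or the pool has autoexpand set), the
 * device's ancestors are marked as expanding across the reopen so that
 * any newly available capacity can be used, and a config update is
 * requested; L2ARC devices are excluded because L2ARC 1.0 does not
 * support expansion.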
2190 */ 2191int 2192vdev_online(spa_t *spa, uint64_t guid, uint64_t flags, vdev_state_t *newstate) 2193{ 2194 vdev_t *vd, *tvd, *pvd, *rvd = spa->spa_root_vdev; 2195 2196 spa_vdev_state_enter(spa, SCL_NONE); 2197 2198 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL) 2199 return (spa_vdev_state_exit(spa, NULL, ENODEV)); 2200 2201 if (!vd->vdev_ops->vdev_op_leaf) 2202 return (spa_vdev_state_exit(spa, NULL, ENOTSUP)); 2203 2204 tvd = vd->vdev_top; 2205 vd->vdev_offline = B_FALSE; 2206 vd->vdev_tmpoffline = B_FALSE; 2207 vd->vdev_checkremove = !!(flags & ZFS_ONLINE_CHECKREMOVE); 2208 vd->vdev_forcefault = !!(flags & ZFS_ONLINE_FORCEFAULT); 2209 2210 /* XXX - L2ARC 1.0 does not support expansion */ 2211 if (!vd->vdev_aux) { 2212 for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent) 2213 pvd->vdev_expanding = !!(flags & ZFS_ONLINE_EXPAND); 2214 } 2215 2216 vdev_reopen(tvd); 2217 vd->vdev_checkremove = vd->vdev_forcefault = B_FALSE; 2218 2219 if (!vd->vdev_aux) { 2220 for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent) 2221 pvd->vdev_expanding = B_FALSE; 2222 } 2223 2224 if (newstate) 2225 *newstate = vd->vdev_state; 2226 if ((flags & ZFS_ONLINE_UNSPARE) && 2227 !vdev_is_dead(vd) && vd->vdev_parent && 2228 vd->vdev_parent->vdev_ops == &vdev_spare_ops && 2229 vd->vdev_parent->vdev_child[0] == vd) 2230 vd->vdev_unspare = B_TRUE; 2231 2232 if ((flags & ZFS_ONLINE_EXPAND) || spa->spa_autoexpand) { 2233 2234 /* XXX - L2ARC 1.0 does not support expansion */ 2235 if (vd->vdev_aux) 2236 return (spa_vdev_state_exit(spa, vd, ENOTSUP)); 2237 spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE); 2238 } 2239 return (spa_vdev_state_exit(spa, vd, 0)); 2240} 2241 2242static int 2243vdev_offline_locked(spa_t *spa, uint64_t guid, uint64_t flags) 2244{ 2245 vdev_t *vd, *tvd; 2246 int error = 0; 2247 uint64_t generation; 2248 metaslab_group_t *mg; 2249 2250top: 2251 spa_vdev_state_enter(spa, SCL_ALLOC); 2252 2253 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL) 2254 return (spa_vdev_state_exit(spa, NULL, ENODEV)); 2255 2256 if (!vd->vdev_ops->vdev_op_leaf) 2257 return (spa_vdev_state_exit(spa, NULL, ENOTSUP)); 2258 2259 tvd = vd->vdev_top; 2260 mg = tvd->vdev_mg; 2261 generation = spa->spa_config_generation + 1; 2262 2263 /* 2264 * If the device isn't already offline, try to offline it. 2265 */ 2266 if (!vd->vdev_offline) { 2267 /* 2268 * If this device has the only valid copy of some data, 2269 * don't allow it to be offlined. Log devices are always 2270 * expendable. 2271 */ 2272 if (!tvd->vdev_islog && vd->vdev_aux == NULL && 2273 vdev_dtl_required(vd)) 2274 return (spa_vdev_state_exit(spa, NULL, EBUSY)); 2275 2276 /* 2277 * If the top-level is a slog and it has had allocations 2278 * then proceed. We check that the vdev's metaslab group 2279 * is not NULL since it's possible that we may have just 2280 * added this vdev but not yet initialized its metaslabs. 2281 */ 2282 if (tvd->vdev_islog && mg != NULL) { 2283 /* 2284 * Prevent any future allocations. 2285 */ 2286 metaslab_group_passivate(mg); 2287 (void) spa_vdev_state_exit(spa, vd, 0); 2288 2289 error = spa_offline_log(spa); 2290 2291 spa_vdev_state_enter(spa, SCL_ALLOC); 2292 2293 /* 2294 * Check to see if the config has changed. 
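			 * While the log was being evacuated the vdev
			 * state lock was dropped, so another thread may
			 * have changed the config. If the generation
			 * number no longer matches, reactivate the
			 * metaslab group and retry the offline from the
			 * top.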
2295 */ 2296 if (error || generation != spa->spa_config_generation) { 2297 metaslab_group_activate(mg); 2298 if (error) 2299 return (spa_vdev_state_exit(spa, 2300 vd, error)); 2301 (void) spa_vdev_state_exit(spa, vd, 0); 2302 goto top; 2303 } 2304 ASSERT3U(tvd->vdev_stat.vs_alloc, ==, 0); 2305 } 2306 2307 /* 2308 * Offline this device and reopen its top-level vdev. 2309 * If the top-level vdev is a log device then just offline 2310 * it. Otherwise, if this action results in the top-level 2311 * vdev becoming unusable, undo it and fail the request. 2312 */ 2313 vd->vdev_offline = B_TRUE; 2314 vdev_reopen(tvd); 2315 2316 if (!tvd->vdev_islog && vd->vdev_aux == NULL && 2317 vdev_is_dead(tvd)) { 2318 vd->vdev_offline = B_FALSE; 2319 vdev_reopen(tvd); 2320 return (spa_vdev_state_exit(spa, NULL, EBUSY)); 2321 } 2322 2323 /* 2324 * Add the device back into the metaslab rotor so that 2325 * once we online the device it's open for business. 2326 */ 2327 if (tvd->vdev_islog && mg != NULL) 2328 metaslab_group_activate(mg); 2329 } 2330 2331 vd->vdev_tmpoffline = !!(flags & ZFS_OFFLINE_TEMPORARY); 2332 2333 return (spa_vdev_state_exit(spa, vd, 0)); 2334} 2335 2336int 2337vdev_offline(spa_t *spa, uint64_t guid, uint64_t flags) 2338{ 2339 int error; 2340 2341 mutex_enter(&spa->spa_vdev_top_lock); 2342 error = vdev_offline_locked(spa, guid, flags); 2343 mutex_exit(&spa->spa_vdev_top_lock); 2344 2345 return (error); 2346} 2347 2348/* 2349 * Clear the error counts associated with this vdev. Unlike vdev_online() and 2350 * vdev_offline(), we assume the spa config is locked. We also clear all 2351 * children. If 'vd' is NULL, then the user wants to clear all vdevs. 2352 */ 2353void 2354vdev_clear(spa_t *spa, vdev_t *vd) 2355{ 2356 vdev_t *rvd = spa->spa_root_vdev; 2357 2358 ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL); 2359 2360 if (vd == NULL) 2361 vd = rvd; 2362 2363 vd->vdev_stat.vs_read_errors = 0; 2364 vd->vdev_stat.vs_write_errors = 0; 2365 vd->vdev_stat.vs_checksum_errors = 0; 2366 2367 for (int c = 0; c < vd->vdev_children; c++) 2368 vdev_clear(spa, vd->vdev_child[c]); 2369 2370 /* 2371 * If we're in the FAULTED state or have experienced failed I/O, then 2372 * clear the persistent state and attempt to reopen the device. We 2373 * also mark the vdev config dirty, so that the new faulted state is 2374 * written out to disk. 2375 */ 2376 if (vd->vdev_faulted || vd->vdev_degraded || 2377 !vdev_readable(vd) || !vdev_writeable(vd)) { 2378 2379 /* 2380 * When reopening in reponse to a clear event, it may be due to 2381 * a fmadm repair request. In this case, if the device is 2382 * still broken, we want to still post the ereport again. 2383 */ 2384 vd->vdev_forcefault = B_TRUE; 2385 2386 vd->vdev_faulted = vd->vdev_degraded = 0ULL; 2387 vd->vdev_cant_read = B_FALSE; 2388 vd->vdev_cant_write = B_FALSE; 2389 2390 vdev_reopen(vd == rvd ? rvd : vd->vdev_top); 2391 2392 vd->vdev_forcefault = B_FALSE; 2393 2394 if (vd != rvd && vdev_writeable(vd->vdev_top)) 2395 vdev_state_dirty(vd->vdev_top); 2396 2397 if (vd->vdev_aux == NULL && !vdev_is_dead(vd)) 2398 spa_async_request(spa, SPA_ASYNC_RESILVER); 2399 2400 spa_event_notify(spa, vd, ESC_ZFS_VDEV_CLEAR); 2401 } 2402 2403 /* 2404 * When clearing a FMA-diagnosed fault, we always want to 2405 * unspare the device, as we assume that the original spare was 2406 * done in response to the FMA fault. 
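	 * Unsparing means setting vdev_unspare on the original device (the
	 * first child of its spare group) so that the spare is detached
	 * once the device has finished resilvering.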
2407 */ 2408 if (!vdev_is_dead(vd) && vd->vdev_parent != NULL && 2409 vd->vdev_parent->vdev_ops == &vdev_spare_ops && 2410 vd->vdev_parent->vdev_child[0] == vd) 2411 vd->vdev_unspare = B_TRUE; 2412} 2413 2414boolean_t 2415vdev_is_dead(vdev_t *vd) 2416{ 2417 /* 2418 * Holes and missing devices are always considered "dead". 2419 * This simplifies the code since we don't have to check for 2420 * these types of devices in the various code paths. 2421 * Instead we rely on the fact that we skip over dead devices 2422 * before issuing I/O to them. 2423 */ 2424 return (vd->vdev_state < VDEV_STATE_DEGRADED || vd->vdev_ishole || 2425 vd->vdev_ops == &vdev_missing_ops); 2426} 2427 2428boolean_t 2429vdev_readable(vdev_t *vd) 2430{ 2431 return (!vdev_is_dead(vd) && !vd->vdev_cant_read); 2432} 2433 2434boolean_t 2435vdev_writeable(vdev_t *vd) 2436{ 2437 return (!vdev_is_dead(vd) && !vd->vdev_cant_write); 2438} 2439 2440boolean_t 2441vdev_allocatable(vdev_t *vd) 2442{ 2443 uint64_t state = vd->vdev_state; 2444 2445 /* 2446 * We currently allow allocations from vdevs which may be in the 2447 * process of reopening (i.e. VDEV_STATE_CLOSED). If the device 2448 * fails to reopen then we'll catch it later when we're holding 2449 * the proper locks. Note that we have to get the vdev state 2450 * in a local variable because although it changes atomically, 2451 * we're asking two separate questions about it. 2452 */ 2453 return (!(state < VDEV_STATE_DEGRADED && state != VDEV_STATE_CLOSED) && 2454 !vd->vdev_cant_write && !vd->vdev_ishole); 2455} 2456 2457boolean_t 2458vdev_accessible(vdev_t *vd, zio_t *zio) 2459{ 2460 ASSERT(zio->io_vd == vd); 2461 2462 if (vdev_is_dead(vd) || vd->vdev_remove_wanted) 2463 return (B_FALSE); 2464 2465 if (zio->io_type == ZIO_TYPE_READ) 2466 return (!vd->vdev_cant_read); 2467 2468 if (zio->io_type == ZIO_TYPE_WRITE) 2469 return (!vd->vdev_cant_write); 2470 2471 return (B_TRUE); 2472} 2473 2474/* 2475 * Get statistics for the given vdev. 2476 */ 2477void 2478vdev_get_stats(vdev_t *vd, vdev_stat_t *vs) 2479{ 2480 vdev_t *rvd = vd->vdev_spa->spa_root_vdev; 2481 2482 mutex_enter(&vd->vdev_stat_lock); 2483 bcopy(&vd->vdev_stat, vs, sizeof (*vs)); 2484 vs->vs_timestamp = gethrtime() - vs->vs_timestamp; 2485 vs->vs_state = vd->vdev_state; 2486 vs->vs_rsize = vdev_get_min_asize(vd); 2487 if (vd->vdev_ops->vdev_op_leaf) 2488 vs->vs_rsize += VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE;
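	/*
	 * vs_esize records how much this vdev could grow if expanded: the
	 * difference between the largest asize observed at open time
	 * (vdev_max_asize) and the asize currently in use.
	 */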
2489 	vs->vs_esize = vd->vdev_max_asize - vd->vdev_asize;
2490 mutex_exit(&vd->vdev_stat_lock); 2491 2492 /* 2493 * If we're getting stats on the root vdev, aggregate the I/O counts 2494 * over all top-level vdevs (i.e. the direct children of the root). 2495 */ 2496 if (vd == rvd) { 2497 for (int c = 0; c < rvd->vdev_children; c++) { 2498 vdev_t *cvd = rvd->vdev_child[c]; 2499 vdev_stat_t *cvs = &cvd->vdev_stat; 2500 2501 mutex_enter(&vd->vdev_stat_lock); 2502 for (int t = 0; t < ZIO_TYPES; t++) { 2503 vs->vs_ops[t] += cvs->vs_ops[t]; 2504 vs->vs_bytes[t] += cvs->vs_bytes[t]; 2505 } 2506 cvs->vs_scan_removing = cvd->vdev_removing; 2507 mutex_exit(&vd->vdev_stat_lock); 2508 } 2509 } 2510} 2511 2512void 2513vdev_clear_stats(vdev_t *vd) 2514{ 2515 mutex_enter(&vd->vdev_stat_lock); 2516 vd->vdev_stat.vs_space = 0; 2517 vd->vdev_stat.vs_dspace = 0; 2518 vd->vdev_stat.vs_alloc = 0; 2519 mutex_exit(&vd->vdev_stat_lock); 2520} 2521 2522void 2523vdev_scan_stat_init(vdev_t *vd) 2524{ 2525 vdev_stat_t *vs = &vd->vdev_stat; 2526 2527 for (int c = 0; c < vd->vdev_children; c++) 2528 vdev_scan_stat_init(vd->vdev_child[c]); 2529 2530 mutex_enter(&vd->vdev_stat_lock); 2531 vs->vs_scan_processed = 0; 2532 mutex_exit(&vd->vdev_stat_lock); 2533} 2534 2535void 2536vdev_stat_update(zio_t *zio, uint64_t psize) 2537{ 2538 spa_t *spa = zio->io_spa; 2539 vdev_t *rvd = spa->spa_root_vdev; 2540 vdev_t *vd = zio->io_vd ? zio->io_vd : rvd; 2541 vdev_t *pvd; 2542 uint64_t txg = zio->io_txg; 2543 vdev_stat_t *vs = &vd->vdev_stat; 2544 zio_type_t type = zio->io_type; 2545 int flags = zio->io_flags; 2546 2547 /* 2548 * If this i/o is a gang leader, it didn't do any actual work. 2549 */ 2550 if (zio->io_gang_tree) 2551 return; 2552 2553 if (zio->io_error == 0) { 2554 /* 2555 * If this is a root i/o, don't count it -- we've already 2556 * counted the top-level vdevs, and vdev_get_stats() will 2557 * aggregate them when asked. This reduces contention on 2558 * the root vdev_stat_lock and implicitly handles blocks 2559 * that compress away to holes, for which there is no i/o. 2560 * (Holes never create vdev children, so all the counters 2561 * remain zero, which is what we want.) 2562 * 2563 * Note: this only applies to successful i/o (io_error == 0) 2564 * because unlike i/o counts, errors are not additive. 2565 * When reading a ditto block, for example, failure of 2566 * one top-level vdev does not imply a root-level error. 2567 */ 2568 if (vd == rvd) 2569 return; 2570 2571 ASSERT(vd == zio->io_vd); 2572 2573 if (flags & ZIO_FLAG_IO_BYPASS) 2574 return; 2575 2576 mutex_enter(&vd->vdev_stat_lock); 2577 2578 if (flags & ZIO_FLAG_IO_REPAIR) { 2579 if (flags & ZIO_FLAG_SCAN_THREAD) { 2580 dsl_scan_phys_t *scn_phys = 2581 &spa->spa_dsl_pool->dp_scan->scn_phys; 2582 uint64_t *processed = &scn_phys->scn_processed; 2583 2584 /* XXX cleanup? */ 2585 if (vd->vdev_ops->vdev_op_leaf) 2586 atomic_add_64(processed, psize); 2587 vs->vs_scan_processed += psize; 2588 } 2589 2590 if (flags & ZIO_FLAG_SELF_HEAL) 2591 vs->vs_self_healed += psize; 2592 } 2593 2594 vs->vs_ops[type]++; 2595 vs->vs_bytes[type] += psize; 2596 2597 mutex_exit(&vd->vdev_stat_lock); 2598 return; 2599 } 2600 2601 if (flags & ZIO_FLAG_SPECULATIVE) 2602 return; 2603 2604 /* 2605 * If this is an I/O error that is going to be retried, then ignore the 2606 * error. Otherwise, the user may interpret B_FAILFAST I/O errors as 2607 * hard errors, when in reality they can happen for any number of 2608 * innocuous reasons (bus resets, MPxIO link failure, etc). 
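	 * In other words, an EIO is only charged against the error
	 * counters below when ZIO_FLAG_IO_RETRY is set on the failing
	 * I/O, i.e. once a retry has also failed.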
2609 */ 2610 if (zio->io_error == EIO && 2611 !(zio->io_flags & ZIO_FLAG_IO_RETRY)) 2612 return; 2613 2614 /* 2615 * Intent logs writes won't propagate their error to the root 2616 * I/O so don't mark these types of failures as pool-level 2617 * errors. 2618 */ 2619 if (zio->io_vd == NULL && (zio->io_flags & ZIO_FLAG_DONT_PROPAGATE)) 2620 return; 2621 2622 mutex_enter(&vd->vdev_stat_lock); 2623 if (type == ZIO_TYPE_READ && !vdev_is_dead(vd)) { 2624 if (zio->io_error == ECKSUM) 2625 vs->vs_checksum_errors++; 2626 else 2627 vs->vs_read_errors++; 2628 } 2629 if (type == ZIO_TYPE_WRITE && !vdev_is_dead(vd)) 2630 vs->vs_write_errors++; 2631 mutex_exit(&vd->vdev_stat_lock); 2632 2633 if (type == ZIO_TYPE_WRITE && txg != 0 && 2634 (!(flags & ZIO_FLAG_IO_REPAIR) || 2635 (flags & ZIO_FLAG_SCAN_THREAD) || 2636 spa->spa_claiming)) { 2637 /* 2638 * This is either a normal write (not a repair), or it's 2639 * a repair induced by the scrub thread, or it's a repair 2640 * made by zil_claim() during spa_load() in the first txg. 2641 * In the normal case, we commit the DTL change in the same 2642 * txg as the block was born. In the scrub-induced repair 2643 * case, we know that scrubs run in first-pass syncing context, 2644 * so we commit the DTL change in spa_syncing_txg(spa). 2645 * In the zil_claim() case, we commit in spa_first_txg(spa). 2646 * 2647 * We currently do not make DTL entries for failed spontaneous 2648 * self-healing writes triggered by normal (non-scrubbing) 2649 * reads, because we have no transactional context in which to 2650 * do so -- and it's not clear that it'd be desirable anyway. 2651 */ 2652 if (vd->vdev_ops->vdev_op_leaf) { 2653 uint64_t commit_txg = txg; 2654 if (flags & ZIO_FLAG_SCAN_THREAD) { 2655 ASSERT(flags & ZIO_FLAG_IO_REPAIR); 2656 ASSERT(spa_sync_pass(spa) == 1); 2657 vdev_dtl_dirty(vd, DTL_SCRUB, txg, 1); 2658 commit_txg = spa_syncing_txg(spa); 2659 } else if (spa->spa_claiming) { 2660 ASSERT(flags & ZIO_FLAG_IO_REPAIR); 2661 commit_txg = spa_first_txg(spa); 2662 } 2663 ASSERT(commit_txg >= spa_syncing_txg(spa)); 2664 if (vdev_dtl_contains(vd, DTL_MISSING, txg, 1)) 2665 return; 2666 for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent) 2667 vdev_dtl_dirty(pvd, DTL_PARTIAL, txg, 1); 2668 vdev_dirty(vd->vdev_top, VDD_DTL, vd, commit_txg); 2669 } 2670 if (vd != rvd) 2671 vdev_dtl_dirty(vd, DTL_MISSING, txg, 1); 2672 } 2673} 2674 2675/* 2676 * Update the in-core space usage stats for this vdev, its metaslab class, 2677 * and the root vdev. 2678 */ 2679void 2680vdev_space_update(vdev_t *vd, int64_t alloc_delta, int64_t defer_delta, 2681 int64_t space_delta) 2682{ 2683 int64_t dspace_delta = space_delta; 2684 spa_t *spa = vd->vdev_spa; 2685 vdev_t *rvd = spa->spa_root_vdev; 2686 metaslab_group_t *mg = vd->vdev_mg; 2687 metaslab_class_t *mc = mg ? mg->mg_class : NULL; 2688 2689 ASSERT(vd == vd->vdev_top); 2690 2691 /* 2692 * Apply the inverse of the psize-to-asize (ie. RAID-Z) space-expansion 2693 * factor. We must calculate this here and not at the root vdev 2694 * because the root vdev's psize-to-asize is simply the max of its 2695 * childrens', thus not accurate enough for us. 
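	 * The conversion below expresses the delta in SPA_MINBLOCKSIZE
	 * units and scales it by this top-level vdev's vdev_deflate_ratio
	 * before it is folded into vs_dspace.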
2696 */ 2697 ASSERT((dspace_delta & (SPA_MINBLOCKSIZE-1)) == 0); 2698 ASSERT(vd->vdev_deflate_ratio != 0 || vd->vdev_isl2cache); 2699 dspace_delta = (dspace_delta >> SPA_MINBLOCKSHIFT) * 2700 vd->vdev_deflate_ratio; 2701 2702 mutex_enter(&vd->vdev_stat_lock); 2703 vd->vdev_stat.vs_alloc += alloc_delta; 2704 vd->vdev_stat.vs_space += space_delta; 2705 vd->vdev_stat.vs_dspace += dspace_delta; 2706 mutex_exit(&vd->vdev_stat_lock); 2707 2708 if (mc == spa_normal_class(spa)) { 2709 mutex_enter(&rvd->vdev_stat_lock); 2710 rvd->vdev_stat.vs_alloc += alloc_delta; 2711 rvd->vdev_stat.vs_space += space_delta; 2712 rvd->vdev_stat.vs_dspace += dspace_delta; 2713 mutex_exit(&rvd->vdev_stat_lock); 2714 } 2715 2716 if (mc != NULL) { 2717 ASSERT(rvd == vd->vdev_parent); 2718 ASSERT(vd->vdev_ms_count != 0); 2719 2720 metaslab_class_space_update(mc, 2721 alloc_delta, defer_delta, space_delta, dspace_delta); 2722 } 2723} 2724 2725/* 2726 * Mark a top-level vdev's config as dirty, placing it on the dirty list 2727 * so that it will be written out next time the vdev configuration is synced. 2728 * If the root vdev is specified (vdev_top == NULL), dirty all top-level vdevs. 2729 */ 2730void 2731vdev_config_dirty(vdev_t *vd) 2732{ 2733 spa_t *spa = vd->vdev_spa; 2734 vdev_t *rvd = spa->spa_root_vdev; 2735 int c; 2736 2737 ASSERT(spa_writeable(spa)); 2738 2739 /* 2740 * If this is an aux vdev (as with l2cache and spare devices), then we 2741 * update the vdev config manually and set the sync flag. 2742 */ 2743 if (vd->vdev_aux != NULL) { 2744 spa_aux_vdev_t *sav = vd->vdev_aux; 2745 nvlist_t **aux; 2746 uint_t naux; 2747 2748 for (c = 0; c < sav->sav_count; c++) { 2749 if (sav->sav_vdevs[c] == vd) 2750 break; 2751 } 2752 2753 if (c == sav->sav_count) { 2754 /* 2755 * We're being removed. There's nothing more to do. 2756 */ 2757 ASSERT(sav->sav_sync == B_TRUE); 2758 return; 2759 } 2760 2761 sav->sav_sync = B_TRUE; 2762 2763 if (nvlist_lookup_nvlist_array(sav->sav_config, 2764 ZPOOL_CONFIG_L2CACHE, &aux, &naux) != 0) { 2765 VERIFY(nvlist_lookup_nvlist_array(sav->sav_config, 2766 ZPOOL_CONFIG_SPARES, &aux, &naux) == 0); 2767 } 2768 2769 ASSERT(c < naux); 2770 2771 /* 2772 * Setting the nvlist in the middle if the array is a little 2773 * sketchy, but it will work. 2774 */ 2775 nvlist_free(aux[c]); 2776 aux[c] = vdev_config_generate(spa, vd, B_TRUE, 0); 2777 2778 return; 2779 } 2780 2781 /* 2782 * The dirty list is protected by the SCL_CONFIG lock. The caller 2783 * must either hold SCL_CONFIG as writer, or must be the sync thread 2784 * (which holds SCL_CONFIG as reader). There's only one sync thread, 2785 * so this is sufficient to ensure mutual exclusion. 
2786 */ 2787 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_WRITER) || 2788 (dsl_pool_sync_context(spa_get_dsl(spa)) && 2789 spa_config_held(spa, SCL_CONFIG, RW_READER))); 2790 2791 if (vd == rvd) { 2792 for (c = 0; c < rvd->vdev_children; c++) 2793 vdev_config_dirty(rvd->vdev_child[c]); 2794 } else { 2795 ASSERT(vd == vd->vdev_top); 2796 2797 if (!list_link_active(&vd->vdev_config_dirty_node) && 2798 !vd->vdev_ishole) 2799 list_insert_head(&spa->spa_config_dirty_list, vd); 2800 } 2801} 2802 2803void 2804vdev_config_clean(vdev_t *vd) 2805{ 2806 spa_t *spa = vd->vdev_spa; 2807 2808 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_WRITER) || 2809 (dsl_pool_sync_context(spa_get_dsl(spa)) && 2810 spa_config_held(spa, SCL_CONFIG, RW_READER))); 2811 2812 ASSERT(list_link_active(&vd->vdev_config_dirty_node)); 2813 list_remove(&spa->spa_config_dirty_list, vd); 2814} 2815 2816/* 2817 * Mark a top-level vdev's state as dirty, so that the next pass of 2818 * spa_sync() can convert this into vdev_config_dirty(). We distinguish 2819 * the state changes from larger config changes because they require 2820 * much less locking, and are often needed for administrative actions. 2821 */ 2822void 2823vdev_state_dirty(vdev_t *vd) 2824{ 2825 spa_t *spa = vd->vdev_spa; 2826 2827 ASSERT(spa_writeable(spa)); 2828 ASSERT(vd == vd->vdev_top); 2829 2830 /* 2831 * The state list is protected by the SCL_STATE lock. The caller 2832 * must either hold SCL_STATE as writer, or must be the sync thread 2833 * (which holds SCL_STATE as reader). There's only one sync thread, 2834 * so this is sufficient to ensure mutual exclusion. 2835 */ 2836 ASSERT(spa_config_held(spa, SCL_STATE, RW_WRITER) || 2837 (dsl_pool_sync_context(spa_get_dsl(spa)) && 2838 spa_config_held(spa, SCL_STATE, RW_READER))); 2839 2840 if (!list_link_active(&vd->vdev_state_dirty_node) && !vd->vdev_ishole) 2841 list_insert_head(&spa->spa_state_dirty_list, vd); 2842} 2843 2844void 2845vdev_state_clean(vdev_t *vd) 2846{ 2847 spa_t *spa = vd->vdev_spa; 2848 2849 ASSERT(spa_config_held(spa, SCL_STATE, RW_WRITER) || 2850 (dsl_pool_sync_context(spa_get_dsl(spa)) && 2851 spa_config_held(spa, SCL_STATE, RW_READER))); 2852 2853 ASSERT(list_link_active(&vd->vdev_state_dirty_node)); 2854 list_remove(&spa->spa_state_dirty_list, vd); 2855} 2856 2857/* 2858 * Propagate vdev state up from children to parent. 2859 */ 2860void 2861vdev_propagate_state(vdev_t *vd) 2862{ 2863 spa_t *spa = vd->vdev_spa; 2864 vdev_t *rvd = spa->spa_root_vdev; 2865 int degraded = 0, faulted = 0; 2866 int corrupted = 0; 2867 vdev_t *child; 2868 2869 if (vd->vdev_children > 0) { 2870 for (int c = 0; c < vd->vdev_children; c++) { 2871 child = vd->vdev_child[c]; 2872 2873 /* 2874 * Don't factor holes into the decision. 2875 */ 2876 if (child->vdev_ishole) 2877 continue; 2878 2879 if (!vdev_readable(child) || 2880 (!vdev_writeable(child) && spa_writeable(spa))) { 2881 /* 2882 * Root special: if there is a top-level log 2883 * device, treat the root vdev as if it were 2884 * degraded. 
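				 * Log devices are considered expendable
				 * (cf. the offline path above), so a failed
				 * top-level log only degrades the root vdev
				 * rather than faulting it.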
2885 */ 2886 if (child->vdev_islog && vd == rvd) 2887 degraded++; 2888 else 2889 faulted++; 2890 } else if (child->vdev_state <= VDEV_STATE_DEGRADED) { 2891 degraded++; 2892 } 2893 2894 if (child->vdev_stat.vs_aux == VDEV_AUX_CORRUPT_DATA) 2895 corrupted++; 2896 } 2897 2898 vd->vdev_ops->vdev_op_state_change(vd, faulted, degraded); 2899 2900 /* 2901 * Root special: if there is a top-level vdev that cannot be 2902 * opened due to corrupted metadata, then propagate the root 2903 * vdev's aux state as 'corrupt' rather than 'insufficient 2904 * replicas'. 2905 */ 2906 if (corrupted && vd == rvd && 2907 rvd->vdev_state == VDEV_STATE_CANT_OPEN) 2908 vdev_set_state(rvd, B_FALSE, VDEV_STATE_CANT_OPEN, 2909 VDEV_AUX_CORRUPT_DATA); 2910 } 2911 2912 if (vd->vdev_parent) 2913 vdev_propagate_state(vd->vdev_parent); 2914} 2915 2916/* 2917 * Set a vdev's state. If this is during an open, we don't update the parent 2918 * state, because we're in the process of opening children depth-first. 2919 * Otherwise, we propagate the change to the parent. 2920 * 2921 * If this routine places a device in a faulted state, an appropriate ereport is 2922 * generated. 2923 */ 2924void 2925vdev_set_state(vdev_t *vd, boolean_t isopen, vdev_state_t state, vdev_aux_t aux) 2926{ 2927 uint64_t save_state; 2928 spa_t *spa = vd->vdev_spa; 2929 2930 if (state == vd->vdev_state) { 2931 vd->vdev_stat.vs_aux = aux; 2932 return; 2933 } 2934 2935 save_state = vd->vdev_state; 2936 2937 vd->vdev_state = state; 2938 vd->vdev_stat.vs_aux = aux; 2939 2940 /* 2941 * If we are setting the vdev state to anything but an open state, then 2942 * always close the underlying device unless the device has requested 2943 * a delayed close (i.e. we're about to remove or fault the device). 2944 * Otherwise, we keep accessible but invalid devices open forever. 2945 * We don't call vdev_close() itself, because that implies some extra 2946 * checks (offline, etc) that we don't want here. This is limited to 2947 * leaf devices, because otherwise closing the device will affect other 2948 * children. 2949 */ 2950 if (!vd->vdev_delayed_close && vdev_is_dead(vd) && 2951 vd->vdev_ops->vdev_op_leaf) 2952 vd->vdev_ops->vdev_op_close(vd); 2953 2954 /* 2955 * If we have brought this vdev back into service, we need 2956 * to notify fmd so that it can gracefully repair any outstanding 2957 * cases due to a missing device. We do this in all cases, even those 2958 * that probably don't correlate to a repaired fault. This is sure to 2959 * catch all cases, and we let the zfs-retire agent sort it out. If 2960 * this is a transient state it's OK, as the retire agent will 2961 * double-check the state of the vdev before repairing it. 2962 */ 2963 if (state == VDEV_STATE_HEALTHY && vd->vdev_ops->vdev_op_leaf && 2964 vd->vdev_prevstate != state) 2965 zfs_post_state_change(spa, vd); 2966 2967 if (vd->vdev_removed && 2968 state == VDEV_STATE_CANT_OPEN && 2969 (aux == VDEV_AUX_OPEN_FAILED || vd->vdev_checkremove)) { 2970 /* 2971 * If the previous state is set to VDEV_STATE_REMOVED, then this 2972 * device was previously marked removed and someone attempted to 2973 * reopen it. If this failed due to a nonexistent device, then 2974 * keep the device in the REMOVED state. We also let this be if 2975 * it is one of our special test online cases, which is only 2976 * attempting to online the device and shouldn't generate an FMA 2977 * fault. 
2978 */ 2979 vd->vdev_state = VDEV_STATE_REMOVED; 2980 vd->vdev_stat.vs_aux = VDEV_AUX_NONE; 2981 } else if (state == VDEV_STATE_REMOVED) { 2982 vd->vdev_removed = B_TRUE; 2983 } else if (state == VDEV_STATE_CANT_OPEN) { 2984 /* 2985 * If we fail to open a vdev during an import or recovery, we 2986 * mark it as "not available", which signifies that it was 2987 * never there to begin with. Failure to open such a device 2988 * is not considered an error. 2989 */ 2990 if ((spa_load_state(spa) == SPA_LOAD_IMPORT || 2991 spa_load_state(spa) == SPA_LOAD_RECOVER) && 2992 vd->vdev_ops->vdev_op_leaf) 2993 vd->vdev_not_present = 1; 2994 2995 /* 2996 * Post the appropriate ereport. If the 'prevstate' field is 2997 * set to something other than VDEV_STATE_UNKNOWN, it indicates 2998 * that this is part of a vdev_reopen(). In this case, we don't 2999 * want to post the ereport if the device was already in the 3000 * CANT_OPEN state beforehand. 3001 * 3002 * If the 'checkremove' flag is set, then this is an attempt to 3003 * online the device in response to an insertion event. If we 3004 * hit this case, then we have detected an insertion event for a 3005 * faulted or offline device that wasn't in the removed state. 3006 * In this scenario, we don't post an ereport because we are 3007 * about to replace the device, or attempt an online with 3008 * vdev_forcefault, which will generate the fault for us. 3009 */ 3010 if ((vd->vdev_prevstate != state || vd->vdev_forcefault) && 3011 !vd->vdev_not_present && !vd->vdev_checkremove && 3012 vd != spa->spa_root_vdev) { 3013 const char *class; 3014 3015 switch (aux) { 3016 case VDEV_AUX_OPEN_FAILED: 3017 class = FM_EREPORT_ZFS_DEVICE_OPEN_FAILED; 3018 break; 3019 case VDEV_AUX_CORRUPT_DATA: 3020 class = FM_EREPORT_ZFS_DEVICE_CORRUPT_DATA; 3021 break; 3022 case VDEV_AUX_NO_REPLICAS: 3023 class = FM_EREPORT_ZFS_DEVICE_NO_REPLICAS; 3024 break; 3025 case VDEV_AUX_BAD_GUID_SUM: 3026 class = FM_EREPORT_ZFS_DEVICE_BAD_GUID_SUM; 3027 break; 3028 case VDEV_AUX_TOO_SMALL: 3029 class = FM_EREPORT_ZFS_DEVICE_TOO_SMALL; 3030 break; 3031 case VDEV_AUX_BAD_LABEL: 3032 class = FM_EREPORT_ZFS_DEVICE_BAD_LABEL; 3033 break; 3034 default: 3035 class = FM_EREPORT_ZFS_DEVICE_UNKNOWN; 3036 } 3037 3038 zfs_ereport_post(class, spa, vd, NULL, save_state, 0); 3039 } 3040 3041 /* Erase any notion of persistent removed state */ 3042 vd->vdev_removed = B_FALSE; 3043 } else { 3044 vd->vdev_removed = B_FALSE; 3045 } 3046 3047 if (!isopen && vd->vdev_parent) 3048 vdev_propagate_state(vd->vdev_parent); 3049} 3050 3051/* 3052 * Check the vdev configuration to ensure that it's capable of supporting 3053 * a root pool. 3054 * 3055 * On Solaris, we do not support RAID-Z or partial configuration. In 3056 * addition, only a single top-level vdev is allowed and none of the 3057 * leaves can be wholedisks. 3058 * 3059 * For FreeBSD, we can boot from any configuration. There is a 3060 * limitation that the boot filesystem must be either uncompressed or 3061 * compresses with lzjb compression but I'm not sure how to enforce 3062 * that here. 
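 * Accordingly, the checks below are compiled only under #ifdef sun; on
 * FreeBSD the function simply returns B_TRUE for every configuration.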
3063 */ 3064boolean_t 3065vdev_is_bootable(vdev_t *vd) 3066{ 3067#ifdef sun 3068 if (!vd->vdev_ops->vdev_op_leaf) { 3069 char *vdev_type = vd->vdev_ops->vdev_op_type; 3070 3071 if (strcmp(vdev_type, VDEV_TYPE_ROOT) == 0 && 3072 vd->vdev_children > 1) { 3073 return (B_FALSE); 3074 } else if (strcmp(vdev_type, VDEV_TYPE_RAIDZ) == 0 || 3075 strcmp(vdev_type, VDEV_TYPE_MISSING) == 0) { 3076 return (B_FALSE); 3077 } 3078 } else if (vd->vdev_wholedisk == 1) { 3079 return (B_FALSE); 3080 } 3081 3082 for (int c = 0; c < vd->vdev_children; c++) { 3083 if (!vdev_is_bootable(vd->vdev_child[c])) 3084 return (B_FALSE); 3085 } 3086#endif /* sun */ 3087 return (B_TRUE); 3088} 3089 3090/* 3091 * Load the state from the original vdev tree (ovd) which 3092 * we've retrieved from the MOS config object. If the original 3093 * vdev was offline or faulted then we transfer that state to the 3094 * device in the current vdev tree (nvd). 3095 */ 3096void 3097vdev_load_log_state(vdev_t *nvd, vdev_t *ovd) 3098{ 3099 spa_t *spa = nvd->vdev_spa; 3100 3101 ASSERT(nvd->vdev_top->vdev_islog); 3102 ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL); 3103 ASSERT3U(nvd->vdev_guid, ==, ovd->vdev_guid); 3104 3105 for (int c = 0; c < nvd->vdev_children; c++) 3106 vdev_load_log_state(nvd->vdev_child[c], ovd->vdev_child[c]); 3107 3108 if (nvd->vdev_ops->vdev_op_leaf) { 3109 /* 3110 * Restore the persistent vdev state 3111 */ 3112 nvd->vdev_offline = ovd->vdev_offline; 3113 nvd->vdev_faulted = ovd->vdev_faulted; 3114 nvd->vdev_degraded = ovd->vdev_degraded; 3115 nvd->vdev_removed = ovd->vdev_removed; 3116 } 3117} 3118 3119/* 3120 * Determine if a log device has valid content. If the vdev was 3121 * removed or faulted in the MOS config then we know that 3122 * the content on the log device has already been written to the pool. 3123 */ 3124boolean_t 3125vdev_log_state_valid(vdev_t *vd) 3126{ 3127 if (vd->vdev_ops->vdev_op_leaf && !vd->vdev_faulted && 3128 !vd->vdev_removed) 3129 return (B_TRUE); 3130 3131 for (int c = 0; c < vd->vdev_children; c++) 3132 if (vdev_log_state_valid(vd->vdev_child[c])) 3133 return (B_TRUE); 3134 3135 return (B_FALSE); 3136} 3137 3138/* 3139 * Expand a vdev if possible. 3140 */ 3141void 3142vdev_expand(vdev_t *vd, uint64_t txg) 3143{ 3144 ASSERT(vd->vdev_top == vd); 3145 ASSERT(spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL); 3146 3147 if ((vd->vdev_asize >> vd->vdev_ms_shift) > vd->vdev_ms_count) { 3148 VERIFY(vdev_metaslab_init(vd, txg) == 0); 3149 vdev_config_dirty(vd); 3150 } 3151} 3152 3153/* 3154 * Split a vdev. 3155 */ 3156void 3157vdev_split(vdev_t *vd) 3158{ 3159 vdev_t *cvd, *pvd = vd->vdev_parent; 3160 3161 vdev_remove_child(pvd, vd); 3162 vdev_compact_children(pvd); 3163 3164 cvd = pvd->vdev_child[0]; 3165 if (pvd->vdev_children == 1) { 3166 vdev_remove_parent(cvd); 3167 cvd->vdev_splitting = B_TRUE; 3168 } 3169 vdev_propagate_state(cvd); 3170}