 */

/*
 * Virtual Device Labels
 * ---------------------
 *
 * The vdev label serves several distinct purposes:
 *
 *	1. Uniquely identify this device as part of a ZFS pool and confirm its
 *	   identity within the pool.
 *
 *	2. Verify that all the devices given in a configuration are present
 *	   within the pool.
 *
 *	3. Determine the uberblock for the pool.
 *
 *	4. In case of an import operation, determine the configuration of the
 *	   top-level vdev of which it is a part.
 *
 *	5. If an import operation cannot find all the devices in the pool,
 *	   provide enough information to the administrator to determine which
 *	   devices are missing.
 *
 * It is important to note that while the kernel is responsible for writing the
 * label, it only consumes the information in the first three cases. The
 * latter information is only consumed in userland when determining the
 * configuration to import a pool.
 *
 *
 * Label Organization
 * ------------------
 *
 * Before describing the contents of the label, it's important to understand how
 * the labels are written and updated with respect to the uberblock.
 *
 * When the pool configuration is altered, either because it was newly created
 * or a device was added, we want to update all the labels such that we can deal
 * with fatal failure at any point. To this end, each disk has two labels which
 * are updated before and after the uberblock is synced. Assuming we have
 * labels and an uberblock with the following transaction groups:
 *
 *		L1          UB          L2
 *	     +------+    +------+    +------+
 *	     |      |    |      |    |      |
 *	     | t10  |    | t10  |    | t10  |
 *	     |      |    |      |    |      |
 *	     +------+    +------+    +------+
 *
 * In this stable state, the labels and the uberblock were all updated within
 * the same transaction group (10). Each label is mirrored and checksummed, so
 * that we can detect when we fail partway through writing the label.
 *
 * In order to identify which labels are valid, the labels are written in the
 * following manner:
 *
 *	1. For each vdev, update 'L1' to the new label
 *	2. Update the uberblock
 *	3. For each vdev, update 'L2' to the new label
 *
 * Given arbitrary failure, we can determine the correct label to use based on
 * the transaction group. If we fail after updating L1 but before updating the
 * UB, we will notice that L1's transaction group is greater than the uberblock,
 * so L2 must be valid. If we fail after writing the uberblock but before
 * writing L2, we will notice that L2's transaction group is less than L1's, and
 * therefore L1 is valid. (A sketch of this selection rule follows the includes
 * below.)
 *
 * Another added complexity is that not every label is updated when the config
 * is synced. If we add a single device, we do not want to have to re-write
 * every label for every device in the pool. This means that both L1 and L2 may
 * be older than the pool uberblock, because the necessary information is stored
 * on another vdev.
 *
 *
 * On-disk Format
 * --------------
 *
 * The vdev label consists of two distinct parts, and is wrapped within the
 * vdev_label_t structure. The label includes 8k of padding to permit legacy
 * VTOC disk labels; this padding is otherwise ignored.
 *
 * The first half of the label is a packed nvlist which contains pool wide
 * properties, per-vdev properties, and configuration information. It is
 * described in more detail below.
 *
 * The latter half of the label consists of a redundant array of uberblocks.
 * These uberblocks are updated whenever a transaction group is committed,
 * or when the configuration is updated. When a pool is loaded, we scan each
 * vdev for the 'best' uberblock.
 *
 *
 * Configuration Information
 * -------------------------
 *
 * The nvlist describing the pool and vdev contains the following elements:
 *
 *	version		ZFS on-disk version
 *	name		Pool name
 *	state		Pool state
 *	txg		Transaction group in which this label was written
 *	pool_guid	Unique identifier for this pool
 *	vdev_tree	An nvlist describing the vdev tree.
 *	features_for_read
 *			An nvlist of the features necessary for reading the MOS.
 *
 * Each leaf device label also contains the following:
 *
 *	top_guid	Unique ID for top-level vdev in which this is contained
 *	guid		Unique ID for the leaf vdev
 *
 * The 'vs' configuration follows the format described in 'spa_config.c'.
 */

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/dmu.h>
#include <sys/zap.h>
#include <sys/vdev.h>
#include <sys/vdev_impl.h>
#include <sys/uberblock_impl.h>
#include <sys/metaslab.h>
#include <sys/metaslab_impl.h>
#include <sys/zio.h>
#include <sys/dsl_scan.h>
#include <sys/abd.h>
#include <sys/fs/zfs.h>
#include <sys/trim_map.h>

static boolean_t vdev_trim_on_init = B_TRUE;
SYSCTL_DECL(_vfs_zfs_vdev);
SYSCTL_INT(_vfs_zfs_vdev, OID_AUTO, trim_on_init, CTLFLAG_RW,
    &vdev_trim_on_init, 0, "Enable/disable full vdev trim on initialisation");

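/*
 * Editor's illustrative sketch (not part of the original file): the label
 * selection rule described under "Label Organization" above, restated as a
 * hypothetical helper. Given the transaction groups read from L1, L2, and
 * the uberblock, it returns which label copy is consistent with the
 * uberblock. The name and signature are invented for illustration; the real
 * selection happens implicitly via vdev_label_read_config() and the best-txg
 * scan below.
 */
static int
vdev_label_pick_valid_sketch(uint64_t l1_txg, uint64_t l2_txg, uint64_t ub_txg)
{
        /*
         * Failure after step 1 (L1 written, uberblock not): L1 is from the
         * future relative to the uberblock, so fall back to L2.
         */
        if (l1_txg > ub_txg)
                return (2);
        /*
         * Failure after step 2 (uberblock written, L2 not): L2 lags L1,
         * so L1 is the valid copy.
         */
        if (l2_txg < l1_txg)
                return (1);
        /* Steady state: both labels match the uberblock; use either. */
        return (1);
}
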
/*
 * Basic routines to read and write from a vdev label.
 * Used throughout the rest of this file.
 */
uint64_t
vdev_label_offset(uint64_t psize, int l, uint64_t offset)
{
        ASSERT(offset < sizeof (vdev_label_t));
        ASSERT(P2PHASE_TYPED(psize, sizeof (vdev_label_t), uint64_t) == 0);

        return (offset + l * sizeof (vdev_label_t) + (l < VDEV_LABELS / 2 ?
            0 : psize - VDEV_LABELS * sizeof (vdev_label_t)));
}

/*
 * Returns the vdev label associated with the passed-in offset.
 */
int
vdev_label_number(uint64_t psize, uint64_t offset)
{
        int l;

        if (offset >= psize - VDEV_LABEL_END_SIZE) {
                offset -= psize - VDEV_LABEL_END_SIZE;
                offset += (VDEV_LABELS / 2) * sizeof (vdev_label_t);
        }
        l = offset / sizeof (vdev_label_t);
        return (l < VDEV_LABELS ? l : -1);
}

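/*
 * Editor's worked example (illustrative only): with VDEV_LABELS == 4, labels
 * 0 and 1 sit at the front of the device and labels 2 and 3 at the back, so
 * vdev_label_offset(psize, l, 0) evaluates to:
 *
 *	l = 0:	0
 *	l = 1:	sizeof (vdev_label_t)
 *	l = 2:	psize - 2 * sizeof (vdev_label_t)
 *	l = 3:	psize - sizeof (vdev_label_t)
 *
 * The hypothetical helper below just tabulates those values; it exists only
 * to make the front/back split concrete.
 */
static void
vdev_label_offsets_sketch(uint64_t psize, uint64_t offsets[VDEV_LABELS])
{
        for (int l = 0; l < VDEV_LABELS; l++)
                offsets[l] = vdev_label_offset(psize, l, 0);
}
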
static void
vdev_label_read(zio_t *zio, vdev_t *vd, int l, abd_t *buf, uint64_t offset,
    uint64_t size, zio_done_func_t *done, void *private, int flags)
{
        ASSERT(spa_config_held(zio->io_spa, SCL_STATE_ALL, RW_WRITER) ==
            SCL_STATE_ALL);
        ASSERT(flags & ZIO_FLAG_CONFIG_WRITER);

        zio_nowait(zio_read_phys(zio, vd,
            vdev_label_offset(vd->vdev_psize, l, offset),
            size, buf, ZIO_CHECKSUM_LABEL, done, private,
            ZIO_PRIORITY_SYNC_READ, flags, B_TRUE));
}

static void
vdev_label_write(zio_t *zio, vdev_t *vd, int l, abd_t *buf, uint64_t offset,
    uint64_t size, zio_done_func_t *done, void *private, int flags)
{
        ASSERT(spa_config_held(zio->io_spa, SCL_ALL, RW_WRITER) == SCL_ALL ||
            (spa_config_held(zio->io_spa, SCL_CONFIG | SCL_STATE, RW_READER) ==
            (SCL_CONFIG | SCL_STATE) &&
            dsl_pool_sync_context(spa_get_dsl(zio->io_spa))));
        ASSERT(flags & ZIO_FLAG_CONFIG_WRITER);

        zio_nowait(zio_write_phys(zio, vd,
            vdev_label_offset(vd->vdev_psize, l, offset),
            size, buf, ZIO_CHECKSUM_LABEL, done, private,
            ZIO_PRIORITY_SYNC_WRITE, flags, B_TRUE));
}

/*
 * Generate the nvlist representing this vdev's config.
 */
nvlist_t *
vdev_config_generate(spa_t *spa, vdev_t *vd, boolean_t getstats,
    vdev_config_flag_t flags)
{
        nvlist_t *nv = NULL;
        vdev_indirect_config_t *vic = &vd->vdev_indirect_config;

        nv = fnvlist_alloc();

        fnvlist_add_string(nv, ZPOOL_CONFIG_TYPE, vd->vdev_ops->vdev_op_type);
        if (!(flags & (VDEV_CONFIG_SPARE | VDEV_CONFIG_L2CACHE)))
                fnvlist_add_uint64(nv, ZPOOL_CONFIG_ID, vd->vdev_id);
        fnvlist_add_uint64(nv, ZPOOL_CONFIG_GUID, vd->vdev_guid);

        if (vd->vdev_path != NULL)
                fnvlist_add_string(nv, ZPOOL_CONFIG_PATH, vd->vdev_path);

        if (vd->vdev_devid != NULL)
                fnvlist_add_string(nv, ZPOOL_CONFIG_DEVID, vd->vdev_devid);

        if (vd->vdev_physpath != NULL)
                fnvlist_add_string(nv, ZPOOL_CONFIG_PHYS_PATH,
                    vd->vdev_physpath);

        if (vd->vdev_fru != NULL)
                fnvlist_add_string(nv, ZPOOL_CONFIG_FRU, vd->vdev_fru);

        if (vd->vdev_nparity != 0) {
                ASSERT(strcmp(vd->vdev_ops->vdev_op_type,
                    VDEV_TYPE_RAIDZ) == 0);

                /*
                 * Make sure someone hasn't managed to sneak a fancy new vdev
                 * into a crufty old storage pool.
                 */
                ASSERT(vd->vdev_nparity == 1 ||
                    (vd->vdev_nparity <= 2 &&
                    spa_version(spa) >= SPA_VERSION_RAIDZ2) ||
                    (vd->vdev_nparity <= 3 &&
                    spa_version(spa) >= SPA_VERSION_RAIDZ3));

                /*
                 * Note that we'll add the nparity tag even on storage pools
                 * that only support a single parity device -- older software
                 * will just ignore it.
                 */
                fnvlist_add_uint64(nv, ZPOOL_CONFIG_NPARITY, vd->vdev_nparity);
        }

        if (vd->vdev_wholedisk != -1ULL)
                fnvlist_add_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
                    vd->vdev_wholedisk);

        if (vd->vdev_not_present)
                fnvlist_add_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, 1);

        if (vd->vdev_isspare)
                fnvlist_add_uint64(nv, ZPOOL_CONFIG_IS_SPARE, 1);

        if (!(flags & (VDEV_CONFIG_SPARE | VDEV_CONFIG_L2CACHE)) &&
            vd == vd->vdev_top) {
                fnvlist_add_uint64(nv, ZPOOL_CONFIG_METASLAB_ARRAY,
                    vd->vdev_ms_array);
                fnvlist_add_uint64(nv, ZPOOL_CONFIG_METASLAB_SHIFT,
                    vd->vdev_ms_shift);
                fnvlist_add_uint64(nv, ZPOOL_CONFIG_ASHIFT, vd->vdev_ashift);
                fnvlist_add_uint64(nv, ZPOOL_CONFIG_ASIZE,
                    vd->vdev_asize);
                fnvlist_add_uint64(nv, ZPOOL_CONFIG_IS_LOG, vd->vdev_islog);
                if (vd->vdev_removing) {
                        fnvlist_add_uint64(nv, ZPOOL_CONFIG_REMOVING,
                            vd->vdev_removing);
                }
        }

        if (vd->vdev_dtl_sm != NULL) {
                fnvlist_add_uint64(nv, ZPOOL_CONFIG_DTL,
                    space_map_object(vd->vdev_dtl_sm));
        }

        if (vic->vic_mapping_object != 0) {
                fnvlist_add_uint64(nv, ZPOOL_CONFIG_INDIRECT_OBJECT,
                    vic->vic_mapping_object);
        }

        if (vic->vic_births_object != 0) {
                fnvlist_add_uint64(nv, ZPOOL_CONFIG_INDIRECT_BIRTHS,
                    vic->vic_births_object);
        }

        if (vic->vic_prev_indirect_vdev != UINT64_MAX) {
                fnvlist_add_uint64(nv, ZPOOL_CONFIG_PREV_INDIRECT_VDEV,
                    vic->vic_prev_indirect_vdev);
        }

        if (vd->vdev_crtxg)
                fnvlist_add_uint64(nv, ZPOOL_CONFIG_CREATE_TXG, vd->vdev_crtxg);

        if (flags & VDEV_CONFIG_MOS) {
                if (vd->vdev_leaf_zap != 0) {
                        ASSERT(vd->vdev_ops->vdev_op_leaf);
                        fnvlist_add_uint64(nv, ZPOOL_CONFIG_VDEV_LEAF_ZAP,
                            vd->vdev_leaf_zap);
                }

                if (vd->vdev_top_zap != 0) {
                        ASSERT(vd == vd->vdev_top);
                        fnvlist_add_uint64(nv, ZPOOL_CONFIG_VDEV_TOP_ZAP,
                            vd->vdev_top_zap);
                }
        }

        if (getstats) {
                vdev_stat_t vs;

                vdev_get_stats(vd, &vs);
                fnvlist_add_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
                    (uint64_t *)&vs, sizeof (vs) / sizeof (uint64_t));

                /* provide either current or previous scan information */
                pool_scan_stat_t ps;
                if (spa_scan_get_stats(spa, &ps) == 0) {
                        fnvlist_add_uint64_array(nv,
                            ZPOOL_CONFIG_SCAN_STATS, (uint64_t *)&ps,
                            sizeof (pool_scan_stat_t) / sizeof (uint64_t));
                }

                pool_removal_stat_t prs;
                if (spa_removal_get_stats(spa, &prs) == 0) {
                        fnvlist_add_uint64_array(nv,
                            ZPOOL_CONFIG_REMOVAL_STATS, (uint64_t *)&prs,
                            sizeof (prs) / sizeof (uint64_t));
                }

                /*
                 * Note: this can be called from open context
                 * (spa_get_stats()), so we need the rwlock to prevent
                 * the mapping from being changed by condensing.
                 */
                rw_enter(&vd->vdev_indirect_rwlock, RW_READER);
                if (vd->vdev_indirect_mapping != NULL) {
                        ASSERT(vd->vdev_indirect_births != NULL);
                        vdev_indirect_mapping_t *vim =
                            vd->vdev_indirect_mapping;
                        fnvlist_add_uint64(nv, ZPOOL_CONFIG_INDIRECT_SIZE,
                            vdev_indirect_mapping_size(vim));
                }
                rw_exit(&vd->vdev_indirect_rwlock);
                if (vd->vdev_mg != NULL &&
                    vd->vdev_mg->mg_fragmentation != ZFS_FRAG_INVALID) {
                        /*
                         * Compute approximately how much memory would be used
                         * for the indirect mapping if this device were to
                         * be removed.
                         *
                         * Note: If the frag metric is invalid, then not
                         * enough metaslabs have been converted to have
                         * histograms.
                         */
                        uint64_t seg_count = 0;

                        /*
                         * There are the same number of allocated segments
                         * as free segments, so we will have at least one
                         * entry per free segment.
                         */
                        for (int i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++) {
                                seg_count += vd->vdev_mg->mg_histogram[i];
                        }

                        /*
                         * The maximum length of a mapping is SPA_MAXBLOCKSIZE,
                         * so we need at least one entry per SPA_MAXBLOCKSIZE
                         * of allocated data.
                         */
                        seg_count += vd->vdev_stat.vs_alloc / SPA_MAXBLOCKSIZE;

                        fnvlist_add_uint64(nv, ZPOOL_CONFIG_INDIRECT_SIZE,
                            seg_count *
                            sizeof (vdev_indirect_mapping_entry_phys_t));
                }
        }

        if (!vd->vdev_ops->vdev_op_leaf) {
                nvlist_t **child;
                int c, idx;

                ASSERT(!vd->vdev_ishole);

                child = kmem_alloc(vd->vdev_children * sizeof (nvlist_t *),
                    KM_SLEEP);

                for (c = 0, idx = 0; c < vd->vdev_children; c++) {
                        vdev_t *cvd = vd->vdev_child[c];

                        /*
                         * If we're generating an nvlist of removing
                         * vdevs then skip over any device which is
                         * not being removed.
                         */
                        if ((flags & VDEV_CONFIG_REMOVING) &&
                            !cvd->vdev_removing)
                                continue;

                        child[idx++] = vdev_config_generate(spa, cvd,
                            getstats, flags);
                }

                if (idx) {
                        fnvlist_add_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
                            child, idx);
                }

                for (c = 0; c < idx; c++)
                        nvlist_free(child[c]);

                kmem_free(child, vd->vdev_children * sizeof (nvlist_t *));

        } else {
                const char *aux = NULL;

                if (vd->vdev_offline && !vd->vdev_tmpoffline)
                        fnvlist_add_uint64(nv, ZPOOL_CONFIG_OFFLINE, B_TRUE);
                if (vd->vdev_resilver_txg != 0)
                        fnvlist_add_uint64(nv, ZPOOL_CONFIG_RESILVER_TXG,
                            vd->vdev_resilver_txg);
                if (vd->vdev_faulted)
                        fnvlist_add_uint64(nv, ZPOOL_CONFIG_FAULTED, B_TRUE);
                if (vd->vdev_degraded)
                        fnvlist_add_uint64(nv, ZPOOL_CONFIG_DEGRADED, B_TRUE);
                if (vd->vdev_removed)
                        fnvlist_add_uint64(nv, ZPOOL_CONFIG_REMOVED, B_TRUE);
                if (vd->vdev_unspare)
                        fnvlist_add_uint64(nv, ZPOOL_CONFIG_UNSPARE, B_TRUE);
                if (vd->vdev_ishole)
                        fnvlist_add_uint64(nv, ZPOOL_CONFIG_IS_HOLE, B_TRUE);

                switch (vd->vdev_stat.vs_aux) {
                case VDEV_AUX_ERR_EXCEEDED:
                        aux = "err_exceeded";
                        break;

                case VDEV_AUX_EXTERNAL:
                        aux = "external";
                        break;
                }

                if (aux != NULL)
                        fnvlist_add_string(nv, ZPOOL_CONFIG_AUX_STATE, aux);

                if (vd->vdev_splitting && vd->vdev_orig_guid != 0LL) {
                        fnvlist_add_uint64(nv, ZPOOL_CONFIG_ORIG_GUID,
                            vd->vdev_orig_guid);
                }
        }

        return (nv);
}

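/*
 * Editor's illustrative sketch (not part of the original file): the nvlist
 * produced above nests one ZPOOL_CONFIG_CHILDREN array per interior vdev,
 * mirroring the vdev tree itself. The hypothetical walker below makes that
 * shape concrete by counting the vdevs described by a generated config; the
 * function name is invented for illustration.
 */
static int
vdev_config_count_sketch(nvlist_t *nv)
{
        nvlist_t **child;
        uint_t children;
        int count = 1;  /* count this vdev */

        if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
            &child, &children) == 0) {
                for (uint_t c = 0; c < children; c++)
                        count += vdev_config_count_sketch(child[c]);
        }
        return (count);
}
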
/*
 * Generate a view of the top-level vdevs. If we currently have holes
 * in the namespace, then generate an array which contains a list of holey
 * vdevs. Additionally, add the number of top-level children that currently
 * exist.
 */
void
vdev_top_config_generate(spa_t *spa, nvlist_t *config)
{
        vdev_t *rvd = spa->spa_root_vdev;
        uint64_t *array;
        uint_t c, idx;

        array = kmem_alloc(rvd->vdev_children * sizeof (uint64_t), KM_SLEEP);

        for (c = 0, idx = 0; c < rvd->vdev_children; c++) {
                vdev_t *tvd = rvd->vdev_child[c];

                if (tvd->vdev_ishole) {
                        array[idx++] = c;
                }
        }

        if (idx) {
                VERIFY(nvlist_add_uint64_array(config, ZPOOL_CONFIG_HOLE_ARRAY,
                    array, idx) == 0);
        }

        VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_VDEV_CHILDREN,
            rvd->vdev_children) == 0);

        kmem_free(array, rvd->vdev_children * sizeof (uint64_t));
}

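/*
 * Editor's worked example (illustrative only): for a root vdev whose three
 * top-level children are, in order, a mirror, a hole left by a prior device
 * removal, and a raidz, the function above would add
 *
 *	ZPOOL_CONFIG_HOLE_ARRAY = [1]
 *	ZPOOL_CONFIG_VDEV_CHILDREN = 3
 *
 * so that an importing system can reconstruct the namespace with the hole in
 * the correct position.
 */
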
/*
 * Returns the configuration from the label of the given vdev. For vdevs
 * which don't have a txg value stored on their label (i.e. spares/cache)
 * or have not been completely initialized (txg = 0) just return
 * the configuration from the first valid label we find. Otherwise,
 * find the most up-to-date label that does not exceed the specified
 * 'txg' value.
 */
nvlist_t *
vdev_label_read_config(vdev_t *vd, uint64_t txg)
{
        spa_t *spa = vd->vdev_spa;
        nvlist_t *config = NULL;
        vdev_phys_t *vp;
        abd_t *vp_abd;
        zio_t *zio;
        uint64_t best_txg = 0;
        int error = 0;
        int flags = ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_CANFAIL |
            ZIO_FLAG_SPECULATIVE;

        ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);

        if (!vdev_readable(vd))
                return (NULL);

        vp_abd = abd_alloc_linear(sizeof (vdev_phys_t), B_TRUE);
        vp = abd_to_buf(vp_abd);

retry:
        for (int l = 0; l < VDEV_LABELS; l++) {
                nvlist_t *label = NULL;

                zio = zio_root(spa, NULL, NULL, flags);

                vdev_label_read(zio, vd, l, vp_abd,
                    offsetof(vdev_label_t, vl_vdev_phys),
                    sizeof (vdev_phys_t), NULL, NULL, flags);

                if (zio_wait(zio) == 0 &&
                    nvlist_unpack(vp->vp_nvlist, sizeof (vp->vp_nvlist),
                    &label, 0) == 0) {
                        uint64_t label_txg = 0;

                        /*
                         * Auxiliary vdevs won't have txg values in their
                         * labels and newly added vdevs may not have been
                         * completely initialized so just return the
                         * configuration from the first valid label we
                         * encounter.
                         */
                        error = nvlist_lookup_uint64(label,
                            ZPOOL_CONFIG_POOL_TXG, &label_txg);
                        if ((error || label_txg == 0) && !config) {
                                config = label;
                                break;
                        } else if (label_txg <= txg && label_txg > best_txg) {
                                best_txg = label_txg;
                                nvlist_free(config);
                                config = fnvlist_dup(label);
                        }
                }

                if (label != NULL) {
                        nvlist_free(label);
                        label = NULL;
                }
        }

        if (config == NULL && !(flags & ZIO_FLAG_TRYHARD)) {
                flags |= ZIO_FLAG_TRYHARD;
                goto retry;
        }

        abd_free(vp_abd);

        return (config);
}

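/*
 * Editor's usage sketch (illustrative only): callers that don't care about a
 * txg bound pass -1ULL (i.e. UINT64_MAX) so that every label txg qualifies
 * and the newest label wins, as vdev_inuse() does below. A hypothetical
 * caller extracting the pool state might look like this; it assumes the
 * caller already holds the config locks that vdev_label_read_config()
 * asserts.
 */
static int
vdev_label_pool_state_sketch(vdev_t *vd, uint64_t *state)
{
        nvlist_t *label;
        int error;

        if ((label = vdev_label_read_config(vd, -1ULL)) == NULL)
                return (SET_ERROR(ENOENT));

        error = nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE, state);
        nvlist_free(label);
        return (error);
}
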
/*
 * Determine if a device is in use. The 'spare_guid' parameter will be filled
 * in with the device guid if this spare is active elsewhere on the system.
 */
static boolean_t
vdev_inuse(vdev_t *vd, uint64_t crtxg, vdev_labeltype_t reason,
    uint64_t *spare_guid, uint64_t *l2cache_guid)
{
        spa_t *spa = vd->vdev_spa;
        uint64_t state, pool_guid, device_guid, txg, spare_pool;
        uint64_t vdtxg = 0;
        nvlist_t *label;

        if (spare_guid)
                *spare_guid = 0ULL;
        if (l2cache_guid)
                *l2cache_guid = 0ULL;

        /*
         * Read the label, if any, and perform some basic sanity checks.
         */
        if ((label = vdev_label_read_config(vd, -1ULL)) == NULL)
                return (B_FALSE);

        (void) nvlist_lookup_uint64(label, ZPOOL_CONFIG_CREATE_TXG,
            &vdtxg);

        if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE,
            &state) != 0 ||
            nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID,
            &device_guid) != 0) {
                nvlist_free(label);
                return (B_FALSE);
        }

        if (state != POOL_STATE_SPARE && state != POOL_STATE_L2CACHE &&
            (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_GUID,
            &pool_guid) != 0 ||
            nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_TXG,
            &txg) != 0)) {
                nvlist_free(label);
                return (B_FALSE);
        }

        nvlist_free(label);

        /*
         * Check to see if this device indeed belongs to the pool it claims to
         * be a part of. The only way this is allowed is if the device is a hot
         * spare (which we check for later on).
         */
        if (state != POOL_STATE_SPARE && state != POOL_STATE_L2CACHE &&
            !spa_guid_exists(pool_guid, device_guid) &&
            !spa_spare_exists(device_guid, NULL, NULL) &&
            !spa_l2cache_exists(device_guid, NULL))
                return (B_FALSE);

        /*
         * If the transaction group is zero, then this is an initialized (but
         * unused) label. This is only an error if the create transaction
         * on-disk is the same as the one we're using now, in which case the
         * user has attempted to add the same vdev multiple times in the same
         * transaction.
         */
        if (state != POOL_STATE_SPARE && state != POOL_STATE_L2CACHE &&
            txg == 0 && vdtxg == crtxg)
                return (B_TRUE);

        /*
         * Check to see if this is a spare device. We do an explicit check for
         * spa_has_spare() here because it may be on our pending list of spares
         * to add. We also check if it is an l2cache device.
         */
        if (spa_spare_exists(device_guid, &spare_pool, NULL) ||
            spa_has_spare(spa, device_guid)) {
                if (spare_guid)
                        *spare_guid = device_guid;

                switch (reason) {
                case VDEV_LABEL_CREATE:
                case VDEV_LABEL_L2CACHE:
                        return (B_TRUE);

                case VDEV_LABEL_REPLACE:
                        return (!spa_has_spare(spa, device_guid) ||
                            spare_pool != 0ULL);

                case VDEV_LABEL_SPARE:
                        return (spa_has_spare(spa, device_guid));
                }
        }

        /*
         * Check to see if this is an l2cache device.
         */
        if (spa_l2cache_exists(device_guid, NULL))
                return (B_TRUE);

        /*
         * We can't rely on a pool's state if it's been imported
         * read-only. Instead we look to see if the pool is marked
         * read-only in the namespace and set the state to active.
         */
        if (state != POOL_STATE_SPARE && state != POOL_STATE_L2CACHE &&
            (spa = spa_by_guid(pool_guid, device_guid)) != NULL &&
            spa_mode(spa) == FREAD)
                state = POOL_STATE_ACTIVE;

        /*
         * If the device is marked ACTIVE, then this device is in use by another
         * pool on the system.
         */
        return (state == POOL_STATE_ACTIVE);
}

/*
 * Initialize a vdev label. We check to make sure each leaf device is not in
 * use, and writable. We put down an initial label which we will later
 * overwrite with a complete label. Note that it's important to do this
 * sequentially, not in parallel, so that we catch cases of multiple use of the
 * same leaf vdev in the vdev we're creating -- e.g. mirroring a disk with
 * itself.
 */
int
vdev_label_init(vdev_t *vd, uint64_t crtxg, vdev_labeltype_t reason)
{
        spa_t *spa = vd->vdev_spa;
        nvlist_t *label;
        vdev_phys_t *vp;
        abd_t *vp_abd;
        abd_t *pad2;
        uberblock_t *ub;
        abd_t *ub_abd;
        zio_t *zio;
        char *buf;
        size_t buflen;
        int error;
        uint64_t spare_guid, l2cache_guid;
        int flags = ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_CANFAIL;

        ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

        for (int c = 0; c < vd->vdev_children; c++)
                if ((error = vdev_label_init(vd->vdev_child[c],
                    crtxg, reason)) != 0)
                        return (error);

        /* Track the creation time for this vdev */
        vd->vdev_crtxg = crtxg;

        if (!vd->vdev_ops->vdev_op_leaf || !spa_writeable(spa))
                return (0);

        /*
         * Dead vdevs cannot be initialized.
         */
        if (vdev_is_dead(vd))
                return (SET_ERROR(EIO));

        /*
         * Determine if the vdev is in use.
         */
        if (reason != VDEV_LABEL_REMOVE && reason != VDEV_LABEL_SPLIT &&
            vdev_inuse(vd, crtxg, reason, &spare_guid, &l2cache_guid))
                return (SET_ERROR(EBUSY));

        /*
         * If this is a request to add or replace a spare or l2cache device
         * that is in use elsewhere on the system, then we must update the
         * guid (which was initialized to a random value) to reflect the
         * actual GUID (which is shared between multiple pools).
         * (A sketch of the guid-delta arithmetic follows this function.)
         */
        if (reason != VDEV_LABEL_REMOVE && reason != VDEV_LABEL_L2CACHE &&
            spare_guid != 0ULL) {
                uint64_t guid_delta = spare_guid - vd->vdev_guid;

                vd->vdev_guid += guid_delta;

                for (vdev_t *pvd = vd; pvd != NULL; pvd = pvd->vdev_parent)
                        pvd->vdev_guid_sum += guid_delta;

                /*
                 * If this is a replacement, then we want to fallthrough to the
                 * rest of the code. If we're adding a spare, then it's already
                 * labeled appropriately and we can just return.
                 */
                if (reason == VDEV_LABEL_SPARE)
                        return (0);
                ASSERT(reason == VDEV_LABEL_REPLACE ||
                    reason == VDEV_LABEL_SPLIT);
        }

        if (reason != VDEV_LABEL_REMOVE && reason != VDEV_LABEL_SPARE &&
            l2cache_guid != 0ULL) {
                uint64_t guid_delta = l2cache_guid - vd->vdev_guid;

                vd->vdev_guid += guid_delta;

                for (vdev_t *pvd = vd; pvd != NULL; pvd = pvd->vdev_parent)
                        pvd->vdev_guid_sum += guid_delta;

                /*
                 * If this is a replacement, then we want to fallthrough to the
                 * rest of the code. If we're adding an l2cache, then it's
                 * already labeled appropriately and we can just return.
                 */
                if (reason == VDEV_LABEL_L2CACHE)
                        return (0);
                ASSERT(reason == VDEV_LABEL_REPLACE);
        }

        /*
         * TRIM the whole thing, excluding the blank space and boot header
         * as specified by ZFS On-Disk Specification (section 1.3), so that
         * we start with a clean slate.
         * It's just an optimization, so we don't care if it fails.
         * Don't TRIM if removing so that we don't interfere with zpool
         * disaster recovery.
         */
        if (zfs_trim_enabled && vdev_trim_on_init && !vd->vdev_notrim &&
            (reason == VDEV_LABEL_CREATE || reason == VDEV_LABEL_SPARE ||
            reason == VDEV_LABEL_L2CACHE))
                zio_wait(zio_trim(NULL, spa, vd, VDEV_SKIP_SIZE,
                    vd->vdev_psize - VDEV_SKIP_SIZE));

        /*
         * Initialize its label.
         */
        vp_abd = abd_alloc_linear(sizeof (vdev_phys_t), B_TRUE);
        abd_zero(vp_abd, sizeof (vdev_phys_t));
        vp = abd_to_buf(vp_abd);

        /*
         * Generate a label describing the pool and our top-level vdev.
         * We mark it as being from txg 0 to indicate that it's not
         * really part of an active pool just yet. The labels will
         * be written again with a meaningful txg by spa_sync().
         */
        if (reason == VDEV_LABEL_SPARE ||
            (reason == VDEV_LABEL_REMOVE && vd->vdev_isspare)) {
                /*
                 * For inactive hot spares, we generate a special label that
                 * identifies it as a mutually shared hot spare. We write the
                 * label if we are adding a hot spare, or if we are removing an
                 * active hot spare (in which case we want to revert the
                 * labels).
                 */
                VERIFY(nvlist_alloc(&label, NV_UNIQUE_NAME, KM_SLEEP) == 0);

                VERIFY(nvlist_add_uint64(label, ZPOOL_CONFIG_VERSION,
                    spa_version(spa)) == 0);
                VERIFY(nvlist_add_uint64(label, ZPOOL_CONFIG_POOL_STATE,
                    POOL_STATE_SPARE) == 0);
                VERIFY(nvlist_add_uint64(label, ZPOOL_CONFIG_GUID,
                    vd->vdev_guid) == 0);
        } else if (reason == VDEV_LABEL_L2CACHE ||
            (reason == VDEV_LABEL_REMOVE && vd->vdev_isl2cache)) {
                /*
                 * For level 2 ARC devices, add a special label.
                 */
                VERIFY(nvlist_alloc(&label, NV_UNIQUE_NAME, KM_SLEEP) == 0);

                VERIFY(nvlist_add_uint64(label, ZPOOL_CONFIG_VERSION,
                    spa_version(spa)) == 0);
                VERIFY(nvlist_add_uint64(label, ZPOOL_CONFIG_POOL_STATE,
                    POOL_STATE_L2CACHE) == 0);
                VERIFY(nvlist_add_uint64(label, ZPOOL_CONFIG_GUID,
                    vd->vdev_guid) == 0);
        } else {
                uint64_t txg = 0ULL;

                if (reason == VDEV_LABEL_SPLIT)
                        txg = spa->spa_uberblock.ub_txg;
                label = spa_config_generate(spa, vd, txg, B_FALSE);

                /*
                 * Add our creation time. This allows us to detect multiple
                 * vdev uses as described above, and automatically expires if we
                 * fail.
                 */
                VERIFY(nvlist_add_uint64(label, ZPOOL_CONFIG_CREATE_TXG,
                    crtxg) == 0);
        }

        buf = vp->vp_nvlist;
        buflen = sizeof (vp->vp_nvlist);

        error = nvlist_pack(label, &buf, &buflen, NV_ENCODE_XDR, KM_SLEEP);
        if (error != 0) {
                nvlist_free(label);
                abd_free(vp_abd);
                /* EFAULT means nvlist_pack ran out of room */
                return (error == EFAULT ? ENAMETOOLONG : EINVAL);
        }

        /*
         * Initialize uberblock template.
         */
        ub_abd = abd_alloc_linear(VDEV_UBERBLOCK_RING, B_TRUE);
        abd_zero(ub_abd, VDEV_UBERBLOCK_RING);
        abd_copy_from_buf(ub_abd, &spa->spa_uberblock, sizeof (uberblock_t));
        ub = abd_to_buf(ub_abd);
        ub->ub_txg = 0;

        /* Initialize the 2nd padding area. */
        pad2 = abd_alloc_for_io(VDEV_PAD_SIZE, B_TRUE);
        abd_zero(pad2, VDEV_PAD_SIZE);

        /*
         * Write everything in parallel.
         */
retry:
        zio = zio_root(spa, NULL, NULL, flags);

        for (int l = 0; l < VDEV_LABELS; l++) {

                vdev_label_write(zio, vd, l, vp_abd,
                    offsetof(vdev_label_t, vl_vdev_phys),
                    sizeof (vdev_phys_t), NULL, NULL, flags);

                /*
                 * Skip the 1st padding area.
                 * Zero out the 2nd padding area, which might contain
                 * leftover data from a previous filesystem format.
                 */
                vdev_label_write(zio, vd, l, pad2,
                    offsetof(vdev_label_t, vl_pad2),
                    VDEV_PAD_SIZE, NULL, NULL, flags);

                vdev_label_write(zio, vd, l, ub_abd,
                    offsetof(vdev_label_t, vl_uberblock),
                    VDEV_UBERBLOCK_RING, NULL, NULL, flags);
        }

        error = zio_wait(zio);

        if (error != 0 && !(flags & ZIO_FLAG_TRYHARD)) {
                flags |= ZIO_FLAG_TRYHARD;
                goto retry;
        }

        nvlist_free(label);
        abd_free(pad2);
        abd_free(ub_abd);
        abd_free(vp_abd);

        /*
         * If this vdev hasn't been previously identified as a spare, then we
         * mark it as such only if a) we are labeling it as a spare, or b) it
         * exists as a spare elsewhere in the system. Do the same for
         * level 2 ARC devices.
         */
        if (error == 0 && !vd->vdev_isspare &&
            (reason == VDEV_LABEL_SPARE ||
            spa_spare_exists(vd->vdev_guid, NULL, NULL)))
                spa_spare_add(vd);

        if (error == 0 && !vd->vdev_isl2cache &&
            (reason == VDEV_LABEL_L2CACHE ||
            spa_l2cache_exists(vd->vdev_guid, NULL)))
                spa_l2cache_add(vd);

        return (error);
}

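/*
 * Editor's illustrative sketch (not part of the original file): the
 * guid-delta arithmetic above relies on well-defined uint64_t wraparound.
 * Even when the shared guid is smaller than the vdev's current guid, adding
 * the (wrapped) delta rewrites the guid exactly while keeping every
 * ancestor's vdev_guid_sum consistent. The helper below is a hypothetical
 * restatement of that step.
 */
static void
vdev_guid_delta_sketch(vdev_t *vd, uint64_t shared_guid)
{
        /* Modular arithmetic: correct even if shared_guid < vd->vdev_guid. */
        uint64_t guid_delta = shared_guid - vd->vdev_guid;

        vd->vdev_guid += guid_delta;    /* now exactly shared_guid */

        /* Propagate the same delta into every ancestor's guid sum. */
        for (vdev_t *pvd = vd; pvd != NULL; pvd = pvd->vdev_parent)
                pvd->vdev_guid_sum += guid_delta;
}
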
int
vdev_label_write_pad2(vdev_t *vd, const char *buf, size_t size)
{
        spa_t *spa = vd->vdev_spa;
        zio_t *zio;
        abd_t *pad2;
        int flags = ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_CANFAIL;
        int error;

        if (size > VDEV_PAD_SIZE)
                return (EINVAL);

        if (!vd->vdev_ops->vdev_op_leaf)
                return (ENODEV);
        if (vdev_is_dead(vd))
                return (ENXIO);

        ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

        pad2 = abd_alloc_for_io(VDEV_PAD_SIZE, B_TRUE);
        abd_zero(pad2, VDEV_PAD_SIZE);
        abd_copy_from_buf(pad2, buf, size);

retry:
        zio = zio_root(spa, NULL, NULL, flags);
        vdev_label_write(zio, vd, 0, pad2,
            offsetof(vdev_label_t, vl_pad2),
            VDEV_PAD_SIZE, NULL, NULL, flags);
        error = zio_wait(zio);
        if (error != 0 && !(flags & ZIO_FLAG_TRYHARD)) {
                flags |= ZIO_FLAG_TRYHARD;
                goto retry;
        }

        abd_free(pad2);
        return (error);
}

/*
 * ==========================================================================
 * uberblock load/sync
 * ==========================================================================
 */

/*
 * Consider the following situation: txg is safely synced to disk. We've
 * written the first uberblock for txg + 1, and then we lose power. When we
 * come back up, we fail to see the uberblock for txg + 1 because, say,
 * it was on a mirrored device and the replica to which we wrote txg + 1
 * is now offline. If we then make some changes and sync txg + 1, and then
 * the missing replica comes back, then for a few seconds we'll have two
 * conflicting uberblocks on disk with the same txg. The solution is simple:
 * among uberblocks with equal txg, choose the one with the latest timestamp.
 */

static int
vdev_uberblock_compare(uberblock_t *ub1, uberblock_t *ub2)
{
        if (ub1->ub_txg < ub2->ub_txg)
                return (-1);
        if (ub1->ub_txg > ub2->ub_txg)
                return (1);

        if (ub1->ub_timestamp < ub2->ub_timestamp)
                return (-1);
        if (ub1->ub_timestamp > ub2->ub_timestamp)
                return (1);

        return (0);
}

struct ubl_cbdata {
        uberblock_t     *ubl_ubbest;    /* Best uberblock */
        vdev_t          *ubl_vd;        /* vdev associated with the above */
};

static void
vdev_uberblock_load_done(zio_t *zio)
{
        vdev_t *vd = zio->io_vd;
        spa_t *spa = zio->io_spa;
        zio_t *rio = zio->io_private;
        uberblock_t *ub = abd_to_buf(zio->io_abd);
        struct ubl_cbdata *cbp = rio->io_private;

        ASSERT3U(zio->io_size, ==, VDEV_UBERBLOCK_SIZE(vd));

        if (zio->io_error == 0 && uberblock_verify(ub) == 0) {
                mutex_enter(&rio->io_lock);
                if (ub->ub_txg <= spa->spa_load_max_txg &&
                    vdev_uberblock_compare(ub, cbp->ubl_ubbest) > 0) {
                        /*
                         * Keep track of the vdev in which this uberblock
                         * was found. We will use this information later
                         * to obtain the config nvlist associated with
                         * this uberblock.
                         */
                        *cbp->ubl_ubbest = *ub;
                        cbp->ubl_vd = vd;
                }
                mutex_exit(&rio->io_lock);
        }

        abd_free(zio->io_abd);
}

static void
vdev_uberblock_load_impl(zio_t *zio, vdev_t *vd, int flags,
    struct ubl_cbdata *cbp)
{
        for (int c = 0; c < vd->vdev_children; c++)
                vdev_uberblock_load_impl(zio, vd->vdev_child[c], flags, cbp);

        if (vd->vdev_ops->vdev_op_leaf && vdev_readable(vd)) {
                for (int l = 0; l < VDEV_LABELS; l++) {
                        for (int n = 0; n < VDEV_UBERBLOCK_COUNT(vd); n++) {
                                vdev_label_read(zio, vd, l,
                                    abd_alloc_linear(VDEV_UBERBLOCK_SIZE(vd),
                                    B_TRUE), VDEV_UBERBLOCK_OFFSET(vd, n),
                                    VDEV_UBERBLOCK_SIZE(vd),
                                    vdev_uberblock_load_done, zio, flags);
                        }
                }
        }
}

/*
 * Reads the 'best' uberblock from disk along with its associated
 * configuration. First, we read the uberblock array of each label of each
 * vdev, keeping track of the uberblock with the highest txg in each array.
 * Then, we read the configuration from the same vdev as the best uberblock.
 */
void
vdev_uberblock_load(vdev_t *rvd, uberblock_t *ub, nvlist_t **config)
{
        zio_t *zio;
        spa_t *spa = rvd->vdev_spa;
        struct ubl_cbdata cb;
        int flags = ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_CANFAIL |
            ZIO_FLAG_SPECULATIVE | ZIO_FLAG_TRYHARD;

        ASSERT(ub);
        ASSERT(config);

        bzero(ub, sizeof (uberblock_t));
        *config = NULL;

        cb.ubl_ubbest = ub;
        cb.ubl_vd = NULL;

        spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
        zio = zio_root(spa, NULL, &cb, flags);
        vdev_uberblock_load_impl(zio, rvd, flags, &cb);
        (void) zio_wait(zio);

        /*
         * It's possible that the best uberblock was discovered on a label
         * that has a configuration which was written in a future txg.
         * Search all labels on this vdev to find the configuration that
         * matches the txg for our uberblock.
         */
| 25 */ 26 27/* 28 * Virtual Device Labels 29 * --------------------- 30 * 31 * The vdev label serves several distinct purposes: 32 * 33 * 1. Uniquely identify this device as part of a ZFS pool and confirm its 34 * identity within the pool. 35 * 36 * 2. Verify that all the devices given in a configuration are present 37 * within the pool. 38 * 39 * 3. Determine the uberblock for the pool. 40 * 41 * 4. In case of an import operation, determine the configuration of the 42 * toplevel vdev of which it is a part. 43 * 44 * 5. If an import operation cannot find all the devices in the pool, 45 * provide enough information to the administrator to determine which 46 * devices are missing. 47 * 48 * It is important to note that while the kernel is responsible for writing the 49 * label, it only consumes the information in the first three cases. The 50 * latter information is only consumed in userland when determining the 51 * configuration to import a pool. 52 * 53 * 54 * Label Organization 55 * ------------------ 56 * 57 * Before describing the contents of the label, it's important to understand how 58 * the labels are written and updated with respect to the uberblock. 59 * 60 * When the pool configuration is altered, either because it was newly created 61 * or a device was added, we want to update all the labels such that we can deal 62 * with fatal failure at any point. To this end, each disk has two labels which 63 * are updated before and after the uberblock is synced. Assuming we have 64 * labels and an uberblock with the following transaction groups: 65 * 66 * L1 UB L2 67 * +------+ +------+ +------+ 68 * | | | | | | 69 * | t10 | | t10 | | t10 | 70 * | | | | | | 71 * +------+ +------+ +------+ 72 * 73 * In this stable state, the labels and the uberblock were all updated within 74 * the same transaction group (10). Each label is mirrored and checksummed, so 75 * that we can detect when we fail partway through writing the label. 76 * 77 * In order to identify which labels are valid, the labels are written in the 78 * following manner: 79 * 80 * 1. For each vdev, update 'L1' to the new label 81 * 2. Update the uberblock 82 * 3. For each vdev, update 'L2' to the new label 83 * 84 * Given arbitrary failure, we can determine the correct label to use based on 85 * the transaction group. If we fail after updating L1 but before updating the 86 * UB, we will notice that L1's transaction group is greater than the uberblock, 87 * so L2 must be valid. If we fail after writing the uberblock but before 88 * writing L2, we will notice that L2's transaction group is less than L1, and 89 * therefore L1 is valid. 90 * 91 * Another added complexity is that not every label is updated when the config 92 * is synced. If we add a single device, we do not want to have to re-write 93 * every label for every device in the pool. This means that both L1 and L2 may 94 * be older than the pool uberblock, because the necessary information is stored 95 * on another vdev. 96 * 97 * 98 * On-disk Format 99 * -------------- 100 * 101 * The vdev label consists of two distinct parts, and is wrapped within the 102 * vdev_label_t structure. The label includes 8k of padding to permit legacy 103 * VTOC disk labels, but is otherwise ignored. 104 * 105 * The first half of the label is a packed nvlist which contains pool wide 106 * properties, per-vdev properties, and configuration information. It is 107 * described in more detail below. 108 * 109 * The latter half of the label consists of a redundant array of uberblocks. 
110 * These uberblocks are updated whenever a transaction group is committed, 111 * or when the configuration is updated. When a pool is loaded, we scan each 112 * vdev for the 'best' uberblock. 113 * 114 * 115 * Configuration Information 116 * ------------------------- 117 * 118 * The nvlist describing the pool and vdev contains the following elements: 119 * 120 * version ZFS on-disk version 121 * name Pool name 122 * state Pool state 123 * txg Transaction group in which this label was written 124 * pool_guid Unique identifier for this pool 125 * vdev_tree An nvlist describing vdev tree. 126 * features_for_read 127 * An nvlist of the features necessary for reading the MOS. 128 * 129 * Each leaf device label also contains the following: 130 * 131 * top_guid Unique ID for top-level vdev in which this is contained 132 * guid Unique ID for the leaf vdev 133 * 134 * The 'vs' configuration follows the format described in 'spa_config.c'. 135 */ 136 137#include <sys/zfs_context.h> 138#include <sys/spa.h> 139#include <sys/spa_impl.h> 140#include <sys/dmu.h> 141#include <sys/zap.h> 142#include <sys/vdev.h> 143#include <sys/vdev_impl.h> 144#include <sys/uberblock_impl.h> 145#include <sys/metaslab.h> 146#include <sys/metaslab_impl.h> 147#include <sys/zio.h> 148#include <sys/dsl_scan.h> 149#include <sys/abd.h> 150#include <sys/fs/zfs.h> 151#include <sys/trim_map.h> 152 153static boolean_t vdev_trim_on_init = B_TRUE; 154SYSCTL_DECL(_vfs_zfs_vdev); 155SYSCTL_INT(_vfs_zfs_vdev, OID_AUTO, trim_on_init, CTLFLAG_RW, 156 &vdev_trim_on_init, 0, "Enable/disable full vdev trim on initialisation"); 157 158/* 159 * Basic routines to read and write from a vdev label. 160 * Used throughout the rest of this file. 161 */ 162uint64_t 163vdev_label_offset(uint64_t psize, int l, uint64_t offset) 164{ 165 ASSERT(offset < sizeof (vdev_label_t)); 166 ASSERT(P2PHASE_TYPED(psize, sizeof (vdev_label_t), uint64_t) == 0); 167 168 return (offset + l * sizeof (vdev_label_t) + (l < VDEV_LABELS / 2 ? 169 0 : psize - VDEV_LABELS * sizeof (vdev_label_t))); 170} 171 172/* 173 * Returns back the vdev label associated with the passed in offset. 174 */ 175int 176vdev_label_number(uint64_t psize, uint64_t offset) 177{ 178 int l; 179 180 if (offset >= psize - VDEV_LABEL_END_SIZE) { 181 offset -= psize - VDEV_LABEL_END_SIZE; 182 offset += (VDEV_LABELS / 2) * sizeof (vdev_label_t); 183 } 184 l = offset / sizeof (vdev_label_t); 185 return (l < VDEV_LABELS ? 
l : -1); 186} 187 188static void 189vdev_label_read(zio_t *zio, vdev_t *vd, int l, abd_t *buf, uint64_t offset, 190 uint64_t size, zio_done_func_t *done, void *private, int flags) 191{ 192 ASSERT(spa_config_held(zio->io_spa, SCL_STATE_ALL, RW_WRITER) == 193 SCL_STATE_ALL); 194 ASSERT(flags & ZIO_FLAG_CONFIG_WRITER); 195 196 zio_nowait(zio_read_phys(zio, vd, 197 vdev_label_offset(vd->vdev_psize, l, offset), 198 size, buf, ZIO_CHECKSUM_LABEL, done, private, 199 ZIO_PRIORITY_SYNC_READ, flags, B_TRUE)); 200} 201 202static void 203vdev_label_write(zio_t *zio, vdev_t *vd, int l, abd_t *buf, uint64_t offset, 204 uint64_t size, zio_done_func_t *done, void *private, int flags) 205{ 206 ASSERT(spa_config_held(zio->io_spa, SCL_ALL, RW_WRITER) == SCL_ALL || 207 (spa_config_held(zio->io_spa, SCL_CONFIG | SCL_STATE, RW_READER) == 208 (SCL_CONFIG | SCL_STATE) && 209 dsl_pool_sync_context(spa_get_dsl(zio->io_spa)))); 210 ASSERT(flags & ZIO_FLAG_CONFIG_WRITER); 211 212 zio_nowait(zio_write_phys(zio, vd, 213 vdev_label_offset(vd->vdev_psize, l, offset), 214 size, buf, ZIO_CHECKSUM_LABEL, done, private, 215 ZIO_PRIORITY_SYNC_WRITE, flags, B_TRUE)); 216} 217 218/* 219 * Generate the nvlist representing this vdev's config. 220 */ 221nvlist_t * 222vdev_config_generate(spa_t *spa, vdev_t *vd, boolean_t getstats, 223 vdev_config_flag_t flags) 224{ 225 nvlist_t *nv = NULL; 226 vdev_indirect_config_t *vic = &vd->vdev_indirect_config; 227 228 nv = fnvlist_alloc(); 229 230 fnvlist_add_string(nv, ZPOOL_CONFIG_TYPE, vd->vdev_ops->vdev_op_type); 231 if (!(flags & (VDEV_CONFIG_SPARE | VDEV_CONFIG_L2CACHE))) 232 fnvlist_add_uint64(nv, ZPOOL_CONFIG_ID, vd->vdev_id); 233 fnvlist_add_uint64(nv, ZPOOL_CONFIG_GUID, vd->vdev_guid); 234 235 if (vd->vdev_path != NULL) 236 fnvlist_add_string(nv, ZPOOL_CONFIG_PATH, vd->vdev_path); 237 238 if (vd->vdev_devid != NULL) 239 fnvlist_add_string(nv, ZPOOL_CONFIG_DEVID, vd->vdev_devid); 240 241 if (vd->vdev_physpath != NULL) 242 fnvlist_add_string(nv, ZPOOL_CONFIG_PHYS_PATH, 243 vd->vdev_physpath); 244 245 if (vd->vdev_fru != NULL) 246 fnvlist_add_string(nv, ZPOOL_CONFIG_FRU, vd->vdev_fru); 247 248 if (vd->vdev_nparity != 0) { 249 ASSERT(strcmp(vd->vdev_ops->vdev_op_type, 250 VDEV_TYPE_RAIDZ) == 0); 251 252 /* 253 * Make sure someone hasn't managed to sneak a fancy new vdev 254 * into a crufty old storage pool. 255 */ 256 ASSERT(vd->vdev_nparity == 1 || 257 (vd->vdev_nparity <= 2 && 258 spa_version(spa) >= SPA_VERSION_RAIDZ2) || 259 (vd->vdev_nparity <= 3 && 260 spa_version(spa) >= SPA_VERSION_RAIDZ3)); 261 262 /* 263 * Note that we'll add the nparity tag even on storage pools 264 * that only support a single parity device -- older software 265 * will just ignore it. 
266 */ 267 fnvlist_add_uint64(nv, ZPOOL_CONFIG_NPARITY, vd->vdev_nparity); 268 } 269 270 if (vd->vdev_wholedisk != -1ULL) 271 fnvlist_add_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK, 272 vd->vdev_wholedisk); 273 274 if (vd->vdev_not_present) 275 fnvlist_add_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, 1); 276 277 if (vd->vdev_isspare) 278 fnvlist_add_uint64(nv, ZPOOL_CONFIG_IS_SPARE, 1); 279 280 if (!(flags & (VDEV_CONFIG_SPARE | VDEV_CONFIG_L2CACHE)) && 281 vd == vd->vdev_top) { 282 fnvlist_add_uint64(nv, ZPOOL_CONFIG_METASLAB_ARRAY, 283 vd->vdev_ms_array); 284 fnvlist_add_uint64(nv, ZPOOL_CONFIG_METASLAB_SHIFT, 285 vd->vdev_ms_shift); 286 fnvlist_add_uint64(nv, ZPOOL_CONFIG_ASHIFT, vd->vdev_ashift); 287 fnvlist_add_uint64(nv, ZPOOL_CONFIG_ASIZE, 288 vd->vdev_asize); 289 fnvlist_add_uint64(nv, ZPOOL_CONFIG_IS_LOG, vd->vdev_islog); 290 if (vd->vdev_removing) { 291 fnvlist_add_uint64(nv, ZPOOL_CONFIG_REMOVING, 292 vd->vdev_removing); 293 } 294 } 295 296 if (vd->vdev_dtl_sm != NULL) { 297 fnvlist_add_uint64(nv, ZPOOL_CONFIG_DTL, 298 space_map_object(vd->vdev_dtl_sm)); 299 } 300 301 if (vic->vic_mapping_object != 0) { 302 fnvlist_add_uint64(nv, ZPOOL_CONFIG_INDIRECT_OBJECT, 303 vic->vic_mapping_object); 304 } 305 306 if (vic->vic_births_object != 0) { 307 fnvlist_add_uint64(nv, ZPOOL_CONFIG_INDIRECT_BIRTHS, 308 vic->vic_births_object); 309 } 310 311 if (vic->vic_prev_indirect_vdev != UINT64_MAX) { 312 fnvlist_add_uint64(nv, ZPOOL_CONFIG_PREV_INDIRECT_VDEV, 313 vic->vic_prev_indirect_vdev); 314 } 315 316 if (vd->vdev_crtxg) 317 fnvlist_add_uint64(nv, ZPOOL_CONFIG_CREATE_TXG, vd->vdev_crtxg); 318 319 if (flags & VDEV_CONFIG_MOS) { 320 if (vd->vdev_leaf_zap != 0) { 321 ASSERT(vd->vdev_ops->vdev_op_leaf); 322 fnvlist_add_uint64(nv, ZPOOL_CONFIG_VDEV_LEAF_ZAP, 323 vd->vdev_leaf_zap); 324 } 325 326 if (vd->vdev_top_zap != 0) { 327 ASSERT(vd == vd->vdev_top); 328 fnvlist_add_uint64(nv, ZPOOL_CONFIG_VDEV_TOP_ZAP, 329 vd->vdev_top_zap); 330 } 331 } 332 333 if (getstats) { 334 vdev_stat_t vs; 335 336 vdev_get_stats(vd, &vs); 337 fnvlist_add_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS, 338 (uint64_t *)&vs, sizeof (vs) / sizeof (uint64_t)); 339 340 /* provide either current or previous scan information */ 341 pool_scan_stat_t ps; 342 if (spa_scan_get_stats(spa, &ps) == 0) { 343 fnvlist_add_uint64_array(nv, 344 ZPOOL_CONFIG_SCAN_STATS, (uint64_t *)&ps, 345 sizeof (pool_scan_stat_t) / sizeof (uint64_t)); 346 } 347 348 pool_removal_stat_t prs; 349 if (spa_removal_get_stats(spa, &prs) == 0) { 350 fnvlist_add_uint64_array(nv, 351 ZPOOL_CONFIG_REMOVAL_STATS, (uint64_t *)&prs, 352 sizeof (prs) / sizeof (uint64_t)); 353 } 354 355 /* 356 * Note: this can be called from open context 357 * (spa_get_stats()), so we need the rwlock to prevent 358 * the mapping from being changed by condensing. 359 */ 360 rw_enter(&vd->vdev_indirect_rwlock, RW_READER); 361 if (vd->vdev_indirect_mapping != NULL) { 362 ASSERT(vd->vdev_indirect_births != NULL); 363 vdev_indirect_mapping_t *vim = 364 vd->vdev_indirect_mapping; 365 fnvlist_add_uint64(nv, ZPOOL_CONFIG_INDIRECT_SIZE, 366 vdev_indirect_mapping_size(vim)); 367 } 368 rw_exit(&vd->vdev_indirect_rwlock); 369 if (vd->vdev_mg != NULL && 370 vd->vdev_mg->mg_fragmentation != ZFS_FRAG_INVALID) { 371 /* 372 * Compute approximately how much memory would be used 373 * for the indirect mapping if this device were to 374 * be removed. 375 * 376 * Note: If the frag metric is invalid, then not 377 * enough metaslabs have been converted to have 378 * histograms. 
379 */ 380 uint64_t seg_count = 0; 381 382 /* 383 * There are the same number of allocated segments 384 * as free segments, so we will have at least one 385 * entry per free segment. 386 */ 387 for (int i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++) { 388 seg_count += vd->vdev_mg->mg_histogram[i]; 389 } 390 391 /* 392 * The maximum length of a mapping is SPA_MAXBLOCKSIZE, 393 * so we need at least one entry per SPA_MAXBLOCKSIZE 394 * of allocated data. 395 */ 396 seg_count += vd->vdev_stat.vs_alloc / SPA_MAXBLOCKSIZE; 397 398 fnvlist_add_uint64(nv, ZPOOL_CONFIG_INDIRECT_SIZE, 399 seg_count * 400 sizeof (vdev_indirect_mapping_entry_phys_t)); 401 } 402 } 403 404 if (!vd->vdev_ops->vdev_op_leaf) { 405 nvlist_t **child; 406 int c, idx; 407 408 ASSERT(!vd->vdev_ishole); 409 410 child = kmem_alloc(vd->vdev_children * sizeof (nvlist_t *), 411 KM_SLEEP); 412 413 for (c = 0, idx = 0; c < vd->vdev_children; c++) { 414 vdev_t *cvd = vd->vdev_child[c]; 415 416 /* 417 * If we're generating an nvlist of removing 418 * vdevs then skip over any device which is 419 * not being removed. 420 */ 421 if ((flags & VDEV_CONFIG_REMOVING) && 422 !cvd->vdev_removing) 423 continue; 424 425 child[idx++] = vdev_config_generate(spa, cvd, 426 getstats, flags); 427 } 428 429 if (idx) { 430 fnvlist_add_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, 431 child, idx); 432 } 433 434 for (c = 0; c < idx; c++) 435 nvlist_free(child[c]); 436 437 kmem_free(child, vd->vdev_children * sizeof (nvlist_t *)); 438 439 } else { 440 const char *aux = NULL; 441 442 if (vd->vdev_offline && !vd->vdev_tmpoffline) 443 fnvlist_add_uint64(nv, ZPOOL_CONFIG_OFFLINE, B_TRUE); 444 if (vd->vdev_resilver_txg != 0) 445 fnvlist_add_uint64(nv, ZPOOL_CONFIG_RESILVER_TXG, 446 vd->vdev_resilver_txg); 447 if (vd->vdev_faulted) 448 fnvlist_add_uint64(nv, ZPOOL_CONFIG_FAULTED, B_TRUE); 449 if (vd->vdev_degraded) 450 fnvlist_add_uint64(nv, ZPOOL_CONFIG_DEGRADED, B_TRUE); 451 if (vd->vdev_removed) 452 fnvlist_add_uint64(nv, ZPOOL_CONFIG_REMOVED, B_TRUE); 453 if (vd->vdev_unspare) 454 fnvlist_add_uint64(nv, ZPOOL_CONFIG_UNSPARE, B_TRUE); 455 if (vd->vdev_ishole) 456 fnvlist_add_uint64(nv, ZPOOL_CONFIG_IS_HOLE, B_TRUE); 457 458 switch (vd->vdev_stat.vs_aux) { 459 case VDEV_AUX_ERR_EXCEEDED: 460 aux = "err_exceeded"; 461 break; 462 463 case VDEV_AUX_EXTERNAL: 464 aux = "external"; 465 break; 466 } 467 468 if (aux != NULL) 469 fnvlist_add_string(nv, ZPOOL_CONFIG_AUX_STATE, aux); 470 471 if (vd->vdev_splitting && vd->vdev_orig_guid != 0LL) { 472 fnvlist_add_uint64(nv, ZPOOL_CONFIG_ORIG_GUID, 473 vd->vdev_orig_guid); 474 } 475 } 476 477 return (nv); 478} 479 480/* 481 * Generate a view of the top-level vdevs. If we currently have holes 482 * in the namespace, then generate an array which contains a list of holey 483 * vdevs. Additionally, add the number of top-level children that currently 484 * exist. 
485 */ 486void 487vdev_top_config_generate(spa_t *spa, nvlist_t *config) 488{ 489 vdev_t *rvd = spa->spa_root_vdev; 490 uint64_t *array; 491 uint_t c, idx; 492 493 array = kmem_alloc(rvd->vdev_children * sizeof (uint64_t), KM_SLEEP); 494 495 for (c = 0, idx = 0; c < rvd->vdev_children; c++) { 496 vdev_t *tvd = rvd->vdev_child[c]; 497 498 if (tvd->vdev_ishole) { 499 array[idx++] = c; 500 } 501 } 502 503 if (idx) { 504 VERIFY(nvlist_add_uint64_array(config, ZPOOL_CONFIG_HOLE_ARRAY, 505 array, idx) == 0); 506 } 507 508 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_VDEV_CHILDREN, 509 rvd->vdev_children) == 0); 510 511 kmem_free(array, rvd->vdev_children * sizeof (uint64_t)); 512} 513 514/* 515 * Returns the configuration from the label of the given vdev. For vdevs 516 * which don't have a txg value stored on their label (i.e. spares/cache) 517 * or have not been completely initialized (txg = 0) just return 518 * the configuration from the first valid label we find. Otherwise, 519 * find the most up-to-date label that does not exceed the specified 520 * 'txg' value. 521 */ 522nvlist_t * 523vdev_label_read_config(vdev_t *vd, uint64_t txg) 524{ 525 spa_t *spa = vd->vdev_spa; 526 nvlist_t *config = NULL; 527 vdev_phys_t *vp; 528 abd_t *vp_abd; 529 zio_t *zio; 530 uint64_t best_txg = 0; 531 int error = 0; 532 int flags = ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_CANFAIL | 533 ZIO_FLAG_SPECULATIVE; 534 535 ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL); 536 537 if (!vdev_readable(vd)) 538 return (NULL); 539 540 vp_abd = abd_alloc_linear(sizeof (vdev_phys_t), B_TRUE); 541 vp = abd_to_buf(vp_abd); 542 543retry: 544 for (int l = 0; l < VDEV_LABELS; l++) { 545 nvlist_t *label = NULL; 546 547 zio = zio_root(spa, NULL, NULL, flags); 548 549 vdev_label_read(zio, vd, l, vp_abd, 550 offsetof(vdev_label_t, vl_vdev_phys), 551 sizeof (vdev_phys_t), NULL, NULL, flags); 552 553 if (zio_wait(zio) == 0 && 554 nvlist_unpack(vp->vp_nvlist, sizeof (vp->vp_nvlist), 555 &label, 0) == 0) { 556 uint64_t label_txg = 0; 557 558 /* 559 * Auxiliary vdevs won't have txg values in their 560 * labels and newly added vdevs may not have been 561 * completely initialized so just return the 562 * configuration from the first valid label we 563 * encounter. 564 */ 565 error = nvlist_lookup_uint64(label, 566 ZPOOL_CONFIG_POOL_TXG, &label_txg); 567 if ((error || label_txg == 0) && !config) { 568 config = label; 569 break; 570 } else if (label_txg <= txg && label_txg > best_txg) { 571 best_txg = label_txg; 572 nvlist_free(config); 573 config = fnvlist_dup(label); 574 } 575 } 576 577 if (label != NULL) { 578 nvlist_free(label); 579 label = NULL; 580 } 581 } 582 583 if (config == NULL && !(flags & ZIO_FLAG_TRYHARD)) { 584 flags |= ZIO_FLAG_TRYHARD; 585 goto retry; 586 } 587 588 abd_free(vp_abd); 589 590 return (config); 591} 592 593/* 594 * Determine if a device is in use. The 'spare_guid' parameter will be filled 595 * in with the device guid if this spare is active elsewhere on the system. 596 */ 597static boolean_t 598vdev_inuse(vdev_t *vd, uint64_t crtxg, vdev_labeltype_t reason, 599 uint64_t *spare_guid, uint64_t *l2cache_guid) 600{ 601 spa_t *spa = vd->vdev_spa; 602 uint64_t state, pool_guid, device_guid, txg, spare_pool; 603 uint64_t vdtxg = 0; 604 nvlist_t *label; 605 606 if (spare_guid) 607 *spare_guid = 0ULL; 608 if (l2cache_guid) 609 *l2cache_guid = 0ULL; 610 611 /* 612 * Read the label, if any, and perform some basic sanity checks. 
613 */ 614 if ((label = vdev_label_read_config(vd, -1ULL)) == NULL) 615 return (B_FALSE); 616 617 (void) nvlist_lookup_uint64(label, ZPOOL_CONFIG_CREATE_TXG, 618 &vdtxg); 619 620 if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE, 621 &state) != 0 || 622 nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID, 623 &device_guid) != 0) { 624 nvlist_free(label); 625 return (B_FALSE); 626 } 627 628 if (state != POOL_STATE_SPARE && state != POOL_STATE_L2CACHE && 629 (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_GUID, 630 &pool_guid) != 0 || 631 nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_TXG, 632 &txg) != 0)) { 633 nvlist_free(label); 634 return (B_FALSE); 635 } 636 637 nvlist_free(label); 638 639 /* 640 * Check to see if this device indeed belongs to the pool it claims to 641 * be a part of. The only way this is allowed is if the device is a hot 642 * spare (which we check for later on). 643 */ 644 if (state != POOL_STATE_SPARE && state != POOL_STATE_L2CACHE && 645 !spa_guid_exists(pool_guid, device_guid) && 646 !spa_spare_exists(device_guid, NULL, NULL) && 647 !spa_l2cache_exists(device_guid, NULL)) 648 return (B_FALSE); 649 650 /* 651 * If the transaction group is zero, then this an initialized (but 652 * unused) label. This is only an error if the create transaction 653 * on-disk is the same as the one we're using now, in which case the 654 * user has attempted to add the same vdev multiple times in the same 655 * transaction. 656 */ 657 if (state != POOL_STATE_SPARE && state != POOL_STATE_L2CACHE && 658 txg == 0 && vdtxg == crtxg) 659 return (B_TRUE); 660 661 /* 662 * Check to see if this is a spare device. We do an explicit check for 663 * spa_has_spare() here because it may be on our pending list of spares 664 * to add. We also check if it is an l2cache device. 665 */ 666 if (spa_spare_exists(device_guid, &spare_pool, NULL) || 667 spa_has_spare(spa, device_guid)) { 668 if (spare_guid) 669 *spare_guid = device_guid; 670 671 switch (reason) { 672 case VDEV_LABEL_CREATE: 673 case VDEV_LABEL_L2CACHE: 674 return (B_TRUE); 675 676 case VDEV_LABEL_REPLACE: 677 return (!spa_has_spare(spa, device_guid) || 678 spare_pool != 0ULL); 679 680 case VDEV_LABEL_SPARE: 681 return (spa_has_spare(spa, device_guid)); 682 } 683 } 684 685 /* 686 * Check to see if this is an l2cache device. 687 */ 688 if (spa_l2cache_exists(device_guid, NULL)) 689 return (B_TRUE); 690 691 /* 692 * We can't rely on a pool's state if it's been imported 693 * read-only. Instead we look to see if the pools is marked 694 * read-only in the namespace and set the state to active. 695 */ 696 if (state != POOL_STATE_SPARE && state != POOL_STATE_L2CACHE && 697 (spa = spa_by_guid(pool_guid, device_guid)) != NULL && 698 spa_mode(spa) == FREAD) 699 state = POOL_STATE_ACTIVE; 700 701 /* 702 * If the device is marked ACTIVE, then this device is in use by another 703 * pool on the system. 704 */ 705 return (state == POOL_STATE_ACTIVE); 706} 707 708/* 709 * Initialize a vdev label. We check to make sure each leaf device is not in 710 * use, and writable. We put down an initial label which we will later 711 * overwrite with a complete label. Note that it's important to do this 712 * sequentially, not in parallel, so that we catch cases of multiple use of the 713 * same leaf vdev in the vdev we're creating -- e.g. mirroring a disk with 714 * itself. 
715 */ 716int 717vdev_label_init(vdev_t *vd, uint64_t crtxg, vdev_labeltype_t reason) 718{ 719 spa_t *spa = vd->vdev_spa; 720 nvlist_t *label; 721 vdev_phys_t *vp; 722 abd_t *vp_abd; 723 abd_t *pad2; 724 uberblock_t *ub; 725 abd_t *ub_abd; 726 zio_t *zio; 727 char *buf; 728 size_t buflen; 729 int error; 730 uint64_t spare_guid, l2cache_guid; 731 int flags = ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_CANFAIL; 732 733 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL); 734 735 for (int c = 0; c < vd->vdev_children; c++) 736 if ((error = vdev_label_init(vd->vdev_child[c], 737 crtxg, reason)) != 0) 738 return (error); 739 740 /* Track the creation time for this vdev */ 741 vd->vdev_crtxg = crtxg; 742 743 if (!vd->vdev_ops->vdev_op_leaf || !spa_writeable(spa)) 744 return (0); 745 746 /* 747 * Dead vdevs cannot be initialized. 748 */ 749 if (vdev_is_dead(vd)) 750 return (SET_ERROR(EIO)); 751 752 /* 753 * Determine if the vdev is in use. 754 */ 755 if (reason != VDEV_LABEL_REMOVE && reason != VDEV_LABEL_SPLIT && 756 vdev_inuse(vd, crtxg, reason, &spare_guid, &l2cache_guid)) 757 return (SET_ERROR(EBUSY)); 758 759 /* 760 * If this is a request to add or replace a spare or l2cache device 761 * that is in use elsewhere on the system, then we must update the 762 * guid (which was initialized to a random value) to reflect the 763 * actual GUID (which is shared between multiple pools). 764 */ 765 if (reason != VDEV_LABEL_REMOVE && reason != VDEV_LABEL_L2CACHE && 766 spare_guid != 0ULL) { 767 uint64_t guid_delta = spare_guid - vd->vdev_guid; 768 769 vd->vdev_guid += guid_delta; 770 771 for (vdev_t *pvd = vd; pvd != NULL; pvd = pvd->vdev_parent) 772 pvd->vdev_guid_sum += guid_delta; 773 774 /* 775 * If this is a replacement, then we want to fallthrough to the 776 * rest of the code. If we're adding a spare, then it's already 777 * labeled appropriately and we can just return. 778 */ 779 if (reason == VDEV_LABEL_SPARE) 780 return (0); 781 ASSERT(reason == VDEV_LABEL_REPLACE || 782 reason == VDEV_LABEL_SPLIT); 783 } 784 785 if (reason != VDEV_LABEL_REMOVE && reason != VDEV_LABEL_SPARE && 786 l2cache_guid != 0ULL) { 787 uint64_t guid_delta = l2cache_guid - vd->vdev_guid; 788 789 vd->vdev_guid += guid_delta; 790 791 for (vdev_t *pvd = vd; pvd != NULL; pvd = pvd->vdev_parent) 792 pvd->vdev_guid_sum += guid_delta; 793 794 /* 795 * If this is a replacement, then we want to fallthrough to the 796 * rest of the code. If we're adding an l2cache, then it's 797 * already labeled appropriately and we can just return. 798 */ 799 if (reason == VDEV_LABEL_L2CACHE) 800 return (0); 801 ASSERT(reason == VDEV_LABEL_REPLACE); 802 } 803 804 /* 805 * TRIM the whole thing, excluding the blank space and boot header 806 * as specified by ZFS On-Disk Specification (section 1.3), so that 807 * we start with a clean slate. 808 * It's just an optimization, so we don't care if it fails. 809 * Don't TRIM if removing so that we don't interfere with zpool 810 * disaster recovery. 811 */ 812 if (zfs_trim_enabled && vdev_trim_on_init && !vd->vdev_notrim && 813 (reason == VDEV_LABEL_CREATE || reason == VDEV_LABEL_SPARE || 814 reason == VDEV_LABEL_L2CACHE)) 815 zio_wait(zio_trim(NULL, spa, vd, VDEV_SKIP_SIZE, 816 vd->vdev_psize - VDEV_SKIP_SIZE)); 817 818 /* 819 * Initialize its label. 820 */ 821 vp_abd = abd_alloc_linear(sizeof (vdev_phys_t), B_TRUE); 822 abd_zero(vp_abd, sizeof (vdev_phys_t)); 823 vp = abd_to_buf(vp_abd); 824 825 /* 826 * Generate a label describing the pool and our top-level vdev. 
	 * We mark it as being from txg 0 to indicate that it's not
	 * really part of an active pool just yet. The labels will
	 * be written again with a meaningful txg by spa_sync().
	 */
	if (reason == VDEV_LABEL_SPARE ||
	    (reason == VDEV_LABEL_REMOVE && vd->vdev_isspare)) {
		/*
		 * For inactive hot spares, we generate a special label that
		 * identifies it as a mutually shared hot spare. We write the
		 * label if we are adding a hot spare, or if we are removing
		 * an active hot spare (in which case we want to revert the
		 * labels).
		 */
		VERIFY(nvlist_alloc(&label, NV_UNIQUE_NAME, KM_SLEEP) == 0);

		VERIFY(nvlist_add_uint64(label, ZPOOL_CONFIG_VERSION,
		    spa_version(spa)) == 0);
		VERIFY(nvlist_add_uint64(label, ZPOOL_CONFIG_POOL_STATE,
		    POOL_STATE_SPARE) == 0);
		VERIFY(nvlist_add_uint64(label, ZPOOL_CONFIG_GUID,
		    vd->vdev_guid) == 0);
	} else if (reason == VDEV_LABEL_L2CACHE ||
	    (reason == VDEV_LABEL_REMOVE && vd->vdev_isl2cache)) {
		/*
		 * For level 2 ARC devices, add a special label.
		 */
		VERIFY(nvlist_alloc(&label, NV_UNIQUE_NAME, KM_SLEEP) == 0);

		VERIFY(nvlist_add_uint64(label, ZPOOL_CONFIG_VERSION,
		    spa_version(spa)) == 0);
		VERIFY(nvlist_add_uint64(label, ZPOOL_CONFIG_POOL_STATE,
		    POOL_STATE_L2CACHE) == 0);
		VERIFY(nvlist_add_uint64(label, ZPOOL_CONFIG_GUID,
		    vd->vdev_guid) == 0);
	} else {
		uint64_t txg = 0ULL;

		if (reason == VDEV_LABEL_SPLIT)
			txg = spa->spa_uberblock.ub_txg;
		label = spa_config_generate(spa, vd, txg, B_FALSE);

		/*
		 * Add our creation time. This allows us to detect multiple
		 * vdev uses as described above, and automatically expires if
		 * we fail.
		 */
		VERIFY(nvlist_add_uint64(label, ZPOOL_CONFIG_CREATE_TXG,
		    crtxg) == 0);
	}

	buf = vp->vp_nvlist;
	buflen = sizeof (vp->vp_nvlist);

	error = nvlist_pack(label, &buf, &buflen, NV_ENCODE_XDR, KM_SLEEP);
	if (error != 0) {
		nvlist_free(label);
		abd_free(vp_abd);
		/* EFAULT means nvlist_pack ran out of room */
		return (error == EFAULT ? ENAMETOOLONG : EINVAL);
	}

	/*
	 * Initialize uberblock template.
	 */
	ub_abd = abd_alloc_linear(VDEV_UBERBLOCK_RING, B_TRUE);
	abd_zero(ub_abd, VDEV_UBERBLOCK_RING);
	abd_copy_from_buf(ub_abd, &spa->spa_uberblock, sizeof (uberblock_t));
	ub = abd_to_buf(ub_abd);
	ub->ub_txg = 0;

	/* Initialize the 2nd padding area. */
	pad2 = abd_alloc_for_io(VDEV_PAD_SIZE, B_TRUE);
	abd_zero(pad2, VDEV_PAD_SIZE);

	/*
	 * Write everything in parallel.
	 */
retry:
	zio = zio_root(spa, NULL, NULL, flags);

	for (int l = 0; l < VDEV_LABELS; l++) {

		vdev_label_write(zio, vd, l, vp_abd,
		    offsetof(vdev_label_t, vl_vdev_phys),
		    sizeof (vdev_phys_t), NULL, NULL, flags);

		/*
		 * Skip the 1st padding area.
		 * Zero out the 2nd padding area, which might contain
		 * leftover data from a previous filesystem format.
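		 *
		 * (For orientation -- a sketch of the 256K vdev_label_t
		 * this loop fills in; the sizes are the usual on-disk
		 * constants, stated here for reference rather than taken
		 * from a definition in this file:
		 *
		 *	vl_pad1		  8K	skipped (boot block region)
		 *	vl_pad2		  8K	zeroed here
		 *	vl_vdev_phys	112K	packed config nvlist
		 *	vl_uberblock	128K	uberblock ring)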
		 */
		vdev_label_write(zio, vd, l, pad2,
		    offsetof(vdev_label_t, vl_pad2),
		    VDEV_PAD_SIZE, NULL, NULL, flags);

		vdev_label_write(zio, vd, l, ub_abd,
		    offsetof(vdev_label_t, vl_uberblock),
		    VDEV_UBERBLOCK_RING, NULL, NULL, flags);
	}

	error = zio_wait(zio);

	if (error != 0 && !(flags & ZIO_FLAG_TRYHARD)) {
		flags |= ZIO_FLAG_TRYHARD;
		goto retry;
	}

	nvlist_free(label);
	abd_free(pad2);
	abd_free(ub_abd);
	abd_free(vp_abd);

	/*
	 * If this vdev hasn't been previously identified as a spare, then we
	 * mark it as such only if a) we are labeling it as a spare, or b) it
	 * exists as a spare elsewhere in the system. Do the same for
	 * level 2 ARC devices.
	 */
	if (error == 0 && !vd->vdev_isspare &&
	    (reason == VDEV_LABEL_SPARE ||
	    spa_spare_exists(vd->vdev_guid, NULL, NULL)))
		spa_spare_add(vd);

	if (error == 0 && !vd->vdev_isl2cache &&
	    (reason == VDEV_LABEL_L2CACHE ||
	    spa_l2cache_exists(vd->vdev_guid, NULL)))
		spa_l2cache_add(vd);

	return (error);
}

int
vdev_label_write_pad2(vdev_t *vd, const char *buf, size_t size)
{
	spa_t *spa = vd->vdev_spa;
	zio_t *zio;
	abd_t *pad2;
	int flags = ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_CANFAIL;
	int error;

	if (size > VDEV_PAD_SIZE)
		return (EINVAL);

	if (!vd->vdev_ops->vdev_op_leaf)
		return (ENODEV);
	if (vdev_is_dead(vd))
		return (ENXIO);

	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	pad2 = abd_alloc_for_io(VDEV_PAD_SIZE, B_TRUE);
	abd_zero(pad2, VDEV_PAD_SIZE);
	abd_copy_from_buf(pad2, buf, size);

retry:
	zio = zio_root(spa, NULL, NULL, flags);
	vdev_label_write(zio, vd, 0, pad2,
	    offsetof(vdev_label_t, vl_pad2),
	    VDEV_PAD_SIZE, NULL, NULL, flags);
	error = zio_wait(zio);
	if (error != 0 && !(flags & ZIO_FLAG_TRYHARD)) {
		flags |= ZIO_FLAG_TRYHARD;
		goto retry;
	}

	abd_free(pad2);
	return (error);
}

/*
 * ==========================================================================
 * uberblock load/sync
 * ==========================================================================
 */

/*
 * Consider the following situation: txg is safely synced to disk. We've
 * written the first uberblock for txg + 1, and then we lose power. When we
 * come back up, we fail to see the uberblock for txg + 1 because, say,
 * it was on a mirrored device and the replica to which we wrote txg + 1
 * is now offline. If we then make some changes and sync txg + 1, and then
 * the missing replica comes back, then for a few seconds we'll have two
 * conflicting uberblocks on disk with the same txg. The solution is simple:
 * among uberblocks with equal txg, choose the one with the latest timestamp.
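 *
 * (Worked example with made-up numbers: if both replicas hold an uberblock
 * for txg 1000, one written before the outage at timestamp 12345 and one
 * written during the re-sync at timestamp 12399, the comparison below picks
 * the 12399 copy -- the one that matches the pool's current contents.)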
 */
static int
vdev_uberblock_compare(uberblock_t *ub1, uberblock_t *ub2)
{
	if (ub1->ub_txg < ub2->ub_txg)
		return (-1);
	if (ub1->ub_txg > ub2->ub_txg)
		return (1);

	if (ub1->ub_timestamp < ub2->ub_timestamp)
		return (-1);
	if (ub1->ub_timestamp > ub2->ub_timestamp)
		return (1);

	return (0);
}

struct ubl_cbdata {
	uberblock_t	*ubl_ubbest;	/* Best uberblock */
	vdev_t		*ubl_vd;	/* vdev associated with the above */
};

static void
vdev_uberblock_load_done(zio_t *zio)
{
	vdev_t *vd = zio->io_vd;
	spa_t *spa = zio->io_spa;
	zio_t *rio = zio->io_private;
	uberblock_t *ub = abd_to_buf(zio->io_abd);
	struct ubl_cbdata *cbp = rio->io_private;

	ASSERT3U(zio->io_size, ==, VDEV_UBERBLOCK_SIZE(vd));

	if (zio->io_error == 0 && uberblock_verify(ub) == 0) {
		mutex_enter(&rio->io_lock);
		if (ub->ub_txg <= spa->spa_load_max_txg &&
		    vdev_uberblock_compare(ub, cbp->ubl_ubbest) > 0) {
			/*
			 * Keep track of the vdev in which this uberblock
			 * was found. We will use this information later
			 * to obtain the config nvlist associated with
			 * this uberblock.
			 */
			*cbp->ubl_ubbest = *ub;
			cbp->ubl_vd = vd;
		}
		mutex_exit(&rio->io_lock);
	}

	abd_free(zio->io_abd);
}

static void
vdev_uberblock_load_impl(zio_t *zio, vdev_t *vd, int flags,
    struct ubl_cbdata *cbp)
{
	for (int c = 0; c < vd->vdev_children; c++)
		vdev_uberblock_load_impl(zio, vd->vdev_child[c], flags, cbp);

	if (vd->vdev_ops->vdev_op_leaf && vdev_readable(vd)) {
		for (int l = 0; l < VDEV_LABELS; l++) {
			for (int n = 0; n < VDEV_UBERBLOCK_COUNT(vd); n++) {
				vdev_label_read(zio, vd, l,
				    abd_alloc_linear(VDEV_UBERBLOCK_SIZE(vd),
				    B_TRUE), VDEV_UBERBLOCK_OFFSET(vd, n),
				    VDEV_UBERBLOCK_SIZE(vd),
				    vdev_uberblock_load_done, zio, flags);
			}
		}
	}
}

/*
 * Reads the 'best' uberblock from disk along with its associated
 * configuration. First, we read the uberblock array of each label of each
 * vdev, keeping track of the uberblock with the highest txg in each array.
 * Then, we read the configuration from the same vdev as the best uberblock.
 */
void
vdev_uberblock_load(vdev_t *rvd, uberblock_t *ub, nvlist_t **config)
{
	zio_t *zio;
	spa_t *spa = rvd->vdev_spa;
	struct ubl_cbdata cb;
	int flags = ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_CANFAIL |
	    ZIO_FLAG_SPECULATIVE | ZIO_FLAG_TRYHARD;

	ASSERT(ub);
	ASSERT(config);

	bzero(ub, sizeof (uberblock_t));
	*config = NULL;

	cb.ubl_ubbest = ub;
	cb.ubl_vd = NULL;

	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
	zio = zio_root(spa, NULL, &cb, flags);
	vdev_uberblock_load_impl(zio, rvd, flags, &cb);
	(void) zio_wait(zio);

	/*
	 * It's possible that the best uberblock was discovered on a label
	 * that has a configuration which was written in a future txg.
	 * Search all labels on this vdev to find the configuration that
	 * matches the txg for our uberblock.
	 */
	if (cb.ubl_vd != NULL)
		*config = vdev_label_read_config(cb.ubl_vd, ub->ub_txg);
	spa_config_exit(spa, SCL_ALL, FTAG);
}

/*
 * On success, increment root zio's count of good writes.
 * We only get credit for writes to known-visible vdevs; see spa_vdev_add().
 */
static void
vdev_uberblock_sync_done(zio_t *zio)
{
	uint64_t *good_writes = zio->io_private;

	if (zio->io_error == 0 && zio->io_vd->vdev_top->vdev_ms_array != 0)
		atomic_inc_64(good_writes);
}

/*
 * Write the uberblock to all labels of all leaves of the specified vdev.
 */
static void
vdev_uberblock_sync(zio_t *zio, uberblock_t *ub, vdev_t *vd, int flags)
{
	for (int c = 0; c < vd->vdev_children; c++)
		vdev_uberblock_sync(zio, ub, vd->vdev_child[c], flags);

	if (!vd->vdev_ops->vdev_op_leaf)
		return;

	if (!vdev_writeable(vd))
		return;

	/*
	 * The uberblock ring holds a power-of-2 number of slots, so the
	 * slot for this txg is simply txg modulo the slot count.
	 */
	int n = ub->ub_txg & (VDEV_UBERBLOCK_COUNT(vd) - 1);

	/* Copy the uberblock_t into the ABD */
	abd_t *ub_abd = abd_alloc_for_io(VDEV_UBERBLOCK_SIZE(vd), B_TRUE);
	abd_zero(ub_abd, VDEV_UBERBLOCK_SIZE(vd));
	abd_copy_from_buf(ub_abd, ub, sizeof (uberblock_t));

	for (int l = 0; l < VDEV_LABELS; l++)
		vdev_label_write(zio, vd, l, ub_abd,
		    VDEV_UBERBLOCK_OFFSET(vd, n), VDEV_UBERBLOCK_SIZE(vd),
		    vdev_uberblock_sync_done, zio->io_private,
		    flags | ZIO_FLAG_DONT_PROPAGATE);

	abd_free(ub_abd);
}

/* Sync the uberblocks to all vdevs in svd[] */
int
vdev_uberblock_sync_list(vdev_t **svd, int svdcount, uberblock_t *ub, int flags)
{
	spa_t *spa = svd[0]->vdev_spa;
	zio_t *zio;
	uint64_t good_writes = 0;

	zio = zio_root(spa, NULL, &good_writes, flags);

	for (int v = 0; v < svdcount; v++)
		vdev_uberblock_sync(zio, ub, svd[v], flags);

	(void) zio_wait(zio);

	/*
	 * Flush the uberblocks to disk. This ensures that the odd labels
	 * are no longer needed (because the new uberblocks and the even
	 * labels are safely on disk), so it is safe to overwrite them.
	 */
	zio = zio_root(spa, NULL, NULL, flags);

	for (int v = 0; v < svdcount; v++) {
		if (vdev_writeable(svd[v])) {
			zio_flush(zio, svd[v]);
		}
	}

	(void) zio_wait(zio);

	return (good_writes >= 1 ? 0 : EIO);
}

/*
 * On success, increment the count of good writes for our top-level vdev.
 */
static void
vdev_label_sync_done(zio_t *zio)
{
	uint64_t *good_writes = zio->io_private;

	if (zio->io_error == 0)
		atomic_inc_64(good_writes);
}

/*
 * If there weren't enough good writes, indicate failure to the parent.
 */
static void
vdev_label_sync_top_done(zio_t *zio)
{
	uint64_t *good_writes = zio->io_private;

	if (*good_writes == 0)
		zio->io_error = SET_ERROR(EIO);

	kmem_free(good_writes, sizeof (uint64_t));
}

/*
 * We ignore errors for log and cache devices, simply free the private data.
 */
static void
vdev_label_sync_ignore_done(zio_t *zio)
{
	kmem_free(zio->io_private, sizeof (uint64_t));
}

/*
 * Write all even or odd labels to all leaves of the specified vdev.
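 * The caller selects the set via 'l': 0 writes the even labels (L0 at the
 * front of the device, L2 at the back), 1 writes the odd labels (L1, L3);
 * the 'l += 2' loop below walks the chosen set.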
 */
static void
vdev_label_sync(zio_t *zio, vdev_t *vd, int l, uint64_t txg, int flags)
{
	nvlist_t *label;
	vdev_phys_t *vp;
	abd_t *vp_abd;
	char *buf;
	size_t buflen;

	for (int c = 0; c < vd->vdev_children; c++)
		vdev_label_sync(zio, vd->vdev_child[c], l, txg, flags);

	if (!vd->vdev_ops->vdev_op_leaf)
		return;

	if (!vdev_writeable(vd))
		return;

	/*
	 * Generate a label describing the top-level config to which we belong.
	 */
	label = spa_config_generate(vd->vdev_spa, vd, txg, B_FALSE);

	vp_abd = abd_alloc_linear(sizeof (vdev_phys_t), B_TRUE);
	abd_zero(vp_abd, sizeof (vdev_phys_t));
	vp = abd_to_buf(vp_abd);

	buf = vp->vp_nvlist;
	buflen = sizeof (vp->vp_nvlist);

	if (nvlist_pack(label, &buf, &buflen, NV_ENCODE_XDR, KM_SLEEP) == 0) {
		for (; l < VDEV_LABELS; l += 2) {
			vdev_label_write(zio, vd, l, vp_abd,
			    offsetof(vdev_label_t, vl_vdev_phys),
			    sizeof (vdev_phys_t),
			    vdev_label_sync_done, zio->io_private,
			    flags | ZIO_FLAG_DONT_PROPAGATE);
		}
	}

	abd_free(vp_abd);
	nvlist_free(label);
}

int
vdev_label_sync_list(spa_t *spa, int l, uint64_t txg, int flags)
{
	list_t *dl = &spa->spa_config_dirty_list;
	vdev_t *vd;
	zio_t *zio;
	int error;

	/*
	 * Write the new labels to disk.
	 */
	zio = zio_root(spa, NULL, NULL, flags);

	for (vd = list_head(dl); vd != NULL; vd = list_next(dl, vd)) {
		uint64_t *good_writes = kmem_zalloc(sizeof (uint64_t),
		    KM_SLEEP);

		ASSERT(!vd->vdev_ishole);

		zio_t *vio = zio_null(zio, spa, NULL,
		    (vd->vdev_islog || vd->vdev_aux != NULL) ?
		    vdev_label_sync_ignore_done : vdev_label_sync_top_done,
		    good_writes, flags);
		vdev_label_sync(vio, vd, l, txg, flags);
		zio_nowait(vio);
	}

	error = zio_wait(zio);

	/*
	 * Flush the new labels to disk.
	 */
	zio = zio_root(spa, NULL, NULL, flags);

	for (vd = list_head(dl); vd != NULL; vd = list_next(dl, vd))
		zio_flush(zio, vd);

	(void) zio_wait(zio);

	return (error);
}

/*
 * Sync the uberblock and any changes to the vdev configuration.
 *
 * The order of operations is carefully crafted to ensure that
 * if the system panics or loses power at any time, the state on disk
 * is still transactionally consistent. The in-line comments below
 * describe the failure semantics at each stage.
 *
 * Moreover, vdev_config_sync() is designed to be idempotent: if it fails
 * at any time, you can just call it again, and it will resume its work.
 */
int
vdev_config_sync(vdev_t **svd, int svdcount, uint64_t txg)
{
	spa_t *spa = svd[0]->vdev_spa;
	uberblock_t *ub = &spa->spa_uberblock;
	vdev_t *vd;
	zio_t *zio;
	int error = 0;
	int flags = ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_CANFAIL;

retry:
	/*
	 * Normally, we don't want to try too hard to write every label and
	 * uberblock. If there is a flaky disk, we don't want the rest of the
	 * sync process to block while we retry. But if we can't write a
	 * single label out, we should retry with ZIO_FLAG_TRYHARD before
	 * bailing out and declaring the pool faulted.
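	 *
	 * (Note: on the first pass 'error' is still 0, so the block below
	 * is skipped; if any sync step later fails we jump back to 'retry'
	 * once with TRYHARD set, and a second failure is returned to the
	 * caller.)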
	 */
	if (error != 0) {
		if ((flags & ZIO_FLAG_TRYHARD) != 0)
			return (error);
		flags |= ZIO_FLAG_TRYHARD;
	}

	ASSERT(ub->ub_txg <= txg);

	/*
	 * If this isn't a resync due to I/O errors,
	 * and nothing changed in this transaction group,
	 * and the vdev configuration hasn't changed,
	 * then there's nothing to do.
	 */
	if (ub->ub_txg < txg &&
	    uberblock_update(ub, spa->spa_root_vdev, txg) == B_FALSE &&
	    list_is_empty(&spa->spa_config_dirty_list))
		return (0);

	if (txg > spa_freeze_txg(spa))
		return (0);

	ASSERT(txg <= spa->spa_final_txg);

	/*
	 * Flush the write cache of every disk that's been written to
	 * in this transaction group. This ensures that all blocks
	 * written in this txg will be committed to stable storage
	 * before any uberblock that references them.
	 */
	zio = zio_root(spa, NULL, NULL, flags);

	for (vd = txg_list_head(&spa->spa_vdev_txg_list, TXG_CLEAN(txg)); vd;
	    vd = txg_list_next(&spa->spa_vdev_txg_list, vd, TXG_CLEAN(txg)))
		zio_flush(zio, vd);

	(void) zio_wait(zio);

	/*
	 * Sync out the even labels (L0, L2) for every dirty vdev. If the
	 * system dies in the middle of this process, that's OK: all of the
	 * even labels that made it to disk will be newer than any uberblock,
	 * and will therefore be considered invalid. The odd labels (L1, L3),
	 * which have not yet been touched, will still be valid. We flush
	 * the new labels to disk to ensure that all even-label updates
	 * are committed to stable storage before the uberblock update.
	 */
	if ((error = vdev_label_sync_list(spa, 0, txg, flags)) != 0)
		goto retry;

	/*
	 * Sync the uberblocks to all vdevs in svd[].
	 * If the system dies in the middle of this step, there are two cases
	 * to consider, and the on-disk state is consistent either way:
	 *
	 * (1)	If none of the new uberblocks made it to disk, then the
	 *	previous uberblock will be the newest, and the odd labels
	 *	(which had not yet been touched) will be valid with respect
	 *	to that uberblock.
	 *
	 * (2)	If one or more new uberblocks made it to disk, then they
	 *	will be the newest, and the even labels (which had all
	 *	been successfully committed) will be valid with respect
	 *	to the new uberblocks.
	 */
	if ((error = vdev_uberblock_sync_list(svd, svdcount, ub, flags)) != 0)
		goto retry;

	/*
	 * Sync out odd labels for every dirty vdev. If the system dies
	 * in the middle of this process, the even labels and the new
	 * uberblocks will suffice to open the pool. The next time
	 * the pool is opened, the first thing we'll do -- before any
	 * user data is modified -- is mark every vdev dirty so that
	 * all labels will be brought up to date. We flush the new labels
	 * to disk to ensure that all odd-label updates are committed to
	 * stable storage before the next transaction group begins.
	 */
	if ((error = vdev_label_sync_list(spa, 1, txg, flags)) != 0)
		goto retry;

	trim_thread_wakeup(spa);

	return (0);
}
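
/*
 * (Illustrative timeline only, restating the steps above rather than adding
 * behavior: for txg N the on-disk order is
 *
 *	flush the write caches of all vdevs dirtied in txg N
 *	write even labels (L0, L2) at txg N, then flush
 *	write the uberblock into ring slot N mod ring-size on every
 *	    label of svd[], then flush
 *	write odd labels (L1, L3) at txg N, then flush
 *
 * so a crash at any point leaves at least one label set that agrees with
 * the newest uberblock visible on disk.)
 */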