spa.c revision 332547
1/* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22/* 23 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. 24 * Copyright (c) 2011, 2018 by Delphix. All rights reserved. 25 * Copyright (c) 2015, Nexenta Systems, Inc. All rights reserved. 26 * Copyright (c) 2013 Martin Matuska <mm@FreeBSD.org>. All rights reserved. 27 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved. 28 * Copyright 2013 Saso Kiselkov. All rights reserved. 29 * Copyright (c) 2014 Integros [integros.com] 30 * Copyright 2016 Toomas Soome <tsoome@me.com> 31 * Copyright 2017 Joyent, Inc. 32 * Copyright (c) 2017 Datto Inc. 33 * Copyright 2018 OmniOS Community Edition (OmniOSce) Association. 34 */ 35 36/* 37 * SPA: Storage Pool Allocator 38 * 39 * This file contains all the routines used when modifying on-disk SPA state. 40 * This includes opening, importing, destroying, exporting a pool, and syncing a 41 * pool. 42 */ 43 44#include <sys/zfs_context.h> 45#include <sys/fm/fs/zfs.h> 46#include <sys/spa_impl.h> 47#include <sys/zio.h> 48#include <sys/zio_checksum.h> 49#include <sys/dmu.h> 50#include <sys/dmu_tx.h> 51#include <sys/zap.h> 52#include <sys/zil.h> 53#include <sys/ddt.h> 54#include <sys/vdev_impl.h> 55#include <sys/vdev_removal.h> 56#include <sys/vdev_indirect_mapping.h> 57#include <sys/vdev_indirect_births.h> 58#include <sys/metaslab.h> 59#include <sys/metaslab_impl.h> 60#include <sys/uberblock_impl.h> 61#include <sys/txg.h> 62#include <sys/avl.h> 63#include <sys/bpobj.h> 64#include <sys/dmu_traverse.h> 65#include <sys/dmu_objset.h> 66#include <sys/unique.h> 67#include <sys/dsl_pool.h> 68#include <sys/dsl_dataset.h> 69#include <sys/dsl_dir.h> 70#include <sys/dsl_prop.h> 71#include <sys/dsl_synctask.h> 72#include <sys/fs/zfs.h> 73#include <sys/arc.h> 74#include <sys/callb.h> 75#include <sys/spa_boot.h> 76#include <sys/zfs_ioctl.h> 77#include <sys/dsl_scan.h> 78#include <sys/dmu_send.h> 79#include <sys/dsl_destroy.h> 80#include <sys/dsl_userhold.h> 81#include <sys/zfeature.h> 82#include <sys/zvol.h> 83#include <sys/trim_map.h> 84#include <sys/abd.h> 85 86#ifdef _KERNEL 87#include <sys/callb.h> 88#include <sys/cpupart.h> 89#include <sys/zone.h> 90#endif /* _KERNEL */ 91 92#include "zfs_prop.h" 93#include "zfs_comutil.h" 94 95/* Check hostid on import? */ 96static int check_hostid = 1; 97 98/* 99 * The interval, in seconds, at which failed configuration cache file writes 100 * should be retried. 
 */
int zfs_ccw_retry_interval = 300;

SYSCTL_DECL(_vfs_zfs);
SYSCTL_INT(_vfs_zfs, OID_AUTO, check_hostid, CTLFLAG_RWTUN, &check_hostid, 0,
    "Check hostid on import?");
TUNABLE_INT("vfs.zfs.ccw_retry_interval", &zfs_ccw_retry_interval);
SYSCTL_INT(_vfs_zfs, OID_AUTO, ccw_retry_interval, CTLFLAG_RW,
    &zfs_ccw_retry_interval, 0,
    "Configuration cache file write, retry after failure, interval (seconds)");

typedef enum zti_modes {
	ZTI_MODE_FIXED,		/* value is # of threads (min 1) */
	ZTI_MODE_BATCH,		/* cpu-intensive; value is ignored */
	ZTI_MODE_NULL,		/* don't create a taskq */
	ZTI_NMODES
} zti_modes_t;

#define	ZTI_P(n, q)	{ ZTI_MODE_FIXED, (n), (q) }
#define	ZTI_BATCH	{ ZTI_MODE_BATCH, 0, 1 }
#define	ZTI_NULL	{ ZTI_MODE_NULL, 0, 0 }

#define	ZTI_N(n)	ZTI_P(n, 1)
#define	ZTI_ONE		ZTI_N(1)

typedef struct zio_taskq_info {
	zti_modes_t zti_mode;
	uint_t zti_value;
	uint_t zti_count;
} zio_taskq_info_t;

static const char *const zio_taskq_types[ZIO_TASKQ_TYPES] = {
	"issue", "issue_high", "intr", "intr_high"
};

/*
 * This table defines the taskq settings for each ZFS I/O type. When
 * initializing a pool, we use this table to create an appropriately sized
 * taskq. Some operations are low volume and therefore have a small, static
 * number of threads assigned to their taskqs using the ZTI_N(#) or ZTI_ONE
 * macros. Other operations process a large amount of data; the ZTI_BATCH
 * macro causes us to create a taskq oriented for throughput. Some operations
 * are so high frequency and short-lived that the taskq itself can become a
 * point of lock contention. The ZTI_P(#, #) macro indicates that we need an
 * additional degree of parallelism specified by the number of threads
 * per-taskq and the number of taskqs; when dispatching an event in this
 * case, the particular taskq is chosen at random.
 *
 * The different taskq priorities are to handle the different contexts (issue
 * and interrupt) and then to reserve threads for ZIO_PRIORITY_NOW I/Os that
 * need to be handled with minimum delay.
 */
const zio_taskq_info_t zio_taskqs[ZIO_TYPES][ZIO_TASKQ_TYPES] = {
	/* ISSUE	ISSUE_HIGH	INTR		INTR_HIGH */
	{ ZTI_ONE,	ZTI_NULL,	ZTI_ONE,	ZTI_NULL }, /* NULL */
	{ ZTI_N(8),	ZTI_NULL,	ZTI_P(12, 8),	ZTI_NULL }, /* READ */
	{ ZTI_BATCH,	ZTI_N(5),	ZTI_N(8),	ZTI_N(5) }, /* WRITE */
	{ ZTI_P(12, 8),	ZTI_NULL,	ZTI_ONE,	ZTI_NULL }, /* FREE */
	{ ZTI_ONE,	ZTI_NULL,	ZTI_ONE,	ZTI_NULL }, /* CLAIM */
	{ ZTI_ONE,	ZTI_NULL,	ZTI_ONE,	ZTI_NULL }, /* IOCTL */
};

static void spa_sync_version(void *arg, dmu_tx_t *tx);
static void spa_sync_props(void *arg, dmu_tx_t *tx);
static boolean_t spa_has_active_shared_spare(spa_t *spa);
static int spa_load_impl(spa_t *spa, spa_import_type_t type, char **ereport);
static void spa_vdev_resilver_done(spa_t *spa);

uint_t		zio_taskq_batch_pct = 75;	/* 1 thread per cpu in pset */
#ifdef PSRSET_BIND
id_t		zio_taskq_psrset_bind = PS_NONE;
#endif
#ifdef SYSDC
boolean_t	zio_taskq_sysdc = B_TRUE;	/* use SDC scheduling class */
uint_t		zio_taskq_basedc = 80;		/* base duty cycle */
#endif

boolean_t	spa_create_process = B_TRUE;	/* no process ==> no sysdc */
extern int	zfs_sync_pass_deferred_free;
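/*
 * Worked illustration of the zio_taskqs table above (a sketch, not an
 * additional tunable or interface): READ/INTR is ZTI_P(12, 8), i.e. eight
 * discrete taskqs of twelve threads each.  spa_taskq_dispatch_ent(),
 * further down in this file, then spreads read completions across those
 * taskqs roughly as follows:
 *
 *	spa_taskqs_t *tqs =
 *	    &spa->spa_zio_taskq[ZIO_TYPE_READ][ZIO_TASKQ_INTR];
 *	taskq_t *tq = tqs->stqs_taskq[gethrtime() % tqs->stqs_count];
 *	taskq_dispatch_ent(tq, func, arg, flags, ent);
 *
 * so that no single taskq lock becomes a point of contention.
 */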
/*
 * Report any spa_load_verify errors found, but do not fail spa_load.
 * This is used by zdb to analyze non-idle pools.
 */
boolean_t	spa_load_verify_dryrun = B_FALSE;

/*
 * This (illegal) pool name is used when temporarily importing a spa_t in order
 * to get the vdev stats associated with the imported devices.
 */
#define	TRYIMPORT_NAME	"$import"

/*
 * For debugging purposes: print out vdev tree during pool import.
 */
int	spa_load_print_vdev_tree = B_FALSE;

/*
 * A non-zero value for zfs_max_missing_tvds means that we allow importing
 * pools with missing top-level vdevs. This is strictly intended for advanced
 * pool recovery cases since missing data is almost inevitable. Pools with
 * missing devices can only be imported read-only for safety reasons, and their
 * fail-mode will be automatically set to "continue".
 *
 * With 1 missing vdev we should be able to import the pool and mount all
 * datasets. User data that was not modified after the missing device has been
 * added should be recoverable. This means that snapshots created prior to the
 * addition of that device should be completely intact.
 *
 * With 2 missing vdevs, some datasets may fail to mount since there are
 * dataset statistics that are stored as regular metadata. Some data might be
 * recoverable if those vdevs were added recently.
 *
 * With 3 or more missing vdevs, the pool is severely damaged and MOS entries
 * may be missing entirely. Chances of data recovery are very low. Note that
 * there are also risks of performing an inadvertent rewind as we might be
 * missing all the vdevs with the latest uberblocks.
 */
uint64_t	zfs_max_missing_tvds = 0;

/*
 * The parameters below are similar to zfs_max_missing_tvds but are only
 * intended for a preliminary open of the pool with an untrusted config which
 * might be incomplete or out-dated.
 *
 * We are more tolerant for pools opened from a cachefile since we could have
 * an out-dated cachefile where a device removal was not registered.
 * We could have set the limit arbitrarily high but in the case where devices
 * are really missing we would want to return the proper error codes; we chose
 * SPA_DVAS_PER_BP - 1 so that some copies of the MOS would still be available
 * and we get a chance to retrieve the trusted config.
 */
uint64_t	zfs_max_missing_tvds_cachefile = SPA_DVAS_PER_BP - 1;

/*
 * In the case where config was assembled by scanning device paths (/dev/dsks
 * by default) we are less tolerant since all the existing devices should have
 * been detected and we want spa_load to return the right error codes.
 */
uint64_t	zfs_max_missing_tvds_scan = 0;


SYSCTL_INT(_vfs_zfs, OID_AUTO, spa_load_print_vdev_tree, CTLFLAG_RWTUN,
    &spa_load_print_vdev_tree, 0,
    "print out vdev tree during pool import");
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, max_missing_tvds, CTLFLAG_RWTUN,
    &zfs_max_missing_tvds, 0,
    "allow importing pools with missing top-level vdevs");
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, max_missing_tvds_cachefile, CTLFLAG_RWTUN,
    &zfs_max_missing_tvds_cachefile, 0,
    "allow importing pools with missing top-level vdevs in cache file");
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, max_missing_tvds_scan, CTLFLAG_RWTUN,
    &zfs_max_missing_tvds_scan, 0,
    "allow importing pools with missing top-level vdevs during scan");

/*
 * Debugging aid that pauses spa_sync() towards the end.
258 */ 259boolean_t zfs_pause_spa_sync = B_FALSE; 260 261/* 262 * ========================================================================== 263 * SPA properties routines 264 * ========================================================================== 265 */ 266 267/* 268 * Add a (source=src, propname=propval) list to an nvlist. 269 */ 270static void 271spa_prop_add_list(nvlist_t *nvl, zpool_prop_t prop, char *strval, 272 uint64_t intval, zprop_source_t src) 273{ 274 const char *propname = zpool_prop_to_name(prop); 275 nvlist_t *propval; 276 277 VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP) == 0); 278 VERIFY(nvlist_add_uint64(propval, ZPROP_SOURCE, src) == 0); 279 280 if (strval != NULL) 281 VERIFY(nvlist_add_string(propval, ZPROP_VALUE, strval) == 0); 282 else 283 VERIFY(nvlist_add_uint64(propval, ZPROP_VALUE, intval) == 0); 284 285 VERIFY(nvlist_add_nvlist(nvl, propname, propval) == 0); 286 nvlist_free(propval); 287} 288 289/* 290 * Get property values from the spa configuration. 291 */ 292static void 293spa_prop_get_config(spa_t *spa, nvlist_t **nvp) 294{ 295 vdev_t *rvd = spa->spa_root_vdev; 296 dsl_pool_t *pool = spa->spa_dsl_pool; 297 uint64_t size, alloc, cap, version; 298 zprop_source_t src = ZPROP_SRC_NONE; 299 spa_config_dirent_t *dp; 300 metaslab_class_t *mc = spa_normal_class(spa); 301 302 ASSERT(MUTEX_HELD(&spa->spa_props_lock)); 303 304 if (rvd != NULL) { 305 alloc = metaslab_class_get_alloc(spa_normal_class(spa)); 306 size = metaslab_class_get_space(spa_normal_class(spa)); 307 spa_prop_add_list(*nvp, ZPOOL_PROP_NAME, spa_name(spa), 0, src); 308 spa_prop_add_list(*nvp, ZPOOL_PROP_SIZE, NULL, size, src); 309 spa_prop_add_list(*nvp, ZPOOL_PROP_ALLOCATED, NULL, alloc, src); 310 spa_prop_add_list(*nvp, ZPOOL_PROP_FREE, NULL, 311 size - alloc, src); 312 spa_prop_add_list(*nvp, ZPOOL_PROP_CHECKPOINT, NULL, 313 spa->spa_checkpoint_info.sci_dspace, src); 314 315 spa_prop_add_list(*nvp, ZPOOL_PROP_FRAGMENTATION, NULL, 316 metaslab_class_fragmentation(mc), src); 317 spa_prop_add_list(*nvp, ZPOOL_PROP_EXPANDSZ, NULL, 318 metaslab_class_expandable_space(mc), src); 319 spa_prop_add_list(*nvp, ZPOOL_PROP_READONLY, NULL, 320 (spa_mode(spa) == FREAD), src); 321 322 cap = (size == 0) ? 0 : (alloc * 100 / size); 323 spa_prop_add_list(*nvp, ZPOOL_PROP_CAPACITY, NULL, cap, src); 324 325 spa_prop_add_list(*nvp, ZPOOL_PROP_DEDUPRATIO, NULL, 326 ddt_get_pool_dedup_ratio(spa), src); 327 328 spa_prop_add_list(*nvp, ZPOOL_PROP_HEALTH, NULL, 329 rvd->vdev_state, src); 330 331 version = spa_version(spa); 332 if (version == zpool_prop_default_numeric(ZPOOL_PROP_VERSION)) 333 src = ZPROP_SRC_DEFAULT; 334 else 335 src = ZPROP_SRC_LOCAL; 336 spa_prop_add_list(*nvp, ZPOOL_PROP_VERSION, NULL, version, src); 337 } 338 339 if (pool != NULL) { 340 /* 341 * The $FREE directory was introduced in SPA_VERSION_DEADLISTS, 342 * when opening pools before this version freedir will be NULL. 
343 */ 344 if (pool->dp_free_dir != NULL) { 345 spa_prop_add_list(*nvp, ZPOOL_PROP_FREEING, NULL, 346 dsl_dir_phys(pool->dp_free_dir)->dd_used_bytes, 347 src); 348 } else { 349 spa_prop_add_list(*nvp, ZPOOL_PROP_FREEING, 350 NULL, 0, src); 351 } 352 353 if (pool->dp_leak_dir != NULL) { 354 spa_prop_add_list(*nvp, ZPOOL_PROP_LEAKED, NULL, 355 dsl_dir_phys(pool->dp_leak_dir)->dd_used_bytes, 356 src); 357 } else { 358 spa_prop_add_list(*nvp, ZPOOL_PROP_LEAKED, 359 NULL, 0, src); 360 } 361 } 362 363 spa_prop_add_list(*nvp, ZPOOL_PROP_GUID, NULL, spa_guid(spa), src); 364 365 if (spa->spa_comment != NULL) { 366 spa_prop_add_list(*nvp, ZPOOL_PROP_COMMENT, spa->spa_comment, 367 0, ZPROP_SRC_LOCAL); 368 } 369 370 if (spa->spa_root != NULL) 371 spa_prop_add_list(*nvp, ZPOOL_PROP_ALTROOT, spa->spa_root, 372 0, ZPROP_SRC_LOCAL); 373 374 if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_BLOCKS)) { 375 spa_prop_add_list(*nvp, ZPOOL_PROP_MAXBLOCKSIZE, NULL, 376 MIN(zfs_max_recordsize, SPA_MAXBLOCKSIZE), ZPROP_SRC_NONE); 377 } else { 378 spa_prop_add_list(*nvp, ZPOOL_PROP_MAXBLOCKSIZE, NULL, 379 SPA_OLD_MAXBLOCKSIZE, ZPROP_SRC_NONE); 380 } 381 382 if ((dp = list_head(&spa->spa_config_list)) != NULL) { 383 if (dp->scd_path == NULL) { 384 spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE, 385 "none", 0, ZPROP_SRC_LOCAL); 386 } else if (strcmp(dp->scd_path, spa_config_path) != 0) { 387 spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE, 388 dp->scd_path, 0, ZPROP_SRC_LOCAL); 389 } 390 } 391} 392 393/* 394 * Get zpool property values. 395 */ 396int 397spa_prop_get(spa_t *spa, nvlist_t **nvp) 398{ 399 objset_t *mos = spa->spa_meta_objset; 400 zap_cursor_t zc; 401 zap_attribute_t za; 402 int err; 403 404 VERIFY(nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP) == 0); 405 406 mutex_enter(&spa->spa_props_lock); 407 408 /* 409 * Get properties from the spa config. 410 */ 411 spa_prop_get_config(spa, nvp); 412 413 /* If no pool property object, no more prop to get. */ 414 if (mos == NULL || spa->spa_pool_props_object == 0) { 415 mutex_exit(&spa->spa_props_lock); 416 return (0); 417 } 418 419 /* 420 * Get properties from the MOS pool property object. 
421 */ 422 for (zap_cursor_init(&zc, mos, spa->spa_pool_props_object); 423 (err = zap_cursor_retrieve(&zc, &za)) == 0; 424 zap_cursor_advance(&zc)) { 425 uint64_t intval = 0; 426 char *strval = NULL; 427 zprop_source_t src = ZPROP_SRC_DEFAULT; 428 zpool_prop_t prop; 429 430 if ((prop = zpool_name_to_prop(za.za_name)) == ZPOOL_PROP_INVAL) 431 continue; 432 433 switch (za.za_integer_length) { 434 case 8: 435 /* integer property */ 436 if (za.za_first_integer != 437 zpool_prop_default_numeric(prop)) 438 src = ZPROP_SRC_LOCAL; 439 440 if (prop == ZPOOL_PROP_BOOTFS) { 441 dsl_pool_t *dp; 442 dsl_dataset_t *ds = NULL; 443 444 dp = spa_get_dsl(spa); 445 dsl_pool_config_enter(dp, FTAG); 446 if (err = dsl_dataset_hold_obj(dp, 447 za.za_first_integer, FTAG, &ds)) { 448 dsl_pool_config_exit(dp, FTAG); 449 break; 450 } 451 452 strval = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, 453 KM_SLEEP); 454 dsl_dataset_name(ds, strval); 455 dsl_dataset_rele(ds, FTAG); 456 dsl_pool_config_exit(dp, FTAG); 457 } else { 458 strval = NULL; 459 intval = za.za_first_integer; 460 } 461 462 spa_prop_add_list(*nvp, prop, strval, intval, src); 463 464 if (strval != NULL) 465 kmem_free(strval, ZFS_MAX_DATASET_NAME_LEN); 466 467 break; 468 469 case 1: 470 /* string property */ 471 strval = kmem_alloc(za.za_num_integers, KM_SLEEP); 472 err = zap_lookup(mos, spa->spa_pool_props_object, 473 za.za_name, 1, za.za_num_integers, strval); 474 if (err) { 475 kmem_free(strval, za.za_num_integers); 476 break; 477 } 478 spa_prop_add_list(*nvp, prop, strval, 0, src); 479 kmem_free(strval, za.za_num_integers); 480 break; 481 482 default: 483 break; 484 } 485 } 486 zap_cursor_fini(&zc); 487 mutex_exit(&spa->spa_props_lock); 488out: 489 if (err && err != ENOENT) { 490 nvlist_free(*nvp); 491 *nvp = NULL; 492 return (err); 493 } 494 495 return (0); 496} 497 498/* 499 * Validate the given pool properties nvlist and modify the list 500 * for the property values to be set. 501 */ 502static int 503spa_prop_validate(spa_t *spa, nvlist_t *props) 504{ 505 nvpair_t *elem; 506 int error = 0, reset_bootfs = 0; 507 uint64_t objnum = 0; 508 boolean_t has_feature = B_FALSE; 509 510 elem = NULL; 511 while ((elem = nvlist_next_nvpair(props, elem)) != NULL) { 512 uint64_t intval; 513 char *strval, *slash, *check, *fname; 514 const char *propname = nvpair_name(elem); 515 zpool_prop_t prop = zpool_name_to_prop(propname); 516 517 switch (prop) { 518 case ZPOOL_PROP_INVAL: 519 if (!zpool_prop_feature(propname)) { 520 error = SET_ERROR(EINVAL); 521 break; 522 } 523 524 /* 525 * Sanitize the input. 
526 */ 527 if (nvpair_type(elem) != DATA_TYPE_UINT64) { 528 error = SET_ERROR(EINVAL); 529 break; 530 } 531 532 if (nvpair_value_uint64(elem, &intval) != 0) { 533 error = SET_ERROR(EINVAL); 534 break; 535 } 536 537 if (intval != 0) { 538 error = SET_ERROR(EINVAL); 539 break; 540 } 541 542 fname = strchr(propname, '@') + 1; 543 if (zfeature_lookup_name(fname, NULL) != 0) { 544 error = SET_ERROR(EINVAL); 545 break; 546 } 547 548 has_feature = B_TRUE; 549 break; 550 551 case ZPOOL_PROP_VERSION: 552 error = nvpair_value_uint64(elem, &intval); 553 if (!error && 554 (intval < spa_version(spa) || 555 intval > SPA_VERSION_BEFORE_FEATURES || 556 has_feature)) 557 error = SET_ERROR(EINVAL); 558 break; 559 560 case ZPOOL_PROP_DELEGATION: 561 case ZPOOL_PROP_AUTOREPLACE: 562 case ZPOOL_PROP_LISTSNAPS: 563 case ZPOOL_PROP_AUTOEXPAND: 564 error = nvpair_value_uint64(elem, &intval); 565 if (!error && intval > 1) 566 error = SET_ERROR(EINVAL); 567 break; 568 569 case ZPOOL_PROP_BOOTFS: 570 /* 571 * If the pool version is less than SPA_VERSION_BOOTFS, 572 * or the pool is still being created (version == 0), 573 * the bootfs property cannot be set. 574 */ 575 if (spa_version(spa) < SPA_VERSION_BOOTFS) { 576 error = SET_ERROR(ENOTSUP); 577 break; 578 } 579 580 /* 581 * Make sure the vdev config is bootable 582 */ 583 if (!vdev_is_bootable(spa->spa_root_vdev)) { 584 error = SET_ERROR(ENOTSUP); 585 break; 586 } 587 588 reset_bootfs = 1; 589 590 error = nvpair_value_string(elem, &strval); 591 592 if (!error) { 593 objset_t *os; 594 uint64_t propval; 595 596 if (strval == NULL || strval[0] == '\0') { 597 objnum = zpool_prop_default_numeric( 598 ZPOOL_PROP_BOOTFS); 599 break; 600 } 601 602 if (error = dmu_objset_hold(strval, FTAG, &os)) 603 break; 604 605 /* 606 * Must be ZPL, and its property settings 607 * must be supported by GRUB (compression 608 * is not gzip, and large blocks are not used). 609 */ 610 611 if (dmu_objset_type(os) != DMU_OST_ZFS) { 612 error = SET_ERROR(ENOTSUP); 613 } else if ((error = 614 dsl_prop_get_int_ds(dmu_objset_ds(os), 615 zfs_prop_to_name(ZFS_PROP_COMPRESSION), 616 &propval)) == 0 && 617 !BOOTFS_COMPRESS_VALID(propval)) { 618 error = SET_ERROR(ENOTSUP); 619 } else { 620 objnum = dmu_objset_id(os); 621 } 622 dmu_objset_rele(os, FTAG); 623 } 624 break; 625 626 case ZPOOL_PROP_FAILUREMODE: 627 error = nvpair_value_uint64(elem, &intval); 628 if (!error && (intval < ZIO_FAILURE_MODE_WAIT || 629 intval > ZIO_FAILURE_MODE_PANIC)) 630 error = SET_ERROR(EINVAL); 631 632 /* 633 * This is a special case which only occurs when 634 * the pool has completely failed. This allows 635 * the user to change the in-core failmode property 636 * without syncing it out to disk (I/Os might 637 * currently be blocked). We do this by returning 638 * EIO to the caller (spa_prop_set) to trick it 639 * into thinking we encountered a property validation 640 * error. 
641 */ 642 if (!error && spa_suspended(spa)) { 643 spa->spa_failmode = intval; 644 error = SET_ERROR(EIO); 645 } 646 break; 647 648 case ZPOOL_PROP_CACHEFILE: 649 if ((error = nvpair_value_string(elem, &strval)) != 0) 650 break; 651 652 if (strval[0] == '\0') 653 break; 654 655 if (strcmp(strval, "none") == 0) 656 break; 657 658 if (strval[0] != '/') { 659 error = SET_ERROR(EINVAL); 660 break; 661 } 662 663 slash = strrchr(strval, '/'); 664 ASSERT(slash != NULL); 665 666 if (slash[1] == '\0' || strcmp(slash, "/.") == 0 || 667 strcmp(slash, "/..") == 0) 668 error = SET_ERROR(EINVAL); 669 break; 670 671 case ZPOOL_PROP_COMMENT: 672 if ((error = nvpair_value_string(elem, &strval)) != 0) 673 break; 674 for (check = strval; *check != '\0'; check++) { 675 /* 676 * The kernel doesn't have an easy isprint() 677 * check. For this kernel check, we merely 678 * check ASCII apart from DEL. Fix this if 679 * there is an easy-to-use kernel isprint(). 680 */ 681 if (*check >= 0x7f) { 682 error = SET_ERROR(EINVAL); 683 break; 684 } 685 } 686 if (strlen(strval) > ZPROP_MAX_COMMENT) 687 error = E2BIG; 688 break; 689 690 case ZPOOL_PROP_DEDUPDITTO: 691 if (spa_version(spa) < SPA_VERSION_DEDUP) 692 error = SET_ERROR(ENOTSUP); 693 else 694 error = nvpair_value_uint64(elem, &intval); 695 if (error == 0 && 696 intval != 0 && intval < ZIO_DEDUPDITTO_MIN) 697 error = SET_ERROR(EINVAL); 698 break; 699 } 700 701 if (error) 702 break; 703 } 704 705 if (!error && reset_bootfs) { 706 error = nvlist_remove(props, 707 zpool_prop_to_name(ZPOOL_PROP_BOOTFS), DATA_TYPE_STRING); 708 709 if (!error) { 710 error = nvlist_add_uint64(props, 711 zpool_prop_to_name(ZPOOL_PROP_BOOTFS), objnum); 712 } 713 } 714 715 return (error); 716} 717 718void 719spa_configfile_set(spa_t *spa, nvlist_t *nvp, boolean_t need_sync) 720{ 721 char *cachefile; 722 spa_config_dirent_t *dp; 723 724 if (nvlist_lookup_string(nvp, zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), 725 &cachefile) != 0) 726 return; 727 728 dp = kmem_alloc(sizeof (spa_config_dirent_t), 729 KM_SLEEP); 730 731 if (cachefile[0] == '\0') 732 dp->scd_path = spa_strdup(spa_config_path); 733 else if (strcmp(cachefile, "none") == 0) 734 dp->scd_path = NULL; 735 else 736 dp->scd_path = spa_strdup(cachefile); 737 738 list_insert_head(&spa->spa_config_list, dp); 739 if (need_sync) 740 spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE); 741} 742 743int 744spa_prop_set(spa_t *spa, nvlist_t *nvp) 745{ 746 int error; 747 nvpair_t *elem = NULL; 748 boolean_t need_sync = B_FALSE; 749 750 if ((error = spa_prop_validate(spa, nvp)) != 0) 751 return (error); 752 753 while ((elem = nvlist_next_nvpair(nvp, elem)) != NULL) { 754 zpool_prop_t prop = zpool_name_to_prop(nvpair_name(elem)); 755 756 if (prop == ZPOOL_PROP_CACHEFILE || 757 prop == ZPOOL_PROP_ALTROOT || 758 prop == ZPOOL_PROP_READONLY) 759 continue; 760 761 if (prop == ZPOOL_PROP_VERSION || prop == ZPOOL_PROP_INVAL) { 762 uint64_t ver; 763 764 if (prop == ZPOOL_PROP_VERSION) { 765 VERIFY(nvpair_value_uint64(elem, &ver) == 0); 766 } else { 767 ASSERT(zpool_prop_feature(nvpair_name(elem))); 768 ver = SPA_VERSION_FEATURES; 769 need_sync = B_TRUE; 770 } 771 772 /* Save time if the version is already set. */ 773 if (ver == spa_version(spa)) 774 continue; 775 776 /* 777 * In addition to the pool directory object, we might 778 * create the pool properties object, the features for 779 * read object, the features for write object, or the 780 * feature descriptions object. 
781 */ 782 error = dsl_sync_task(spa->spa_name, NULL, 783 spa_sync_version, &ver, 784 6, ZFS_SPACE_CHECK_RESERVED); 785 if (error) 786 return (error); 787 continue; 788 } 789 790 need_sync = B_TRUE; 791 break; 792 } 793 794 if (need_sync) { 795 return (dsl_sync_task(spa->spa_name, NULL, spa_sync_props, 796 nvp, 6, ZFS_SPACE_CHECK_RESERVED)); 797 } 798 799 return (0); 800} 801 802/* 803 * If the bootfs property value is dsobj, clear it. 804 */ 805void 806spa_prop_clear_bootfs(spa_t *spa, uint64_t dsobj, dmu_tx_t *tx) 807{ 808 if (spa->spa_bootfs == dsobj && spa->spa_pool_props_object != 0) { 809 VERIFY(zap_remove(spa->spa_meta_objset, 810 spa->spa_pool_props_object, 811 zpool_prop_to_name(ZPOOL_PROP_BOOTFS), tx) == 0); 812 spa->spa_bootfs = 0; 813 } 814} 815 816/*ARGSUSED*/ 817static int 818spa_change_guid_check(void *arg, dmu_tx_t *tx) 819{ 820 uint64_t *newguid = arg; 821 spa_t *spa = dmu_tx_pool(tx)->dp_spa; 822 vdev_t *rvd = spa->spa_root_vdev; 823 uint64_t vdev_state; 824 825 if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) { 826 int error = (spa_has_checkpoint(spa)) ? 827 ZFS_ERR_CHECKPOINT_EXISTS : ZFS_ERR_DISCARDING_CHECKPOINT; 828 return (SET_ERROR(error)); 829 } 830 831 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER); 832 vdev_state = rvd->vdev_state; 833 spa_config_exit(spa, SCL_STATE, FTAG); 834 835 if (vdev_state != VDEV_STATE_HEALTHY) 836 return (SET_ERROR(ENXIO)); 837 838 ASSERT3U(spa_guid(spa), !=, *newguid); 839 840 return (0); 841} 842 843static void 844spa_change_guid_sync(void *arg, dmu_tx_t *tx) 845{ 846 uint64_t *newguid = arg; 847 spa_t *spa = dmu_tx_pool(tx)->dp_spa; 848 uint64_t oldguid; 849 vdev_t *rvd = spa->spa_root_vdev; 850 851 oldguid = spa_guid(spa); 852 853 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER); 854 rvd->vdev_guid = *newguid; 855 rvd->vdev_guid_sum += (*newguid - oldguid); 856 vdev_config_dirty(rvd); 857 spa_config_exit(spa, SCL_STATE, FTAG); 858 859 spa_history_log_internal(spa, "guid change", tx, "old=%llu new=%llu", 860 oldguid, *newguid); 861} 862 863/* 864 * Change the GUID for the pool. This is done so that we can later 865 * re-import a pool built from a clone of our own vdevs. We will modify 866 * the root vdev's guid, our own pool guid, and then mark all of our 867 * vdevs dirty. Note that we must make sure that all our vdevs are 868 * online when we do this, or else any vdevs that weren't present 869 * would be orphaned from our pool. We are also going to issue a 870 * sysevent to update any watchers. 
871 */ 872int 873spa_change_guid(spa_t *spa) 874{ 875 int error; 876 uint64_t guid; 877 878 mutex_enter(&spa->spa_vdev_top_lock); 879 mutex_enter(&spa_namespace_lock); 880 guid = spa_generate_guid(NULL); 881 882 error = dsl_sync_task(spa->spa_name, spa_change_guid_check, 883 spa_change_guid_sync, &guid, 5, ZFS_SPACE_CHECK_RESERVED); 884 885 if (error == 0) { 886 spa_write_cachefile(spa, B_FALSE, B_TRUE); 887 spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_REGUID); 888 } 889 890 mutex_exit(&spa_namespace_lock); 891 mutex_exit(&spa->spa_vdev_top_lock); 892 893 return (error); 894} 895 896/* 897 * ========================================================================== 898 * SPA state manipulation (open/create/destroy/import/export) 899 * ========================================================================== 900 */ 901 902static int 903spa_error_entry_compare(const void *a, const void *b) 904{ 905 spa_error_entry_t *sa = (spa_error_entry_t *)a; 906 spa_error_entry_t *sb = (spa_error_entry_t *)b; 907 int ret; 908 909 ret = bcmp(&sa->se_bookmark, &sb->se_bookmark, 910 sizeof (zbookmark_phys_t)); 911 912 if (ret < 0) 913 return (-1); 914 else if (ret > 0) 915 return (1); 916 else 917 return (0); 918} 919 920/* 921 * Utility function which retrieves copies of the current logs and 922 * re-initializes them in the process. 923 */ 924void 925spa_get_errlists(spa_t *spa, avl_tree_t *last, avl_tree_t *scrub) 926{ 927 ASSERT(MUTEX_HELD(&spa->spa_errlist_lock)); 928 929 bcopy(&spa->spa_errlist_last, last, sizeof (avl_tree_t)); 930 bcopy(&spa->spa_errlist_scrub, scrub, sizeof (avl_tree_t)); 931 932 avl_create(&spa->spa_errlist_scrub, 933 spa_error_entry_compare, sizeof (spa_error_entry_t), 934 offsetof(spa_error_entry_t, se_avl)); 935 avl_create(&spa->spa_errlist_last, 936 spa_error_entry_compare, sizeof (spa_error_entry_t), 937 offsetof(spa_error_entry_t, se_avl)); 938} 939 940static void 941spa_taskqs_init(spa_t *spa, zio_type_t t, zio_taskq_type_t q) 942{ 943 const zio_taskq_info_t *ztip = &zio_taskqs[t][q]; 944 enum zti_modes mode = ztip->zti_mode; 945 uint_t value = ztip->zti_value; 946 uint_t count = ztip->zti_count; 947 spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q]; 948 char name[32]; 949 uint_t flags = 0; 950 boolean_t batch = B_FALSE; 951 952 if (mode == ZTI_MODE_NULL) { 953 tqs->stqs_count = 0; 954 tqs->stqs_taskq = NULL; 955 return; 956 } 957 958 ASSERT3U(count, >, 0); 959 960 tqs->stqs_count = count; 961 tqs->stqs_taskq = kmem_alloc(count * sizeof (taskq_t *), KM_SLEEP); 962 963 switch (mode) { 964 case ZTI_MODE_FIXED: 965 ASSERT3U(value, >=, 1); 966 value = MAX(value, 1); 967 break; 968 969 case ZTI_MODE_BATCH: 970 batch = B_TRUE; 971 flags |= TASKQ_THREADS_CPU_PCT; 972 value = zio_taskq_batch_pct; 973 break; 974 975 default: 976 panic("unrecognized mode for %s_%s taskq (%u:%u) in " 977 "spa_activate()", 978 zio_type_name[t], zio_taskq_types[q], mode, value); 979 break; 980 } 981 982 for (uint_t i = 0; i < count; i++) { 983 taskq_t *tq; 984 985 if (count > 1) { 986 (void) snprintf(name, sizeof (name), "%s_%s_%u", 987 zio_type_name[t], zio_taskq_types[q], i); 988 } else { 989 (void) snprintf(name, sizeof (name), "%s_%s", 990 zio_type_name[t], zio_taskq_types[q]); 991 } 992 993#ifdef SYSDC 994 if (zio_taskq_sysdc && spa->spa_proc != &p0) { 995 if (batch) 996 flags |= TASKQ_DC_BATCH; 997 998 tq = taskq_create_sysdc(name, value, 50, INT_MAX, 999 spa->spa_proc, zio_taskq_basedc, flags); 1000 } else { 1001#endif 1002 pri_t pri = maxclsyspri; 1003 /* 1004 * The write issue taskq can be 
extremely CPU 1005 * intensive. Run it at slightly lower priority 1006 * than the other taskqs. 1007 * FreeBSD notes: 1008 * - numerically higher priorities are lower priorities; 1009 * - if priorities divided by four (RQ_PPQ) are equal 1010 * then a difference between them is insignificant. 1011 */ 1012 if (t == ZIO_TYPE_WRITE && q == ZIO_TASKQ_ISSUE) 1013#ifdef illumos 1014 pri--; 1015#else 1016 pri += 4; 1017#endif 1018 1019 tq = taskq_create_proc(name, value, pri, 50, 1020 INT_MAX, spa->spa_proc, flags); 1021#ifdef SYSDC 1022 } 1023#endif 1024 1025 tqs->stqs_taskq[i] = tq; 1026 } 1027} 1028 1029static void 1030spa_taskqs_fini(spa_t *spa, zio_type_t t, zio_taskq_type_t q) 1031{ 1032 spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q]; 1033 1034 if (tqs->stqs_taskq == NULL) { 1035 ASSERT0(tqs->stqs_count); 1036 return; 1037 } 1038 1039 for (uint_t i = 0; i < tqs->stqs_count; i++) { 1040 ASSERT3P(tqs->stqs_taskq[i], !=, NULL); 1041 taskq_destroy(tqs->stqs_taskq[i]); 1042 } 1043 1044 kmem_free(tqs->stqs_taskq, tqs->stqs_count * sizeof (taskq_t *)); 1045 tqs->stqs_taskq = NULL; 1046} 1047 1048/* 1049 * Dispatch a task to the appropriate taskq for the ZFS I/O type and priority. 1050 * Note that a type may have multiple discrete taskqs to avoid lock contention 1051 * on the taskq itself. In that case we choose which taskq at random by using 1052 * the low bits of gethrtime(). 1053 */ 1054void 1055spa_taskq_dispatch_ent(spa_t *spa, zio_type_t t, zio_taskq_type_t q, 1056 task_func_t *func, void *arg, uint_t flags, taskq_ent_t *ent) 1057{ 1058 spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q]; 1059 taskq_t *tq; 1060 1061 ASSERT3P(tqs->stqs_taskq, !=, NULL); 1062 ASSERT3U(tqs->stqs_count, !=, 0); 1063 1064 if (tqs->stqs_count == 1) { 1065 tq = tqs->stqs_taskq[0]; 1066 } else { 1067#ifdef _KERNEL 1068 tq = tqs->stqs_taskq[cpu_ticks() % tqs->stqs_count]; 1069#else 1070 tq = tqs->stqs_taskq[gethrtime() % tqs->stqs_count]; 1071#endif 1072 } 1073 1074 taskq_dispatch_ent(tq, func, arg, flags, ent); 1075} 1076 1077static void 1078spa_create_zio_taskqs(spa_t *spa) 1079{ 1080 for (int t = 0; t < ZIO_TYPES; t++) { 1081 for (int q = 0; q < ZIO_TASKQ_TYPES; q++) { 1082 spa_taskqs_init(spa, t, q); 1083 } 1084 } 1085} 1086 1087#ifdef _KERNEL 1088#ifdef SPA_PROCESS 1089static void 1090spa_thread(void *arg) 1091{ 1092 callb_cpr_t cprinfo; 1093 1094 spa_t *spa = arg; 1095 user_t *pu = PTOU(curproc); 1096 1097 CALLB_CPR_INIT(&cprinfo, &spa->spa_proc_lock, callb_generic_cpr, 1098 spa->spa_name); 1099 1100 ASSERT(curproc != &p0); 1101 (void) snprintf(pu->u_psargs, sizeof (pu->u_psargs), 1102 "zpool-%s", spa->spa_name); 1103 (void) strlcpy(pu->u_comm, pu->u_psargs, sizeof (pu->u_comm)); 1104 1105#ifdef PSRSET_BIND 1106 /* bind this thread to the requested psrset */ 1107 if (zio_taskq_psrset_bind != PS_NONE) { 1108 pool_lock(); 1109 mutex_enter(&cpu_lock); 1110 mutex_enter(&pidlock); 1111 mutex_enter(&curproc->p_lock); 1112 1113 if (cpupart_bind_thread(curthread, zio_taskq_psrset_bind, 1114 0, NULL, NULL) == 0) { 1115 curthread->t_bind_pset = zio_taskq_psrset_bind; 1116 } else { 1117 cmn_err(CE_WARN, 1118 "Couldn't bind process for zfs pool \"%s\" to " 1119 "pset %d\n", spa->spa_name, zio_taskq_psrset_bind); 1120 } 1121 1122 mutex_exit(&curproc->p_lock); 1123 mutex_exit(&pidlock); 1124 mutex_exit(&cpu_lock); 1125 pool_unlock(); 1126 } 1127#endif 1128 1129#ifdef SYSDC 1130 if (zio_taskq_sysdc) { 1131 sysdc_thread_enter(curthread, 100, 0); 1132 } 1133#endif 1134 1135 spa->spa_proc = curproc; 1136 spa->spa_did = curthread->t_did; 
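	/*
	 * Now that spa_proc and spa_did identify the new process, create
	 * the zio taskqs in its context, mark the process active so the
	 * waiter in spa_activate() can proceed, and then park in the
	 * CPR-safe loop below until spa_deactivate() asks us to exit.
	 */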
1137 1138 spa_create_zio_taskqs(spa); 1139 1140 mutex_enter(&spa->spa_proc_lock); 1141 ASSERT(spa->spa_proc_state == SPA_PROC_CREATED); 1142 1143 spa->spa_proc_state = SPA_PROC_ACTIVE; 1144 cv_broadcast(&spa->spa_proc_cv); 1145 1146 CALLB_CPR_SAFE_BEGIN(&cprinfo); 1147 while (spa->spa_proc_state == SPA_PROC_ACTIVE) 1148 cv_wait(&spa->spa_proc_cv, &spa->spa_proc_lock); 1149 CALLB_CPR_SAFE_END(&cprinfo, &spa->spa_proc_lock); 1150 1151 ASSERT(spa->spa_proc_state == SPA_PROC_DEACTIVATE); 1152 spa->spa_proc_state = SPA_PROC_GONE; 1153 spa->spa_proc = &p0; 1154 cv_broadcast(&spa->spa_proc_cv); 1155 CALLB_CPR_EXIT(&cprinfo); /* drops spa_proc_lock */ 1156 1157 mutex_enter(&curproc->p_lock); 1158 lwp_exit(); 1159} 1160#endif /* SPA_PROCESS */ 1161#endif 1162 1163/* 1164 * Activate an uninitialized pool. 1165 */ 1166static void 1167spa_activate(spa_t *spa, int mode) 1168{ 1169 ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED); 1170 1171 spa->spa_state = POOL_STATE_ACTIVE; 1172 spa->spa_mode = mode; 1173 1174 spa->spa_normal_class = metaslab_class_create(spa, zfs_metaslab_ops); 1175 spa->spa_log_class = metaslab_class_create(spa, zfs_metaslab_ops); 1176 1177 /* Try to create a covering process */ 1178 mutex_enter(&spa->spa_proc_lock); 1179 ASSERT(spa->spa_proc_state == SPA_PROC_NONE); 1180 ASSERT(spa->spa_proc == &p0); 1181 spa->spa_did = 0; 1182 1183#ifdef SPA_PROCESS 1184 /* Only create a process if we're going to be around a while. */ 1185 if (spa_create_process && strcmp(spa->spa_name, TRYIMPORT_NAME) != 0) { 1186 if (newproc(spa_thread, (caddr_t)spa, syscid, maxclsyspri, 1187 NULL, 0) == 0) { 1188 spa->spa_proc_state = SPA_PROC_CREATED; 1189 while (spa->spa_proc_state == SPA_PROC_CREATED) { 1190 cv_wait(&spa->spa_proc_cv, 1191 &spa->spa_proc_lock); 1192 } 1193 ASSERT(spa->spa_proc_state == SPA_PROC_ACTIVE); 1194 ASSERT(spa->spa_proc != &p0); 1195 ASSERT(spa->spa_did != 0); 1196 } else { 1197#ifdef _KERNEL 1198 cmn_err(CE_WARN, 1199 "Couldn't create process for zfs pool \"%s\"\n", 1200 spa->spa_name); 1201#endif 1202 } 1203 } 1204#endif /* SPA_PROCESS */ 1205 mutex_exit(&spa->spa_proc_lock); 1206 1207 /* If we didn't create a process, we need to create our taskqs. */ 1208 ASSERT(spa->spa_proc == &p0); 1209 if (spa->spa_proc == &p0) { 1210 spa_create_zio_taskqs(spa); 1211 } 1212 1213 /* 1214 * Start TRIM thread. 1215 */ 1216 trim_thread_create(spa); 1217 1218 for (size_t i = 0; i < TXG_SIZE; i++) 1219 spa->spa_txg_zio[i] = zio_root(spa, NULL, NULL, 0); 1220 1221 list_create(&spa->spa_config_dirty_list, sizeof (vdev_t), 1222 offsetof(vdev_t, vdev_config_dirty_node)); 1223 list_create(&spa->spa_evicting_os_list, sizeof (objset_t), 1224 offsetof(objset_t, os_evicting_node)); 1225 list_create(&spa->spa_state_dirty_list, sizeof (vdev_t), 1226 offsetof(vdev_t, vdev_state_dirty_node)); 1227 1228 txg_list_create(&spa->spa_vdev_txg_list, spa, 1229 offsetof(struct vdev, vdev_txg_node)); 1230 1231 avl_create(&spa->spa_errlist_scrub, 1232 spa_error_entry_compare, sizeof (spa_error_entry_t), 1233 offsetof(spa_error_entry_t, se_avl)); 1234 avl_create(&spa->spa_errlist_last, 1235 spa_error_entry_compare, sizeof (spa_error_entry_t), 1236 offsetof(spa_error_entry_t, se_avl)); 1237} 1238 1239/* 1240 * Opposite of spa_activate(). 
1241 */ 1242static void 1243spa_deactivate(spa_t *spa) 1244{ 1245 ASSERT(spa->spa_sync_on == B_FALSE); 1246 ASSERT(spa->spa_dsl_pool == NULL); 1247 ASSERT(spa->spa_root_vdev == NULL); 1248 ASSERT(spa->spa_async_zio_root == NULL); 1249 ASSERT(spa->spa_state != POOL_STATE_UNINITIALIZED); 1250 1251 /* 1252 * Stop TRIM thread in case spa_unload() wasn't called directly 1253 * before spa_deactivate(). 1254 */ 1255 trim_thread_destroy(spa); 1256 1257 spa_evicting_os_wait(spa); 1258 1259 txg_list_destroy(&spa->spa_vdev_txg_list); 1260 1261 list_destroy(&spa->spa_config_dirty_list); 1262 list_destroy(&spa->spa_evicting_os_list); 1263 list_destroy(&spa->spa_state_dirty_list); 1264 1265 for (int t = 0; t < ZIO_TYPES; t++) { 1266 for (int q = 0; q < ZIO_TASKQ_TYPES; q++) { 1267 spa_taskqs_fini(spa, t, q); 1268 } 1269 } 1270 1271 for (size_t i = 0; i < TXG_SIZE; i++) { 1272 ASSERT3P(spa->spa_txg_zio[i], !=, NULL); 1273 VERIFY0(zio_wait(spa->spa_txg_zio[i])); 1274 spa->spa_txg_zio[i] = NULL; 1275 } 1276 1277 metaslab_class_destroy(spa->spa_normal_class); 1278 spa->spa_normal_class = NULL; 1279 1280 metaslab_class_destroy(spa->spa_log_class); 1281 spa->spa_log_class = NULL; 1282 1283 /* 1284 * If this was part of an import or the open otherwise failed, we may 1285 * still have errors left in the queues. Empty them just in case. 1286 */ 1287 spa_errlog_drain(spa); 1288 1289 avl_destroy(&spa->spa_errlist_scrub); 1290 avl_destroy(&spa->spa_errlist_last); 1291 1292 spa->spa_state = POOL_STATE_UNINITIALIZED; 1293 1294 mutex_enter(&spa->spa_proc_lock); 1295 if (spa->spa_proc_state != SPA_PROC_NONE) { 1296 ASSERT(spa->spa_proc_state == SPA_PROC_ACTIVE); 1297 spa->spa_proc_state = SPA_PROC_DEACTIVATE; 1298 cv_broadcast(&spa->spa_proc_cv); 1299 while (spa->spa_proc_state == SPA_PROC_DEACTIVATE) { 1300 ASSERT(spa->spa_proc != &p0); 1301 cv_wait(&spa->spa_proc_cv, &spa->spa_proc_lock); 1302 } 1303 ASSERT(spa->spa_proc_state == SPA_PROC_GONE); 1304 spa->spa_proc_state = SPA_PROC_NONE; 1305 } 1306 ASSERT(spa->spa_proc == &p0); 1307 mutex_exit(&spa->spa_proc_lock); 1308 1309#ifdef SPA_PROCESS 1310 /* 1311 * We want to make sure spa_thread() has actually exited the ZFS 1312 * module, so that the module can't be unloaded out from underneath 1313 * it. 1314 */ 1315 if (spa->spa_did != 0) { 1316 thread_join(spa->spa_did); 1317 spa->spa_did = 0; 1318 } 1319#endif /* SPA_PROCESS */ 1320} 1321 1322/* 1323 * Verify a pool configuration, and construct the vdev tree appropriately. This 1324 * will create all the necessary vdevs in the appropriate layout, with each vdev 1325 * in the CLOSED state. This will prep the pool before open/creation/import. 1326 * All vdev validation is done by the vdev_alloc() routine. 
1327 */ 1328static int 1329spa_config_parse(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent, 1330 uint_t id, int atype) 1331{ 1332 nvlist_t **child; 1333 uint_t children; 1334 int error; 1335 1336 if ((error = vdev_alloc(spa, vdp, nv, parent, id, atype)) != 0) 1337 return (error); 1338 1339 if ((*vdp)->vdev_ops->vdev_op_leaf) 1340 return (0); 1341 1342 error = nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, 1343 &child, &children); 1344 1345 if (error == ENOENT) 1346 return (0); 1347 1348 if (error) { 1349 vdev_free(*vdp); 1350 *vdp = NULL; 1351 return (SET_ERROR(EINVAL)); 1352 } 1353 1354 for (int c = 0; c < children; c++) { 1355 vdev_t *vd; 1356 if ((error = spa_config_parse(spa, &vd, child[c], *vdp, c, 1357 atype)) != 0) { 1358 vdev_free(*vdp); 1359 *vdp = NULL; 1360 return (error); 1361 } 1362 } 1363 1364 ASSERT(*vdp != NULL); 1365 1366 return (0); 1367} 1368 1369/* 1370 * Opposite of spa_load(). 1371 */ 1372static void 1373spa_unload(spa_t *spa) 1374{ 1375 int i; 1376 1377 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 1378 1379 spa_load_note(spa, "UNLOADING"); 1380 1381 /* 1382 * Stop TRIM thread. 1383 */ 1384 trim_thread_destroy(spa); 1385 1386 /* 1387 * Stop async tasks. 1388 */ 1389 spa_async_suspend(spa); 1390 1391 /* 1392 * Stop syncing. 1393 */ 1394 if (spa->spa_sync_on) { 1395 txg_sync_stop(spa->spa_dsl_pool); 1396 spa->spa_sync_on = B_FALSE; 1397 } 1398 1399 /* 1400 * Even though vdev_free() also calls vdev_metaslab_fini, we need 1401 * to call it earlier, before we wait for async i/o to complete. 1402 * This ensures that there is no async metaslab prefetching, by 1403 * calling taskq_wait(mg_taskq). 1404 */ 1405 if (spa->spa_root_vdev != NULL) { 1406 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 1407 for (int c = 0; c < spa->spa_root_vdev->vdev_children; c++) 1408 vdev_metaslab_fini(spa->spa_root_vdev->vdev_child[c]); 1409 spa_config_exit(spa, SCL_ALL, FTAG); 1410 } 1411 1412 /* 1413 * Wait for any outstanding async I/O to complete. 1414 */ 1415 if (spa->spa_async_zio_root != NULL) { 1416 for (int i = 0; i < max_ncpus; i++) 1417 (void) zio_wait(spa->spa_async_zio_root[i]); 1418 kmem_free(spa->spa_async_zio_root, max_ncpus * sizeof (void *)); 1419 spa->spa_async_zio_root = NULL; 1420 } 1421 1422 if (spa->spa_vdev_removal != NULL) { 1423 spa_vdev_removal_destroy(spa->spa_vdev_removal); 1424 spa->spa_vdev_removal = NULL; 1425 } 1426 1427 if (spa->spa_condense_zthr != NULL) { 1428 ASSERT(!zthr_isrunning(spa->spa_condense_zthr)); 1429 zthr_destroy(spa->spa_condense_zthr); 1430 spa->spa_condense_zthr = NULL; 1431 } 1432 1433 if (spa->spa_checkpoint_discard_zthr != NULL) { 1434 ASSERT(!zthr_isrunning(spa->spa_checkpoint_discard_zthr)); 1435 zthr_destroy(spa->spa_checkpoint_discard_zthr); 1436 spa->spa_checkpoint_discard_zthr = NULL; 1437 } 1438 1439 spa_condense_fini(spa); 1440 1441 bpobj_close(&spa->spa_deferred_bpobj); 1442 1443 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 1444 1445 /* 1446 * Close all vdevs. 1447 */ 1448 if (spa->spa_root_vdev) 1449 vdev_free(spa->spa_root_vdev); 1450 ASSERT(spa->spa_root_vdev == NULL); 1451 1452 /* 1453 * Close the dsl pool. 
1454 */ 1455 if (spa->spa_dsl_pool) { 1456 dsl_pool_close(spa->spa_dsl_pool); 1457 spa->spa_dsl_pool = NULL; 1458 spa->spa_meta_objset = NULL; 1459 } 1460 1461 ddt_unload(spa); 1462 1463 /* 1464 * Drop and purge level 2 cache 1465 */ 1466 spa_l2cache_drop(spa); 1467 1468 for (i = 0; i < spa->spa_spares.sav_count; i++) 1469 vdev_free(spa->spa_spares.sav_vdevs[i]); 1470 if (spa->spa_spares.sav_vdevs) { 1471 kmem_free(spa->spa_spares.sav_vdevs, 1472 spa->spa_spares.sav_count * sizeof (void *)); 1473 spa->spa_spares.sav_vdevs = NULL; 1474 } 1475 if (spa->spa_spares.sav_config) { 1476 nvlist_free(spa->spa_spares.sav_config); 1477 spa->spa_spares.sav_config = NULL; 1478 } 1479 spa->spa_spares.sav_count = 0; 1480 1481 for (i = 0; i < spa->spa_l2cache.sav_count; i++) { 1482 vdev_clear_stats(spa->spa_l2cache.sav_vdevs[i]); 1483 vdev_free(spa->spa_l2cache.sav_vdevs[i]); 1484 } 1485 if (spa->spa_l2cache.sav_vdevs) { 1486 kmem_free(spa->spa_l2cache.sav_vdevs, 1487 spa->spa_l2cache.sav_count * sizeof (void *)); 1488 spa->spa_l2cache.sav_vdevs = NULL; 1489 } 1490 if (spa->spa_l2cache.sav_config) { 1491 nvlist_free(spa->spa_l2cache.sav_config); 1492 spa->spa_l2cache.sav_config = NULL; 1493 } 1494 spa->spa_l2cache.sav_count = 0; 1495 1496 spa->spa_async_suspended = 0; 1497 1498 spa->spa_indirect_vdevs_loaded = B_FALSE; 1499 1500 if (spa->spa_comment != NULL) { 1501 spa_strfree(spa->spa_comment); 1502 spa->spa_comment = NULL; 1503 } 1504 1505 spa_config_exit(spa, SCL_ALL, FTAG); 1506} 1507 1508/* 1509 * Load (or re-load) the current list of vdevs describing the active spares for 1510 * this pool. When this is called, we have some form of basic information in 1511 * 'spa_spares.sav_config'. We parse this into vdevs, try to open them, and 1512 * then re-generate a more complete list including status information. 1513 */ 1514void 1515spa_load_spares(spa_t *spa) 1516{ 1517 nvlist_t **spares; 1518 uint_t nspares; 1519 int i; 1520 vdev_t *vd, *tvd; 1521 1522#ifndef _KERNEL 1523 /* 1524 * zdb opens both the current state of the pool and the 1525 * checkpointed state (if present), with a different spa_t. 1526 * 1527 * As spare vdevs are shared among open pools, we skip loading 1528 * them when we load the checkpointed state of the pool. 1529 */ 1530 if (!spa_writeable(spa)) 1531 return; 1532#endif 1533 1534 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL); 1535 1536 /* 1537 * First, close and free any existing spare vdevs. 1538 */ 1539 for (i = 0; i < spa->spa_spares.sav_count; i++) { 1540 vd = spa->spa_spares.sav_vdevs[i]; 1541 1542 /* Undo the call to spa_activate() below */ 1543 if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid, 1544 B_FALSE)) != NULL && tvd->vdev_isspare) 1545 spa_spare_remove(tvd); 1546 vdev_close(vd); 1547 vdev_free(vd); 1548 } 1549 1550 if (spa->spa_spares.sav_vdevs) 1551 kmem_free(spa->spa_spares.sav_vdevs, 1552 spa->spa_spares.sav_count * sizeof (void *)); 1553 1554 if (spa->spa_spares.sav_config == NULL) 1555 nspares = 0; 1556 else 1557 VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config, 1558 ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0); 1559 1560 spa->spa_spares.sav_count = (int)nspares; 1561 spa->spa_spares.sav_vdevs = NULL; 1562 1563 if (nspares == 0) 1564 return; 1565 1566 /* 1567 * Construct the array of vdevs, opening them to get status in the 1568 * process. 
For each spare, there are potentially two different vdev_t
 * structures associated with it: one in the list of spares (used only
 * for basic validation purposes) and one in the active vdev
 * configuration (if it's spared in). During this phase we open and
 * validate each vdev on the spare list. If the vdev also exists in the
 * active configuration, then we also mark this vdev as an active spare.
 */
	spa->spa_spares.sav_vdevs = kmem_alloc(nspares * sizeof (void *),
	    KM_SLEEP);
	for (i = 0; i < spa->spa_spares.sav_count; i++) {
		VERIFY(spa_config_parse(spa, &vd, spares[i], NULL, 0,
		    VDEV_ALLOC_SPARE) == 0);
		ASSERT(vd != NULL);

		spa->spa_spares.sav_vdevs[i] = vd;

		if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
		    B_FALSE)) != NULL) {
			if (!tvd->vdev_isspare)
				spa_spare_add(tvd);

			/*
			 * We only mark the spare active if we were successfully
			 * able to load the vdev. Otherwise, importing a pool
			 * with a bad active spare would result in strange
			 * behavior, because multiple pools would think the
			 * spare is actively in use.
			 *
			 * There is a vulnerability here to an equally bizarre
			 * circumstance, where a dead active spare is later
			 * brought back to life (onlined or otherwise). Given
			 * the rarity of this scenario, and the extra complexity
			 * it adds, we ignore the possibility.
			 */
			if (!vdev_is_dead(tvd))
				spa_spare_activate(tvd);
		}

		vd->vdev_top = vd;
		vd->vdev_aux = &spa->spa_spares;

		if (vdev_open(vd) != 0)
			continue;

		if (vdev_validate_aux(vd) == 0)
			spa_spare_add(vd);
	}

	/*
	 * Recompute the stashed list of spares, with status information
	 * this time.
	 */
	VERIFY(nvlist_remove(spa->spa_spares.sav_config, ZPOOL_CONFIG_SPARES,
	    DATA_TYPE_NVLIST_ARRAY) == 0);

	spares = kmem_alloc(spa->spa_spares.sav_count * sizeof (void *),
	    KM_SLEEP);
	for (i = 0; i < spa->spa_spares.sav_count; i++)
		spares[i] = vdev_config_generate(spa,
		    spa->spa_spares.sav_vdevs[i], B_TRUE, VDEV_CONFIG_SPARE);
	VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
	    ZPOOL_CONFIG_SPARES, spares, spa->spa_spares.sav_count) == 0);
	for (i = 0; i < spa->spa_spares.sav_count; i++)
		nvlist_free(spares[i]);
	kmem_free(spares, spa->spa_spares.sav_count * sizeof (void *));
}
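/*
 * Illustrative call pattern (a sketch based on the ASSERTs in these two
 * loaders, not a new interface): spa_load_spares() above and
 * spa_load_l2cache() below both expect the caller to hold every config
 * lock as writer, e.g.
 *
 *	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
 *	spa_load_spares(spa);
 *	spa_load_l2cache(spa);
 *	spa_config_exit(spa, SCL_ALL, FTAG);
 */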
/*
 * Load (or re-load) the current list of vdevs describing the active l2cache
 * for this pool. When this is called, we have some form of basic information
 * in 'spa_l2cache.sav_config'. We parse this into vdevs, try to open them,
 * and then re-generate a more complete list including status information.
 * Devices which are already active have their details maintained, and are
 * not re-opened.
 */
void
spa_load_l2cache(spa_t *spa)
{
	nvlist_t **l2cache;
	uint_t nl2cache;
	int i, j, oldnvdevs;
	uint64_t guid;
	vdev_t *vd, **oldvdevs, **newvdevs;
	spa_aux_vdev_t *sav = &spa->spa_l2cache;

#ifndef _KERNEL
	/*
	 * zdb opens both the current state of the pool and the
	 * checkpointed state (if present), with a different spa_t.
	 *
	 * As L2 caches are part of the ARC which is shared among open
	 * pools, we skip loading them when we load the checkpointed
	 * state of the pool.
	 */
	if (!spa_writeable(spa))
		return;
#endif

	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	if (sav->sav_config != NULL) {
		VERIFY(nvlist_lookup_nvlist_array(sav->sav_config,
		    ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
		newvdevs = kmem_alloc(nl2cache * sizeof (void *), KM_SLEEP);
	} else {
		nl2cache = 0;
		newvdevs = NULL;
	}

	oldvdevs = sav->sav_vdevs;
	oldnvdevs = sav->sav_count;
	sav->sav_vdevs = NULL;
	sav->sav_count = 0;

	/*
	 * Process new nvlist of vdevs.
	 */
	for (i = 0; i < nl2cache; i++) {
		VERIFY(nvlist_lookup_uint64(l2cache[i], ZPOOL_CONFIG_GUID,
		    &guid) == 0);

		newvdevs[i] = NULL;
		for (j = 0; j < oldnvdevs; j++) {
			vd = oldvdevs[j];
			if (vd != NULL && guid == vd->vdev_guid) {
				/*
				 * Retain previous vdev for add/remove ops.
				 */
				newvdevs[i] = vd;
				oldvdevs[j] = NULL;
				break;
			}
		}

		if (newvdevs[i] == NULL) {
			/*
			 * Create new vdev
			 */
			VERIFY(spa_config_parse(spa, &vd, l2cache[i], NULL, 0,
			    VDEV_ALLOC_L2CACHE) == 0);
			ASSERT(vd != NULL);
			newvdevs[i] = vd;

			/*
			 * Commit this vdev as an l2cache device,
			 * even if it fails to open.
			 */
			spa_l2cache_add(vd);

			vd->vdev_top = vd;
			vd->vdev_aux = sav;

			spa_l2cache_activate(vd);

			if (vdev_open(vd) != 0)
				continue;

			(void) vdev_validate_aux(vd);

			if (!vdev_is_dead(vd))
				l2arc_add_vdev(spa, vd);
		}
	}

	/*
	 * Purge vdevs that were dropped
	 */
	for (i = 0; i < oldnvdevs; i++) {
		uint64_t pool;

		vd = oldvdevs[i];
		if (vd != NULL) {
			ASSERT(vd->vdev_isl2cache);

			if (spa_l2cache_exists(vd->vdev_guid, &pool) &&
			    pool != 0ULL && l2arc_vdev_present(vd))
				l2arc_remove_vdev(vd);
			vdev_clear_stats(vd);
			vdev_free(vd);
		}
	}

	if (oldvdevs)
		kmem_free(oldvdevs, oldnvdevs * sizeof (void *));

	if (sav->sav_config == NULL)
		goto out;

	sav->sav_vdevs = newvdevs;
	sav->sav_count = (int)nl2cache;

	/*
	 * Recompute the stashed list of l2cache devices, with status
	 * information this time.
1762 */ 1763 VERIFY(nvlist_remove(sav->sav_config, ZPOOL_CONFIG_L2CACHE, 1764 DATA_TYPE_NVLIST_ARRAY) == 0); 1765 1766 l2cache = kmem_alloc(sav->sav_count * sizeof (void *), KM_SLEEP); 1767 for (i = 0; i < sav->sav_count; i++) 1768 l2cache[i] = vdev_config_generate(spa, 1769 sav->sav_vdevs[i], B_TRUE, VDEV_CONFIG_L2CACHE); 1770 VERIFY(nvlist_add_nvlist_array(sav->sav_config, 1771 ZPOOL_CONFIG_L2CACHE, l2cache, sav->sav_count) == 0); 1772out: 1773 for (i = 0; i < sav->sav_count; i++) 1774 nvlist_free(l2cache[i]); 1775 if (sav->sav_count) 1776 kmem_free(l2cache, sav->sav_count * sizeof (void *)); 1777} 1778 1779static int 1780load_nvlist(spa_t *spa, uint64_t obj, nvlist_t **value) 1781{ 1782 dmu_buf_t *db; 1783 char *packed = NULL; 1784 size_t nvsize = 0; 1785 int error; 1786 *value = NULL; 1787 1788 error = dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db); 1789 if (error != 0) 1790 return (error); 1791 1792 nvsize = *(uint64_t *)db->db_data; 1793 dmu_buf_rele(db, FTAG); 1794 1795 packed = kmem_alloc(nvsize, KM_SLEEP); 1796 error = dmu_read(spa->spa_meta_objset, obj, 0, nvsize, packed, 1797 DMU_READ_PREFETCH); 1798 if (error == 0) 1799 error = nvlist_unpack(packed, nvsize, value, 0); 1800 kmem_free(packed, nvsize); 1801 1802 return (error); 1803} 1804 1805/* 1806 * Concrete top-level vdevs that are not missing and are not logs. At every 1807 * spa_sync we write new uberblocks to at least SPA_SYNC_MIN_VDEVS core tvds. 1808 */ 1809static uint64_t 1810spa_healthy_core_tvds(spa_t *spa) 1811{ 1812 vdev_t *rvd = spa->spa_root_vdev; 1813 uint64_t tvds = 0; 1814 1815 for (uint64_t i = 0; i < rvd->vdev_children; i++) { 1816 vdev_t *vd = rvd->vdev_child[i]; 1817 if (vd->vdev_islog) 1818 continue; 1819 if (vdev_is_concrete(vd) && !vdev_is_dead(vd)) 1820 tvds++; 1821 } 1822 1823 return (tvds); 1824} 1825 1826/* 1827 * Checks to see if the given vdev could not be opened, in which case we post a 1828 * sysevent to notify the autoreplace code that the device has been removed. 1829 */ 1830static void 1831spa_check_removed(vdev_t *vd) 1832{ 1833 for (uint64_t c = 0; c < vd->vdev_children; c++) 1834 spa_check_removed(vd->vdev_child[c]); 1835 1836 if (vd->vdev_ops->vdev_op_leaf && vdev_is_dead(vd) && 1837 vdev_is_concrete(vd)) { 1838 zfs_post_autoreplace(vd->vdev_spa, vd); 1839 spa_event_notify(vd->vdev_spa, vd, NULL, ESC_ZFS_VDEV_CHECK); 1840 } 1841} 1842 1843static int 1844spa_check_for_missing_logs(spa_t *spa) 1845{ 1846 vdev_t *rvd = spa->spa_root_vdev; 1847 1848 /* 1849 * If we're doing a normal import, then build up any additional 1850 * diagnostic information about missing log devices. 1851 * We'll pass this up to the user for further processing. 1852 */ 1853 if (!(spa->spa_import_flags & ZFS_IMPORT_MISSING_LOG)) { 1854 nvlist_t **child, *nv; 1855 uint64_t idx = 0; 1856 1857 child = kmem_alloc(rvd->vdev_children * sizeof (nvlist_t **), 1858 KM_SLEEP); 1859 VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0); 1860 1861 for (uint64_t c = 0; c < rvd->vdev_children; c++) { 1862 vdev_t *tvd = rvd->vdev_child[c]; 1863 1864 /* 1865 * We consider a device as missing only if it failed 1866 * to open (i.e. offline or faulted is not considered 1867 * as missing). 
1868 */ 1869 if (tvd->vdev_islog && 1870 tvd->vdev_state == VDEV_STATE_CANT_OPEN) { 1871 child[idx++] = vdev_config_generate(spa, tvd, 1872 B_FALSE, VDEV_CONFIG_MISSING); 1873 } 1874 } 1875 1876 if (idx > 0) { 1877 fnvlist_add_nvlist_array(nv, 1878 ZPOOL_CONFIG_CHILDREN, child, idx); 1879 fnvlist_add_nvlist(spa->spa_load_info, 1880 ZPOOL_CONFIG_MISSING_DEVICES, nv); 1881 1882 for (uint64_t i = 0; i < idx; i++) 1883 nvlist_free(child[i]); 1884 } 1885 nvlist_free(nv); 1886 kmem_free(child, rvd->vdev_children * sizeof (char **)); 1887 1888 if (idx > 0) { 1889 spa_load_failed(spa, "some log devices are missing"); 1890 return (SET_ERROR(ENXIO)); 1891 } 1892 } else { 1893 for (uint64_t c = 0; c < rvd->vdev_children; c++) { 1894 vdev_t *tvd = rvd->vdev_child[c]; 1895 1896 if (tvd->vdev_islog && 1897 tvd->vdev_state == VDEV_STATE_CANT_OPEN) { 1898 spa_set_log_state(spa, SPA_LOG_CLEAR); 1899 spa_load_note(spa, "some log devices are " 1900 "missing, ZIL is dropped."); 1901 break; 1902 } 1903 } 1904 } 1905 1906 return (0); 1907} 1908 1909/* 1910 * Check for missing log devices 1911 */ 1912static boolean_t 1913spa_check_logs(spa_t *spa) 1914{ 1915 boolean_t rv = B_FALSE; 1916 dsl_pool_t *dp = spa_get_dsl(spa); 1917 1918 switch (spa->spa_log_state) { 1919 case SPA_LOG_MISSING: 1920 /* need to recheck in case slog has been restored */ 1921 case SPA_LOG_UNKNOWN: 1922 rv = (dmu_objset_find_dp(dp, dp->dp_root_dir_obj, 1923 zil_check_log_chain, NULL, DS_FIND_CHILDREN) != 0); 1924 if (rv) 1925 spa_set_log_state(spa, SPA_LOG_MISSING); 1926 break; 1927 } 1928 return (rv); 1929} 1930 1931static boolean_t 1932spa_passivate_log(spa_t *spa) 1933{ 1934 vdev_t *rvd = spa->spa_root_vdev; 1935 boolean_t slog_found = B_FALSE; 1936 1937 ASSERT(spa_config_held(spa, SCL_ALLOC, RW_WRITER)); 1938 1939 if (!spa_has_slogs(spa)) 1940 return (B_FALSE); 1941 1942 for (int c = 0; c < rvd->vdev_children; c++) { 1943 vdev_t *tvd = rvd->vdev_child[c]; 1944 metaslab_group_t *mg = tvd->vdev_mg; 1945 1946 if (tvd->vdev_islog) { 1947 metaslab_group_passivate(mg); 1948 slog_found = B_TRUE; 1949 } 1950 } 1951 1952 return (slog_found); 1953} 1954 1955static void 1956spa_activate_log(spa_t *spa) 1957{ 1958 vdev_t *rvd = spa->spa_root_vdev; 1959 1960 ASSERT(spa_config_held(spa, SCL_ALLOC, RW_WRITER)); 1961 1962 for (int c = 0; c < rvd->vdev_children; c++) { 1963 vdev_t *tvd = rvd->vdev_child[c]; 1964 metaslab_group_t *mg = tvd->vdev_mg; 1965 1966 if (tvd->vdev_islog) 1967 metaslab_group_activate(mg); 1968 } 1969} 1970 1971int 1972spa_reset_logs(spa_t *spa) 1973{ 1974 int error; 1975 1976 error = dmu_objset_find(spa_name(spa), zil_reset, 1977 NULL, DS_FIND_CHILDREN); 1978 if (error == 0) { 1979 /* 1980 * We successfully offlined the log device, sync out the 1981 * current txg so that the "stubby" block can be removed 1982 * by zil_sync(). 
1983 */ 1984 txg_wait_synced(spa->spa_dsl_pool, 0); 1985 } 1986 return (error); 1987} 1988 1989static void 1990spa_aux_check_removed(spa_aux_vdev_t *sav) 1991{ 1992 int i; 1993 1994 for (i = 0; i < sav->sav_count; i++) 1995 spa_check_removed(sav->sav_vdevs[i]); 1996} 1997 1998void 1999spa_claim_notify(zio_t *zio) 2000{ 2001 spa_t *spa = zio->io_spa; 2002 2003 if (zio->io_error) 2004 return; 2005 2006 mutex_enter(&spa->spa_props_lock); /* any mutex will do */ 2007 if (spa->spa_claim_max_txg < zio->io_bp->blk_birth) 2008 spa->spa_claim_max_txg = zio->io_bp->blk_birth; 2009 mutex_exit(&spa->spa_props_lock); 2010} 2011 2012typedef struct spa_load_error { 2013 uint64_t sle_meta_count; 2014 uint64_t sle_data_count; 2015} spa_load_error_t; 2016 2017static void 2018spa_load_verify_done(zio_t *zio) 2019{ 2020 blkptr_t *bp = zio->io_bp; 2021 spa_load_error_t *sle = zio->io_private; 2022 dmu_object_type_t type = BP_GET_TYPE(bp); 2023 int error = zio->io_error; 2024 spa_t *spa = zio->io_spa; 2025 2026 abd_free(zio->io_abd); 2027 if (error) { 2028 if ((BP_GET_LEVEL(bp) != 0 || DMU_OT_IS_METADATA(type)) && 2029 type != DMU_OT_INTENT_LOG) 2030 atomic_inc_64(&sle->sle_meta_count); 2031 else 2032 atomic_inc_64(&sle->sle_data_count); 2033 } 2034 2035 mutex_enter(&spa->spa_scrub_lock); 2036 spa->spa_scrub_inflight--; 2037 cv_broadcast(&spa->spa_scrub_io_cv); 2038 mutex_exit(&spa->spa_scrub_lock); 2039} 2040 2041/* 2042 * Maximum number of concurrent scrub i/os to create while verifying 2043 * a pool while importing it. 2044 */ 2045int spa_load_verify_maxinflight = 10000; 2046boolean_t spa_load_verify_metadata = B_TRUE; 2047boolean_t spa_load_verify_data = B_TRUE; 2048 2049SYSCTL_INT(_vfs_zfs, OID_AUTO, spa_load_verify_maxinflight, CTLFLAG_RWTUN, 2050 &spa_load_verify_maxinflight, 0, 2051 "Maximum number of concurrent scrub I/Os to create while verifying a " 2052 "pool while importing it"); 2053 2054SYSCTL_INT(_vfs_zfs, OID_AUTO, spa_load_verify_metadata, CTLFLAG_RWTUN, 2055 &spa_load_verify_metadata, 0, 2056 "Check metadata on import?"); 2057 2058SYSCTL_INT(_vfs_zfs, OID_AUTO, spa_load_verify_data, CTLFLAG_RWTUN, 2059 &spa_load_verify_data, 0, 2060 "Check user data on import?"); 2061 2062/*ARGSUSED*/ 2063static int 2064spa_load_verify_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp, 2065 const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg) 2066{ 2067 if (bp == NULL || BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp)) 2068 return (0); 2069 /* 2070 * Note: normally this routine will not be called if 2071 * spa_load_verify_metadata is not set. However, it may be useful 2072 * to manually set the flag after the traversal has begun. 
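 * (The flag is exported above as the vfs.zfs.spa_load_verify_metadata
 * sysctl, so it can be changed while a traversal is in progress; the
 * check below picks up the new value on each callback.)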
2073 */ 2074 if (!spa_load_verify_metadata) 2075 return (0); 2076 if (!BP_IS_METADATA(bp) && !spa_load_verify_data) 2077 return (0); 2078 2079 zio_t *rio = arg; 2080 size_t size = BP_GET_PSIZE(bp); 2081 2082 mutex_enter(&spa->spa_scrub_lock); 2083 while (spa->spa_scrub_inflight >= spa_load_verify_maxinflight) 2084 cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock); 2085 spa->spa_scrub_inflight++; 2086 mutex_exit(&spa->spa_scrub_lock); 2087 2088 zio_nowait(zio_read(rio, spa, bp, abd_alloc_for_io(size, B_FALSE), size, 2089 spa_load_verify_done, rio->io_private, ZIO_PRIORITY_SCRUB, 2090 ZIO_FLAG_SPECULATIVE | ZIO_FLAG_CANFAIL | 2091 ZIO_FLAG_SCRUB | ZIO_FLAG_RAW, zb)); 2092 return (0); 2093} 2094 2095/* ARGSUSED */ 2096int 2097verify_dataset_name_len(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg) 2098{ 2099 if (dsl_dataset_namelen(ds) >= ZFS_MAX_DATASET_NAME_LEN) 2100 return (SET_ERROR(ENAMETOOLONG)); 2101 2102 return (0); 2103} 2104 2105static int 2106spa_load_verify(spa_t *spa) 2107{ 2108 zio_t *rio; 2109 spa_load_error_t sle = { 0 }; 2110 zpool_rewind_policy_t policy; 2111 boolean_t verify_ok = B_FALSE; 2112 int error = 0; 2113 2114 zpool_get_rewind_policy(spa->spa_config, &policy); 2115 2116 if (policy.zrp_request & ZPOOL_NEVER_REWIND) 2117 return (0); 2118 2119 dsl_pool_config_enter(spa->spa_dsl_pool, FTAG); 2120 error = dmu_objset_find_dp(spa->spa_dsl_pool, 2121 spa->spa_dsl_pool->dp_root_dir_obj, verify_dataset_name_len, NULL, 2122 DS_FIND_CHILDREN); 2123 dsl_pool_config_exit(spa->spa_dsl_pool, FTAG); 2124 if (error != 0) 2125 return (error); 2126 2127 rio = zio_root(spa, NULL, &sle, 2128 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE); 2129 2130 if (spa_load_verify_metadata) { 2131 if (spa->spa_extreme_rewind) { 2132 spa_load_note(spa, "performing a complete scan of the " 2133 "pool since extreme rewind is on. This may take " 2134 "a very long time.\n (spa_load_verify_data=%u, " 2135 "spa_load_verify_metadata=%u)", 2136 spa_load_verify_data, spa_load_verify_metadata); 2137 } 2138 error = traverse_pool(spa, spa->spa_verify_min_txg, 2139 TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA, 2140 spa_load_verify_cb, rio); 2141 } 2142 2143 (void) zio_wait(rio); 2144 2145 spa->spa_load_meta_errors = sle.sle_meta_count; 2146 spa->spa_load_data_errors = sle.sle_data_count; 2147 2148 if (sle.sle_meta_count != 0 || sle.sle_data_count != 0) { 2149 spa_load_note(spa, "spa_load_verify found %llu metadata errors " 2150 "and %llu data errors", (u_longlong_t)sle.sle_meta_count, 2151 (u_longlong_t)sle.sle_data_count); 2152 } 2153 2154 if (spa_load_verify_dryrun || 2155 (!error && sle.sle_meta_count <= policy.zrp_maxmeta && 2156 sle.sle_data_count <= policy.zrp_maxdata)) { 2157 int64_t loss = 0; 2158 2159 verify_ok = B_TRUE; 2160 spa->spa_load_txg = spa->spa_uberblock.ub_txg; 2161 spa->spa_load_txg_ts = spa->spa_uberblock.ub_timestamp; 2162 2163 loss = spa->spa_last_ubsync_txg_ts - spa->spa_load_txg_ts; 2164 VERIFY(nvlist_add_uint64(spa->spa_load_info, 2165 ZPOOL_CONFIG_LOAD_TIME, spa->spa_load_txg_ts) == 0); 2166 VERIFY(nvlist_add_int64(spa->spa_load_info, 2167 ZPOOL_CONFIG_REWIND_TIME, loss) == 0); 2168 VERIFY(nvlist_add_uint64(spa->spa_load_info, 2169 ZPOOL_CONFIG_LOAD_DATA_ERRORS, sle.sle_data_count) == 0); 2170 } else { 2171 spa->spa_load_max_txg = spa->spa_uberblock.ub_txg; 2172 } 2173 2174 if (spa_load_verify_dryrun) 2175 return (0); 2176 2177 if (error) { 2178 if (error != ENXIO && error != EIO) 2179 error = SET_ERROR(EIO); 2180 return (error); 2181 } 2182 2183 return (verify_ok ? 
0 : EIO); 2184} 2185 2186/* 2187 * Find a value in the pool props object. 2188 */ 2189static void 2190spa_prop_find(spa_t *spa, zpool_prop_t prop, uint64_t *val) 2191{ 2192 (void) zap_lookup(spa->spa_meta_objset, spa->spa_pool_props_object, 2193 zpool_prop_to_name(prop), sizeof (uint64_t), 1, val); 2194} 2195 2196/* 2197 * Find a value in the pool directory object. 2198 */ 2199static int 2200spa_dir_prop(spa_t *spa, const char *name, uint64_t *val, boolean_t log_enoent) 2201{ 2202 int error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT, 2203 name, sizeof (uint64_t), 1, val); 2204 2205 if (error != 0 && (error != ENOENT || log_enoent)) { 2206 spa_load_failed(spa, "couldn't get '%s' value in MOS directory " 2207 "[error=%d]", name, error); 2208 } 2209 2210 return (error); 2211} 2212 2213static int 2214spa_vdev_err(vdev_t *vdev, vdev_aux_t aux, int err) 2215{ 2216 vdev_set_state(vdev, B_TRUE, VDEV_STATE_CANT_OPEN, aux); 2217 return (SET_ERROR(err)); 2218} 2219 2220static void 2221spa_spawn_aux_threads(spa_t *spa) 2222{ 2223 ASSERT(spa_writeable(spa)); 2224 2225 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 2226 2227 spa_start_indirect_condensing_thread(spa); 2228 2229 ASSERT3P(spa->spa_checkpoint_discard_zthr, ==, NULL); 2230 spa->spa_checkpoint_discard_zthr = 2231 zthr_create(spa_checkpoint_discard_thread_check, 2232 spa_checkpoint_discard_thread, spa); 2233} 2234 2235/* 2236 * Fix up config after a partly-completed split. This is done with the 2237 * ZPOOL_CONFIG_SPLIT nvlist. Both the splitting pool and the split-off 2238 * pool have that entry in their config, but only the splitting one contains 2239 * a list of all the guids of the vdevs that are being split off. 2240 * 2241 * This function determines what to do with that list: either rejoin 2242 * all the disks to the pool, or complete the splitting process. To attempt 2243 * the rejoin, each disk that is offlined is marked online again, and 2244 * we do a reopen() call. If the vdev label for every disk that was 2245 * marked online indicates it was successfully split off (VDEV_AUX_SPLIT_POOL) 2246 * then we call vdev_split() on each disk, and complete the split. 2247 * 2248 * Otherwise we leave the config alone, with all the vdevs in place in 2249 * the original pool. 2250 */ 2251static void 2252spa_try_repair(spa_t *spa, nvlist_t *config) 2253{ 2254 uint_t extracted; 2255 uint64_t *glist; 2256 uint_t i, gcount; 2257 nvlist_t *nvl; 2258 vdev_t **vd; 2259 boolean_t attempt_reopen; 2260 2261 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_SPLIT, &nvl) != 0) 2262 return; 2263 2264 /* check that the config is complete */ 2265 if (nvlist_lookup_uint64_array(nvl, ZPOOL_CONFIG_SPLIT_LIST, 2266 &glist, &gcount) != 0) 2267 return; 2268 2269 vd = kmem_zalloc(gcount * sizeof (vdev_t *), KM_SLEEP); 2270 2271 /* attempt to online all the vdevs & validate */ 2272 attempt_reopen = B_TRUE; 2273 for (i = 0; i < gcount; i++) { 2274 if (glist[i] == 0) /* vdev is hole */ 2275 continue; 2276 2277 vd[i] = spa_lookup_by_guid(spa, glist[i], B_FALSE); 2278 if (vd[i] == NULL) { 2279 /* 2280 * Don't bother attempting to reopen the disks; 2281 * just do the split. 
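 * (the guid was not found in this pool, so a rejoin could not
 * succeed anyway)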
2282 */ 2283 attempt_reopen = B_FALSE; 2284 } else { 2285 /* attempt to re-online it */ 2286 vd[i]->vdev_offline = B_FALSE; 2287 } 2288 } 2289 2290 if (attempt_reopen) { 2291 vdev_reopen(spa->spa_root_vdev); 2292 2293 /* check each device to see what state it's in */ 2294 for (extracted = 0, i = 0; i < gcount; i++) { 2295 if (vd[i] != NULL && 2296 vd[i]->vdev_stat.vs_aux != VDEV_AUX_SPLIT_POOL) 2297 break; 2298 ++extracted; 2299 } 2300 } 2301 2302 /* 2303 * If every disk has been moved to the new pool, or if we never 2304 * even attempted to look at them, then we split them off for 2305 * good. 2306 */ 2307 if (!attempt_reopen || gcount == extracted) { 2308 for (i = 0; i < gcount; i++) 2309 if (vd[i] != NULL) 2310 vdev_split(vd[i]); 2311 vdev_reopen(spa->spa_root_vdev); 2312 } 2313 2314 kmem_free(vd, gcount * sizeof (vdev_t *)); 2315} 2316 2317static int 2318spa_load(spa_t *spa, spa_load_state_t state, spa_import_type_t type) 2319{ 2320 char *ereport = FM_EREPORT_ZFS_POOL; 2321 int error; 2322 2323 spa->spa_load_state = state; 2324 2325 gethrestime(&spa->spa_loaded_ts); 2326 error = spa_load_impl(spa, type, &ereport); 2327 2328 /* 2329 * Don't count references from objsets that are already closed 2330 * and are making their way through the eviction process. 2331 */ 2332 spa_evicting_os_wait(spa); 2333 spa->spa_minref = refcount_count(&spa->spa_refcount); 2334 if (error) { 2335 if (error != EEXIST) { 2336 spa->spa_loaded_ts.tv_sec = 0; 2337 spa->spa_loaded_ts.tv_nsec = 0; 2338 } 2339 if (error != EBADF) { 2340 zfs_ereport_post(ereport, spa, NULL, NULL, 0, 0); 2341 } 2342 } 2343 spa->spa_load_state = error ? SPA_LOAD_ERROR : SPA_LOAD_NONE; 2344 spa->spa_ena = 0; 2345 2346 return (error); 2347} 2348 2349/* 2350 * Count the number of per-vdev ZAPs associated with all of the vdevs in the 2351 * vdev tree rooted in the given vd, and ensure that each ZAP is present in the 2352 * spa's per-vdev ZAP list. 2353 */ 2354static uint64_t 2355vdev_count_verify_zaps(vdev_t *vd) 2356{ 2357 spa_t *spa = vd->vdev_spa; 2358 uint64_t total = 0; 2359 if (vd->vdev_top_zap != 0) { 2360 total++; 2361 ASSERT0(zap_lookup_int(spa->spa_meta_objset, 2362 spa->spa_all_vdev_zaps, vd->vdev_top_zap)); 2363 } 2364 if (vd->vdev_leaf_zap != 0) { 2365 total++; 2366 ASSERT0(zap_lookup_int(spa->spa_meta_objset, 2367 spa->spa_all_vdev_zaps, vd->vdev_leaf_zap)); 2368 } 2369 2370 for (uint64_t i = 0; i < vd->vdev_children; i++) { 2371 total += vdev_count_verify_zaps(vd->vdev_child[i]); 2372 } 2373 2374 return (total); 2375} 2376 2377static int 2378spa_verify_host(spa_t *spa, nvlist_t *mos_config) 2379{ 2380 uint64_t hostid; 2381 char *hostname; 2382 uint64_t myhostid = 0; 2383 2384 if (!spa_is_root(spa) && nvlist_lookup_uint64(mos_config, 2385 ZPOOL_CONFIG_HOSTID, &hostid) == 0) { 2386 hostname = fnvlist_lookup_string(mos_config, 2387 ZPOOL_CONFIG_HOSTNAME); 2388 2389 myhostid = zone_get_hostid(NULL); 2390 2391 if (hostid != 0 && myhostid != 0 && hostid != myhostid) { 2392 cmn_err(CE_WARN, "pool '%s' could not be " 2393 "loaded as it was last accessed by " 2394 "another system (host: %s hostid: 0x%llx). 
" 2395 "See: http://illumos.org/msg/ZFS-8000-EY", 2396 spa_name(spa), hostname, (u_longlong_t)hostid); 2397 spa_load_failed(spa, "hostid verification failed: pool " 2398 "last accessed by host: %s (hostid: 0x%llx)", 2399 hostname, (u_longlong_t)hostid); 2400 return (SET_ERROR(EBADF)); 2401 } 2402 } 2403 2404 return (0); 2405} 2406 2407static int 2408spa_ld_parse_config(spa_t *spa, spa_import_type_t type) 2409{ 2410 int error = 0; 2411 nvlist_t *nvtree, *nvl, *config = spa->spa_config; 2412 int parse; 2413 vdev_t *rvd; 2414 uint64_t pool_guid; 2415 char *comment; 2416 2417 /* 2418 * Versioning wasn't explicitly added to the label until later, so if 2419 * it's not present treat it as the initial version. 2420 */ 2421 if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, 2422 &spa->spa_ubsync.ub_version) != 0) 2423 spa->spa_ubsync.ub_version = SPA_VERSION_INITIAL; 2424 2425 if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &pool_guid)) { 2426 spa_load_failed(spa, "invalid config provided: '%s' missing", 2427 ZPOOL_CONFIG_POOL_GUID); 2428 return (SET_ERROR(EINVAL)); 2429 } 2430 2431 /* 2432 * If we are doing an import, ensure that the pool is not already 2433 * imported by checking if its pool guid already exists in the 2434 * spa namespace. 2435 * 2436 * The only case that we allow an already imported pool to be 2437 * imported again, is when the pool is checkpointed and we want to 2438 * look at its checkpointed state from userland tools like zdb. 2439 */ 2440#ifdef _KERNEL 2441 if ((spa->spa_load_state == SPA_LOAD_IMPORT || 2442 spa->spa_load_state == SPA_LOAD_TRYIMPORT) && 2443 spa_guid_exists(pool_guid, 0)) { 2444#else 2445 if ((spa->spa_load_state == SPA_LOAD_IMPORT || 2446 spa->spa_load_state == SPA_LOAD_TRYIMPORT) && 2447 spa_guid_exists(pool_guid, 0) && 2448 !spa_importing_readonly_checkpoint(spa)) { 2449#endif 2450 spa_load_failed(spa, "a pool with guid %llu is already open", 2451 (u_longlong_t)pool_guid); 2452 return (SET_ERROR(EEXIST)); 2453 } 2454 2455 spa->spa_config_guid = pool_guid; 2456 2457 nvlist_free(spa->spa_load_info); 2458 spa->spa_load_info = fnvlist_alloc(); 2459 2460 ASSERT(spa->spa_comment == NULL); 2461 if (nvlist_lookup_string(config, ZPOOL_CONFIG_COMMENT, &comment) == 0) 2462 spa->spa_comment = spa_strdup(comment); 2463 2464 (void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG, 2465 &spa->spa_config_txg); 2466 2467 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_SPLIT, &nvl) == 0) 2468 spa->spa_config_splitting = fnvlist_dup(nvl); 2469 2470 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvtree)) { 2471 spa_load_failed(spa, "invalid config provided: '%s' missing", 2472 ZPOOL_CONFIG_VDEV_TREE); 2473 return (SET_ERROR(EINVAL)); 2474 } 2475 2476 /* 2477 * Create "The Godfather" zio to hold all async IOs 2478 */ 2479 spa->spa_async_zio_root = kmem_alloc(max_ncpus * sizeof (void *), 2480 KM_SLEEP); 2481 for (int i = 0; i < max_ncpus; i++) { 2482 spa->spa_async_zio_root[i] = zio_root(spa, NULL, NULL, 2483 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | 2484 ZIO_FLAG_GODFATHER); 2485 } 2486 2487 /* 2488 * Parse the configuration into a vdev tree. We explicitly set the 2489 * value that will be returned by spa_version() since parsing the 2490 * configuration requires knowing the version number. 2491 */ 2492 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 2493 parse = (type == SPA_IMPORT_EXISTING ? 
2494 VDEV_ALLOC_LOAD : VDEV_ALLOC_SPLIT); 2495 error = spa_config_parse(spa, &rvd, nvtree, NULL, 0, parse); 2496 spa_config_exit(spa, SCL_ALL, FTAG); 2497 2498 if (error != 0) { 2499 spa_load_failed(spa, "unable to parse config [error=%d]", 2500 error); 2501 return (error); 2502 } 2503 2504 ASSERT(spa->spa_root_vdev == rvd); 2505 ASSERT3U(spa->spa_min_ashift, >=, SPA_MINBLOCKSHIFT); 2506 ASSERT3U(spa->spa_max_ashift, <=, SPA_MAXBLOCKSHIFT); 2507 2508 if (type != SPA_IMPORT_ASSEMBLE) { 2509 ASSERT(spa_guid(spa) == pool_guid); 2510 } 2511 2512 return (0); 2513} 2514 2515/* 2516 * Recursively open all vdevs in the vdev tree. This function is called twice: 2517 * first with the untrusted config, then with the trusted config. 2518 */ 2519static int 2520spa_ld_open_vdevs(spa_t *spa) 2521{ 2522 int error = 0; 2523 2524 /* 2525 * spa_missing_tvds_allowed defines how many top-level vdevs can be 2526 * missing/unopenable for the root vdev to be still considered openable. 2527 */ 2528 if (spa->spa_trust_config) { 2529 spa->spa_missing_tvds_allowed = zfs_max_missing_tvds; 2530 } else if (spa->spa_config_source == SPA_CONFIG_SRC_CACHEFILE) { 2531 spa->spa_missing_tvds_allowed = zfs_max_missing_tvds_cachefile; 2532 } else if (spa->spa_config_source == SPA_CONFIG_SRC_SCAN) { 2533 spa->spa_missing_tvds_allowed = zfs_max_missing_tvds_scan; 2534 } else { 2535 spa->spa_missing_tvds_allowed = 0; 2536 } 2537 2538 spa->spa_missing_tvds_allowed = 2539 MAX(zfs_max_missing_tvds, spa->spa_missing_tvds_allowed); 2540 2541 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 2542 error = vdev_open(spa->spa_root_vdev); 2543 spa_config_exit(spa, SCL_ALL, FTAG); 2544 2545 if (spa->spa_missing_tvds != 0) { 2546 spa_load_note(spa, "vdev tree has %lld missing top-level " 2547 "vdevs.", (u_longlong_t)spa->spa_missing_tvds); 2548 if (spa->spa_trust_config && (spa->spa_mode & FWRITE)) { 2549 /* 2550 * Although theoretically we could allow users to open 2551 * incomplete pools in RW mode, we'd need to add a lot 2552 * of extra logic (e.g. adjust pool space to account 2553 * for missing vdevs). 2554 * This limitation also prevents users from accidentally 2555 * opening the pool in RW mode during data recovery and 2556 * damaging it further. 2557 */ 2558 spa_load_note(spa, "pools with missing top-level " 2559 "vdevs can only be opened in read-only mode."); 2560 error = SET_ERROR(ENXIO); 2561 } else { 2562 spa_load_note(spa, "current settings allow for maximum " 2563 "%lld missing top-level vdevs at this stage.", 2564 (u_longlong_t)spa->spa_missing_tvds_allowed); 2565 } 2566 } 2567 if (error != 0) { 2568 spa_load_failed(spa, "unable to open vdev tree [error=%d]", 2569 error); 2570 } 2571 if (spa->spa_missing_tvds != 0 || error != 0) 2572 vdev_dbgmsg_print_tree(spa->spa_root_vdev, 2); 2573 2574 return (error); 2575} 2576 2577/* 2578 * We need to validate the vdev labels against the configuration that 2579 * we have in hand. This function is called twice: first with an untrusted 2580 * config, then with a trusted config. The validation is more strict when the 2581 * config is trusted. 
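 * The actual label checks are performed by vdev_validate(), called
 * below with the SPA config lock held as writer.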
2582 */ 2583static int 2584spa_ld_validate_vdevs(spa_t *spa) 2585{ 2586 int error = 0; 2587 vdev_t *rvd = spa->spa_root_vdev; 2588 2589 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 2590 error = vdev_validate(rvd); 2591 spa_config_exit(spa, SCL_ALL, FTAG); 2592 2593 if (error != 0) { 2594 spa_load_failed(spa, "vdev_validate failed [error=%d]", error); 2595 return (error); 2596 } 2597 2598 if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN) { 2599 spa_load_failed(spa, "cannot open vdev tree after invalidating " 2600 "some vdevs"); 2601 vdev_dbgmsg_print_tree(rvd, 2); 2602 return (SET_ERROR(ENXIO)); 2603 } 2604 2605 return (0); 2606} 2607 2608static void 2609spa_ld_select_uberblock_done(spa_t *spa, uberblock_t *ub) 2610{ 2611 spa->spa_state = POOL_STATE_ACTIVE; 2612 spa->spa_ubsync = spa->spa_uberblock; 2613 spa->spa_verify_min_txg = spa->spa_extreme_rewind ? 2614 TXG_INITIAL - 1 : spa_last_synced_txg(spa) - TXG_DEFER_SIZE - 1; 2615 spa->spa_first_txg = spa->spa_last_ubsync_txg ? 2616 spa->spa_last_ubsync_txg : spa_last_synced_txg(spa) + 1; 2617 spa->spa_claim_max_txg = spa->spa_first_txg; 2618 spa->spa_prev_software_version = ub->ub_software_version; 2619} 2620 2621static int 2622spa_ld_select_uberblock(spa_t *spa, spa_import_type_t type) 2623{ 2624 vdev_t *rvd = spa->spa_root_vdev; 2625 nvlist_t *label; 2626 uberblock_t *ub = &spa->spa_uberblock; 2627 2628 /* 2629 * If we are opening the checkpointed state of the pool by 2630 * rewinding to it, at this point we will have written the 2631 * checkpointed uberblock to the vdev labels, so searching 2632 * the labels will find the right uberblock. However, if 2633 * we are opening the checkpointed state read-only, we have 2634 * not modified the labels. Therefore, we must ignore the 2635 * labels and continue using the spa_uberblock that was set 2636 * by spa_ld_checkpoint_rewind. 2637 * 2638 * Note that it would be fine to ignore the labels when 2639 * rewinding (opening writeable) as well. However, if we 2640 * crash just after writing the labels, we will end up 2641 * searching the labels. Doing so in the common case means 2642 * that this code path gets exercised normally, rather than 2643 * just in the edge case. 2644 */ 2645 if (ub->ub_checkpoint_txg != 0 && 2646 spa_importing_readonly_checkpoint(spa)) { 2647 spa_ld_select_uberblock_done(spa, ub); 2648 return (0); 2649 } 2650 2651 /* 2652 * Find the best uberblock. 2653 */ 2654 vdev_uberblock_load(rvd, ub, &label); 2655 2656 /* 2657 * If we weren't able to find a single valid uberblock, return failure. 2658 */ 2659 if (ub->ub_txg == 0) { 2660 nvlist_free(label); 2661 spa_load_failed(spa, "no valid uberblock found"); 2662 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, ENXIO)); 2663 } 2664 2665 spa_load_note(spa, "using uberblock with txg=%llu", 2666 (u_longlong_t)ub->ub_txg); 2667 2668 /* 2669 * If the pool has an unsupported version we can't open it. 2670 */ 2671 if (!SPA_VERSION_IS_SUPPORTED(ub->ub_version)) { 2672 nvlist_free(label); 2673 spa_load_failed(spa, "version %llu is not supported", 2674 (u_longlong_t)ub->ub_version); 2675 return (spa_vdev_err(rvd, VDEV_AUX_VERSION_NEWER, ENOTSUP)); 2676 } 2677 2678 if (ub->ub_version >= SPA_VERSION_FEATURES) { 2679 nvlist_t *features; 2680 2681 /* 2682 * If we weren't able to find what's necessary for reading the 2683 * MOS in the label, return failure. 
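 * (in particular the features_for_read list that is looked up
 * just below)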
2684 */ 2685 if (label == NULL) { 2686 spa_load_failed(spa, "label config unavailable"); 2687 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, 2688 ENXIO)); 2689 } 2690 2691 if (nvlist_lookup_nvlist(label, ZPOOL_CONFIG_FEATURES_FOR_READ, 2692 &features) != 0) { 2693 nvlist_free(label); 2694 spa_load_failed(spa, "invalid label: '%s' missing", 2695 ZPOOL_CONFIG_FEATURES_FOR_READ); 2696 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, 2697 ENXIO)); 2698 } 2699 2700 /* 2701 * Update our in-core representation with the definitive values 2702 * from the label. 2703 */ 2704 nvlist_free(spa->spa_label_features); 2705 VERIFY(nvlist_dup(features, &spa->spa_label_features, 0) == 0); 2706 } 2707 2708 nvlist_free(label); 2709 2710 /* 2711 * Look through entries in the label nvlist's features_for_read. If 2712 * there is a feature listed there which we don't understand then we 2713 * cannot open a pool. 2714 */ 2715 if (ub->ub_version >= SPA_VERSION_FEATURES) { 2716 nvlist_t *unsup_feat; 2717 2718 VERIFY(nvlist_alloc(&unsup_feat, NV_UNIQUE_NAME, KM_SLEEP) == 2719 0); 2720 2721 for (nvpair_t *nvp = nvlist_next_nvpair(spa->spa_label_features, 2722 NULL); nvp != NULL; 2723 nvp = nvlist_next_nvpair(spa->spa_label_features, nvp)) { 2724 if (!zfeature_is_supported(nvpair_name(nvp))) { 2725 VERIFY(nvlist_add_string(unsup_feat, 2726 nvpair_name(nvp), "") == 0); 2727 } 2728 } 2729 2730 if (!nvlist_empty(unsup_feat)) { 2731 VERIFY(nvlist_add_nvlist(spa->spa_load_info, 2732 ZPOOL_CONFIG_UNSUP_FEAT, unsup_feat) == 0); 2733 nvlist_free(unsup_feat); 2734 spa_load_failed(spa, "some features are unsupported"); 2735 return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT, 2736 ENOTSUP)); 2737 } 2738 2739 nvlist_free(unsup_feat); 2740 } 2741 2742 if (type != SPA_IMPORT_ASSEMBLE && spa->spa_config_splitting) { 2743 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 2744 spa_try_repair(spa, spa->spa_config); 2745 spa_config_exit(spa, SCL_ALL, FTAG); 2746 nvlist_free(spa->spa_config_splitting); 2747 spa->spa_config_splitting = NULL; 2748 } 2749 2750 /* 2751 * Initialize internal SPA structures. 2752 */ 2753 spa_ld_select_uberblock_done(spa, ub); 2754 2755 return (0); 2756} 2757 2758static int 2759spa_ld_open_rootbp(spa_t *spa) 2760{ 2761 int error = 0; 2762 vdev_t *rvd = spa->spa_root_vdev; 2763 2764 error = dsl_pool_init(spa, spa->spa_first_txg, &spa->spa_dsl_pool); 2765 if (error != 0) { 2766 spa_load_failed(spa, "unable to open rootbp in dsl_pool_init " 2767 "[error=%d]", error); 2768 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2769 } 2770 spa->spa_meta_objset = spa->spa_dsl_pool->dp_meta_objset; 2771 2772 return (0); 2773} 2774 2775static int 2776spa_ld_trusted_config(spa_t *spa, spa_import_type_t type, 2777 boolean_t reloading) 2778{ 2779 vdev_t *mrvd, *rvd = spa->spa_root_vdev; 2780 nvlist_t *nv, *mos_config, *policy; 2781 int error = 0, copy_error; 2782 uint64_t healthy_tvds, healthy_tvds_mos; 2783 uint64_t mos_config_txg; 2784 2785 if (spa_dir_prop(spa, DMU_POOL_CONFIG, &spa->spa_config_object, B_TRUE) 2786 != 0) 2787 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2788 2789 /* 2790 * If we're assembling a pool from a split, the config provided is 2791 * already trusted so there is nothing to do. 
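 * (see spa_ld_mos_init(): a split assembly is the one case in which
 * the provided config is trusted from the start)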
2792 */ 2793 if (type == SPA_IMPORT_ASSEMBLE) 2794 return (0); 2795 2796 healthy_tvds = spa_healthy_core_tvds(spa); 2797 2798 if (load_nvlist(spa, spa->spa_config_object, &mos_config) 2799 != 0) { 2800 spa_load_failed(spa, "unable to retrieve MOS config"); 2801 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2802 } 2803 2804 /* 2805 * If we are doing an open, pool owner wasn't verified yet, thus do 2806 * the verification here. 2807 */ 2808 if (spa->spa_load_state == SPA_LOAD_OPEN) { 2809 error = spa_verify_host(spa, mos_config); 2810 if (error != 0) { 2811 nvlist_free(mos_config); 2812 return (error); 2813 } 2814 } 2815 2816 nv = fnvlist_lookup_nvlist(mos_config, ZPOOL_CONFIG_VDEV_TREE); 2817 2818 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 2819 2820 /* 2821 * Build a new vdev tree from the trusted config 2822 */ 2823 VERIFY(spa_config_parse(spa, &mrvd, nv, NULL, 0, VDEV_ALLOC_LOAD) == 0); 2824 2825 /* 2826 * Vdev paths in the MOS may be obsolete. If the untrusted config was 2827 * obtained by scanning /dev/dsk, then it will have the right vdev 2828 * paths. We update the trusted MOS config with this information. 2829 * We first try to copy the paths with vdev_copy_path_strict, which 2830 * succeeds only when both configs have exactly the same vdev tree. 2831 * If that fails, we fall back to a more flexible method that has a 2832 * best effort policy. 2833 */ 2834 copy_error = vdev_copy_path_strict(rvd, mrvd); 2835 if (copy_error != 0 || spa_load_print_vdev_tree) { 2836 spa_load_note(spa, "provided vdev tree:"); 2837 vdev_dbgmsg_print_tree(rvd, 2); 2838 spa_load_note(spa, "MOS vdev tree:"); 2839 vdev_dbgmsg_print_tree(mrvd, 2); 2840 } 2841 if (copy_error != 0) { 2842 spa_load_note(spa, "vdev_copy_path_strict failed, falling " 2843 "back to vdev_copy_path_relaxed"); 2844 vdev_copy_path_relaxed(rvd, mrvd); 2845 } 2846 2847 vdev_close(rvd); 2848 vdev_free(rvd); 2849 spa->spa_root_vdev = mrvd; 2850 rvd = mrvd; 2851 spa_config_exit(spa, SCL_ALL, FTAG); 2852 2853 /* 2854 * We will use spa_config if we decide to reload the spa or if spa_load 2855 * fails and we rewind. We must thus regenerate the config using the 2856 * MOS information with the updated paths. Rewind policy is an import 2857 * setting and is not in the MOS. We copy it over to our new, trusted 2858 * config. 2859 */ 2860 mos_config_txg = fnvlist_lookup_uint64(mos_config, 2861 ZPOOL_CONFIG_POOL_TXG); 2862 nvlist_free(mos_config); 2863 mos_config = spa_config_generate(spa, NULL, mos_config_txg, B_FALSE); 2864 if (nvlist_lookup_nvlist(spa->spa_config, ZPOOL_REWIND_POLICY, 2865 &policy) == 0) 2866 fnvlist_add_nvlist(mos_config, ZPOOL_REWIND_POLICY, policy); 2867 spa_config_set(spa, mos_config); 2868 spa->spa_config_source = SPA_CONFIG_SRC_MOS; 2869 2870 /* 2871 * Now that we got the config from the MOS, we should be more strict 2872 * in checking blkptrs and can make assumptions about the consistency 2873 * of the vdev tree. spa_trust_config must be set to true before opening 2874 * vdevs in order for them to be writeable. 
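 * (spa_writeable() checks spa_trust_config, so up to this point the
 * pool has effectively been treated as read-only; see the comment in
 * spa_ld_mos_init())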
2875 */ 2876 spa->spa_trust_config = B_TRUE; 2877 2878 /* 2879 * Open and validate the new vdev tree 2880 */ 2881 error = spa_ld_open_vdevs(spa); 2882 if (error != 0) 2883 return (error); 2884 2885 error = spa_ld_validate_vdevs(spa); 2886 if (error != 0) 2887 return (error); 2888 2889 if (copy_error != 0 || spa_load_print_vdev_tree) { 2890 spa_load_note(spa, "final vdev tree:"); 2891 vdev_dbgmsg_print_tree(rvd, 2); 2892 } 2893 2894 if (spa->spa_load_state != SPA_LOAD_TRYIMPORT && 2895 !spa->spa_extreme_rewind && zfs_max_missing_tvds == 0) { 2896 /* 2897 * Sanity check to make sure that we are indeed loading the 2898 * latest uberblock. If we missed SPA_SYNC_MIN_VDEVS tvds 2899 * in the config provided and they happened to be the only ones 2900 * to have the latest uberblock, we could involuntarily perform 2901 * an extreme rewind. 2902 */ 2903 healthy_tvds_mos = spa_healthy_core_tvds(spa); 2904 if (healthy_tvds_mos - healthy_tvds >= 2905 SPA_SYNC_MIN_VDEVS) { 2906 spa_load_note(spa, "config provided misses too many " 2907 "top-level vdevs compared to MOS (%lld vs %lld). ", 2908 (u_longlong_t)healthy_tvds, 2909 (u_longlong_t)healthy_tvds_mos); 2910 spa_load_note(spa, "vdev tree:"); 2911 vdev_dbgmsg_print_tree(rvd, 2); 2912 if (reloading) { 2913 spa_load_failed(spa, "config was already " 2914 "provided from MOS. Aborting."); 2915 return (spa_vdev_err(rvd, 2916 VDEV_AUX_CORRUPT_DATA, EIO)); 2917 } 2918 spa_load_note(spa, "spa must be reloaded using MOS " 2919 "config"); 2920 return (SET_ERROR(EAGAIN)); 2921 } 2922 } 2923 2924 error = spa_check_for_missing_logs(spa); 2925 if (error != 0) 2926 return (spa_vdev_err(rvd, VDEV_AUX_BAD_GUID_SUM, ENXIO)); 2927 2928 if (rvd->vdev_guid_sum != spa->spa_uberblock.ub_guid_sum) { 2929 spa_load_failed(spa, "uberblock guid sum doesn't match MOS " 2930 "guid sum (%llu != %llu)", 2931 (u_longlong_t)spa->spa_uberblock.ub_guid_sum, 2932 (u_longlong_t)rvd->vdev_guid_sum); 2933 return (spa_vdev_err(rvd, VDEV_AUX_BAD_GUID_SUM, 2934 ENXIO)); 2935 } 2936 2937 return (0); 2938} 2939 2940static int 2941spa_ld_open_indirect_vdev_metadata(spa_t *spa) 2942{ 2943 int error = 0; 2944 vdev_t *rvd = spa->spa_root_vdev; 2945 2946 /* 2947 * Everything that we read before spa_remove_init() must be stored 2948 * on concreted vdevs. Therefore we do this as early as possible. 2949 */ 2950 error = spa_remove_init(spa); 2951 if (error != 0) { 2952 spa_load_failed(spa, "spa_remove_init failed [error=%d]", 2953 error); 2954 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2955 } 2956 2957 /* 2958 * Retrieve information needed to condense indirect vdev mappings. 
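 * (when the pool is writeable, the condensing thread itself is
 * started later, from spa_spawn_aux_threads())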
2959 */ 2960 error = spa_condense_init(spa); 2961 if (error != 0) { 2962 spa_load_failed(spa, "spa_condense_init failed [error=%d]", 2963 error); 2964 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, error)); 2965 } 2966 2967 return (0); 2968} 2969 2970static int 2971spa_ld_check_features(spa_t *spa, boolean_t *missing_feat_writep) 2972{ 2973 int error = 0; 2974 vdev_t *rvd = spa->spa_root_vdev; 2975 2976 if (spa_version(spa) >= SPA_VERSION_FEATURES) { 2977 boolean_t missing_feat_read = B_FALSE; 2978 nvlist_t *unsup_feat, *enabled_feat; 2979 2980 if (spa_dir_prop(spa, DMU_POOL_FEATURES_FOR_READ, 2981 &spa->spa_feat_for_read_obj, B_TRUE) != 0) { 2982 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2983 } 2984 2985 if (spa_dir_prop(spa, DMU_POOL_FEATURES_FOR_WRITE, 2986 &spa->spa_feat_for_write_obj, B_TRUE) != 0) { 2987 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2988 } 2989 2990 if (spa_dir_prop(spa, DMU_POOL_FEATURE_DESCRIPTIONS, 2991 &spa->spa_feat_desc_obj, B_TRUE) != 0) { 2992 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2993 } 2994 2995 enabled_feat = fnvlist_alloc(); 2996 unsup_feat = fnvlist_alloc(); 2997 2998 if (!spa_features_check(spa, B_FALSE, 2999 unsup_feat, enabled_feat)) 3000 missing_feat_read = B_TRUE; 3001 3002 if (spa_writeable(spa) || 3003 spa->spa_load_state == SPA_LOAD_TRYIMPORT) { 3004 if (!spa_features_check(spa, B_TRUE, 3005 unsup_feat, enabled_feat)) { 3006 *missing_feat_writep = B_TRUE; 3007 } 3008 } 3009 3010 fnvlist_add_nvlist(spa->spa_load_info, 3011 ZPOOL_CONFIG_ENABLED_FEAT, enabled_feat); 3012 3013 if (!nvlist_empty(unsup_feat)) { 3014 fnvlist_add_nvlist(spa->spa_load_info, 3015 ZPOOL_CONFIG_UNSUP_FEAT, unsup_feat); 3016 } 3017 3018 fnvlist_free(enabled_feat); 3019 fnvlist_free(unsup_feat); 3020 3021 if (!missing_feat_read) { 3022 fnvlist_add_boolean(spa->spa_load_info, 3023 ZPOOL_CONFIG_CAN_RDONLY); 3024 } 3025 3026 /* 3027 * If the state is SPA_LOAD_TRYIMPORT, our objective is 3028 * twofold: to determine whether the pool is available for 3029 * import in read-write mode and (if it is not) whether the 3030 * pool is available for import in read-only mode. If the pool 3031 * is available for import in read-write mode, it is displayed 3032 * as available in userland; if it is not available for import 3033 * in read-only mode, it is displayed as unavailable in 3034 * userland. If the pool is available for import in read-only 3035 * mode but not read-write mode, it is displayed as unavailable 3036 * in userland with a special note that the pool is actually 3037 * available for open in read-only mode. 3038 * 3039 * As a result, if the state is SPA_LOAD_TRYIMPORT and we are 3040 * missing a feature for write, we must first determine whether 3041 * the pool can be opened read-only before returning to 3042 * userland in order to know whether to display the 3043 * abovementioned note. 3044 */ 3045 if (missing_feat_read || (*missing_feat_writep && 3046 spa_writeable(spa))) { 3047 spa_load_failed(spa, "pool uses unsupported features"); 3048 return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT, 3049 ENOTSUP)); 3050 } 3051 3052 /* 3053 * Load refcounts for ZFS features from disk into an in-memory 3054 * cache during SPA initialization. 
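 * Caching them up front lets later feature refcount queries be
 * answered from memory rather than from the MOS.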
3055 */ 3056 for (spa_feature_t i = 0; i < SPA_FEATURES; i++) { 3057 uint64_t refcount; 3058 3059 error = feature_get_refcount_from_disk(spa, 3060 &spa_feature_table[i], &refcount); 3061 if (error == 0) { 3062 spa->spa_feat_refcount_cache[i] = refcount; 3063 } else if (error == ENOTSUP) { 3064 spa->spa_feat_refcount_cache[i] = 3065 SPA_FEATURE_DISABLED; 3066 } else { 3067 spa_load_failed(spa, "error getting refcount " 3068 "for feature %s [error=%d]", 3069 spa_feature_table[i].fi_guid, error); 3070 return (spa_vdev_err(rvd, 3071 VDEV_AUX_CORRUPT_DATA, EIO)); 3072 } 3073 } 3074 } 3075 3076 if (spa_feature_is_active(spa, SPA_FEATURE_ENABLED_TXG)) { 3077 if (spa_dir_prop(spa, DMU_POOL_FEATURE_ENABLED_TXG, 3078 &spa->spa_feat_enabled_txg_obj, B_TRUE) != 0) 3079 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 3080 } 3081 3082 return (0); 3083} 3084 3085static int 3086spa_ld_load_special_directories(spa_t *spa) 3087{ 3088 int error = 0; 3089 vdev_t *rvd = spa->spa_root_vdev; 3090 3091 spa->spa_is_initializing = B_TRUE; 3092 error = dsl_pool_open(spa->spa_dsl_pool); 3093 spa->spa_is_initializing = B_FALSE; 3094 if (error != 0) { 3095 spa_load_failed(spa, "dsl_pool_open failed [error=%d]", error); 3096 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 3097 } 3098 3099 return (0); 3100} 3101 3102static int 3103spa_ld_get_props(spa_t *spa) 3104{ 3105 int error = 0; 3106 uint64_t obj; 3107 vdev_t *rvd = spa->spa_root_vdev; 3108 3109 /* Grab the secret checksum salt from the MOS. */ 3110 error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT, 3111 DMU_POOL_CHECKSUM_SALT, 1, 3112 sizeof (spa->spa_cksum_salt.zcs_bytes), 3113 spa->spa_cksum_salt.zcs_bytes); 3114 if (error == ENOENT) { 3115 /* Generate a new salt for subsequent use */ 3116 (void) random_get_pseudo_bytes(spa->spa_cksum_salt.zcs_bytes, 3117 sizeof (spa->spa_cksum_salt.zcs_bytes)); 3118 } else if (error != 0) { 3119 spa_load_failed(spa, "unable to retrieve checksum salt from " 3120 "MOS [error=%d]", error); 3121 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 3122 } 3123 3124 if (spa_dir_prop(spa, DMU_POOL_SYNC_BPOBJ, &obj, B_TRUE) != 0) 3125 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 3126 error = bpobj_open(&spa->spa_deferred_bpobj, spa->spa_meta_objset, obj); 3127 if (error != 0) { 3128 spa_load_failed(spa, "error opening deferred-frees bpobj " 3129 "[error=%d]", error); 3130 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 3131 } 3132 3133 /* 3134 * Load the bit that tells us to use the new accounting function 3135 * (raid-z deflation). If we have an older pool, this will not 3136 * be present. 3137 */ 3138 error = spa_dir_prop(spa, DMU_POOL_DEFLATE, &spa->spa_deflate, B_FALSE); 3139 if (error != 0 && error != ENOENT) 3140 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 3141 3142 error = spa_dir_prop(spa, DMU_POOL_CREATION_VERSION, 3143 &spa->spa_creation_version, B_FALSE); 3144 if (error != 0 && error != ENOENT) 3145 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 3146 3147 /* 3148 * Load the persistent error log. If we have an older pool, this will 3149 * not be present. 
3150 */ 3151 error = spa_dir_prop(spa, DMU_POOL_ERRLOG_LAST, &spa->spa_errlog_last, 3152 B_FALSE); 3153 if (error != 0 && error != ENOENT) 3154 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 3155 3156 error = spa_dir_prop(spa, DMU_POOL_ERRLOG_SCRUB, 3157 &spa->spa_errlog_scrub, B_FALSE); 3158 if (error != 0 && error != ENOENT) 3159 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 3160 3161 /* 3162 * Load the history object. If we have an older pool, this 3163 * will not be present. 3164 */ 3165 error = spa_dir_prop(spa, DMU_POOL_HISTORY, &spa->spa_history, B_FALSE); 3166 if (error != 0 && error != ENOENT) 3167 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 3168 3169 /* 3170 * Load the per-vdev ZAP map. If we have an older pool, this will not 3171 * be present; in this case, defer its creation to a later time to 3172 * avoid dirtying the MOS this early / out of sync context. See 3173 * spa_sync_config_object. 3174 */ 3175 3176 /* The sentinel is only available in the MOS config. */ 3177 nvlist_t *mos_config; 3178 if (load_nvlist(spa, spa->spa_config_object, &mos_config) != 0) { 3179 spa_load_failed(spa, "unable to retrieve MOS config"); 3180 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 3181 } 3182 3183 error = spa_dir_prop(spa, DMU_POOL_VDEV_ZAP_MAP, 3184 &spa->spa_all_vdev_zaps, B_FALSE); 3185 3186 if (error == ENOENT) { 3187 VERIFY(!nvlist_exists(mos_config, 3188 ZPOOL_CONFIG_HAS_PER_VDEV_ZAPS)); 3189 spa->spa_avz_action = AVZ_ACTION_INITIALIZE; 3190 ASSERT0(vdev_count_verify_zaps(spa->spa_root_vdev)); 3191 } else if (error != 0) { 3192 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 3193 } else if (!nvlist_exists(mos_config, ZPOOL_CONFIG_HAS_PER_VDEV_ZAPS)) { 3194 /* 3195 * An older version of ZFS overwrote the sentinel value, so 3196 * we have orphaned per-vdev ZAPs in the MOS. Defer their 3197 * destruction to later; see spa_sync_config_object. 3198 */ 3199 spa->spa_avz_action = AVZ_ACTION_DESTROY; 3200 /* 3201 * We're assuming that no vdevs have had their ZAPs created 3202 * before this. Better be sure of it. 3203 */ 3204 ASSERT0(vdev_count_verify_zaps(spa->spa_root_vdev)); 3205 } 3206 nvlist_free(mos_config); 3207 3208 spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION); 3209 3210 error = spa_dir_prop(spa, DMU_POOL_PROPS, &spa->spa_pool_props_object, 3211 B_FALSE); 3212 if (error && error != ENOENT) 3213 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 3214 3215 if (error == 0) { 3216 uint64_t autoreplace; 3217 3218 spa_prop_find(spa, ZPOOL_PROP_BOOTFS, &spa->spa_bootfs); 3219 spa_prop_find(spa, ZPOOL_PROP_AUTOREPLACE, &autoreplace); 3220 spa_prop_find(spa, ZPOOL_PROP_DELEGATION, &spa->spa_delegation); 3221 spa_prop_find(spa, ZPOOL_PROP_FAILUREMODE, &spa->spa_failmode); 3222 spa_prop_find(spa, ZPOOL_PROP_AUTOEXPAND, &spa->spa_autoexpand); 3223 spa_prop_find(spa, ZPOOL_PROP_DEDUPDITTO, 3224 &spa->spa_dedup_ditto); 3225 3226 spa->spa_autoreplace = (autoreplace != 0); 3227 } 3228 3229 /* 3230 * If we are importing a pool with missing top-level vdevs, 3231 * we enforce that the pool doesn't panic or get suspended on 3232 * error since the likelihood of missing data is extremely high. 
3233 */ 3234 if (spa->spa_missing_tvds > 0 && 3235 spa->spa_failmode != ZIO_FAILURE_MODE_CONTINUE && 3236 spa->spa_load_state != SPA_LOAD_TRYIMPORT) { 3237 spa_load_note(spa, "forcing failmode to 'continue' " 3238 "as some top level vdevs are missing"); 3239 spa->spa_failmode = ZIO_FAILURE_MODE_CONTINUE; 3240 } 3241 3242 return (0); 3243} 3244 3245static int 3246spa_ld_open_aux_vdevs(spa_t *spa, spa_import_type_t type) 3247{ 3248 int error = 0; 3249 vdev_t *rvd = spa->spa_root_vdev; 3250 3251 /* 3252 * If we're assembling the pool from the split-off vdevs of 3253 * an existing pool, we don't want to attach the spares & cache 3254 * devices. 3255 */ 3256 3257 /* 3258 * Load any hot spares for this pool. 3259 */ 3260 error = spa_dir_prop(spa, DMU_POOL_SPARES, &spa->spa_spares.sav_object, 3261 B_FALSE); 3262 if (error != 0 && error != ENOENT) 3263 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 3264 if (error == 0 && type != SPA_IMPORT_ASSEMBLE) { 3265 ASSERT(spa_version(spa) >= SPA_VERSION_SPARES); 3266 if (load_nvlist(spa, spa->spa_spares.sav_object, 3267 &spa->spa_spares.sav_config) != 0) { 3268 spa_load_failed(spa, "error loading spares nvlist"); 3269 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 3270 } 3271 3272 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 3273 spa_load_spares(spa); 3274 spa_config_exit(spa, SCL_ALL, FTAG); 3275 } else if (error == 0) { 3276 spa->spa_spares.sav_sync = B_TRUE; 3277 } 3278 3279 /* 3280 * Load any level 2 ARC devices for this pool. 3281 */ 3282 error = spa_dir_prop(spa, DMU_POOL_L2CACHE, 3283 &spa->spa_l2cache.sav_object, B_FALSE); 3284 if (error != 0 && error != ENOENT) 3285 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 3286 if (error == 0 && type != SPA_IMPORT_ASSEMBLE) { 3287 ASSERT(spa_version(spa) >= SPA_VERSION_L2CACHE); 3288 if (load_nvlist(spa, spa->spa_l2cache.sav_object, 3289 &spa->spa_l2cache.sav_config) != 0) { 3290 spa_load_failed(spa, "error loading l2cache nvlist"); 3291 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 3292 } 3293 3294 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 3295 spa_load_l2cache(spa); 3296 spa_config_exit(spa, SCL_ALL, FTAG); 3297 } else if (error == 0) { 3298 spa->spa_l2cache.sav_sync = B_TRUE; 3299 } 3300 3301 return (0); 3302} 3303 3304static int 3305spa_ld_load_vdev_metadata(spa_t *spa) 3306{ 3307 int error = 0; 3308 vdev_t *rvd = spa->spa_root_vdev; 3309 3310 /* 3311 * If the 'autoreplace' property is set, then post a resource notifying 3312 * the ZFS DE that it should not issue any faults for unopenable 3313 * devices. We also iterate over the vdevs, and post a sysevent for any 3314 * unopenable vdevs so that the normal autoreplace handler can take 3315 * over. 3316 */ 3317 if (spa->spa_autoreplace && spa->spa_load_state != SPA_LOAD_TRYIMPORT) { 3318 spa_check_removed(spa->spa_root_vdev); 3319 /* 3320 * For the import case, this is done in spa_import(), because 3321 * at this point we're using the spare definitions from 3322 * the MOS config, not necessarily from the userland config. 3323 */ 3324 if (spa->spa_load_state != SPA_LOAD_IMPORT) { 3325 spa_aux_check_removed(&spa->spa_spares); 3326 spa_aux_check_removed(&spa->spa_l2cache); 3327 } 3328 } 3329 3330 /* 3331 * Load the vdev metadata such as metaslabs, DTLs, spacemap object, etc. 
3332 */ 3333 error = vdev_load(rvd); 3334 if (error != 0) { 3335 spa_load_failed(spa, "vdev_load failed [error=%d]", error); 3336 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, error)); 3337 } 3338 3339 /* 3340 * Propagate the leaf DTLs we just loaded all the way up the vdev tree. 3341 */ 3342 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 3343 vdev_dtl_reassess(rvd, 0, 0, B_FALSE); 3344 spa_config_exit(spa, SCL_ALL, FTAG); 3345 3346 return (0); 3347} 3348 3349static int 3350spa_ld_load_dedup_tables(spa_t *spa) 3351{ 3352 int error = 0; 3353 vdev_t *rvd = spa->spa_root_vdev; 3354 3355 error = ddt_load(spa); 3356 if (error != 0) { 3357 spa_load_failed(spa, "ddt_load failed [error=%d]", error); 3358 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 3359 } 3360 3361 return (0); 3362} 3363 3364static int 3365spa_ld_verify_logs(spa_t *spa, spa_import_type_t type, char **ereport) 3366{ 3367 vdev_t *rvd = spa->spa_root_vdev; 3368 3369 if (type != SPA_IMPORT_ASSEMBLE && spa_writeable(spa)) { 3370 boolean_t missing = spa_check_logs(spa); 3371 if (missing) { 3372 if (spa->spa_missing_tvds != 0) { 3373 spa_load_note(spa, "spa_check_logs failed " 3374 "so dropping the logs"); 3375 } else { 3376 *ereport = FM_EREPORT_ZFS_LOG_REPLAY; 3377 spa_load_failed(spa, "spa_check_logs failed"); 3378 return (spa_vdev_err(rvd, VDEV_AUX_BAD_LOG, 3379 ENXIO)); 3380 } 3381 } 3382 } 3383 3384 return (0); 3385} 3386 3387static int 3388spa_ld_verify_pool_data(spa_t *spa) 3389{ 3390 int error = 0; 3391 vdev_t *rvd = spa->spa_root_vdev; 3392 3393 /* 3394 * We've successfully opened the pool, verify that we're ready 3395 * to start pushing transactions. 3396 */ 3397 if (spa->spa_load_state != SPA_LOAD_TRYIMPORT) { 3398 error = spa_load_verify(spa); 3399 if (error != 0) { 3400 spa_load_failed(spa, "spa_load_verify failed " 3401 "[error=%d]", error); 3402 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, 3403 error)); 3404 } 3405 } 3406 3407 return (0); 3408} 3409 3410static void 3411spa_ld_claim_log_blocks(spa_t *spa) 3412{ 3413 dmu_tx_t *tx; 3414 dsl_pool_t *dp = spa_get_dsl(spa); 3415 3416 /* 3417 * Claim log blocks that haven't been committed yet. 3418 * This must all happen in a single txg. 3419 * Note: spa_claim_max_txg is updated by spa_claim_notify(), 3420 * invoked from zil_claim_log_block()'s i/o done callback. 3421 * Price of rollback is that we abandon the log. 3422 */ 3423 spa->spa_claiming = B_TRUE; 3424 3425 tx = dmu_tx_create_assigned(dp, spa_first_txg(spa)); 3426 (void) dmu_objset_find_dp(dp, dp->dp_root_dir_obj, 3427 zil_claim, tx, DS_FIND_CHILDREN); 3428 dmu_tx_commit(tx); 3429 3430 spa->spa_claiming = B_FALSE; 3431 3432 spa_set_log_state(spa, SPA_LOG_GOOD); 3433} 3434 3435static void 3436spa_ld_check_for_config_update(spa_t *spa, uint64_t config_cache_txg, 3437 boolean_t update_config_cache) 3438{ 3439 vdev_t *rvd = spa->spa_root_vdev; 3440 int need_update = B_FALSE; 3441 3442 /* 3443 * If the config cache is stale, or we have uninitialized 3444 * metaslabs (see spa_vdev_add()), then update the config. 3445 * 3446 * If this is a verbatim import, trust the current 3447 * in-core spa_config and update the disk labels. 
3448 */ 3449 if (update_config_cache || config_cache_txg != spa->spa_config_txg || 3450 spa->spa_load_state == SPA_LOAD_IMPORT || 3451 spa->spa_load_state == SPA_LOAD_RECOVER || 3452 (spa->spa_import_flags & ZFS_IMPORT_VERBATIM)) 3453 need_update = B_TRUE; 3454 3455 for (int c = 0; c < rvd->vdev_children; c++) 3456 if (rvd->vdev_child[c]->vdev_ms_array == 0) 3457 need_update = B_TRUE; 3458 3459 /* 3460 * Update the config cache asynchronously in case we're the 3461 * root pool, in which case the config cache isn't writable yet. 3462 */ 3463 if (need_update) 3464 spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE); 3465} 3466 3467static void 3468spa_ld_prepare_for_reload(spa_t *spa) 3469{ 3470 int mode = spa->spa_mode; 3471 int async_suspended = spa->spa_async_suspended; 3472 3473 spa_unload(spa); 3474 spa_deactivate(spa); 3475 spa_activate(spa, mode); 3476 3477 /* 3478 * We save the value of spa_async_suspended as it gets reset to 0 by 3479 * spa_unload(). We want to restore it back to the original value before 3480 * returning as we might be calling spa_async_resume() later. 3481 */ 3482 spa->spa_async_suspended = async_suspended; 3483} 3484 3485static int 3486spa_ld_read_checkpoint_txg(spa_t *spa) 3487{ 3488 uberblock_t checkpoint; 3489 int error = 0; 3490 3491 ASSERT0(spa->spa_checkpoint_txg); 3492 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 3493 3494 error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT, 3495 DMU_POOL_ZPOOL_CHECKPOINT, sizeof (uint64_t), 3496 sizeof (uberblock_t) / sizeof (uint64_t), &checkpoint); 3497 3498 if (error == ENOENT) 3499 return (0); 3500 3501 if (error != 0) 3502 return (error); 3503 3504 ASSERT3U(checkpoint.ub_txg, !=, 0); 3505 ASSERT3U(checkpoint.ub_checkpoint_txg, !=, 0); 3506 ASSERT3U(checkpoint.ub_timestamp, !=, 0); 3507 spa->spa_checkpoint_txg = checkpoint.ub_txg; 3508 spa->spa_checkpoint_info.sci_timestamp = checkpoint.ub_timestamp; 3509 3510 return (0); 3511} 3512 3513static int 3514spa_ld_mos_init(spa_t *spa, spa_import_type_t type) 3515{ 3516 int error = 0; 3517 3518 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 3519 ASSERT(spa->spa_config_source != SPA_CONFIG_SRC_NONE); 3520 3521 /* 3522 * Never trust the config that is provided unless we are assembling 3523 * a pool following a split. 3524 * This means don't trust blkptrs and the vdev tree in general. This 3525 * also effectively puts the spa in read-only mode since 3526 * spa_writeable() checks for spa_trust_config to be true. 3527 * We will later load a trusted config from the MOS. 3528 */ 3529 if (type != SPA_IMPORT_ASSEMBLE) 3530 spa->spa_trust_config = B_FALSE; 3531 3532 /* 3533 * Parse the config provided to create a vdev tree. 3534 */ 3535 error = spa_ld_parse_config(spa, type); 3536 if (error != 0) 3537 return (error); 3538 3539 /* 3540 * Now that we have the vdev tree, try to open each vdev. This involves 3541 * opening the underlying physical device, retrieving its geometry and 3542 * probing the vdev with a dummy I/O. The state of each vdev will be set 3543 * based on the success of those operations. After this we'll be ready 3544 * to read from the vdevs. 3545 */ 3546 error = spa_ld_open_vdevs(spa); 3547 if (error != 0) 3548 return (error); 3549 3550 /* 3551 * Read the label of each vdev and make sure that the GUIDs stored 3552 * there match the GUIDs in the config provided. 3553 * If we're assembling a new pool that's been split off from an 3554 * existing pool, the labels haven't yet been updated so we skip 3555 * validation for now. 
3556 */ 3557 if (type != SPA_IMPORT_ASSEMBLE) { 3558 error = spa_ld_validate_vdevs(spa); 3559 if (error != 0) 3560 return (error); 3561 } 3562 3563 /* 3564 * Read all vdev labels to find the best uberblock (i.e. latest, 3565 * unless spa_load_max_txg is set) and store it in spa_uberblock. We 3566 * get the list of features required to read blkptrs in the MOS from 3567 * the vdev label with the best uberblock and verify that our version 3568 * of zfs supports them all. 3569 */ 3570 error = spa_ld_select_uberblock(spa, type); 3571 if (error != 0) 3572 return (error); 3573 3574 /* 3575 * Pass that uberblock to the dsl_pool layer which will open the root 3576 * blkptr. This blkptr points to the latest version of the MOS and will 3577 * allow us to read its contents. 3578 */ 3579 error = spa_ld_open_rootbp(spa); 3580 if (error != 0) 3581 return (error); 3582 3583 return (0); 3584} 3585 3586static int 3587spa_ld_checkpoint_rewind(spa_t *spa) 3588{ 3589 uberblock_t checkpoint; 3590 int error = 0; 3591 3592 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 3593 ASSERT(spa->spa_import_flags & ZFS_IMPORT_CHECKPOINT); 3594 3595 error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT, 3596 DMU_POOL_ZPOOL_CHECKPOINT, sizeof (uint64_t), 3597 sizeof (uberblock_t) / sizeof (uint64_t), &checkpoint); 3598 3599 if (error != 0) { 3600 spa_load_failed(spa, "unable to retrieve checkpointed " 3601 "uberblock from the MOS config [error=%d]", error); 3602 3603 if (error == ENOENT) 3604 error = ZFS_ERR_NO_CHECKPOINT; 3605 3606 return (error); 3607 } 3608 3609 ASSERT3U(checkpoint.ub_txg, <, spa->spa_uberblock.ub_txg); 3610 ASSERT3U(checkpoint.ub_txg, ==, checkpoint.ub_checkpoint_txg); 3611 3612 /* 3613 * We need to update the txg and timestamp of the checkpointed 3614 * uberblock to be higher than the latest one. This ensures that 3615 * the checkpointed uberblock is selected if we were to close and 3616 * reopen the pool right after we've written it in the vdev labels. 3617 * (also see block comment in vdev_uberblock_compare) 3618 */ 3619 checkpoint.ub_txg = spa->spa_uberblock.ub_txg + 1; 3620 checkpoint.ub_timestamp = gethrestime_sec(); 3621 3622 /* 3623 * Set current uberblock to be the checkpointed uberblock. 3624 */ 3625 spa->spa_uberblock = checkpoint; 3626 3627 /* 3628 * If we are doing a normal rewind, then the pool is open for 3629 * writing and we sync the "updated" checkpointed uberblock to 3630 * disk. Once this is done, we've basically rewound the whole 3631 * pool and there is no way back. 3632 * 3633 * There are cases when we don't want to attempt and sync the 3634 * checkpointed uberblock to disk because we are opening a 3635 * pool as read-only. Specifically, verifying the checkpointed 3636 * state with zdb, and importing the checkpointed state to get 3637 * a "preview" of its content. 
3638 */ 3639 if (spa_writeable(spa)) { 3640 vdev_t *rvd = spa->spa_root_vdev; 3641 3642 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 3643 vdev_t *svd[SPA_SYNC_MIN_VDEVS] = { NULL }; 3644 int svdcount = 0; 3645 int children = rvd->vdev_children; 3646 int c0 = spa_get_random(children); 3647 3648 for (int c = 0; c < children; c++) { 3649 vdev_t *vd = rvd->vdev_child[(c0 + c) % children]; 3650 3651 /* Stop when revisiting the first vdev */ 3652 if (c > 0 && svd[0] == vd) 3653 break; 3654 3655 if (vd->vdev_ms_array == 0 || vd->vdev_islog || 3656 !vdev_is_concrete(vd)) 3657 continue; 3658 3659 svd[svdcount++] = vd; 3660 if (svdcount == SPA_SYNC_MIN_VDEVS) 3661 break; 3662 } 3663 error = vdev_config_sync(svd, svdcount, spa->spa_first_txg); 3664 if (error == 0) 3665 spa->spa_last_synced_guid = rvd->vdev_guid; 3666 spa_config_exit(spa, SCL_ALL, FTAG); 3667 3668 if (error != 0) { 3669 spa_load_failed(spa, "failed to write checkpointed " 3670 "uberblock to the vdev labels [error=%d]", error); 3671 return (error); 3672 } 3673 } 3674 3675 return (0); 3676} 3677 3678static int 3679spa_ld_mos_with_trusted_config(spa_t *spa, spa_import_type_t type, 3680 boolean_t *update_config_cache) 3681{ 3682 int error; 3683 3684 /* 3685 * Parse the config for pool, open and validate vdevs, 3686 * select an uberblock, and use that uberblock to open 3687 * the MOS. 3688 */ 3689 error = spa_ld_mos_init(spa, type); 3690 if (error != 0) 3691 return (error); 3692 3693 /* 3694 * Retrieve the trusted config stored in the MOS and use it to create 3695 * a new, exact version of the vdev tree, then reopen all vdevs. 3696 */ 3697 error = spa_ld_trusted_config(spa, type, B_FALSE); 3698 if (error == EAGAIN) { 3699 if (update_config_cache != NULL) 3700 *update_config_cache = B_TRUE; 3701 3702 /* 3703 * Redo the loading process with the trusted config if it is 3704 * too different from the untrusted config. 3705 */ 3706 spa_ld_prepare_for_reload(spa); 3707 spa_load_note(spa, "RELOADING"); 3708 error = spa_ld_mos_init(spa, type); 3709 if (error != 0) 3710 return (error); 3711 3712 error = spa_ld_trusted_config(spa, type, B_TRUE); 3713 if (error != 0) 3714 return (error); 3715 3716 } else if (error != 0) { 3717 return (error); 3718 } 3719 3720 return (0); 3721} 3722 3723/* 3724 * Load an existing storage pool, using the config provided. This config 3725 * describes which vdevs are part of the pool and is later validated against 3726 * partial configs present in each vdev's label and an entire copy of the 3727 * config stored in the MOS. 3728 */ 3729static int 3730spa_load_impl(spa_t *spa, spa_import_type_t type, char **ereport) 3731{ 3732 int error = 0; 3733 boolean_t missing_feat_write = B_FALSE; 3734 boolean_t checkpoint_rewind = 3735 (spa->spa_import_flags & ZFS_IMPORT_CHECKPOINT); 3736 boolean_t update_config_cache = B_FALSE; 3737 3738 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 3739 ASSERT(spa->spa_config_source != SPA_CONFIG_SRC_NONE); 3740 3741 spa_load_note(spa, "LOADING"); 3742 3743 error = spa_ld_mos_with_trusted_config(spa, type, &update_config_cache); 3744 if (error != 0) 3745 return (error); 3746 3747 /* 3748 * If we are rewinding to the checkpoint then we need to repeat 3749 * everything we've done so far in this function but this time 3750 * selecting the checkpointed uberblock and using that to open 3751 * the MOS. 3752 */ 3753 if (checkpoint_rewind) { 3754 /* 3755 * If we are rewinding to the checkpoint update config cache 3756 * anyway. 
3757 */ 3758 update_config_cache = B_TRUE; 3759 3760 /* 3761 * Extract the checkpointed uberblock from the current MOS 3762 * and use this as the pool's uberblock from now on. If the 3763 * pool is imported as writeable we also write the checkpoint 3764 * uberblock to the labels, making the rewind permanent. 3765 */ 3766 error = spa_ld_checkpoint_rewind(spa); 3767 if (error != 0) 3768 return (error); 3769 3770 /* 3771 * Redo the loading process again with the 3772 * checkpointed uberblock. 3773 */ 3774 spa_ld_prepare_for_reload(spa); 3775 spa_load_note(spa, "LOADING checkpointed uberblock"); 3776 error = spa_ld_mos_with_trusted_config(spa, type, NULL); 3777 if (error != 0) 3778 return (error); 3779 } 3780 3781 /* 3782 * Retrieve the checkpoint txg if the pool has a checkpoint. 3783 */ 3784 error = spa_ld_read_checkpoint_txg(spa); 3785 if (error != 0) 3786 return (error); 3787 3788 /* 3789 * Retrieve the mapping of indirect vdevs. Those vdevs were removed 3790 * from the pool and their contents were re-mapped to other vdevs. Note 3791 * that everything that we read before this step must have been 3792 * rewritten on concrete vdevs after the last device removal was 3793 * initiated. Otherwise we could be reading from indirect vdevs before 3794 * we have loaded their mappings. 3795 */ 3796 error = spa_ld_open_indirect_vdev_metadata(spa); 3797 if (error != 0) 3798 return (error); 3799 3800 /* 3801 * Retrieve the full list of active features from the MOS and check if 3802 * they are all supported. 3803 */ 3804 error = spa_ld_check_features(spa, &missing_feat_write); 3805 if (error != 0) 3806 return (error); 3807 3808 /* 3809 * Load several special directories from the MOS needed by the dsl_pool 3810 * layer. 3811 */ 3812 error = spa_ld_load_special_directories(spa); 3813 if (error != 0) 3814 return (error); 3815 3816 /* 3817 * Retrieve pool properties from the MOS. 3818 */ 3819 error = spa_ld_get_props(spa); 3820 if (error != 0) 3821 return (error); 3822 3823 /* 3824 * Retrieve the list of auxiliary devices - cache devices and spares - 3825 * and open them. 3826 */ 3827 error = spa_ld_open_aux_vdevs(spa, type); 3828 if (error != 0) 3829 return (error); 3830 3831 /* 3832 * Load the metadata for all vdevs. Also check if unopenable devices 3833 * should be autoreplaced. 3834 */ 3835 error = spa_ld_load_vdev_metadata(spa); 3836 if (error != 0) 3837 return (error); 3838 3839 error = spa_ld_load_dedup_tables(spa); 3840 if (error != 0) 3841 return (error); 3842 3843 /* 3844 * Verify the logs now to make sure we don't have any unexpected errors 3845 * when we claim log blocks later. 3846 */ 3847 error = spa_ld_verify_logs(spa, type, ereport); 3848 if (error != 0) 3849 return (error); 3850 3851 if (missing_feat_write) { 3852 ASSERT(spa->spa_load_state == SPA_LOAD_TRYIMPORT); 3853 3854 /* 3855 * At this point, we know that we can open the pool in 3856 * read-only mode but not read-write mode. We now have enough 3857 * information and can return to userland. 3858 */ 3859 return (spa_vdev_err(spa->spa_root_vdev, VDEV_AUX_UNSUP_FEAT, 3860 ENOTSUP)); 3861 } 3862 3863 /* 3864 * Traverse the last txgs to make sure the pool was left off in a safe 3865 * state. When performing an extreme rewind, we verify the whole pool, 3866 * which can take a very long time. 3867 */ 3868 error = spa_ld_verify_pool_data(spa); 3869 if (error != 0) 3870 return (error); 3871 3872 /* 3873 * Calculate the deflated space for the pool.
This must be done before 3874 * we write anything to the pool because we'd need to update the space 3875 * accounting using the deflated sizes. 3876 */ 3877 spa_update_dspace(spa); 3878 3879 /* 3880 * We have now retrieved all the information we needed to open the 3881 * pool. If we are importing the pool in read-write mode, a few 3882 * additional steps must be performed to finish the import. 3883 */ 3884 if (spa_writeable(spa) && (spa->spa_load_state == SPA_LOAD_RECOVER || 3885 spa->spa_load_max_txg == UINT64_MAX)) { 3886 uint64_t config_cache_txg = spa->spa_config_txg; 3887 3888 ASSERT(spa->spa_load_state != SPA_LOAD_TRYIMPORT); 3889 3890 /* 3891 * In case of a checkpoint rewind, log the original txg 3892 * of the checkpointed uberblock. 3893 */ 3894 if (checkpoint_rewind) { 3895 spa_history_log_internal(spa, "checkpoint rewind", 3896 NULL, "rewound state to txg=%llu", 3897 (u_longlong_t)spa->spa_uberblock.ub_checkpoint_txg); 3898 } 3899 3900 /* 3901 * Traverse the ZIL and claim all blocks. 3902 */ 3903 spa_ld_claim_log_blocks(spa); 3904 3905 /* 3906 * Kick-off the syncing thread. 3907 */ 3908 spa->spa_sync_on = B_TRUE; 3909 txg_sync_start(spa->spa_dsl_pool); 3910 3911 /* 3912 * Wait for all claims to sync. We sync up to the highest 3913 * claimed log block birth time so that claimed log blocks 3914 * don't appear to be from the future. spa_claim_max_txg 3915 * will have been set for us by ZIL traversal operations 3916 * performed above. 3917 */ 3918 txg_wait_synced(spa->spa_dsl_pool, spa->spa_claim_max_txg); 3919 3920 /* 3921 * Check if we need to request an update of the config. On the 3922 * next sync, we would update the config stored in vdev labels 3923 * and the cachefile (by default /etc/zfs/zpool.cache). 3924 */ 3925 spa_ld_check_for_config_update(spa, config_cache_txg, 3926 update_config_cache); 3927 3928 /* 3929 * Check all DTLs to see if anything needs resilvering. 3930 */ 3931 if (!dsl_scan_resilvering(spa->spa_dsl_pool) && 3932 vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL)) 3933 spa_async_request(spa, SPA_ASYNC_RESILVER); 3934 3935 /* 3936 * Log the fact that we booted up (so that we can detect if 3937 * we rebooted in the middle of an operation). 3938 */ 3939 spa_history_log_version(spa, "open"); 3940 3941 /* 3942 * Delete any inconsistent datasets. 3943 */ 3944 (void) dmu_objset_find(spa_name(spa), 3945 dsl_destroy_inconsistent, NULL, DS_FIND_CHILDREN); 3946 3947 /* 3948 * Clean up any stale temporary dataset userrefs. 3949 */ 3950 dsl_pool_clean_tmp_userrefs(spa->spa_dsl_pool); 3951 3952 spa_restart_removal(spa); 3953 3954 spa_spawn_aux_threads(spa); 3955 } 3956 3957 spa_load_note(spa, "LOADED"); 3958 3959 return (0); 3960} 3961 3962static int 3963spa_load_retry(spa_t *spa, spa_load_state_t state) 3964{ 3965 int mode = spa->spa_mode; 3966 3967 spa_unload(spa); 3968 spa_deactivate(spa); 3969 3970 spa->spa_load_max_txg = spa->spa_uberblock.ub_txg - 1; 3971 3972 spa_activate(spa, mode); 3973 spa_async_suspend(spa); 3974 3975 spa_load_note(spa, "spa_load_retry: rewind, max txg: %llu", 3976 (u_longlong_t)spa->spa_load_max_txg); 3977 3978 return (spa_load(spa, state, SPA_IMPORT_EXISTING)); 3979} 3980 3981/* 3982 * If spa_load() fails this function will try loading prior txg's. If 3983 * 'state' is SPA_LOAD_RECOVER and one of these loads succeeds the pool 3984 * will be rewound to that txg. If 'state' is not SPA_LOAD_RECOVER this 3985 * function will not rewind the pool and will return the same error as 3986 * spa_load(). 
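 * (Worked example, assuming TXG_DEFER_SIZE is 2 as elsewhere in this tree:
 * with spa_last_ubsync_txg = 1000 the safe rewind limit is txg 998, so a
 * plain recovery import ('zpool import -F') only retries uberblocks down to
 * 998, while an extreme rewind ('zpool import -FX') may walk back to
 * TXG_INITIAL.)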
3987 */ 3988static int 3989spa_load_best(spa_t *spa, spa_load_state_t state, uint64_t max_request, 3990 int rewind_flags) 3991{ 3992 nvlist_t *loadinfo = NULL; 3993 nvlist_t *config = NULL; 3994 int load_error, rewind_error; 3995 uint64_t safe_rewind_txg; 3996 uint64_t min_txg; 3997 3998 if (spa->spa_load_txg && state == SPA_LOAD_RECOVER) { 3999 spa->spa_load_max_txg = spa->spa_load_txg; 4000 spa_set_log_state(spa, SPA_LOG_CLEAR); 4001 } else { 4002 spa->spa_load_max_txg = max_request; 4003 if (max_request != UINT64_MAX) 4004 spa->spa_extreme_rewind = B_TRUE; 4005 } 4006 4007 load_error = rewind_error = spa_load(spa, state, SPA_IMPORT_EXISTING); 4008 if (load_error == 0) 4009 return (0); 4010 if (load_error == ZFS_ERR_NO_CHECKPOINT) { 4011 /* 4012 * When attempting checkpoint-rewind on a pool with no 4013 * checkpoint, we should not attempt to load uberblocks 4014 * from previous txgs when spa_load fails. 4015 */ 4016 ASSERT(spa->spa_import_flags & ZFS_IMPORT_CHECKPOINT); 4017 return (load_error); 4018 } 4019 4020 if (spa->spa_root_vdev != NULL) 4021 config = spa_config_generate(spa, NULL, -1ULL, B_TRUE); 4022 4023 spa->spa_last_ubsync_txg = spa->spa_uberblock.ub_txg; 4024 spa->spa_last_ubsync_txg_ts = spa->spa_uberblock.ub_timestamp; 4025 4026 if (rewind_flags & ZPOOL_NEVER_REWIND) { 4027 nvlist_free(config); 4028 return (load_error); 4029 } 4030 4031 if (state == SPA_LOAD_RECOVER) { 4032 /* Price of rolling back is discarding txgs, including log */ 4033 spa_set_log_state(spa, SPA_LOG_CLEAR); 4034 } else { 4035 /* 4036 * If we aren't rolling back save the load info from our first 4037 * import attempt so that we can restore it after attempting 4038 * to rewind. 4039 */ 4040 loadinfo = spa->spa_load_info; 4041 spa->spa_load_info = fnvlist_alloc(); 4042 } 4043 4044 spa->spa_load_max_txg = spa->spa_last_ubsync_txg; 4045 safe_rewind_txg = spa->spa_last_ubsync_txg - TXG_DEFER_SIZE; 4046 min_txg = (rewind_flags & ZPOOL_EXTREME_REWIND) ? 4047 TXG_INITIAL : safe_rewind_txg; 4048 4049 /* 4050 * Continue as long as we're finding errors, we're still within 4051 * the acceptable rewind range, and we're still finding uberblocks 4052 */ 4053 while (rewind_error && spa->spa_uberblock.ub_txg >= min_txg && 4054 spa->spa_uberblock.ub_txg <= spa->spa_load_max_txg) { 4055 if (spa->spa_load_max_txg < safe_rewind_txg) 4056 spa->spa_extreme_rewind = B_TRUE; 4057 rewind_error = spa_load_retry(spa, state); 4058 } 4059 4060 spa->spa_extreme_rewind = B_FALSE; 4061 spa->spa_load_max_txg = UINT64_MAX; 4062 4063 if (config && (rewind_error || state != SPA_LOAD_RECOVER)) 4064 spa_config_set(spa, config); 4065 else 4066 nvlist_free(config); 4067 4068 if (state == SPA_LOAD_RECOVER) { 4069 ASSERT3P(loadinfo, ==, NULL); 4070 return (rewind_error); 4071 } else { 4072 /* Store the rewind info as part of the initial load info */ 4073 fnvlist_add_nvlist(loadinfo, ZPOOL_CONFIG_REWIND_INFO, 4074 spa->spa_load_info); 4075 4076 /* Restore the initial load info */ 4077 fnvlist_free(spa->spa_load_info); 4078 spa->spa_load_info = loadinfo; 4079 4080 return (load_error); 4081 } 4082} 4083 4084/* 4085 * Pool Open/Import 4086 * 4087 * The import case is identical to an open except that the configuration is sent 4088 * down from userland, instead of grabbed from the configuration cache. For the 4089 * case of an open, the pool configuration will exist in the 4090 * POOL_STATE_UNINITIALIZED state. 
4091 * 4092 * The stats information (gen/count/ustats) is used to gather vdev statistics at 4093 * the same time as opening the pool, without having to keep around the spa_t in some 4094 * ambiguous state. 4095 */ 4096static int 4097spa_open_common(const char *pool, spa_t **spapp, void *tag, nvlist_t *nvpolicy, 4098 nvlist_t **config) 4099{ 4100 spa_t *spa; 4101 spa_load_state_t state = SPA_LOAD_OPEN; 4102 int error; 4103 int locked = B_FALSE; 4104 int firstopen = B_FALSE; 4105 4106 *spapp = NULL; 4107 4108 /* 4109 * As disgusting as this is, we need to support recursive calls to this 4110 * function because dsl_dir_open() is called during spa_load(), and ends 4111 * up calling spa_open() again. The real fix is to figure out how to 4112 * avoid dsl_dir_open() calling this in the first place. 4113 */ 4114 if (mutex_owner(&spa_namespace_lock) != curthread) { 4115 mutex_enter(&spa_namespace_lock); 4116 locked = B_TRUE; 4117 } 4118 4119 if ((spa = spa_lookup(pool)) == NULL) { 4120 if (locked) 4121 mutex_exit(&spa_namespace_lock); 4122 return (SET_ERROR(ENOENT)); 4123 } 4124 4125 if (spa->spa_state == POOL_STATE_UNINITIALIZED) { 4126 zpool_rewind_policy_t policy; 4127 4128 firstopen = B_TRUE; 4129 4130 zpool_get_rewind_policy(nvpolicy ? nvpolicy : spa->spa_config, 4131 &policy); 4132 if (policy.zrp_request & ZPOOL_DO_REWIND) 4133 state = SPA_LOAD_RECOVER; 4134 4135 spa_activate(spa, spa_mode_global); 4136 4137 if (state != SPA_LOAD_RECOVER) 4138 spa->spa_last_ubsync_txg = spa->spa_load_txg = 0; 4139 spa->spa_config_source = SPA_CONFIG_SRC_CACHEFILE; 4140 4141 zfs_dbgmsg("spa_open_common: opening %s", pool); 4142 error = spa_load_best(spa, state, policy.zrp_txg, 4143 policy.zrp_request); 4144 4145 if (error == EBADF) { 4146 /* 4147 * If vdev_validate() returns failure (indicated by 4148 * EBADF), it means that one of the vdevs indicates 4149 * that the pool has been exported or destroyed. If 4150 * this is the case, the config cache is out of sync and 4151 * we should remove the pool from the namespace. 4152 */ 4153 spa_unload(spa); 4154 spa_deactivate(spa); 4155 spa_write_cachefile(spa, B_TRUE, B_TRUE); 4156 spa_remove(spa); 4157 if (locked) 4158 mutex_exit(&spa_namespace_lock); 4159 return (SET_ERROR(ENOENT)); 4160 } 4161 4162 if (error) { 4163 /* 4164 * We can't open the pool, but we still have useful 4165 * information: the state of each vdev after the 4166 * attempted vdev_open(). Return this to the user. 4167 */ 4168 if (config != NULL && spa->spa_config) { 4169 VERIFY(nvlist_dup(spa->spa_config, config, 4170 KM_SLEEP) == 0); 4171 VERIFY(nvlist_add_nvlist(*config, 4172 ZPOOL_CONFIG_LOAD_INFO, 4173 spa->spa_load_info) == 0); 4174 } 4175 spa_unload(spa); 4176 spa_deactivate(spa); 4177 spa->spa_last_open_failed = error; 4178 if (locked) 4179 mutex_exit(&spa_namespace_lock); 4180 *spapp = NULL; 4181 return (error); 4182 } 4183 } 4184 4185 spa_open_ref(spa, tag); 4186 4187 if (config != NULL) 4188 *config = spa_config_generate(spa, NULL, -1ULL, B_TRUE); 4189 4190 /* 4191 * If we've recovered the pool, pass back any information we 4192 * gathered while doing the load.
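 * (This is the same ZPOOL_CONFIG_LOAD_INFO nvlist used in the failure path
 * above; after a rewind it is what lets userland report roughly which txg
 * the pool came back at and what was discarded. Presumed consumer behavior,
 * not spelled out in the original comment.)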
4193 */ 4194 if (state == SPA_LOAD_RECOVER) { 4195 VERIFY(nvlist_add_nvlist(*config, ZPOOL_CONFIG_LOAD_INFO, 4196 spa->spa_load_info) == 0); 4197 } 4198 4199 if (locked) { 4200 spa->spa_last_open_failed = 0; 4201 spa->spa_last_ubsync_txg = 0; 4202 spa->spa_load_txg = 0; 4203 mutex_exit(&spa_namespace_lock); 4204#ifdef __FreeBSD__ 4205#ifdef _KERNEL 4206 if (firstopen) 4207 zvol_create_minors(spa->spa_name); 4208#endif 4209#endif 4210 } 4211 4212 *spapp = spa; 4213 4214 return (0); 4215} 4216 4217int 4218spa_open_rewind(const char *name, spa_t **spapp, void *tag, nvlist_t *policy, 4219 nvlist_t **config) 4220{ 4221 return (spa_open_common(name, spapp, tag, policy, config)); 4222} 4223 4224int 4225spa_open(const char *name, spa_t **spapp, void *tag) 4226{ 4227 return (spa_open_common(name, spapp, tag, NULL, NULL)); 4228} 4229 4230/* 4231 * Lookup the given spa_t, incrementing the inject count in the process, 4232 * preventing it from being exported or destroyed. 4233 */ 4234spa_t * 4235spa_inject_addref(char *name) 4236{ 4237 spa_t *spa; 4238 4239 mutex_enter(&spa_namespace_lock); 4240 if ((spa = spa_lookup(name)) == NULL) { 4241 mutex_exit(&spa_namespace_lock); 4242 return (NULL); 4243 } 4244 spa->spa_inject_ref++; 4245 mutex_exit(&spa_namespace_lock); 4246 4247 return (spa); 4248} 4249 4250void 4251spa_inject_delref(spa_t *spa) 4252{ 4253 mutex_enter(&spa_namespace_lock); 4254 spa->spa_inject_ref--; 4255 mutex_exit(&spa_namespace_lock); 4256} 4257 4258/* 4259 * Add spares device information to the nvlist. 4260 */ 4261static void 4262spa_add_spares(spa_t *spa, nvlist_t *config) 4263{ 4264 nvlist_t **spares; 4265 uint_t i, nspares; 4266 nvlist_t *nvroot; 4267 uint64_t guid; 4268 vdev_stat_t *vs; 4269 uint_t vsc; 4270 uint64_t pool; 4271 4272 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER)); 4273 4274 if (spa->spa_spares.sav_count == 0) 4275 return; 4276 4277 VERIFY(nvlist_lookup_nvlist(config, 4278 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0); 4279 VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config, 4280 ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0); 4281 if (nspares != 0) { 4282 VERIFY(nvlist_add_nvlist_array(nvroot, 4283 ZPOOL_CONFIG_SPARES, spares, nspares) == 0); 4284 VERIFY(nvlist_lookup_nvlist_array(nvroot, 4285 ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0); 4286 4287 /* 4288 * Go through and find any spares which have since been 4289 * repurposed as an active spare. If this is the case, update 4290 * their status appropriately. 4291 */ 4292 for (i = 0; i < nspares; i++) { 4293 VERIFY(nvlist_lookup_uint64(spares[i], 4294 ZPOOL_CONFIG_GUID, &guid) == 0); 4295 if (spa_spare_exists(guid, &pool, NULL) && 4296 pool != 0ULL) { 4297 VERIFY(nvlist_lookup_uint64_array( 4298 spares[i], ZPOOL_CONFIG_VDEV_STATS, 4299 (uint64_t **)&vs, &vsc) == 0); 4300 vs->vs_state = VDEV_STATE_CANT_OPEN; 4301 vs->vs_aux = VDEV_AUX_SPARED; 4302 } 4303 } 4304 } 4305} 4306 4307/* 4308 * Add l2cache device information to the nvlist, including vdev stats. 
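 * (Layout sketch, illustrative rather than exhaustive: this function and
 * spa_add_spares() above each append an array under the config's vdev_tree,
 * e.g. vdev_tree.l2cache[0] = { type, guid, path, vdev_stats, ... }, so
 * 'zpool status' and 'zpool import' can show aux devices next to the tree.)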
4309 */ 4310static void 4311spa_add_l2cache(spa_t *spa, nvlist_t *config) 4312{ 4313 nvlist_t **l2cache; 4314 uint_t i, j, nl2cache; 4315 nvlist_t *nvroot; 4316 uint64_t guid; 4317 vdev_t *vd; 4318 vdev_stat_t *vs; 4319 uint_t vsc; 4320 4321 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER)); 4322 4323 if (spa->spa_l2cache.sav_count == 0) 4324 return; 4325 4326 VERIFY(nvlist_lookup_nvlist(config, 4327 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0); 4328 VERIFY(nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config, 4329 ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0); 4330 if (nl2cache != 0) { 4331 VERIFY(nvlist_add_nvlist_array(nvroot, 4332 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0); 4333 VERIFY(nvlist_lookup_nvlist_array(nvroot, 4334 ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0); 4335 4336 /* 4337 * Update level 2 cache device stats. 4338 */ 4339 4340 for (i = 0; i < nl2cache; i++) { 4341 VERIFY(nvlist_lookup_uint64(l2cache[i], 4342 ZPOOL_CONFIG_GUID, &guid) == 0); 4343 4344 vd = NULL; 4345 for (j = 0; j < spa->spa_l2cache.sav_count; j++) { 4346 if (guid == 4347 spa->spa_l2cache.sav_vdevs[j]->vdev_guid) { 4348 vd = spa->spa_l2cache.sav_vdevs[j]; 4349 break; 4350 } 4351 } 4352 ASSERT(vd != NULL); 4353 4354 VERIFY(nvlist_lookup_uint64_array(l2cache[i], 4355 ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc) 4356 == 0); 4357 vdev_get_stats(vd, vs); 4358 } 4359 } 4360} 4361 4362static void 4363spa_add_feature_stats(spa_t *spa, nvlist_t *config) 4364{ 4365 nvlist_t *features; 4366 zap_cursor_t zc; 4367 zap_attribute_t za; 4368 4369 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER)); 4370 VERIFY(nvlist_alloc(&features, NV_UNIQUE_NAME, KM_SLEEP) == 0); 4371 4372 /* We may be unable to read features if pool is suspended. */ 4373 if (spa_suspended(spa)) 4374 goto out; 4375 4376 if (spa->spa_feat_for_read_obj != 0) { 4377 for (zap_cursor_init(&zc, spa->spa_meta_objset, 4378 spa->spa_feat_for_read_obj); 4379 zap_cursor_retrieve(&zc, &za) == 0; 4380 zap_cursor_advance(&zc)) { 4381 ASSERT(za.za_integer_length == sizeof (uint64_t) && 4382 za.za_num_integers == 1); 4383 VERIFY3U(0, ==, nvlist_add_uint64(features, za.za_name, 4384 za.za_first_integer)); 4385 } 4386 zap_cursor_fini(&zc); 4387 } 4388 4389 if (spa->spa_feat_for_write_obj != 0) { 4390 for (zap_cursor_init(&zc, spa->spa_meta_objset, 4391 spa->spa_feat_for_write_obj); 4392 zap_cursor_retrieve(&zc, &za) == 0; 4393 zap_cursor_advance(&zc)) { 4394 ASSERT(za.za_integer_length == sizeof (uint64_t) && 4395 za.za_num_integers == 1); 4396 VERIFY3U(0, ==, nvlist_add_uint64(features, za.za_name, 4397 za.za_first_integer)); 4398 } 4399 zap_cursor_fini(&zc); 4400 } 4401 4402out: 4403 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_FEATURE_STATS, 4404 features) == 0); 4405 nvlist_free(features); 4406} 4407 4408int 4409spa_get_stats(const char *name, nvlist_t **config, 4410 char *altroot, size_t buflen) 4411{ 4412 int error; 4413 spa_t *spa; 4414 4415 *config = NULL; 4416 error = spa_open_common(name, &spa, FTAG, NULL, config); 4417 4418 if (spa != NULL) { 4419 /* 4420 * This still leaves a window of inconsistency where the spares 4421 * or l2cache devices could change and the config would be 4422 * self-inconsistent. 
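 * (Concretely: *config was generated inside spa_open_common() before we take
 * SCL_CONFIG below, so a concurrent 'zpool add' or 'zpool remove' of a spare
 * or cache device can slip in between and the arrays appended below may not
 * match the vdev tree snapshot; presumably tolerated since this only feeds
 * status output.)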
4423 */ 4424 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); 4425 4426 if (*config != NULL) { 4427 uint64_t loadtimes[2]; 4428 4429 loadtimes[0] = spa->spa_loaded_ts.tv_sec; 4430 loadtimes[1] = spa->spa_loaded_ts.tv_nsec; 4431 VERIFY(nvlist_add_uint64_array(*config, 4432 ZPOOL_CONFIG_LOADED_TIME, loadtimes, 2) == 0); 4433 4434 VERIFY(nvlist_add_uint64(*config, 4435 ZPOOL_CONFIG_ERRCOUNT, 4436 spa_get_errlog_size(spa)) == 0); 4437 4438 if (spa_suspended(spa)) 4439 VERIFY(nvlist_add_uint64(*config, 4440 ZPOOL_CONFIG_SUSPENDED, 4441 spa->spa_failmode) == 0); 4442 4443 spa_add_spares(spa, *config); 4444 spa_add_l2cache(spa, *config); 4445 spa_add_feature_stats(spa, *config); 4446 } 4447 } 4448 4449 /* 4450 * We want to get the alternate root even for faulted pools, so we cheat 4451 * and call spa_lookup() directly. 4452 */ 4453 if (altroot) { 4454 if (spa == NULL) { 4455 mutex_enter(&spa_namespace_lock); 4456 spa = spa_lookup(name); 4457 if (spa) 4458 spa_altroot(spa, altroot, buflen); 4459 else 4460 altroot[0] = '\0'; 4461 spa = NULL; 4462 mutex_exit(&spa_namespace_lock); 4463 } else { 4464 spa_altroot(spa, altroot, buflen); 4465 } 4466 } 4467 4468 if (spa != NULL) { 4469 spa_config_exit(spa, SCL_CONFIG, FTAG); 4470 spa_close(spa, FTAG); 4471 } 4472 4473 return (error); 4474} 4475 4476/* 4477 * Validate that the auxiliary device array is well formed. We must have an 4478 * array of nvlists, each which describes a valid leaf vdev. If this is an 4479 * import (mode is VDEV_ALLOC_SPARE), then we allow corrupted spares to be 4480 * specified, as long as they are well-formed. 4481 */ 4482static int 4483spa_validate_aux_devs(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode, 4484 spa_aux_vdev_t *sav, const char *config, uint64_t version, 4485 vdev_labeltype_t label) 4486{ 4487 nvlist_t **dev; 4488 uint_t i, ndev; 4489 vdev_t *vd; 4490 int error; 4491 4492 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL); 4493 4494 /* 4495 * It's acceptable to have no devs specified. 4496 */ 4497 if (nvlist_lookup_nvlist_array(nvroot, config, &dev, &ndev) != 0) 4498 return (0); 4499 4500 if (ndev == 0) 4501 return (SET_ERROR(EINVAL)); 4502 4503 /* 4504 * Make sure the pool is formatted with a version that supports this 4505 * device type. 4506 */ 4507 if (spa_version(spa) < version) 4508 return (SET_ERROR(ENOTSUP)); 4509 4510 /* 4511 * Set the pending device list so we correctly handle device in-use 4512 * checking. 4513 */ 4514 sav->sav_pending = dev; 4515 sav->sav_npending = ndev; 4516 4517 for (i = 0; i < ndev; i++) { 4518 if ((error = spa_config_parse(spa, &vd, dev[i], NULL, 0, 4519 mode)) != 0) 4520 goto out; 4521 4522 if (!vd->vdev_ops->vdev_op_leaf) { 4523 vdev_free(vd); 4524 error = SET_ERROR(EINVAL); 4525 goto out; 4526 } 4527 4528 /* 4529 * The L2ARC currently only supports disk devices in 4530 * kernel context. For user-level testing, we allow it. 
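 * (In practice: in the kernel, adding a plain file as a 'cache' device is
 * turned away with ENOTBLK by the check below, while userland builds such as
 * ztest skip the check and may use files as L2ARC devices.)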
4531 */ 4532#ifdef _KERNEL 4533 if ((strcmp(config, ZPOOL_CONFIG_L2CACHE) == 0) && 4534 strcmp(vd->vdev_ops->vdev_op_type, VDEV_TYPE_DISK) != 0) { 4535 error = SET_ERROR(ENOTBLK); 4536 vdev_free(vd); 4537 goto out; 4538 } 4539#endif 4540 vd->vdev_top = vd; 4541 4542 if ((error = vdev_open(vd)) == 0 && 4543 (error = vdev_label_init(vd, crtxg, label)) == 0) { 4544 VERIFY(nvlist_add_uint64(dev[i], ZPOOL_CONFIG_GUID, 4545 vd->vdev_guid) == 0); 4546 } 4547 4548 vdev_free(vd); 4549 4550 if (error && 4551 (mode != VDEV_ALLOC_SPARE && mode != VDEV_ALLOC_L2CACHE)) 4552 goto out; 4553 else 4554 error = 0; 4555 } 4556 4557out: 4558 sav->sav_pending = NULL; 4559 sav->sav_npending = 0; 4560 return (error); 4561} 4562 4563static int 4564spa_validate_aux(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode) 4565{ 4566 int error; 4567 4568 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL); 4569 4570 if ((error = spa_validate_aux_devs(spa, nvroot, crtxg, mode, 4571 &spa->spa_spares, ZPOOL_CONFIG_SPARES, SPA_VERSION_SPARES, 4572 VDEV_LABEL_SPARE)) != 0) { 4573 return (error); 4574 } 4575 4576 return (spa_validate_aux_devs(spa, nvroot, crtxg, mode, 4577 &spa->spa_l2cache, ZPOOL_CONFIG_L2CACHE, SPA_VERSION_L2CACHE, 4578 VDEV_LABEL_L2CACHE)); 4579} 4580 4581static void 4582spa_set_aux_vdevs(spa_aux_vdev_t *sav, nvlist_t **devs, int ndevs, 4583 const char *config) 4584{ 4585 int i; 4586 4587 if (sav->sav_config != NULL) { 4588 nvlist_t **olddevs; 4589 uint_t oldndevs; 4590 nvlist_t **newdevs; 4591 4592 /* 4593 * Generate new dev list by concatentating with the 4594 * current dev list. 4595 */ 4596 VERIFY(nvlist_lookup_nvlist_array(sav->sav_config, config, 4597 &olddevs, &oldndevs) == 0); 4598 4599 newdevs = kmem_alloc(sizeof (void *) * 4600 (ndevs + oldndevs), KM_SLEEP); 4601 for (i = 0; i < oldndevs; i++) 4602 VERIFY(nvlist_dup(olddevs[i], &newdevs[i], 4603 KM_SLEEP) == 0); 4604 for (i = 0; i < ndevs; i++) 4605 VERIFY(nvlist_dup(devs[i], &newdevs[i + oldndevs], 4606 KM_SLEEP) == 0); 4607 4608 VERIFY(nvlist_remove(sav->sav_config, config, 4609 DATA_TYPE_NVLIST_ARRAY) == 0); 4610 4611 VERIFY(nvlist_add_nvlist_array(sav->sav_config, 4612 config, newdevs, ndevs + oldndevs) == 0); 4613 for (i = 0; i < oldndevs + ndevs; i++) 4614 nvlist_free(newdevs[i]); 4615 kmem_free(newdevs, (oldndevs + ndevs) * sizeof (void *)); 4616 } else { 4617 /* 4618 * Generate a new dev list. 4619 */ 4620 VERIFY(nvlist_alloc(&sav->sav_config, NV_UNIQUE_NAME, 4621 KM_SLEEP) == 0); 4622 VERIFY(nvlist_add_nvlist_array(sav->sav_config, config, 4623 devs, ndevs) == 0); 4624 } 4625} 4626 4627/* 4628 * Stop and drop level 2 ARC devices 4629 */ 4630void 4631spa_l2cache_drop(spa_t *spa) 4632{ 4633 vdev_t *vd; 4634 int i; 4635 spa_aux_vdev_t *sav = &spa->spa_l2cache; 4636 4637 for (i = 0; i < sav->sav_count; i++) { 4638 uint64_t pool; 4639 4640 vd = sav->sav_vdevs[i]; 4641 ASSERT(vd != NULL); 4642 4643 if (spa_l2cache_exists(vd->vdev_guid, &pool) && 4644 pool != 0ULL && l2arc_vdev_present(vd)) 4645 l2arc_remove_vdev(vd); 4646 } 4647} 4648 4649/* 4650 * Pool Creation 4651 */ 4652int 4653spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props, 4654 nvlist_t *zplprops) 4655{ 4656 spa_t *spa; 4657 char *altroot = NULL; 4658 vdev_t *rvd; 4659 dsl_pool_t *dp; 4660 dmu_tx_t *tx; 4661 int error = 0; 4662 uint64_t txg = TXG_INITIAL; 4663 nvlist_t **spares, **l2cache; 4664 uint_t nspares, nl2cache; 4665 uint64_t version, obj; 4666 boolean_t has_features; 4667 4668 /* 4669 * If this pool already exists, return failure. 
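 * (For orientation, an illustrative shape of the nvroot handed to us by the
 * zpool(8) create path, not the only valid layout:
 *   { type="root", children=[ { type="mirror",
 *     children=[ { type="disk", path="/dev/da0" }, ... ] } ] }
 * with optional "spares" and "l2cache" arrays alongside, consumed below.)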
4670 */ 4671 mutex_enter(&spa_namespace_lock); 4672 if (spa_lookup(pool) != NULL) { 4673 mutex_exit(&spa_namespace_lock); 4674 return (SET_ERROR(EEXIST)); 4675 } 4676 4677 /* 4678 * Allocate a new spa_t structure. 4679 */ 4680 (void) nvlist_lookup_string(props, 4681 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot); 4682 spa = spa_add(pool, NULL, altroot); 4683 spa_activate(spa, spa_mode_global); 4684 4685 if (props && (error = spa_prop_validate(spa, props))) { 4686 spa_deactivate(spa); 4687 spa_remove(spa); 4688 mutex_exit(&spa_namespace_lock); 4689 return (error); 4690 } 4691 4692 has_features = B_FALSE; 4693 for (nvpair_t *elem = nvlist_next_nvpair(props, NULL); 4694 elem != NULL; elem = nvlist_next_nvpair(props, elem)) { 4695 if (zpool_prop_feature(nvpair_name(elem))) 4696 has_features = B_TRUE; 4697 } 4698 4699 if (has_features || nvlist_lookup_uint64(props, 4700 zpool_prop_to_name(ZPOOL_PROP_VERSION), &version) != 0) { 4701 version = SPA_VERSION; 4702 } 4703 ASSERT(SPA_VERSION_IS_SUPPORTED(version)); 4704 4705 spa->spa_first_txg = txg; 4706 spa->spa_uberblock.ub_txg = txg - 1; 4707 spa->spa_uberblock.ub_version = version; 4708 spa->spa_ubsync = spa->spa_uberblock; 4709 spa->spa_load_state = SPA_LOAD_CREATE; 4710 spa->spa_removing_phys.sr_state = DSS_NONE; 4711 spa->spa_removing_phys.sr_removing_vdev = -1; 4712 spa->spa_removing_phys.sr_prev_indirect_vdev = -1; 4713 4714 /* 4715 * Create "The Godfather" zio to hold all async IOs 4716 */ 4717 spa->spa_async_zio_root = kmem_alloc(max_ncpus * sizeof (void *), 4718 KM_SLEEP); 4719 for (int i = 0; i < max_ncpus; i++) { 4720 spa->spa_async_zio_root[i] = zio_root(spa, NULL, NULL, 4721 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | 4722 ZIO_FLAG_GODFATHER); 4723 } 4724 4725 /* 4726 * Create the root vdev. 4727 */ 4728 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 4729 4730 error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, VDEV_ALLOC_ADD); 4731 4732 ASSERT(error != 0 || rvd != NULL); 4733 ASSERT(error != 0 || spa->spa_root_vdev == rvd); 4734 4735 if (error == 0 && !zfs_allocatable_devs(nvroot)) 4736 error = SET_ERROR(EINVAL); 4737 4738 if (error == 0 && 4739 (error = vdev_create(rvd, txg, B_FALSE)) == 0 && 4740 (error = spa_validate_aux(spa, nvroot, txg, 4741 VDEV_ALLOC_ADD)) == 0) { 4742 for (int c = 0; c < rvd->vdev_children; c++) { 4743 vdev_ashift_optimize(rvd->vdev_child[c]); 4744 vdev_metaslab_set_size(rvd->vdev_child[c]); 4745 vdev_expand(rvd->vdev_child[c], txg); 4746 } 4747 } 4748 4749 spa_config_exit(spa, SCL_ALL, FTAG); 4750 4751 if (error != 0) { 4752 spa_unload(spa); 4753 spa_deactivate(spa); 4754 spa_remove(spa); 4755 mutex_exit(&spa_namespace_lock); 4756 return (error); 4757 } 4758 4759 /* 4760 * Get the list of spares, if specified. 4761 */ 4762 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, 4763 &spares, &nspares) == 0) { 4764 VERIFY(nvlist_alloc(&spa->spa_spares.sav_config, NV_UNIQUE_NAME, 4765 KM_SLEEP) == 0); 4766 VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config, 4767 ZPOOL_CONFIG_SPARES, spares, nspares) == 0); 4768 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 4769 spa_load_spares(spa); 4770 spa_config_exit(spa, SCL_ALL, FTAG); 4771 spa->spa_spares.sav_sync = B_TRUE; 4772 } 4773 4774 /* 4775 * Get the list of level 2 cache devices, if specified. 
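 * (Same pattern as the spares block above: the raw nvlist from userland is
 * stashed in sav_config, realized by spa_load_l2cache() under SCL_ALL, and
 * sav_sync is set so the first sync writes it out.)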
4776 */ 4777 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, 4778 &l2cache, &nl2cache) == 0) { 4779 VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config, 4780 NV_UNIQUE_NAME, KM_SLEEP) == 0); 4781 VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config, 4782 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0); 4783 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 4784 spa_load_l2cache(spa); 4785 spa_config_exit(spa, SCL_ALL, FTAG); 4786 spa->spa_l2cache.sav_sync = B_TRUE; 4787 } 4788 4789 spa->spa_is_initializing = B_TRUE; 4790 spa->spa_dsl_pool = dp = dsl_pool_create(spa, zplprops, txg); 4791 spa->spa_meta_objset = dp->dp_meta_objset; 4792 spa->spa_is_initializing = B_FALSE; 4793 4794 /* 4795 * Create DDTs (dedup tables). 4796 */ 4797 ddt_create(spa); 4798 4799 spa_update_dspace(spa); 4800 4801 tx = dmu_tx_create_assigned(dp, txg); 4802 4803 /* 4804 * Create the pool config object. 4805 */ 4806 spa->spa_config_object = dmu_object_alloc(spa->spa_meta_objset, 4807 DMU_OT_PACKED_NVLIST, SPA_CONFIG_BLOCKSIZE, 4808 DMU_OT_PACKED_NVLIST_SIZE, sizeof (uint64_t), tx); 4809 4810 if (zap_add(spa->spa_meta_objset, 4811 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CONFIG, 4812 sizeof (uint64_t), 1, &spa->spa_config_object, tx) != 0) { 4813 cmn_err(CE_PANIC, "failed to add pool config"); 4814 } 4815 4816 if (spa_version(spa) >= SPA_VERSION_FEATURES) 4817 spa_feature_create_zap_objects(spa, tx); 4818 4819 if (zap_add(spa->spa_meta_objset, 4820 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CREATION_VERSION, 4821 sizeof (uint64_t), 1, &version, tx) != 0) { 4822 cmn_err(CE_PANIC, "failed to add pool version"); 4823 } 4824 4825 /* Newly created pools with the right version are always deflated. */ 4826 if (version >= SPA_VERSION_RAIDZ_DEFLATE) { 4827 spa->spa_deflate = TRUE; 4828 if (zap_add(spa->spa_meta_objset, 4829 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE, 4830 sizeof (uint64_t), 1, &spa->spa_deflate, tx) != 0) { 4831 cmn_err(CE_PANIC, "failed to add deflate"); 4832 } 4833 } 4834 4835 /* 4836 * Create the deferred-free bpobj. Turn off compression 4837 * because sync-to-convergence takes longer if the blocksize 4838 * keeps changing. 4839 */ 4840 obj = bpobj_alloc(spa->spa_meta_objset, 1 << 14, tx); 4841 dmu_object_set_compress(spa->spa_meta_objset, obj, 4842 ZIO_COMPRESS_OFF, tx); 4843 if (zap_add(spa->spa_meta_objset, 4844 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SYNC_BPOBJ, 4845 sizeof (uint64_t), 1, &obj, tx) != 0) { 4846 cmn_err(CE_PANIC, "failed to add bpobj"); 4847 } 4848 VERIFY3U(0, ==, bpobj_open(&spa->spa_deferred_bpobj, 4849 spa->spa_meta_objset, obj)); 4850 4851 /* 4852 * Create the pool's history object. 4853 */ 4854 if (version >= SPA_VERSION_ZPOOL_HISTORY) 4855 spa_history_create_obj(spa, tx); 4856 4857 /* 4858 * Generate some random noise for salted checksums to operate on. 4859 */ 4860 (void) random_get_pseudo_bytes(spa->spa_cksum_salt.zcs_bytes, 4861 sizeof (spa->spa_cksum_salt.zcs_bytes)); 4862 4863 /* 4864 * Set pool properties. 
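 * (Defaults come first, from zpool_prop_default_numeric(); anything the
 * caller supplied in 'props', e.g. failmode, autoexpand, or feature@
 * properties, is applied just below via spa_sync_props() in this same
 * create txg.)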
4865 */ 4866 spa->spa_bootfs = zpool_prop_default_numeric(ZPOOL_PROP_BOOTFS); 4867 spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION); 4868 spa->spa_failmode = zpool_prop_default_numeric(ZPOOL_PROP_FAILUREMODE); 4869 spa->spa_autoexpand = zpool_prop_default_numeric(ZPOOL_PROP_AUTOEXPAND); 4870 4871 if (props != NULL) { 4872 spa_configfile_set(spa, props, B_FALSE); 4873 spa_sync_props(props, tx); 4874 } 4875 4876 dmu_tx_commit(tx); 4877 4878 spa->spa_sync_on = B_TRUE; 4879 txg_sync_start(spa->spa_dsl_pool); 4880 4881 /* 4882 * We explicitly wait for the first transaction to complete so that our 4883 * bean counters are appropriately updated. 4884 */ 4885 txg_wait_synced(spa->spa_dsl_pool, txg); 4886 4887 spa_spawn_aux_threads(spa); 4888 4889 spa_write_cachefile(spa, B_FALSE, B_TRUE); 4890 spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_CREATE); 4891 4892 spa_history_log_version(spa, "create"); 4893 4894 /* 4895 * Don't count references from objsets that are already closed 4896 * and are making their way through the eviction process. 4897 */ 4898 spa_evicting_os_wait(spa); 4899 spa->spa_minref = refcount_count(&spa->spa_refcount); 4900 spa->spa_load_state = SPA_LOAD_NONE; 4901 4902 mutex_exit(&spa_namespace_lock); 4903 4904 return (0); 4905} 4906 4907#ifdef _KERNEL 4908#ifdef illumos 4909/* 4910 * Get the root pool information from the root disk, then import the root pool 4911 * during the system boot up time. 4912 */ 4913extern int vdev_disk_read_rootlabel(char *, char *, nvlist_t **); 4914 4915static nvlist_t * 4916spa_generate_rootconf(char *devpath, char *devid, uint64_t *guid) 4917{ 4918 nvlist_t *config; 4919 nvlist_t *nvtop, *nvroot; 4920 uint64_t pgid; 4921 4922 if (vdev_disk_read_rootlabel(devpath, devid, &config) != 0) 4923 return (NULL); 4924 4925 /* 4926 * Add this top-level vdev to the child array. 4927 */ 4928 VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 4929 &nvtop) == 0); 4930 VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, 4931 &pgid) == 0); 4932 VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID, guid) == 0); 4933 4934 /* 4935 * Put this pool's top-level vdevs into a root vdev. 4936 */ 4937 VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0); 4938 VERIFY(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE, 4939 VDEV_TYPE_ROOT) == 0); 4940 VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) == 0); 4941 VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, pgid) == 0); 4942 VERIFY(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, 4943 &nvtop, 1) == 0); 4944 4945 /* 4946 * Replace the existing vdev_tree with the new root vdev in 4947 * this pool's configuration (remove the old, add the new). 4948 */ 4949 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, nvroot) == 0); 4950 nvlist_free(nvroot); 4951 return (config); 4952} 4953 4954/* 4955 * Walk the vdev tree and see if we can find a device with "better" 4956 * configuration. A configuration is "better" if the label on that 4957 * device has a more recent txg. 
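 * (Example: in a root mirror where one side missed the last few txgs while
 * offline, the healthy side's label carries a higher ZPOOL_CONFIG_POOL_TXG
 * and is the copy we should boot from.)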
4958 */ 4959static void 4960spa_alt_rootvdev(vdev_t *vd, vdev_t **avd, uint64_t *txg) 4961{ 4962 for (int c = 0; c < vd->vdev_children; c++) 4963 spa_alt_rootvdev(vd->vdev_child[c], avd, txg); 4964 4965 if (vd->vdev_ops->vdev_op_leaf) { 4966 nvlist_t *label; 4967 uint64_t label_txg; 4968 4969 if (vdev_disk_read_rootlabel(vd->vdev_physpath, vd->vdev_devid, 4970 &label) != 0) 4971 return; 4972 4973 VERIFY(nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_TXG, 4974 &label_txg) == 0); 4975 4976 /* 4977 * Do we have a better boot device? 4978 */ 4979 if (label_txg > *txg) { 4980 *txg = label_txg; 4981 *avd = vd; 4982 } 4983 nvlist_free(label); 4984 } 4985} 4986 4987/* 4988 * Import a root pool. 4989 * 4990 * For x86. devpath_list will consist of devid and/or physpath name of 4991 * the vdev (e.g. "id1,sd@SSEAGATE..." or "/pci@1f,0/ide@d/disk@0,0:a"). 4992 * The GRUB "findroot" command will return the vdev we should boot. 4993 * 4994 * For Sparc, devpath_list consists the physpath name of the booting device 4995 * no matter the rootpool is a single device pool or a mirrored pool. 4996 * e.g. 4997 * "/pci@1f,0/ide@d/disk@0,0:a" 4998 */ 4999int 5000spa_import_rootpool(char *devpath, char *devid) 5001{ 5002 spa_t *spa; 5003 vdev_t *rvd, *bvd, *avd = NULL; 5004 nvlist_t *config, *nvtop; 5005 uint64_t guid, txg; 5006 char *pname; 5007 int error; 5008 5009 /* 5010 * Read the label from the boot device and generate a configuration. 5011 */ 5012 config = spa_generate_rootconf(devpath, devid, &guid); 5013#if defined(_OBP) && defined(_KERNEL) 5014 if (config == NULL) { 5015 if (strstr(devpath, "/iscsi/ssd") != NULL) { 5016 /* iscsi boot */ 5017 get_iscsi_bootpath_phy(devpath); 5018 config = spa_generate_rootconf(devpath, devid, &guid); 5019 } 5020 } 5021#endif 5022 if (config == NULL) { 5023 cmn_err(CE_NOTE, "Cannot read the pool label from '%s'", 5024 devpath); 5025 return (SET_ERROR(EIO)); 5026 } 5027 5028 VERIFY(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME, 5029 &pname) == 0); 5030 VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG, &txg) == 0); 5031 5032 mutex_enter(&spa_namespace_lock); 5033 if ((spa = spa_lookup(pname)) != NULL) { 5034 /* 5035 * Remove the existing root pool from the namespace so that we 5036 * can replace it with the correct config we just read in. 5037 */ 5038 spa_remove(spa); 5039 } 5040 5041 spa = spa_add(pname, config, NULL); 5042 spa->spa_is_root = B_TRUE; 5043 spa->spa_import_flags = ZFS_IMPORT_VERBATIM; 5044 if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, 5045 &spa->spa_ubsync.ub_version) != 0) 5046 spa->spa_ubsync.ub_version = SPA_VERSION_INITIAL; 5047 5048 /* 5049 * Build up a vdev tree based on the boot device's label config. 5050 */ 5051 VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 5052 &nvtop) == 0); 5053 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 5054 error = spa_config_parse(spa, &rvd, nvtop, NULL, 0, 5055 VDEV_ALLOC_ROOTPOOL); 5056 spa_config_exit(spa, SCL_ALL, FTAG); 5057 if (error) { 5058 mutex_exit(&spa_namespace_lock); 5059 nvlist_free(config); 5060 cmn_err(CE_NOTE, "Can not parse the config for pool '%s'", 5061 pname); 5062 return (error); 5063 } 5064 5065 /* 5066 * Get the boot vdev. 5067 */ 5068 if ((bvd = vdev_lookup_by_guid(rvd, guid)) == NULL) { 5069 cmn_err(CE_NOTE, "Can not find the boot vdev for guid %llu", 5070 (u_longlong_t)guid); 5071 error = SET_ERROR(ENOENT); 5072 goto out; 5073 } 5074 5075 /* 5076 * Determine if there is a better boot device. 
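 * (spa_alt_rootvdev() walks the tree comparing label txgs; if some other
 * leaf has a newer label than the device we actually booted from, the import
 * is refused below and the user is told which device to boot from instead.)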
5077 */ 5078 avd = bvd; 5079 spa_alt_rootvdev(rvd, &avd, &txg); 5080 if (avd != bvd) { 5081 cmn_err(CE_NOTE, "The boot device is 'degraded'. Please " 5082 "try booting from '%s'", avd->vdev_path); 5083 error = SET_ERROR(EINVAL); 5084 goto out; 5085 } 5086 5087 /* 5088 * If the boot device is part of a spare vdev then ensure that 5089 * we're booting off the active spare. 5090 */ 5091 if (bvd->vdev_parent->vdev_ops == &vdev_spare_ops && 5092 !bvd->vdev_isspare) { 5093 cmn_err(CE_NOTE, "The boot device is currently spared. Please " 5094 "try booting from '%s'", 5095 bvd->vdev_parent-> 5096 vdev_child[bvd->vdev_parent->vdev_children - 1]->vdev_path); 5097 error = SET_ERROR(EINVAL); 5098 goto out; 5099 } 5100 5101 error = 0; 5102out: 5103 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 5104 vdev_free(rvd); 5105 spa_config_exit(spa, SCL_ALL, FTAG); 5106 mutex_exit(&spa_namespace_lock); 5107 5108 nvlist_free(config); 5109 return (error); 5110} 5111 5112#else /* !illumos */ 5113 5114extern int vdev_geom_read_pool_label(const char *name, nvlist_t ***configs, 5115 uint64_t *count); 5116 5117static nvlist_t * 5118spa_generate_rootconf(const char *name) 5119{ 5120 nvlist_t **configs, **tops; 5121 nvlist_t *config; 5122 nvlist_t *best_cfg, *nvtop, *nvroot; 5123 uint64_t *holes; 5124 uint64_t best_txg; 5125 uint64_t nchildren; 5126 uint64_t pgid; 5127 uint64_t count; 5128 uint64_t i; 5129 uint_t nholes; 5130 5131 if (vdev_geom_read_pool_label(name, &configs, &count) != 0) 5132 return (NULL); 5133 5134 ASSERT3U(count, !=, 0); 5135 best_txg = 0; 5136 for (i = 0; i < count; i++) { 5137 uint64_t txg; 5138 5139 VERIFY(nvlist_lookup_uint64(configs[i], ZPOOL_CONFIG_POOL_TXG, 5140 &txg) == 0); 5141 if (txg > best_txg) { 5142 best_txg = txg; 5143 best_cfg = configs[i]; 5144 } 5145 } 5146 5147 nchildren = 1; 5148 nvlist_lookup_uint64(best_cfg, ZPOOL_CONFIG_VDEV_CHILDREN, &nchildren); 5149 holes = NULL; 5150 nvlist_lookup_uint64_array(best_cfg, ZPOOL_CONFIG_HOLE_ARRAY, 5151 &holes, &nholes); 5152 5153 tops = kmem_zalloc(nchildren * sizeof(void *), KM_SLEEP); 5154 for (i = 0; i < nchildren; i++) { 5155 if (i >= count) 5156 break; 5157 if (configs[i] == NULL) 5158 continue; 5159 VERIFY(nvlist_lookup_nvlist(configs[i], ZPOOL_CONFIG_VDEV_TREE, 5160 &nvtop) == 0); 5161 nvlist_dup(nvtop, &tops[i], KM_SLEEP); 5162 } 5163 for (i = 0; holes != NULL && i < nholes; i++) { 5164 if (i >= nchildren) 5165 continue; 5166 if (tops[holes[i]] != NULL) 5167 continue; 5168 nvlist_alloc(&tops[holes[i]], NV_UNIQUE_NAME, KM_SLEEP); 5169 VERIFY(nvlist_add_string(tops[holes[i]], ZPOOL_CONFIG_TYPE, 5170 VDEV_TYPE_HOLE) == 0); 5171 VERIFY(nvlist_add_uint64(tops[holes[i]], ZPOOL_CONFIG_ID, 5172 holes[i]) == 0); 5173 VERIFY(nvlist_add_uint64(tops[holes[i]], ZPOOL_CONFIG_GUID, 5174 0) == 0); 5175 } 5176 for (i = 0; i < nchildren; i++) { 5177 if (tops[i] != NULL) 5178 continue; 5179 nvlist_alloc(&tops[i], NV_UNIQUE_NAME, KM_SLEEP); 5180 VERIFY(nvlist_add_string(tops[i], ZPOOL_CONFIG_TYPE, 5181 VDEV_TYPE_MISSING) == 0); 5182 VERIFY(nvlist_add_uint64(tops[i], ZPOOL_CONFIG_ID, 5183 i) == 0); 5184 VERIFY(nvlist_add_uint64(tops[i], ZPOOL_CONFIG_GUID, 5185 0) == 0); 5186 } 5187 5188 /* 5189 * Create pool config based on the best vdev config. 5190 */ 5191 nvlist_dup(best_cfg, &config, KM_SLEEP); 5192 5193 /* 5194 * Put this pool's top-level vdevs into a root vdev. 
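 * (tops[] is indexed by top-level vdev id: real entries read from labels,
 * plus the synthesized "hole" and "missing" placeholders built above, so the
 * children array lines up with ZPOOL_CONFIG_VDEV_CHILDREN.)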
5195 */ 5196 VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, 5197 &pgid) == 0); 5198 VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0); 5199 VERIFY(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE, 5200 VDEV_TYPE_ROOT) == 0); 5201 VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) == 0); 5202 VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, pgid) == 0); 5203 VERIFY(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, 5204 tops, nchildren) == 0); 5205 5206 /* 5207 * Replace the existing vdev_tree with the new root vdev in 5208 * this pool's configuration (remove the old, add the new). 5209 */ 5210 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, nvroot) == 0); 5211 5212 /* 5213 * Drop vdev config elements that should not be present at pool level. 5214 */ 5215 nvlist_remove(config, ZPOOL_CONFIG_GUID, DATA_TYPE_UINT64); 5216 nvlist_remove(config, ZPOOL_CONFIG_TOP_GUID, DATA_TYPE_UINT64); 5217 5218 for (i = 0; i < count; i++) 5219 nvlist_free(configs[i]); 5220 kmem_free(configs, count * sizeof(void *)); 5221 for (i = 0; i < nchildren; i++) 5222 nvlist_free(tops[i]); 5223 kmem_free(tops, nchildren * sizeof(void *)); 5224 nvlist_free(nvroot); 5225 return (config); 5226} 5227 5228int 5229spa_import_rootpool(const char *name) 5230{ 5231 spa_t *spa; 5232 vdev_t *rvd, *bvd, *avd = NULL; 5233 nvlist_t *config, *nvtop; 5234 uint64_t txg; 5235 char *pname; 5236 int error; 5237 5238 /* 5239 * Read the label from the boot device and generate a configuration. 5240 */ 5241 config = spa_generate_rootconf(name); 5242 5243 mutex_enter(&spa_namespace_lock); 5244 if (config != NULL) { 5245 VERIFY(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME, 5246 &pname) == 0 && strcmp(name, pname) == 0); 5247 VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG, &txg) 5248 == 0); 5249 5250 if ((spa = spa_lookup(pname)) != NULL) { 5251 /* 5252 * The pool could already be imported, 5253 * e.g., after reboot -r. 5254 */ 5255 if (spa->spa_state == POOL_STATE_ACTIVE) { 5256 mutex_exit(&spa_namespace_lock); 5257 nvlist_free(config); 5258 return (0); 5259 } 5260 5261 /* 5262 * Remove the existing root pool from the namespace so 5263 * that we can replace it with the correct config 5264 * we just read in. 5265 */ 5266 spa_remove(spa); 5267 } 5268 spa = spa_add(pname, config, NULL); 5269 5270 /* 5271 * Set spa_ubsync.ub_version as it can be used in vdev_alloc() 5272 * via spa_version(). 5273 */ 5274 if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, 5275 &spa->spa_ubsync.ub_version) != 0) 5276 spa->spa_ubsync.ub_version = SPA_VERSION_INITIAL; 5277 } else if ((spa = spa_lookup(name)) == NULL) { 5278 mutex_exit(&spa_namespace_lock); 5279 nvlist_free(config); 5280 cmn_err(CE_NOTE, "Cannot find the pool label for '%s'", 5281 name); 5282 return (EIO); 5283 } else { 5284 VERIFY(nvlist_dup(spa->spa_config, &config, KM_SLEEP) == 0); 5285 } 5286 spa->spa_is_root = B_TRUE; 5287 spa->spa_import_flags = ZFS_IMPORT_VERBATIM; 5288 5289 /* 5290 * Build up a vdev tree based on the boot device's label config. 
5291 */ 5292 VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 5293 &nvtop) == 0); 5294 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 5295 error = spa_config_parse(spa, &rvd, nvtop, NULL, 0, 5296 VDEV_ALLOC_ROOTPOOL); 5297 spa_config_exit(spa, SCL_ALL, FTAG); 5298 if (error) { 5299 mutex_exit(&spa_namespace_lock); 5300 nvlist_free(config); 5301 cmn_err(CE_NOTE, "Can not parse the config for pool '%s'", 5302 pname); 5303 return (error); 5304 } 5305 5306 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 5307 vdev_free(rvd); 5308 spa_config_exit(spa, SCL_ALL, FTAG); 5309 mutex_exit(&spa_namespace_lock); 5310 5311 nvlist_free(config); 5312 return (0); 5313} 5314 5315#endif /* illumos */ 5316#endif /* _KERNEL */ 5317 5318/* 5319 * Import a non-root pool into the system. 5320 */ 5321int 5322spa_import(const char *pool, nvlist_t *config, nvlist_t *props, uint64_t flags) 5323{ 5324 spa_t *spa; 5325 char *altroot = NULL; 5326 spa_load_state_t state = SPA_LOAD_IMPORT; 5327 zpool_rewind_policy_t policy; 5328 uint64_t mode = spa_mode_global; 5329 uint64_t readonly = B_FALSE; 5330 int error; 5331 nvlist_t *nvroot; 5332 nvlist_t **spares, **l2cache; 5333 uint_t nspares, nl2cache; 5334 5335 /* 5336 * If a pool with this name exists, return failure. 5337 */ 5338 mutex_enter(&spa_namespace_lock); 5339 if (spa_lookup(pool) != NULL) { 5340 mutex_exit(&spa_namespace_lock); 5341 return (SET_ERROR(EEXIST)); 5342 } 5343 5344 /* 5345 * Create and initialize the spa structure. 5346 */ 5347 (void) nvlist_lookup_string(props, 5348 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot); 5349 (void) nvlist_lookup_uint64(props, 5350 zpool_prop_to_name(ZPOOL_PROP_READONLY), &readonly); 5351 if (readonly) 5352 mode = FREAD; 5353 spa = spa_add(pool, config, altroot); 5354 spa->spa_import_flags = flags; 5355 5356 /* 5357 * Verbatim import - Take a pool and insert it into the namespace 5358 * as if it had been loaded at boot. 5359 */ 5360 if (spa->spa_import_flags & ZFS_IMPORT_VERBATIM) { 5361 if (props != NULL) 5362 spa_configfile_set(spa, props, B_FALSE); 5363 5364 spa_write_cachefile(spa, B_FALSE, B_TRUE); 5365 spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_IMPORT); 5366 zfs_dbgmsg("spa_import: verbatim import of %s", pool); 5367 mutex_exit(&spa_namespace_lock); 5368 return (0); 5369 } 5370 5371 spa_activate(spa, mode); 5372 5373 /* 5374 * Don't start async tasks until we know everything is healthy. 5375 */ 5376 spa_async_suspend(spa); 5377 5378 zpool_get_rewind_policy(config, &policy); 5379 if (policy.zrp_request & ZPOOL_DO_REWIND) 5380 state = SPA_LOAD_RECOVER; 5381 5382 spa->spa_config_source = SPA_CONFIG_SRC_TRYIMPORT; 5383 5384 if (state != SPA_LOAD_RECOVER) { 5385 spa->spa_last_ubsync_txg = spa->spa_load_txg = 0; 5386 zfs_dbgmsg("spa_import: importing %s", pool); 5387 } else { 5388 zfs_dbgmsg("spa_import: importing %s, max_txg=%lld " 5389 "(RECOVERY MODE)", pool, (longlong_t)policy.zrp_txg); 5390 } 5391 error = spa_load_best(spa, state, policy.zrp_txg, policy.zrp_request); 5392 5393 /* 5394 * Propagate anything learned while loading the pool and pass it 5395 * back to caller (i.e. rewind info, missing devices, etc). 5396 */ 5397 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, 5398 spa->spa_load_info) == 0); 5399 5400 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 5401 /* 5402 * Toss any existing sparelist, as it doesn't have any validity 5403 * anymore, and conflicts with spa_has_spare(). 
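 * (The stale list could, for instance, still name spares by device paths
 * from the exporting host; it is rebuilt by spa_load_spares() below and then
 * overridden further down by the user-supplied import config, if any.)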
5404 */ 5405 if (spa->spa_spares.sav_config) { 5406 nvlist_free(spa->spa_spares.sav_config); 5407 spa->spa_spares.sav_config = NULL; 5408 spa_load_spares(spa); 5409 } 5410 if (spa->spa_l2cache.sav_config) { 5411 nvlist_free(spa->spa_l2cache.sav_config); 5412 spa->spa_l2cache.sav_config = NULL; 5413 spa_load_l2cache(spa); 5414 } 5415 5416 VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 5417 &nvroot) == 0); 5418 if (error == 0) 5419 error = spa_validate_aux(spa, nvroot, -1ULL, 5420 VDEV_ALLOC_SPARE); 5421 if (error == 0) 5422 error = spa_validate_aux(spa, nvroot, -1ULL, 5423 VDEV_ALLOC_L2CACHE); 5424 spa_config_exit(spa, SCL_ALL, FTAG); 5425 5426 if (props != NULL) 5427 spa_configfile_set(spa, props, B_FALSE); 5428 5429 if (error != 0 || (props && spa_writeable(spa) && 5430 (error = spa_prop_set(spa, props)))) { 5431 spa_unload(spa); 5432 spa_deactivate(spa); 5433 spa_remove(spa); 5434 mutex_exit(&spa_namespace_lock); 5435 return (error); 5436 } 5437 5438 spa_async_resume(spa); 5439 5440 /* 5441 * Override any spares and level 2 cache devices as specified by 5442 * the user, as these may have correct device names/devids, etc. 5443 */ 5444 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, 5445 &spares, &nspares) == 0) { 5446 if (spa->spa_spares.sav_config) 5447 VERIFY(nvlist_remove(spa->spa_spares.sav_config, 5448 ZPOOL_CONFIG_SPARES, DATA_TYPE_NVLIST_ARRAY) == 0); 5449 else 5450 VERIFY(nvlist_alloc(&spa->spa_spares.sav_config, 5451 NV_UNIQUE_NAME, KM_SLEEP) == 0); 5452 VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config, 5453 ZPOOL_CONFIG_SPARES, spares, nspares) == 0); 5454 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 5455 spa_load_spares(spa); 5456 spa_config_exit(spa, SCL_ALL, FTAG); 5457 spa->spa_spares.sav_sync = B_TRUE; 5458 } 5459 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, 5460 &l2cache, &nl2cache) == 0) { 5461 if (spa->spa_l2cache.sav_config) 5462 VERIFY(nvlist_remove(spa->spa_l2cache.sav_config, 5463 ZPOOL_CONFIG_L2CACHE, DATA_TYPE_NVLIST_ARRAY) == 0); 5464 else 5465 VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config, 5466 NV_UNIQUE_NAME, KM_SLEEP) == 0); 5467 VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config, 5468 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0); 5469 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 5470 spa_load_l2cache(spa); 5471 spa_config_exit(spa, SCL_ALL, FTAG); 5472 spa->spa_l2cache.sav_sync = B_TRUE; 5473 } 5474 5475 /* 5476 * Check for any removed devices. 5477 */ 5478 if (spa->spa_autoreplace) { 5479 spa_aux_check_removed(&spa->spa_spares); 5480 spa_aux_check_removed(&spa->spa_l2cache); 5481 } 5482 5483 if (spa_writeable(spa)) { 5484 /* 5485 * Update the config cache to include the newly-imported pool. 5486 */ 5487 spa_config_update(spa, SPA_CONFIG_UPDATE_POOL); 5488 } 5489 5490 /* 5491 * It's possible that the pool was expanded while it was exported. 5492 * We kick off an async task to handle this for us. 
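 * (Typical case: a LUN or partition that grew while the pool was exported.
 * The SPA_ASYNC_AUTOEXPAND task re-checks expandable space and, when the
 * pool's autoexpand property is on, grows the affected vdevs.)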
5493 */ 5494 spa_async_request(spa, SPA_ASYNC_AUTOEXPAND); 5495 5496 spa_history_log_version(spa, "import"); 5497 5498 spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_IMPORT); 5499 5500 mutex_exit(&spa_namespace_lock); 5501 5502#ifdef __FreeBSD__ 5503#ifdef _KERNEL 5504 zvol_create_minors(pool); 5505#endif 5506#endif 5507 return (0); 5508} 5509 5510nvlist_t * 5511spa_tryimport(nvlist_t *tryconfig) 5512{ 5513 nvlist_t *config = NULL; 5514 char *poolname, *cachefile; 5515 spa_t *spa; 5516 uint64_t state; 5517 int error; 5518 zpool_rewind_policy_t policy; 5519 5520 if (nvlist_lookup_string(tryconfig, ZPOOL_CONFIG_POOL_NAME, &poolname)) 5521 return (NULL); 5522 5523 if (nvlist_lookup_uint64(tryconfig, ZPOOL_CONFIG_POOL_STATE, &state)) 5524 return (NULL); 5525 5526 /* 5527 * Create and initialize the spa structure. 5528 */ 5529 mutex_enter(&spa_namespace_lock); 5530 spa = spa_add(TRYIMPORT_NAME, tryconfig, NULL); 5531 spa_activate(spa, FREAD); 5532 5533 /* 5534 * Rewind pool if a max txg was provided. Note that even though we 5535 * retrieve the complete rewind policy, only the rewind txg is relevant 5536 * for tryimport. 5537 */ 5538 zpool_get_rewind_policy(spa->spa_config, &policy); 5539 if (policy.zrp_txg != UINT64_MAX) { 5540 spa->spa_load_max_txg = policy.zrp_txg; 5541 spa->spa_extreme_rewind = B_TRUE; 5542 zfs_dbgmsg("spa_tryimport: importing %s, max_txg=%lld", 5543 poolname, (longlong_t)policy.zrp_txg); 5544 } else { 5545 zfs_dbgmsg("spa_tryimport: importing %s", poolname); 5546 } 5547 5548 if (nvlist_lookup_string(tryconfig, ZPOOL_CONFIG_CACHEFILE, &cachefile) 5549 == 0) { 5550 zfs_dbgmsg("spa_tryimport: using cachefile '%s'", cachefile); 5551 spa->spa_config_source = SPA_CONFIG_SRC_CACHEFILE; 5552 } else { 5553 spa->spa_config_source = SPA_CONFIG_SRC_SCAN; 5554 } 5555 5556 error = spa_load(spa, SPA_LOAD_TRYIMPORT, SPA_IMPORT_EXISTING); 5557 5558 /* 5559 * If 'tryconfig' was at least parsable, return the current config. 5560 */ 5561 if (spa->spa_root_vdev != NULL) { 5562 config = spa_config_generate(spa, NULL, -1ULL, B_TRUE); 5563 VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME, 5564 poolname) == 0); 5565 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE, 5566 state) == 0); 5567 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_TIMESTAMP, 5568 spa->spa_uberblock.ub_timestamp) == 0); 5569 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, 5570 spa->spa_load_info) == 0); 5571 5572 /* 5573 * If the bootfs property exists on this pool then we 5574 * copy it out so that external consumers can tell which 5575 * pools are bootable. 5576 */ 5577 if ((!error || error == EEXIST) && spa->spa_bootfs) { 5578 char *tmpname = kmem_alloc(MAXPATHLEN, KM_SLEEP); 5579 5580 /* 5581 * We have to play games with the name since the 5582 * pool was opened as TRYIMPORT_NAME. 5583 */ 5584 if (dsl_dsobj_to_dsname(spa_name(spa), 5585 spa->spa_bootfs, tmpname) == 0) { 5586 char *cp; 5587 char *dsname = kmem_alloc(MAXPATHLEN, KM_SLEEP); 5588 5589 cp = strchr(tmpname, '/'); 5590 if (cp == NULL) { 5591 (void) strlcpy(dsname, tmpname, 5592 MAXPATHLEN); 5593 } else { 5594 (void) snprintf(dsname, MAXPATHLEN, 5595 "%s/%s", poolname, ++cp); 5596 } 5597 VERIFY(nvlist_add_string(config, 5598 ZPOOL_CONFIG_BOOTFS, dsname) == 0); 5599 kmem_free(dsname, MAXPATHLEN); 5600 } 5601 kmem_free(tmpname, MAXPATHLEN); 5602 } 5603 5604 /* 5605 * Add the list of hot spares and level 2 cache devices. 
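 * (The nvlist assembled here is essentially what a bare 'zpool import' scan
 * prints for each discoverable pool: the vdev tree, pool name and state,
 * load_info warnings, an optional bootfs, and these aux device arrays.)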
5606 */ 5607 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); 5608 spa_add_spares(spa, config); 5609 spa_add_l2cache(spa, config); 5610 spa_config_exit(spa, SCL_CONFIG, FTAG); 5611 } 5612 5613 spa_unload(spa); 5614 spa_deactivate(spa); 5615 spa_remove(spa); 5616 mutex_exit(&spa_namespace_lock); 5617 5618 return (config); 5619} 5620 5621/* 5622 * Pool export/destroy 5623 * 5624 * The act of destroying or exporting a pool is very simple. We make sure there 5625 * is no more pending I/O and any references to the pool are gone. Then, we 5626 * update the pool state and sync all the labels to disk, removing the 5627 * configuration from the cache afterwards. If the 'hardforce' flag is set, then 5628 * we don't sync the labels or remove the configuration cache. 5629 */ 5630static int 5631spa_export_common(char *pool, int new_state, nvlist_t **oldconfig, 5632 boolean_t force, boolean_t hardforce) 5633{ 5634 spa_t *spa; 5635 5636 if (oldconfig) 5637 *oldconfig = NULL; 5638 5639 if (!(spa_mode_global & FWRITE)) 5640 return (SET_ERROR(EROFS)); 5641 5642 mutex_enter(&spa_namespace_lock); 5643 if ((spa = spa_lookup(pool)) == NULL) { 5644 mutex_exit(&spa_namespace_lock); 5645 return (SET_ERROR(ENOENT)); 5646 } 5647 5648 /* 5649 * Put a hold on the pool, drop the namespace lock, stop async tasks, 5650 * reacquire the namespace lock, and see if we can export. 5651 */ 5652 spa_open_ref(spa, FTAG); 5653 mutex_exit(&spa_namespace_lock); 5654 spa_async_suspend(spa); 5655 mutex_enter(&spa_namespace_lock); 5656 spa_close(spa, FTAG); 5657 5658 /* 5659 * The pool will be in core if it's openable, 5660 * in which case we can modify its state. 5661 */ 5662 if (spa->spa_state != POOL_STATE_UNINITIALIZED && spa->spa_sync_on) { 5663 /* 5664 * Objsets may be open only because they're dirty, so we 5665 * have to force it to sync before checking spa_refcnt. 5666 */ 5667 txg_wait_synced(spa->spa_dsl_pool, 0); 5668 spa_evicting_os_wait(spa); 5669 5670 /* 5671 * A pool cannot be exported or destroyed if there are active 5672 * references. If we are resetting a pool, allow references by 5673 * fault injection handlers. 5674 */ 5675 if (!spa_refcount_zero(spa) || 5676 (spa->spa_inject_ref != 0 && 5677 new_state != POOL_STATE_UNINITIALIZED)) { 5678 spa_async_resume(spa); 5679 mutex_exit(&spa_namespace_lock); 5680 return (SET_ERROR(EBUSY)); 5681 } 5682 5683 /* 5684 * A pool cannot be exported if it has an active shared spare. 5685 * This is to prevent other pools stealing the active spare 5686 * from an exported pool. At user's own will, such pool can 5687 * be forcedly exported. 5688 */ 5689 if (!force && new_state == POOL_STATE_EXPORTED && 5690 spa_has_active_shared_spare(spa)) { 5691 spa_async_resume(spa); 5692 mutex_exit(&spa_namespace_lock); 5693 return (SET_ERROR(EXDEV)); 5694 } 5695 5696 /* 5697 * We want this to be reflected on every label, 5698 * so mark them all dirty. spa_unload() will do the 5699 * final sync that pushes these changes out. 
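 * (Writing POOL_STATE_EXPORTED or POOL_STATE_DESTROYED into every label is
 * what later lets an import scan, possibly on another host, see that the
 * pool was cleanly let go rather than still active; the hard-force path
 * deliberately skips this.)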
5700 */ 5701 if (new_state != POOL_STATE_UNINITIALIZED && !hardforce) { 5702 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 5703 spa->spa_state = new_state; 5704 spa->spa_final_txg = spa_last_synced_txg(spa) + 5705 TXG_DEFER_SIZE + 1; 5706 vdev_config_dirty(spa->spa_root_vdev); 5707 spa_config_exit(spa, SCL_ALL, FTAG); 5708 } 5709 } 5710 5711 spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_DESTROY); 5712 5713 if (spa->spa_state != POOL_STATE_UNINITIALIZED) { 5714 spa_unload(spa); 5715 spa_deactivate(spa); 5716 } 5717 5718 if (oldconfig && spa->spa_config) 5719 VERIFY(nvlist_dup(spa->spa_config, oldconfig, 0) == 0); 5720 5721 if (new_state != POOL_STATE_UNINITIALIZED) { 5722 if (!hardforce) 5723 spa_write_cachefile(spa, B_TRUE, B_TRUE); 5724 spa_remove(spa); 5725 } 5726 mutex_exit(&spa_namespace_lock); 5727 5728 return (0); 5729} 5730 5731/* 5732 * Destroy a storage pool. 5733 */ 5734int 5735spa_destroy(char *pool) 5736{ 5737 return (spa_export_common(pool, POOL_STATE_DESTROYED, NULL, 5738 B_FALSE, B_FALSE)); 5739} 5740 5741/* 5742 * Export a storage pool. 5743 */ 5744int 5745spa_export(char *pool, nvlist_t **oldconfig, boolean_t force, 5746 boolean_t hardforce) 5747{ 5748 return (spa_export_common(pool, POOL_STATE_EXPORTED, oldconfig, 5749 force, hardforce)); 5750} 5751 5752/* 5753 * Similar to spa_export(), this unloads the spa_t without actually removing it 5754 * from the namespace in any way. 5755 */ 5756int 5757spa_reset(char *pool) 5758{ 5759 return (spa_export_common(pool, POOL_STATE_UNINITIALIZED, NULL, 5760 B_FALSE, B_FALSE)); 5761} 5762 5763/* 5764 * ========================================================================== 5765 * Device manipulation 5766 * ========================================================================== 5767 */ 5768 5769/* 5770 * Add a device to a storage pool. 5771 */ 5772int 5773spa_vdev_add(spa_t *spa, nvlist_t *nvroot) 5774{ 5775 uint64_t txg, id; 5776 int error; 5777 vdev_t *rvd = spa->spa_root_vdev; 5778 vdev_t *vd, *tvd; 5779 nvlist_t **spares, **l2cache; 5780 uint_t nspares, nl2cache; 5781 5782 ASSERT(spa_writeable(spa)); 5783 5784 txg = spa_vdev_enter(spa); 5785 5786 if ((error = spa_config_parse(spa, &vd, nvroot, NULL, 0, 5787 VDEV_ALLOC_ADD)) != 0) 5788 return (spa_vdev_exit(spa, NULL, txg, error)); 5789 5790 spa->spa_pending_vdev = vd; /* spa_vdev_exit() will clear this */ 5791 5792 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, &spares, 5793 &nspares) != 0) 5794 nspares = 0; 5795 5796 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, &l2cache, 5797 &nl2cache) != 0) 5798 nl2cache = 0; 5799 5800 if (vd->vdev_children == 0 && nspares == 0 && nl2cache == 0) 5801 return (spa_vdev_exit(spa, vd, txg, EINVAL)); 5802 5803 if (vd->vdev_children != 0 && 5804 (error = vdev_create(vd, txg, B_FALSE)) != 0) 5805 return (spa_vdev_exit(spa, vd, txg, error)); 5806 5807 /* 5808 * We must validate the spares and l2cache devices after checking the 5809 * children. Otherwise, vdev_inuse() will blindly overwrite the spare. 5810 */ 5811 if ((error = spa_validate_aux(spa, nvroot, txg, VDEV_ALLOC_ADD)) != 0) 5812 return (spa_vdev_exit(spa, vd, txg, error)); 5813 5814 /* 5815 * If we are in the middle of a device removal, we can only add 5816 * devices which match the existing devices in the pool. 5817 * If we are in the middle of a removal, or have some indirect 5818 * vdevs, we can not add raidz toplevels. 
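 *
 * As an illustrative example of the checks below: while a removal is in
 * progress on a pool whose remaining top-level vdevs use ashift=12, an
 * attempt to add an ashift=9 top-level vdev, a raidz top-level vdev, or
 * a mirror whose children are not plain leaves is rejected with EINVAL,
 * whereas adding a matching ashift=12 disk or mirror of leaf vdevs is
 * accepted.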
5819 */ 5820 if (spa->spa_vdev_removal != NULL || 5821 spa->spa_removing_phys.sr_prev_indirect_vdev != -1) { 5822 for (int c = 0; c < vd->vdev_children; c++) { 5823 tvd = vd->vdev_child[c]; 5824 if (spa->spa_vdev_removal != NULL && 5825 tvd->vdev_ashift != 5826 spa->spa_vdev_removal->svr_vdev->vdev_ashift) { 5827 return (spa_vdev_exit(spa, vd, txg, EINVAL)); 5828 } 5829 /* Fail if top level vdev is raidz */ 5830 if (tvd->vdev_ops == &vdev_raidz_ops) { 5831 return (spa_vdev_exit(spa, vd, txg, EINVAL)); 5832 } 5833 /* 5834 * Need the top level mirror to be 5835 * a mirror of leaf vdevs only 5836 */ 5837 if (tvd->vdev_ops == &vdev_mirror_ops) { 5838 for (uint64_t cid = 0; 5839 cid < tvd->vdev_children; cid++) { 5840 vdev_t *cvd = tvd->vdev_child[cid]; 5841 if (!cvd->vdev_ops->vdev_op_leaf) { 5842 return (spa_vdev_exit(spa, vd, 5843 txg, EINVAL)); 5844 } 5845 } 5846 } 5847 } 5848 } 5849 5850 for (int c = 0; c < vd->vdev_children; c++) { 5851 5852 /* 5853 * Set the vdev id to the first hole, if one exists. 5854 */ 5855 for (id = 0; id < rvd->vdev_children; id++) { 5856 if (rvd->vdev_child[id]->vdev_ishole) { 5857 vdev_free(rvd->vdev_child[id]); 5858 break; 5859 } 5860 } 5861 tvd = vd->vdev_child[c]; 5862 vdev_remove_child(vd, tvd); 5863 tvd->vdev_id = id; 5864 vdev_add_child(rvd, tvd); 5865 vdev_config_dirty(tvd); 5866 } 5867 5868 if (nspares != 0) { 5869 spa_set_aux_vdevs(&spa->spa_spares, spares, nspares, 5870 ZPOOL_CONFIG_SPARES); 5871 spa_load_spares(spa); 5872 spa->spa_spares.sav_sync = B_TRUE; 5873 } 5874 5875 if (nl2cache != 0) { 5876 spa_set_aux_vdevs(&spa->spa_l2cache, l2cache, nl2cache, 5877 ZPOOL_CONFIG_L2CACHE); 5878 spa_load_l2cache(spa); 5879 spa->spa_l2cache.sav_sync = B_TRUE; 5880 } 5881 5882 /* 5883 * We have to be careful when adding new vdevs to an existing pool. 5884 * If other threads start allocating from these vdevs before we 5885 * sync the config cache, and we lose power, then upon reboot we may 5886 * fail to open the pool because there are DVAs that the config cache 5887 * can't translate. Therefore, we first add the vdevs without 5888 * initializing metaslabs; sync the config cache (via spa_vdev_exit()); 5889 * and then let spa_config_update() initialize the new metaslabs. 5890 * 5891 * spa_load() checks for added-but-not-initialized vdevs, so that 5892 * if we lose power at any point in this sequence, the remaining 5893 * steps will be completed the next time we load the pool. 5894 */ 5895 (void) spa_vdev_exit(spa, vd, txg, 0); 5896 5897 mutex_enter(&spa_namespace_lock); 5898 spa_config_update(spa, SPA_CONFIG_UPDATE_POOL); 5899 spa_event_notify(spa, NULL, NULL, ESC_ZFS_VDEV_ADD); 5900 mutex_exit(&spa_namespace_lock); 5901 5902 return (0); 5903} 5904 5905/* 5906 * Attach a device to a mirror. The arguments are the path to any device 5907 * in the mirror, and the nvroot for the new device. If the path specifies 5908 * a device that is not mirrored, we automatically insert the mirror vdev. 5909 * 5910 * If 'replacing' is specified, the new device is intended to replace the 5911 * existing device; in this case the two devices are made into their own 5912 * mirror using the 'replacing' vdev, which is functionally identical to 5913 * the mirror vdev (it actually reuses all the same ops) but has a few 5914 * extra rules: you can't attach to it after it's been created, and upon 5915 * completion of resilvering, the first disk (the one being replaced) 5916 * is automatically detached. 
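 *
 * As a sketch only (the in-kernel caller and the nvlist construction are
 * assumed, not shown here), a replacement boils down to a call such as:
 *
 *	error = spa_vdev_attach(spa, oldvd_guid, nvroot, 1);
 *
 * where 'nvroot' describes the single new leaf. The tree then goes from
 * mirror(A, B) to mirror(A, replacing(B, B')), and once the resilver
 * completes, spa_vdev_resilver_done() detaches B, leaving mirror(A, B').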
5917 */ 5918int 5919spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot, int replacing) 5920{ 5921 uint64_t txg, dtl_max_txg; 5922 vdev_t *rvd = spa->spa_root_vdev; 5923 vdev_t *oldvd, *newvd, *newrootvd, *pvd, *tvd; 5924 vdev_ops_t *pvops; 5925 char *oldvdpath, *newvdpath; 5926 int newvd_isspare; 5927 int error; 5928 5929 ASSERT(spa_writeable(spa)); 5930 5931 txg = spa_vdev_enter(spa); 5932 5933 oldvd = spa_lookup_by_guid(spa, guid, B_FALSE); 5934 5935 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 5936 if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) { 5937 error = (spa_has_checkpoint(spa)) ? 5938 ZFS_ERR_CHECKPOINT_EXISTS : ZFS_ERR_DISCARDING_CHECKPOINT; 5939 return (spa_vdev_exit(spa, NULL, txg, error)); 5940 } 5941 5942 if (spa->spa_vdev_removal != NULL || 5943 spa->spa_removing_phys.sr_prev_indirect_vdev != -1) { 5944 return (spa_vdev_exit(spa, NULL, txg, EBUSY)); 5945 } 5946 5947 if (oldvd == NULL) 5948 return (spa_vdev_exit(spa, NULL, txg, ENODEV)); 5949 5950 if (!oldvd->vdev_ops->vdev_op_leaf) 5951 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 5952 5953 pvd = oldvd->vdev_parent; 5954 5955 if ((error = spa_config_parse(spa, &newrootvd, nvroot, NULL, 0, 5956 VDEV_ALLOC_ATTACH)) != 0) 5957 return (spa_vdev_exit(spa, NULL, txg, EINVAL)); 5958 5959 if (newrootvd->vdev_children != 1) 5960 return (spa_vdev_exit(spa, newrootvd, txg, EINVAL)); 5961 5962 newvd = newrootvd->vdev_child[0]; 5963 5964 if (!newvd->vdev_ops->vdev_op_leaf) 5965 return (spa_vdev_exit(spa, newrootvd, txg, EINVAL)); 5966 5967 if ((error = vdev_create(newrootvd, txg, replacing)) != 0) 5968 return (spa_vdev_exit(spa, newrootvd, txg, error)); 5969 5970 /* 5971 * Spares can't replace logs 5972 */ 5973 if (oldvd->vdev_top->vdev_islog && newvd->vdev_isspare) 5974 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 5975 5976 if (!replacing) { 5977 /* 5978 * For attach, the only allowable parent is a mirror or the root 5979 * vdev. 5980 */ 5981 if (pvd->vdev_ops != &vdev_mirror_ops && 5982 pvd->vdev_ops != &vdev_root_ops) 5983 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 5984 5985 pvops = &vdev_mirror_ops; 5986 } else { 5987 /* 5988 * Active hot spares can only be replaced by inactive hot 5989 * spares. 5990 */ 5991 if (pvd->vdev_ops == &vdev_spare_ops && 5992 oldvd->vdev_isspare && 5993 !spa_has_spare(spa, newvd->vdev_guid)) 5994 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 5995 5996 /* 5997 * If the source is a hot spare, and the parent isn't already a 5998 * spare, then we want to create a new hot spare. Otherwise, we 5999 * want to create a replacing vdev. The user is not allowed to 6000 * attach to a spared vdev child unless the 'isspare' state is 6001 * the same (spare replaces spare, non-spare replaces 6002 * non-spare). 6003 */ 6004 if (pvd->vdev_ops == &vdev_replacing_ops && 6005 spa_version(spa) < SPA_VERSION_MULTI_REPLACE) { 6006 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 6007 } else if (pvd->vdev_ops == &vdev_spare_ops && 6008 newvd->vdev_isspare != oldvd->vdev_isspare) { 6009 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 6010 } 6011 6012 if (newvd->vdev_isspare) 6013 pvops = &vdev_spare_ops; 6014 else 6015 pvops = &vdev_replacing_ops; 6016 } 6017 6018 /* 6019 * Make sure the new device is big enough. 6020 */ 6021 if (newvd->vdev_asize < vdev_get_min_asize(oldvd)) 6022 return (spa_vdev_exit(spa, newrootvd, txg, EOVERFLOW)); 6023 6024 /* 6025 * The new device cannot have a higher alignment requirement 6026 * than the top-level vdev. 
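 *
 * For example, a new leaf that opened with vdev_ashift = 12 (4KB
 * sectors) cannot be attached under a top-level vdev created with
 * vdev_ashift = 9 (512-byte sectors); the comparison below fails the
 * attach with EDOM.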
6027 */ 6028 if (newvd->vdev_ashift > oldvd->vdev_top->vdev_ashift) 6029 return (spa_vdev_exit(spa, newrootvd, txg, EDOM)); 6030 6031 /* 6032 * If this is an in-place replacement, update oldvd's path and devid 6033 * to make it distinguishable from newvd, and unopenable from now on. 6034 */ 6035 if (strcmp(oldvd->vdev_path, newvd->vdev_path) == 0) { 6036 spa_strfree(oldvd->vdev_path); 6037 oldvd->vdev_path = kmem_alloc(strlen(newvd->vdev_path) + 5, 6038 KM_SLEEP); 6039 (void) sprintf(oldvd->vdev_path, "%s/%s", 6040 newvd->vdev_path, "old"); 6041 if (oldvd->vdev_devid != NULL) { 6042 spa_strfree(oldvd->vdev_devid); 6043 oldvd->vdev_devid = NULL; 6044 } 6045 } 6046 6047 /* mark the device being resilvered */ 6048 newvd->vdev_resilver_txg = txg; 6049 6050 /* 6051 * If the parent is not a mirror, or if we're replacing, insert the new 6052 * mirror/replacing/spare vdev above oldvd. 6053 */ 6054 if (pvd->vdev_ops != pvops) 6055 pvd = vdev_add_parent(oldvd, pvops); 6056 6057 ASSERT(pvd->vdev_top->vdev_parent == rvd); 6058 ASSERT(pvd->vdev_ops == pvops); 6059 ASSERT(oldvd->vdev_parent == pvd); 6060 6061 /* 6062 * Extract the new device from its root and add it to pvd. 6063 */ 6064 vdev_remove_child(newrootvd, newvd); 6065 newvd->vdev_id = pvd->vdev_children; 6066 newvd->vdev_crtxg = oldvd->vdev_crtxg; 6067 vdev_add_child(pvd, newvd); 6068 6069 tvd = newvd->vdev_top; 6070 ASSERT(pvd->vdev_top == tvd); 6071 ASSERT(tvd->vdev_parent == rvd); 6072 6073 vdev_config_dirty(tvd); 6074 6075 /* 6076 * Set newvd's DTL to [TXG_INITIAL, dtl_max_txg) so that we account 6077 * for any dmu_sync-ed blocks. It will propagate upward when 6078 * spa_vdev_exit() calls vdev_dtl_reassess(). 6079 */ 6080 dtl_max_txg = txg + TXG_CONCURRENT_STATES; 6081 6082 vdev_dtl_dirty(newvd, DTL_MISSING, TXG_INITIAL, 6083 dtl_max_txg - TXG_INITIAL); 6084 6085 if (newvd->vdev_isspare) { 6086 spa_spare_activate(newvd); 6087 spa_event_notify(spa, newvd, NULL, ESC_ZFS_VDEV_SPARE); 6088 } 6089 6090 oldvdpath = spa_strdup(oldvd->vdev_path); 6091 newvdpath = spa_strdup(newvd->vdev_path); 6092 newvd_isspare = newvd->vdev_isspare; 6093 6094 /* 6095 * Mark newvd's DTL dirty in this txg. 6096 */ 6097 vdev_dirty(tvd, VDD_DTL, newvd, txg); 6098 6099 /* 6100 * Schedule the resilver to restart in the future. We do this to 6101 * ensure that dmu_sync-ed blocks have been stitched into the 6102 * respective datasets. 6103 */ 6104 dsl_resilver_restart(spa->spa_dsl_pool, dtl_max_txg); 6105 6106 if (spa->spa_bootfs) 6107 spa_event_notify(spa, newvd, NULL, ESC_ZFS_BOOTFS_VDEV_ATTACH); 6108 6109 spa_event_notify(spa, newvd, NULL, ESC_ZFS_VDEV_ATTACH); 6110 6111 /* 6112 * Commit the config 6113 */ 6114 (void) spa_vdev_exit(spa, newrootvd, dtl_max_txg, 0); 6115 6116 spa_history_log_internal(spa, "vdev attach", NULL, 6117 "%s vdev=%s %s vdev=%s", 6118 replacing && newvd_isspare ? "spare in" : 6119 replacing ? "replace" : "attach", newvdpath, 6120 replacing ? "for" : "to", oldvdpath); 6121 6122 spa_strfree(oldvdpath); 6123 spa_strfree(newvdpath); 6124 6125 return (0); 6126} 6127 6128/* 6129 * Detach a device from a mirror or replacing vdev. 6130 * 6131 * If 'replace_done' is specified, only detach if the parent 6132 * is a replacing vdev. 
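 *
 * A minimal call sketch (the ioctl-side caller is assumed, not shown
 * here):
 *
 *	(void) spa_vdev_detach(spa, guid, 0, B_FALSE);	   explicit detach
 *	(void) spa_vdev_detach(spa, guid, pguid, B_TRUE);  resilver-done path
 *
 * With 'replace_done' set, the call fails with ENOTSUP unless the parent
 * is a replacing or spare vdev; that is how spa_vdev_resilver_done()
 * uses it.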
6133 */
6134 int
6135 spa_vdev_detach(spa_t *spa, uint64_t guid, uint64_t pguid, int replace_done)
6136 {
6137 uint64_t txg;
6138 int error;
6139 vdev_t *rvd = spa->spa_root_vdev;
6140 vdev_t *vd, *pvd, *cvd, *tvd;
6141 boolean_t unspare = B_FALSE;
6142 uint64_t unspare_guid = 0;
6143 char *vdpath;
6144
6145 ASSERT(spa_writeable(spa));
6146
6147 txg = spa_vdev_enter(spa);
6148
6149 vd = spa_lookup_by_guid(spa, guid, B_FALSE);
6150
6151 /*
6152 * Besides being called directly from the userland through the
6153 * ioctl interface, spa_vdev_detach() can be potentially called
6154 * at the end of spa_vdev_resilver_done().
6155 *
6156 * In the regular case, when we have a checkpoint this shouldn't
6157 * happen as we never empty the DTLs of a vdev during the scrub
6158 * [see comment in dsl_scan_done()]. Thus spa_vdev_resilver_done()
6159 * should never get here when we have a checkpoint.
6160 *
6161 * That said, even in the case where we checkpoint the pool exactly
6162 * as spa_vdev_resilver_done() calls this function, everything
6163 * should be fine as the resilver will return right away.
6164 */
6165 ASSERT(MUTEX_HELD(&spa_namespace_lock));
6166 if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) {
6167 error = (spa_has_checkpoint(spa)) ?
6168 ZFS_ERR_CHECKPOINT_EXISTS : ZFS_ERR_DISCARDING_CHECKPOINT;
6169 return (spa_vdev_exit(spa, NULL, txg, error));
6170 }
6171
6172 if (vd == NULL)
6173 return (spa_vdev_exit(spa, NULL, txg, ENODEV));
6174
6175 if (!vd->vdev_ops->vdev_op_leaf)
6176 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
6177
6178 pvd = vd->vdev_parent;
6179
6180 /*
6181 * If the parent/child relationship is not as expected, don't do it.
6182 * Consider M(A,R(B,C)) -- that is, a mirror of A with a replacing
6183 * vdev that's replacing B with C. The user's intent in replacing
6184 * is to go from M(A,B) to M(A,C). If the user decides to cancel
6185 * the replace by detaching C, the expected behavior is to end up
6186 * M(A,B). But suppose that right after deciding to detach C,
6187 * the replacement of B completes. We would have M(A,C), and then
6188 * ask to detach C, which would leave us with just A -- not what
6189 * the user wanted. To prevent this, we make sure that the
6190 * parent/child relationship hasn't changed -- in this example,
6191 * that C's parent is still the replacing vdev R.
6192 */
6193 if (pvd->vdev_guid != pguid && pguid != 0)
6194 return (spa_vdev_exit(spa, NULL, txg, EBUSY));
6195
6196 /*
6197 * Only 'replacing' or 'spare' vdevs can be replaced.
6198 */
6199 if (replace_done && pvd->vdev_ops != &vdev_replacing_ops &&
6200 pvd->vdev_ops != &vdev_spare_ops)
6201 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
6202
6203 ASSERT(pvd->vdev_ops != &vdev_spare_ops ||
6204 spa_version(spa) >= SPA_VERSION_SPARES);
6205
6206 /*
6207 * Only mirror, replacing, and spare vdevs support detach.
6208 */
6209 if (pvd->vdev_ops != &vdev_replacing_ops &&
6210 pvd->vdev_ops != &vdev_mirror_ops &&
6211 pvd->vdev_ops != &vdev_spare_ops)
6212 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
6213
6214 /*
6215 * If this device has the only valid copy of some data,
6216 * we cannot safely detach it.
6217 */
6218 if (vdev_dtl_required(vd))
6219 return (spa_vdev_exit(spa, NULL, txg, EBUSY));
6220
6221 ASSERT(pvd->vdev_children >= 2);
6222
6223 /*
6224 * If we are detaching the second disk from a replacing vdev, then
6225 * check to see if we changed the original vdev's path to have "/old"
6226 * at the end in spa_vdev_attach(). If so, undo that change now.
6227 */ 6228 if (pvd->vdev_ops == &vdev_replacing_ops && vd->vdev_id > 0 && 6229 vd->vdev_path != NULL) { 6230 size_t len = strlen(vd->vdev_path); 6231 6232 for (int c = 0; c < pvd->vdev_children; c++) { 6233 cvd = pvd->vdev_child[c]; 6234 6235 if (cvd == vd || cvd->vdev_path == NULL) 6236 continue; 6237 6238 if (strncmp(cvd->vdev_path, vd->vdev_path, len) == 0 && 6239 strcmp(cvd->vdev_path + len, "/old") == 0) { 6240 spa_strfree(cvd->vdev_path); 6241 cvd->vdev_path = spa_strdup(vd->vdev_path); 6242 break; 6243 } 6244 } 6245 } 6246 6247 /* 6248 * If we are detaching the original disk from a spare, then it implies 6249 * that the spare should become a real disk, and be removed from the 6250 * active spare list for the pool. 6251 */ 6252 if (pvd->vdev_ops == &vdev_spare_ops && 6253 vd->vdev_id == 0 && 6254 pvd->vdev_child[pvd->vdev_children - 1]->vdev_isspare) 6255 unspare = B_TRUE; 6256 6257 /* 6258 * Erase the disk labels so the disk can be used for other things. 6259 * This must be done after all other error cases are handled, 6260 * but before we disembowel vd (so we can still do I/O to it). 6261 * But if we can't do it, don't treat the error as fatal -- 6262 * it may be that the unwritability of the disk is the reason 6263 * it's being detached! 6264 */ 6265 error = vdev_label_init(vd, 0, VDEV_LABEL_REMOVE); 6266 6267 /* 6268 * Remove vd from its parent and compact the parent's children. 6269 */ 6270 vdev_remove_child(pvd, vd); 6271 vdev_compact_children(pvd); 6272 6273 /* 6274 * Remember one of the remaining children so we can get tvd below. 6275 */ 6276 cvd = pvd->vdev_child[pvd->vdev_children - 1]; 6277 6278 /* 6279 * If we need to remove the remaining child from the list of hot spares, 6280 * do it now, marking the vdev as no longer a spare in the process. 6281 * We must do this before vdev_remove_parent(), because that can 6282 * change the GUID if it creates a new toplevel GUID. For a similar 6283 * reason, we must remove the spare now, in the same txg as the detach; 6284 * otherwise someone could attach a new sibling, change the GUID, and 6285 * the subsequent attempt to spa_vdev_remove(unspare_guid) would fail. 6286 */ 6287 if (unspare) { 6288 ASSERT(cvd->vdev_isspare); 6289 spa_spare_remove(cvd); 6290 unspare_guid = cvd->vdev_guid; 6291 (void) spa_vdev_remove(spa, unspare_guid, B_TRUE); 6292 cvd->vdev_unspare = B_TRUE; 6293 } 6294 6295 /* 6296 * If the parent mirror/replacing vdev only has one child, 6297 * the parent is no longer needed. Remove it from the tree. 6298 */ 6299 if (pvd->vdev_children == 1) { 6300 if (pvd->vdev_ops == &vdev_spare_ops) 6301 cvd->vdev_unspare = B_FALSE; 6302 vdev_remove_parent(cvd); 6303 } 6304 6305 6306 /* 6307 * We don't set tvd until now because the parent we just removed 6308 * may have been the previous top-level vdev. 6309 */ 6310 tvd = cvd->vdev_top; 6311 ASSERT(tvd->vdev_parent == rvd); 6312 6313 /* 6314 * Reevaluate the parent vdev state. 6315 */ 6316 vdev_propagate_state(cvd); 6317 6318 /* 6319 * If the 'autoexpand' property is set on the pool then automatically 6320 * try to expand the size of the pool. For example if the device we 6321 * just detached was smaller than the others, it may be possible to 6322 * add metaslabs (i.e. grow the pool). We need to reopen the vdev 6323 * first so that we can obtain the updated sizes of the leaf vdevs. 6324 */ 6325 if (spa->spa_autoexpand) { 6326 vdev_reopen(tvd); 6327 vdev_expand(tvd, txg); 6328 } 6329 6330 vdev_config_dirty(tvd); 6331 6332 /* 6333 * Mark vd's DTL as dirty in this txg. 
vdev_dtl_sync() will see that 6334 * vd->vdev_detached is set and free vd's DTL object in syncing context. 6335 * But first make sure we're not on any *other* txg's DTL list, to 6336 * prevent vd from being accessed after it's freed. 6337 */ 6338 vdpath = spa_strdup(vd->vdev_path); 6339 for (int t = 0; t < TXG_SIZE; t++) 6340 (void) txg_list_remove_this(&tvd->vdev_dtl_list, vd, t); 6341 vd->vdev_detached = B_TRUE; 6342 vdev_dirty(tvd, VDD_DTL, vd, txg); 6343 6344 spa_event_notify(spa, vd, NULL, ESC_ZFS_VDEV_REMOVE); 6345 6346 /* hang on to the spa before we release the lock */ 6347 spa_open_ref(spa, FTAG); 6348 6349 error = spa_vdev_exit(spa, vd, txg, 0); 6350 6351 spa_history_log_internal(spa, "detach", NULL, 6352 "vdev=%s", vdpath); 6353 spa_strfree(vdpath); 6354 6355 /* 6356 * If this was the removal of the original device in a hot spare vdev, 6357 * then we want to go through and remove the device from the hot spare 6358 * list of every other pool. 6359 */ 6360 if (unspare) { 6361 spa_t *altspa = NULL; 6362 6363 mutex_enter(&spa_namespace_lock); 6364 while ((altspa = spa_next(altspa)) != NULL) { 6365 if (altspa->spa_state != POOL_STATE_ACTIVE || 6366 altspa == spa) 6367 continue; 6368 6369 spa_open_ref(altspa, FTAG); 6370 mutex_exit(&spa_namespace_lock); 6371 (void) spa_vdev_remove(altspa, unspare_guid, B_TRUE); 6372 mutex_enter(&spa_namespace_lock); 6373 spa_close(altspa, FTAG); 6374 } 6375 mutex_exit(&spa_namespace_lock); 6376 6377 /* search the rest of the vdevs for spares to remove */ 6378 spa_vdev_resilver_done(spa); 6379 } 6380 6381 /* all done with the spa; OK to release */ 6382 mutex_enter(&spa_namespace_lock); 6383 spa_close(spa, FTAG); 6384 mutex_exit(&spa_namespace_lock); 6385 6386 return (error); 6387} 6388 6389/* 6390 * Split a set of devices from their mirrors, and create a new pool from them. 6391 */ 6392int 6393spa_vdev_split_mirror(spa_t *spa, char *newname, nvlist_t *config, 6394 nvlist_t *props, boolean_t exp) 6395{ 6396 int error = 0; 6397 uint64_t txg, *glist; 6398 spa_t *newspa; 6399 uint_t c, children, lastlog; 6400 nvlist_t **child, *nvl, *tmp; 6401 dmu_tx_t *tx; 6402 char *altroot = NULL; 6403 vdev_t *rvd, **vml = NULL; /* vdev modify list */ 6404 boolean_t activate_slog; 6405 6406 ASSERT(spa_writeable(spa)); 6407 6408 txg = spa_vdev_enter(spa); 6409 6410 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 6411 if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) { 6412 error = (spa_has_checkpoint(spa)) ? 
6413 ZFS_ERR_CHECKPOINT_EXISTS : ZFS_ERR_DISCARDING_CHECKPOINT; 6414 return (spa_vdev_exit(spa, NULL, txg, error)); 6415 } 6416 6417 /* clear the log and flush everything up to now */ 6418 activate_slog = spa_passivate_log(spa); 6419 (void) spa_vdev_config_exit(spa, NULL, txg, 0, FTAG); 6420 error = spa_reset_logs(spa); 6421 txg = spa_vdev_config_enter(spa); 6422 6423 if (activate_slog) 6424 spa_activate_log(spa); 6425 6426 if (error != 0) 6427 return (spa_vdev_exit(spa, NULL, txg, error)); 6428 6429 /* check new spa name before going any further */ 6430 if (spa_lookup(newname) != NULL) 6431 return (spa_vdev_exit(spa, NULL, txg, EEXIST)); 6432 6433 /* 6434 * scan through all the children to ensure they're all mirrors 6435 */ 6436 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvl) != 0 || 6437 nvlist_lookup_nvlist_array(nvl, ZPOOL_CONFIG_CHILDREN, &child, 6438 &children) != 0) 6439 return (spa_vdev_exit(spa, NULL, txg, EINVAL)); 6440 6441 /* first, check to ensure we've got the right child count */ 6442 rvd = spa->spa_root_vdev; 6443 lastlog = 0; 6444 for (c = 0; c < rvd->vdev_children; c++) { 6445 vdev_t *vd = rvd->vdev_child[c]; 6446 6447 /* don't count the holes & logs as children */ 6448 if (vd->vdev_islog || !vdev_is_concrete(vd)) { 6449 if (lastlog == 0) 6450 lastlog = c; 6451 continue; 6452 } 6453 6454 lastlog = 0; 6455 } 6456 if (children != (lastlog != 0 ? lastlog : rvd->vdev_children)) 6457 return (spa_vdev_exit(spa, NULL, txg, EINVAL)); 6458 6459 /* next, ensure no spare or cache devices are part of the split */ 6460 if (nvlist_lookup_nvlist(nvl, ZPOOL_CONFIG_SPARES, &tmp) == 0 || 6461 nvlist_lookup_nvlist(nvl, ZPOOL_CONFIG_L2CACHE, &tmp) == 0) 6462 return (spa_vdev_exit(spa, NULL, txg, EINVAL)); 6463 6464 vml = kmem_zalloc(children * sizeof (vdev_t *), KM_SLEEP); 6465 glist = kmem_zalloc(children * sizeof (uint64_t), KM_SLEEP); 6466 6467 /* then, loop over each vdev and validate it */ 6468 for (c = 0; c < children; c++) { 6469 uint64_t is_hole = 0; 6470 6471 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE, 6472 &is_hole); 6473 6474 if (is_hole != 0) { 6475 if (spa->spa_root_vdev->vdev_child[c]->vdev_ishole || 6476 spa->spa_root_vdev->vdev_child[c]->vdev_islog) { 6477 continue; 6478 } else { 6479 error = SET_ERROR(EINVAL); 6480 break; 6481 } 6482 } 6483 6484 /* which disk is going to be split? 
*/ 6485 if (nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_GUID, 6486 &glist[c]) != 0) { 6487 error = SET_ERROR(EINVAL); 6488 break; 6489 } 6490 6491 /* look it up in the spa */ 6492 vml[c] = spa_lookup_by_guid(spa, glist[c], B_FALSE); 6493 if (vml[c] == NULL) { 6494 error = SET_ERROR(ENODEV); 6495 break; 6496 } 6497 6498 /* make sure there's nothing stopping the split */ 6499 if (vml[c]->vdev_parent->vdev_ops != &vdev_mirror_ops || 6500 vml[c]->vdev_islog || 6501 !vdev_is_concrete(vml[c]) || 6502 vml[c]->vdev_isspare || 6503 vml[c]->vdev_isl2cache || 6504 !vdev_writeable(vml[c]) || 6505 vml[c]->vdev_children != 0 || 6506 vml[c]->vdev_state != VDEV_STATE_HEALTHY || 6507 c != spa->spa_root_vdev->vdev_child[c]->vdev_id) { 6508 error = SET_ERROR(EINVAL); 6509 break; 6510 } 6511 6512 if (vdev_dtl_required(vml[c])) { 6513 error = SET_ERROR(EBUSY); 6514 break; 6515 } 6516 6517 /* we need certain info from the top level */ 6518 VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_METASLAB_ARRAY, 6519 vml[c]->vdev_top->vdev_ms_array) == 0); 6520 VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_METASLAB_SHIFT, 6521 vml[c]->vdev_top->vdev_ms_shift) == 0); 6522 VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_ASIZE, 6523 vml[c]->vdev_top->vdev_asize) == 0); 6524 VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_ASHIFT, 6525 vml[c]->vdev_top->vdev_ashift) == 0); 6526 6527 /* transfer per-vdev ZAPs */ 6528 ASSERT3U(vml[c]->vdev_leaf_zap, !=, 0); 6529 VERIFY0(nvlist_add_uint64(child[c], 6530 ZPOOL_CONFIG_VDEV_LEAF_ZAP, vml[c]->vdev_leaf_zap)); 6531 6532 ASSERT3U(vml[c]->vdev_top->vdev_top_zap, !=, 0); 6533 VERIFY0(nvlist_add_uint64(child[c], 6534 ZPOOL_CONFIG_VDEV_TOP_ZAP, 6535 vml[c]->vdev_parent->vdev_top_zap)); 6536 } 6537 6538 if (error != 0) { 6539 kmem_free(vml, children * sizeof (vdev_t *)); 6540 kmem_free(glist, children * sizeof (uint64_t)); 6541 return (spa_vdev_exit(spa, NULL, txg, error)); 6542 } 6543 6544 /* stop writers from using the disks */ 6545 for (c = 0; c < children; c++) { 6546 if (vml[c] != NULL) 6547 vml[c]->vdev_offline = B_TRUE; 6548 } 6549 vdev_reopen(spa->spa_root_vdev); 6550 6551 /* 6552 * Temporarily record the splitting vdevs in the spa config. This 6553 * will disappear once the config is regenerated. 6554 */ 6555 VERIFY(nvlist_alloc(&nvl, NV_UNIQUE_NAME, KM_SLEEP) == 0); 6556 VERIFY(nvlist_add_uint64_array(nvl, ZPOOL_CONFIG_SPLIT_LIST, 6557 glist, children) == 0); 6558 kmem_free(glist, children * sizeof (uint64_t)); 6559 6560 mutex_enter(&spa->spa_props_lock); 6561 VERIFY(nvlist_add_nvlist(spa->spa_config, ZPOOL_CONFIG_SPLIT, 6562 nvl) == 0); 6563 mutex_exit(&spa->spa_props_lock); 6564 spa->spa_config_splitting = nvl; 6565 vdev_config_dirty(spa->spa_root_vdev); 6566 6567 /* configure and create the new pool */ 6568 VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME, newname) == 0); 6569 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE, 6570 exp ? 
POOL_STATE_EXPORTED : POOL_STATE_ACTIVE) == 0); 6571 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_VERSION, 6572 spa_version(spa)) == 0); 6573 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_TXG, 6574 spa->spa_config_txg) == 0); 6575 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_GUID, 6576 spa_generate_guid(NULL)) == 0); 6577 VERIFY0(nvlist_add_boolean(config, ZPOOL_CONFIG_HAS_PER_VDEV_ZAPS)); 6578 (void) nvlist_lookup_string(props, 6579 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot); 6580 6581 /* add the new pool to the namespace */ 6582 newspa = spa_add(newname, config, altroot); 6583 newspa->spa_avz_action = AVZ_ACTION_REBUILD; 6584 newspa->spa_config_txg = spa->spa_config_txg; 6585 spa_set_log_state(newspa, SPA_LOG_CLEAR); 6586 6587 /* release the spa config lock, retaining the namespace lock */ 6588 spa_vdev_config_exit(spa, NULL, txg, 0, FTAG); 6589 6590 if (zio_injection_enabled) 6591 zio_handle_panic_injection(spa, FTAG, 1); 6592 6593 spa_activate(newspa, spa_mode_global); 6594 spa_async_suspend(newspa); 6595 6596#ifndef illumos 6597 /* mark that we are creating new spa by splitting */ 6598 newspa->spa_splitting_newspa = B_TRUE; 6599#endif 6600 newspa->spa_config_source = SPA_CONFIG_SRC_SPLIT; 6601 6602 /* create the new pool from the disks of the original pool */ 6603 error = spa_load(newspa, SPA_LOAD_IMPORT, SPA_IMPORT_ASSEMBLE); 6604#ifndef illumos 6605 newspa->spa_splitting_newspa = B_FALSE; 6606#endif 6607 if (error) 6608 goto out; 6609 6610 /* if that worked, generate a real config for the new pool */ 6611 if (newspa->spa_root_vdev != NULL) { 6612 VERIFY(nvlist_alloc(&newspa->spa_config_splitting, 6613 NV_UNIQUE_NAME, KM_SLEEP) == 0); 6614 VERIFY(nvlist_add_uint64(newspa->spa_config_splitting, 6615 ZPOOL_CONFIG_SPLIT_GUID, spa_guid(spa)) == 0); 6616 spa_config_set(newspa, spa_config_generate(newspa, NULL, -1ULL, 6617 B_TRUE)); 6618 } 6619 6620 /* set the props */ 6621 if (props != NULL) { 6622 spa_configfile_set(newspa, props, B_FALSE); 6623 error = spa_prop_set(newspa, props); 6624 if (error) 6625 goto out; 6626 } 6627 6628 /* flush everything */ 6629 txg = spa_vdev_config_enter(newspa); 6630 vdev_config_dirty(newspa->spa_root_vdev); 6631 (void) spa_vdev_config_exit(newspa, NULL, txg, 0, FTAG); 6632 6633 if (zio_injection_enabled) 6634 zio_handle_panic_injection(spa, FTAG, 2); 6635 6636 spa_async_resume(newspa); 6637 6638 /* finally, update the original pool's config */ 6639 txg = spa_vdev_config_enter(spa); 6640 tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir); 6641 error = dmu_tx_assign(tx, TXG_WAIT); 6642 if (error != 0) 6643 dmu_tx_abort(tx); 6644 for (c = 0; c < children; c++) { 6645 if (vml[c] != NULL) { 6646 vdev_split(vml[c]); 6647 if (error == 0) 6648 spa_history_log_internal(spa, "detach", tx, 6649 "vdev=%s", vml[c]->vdev_path); 6650 6651 vdev_free(vml[c]); 6652 } 6653 } 6654 spa->spa_avz_action = AVZ_ACTION_REBUILD; 6655 vdev_config_dirty(spa->spa_root_vdev); 6656 spa->spa_config_splitting = NULL; 6657 nvlist_free(nvl); 6658 if (error == 0) 6659 dmu_tx_commit(tx); 6660 (void) spa_vdev_exit(spa, NULL, txg, 0); 6661 6662 if (zio_injection_enabled) 6663 zio_handle_panic_injection(spa, FTAG, 3); 6664 6665 /* split is complete; log a history record */ 6666 spa_history_log_internal(newspa, "split", NULL, 6667 "from pool %s", spa_name(spa)); 6668 6669 kmem_free(vml, children * sizeof (vdev_t *)); 6670 6671 /* if we're not going to mount the filesystems in userland, export */ 6672 if (exp) 6673 error = spa_export_common(newname, POOL_STATE_EXPORTED, NULL, 
6674 B_FALSE, B_FALSE); 6675 6676 return (error); 6677 6678out: 6679 spa_unload(newspa); 6680 spa_deactivate(newspa); 6681 spa_remove(newspa); 6682 6683 txg = spa_vdev_config_enter(spa); 6684 6685 /* re-online all offlined disks */ 6686 for (c = 0; c < children; c++) { 6687 if (vml[c] != NULL) 6688 vml[c]->vdev_offline = B_FALSE; 6689 } 6690 vdev_reopen(spa->spa_root_vdev); 6691 6692 nvlist_free(spa->spa_config_splitting); 6693 spa->spa_config_splitting = NULL; 6694 (void) spa_vdev_exit(spa, NULL, txg, error); 6695 6696 kmem_free(vml, children * sizeof (vdev_t *)); 6697 return (error); 6698} 6699 6700/* 6701 * Find any device that's done replacing, or a vdev marked 'unspare' that's 6702 * currently spared, so we can detach it. 6703 */ 6704static vdev_t * 6705spa_vdev_resilver_done_hunt(vdev_t *vd) 6706{ 6707 vdev_t *newvd, *oldvd; 6708 6709 for (int c = 0; c < vd->vdev_children; c++) { 6710 oldvd = spa_vdev_resilver_done_hunt(vd->vdev_child[c]); 6711 if (oldvd != NULL) 6712 return (oldvd); 6713 } 6714 6715 /* 6716 * Check for a completed replacement. We always consider the first 6717 * vdev in the list to be the oldest vdev, and the last one to be 6718 * the newest (see spa_vdev_attach() for how that works). In 6719 * the case where the newest vdev is faulted, we will not automatically 6720 * remove it after a resilver completes. This is OK as it will require 6721 * user intervention to determine which disk the admin wishes to keep. 6722 */ 6723 if (vd->vdev_ops == &vdev_replacing_ops) { 6724 ASSERT(vd->vdev_children > 1); 6725 6726 newvd = vd->vdev_child[vd->vdev_children - 1]; 6727 oldvd = vd->vdev_child[0]; 6728 6729 if (vdev_dtl_empty(newvd, DTL_MISSING) && 6730 vdev_dtl_empty(newvd, DTL_OUTAGE) && 6731 !vdev_dtl_required(oldvd)) 6732 return (oldvd); 6733 } 6734 6735 /* 6736 * Check for a completed resilver with the 'unspare' flag set. 6737 */ 6738 if (vd->vdev_ops == &vdev_spare_ops) { 6739 vdev_t *first = vd->vdev_child[0]; 6740 vdev_t *last = vd->vdev_child[vd->vdev_children - 1]; 6741 6742 if (last->vdev_unspare) { 6743 oldvd = first; 6744 newvd = last; 6745 } else if (first->vdev_unspare) { 6746 oldvd = last; 6747 newvd = first; 6748 } else { 6749 oldvd = NULL; 6750 } 6751 6752 if (oldvd != NULL && 6753 vdev_dtl_empty(newvd, DTL_MISSING) && 6754 vdev_dtl_empty(newvd, DTL_OUTAGE) && 6755 !vdev_dtl_required(oldvd)) 6756 return (oldvd); 6757 6758 /* 6759 * If there are more than two spares attached to a disk, 6760 * and those spares are not required, then we want to 6761 * attempt to free them up now so that they can be used 6762 * by other pools. Once we're back down to a single 6763 * disk+spare, we stop removing them. 
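 *
 * A worked example (hypothetical layout): for spare(disk0, spareA,
 * spareB), once spareB has no missing or outaged data and spareA's
 * copy is no longer required, the check below returns spareA so it can
 * be detached and handed back to the shared spare list, leaving
 * spare(disk0, spareB).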
6764 */ 6765 if (vd->vdev_children > 2) { 6766 newvd = vd->vdev_child[1]; 6767 6768 if (newvd->vdev_isspare && last->vdev_isspare && 6769 vdev_dtl_empty(last, DTL_MISSING) && 6770 vdev_dtl_empty(last, DTL_OUTAGE) && 6771 !vdev_dtl_required(newvd)) 6772 return (newvd); 6773 } 6774 } 6775 6776 return (NULL); 6777} 6778 6779static void 6780spa_vdev_resilver_done(spa_t *spa) 6781{ 6782 vdev_t *vd, *pvd, *ppvd; 6783 uint64_t guid, sguid, pguid, ppguid; 6784 6785 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 6786 6787 while ((vd = spa_vdev_resilver_done_hunt(spa->spa_root_vdev)) != NULL) { 6788 pvd = vd->vdev_parent; 6789 ppvd = pvd->vdev_parent; 6790 guid = vd->vdev_guid; 6791 pguid = pvd->vdev_guid; 6792 ppguid = ppvd->vdev_guid; 6793 sguid = 0; 6794 /* 6795 * If we have just finished replacing a hot spared device, then 6796 * we need to detach the parent's first child (the original hot 6797 * spare) as well. 6798 */ 6799 if (ppvd->vdev_ops == &vdev_spare_ops && pvd->vdev_id == 0 && 6800 ppvd->vdev_children == 2) { 6801 ASSERT(pvd->vdev_ops == &vdev_replacing_ops); 6802 sguid = ppvd->vdev_child[1]->vdev_guid; 6803 } 6804 ASSERT(vd->vdev_resilver_txg == 0 || !vdev_dtl_required(vd)); 6805 6806 spa_config_exit(spa, SCL_ALL, FTAG); 6807 if (spa_vdev_detach(spa, guid, pguid, B_TRUE) != 0) 6808 return; 6809 if (sguid && spa_vdev_detach(spa, sguid, ppguid, B_TRUE) != 0) 6810 return; 6811 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 6812 } 6813 6814 spa_config_exit(spa, SCL_ALL, FTAG); 6815} 6816 6817/* 6818 * Update the stored path or FRU for this vdev. 6819 */ 6820int 6821spa_vdev_set_common(spa_t *spa, uint64_t guid, const char *value, 6822 boolean_t ispath) 6823{ 6824 vdev_t *vd; 6825 boolean_t sync = B_FALSE; 6826 6827 ASSERT(spa_writeable(spa)); 6828 6829 spa_vdev_state_enter(spa, SCL_ALL); 6830 6831 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL) 6832 return (spa_vdev_state_exit(spa, NULL, ENOENT)); 6833 6834 if (!vd->vdev_ops->vdev_op_leaf) 6835 return (spa_vdev_state_exit(spa, NULL, ENOTSUP)); 6836 6837 if (ispath) { 6838 if (strcmp(value, vd->vdev_path) != 0) { 6839 spa_strfree(vd->vdev_path); 6840 vd->vdev_path = spa_strdup(value); 6841 sync = B_TRUE; 6842 } 6843 } else { 6844 if (vd->vdev_fru == NULL) { 6845 vd->vdev_fru = spa_strdup(value); 6846 sync = B_TRUE; 6847 } else if (strcmp(value, vd->vdev_fru) != 0) { 6848 spa_strfree(vd->vdev_fru); 6849 vd->vdev_fru = spa_strdup(value); 6850 sync = B_TRUE; 6851 } 6852 } 6853 6854 return (spa_vdev_state_exit(spa, sync ? 
vd : NULL, 0)); 6855} 6856 6857int 6858spa_vdev_setpath(spa_t *spa, uint64_t guid, const char *newpath) 6859{ 6860 return (spa_vdev_set_common(spa, guid, newpath, B_TRUE)); 6861} 6862 6863int 6864spa_vdev_setfru(spa_t *spa, uint64_t guid, const char *newfru) 6865{ 6866 return (spa_vdev_set_common(spa, guid, newfru, B_FALSE)); 6867} 6868 6869/* 6870 * ========================================================================== 6871 * SPA Scanning 6872 * ========================================================================== 6873 */ 6874int 6875spa_scrub_pause_resume(spa_t *spa, pool_scrub_cmd_t cmd) 6876{ 6877 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0); 6878 6879 if (dsl_scan_resilvering(spa->spa_dsl_pool)) 6880 return (SET_ERROR(EBUSY)); 6881 6882 return (dsl_scrub_set_pause_resume(spa->spa_dsl_pool, cmd)); 6883} 6884 6885int 6886spa_scan_stop(spa_t *spa) 6887{ 6888 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0); 6889 if (dsl_scan_resilvering(spa->spa_dsl_pool)) 6890 return (SET_ERROR(EBUSY)); 6891 return (dsl_scan_cancel(spa->spa_dsl_pool)); 6892} 6893 6894int 6895spa_scan(spa_t *spa, pool_scan_func_t func) 6896{ 6897 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0); 6898 6899 if (func >= POOL_SCAN_FUNCS || func == POOL_SCAN_NONE) 6900 return (SET_ERROR(ENOTSUP)); 6901 6902 /* 6903 * If a resilver was requested, but there is no DTL on a 6904 * writeable leaf device, we have nothing to do. 6905 */ 6906 if (func == POOL_SCAN_RESILVER && 6907 !vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL)) { 6908 spa_async_request(spa, SPA_ASYNC_RESILVER_DONE); 6909 return (0); 6910 } 6911 6912 return (dsl_scan(spa->spa_dsl_pool, func)); 6913} 6914 6915/* 6916 * ========================================================================== 6917 * SPA async task processing 6918 * ========================================================================== 6919 */ 6920 6921static void 6922spa_async_remove(spa_t *spa, vdev_t *vd) 6923{ 6924 if (vd->vdev_remove_wanted) { 6925 vd->vdev_remove_wanted = B_FALSE; 6926 vd->vdev_delayed_close = B_FALSE; 6927 vdev_set_state(vd, B_FALSE, VDEV_STATE_REMOVED, VDEV_AUX_NONE); 6928 6929 /* 6930 * We want to clear the stats, but we don't want to do a full 6931 * vdev_clear() as that will cause us to throw away 6932 * degraded/faulted state as well as attempt to reopen the 6933 * device, all of which is a waste. 6934 */ 6935 vd->vdev_stat.vs_read_errors = 0; 6936 vd->vdev_stat.vs_write_errors = 0; 6937 vd->vdev_stat.vs_checksum_errors = 0; 6938 6939 vdev_state_dirty(vd->vdev_top); 6940 /* Tell userspace that the vdev is gone. 
*/ 6941 zfs_post_remove(spa, vd); 6942 } 6943 6944 for (int c = 0; c < vd->vdev_children; c++) 6945 spa_async_remove(spa, vd->vdev_child[c]); 6946} 6947 6948static void 6949spa_async_probe(spa_t *spa, vdev_t *vd) 6950{ 6951 if (vd->vdev_probe_wanted) { 6952 vd->vdev_probe_wanted = B_FALSE; 6953 vdev_reopen(vd); /* vdev_open() does the actual probe */ 6954 } 6955 6956 for (int c = 0; c < vd->vdev_children; c++) 6957 spa_async_probe(spa, vd->vdev_child[c]); 6958} 6959 6960static void 6961spa_async_autoexpand(spa_t *spa, vdev_t *vd) 6962{ 6963 sysevent_id_t eid; 6964 nvlist_t *attr; 6965 char *physpath; 6966 6967 if (!spa->spa_autoexpand) 6968 return; 6969 6970 for (int c = 0; c < vd->vdev_children; c++) { 6971 vdev_t *cvd = vd->vdev_child[c]; 6972 spa_async_autoexpand(spa, cvd); 6973 } 6974 6975 if (!vd->vdev_ops->vdev_op_leaf || vd->vdev_physpath == NULL) 6976 return; 6977 6978 physpath = kmem_zalloc(MAXPATHLEN, KM_SLEEP); 6979 (void) snprintf(physpath, MAXPATHLEN, "/devices%s", vd->vdev_physpath); 6980 6981 VERIFY(nvlist_alloc(&attr, NV_UNIQUE_NAME, KM_SLEEP) == 0); 6982 VERIFY(nvlist_add_string(attr, DEV_PHYS_PATH, physpath) == 0); 6983 6984 (void) ddi_log_sysevent(zfs_dip, SUNW_VENDOR, EC_DEV_STATUS, 6985 ESC_ZFS_VDEV_AUTOEXPAND, attr, &eid, DDI_SLEEP); 6986 6987 nvlist_free(attr); 6988 kmem_free(physpath, MAXPATHLEN); 6989} 6990 6991static void 6992spa_async_thread(void *arg) 6993{ 6994 spa_t *spa = (spa_t *)arg; 6995 int tasks; 6996 6997 ASSERT(spa->spa_sync_on); 6998 6999 mutex_enter(&spa->spa_async_lock); 7000 tasks = spa->spa_async_tasks; 7001 spa->spa_async_tasks &= SPA_ASYNC_REMOVE; 7002 mutex_exit(&spa->spa_async_lock); 7003 7004 /* 7005 * See if the config needs to be updated. 7006 */ 7007 if (tasks & SPA_ASYNC_CONFIG_UPDATE) { 7008 uint64_t old_space, new_space; 7009 7010 mutex_enter(&spa_namespace_lock); 7011 old_space = metaslab_class_get_space(spa_normal_class(spa)); 7012 spa_config_update(spa, SPA_CONFIG_UPDATE_POOL); 7013 new_space = metaslab_class_get_space(spa_normal_class(spa)); 7014 mutex_exit(&spa_namespace_lock); 7015 7016 /* 7017 * If the pool grew as a result of the config update, 7018 * then log an internal history event. 7019 */ 7020 if (new_space != old_space) { 7021 spa_history_log_internal(spa, "vdev online", NULL, 7022 "pool '%s' size: %llu(+%llu)", 7023 spa_name(spa), new_space, new_space - old_space); 7024 } 7025 } 7026 7027 if ((tasks & SPA_ASYNC_AUTOEXPAND) && !spa_suspended(spa)) { 7028 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); 7029 spa_async_autoexpand(spa, spa->spa_root_vdev); 7030 spa_config_exit(spa, SCL_CONFIG, FTAG); 7031 } 7032 7033 /* 7034 * See if any devices need to be probed. 7035 */ 7036 if (tasks & SPA_ASYNC_PROBE) { 7037 spa_vdev_state_enter(spa, SCL_NONE); 7038 spa_async_probe(spa, spa->spa_root_vdev); 7039 (void) spa_vdev_state_exit(spa, NULL, 0); 7040 } 7041 7042 /* 7043 * If any devices are done replacing, detach them. 7044 */ 7045 if (tasks & SPA_ASYNC_RESILVER_DONE) 7046 spa_vdev_resilver_done(spa); 7047 7048 /* 7049 * Kick off a resilver. 7050 */ 7051 if (tasks & SPA_ASYNC_RESILVER) 7052 dsl_resilver_restart(spa->spa_dsl_pool, 0); 7053 7054 /* 7055 * Let the world know that we're done. 
7056 */ 7057 mutex_enter(&spa->spa_async_lock); 7058 spa->spa_async_thread = NULL; 7059 cv_broadcast(&spa->spa_async_cv); 7060 mutex_exit(&spa->spa_async_lock); 7061 thread_exit(); 7062} 7063 7064static void 7065spa_async_thread_vd(void *arg) 7066{ 7067 spa_t *spa = arg; 7068 int tasks; 7069 7070 mutex_enter(&spa->spa_async_lock); 7071 tasks = spa->spa_async_tasks; 7072retry: 7073 spa->spa_async_tasks &= ~SPA_ASYNC_REMOVE; 7074 mutex_exit(&spa->spa_async_lock); 7075 7076 /* 7077 * See if any devices need to be marked REMOVED. 7078 */ 7079 if (tasks & SPA_ASYNC_REMOVE) { 7080 spa_vdev_state_enter(spa, SCL_NONE); 7081 spa_async_remove(spa, spa->spa_root_vdev); 7082 for (int i = 0; i < spa->spa_l2cache.sav_count; i++) 7083 spa_async_remove(spa, spa->spa_l2cache.sav_vdevs[i]); 7084 for (int i = 0; i < spa->spa_spares.sav_count; i++) 7085 spa_async_remove(spa, spa->spa_spares.sav_vdevs[i]); 7086 (void) spa_vdev_state_exit(spa, NULL, 0); 7087 } 7088 7089 /* 7090 * Let the world know that we're done. 7091 */ 7092 mutex_enter(&spa->spa_async_lock); 7093 tasks = spa->spa_async_tasks; 7094 if ((tasks & SPA_ASYNC_REMOVE) != 0) 7095 goto retry; 7096 spa->spa_async_thread_vd = NULL; 7097 cv_broadcast(&spa->spa_async_cv); 7098 mutex_exit(&spa->spa_async_lock); 7099 thread_exit(); 7100} 7101 7102void 7103spa_async_suspend(spa_t *spa) 7104{ 7105 mutex_enter(&spa->spa_async_lock); 7106 spa->spa_async_suspended++; 7107 while (spa->spa_async_thread != NULL || 7108 spa->spa_async_thread_vd != NULL) 7109 cv_wait(&spa->spa_async_cv, &spa->spa_async_lock); 7110 mutex_exit(&spa->spa_async_lock); 7111 7112 spa_vdev_remove_suspend(spa); 7113 7114 zthr_t *condense_thread = spa->spa_condense_zthr; 7115 if (condense_thread != NULL && zthr_isrunning(condense_thread)) 7116 VERIFY0(zthr_cancel(condense_thread)); 7117 7118 zthr_t *discard_thread = spa->spa_checkpoint_discard_zthr; 7119 if (discard_thread != NULL && zthr_isrunning(discard_thread)) 7120 VERIFY0(zthr_cancel(discard_thread)); 7121} 7122 7123void 7124spa_async_resume(spa_t *spa) 7125{ 7126 mutex_enter(&spa->spa_async_lock); 7127 ASSERT(spa->spa_async_suspended != 0); 7128 spa->spa_async_suspended--; 7129 mutex_exit(&spa->spa_async_lock); 7130 spa_restart_removal(spa); 7131 7132 zthr_t *condense_thread = spa->spa_condense_zthr; 7133 if (condense_thread != NULL && !zthr_isrunning(condense_thread)) 7134 zthr_resume(condense_thread); 7135 7136 zthr_t *discard_thread = spa->spa_checkpoint_discard_zthr; 7137 if (discard_thread != NULL && !zthr_isrunning(discard_thread)) 7138 zthr_resume(discard_thread); 7139} 7140 7141static boolean_t 7142spa_async_tasks_pending(spa_t *spa) 7143{ 7144 uint_t non_config_tasks; 7145 uint_t config_task; 7146 boolean_t config_task_suspended; 7147 7148 non_config_tasks = spa->spa_async_tasks & ~(SPA_ASYNC_CONFIG_UPDATE | 7149 SPA_ASYNC_REMOVE); 7150 config_task = spa->spa_async_tasks & SPA_ASYNC_CONFIG_UPDATE; 7151 if (spa->spa_ccw_fail_time == 0) { 7152 config_task_suspended = B_FALSE; 7153 } else { 7154 config_task_suspended = 7155 (gethrtime() - spa->spa_ccw_fail_time) < 7156 (zfs_ccw_retry_interval * NANOSEC); 7157 } 7158 7159 return (non_config_tasks || (config_task && !config_task_suspended)); 7160} 7161 7162static void 7163spa_async_dispatch(spa_t *spa) 7164{ 7165 mutex_enter(&spa->spa_async_lock); 7166 if (spa_async_tasks_pending(spa) && 7167 !spa->spa_async_suspended && 7168 spa->spa_async_thread == NULL && 7169 rootdir != NULL) 7170 spa->spa_async_thread = thread_create(NULL, 0, 7171 spa_async_thread, spa, 0, &p0, TS_RUN, 
maxclsyspri); 7172 mutex_exit(&spa->spa_async_lock); 7173} 7174 7175static void 7176spa_async_dispatch_vd(spa_t *spa) 7177{ 7178 mutex_enter(&spa->spa_async_lock); 7179 if ((spa->spa_async_tasks & SPA_ASYNC_REMOVE) != 0 && 7180 !spa->spa_async_suspended && 7181 spa->spa_async_thread_vd == NULL && 7182 rootdir != NULL) 7183 spa->spa_async_thread_vd = thread_create(NULL, 0, 7184 spa_async_thread_vd, spa, 0, &p0, TS_RUN, maxclsyspri); 7185 mutex_exit(&spa->spa_async_lock); 7186} 7187 7188void 7189spa_async_request(spa_t *spa, int task) 7190{ 7191 zfs_dbgmsg("spa=%s async request task=%u", spa->spa_name, task); 7192 mutex_enter(&spa->spa_async_lock); 7193 spa->spa_async_tasks |= task; 7194 mutex_exit(&spa->spa_async_lock); 7195 spa_async_dispatch_vd(spa); 7196} 7197 7198/* 7199 * ========================================================================== 7200 * SPA syncing routines 7201 * ========================================================================== 7202 */ 7203 7204static int 7205bpobj_enqueue_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx) 7206{ 7207 bpobj_t *bpo = arg; 7208 bpobj_enqueue(bpo, bp, tx); 7209 return (0); 7210} 7211 7212static int 7213spa_free_sync_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx) 7214{ 7215 zio_t *zio = arg; 7216 7217 zio_nowait(zio_free_sync(zio, zio->io_spa, dmu_tx_get_txg(tx), bp, 7218 BP_GET_PSIZE(bp), zio->io_flags)); 7219 return (0); 7220} 7221 7222/* 7223 * Note: this simple function is not inlined to make it easier to dtrace the 7224 * amount of time spent syncing frees. 7225 */ 7226static void 7227spa_sync_frees(spa_t *spa, bplist_t *bpl, dmu_tx_t *tx) 7228{ 7229 zio_t *zio = zio_root(spa, NULL, NULL, 0); 7230 bplist_iterate(bpl, spa_free_sync_cb, zio, tx); 7231 VERIFY(zio_wait(zio) == 0); 7232} 7233 7234/* 7235 * Note: this simple function is not inlined to make it easier to dtrace the 7236 * amount of time spent syncing deferred frees. 7237 */ 7238static void 7239spa_sync_deferred_frees(spa_t *spa, dmu_tx_t *tx) 7240{ 7241 zio_t *zio = zio_root(spa, NULL, NULL, 0); 7242 VERIFY3U(bpobj_iterate(&spa->spa_deferred_bpobj, 7243 spa_free_sync_cb, zio, tx), ==, 0); 7244 VERIFY0(zio_wait(zio)); 7245} 7246 7247 7248static void 7249spa_sync_nvlist(spa_t *spa, uint64_t obj, nvlist_t *nv, dmu_tx_t *tx) 7250{ 7251 char *packed = NULL; 7252 size_t bufsize; 7253 size_t nvsize = 0; 7254 dmu_buf_t *db; 7255 7256 VERIFY(nvlist_size(nv, &nvsize, NV_ENCODE_XDR) == 0); 7257 7258 /* 7259 * Write full (SPA_CONFIG_BLOCKSIZE) blocks of configuration 7260 * information. This avoids the dmu_buf_will_dirty() path and 7261 * saves us a pre-read to get data we don't actually care about. 7262 */ 7263 bufsize = P2ROUNDUP((uint64_t)nvsize, SPA_CONFIG_BLOCKSIZE); 7264 packed = kmem_alloc(bufsize, KM_SLEEP); 7265 7266 VERIFY(nvlist_pack(nv, &packed, &nvsize, NV_ENCODE_XDR, 7267 KM_SLEEP) == 0); 7268 bzero(packed + nvsize, bufsize - nvsize); 7269 7270 dmu_write(spa->spa_meta_objset, obj, 0, bufsize, packed, tx); 7271 7272 kmem_free(packed, bufsize); 7273 7274 VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db)); 7275 dmu_buf_will_dirty(db, tx); 7276 *(uint64_t *)db->db_data = nvsize; 7277 dmu_buf_rele(db, FTAG); 7278} 7279 7280static void 7281spa_sync_aux_dev(spa_t *spa, spa_aux_vdev_t *sav, dmu_tx_t *tx, 7282 const char *config, const char *entry) 7283{ 7284 nvlist_t *nvroot; 7285 nvlist_t **list; 7286 int i; 7287 7288 if (!sav->sav_sync) 7289 return; 7290 7291 /* 7292 * Update the MOS nvlist describing the list of available devices. 
7293 * spa_validate_aux() will have already made sure this nvlist is 7294 * valid and the vdevs are labeled appropriately. 7295 */ 7296 if (sav->sav_object == 0) { 7297 sav->sav_object = dmu_object_alloc(spa->spa_meta_objset, 7298 DMU_OT_PACKED_NVLIST, 1 << 14, DMU_OT_PACKED_NVLIST_SIZE, 7299 sizeof (uint64_t), tx); 7300 VERIFY(zap_update(spa->spa_meta_objset, 7301 DMU_POOL_DIRECTORY_OBJECT, entry, sizeof (uint64_t), 1, 7302 &sav->sav_object, tx) == 0); 7303 } 7304 7305 VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0); 7306 if (sav->sav_count == 0) { 7307 VERIFY(nvlist_add_nvlist_array(nvroot, config, NULL, 0) == 0); 7308 } else { 7309 list = kmem_alloc(sav->sav_count * sizeof (void *), KM_SLEEP); 7310 for (i = 0; i < sav->sav_count; i++) 7311 list[i] = vdev_config_generate(spa, sav->sav_vdevs[i], 7312 B_FALSE, VDEV_CONFIG_L2CACHE); 7313 VERIFY(nvlist_add_nvlist_array(nvroot, config, list, 7314 sav->sav_count) == 0); 7315 for (i = 0; i < sav->sav_count; i++) 7316 nvlist_free(list[i]); 7317 kmem_free(list, sav->sav_count * sizeof (void *)); 7318 } 7319 7320 spa_sync_nvlist(spa, sav->sav_object, nvroot, tx); 7321 nvlist_free(nvroot); 7322 7323 sav->sav_sync = B_FALSE; 7324} 7325 7326/* 7327 * Rebuild spa's all-vdev ZAP from the vdev ZAPs indicated in each vdev_t. 7328 * The all-vdev ZAP must be empty. 7329 */ 7330static void 7331spa_avz_build(vdev_t *vd, uint64_t avz, dmu_tx_t *tx) 7332{ 7333 spa_t *spa = vd->vdev_spa; 7334 if (vd->vdev_top_zap != 0) { 7335 VERIFY0(zap_add_int(spa->spa_meta_objset, avz, 7336 vd->vdev_top_zap, tx)); 7337 } 7338 if (vd->vdev_leaf_zap != 0) { 7339 VERIFY0(zap_add_int(spa->spa_meta_objset, avz, 7340 vd->vdev_leaf_zap, tx)); 7341 } 7342 for (uint64_t i = 0; i < vd->vdev_children; i++) { 7343 spa_avz_build(vd->vdev_child[i], avz, tx); 7344 } 7345} 7346 7347static void 7348spa_sync_config_object(spa_t *spa, dmu_tx_t *tx) 7349{ 7350 nvlist_t *config; 7351 7352 /* 7353 * If the pool is being imported from a pre-per-vdev-ZAP version of ZFS, 7354 * its config may not be dirty but we still need to build per-vdev ZAPs. 7355 * Similarly, if the pool is being assembled (e.g. after a split), we 7356 * need to rebuild the AVZ although the config may not be dirty. 
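 *
 * For example, a pool assembled by spa_vdev_split_mirror() above arrives
 * here with spa_avz_action == AVZ_ACTION_REBUILD: a fresh AVZ is built
 * from the current vdev tree, diffed against the old object so that
 * stale per-vdev ZAPs are destroyed, and the pool directory entry is
 * repointed at the new AVZ.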
7357 */ 7358 if (list_is_empty(&spa->spa_config_dirty_list) && 7359 spa->spa_avz_action == AVZ_ACTION_NONE) 7360 return; 7361 7362 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER); 7363 7364 ASSERT(spa->spa_avz_action == AVZ_ACTION_NONE || 7365 spa->spa_avz_action == AVZ_ACTION_INITIALIZE || 7366 spa->spa_all_vdev_zaps != 0); 7367 7368 if (spa->spa_avz_action == AVZ_ACTION_REBUILD) { 7369 /* Make and build the new AVZ */ 7370 uint64_t new_avz = zap_create(spa->spa_meta_objset, 7371 DMU_OTN_ZAP_METADATA, DMU_OT_NONE, 0, tx); 7372 spa_avz_build(spa->spa_root_vdev, new_avz, tx); 7373 7374 /* Diff old AVZ with new one */ 7375 zap_cursor_t zc; 7376 zap_attribute_t za; 7377 7378 for (zap_cursor_init(&zc, spa->spa_meta_objset, 7379 spa->spa_all_vdev_zaps); 7380 zap_cursor_retrieve(&zc, &za) == 0; 7381 zap_cursor_advance(&zc)) { 7382 uint64_t vdzap = za.za_first_integer; 7383 if (zap_lookup_int(spa->spa_meta_objset, new_avz, 7384 vdzap) == ENOENT) { 7385 /* 7386 * ZAP is listed in old AVZ but not in new one; 7387 * destroy it 7388 */ 7389 VERIFY0(zap_destroy(spa->spa_meta_objset, vdzap, 7390 tx)); 7391 } 7392 } 7393 7394 zap_cursor_fini(&zc); 7395 7396 /* Destroy the old AVZ */ 7397 VERIFY0(zap_destroy(spa->spa_meta_objset, 7398 spa->spa_all_vdev_zaps, tx)); 7399 7400 /* Replace the old AVZ in the dir obj with the new one */ 7401 VERIFY0(zap_update(spa->spa_meta_objset, 7402 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_VDEV_ZAP_MAP, 7403 sizeof (new_avz), 1, &new_avz, tx)); 7404 7405 spa->spa_all_vdev_zaps = new_avz; 7406 } else if (spa->spa_avz_action == AVZ_ACTION_DESTROY) { 7407 zap_cursor_t zc; 7408 zap_attribute_t za; 7409 7410 /* Walk through the AVZ and destroy all listed ZAPs */ 7411 for (zap_cursor_init(&zc, spa->spa_meta_objset, 7412 spa->spa_all_vdev_zaps); 7413 zap_cursor_retrieve(&zc, &za) == 0; 7414 zap_cursor_advance(&zc)) { 7415 uint64_t zap = za.za_first_integer; 7416 VERIFY0(zap_destroy(spa->spa_meta_objset, zap, tx)); 7417 } 7418 7419 zap_cursor_fini(&zc); 7420 7421 /* Destroy and unlink the AVZ itself */ 7422 VERIFY0(zap_destroy(spa->spa_meta_objset, 7423 spa->spa_all_vdev_zaps, tx)); 7424 VERIFY0(zap_remove(spa->spa_meta_objset, 7425 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_VDEV_ZAP_MAP, tx)); 7426 spa->spa_all_vdev_zaps = 0; 7427 } 7428 7429 if (spa->spa_all_vdev_zaps == 0) { 7430 spa->spa_all_vdev_zaps = zap_create_link(spa->spa_meta_objset, 7431 DMU_OTN_ZAP_METADATA, DMU_POOL_DIRECTORY_OBJECT, 7432 DMU_POOL_VDEV_ZAP_MAP, tx); 7433 } 7434 spa->spa_avz_action = AVZ_ACTION_NONE; 7435 7436 /* Create ZAPs for vdevs that don't have them. */ 7437 vdev_construct_zaps(spa->spa_root_vdev, tx); 7438 7439 config = spa_config_generate(spa, spa->spa_root_vdev, 7440 dmu_tx_get_txg(tx), B_FALSE); 7441 7442 /* 7443 * If we're upgrading the spa version then make sure that 7444 * the config object gets updated with the correct version. 7445 */ 7446 if (spa->spa_ubsync.ub_version < spa->spa_uberblock.ub_version) 7447 fnvlist_add_uint64(config, ZPOOL_CONFIG_VERSION, 7448 spa->spa_uberblock.ub_version); 7449 7450 spa_config_exit(spa, SCL_STATE, FTAG); 7451 7452 nvlist_free(spa->spa_config_syncing); 7453 spa->spa_config_syncing = config; 7454 7455 spa_sync_nvlist(spa, spa->spa_config_object, config, tx); 7456} 7457 7458static void 7459spa_sync_version(void *arg, dmu_tx_t *tx) 7460{ 7461 uint64_t *versionp = arg; 7462 uint64_t version = *versionp; 7463 spa_t *spa = dmu_tx_pool(tx)->dp_spa; 7464 7465 /* 7466 * Setting the version is special cased when first creating the pool. 
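 *
 * For example (version numbers assumed), a "zpool upgrade" of a v28 pool
 * to version 5000 ends up running this sync task with *versionp == 5000:
 * the asserts below only allow moving the version forward, ub_version is
 * bumped, and dirtying the root vdev's config pushes the new version out
 * to the labels with this txg.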
7467 */
7468 ASSERT(tx->tx_txg != TXG_INITIAL);
7469
7470 ASSERT(SPA_VERSION_IS_SUPPORTED(version));
7471 ASSERT(version >= spa_version(spa));
7472
7473 spa->spa_uberblock.ub_version = version;
7474 vdev_config_dirty(spa->spa_root_vdev);
7475 spa_history_log_internal(spa, "set", tx, "version=%lld", version);
7476 }
7477
7478 /*
7479 * Set zpool properties.
7480 */
7481 static void
7482 spa_sync_props(void *arg, dmu_tx_t *tx)
7483 {
7484 nvlist_t *nvp = arg;
7485 spa_t *spa = dmu_tx_pool(tx)->dp_spa;
7486 objset_t *mos = spa->spa_meta_objset;
7487 nvpair_t *elem = NULL;
7488
7489 mutex_enter(&spa->spa_props_lock);
7490
7491 while ((elem = nvlist_next_nvpair(nvp, elem))) {
7492 uint64_t intval;
7493 char *strval, *fname;
7494 zpool_prop_t prop;
7495 const char *propname;
7496 zprop_type_t proptype;
7497 spa_feature_t fid;
7498
7499 switch (prop = zpool_name_to_prop(nvpair_name(elem))) {
7500 case ZPOOL_PROP_INVAL:
7501 /*
7502 * We checked this earlier in spa_prop_validate().
7503 */
7504 ASSERT(zpool_prop_feature(nvpair_name(elem)));
7505
7506 fname = strchr(nvpair_name(elem), '@') + 1;
7507 VERIFY0(zfeature_lookup_name(fname, &fid));
7508
7509 spa_feature_enable(spa, fid, tx);
7510 spa_history_log_internal(spa, "set", tx,
7511 "%s=enabled", nvpair_name(elem));
7512 break;
7513
7514 case ZPOOL_PROP_VERSION:
7515 intval = fnvpair_value_uint64(elem);
7516 /*
7517 * The version is synced separately before other
7518 * properties and should be correct by now.
7519 */
7520 ASSERT3U(spa_version(spa), >=, intval);
7521 break;
7522
7523 case ZPOOL_PROP_ALTROOT:
7524 /*
7525 * 'altroot' is a non-persistent property. It should
7526 * have been set temporarily at creation or import time.
7527 */
7528 ASSERT(spa->spa_root != NULL);
7529 break;
7530
7531 case ZPOOL_PROP_READONLY:
7532 case ZPOOL_PROP_CACHEFILE:
7533 /*
7534 * 'readonly' and 'cachefile' are also non-persistent
7535 * properties.
7536 */
7537 break;
7538 case ZPOOL_PROP_COMMENT:
7539 strval = fnvpair_value_string(elem);
7540 if (spa->spa_comment != NULL)
7541 spa_strfree(spa->spa_comment);
7542 spa->spa_comment = spa_strdup(strval);
7543 /*
7544 * We need to dirty the configuration on all the vdevs
7545 * so that their labels get updated. It's unnecessary
7546 * to do this for pool creation since the vdev's
7547 * configuration has already been dirtied.
7548 */
7549 if (tx->tx_txg != TXG_INITIAL)
7550 vdev_config_dirty(spa->spa_root_vdev);
7551 spa_history_log_internal(spa, "set", tx,
7552 "%s=%s", nvpair_name(elem), strval);
7553 break;
7554 default:
7555 /*
7556 * Set pool property values in the poolprops mos object.
7557 */ 7558 if (spa->spa_pool_props_object == 0) { 7559 spa->spa_pool_props_object = 7560 zap_create_link(mos, DMU_OT_POOL_PROPS, 7561 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_PROPS, 7562 tx); 7563 } 7564 7565 /* normalize the property name */ 7566 propname = zpool_prop_to_name(prop); 7567 proptype = zpool_prop_get_type(prop); 7568 7569 if (nvpair_type(elem) == DATA_TYPE_STRING) { 7570 ASSERT(proptype == PROP_TYPE_STRING); 7571 strval = fnvpair_value_string(elem); 7572 VERIFY0(zap_update(mos, 7573 spa->spa_pool_props_object, propname, 7574 1, strlen(strval) + 1, strval, tx)); 7575 spa_history_log_internal(spa, "set", tx, 7576 "%s=%s", nvpair_name(elem), strval); 7577 } else if (nvpair_type(elem) == DATA_TYPE_UINT64) { 7578 intval = fnvpair_value_uint64(elem); 7579 7580 if (proptype == PROP_TYPE_INDEX) { 7581 const char *unused; 7582 VERIFY0(zpool_prop_index_to_string( 7583 prop, intval, &unused)); 7584 } 7585 VERIFY0(zap_update(mos, 7586 spa->spa_pool_props_object, propname, 7587 8, 1, &intval, tx)); 7588 spa_history_log_internal(spa, "set", tx, 7589 "%s=%lld", nvpair_name(elem), intval); 7590 } else { 7591 ASSERT(0); /* not allowed */ 7592 } 7593 7594 switch (prop) { 7595 case ZPOOL_PROP_DELEGATION: 7596 spa->spa_delegation = intval; 7597 break; 7598 case ZPOOL_PROP_BOOTFS: 7599 spa->spa_bootfs = intval; 7600 break; 7601 case ZPOOL_PROP_FAILUREMODE: 7602 spa->spa_failmode = intval; 7603 break; 7604 case ZPOOL_PROP_AUTOEXPAND: 7605 spa->spa_autoexpand = intval; 7606 if (tx->tx_txg != TXG_INITIAL) 7607 spa_async_request(spa, 7608 SPA_ASYNC_AUTOEXPAND); 7609 break; 7610 case ZPOOL_PROP_DEDUPDITTO: 7611 spa->spa_dedup_ditto = intval; 7612 break; 7613 default: 7614 break; 7615 } 7616 } 7617 7618 } 7619 7620 mutex_exit(&spa->spa_props_lock); 7621} 7622 7623/* 7624 * Perform one-time upgrade on-disk changes. spa_version() does not 7625 * reflect the new version this txg, so there must be no changes this 7626 * txg to anything that the upgrade code depends on after it executes. 7627 * Therefore this must be called after dsl_pool_sync() does the sync 7628 * tasks. 
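 *
 * Each upgrade below is a one-shot: it fires in the txg where the version
 * in the last synced uberblock (spa_ubsync) is still below a threshold
 * while the in-core uberblock (spa_uberblock) has been raised to or above
 * it.  Schematically:
 *
 *	if (spa->spa_ubsync.ub_version < SPA_VERSION_X &&
 *	    spa->spa_uberblock.ub_version >= SPA_VERSION_X)
 *		perform_one_time_upgrade(dp, tx);
 *
 * (SPA_VERSION_X and perform_one_time_upgrade() are placeholders for the
 * real thresholds and upgrade calls that follow.)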
7629 */
7630static void
7631spa_sync_upgrades(spa_t *spa, dmu_tx_t *tx)
7632{
7633	dsl_pool_t *dp = spa->spa_dsl_pool;
7634
7635	ASSERT(spa->spa_sync_pass == 1);
7636
7637	rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG);
7638
7639	if (spa->spa_ubsync.ub_version < SPA_VERSION_ORIGIN &&
7640	    spa->spa_uberblock.ub_version >= SPA_VERSION_ORIGIN) {
7641		dsl_pool_create_origin(dp, tx);
7642
7643		/* Keeping the origin open increases spa_minref */
7644		spa->spa_minref += 3;
7645	}
7646
7647	if (spa->spa_ubsync.ub_version < SPA_VERSION_NEXT_CLONES &&
7648	    spa->spa_uberblock.ub_version >= SPA_VERSION_NEXT_CLONES) {
7649		dsl_pool_upgrade_clones(dp, tx);
7650	}
7651
7652	if (spa->spa_ubsync.ub_version < SPA_VERSION_DIR_CLONES &&
7653	    spa->spa_uberblock.ub_version >= SPA_VERSION_DIR_CLONES) {
7654		dsl_pool_upgrade_dir_clones(dp, tx);
7655
7656		/* Keeping the freedir open increases spa_minref */
7657		spa->spa_minref += 3;
7658	}
7659
7660	if (spa->spa_ubsync.ub_version < SPA_VERSION_FEATURES &&
7661	    spa->spa_uberblock.ub_version >= SPA_VERSION_FEATURES) {
7662		spa_feature_create_zap_objects(spa, tx);
7663	}
7664
7665	/*
7666	 * The LZ4_COMPRESS feature's behaviour was changed to
7667	 * activate_on_enable when the ability to use lz4 compression for
7668	 * metadata was added.  Old pools that have this feature enabled
7669	 * must be upgraded to have the feature active.
7670	 */
7671	if (spa->spa_uberblock.ub_version >= SPA_VERSION_FEATURES) {
7672		boolean_t lz4_en = spa_feature_is_enabled(spa,
7673		    SPA_FEATURE_LZ4_COMPRESS);
7674		boolean_t lz4_ac = spa_feature_is_active(spa,
7675		    SPA_FEATURE_LZ4_COMPRESS);
7676
7677		if (lz4_en && !lz4_ac)
7678			spa_feature_incr(spa, SPA_FEATURE_LZ4_COMPRESS, tx);
7679	}
7680
7681	/*
7682	 * If we haven't written the salt, do so now. Note that the
7683	 * feature may not be activated yet, but that's fine since
7684	 * the presence of this ZAP entry is backwards compatible.
7685	 */
7686	if (zap_contains(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
7687	    DMU_POOL_CHECKSUM_SALT) == ENOENT) {
7688		VERIFY0(zap_add(spa->spa_meta_objset,
7689		    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CHECKSUM_SALT, 1,
7690		    sizeof (spa->spa_cksum_salt.zcs_bytes),
7691		    spa->spa_cksum_salt.zcs_bytes, tx));
7692	}
7693
7694	rrw_exit(&dp->dp_config_rwlock, FTAG);
7695}
7696
7697static void
7698vdev_indirect_state_sync_verify(vdev_t *vd)
7699{
7700	vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
7701	vdev_indirect_births_t *vib = vd->vdev_indirect_births;
7702
7703	if (vd->vdev_ops == &vdev_indirect_ops) {
7704		ASSERT(vim != NULL);
7705		ASSERT(vib != NULL);
7706	}
7707
7708	if (vdev_obsolete_sm_object(vd) != 0) {
7709		ASSERT(vd->vdev_obsolete_sm != NULL);
7710		ASSERT(vd->vdev_removing ||
7711		    vd->vdev_ops == &vdev_indirect_ops);
7712		ASSERT(vdev_indirect_mapping_num_entries(vim) > 0);
7713		ASSERT(vdev_indirect_mapping_bytes_mapped(vim) > 0);
7714
7715		ASSERT3U(vdev_obsolete_sm_object(vd), ==,
7716		    space_map_object(vd->vdev_obsolete_sm));
7717		ASSERT3U(vdev_indirect_mapping_bytes_mapped(vim), >=,
7718		    space_map_allocated(vd->vdev_obsolete_sm));
7719	}
7720	ASSERT(vd->vdev_obsolete_segments != NULL);
7721
7722	/*
7723	 * Since frees / remaps to an indirect vdev can only
7724	 * happen in syncing context, the obsolete segments
7725	 * tree must be empty when we start syncing.
7726	 */
7727	ASSERT0(range_tree_space(vd->vdev_obsolete_segments));
7728}
7729
7730/*
7731 * Sync the specified transaction group. New blocks may be dirtied as
7732 * part of the process, so we iterate until it converges.
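 *
 * In outline, each pass of the convergence loop below syncs the config
 * object, the spare and l2cache aux configs, the error log, the DSL pool,
 * frees (which are deferred instead once spa_sync_pass reaches
 * zfs_sync_pass_deferred_free), the DDT, the scan state, and any vdevs on
 * this txg's dirty list, and the loop repeats while the MOS is still dirty
 * for the txg.  After convergence the uberblock and vdev labels are
 * rewritten to commit the txg.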
7733 */ 7734void 7735spa_sync(spa_t *spa, uint64_t txg) 7736{ 7737 dsl_pool_t *dp = spa->spa_dsl_pool; 7738 objset_t *mos = spa->spa_meta_objset; 7739 bplist_t *free_bpl = &spa->spa_free_bplist[txg & TXG_MASK]; 7740 vdev_t *rvd = spa->spa_root_vdev; 7741 vdev_t *vd; 7742 dmu_tx_t *tx; 7743 int error; 7744 uint32_t max_queue_depth = zfs_vdev_async_write_max_active * 7745 zfs_vdev_queue_depth_pct / 100; 7746 7747 VERIFY(spa_writeable(spa)); 7748 7749 /* 7750 * Wait for i/os issued in open context that need to complete 7751 * before this txg syncs. 7752 */ 7753 VERIFY0(zio_wait(spa->spa_txg_zio[txg & TXG_MASK])); 7754 spa->spa_txg_zio[txg & TXG_MASK] = zio_root(spa, NULL, NULL, 0); 7755 7756 /* 7757 * Lock out configuration changes. 7758 */ 7759 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); 7760 7761 spa->spa_syncing_txg = txg; 7762 spa->spa_sync_pass = 0; 7763 7764 mutex_enter(&spa->spa_alloc_lock); 7765 VERIFY0(avl_numnodes(&spa->spa_alloc_tree)); 7766 mutex_exit(&spa->spa_alloc_lock); 7767 7768 /* 7769 * If there are any pending vdev state changes, convert them 7770 * into config changes that go out with this transaction group. 7771 */ 7772 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER); 7773 while (list_head(&spa->spa_state_dirty_list) != NULL) { 7774 /* 7775 * We need the write lock here because, for aux vdevs, 7776 * calling vdev_config_dirty() modifies sav_config. 7777 * This is ugly and will become unnecessary when we 7778 * eliminate the aux vdev wart by integrating all vdevs 7779 * into the root vdev tree. 7780 */ 7781 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG); 7782 spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_WRITER); 7783 while ((vd = list_head(&spa->spa_state_dirty_list)) != NULL) { 7784 vdev_state_clean(vd); 7785 vdev_config_dirty(vd); 7786 } 7787 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG); 7788 spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER); 7789 } 7790 spa_config_exit(spa, SCL_STATE, FTAG); 7791 7792 tx = dmu_tx_create_assigned(dp, txg); 7793 7794 spa->spa_sync_starttime = gethrtime(); 7795#ifdef illumos 7796 VERIFY(cyclic_reprogram(spa->spa_deadman_cycid, 7797 spa->spa_sync_starttime + spa->spa_deadman_synctime)); 7798#else /* !illumos */ 7799#ifdef _KERNEL 7800 callout_schedule(&spa->spa_deadman_cycid, 7801 hz * spa->spa_deadman_synctime / NANOSEC); 7802#endif 7803#endif /* illumos */ 7804 7805 /* 7806 * If we are upgrading to SPA_VERSION_RAIDZ_DEFLATE this txg, 7807 * set spa_deflate if we have no raid-z vdevs. 7808 */ 7809 if (spa->spa_ubsync.ub_version < SPA_VERSION_RAIDZ_DEFLATE && 7810 spa->spa_uberblock.ub_version >= SPA_VERSION_RAIDZ_DEFLATE) { 7811 int i; 7812 7813 for (i = 0; i < rvd->vdev_children; i++) { 7814 vd = rvd->vdev_child[i]; 7815 if (vd->vdev_deflate_ratio != SPA_MINBLOCKSIZE) 7816 break; 7817 } 7818 if (i == rvd->vdev_children) { 7819 spa->spa_deflate = TRUE; 7820 VERIFY(0 == zap_add(spa->spa_meta_objset, 7821 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE, 7822 sizeof (uint64_t), 1, &spa->spa_deflate, tx)); 7823 } 7824 } 7825 7826 /* 7827 * Set the top-level vdev's max queue depth. Evaluate each 7828 * top-level's async write queue depth in case it changed. 7829 * The max queue depth will not change in the middle of syncing 7830 * out this txg. 
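	 *
	 * The per-vdev limit computed above is
	 *
	 *	max_queue_depth = zfs_vdev_async_write_max_active *
	 *	    zfs_vdev_queue_depth_pct / 100
	 *
	 * and the allocation throttle's total slot count is that limit
	 * summed over every initialized metaslab group in the normal class.
	 * With the usual defaults (assumed here: 10 active async writes,
	 * 1000%), that is 100 slots per top-level vdev, so a pool with four
	 * top-level vdevs would get 400 slots in total.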
7831 */ 7832 uint64_t queue_depth_total = 0; 7833 for (int c = 0; c < rvd->vdev_children; c++) { 7834 vdev_t *tvd = rvd->vdev_child[c]; 7835 metaslab_group_t *mg = tvd->vdev_mg; 7836 7837 if (mg == NULL || mg->mg_class != spa_normal_class(spa) || 7838 !metaslab_group_initialized(mg)) 7839 continue; 7840 7841 /* 7842 * It is safe to do a lock-free check here because only async 7843 * allocations look at mg_max_alloc_queue_depth, and async 7844 * allocations all happen from spa_sync(). 7845 */ 7846 ASSERT0(refcount_count(&mg->mg_alloc_queue_depth)); 7847 mg->mg_max_alloc_queue_depth = max_queue_depth; 7848 queue_depth_total += mg->mg_max_alloc_queue_depth; 7849 } 7850 metaslab_class_t *mc = spa_normal_class(spa); 7851 ASSERT0(refcount_count(&mc->mc_alloc_slots)); 7852 mc->mc_alloc_max_slots = queue_depth_total; 7853 mc->mc_alloc_throttle_enabled = zio_dva_throttle_enabled; 7854 7855 ASSERT3U(mc->mc_alloc_max_slots, <=, 7856 max_queue_depth * rvd->vdev_children); 7857 7858 for (int c = 0; c < rvd->vdev_children; c++) { 7859 vdev_t *vd = rvd->vdev_child[c]; 7860 vdev_indirect_state_sync_verify(vd); 7861 7862 if (vdev_indirect_should_condense(vd)) { 7863 spa_condense_indirect_start_sync(vd, tx); 7864 break; 7865 } 7866 } 7867 7868 /* 7869 * Iterate to convergence. 7870 */ 7871 do { 7872 int pass = ++spa->spa_sync_pass; 7873 7874 spa_sync_config_object(spa, tx); 7875 spa_sync_aux_dev(spa, &spa->spa_spares, tx, 7876 ZPOOL_CONFIG_SPARES, DMU_POOL_SPARES); 7877 spa_sync_aux_dev(spa, &spa->spa_l2cache, tx, 7878 ZPOOL_CONFIG_L2CACHE, DMU_POOL_L2CACHE); 7879 spa_errlog_sync(spa, txg); 7880 dsl_pool_sync(dp, txg); 7881 7882 if (pass < zfs_sync_pass_deferred_free) { 7883 spa_sync_frees(spa, free_bpl, tx); 7884 } else { 7885 /* 7886 * We can not defer frees in pass 1, because 7887 * we sync the deferred frees later in pass 1. 7888 */ 7889 ASSERT3U(pass, >, 1); 7890 bplist_iterate(free_bpl, bpobj_enqueue_cb, 7891 &spa->spa_deferred_bpobj, tx); 7892 } 7893 7894 ddt_sync(spa, txg); 7895 dsl_scan_sync(dp, tx); 7896 7897 if (spa->spa_vdev_removal != NULL) 7898 svr_sync(spa, tx); 7899 7900 while ((vd = txg_list_remove(&spa->spa_vdev_txg_list, txg)) 7901 != NULL) 7902 vdev_sync(vd, txg); 7903 7904 if (pass == 1) { 7905 spa_sync_upgrades(spa, tx); 7906 ASSERT3U(txg, >=, 7907 spa->spa_uberblock.ub_rootbp.blk_birth); 7908 /* 7909 * Note: We need to check if the MOS is dirty 7910 * because we could have marked the MOS dirty 7911 * without updating the uberblock (e.g. if we 7912 * have sync tasks but no dirty user data). We 7913 * need to check the uberblock's rootbp because 7914 * it is updated if we have synced out dirty 7915 * data (though in this case the MOS will most 7916 * likely also be dirty due to second order 7917 * effects, we don't want to rely on that here). 7918 */ 7919 if (spa->spa_uberblock.ub_rootbp.blk_birth < txg && 7920 !dmu_objset_is_dirty(mos, txg)) { 7921 /* 7922 * Nothing changed on the first pass, 7923 * therefore this TXG is a no-op. Avoid 7924 * syncing deferred frees, so that we 7925 * can keep this TXG as a no-op. 
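				 * (Syncing the deferred frees here would
				 * dirty the MOS again and force at least one
				 * more pass through this loop, so skipping
				 * them is what keeps an idle txg cheap.)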
7926 */ 7927 ASSERT(txg_list_empty(&dp->dp_dirty_datasets, 7928 txg)); 7929 ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg)); 7930 ASSERT(txg_list_empty(&dp->dp_sync_tasks, txg)); 7931 ASSERT(txg_list_empty(&dp->dp_early_sync_tasks, 7932 txg)); 7933 break; 7934 } 7935 spa_sync_deferred_frees(spa, tx); 7936 } 7937 7938 } while (dmu_objset_is_dirty(mos, txg)); 7939 7940 if (!list_is_empty(&spa->spa_config_dirty_list)) { 7941 /* 7942 * Make sure that the number of ZAPs for all the vdevs matches 7943 * the number of ZAPs in the per-vdev ZAP list. This only gets 7944 * called if the config is dirty; otherwise there may be 7945 * outstanding AVZ operations that weren't completed in 7946 * spa_sync_config_object. 7947 */ 7948 uint64_t all_vdev_zap_entry_count; 7949 ASSERT0(zap_count(spa->spa_meta_objset, 7950 spa->spa_all_vdev_zaps, &all_vdev_zap_entry_count)); 7951 ASSERT3U(vdev_count_verify_zaps(spa->spa_root_vdev), ==, 7952 all_vdev_zap_entry_count); 7953 } 7954 7955 if (spa->spa_vdev_removal != NULL) { 7956 ASSERT0(spa->spa_vdev_removal->svr_bytes_done[txg & TXG_MASK]); 7957 } 7958 7959 /* 7960 * Rewrite the vdev configuration (which includes the uberblock) 7961 * to commit the transaction group. 7962 * 7963 * If there are no dirty vdevs, we sync the uberblock to a few 7964 * random top-level vdevs that are known to be visible in the 7965 * config cache (see spa_vdev_add() for a complete description). 7966 * If there *are* dirty vdevs, sync the uberblock to all vdevs. 7967 */ 7968 for (;;) { 7969 /* 7970 * We hold SCL_STATE to prevent vdev open/close/etc. 7971 * while we're attempting to write the vdev labels. 7972 */ 7973 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER); 7974 7975 if (list_is_empty(&spa->spa_config_dirty_list)) { 7976 vdev_t *svd[SPA_SYNC_MIN_VDEVS] = { NULL }; 7977 int svdcount = 0; 7978 int children = rvd->vdev_children; 7979 int c0 = spa_get_random(children); 7980 7981 for (int c = 0; c < children; c++) { 7982 vd = rvd->vdev_child[(c0 + c) % children]; 7983 7984 /* Stop when revisiting the first vdev */ 7985 if (c > 0 && svd[0] == vd) 7986 break; 7987 7988 if (vd->vdev_ms_array == 0 || vd->vdev_islog || 7989 !vdev_is_concrete(vd)) 7990 continue; 7991 7992 svd[svdcount++] = vd; 7993 if (svdcount == SPA_SYNC_MIN_VDEVS) 7994 break; 7995 } 7996 error = vdev_config_sync(svd, svdcount, txg); 7997 } else { 7998 error = vdev_config_sync(rvd->vdev_child, 7999 rvd->vdev_children, txg); 8000 } 8001 8002 if (error == 0) 8003 spa->spa_last_synced_guid = rvd->vdev_guid; 8004 8005 spa_config_exit(spa, SCL_STATE, FTAG); 8006 8007 if (error == 0) 8008 break; 8009 zio_suspend(spa, NULL); 8010 zio_resume_wait(spa); 8011 } 8012 dmu_tx_commit(tx); 8013 8014#ifdef illumos 8015 VERIFY(cyclic_reprogram(spa->spa_deadman_cycid, CY_INFINITY)); 8016#else /* !illumos */ 8017#ifdef _KERNEL 8018 callout_drain(&spa->spa_deadman_cycid); 8019#endif 8020#endif /* illumos */ 8021 8022 /* 8023 * Clear the dirty config list. 8024 */ 8025 while ((vd = list_head(&spa->spa_config_dirty_list)) != NULL) 8026 vdev_config_clean(vd); 8027 8028 /* 8029 * Now that the new config has synced transactionally, 8030 * let it become visible to the config cache. 
8031 */ 8032 if (spa->spa_config_syncing != NULL) { 8033 spa_config_set(spa, spa->spa_config_syncing); 8034 spa->spa_config_txg = txg; 8035 spa->spa_config_syncing = NULL; 8036 } 8037 8038 dsl_pool_sync_done(dp, txg); 8039 8040 mutex_enter(&spa->spa_alloc_lock); 8041 VERIFY0(avl_numnodes(&spa->spa_alloc_tree)); 8042 mutex_exit(&spa->spa_alloc_lock); 8043 8044 /* 8045 * Update usable space statistics. 8046 */ 8047 while (vd = txg_list_remove(&spa->spa_vdev_txg_list, TXG_CLEAN(txg))) 8048 vdev_sync_done(vd, txg); 8049 8050 spa_update_dspace(spa); 8051 8052 /* 8053 * It had better be the case that we didn't dirty anything 8054 * since vdev_config_sync(). 8055 */ 8056 ASSERT(txg_list_empty(&dp->dp_dirty_datasets, txg)); 8057 ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg)); 8058 ASSERT(txg_list_empty(&spa->spa_vdev_txg_list, txg)); 8059 8060 while (zfs_pause_spa_sync) 8061 delay(1); 8062 8063 spa->spa_sync_pass = 0; 8064 8065 /* 8066 * Update the last synced uberblock here. We want to do this at 8067 * the end of spa_sync() so that consumers of spa_last_synced_txg() 8068 * will be guaranteed that all the processing associated with 8069 * that txg has been completed. 8070 */ 8071 spa->spa_ubsync = spa->spa_uberblock; 8072 spa_config_exit(spa, SCL_CONFIG, FTAG); 8073 8074 spa_handle_ignored_writes(spa); 8075 8076 /* 8077 * If any async tasks have been requested, kick them off. 8078 */ 8079 spa_async_dispatch(spa); 8080 spa_async_dispatch_vd(spa); 8081} 8082 8083/* 8084 * Sync all pools. We don't want to hold the namespace lock across these 8085 * operations, so we take a reference on the spa_t and drop the lock during the 8086 * sync. 8087 */ 8088void 8089spa_sync_allpools(void) 8090{ 8091 spa_t *spa = NULL; 8092 mutex_enter(&spa_namespace_lock); 8093 while ((spa = spa_next(spa)) != NULL) { 8094 if (spa_state(spa) != POOL_STATE_ACTIVE || 8095 !spa_writeable(spa) || spa_suspended(spa)) 8096 continue; 8097 spa_open_ref(spa, FTAG); 8098 mutex_exit(&spa_namespace_lock); 8099 txg_wait_synced(spa_get_dsl(spa), 0); 8100 mutex_enter(&spa_namespace_lock); 8101 spa_close(spa, FTAG); 8102 } 8103 mutex_exit(&spa_namespace_lock); 8104} 8105 8106/* 8107 * ========================================================================== 8108 * Miscellaneous routines 8109 * ========================================================================== 8110 */ 8111 8112/* 8113 * Remove all pools in the system. 8114 */ 8115void 8116spa_evict_all(void) 8117{ 8118 spa_t *spa; 8119 8120 /* 8121 * Remove all cached state. All pools should be closed now, 8122 * so every spa in the AVL tree should be unreferenced. 8123 */ 8124 mutex_enter(&spa_namespace_lock); 8125 while ((spa = spa_next(NULL)) != NULL) { 8126 /* 8127 * Stop async tasks. The async thread may need to detach 8128 * a device that's been replaced, which requires grabbing 8129 * spa_namespace_lock, so we must drop it here. 
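		 * The spa_open_ref()/spa_close() pair around the lock drop
		 * keeps this spa_t from being freed while spa_namespace_lock
		 * is released; spa_sync_allpools() above relies on the same
		 * pattern.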
8130 */ 8131 spa_open_ref(spa, FTAG); 8132 mutex_exit(&spa_namespace_lock); 8133 spa_async_suspend(spa); 8134 mutex_enter(&spa_namespace_lock); 8135 spa_close(spa, FTAG); 8136 8137 if (spa->spa_state != POOL_STATE_UNINITIALIZED) { 8138 spa_unload(spa); 8139 spa_deactivate(spa); 8140 } 8141 spa_remove(spa); 8142 } 8143 mutex_exit(&spa_namespace_lock); 8144} 8145 8146vdev_t * 8147spa_lookup_by_guid(spa_t *spa, uint64_t guid, boolean_t aux) 8148{ 8149 vdev_t *vd; 8150 int i; 8151 8152 if ((vd = vdev_lookup_by_guid(spa->spa_root_vdev, guid)) != NULL) 8153 return (vd); 8154 8155 if (aux) { 8156 for (i = 0; i < spa->spa_l2cache.sav_count; i++) { 8157 vd = spa->spa_l2cache.sav_vdevs[i]; 8158 if (vd->vdev_guid == guid) 8159 return (vd); 8160 } 8161 8162 for (i = 0; i < spa->spa_spares.sav_count; i++) { 8163 vd = spa->spa_spares.sav_vdevs[i]; 8164 if (vd->vdev_guid == guid) 8165 return (vd); 8166 } 8167 } 8168 8169 return (NULL); 8170} 8171 8172void 8173spa_upgrade(spa_t *spa, uint64_t version) 8174{ 8175 ASSERT(spa_writeable(spa)); 8176 8177 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 8178 8179 /* 8180 * This should only be called for a non-faulted pool, and since a 8181 * future version would result in an unopenable pool, this shouldn't be 8182 * possible. 8183 */ 8184 ASSERT(SPA_VERSION_IS_SUPPORTED(spa->spa_uberblock.ub_version)); 8185 ASSERT3U(version, >=, spa->spa_uberblock.ub_version); 8186 8187 spa->spa_uberblock.ub_version = version; 8188 vdev_config_dirty(spa->spa_root_vdev); 8189 8190 spa_config_exit(spa, SCL_ALL, FTAG); 8191 8192 txg_wait_synced(spa_get_dsl(spa), 0); 8193} 8194 8195boolean_t 8196spa_has_spare(spa_t *spa, uint64_t guid) 8197{ 8198 int i; 8199 uint64_t spareguid; 8200 spa_aux_vdev_t *sav = &spa->spa_spares; 8201 8202 for (i = 0; i < sav->sav_count; i++) 8203 if (sav->sav_vdevs[i]->vdev_guid == guid) 8204 return (B_TRUE); 8205 8206 for (i = 0; i < sav->sav_npending; i++) { 8207 if (nvlist_lookup_uint64(sav->sav_pending[i], ZPOOL_CONFIG_GUID, 8208 &spareguid) == 0 && spareguid == guid) 8209 return (B_TRUE); 8210 } 8211 8212 return (B_FALSE); 8213} 8214 8215/* 8216 * Check if a pool has an active shared spare device. 
8217 * Note: reference count of an active spare is 2, as a spare and as a replace 8218 */ 8219static boolean_t 8220spa_has_active_shared_spare(spa_t *spa) 8221{ 8222 int i, refcnt; 8223 uint64_t pool; 8224 spa_aux_vdev_t *sav = &spa->spa_spares; 8225 8226 for (i = 0; i < sav->sav_count; i++) { 8227 if (spa_spare_exists(sav->sav_vdevs[i]->vdev_guid, &pool, 8228 &refcnt) && pool != 0ULL && pool == spa_guid(spa) && 8229 refcnt > 2) 8230 return (B_TRUE); 8231 } 8232 8233 return (B_FALSE); 8234} 8235 8236sysevent_t * 8237spa_event_create(spa_t *spa, vdev_t *vd, nvlist_t *hist_nvl, const char *name) 8238{ 8239 sysevent_t *ev = NULL; 8240#ifdef _KERNEL 8241 sysevent_attr_list_t *attr = NULL; 8242 sysevent_value_t value; 8243 8244 ev = sysevent_alloc(EC_ZFS, (char *)name, SUNW_KERN_PUB "zfs", 8245 SE_SLEEP); 8246 ASSERT(ev != NULL); 8247 8248 value.value_type = SE_DATA_TYPE_STRING; 8249 value.value.sv_string = spa_name(spa); 8250 if (sysevent_add_attr(&attr, ZFS_EV_POOL_NAME, &value, SE_SLEEP) != 0) 8251 goto done; 8252 8253 value.value_type = SE_DATA_TYPE_UINT64; 8254 value.value.sv_uint64 = spa_guid(spa); 8255 if (sysevent_add_attr(&attr, ZFS_EV_POOL_GUID, &value, SE_SLEEP) != 0) 8256 goto done; 8257 8258 if (vd) { 8259 value.value_type = SE_DATA_TYPE_UINT64; 8260 value.value.sv_uint64 = vd->vdev_guid; 8261 if (sysevent_add_attr(&attr, ZFS_EV_VDEV_GUID, &value, 8262 SE_SLEEP) != 0) 8263 goto done; 8264 8265 if (vd->vdev_path) { 8266 value.value_type = SE_DATA_TYPE_STRING; 8267 value.value.sv_string = vd->vdev_path; 8268 if (sysevent_add_attr(&attr, ZFS_EV_VDEV_PATH, 8269 &value, SE_SLEEP) != 0) 8270 goto done; 8271 } 8272 } 8273 8274 if (hist_nvl != NULL) { 8275 fnvlist_merge((nvlist_t *)attr, hist_nvl); 8276 } 8277 8278 if (sysevent_attach_attributes(ev, attr) != 0) 8279 goto done; 8280 attr = NULL; 8281 8282done: 8283 if (attr) 8284 sysevent_free_attr(attr); 8285 8286#endif 8287 return (ev); 8288} 8289 8290void 8291spa_event_post(sysevent_t *ev) 8292{ 8293#ifdef _KERNEL 8294 sysevent_id_t eid; 8295 8296 (void) log_sysevent(ev, SE_SLEEP, &eid); 8297 sysevent_free(ev); 8298#endif 8299} 8300 8301void 8302spa_event_discard(sysevent_t *ev) 8303{ 8304#ifdef _KERNEL 8305 sysevent_free(ev); 8306#endif 8307} 8308 8309/* 8310 * Post a sysevent corresponding to the given event. The 'name' must be one of 8311 * the event definitions in sys/sysevent/eventdefs.h. The payload will be 8312 * filled in from the spa and (optionally) the vdev and history nvl. This 8313 * doesn't do anything in the userland libzpool, as we don't want consumers to 8314 * misinterpret ztest or zdb as real changes. 8315 */ 8316void 8317spa_event_notify(spa_t *spa, vdev_t *vd, nvlist_t *hist_nvl, const char *name) 8318{ 8319 spa_event_post(spa_event_create(spa, vd, hist_nvl, name)); 8320} 8321
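/*
 * Typical usage, for illustration only (event names come from
 * sys/sysevent/eventdefs.h and the real callers vary):
 *
 *	spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_CREATE);
 *	spa_event_notify(spa, vd, NULL, ESC_ZFS_VDEV_REMOVE);
 *
 * or, when the event has to be created before some state is torn down and
 * posted (or discarded) afterwards:
 *
 *	sysevent_t *ev = spa_event_create(spa, vd, NULL, ESC_ZFS_VDEV_REMOVE);
 *	...
 *	spa_event_post(ev);
 */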