spa.c revision 321567
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2017 by Delphix. All rights reserved.
 * Copyright (c) 2015, Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2013 Martin Matuska <mm@FreeBSD.org>. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 * Copyright 2013 Saso Kiselkov. All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 */

/*
 * SPA: Storage Pool Allocator
 *
 * This file contains all the routines used when modifying on-disk SPA state.
 * This includes opening, importing, destroying, exporting a pool, and syncing a
 * pool.
 */

#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/ddt.h>
#include <sys/vdev_impl.h>
#include <sys/metaslab.h>
#include <sys/metaslab_impl.h>
#include <sys/uberblock_impl.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/dmu_traverse.h>
#include <sys/dmu_objset.h>
#include <sys/unique.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_synctask.h>
#include <sys/fs/zfs.h>
#include <sys/arc.h>
#include <sys/callb.h>
#include <sys/spa_boot.h>
#include <sys/zfs_ioctl.h>
#include <sys/dsl_scan.h>
#include <sys/dmu_send.h>
#include <sys/dsl_destroy.h>
#include <sys/dsl_userhold.h>
#include <sys/zfeature.h>
#include <sys/zvol.h>
#include <sys/trim_map.h>

#ifdef _KERNEL
#include <sys/callb.h>
#include <sys/cpupart.h>
#include <sys/zone.h>
#endif /* _KERNEL */

#include "zfs_prop.h"
#include "zfs_comutil.h"

/* Check hostid on import? */
static int check_hostid = 1;

/*
 * The interval, in seconds, at which failed configuration cache file writes
 * should be retried.
 */
static int zfs_ccw_retry_interval = 300;

SYSCTL_DECL(_vfs_zfs);
SYSCTL_INT(_vfs_zfs, OID_AUTO, check_hostid, CTLFLAG_RWTUN, &check_hostid, 0,
    "Check hostid on import?");
TUNABLE_INT("vfs.zfs.ccw_retry_interval", &zfs_ccw_retry_interval);
SYSCTL_INT(_vfs_zfs, OID_AUTO, ccw_retry_interval, CTLFLAG_RW,
    &zfs_ccw_retry_interval, 0,
    "Configuration cache file write, retry after failure, interval (seconds)");

typedef enum zti_modes {
	ZTI_MODE_FIXED,			/* value is # of threads (min 1) */
	ZTI_MODE_BATCH,			/* cpu-intensive; value is ignored */
	ZTI_MODE_NULL,			/* don't create a taskq */
	ZTI_NMODES
} zti_modes_t;

#define	ZTI_P(n, q)	{ ZTI_MODE_FIXED, (n), (q) }
#define	ZTI_BATCH	{ ZTI_MODE_BATCH, 0, 1 }
#define	ZTI_NULL	{ ZTI_MODE_NULL, 0, 0 }

#define	ZTI_N(n)	ZTI_P(n, 1)
#define	ZTI_ONE		ZTI_N(1)

typedef struct zio_taskq_info {
	zti_modes_t zti_mode;
	uint_t zti_value;
	uint_t zti_count;
} zio_taskq_info_t;

static const char *const zio_taskq_types[ZIO_TASKQ_TYPES] = {
	"issue", "issue_high", "intr", "intr_high"
};

/*
 * This table defines the taskq settings for each ZFS I/O type.
 * When initializing a pool, we use this table to create an appropriately sized
 * taskq. Some operations are low volume and therefore have a small, static
 * number of threads assigned to their taskqs using the ZTI_N(#) or ZTI_ONE
 * macros. Other operations process a large amount of data; the ZTI_BATCH
 * macro causes us to create a taskq oriented for throughput. Some operations
 * are so high frequency and short-lived that the taskq itself can become a
 * point of lock contention. The ZTI_P(#, #) macro indicates that we need an
 * additional degree of parallelism specified by the number of threads per-
 * taskq and the number of taskqs; when dispatching an event in this case, the
 * particular taskq is chosen at random.
 *
 * The different taskq priorities are to handle the different contexts (issue
 * and interrupt) and then to reserve threads for ZIO_PRIORITY_NOW I/Os that
 * need to be handled with minimum delay.
 */
const zio_taskq_info_t zio_taskqs[ZIO_TYPES][ZIO_TASKQ_TYPES] = {
	/* ISSUE	ISSUE_HIGH	INTR		INTR_HIGH */
	{ ZTI_ONE,	ZTI_NULL,	ZTI_ONE,	ZTI_NULL }, /* NULL */
	{ ZTI_N(8),	ZTI_NULL,	ZTI_P(12, 8),	ZTI_NULL }, /* READ */
	{ ZTI_BATCH,	ZTI_N(5),	ZTI_N(8),	ZTI_N(5) }, /* WRITE */
	{ ZTI_P(12, 8),	ZTI_NULL,	ZTI_ONE,	ZTI_NULL }, /* FREE */
	{ ZTI_ONE,	ZTI_NULL,	ZTI_ONE,	ZTI_NULL }, /* CLAIM */
	{ ZTI_ONE,	ZTI_NULL,	ZTI_ONE,	ZTI_NULL }, /* IOCTL */
};

static sysevent_t *spa_event_create(spa_t *spa, vdev_t *vd, const char *name);
static void spa_event_post(sysevent_t *ev);
static void spa_sync_version(void *arg, dmu_tx_t *tx);
static void spa_sync_props(void *arg, dmu_tx_t *tx);
static boolean_t spa_has_active_shared_spare(spa_t *spa);
static int spa_load_impl(spa_t *spa, uint64_t, nvlist_t *config,
    spa_load_state_t state, spa_import_type_t type, boolean_t mosconfig,
    char **ereport);
static void spa_vdev_resilver_done(spa_t *spa);

uint_t		zio_taskq_batch_pct = 75;	/* 1 thread per cpu in pset */
#ifdef PSRSET_BIND
id_t		zio_taskq_psrset_bind = PS_NONE;
#endif
#ifdef SYSDC
boolean_t	zio_taskq_sysdc = B_TRUE;	/* use SDC scheduling class */
uint_t		zio_taskq_basedc = 80;		/* base duty cycle */
#endif

boolean_t	spa_create_process = B_TRUE;	/* no process ==> no sysdc */
extern int	zfs_sync_pass_deferred_free;

/*
 * This (illegal) pool name is used when temporarily importing a spa_t in order
 * to get the vdev stats associated with the imported devices.
 */
#define	TRYIMPORT_NAME	"$import"

/*
 * ==========================================================================
 * SPA properties routines
 * ==========================================================================
 */

/*
 * Add a (source=src, propname=propval) list to an nvlist.
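 *
 * Each entry added here is itself an nvlist holding a ZPROP_SOURCE and a
 * ZPROP_VALUE pair; callers such as spa_prop_get_config() below pass a
 * string value in 'strval' or, when 'strval' is NULL, a numeric value in
 * 'intval'.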
 */
static void
spa_prop_add_list(nvlist_t *nvl, zpool_prop_t prop, char *strval,
    uint64_t intval, zprop_source_t src)
{
	const char *propname = zpool_prop_to_name(prop);
	nvlist_t *propval;

	VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP) == 0);
	VERIFY(nvlist_add_uint64(propval, ZPROP_SOURCE, src) == 0);

	if (strval != NULL)
		VERIFY(nvlist_add_string(propval, ZPROP_VALUE, strval) == 0);
	else
		VERIFY(nvlist_add_uint64(propval, ZPROP_VALUE, intval) == 0);

	VERIFY(nvlist_add_nvlist(nvl, propname, propval) == 0);
	nvlist_free(propval);
}

/*
 * Get property values from the spa configuration.
 */
static void
spa_prop_get_config(spa_t *spa, nvlist_t **nvp)
{
	vdev_t *rvd = spa->spa_root_vdev;
	dsl_pool_t *pool = spa->spa_dsl_pool;
	uint64_t size, alloc, cap, version;
	zprop_source_t src = ZPROP_SRC_NONE;
	spa_config_dirent_t *dp;
	metaslab_class_t *mc = spa_normal_class(spa);

	ASSERT(MUTEX_HELD(&spa->spa_props_lock));

	if (rvd != NULL) {
		alloc = metaslab_class_get_alloc(spa_normal_class(spa));
		size = metaslab_class_get_space(spa_normal_class(spa));
		spa_prop_add_list(*nvp, ZPOOL_PROP_NAME, spa_name(spa), 0, src);
		spa_prop_add_list(*nvp, ZPOOL_PROP_SIZE, NULL, size, src);
		spa_prop_add_list(*nvp, ZPOOL_PROP_ALLOCATED, NULL, alloc, src);
		spa_prop_add_list(*nvp, ZPOOL_PROP_FREE, NULL,
		    size - alloc, src);

		spa_prop_add_list(*nvp, ZPOOL_PROP_FRAGMENTATION, NULL,
		    metaslab_class_fragmentation(mc), src);
		spa_prop_add_list(*nvp, ZPOOL_PROP_EXPANDSZ, NULL,
		    metaslab_class_expandable_space(mc), src);
		spa_prop_add_list(*nvp, ZPOOL_PROP_READONLY, NULL,
		    (spa_mode(spa) == FREAD), src);

		cap = (size == 0) ? 0 : (alloc * 100 / size);
		spa_prop_add_list(*nvp, ZPOOL_PROP_CAPACITY, NULL, cap, src);

		spa_prop_add_list(*nvp, ZPOOL_PROP_DEDUPRATIO, NULL,
		    ddt_get_pool_dedup_ratio(spa), src);

		spa_prop_add_list(*nvp, ZPOOL_PROP_HEALTH, NULL,
		    rvd->vdev_state, src);

		version = spa_version(spa);
		if (version == zpool_prop_default_numeric(ZPOOL_PROP_VERSION))
			src = ZPROP_SRC_DEFAULT;
		else
			src = ZPROP_SRC_LOCAL;
		spa_prop_add_list(*nvp, ZPOOL_PROP_VERSION, NULL, version, src);
	}

	if (pool != NULL) {
		/*
		 * The $FREE directory was introduced in SPA_VERSION_DEADLISTS,
		 * when opening pools before this version freedir will be NULL.
		 */
		if (pool->dp_free_dir != NULL) {
			spa_prop_add_list(*nvp, ZPOOL_PROP_FREEING, NULL,
			    dsl_dir_phys(pool->dp_free_dir)->dd_used_bytes,
			    src);
		} else {
			spa_prop_add_list(*nvp, ZPOOL_PROP_FREEING,
			    NULL, 0, src);
		}

		if (pool->dp_leak_dir != NULL) {
			spa_prop_add_list(*nvp, ZPOOL_PROP_LEAKED, NULL,
			    dsl_dir_phys(pool->dp_leak_dir)->dd_used_bytes,
			    src);
		} else {
			spa_prop_add_list(*nvp, ZPOOL_PROP_LEAKED,
			    NULL, 0, src);
		}
	}

	spa_prop_add_list(*nvp, ZPOOL_PROP_GUID, NULL, spa_guid(spa), src);

	if (spa->spa_comment != NULL) {
		spa_prop_add_list(*nvp, ZPOOL_PROP_COMMENT, spa->spa_comment,
		    0, ZPROP_SRC_LOCAL);
	}

	if (spa->spa_root != NULL)
		spa_prop_add_list(*nvp, ZPOOL_PROP_ALTROOT, spa->spa_root,
		    0, ZPROP_SRC_LOCAL);

	if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_BLOCKS)) {
		spa_prop_add_list(*nvp, ZPOOL_PROP_MAXBLOCKSIZE, NULL,
		    MIN(zfs_max_recordsize, SPA_MAXBLOCKSIZE), ZPROP_SRC_NONE);
	} else {
		spa_prop_add_list(*nvp, ZPOOL_PROP_MAXBLOCKSIZE, NULL,
		    SPA_OLD_MAXBLOCKSIZE, ZPROP_SRC_NONE);
	}

	if ((dp = list_head(&spa->spa_config_list)) != NULL) {
		if (dp->scd_path == NULL) {
			spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
			    "none", 0, ZPROP_SRC_LOCAL);
		} else if (strcmp(dp->scd_path, spa_config_path) != 0) {
			spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
			    dp->scd_path, 0, ZPROP_SRC_LOCAL);
		}
	}
}

/*
 * Get zpool property values.
 */
int
spa_prop_get(spa_t *spa, nvlist_t **nvp)
{
	objset_t *mos = spa->spa_meta_objset;
	zap_cursor_t zc;
	zap_attribute_t za;
	int err;

	VERIFY(nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP) == 0);

	mutex_enter(&spa->spa_props_lock);

	/*
	 * Get properties from the spa config.
	 */
	spa_prop_get_config(spa, nvp);

	/* If no pool property object, no more prop to get. */
	if (mos == NULL || spa->spa_pool_props_object == 0) {
		mutex_exit(&spa->spa_props_lock);
		return (0);
	}

	/*
	 * Get properties from the MOS pool property object.
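	 * Integer-valued entries (za_integer_length == 8) are read directly
	 * from the ZAP cursor, while string-valued entries
	 * (za_integer_length == 1) require a separate zap_lookup().  The
	 * bootfs object number is translated into a dataset name before it
	 * is added to the list.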
	 */
	for (zap_cursor_init(&zc, mos, spa->spa_pool_props_object);
	    (err = zap_cursor_retrieve(&zc, &za)) == 0;
	    zap_cursor_advance(&zc)) {
		uint64_t intval = 0;
		char *strval = NULL;
		zprop_source_t src = ZPROP_SRC_DEFAULT;
		zpool_prop_t prop;

		if ((prop = zpool_name_to_prop(za.za_name)) == ZPROP_INVAL)
			continue;

		switch (za.za_integer_length) {
		case 8:
			/* integer property */
			if (za.za_first_integer !=
			    zpool_prop_default_numeric(prop))
				src = ZPROP_SRC_LOCAL;

			if (prop == ZPOOL_PROP_BOOTFS) {
				dsl_pool_t *dp;
				dsl_dataset_t *ds = NULL;

				dp = spa_get_dsl(spa);
				dsl_pool_config_enter(dp, FTAG);
				if (err = dsl_dataset_hold_obj(dp,
				    za.za_first_integer, FTAG, &ds)) {
					dsl_pool_config_exit(dp, FTAG);
					break;
				}

				strval = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN,
				    KM_SLEEP);
				dsl_dataset_name(ds, strval);
				dsl_dataset_rele(ds, FTAG);
				dsl_pool_config_exit(dp, FTAG);
			} else {
				strval = NULL;
				intval = za.za_first_integer;
			}

			spa_prop_add_list(*nvp, prop, strval, intval, src);

			if (strval != NULL)
				kmem_free(strval, ZFS_MAX_DATASET_NAME_LEN);

			break;

		case 1:
			/* string property */
			strval = kmem_alloc(za.za_num_integers, KM_SLEEP);
			err = zap_lookup(mos, spa->spa_pool_props_object,
			    za.za_name, 1, za.za_num_integers, strval);
			if (err) {
				kmem_free(strval, za.za_num_integers);
				break;
			}
			spa_prop_add_list(*nvp, prop, strval, 0, src);
			kmem_free(strval, za.za_num_integers);
			break;

		default:
			break;
		}
	}
	zap_cursor_fini(&zc);
	mutex_exit(&spa->spa_props_lock);
out:
	if (err && err != ENOENT) {
		nvlist_free(*nvp);
		*nvp = NULL;
		return (err);
	}

	return (0);
}

/*
 * Validate the given pool properties nvlist and modify the list
 * for the property values to be set.
 */
static int
spa_prop_validate(spa_t *spa, nvlist_t *props)
{
	nvpair_t *elem;
	int error = 0, reset_bootfs = 0;
	uint64_t objnum = 0;
	boolean_t has_feature = B_FALSE;

	elem = NULL;
	while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
		uint64_t intval;
		char *strval, *slash, *check, *fname;
		const char *propname = nvpair_name(elem);
		zpool_prop_t prop = zpool_name_to_prop(propname);

		switch (prop) {
		case ZPROP_INVAL:
			if (!zpool_prop_feature(propname)) {
				error = SET_ERROR(EINVAL);
				break;
			}

			/*
			 * Sanitize the input.
			 */
			if (nvpair_type(elem) != DATA_TYPE_UINT64) {
				error = SET_ERROR(EINVAL);
				break;
			}

			if (nvpair_value_uint64(elem, &intval) != 0) {
				error = SET_ERROR(EINVAL);
				break;
			}

			if (intval != 0) {
				error = SET_ERROR(EINVAL);
				break;
			}

			fname = strchr(propname, '@') + 1;
			if (zfeature_lookup_name(fname, NULL) != 0) {
				error = SET_ERROR(EINVAL);
				break;
			}

			has_feature = B_TRUE;
			break;

		case ZPOOL_PROP_VERSION:
			error = nvpair_value_uint64(elem, &intval);
			if (!error &&
			    (intval < spa_version(spa) ||
			    intval > SPA_VERSION_BEFORE_FEATURES ||
			    has_feature))
				error = SET_ERROR(EINVAL);
			break;

		case ZPOOL_PROP_DELEGATION:
		case ZPOOL_PROP_AUTOREPLACE:
		case ZPOOL_PROP_LISTSNAPS:
		case ZPOOL_PROP_AUTOEXPAND:
			error = nvpair_value_uint64(elem, &intval);
			if (!error && intval > 1)
				error = SET_ERROR(EINVAL);
			break;

		case ZPOOL_PROP_BOOTFS:
			/*
			 * If the pool version is less than SPA_VERSION_BOOTFS,
			 * or the pool is still being created (version == 0),
			 * the bootfs property cannot be set.
			 */
			if (spa_version(spa) < SPA_VERSION_BOOTFS) {
				error = SET_ERROR(ENOTSUP);
				break;
			}

			/*
			 * Make sure the vdev config is bootable
			 */
			if (!vdev_is_bootable(spa->spa_root_vdev)) {
				error = SET_ERROR(ENOTSUP);
				break;
			}

			reset_bootfs = 1;

			error = nvpair_value_string(elem, &strval);

			if (!error) {
				objset_t *os;
				uint64_t propval;

				if (strval == NULL || strval[0] == '\0') {
					objnum = zpool_prop_default_numeric(
					    ZPOOL_PROP_BOOTFS);
					break;
				}

				if (error = dmu_objset_hold(strval, FTAG, &os))
					break;

				/*
				 * Must be ZPL, and its property settings
				 * must be supported by GRUB (compression
				 * is not gzip, and large blocks are not used).
				 */

				if (dmu_objset_type(os) != DMU_OST_ZFS) {
					error = SET_ERROR(ENOTSUP);
				} else if ((error =
				    dsl_prop_get_int_ds(dmu_objset_ds(os),
				    zfs_prop_to_name(ZFS_PROP_COMPRESSION),
				    &propval)) == 0 &&
				    !BOOTFS_COMPRESS_VALID(propval)) {
					error = SET_ERROR(ENOTSUP);
				} else if ((error =
				    dsl_prop_get_int_ds(dmu_objset_ds(os),
				    zfs_prop_to_name(ZFS_PROP_RECORDSIZE),
				    &propval)) == 0 &&
				    propval > SPA_OLD_MAXBLOCKSIZE) {
					error = SET_ERROR(ENOTSUP);
				} else {
					objnum = dmu_objset_id(os);
				}
				dmu_objset_rele(os, FTAG);
			}
			break;

		case ZPOOL_PROP_FAILUREMODE:
			error = nvpair_value_uint64(elem, &intval);
			if (!error && (intval < ZIO_FAILURE_MODE_WAIT ||
			    intval > ZIO_FAILURE_MODE_PANIC))
				error = SET_ERROR(EINVAL);

			/*
			 * This is a special case which only occurs when
			 * the pool has completely failed. This allows
			 * the user to change the in-core failmode property
			 * without syncing it out to disk (I/Os might
			 * currently be blocked). We do this by returning
			 * EIO to the caller (spa_prop_set) to trick it
			 * into thinking we encountered a property validation
			 * error.
			 */
			if (!error && spa_suspended(spa)) {
				spa->spa_failmode = intval;
				error = SET_ERROR(EIO);
			}
			break;

		case ZPOOL_PROP_CACHEFILE:
			if ((error = nvpair_value_string(elem, &strval)) != 0)
				break;

			if (strval[0] == '\0')
				break;

			if (strcmp(strval, "none") == 0)
				break;

			if (strval[0] != '/') {
				error = SET_ERROR(EINVAL);
				break;
			}

			slash = strrchr(strval, '/');
			ASSERT(slash != NULL);

			if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
			    strcmp(slash, "/..") == 0)
				error = SET_ERROR(EINVAL);
			break;

		case ZPOOL_PROP_COMMENT:
			if ((error = nvpair_value_string(elem, &strval)) != 0)
				break;
			for (check = strval; *check != '\0'; check++) {
				/*
				 * The kernel doesn't have an easy isprint()
				 * check.  For this kernel check, we merely
				 * check ASCII apart from DEL.  Fix this if
				 * there is an easy-to-use kernel isprint().
				 */
				if (*check >= 0x7f) {
					error = SET_ERROR(EINVAL);
					break;
				}
			}
			if (strlen(strval) > ZPROP_MAX_COMMENT)
				error = E2BIG;
			break;

		case ZPOOL_PROP_DEDUPDITTO:
			if (spa_version(spa) < SPA_VERSION_DEDUP)
				error = SET_ERROR(ENOTSUP);
			else
				error = nvpair_value_uint64(elem, &intval);
			if (error == 0 &&
			    intval != 0 && intval < ZIO_DEDUPDITTO_MIN)
				error = SET_ERROR(EINVAL);
			break;
		}

		if (error)
			break;
	}

	if (!error && reset_bootfs) {
		error = nvlist_remove(props,
		    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), DATA_TYPE_STRING);

		if (!error) {
			error = nvlist_add_uint64(props,
			    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), objnum);
		}
	}

	return (error);
}

void
spa_configfile_set(spa_t *spa, nvlist_t *nvp, boolean_t need_sync)
{
	char *cachefile;
	spa_config_dirent_t *dp;

	if (nvlist_lookup_string(nvp, zpool_prop_to_name(ZPOOL_PROP_CACHEFILE),
	    &cachefile) != 0)
		return;

	dp = kmem_alloc(sizeof (spa_config_dirent_t),
	    KM_SLEEP);

	if (cachefile[0] == '\0')
		dp->scd_path = spa_strdup(spa_config_path);
	else if (strcmp(cachefile, "none") == 0)
		dp->scd_path = NULL;
	else
		dp->scd_path = spa_strdup(cachefile);

	list_insert_head(&spa->spa_config_list, dp);
	if (need_sync)
		spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
}

int
spa_prop_set(spa_t *spa, nvlist_t *nvp)
{
	int error;
	nvpair_t *elem = NULL;
	boolean_t need_sync = B_FALSE;

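	/* Validate everything up front; nothing is synced if any value fails. */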
	if ((error = spa_prop_validate(spa, nvp)) != 0)
		return (error);

	while ((elem = nvlist_next_nvpair(nvp, elem)) != NULL) {
		zpool_prop_t prop = zpool_name_to_prop(nvpair_name(elem));

		if (prop == ZPOOL_PROP_CACHEFILE ||
		    prop == ZPOOL_PROP_ALTROOT ||
		    prop == ZPOOL_PROP_READONLY)
			continue;

		if (prop == ZPOOL_PROP_VERSION || prop == ZPROP_INVAL) {
			uint64_t ver;

			if (prop == ZPOOL_PROP_VERSION) {
				VERIFY(nvpair_value_uint64(elem, &ver) == 0);
			} else {
				ASSERT(zpool_prop_feature(nvpair_name(elem)));
				ver = SPA_VERSION_FEATURES;
				need_sync = B_TRUE;
			}

			/* Save time if the version is already set. */
			if (ver == spa_version(spa))
				continue;

			/*
			 * In addition to the pool directory object, we might
			 * create the pool properties object, the features for
			 * read object, the features for write object, or the
			 * feature descriptions object.
			 */
			error = dsl_sync_task(spa->spa_name, NULL,
			    spa_sync_version, &ver,
			    6, ZFS_SPACE_CHECK_RESERVED);
			if (error)
				return (error);
			continue;
		}

		need_sync = B_TRUE;
		break;
	}

	if (need_sync) {
		return (dsl_sync_task(spa->spa_name, NULL, spa_sync_props,
		    nvp, 6, ZFS_SPACE_CHECK_RESERVED));
	}

	return (0);
}

/*
 * If the bootfs property value is dsobj, clear it.
 */
void
spa_prop_clear_bootfs(spa_t *spa, uint64_t dsobj, dmu_tx_t *tx)
{
	if (spa->spa_bootfs == dsobj && spa->spa_pool_props_object != 0) {
		VERIFY(zap_remove(spa->spa_meta_objset,
		    spa->spa_pool_props_object,
		    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), tx) == 0);
		spa->spa_bootfs = 0;
	}
}

/*ARGSUSED*/
static int
spa_change_guid_check(void *arg, dmu_tx_t *tx)
{
	uint64_t *newguid = arg;
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
	vdev_t *rvd = spa->spa_root_vdev;
	uint64_t vdev_state;

	spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
	vdev_state = rvd->vdev_state;
	spa_config_exit(spa, SCL_STATE, FTAG);

	if (vdev_state != VDEV_STATE_HEALTHY)
		return (SET_ERROR(ENXIO));

	ASSERT3U(spa_guid(spa), !=, *newguid);

	return (0);
}

static void
spa_change_guid_sync(void *arg, dmu_tx_t *tx)
{
	uint64_t *newguid = arg;
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
	uint64_t oldguid;
	vdev_t *rvd = spa->spa_root_vdev;

	oldguid = spa_guid(spa);

	spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
	rvd->vdev_guid = *newguid;
	rvd->vdev_guid_sum += (*newguid - oldguid);
	vdev_config_dirty(rvd);
	spa_config_exit(spa, SCL_STATE, FTAG);

	spa_history_log_internal(spa, "guid change", tx, "old=%llu new=%llu",
	    oldguid, *newguid);
}

/*
 * Change the GUID for the pool.  This is done so that we can later
 * re-import a pool built from a clone of our own vdevs.  We will modify
 * the root vdev's guid, our own pool guid, and then mark all of our
 * vdevs dirty.  Note that we must make sure that all our vdevs are
 * online when we do this, or else any vdevs that weren't present
 * would be orphaned from our pool.  We are also going to issue a
 * sysevent to update any watchers.
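 *
 * Both spa_vdev_top_lock and spa_namespace_lock are held across the
 * operation, so concurrent top-level vdev or namespace changes cannot race
 * with the GUID update.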
 */
int
spa_change_guid(spa_t *spa)
{
	int error;
	uint64_t guid;

	mutex_enter(&spa->spa_vdev_top_lock);
	mutex_enter(&spa_namespace_lock);
	guid = spa_generate_guid(NULL);

	error = dsl_sync_task(spa->spa_name, spa_change_guid_check,
	    spa_change_guid_sync, &guid, 5, ZFS_SPACE_CHECK_RESERVED);

	if (error == 0) {
		spa_config_sync(spa, B_FALSE, B_TRUE);
		spa_event_notify(spa, NULL, ESC_ZFS_POOL_REGUID);
	}

	mutex_exit(&spa_namespace_lock);
	mutex_exit(&spa->spa_vdev_top_lock);

	return (error);
}

/*
 * ==========================================================================
 * SPA state manipulation (open/create/destroy/import/export)
 * ==========================================================================
 */

static int
spa_error_entry_compare(const void *a, const void *b)
{
	spa_error_entry_t *sa = (spa_error_entry_t *)a;
	spa_error_entry_t *sb = (spa_error_entry_t *)b;
	int ret;

	ret = bcmp(&sa->se_bookmark, &sb->se_bookmark,
	    sizeof (zbookmark_phys_t));

	if (ret < 0)
		return (-1);
	else if (ret > 0)
		return (1);
	else
		return (0);
}

/*
 * Utility function which retrieves copies of the current logs and
 * re-initializes them in the process.
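 * The caller must hold spa_errlist_lock, as asserted below.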
 */
void
spa_get_errlists(spa_t *spa, avl_tree_t *last, avl_tree_t *scrub)
{
	ASSERT(MUTEX_HELD(&spa->spa_errlist_lock));

	bcopy(&spa->spa_errlist_last, last, sizeof (avl_tree_t));
	bcopy(&spa->spa_errlist_scrub, scrub, sizeof (avl_tree_t));

	avl_create(&spa->spa_errlist_scrub,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
	avl_create(&spa->spa_errlist_last,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
}

static void
spa_taskqs_init(spa_t *spa, zio_type_t t, zio_taskq_type_t q)
{
	const zio_taskq_info_t *ztip = &zio_taskqs[t][q];
	enum zti_modes mode = ztip->zti_mode;
	uint_t value = ztip->zti_value;
	uint_t count = ztip->zti_count;
	spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
	char name[32];
	uint_t flags = 0;
	boolean_t batch = B_FALSE;

	if (mode == ZTI_MODE_NULL) {
		tqs->stqs_count = 0;
		tqs->stqs_taskq = NULL;
		return;
	}

	ASSERT3U(count, >, 0);

	tqs->stqs_count = count;
	tqs->stqs_taskq = kmem_alloc(count * sizeof (taskq_t *), KM_SLEEP);

	switch (mode) {
	case ZTI_MODE_FIXED:
		ASSERT3U(value, >=, 1);
		value = MAX(value, 1);
		break;

	case ZTI_MODE_BATCH:
		batch = B_TRUE;
		flags |= TASKQ_THREADS_CPU_PCT;
		value = zio_taskq_batch_pct;
		break;

	default:
		panic("unrecognized mode for %s_%s taskq (%u:%u) in "
		    "spa_activate()",
		    zio_type_name[t], zio_taskq_types[q], mode, value);
		break;
	}

	for (uint_t i = 0; i < count; i++) {
		taskq_t *tq;

		if (count > 1) {
			(void) snprintf(name, sizeof (name), "%s_%s_%u",
			    zio_type_name[t], zio_taskq_types[q], i);
		} else {
			(void) snprintf(name, sizeof (name), "%s_%s",
			    zio_type_name[t], zio_taskq_types[q]);
		}

#ifdef SYSDC
		if (zio_taskq_sysdc && spa->spa_proc != &p0) {
			if (batch)
				flags |= TASKQ_DC_BATCH;

			tq = taskq_create_sysdc(name, value, 50, INT_MAX,
			    spa->spa_proc, zio_taskq_basedc, flags);
		} else {
#endif
			pri_t pri = maxclsyspri;
			/*
			 * The write issue taskq can be extremely CPU
			 * intensive.  Run it at slightly lower priority
			 * than the other taskqs.
			 * FreeBSD notes:
			 * - numerically higher priorities are lower priorities;
			 * - if priorities divided by four (RQ_PPQ) are equal
			 *   then a difference between them is insignificant.
			 */
			if (t == ZIO_TYPE_WRITE && q == ZIO_TASKQ_ISSUE)
#ifdef illumos
				pri--;
#else
				pri += 4;
#endif

			tq = taskq_create_proc(name, value, pri, 50,
			    INT_MAX, spa->spa_proc, flags);
#ifdef SYSDC
		}
#endif

		tqs->stqs_taskq[i] = tq;
	}
}

static void
spa_taskqs_fini(spa_t *spa, zio_type_t t, zio_taskq_type_t q)
{
	spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];

	if (tqs->stqs_taskq == NULL) {
		ASSERT0(tqs->stqs_count);
		return;
	}

	for (uint_t i = 0; i < tqs->stqs_count; i++) {
		ASSERT3P(tqs->stqs_taskq[i], !=, NULL);
		taskq_destroy(tqs->stqs_taskq[i]);
	}

	kmem_free(tqs->stqs_taskq, tqs->stqs_count * sizeof (taskq_t *));
	tqs->stqs_taskq = NULL;
}

/*
 * Dispatch a task to the appropriate taskq for the ZFS I/O type and priority.
 * Note that a type may have multiple discrete taskqs to avoid lock contention
 * on the taskq itself. In that case we choose which taskq at random by using
 * the low bits of gethrtime().
 */
void
spa_taskq_dispatch_ent(spa_t *spa, zio_type_t t, zio_taskq_type_t q,
    task_func_t *func, void *arg, uint_t flags, taskq_ent_t *ent)
{
	spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
	taskq_t *tq;

	ASSERT3P(tqs->stqs_taskq, !=, NULL);
	ASSERT3U(tqs->stqs_count, !=, 0);

	if (tqs->stqs_count == 1) {
		tq = tqs->stqs_taskq[0];
	} else {
#ifdef _KERNEL
		tq = tqs->stqs_taskq[cpu_ticks() % tqs->stqs_count];
#else
		tq = tqs->stqs_taskq[gethrtime() % tqs->stqs_count];
#endif
	}

	taskq_dispatch_ent(tq, func, arg, flags, ent);
}

static void
spa_create_zio_taskqs(spa_t *spa)
{
	for (int t = 0; t < ZIO_TYPES; t++) {
		for (int q = 0; q < ZIO_TASKQ_TYPES; q++) {
			spa_taskqs_init(spa, t, q);
		}
	}
}

#ifdef _KERNEL
#ifdef SPA_PROCESS
static void
spa_thread(void *arg)
{
	callb_cpr_t cprinfo;

	spa_t *spa = arg;
	user_t *pu = PTOU(curproc);

	CALLB_CPR_INIT(&cprinfo, &spa->spa_proc_lock, callb_generic_cpr,
	    spa->spa_name);

	ASSERT(curproc != &p0);
	(void) snprintf(pu->u_psargs, sizeof (pu->u_psargs),
	    "zpool-%s", spa->spa_name);
	(void) strlcpy(pu->u_comm, pu->u_psargs, sizeof (pu->u_comm));

#ifdef PSRSET_BIND
	/* bind this thread to the requested psrset */
	if (zio_taskq_psrset_bind != PS_NONE) {
		pool_lock();
		mutex_enter(&cpu_lock);
		mutex_enter(&pidlock);
		mutex_enter(&curproc->p_lock);

		if (cpupart_bind_thread(curthread, zio_taskq_psrset_bind,
		    0, NULL, NULL) == 0) {
			curthread->t_bind_pset = zio_taskq_psrset_bind;
		} else {
			cmn_err(CE_WARN,
			    "Couldn't bind process for zfs pool \"%s\" to "
			    "pset %d\n", spa->spa_name, zio_taskq_psrset_bind);
		}

		mutex_exit(&curproc->p_lock);
		mutex_exit(&pidlock);
		mutex_exit(&cpu_lock);
		pool_unlock();
	}
#endif

#ifdef SYSDC
	if (zio_taskq_sysdc) {
		sysdc_thread_enter(curthread, 100, 0);
	}
#endif

	spa->spa_proc = curproc;
	spa->spa_did = curthread->t_did;

	spa_create_zio_taskqs(spa);

	mutex_enter(&spa->spa_proc_lock);
	ASSERT(spa->spa_proc_state == SPA_PROC_CREATED);

	spa->spa_proc_state = SPA_PROC_ACTIVE;
	cv_broadcast(&spa->spa_proc_cv);

	CALLB_CPR_SAFE_BEGIN(&cprinfo);
	while (spa->spa_proc_state == SPA_PROC_ACTIVE)
		cv_wait(&spa->spa_proc_cv, &spa->spa_proc_lock);
	CALLB_CPR_SAFE_END(&cprinfo, &spa->spa_proc_lock);

	ASSERT(spa->spa_proc_state == SPA_PROC_DEACTIVATE);
	spa->spa_proc_state = SPA_PROC_GONE;
	spa->spa_proc = &p0;
	cv_broadcast(&spa->spa_proc_cv);
	CALLB_CPR_EXIT(&cprinfo);	/* drops spa_proc_lock */

	mutex_enter(&curproc->p_lock);
	lwp_exit();
}
#endif	/* SPA_PROCESS */
#endif

/*
 * Activate an uninitialized pool.
 */
static void
spa_activate(spa_t *spa, int mode)
{
	ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);

	spa->spa_state = POOL_STATE_ACTIVE;
	spa->spa_mode = mode;

	spa->spa_normal_class = metaslab_class_create(spa, zfs_metaslab_ops);
	spa->spa_log_class = metaslab_class_create(spa, zfs_metaslab_ops);

	/* Try to create a covering process */
	mutex_enter(&spa->spa_proc_lock);
	ASSERT(spa->spa_proc_state == SPA_PROC_NONE);
	ASSERT(spa->spa_proc == &p0);
	spa->spa_did = 0;

#ifdef SPA_PROCESS
	/* Only create a process if we're going to be around a while. */
	if (spa_create_process && strcmp(spa->spa_name, TRYIMPORT_NAME) != 0) {
		if (newproc(spa_thread, (caddr_t)spa, syscid, maxclsyspri,
		    NULL, 0) == 0) {
			spa->spa_proc_state = SPA_PROC_CREATED;
			while (spa->spa_proc_state == SPA_PROC_CREATED) {
				cv_wait(&spa->spa_proc_cv,
				    &spa->spa_proc_lock);
			}
			ASSERT(spa->spa_proc_state == SPA_PROC_ACTIVE);
			ASSERT(spa->spa_proc != &p0);
			ASSERT(spa->spa_did != 0);
		} else {
#ifdef _KERNEL
			cmn_err(CE_WARN,
			    "Couldn't create process for zfs pool \"%s\"\n",
			    spa->spa_name);
#endif
		}
	}
#endif	/* SPA_PROCESS */
	mutex_exit(&spa->spa_proc_lock);

	/* If we didn't create a process, we need to create our taskqs. */
	ASSERT(spa->spa_proc == &p0);
	if (spa->spa_proc == &p0) {
		spa_create_zio_taskqs(spa);
	}

	/*
	 * Start TRIM thread.
	 */
	trim_thread_create(spa);

	list_create(&spa->spa_config_dirty_list, sizeof (vdev_t),
	    offsetof(vdev_t, vdev_config_dirty_node));
	list_create(&spa->spa_evicting_os_list, sizeof (objset_t),
	    offsetof(objset_t, os_evicting_node));
	list_create(&spa->spa_state_dirty_list, sizeof (vdev_t),
	    offsetof(vdev_t, vdev_state_dirty_node));

	txg_list_create(&spa->spa_vdev_txg_list, spa,
	    offsetof(struct vdev, vdev_txg_node));

	avl_create(&spa->spa_errlist_scrub,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
	avl_create(&spa->spa_errlist_last,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
}

/*
 * Opposite of spa_activate().
 */
static void
spa_deactivate(spa_t *spa)
{
	ASSERT(spa->spa_sync_on == B_FALSE);
	ASSERT(spa->spa_dsl_pool == NULL);
	ASSERT(spa->spa_root_vdev == NULL);
	ASSERT(spa->spa_async_zio_root == NULL);
	ASSERT(spa->spa_state != POOL_STATE_UNINITIALIZED);

	/*
	 * Stop TRIM thread in case spa_unload() wasn't called directly
	 * before spa_deactivate().
	 */
	trim_thread_destroy(spa);

	spa_evicting_os_wait(spa);

	txg_list_destroy(&spa->spa_vdev_txg_list);

	list_destroy(&spa->spa_config_dirty_list);
	list_destroy(&spa->spa_evicting_os_list);
	list_destroy(&spa->spa_state_dirty_list);

	for (int t = 0; t < ZIO_TYPES; t++) {
		for (int q = 0; q < ZIO_TASKQ_TYPES; q++) {
			spa_taskqs_fini(spa, t, q);
		}
	}

	metaslab_class_destroy(spa->spa_normal_class);
	spa->spa_normal_class = NULL;

	metaslab_class_destroy(spa->spa_log_class);
	spa->spa_log_class = NULL;

	/*
	 * If this was part of an import or the open otherwise failed, we may
	 * still have errors left in the queues.  Empty them just in case.
	 */
	spa_errlog_drain(spa);

	avl_destroy(&spa->spa_errlist_scrub);
	avl_destroy(&spa->spa_errlist_last);

	spa->spa_state = POOL_STATE_UNINITIALIZED;

	mutex_enter(&spa->spa_proc_lock);
	if (spa->spa_proc_state != SPA_PROC_NONE) {
		ASSERT(spa->spa_proc_state == SPA_PROC_ACTIVE);
		spa->spa_proc_state = SPA_PROC_DEACTIVATE;
		cv_broadcast(&spa->spa_proc_cv);
		while (spa->spa_proc_state == SPA_PROC_DEACTIVATE) {
			ASSERT(spa->spa_proc != &p0);
			cv_wait(&spa->spa_proc_cv, &spa->spa_proc_lock);
		}
		ASSERT(spa->spa_proc_state == SPA_PROC_GONE);
		spa->spa_proc_state = SPA_PROC_NONE;
	}
	ASSERT(spa->spa_proc == &p0);
	mutex_exit(&spa->spa_proc_lock);

#ifdef SPA_PROCESS
	/*
	 * We want to make sure spa_thread() has actually exited the ZFS
	 * module, so that the module can't be unloaded out from underneath
	 * it.
	 */
	if (spa->spa_did != 0) {
		thread_join(spa->spa_did);
		spa->spa_did = 0;
	}
#endif	/* SPA_PROCESS */
}

/*
 * Verify a pool configuration, and construct the vdev tree appropriately. This
 * will create all the necessary vdevs in the appropriate layout, with each vdev
 * in the CLOSED state. This will prep the pool before open/creation/import.
 * All vdev validation is done by the vdev_alloc() routine.
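 *
 * The function recurses over the ZPOOL_CONFIG_CHILDREN array; if any child
 * fails to parse, the partially constructed tree is freed and an error is
 * returned.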
 */
static int
spa_config_parse(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent,
    uint_t id, int atype)
{
	nvlist_t **child;
	uint_t children;
	int error;

	if ((error = vdev_alloc(spa, vdp, nv, parent, id, atype)) != 0)
		return (error);

	if ((*vdp)->vdev_ops->vdev_op_leaf)
		return (0);

	error = nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children);

	if (error == ENOENT)
		return (0);

	if (error) {
		vdev_free(*vdp);
		*vdp = NULL;
		return (SET_ERROR(EINVAL));
	}

	for (int c = 0; c < children; c++) {
		vdev_t *vd;
		if ((error = spa_config_parse(spa, &vd, child[c], *vdp, c,
		    atype)) != 0) {
			vdev_free(*vdp);
			*vdp = NULL;
			return (error);
		}
	}

	ASSERT(*vdp != NULL);

	return (0);
}

/*
 * Opposite of spa_load().
 */
static void
spa_unload(spa_t *spa)
{
	int i;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	/*
	 * Stop TRIM thread.
	 */
	trim_thread_destroy(spa);

	/*
	 * Stop async tasks.
	 */
	spa_async_suspend(spa);

	/*
	 * Stop syncing.
	 */
	if (spa->spa_sync_on) {
		txg_sync_stop(spa->spa_dsl_pool);
		spa->spa_sync_on = B_FALSE;
	}

	/*
	 * Even though vdev_free() also calls vdev_metaslab_fini, we need
	 * to call it earlier, before we wait for async i/o to complete.
	 * This ensures that there is no async metaslab prefetching, by
	 * calling taskq_wait(mg_taskq).
	 */
	if (spa->spa_root_vdev != NULL) {
		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
		for (int c = 0; c < spa->spa_root_vdev->vdev_children; c++)
			vdev_metaslab_fini(spa->spa_root_vdev->vdev_child[c]);
		spa_config_exit(spa, SCL_ALL, FTAG);
	}

	/*
	 * Wait for any outstanding async I/O to complete.
	 */
	if (spa->spa_async_zio_root != NULL) {
		for (int i = 0; i < max_ncpus; i++)
			(void) zio_wait(spa->spa_async_zio_root[i]);
		kmem_free(spa->spa_async_zio_root, max_ncpus * sizeof (void *));
		spa->spa_async_zio_root = NULL;
	}

	bpobj_close(&spa->spa_deferred_bpobj);

	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);

	/*
	 * Close all vdevs.
	 */
	if (spa->spa_root_vdev)
		vdev_free(spa->spa_root_vdev);
	ASSERT(spa->spa_root_vdev == NULL);

	/*
	 * Close the dsl pool.
	 */
	if (spa->spa_dsl_pool) {
		dsl_pool_close(spa->spa_dsl_pool);
		spa->spa_dsl_pool = NULL;
		spa->spa_meta_objset = NULL;
	}

	ddt_unload(spa);

	/*
	 * Drop and purge level 2 cache
	 */
	spa_l2cache_drop(spa);

	for (i = 0; i < spa->spa_spares.sav_count; i++)
		vdev_free(spa->spa_spares.sav_vdevs[i]);
	if (spa->spa_spares.sav_vdevs) {
		kmem_free(spa->spa_spares.sav_vdevs,
		    spa->spa_spares.sav_count * sizeof (void *));
		spa->spa_spares.sav_vdevs = NULL;
	}
	if (spa->spa_spares.sav_config) {
		nvlist_free(spa->spa_spares.sav_config);
		spa->spa_spares.sav_config = NULL;
	}
	spa->spa_spares.sav_count = 0;

	for (i = 0; i < spa->spa_l2cache.sav_count; i++) {
		vdev_clear_stats(spa->spa_l2cache.sav_vdevs[i]);
		vdev_free(spa->spa_l2cache.sav_vdevs[i]);
	}
	if (spa->spa_l2cache.sav_vdevs) {
		kmem_free(spa->spa_l2cache.sav_vdevs,
		    spa->spa_l2cache.sav_count * sizeof (void *));
		spa->spa_l2cache.sav_vdevs = NULL;
	}
	if (spa->spa_l2cache.sav_config) {
		nvlist_free(spa->spa_l2cache.sav_config);
		spa->spa_l2cache.sav_config = NULL;
	}
	spa->spa_l2cache.sav_count = 0;

	spa->spa_async_suspended = 0;

	if (spa->spa_comment != NULL) {
		spa_strfree(spa->spa_comment);
		spa->spa_comment = NULL;
	}

	spa_config_exit(spa, SCL_ALL, FTAG);
}

/*
 * Load (or re-load) the current list of vdevs describing the active spares for
 * this pool. When this is called, we have some form of basic information in
 * 'spa_spares.sav_config'. We parse this into vdevs, try to open them, and
 * then re-generate a more complete list including status information.
 */
static void
spa_load_spares(spa_t *spa)
{
	nvlist_t **spares;
	uint_t nspares;
	int i;
	vdev_t *vd, *tvd;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	/*
	 * First, close and free any existing spare vdevs.
	 */
	for (i = 0; i < spa->spa_spares.sav_count; i++) {
		vd = spa->spa_spares.sav_vdevs[i];

		/* Undo the call to spa_activate() below */
		if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
		    B_FALSE)) != NULL && tvd->vdev_isspare)
			spa_spare_remove(tvd);
		vdev_close(vd);
		vdev_free(vd);
	}

	if (spa->spa_spares.sav_vdevs)
		kmem_free(spa->spa_spares.sav_vdevs,
		    spa->spa_spares.sav_count * sizeof (void *));

	if (spa->spa_spares.sav_config == NULL)
		nspares = 0;
	else
		VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
		    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);

	spa->spa_spares.sav_count = (int)nspares;
	spa->spa_spares.sav_vdevs = NULL;

	if (nspares == 0)
		return;

	/*
	 * Construct the array of vdevs, opening them to get status in the
	 * process.  For each spare, there are potentially two different vdev_t
	 * structures associated with it: one in the list of spares (used only
	 * for basic validation purposes) and one in the active vdev
	 * configuration (if it's spared in).  During this phase we open and
	 * validate each vdev on the spare list.  If the vdev also exists in the
	 * active configuration, then we also mark this vdev as an active spare.
	 */
	spa->spa_spares.sav_vdevs = kmem_alloc(nspares * sizeof (void *),
	    KM_SLEEP);
	for (i = 0; i < spa->spa_spares.sav_count; i++) {
		VERIFY(spa_config_parse(spa, &vd, spares[i], NULL, 0,
		    VDEV_ALLOC_SPARE) == 0);
		ASSERT(vd != NULL);

		spa->spa_spares.sav_vdevs[i] = vd;

		if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
		    B_FALSE)) != NULL) {
			if (!tvd->vdev_isspare)
				spa_spare_add(tvd);

			/*
			 * We only mark the spare active if we were successfully
			 * able to load the vdev.  Otherwise, importing a pool
			 * with a bad active spare would result in strange
			 * behavior, because multiple pools would think the
			 * spare is actively in use.
			 *
			 * There is a vulnerability here to an equally bizarre
			 * circumstance, where a dead active spare is later
			 * brought back to life (onlined or otherwise).  Given
			 * the rarity of this scenario, and the extra complexity
			 * it adds, we ignore the possibility.
			 */
			if (!vdev_is_dead(tvd))
				spa_spare_activate(tvd);
		}

		vd->vdev_top = vd;
		vd->vdev_aux = &spa->spa_spares;

		if (vdev_open(vd) != 0)
			continue;

		if (vdev_validate_aux(vd) == 0)
			spa_spare_add(vd);
	}

	/*
	 * Recompute the stashed list of spares, with status information
	 * this time.
1494168404Spjd */ 1495185029Spjd VERIFY(nvlist_remove(spa->spa_spares.sav_config, ZPOOL_CONFIG_SPARES, 1496168404Spjd DATA_TYPE_NVLIST_ARRAY) == 0); 1497168404Spjd 1498185029Spjd spares = kmem_alloc(spa->spa_spares.sav_count * sizeof (void *), 1499185029Spjd KM_SLEEP); 1500185029Spjd for (i = 0; i < spa->spa_spares.sav_count; i++) 1501185029Spjd spares[i] = vdev_config_generate(spa, 1502219089Spjd spa->spa_spares.sav_vdevs[i], B_TRUE, VDEV_CONFIG_SPARE); 1503185029Spjd VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config, 1504185029Spjd ZPOOL_CONFIG_SPARES, spares, spa->spa_spares.sav_count) == 0); 1505185029Spjd for (i = 0; i < spa->spa_spares.sav_count; i++) 1506168404Spjd nvlist_free(spares[i]); 1507185029Spjd kmem_free(spares, spa->spa_spares.sav_count * sizeof (void *)); 1508168404Spjd} 1509168404Spjd 1510185029Spjd/* 1511185029Spjd * Load (or re-load) the current list of vdevs describing the active l2cache for 1512185029Spjd * this pool. When this is called, we have some form of basic information in 1513185029Spjd * 'spa_l2cache.sav_config'. We parse this into vdevs, try to open them, and 1514185029Spjd * then re-generate a more complete list including status information. 1515185029Spjd * Devices which are already active have their details maintained, and are 1516185029Spjd * not re-opened. 1517185029Spjd */ 1518185029Spjdstatic void 1519185029Spjdspa_load_l2cache(spa_t *spa) 1520185029Spjd{ 1521185029Spjd nvlist_t **l2cache; 1522185029Spjd uint_t nl2cache; 1523185029Spjd int i, j, oldnvdevs; 1524219089Spjd uint64_t guid; 1525185029Spjd vdev_t *vd, **oldvdevs, **newvdevs; 1526185029Spjd spa_aux_vdev_t *sav = &spa->spa_l2cache; 1527185029Spjd 1528185029Spjd ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL); 1529185029Spjd 1530185029Spjd if (sav->sav_config != NULL) { 1531185029Spjd VERIFY(nvlist_lookup_nvlist_array(sav->sav_config, 1532185029Spjd ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0); 1533185029Spjd newvdevs = kmem_alloc(nl2cache * sizeof (void *), KM_SLEEP); 1534185029Spjd } else { 1535185029Spjd nl2cache = 0; 1536247187Smm newvdevs = NULL; 1537185029Spjd } 1538185029Spjd 1539185029Spjd oldvdevs = sav->sav_vdevs; 1540185029Spjd oldnvdevs = sav->sav_count; 1541185029Spjd sav->sav_vdevs = NULL; 1542185029Spjd sav->sav_count = 0; 1543185029Spjd 1544185029Spjd /* 1545185029Spjd * Process new nvlist of vdevs. 1546185029Spjd */ 1547185029Spjd for (i = 0; i < nl2cache; i++) { 1548185029Spjd VERIFY(nvlist_lookup_uint64(l2cache[i], ZPOOL_CONFIG_GUID, 1549185029Spjd &guid) == 0); 1550185029Spjd 1551185029Spjd newvdevs[i] = NULL; 1552185029Spjd for (j = 0; j < oldnvdevs; j++) { 1553185029Spjd vd = oldvdevs[j]; 1554185029Spjd if (vd != NULL && guid == vd->vdev_guid) { 1555185029Spjd /* 1556185029Spjd * Retain previous vdev for add/remove ops. 1557185029Spjd */ 1558185029Spjd newvdevs[i] = vd; 1559185029Spjd oldvdevs[j] = NULL; 1560185029Spjd break; 1561185029Spjd } 1562185029Spjd } 1563185029Spjd 1564185029Spjd if (newvdevs[i] == NULL) { 1565185029Spjd /* 1566185029Spjd * Create new vdev 1567185029Spjd */ 1568185029Spjd VERIFY(spa_config_parse(spa, &vd, l2cache[i], NULL, 0, 1569185029Spjd VDEV_ALLOC_L2CACHE) == 0); 1570185029Spjd ASSERT(vd != NULL); 1571185029Spjd newvdevs[i] = vd; 1572185029Spjd 1573185029Spjd /* 1574185029Spjd * Commit this vdev as an l2cache device, 1575185029Spjd * even if it fails to open. 
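 * Unlike a spare, which is only entered into the spare namespace after
 * it opens and validates, an l2cache device is committed to the aux
 * list up front so the cache configuration round-trips intact; it is
 * only handed to the ARC (l2arc_add_vdev() below) once it proves to be
 * healthy.  Devices that were already active were matched by guid
 * above and keep their existing vdev_t, as noted in the function
 * comment.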
1576185029Spjd */ 1577185029Spjd spa_l2cache_add(vd); 1578185029Spjd 1579185029Spjd vd->vdev_top = vd; 1580185029Spjd vd->vdev_aux = sav; 1581185029Spjd 1582185029Spjd spa_l2cache_activate(vd); 1583185029Spjd 1584185029Spjd if (vdev_open(vd) != 0) 1585185029Spjd continue; 1586185029Spjd 1587185029Spjd (void) vdev_validate_aux(vd); 1588185029Spjd 1589219089Spjd if (!vdev_is_dead(vd)) 1590219089Spjd l2arc_add_vdev(spa, vd); 1591185029Spjd } 1592185029Spjd } 1593185029Spjd 1594185029Spjd /* 1595185029Spjd * Purge vdevs that were dropped 1596185029Spjd */ 1597185029Spjd for (i = 0; i < oldnvdevs; i++) { 1598185029Spjd uint64_t pool; 1599185029Spjd 1600185029Spjd vd = oldvdevs[i]; 1601185029Spjd if (vd != NULL) { 1602230514Smm ASSERT(vd->vdev_isl2cache); 1603230514Smm 1604209962Smm if (spa_l2cache_exists(vd->vdev_guid, &pool) && 1605209962Smm pool != 0ULL && l2arc_vdev_present(vd)) 1606185029Spjd l2arc_remove_vdev(vd); 1607230514Smm vdev_clear_stats(vd); 1608230514Smm vdev_free(vd); 1609185029Spjd } 1610185029Spjd } 1611185029Spjd 1612185029Spjd if (oldvdevs) 1613185029Spjd kmem_free(oldvdevs, oldnvdevs * sizeof (void *)); 1614185029Spjd 1615185029Spjd if (sav->sav_config == NULL) 1616185029Spjd goto out; 1617185029Spjd 1618185029Spjd sav->sav_vdevs = newvdevs; 1619185029Spjd sav->sav_count = (int)nl2cache; 1620185029Spjd 1621185029Spjd /* 1622185029Spjd * Recompute the stashed list of l2cache devices, with status 1623185029Spjd * information this time. 1624185029Spjd */ 1625185029Spjd VERIFY(nvlist_remove(sav->sav_config, ZPOOL_CONFIG_L2CACHE, 1626185029Spjd DATA_TYPE_NVLIST_ARRAY) == 0); 1627185029Spjd 1628185029Spjd l2cache = kmem_alloc(sav->sav_count * sizeof (void *), KM_SLEEP); 1629185029Spjd for (i = 0; i < sav->sav_count; i++) 1630185029Spjd l2cache[i] = vdev_config_generate(spa, 1631219089Spjd sav->sav_vdevs[i], B_TRUE, VDEV_CONFIG_L2CACHE); 1632185029Spjd VERIFY(nvlist_add_nvlist_array(sav->sav_config, 1633185029Spjd ZPOOL_CONFIG_L2CACHE, l2cache, sav->sav_count) == 0); 1634185029Spjdout: 1635185029Spjd for (i = 0; i < sav->sav_count; i++) 1636185029Spjd nvlist_free(l2cache[i]); 1637185029Spjd if (sav->sav_count) 1638185029Spjd kmem_free(l2cache, sav->sav_count * sizeof (void *)); 1639185029Spjd} 1640185029Spjd 1641168404Spjdstatic int 1642168404Spjdload_nvlist(spa_t *spa, uint64_t obj, nvlist_t **value) 1643168404Spjd{ 1644168404Spjd dmu_buf_t *db; 1645168404Spjd char *packed = NULL; 1646168404Spjd size_t nvsize = 0; 1647168404Spjd int error; 1648168404Spjd *value = NULL; 1649168404Spjd 1650262676Sdelphij error = dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db); 1651262676Sdelphij if (error != 0) 1652262676Sdelphij return (error); 1653287744Sdelphij 1654168404Spjd nvsize = *(uint64_t *)db->db_data; 1655168404Spjd dmu_buf_rele(db, FTAG); 1656168404Spjd 1657168404Spjd packed = kmem_alloc(nvsize, KM_SLEEP); 1658209962Smm error = dmu_read(spa->spa_meta_objset, obj, 0, nvsize, packed, 1659209962Smm DMU_READ_PREFETCH); 1660168404Spjd if (error == 0) 1661168404Spjd error = nvlist_unpack(packed, nvsize, value, 0); 1662168404Spjd kmem_free(packed, nvsize); 1663168404Spjd 1664168404Spjd return (error); 1665168404Spjd} 1666168404Spjd 1667168404Spjd/* 1668185029Spjd * Checks to see if the given vdev could not be opened, in which case we post a 1669185029Spjd * sysevent to notify the autoreplace code that the device has been removed. 
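 * The check is applied to a whole subtree by the usual recursive vdev
 * walk.  A minimal sketch of that traversal shape (helper name
 * hypothetical, not part of this file):
 *
 *	static void
 *	visit_vdevs(vdev_t *vd, void (*func)(vdev_t *))
 *	{
 *		for (int c = 0; c < vd->vdev_children; c++)
 *			visit_vdevs(vd->vdev_child[c], func);
 *		if (vd->vdev_ops->vdev_op_leaf)
 *			func(vd);
 *	}
 *
 * spa_check_removed() below has the same shape, acting only on leaves
 * that are dead and are not holes.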
1670185029Spjd */ 1671185029Spjdstatic void 1672185029Spjdspa_check_removed(vdev_t *vd) 1673185029Spjd{ 1674219089Spjd for (int c = 0; c < vd->vdev_children; c++) 1675185029Spjd spa_check_removed(vd->vdev_child[c]); 1676185029Spjd 1677249188Smm if (vd->vdev_ops->vdev_op_leaf && vdev_is_dead(vd) && 1678249188Smm !vd->vdev_ishole) { 1679185029Spjd zfs_post_autoreplace(vd->vdev_spa, vd); 1680185029Spjd spa_event_notify(vd->vdev_spa, vd, ESC_ZFS_VDEV_CHECK); 1681185029Spjd } 1682185029Spjd} 1683185029Spjd 1684299441Smavstatic void 1685299441Smavspa_config_valid_zaps(vdev_t *vd, vdev_t *mvd) 1686299441Smav{ 1687299441Smav ASSERT3U(vd->vdev_children, ==, mvd->vdev_children); 1688299441Smav 1689299441Smav vd->vdev_top_zap = mvd->vdev_top_zap; 1690299441Smav vd->vdev_leaf_zap = mvd->vdev_leaf_zap; 1691299441Smav 1692299441Smav for (uint64_t i = 0; i < vd->vdev_children; i++) { 1693299441Smav spa_config_valid_zaps(vd->vdev_child[i], mvd->vdev_child[i]); 1694299441Smav } 1695299441Smav} 1696299441Smav 1697185029Spjd/* 1698219089Spjd * Validate the current config against the MOS config 1699213197Smm */ 1700219089Spjdstatic boolean_t 1701219089Spjdspa_config_valid(spa_t *spa, nvlist_t *config) 1702213197Smm{ 1703219089Spjd vdev_t *mrvd, *rvd = spa->spa_root_vdev; 1704219089Spjd nvlist_t *nv; 1705213197Smm 1706219089Spjd VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nv) == 0); 1707213197Smm 1708219089Spjd spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 1709219089Spjd VERIFY(spa_config_parse(spa, &mrvd, nv, NULL, 0, VDEV_ALLOC_LOAD) == 0); 1710219089Spjd 1711219089Spjd ASSERT3U(rvd->vdev_children, ==, mrvd->vdev_children); 1712219089Spjd 1713219089Spjd /* 1714219089Spjd * If we're doing a normal import, then build up any additional 1715219089Spjd * diagnostic information about missing devices in this config. 1716219089Spjd * We'll pass this up to the user for further processing. 1717219089Spjd */ 1718219089Spjd if (!(spa->spa_import_flags & ZFS_IMPORT_MISSING_LOG)) { 1719219089Spjd nvlist_t **child, *nv; 1720219089Spjd uint64_t idx = 0; 1721219089Spjd 1722219089Spjd child = kmem_alloc(rvd->vdev_children * sizeof (nvlist_t **), 1723219089Spjd KM_SLEEP); 1724219089Spjd VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0); 1725219089Spjd 1726219089Spjd for (int c = 0; c < rvd->vdev_children; c++) { 1727219089Spjd vdev_t *tvd = rvd->vdev_child[c]; 1728219089Spjd vdev_t *mtvd = mrvd->vdev_child[c]; 1729219089Spjd 1730219089Spjd if (tvd->vdev_ops == &vdev_missing_ops && 1731219089Spjd mtvd->vdev_ops != &vdev_missing_ops && 1732219089Spjd mtvd->vdev_islog) 1733219089Spjd child[idx++] = vdev_config_generate(spa, mtvd, 1734219089Spjd B_FALSE, 0); 1735219089Spjd } 1736219089Spjd 1737219089Spjd if (idx) { 1738219089Spjd VERIFY(nvlist_add_nvlist_array(nv, 1739219089Spjd ZPOOL_CONFIG_CHILDREN, child, idx) == 0); 1740219089Spjd VERIFY(nvlist_add_nvlist(spa->spa_load_info, 1741219089Spjd ZPOOL_CONFIG_MISSING_DEVICES, nv) == 0); 1742219089Spjd 1743219089Spjd for (int i = 0; i < idx; i++) 1744219089Spjd nvlist_free(child[i]); 1745219089Spjd } 1746219089Spjd nvlist_free(nv); 1747219089Spjd kmem_free(child, rvd->vdev_children * sizeof (char **)); 1748219089Spjd } 1749219089Spjd 1750219089Spjd /* 1751219089Spjd * Compare the root vdev tree with the information we have 1752219089Spjd * from the MOS config (mrvd). Check each top-level vdev 1753219089Spjd * with the corresponding MOS config top-level (mtvd). 
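 * In the loop below, a top-level vdev that is "missing" in the
 * label-derived tree but present in the MOS copy is handled in place:
 * when the import explicitly allows a missing log (ZFS_IMPORT_MISSING_LOG)
 * and the MOS copy says the device is a log, the missing vdev is
 * exchanged with the MOS copy (removed and re-added in both trees, so
 * the child counts stay equal) and the log state is cleared; a missing
 * data top-level is simply skipped for now.  Otherwise the per-vdev
 * ZAP object numbers are copied over from the MOS copy, and slog
 * devices additionally reload their log state from it.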
1754219089Spjd */ 1755219089Spjd for (int c = 0; c < rvd->vdev_children; c++) { 1756213197Smm vdev_t *tvd = rvd->vdev_child[c]; 1757219089Spjd vdev_t *mtvd = mrvd->vdev_child[c]; 1758213197Smm 1759219089Spjd /* 1760219089Spjd * Resolve any "missing" vdevs in the current configuration. 1761219089Spjd * If we find that the MOS config has more accurate information 1762219089Spjd * about the top-level vdev then use that vdev instead. 1763219089Spjd */ 1764219089Spjd if (tvd->vdev_ops == &vdev_missing_ops && 1765219089Spjd mtvd->vdev_ops != &vdev_missing_ops) { 1766219089Spjd 1767219089Spjd if (!(spa->spa_import_flags & ZFS_IMPORT_MISSING_LOG)) 1768219089Spjd continue; 1769219089Spjd 1770219089Spjd /* 1771219089Spjd * Device specific actions. 1772219089Spjd */ 1773219089Spjd if (mtvd->vdev_islog) { 1774219089Spjd spa_set_log_state(spa, SPA_LOG_CLEAR); 1775219089Spjd } else { 1776219089Spjd /* 1777219089Spjd * XXX - once we have 'readonly' pool 1778219089Spjd * support we should be able to handle 1779219089Spjd * missing data devices by transitioning 1780219089Spjd * the pool to readonly. 1781219089Spjd */ 1782219089Spjd continue; 1783219089Spjd } 1784219089Spjd 1785219089Spjd /* 1786219089Spjd * Swap the missing vdev with the data we were 1787219089Spjd * able to obtain from the MOS config. 1788219089Spjd */ 1789219089Spjd vdev_remove_child(rvd, tvd); 1790219089Spjd vdev_remove_child(mrvd, mtvd); 1791219089Spjd 1792219089Spjd vdev_add_child(rvd, mtvd); 1793219089Spjd vdev_add_child(mrvd, tvd); 1794219089Spjd 1795219089Spjd spa_config_exit(spa, SCL_ALL, FTAG); 1796219089Spjd vdev_load(mtvd); 1797219089Spjd spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 1798219089Spjd 1799219089Spjd vdev_reopen(rvd); 1800299441Smav } else { 1801299441Smav if (mtvd->vdev_islog) { 1802299441Smav /* 1803299441Smav * Load the slog device's state from the MOS 1804299441Smav * config since it's possible that the label 1805299441Smav * does not contain the most up-to-date 1806299441Smav * information. 1807299441Smav */ 1808299441Smav vdev_load_log_state(tvd, mtvd); 1809299441Smav vdev_reopen(tvd); 1810299441Smav } 1811299441Smav 1812219089Spjd /* 1813299441Smav * Per-vdev ZAP info is stored exclusively in the MOS. 1814219089Spjd */ 1815299441Smav spa_config_valid_zaps(tvd, mtvd); 1816219089Spjd } 1817213197Smm } 1818299441Smav 1819219089Spjd vdev_free(mrvd); 1820219089Spjd spa_config_exit(spa, SCL_ALL, FTAG); 1821219089Spjd 1822219089Spjd /* 1823219089Spjd * Ensure we were able to validate the config. 
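 * Validation succeeds only if the guid sum of the (possibly repaired)
 * in-core tree matches ub_guid_sum recorded in the uberblock.
 * Conceptually that sum is every vdev guid in the tree added together;
 * a rough sketch of the idea (the real vdev_guid_sum field is
 * maintained incrementally, not recomputed like this):
 *
 *	static uint64_t
 *	tree_guid_sum(vdev_t *vd)
 *	{
 *		uint64_t sum = vd->vdev_guid;
 *
 *		for (int c = 0; c < vd->vdev_children; c++)
 *			sum += tree_guid_sum(vd->vdev_child[c]);
 *		return (sum);
 *	}
 *
 * so the comparison below fails whenever the assembled tree does not
 * account for every device the uberblock expects.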
1824219089Spjd */ 1825219089Spjd return (rvd->vdev_guid_sum == spa->spa_uberblock.ub_guid_sum); 1826213197Smm} 1827213197Smm 1828213197Smm/* 1829185029Spjd * Check for missing log devices 1830185029Spjd */ 1831248571Smmstatic boolean_t 1832185029Spjdspa_check_logs(spa_t *spa) 1833185029Spjd{ 1834248571Smm boolean_t rv = B_FALSE; 1835286686Smav dsl_pool_t *dp = spa_get_dsl(spa); 1836248571Smm 1837185029Spjd switch (spa->spa_log_state) { 1838185029Spjd case SPA_LOG_MISSING: 1839185029Spjd /* need to recheck in case slog has been restored */ 1840185029Spjd case SPA_LOG_UNKNOWN: 1841286686Smav rv = (dmu_objset_find_dp(dp, dp->dp_root_dir_obj, 1842286686Smav zil_check_log_chain, NULL, DS_FIND_CHILDREN) != 0); 1843248571Smm if (rv) 1844219089Spjd spa_set_log_state(spa, SPA_LOG_MISSING); 1845185029Spjd break; 1846185029Spjd } 1847248571Smm return (rv); 1848185029Spjd} 1849185029Spjd 1850219089Spjdstatic boolean_t 1851219089Spjdspa_passivate_log(spa_t *spa) 1852219089Spjd{ 1853219089Spjd vdev_t *rvd = spa->spa_root_vdev; 1854219089Spjd boolean_t slog_found = B_FALSE; 1855219089Spjd 1856219089Spjd ASSERT(spa_config_held(spa, SCL_ALLOC, RW_WRITER)); 1857219089Spjd 1858219089Spjd if (!spa_has_slogs(spa)) 1859219089Spjd return (B_FALSE); 1860219089Spjd 1861219089Spjd for (int c = 0; c < rvd->vdev_children; c++) { 1862219089Spjd vdev_t *tvd = rvd->vdev_child[c]; 1863219089Spjd metaslab_group_t *mg = tvd->vdev_mg; 1864219089Spjd 1865219089Spjd if (tvd->vdev_islog) { 1866219089Spjd metaslab_group_passivate(mg); 1867219089Spjd slog_found = B_TRUE; 1868219089Spjd } 1869219089Spjd } 1870219089Spjd 1871219089Spjd return (slog_found); 1872219089Spjd} 1873219089Spjd 1874219089Spjdstatic void 1875219089Spjdspa_activate_log(spa_t *spa) 1876219089Spjd{ 1877219089Spjd vdev_t *rvd = spa->spa_root_vdev; 1878219089Spjd 1879219089Spjd ASSERT(spa_config_held(spa, SCL_ALLOC, RW_WRITER)); 1880219089Spjd 1881219089Spjd for (int c = 0; c < rvd->vdev_children; c++) { 1882219089Spjd vdev_t *tvd = rvd->vdev_child[c]; 1883219089Spjd metaslab_group_t *mg = tvd->vdev_mg; 1884219089Spjd 1885219089Spjd if (tvd->vdev_islog) 1886219089Spjd metaslab_group_activate(mg); 1887219089Spjd } 1888219089Spjd} 1889219089Spjd 1890219089Spjdint 1891219089Spjdspa_offline_log(spa_t *spa) 1892219089Spjd{ 1893248571Smm int error; 1894219089Spjd 1895248571Smm error = dmu_objset_find(spa_name(spa), zil_vdev_offline, 1896248571Smm NULL, DS_FIND_CHILDREN); 1897248571Smm if (error == 0) { 1898219089Spjd /* 1899219089Spjd * We successfully offlined the log device, sync out the 1900219089Spjd * current txg so that the "stubby" block can be removed 1901219089Spjd * by zil_sync(). 
1902219089Spjd */ 1903219089Spjd txg_wait_synced(spa->spa_dsl_pool, 0); 1904219089Spjd } 1905219089Spjd return (error); 1906219089Spjd} 1907219089Spjd 1908219089Spjdstatic void 1909219089Spjdspa_aux_check_removed(spa_aux_vdev_t *sav) 1910219089Spjd{ 1911219089Spjd int i; 1912219089Spjd 1913219089Spjd for (i = 0; i < sav->sav_count; i++) 1914219089Spjd spa_check_removed(sav->sav_vdevs[i]); 1915219089Spjd} 1916219089Spjd 1917219089Spjdvoid 1918219089Spjdspa_claim_notify(zio_t *zio) 1919219089Spjd{ 1920219089Spjd spa_t *spa = zio->io_spa; 1921219089Spjd 1922219089Spjd if (zio->io_error) 1923219089Spjd return; 1924219089Spjd 1925219089Spjd mutex_enter(&spa->spa_props_lock); /* any mutex will do */ 1926219089Spjd if (spa->spa_claim_max_txg < zio->io_bp->blk_birth) 1927219089Spjd spa->spa_claim_max_txg = zio->io_bp->blk_birth; 1928219089Spjd mutex_exit(&spa->spa_props_lock); 1929219089Spjd} 1930219089Spjd 1931219089Spjdtypedef struct spa_load_error { 1932219089Spjd uint64_t sle_meta_count; 1933219089Spjd uint64_t sle_data_count; 1934219089Spjd} spa_load_error_t; 1935219089Spjd 1936219089Spjdstatic void 1937219089Spjdspa_load_verify_done(zio_t *zio) 1938219089Spjd{ 1939219089Spjd blkptr_t *bp = zio->io_bp; 1940219089Spjd spa_load_error_t *sle = zio->io_private; 1941219089Spjd dmu_object_type_t type = BP_GET_TYPE(bp); 1942219089Spjd int error = zio->io_error; 1943268720Sdelphij spa_t *spa = zio->io_spa; 1944219089Spjd 1945219089Spjd if (error) { 1946236884Smm if ((BP_GET_LEVEL(bp) != 0 || DMU_OT_IS_METADATA(type)) && 1947219089Spjd type != DMU_OT_INTENT_LOG) 1948270247Sdelphij atomic_inc_64(&sle->sle_meta_count); 1949219089Spjd else 1950270247Sdelphij atomic_inc_64(&sle->sle_data_count); 1951219089Spjd } 1952219089Spjd zio_data_buf_free(zio->io_data, zio->io_size); 1953268720Sdelphij 1954268720Sdelphij mutex_enter(&spa->spa_scrub_lock); 1955268720Sdelphij spa->spa_scrub_inflight--; 1956268720Sdelphij cv_broadcast(&spa->spa_scrub_io_cv); 1957268720Sdelphij mutex_exit(&spa->spa_scrub_lock); 1958219089Spjd} 1959219089Spjd 1960268720Sdelphij/* 1961268720Sdelphij * Maximum number of concurrent scrub i/os to create while verifying 1962268720Sdelphij * a pool while importing it. 
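 * These variables control only the import-time verification pass in
 * spa_load_verify(); ordinary scrubs are unaffected.  On FreeBSD they
 * are exported below with CTLFLAG_RWTUN, so the limit can be set as a
 * loader tunable or adjusted at runtime, for example (value chosen
 * purely for illustration):
 *
 *	# sysctl vfs.zfs.spa_load_verify_maxinflight=1000
 *
 * spa_load_verify_cb() blocks new reads once spa_scrub_inflight reaches
 * the limit, and spa_load_verify_done() wakes waiters as each read
 * completes.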
1963268720Sdelphij */ 1964268720Sdelphijint spa_load_verify_maxinflight = 10000; 1965268720Sdelphijboolean_t spa_load_verify_metadata = B_TRUE; 1966268720Sdelphijboolean_t spa_load_verify_data = B_TRUE; 1967268720Sdelphij 1968268720SdelphijSYSCTL_INT(_vfs_zfs, OID_AUTO, spa_load_verify_maxinflight, CTLFLAG_RWTUN, 1969268720Sdelphij &spa_load_verify_maxinflight, 0, 1970268720Sdelphij "Maximum number of concurrent scrub I/Os to create while verifying a " 1971268720Sdelphij "pool while importing it"); 1972268720Sdelphij 1973268720SdelphijSYSCTL_INT(_vfs_zfs, OID_AUTO, spa_load_verify_metadata, CTLFLAG_RWTUN, 1974268720Sdelphij &spa_load_verify_metadata, 0, 1975268720Sdelphij "Check metadata on import?"); 1976268720Sdelphij 1977268720SdelphijSYSCTL_INT(_vfs_zfs, OID_AUTO, spa_load_verify_data, CTLFLAG_RWTUN, 1978268720Sdelphij &spa_load_verify_data, 0, 1979268720Sdelphij "Check user data on import?"); 1980268720Sdelphij 1981219089Spjd/*ARGSUSED*/ 1982219089Spjdstatic int 1983219089Spjdspa_load_verify_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp, 1984268123Sdelphij const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg) 1985219089Spjd{ 1986286705Smav if (bp == NULL || BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp)) 1987268720Sdelphij return (0); 1988268720Sdelphij /* 1989268720Sdelphij * Note: normally this routine will not be called if 1990268720Sdelphij * spa_load_verify_metadata is not set. However, it may be useful 1991268720Sdelphij * to manually set the flag after the traversal has begun. 1992268720Sdelphij */ 1993268720Sdelphij if (!spa_load_verify_metadata) 1994268720Sdelphij return (0); 1995268720Sdelphij if (BP_GET_BUFC_TYPE(bp) == ARC_BUFC_DATA && !spa_load_verify_data) 1996268720Sdelphij return (0); 1997219089Spjd 1998268720Sdelphij zio_t *rio = arg; 1999268720Sdelphij size_t size = BP_GET_PSIZE(bp); 2000268720Sdelphij void *data = zio_data_buf_alloc(size); 2001268720Sdelphij 2002268720Sdelphij mutex_enter(&spa->spa_scrub_lock); 2003268720Sdelphij while (spa->spa_scrub_inflight >= spa_load_verify_maxinflight) 2004268720Sdelphij cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock); 2005268720Sdelphij spa->spa_scrub_inflight++; 2006268720Sdelphij mutex_exit(&spa->spa_scrub_lock); 2007268720Sdelphij 2008268720Sdelphij zio_nowait(zio_read(rio, spa, bp, data, size, 2009268720Sdelphij spa_load_verify_done, rio->io_private, ZIO_PRIORITY_SCRUB, 2010268720Sdelphij ZIO_FLAG_SPECULATIVE | ZIO_FLAG_CANFAIL | 2011268720Sdelphij ZIO_FLAG_SCRUB | ZIO_FLAG_RAW, zb)); 2012219089Spjd return (0); 2013219089Spjd} 2014219089Spjd 2015307045Smav/* ARGSUSED */ 2016307045Smavint 2017307045Smavverify_dataset_name_len(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg) 2018307045Smav{ 2019307108Smav if (dsl_dataset_namelen(ds) >= ZFS_MAX_DATASET_NAME_LEN) 2020307045Smav return (SET_ERROR(ENAMETOOLONG)); 2021307045Smav 2022307045Smav return (0); 2023307045Smav} 2024307045Smav 2025219089Spjdstatic int 2026219089Spjdspa_load_verify(spa_t *spa) 2027219089Spjd{ 2028219089Spjd zio_t *rio; 2029219089Spjd spa_load_error_t sle = { 0 }; 2030219089Spjd zpool_rewind_policy_t policy; 2031219089Spjd boolean_t verify_ok = B_FALSE; 2032268720Sdelphij int error = 0; 2033219089Spjd 2034219089Spjd zpool_get_rewind_policy(spa->spa_config, &policy); 2035219089Spjd 2036219089Spjd if (policy.zrp_request & ZPOOL_NEVER_REWIND) 2037219089Spjd return (0); 2038219089Spjd 2039307045Smav dsl_pool_config_enter(spa->spa_dsl_pool, FTAG); 2040307045Smav error = dmu_objset_find_dp(spa->spa_dsl_pool, 2041307045Smav 
spa->spa_dsl_pool->dp_root_dir_obj, verify_dataset_name_len, NULL, 2042307045Smav DS_FIND_CHILDREN); 2043307045Smav dsl_pool_config_exit(spa->spa_dsl_pool, FTAG); 2044307045Smav if (error != 0) 2045307045Smav return (error); 2046307045Smav 2047219089Spjd rio = zio_root(spa, NULL, &sle, 2048219089Spjd ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE); 2049219089Spjd 2050268720Sdelphij if (spa_load_verify_metadata) { 2051268720Sdelphij error = traverse_pool(spa, spa->spa_verify_min_txg, 2052268720Sdelphij TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA, 2053268720Sdelphij spa_load_verify_cb, rio); 2054268720Sdelphij } 2055219089Spjd 2056219089Spjd (void) zio_wait(rio); 2057219089Spjd 2058219089Spjd spa->spa_load_meta_errors = sle.sle_meta_count; 2059219089Spjd spa->spa_load_data_errors = sle.sle_data_count; 2060219089Spjd 2061219089Spjd if (!error && sle.sle_meta_count <= policy.zrp_maxmeta && 2062219089Spjd sle.sle_data_count <= policy.zrp_maxdata) { 2063219089Spjd int64_t loss = 0; 2064219089Spjd 2065219089Spjd verify_ok = B_TRUE; 2066219089Spjd spa->spa_load_txg = spa->spa_uberblock.ub_txg; 2067219089Spjd spa->spa_load_txg_ts = spa->spa_uberblock.ub_timestamp; 2068219089Spjd 2069219089Spjd loss = spa->spa_last_ubsync_txg_ts - spa->spa_load_txg_ts; 2070219089Spjd VERIFY(nvlist_add_uint64(spa->spa_load_info, 2071219089Spjd ZPOOL_CONFIG_LOAD_TIME, spa->spa_load_txg_ts) == 0); 2072219089Spjd VERIFY(nvlist_add_int64(spa->spa_load_info, 2073219089Spjd ZPOOL_CONFIG_REWIND_TIME, loss) == 0); 2074219089Spjd VERIFY(nvlist_add_uint64(spa->spa_load_info, 2075219089Spjd ZPOOL_CONFIG_LOAD_DATA_ERRORS, sle.sle_data_count) == 0); 2076219089Spjd } else { 2077219089Spjd spa->spa_load_max_txg = spa->spa_uberblock.ub_txg; 2078219089Spjd } 2079219089Spjd 2080219089Spjd if (error) { 2081219089Spjd if (error != ENXIO && error != EIO) 2082249195Smm error = SET_ERROR(EIO); 2083219089Spjd return (error); 2084219089Spjd } 2085219089Spjd 2086219089Spjd return (verify_ok ? 0 : EIO); 2087219089Spjd} 2088219089Spjd 2089185029Spjd/* 2090219089Spjd * Find a value in the pool props object. 2091168404Spjd */ 2092219089Spjdstatic void 2093219089Spjdspa_prop_find(spa_t *spa, zpool_prop_t prop, uint64_t *val) 2094219089Spjd{ 2095219089Spjd (void) zap_lookup(spa->spa_meta_objset, spa->spa_pool_props_object, 2096219089Spjd zpool_prop_to_name(prop), sizeof (uint64_t), 1, val); 2097219089Spjd} 2098219089Spjd 2099219089Spjd/* 2100219089Spjd * Find a value in the pool directory object. 2101219089Spjd */ 2102168404Spjdstatic int 2103219089Spjdspa_dir_prop(spa_t *spa, const char *name, uint64_t *val) 2104168404Spjd{ 2105219089Spjd return (zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT, 2106219089Spjd name, sizeof (uint64_t), 1, val)); 2107219089Spjd} 2108168404Spjd 2109219089Spjdstatic int 2110219089Spjdspa_vdev_err(vdev_t *vdev, vdev_aux_t aux, int err) 2111219089Spjd{ 2112219089Spjd vdev_set_state(vdev, B_TRUE, VDEV_STATE_CANT_OPEN, aux); 2113219089Spjd return (err); 2114219089Spjd} 2115219089Spjd 2116219089Spjd/* 2117219089Spjd * Fix up config after a partly-completed split. This is done with the 2118219089Spjd * ZPOOL_CONFIG_SPLIT nvlist. Both the splitting pool and the split-off 2119219089Spjd * pool have that entry in their config, but only the splitting one contains 2120219089Spjd * a list of all the guids of the vdevs that are being split off. 2121219089Spjd * 2122219089Spjd * This function determines what to do with that list: either rejoin 2123219089Spjd * all the disks to the pool, or complete the splitting process. 
To attempt 2124219089Spjd * the rejoin, each disk that is offlined is marked online again, and 2125219089Spjd * we do a reopen() call. If the vdev label for every disk that was 2126219089Spjd * marked online indicates it was successfully split off (VDEV_AUX_SPLIT_POOL) 2127219089Spjd * then we call vdev_split() on each disk, and complete the split. 2128219089Spjd * 2129219089Spjd * Otherwise we leave the config alone, with all the vdevs in place in 2130219089Spjd * the original pool. 2131219089Spjd */ 2132219089Spjdstatic void 2133219089Spjdspa_try_repair(spa_t *spa, nvlist_t *config) 2134219089Spjd{ 2135219089Spjd uint_t extracted; 2136219089Spjd uint64_t *glist; 2137219089Spjd uint_t i, gcount; 2138219089Spjd nvlist_t *nvl; 2139219089Spjd vdev_t **vd; 2140219089Spjd boolean_t attempt_reopen; 2141219089Spjd 2142219089Spjd if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_SPLIT, &nvl) != 0) 2143219089Spjd return; 2144219089Spjd 2145219089Spjd /* check that the config is complete */ 2146219089Spjd if (nvlist_lookup_uint64_array(nvl, ZPOOL_CONFIG_SPLIT_LIST, 2147219089Spjd &glist, &gcount) != 0) 2148219089Spjd return; 2149219089Spjd 2150219089Spjd vd = kmem_zalloc(gcount * sizeof (vdev_t *), KM_SLEEP); 2151219089Spjd 2152219089Spjd /* attempt to online all the vdevs & validate */ 2153219089Spjd attempt_reopen = B_TRUE; 2154219089Spjd for (i = 0; i < gcount; i++) { 2155219089Spjd if (glist[i] == 0) /* vdev is hole */ 2156219089Spjd continue; 2157219089Spjd 2158219089Spjd vd[i] = spa_lookup_by_guid(spa, glist[i], B_FALSE); 2159219089Spjd if (vd[i] == NULL) { 2160219089Spjd /* 2161219089Spjd * Don't bother attempting to reopen the disks; 2162219089Spjd * just do the split. 2163219089Spjd */ 2164219089Spjd attempt_reopen = B_FALSE; 2165219089Spjd } else { 2166219089Spjd /* attempt to re-online it */ 2167219089Spjd vd[i]->vdev_offline = B_FALSE; 2168219089Spjd } 2169219089Spjd } 2170219089Spjd 2171219089Spjd if (attempt_reopen) { 2172219089Spjd vdev_reopen(spa->spa_root_vdev); 2173219089Spjd 2174219089Spjd /* check each device to see what state it's in */ 2175219089Spjd for (extracted = 0, i = 0; i < gcount; i++) { 2176219089Spjd if (vd[i] != NULL && 2177219089Spjd vd[i]->vdev_stat.vs_aux != VDEV_AUX_SPLIT_POOL) 2178219089Spjd break; 2179219089Spjd ++extracted; 2180219089Spjd } 2181219089Spjd } 2182219089Spjd 2183209962Smm /* 2184219089Spjd * If every disk has been moved to the new pool, or if we never 2185219089Spjd * even attempted to look at them, then we split them off for 2186219089Spjd * good. 
2187209962Smm */ 2188219089Spjd if (!attempt_reopen || gcount == extracted) { 2189219089Spjd for (i = 0; i < gcount; i++) 2190219089Spjd if (vd[i] != NULL) 2191219089Spjd vdev_split(vd[i]); 2192219089Spjd vdev_reopen(spa->spa_root_vdev); 2193219089Spjd } 2194209962Smm 2195219089Spjd kmem_free(vd, gcount * sizeof (vdev_t *)); 2196219089Spjd} 2197185029Spjd 2198219089Spjdstatic int 2199219089Spjdspa_load(spa_t *spa, spa_load_state_t state, spa_import_type_t type, 2200219089Spjd boolean_t mosconfig) 2201219089Spjd{ 2202219089Spjd nvlist_t *config = spa->spa_config; 2203219089Spjd char *ereport = FM_EREPORT_ZFS_POOL; 2204228103Smm char *comment; 2205219089Spjd int error; 2206219089Spjd uint64_t pool_guid; 2207219089Spjd nvlist_t *nvl; 2208168404Spjd 2209219089Spjd if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &pool_guid)) 2210249195Smm return (SET_ERROR(EINVAL)); 2211168404Spjd 2212228103Smm ASSERT(spa->spa_comment == NULL); 2213228103Smm if (nvlist_lookup_string(config, ZPOOL_CONFIG_COMMENT, &comment) == 0) 2214228103Smm spa->spa_comment = spa_strdup(comment); 2215228103Smm 2216168404Spjd /* 2217168404Spjd * Versioning wasn't explicitly added to the label until later, so if 2218168404Spjd * it's not present treat it as the initial version. 2219168404Spjd */ 2220219089Spjd if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, 2221219089Spjd &spa->spa_ubsync.ub_version) != 0) 2222219089Spjd spa->spa_ubsync.ub_version = SPA_VERSION_INITIAL; 2223168404Spjd 2224168404Spjd (void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG, 2225168404Spjd &spa->spa_config_txg); 2226168404Spjd 2227168404Spjd if ((state == SPA_LOAD_IMPORT || state == SPA_LOAD_TRYIMPORT) && 2228168404Spjd spa_guid_exists(pool_guid, 0)) { 2229249195Smm error = SET_ERROR(EEXIST); 2230219089Spjd } else { 2231228103Smm spa->spa_config_guid = pool_guid; 2232219089Spjd 2233219089Spjd if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_SPLIT, 2234219089Spjd &nvl) == 0) { 2235219089Spjd VERIFY(nvlist_dup(nvl, &spa->spa_config_splitting, 2236219089Spjd KM_SLEEP) == 0); 2237219089Spjd } 2238219089Spjd 2239236884Smm nvlist_free(spa->spa_load_info); 2240236884Smm spa->spa_load_info = fnvlist_alloc(); 2241236884Smm 2242219089Spjd gethrestime(&spa->spa_loaded_ts); 2243219089Spjd error = spa_load_impl(spa, pool_guid, config, state, type, 2244219089Spjd mosconfig, &ereport); 2245168404Spjd } 2246168404Spjd 2247286575Smav /* 2248286575Smav * Don't count references from objsets that are already closed 2249286575Smav * and are making their way through the eviction process. 2250286575Smav */ 2251286575Smav spa_evicting_os_wait(spa); 2252219089Spjd spa->spa_minref = refcount_count(&spa->spa_refcount); 2253219089Spjd if (error) { 2254219089Spjd if (error != EEXIST) { 2255219089Spjd spa->spa_loaded_ts.tv_sec = 0; 2256219089Spjd spa->spa_loaded_ts.tv_nsec = 0; 2257219089Spjd } 2258219089Spjd if (error != EBADF) { 2259219089Spjd zfs_ereport_post(ereport, spa, NULL, NULL, 0, 0); 2260219089Spjd } 2261219089Spjd } 2262219089Spjd spa->spa_load_state = error ? SPA_LOAD_ERROR : SPA_LOAD_NONE; 2263219089Spjd spa->spa_ena = 0; 2264168404Spjd 2265219089Spjd return (error); 2266219089Spjd} 2267219089Spjd 2268219089Spjd/* 2269299441Smav * Count the number of per-vdev ZAPs associated with all of the vdevs in the 2270299441Smav * vdev tree rooted in the given vd, and ensure that each ZAP is present in the 2271299441Smav * spa's per-vdev ZAP list. 
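 * The AVZ (spa_all_vdev_zaps) is an int-keyed ZAP used as a set of
 * object numbers, so membership is tested with zap_lookup_int().  A
 * minimal sketch of such a membership check (helper name hypothetical,
 * not part of this file):
 *
 *	static boolean_t
 *	avz_contains(spa_t *spa, uint64_t zapobj)
 *	{
 *		return (zap_lookup_int(spa->spa_meta_objset,
 *		    spa->spa_all_vdev_zaps, zapobj) == 0);
 *	}
 *
 * The ASSERT0()s in the function below are exactly this check applied
 * to every top-level and leaf ZAP encountered during the walk.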
2272299441Smav */ 2273299441Smavstatic uint64_t 2274299441Smavvdev_count_verify_zaps(vdev_t *vd) 2275299441Smav{ 2276299441Smav spa_t *spa = vd->vdev_spa; 2277299441Smav uint64_t total = 0; 2278299441Smav if (vd->vdev_top_zap != 0) { 2279299441Smav total++; 2280299441Smav ASSERT0(zap_lookup_int(spa->spa_meta_objset, 2281299441Smav spa->spa_all_vdev_zaps, vd->vdev_top_zap)); 2282299441Smav } 2283299441Smav if (vd->vdev_leaf_zap != 0) { 2284299441Smav total++; 2285299441Smav ASSERT0(zap_lookup_int(spa->spa_meta_objset, 2286299441Smav spa->spa_all_vdev_zaps, vd->vdev_leaf_zap)); 2287299441Smav } 2288299441Smav 2289299441Smav for (uint64_t i = 0; i < vd->vdev_children; i++) { 2290299441Smav total += vdev_count_verify_zaps(vd->vdev_child[i]); 2291299441Smav } 2292299441Smav 2293299441Smav return (total); 2294299441Smav} 2295299441Smav 2296299441Smav/* 2297219089Spjd * Load an existing storage pool, using the pool's builtin spa_config as a 2298219089Spjd * source of configuration information. 2299219089Spjd */ 2300219089Spjdstatic int 2301219089Spjdspa_load_impl(spa_t *spa, uint64_t pool_guid, nvlist_t *config, 2302219089Spjd spa_load_state_t state, spa_import_type_t type, boolean_t mosconfig, 2303219089Spjd char **ereport) 2304219089Spjd{ 2305219089Spjd int error = 0; 2306219089Spjd nvlist_t *nvroot = NULL; 2307236884Smm nvlist_t *label; 2308219089Spjd vdev_t *rvd; 2309219089Spjd uberblock_t *ub = &spa->spa_uberblock; 2310219089Spjd uint64_t children, config_cache_txg = spa->spa_config_txg; 2311219089Spjd int orig_mode = spa->spa_mode; 2312219089Spjd int parse; 2313219089Spjd uint64_t obj; 2314236884Smm boolean_t missing_feat_write = B_FALSE; 2315219089Spjd 2316168404Spjd /* 2317219089Spjd * If this is an untrusted config, access the pool in read-only mode. 2318219089Spjd * This prevents things like resilvering recently removed devices. 2319219089Spjd */ 2320219089Spjd if (!mosconfig) 2321219089Spjd spa->spa_mode = FREAD; 2322219089Spjd 2323219089Spjd ASSERT(MUTEX_HELD(&spa_namespace_lock)); 2324219089Spjd 2325219089Spjd spa->spa_load_state = state; 2326219089Spjd 2327219089Spjd if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvroot)) 2328249195Smm return (SET_ERROR(EINVAL)); 2329219089Spjd 2330219089Spjd parse = (type == SPA_IMPORT_EXISTING ? 2331219089Spjd VDEV_ALLOC_LOAD : VDEV_ALLOC_SPLIT); 2332219089Spjd 2333219089Spjd /* 2334209962Smm * Create "The Godfather" zio to hold all async IOs 2335209962Smm */ 2336272598Sdelphij spa->spa_async_zio_root = kmem_alloc(max_ncpus * sizeof (void *), 2337272598Sdelphij KM_SLEEP); 2338272598Sdelphij for (int i = 0; i < max_ncpus; i++) { 2339272598Sdelphij spa->spa_async_zio_root[i] = zio_root(spa, NULL, NULL, 2340272598Sdelphij ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | 2341272598Sdelphij ZIO_FLAG_GODFATHER); 2342272598Sdelphij } 2343209962Smm 2344209962Smm /* 2345168404Spjd * Parse the configuration into a vdev tree. We explicitly set the 2346168404Spjd * value that will be returned by spa_version() since parsing the 2347168404Spjd * configuration requires knowing the version number. 
2348168404Spjd */ 2349185029Spjd spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 2350219089Spjd error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, parse); 2351185029Spjd spa_config_exit(spa, SCL_ALL, FTAG); 2352168404Spjd 2353168404Spjd if (error != 0) 2354219089Spjd return (error); 2355168404Spjd 2356168404Spjd ASSERT(spa->spa_root_vdev == rvd); 2357284304Savg ASSERT3U(spa->spa_min_ashift, >=, SPA_MINBLOCKSHIFT); 2358284304Savg ASSERT3U(spa->spa_max_ashift, <=, SPA_MAXBLOCKSHIFT); 2359168404Spjd 2360219089Spjd if (type != SPA_IMPORT_ASSEMBLE) { 2361219089Spjd ASSERT(spa_guid(spa) == pool_guid); 2362219089Spjd } 2363219089Spjd 2364168404Spjd /* 2365168404Spjd * Try to open all vdevs, loading each label in the process. 2366168404Spjd */ 2367185029Spjd spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 2368168926Spjd error = vdev_open(rvd); 2369185029Spjd spa_config_exit(spa, SCL_ALL, FTAG); 2370168926Spjd if (error != 0) 2371219089Spjd return (error); 2372168404Spjd 2373168404Spjd /* 2374209962Smm * We need to validate the vdev labels against the configuration that 2375209962Smm * we have in hand, which is dependent on the setting of mosconfig. If 2376209962Smm * mosconfig is true then we're validating the vdev labels based on 2377219089Spjd * that config. Otherwise, we're validating against the cached config 2378209962Smm * (zpool.cache) that was read when we loaded the zfs module, and then 2379209962Smm * later we will recursively call spa_load() and validate against 2380209962Smm * the vdev config. 2381219089Spjd * 2382219089Spjd * If we're assembling a new pool that's been split off from an 2383219089Spjd * existing pool, the labels haven't yet been updated so we skip 2384219089Spjd * validation for now. 2385168404Spjd */ 2386219089Spjd if (type != SPA_IMPORT_ASSEMBLE) { 2387219089Spjd spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 2388230514Smm error = vdev_validate(rvd, mosconfig); 2389219089Spjd spa_config_exit(spa, SCL_ALL, FTAG); 2390168404Spjd 2391219089Spjd if (error != 0) 2392219089Spjd return (error); 2393219089Spjd 2394219089Spjd if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN) 2395249195Smm return (SET_ERROR(ENXIO)); 2396168404Spjd } 2397168404Spjd 2398168404Spjd /* 2399168404Spjd * Find the best uberblock. 2400168404Spjd */ 2401236884Smm vdev_uberblock_load(rvd, ub, &label); 2402168404Spjd 2403168404Spjd /* 2404168404Spjd * If we weren't able to find a single valid uberblock, return failure. 2405168404Spjd */ 2406236884Smm if (ub->ub_txg == 0) { 2407236884Smm nvlist_free(label); 2408219089Spjd return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, ENXIO)); 2409236884Smm } 2410168404Spjd 2411168404Spjd /* 2412236884Smm * If the pool has an unsupported version we can't open it. 2413168404Spjd */ 2414236884Smm if (!SPA_VERSION_IS_SUPPORTED(ub->ub_version)) { 2415236884Smm nvlist_free(label); 2416219089Spjd return (spa_vdev_err(rvd, VDEV_AUX_VERSION_NEWER, ENOTSUP)); 2417236884Smm } 2418168404Spjd 2419236884Smm if (ub->ub_version >= SPA_VERSION_FEATURES) { 2420236884Smm nvlist_t *features; 2421236884Smm 2422236884Smm /* 2423236884Smm * If we weren't able to find what's necessary for reading the 2424236884Smm * MOS in the label, return failure. 
2425236884Smm */ 2426236884Smm if (label == NULL || nvlist_lookup_nvlist(label, 2427236884Smm ZPOOL_CONFIG_FEATURES_FOR_READ, &features) != 0) { 2428236884Smm nvlist_free(label); 2429236884Smm return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, 2430236884Smm ENXIO)); 2431236884Smm } 2432236884Smm 2433236884Smm /* 2434236884Smm * Update our in-core representation with the definitive values 2435236884Smm * from the label. 2436236884Smm */ 2437236884Smm nvlist_free(spa->spa_label_features); 2438236884Smm VERIFY(nvlist_dup(features, &spa->spa_label_features, 0) == 0); 2439236884Smm } 2440236884Smm 2441236884Smm nvlist_free(label); 2442236884Smm 2443168404Spjd /* 2444236884Smm * Look through entries in the label nvlist's features_for_read. If 2445236884Smm * there is a feature listed there which we don't understand then we 2446236884Smm * cannot open a pool. 2447236884Smm */ 2448236884Smm if (ub->ub_version >= SPA_VERSION_FEATURES) { 2449236884Smm nvlist_t *unsup_feat; 2450236884Smm 2451236884Smm VERIFY(nvlist_alloc(&unsup_feat, NV_UNIQUE_NAME, KM_SLEEP) == 2452236884Smm 0); 2453236884Smm 2454236884Smm for (nvpair_t *nvp = nvlist_next_nvpair(spa->spa_label_features, 2455236884Smm NULL); nvp != NULL; 2456236884Smm nvp = nvlist_next_nvpair(spa->spa_label_features, nvp)) { 2457236884Smm if (!zfeature_is_supported(nvpair_name(nvp))) { 2458236884Smm VERIFY(nvlist_add_string(unsup_feat, 2459236884Smm nvpair_name(nvp), "") == 0); 2460236884Smm } 2461236884Smm } 2462236884Smm 2463236884Smm if (!nvlist_empty(unsup_feat)) { 2464236884Smm VERIFY(nvlist_add_nvlist(spa->spa_load_info, 2465236884Smm ZPOOL_CONFIG_UNSUP_FEAT, unsup_feat) == 0); 2466236884Smm nvlist_free(unsup_feat); 2467236884Smm return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT, 2468236884Smm ENOTSUP)); 2469236884Smm } 2470236884Smm 2471236884Smm nvlist_free(unsup_feat); 2472236884Smm } 2473236884Smm 2474236884Smm /* 2475168404Spjd * If the vdev guid sum doesn't match the uberblock, we have an 2476219089Spjd * incomplete configuration. We first check to see if the pool 2477219089Spjd * is aware of the complete config (i.e ZPOOL_CONFIG_VDEV_CHILDREN). 2478219089Spjd * If it is, defer the vdev_guid_sum check till later so we 2479219089Spjd * can handle missing vdevs. 2480168404Spjd */ 2481219089Spjd if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VDEV_CHILDREN, 2482219089Spjd &children) != 0 && mosconfig && type != SPA_IMPORT_ASSEMBLE && 2483219089Spjd rvd->vdev_guid_sum != ub->ub_guid_sum) 2484219089Spjd return (spa_vdev_err(rvd, VDEV_AUX_BAD_GUID_SUM, ENXIO)); 2485219089Spjd 2486219089Spjd if (type != SPA_IMPORT_ASSEMBLE && spa->spa_config_splitting) { 2487219089Spjd spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 2488219089Spjd spa_try_repair(spa, config); 2489219089Spjd spa_config_exit(spa, SCL_ALL, FTAG); 2490219089Spjd nvlist_free(spa->spa_config_splitting); 2491219089Spjd spa->spa_config_splitting = NULL; 2492168404Spjd } 2493168404Spjd 2494168404Spjd /* 2495168404Spjd * Initialize internal SPA structures. 2496168404Spjd */ 2497168404Spjd spa->spa_state = POOL_STATE_ACTIVE; 2498168404Spjd spa->spa_ubsync = spa->spa_uberblock; 2499219089Spjd spa->spa_verify_min_txg = spa->spa_extreme_rewind ? 2500219089Spjd TXG_INITIAL - 1 : spa_last_synced_txg(spa) - TXG_DEFER_SIZE - 1; 2501219089Spjd spa->spa_first_txg = spa->spa_last_ubsync_txg ? 
2502219089Spjd spa->spa_last_ubsync_txg : spa_last_synced_txg(spa) + 1; 2503219089Spjd spa->spa_claim_max_txg = spa->spa_first_txg; 2504219089Spjd spa->spa_prev_software_version = ub->ub_software_version; 2505219089Spjd 2506236884Smm error = dsl_pool_init(spa, spa->spa_first_txg, &spa->spa_dsl_pool); 2507219089Spjd if (error) 2508219089Spjd return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2509168404Spjd spa->spa_meta_objset = spa->spa_dsl_pool->dp_meta_objset; 2510168404Spjd 2511219089Spjd if (spa_dir_prop(spa, DMU_POOL_CONFIG, &spa->spa_config_object) != 0) 2512219089Spjd return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2513168404Spjd 2514236884Smm if (spa_version(spa) >= SPA_VERSION_FEATURES) { 2515236884Smm boolean_t missing_feat_read = B_FALSE; 2516238926Smm nvlist_t *unsup_feat, *enabled_feat; 2517236884Smm 2518236884Smm if (spa_dir_prop(spa, DMU_POOL_FEATURES_FOR_READ, 2519236884Smm &spa->spa_feat_for_read_obj) != 0) { 2520236884Smm return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2521236884Smm } 2522236884Smm 2523236884Smm if (spa_dir_prop(spa, DMU_POOL_FEATURES_FOR_WRITE, 2524236884Smm &spa->spa_feat_for_write_obj) != 0) { 2525236884Smm return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2526236884Smm } 2527236884Smm 2528236884Smm if (spa_dir_prop(spa, DMU_POOL_FEATURE_DESCRIPTIONS, 2529236884Smm &spa->spa_feat_desc_obj) != 0) { 2530236884Smm return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2531236884Smm } 2532236884Smm 2533238926Smm enabled_feat = fnvlist_alloc(); 2534238926Smm unsup_feat = fnvlist_alloc(); 2535236884Smm 2536259813Sdelphij if (!spa_features_check(spa, B_FALSE, 2537238926Smm unsup_feat, enabled_feat)) 2538236884Smm missing_feat_read = B_TRUE; 2539236884Smm 2540236884Smm if (spa_writeable(spa) || state == SPA_LOAD_TRYIMPORT) { 2541259813Sdelphij if (!spa_features_check(spa, B_TRUE, 2542238926Smm unsup_feat, enabled_feat)) { 2543236884Smm missing_feat_write = B_TRUE; 2544238926Smm } 2545236884Smm } 2546236884Smm 2547238926Smm fnvlist_add_nvlist(spa->spa_load_info, 2548238926Smm ZPOOL_CONFIG_ENABLED_FEAT, enabled_feat); 2549238926Smm 2550236884Smm if (!nvlist_empty(unsup_feat)) { 2551238926Smm fnvlist_add_nvlist(spa->spa_load_info, 2552238926Smm ZPOOL_CONFIG_UNSUP_FEAT, unsup_feat); 2553236884Smm } 2554236884Smm 2555238926Smm fnvlist_free(enabled_feat); 2556238926Smm fnvlist_free(unsup_feat); 2557236884Smm 2558236884Smm if (!missing_feat_read) { 2559236884Smm fnvlist_add_boolean(spa->spa_load_info, 2560236884Smm ZPOOL_CONFIG_CAN_RDONLY); 2561236884Smm } 2562236884Smm 2563236884Smm /* 2564236884Smm * If the state is SPA_LOAD_TRYIMPORT, our objective is 2565236884Smm * twofold: to determine whether the pool is available for 2566236884Smm * import in read-write mode and (if it is not) whether the 2567236884Smm * pool is available for import in read-only mode. If the pool 2568236884Smm * is available for import in read-write mode, it is displayed 2569236884Smm * as available in userland; if it is not available for import 2570236884Smm * in read-only mode, it is displayed as unavailable in 2571236884Smm * userland. If the pool is available for import in read-only 2572236884Smm * mode but not read-write mode, it is displayed as unavailable 2573236884Smm * in userland with a special note that the pool is actually 2574236884Smm * available for open in read-only mode. 
2575236884Smm * 2576236884Smm * As a result, if the state is SPA_LOAD_TRYIMPORT and we are 2577236884Smm * missing a feature for write, we must first determine whether 2578236884Smm * the pool can be opened read-only before returning to 2579236884Smm * userland in order to know whether to display the 2580236884Smm * abovementioned note. 2581236884Smm */ 2582236884Smm if (missing_feat_read || (missing_feat_write && 2583236884Smm spa_writeable(spa))) { 2584236884Smm return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT, 2585236884Smm ENOTSUP)); 2586236884Smm } 2587260150Sdelphij 2588260150Sdelphij /* 2589260150Sdelphij * Load refcounts for ZFS features from disk into an in-memory 2590260150Sdelphij * cache during SPA initialization. 2591260150Sdelphij */ 2592260150Sdelphij for (spa_feature_t i = 0; i < SPA_FEATURES; i++) { 2593260150Sdelphij uint64_t refcount; 2594260150Sdelphij 2595260150Sdelphij error = feature_get_refcount_from_disk(spa, 2596260150Sdelphij &spa_feature_table[i], &refcount); 2597260150Sdelphij if (error == 0) { 2598260150Sdelphij spa->spa_feat_refcount_cache[i] = refcount; 2599260150Sdelphij } else if (error == ENOTSUP) { 2600260150Sdelphij spa->spa_feat_refcount_cache[i] = 2601260150Sdelphij SPA_FEATURE_DISABLED; 2602260150Sdelphij } else { 2603260150Sdelphij return (spa_vdev_err(rvd, 2604260150Sdelphij VDEV_AUX_CORRUPT_DATA, EIO)); 2605260150Sdelphij } 2606260150Sdelphij } 2607236884Smm } 2608236884Smm 2609260150Sdelphij if (spa_feature_is_active(spa, SPA_FEATURE_ENABLED_TXG)) { 2610260150Sdelphij if (spa_dir_prop(spa, DMU_POOL_FEATURE_ENABLED_TXG, 2611268075Sdelphij &spa->spa_feat_enabled_txg_obj) != 0) 2612260150Sdelphij return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2613260150Sdelphij } 2614260150Sdelphij 2615236884Smm spa->spa_is_initializing = B_TRUE; 2616236884Smm error = dsl_pool_open(spa->spa_dsl_pool); 2617236884Smm spa->spa_is_initializing = B_FALSE; 2618236884Smm if (error != 0) 2619236884Smm return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2620236884Smm 2621168404Spjd if (!mosconfig) { 2622168498Spjd uint64_t hostid; 2623219089Spjd nvlist_t *policy = NULL, *nvconfig; 2624168404Spjd 2625219089Spjd if (load_nvlist(spa, spa->spa_config_object, &nvconfig) != 0) 2626219089Spjd return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2627168404Spjd 2628219089Spjd if (!spa_is_root(spa) && nvlist_lookup_uint64(nvconfig, 2629185029Spjd ZPOOL_CONFIG_HOSTID, &hostid) == 0) { 2630168498Spjd char *hostname; 2631168498Spjd unsigned long myhostid = 0; 2632168498Spjd 2633219089Spjd VERIFY(nvlist_lookup_string(nvconfig, 2634168498Spjd ZPOOL_CONFIG_HOSTNAME, &hostname) == 0); 2635168498Spjd 2636219089Spjd#ifdef _KERNEL 2637219089Spjd myhostid = zone_get_hostid(NULL); 2638219089Spjd#else /* _KERNEL */ 2639219089Spjd /* 2640219089Spjd * We're emulating the system's hostid in userland, so 2641219089Spjd * we can't use zone_get_hostid(). 2642219089Spjd */ 2643168498Spjd (void) ddi_strtoul(hw_serial, NULL, 10, &myhostid); 2644219089Spjd#endif /* _KERNEL */ 2645204073Spjd if (check_hostid && hostid != 0 && myhostid != 0 && 2646219089Spjd hostid != myhostid) { 2647219089Spjd nvlist_free(nvconfig); 2648168498Spjd cmn_err(CE_WARN, "pool '%s' could not be " 2649168498Spjd "loaded as it was last accessed by " 2650185029Spjd "another system (host: %s hostid: 0x%lx). 
" 2651236146Smm "See: http://illumos.org/msg/ZFS-8000-EY", 2652185029Spjd spa_name(spa), hostname, 2653168498Spjd (unsigned long)hostid); 2654249195Smm return (SET_ERROR(EBADF)); 2655168498Spjd } 2656168498Spjd } 2657219089Spjd if (nvlist_lookup_nvlist(spa->spa_config, 2658219089Spjd ZPOOL_REWIND_POLICY, &policy) == 0) 2659219089Spjd VERIFY(nvlist_add_nvlist(nvconfig, 2660219089Spjd ZPOOL_REWIND_POLICY, policy) == 0); 2661168498Spjd 2662219089Spjd spa_config_set(spa, nvconfig); 2663168404Spjd spa_unload(spa); 2664168404Spjd spa_deactivate(spa); 2665209962Smm spa_activate(spa, orig_mode); 2666168404Spjd 2667219089Spjd return (spa_load(spa, state, SPA_IMPORT_EXISTING, B_TRUE)); 2668168404Spjd } 2669168404Spjd 2670289422Smav /* Grab the secret checksum salt from the MOS. */ 2671289422Smav error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT, 2672289422Smav DMU_POOL_CHECKSUM_SALT, 1, 2673289422Smav sizeof (spa->spa_cksum_salt.zcs_bytes), 2674289422Smav spa->spa_cksum_salt.zcs_bytes); 2675289422Smav if (error == ENOENT) { 2676289422Smav /* Generate a new salt for subsequent use */ 2677289422Smav (void) random_get_pseudo_bytes(spa->spa_cksum_salt.zcs_bytes, 2678289422Smav sizeof (spa->spa_cksum_salt.zcs_bytes)); 2679289422Smav } else if (error != 0) { 2680289422Smav return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2681289422Smav } 2682289422Smav 2683219089Spjd if (spa_dir_prop(spa, DMU_POOL_SYNC_BPOBJ, &obj) != 0) 2684219089Spjd return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2685219089Spjd error = bpobj_open(&spa->spa_deferred_bpobj, spa->spa_meta_objset, obj); 2686219089Spjd if (error != 0) 2687219089Spjd return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2688168404Spjd 2689168404Spjd /* 2690168404Spjd * Load the bit that tells us to use the new accounting function 2691168404Spjd * (raid-z deflation). If we have an older pool, this will not 2692168404Spjd * be present. 2693168404Spjd */ 2694219089Spjd error = spa_dir_prop(spa, DMU_POOL_DEFLATE, &spa->spa_deflate); 2695219089Spjd if (error != 0 && error != ENOENT) 2696219089Spjd return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2697168404Spjd 2698219089Spjd error = spa_dir_prop(spa, DMU_POOL_CREATION_VERSION, 2699219089Spjd &spa->spa_creation_version); 2700219089Spjd if (error != 0 && error != ENOENT) 2701219089Spjd return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2702219089Spjd 2703168404Spjd /* 2704168404Spjd * Load the persistent error log. If we have an older pool, this will 2705168404Spjd * not be present. 2706168404Spjd */ 2707219089Spjd error = spa_dir_prop(spa, DMU_POOL_ERRLOG_LAST, &spa->spa_errlog_last); 2708219089Spjd if (error != 0 && error != ENOENT) 2709219089Spjd return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2710168404Spjd 2711219089Spjd error = spa_dir_prop(spa, DMU_POOL_ERRLOG_SCRUB, 2712219089Spjd &spa->spa_errlog_scrub); 2713219089Spjd if (error != 0 && error != ENOENT) 2714219089Spjd return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2715168404Spjd 2716168404Spjd /* 2717168404Spjd * Load the history object. If we have an older pool, this 2718168404Spjd * will not be present. 2719168404Spjd */ 2720219089Spjd error = spa_dir_prop(spa, DMU_POOL_HISTORY, &spa->spa_history); 2721219089Spjd if (error != 0 && error != ENOENT) 2722219089Spjd return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2723168404Spjd 2724168404Spjd /* 2725299441Smav * Load the per-vdev ZAP map. 
If we have an older pool, this will not 2726299441Smav * be present; in this case, defer its creation to a later time to 2727299441Smav * avoid dirtying the MOS this early / out of sync context. See 2728299441Smav * spa_sync_config_object. 2729299441Smav */ 2730299441Smav 2731299441Smav /* The sentinel is only available in the MOS config. */ 2732299441Smav nvlist_t *mos_config; 2733299441Smav if (load_nvlist(spa, spa->spa_config_object, &mos_config) != 0) 2734299441Smav return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2735299441Smav 2736299441Smav error = spa_dir_prop(spa, DMU_POOL_VDEV_ZAP_MAP, 2737299441Smav &spa->spa_all_vdev_zaps); 2738299441Smav 2739321540Smav if (error == ENOENT) { 2740321540Smav VERIFY(!nvlist_exists(mos_config, 2741321540Smav ZPOOL_CONFIG_HAS_PER_VDEV_ZAPS)); 2742321540Smav spa->spa_avz_action = AVZ_ACTION_INITIALIZE; 2743321540Smav ASSERT0(vdev_count_verify_zaps(spa->spa_root_vdev)); 2744321540Smav } else if (error != 0) { 2745299441Smav return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2746321540Smav } else if (!nvlist_exists(mos_config, ZPOOL_CONFIG_HAS_PER_VDEV_ZAPS)) { 2747299441Smav /* 2748299441Smav * An older version of ZFS overwrote the sentinel value, so 2749299441Smav * we have orphaned per-vdev ZAPs in the MOS. Defer their 2750299441Smav * destruction to later; see spa_sync_config_object. 2751299441Smav */ 2752299441Smav spa->spa_avz_action = AVZ_ACTION_DESTROY; 2753299441Smav /* 2754299441Smav * We're assuming that no vdevs have had their ZAPs created 2755299441Smav * before this. Better be sure of it. 2756299441Smav */ 2757299441Smav ASSERT0(vdev_count_verify_zaps(spa->spa_root_vdev)); 2758299441Smav } 2759299441Smav nvlist_free(mos_config); 2760299441Smav 2761299441Smav /* 2762219089Spjd * If we're assembling the pool from the split-off vdevs of 2763219089Spjd * an existing pool, we don't want to attach the spares & cache 2764219089Spjd * devices. 2765219089Spjd */ 2766219089Spjd 2767219089Spjd /* 2768168404Spjd * Load any hot spares for this pool. 2769168404Spjd */ 2770219089Spjd error = spa_dir_prop(spa, DMU_POOL_SPARES, &spa->spa_spares.sav_object); 2771219089Spjd if (error != 0 && error != ENOENT) 2772219089Spjd return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2773219089Spjd if (error == 0 && type != SPA_IMPORT_ASSEMBLE) { 2774185029Spjd ASSERT(spa_version(spa) >= SPA_VERSION_SPARES); 2775185029Spjd if (load_nvlist(spa, spa->spa_spares.sav_object, 2776219089Spjd &spa->spa_spares.sav_config) != 0) 2777219089Spjd return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2778168404Spjd 2779185029Spjd spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 2780168404Spjd spa_load_spares(spa); 2781185029Spjd spa_config_exit(spa, SCL_ALL, FTAG); 2782219089Spjd } else if (error == 0) { 2783219089Spjd spa->spa_spares.sav_sync = B_TRUE; 2784168404Spjd } 2785168404Spjd 2786185029Spjd /* 2787185029Spjd * Load any level 2 ARC devices for this pool. 
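 * As with the spares and the other optional MOS entries loaded above,
 * the directory lookup tolerates ENOENT (older pools simply lack the
 * entry) but treats any other failure as corruption.  The recurring
 * shape, shown here with a hypothetical entry name:
 *
 *	error = spa_dir_prop(spa, DMU_POOL_EXAMPLE, &obj);
 *	if (error != 0 && error != ENOENT)
 *		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
 *
 * so ENOENT falls through and the corresponding feature is treated as
 * absent rather than broken.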
2788185029Spjd */ 2789219089Spjd error = spa_dir_prop(spa, DMU_POOL_L2CACHE, 2790185029Spjd &spa->spa_l2cache.sav_object); 2791219089Spjd if (error != 0 && error != ENOENT) 2792219089Spjd return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2793219089Spjd if (error == 0 && type != SPA_IMPORT_ASSEMBLE) { 2794185029Spjd ASSERT(spa_version(spa) >= SPA_VERSION_L2CACHE); 2795185029Spjd if (load_nvlist(spa, spa->spa_l2cache.sav_object, 2796219089Spjd &spa->spa_l2cache.sav_config) != 0) 2797219089Spjd return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2798185029Spjd 2799185029Spjd spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 2800185029Spjd spa_load_l2cache(spa); 2801185029Spjd spa_config_exit(spa, SCL_ALL, FTAG); 2802219089Spjd } else if (error == 0) { 2803219089Spjd spa->spa_l2cache.sav_sync = B_TRUE; 2804185029Spjd } 2805185029Spjd 2806219089Spjd spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION); 2807213197Smm 2808219089Spjd error = spa_dir_prop(spa, DMU_POOL_PROPS, &spa->spa_pool_props_object); 2809219089Spjd if (error && error != ENOENT) 2810219089Spjd return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2811185029Spjd 2812219089Spjd if (error == 0) { 2813219089Spjd uint64_t autoreplace; 2814185029Spjd 2815219089Spjd spa_prop_find(spa, ZPOOL_PROP_BOOTFS, &spa->spa_bootfs); 2816219089Spjd spa_prop_find(spa, ZPOOL_PROP_AUTOREPLACE, &autoreplace); 2817219089Spjd spa_prop_find(spa, ZPOOL_PROP_DELEGATION, &spa->spa_delegation); 2818219089Spjd spa_prop_find(spa, ZPOOL_PROP_FAILUREMODE, &spa->spa_failmode); 2819219089Spjd spa_prop_find(spa, ZPOOL_PROP_AUTOEXPAND, &spa->spa_autoexpand); 2820219089Spjd spa_prop_find(spa, ZPOOL_PROP_DEDUPDITTO, 2821219089Spjd &spa->spa_dedup_ditto); 2822185029Spjd 2823219089Spjd spa->spa_autoreplace = (autoreplace != 0); 2824168404Spjd } 2825168404Spjd 2826168404Spjd /* 2827185029Spjd * If the 'autoreplace' property is set, then post a resource notifying 2828185029Spjd * the ZFS DE that it should not issue any faults for unopenable 2829185029Spjd * devices. We also iterate over the vdevs, and post a sysevent for any 2830185029Spjd * unopenable vdevs so that the normal autoreplace handler can take 2831185029Spjd * over. 2832185029Spjd */ 2833219089Spjd if (spa->spa_autoreplace && state != SPA_LOAD_TRYIMPORT) { 2834185029Spjd spa_check_removed(spa->spa_root_vdev); 2835219089Spjd /* 2836219089Spjd * For the import case, this is done in spa_import(), because 2837219089Spjd * at this point we're using the spare definitions from 2838219089Spjd * the MOS config, not necessarily from the userland config. 2839219089Spjd */ 2840219089Spjd if (state != SPA_LOAD_IMPORT) { 2841219089Spjd spa_aux_check_removed(&spa->spa_spares); 2842219089Spjd spa_aux_check_removed(&spa->spa_l2cache); 2843219089Spjd } 2844219089Spjd } 2845185029Spjd 2846185029Spjd /* 2847168404Spjd * Load the vdev state for all toplevel vdevs. 2848168404Spjd */ 2849168404Spjd vdev_load(rvd); 2850168404Spjd 2851168404Spjd /* 2852168404Spjd * Propagate the leaf DTLs we just loaded all the way up the tree. 2853168404Spjd */ 2854185029Spjd spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 2855168404Spjd vdev_dtl_reassess(rvd, 0, 0, B_FALSE); 2856185029Spjd spa_config_exit(spa, SCL_ALL, FTAG); 2857168404Spjd 2858168404Spjd /* 2859219089Spjd * Load the DDTs (dedup tables). 
2860168404Spjd */ 2861219089Spjd error = ddt_load(spa); 2862219089Spjd if (error != 0) 2863219089Spjd return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2864219089Spjd 2865219089Spjd spa_update_dspace(spa); 2866219089Spjd 2867219089Spjd /* 2868219089Spjd * Validate the config, using the MOS config to fill in any 2869219089Spjd * information which might be missing. If we fail to validate 2870219089Spjd * the config then declare the pool unfit for use. If we're 2871219089Spjd * assembling a pool from a split, the log is not transferred 2872219089Spjd * over. 2873219089Spjd */ 2874219089Spjd if (type != SPA_IMPORT_ASSEMBLE) { 2875219089Spjd nvlist_t *nvconfig; 2876219089Spjd 2877219089Spjd if (load_nvlist(spa, spa->spa_config_object, &nvconfig) != 0) 2878219089Spjd return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2879219089Spjd 2880219089Spjd if (!spa_config_valid(spa, nvconfig)) { 2881219089Spjd nvlist_free(nvconfig); 2882219089Spjd return (spa_vdev_err(rvd, VDEV_AUX_BAD_GUID_SUM, 2883219089Spjd ENXIO)); 2884219089Spjd } 2885219089Spjd nvlist_free(nvconfig); 2886219089Spjd 2887219089Spjd /* 2888236884Smm * Now that we've validated the config, check the state of the 2889219089Spjd * root vdev. If it can't be opened, it indicates one or 2890219089Spjd * more toplevel vdevs are faulted. 2891219089Spjd */ 2892219089Spjd if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN) 2893249195Smm return (SET_ERROR(ENXIO)); 2894219089Spjd 2895286600Smav if (spa_writeable(spa) && spa_check_logs(spa)) { 2896219089Spjd *ereport = FM_EREPORT_ZFS_LOG_REPLAY; 2897219089Spjd return (spa_vdev_err(rvd, VDEV_AUX_BAD_LOG, ENXIO)); 2898219089Spjd } 2899168404Spjd } 2900168404Spjd 2901236884Smm if (missing_feat_write) { 2902236884Smm ASSERT(state == SPA_LOAD_TRYIMPORT); 2903236884Smm 2904236884Smm /* 2905236884Smm * At this point, we know that we can open the pool in 2906236884Smm * read-only mode but not read-write mode. We now have enough 2907236884Smm * information and can return to userland. 2908236884Smm */ 2909236884Smm return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT, ENOTSUP)); 2910236884Smm } 2911236884Smm 2912219089Spjd /* 2913219089Spjd * We've successfully opened the pool, verify that we're ready 2914219089Spjd * to start pushing transactions. 2915219089Spjd */ 2916219089Spjd if (state != SPA_LOAD_TRYIMPORT) { 2917219089Spjd if (error = spa_load_verify(spa)) 2918219089Spjd return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, 2919219089Spjd error)); 2920219089Spjd } 2921219089Spjd 2922219089Spjd if (spa_writeable(spa) && (state == SPA_LOAD_RECOVER || 2923219089Spjd spa->spa_load_max_txg == UINT64_MAX)) { 2924168404Spjd dmu_tx_t *tx; 2925168404Spjd int need_update = B_FALSE; 2926286686Smav dsl_pool_t *dp = spa_get_dsl(spa); 2927168404Spjd 2928209962Smm ASSERT(state != SPA_LOAD_TRYIMPORT); 2929209962Smm 2930168404Spjd /* 2931168404Spjd * Claim log blocks that haven't been committed yet. 2932168404Spjd * This must all happen in a single txg. 2933219089Spjd * Note: spa_claim_max_txg is updated by spa_claim_notify(), 2934219089Spjd * invoked from zil_claim_log_block()'s i/o done callback. 2935219089Spjd * Price of rollback is that we abandon the log. 
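 * The claim is driven through dmu_objset_find_dp(), whose callbacks
 * share the per-dataset signature used by zil_claim() here and by
 * verify_dataset_name_len() earlier in this file.  A minimal sketch of
 * such a callback (name hypothetical, not part of this file):
 *
 *	static int
 *	example_claim_cb(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg)
 *	{
 *		dmu_tx_t *tx = arg;
 *
 *		(void) tx;
 *		return (0);
 *	}
 *
 * with the per-dataset work done against tx in place of the (void)
 * cast; a nonzero return is reported back as the walk's error.  While
 * the claim runs, spa_claim_max_txg is raised from the zio done
 * callbacks, and txg_wait_synced() below waits for that high-water
 * mark.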
2936168404Spjd */ 2937219089Spjd spa->spa_claiming = B_TRUE; 2938219089Spjd 2939286686Smav tx = dmu_tx_create_assigned(dp, spa_first_txg(spa)); 2940286686Smav (void) dmu_objset_find_dp(dp, dp->dp_root_dir_obj, 2941168404Spjd zil_claim, tx, DS_FIND_CHILDREN); 2942168404Spjd dmu_tx_commit(tx); 2943168404Spjd 2944219089Spjd spa->spa_claiming = B_FALSE; 2945219089Spjd 2946219089Spjd spa_set_log_state(spa, SPA_LOG_GOOD); 2947168404Spjd spa->spa_sync_on = B_TRUE; 2948168404Spjd txg_sync_start(spa->spa_dsl_pool); 2949168404Spjd 2950168404Spjd /* 2951219089Spjd * Wait for all claims to sync. We sync up to the highest 2952219089Spjd * claimed log block birth time so that claimed log blocks 2953219089Spjd * don't appear to be from the future. spa_claim_max_txg 2954219089Spjd * will have been set for us by either zil_check_log_chain() 2955219089Spjd * (invoked from spa_check_logs()) or zil_claim() above. 2956168404Spjd */ 2957219089Spjd txg_wait_synced(spa->spa_dsl_pool, spa->spa_claim_max_txg); 2958168404Spjd 2959168404Spjd /* 2960168404Spjd * If the config cache is stale, or we have uninitialized 2961168404Spjd * metaslabs (see spa_vdev_add()), then update the config. 2962209962Smm * 2963219089Spjd * If this is a verbatim import, trust the current 2964209962Smm * in-core spa_config and update the disk labels. 2965168404Spjd */ 2966168404Spjd if (config_cache_txg != spa->spa_config_txg || 2967219089Spjd state == SPA_LOAD_IMPORT || 2968219089Spjd state == SPA_LOAD_RECOVER || 2969219089Spjd (spa->spa_import_flags & ZFS_IMPORT_VERBATIM)) 2970168404Spjd need_update = B_TRUE; 2971168404Spjd 2972209962Smm for (int c = 0; c < rvd->vdev_children; c++) 2973168404Spjd if (rvd->vdev_child[c]->vdev_ms_array == 0) 2974168404Spjd need_update = B_TRUE; 2975168404Spjd 2976168404Spjd /* 2977168404Spjd * Update the config cache asychronously in case we're the 2978168404Spjd * root pool, in which case the config cache isn't writable yet. 2979168404Spjd */ 2980168404Spjd if (need_update) 2981168404Spjd spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE); 2982208683Spjd 2983208683Spjd /* 2984208683Spjd * Check all DTLs to see if anything needs resilvering. 2985208683Spjd */ 2986219089Spjd if (!dsl_scan_resilvering(spa->spa_dsl_pool) && 2987219089Spjd vdev_resilver_needed(rvd, NULL, NULL)) 2988208683Spjd spa_async_request(spa, SPA_ASYNC_RESILVER); 2989219089Spjd 2990219089Spjd /* 2991248571Smm * Log the fact that we booted up (so that we can detect if 2992248571Smm * we rebooted in the middle of an operation). 2993248571Smm */ 2994248571Smm spa_history_log_version(spa, "open"); 2995248571Smm 2996248571Smm /* 2997219089Spjd * Delete any inconsistent datasets. 2998219089Spjd */ 2999219089Spjd (void) dmu_objset_find(spa_name(spa), 3000219089Spjd dsl_destroy_inconsistent, NULL, DS_FIND_CHILDREN); 3001219089Spjd 3002219089Spjd /* 3003219089Spjd * Clean up any stale temporary dataset userrefs. 
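 * Temporary holds are meant to be released by the process that created
 * them; after a crash, export, or rewind that process is gone, so any
 * holds it left behind are dropped here.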
3004219089Spjd */ 3005219089Spjd dsl_pool_clean_tmp_userrefs(spa->spa_dsl_pool); 3006168404Spjd } 3007168404Spjd 3008219089Spjd return (0); 3009219089Spjd} 3010168404Spjd 3011219089Spjdstatic int 3012219089Spjdspa_load_retry(spa_t *spa, spa_load_state_t state, int mosconfig) 3013219089Spjd{ 3014219089Spjd int mode = spa->spa_mode; 3015219089Spjd 3016219089Spjd spa_unload(spa); 3017219089Spjd spa_deactivate(spa); 3018219089Spjd 3019268720Sdelphij spa->spa_load_max_txg = spa->spa_uberblock.ub_txg - 1; 3020219089Spjd 3021219089Spjd spa_activate(spa, mode); 3022219089Spjd spa_async_suspend(spa); 3023219089Spjd 3024219089Spjd return (spa_load(spa, state, SPA_IMPORT_EXISTING, mosconfig)); 3025168404Spjd} 3026168404Spjd 3027236884Smm/* 3028236884Smm * If spa_load() fails this function will try loading prior txg's. If 3029236884Smm * 'state' is SPA_LOAD_RECOVER and one of these loads succeeds the pool 3030236884Smm * will be rewound to that txg. If 'state' is not SPA_LOAD_RECOVER this 3031236884Smm * function will not rewind the pool and will return the same error as 3032236884Smm * spa_load(). 3033236884Smm */ 3034219089Spjdstatic int 3035219089Spjdspa_load_best(spa_t *spa, spa_load_state_t state, int mosconfig, 3036219089Spjd uint64_t max_request, int rewind_flags) 3037219089Spjd{ 3038236884Smm nvlist_t *loadinfo = NULL; 3039219089Spjd nvlist_t *config = NULL; 3040219089Spjd int load_error, rewind_error; 3041219089Spjd uint64_t safe_rewind_txg; 3042219089Spjd uint64_t min_txg; 3043219089Spjd 3044219089Spjd if (spa->spa_load_txg && state == SPA_LOAD_RECOVER) { 3045219089Spjd spa->spa_load_max_txg = spa->spa_load_txg; 3046219089Spjd spa_set_log_state(spa, SPA_LOG_CLEAR); 3047219089Spjd } else { 3048219089Spjd spa->spa_load_max_txg = max_request; 3049268720Sdelphij if (max_request != UINT64_MAX) 3050268720Sdelphij spa->spa_extreme_rewind = B_TRUE; 3051219089Spjd } 3052219089Spjd 3053219089Spjd load_error = rewind_error = spa_load(spa, state, SPA_IMPORT_EXISTING, 3054219089Spjd mosconfig); 3055219089Spjd if (load_error == 0) 3056219089Spjd return (0); 3057219089Spjd 3058219089Spjd if (spa->spa_root_vdev != NULL) 3059219089Spjd config = spa_config_generate(spa, NULL, -1ULL, B_TRUE); 3060219089Spjd 3061219089Spjd spa->spa_last_ubsync_txg = spa->spa_uberblock.ub_txg; 3062219089Spjd spa->spa_last_ubsync_txg_ts = spa->spa_uberblock.ub_timestamp; 3063219089Spjd 3064219089Spjd if (rewind_flags & ZPOOL_NEVER_REWIND) { 3065219089Spjd nvlist_free(config); 3066219089Spjd return (load_error); 3067219089Spjd } 3068219089Spjd 3069236884Smm if (state == SPA_LOAD_RECOVER) { 3070236884Smm /* Price of rolling back is discarding txgs, including log */ 3071219089Spjd spa_set_log_state(spa, SPA_LOG_CLEAR); 3072236884Smm } else { 3073236884Smm /* 3074236884Smm * If we aren't rolling back save the load info from our first 3075236884Smm * import attempt so that we can restore it after attempting 3076236884Smm * to rewind. 3077236884Smm */ 3078236884Smm loadinfo = spa->spa_load_info; 3079236884Smm spa->spa_load_info = fnvlist_alloc(); 3080236884Smm } 3081219089Spjd 3082219089Spjd spa->spa_load_max_txg = spa->spa_last_ubsync_txg; 3083219089Spjd safe_rewind_txg = spa->spa_last_ubsync_txg - TXG_DEFER_SIZE; 3084219089Spjd min_txg = (rewind_flags & ZPOOL_EXTREME_REWIND) ? 
3085219089Spjd TXG_INITIAL : safe_rewind_txg; 3086219089Spjd 3087219089Spjd /* 3088219089Spjd * Continue as long as we're finding errors, we're still within 3089219089Spjd * the acceptable rewind range, and we're still finding uberblocks 3090219089Spjd */ 3091219089Spjd while (rewind_error && spa->spa_uberblock.ub_txg >= min_txg && 3092219089Spjd spa->spa_uberblock.ub_txg <= spa->spa_load_max_txg) { 3093219089Spjd if (spa->spa_load_max_txg < safe_rewind_txg) 3094219089Spjd spa->spa_extreme_rewind = B_TRUE; 3095219089Spjd rewind_error = spa_load_retry(spa, state, mosconfig); 3096219089Spjd } 3097219089Spjd 3098219089Spjd spa->spa_extreme_rewind = B_FALSE; 3099219089Spjd spa->spa_load_max_txg = UINT64_MAX; 3100219089Spjd 3101219089Spjd if (config && (rewind_error || state != SPA_LOAD_RECOVER)) 3102219089Spjd spa_config_set(spa, config); 3103219089Spjd 3104236884Smm if (state == SPA_LOAD_RECOVER) { 3105236884Smm ASSERT3P(loadinfo, ==, NULL); 3106236884Smm return (rewind_error); 3107236884Smm } else { 3108236884Smm /* Store the rewind info as part of the initial load info */ 3109236884Smm fnvlist_add_nvlist(loadinfo, ZPOOL_CONFIG_REWIND_INFO, 3110236884Smm spa->spa_load_info); 3111236884Smm 3112236884Smm /* Restore the initial load info */ 3113236884Smm fnvlist_free(spa->spa_load_info); 3114236884Smm spa->spa_load_info = loadinfo; 3115236884Smm 3116236884Smm return (load_error); 3117236884Smm } 3118219089Spjd} 3119219089Spjd 3120168404Spjd/* 3121168404Spjd * Pool Open/Import 3122168404Spjd * 3123168404Spjd * The import case is identical to an open except that the configuration is sent 3124168404Spjd * down from userland, instead of grabbed from the configuration cache. For the 3125168404Spjd * case of an open, the pool configuration will exist in the 3126185029Spjd * POOL_STATE_UNINITIALIZED state. 3127168404Spjd * 3128168404Spjd * The stats information (gen/count/ustats) is used to gather vdev statistics at 3129168404Spjd * the same time open the pool, without having to keep around the spa_t in some 3130168404Spjd * ambiguous state. 3131168404Spjd */ 3132168404Spjdstatic int 3133219089Spjdspa_open_common(const char *pool, spa_t **spapp, void *tag, nvlist_t *nvpolicy, 3134219089Spjd nvlist_t **config) 3135168404Spjd{ 3136168404Spjd spa_t *spa; 3137219089Spjd spa_load_state_t state = SPA_LOAD_OPEN; 3138168404Spjd int error; 3139168404Spjd int locked = B_FALSE; 3140219089Spjd int firstopen = B_FALSE; 3141168404Spjd 3142168404Spjd *spapp = NULL; 3143168404Spjd 3144168404Spjd /* 3145168404Spjd * As disgusting as this is, we need to support recursive calls to this 3146168404Spjd * function because dsl_dir_open() is called during spa_load(), and ends 3147168404Spjd * up calling spa_open() again. The real fix is to figure out how to 3148168404Spjd * avoid dsl_dir_open() calling this in the first place. 3149168404Spjd */ 3150168404Spjd if (mutex_owner(&spa_namespace_lock) != curthread) { 3151168404Spjd mutex_enter(&spa_namespace_lock); 3152168404Spjd locked = B_TRUE; 3153168404Spjd } 3154168404Spjd 3155168404Spjd if ((spa = spa_lookup(pool)) == NULL) { 3156168404Spjd if (locked) 3157168404Spjd mutex_exit(&spa_namespace_lock); 3158249195Smm return (SET_ERROR(ENOENT)); 3159168404Spjd } 3160219089Spjd 3161168404Spjd if (spa->spa_state == POOL_STATE_UNINITIALIZED) { 3162219089Spjd zpool_rewind_policy_t policy; 3163168404Spjd 3164219089Spjd firstopen = B_TRUE; 3165219089Spjd 3166219089Spjd zpool_get_rewind_policy(nvpolicy ? 
nvpolicy : spa->spa_config, 3167219089Spjd &policy); 3168219089Spjd if (policy.zrp_request & ZPOOL_DO_REWIND) 3169219089Spjd state = SPA_LOAD_RECOVER; 3170219089Spjd 3171209962Smm spa_activate(spa, spa_mode_global); 3172168404Spjd 3173219089Spjd if (state != SPA_LOAD_RECOVER) 3174219089Spjd spa->spa_last_ubsync_txg = spa->spa_load_txg = 0; 3175168404Spjd 3176219089Spjd error = spa_load_best(spa, state, B_FALSE, policy.zrp_txg, 3177219089Spjd policy.zrp_request); 3178219089Spjd 3179168404Spjd if (error == EBADF) { 3180168404Spjd /* 3181168404Spjd * If vdev_validate() returns failure (indicated by 3182168404Spjd * EBADF), it indicates that one of the vdevs indicates 3183168404Spjd * that the pool has been exported or destroyed. If 3184168404Spjd * this is the case, the config cache is out of sync and 3185168404Spjd * we should remove the pool from the namespace. 3186168404Spjd */ 3187168404Spjd spa_unload(spa); 3188168404Spjd spa_deactivate(spa); 3189185029Spjd spa_config_sync(spa, B_TRUE, B_TRUE); 3190168404Spjd spa_remove(spa); 3191168404Spjd if (locked) 3192168404Spjd mutex_exit(&spa_namespace_lock); 3193249195Smm return (SET_ERROR(ENOENT)); 3194168404Spjd } 3195168404Spjd 3196168404Spjd if (error) { 3197168404Spjd /* 3198168404Spjd * We can't open the pool, but we still have useful 3199168404Spjd * information: the state of each vdev after the 3200168404Spjd * attempted vdev_open(). Return this to the user. 3201168404Spjd */ 3202219089Spjd if (config != NULL && spa->spa_config) { 3203219089Spjd VERIFY(nvlist_dup(spa->spa_config, config, 3204219089Spjd KM_SLEEP) == 0); 3205219089Spjd VERIFY(nvlist_add_nvlist(*config, 3206219089Spjd ZPOOL_CONFIG_LOAD_INFO, 3207219089Spjd spa->spa_load_info) == 0); 3208219089Spjd } 3209168404Spjd spa_unload(spa); 3210168404Spjd spa_deactivate(spa); 3211219089Spjd spa->spa_last_open_failed = error; 3212168404Spjd if (locked) 3213168404Spjd mutex_exit(&spa_namespace_lock); 3214168404Spjd *spapp = NULL; 3215168404Spjd return (error); 3216168404Spjd } 3217168404Spjd } 3218168404Spjd 3219168404Spjd spa_open_ref(spa, tag); 3220185029Spjd 3221219089Spjd if (config != NULL) 3222219089Spjd *config = spa_config_generate(spa, NULL, -1ULL, B_TRUE); 3223219089Spjd 3224219089Spjd /* 3225219089Spjd * If we've recovered the pool, pass back any information we 3226219089Spjd * gathered while doing the load. 
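 * That information travels in the ZPOOL_CONFIG_LOAD_INFO nvlist attached
 * to the returned config.  As an illustrative sketch only (not a quote of
 * any particular caller), it can be retrieved with:
 *
 *	nvlist_t *info = NULL;
 *	(void) nvlist_lookup_nvlist(*config, ZPOOL_CONFIG_LOAD_INFO, &info);
 *
 * after which 'info' holds the rewind details (or remains NULL).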
3227219089Spjd */ 3228219089Spjd if (state == SPA_LOAD_RECOVER) { 3229219089Spjd VERIFY(nvlist_add_nvlist(*config, ZPOOL_CONFIG_LOAD_INFO, 3230219089Spjd spa->spa_load_info) == 0); 3231219089Spjd } 3232219089Spjd 3233219089Spjd if (locked) { 3234219089Spjd spa->spa_last_open_failed = 0; 3235219089Spjd spa->spa_last_ubsync_txg = 0; 3236219089Spjd spa->spa_load_txg = 0; 3237168404Spjd mutex_exit(&spa_namespace_lock); 3238219089Spjd#ifdef __FreeBSD__ 3239219089Spjd#ifdef _KERNEL 3240219089Spjd if (firstopen) 3241249047Savg zvol_create_minors(spa->spa_name); 3242219089Spjd#endif 3243219089Spjd#endif 3244219089Spjd } 3245168404Spjd 3246168404Spjd *spapp = spa; 3247168404Spjd 3248168404Spjd return (0); 3249168404Spjd} 3250168404Spjd 3251168404Spjdint 3252219089Spjdspa_open_rewind(const char *name, spa_t **spapp, void *tag, nvlist_t *policy, 3253219089Spjd nvlist_t **config) 3254219089Spjd{ 3255219089Spjd return (spa_open_common(name, spapp, tag, policy, config)); 3256219089Spjd} 3257219089Spjd 3258219089Spjdint 3259168404Spjdspa_open(const char *name, spa_t **spapp, void *tag) 3260168404Spjd{ 3261219089Spjd return (spa_open_common(name, spapp, tag, NULL, NULL)); 3262168404Spjd} 3263168404Spjd 3264168404Spjd/* 3265168404Spjd * Lookup the given spa_t, incrementing the inject count in the process, 3266168404Spjd * preventing it from being exported or destroyed. 3267168404Spjd */ 3268168404Spjdspa_t * 3269168404Spjdspa_inject_addref(char *name) 3270168404Spjd{ 3271168404Spjd spa_t *spa; 3272168404Spjd 3273168404Spjd mutex_enter(&spa_namespace_lock); 3274168404Spjd if ((spa = spa_lookup(name)) == NULL) { 3275168404Spjd mutex_exit(&spa_namespace_lock); 3276168404Spjd return (NULL); 3277168404Spjd } 3278168404Spjd spa->spa_inject_ref++; 3279168404Spjd mutex_exit(&spa_namespace_lock); 3280168404Spjd 3281168404Spjd return (spa); 3282168404Spjd} 3283168404Spjd 3284168404Spjdvoid 3285168404Spjdspa_inject_delref(spa_t *spa) 3286168404Spjd{ 3287168404Spjd mutex_enter(&spa_namespace_lock); 3288168404Spjd spa->spa_inject_ref--; 3289168404Spjd mutex_exit(&spa_namespace_lock); 3290168404Spjd} 3291168404Spjd 3292185029Spjd/* 3293185029Spjd * Add spares device information to the nvlist. 3294185029Spjd */ 3295168404Spjdstatic void 3296168404Spjdspa_add_spares(spa_t *spa, nvlist_t *config) 3297168404Spjd{ 3298168404Spjd nvlist_t **spares; 3299168404Spjd uint_t i, nspares; 3300168404Spjd nvlist_t *nvroot; 3301168404Spjd uint64_t guid; 3302168404Spjd vdev_stat_t *vs; 3303168404Spjd uint_t vsc; 3304168404Spjd uint64_t pool; 3305168404Spjd 3306209962Smm ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER)); 3307209962Smm 3308185029Spjd if (spa->spa_spares.sav_count == 0) 3309168404Spjd return; 3310168404Spjd 3311168404Spjd VERIFY(nvlist_lookup_nvlist(config, 3312168404Spjd ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0); 3313185029Spjd VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config, 3314168404Spjd ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0); 3315168404Spjd if (nspares != 0) { 3316168404Spjd VERIFY(nvlist_add_nvlist_array(nvroot, 3317168404Spjd ZPOOL_CONFIG_SPARES, spares, nspares) == 0); 3318168404Spjd VERIFY(nvlist_lookup_nvlist_array(nvroot, 3319168404Spjd ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0); 3320168404Spjd 3321168404Spjd /* 3322168404Spjd * Go through and find any spares which have since been 3323168404Spjd * repurposed as an active spare. If this is the case, update 3324168404Spjd * their status appropriately. 
3325168404Spjd */ 3326168404Spjd for (i = 0; i < nspares; i++) { 3327168404Spjd VERIFY(nvlist_lookup_uint64(spares[i], 3328168404Spjd ZPOOL_CONFIG_GUID, &guid) == 0); 3329185029Spjd if (spa_spare_exists(guid, &pool, NULL) && 3330185029Spjd pool != 0ULL) { 3331168404Spjd VERIFY(nvlist_lookup_uint64_array( 3332219089Spjd spares[i], ZPOOL_CONFIG_VDEV_STATS, 3333168404Spjd (uint64_t **)&vs, &vsc) == 0); 3334168404Spjd vs->vs_state = VDEV_STATE_CANT_OPEN; 3335168404Spjd vs->vs_aux = VDEV_AUX_SPARED; 3336168404Spjd } 3337168404Spjd } 3338168404Spjd } 3339168404Spjd} 3340168404Spjd 3341185029Spjd/* 3342185029Spjd * Add l2cache device information to the nvlist, including vdev stats. 3343185029Spjd */ 3344185029Spjdstatic void 3345185029Spjdspa_add_l2cache(spa_t *spa, nvlist_t *config) 3346185029Spjd{ 3347185029Spjd nvlist_t **l2cache; 3348185029Spjd uint_t i, j, nl2cache; 3349185029Spjd nvlist_t *nvroot; 3350185029Spjd uint64_t guid; 3351185029Spjd vdev_t *vd; 3352185029Spjd vdev_stat_t *vs; 3353185029Spjd uint_t vsc; 3354185029Spjd 3355209962Smm ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER)); 3356209962Smm 3357185029Spjd if (spa->spa_l2cache.sav_count == 0) 3358185029Spjd return; 3359185029Spjd 3360185029Spjd VERIFY(nvlist_lookup_nvlist(config, 3361185029Spjd ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0); 3362185029Spjd VERIFY(nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config, 3363185029Spjd ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0); 3364185029Spjd if (nl2cache != 0) { 3365185029Spjd VERIFY(nvlist_add_nvlist_array(nvroot, 3366185029Spjd ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0); 3367185029Spjd VERIFY(nvlist_lookup_nvlist_array(nvroot, 3368185029Spjd ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0); 3369185029Spjd 3370185029Spjd /* 3371185029Spjd * Update level 2 cache device stats. 3372185029Spjd */ 3373185029Spjd 3374185029Spjd for (i = 0; i < nl2cache; i++) { 3375185029Spjd VERIFY(nvlist_lookup_uint64(l2cache[i], 3376185029Spjd ZPOOL_CONFIG_GUID, &guid) == 0); 3377185029Spjd 3378185029Spjd vd = NULL; 3379185029Spjd for (j = 0; j < spa->spa_l2cache.sav_count; j++) { 3380185029Spjd if (guid == 3381185029Spjd spa->spa_l2cache.sav_vdevs[j]->vdev_guid) { 3382185029Spjd vd = spa->spa_l2cache.sav_vdevs[j]; 3383185029Spjd break; 3384185029Spjd } 3385185029Spjd } 3386185029Spjd ASSERT(vd != NULL); 3387185029Spjd 3388185029Spjd VERIFY(nvlist_lookup_uint64_array(l2cache[i], 3389219089Spjd ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc) 3390219089Spjd == 0); 3391185029Spjd vdev_get_stats(vd, vs); 3392185029Spjd } 3393185029Spjd } 3394185029Spjd} 3395185029Spjd 3396236884Smmstatic void 3397236884Smmspa_add_feature_stats(spa_t *spa, nvlist_t *config) 3398236884Smm{ 3399236884Smm nvlist_t *features; 3400236884Smm zap_cursor_t zc; 3401236884Smm zap_attribute_t za; 3402236884Smm 3403236884Smm ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER)); 3404236884Smm VERIFY(nvlist_alloc(&features, NV_UNIQUE_NAME, KM_SLEEP) == 0); 3405236884Smm 3406253993Smav /* We may be unable to read features if pool is suspended. 
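 * (Reading the feature ZAPs would require MOS I/O, which can hang while a
 * pool is suspended, so an empty feature nvlist is returned instead.)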
*/ 3407253993Smav if (spa_suspended(spa)) 3408253993Smav goto out; 3409253993Smav 3410236884Smm if (spa->spa_feat_for_read_obj != 0) { 3411236884Smm for (zap_cursor_init(&zc, spa->spa_meta_objset, 3412236884Smm spa->spa_feat_for_read_obj); 3413236884Smm zap_cursor_retrieve(&zc, &za) == 0; 3414236884Smm zap_cursor_advance(&zc)) { 3415236884Smm ASSERT(za.za_integer_length == sizeof (uint64_t) && 3416236884Smm za.za_num_integers == 1); 3417236884Smm VERIFY3U(0, ==, nvlist_add_uint64(features, za.za_name, 3418236884Smm za.za_first_integer)); 3419236884Smm } 3420236884Smm zap_cursor_fini(&zc); 3421236884Smm } 3422236884Smm 3423236884Smm if (spa->spa_feat_for_write_obj != 0) { 3424236884Smm for (zap_cursor_init(&zc, spa->spa_meta_objset, 3425236884Smm spa->spa_feat_for_write_obj); 3426236884Smm zap_cursor_retrieve(&zc, &za) == 0; 3427236884Smm zap_cursor_advance(&zc)) { 3428236884Smm ASSERT(za.za_integer_length == sizeof (uint64_t) && 3429236884Smm za.za_num_integers == 1); 3430236884Smm VERIFY3U(0, ==, nvlist_add_uint64(features, za.za_name, 3431236884Smm za.za_first_integer)); 3432236884Smm } 3433236884Smm zap_cursor_fini(&zc); 3434236884Smm } 3435236884Smm 3436253993Smavout: 3437236884Smm VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_FEATURE_STATS, 3438236884Smm features) == 0); 3439236884Smm nvlist_free(features); 3440236884Smm} 3441236884Smm 3442168404Spjdint 3443236884Smmspa_get_stats(const char *name, nvlist_t **config, 3444236884Smm char *altroot, size_t buflen) 3445168404Spjd{ 3446168404Spjd int error; 3447168404Spjd spa_t *spa; 3448168404Spjd 3449168404Spjd *config = NULL; 3450219089Spjd error = spa_open_common(name, &spa, FTAG, NULL, config); 3451168404Spjd 3452209962Smm if (spa != NULL) { 3453209962Smm /* 3454209962Smm * This still leaves a window of inconsistency where the spares 3455209962Smm * or l2cache devices could change and the config would be 3456209962Smm * self-inconsistent. 3457209962Smm */ 3458209962Smm spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); 3459168404Spjd 3460209962Smm if (*config != NULL) { 3461219089Spjd uint64_t loadtimes[2]; 3462219089Spjd 3463219089Spjd loadtimes[0] = spa->spa_loaded_ts.tv_sec; 3464219089Spjd loadtimes[1] = spa->spa_loaded_ts.tv_nsec; 3465219089Spjd VERIFY(nvlist_add_uint64_array(*config, 3466219089Spjd ZPOOL_CONFIG_LOADED_TIME, loadtimes, 2) == 0); 3467219089Spjd 3468185029Spjd VERIFY(nvlist_add_uint64(*config, 3469209962Smm ZPOOL_CONFIG_ERRCOUNT, 3470209962Smm spa_get_errlog_size(spa)) == 0); 3471185029Spjd 3472209962Smm if (spa_suspended(spa)) 3473209962Smm VERIFY(nvlist_add_uint64(*config, 3474209962Smm ZPOOL_CONFIG_SUSPENDED, 3475209962Smm spa->spa_failmode) == 0); 3476209962Smm 3477209962Smm spa_add_spares(spa, *config); 3478209962Smm spa_add_l2cache(spa, *config); 3479236884Smm spa_add_feature_stats(spa, *config); 3480209962Smm } 3481168404Spjd } 3482168404Spjd 3483168404Spjd /* 3484168404Spjd * We want to get the alternate root even for faulted pools, so we cheat 3485168404Spjd * and call spa_lookup() directly. 
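 * ("Cheat" because no open reference is held here -- spa_open_common() may
 * have failed -- so we take spa_namespace_lock ourselves and must not touch
 * the spa_t after dropping it.)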
3486168404Spjd */ 3487168404Spjd if (altroot) { 3488168404Spjd if (spa == NULL) { 3489168404Spjd mutex_enter(&spa_namespace_lock); 3490168404Spjd spa = spa_lookup(name); 3491168404Spjd if (spa) 3492168404Spjd spa_altroot(spa, altroot, buflen); 3493168404Spjd else 3494168404Spjd altroot[0] = '\0'; 3495168404Spjd spa = NULL; 3496168404Spjd mutex_exit(&spa_namespace_lock); 3497168404Spjd } else { 3498168404Spjd spa_altroot(spa, altroot, buflen); 3499168404Spjd } 3500168404Spjd } 3501168404Spjd 3502209962Smm if (spa != NULL) { 3503209962Smm spa_config_exit(spa, SCL_CONFIG, FTAG); 3504168404Spjd spa_close(spa, FTAG); 3505209962Smm } 3506168404Spjd 3507168404Spjd return (error); 3508168404Spjd} 3509168404Spjd 3510168404Spjd/* 3511185029Spjd * Validate that the auxiliary device array is well formed. We must have an 3512185029Spjd * array of nvlists, each which describes a valid leaf vdev. If this is an 3513185029Spjd * import (mode is VDEV_ALLOC_SPARE), then we allow corrupted spares to be 3514185029Spjd * specified, as long as they are well-formed. 3515168404Spjd */ 3516168404Spjdstatic int 3517185029Spjdspa_validate_aux_devs(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode, 3518185029Spjd spa_aux_vdev_t *sav, const char *config, uint64_t version, 3519185029Spjd vdev_labeltype_t label) 3520168404Spjd{ 3521185029Spjd nvlist_t **dev; 3522185029Spjd uint_t i, ndev; 3523168404Spjd vdev_t *vd; 3524168404Spjd int error; 3525168404Spjd 3526185029Spjd ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL); 3527185029Spjd 3528168404Spjd /* 3529185029Spjd * It's acceptable to have no devs specified. 3530168404Spjd */ 3531185029Spjd if (nvlist_lookup_nvlist_array(nvroot, config, &dev, &ndev) != 0) 3532168404Spjd return (0); 3533168404Spjd 3534185029Spjd if (ndev == 0) 3535249195Smm return (SET_ERROR(EINVAL)); 3536168404Spjd 3537168404Spjd /* 3538185029Spjd * Make sure the pool is formatted with a version that supports this 3539185029Spjd * device type. 3540168404Spjd */ 3541185029Spjd if (spa_version(spa) < version) 3542249195Smm return (SET_ERROR(ENOTSUP)); 3543168404Spjd 3544168404Spjd /* 3545185029Spjd * Set the pending device list so we correctly handle device in-use 3546168404Spjd * checking. 3547168404Spjd */ 3548185029Spjd sav->sav_pending = dev; 3549185029Spjd sav->sav_npending = ndev; 3550168404Spjd 3551185029Spjd for (i = 0; i < ndev; i++) { 3552185029Spjd if ((error = spa_config_parse(spa, &vd, dev[i], NULL, 0, 3553168404Spjd mode)) != 0) 3554168404Spjd goto out; 3555168404Spjd 3556168404Spjd if (!vd->vdev_ops->vdev_op_leaf) { 3557168404Spjd vdev_free(vd); 3558249195Smm error = SET_ERROR(EINVAL); 3559168404Spjd goto out; 3560168404Spjd } 3561168404Spjd 3562185029Spjd /* 3563185029Spjd * The L2ARC currently only supports disk devices in 3564185029Spjd * kernel context. For user-level testing, we allow it. 
3565185029Spjd */ 3566185029Spjd#ifdef _KERNEL 3567185029Spjd if ((strcmp(config, ZPOOL_CONFIG_L2CACHE) == 0) && 3568185029Spjd strcmp(vd->vdev_ops->vdev_op_type, VDEV_TYPE_DISK) != 0) { 3569249195Smm error = SET_ERROR(ENOTBLK); 3570230514Smm vdev_free(vd); 3571185029Spjd goto out; 3572185029Spjd } 3573185029Spjd#endif 3574168404Spjd vd->vdev_top = vd; 3575168404Spjd 3576168404Spjd if ((error = vdev_open(vd)) == 0 && 3577185029Spjd (error = vdev_label_init(vd, crtxg, label)) == 0) { 3578185029Spjd VERIFY(nvlist_add_uint64(dev[i], ZPOOL_CONFIG_GUID, 3579168404Spjd vd->vdev_guid) == 0); 3580168404Spjd } 3581168404Spjd 3582168404Spjd vdev_free(vd); 3583168404Spjd 3584185029Spjd if (error && 3585185029Spjd (mode != VDEV_ALLOC_SPARE && mode != VDEV_ALLOC_L2CACHE)) 3586168404Spjd goto out; 3587168404Spjd else 3588168404Spjd error = 0; 3589168404Spjd } 3590168404Spjd 3591168404Spjdout: 3592185029Spjd sav->sav_pending = NULL; 3593185029Spjd sav->sav_npending = 0; 3594168404Spjd return (error); 3595168404Spjd} 3596168404Spjd 3597185029Spjdstatic int 3598185029Spjdspa_validate_aux(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode) 3599185029Spjd{ 3600185029Spjd int error; 3601185029Spjd 3602185029Spjd ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL); 3603185029Spjd 3604185029Spjd if ((error = spa_validate_aux_devs(spa, nvroot, crtxg, mode, 3605185029Spjd &spa->spa_spares, ZPOOL_CONFIG_SPARES, SPA_VERSION_SPARES, 3606185029Spjd VDEV_LABEL_SPARE)) != 0) { 3607185029Spjd return (error); 3608185029Spjd } 3609185029Spjd 3610185029Spjd return (spa_validate_aux_devs(spa, nvroot, crtxg, mode, 3611185029Spjd &spa->spa_l2cache, ZPOOL_CONFIG_L2CACHE, SPA_VERSION_L2CACHE, 3612185029Spjd VDEV_LABEL_L2CACHE)); 3613185029Spjd} 3614185029Spjd 3615185029Spjdstatic void 3616185029Spjdspa_set_aux_vdevs(spa_aux_vdev_t *sav, nvlist_t **devs, int ndevs, 3617185029Spjd const char *config) 3618185029Spjd{ 3619185029Spjd int i; 3620185029Spjd 3621185029Spjd if (sav->sav_config != NULL) { 3622185029Spjd nvlist_t **olddevs; 3623185029Spjd uint_t oldndevs; 3624185029Spjd nvlist_t **newdevs; 3625185029Spjd 3626185029Spjd /* 3627185029Spjd * Generate new dev list by concatentating with the 3628185029Spjd * current dev list. 3629185029Spjd */ 3630185029Spjd VERIFY(nvlist_lookup_nvlist_array(sav->sav_config, config, 3631185029Spjd &olddevs, &oldndevs) == 0); 3632185029Spjd 3633185029Spjd newdevs = kmem_alloc(sizeof (void *) * 3634185029Spjd (ndevs + oldndevs), KM_SLEEP); 3635185029Spjd for (i = 0; i < oldndevs; i++) 3636185029Spjd VERIFY(nvlist_dup(olddevs[i], &newdevs[i], 3637185029Spjd KM_SLEEP) == 0); 3638185029Spjd for (i = 0; i < ndevs; i++) 3639185029Spjd VERIFY(nvlist_dup(devs[i], &newdevs[i + oldndevs], 3640185029Spjd KM_SLEEP) == 0); 3641185029Spjd 3642185029Spjd VERIFY(nvlist_remove(sav->sav_config, config, 3643185029Spjd DATA_TYPE_NVLIST_ARRAY) == 0); 3644185029Spjd 3645185029Spjd VERIFY(nvlist_add_nvlist_array(sav->sav_config, 3646185029Spjd config, newdevs, ndevs + oldndevs) == 0); 3647185029Spjd for (i = 0; i < oldndevs + ndevs; i++) 3648185029Spjd nvlist_free(newdevs[i]); 3649185029Spjd kmem_free(newdevs, (oldndevs + ndevs) * sizeof (void *)); 3650185029Spjd } else { 3651185029Spjd /* 3652185029Spjd * Generate a new dev list. 
3653185029Spjd */ 3654185029Spjd VERIFY(nvlist_alloc(&sav->sav_config, NV_UNIQUE_NAME, 3655185029Spjd KM_SLEEP) == 0); 3656185029Spjd VERIFY(nvlist_add_nvlist_array(sav->sav_config, config, 3657185029Spjd devs, ndevs) == 0); 3658185029Spjd } 3659185029Spjd} 3660185029Spjd 3661168404Spjd/* 3662185029Spjd * Stop and drop level 2 ARC devices 3663185029Spjd */ 3664185029Spjdvoid 3665185029Spjdspa_l2cache_drop(spa_t *spa) 3666185029Spjd{ 3667185029Spjd vdev_t *vd; 3668185029Spjd int i; 3669185029Spjd spa_aux_vdev_t *sav = &spa->spa_l2cache; 3670185029Spjd 3671185029Spjd for (i = 0; i < sav->sav_count; i++) { 3672185029Spjd uint64_t pool; 3673185029Spjd 3674185029Spjd vd = sav->sav_vdevs[i]; 3675185029Spjd ASSERT(vd != NULL); 3676185029Spjd 3677209962Smm if (spa_l2cache_exists(vd->vdev_guid, &pool) && 3678209962Smm pool != 0ULL && l2arc_vdev_present(vd)) 3679185029Spjd l2arc_remove_vdev(vd); 3680185029Spjd } 3681185029Spjd} 3682185029Spjd 3683185029Spjd/* 3684168404Spjd * Pool Creation 3685168404Spjd */ 3686168404Spjdint 3687185029Spjdspa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props, 3688248571Smm nvlist_t *zplprops) 3689168404Spjd{ 3690168404Spjd spa_t *spa; 3691185029Spjd char *altroot = NULL; 3692168404Spjd vdev_t *rvd; 3693168404Spjd dsl_pool_t *dp; 3694168404Spjd dmu_tx_t *tx; 3695219089Spjd int error = 0; 3696168404Spjd uint64_t txg = TXG_INITIAL; 3697185029Spjd nvlist_t **spares, **l2cache; 3698185029Spjd uint_t nspares, nl2cache; 3699219089Spjd uint64_t version, obj; 3700236884Smm boolean_t has_features; 3701168404Spjd 3702168404Spjd /* 3703168404Spjd * If this pool already exists, return failure. 3704168404Spjd */ 3705168404Spjd mutex_enter(&spa_namespace_lock); 3706168404Spjd if (spa_lookup(pool) != NULL) { 3707168404Spjd mutex_exit(&spa_namespace_lock); 3708249195Smm return (SET_ERROR(EEXIST)); 3709168404Spjd } 3710168404Spjd 3711168404Spjd /* 3712168404Spjd * Allocate a new spa_t structure. 
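 * (The altroot property is extracted from 'props' first because spa_add()
 * records it in the newly allocated spa_t.)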
3713168404Spjd */ 3714185029Spjd (void) nvlist_lookup_string(props, 3715185029Spjd zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot); 3716219089Spjd spa = spa_add(pool, NULL, altroot); 3717209962Smm spa_activate(spa, spa_mode_global); 3718168404Spjd 3719185029Spjd if (props && (error = spa_prop_validate(spa, props))) { 3720185029Spjd spa_deactivate(spa); 3721185029Spjd spa_remove(spa); 3722185029Spjd mutex_exit(&spa_namespace_lock); 3723185029Spjd return (error); 3724185029Spjd } 3725185029Spjd 3726236884Smm has_features = B_FALSE; 3727236884Smm for (nvpair_t *elem = nvlist_next_nvpair(props, NULL); 3728236884Smm elem != NULL; elem = nvlist_next_nvpair(props, elem)) { 3729236884Smm if (zpool_prop_feature(nvpair_name(elem))) 3730236884Smm has_features = B_TRUE; 3731236884Smm } 3732236884Smm 3733236884Smm if (has_features || nvlist_lookup_uint64(props, 3734236884Smm zpool_prop_to_name(ZPOOL_PROP_VERSION), &version) != 0) { 3735185029Spjd version = SPA_VERSION; 3736236884Smm } 3737236884Smm ASSERT(SPA_VERSION_IS_SUPPORTED(version)); 3738219089Spjd 3739219089Spjd spa->spa_first_txg = txg; 3740219089Spjd spa->spa_uberblock.ub_txg = txg - 1; 3741185029Spjd spa->spa_uberblock.ub_version = version; 3742168404Spjd spa->spa_ubsync = spa->spa_uberblock; 3743307277Smav spa->spa_load_state = SPA_LOAD_CREATE; 3744168404Spjd 3745168404Spjd /* 3746209962Smm * Create "The Godfather" zio to hold all async IOs 3747209962Smm */ 3748272598Sdelphij spa->spa_async_zio_root = kmem_alloc(max_ncpus * sizeof (void *), 3749272598Sdelphij KM_SLEEP); 3750272598Sdelphij for (int i = 0; i < max_ncpus; i++) { 3751272598Sdelphij spa->spa_async_zio_root[i] = zio_root(spa, NULL, NULL, 3752272598Sdelphij ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | 3753272598Sdelphij ZIO_FLAG_GODFATHER); 3754272598Sdelphij } 3755209962Smm 3756209962Smm /* 3757168404Spjd * Create the root vdev. 3758168404Spjd */ 3759185029Spjd spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 3760168404Spjd 3761168404Spjd error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, VDEV_ALLOC_ADD); 3762168404Spjd 3763168404Spjd ASSERT(error != 0 || rvd != NULL); 3764168404Spjd ASSERT(error != 0 || spa->spa_root_vdev == rvd); 3765168404Spjd 3766185029Spjd if (error == 0 && !zfs_allocatable_devs(nvroot)) 3767249195Smm error = SET_ERROR(EINVAL); 3768168404Spjd 3769168404Spjd if (error == 0 && 3770168404Spjd (error = vdev_create(rvd, txg, B_FALSE)) == 0 && 3771185029Spjd (error = spa_validate_aux(spa, nvroot, txg, 3772168404Spjd VDEV_ALLOC_ADD)) == 0) { 3773219089Spjd for (int c = 0; c < rvd->vdev_children; c++) { 3774254591Sgibbs vdev_ashift_optimize(rvd->vdev_child[c]); 3775219089Spjd vdev_metaslab_set_size(rvd->vdev_child[c]); 3776219089Spjd vdev_expand(rvd->vdev_child[c], txg); 3777219089Spjd } 3778168404Spjd } 3779168404Spjd 3780185029Spjd spa_config_exit(spa, SCL_ALL, FTAG); 3781168404Spjd 3782168404Spjd if (error != 0) { 3783168404Spjd spa_unload(spa); 3784168404Spjd spa_deactivate(spa); 3785168404Spjd spa_remove(spa); 3786168404Spjd mutex_exit(&spa_namespace_lock); 3787168404Spjd return (error); 3788168404Spjd } 3789168404Spjd 3790168404Spjd /* 3791168404Spjd * Get the list of spares, if specified. 
3792168404Spjd */ 3793168404Spjd if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, 3794168404Spjd &spares, &nspares) == 0) { 3795185029Spjd VERIFY(nvlist_alloc(&spa->spa_spares.sav_config, NV_UNIQUE_NAME, 3796168404Spjd KM_SLEEP) == 0); 3797185029Spjd VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config, 3798168404Spjd ZPOOL_CONFIG_SPARES, spares, nspares) == 0); 3799185029Spjd spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 3800168404Spjd spa_load_spares(spa); 3801185029Spjd spa_config_exit(spa, SCL_ALL, FTAG); 3802185029Spjd spa->spa_spares.sav_sync = B_TRUE; 3803168404Spjd } 3804168404Spjd 3805185029Spjd /* 3806185029Spjd * Get the list of level 2 cache devices, if specified. 3807185029Spjd */ 3808185029Spjd if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, 3809185029Spjd &l2cache, &nl2cache) == 0) { 3810185029Spjd VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config, 3811185029Spjd NV_UNIQUE_NAME, KM_SLEEP) == 0); 3812185029Spjd VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config, 3813185029Spjd ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0); 3814185029Spjd spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 3815185029Spjd spa_load_l2cache(spa); 3816185029Spjd spa_config_exit(spa, SCL_ALL, FTAG); 3817185029Spjd spa->spa_l2cache.sav_sync = B_TRUE; 3818185029Spjd } 3819185029Spjd 3820236884Smm spa->spa_is_initializing = B_TRUE; 3821185029Spjd spa->spa_dsl_pool = dp = dsl_pool_create(spa, zplprops, txg); 3822168404Spjd spa->spa_meta_objset = dp->dp_meta_objset; 3823236884Smm spa->spa_is_initializing = B_FALSE; 3824168404Spjd 3825219089Spjd /* 3826219089Spjd * Create DDTs (dedup tables). 3827219089Spjd */ 3828219089Spjd ddt_create(spa); 3829219089Spjd 3830219089Spjd spa_update_dspace(spa); 3831219089Spjd 3832168404Spjd tx = dmu_tx_create_assigned(dp, txg); 3833168404Spjd 3834168404Spjd /* 3835168404Spjd * Create the pool config object. 3836168404Spjd */ 3837168404Spjd spa->spa_config_object = dmu_object_alloc(spa->spa_meta_objset, 3838185029Spjd DMU_OT_PACKED_NVLIST, SPA_CONFIG_BLOCKSIZE, 3839168404Spjd DMU_OT_PACKED_NVLIST_SIZE, sizeof (uint64_t), tx); 3840168404Spjd 3841168404Spjd if (zap_add(spa->spa_meta_objset, 3842168404Spjd DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CONFIG, 3843168404Spjd sizeof (uint64_t), 1, &spa->spa_config_object, tx) != 0) { 3844168404Spjd cmn_err(CE_PANIC, "failed to add pool config"); 3845168404Spjd } 3846168404Spjd 3847236884Smm if (spa_version(spa) >= SPA_VERSION_FEATURES) 3848236884Smm spa_feature_create_zap_objects(spa, tx); 3849236884Smm 3850219089Spjd if (zap_add(spa->spa_meta_objset, 3851219089Spjd DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CREATION_VERSION, 3852219089Spjd sizeof (uint64_t), 1, &version, tx) != 0) { 3853219089Spjd cmn_err(CE_PANIC, "failed to add pool version"); 3854219089Spjd } 3855219089Spjd 3856185029Spjd /* Newly created pools with the right version are always deflated. */ 3857185029Spjd if (version >= SPA_VERSION_RAIDZ_DEFLATE) { 3858185029Spjd spa->spa_deflate = TRUE; 3859185029Spjd if (zap_add(spa->spa_meta_objset, 3860185029Spjd DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE, 3861185029Spjd sizeof (uint64_t), 1, &spa->spa_deflate, tx) != 0) { 3862185029Spjd cmn_err(CE_PANIC, "failed to add deflate"); 3863185029Spjd } 3864168404Spjd } 3865168404Spjd 3866168404Spjd /* 3867219089Spjd * Create the deferred-free bpobj. Turn off compression 3868168404Spjd * because sync-to-convergence takes longer if the blocksize 3869168404Spjd * keeps changing. 
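 * (Hence the bpobj below is created with a fixed 16K block size, and its
 * compression is explicitly set to ZIO_COMPRESS_OFF.)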
3870168404Spjd */ 3871219089Spjd obj = bpobj_alloc(spa->spa_meta_objset, 1 << 14, tx); 3872219089Spjd dmu_object_set_compress(spa->spa_meta_objset, obj, 3873168404Spjd ZIO_COMPRESS_OFF, tx); 3874168404Spjd if (zap_add(spa->spa_meta_objset, 3875219089Spjd DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SYNC_BPOBJ, 3876219089Spjd sizeof (uint64_t), 1, &obj, tx) != 0) { 3877219089Spjd cmn_err(CE_PANIC, "failed to add bpobj"); 3878168404Spjd } 3879219089Spjd VERIFY3U(0, ==, bpobj_open(&spa->spa_deferred_bpobj, 3880219089Spjd spa->spa_meta_objset, obj)); 3881168404Spjd 3882168404Spjd /* 3883168404Spjd * Create the pool's history object. 3884168404Spjd */ 3885185029Spjd if (version >= SPA_VERSION_ZPOOL_HISTORY) 3886185029Spjd spa_history_create_obj(spa, tx); 3887168404Spjd 3888185029Spjd /* 3889289422Smav * Generate some random noise for salted checksums to operate on. 3890289422Smav */ 3891289422Smav (void) random_get_pseudo_bytes(spa->spa_cksum_salt.zcs_bytes, 3892289422Smav sizeof (spa->spa_cksum_salt.zcs_bytes)); 3893289422Smav 3894289422Smav /* 3895185029Spjd * Set pool properties. 3896185029Spjd */ 3897185029Spjd spa->spa_bootfs = zpool_prop_default_numeric(ZPOOL_PROP_BOOTFS); 3898185029Spjd spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION); 3899185029Spjd spa->spa_failmode = zpool_prop_default_numeric(ZPOOL_PROP_FAILUREMODE); 3900219089Spjd spa->spa_autoexpand = zpool_prop_default_numeric(ZPOOL_PROP_AUTOEXPAND); 3901219089Spjd 3902209962Smm if (props != NULL) { 3903209962Smm spa_configfile_set(spa, props, B_FALSE); 3904248571Smm spa_sync_props(props, tx); 3905209962Smm } 3906185029Spjd 3907168404Spjd dmu_tx_commit(tx); 3908168404Spjd 3909168404Spjd spa->spa_sync_on = B_TRUE; 3910168404Spjd txg_sync_start(spa->spa_dsl_pool); 3911168404Spjd 3912168404Spjd /* 3913168404Spjd * We explicitly wait for the first transaction to complete so that our 3914168404Spjd * bean counters are appropriately updated. 3915168404Spjd */ 3916168404Spjd txg_wait_synced(spa->spa_dsl_pool, txg); 3917168404Spjd 3918185029Spjd spa_config_sync(spa, B_FALSE, B_TRUE); 3919287745Sdelphij spa_event_notify(spa, NULL, ESC_ZFS_POOL_CREATE); 3920168404Spjd 3921248571Smm spa_history_log_version(spa, "create"); 3922185029Spjd 3923286575Smav /* 3924286575Smav * Don't count references from objsets that are already closed 3925286575Smav * and are making their way through the eviction process. 3926286575Smav */ 3927286575Smav spa_evicting_os_wait(spa); 3928208442Smm spa->spa_minref = refcount_count(&spa->spa_refcount); 3929307277Smav spa->spa_load_state = SPA_LOAD_NONE; 3930208442Smm 3931168404Spjd mutex_exit(&spa_namespace_lock); 3932168404Spjd 3933168404Spjd return (0); 3934168404Spjd} 3935168404Spjd 3936241286Savg#ifdef _KERNEL 3937277300Ssmh#ifdef illumos 3938185029Spjd/* 3939219089Spjd * Get the root pool information from the root disk, then import the root pool 3940219089Spjd * during the system boot up time. 3941185029Spjd */ 3942219089Spjdextern int vdev_disk_read_rootlabel(char *, char *, nvlist_t **); 3943219089Spjd 3944219089Spjdstatic nvlist_t * 3945219089Spjdspa_generate_rootconf(char *devpath, char *devid, uint64_t *guid) 3946185029Spjd{ 3947219089Spjd nvlist_t *config; 3948185029Spjd nvlist_t *nvtop, *nvroot; 3949185029Spjd uint64_t pgid; 3950185029Spjd 3951219089Spjd if (vdev_disk_read_rootlabel(devpath, devid, &config) != 0) 3952219089Spjd return (NULL); 3953219089Spjd 3954168404Spjd /* 3955185029Spjd * Add this top-level vdev to the child array. 
3956168404Spjd */ 3957219089Spjd VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 3958219089Spjd &nvtop) == 0); 3959219089Spjd VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, 3960219089Spjd &pgid) == 0); 3961219089Spjd VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID, guid) == 0); 3962168404Spjd 3963185029Spjd /* 3964185029Spjd * Put this pool's top-level vdevs into a root vdev. 3965185029Spjd */ 3966185029Spjd VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0); 3967219089Spjd VERIFY(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE, 3968219089Spjd VDEV_TYPE_ROOT) == 0); 3969185029Spjd VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) == 0); 3970185029Spjd VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, pgid) == 0); 3971185029Spjd VERIFY(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, 3972185029Spjd &nvtop, 1) == 0); 3973168404Spjd 3974168404Spjd /* 3975185029Spjd * Replace the existing vdev_tree with the new root vdev in 3976185029Spjd * this pool's configuration (remove the old, add the new). 3977168404Spjd */ 3978185029Spjd VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, nvroot) == 0); 3979185029Spjd nvlist_free(nvroot); 3980219089Spjd return (config); 3981185029Spjd} 3982168404Spjd 3983185029Spjd/* 3984219089Spjd * Walk the vdev tree and see if we can find a device with "better" 3985219089Spjd * configuration. A configuration is "better" if the label on that 3986219089Spjd * device has a more recent txg. 3987185029Spjd */ 3988219089Spjdstatic void 3989219089Spjdspa_alt_rootvdev(vdev_t *vd, vdev_t **avd, uint64_t *txg) 3990185029Spjd{ 3991219089Spjd for (int c = 0; c < vd->vdev_children; c++) 3992219089Spjd spa_alt_rootvdev(vd->vdev_child[c], avd, txg); 3993185029Spjd 3994219089Spjd if (vd->vdev_ops->vdev_op_leaf) { 3995219089Spjd nvlist_t *label; 3996219089Spjd uint64_t label_txg; 3997185029Spjd 3998219089Spjd if (vdev_disk_read_rootlabel(vd->vdev_physpath, vd->vdev_devid, 3999219089Spjd &label) != 0) 4000219089Spjd return; 4001185029Spjd 4002219089Spjd VERIFY(nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_TXG, 4003219089Spjd &label_txg) == 0); 4004168404Spjd 4005219089Spjd /* 4006219089Spjd * Do we have a better boot device? 4007219089Spjd */ 4008219089Spjd if (label_txg > *txg) { 4009219089Spjd *txg = label_txg; 4010219089Spjd *avd = vd; 4011185029Spjd } 4012219089Spjd nvlist_free(label); 4013185029Spjd } 4014185029Spjd} 4015185029Spjd 4016185029Spjd/* 4017185029Spjd * Import a root pool. 4018185029Spjd * 4019185029Spjd * For x86. devpath_list will consist of devid and/or physpath name of 4020185029Spjd * the vdev (e.g. "id1,sd@SSEAGATE..." or "/pci@1f,0/ide@d/disk@0,0:a"). 4021185029Spjd * The GRUB "findroot" command will return the vdev we should boot. 4022185029Spjd * 4023185029Spjd * For Sparc, devpath_list consists the physpath name of the booting device 4024185029Spjd * no matter the rootpool is a single device pool or a mirrored pool. 4025185029Spjd * e.g. 4026185029Spjd * "/pci@1f,0/ide@d/disk@0,0:a" 4027185029Spjd */ 4028185029Spjdint 4029185029Spjdspa_import_rootpool(char *devpath, char *devid) 4030185029Spjd{ 4031219089Spjd spa_t *spa; 4032219089Spjd vdev_t *rvd, *bvd, *avd = NULL; 4033219089Spjd nvlist_t *config, *nvtop; 4034219089Spjd uint64_t guid, txg; 4035185029Spjd char *pname; 4036185029Spjd int error; 4037185029Spjd 4038185029Spjd /* 4039219089Spjd * Read the label from the boot device and generate a configuration. 
4040185029Spjd */ 4041219089Spjd config = spa_generate_rootconf(devpath, devid, &guid); 4042219089Spjd#if defined(_OBP) && defined(_KERNEL) 4043219089Spjd if (config == NULL) { 4044219089Spjd if (strstr(devpath, "/iscsi/ssd") != NULL) { 4045219089Spjd /* iscsi boot */ 4046219089Spjd get_iscsi_bootpath_phy(devpath); 4047219089Spjd config = spa_generate_rootconf(devpath, devid, &guid); 4048219089Spjd } 4049219089Spjd } 4050219089Spjd#endif 4051219089Spjd if (config == NULL) { 4052236884Smm cmn_err(CE_NOTE, "Cannot read the pool label from '%s'", 4053219089Spjd devpath); 4054249195Smm return (SET_ERROR(EIO)); 4055219089Spjd } 4056185029Spjd 4057219089Spjd VERIFY(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME, 4058219089Spjd &pname) == 0); 4059219089Spjd VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG, &txg) == 0); 4060185029Spjd 4061209962Smm mutex_enter(&spa_namespace_lock); 4062209962Smm if ((spa = spa_lookup(pname)) != NULL) { 4063209962Smm /* 4064209962Smm * Remove the existing root pool from the namespace so that we 4065209962Smm * can replace it with the correct config we just read in. 4066209962Smm */ 4067209962Smm spa_remove(spa); 4068209962Smm } 4069185029Spjd 4070219089Spjd spa = spa_add(pname, config, NULL); 4071209962Smm spa->spa_is_root = B_TRUE; 4072219089Spjd spa->spa_import_flags = ZFS_IMPORT_VERBATIM; 4073209962Smm 4074219089Spjd /* 4075219089Spjd * Build up a vdev tree based on the boot device's label config. 4076219089Spjd */ 4077219089Spjd VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 4078219089Spjd &nvtop) == 0); 4079219089Spjd spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 4080219089Spjd error = spa_config_parse(spa, &rvd, nvtop, NULL, 0, 4081219089Spjd VDEV_ALLOC_ROOTPOOL); 4082219089Spjd spa_config_exit(spa, SCL_ALL, FTAG); 4083219089Spjd if (error) { 4084209962Smm mutex_exit(&spa_namespace_lock); 4085219089Spjd nvlist_free(config); 4086219089Spjd cmn_err(CE_NOTE, "Can not parse the config for pool '%s'", 4087219089Spjd pname); 4088219089Spjd return (error); 4089209962Smm } 4090209962Smm 4091219089Spjd /* 4092219089Spjd * Get the boot vdev. 4093219089Spjd */ 4094219089Spjd if ((bvd = vdev_lookup_by_guid(rvd, guid)) == NULL) { 4095219089Spjd cmn_err(CE_NOTE, "Can not find the boot vdev for guid %llu", 4096219089Spjd (u_longlong_t)guid); 4097249195Smm error = SET_ERROR(ENOENT); 4098219089Spjd goto out; 4099219089Spjd } 4100209962Smm 4101219089Spjd /* 4102219089Spjd * Determine if there is a better boot device. 4103219089Spjd */ 4104219089Spjd avd = bvd; 4105219089Spjd spa_alt_rootvdev(rvd, &avd, &txg); 4106219089Spjd if (avd != bvd) { 4107219089Spjd cmn_err(CE_NOTE, "The boot device is 'degraded'. Please " 4108219089Spjd "try booting from '%s'", avd->vdev_path); 4109249195Smm error = SET_ERROR(EINVAL); 4110219089Spjd goto out; 4111219089Spjd } 4112209962Smm 4113219089Spjd /* 4114219089Spjd * If the boot device is part of a spare vdev then ensure that 4115219089Spjd * we're booting off the active spare. 4116219089Spjd */ 4117219089Spjd if (bvd->vdev_parent->vdev_ops == &vdev_spare_ops && 4118219089Spjd !bvd->vdev_isspare) { 4119219089Spjd cmn_err(CE_NOTE, "The boot device is currently spared. 
Please " 4120219089Spjd "try booting from '%s'", 4121219089Spjd bvd->vdev_parent-> 4122219089Spjd vdev_child[bvd->vdev_parent->vdev_children - 1]->vdev_path); 4123249195Smm error = SET_ERROR(EINVAL); 4124219089Spjd goto out; 4125219089Spjd } 4126209962Smm 4127219089Spjd error = 0; 4128219089Spjdout: 4129219089Spjd spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 4130219089Spjd vdev_free(rvd); 4131219089Spjd spa_config_exit(spa, SCL_ALL, FTAG); 4132209962Smm mutex_exit(&spa_namespace_lock); 4133209962Smm 4134219089Spjd nvlist_free(config); 4135219089Spjd return (error); 4136185029Spjd} 4137185029Spjd 4138277300Ssmh#else /* !illumos */ 4139241286Savg 4140243502Savgextern int vdev_geom_read_pool_label(const char *name, nvlist_t ***configs, 4141243502Savg uint64_t *count); 4142241286Savg 4143241286Savgstatic nvlist_t * 4144241286Savgspa_generate_rootconf(const char *name) 4145241286Savg{ 4146243502Savg nvlist_t **configs, **tops; 4147241286Savg nvlist_t *config; 4148243502Savg nvlist_t *best_cfg, *nvtop, *nvroot; 4149243502Savg uint64_t *holes; 4150243502Savg uint64_t best_txg; 4151243213Savg uint64_t nchildren; 4152241286Savg uint64_t pgid; 4153243502Savg uint64_t count; 4154243502Savg uint64_t i; 4155243502Savg uint_t nholes; 4156241286Savg 4157243502Savg if (vdev_geom_read_pool_label(name, &configs, &count) != 0) 4158241286Savg return (NULL); 4159241286Savg 4160243502Savg ASSERT3U(count, !=, 0); 4161243502Savg best_txg = 0; 4162243502Savg for (i = 0; i < count; i++) { 4163243502Savg uint64_t txg; 4164243502Savg 4165243502Savg VERIFY(nvlist_lookup_uint64(configs[i], ZPOOL_CONFIG_POOL_TXG, 4166243502Savg &txg) == 0); 4167243502Savg if (txg > best_txg) { 4168243502Savg best_txg = txg; 4169243502Savg best_cfg = configs[i]; 4170243502Savg } 4171243502Savg } 4172243502Savg 4173245945Savg nchildren = 1; 4174245945Savg nvlist_lookup_uint64(best_cfg, ZPOOL_CONFIG_VDEV_CHILDREN, &nchildren); 4175243502Savg holes = NULL; 4176243502Savg nvlist_lookup_uint64_array(best_cfg, ZPOOL_CONFIG_HOLE_ARRAY, 4177243502Savg &holes, &nholes); 4178243502Savg 4179244635Savg tops = kmem_zalloc(nchildren * sizeof(void *), KM_SLEEP); 4180243502Savg for (i = 0; i < nchildren; i++) { 4181243502Savg if (i >= count) 4182243502Savg break; 4183243502Savg if (configs[i] == NULL) 4184243502Savg continue; 4185243502Savg VERIFY(nvlist_lookup_nvlist(configs[i], ZPOOL_CONFIG_VDEV_TREE, 4186243502Savg &nvtop) == 0); 4187243502Savg nvlist_dup(nvtop, &tops[i], KM_SLEEP); 4188243213Savg } 4189243502Savg for (i = 0; holes != NULL && i < nholes; i++) { 4190243502Savg if (i >= nchildren) 4191243502Savg continue; 4192243502Savg if (tops[holes[i]] != NULL) 4193243502Savg continue; 4194243502Savg nvlist_alloc(&tops[holes[i]], NV_UNIQUE_NAME, KM_SLEEP); 4195243502Savg VERIFY(nvlist_add_string(tops[holes[i]], ZPOOL_CONFIG_TYPE, 4196243502Savg VDEV_TYPE_HOLE) == 0); 4197243502Savg VERIFY(nvlist_add_uint64(tops[holes[i]], ZPOOL_CONFIG_ID, 4198243502Savg holes[i]) == 0); 4199243502Savg VERIFY(nvlist_add_uint64(tops[holes[i]], ZPOOL_CONFIG_GUID, 4200243502Savg 0) == 0); 4201243502Savg } 4202243502Savg for (i = 0; i < nchildren; i++) { 4203243502Savg if (tops[i] != NULL) 4204243502Savg continue; 4205243502Savg nvlist_alloc(&tops[i], NV_UNIQUE_NAME, KM_SLEEP); 4206243502Savg VERIFY(nvlist_add_string(tops[i], ZPOOL_CONFIG_TYPE, 4207243502Savg VDEV_TYPE_MISSING) == 0); 4208243502Savg VERIFY(nvlist_add_uint64(tops[i], ZPOOL_CONFIG_ID, 4209243502Savg i) == 0); 4210243502Savg VERIFY(nvlist_add_uint64(tops[i], ZPOOL_CONFIG_GUID, 4211243502Savg 0) == 0); 
4212243502Savg } 4213243213Savg 4214243213Savg /* 4215243502Savg * Create pool config based on the best vdev config. 4216241286Savg */ 4217243502Savg nvlist_dup(best_cfg, &config, KM_SLEEP); 4218241286Savg 4219241286Savg /* 4220241286Savg * Put this pool's top-level vdevs into a root vdev. 4221241286Savg */ 4222243502Savg VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, 4223243502Savg &pgid) == 0); 4224241286Savg VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0); 4225241286Savg VERIFY(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE, 4226241286Savg VDEV_TYPE_ROOT) == 0); 4227241286Savg VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) == 0); 4228241286Savg VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, pgid) == 0); 4229241286Savg VERIFY(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, 4230243502Savg tops, nchildren) == 0); 4231241286Savg 4232241286Savg /* 4233241286Savg * Replace the existing vdev_tree with the new root vdev in 4234241286Savg * this pool's configuration (remove the old, add the new). 4235241286Savg */ 4236241286Savg VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, nvroot) == 0); 4237243502Savg 4238243502Savg /* 4239243502Savg * Drop vdev config elements that should not be present at pool level. 4240243502Savg */ 4241243502Savg nvlist_remove(config, ZPOOL_CONFIG_GUID, DATA_TYPE_UINT64); 4242243502Savg nvlist_remove(config, ZPOOL_CONFIG_TOP_GUID, DATA_TYPE_UINT64); 4243243502Savg 4244243502Savg for (i = 0; i < count; i++) 4245243502Savg nvlist_free(configs[i]); 4246243502Savg kmem_free(configs, count * sizeof(void *)); 4247243502Savg for (i = 0; i < nchildren; i++) 4248243502Savg nvlist_free(tops[i]); 4249243502Savg kmem_free(tops, nchildren * sizeof(void *)); 4250241286Savg nvlist_free(nvroot); 4251241286Savg return (config); 4252241286Savg} 4253241286Savg 4254241286Savgint 4255241286Savgspa_import_rootpool(const char *name) 4256241286Savg{ 4257241286Savg spa_t *spa; 4258241286Savg vdev_t *rvd, *bvd, *avd = NULL; 4259241286Savg nvlist_t *config, *nvtop; 4260241286Savg uint64_t txg; 4261241286Savg char *pname; 4262241286Savg int error; 4263241286Savg 4264241286Savg /* 4265241286Savg * Read the label from the boot device and generate a configuration. 4266241286Savg */ 4267241286Savg config = spa_generate_rootconf(name); 4268243213Savg 4269243213Savg mutex_enter(&spa_namespace_lock); 4270243213Savg if (config != NULL) { 4271243213Savg VERIFY(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME, 4272243213Savg &pname) == 0 && strcmp(name, pname) == 0); 4273243213Savg VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG, &txg) 4274243213Savg == 0); 4275243213Savg 4276243213Savg if ((spa = spa_lookup(pname)) != NULL) { 4277243213Savg /* 4278243213Savg * Remove the existing root pool from the namespace so 4279243213Savg * that we can replace it with the correct config 4280243213Savg * we just read in. 4281243213Savg */ 4282243213Savg spa_remove(spa); 4283243213Savg } 4284243213Savg spa = spa_add(pname, config, NULL); 4285243501Savg 4286243501Savg /* 4287243501Savg * Set spa_ubsync.ub_version as it can be used in vdev_alloc() 4288243501Savg * via spa_version(). 
4289243501Savg */ 4290243501Savg if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, 4291243501Savg &spa->spa_ubsync.ub_version) != 0) 4292243501Savg spa->spa_ubsync.ub_version = SPA_VERSION_INITIAL; 4293243213Savg } else if ((spa = spa_lookup(name)) == NULL) { 4294287100Savg mutex_exit(&spa_namespace_lock); 4295287100Savg nvlist_free(config); 4296241286Savg cmn_err(CE_NOTE, "Cannot find the pool label for '%s'", 4297241286Savg name); 4298241286Savg return (EIO); 4299243213Savg } else { 4300243213Savg VERIFY(nvlist_dup(spa->spa_config, &config, KM_SLEEP) == 0); 4301241286Savg } 4302241286Savg spa->spa_is_root = B_TRUE; 4303241286Savg spa->spa_import_flags = ZFS_IMPORT_VERBATIM; 4304241286Savg 4305241286Savg /* 4306241286Savg * Build up a vdev tree based on the boot device's label config. 4307241286Savg */ 4308241286Savg VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 4309241286Savg &nvtop) == 0); 4310241286Savg spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 4311241286Savg error = spa_config_parse(spa, &rvd, nvtop, NULL, 0, 4312241286Savg VDEV_ALLOC_ROOTPOOL); 4313241286Savg spa_config_exit(spa, SCL_ALL, FTAG); 4314241286Savg if (error) { 4315241286Savg mutex_exit(&spa_namespace_lock); 4316241286Savg nvlist_free(config); 4317241286Savg cmn_err(CE_NOTE, "Can not parse the config for pool '%s'", 4318241286Savg pname); 4319241286Savg return (error); 4320241286Savg } 4321241286Savg 4322241286Savg spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 4323241286Savg vdev_free(rvd); 4324241286Savg spa_config_exit(spa, SCL_ALL, FTAG); 4325241286Savg mutex_exit(&spa_namespace_lock); 4326241286Savg 4327243213Savg nvlist_free(config); 4328243213Savg return (0); 4329241286Savg} 4330241286Savg 4331277300Ssmh#endif /* illumos */ 4332277300Ssmh#endif /* _KERNEL */ 4333219089Spjd 4334209962Smm/* 4335209962Smm * Import a non-root pool into the system. 4336209962Smm */ 4337185029Spjdint 4338219089Spjdspa_import(const char *pool, nvlist_t *config, nvlist_t *props, uint64_t flags) 4339185029Spjd{ 4340209962Smm spa_t *spa; 4341209962Smm char *altroot = NULL; 4342219089Spjd spa_load_state_t state = SPA_LOAD_IMPORT; 4343219089Spjd zpool_rewind_policy_t policy; 4344219089Spjd uint64_t mode = spa_mode_global; 4345219089Spjd uint64_t readonly = B_FALSE; 4346209962Smm int error; 4347209962Smm nvlist_t *nvroot; 4348209962Smm nvlist_t **spares, **l2cache; 4349209962Smm uint_t nspares, nl2cache; 4350209962Smm 4351209962Smm /* 4352209962Smm * If a pool with this name exists, return failure. 4353209962Smm */ 4354209962Smm mutex_enter(&spa_namespace_lock); 4355219089Spjd if (spa_lookup(pool) != NULL) { 4356209962Smm mutex_exit(&spa_namespace_lock); 4357249195Smm return (SET_ERROR(EEXIST)); 4358209962Smm } 4359209962Smm 4360209962Smm /* 4361209962Smm * Create and initialize the spa structure. 4362209962Smm */ 4363209962Smm (void) nvlist_lookup_string(props, 4364209962Smm zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot); 4365219089Spjd (void) nvlist_lookup_uint64(props, 4366219089Spjd zpool_prop_to_name(ZPOOL_PROP_READONLY), &readonly); 4367219089Spjd if (readonly) 4368219089Spjd mode = FREAD; 4369219089Spjd spa = spa_add(pool, config, altroot); 4370219089Spjd spa->spa_import_flags = flags; 4371209962Smm 4372209962Smm /* 4373219089Spjd * Verbatim import - Take a pool and insert it into the namespace 4374219089Spjd * as if it had been loaded at boot. 
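 * No vdevs are opened and spa_load() is never called on this path; the
 * caller-supplied config is trusted and synced to the cache file as-is.
 * spa_import_rootpool() relies on this when importing the root pool.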
4375219089Spjd */ 4376219089Spjd if (spa->spa_import_flags & ZFS_IMPORT_VERBATIM) { 4377219089Spjd if (props != NULL) 4378219089Spjd spa_configfile_set(spa, props, B_FALSE); 4379219089Spjd 4380219089Spjd spa_config_sync(spa, B_FALSE, B_TRUE); 4381287745Sdelphij spa_event_notify(spa, NULL, ESC_ZFS_POOL_IMPORT); 4382219089Spjd 4383219089Spjd mutex_exit(&spa_namespace_lock); 4384219089Spjd return (0); 4385219089Spjd } 4386219089Spjd 4387219089Spjd spa_activate(spa, mode); 4388219089Spjd 4389219089Spjd /* 4390209962Smm * Don't start async tasks until we know everything is healthy. 4391209962Smm */ 4392209962Smm spa_async_suspend(spa); 4393209962Smm 4394219089Spjd zpool_get_rewind_policy(config, &policy); 4395219089Spjd if (policy.zrp_request & ZPOOL_DO_REWIND) 4396219089Spjd state = SPA_LOAD_RECOVER; 4397219089Spjd 4398209962Smm /* 4399209962Smm * Pass off the heavy lifting to spa_load(). Pass TRUE for mosconfig 4400209962Smm * because the user-supplied config is actually the one to trust when 4401209962Smm * doing an import. 4402209962Smm */ 4403219089Spjd if (state != SPA_LOAD_RECOVER) 4404219089Spjd spa->spa_last_ubsync_txg = spa->spa_load_txg = 0; 4405209962Smm 4406219089Spjd error = spa_load_best(spa, state, B_TRUE, policy.zrp_txg, 4407219089Spjd policy.zrp_request); 4408219089Spjd 4409219089Spjd /* 4410219089Spjd * Propagate anything learned while loading the pool and pass it 4411219089Spjd * back to caller (i.e. rewind info, missing devices, etc). 4412219089Spjd */ 4413219089Spjd VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, 4414219089Spjd spa->spa_load_info) == 0); 4415219089Spjd 4416209962Smm spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 4417209962Smm /* 4418209962Smm * Toss any existing sparelist, as it doesn't have any validity 4419209962Smm * anymore, and conflicts with spa_has_spare(). 4420209962Smm */ 4421209962Smm if (spa->spa_spares.sav_config) { 4422209962Smm nvlist_free(spa->spa_spares.sav_config); 4423209962Smm spa->spa_spares.sav_config = NULL; 4424209962Smm spa_load_spares(spa); 4425209962Smm } 4426209962Smm if (spa->spa_l2cache.sav_config) { 4427209962Smm nvlist_free(spa->spa_l2cache.sav_config); 4428209962Smm spa->spa_l2cache.sav_config = NULL; 4429209962Smm spa_load_l2cache(spa); 4430209962Smm } 4431209962Smm 4432209962Smm VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 4433209962Smm &nvroot) == 0); 4434209962Smm if (error == 0) 4435209962Smm error = spa_validate_aux(spa, nvroot, -1ULL, 4436209962Smm VDEV_ALLOC_SPARE); 4437209962Smm if (error == 0) 4438209962Smm error = spa_validate_aux(spa, nvroot, -1ULL, 4439209962Smm VDEV_ALLOC_L2CACHE); 4440209962Smm spa_config_exit(spa, SCL_ALL, FTAG); 4441209962Smm 4442209962Smm if (props != NULL) 4443209962Smm spa_configfile_set(spa, props, B_FALSE); 4444209962Smm 4445209962Smm if (error != 0 || (props && spa_writeable(spa) && 4446209962Smm (error = spa_prop_set(spa, props)))) { 4447209962Smm spa_unload(spa); 4448209962Smm spa_deactivate(spa); 4449209962Smm spa_remove(spa); 4450209962Smm mutex_exit(&spa_namespace_lock); 4451209962Smm return (error); 4452209962Smm } 4453209962Smm 4454209962Smm spa_async_resume(spa); 4455209962Smm 4456209962Smm /* 4457209962Smm * Override any spares and level 2 cache devices as specified by 4458209962Smm * the user, as these may have correct device names/devids, etc. 
4459209962Smm */ 4460209962Smm if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, 4461209962Smm &spares, &nspares) == 0) { 4462209962Smm if (spa->spa_spares.sav_config) 4463209962Smm VERIFY(nvlist_remove(spa->spa_spares.sav_config, 4464209962Smm ZPOOL_CONFIG_SPARES, DATA_TYPE_NVLIST_ARRAY) == 0); 4465209962Smm else 4466209962Smm VERIFY(nvlist_alloc(&spa->spa_spares.sav_config, 4467209962Smm NV_UNIQUE_NAME, KM_SLEEP) == 0); 4468209962Smm VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config, 4469209962Smm ZPOOL_CONFIG_SPARES, spares, nspares) == 0); 4470209962Smm spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 4471209962Smm spa_load_spares(spa); 4472209962Smm spa_config_exit(spa, SCL_ALL, FTAG); 4473209962Smm spa->spa_spares.sav_sync = B_TRUE; 4474209962Smm } 4475209962Smm if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, 4476209962Smm &l2cache, &nl2cache) == 0) { 4477209962Smm if (spa->spa_l2cache.sav_config) 4478209962Smm VERIFY(nvlist_remove(spa->spa_l2cache.sav_config, 4479209962Smm ZPOOL_CONFIG_L2CACHE, DATA_TYPE_NVLIST_ARRAY) == 0); 4480209962Smm else 4481209962Smm VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config, 4482209962Smm NV_UNIQUE_NAME, KM_SLEEP) == 0); 4483209962Smm VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config, 4484209962Smm ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0); 4485209962Smm spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 4486209962Smm spa_load_l2cache(spa); 4487209962Smm spa_config_exit(spa, SCL_ALL, FTAG); 4488209962Smm spa->spa_l2cache.sav_sync = B_TRUE; 4489209962Smm } 4490209962Smm 4491219089Spjd /* 4492219089Spjd * Check for any removed devices. 4493219089Spjd */ 4494219089Spjd if (spa->spa_autoreplace) { 4495219089Spjd spa_aux_check_removed(&spa->spa_spares); 4496219089Spjd spa_aux_check_removed(&spa->spa_l2cache); 4497219089Spjd } 4498219089Spjd 4499209962Smm if (spa_writeable(spa)) { 4500209962Smm /* 4501209962Smm * Update the config cache to include the newly-imported pool. 4502209962Smm */ 4503209962Smm spa_config_update(spa, SPA_CONFIG_UPDATE_POOL); 4504209962Smm } 4505209962Smm 4506219089Spjd /* 4507219089Spjd * It's possible that the pool was expanded while it was exported. 4508219089Spjd * We kick off an async task to handle this for us. 4509219089Spjd */ 4510219089Spjd spa_async_request(spa, SPA_ASYNC_AUTOEXPAND); 4511219089Spjd 4512248571Smm spa_history_log_version(spa, "import"); 4513209962Smm 4514287745Sdelphij spa_event_notify(spa, NULL, ESC_ZFS_POOL_IMPORT); 4515287745Sdelphij 4516287745Sdelphij mutex_exit(&spa_namespace_lock); 4517287745Sdelphij 4518219089Spjd#ifdef __FreeBSD__ 4519219089Spjd#ifdef _KERNEL 4520219089Spjd zvol_create_minors(pool); 4521219089Spjd#endif 4522219089Spjd#endif 4523209962Smm return (0); 4524185029Spjd} 4525185029Spjd 4526168404Spjdnvlist_t * 4527168404Spjdspa_tryimport(nvlist_t *tryconfig) 4528168404Spjd{ 4529168404Spjd nvlist_t *config = NULL; 4530168404Spjd char *poolname; 4531168404Spjd spa_t *spa; 4532168404Spjd uint64_t state; 4533208443Smm int error; 4534168404Spjd 4535168404Spjd if (nvlist_lookup_string(tryconfig, ZPOOL_CONFIG_POOL_NAME, &poolname)) 4536168404Spjd return (NULL); 4537168404Spjd 4538168404Spjd if (nvlist_lookup_uint64(tryconfig, ZPOOL_CONFIG_POOL_STATE, &state)) 4539168404Spjd return (NULL); 4540168404Spjd 4541168404Spjd /* 4542168404Spjd * Create and initialize the spa structure. 
4543168404Spjd */ 4544168404Spjd mutex_enter(&spa_namespace_lock); 4545219089Spjd spa = spa_add(TRYIMPORT_NAME, tryconfig, NULL); 4546209962Smm spa_activate(spa, FREAD); 4547168404Spjd 4548168404Spjd /* 4549168404Spjd * Pass off the heavy lifting to spa_load(). 4550168404Spjd * Pass TRUE for mosconfig because the user-supplied config 4551168404Spjd * is actually the one to trust when doing an import. 4552168404Spjd */ 4553219089Spjd error = spa_load(spa, SPA_LOAD_TRYIMPORT, SPA_IMPORT_EXISTING, B_TRUE); 4554168404Spjd 4555168404Spjd /* 4556168404Spjd * If 'tryconfig' was at least parsable, return the current config. 4557168404Spjd */ 4558168404Spjd if (spa->spa_root_vdev != NULL) { 4559168404Spjd config = spa_config_generate(spa, NULL, -1ULL, B_TRUE); 4560168404Spjd VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME, 4561168404Spjd poolname) == 0); 4562168404Spjd VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE, 4563168404Spjd state) == 0); 4564168498Spjd VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_TIMESTAMP, 4565168498Spjd spa->spa_uberblock.ub_timestamp) == 0); 4566236884Smm VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, 4567236884Smm spa->spa_load_info) == 0); 4568168404Spjd 4569168404Spjd /* 4570185029Spjd * If the bootfs property exists on this pool then we 4571185029Spjd * copy it out so that external consumers can tell which 4572185029Spjd * pools are bootable. 4573168404Spjd */ 4574208443Smm if ((!error || error == EEXIST) && spa->spa_bootfs) { 4575185029Spjd char *tmpname = kmem_alloc(MAXPATHLEN, KM_SLEEP); 4576185029Spjd 4577185029Spjd /* 4578185029Spjd * We have to play games with the name since the 4579185029Spjd * pool was opened as TRYIMPORT_NAME. 4580185029Spjd */ 4581185029Spjd if (dsl_dsobj_to_dsname(spa_name(spa), 4582185029Spjd spa->spa_bootfs, tmpname) == 0) { 4583185029Spjd char *cp; 4584185029Spjd char *dsname = kmem_alloc(MAXPATHLEN, KM_SLEEP); 4585185029Spjd 4586185029Spjd cp = strchr(tmpname, '/'); 4587185029Spjd if (cp == NULL) { 4588185029Spjd (void) strlcpy(dsname, tmpname, 4589185029Spjd MAXPATHLEN); 4590185029Spjd } else { 4591185029Spjd (void) snprintf(dsname, MAXPATHLEN, 4592185029Spjd "%s/%s", poolname, ++cp); 4593185029Spjd } 4594185029Spjd VERIFY(nvlist_add_string(config, 4595185029Spjd ZPOOL_CONFIG_BOOTFS, dsname) == 0); 4596185029Spjd kmem_free(dsname, MAXPATHLEN); 4597185029Spjd } 4598185029Spjd kmem_free(tmpname, MAXPATHLEN); 4599185029Spjd } 4600185029Spjd 4601185029Spjd /* 4602185029Spjd * Add the list of hot spares and level 2 cache devices. 4603185029Spjd */ 4604209962Smm spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); 4605168404Spjd spa_add_spares(spa, config); 4606185029Spjd spa_add_l2cache(spa, config); 4607209962Smm spa_config_exit(spa, SCL_CONFIG, FTAG); 4608168404Spjd } 4609168404Spjd 4610168404Spjd spa_unload(spa); 4611168404Spjd spa_deactivate(spa); 4612168404Spjd spa_remove(spa); 4613168404Spjd mutex_exit(&spa_namespace_lock); 4614168404Spjd 4615168404Spjd return (config); 4616168404Spjd} 4617168404Spjd 4618168404Spjd/* 4619168404Spjd * Pool export/destroy 4620168404Spjd * 4621168404Spjd * The act of destroying or exporting a pool is very simple. We make sure there 4622168404Spjd * is no more pending I/O and any references to the pool are gone. Then, we 4623168404Spjd * update the pool state and sync all the labels to disk, removing the 4624207670Smm * configuration from the cache afterwards. If the 'hardforce' flag is set, then 4625207670Smm * we don't sync the labels or remove the configuration cache. 
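 *
 * A minimal usage sketch of the three wrappers defined below; it is
 * illustrative only and the pool name "tank" is hypothetical:
 *
 *	nvlist_t *oldconfig = NULL;
 *	int error;
 *
 *	error = spa_export("tank", &oldconfig, B_FALSE, B_FALSE);
 *	if (error == EXDEV)	// pool has an active shared spare
 *		error = spa_export("tank", &oldconfig, B_TRUE, B_FALSE);
 *	nvlist_free(oldconfig);
 *
 * spa_destroy() follows the same path with POOL_STATE_DESTROYED, and
 * spa_reset() unloads the pool without removing it from the namespace.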
4626168404Spjd */ 4627168404Spjdstatic int 4628185029Spjdspa_export_common(char *pool, int new_state, nvlist_t **oldconfig, 4629207670Smm boolean_t force, boolean_t hardforce) 4630168404Spjd{ 4631168404Spjd spa_t *spa; 4632168404Spjd 4633168404Spjd if (oldconfig) 4634168404Spjd *oldconfig = NULL; 4635168404Spjd 4636209962Smm if (!(spa_mode_global & FWRITE)) 4637249195Smm return (SET_ERROR(EROFS)); 4638168404Spjd 4639168404Spjd mutex_enter(&spa_namespace_lock); 4640168404Spjd if ((spa = spa_lookup(pool)) == NULL) { 4641168404Spjd mutex_exit(&spa_namespace_lock); 4642249195Smm return (SET_ERROR(ENOENT)); 4643168404Spjd } 4644168404Spjd 4645168404Spjd /* 4646168404Spjd * Put a hold on the pool, drop the namespace lock, stop async tasks, 4647168404Spjd * reacquire the namespace lock, and see if we can export. 4648168404Spjd */ 4649168404Spjd spa_open_ref(spa, FTAG); 4650168404Spjd mutex_exit(&spa_namespace_lock); 4651168404Spjd spa_async_suspend(spa); 4652168404Spjd mutex_enter(&spa_namespace_lock); 4653168404Spjd spa_close(spa, FTAG); 4654168404Spjd 4655168404Spjd /* 4656168404Spjd * The pool will be in core if it's openable, 4657168404Spjd * in which case we can modify its state. 4658168404Spjd */ 4659168404Spjd if (spa->spa_state != POOL_STATE_UNINITIALIZED && spa->spa_sync_on) { 4660168404Spjd /* 4661168404Spjd * Objsets may be open only because they're dirty, so we 4662168404Spjd * have to force it to sync before checking spa_refcnt. 4663168404Spjd */ 4664168404Spjd txg_wait_synced(spa->spa_dsl_pool, 0); 4665286575Smav spa_evicting_os_wait(spa); 4666168404Spjd 4667168404Spjd /* 4668168404Spjd * A pool cannot be exported or destroyed if there are active 4669168404Spjd * references. If we are resetting a pool, allow references by 4670168404Spjd * fault injection handlers. 4671168404Spjd */ 4672168404Spjd if (!spa_refcount_zero(spa) || 4673168404Spjd (spa->spa_inject_ref != 0 && 4674168404Spjd new_state != POOL_STATE_UNINITIALIZED)) { 4675168404Spjd spa_async_resume(spa); 4676168404Spjd mutex_exit(&spa_namespace_lock); 4677249195Smm return (SET_ERROR(EBUSY)); 4678168404Spjd } 4679168404Spjd 4680185029Spjd /* 4681185029Spjd * A pool cannot be exported if it has an active shared spare. 4682185029Spjd * This is to prevent other pools stealing the active spare 4683185029Spjd * from an exported pool. At user's own will, such pool can 4684185029Spjd * be forcedly exported. 4685185029Spjd */ 4686185029Spjd if (!force && new_state == POOL_STATE_EXPORTED && 4687185029Spjd spa_has_active_shared_spare(spa)) { 4688185029Spjd spa_async_resume(spa); 4689185029Spjd mutex_exit(&spa_namespace_lock); 4690249195Smm return (SET_ERROR(EXDEV)); 4691185029Spjd } 4692168404Spjd 4693168404Spjd /* 4694168404Spjd * We want this to be reflected on every label, 4695168404Spjd * so mark them all dirty. spa_unload() will do the 4696168404Spjd * final sync that pushes these changes out. 
4697168404Spjd */ 4698207670Smm if (new_state != POOL_STATE_UNINITIALIZED && !hardforce) { 4699185029Spjd spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 4700168404Spjd spa->spa_state = new_state; 4701219089Spjd spa->spa_final_txg = spa_last_synced_txg(spa) + 4702219089Spjd TXG_DEFER_SIZE + 1; 4703168404Spjd vdev_config_dirty(spa->spa_root_vdev); 4704185029Spjd spa_config_exit(spa, SCL_ALL, FTAG); 4705168404Spjd } 4706168404Spjd } 4707168404Spjd 4708185029Spjd spa_event_notify(spa, NULL, ESC_ZFS_POOL_DESTROY); 4709185029Spjd 4710168404Spjd if (spa->spa_state != POOL_STATE_UNINITIALIZED) { 4711168404Spjd spa_unload(spa); 4712168404Spjd spa_deactivate(spa); 4713168404Spjd } 4714168404Spjd 4715168404Spjd if (oldconfig && spa->spa_config) 4716168404Spjd VERIFY(nvlist_dup(spa->spa_config, oldconfig, 0) == 0); 4717168404Spjd 4718168404Spjd if (new_state != POOL_STATE_UNINITIALIZED) { 4719207670Smm if (!hardforce) 4720207670Smm spa_config_sync(spa, B_TRUE, B_TRUE); 4721168404Spjd spa_remove(spa); 4722168404Spjd } 4723168404Spjd mutex_exit(&spa_namespace_lock); 4724168404Spjd 4725168404Spjd return (0); 4726168404Spjd} 4727168404Spjd 4728168404Spjd/* 4729168404Spjd * Destroy a storage pool. 4730168404Spjd */ 4731168404Spjdint 4732168404Spjdspa_destroy(char *pool) 4733168404Spjd{ 4734207670Smm return (spa_export_common(pool, POOL_STATE_DESTROYED, NULL, 4735207670Smm B_FALSE, B_FALSE)); 4736168404Spjd} 4737168404Spjd 4738168404Spjd/* 4739168404Spjd * Export a storage pool. 4740168404Spjd */ 4741168404Spjdint 4742207670Smmspa_export(char *pool, nvlist_t **oldconfig, boolean_t force, 4743207670Smm boolean_t hardforce) 4744168404Spjd{ 4745207670Smm return (spa_export_common(pool, POOL_STATE_EXPORTED, oldconfig, 4746207670Smm force, hardforce)); 4747168404Spjd} 4748168404Spjd 4749168404Spjd/* 4750168404Spjd * Similar to spa_export(), this unloads the spa_t without actually removing it 4751168404Spjd * from the namespace in any way. 4752168404Spjd */ 4753168404Spjdint 4754168404Spjdspa_reset(char *pool) 4755168404Spjd{ 4756185029Spjd return (spa_export_common(pool, POOL_STATE_UNINITIALIZED, NULL, 4757207670Smm B_FALSE, B_FALSE)); 4758168404Spjd} 4759168404Spjd 4760168404Spjd/* 4761168404Spjd * ========================================================================== 4762168404Spjd * Device manipulation 4763168404Spjd * ========================================================================== 4764168404Spjd */ 4765168404Spjd 4766168404Spjd/* 4767185029Spjd * Add a device to a storage pool. 
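 *
 * Illustrative call sequence only (the nvroot normally arrives from
 * userland via the vdev-add ioctl and carries ZPOOL_CONFIG_CHILDREN,
 * ZPOOL_CONFIG_SPARES and/or ZPOOL_CONFIG_L2CACHE arrays describing the
 * devices being added; "tank" is a hypothetical pool name):
 *
 *	if ((error = spa_open("tank", &spa, FTAG)) == 0) {
 *		error = spa_vdev_add(spa, nvroot);
 *		spa_close(spa, FTAG);
 *	}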
4768168404Spjd */ 4769168404Spjdint 4770168404Spjdspa_vdev_add(spa_t *spa, nvlist_t *nvroot) 4771168404Spjd{ 4772219089Spjd uint64_t txg, id; 4773209962Smm int error; 4774168404Spjd vdev_t *rvd = spa->spa_root_vdev; 4775168404Spjd vdev_t *vd, *tvd; 4776185029Spjd nvlist_t **spares, **l2cache; 4777185029Spjd uint_t nspares, nl2cache; 4778168404Spjd 4779219089Spjd ASSERT(spa_writeable(spa)); 4780219089Spjd 4781168404Spjd txg = spa_vdev_enter(spa); 4782168404Spjd 4783168404Spjd if ((error = spa_config_parse(spa, &vd, nvroot, NULL, 0, 4784168404Spjd VDEV_ALLOC_ADD)) != 0) 4785168404Spjd return (spa_vdev_exit(spa, NULL, txg, error)); 4786168404Spjd 4787185029Spjd spa->spa_pending_vdev = vd; /* spa_vdev_exit() will clear this */ 4788168404Spjd 4789185029Spjd if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, &spares, 4790185029Spjd &nspares) != 0) 4791168404Spjd nspares = 0; 4792168404Spjd 4793185029Spjd if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, &l2cache, 4794185029Spjd &nl2cache) != 0) 4795185029Spjd nl2cache = 0; 4796185029Spjd 4797185029Spjd if (vd->vdev_children == 0 && nspares == 0 && nl2cache == 0) 4798168404Spjd return (spa_vdev_exit(spa, vd, txg, EINVAL)); 4799168404Spjd 4800185029Spjd if (vd->vdev_children != 0 && 4801185029Spjd (error = vdev_create(vd, txg, B_FALSE)) != 0) 4802185029Spjd return (spa_vdev_exit(spa, vd, txg, error)); 4803168404Spjd 4804168404Spjd /* 4805185029Spjd * We must validate the spares and l2cache devices after checking the 4806185029Spjd * children. Otherwise, vdev_inuse() will blindly overwrite the spare. 4807168404Spjd */ 4808185029Spjd if ((error = spa_validate_aux(spa, nvroot, txg, VDEV_ALLOC_ADD)) != 0) 4809168404Spjd return (spa_vdev_exit(spa, vd, txg, error)); 4810168404Spjd 4811168404Spjd /* 4812168404Spjd * Transfer each new top-level vdev from vd to rvd. 4813168404Spjd */ 4814209962Smm for (int c = 0; c < vd->vdev_children; c++) { 4815219089Spjd 4816219089Spjd /* 4817219089Spjd * Set the vdev id to the first hole, if one exists. 4818219089Spjd */ 4819219089Spjd for (id = 0; id < rvd->vdev_children; id++) { 4820219089Spjd if (rvd->vdev_child[id]->vdev_ishole) { 4821219089Spjd vdev_free(rvd->vdev_child[id]); 4822219089Spjd break; 4823219089Spjd } 4824219089Spjd } 4825168404Spjd tvd = vd->vdev_child[c]; 4826168404Spjd vdev_remove_child(vd, tvd); 4827219089Spjd tvd->vdev_id = id; 4828168404Spjd vdev_add_child(rvd, tvd); 4829168404Spjd vdev_config_dirty(tvd); 4830168404Spjd } 4831168404Spjd 4832168404Spjd if (nspares != 0) { 4833185029Spjd spa_set_aux_vdevs(&spa->spa_spares, spares, nspares, 4834185029Spjd ZPOOL_CONFIG_SPARES); 4835168404Spjd spa_load_spares(spa); 4836185029Spjd spa->spa_spares.sav_sync = B_TRUE; 4837168404Spjd } 4838168404Spjd 4839185029Spjd if (nl2cache != 0) { 4840185029Spjd spa_set_aux_vdevs(&spa->spa_l2cache, l2cache, nl2cache, 4841185029Spjd ZPOOL_CONFIG_L2CACHE); 4842185029Spjd spa_load_l2cache(spa); 4843185029Spjd spa->spa_l2cache.sav_sync = B_TRUE; 4844185029Spjd } 4845185029Spjd 4846168404Spjd /* 4847168404Spjd * We have to be careful when adding new vdevs to an existing pool. 4848168404Spjd * If other threads start allocating from these vdevs before we 4849168404Spjd * sync the config cache, and we lose power, then upon reboot we may 4850168404Spjd * fail to open the pool because there are DVAs that the config cache 4851168404Spjd * can't translate. 
Therefore, we first add the vdevs without 4852168404Spjd * initializing metaslabs; sync the config cache (via spa_vdev_exit()); 4853168404Spjd * and then let spa_config_update() initialize the new metaslabs. 4854168404Spjd * 4855168404Spjd * spa_load() checks for added-but-not-initialized vdevs, so that 4856168404Spjd * if we lose power at any point in this sequence, the remaining 4857168404Spjd * steps will be completed the next time we load the pool. 4858168404Spjd */ 4859168404Spjd (void) spa_vdev_exit(spa, vd, txg, 0); 4860168404Spjd 4861168404Spjd mutex_enter(&spa_namespace_lock); 4862168404Spjd spa_config_update(spa, SPA_CONFIG_UPDATE_POOL); 4863287745Sdelphij spa_event_notify(spa, NULL, ESC_ZFS_VDEV_ADD); 4864168404Spjd mutex_exit(&spa_namespace_lock); 4865168404Spjd 4866168404Spjd return (0); 4867168404Spjd} 4868168404Spjd 4869168404Spjd/* 4870168404Spjd * Attach a device to a mirror. The arguments are the path to any device 4871168404Spjd * in the mirror, and the nvroot for the new device. If the path specifies 4872168404Spjd * a device that is not mirrored, we automatically insert the mirror vdev. 4873168404Spjd * 4874168404Spjd * If 'replacing' is specified, the new device is intended to replace the 4875168404Spjd * existing device; in this case the two devices are made into their own 4876185029Spjd * mirror using the 'replacing' vdev, which is functionally identical to 4877168404Spjd * the mirror vdev (it actually reuses all the same ops) but has a few 4878168404Spjd * extra rules: you can't attach to it after it's been created, and upon 4879168404Spjd * completion of resilvering, the first disk (the one being replaced) 4880168404Spjd * is automatically detached. 4881168404Spjd */ 4882168404Spjdint 4883168404Spjdspa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot, int replacing) 4884168404Spjd{ 4885219089Spjd uint64_t txg, dtl_max_txg; 4886168404Spjd vdev_t *rvd = spa->spa_root_vdev; 4887168404Spjd vdev_t *oldvd, *newvd, *newrootvd, *pvd, *tvd; 4888168404Spjd vdev_ops_t *pvops; 4889185029Spjd char *oldvdpath, *newvdpath; 4890185029Spjd int newvd_isspare; 4891185029Spjd int error; 4892168404Spjd 4893219089Spjd ASSERT(spa_writeable(spa)); 4894219089Spjd 4895168404Spjd txg = spa_vdev_enter(spa); 4896168404Spjd 4897185029Spjd oldvd = spa_lookup_by_guid(spa, guid, B_FALSE); 4898168404Spjd 4899168404Spjd if (oldvd == NULL) 4900168404Spjd return (spa_vdev_exit(spa, NULL, txg, ENODEV)); 4901168404Spjd 4902168404Spjd if (!oldvd->vdev_ops->vdev_op_leaf) 4903168404Spjd return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 4904168404Spjd 4905168404Spjd pvd = oldvd->vdev_parent; 4906168404Spjd 4907168404Spjd if ((error = spa_config_parse(spa, &newrootvd, nvroot, NULL, 0, 4908230514Smm VDEV_ALLOC_ATTACH)) != 0) 4909185029Spjd return (spa_vdev_exit(spa, NULL, txg, EINVAL)); 4910185029Spjd 4911185029Spjd if (newrootvd->vdev_children != 1) 4912168404Spjd return (spa_vdev_exit(spa, newrootvd, txg, EINVAL)); 4913168404Spjd 4914168404Spjd newvd = newrootvd->vdev_child[0]; 4915168404Spjd 4916168404Spjd if (!newvd->vdev_ops->vdev_op_leaf) 4917168404Spjd return (spa_vdev_exit(spa, newrootvd, txg, EINVAL)); 4918168404Spjd 4919168404Spjd if ((error = vdev_create(newrootvd, txg, replacing)) != 0) 4920168404Spjd return (spa_vdev_exit(spa, newrootvd, txg, error)); 4921168404Spjd 4922185029Spjd /* 4923185029Spjd * Spares can't replace logs 4924185029Spjd */ 4925185029Spjd if (oldvd->vdev_top->vdev_islog && newvd->vdev_isspare) 4926185029Spjd return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 
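	/*
	 * Decide what kind of parent vdev oldvd will get: a plain attach
	 * may only hang off a mirror or the root vdev and uses the mirror
	 * ops; a replace uses the 'spare' ops when the new device is a hot
	 * spare and the 'replacing' ops otherwise.
	 */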
4927185029Spjd 4928168404Spjd if (!replacing) { 4929168404Spjd /* 4930168404Spjd * For attach, the only allowable parent is a mirror or the root 4931168404Spjd * vdev. 4932168404Spjd */ 4933168404Spjd if (pvd->vdev_ops != &vdev_mirror_ops && 4934168404Spjd pvd->vdev_ops != &vdev_root_ops) 4935168404Spjd return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 4936168404Spjd 4937168404Spjd pvops = &vdev_mirror_ops; 4938168404Spjd } else { 4939168404Spjd /* 4940168404Spjd * Active hot spares can only be replaced by inactive hot 4941168404Spjd * spares. 4942168404Spjd */ 4943168404Spjd if (pvd->vdev_ops == &vdev_spare_ops && 4944219089Spjd oldvd->vdev_isspare && 4945168404Spjd !spa_has_spare(spa, newvd->vdev_guid)) 4946168404Spjd return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 4947168404Spjd 4948168404Spjd /* 4949168404Spjd * If the source is a hot spare, and the parent isn't already a 4950168404Spjd * spare, then we want to create a new hot spare. Otherwise, we 4951168404Spjd * want to create a replacing vdev. The user is not allowed to 4952168404Spjd * attach to a spared vdev child unless the 'isspare' state is 4953168404Spjd * the same (spare replaces spare, non-spare replaces 4954168404Spjd * non-spare). 4955168404Spjd */ 4956219089Spjd if (pvd->vdev_ops == &vdev_replacing_ops && 4957219089Spjd spa_version(spa) < SPA_VERSION_MULTI_REPLACE) { 4958168404Spjd return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 4959219089Spjd } else if (pvd->vdev_ops == &vdev_spare_ops && 4960219089Spjd newvd->vdev_isspare != oldvd->vdev_isspare) { 4961168404Spjd return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 4962219089Spjd } 4963219089Spjd 4964219089Spjd if (newvd->vdev_isspare) 4965168404Spjd pvops = &vdev_spare_ops; 4966168404Spjd else 4967168404Spjd pvops = &vdev_replacing_ops; 4968168404Spjd } 4969168404Spjd 4970168404Spjd /* 4971219089Spjd * Make sure the new device is big enough. 4972168404Spjd */ 4973219089Spjd if (newvd->vdev_asize < vdev_get_min_asize(oldvd)) 4974168404Spjd return (spa_vdev_exit(spa, newrootvd, txg, EOVERFLOW)); 4975168404Spjd 4976168404Spjd /* 4977168404Spjd * The new device cannot have a higher alignment requirement 4978168404Spjd * than the top-level vdev. 4979168404Spjd */ 4980168404Spjd if (newvd->vdev_ashift > oldvd->vdev_top->vdev_ashift) 4981168404Spjd return (spa_vdev_exit(spa, newrootvd, txg, EDOM)); 4982168404Spjd 4983168404Spjd /* 4984168404Spjd * If this is an in-place replacement, update oldvd's path and devid 4985168404Spjd * to make it distinguishable from newvd, and unopenable from now on. 4986168404Spjd */ 4987168404Spjd if (strcmp(oldvd->vdev_path, newvd->vdev_path) == 0) { 4988168404Spjd spa_strfree(oldvd->vdev_path); 4989168404Spjd oldvd->vdev_path = kmem_alloc(strlen(newvd->vdev_path) + 5, 4990168404Spjd KM_SLEEP); 4991168404Spjd (void) sprintf(oldvd->vdev_path, "%s/%s", 4992168404Spjd newvd->vdev_path, "old"); 4993168404Spjd if (oldvd->vdev_devid != NULL) { 4994168404Spjd spa_strfree(oldvd->vdev_devid); 4995168404Spjd oldvd->vdev_devid = NULL; 4996168404Spjd } 4997168404Spjd } 4998168404Spjd 4999219089Spjd /* mark the device being resilvered */ 5000254112Sdelphij newvd->vdev_resilver_txg = txg; 5001219089Spjd 5002168404Spjd /* 5003168404Spjd * If the parent is not a mirror, or if we're replacing, insert the new 5004168404Spjd * mirror/replacing/spare vdev above oldvd. 
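	 *
	 * For example (sketch only): attaching C to a plain top-level disk B
	 * with 'replacing' set turns
	 *
	 *	root -> B	into	root -> replacing(B, C)
	 *
	 * while attaching C to one side of an existing mirror(A, B) without
	 * 'replacing' simply adds a child, giving mirror(A, B, C); no new
	 * parent is inserted because pvd already has the mirror ops.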
5005168404Spjd */ 5006168404Spjd if (pvd->vdev_ops != pvops) 5007168404Spjd pvd = vdev_add_parent(oldvd, pvops); 5008168404Spjd 5009168404Spjd ASSERT(pvd->vdev_top->vdev_parent == rvd); 5010168404Spjd ASSERT(pvd->vdev_ops == pvops); 5011168404Spjd ASSERT(oldvd->vdev_parent == pvd); 5012168404Spjd 5013168404Spjd /* 5014168404Spjd * Extract the new device from its root and add it to pvd. 5015168404Spjd */ 5016168404Spjd vdev_remove_child(newrootvd, newvd); 5017168404Spjd newvd->vdev_id = pvd->vdev_children; 5018219089Spjd newvd->vdev_crtxg = oldvd->vdev_crtxg; 5019168404Spjd vdev_add_child(pvd, newvd); 5020168404Spjd 5021168404Spjd tvd = newvd->vdev_top; 5022168404Spjd ASSERT(pvd->vdev_top == tvd); 5023168404Spjd ASSERT(tvd->vdev_parent == rvd); 5024168404Spjd 5025168404Spjd vdev_config_dirty(tvd); 5026168404Spjd 5027168404Spjd /* 5028219089Spjd * Set newvd's DTL to [TXG_INITIAL, dtl_max_txg) so that we account 5029219089Spjd * for any dmu_sync-ed blocks. It will propagate upward when 5030219089Spjd * spa_vdev_exit() calls vdev_dtl_reassess(). 5031168404Spjd */ 5032219089Spjd dtl_max_txg = txg + TXG_CONCURRENT_STATES; 5033168404Spjd 5034219089Spjd vdev_dtl_dirty(newvd, DTL_MISSING, TXG_INITIAL, 5035219089Spjd dtl_max_txg - TXG_INITIAL); 5036168404Spjd 5037209962Smm if (newvd->vdev_isspare) { 5038168404Spjd spa_spare_activate(newvd); 5039209962Smm spa_event_notify(spa, newvd, ESC_ZFS_VDEV_SPARE); 5040209962Smm } 5041209962Smm 5042185029Spjd oldvdpath = spa_strdup(oldvd->vdev_path); 5043185029Spjd newvdpath = spa_strdup(newvd->vdev_path); 5044185029Spjd newvd_isspare = newvd->vdev_isspare; 5045168404Spjd 5046168404Spjd /* 5047168404Spjd * Mark newvd's DTL dirty in this txg. 5048168404Spjd */ 5049168404Spjd vdev_dirty(tvd, VDD_DTL, newvd, txg); 5050168404Spjd 5051219089Spjd /* 5052258717Savg * Schedule the resilver to restart in the future. We do this to 5053258717Savg * ensure that dmu_sync-ed blocks have been stitched into the 5054258717Savg * respective datasets. 5055219089Spjd */ 5056219089Spjd dsl_resilver_restart(spa->spa_dsl_pool, dtl_max_txg); 5057168404Spjd 5058287745Sdelphij if (spa->spa_bootfs) 5059287745Sdelphij spa_event_notify(spa, newvd, ESC_ZFS_BOOTFS_VDEV_ATTACH); 5060287745Sdelphij 5061287745Sdelphij spa_event_notify(spa, newvd, ESC_ZFS_VDEV_ATTACH); 5062287745Sdelphij 5063219089Spjd /* 5064219089Spjd * Commit the config 5065219089Spjd */ 5066219089Spjd (void) spa_vdev_exit(spa, newrootvd, dtl_max_txg, 0); 5067185029Spjd 5068248571Smm spa_history_log_internal(spa, "vdev attach", NULL, 5069219089Spjd "%s vdev=%s %s vdev=%s", 5070219089Spjd replacing && newvd_isspare ? "spare in" : 5071219089Spjd replacing ? "replace" : "attach", newvdpath, 5072219089Spjd replacing ? "for" : "to", oldvdpath); 5073219089Spjd 5074185029Spjd spa_strfree(oldvdpath); 5075185029Spjd spa_strfree(newvdpath); 5076185029Spjd 5077168404Spjd return (0); 5078168404Spjd} 5079168404Spjd 5080168404Spjd/* 5081168404Spjd * Detach a device from a mirror or replacing vdev. 5082251631Sdelphij * 5083168404Spjd * If 'replace_done' is specified, only detach if the parent 5084168404Spjd * is a replacing vdev. 
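 *
 * Illustrative sketch: spa_vdev_resilver_done() below uses this to
 * detach the disk that has just been replaced while pinning the expected
 * parent guid, so that a concurrent topology change makes the call fail
 * with EBUSY rather than detach the wrong disk:
 *
 *	(void) spa_vdev_detach(spa, guid, pguid, B_TRUE);
 *
 * Passing pguid == 0 skips the parent check.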
5085168404Spjd */ 5086168404Spjdint 5087209962Smmspa_vdev_detach(spa_t *spa, uint64_t guid, uint64_t pguid, int replace_done) 5088168404Spjd{ 5089168404Spjd uint64_t txg; 5090209962Smm int error; 5091168404Spjd vdev_t *rvd = spa->spa_root_vdev; 5092168404Spjd vdev_t *vd, *pvd, *cvd, *tvd; 5093168404Spjd boolean_t unspare = B_FALSE; 5094247187Smm uint64_t unspare_guid = 0; 5095219089Spjd char *vdpath; 5096168404Spjd 5097219089Spjd ASSERT(spa_writeable(spa)); 5098219089Spjd 5099168404Spjd txg = spa_vdev_enter(spa); 5100168404Spjd 5101185029Spjd vd = spa_lookup_by_guid(spa, guid, B_FALSE); 5102168404Spjd 5103168404Spjd if (vd == NULL) 5104168404Spjd return (spa_vdev_exit(spa, NULL, txg, ENODEV)); 5105168404Spjd 5106168404Spjd if (!vd->vdev_ops->vdev_op_leaf) 5107168404Spjd return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 5108168404Spjd 5109168404Spjd pvd = vd->vdev_parent; 5110168404Spjd 5111168404Spjd /* 5112209962Smm * If the parent/child relationship is not as expected, don't do it. 5113209962Smm * Consider M(A,R(B,C)) -- that is, a mirror of A with a replacing 5114209962Smm * vdev that's replacing B with C. The user's intent in replacing 5115209962Smm * is to go from M(A,B) to M(A,C). If the user decides to cancel 5116209962Smm * the replace by detaching C, the expected behavior is to end up 5117209962Smm * M(A,B). But suppose that right after deciding to detach C, 5118209962Smm * the replacement of B completes. We would have M(A,C), and then 5119209962Smm * ask to detach C, which would leave us with just A -- not what 5120209962Smm * the user wanted. To prevent this, we make sure that the 5121209962Smm * parent/child relationship hasn't changed -- in this example, 5122209962Smm * that C's parent is still the replacing vdev R. 5123209962Smm */ 5124209962Smm if (pvd->vdev_guid != pguid && pguid != 0) 5125209962Smm return (spa_vdev_exit(spa, NULL, txg, EBUSY)); 5126209962Smm 5127209962Smm /* 5128219089Spjd * Only 'replacing' or 'spare' vdevs can be replaced. 5129168404Spjd */ 5130219089Spjd if (replace_done && pvd->vdev_ops != &vdev_replacing_ops && 5131219089Spjd pvd->vdev_ops != &vdev_spare_ops) 5132219089Spjd return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 5133168404Spjd 5134168404Spjd ASSERT(pvd->vdev_ops != &vdev_spare_ops || 5135185029Spjd spa_version(spa) >= SPA_VERSION_SPARES); 5136168404Spjd 5137168404Spjd /* 5138168404Spjd * Only mirror, replacing, and spare vdevs support detach. 5139168404Spjd */ 5140168404Spjd if (pvd->vdev_ops != &vdev_replacing_ops && 5141168404Spjd pvd->vdev_ops != &vdev_mirror_ops && 5142168404Spjd pvd->vdev_ops != &vdev_spare_ops) 5143168404Spjd return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 5144168404Spjd 5145168404Spjd /* 5146209962Smm * If this device has the only valid copy of some data, 5147209962Smm * we cannot safely detach it. 5148168404Spjd */ 5149209962Smm if (vdev_dtl_required(vd)) 5150168404Spjd return (spa_vdev_exit(spa, NULL, txg, EBUSY)); 5151168404Spjd 5152209962Smm ASSERT(pvd->vdev_children >= 2); 5153168404Spjd 5154168404Spjd /* 5155185029Spjd * If we are detaching the second disk from a replacing vdev, then 5156185029Spjd * check to see if we changed the original vdev's path to have "/old" 5157185029Spjd * at the end in spa_vdev_attach(). If so, undo that change now. 
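	 *
	 * For example (device paths are hypothetical): if "/dev/da1" was
	 * replaced in place, the original disk's path became "/dev/da1/old"
	 * at attach time; cancelling the replacement by detaching the new
	 * "/dev/da1" restores the surviving disk's path to "/dev/da1".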
5158168404Spjd */ 5159219089Spjd if (pvd->vdev_ops == &vdev_replacing_ops && vd->vdev_id > 0 && 5160219089Spjd vd->vdev_path != NULL) { 5161219089Spjd size_t len = strlen(vd->vdev_path); 5162219089Spjd 5163219089Spjd for (int c = 0; c < pvd->vdev_children; c++) { 5164219089Spjd cvd = pvd->vdev_child[c]; 5165219089Spjd 5166219089Spjd if (cvd == vd || cvd->vdev_path == NULL) 5167219089Spjd continue; 5168219089Spjd 5169219089Spjd if (strncmp(cvd->vdev_path, vd->vdev_path, len) == 0 && 5170219089Spjd strcmp(cvd->vdev_path + len, "/old") == 0) { 5171219089Spjd spa_strfree(cvd->vdev_path); 5172219089Spjd cvd->vdev_path = spa_strdup(vd->vdev_path); 5173219089Spjd break; 5174219089Spjd } 5175185029Spjd } 5176185029Spjd } 5177168404Spjd 5178168404Spjd /* 5179168404Spjd * If we are detaching the original disk from a spare, then it implies 5180168404Spjd * that the spare should become a real disk, and be removed from the 5181168404Spjd * active spare list for the pool. 5182168404Spjd */ 5183168404Spjd if (pvd->vdev_ops == &vdev_spare_ops && 5184219089Spjd vd->vdev_id == 0 && 5185219089Spjd pvd->vdev_child[pvd->vdev_children - 1]->vdev_isspare) 5186168404Spjd unspare = B_TRUE; 5187168404Spjd 5188168404Spjd /* 5189168404Spjd * Erase the disk labels so the disk can be used for other things. 5190168404Spjd * This must be done after all other error cases are handled, 5191168404Spjd * but before we disembowel vd (so we can still do I/O to it). 5192168404Spjd * But if we can't do it, don't treat the error as fatal -- 5193168404Spjd * it may be that the unwritability of the disk is the reason 5194168404Spjd * it's being detached! 5195168404Spjd */ 5196168404Spjd error = vdev_label_init(vd, 0, VDEV_LABEL_REMOVE); 5197168404Spjd 5198168404Spjd /* 5199168404Spjd * Remove vd from its parent and compact the parent's children. 5200168404Spjd */ 5201168404Spjd vdev_remove_child(pvd, vd); 5202168404Spjd vdev_compact_children(pvd); 5203168404Spjd 5204168404Spjd /* 5205168404Spjd * Remember one of the remaining children so we can get tvd below. 5206168404Spjd */ 5207219089Spjd cvd = pvd->vdev_child[pvd->vdev_children - 1]; 5208168404Spjd 5209168404Spjd /* 5210168404Spjd * If we need to remove the remaining child from the list of hot spares, 5211209962Smm * do it now, marking the vdev as no longer a spare in the process. 5212209962Smm * We must do this before vdev_remove_parent(), because that can 5213209962Smm * change the GUID if it creates a new toplevel GUID. For a similar 5214209962Smm * reason, we must remove the spare now, in the same txg as the detach; 5215209962Smm * otherwise someone could attach a new sibling, change the GUID, and 5216209962Smm * the subsequent attempt to spa_vdev_remove(unspare_guid) would fail. 5217168404Spjd */ 5218168404Spjd if (unspare) { 5219168404Spjd ASSERT(cvd->vdev_isspare); 5220168404Spjd spa_spare_remove(cvd); 5221168404Spjd unspare_guid = cvd->vdev_guid; 5222209962Smm (void) spa_vdev_remove(spa, unspare_guid, B_TRUE); 5223219089Spjd cvd->vdev_unspare = B_TRUE; 5224168404Spjd } 5225168404Spjd 5226168404Spjd /* 5227168404Spjd * If the parent mirror/replacing vdev only has one child, 5228168404Spjd * the parent is no longer needed. Remove it from the tree. 
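	 *
	 * For example, once one side of mirror(A, B) is detached, the
	 * remaining single-child mirror(A) is collapsed by
	 * vdev_remove_parent() so that A becomes a plain top-level vdev
	 * again; the same collapse turns replacing(B) or spare(B) back into
	 * a plain B when a replacement is cancelled or a spare is detached.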
5229168404Spjd */ 5230219089Spjd if (pvd->vdev_children == 1) { 5231219089Spjd if (pvd->vdev_ops == &vdev_spare_ops) 5232219089Spjd cvd->vdev_unspare = B_FALSE; 5233168404Spjd vdev_remove_parent(cvd); 5234219089Spjd } 5235168404Spjd 5236219089Spjd 5237168404Spjd /* 5238168404Spjd * We don't set tvd until now because the parent we just removed 5239168404Spjd * may have been the previous top-level vdev. 5240168404Spjd */ 5241168404Spjd tvd = cvd->vdev_top; 5242168404Spjd ASSERT(tvd->vdev_parent == rvd); 5243168404Spjd 5244168404Spjd /* 5245168404Spjd * Reevaluate the parent vdev state. 5246168404Spjd */ 5247185029Spjd vdev_propagate_state(cvd); 5248168404Spjd 5249168404Spjd /* 5250219089Spjd * If the 'autoexpand' property is set on the pool then automatically 5251219089Spjd * try to expand the size of the pool. For example if the device we 5252219089Spjd * just detached was smaller than the others, it may be possible to 5253219089Spjd * add metaslabs (i.e. grow the pool). We need to reopen the vdev 5254219089Spjd * first so that we can obtain the updated sizes of the leaf vdevs. 5255168404Spjd */ 5256219089Spjd if (spa->spa_autoexpand) { 5257219089Spjd vdev_reopen(tvd); 5258219089Spjd vdev_expand(tvd, txg); 5259219089Spjd } 5260168404Spjd 5261168404Spjd vdev_config_dirty(tvd); 5262168404Spjd 5263168404Spjd /* 5264168404Spjd * Mark vd's DTL as dirty in this txg. vdev_dtl_sync() will see that 5265168404Spjd * vd->vdev_detached is set and free vd's DTL object in syncing context. 5266168404Spjd * But first make sure we're not on any *other* txg's DTL list, to 5267168404Spjd * prevent vd from being accessed after it's freed. 5268168404Spjd */ 5269219089Spjd vdpath = spa_strdup(vd->vdev_path); 5270209962Smm for (int t = 0; t < TXG_SIZE; t++) 5271168404Spjd (void) txg_list_remove_this(&tvd->vdev_dtl_list, vd, t); 5272168404Spjd vd->vdev_detached = B_TRUE; 5273168404Spjd vdev_dirty(tvd, VDD_DTL, vd, txg); 5274168404Spjd 5275185029Spjd spa_event_notify(spa, vd, ESC_ZFS_VDEV_REMOVE); 5276185029Spjd 5277219089Spjd /* hang on to the spa before we release the lock */ 5278219089Spjd spa_open_ref(spa, FTAG); 5279219089Spjd 5280168404Spjd error = spa_vdev_exit(spa, vd, txg, 0); 5281168404Spjd 5282248571Smm spa_history_log_internal(spa, "detach", NULL, 5283219089Spjd "vdev=%s", vdpath); 5284219089Spjd spa_strfree(vdpath); 5285219089Spjd 5286168404Spjd /* 5287168404Spjd * If this was the removal of the original device in a hot spare vdev, 5288168404Spjd * then we want to go through and remove the device from the hot spare 5289168404Spjd * list of every other pool. 
5290168404Spjd */ 5291168404Spjd if (unspare) { 5292219089Spjd spa_t *altspa = NULL; 5293219089Spjd 5294168404Spjd mutex_enter(&spa_namespace_lock); 5295219089Spjd while ((altspa = spa_next(altspa)) != NULL) { 5296219089Spjd if (altspa->spa_state != POOL_STATE_ACTIVE || 5297219089Spjd altspa == spa) 5298168404Spjd continue; 5299219089Spjd 5300219089Spjd spa_open_ref(altspa, FTAG); 5301185029Spjd mutex_exit(&spa_namespace_lock); 5302219089Spjd (void) spa_vdev_remove(altspa, unspare_guid, B_TRUE); 5303185029Spjd mutex_enter(&spa_namespace_lock); 5304219089Spjd spa_close(altspa, FTAG); 5305168404Spjd } 5306168404Spjd mutex_exit(&spa_namespace_lock); 5307219089Spjd 5308219089Spjd /* search the rest of the vdevs for spares to remove */ 5309219089Spjd spa_vdev_resilver_done(spa); 5310168404Spjd } 5311168404Spjd 5312219089Spjd /* all done with the spa; OK to release */ 5313219089Spjd mutex_enter(&spa_namespace_lock); 5314219089Spjd spa_close(spa, FTAG); 5315219089Spjd mutex_exit(&spa_namespace_lock); 5316219089Spjd 5317168404Spjd return (error); 5318168404Spjd} 5319168404Spjd 5320219089Spjd/* 5321219089Spjd * Split a set of devices from their mirrors, and create a new pool from them. 5322219089Spjd */ 5323219089Spjdint 5324219089Spjdspa_vdev_split_mirror(spa_t *spa, char *newname, nvlist_t *config, 5325219089Spjd nvlist_t *props, boolean_t exp) 5326219089Spjd{ 5327219089Spjd int error = 0; 5328219089Spjd uint64_t txg, *glist; 5329219089Spjd spa_t *newspa; 5330219089Spjd uint_t c, children, lastlog; 5331219089Spjd nvlist_t **child, *nvl, *tmp; 5332219089Spjd dmu_tx_t *tx; 5333219089Spjd char *altroot = NULL; 5334219089Spjd vdev_t *rvd, **vml = NULL; /* vdev modify list */ 5335219089Spjd boolean_t activate_slog; 5336219089Spjd 5337219089Spjd ASSERT(spa_writeable(spa)); 5338219089Spjd 5339219089Spjd txg = spa_vdev_enter(spa); 5340219089Spjd 5341219089Spjd /* clear the log and flush everything up to now */ 5342219089Spjd activate_slog = spa_passivate_log(spa); 5343219089Spjd (void) spa_vdev_config_exit(spa, NULL, txg, 0, FTAG); 5344219089Spjd error = spa_offline_log(spa); 5345219089Spjd txg = spa_vdev_config_enter(spa); 5346219089Spjd 5347219089Spjd if (activate_slog) 5348219089Spjd spa_activate_log(spa); 5349219089Spjd 5350219089Spjd if (error != 0) 5351219089Spjd return (spa_vdev_exit(spa, NULL, txg, error)); 5352219089Spjd 5353219089Spjd /* check new spa name before going any further */ 5354219089Spjd if (spa_lookup(newname) != NULL) 5355219089Spjd return (spa_vdev_exit(spa, NULL, txg, EEXIST)); 5356219089Spjd 5357219089Spjd /* 5358219089Spjd * scan through all the children to ensure they're all mirrors 5359219089Spjd */ 5360219089Spjd if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvl) != 0 || 5361219089Spjd nvlist_lookup_nvlist_array(nvl, ZPOOL_CONFIG_CHILDREN, &child, 5362219089Spjd &children) != 0) 5363219089Spjd return (spa_vdev_exit(spa, NULL, txg, EINVAL)); 5364219089Spjd 5365219089Spjd /* first, check to ensure we've got the right child count */ 5366219089Spjd rvd = spa->spa_root_vdev; 5367219089Spjd lastlog = 0; 5368219089Spjd for (c = 0; c < rvd->vdev_children; c++) { 5369219089Spjd vdev_t *vd = rvd->vdev_child[c]; 5370219089Spjd 5371219089Spjd /* don't count the holes & logs as children */ 5372219089Spjd if (vd->vdev_islog || vd->vdev_ishole) { 5373219089Spjd if (lastlog == 0) 5374219089Spjd lastlog = c; 5375219089Spjd continue; 5376219089Spjd } 5377219089Spjd 5378219089Spjd lastlog = 0; 5379219089Spjd } 5380219089Spjd if (children != (lastlog != 0 ? 
lastlog : rvd->vdev_children)) 5381219089Spjd return (spa_vdev_exit(spa, NULL, txg, EINVAL)); 5382219089Spjd 5383219089Spjd /* next, ensure no spare or cache devices are part of the split */ 5384219089Spjd if (nvlist_lookup_nvlist(nvl, ZPOOL_CONFIG_SPARES, &tmp) == 0 || 5385219089Spjd nvlist_lookup_nvlist(nvl, ZPOOL_CONFIG_L2CACHE, &tmp) == 0) 5386219089Spjd return (spa_vdev_exit(spa, NULL, txg, EINVAL)); 5387219089Spjd 5388219089Spjd vml = kmem_zalloc(children * sizeof (vdev_t *), KM_SLEEP); 5389219089Spjd glist = kmem_zalloc(children * sizeof (uint64_t), KM_SLEEP); 5390219089Spjd 5391219089Spjd /* then, loop over each vdev and validate it */ 5392219089Spjd for (c = 0; c < children; c++) { 5393219089Spjd uint64_t is_hole = 0; 5394219089Spjd 5395219089Spjd (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE, 5396219089Spjd &is_hole); 5397219089Spjd 5398219089Spjd if (is_hole != 0) { 5399219089Spjd if (spa->spa_root_vdev->vdev_child[c]->vdev_ishole || 5400219089Spjd spa->spa_root_vdev->vdev_child[c]->vdev_islog) { 5401219089Spjd continue; 5402219089Spjd } else { 5403249195Smm error = SET_ERROR(EINVAL); 5404219089Spjd break; 5405219089Spjd } 5406219089Spjd } 5407219089Spjd 5408219089Spjd /* which disk is going to be split? */ 5409219089Spjd if (nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_GUID, 5410219089Spjd &glist[c]) != 0) { 5411249195Smm error = SET_ERROR(EINVAL); 5412219089Spjd break; 5413219089Spjd } 5414219089Spjd 5415219089Spjd /* look it up in the spa */ 5416219089Spjd vml[c] = spa_lookup_by_guid(spa, glist[c], B_FALSE); 5417219089Spjd if (vml[c] == NULL) { 5418249195Smm error = SET_ERROR(ENODEV); 5419219089Spjd break; 5420219089Spjd } 5421219089Spjd 5422219089Spjd /* make sure there's nothing stopping the split */ 5423219089Spjd if (vml[c]->vdev_parent->vdev_ops != &vdev_mirror_ops || 5424219089Spjd vml[c]->vdev_islog || 5425219089Spjd vml[c]->vdev_ishole || 5426219089Spjd vml[c]->vdev_isspare || 5427219089Spjd vml[c]->vdev_isl2cache || 5428219089Spjd !vdev_writeable(vml[c]) || 5429219089Spjd vml[c]->vdev_children != 0 || 5430219089Spjd vml[c]->vdev_state != VDEV_STATE_HEALTHY || 5431219089Spjd c != spa->spa_root_vdev->vdev_child[c]->vdev_id) { 5432249195Smm error = SET_ERROR(EINVAL); 5433219089Spjd break; 5434219089Spjd } 5435219089Spjd 5436219089Spjd if (vdev_dtl_required(vml[c])) { 5437249195Smm error = SET_ERROR(EBUSY); 5438219089Spjd break; 5439219089Spjd } 5440219089Spjd 5441219089Spjd /* we need certain info from the top level */ 5442219089Spjd VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_METASLAB_ARRAY, 5443219089Spjd vml[c]->vdev_top->vdev_ms_array) == 0); 5444219089Spjd VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_METASLAB_SHIFT, 5445219089Spjd vml[c]->vdev_top->vdev_ms_shift) == 0); 5446219089Spjd VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_ASIZE, 5447219089Spjd vml[c]->vdev_top->vdev_asize) == 0); 5448219089Spjd VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_ASHIFT, 5449219089Spjd vml[c]->vdev_top->vdev_ashift) == 0); 5450299441Smav 5451299441Smav /* transfer per-vdev ZAPs */ 5452299441Smav ASSERT3U(vml[c]->vdev_leaf_zap, !=, 0); 5453299441Smav VERIFY0(nvlist_add_uint64(child[c], 5454299441Smav ZPOOL_CONFIG_VDEV_LEAF_ZAP, vml[c]->vdev_leaf_zap)); 5455299441Smav 5456299441Smav ASSERT3U(vml[c]->vdev_top->vdev_top_zap, !=, 0); 5457299441Smav VERIFY0(nvlist_add_uint64(child[c], 5458299441Smav ZPOOL_CONFIG_VDEV_TOP_ZAP, 5459299441Smav vml[c]->vdev_parent->vdev_top_zap)); 5460219089Spjd } 5461219089Spjd 5462219089Spjd if (error != 0) { 5463219089Spjd 
kmem_free(vml, children * sizeof (vdev_t *)); 5464219089Spjd kmem_free(glist, children * sizeof (uint64_t)); 5465219089Spjd return (spa_vdev_exit(spa, NULL, txg, error)); 5466219089Spjd } 5467219089Spjd 5468219089Spjd /* stop writers from using the disks */ 5469219089Spjd for (c = 0; c < children; c++) { 5470219089Spjd if (vml[c] != NULL) 5471219089Spjd vml[c]->vdev_offline = B_TRUE; 5472219089Spjd } 5473219089Spjd vdev_reopen(spa->spa_root_vdev); 5474219089Spjd 5475219089Spjd /* 5476219089Spjd * Temporarily record the splitting vdevs in the spa config. This 5477219089Spjd * will disappear once the config is regenerated. 5478219089Spjd */ 5479219089Spjd VERIFY(nvlist_alloc(&nvl, NV_UNIQUE_NAME, KM_SLEEP) == 0); 5480219089Spjd VERIFY(nvlist_add_uint64_array(nvl, ZPOOL_CONFIG_SPLIT_LIST, 5481219089Spjd glist, children) == 0); 5482219089Spjd kmem_free(glist, children * sizeof (uint64_t)); 5483219089Spjd 5484219089Spjd mutex_enter(&spa->spa_props_lock); 5485219089Spjd VERIFY(nvlist_add_nvlist(spa->spa_config, ZPOOL_CONFIG_SPLIT, 5486219089Spjd nvl) == 0); 5487219089Spjd mutex_exit(&spa->spa_props_lock); 5488219089Spjd spa->spa_config_splitting = nvl; 5489219089Spjd vdev_config_dirty(spa->spa_root_vdev); 5490219089Spjd 5491219089Spjd /* configure and create the new pool */ 5492219089Spjd VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME, newname) == 0); 5493219089Spjd VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE, 5494219089Spjd exp ? POOL_STATE_EXPORTED : POOL_STATE_ACTIVE) == 0); 5495219089Spjd VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_VERSION, 5496219089Spjd spa_version(spa)) == 0); 5497219089Spjd VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_TXG, 5498219089Spjd spa->spa_config_txg) == 0); 5499219089Spjd VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_GUID, 5500219089Spjd spa_generate_guid(NULL)) == 0); 5501299441Smav VERIFY0(nvlist_add_boolean(config, ZPOOL_CONFIG_HAS_PER_VDEV_ZAPS)); 5502219089Spjd (void) nvlist_lookup_string(props, 5503219089Spjd zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot); 5504219089Spjd 5505219089Spjd /* add the new pool to the namespace */ 5506219089Spjd newspa = spa_add(newname, config, altroot); 5507299441Smav newspa->spa_avz_action = AVZ_ACTION_REBUILD; 5508219089Spjd newspa->spa_config_txg = spa->spa_config_txg; 5509219089Spjd spa_set_log_state(newspa, SPA_LOG_CLEAR); 5510219089Spjd 5511219089Spjd /* release the spa config lock, retaining the namespace lock */ 5512219089Spjd spa_vdev_config_exit(spa, NULL, txg, 0, FTAG); 5513219089Spjd 5514219089Spjd if (zio_injection_enabled) 5515219089Spjd zio_handle_panic_injection(spa, FTAG, 1); 5516219089Spjd 5517219089Spjd spa_activate(newspa, spa_mode_global); 5518219089Spjd spa_async_suspend(newspa); 5519219089Spjd 5520277300Ssmh#ifndef illumos 5521219089Spjd /* mark that we are creating new spa by splitting */ 5522219089Spjd newspa->spa_splitting_newspa = B_TRUE; 5523219089Spjd#endif 5524219089Spjd /* create the new pool from the disks of the original pool */ 5525219089Spjd error = spa_load(newspa, SPA_LOAD_IMPORT, SPA_IMPORT_ASSEMBLE, B_TRUE); 5526277300Ssmh#ifndef illumos 5527219089Spjd newspa->spa_splitting_newspa = B_FALSE; 5528219089Spjd#endif 5529219089Spjd if (error) 5530219089Spjd goto out; 5531219089Spjd 5532219089Spjd /* if that worked, generate a real config for the new pool */ 5533219089Spjd if (newspa->spa_root_vdev != NULL) { 5534219089Spjd VERIFY(nvlist_alloc(&newspa->spa_config_splitting, 5535219089Spjd NV_UNIQUE_NAME, KM_SLEEP) == 0); 5536219089Spjd 
VERIFY(nvlist_add_uint64(newspa->spa_config_splitting, 5537219089Spjd ZPOOL_CONFIG_SPLIT_GUID, spa_guid(spa)) == 0); 5538219089Spjd spa_config_set(newspa, spa_config_generate(newspa, NULL, -1ULL, 5539219089Spjd B_TRUE)); 5540219089Spjd } 5541219089Spjd 5542219089Spjd /* set the props */ 5543219089Spjd if (props != NULL) { 5544219089Spjd spa_configfile_set(newspa, props, B_FALSE); 5545219089Spjd error = spa_prop_set(newspa, props); 5546219089Spjd if (error) 5547219089Spjd goto out; 5548219089Spjd } 5549219089Spjd 5550219089Spjd /* flush everything */ 5551219089Spjd txg = spa_vdev_config_enter(newspa); 5552219089Spjd vdev_config_dirty(newspa->spa_root_vdev); 5553219089Spjd (void) spa_vdev_config_exit(newspa, NULL, txg, 0, FTAG); 5554219089Spjd 5555219089Spjd if (zio_injection_enabled) 5556219089Spjd zio_handle_panic_injection(spa, FTAG, 2); 5557219089Spjd 5558219089Spjd spa_async_resume(newspa); 5559219089Spjd 5560219089Spjd /* finally, update the original pool's config */ 5561219089Spjd txg = spa_vdev_config_enter(spa); 5562219089Spjd tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir); 5563219089Spjd error = dmu_tx_assign(tx, TXG_WAIT); 5564219089Spjd if (error != 0) 5565219089Spjd dmu_tx_abort(tx); 5566219089Spjd for (c = 0; c < children; c++) { 5567219089Spjd if (vml[c] != NULL) { 5568219089Spjd vdev_split(vml[c]); 5569219089Spjd if (error == 0) 5570248571Smm spa_history_log_internal(spa, "detach", tx, 5571248571Smm "vdev=%s", vml[c]->vdev_path); 5572299441Smav 5573219089Spjd vdev_free(vml[c]); 5574219089Spjd } 5575219089Spjd } 5576299441Smav spa->spa_avz_action = AVZ_ACTION_REBUILD; 5577219089Spjd vdev_config_dirty(spa->spa_root_vdev); 5578219089Spjd spa->spa_config_splitting = NULL; 5579219089Spjd nvlist_free(nvl); 5580219089Spjd if (error == 0) 5581219089Spjd dmu_tx_commit(tx); 5582219089Spjd (void) spa_vdev_exit(spa, NULL, txg, 0); 5583219089Spjd 5584219089Spjd if (zio_injection_enabled) 5585219089Spjd zio_handle_panic_injection(spa, FTAG, 3); 5586219089Spjd 5587219089Spjd /* split is complete; log a history record */ 5588248571Smm spa_history_log_internal(newspa, "split", NULL, 5589248571Smm "from pool %s", spa_name(spa)); 5590219089Spjd 5591219089Spjd kmem_free(vml, children * sizeof (vdev_t *)); 5592219089Spjd 5593219089Spjd /* if we're not going to mount the filesystems in userland, export */ 5594219089Spjd if (exp) 5595219089Spjd error = spa_export_common(newname, POOL_STATE_EXPORTED, NULL, 5596219089Spjd B_FALSE, B_FALSE); 5597219089Spjd 5598219089Spjd return (error); 5599219089Spjd 5600219089Spjdout: 5601219089Spjd spa_unload(newspa); 5602219089Spjd spa_deactivate(newspa); 5603219089Spjd spa_remove(newspa); 5604219089Spjd 5605219089Spjd txg = spa_vdev_config_enter(spa); 5606219089Spjd 5607219089Spjd /* re-online all offlined disks */ 5608219089Spjd for (c = 0; c < children; c++) { 5609219089Spjd if (vml[c] != NULL) 5610219089Spjd vml[c]->vdev_offline = B_FALSE; 5611219089Spjd } 5612219089Spjd vdev_reopen(spa->spa_root_vdev); 5613219089Spjd 5614219089Spjd nvlist_free(spa->spa_config_splitting); 5615219089Spjd spa->spa_config_splitting = NULL; 5616219089Spjd (void) spa_vdev_exit(spa, NULL, txg, error); 5617219089Spjd 5618219089Spjd kmem_free(vml, children * sizeof (vdev_t *)); 5619219089Spjd return (error); 5620219089Spjd} 5621219089Spjd 5622185029Spjdstatic nvlist_t * 5623185029Spjdspa_nvlist_lookup_by_guid(nvlist_t **nvpp, int count, uint64_t target_guid) 5624185029Spjd{ 5625185029Spjd for (int i = 0; i < count; i++) { 5626185029Spjd uint64_t guid; 5627185029Spjd 
5628185029Spjd VERIFY(nvlist_lookup_uint64(nvpp[i], ZPOOL_CONFIG_GUID, 5629185029Spjd &guid) == 0); 5630185029Spjd 5631185029Spjd if (guid == target_guid) 5632185029Spjd return (nvpp[i]); 5633185029Spjd } 5634185029Spjd 5635185029Spjd return (NULL); 5636185029Spjd} 5637185029Spjd 5638185029Spjdstatic void 5639185029Spjdspa_vdev_remove_aux(nvlist_t *config, char *name, nvlist_t **dev, int count, 5640307277Smav nvlist_t *dev_to_remove) 5641185029Spjd{ 5642185029Spjd nvlist_t **newdev = NULL; 5643185029Spjd 5644185029Spjd if (count > 1) 5645185029Spjd newdev = kmem_alloc((count - 1) * sizeof (void *), KM_SLEEP); 5646185029Spjd 5647185029Spjd for (int i = 0, j = 0; i < count; i++) { 5648185029Spjd if (dev[i] == dev_to_remove) 5649185029Spjd continue; 5650185029Spjd VERIFY(nvlist_dup(dev[i], &newdev[j++], KM_SLEEP) == 0); 5651185029Spjd } 5652185029Spjd 5653185029Spjd VERIFY(nvlist_remove(config, name, DATA_TYPE_NVLIST_ARRAY) == 0); 5654185029Spjd VERIFY(nvlist_add_nvlist_array(config, name, newdev, count - 1) == 0); 5655185029Spjd 5656185029Spjd for (int i = 0; i < count - 1; i++) 5657185029Spjd nvlist_free(newdev[i]); 5658185029Spjd 5659185029Spjd if (count > 1) 5660185029Spjd kmem_free(newdev, (count - 1) * sizeof (void *)); 5661185029Spjd} 5662185029Spjd 5663168404Spjd/* 5664219089Spjd * Evacuate the device. 5665219089Spjd */ 5666219089Spjdstatic int 5667219089Spjdspa_vdev_remove_evacuate(spa_t *spa, vdev_t *vd) 5668219089Spjd{ 5669219089Spjd uint64_t txg; 5670219089Spjd int error = 0; 5671219089Spjd 5672219089Spjd ASSERT(MUTEX_HELD(&spa_namespace_lock)); 5673219089Spjd ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0); 5674219089Spjd ASSERT(vd == vd->vdev_top); 5675219089Spjd 5676219089Spjd /* 5677219089Spjd * Evacuate the device. We don't hold the config lock as writer 5678219089Spjd * since we need to do I/O but we do keep the 5679219089Spjd * spa_namespace_lock held. Once this completes the device 5680219089Spjd * should no longer have any blocks allocated on it. 5681219089Spjd */ 5682219089Spjd if (vd->vdev_islog) { 5683219089Spjd if (vd->vdev_stat.vs_alloc != 0) 5684219089Spjd error = spa_offline_log(spa); 5685219089Spjd } else { 5686249195Smm error = SET_ERROR(ENOTSUP); 5687219089Spjd } 5688219089Spjd 5689219089Spjd if (error) 5690219089Spjd return (error); 5691219089Spjd 5692219089Spjd /* 5693219089Spjd * The evacuation succeeded. Remove any remaining MOS metadata 5694219089Spjd * associated with this vdev, and wait for these changes to sync. 5695219089Spjd */ 5696240415Smm ASSERT0(vd->vdev_stat.vs_alloc); 5697219089Spjd txg = spa_vdev_config_enter(spa); 5698219089Spjd vd->vdev_removing = B_TRUE; 5699258717Savg vdev_dirty_leaves(vd, VDD_DTL, txg); 5700219089Spjd vdev_config_dirty(vd); 5701219089Spjd spa_vdev_config_exit(spa, NULL, txg, 0, FTAG); 5702219089Spjd 5703219089Spjd return (0); 5704219089Spjd} 5705219089Spjd 5706219089Spjd/* 5707219089Spjd * Complete the removal by cleaning up the namespace. 5708219089Spjd */ 5709219089Spjdstatic void 5710219089Spjdspa_vdev_remove_from_namespace(spa_t *spa, vdev_t *vd) 5711219089Spjd{ 5712219089Spjd vdev_t *rvd = spa->spa_root_vdev; 5713219089Spjd uint64_t id = vd->vdev_id; 5714219089Spjd boolean_t last_vdev = (id == (rvd->vdev_children - 1)); 5715219089Spjd 5716219089Spjd ASSERT(MUTEX_HELD(&spa_namespace_lock)); 5717219089Spjd ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL); 5718219089Spjd ASSERT(vd == vd->vdev_top); 5719219089Spjd 5720219089Spjd /* 5721219089Spjd * Only remove any devices which are empty. 
5722219089Spjd */ 5723219089Spjd if (vd->vdev_stat.vs_alloc != 0) 5724219089Spjd return; 5725219089Spjd 5726219089Spjd (void) vdev_label_init(vd, 0, VDEV_LABEL_REMOVE); 5727219089Spjd 5728219089Spjd if (list_link_active(&vd->vdev_state_dirty_node)) 5729219089Spjd vdev_state_clean(vd); 5730219089Spjd if (list_link_active(&vd->vdev_config_dirty_node)) 5731219089Spjd vdev_config_clean(vd); 5732219089Spjd 5733219089Spjd vdev_free(vd); 5734219089Spjd 5735219089Spjd if (last_vdev) { 5736219089Spjd vdev_compact_children(rvd); 5737219089Spjd } else { 5738219089Spjd vd = vdev_alloc_common(spa, id, 0, &vdev_hole_ops); 5739219089Spjd vdev_add_child(rvd, vd); 5740219089Spjd } 5741219089Spjd vdev_config_dirty(rvd); 5742219089Spjd 5743219089Spjd /* 5744219089Spjd * Reassess the health of our root vdev. 5745219089Spjd */ 5746219089Spjd vdev_reopen(rvd); 5747219089Spjd} 5748219089Spjd 5749219089Spjd/* 5750219089Spjd * Remove a device from the pool - 5751219089Spjd * 5752219089Spjd * Removing a device from the vdev namespace requires several steps 5753219089Spjd * and can take a significant amount of time. As a result we use 5754219089Spjd * the spa_vdev_config_[enter/exit] functions which allow us to 5755219089Spjd * grab and release the spa_config_lock while still holding the namespace 5756219089Spjd * lock. During each step the configuration is synced out. 5757251631Sdelphij * 5758251631Sdelphij * Currently, this supports removing only hot spares, slogs, and level 2 ARC 5759251631Sdelphij * devices. 5760219089Spjd */ 5761168404Spjdint 5762168404Spjdspa_vdev_remove(spa_t *spa, uint64_t guid, boolean_t unspare) 5763168404Spjd{ 5764168404Spjd vdev_t *vd; 5765307113Smav sysevent_t *ev = NULL; 5766219089Spjd metaslab_group_t *mg; 5767185029Spjd nvlist_t **spares, **l2cache, *nv; 5768219089Spjd uint64_t txg = 0; 5769185029Spjd uint_t nspares, nl2cache; 5770185029Spjd int error = 0; 5771209962Smm boolean_t locked = MUTEX_HELD(&spa_namespace_lock); 5772168404Spjd 5773219089Spjd ASSERT(spa_writeable(spa)); 5774219089Spjd 5775209962Smm if (!locked) 5776209962Smm txg = spa_vdev_enter(spa); 5777168404Spjd 5778185029Spjd vd = spa_lookup_by_guid(spa, guid, B_FALSE); 5779168404Spjd 5780185029Spjd if (spa->spa_spares.sav_vdevs != NULL && 5781185029Spjd nvlist_lookup_nvlist_array(spa->spa_spares.sav_config, 5782185029Spjd ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0 && 5783185029Spjd (nv = spa_nvlist_lookup_by_guid(spares, nspares, guid)) != NULL) { 5784185029Spjd /* 5785185029Spjd * Only remove the hot spare if it's not currently in use 5786185029Spjd * in this pool. 5787185029Spjd */ 5788185029Spjd if (vd == NULL || unspare) { 5789307113Smav if (vd == NULL) 5790307113Smav vd = spa_lookup_by_guid(spa, guid, B_TRUE); 5791307113Smav ev = spa_event_create(spa, vd, ESC_ZFS_VDEV_REMOVE_AUX); 5792185029Spjd spa_vdev_remove_aux(spa->spa_spares.sav_config, 5793185029Spjd ZPOOL_CONFIG_SPARES, spares, nspares, nv); 5794185029Spjd spa_load_spares(spa); 5795185029Spjd spa->spa_spares.sav_sync = B_TRUE; 5796185029Spjd } else { 5797249195Smm error = SET_ERROR(EBUSY); 5798168404Spjd } 5799185029Spjd } else if (spa->spa_l2cache.sav_vdevs != NULL && 5800185029Spjd nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config, 5801185029Spjd ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0 && 5802185029Spjd (nv = spa_nvlist_lookup_by_guid(l2cache, nl2cache, guid)) != NULL) { 5803185029Spjd /* 5804185029Spjd * Cache devices can always be removed. 
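	 * An L2ARC device only holds cached copies of data that also exists
	 * in the main pool, so unlike a log or normal vdev there is nothing
	 * to evacuate before dropping it from the config.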
5805185029Spjd */ 5806307113Smav vd = spa_lookup_by_guid(spa, guid, B_TRUE); 5807307113Smav ev = spa_event_create(spa, vd, ESC_ZFS_VDEV_REMOVE_AUX); 5808185029Spjd spa_vdev_remove_aux(spa->spa_l2cache.sav_config, 5809185029Spjd ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache, nv); 5810185029Spjd spa_load_l2cache(spa); 5811185029Spjd spa->spa_l2cache.sav_sync = B_TRUE; 5812219089Spjd } else if (vd != NULL && vd->vdev_islog) { 5813219089Spjd ASSERT(!locked); 5814219089Spjd ASSERT(vd == vd->vdev_top); 5815219089Spjd 5816219089Spjd mg = vd->vdev_mg; 5817219089Spjd 5818219089Spjd /* 5819219089Spjd * Stop allocating from this vdev. 5820219089Spjd */ 5821219089Spjd metaslab_group_passivate(mg); 5822219089Spjd 5823219089Spjd /* 5824219089Spjd * Wait for the youngest allocations and frees to sync, 5825219089Spjd * and then wait for the deferral of those frees to finish. 5826219089Spjd */ 5827219089Spjd spa_vdev_config_exit(spa, NULL, 5828219089Spjd txg + TXG_CONCURRENT_STATES + TXG_DEFER_SIZE, 0, FTAG); 5829219089Spjd 5830219089Spjd /* 5831219089Spjd * Attempt to evacuate the vdev. 5832219089Spjd */ 5833219089Spjd error = spa_vdev_remove_evacuate(spa, vd); 5834219089Spjd 5835219089Spjd txg = spa_vdev_config_enter(spa); 5836219089Spjd 5837219089Spjd /* 5838219089Spjd * If we couldn't evacuate the vdev, unwind. 5839219089Spjd */ 5840219089Spjd if (error) { 5841219089Spjd metaslab_group_activate(mg); 5842219089Spjd return (spa_vdev_exit(spa, NULL, txg, error)); 5843219089Spjd } 5844219089Spjd 5845219089Spjd /* 5846219089Spjd * Clean up the vdev namespace. 5847219089Spjd */ 5848307113Smav ev = spa_event_create(spa, vd, ESC_ZFS_VDEV_REMOVE_DEV); 5849219089Spjd spa_vdev_remove_from_namespace(spa, vd); 5850219089Spjd 5851185029Spjd } else if (vd != NULL) { 5852185029Spjd /* 5853185029Spjd * Normal vdevs cannot be removed (yet). 5854185029Spjd */ 5855249195Smm error = SET_ERROR(ENOTSUP); 5856168404Spjd } else { 5857185029Spjd /* 5858185029Spjd * There is no vdev of any kind with the specified guid. 5859185029Spjd */ 5860249195Smm error = SET_ERROR(ENOENT); 5861168404Spjd } 5862168404Spjd 5863209962Smm if (!locked) 5864307047Smav error = spa_vdev_exit(spa, NULL, txg, error); 5865209962Smm 5866307113Smav if (ev) 5867307113Smav spa_event_post(ev); 5868307113Smav 5869209962Smm return (error); 5870168404Spjd} 5871168404Spjd 5872168404Spjd/* 5873185029Spjd * Find any device that's done replacing, or a vdev marked 'unspare' that's 5874251631Sdelphij * currently spared, so we can detach it. 5875168404Spjd */ 5876168404Spjdstatic vdev_t * 5877185029Spjdspa_vdev_resilver_done_hunt(vdev_t *vd) 5878168404Spjd{ 5879168404Spjd vdev_t *newvd, *oldvd; 5880168404Spjd 5881219089Spjd for (int c = 0; c < vd->vdev_children; c++) { 5882185029Spjd oldvd = spa_vdev_resilver_done_hunt(vd->vdev_child[c]); 5883168404Spjd if (oldvd != NULL) 5884168404Spjd return (oldvd); 5885168404Spjd } 5886168404Spjd 5887185029Spjd /* 5888219089Spjd * Check for a completed replacement. We always consider the first 5889219089Spjd * vdev in the list to be the oldest vdev, and the last one to be 5890219089Spjd * the newest (see spa_vdev_attach() for how that works). In 5891219089Spjd * the case where the newest vdev is faulted, we will not automatically 5892219089Spjd * remove it after a resilver completes. This is OK as it will require 5893219089Spjd * user intervention to determine which disk the admin wishes to keep. 
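	 *
	 * Worked example (sketch): for replacing(B, C), child[0] == B is the
	 * disk being replaced and child[children - 1] == C is its
	 * replacement.  Once C's DTL_MISSING and DTL_OUTAGE maps are empty
	 * and B is no longer required to satisfy any DTL, the hunt returns B
	 * so that spa_vdev_resilver_done() can detach it.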
5894185029Spjd */ 5895219089Spjd if (vd->vdev_ops == &vdev_replacing_ops) { 5896219089Spjd ASSERT(vd->vdev_children > 1); 5897219089Spjd 5898219089Spjd newvd = vd->vdev_child[vd->vdev_children - 1]; 5899168404Spjd oldvd = vd->vdev_child[0]; 5900168404Spjd 5901209962Smm if (vdev_dtl_empty(newvd, DTL_MISSING) && 5902219089Spjd vdev_dtl_empty(newvd, DTL_OUTAGE) && 5903209962Smm !vdev_dtl_required(oldvd)) 5904168404Spjd return (oldvd); 5905168404Spjd } 5906168404Spjd 5907185029Spjd /* 5908185029Spjd * Check for a completed resilver with the 'unspare' flag set. 5909185029Spjd */ 5910219089Spjd if (vd->vdev_ops == &vdev_spare_ops) { 5911219089Spjd vdev_t *first = vd->vdev_child[0]; 5912219089Spjd vdev_t *last = vd->vdev_child[vd->vdev_children - 1]; 5913185029Spjd 5914219089Spjd if (last->vdev_unspare) { 5915219089Spjd oldvd = first; 5916219089Spjd newvd = last; 5917219089Spjd } else if (first->vdev_unspare) { 5918219089Spjd oldvd = last; 5919219089Spjd newvd = first; 5920219089Spjd } else { 5921219089Spjd oldvd = NULL; 5922219089Spjd } 5923219089Spjd 5924219089Spjd if (oldvd != NULL && 5925209962Smm vdev_dtl_empty(newvd, DTL_MISSING) && 5926219089Spjd vdev_dtl_empty(newvd, DTL_OUTAGE) && 5927219089Spjd !vdev_dtl_required(oldvd)) 5928185029Spjd return (oldvd); 5929219089Spjd 5930219089Spjd /* 5931219089Spjd * If there are more than two spares attached to a disk, 5932219089Spjd * and those spares are not required, then we want to 5933219089Spjd * attempt to free them up now so that they can be used 5934219089Spjd * by other pools. Once we're back down to a single 5935219089Spjd * disk+spare, we stop removing them. 5936219089Spjd */ 5937219089Spjd if (vd->vdev_children > 2) { 5938219089Spjd newvd = vd->vdev_child[1]; 5939219089Spjd 5940219089Spjd if (newvd->vdev_isspare && last->vdev_isspare && 5941219089Spjd vdev_dtl_empty(last, DTL_MISSING) && 5942219089Spjd vdev_dtl_empty(last, DTL_OUTAGE) && 5943219089Spjd !vdev_dtl_required(newvd)) 5944219089Spjd return (newvd); 5945185029Spjd } 5946185029Spjd } 5947185029Spjd 5948168404Spjd return (NULL); 5949168404Spjd} 5950168404Spjd 5951168404Spjdstatic void 5952185029Spjdspa_vdev_resilver_done(spa_t *spa) 5953168404Spjd{ 5954209962Smm vdev_t *vd, *pvd, *ppvd; 5955209962Smm uint64_t guid, sguid, pguid, ppguid; 5956168404Spjd 5957209962Smm spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 5958168404Spjd 5959185029Spjd while ((vd = spa_vdev_resilver_done_hunt(spa->spa_root_vdev)) != NULL) { 5960209962Smm pvd = vd->vdev_parent; 5961209962Smm ppvd = pvd->vdev_parent; 5962168404Spjd guid = vd->vdev_guid; 5963209962Smm pguid = pvd->vdev_guid; 5964209962Smm ppguid = ppvd->vdev_guid; 5965209962Smm sguid = 0; 5966168404Spjd /* 5967168404Spjd * If we have just finished replacing a hot spared device, then 5968168404Spjd * we need to detach the parent's first child (the original hot 5969168404Spjd * spare) as well. 
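 *
 * For illustration (a sketch of the common layout, not original text),
 * the vdev tree at this point typically looks like:
 *
 *	spare-0				(ppvd)
 *	    replacing-0			(pvd, vdev_id == 0)
 *		olddisk			(vd, fully resilvered away)
 *		newdisk
 *	    sparedisk			(ppvd->vdev_child[1], guid == sguid)
 *
 * so after detaching olddisk we also detach sparedisk, returning it to
 * the spare list.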
5970168404Spjd */ 5971219089Spjd if (ppvd->vdev_ops == &vdev_spare_ops && pvd->vdev_id == 0 && 5972219089Spjd ppvd->vdev_children == 2) { 5973168404Spjd ASSERT(pvd->vdev_ops == &vdev_replacing_ops); 5974209962Smm sguid = ppvd->vdev_child[1]->vdev_guid; 5975168404Spjd } 5976254112Sdelphij ASSERT(vd->vdev_resilver_txg == 0 || !vdev_dtl_required(vd)); 5977254112Sdelphij 5978209962Smm spa_config_exit(spa, SCL_ALL, FTAG); 5979209962Smm if (spa_vdev_detach(spa, guid, pguid, B_TRUE) != 0) 5980168404Spjd return; 5981209962Smm if (sguid && spa_vdev_detach(spa, sguid, ppguid, B_TRUE) != 0) 5982168404Spjd return; 5983209962Smm spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 5984168404Spjd } 5985168404Spjd 5986209962Smm spa_config_exit(spa, SCL_ALL, FTAG); 5987168404Spjd} 5988168404Spjd 5989168404Spjd/* 5990219089Spjd * Update the stored path or FRU for this vdev. 5991168404Spjd */ 5992168404Spjdint 5993209962Smmspa_vdev_set_common(spa_t *spa, uint64_t guid, const char *value, 5994209962Smm boolean_t ispath) 5995168404Spjd{ 5996185029Spjd vdev_t *vd; 5997219089Spjd boolean_t sync = B_FALSE; 5998168404Spjd 5999219089Spjd ASSERT(spa_writeable(spa)); 6000168404Spjd 6001219089Spjd spa_vdev_state_enter(spa, SCL_ALL); 6002219089Spjd 6003209962Smm if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL) 6004219089Spjd return (spa_vdev_state_exit(spa, NULL, ENOENT)); 6005168404Spjd 6006168404Spjd if (!vd->vdev_ops->vdev_op_leaf) 6007219089Spjd return (spa_vdev_state_exit(spa, NULL, ENOTSUP)); 6008168404Spjd 6009209962Smm if (ispath) { 6010219089Spjd if (strcmp(value, vd->vdev_path) != 0) { 6011219089Spjd spa_strfree(vd->vdev_path); 6012219089Spjd vd->vdev_path = spa_strdup(value); 6013219089Spjd sync = B_TRUE; 6014219089Spjd } 6015209962Smm } else { 6016219089Spjd if (vd->vdev_fru == NULL) { 6017219089Spjd vd->vdev_fru = spa_strdup(value); 6018219089Spjd sync = B_TRUE; 6019219089Spjd } else if (strcmp(value, vd->vdev_fru) != 0) { 6020209962Smm spa_strfree(vd->vdev_fru); 6021219089Spjd vd->vdev_fru = spa_strdup(value); 6022219089Spjd sync = B_TRUE; 6023219089Spjd } 6024209962Smm } 6025168404Spjd 6026219089Spjd return (spa_vdev_state_exit(spa, sync ? 
vd : NULL, 0)); 6027168404Spjd} 6028168404Spjd 6029209962Smmint 6030209962Smmspa_vdev_setpath(spa_t *spa, uint64_t guid, const char *newpath) 6031209962Smm{ 6032209962Smm return (spa_vdev_set_common(spa, guid, newpath, B_TRUE)); 6033209962Smm} 6034209962Smm 6035209962Smmint 6036209962Smmspa_vdev_setfru(spa_t *spa, uint64_t guid, const char *newfru) 6037209962Smm{ 6038209962Smm return (spa_vdev_set_common(spa, guid, newfru, B_FALSE)); 6039209962Smm} 6040209962Smm 6041168404Spjd/* 6042168404Spjd * ========================================================================== 6043219089Spjd * SPA Scanning 6044168404Spjd * ========================================================================== 6045168404Spjd */ 6046168404Spjd 6047168404Spjdint 6048219089Spjdspa_scan_stop(spa_t *spa) 6049168404Spjd{ 6050185029Spjd ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0); 6051219089Spjd if (dsl_scan_resilvering(spa->spa_dsl_pool)) 6052249195Smm return (SET_ERROR(EBUSY)); 6053219089Spjd return (dsl_scan_cancel(spa->spa_dsl_pool)); 6054219089Spjd} 6055168404Spjd 6056219089Spjdint 6057219089Spjdspa_scan(spa_t *spa, pool_scan_func_t func) 6058219089Spjd{ 6059219089Spjd ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0); 6060219089Spjd 6061219089Spjd if (func >= POOL_SCAN_FUNCS || func == POOL_SCAN_NONE) 6062249195Smm return (SET_ERROR(ENOTSUP)); 6063168404Spjd 6064168404Spjd /* 6065185029Spjd * If a resilver was requested, but there is no DTL on a 6066185029Spjd * writeable leaf device, we have nothing to do. 6067168404Spjd */ 6068219089Spjd if (func == POOL_SCAN_RESILVER && 6069185029Spjd !vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL)) { 6070185029Spjd spa_async_request(spa, SPA_ASYNC_RESILVER_DONE); 6071168404Spjd return (0); 6072168404Spjd } 6073168404Spjd 6074219089Spjd return (dsl_scan(spa->spa_dsl_pool, func)); 6075168404Spjd} 6076168404Spjd 6077168404Spjd/* 6078168404Spjd * ========================================================================== 6079168404Spjd * SPA async task processing 6080168404Spjd * ========================================================================== 6081168404Spjd */ 6082168404Spjd 6083168404Spjdstatic void 6084185029Spjdspa_async_remove(spa_t *spa, vdev_t *vd) 6085168404Spjd{ 6086185029Spjd if (vd->vdev_remove_wanted) { 6087219089Spjd vd->vdev_remove_wanted = B_FALSE; 6088219089Spjd vd->vdev_delayed_close = B_FALSE; 6089185029Spjd vdev_set_state(vd, B_FALSE, VDEV_STATE_REMOVED, VDEV_AUX_NONE); 6090209962Smm 6091209962Smm /* 6092209962Smm * We want to clear the stats, but we don't want to do a full 6093209962Smm * vdev_clear() as that will cause us to throw away 6094209962Smm * degraded/faulted state as well as attempt to reopen the 6095209962Smm * device, all of which is a waste. 6096209962Smm */ 6097209962Smm vd->vdev_stat.vs_read_errors = 0; 6098209962Smm vd->vdev_stat.vs_write_errors = 0; 6099209962Smm vd->vdev_stat.vs_checksum_errors = 0; 6100209962Smm 6101185029Spjd vdev_state_dirty(vd->vdev_top); 6102294027Sasomers /* Tell userspace that the vdev is gone. 
*/ 6103294027Sasomers zfs_post_remove(spa, vd); 6104185029Spjd } 6105168404Spjd 6106185029Spjd for (int c = 0; c < vd->vdev_children; c++) 6107185029Spjd spa_async_remove(spa, vd->vdev_child[c]); 6108185029Spjd} 6109168404Spjd 6110185029Spjdstatic void 6111185029Spjdspa_async_probe(spa_t *spa, vdev_t *vd) 6112185029Spjd{ 6113185029Spjd if (vd->vdev_probe_wanted) { 6114219089Spjd vd->vdev_probe_wanted = B_FALSE; 6115185029Spjd vdev_reopen(vd); /* vdev_open() does the actual probe */ 6116168404Spjd } 6117168404Spjd 6118185029Spjd for (int c = 0; c < vd->vdev_children; c++) 6119185029Spjd spa_async_probe(spa, vd->vdev_child[c]); 6120168404Spjd} 6121168404Spjd 6122168404Spjdstatic void 6123219089Spjdspa_async_autoexpand(spa_t *spa, vdev_t *vd) 6124219089Spjd{ 6125219089Spjd sysevent_id_t eid; 6126219089Spjd nvlist_t *attr; 6127219089Spjd char *physpath; 6128219089Spjd 6129219089Spjd if (!spa->spa_autoexpand) 6130219089Spjd return; 6131219089Spjd 6132219089Spjd for (int c = 0; c < vd->vdev_children; c++) { 6133219089Spjd vdev_t *cvd = vd->vdev_child[c]; 6134219089Spjd spa_async_autoexpand(spa, cvd); 6135219089Spjd } 6136219089Spjd 6137219089Spjd if (!vd->vdev_ops->vdev_op_leaf || vd->vdev_physpath == NULL) 6138219089Spjd return; 6139219089Spjd 6140219089Spjd physpath = kmem_zalloc(MAXPATHLEN, KM_SLEEP); 6141219089Spjd (void) snprintf(physpath, MAXPATHLEN, "/devices%s", vd->vdev_physpath); 6142219089Spjd 6143219089Spjd VERIFY(nvlist_alloc(&attr, NV_UNIQUE_NAME, KM_SLEEP) == 0); 6144219089Spjd VERIFY(nvlist_add_string(attr, DEV_PHYS_PATH, physpath) == 0); 6145219089Spjd 6146219089Spjd (void) ddi_log_sysevent(zfs_dip, SUNW_VENDOR, EC_DEV_STATUS, 6147219089Spjd ESC_ZFS_VDEV_AUTOEXPAND, attr, &eid, DDI_SLEEP); 6148219089Spjd 6149219089Spjd nvlist_free(attr); 6150219089Spjd kmem_free(physpath, MAXPATHLEN); 6151219089Spjd} 6152219089Spjd 6153219089Spjdstatic void 6154168404Spjdspa_async_thread(void *arg) 6155168404Spjd{ 6156168404Spjd spa_t *spa = arg; 6157168404Spjd int tasks; 6158168404Spjd 6159168404Spjd ASSERT(spa->spa_sync_on); 6160168404Spjd 6161168404Spjd mutex_enter(&spa->spa_async_lock); 6162168404Spjd tasks = spa->spa_async_tasks; 6163253990Smav spa->spa_async_tasks &= SPA_ASYNC_REMOVE; 6164168404Spjd mutex_exit(&spa->spa_async_lock); 6165168404Spjd 6166168404Spjd /* 6167168404Spjd * See if the config needs to be updated. 6168168404Spjd */ 6169168404Spjd if (tasks & SPA_ASYNC_CONFIG_UPDATE) { 6170219089Spjd uint64_t old_space, new_space; 6171219089Spjd 6172168404Spjd mutex_enter(&spa_namespace_lock); 6173219089Spjd old_space = metaslab_class_get_space(spa_normal_class(spa)); 6174168404Spjd spa_config_update(spa, SPA_CONFIG_UPDATE_POOL); 6175219089Spjd new_space = metaslab_class_get_space(spa_normal_class(spa)); 6176168404Spjd mutex_exit(&spa_namespace_lock); 6177219089Spjd 6178219089Spjd /* 6179219089Spjd * If the pool grew as a result of the config update, 6180219089Spjd * then log an internal history event. 
6181219089Spjd */ 6182219089Spjd if (new_space != old_space) { 6183248571Smm spa_history_log_internal(spa, "vdev online", NULL, 6184219089Spjd "pool '%s' size: %llu(+%llu)", 6185219089Spjd spa_name(spa), new_space, new_space - old_space); 6186219089Spjd } 6187168404Spjd } 6188168404Spjd 6189219089Spjd if ((tasks & SPA_ASYNC_AUTOEXPAND) && !spa_suspended(spa)) { 6190219089Spjd spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); 6191219089Spjd spa_async_autoexpand(spa, spa->spa_root_vdev); 6192219089Spjd spa_config_exit(spa, SCL_CONFIG, FTAG); 6193219089Spjd } 6194219089Spjd 6195168404Spjd /* 6196185029Spjd * See if any devices need to be probed. 6197168404Spjd */ 6198185029Spjd if (tasks & SPA_ASYNC_PROBE) { 6199219089Spjd spa_vdev_state_enter(spa, SCL_NONE); 6200185029Spjd spa_async_probe(spa, spa->spa_root_vdev); 6201185029Spjd (void) spa_vdev_state_exit(spa, NULL, 0); 6202185029Spjd } 6203168404Spjd 6204168404Spjd /* 6205185029Spjd * If any devices are done replacing, detach them. 6206168404Spjd */ 6207185029Spjd if (tasks & SPA_ASYNC_RESILVER_DONE) 6208185029Spjd spa_vdev_resilver_done(spa); 6209168404Spjd 6210168404Spjd /* 6211168404Spjd * Kick off a resilver. 6212168404Spjd */ 6213168404Spjd if (tasks & SPA_ASYNC_RESILVER) 6214219089Spjd dsl_resilver_restart(spa->spa_dsl_pool, 0); 6215168404Spjd 6216168404Spjd /* 6217168404Spjd * Let the world know that we're done. 6218168404Spjd */ 6219168404Spjd mutex_enter(&spa->spa_async_lock); 6220168404Spjd spa->spa_async_thread = NULL; 6221168404Spjd cv_broadcast(&spa->spa_async_cv); 6222168404Spjd mutex_exit(&spa->spa_async_lock); 6223168404Spjd thread_exit(); 6224168404Spjd} 6225168404Spjd 6226253990Smavstatic void 6227253990Smavspa_async_thread_vd(void *arg) 6228253990Smav{ 6229253990Smav spa_t *spa = arg; 6230253990Smav int tasks; 6231253990Smav 6232253990Smav ASSERT(spa->spa_sync_on); 6233253990Smav 6234253990Smav mutex_enter(&spa->spa_async_lock); 6235253990Smav tasks = spa->spa_async_tasks; 6236253990Smavretry: 6237253990Smav spa->spa_async_tasks &= ~SPA_ASYNC_REMOVE; 6238253990Smav mutex_exit(&spa->spa_async_lock); 6239253990Smav 6240253990Smav /* 6241253990Smav * See if any devices need to be marked REMOVED. 6242253990Smav */ 6243253990Smav if (tasks & SPA_ASYNC_REMOVE) { 6244253990Smav spa_vdev_state_enter(spa, SCL_NONE); 6245253990Smav spa_async_remove(spa, spa->spa_root_vdev); 6246253990Smav for (int i = 0; i < spa->spa_l2cache.sav_count; i++) 6247253990Smav spa_async_remove(spa, spa->spa_l2cache.sav_vdevs[i]); 6248253990Smav for (int i = 0; i < spa->spa_spares.sav_count; i++) 6249253990Smav spa_async_remove(spa, spa->spa_spares.sav_vdevs[i]); 6250253990Smav (void) spa_vdev_state_exit(spa, NULL, 0); 6251253990Smav } 6252253990Smav 6253253990Smav /* 6254253990Smav * Let the world know that we're done. 
6255253990Smav */ 6256253990Smav mutex_enter(&spa->spa_async_lock); 6257253990Smav tasks = spa->spa_async_tasks; 6258253990Smav if ((tasks & SPA_ASYNC_REMOVE) != 0) 6259253990Smav goto retry; 6260253990Smav spa->spa_async_thread_vd = NULL; 6261253990Smav cv_broadcast(&spa->spa_async_cv); 6262253990Smav mutex_exit(&spa->spa_async_lock); 6263253990Smav thread_exit(); 6264253990Smav} 6265253990Smav 6266168404Spjdvoid 6267168404Spjdspa_async_suspend(spa_t *spa) 6268168404Spjd{ 6269168404Spjd mutex_enter(&spa->spa_async_lock); 6270168404Spjd spa->spa_async_suspended++; 6271253990Smav while (spa->spa_async_thread != NULL && 6272253990Smav spa->spa_async_thread_vd != NULL) 6273168404Spjd cv_wait(&spa->spa_async_cv, &spa->spa_async_lock); 6274168404Spjd mutex_exit(&spa->spa_async_lock); 6275168404Spjd} 6276168404Spjd 6277168404Spjdvoid 6278168404Spjdspa_async_resume(spa_t *spa) 6279168404Spjd{ 6280168404Spjd mutex_enter(&spa->spa_async_lock); 6281168404Spjd ASSERT(spa->spa_async_suspended != 0); 6282168404Spjd spa->spa_async_suspended--; 6283168404Spjd mutex_exit(&spa->spa_async_lock); 6284168404Spjd} 6285168404Spjd 6286251636Sdelphijstatic boolean_t 6287251636Sdelphijspa_async_tasks_pending(spa_t *spa) 6288251636Sdelphij{ 6289251636Sdelphij uint_t non_config_tasks; 6290251636Sdelphij uint_t config_task; 6291251636Sdelphij boolean_t config_task_suspended; 6292251636Sdelphij 6293253990Smav non_config_tasks = spa->spa_async_tasks & ~(SPA_ASYNC_CONFIG_UPDATE | 6294253990Smav SPA_ASYNC_REMOVE); 6295251636Sdelphij config_task = spa->spa_async_tasks & SPA_ASYNC_CONFIG_UPDATE; 6296251636Sdelphij if (spa->spa_ccw_fail_time == 0) { 6297251636Sdelphij config_task_suspended = B_FALSE; 6298251636Sdelphij } else { 6299251636Sdelphij config_task_suspended = 6300251636Sdelphij (gethrtime() - spa->spa_ccw_fail_time) < 6301251636Sdelphij (zfs_ccw_retry_interval * NANOSEC); 6302251636Sdelphij } 6303251636Sdelphij 6304251636Sdelphij return (non_config_tasks || (config_task && !config_task_suspended)); 6305251636Sdelphij} 6306251636Sdelphij 6307168404Spjdstatic void 6308168404Spjdspa_async_dispatch(spa_t *spa) 6309168404Spjd{ 6310168404Spjd mutex_enter(&spa->spa_async_lock); 6311251636Sdelphij if (spa_async_tasks_pending(spa) && 6312251636Sdelphij !spa->spa_async_suspended && 6313168404Spjd spa->spa_async_thread == NULL && 6314251636Sdelphij rootdir != NULL) 6315168404Spjd spa->spa_async_thread = thread_create(NULL, 0, 6316168404Spjd spa_async_thread, spa, 0, &p0, TS_RUN, maxclsyspri); 6317168404Spjd mutex_exit(&spa->spa_async_lock); 6318168404Spjd} 6319168404Spjd 6320253990Smavstatic void 6321253990Smavspa_async_dispatch_vd(spa_t *spa) 6322253990Smav{ 6323253990Smav mutex_enter(&spa->spa_async_lock); 6324253990Smav if ((spa->spa_async_tasks & SPA_ASYNC_REMOVE) != 0 && 6325253990Smav !spa->spa_async_suspended && 6326253990Smav spa->spa_async_thread_vd == NULL && 6327253990Smav rootdir != NULL) 6328253990Smav spa->spa_async_thread_vd = thread_create(NULL, 0, 6329253990Smav spa_async_thread_vd, spa, 0, &p0, TS_RUN, maxclsyspri); 6330253990Smav mutex_exit(&spa->spa_async_lock); 6331253990Smav} 6332253990Smav 6333168404Spjdvoid 6334168404Spjdspa_async_request(spa_t *spa, int task) 6335168404Spjd{ 6336219089Spjd zfs_dbgmsg("spa=%s async request task=%u", spa->spa_name, task); 6337168404Spjd mutex_enter(&spa->spa_async_lock); 6338168404Spjd spa->spa_async_tasks |= task; 6339168404Spjd mutex_exit(&spa->spa_async_lock); 6340253990Smav spa_async_dispatch_vd(spa); 6341168404Spjd} 6342168404Spjd 6343168404Spjd/* 6344168404Spjd 
* ========================================================================== 6345168404Spjd * SPA syncing routines 6346168404Spjd * ========================================================================== 6347168404Spjd */ 6348168404Spjd 6349219089Spjdstatic int 6350219089Spjdbpobj_enqueue_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx) 6351168404Spjd{ 6352219089Spjd bpobj_t *bpo = arg; 6353219089Spjd bpobj_enqueue(bpo, bp, tx); 6354219089Spjd return (0); 6355219089Spjd} 6356168404Spjd 6357219089Spjdstatic int 6358219089Spjdspa_free_sync_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx) 6359219089Spjd{ 6360219089Spjd zio_t *zio = arg; 6361168404Spjd 6362219089Spjd zio_nowait(zio_free_sync(zio, zio->io_spa, dmu_tx_get_txg(tx), bp, 6363240868Spjd BP_GET_PSIZE(bp), zio->io_flags)); 6364219089Spjd return (0); 6365168404Spjd} 6366168404Spjd 6367258632Savg/* 6368258632Savg * Note: this simple function is not inlined to make it easier to dtrace the 6369258632Savg * amount of time spent syncing frees. 6370258632Savg */ 6371168404Spjdstatic void 6372258632Savgspa_sync_frees(spa_t *spa, bplist_t *bpl, dmu_tx_t *tx) 6373258632Savg{ 6374258632Savg zio_t *zio = zio_root(spa, NULL, NULL, 0); 6375258632Savg bplist_iterate(bpl, spa_free_sync_cb, zio, tx); 6376258632Savg VERIFY(zio_wait(zio) == 0); 6377258632Savg} 6378258632Savg 6379258632Savg/* 6380258632Savg * Note: this simple function is not inlined to make it easier to dtrace the 6381258632Savg * amount of time spent syncing deferred frees. 6382258632Savg */ 6383258632Savgstatic void 6384258632Savgspa_sync_deferred_frees(spa_t *spa, dmu_tx_t *tx) 6385258632Savg{ 6386258632Savg zio_t *zio = zio_root(spa, NULL, NULL, 0); 6387258632Savg VERIFY3U(bpobj_iterate(&spa->spa_deferred_bpobj, 6388258632Savg spa_free_sync_cb, zio, tx), ==, 0); 6389258632Savg VERIFY0(zio_wait(zio)); 6390258632Savg} 6391258632Savg 6392258632Savg 6393258632Savgstatic void 6394168404Spjdspa_sync_nvlist(spa_t *spa, uint64_t obj, nvlist_t *nv, dmu_tx_t *tx) 6395168404Spjd{ 6396168404Spjd char *packed = NULL; 6397185029Spjd size_t bufsize; 6398168404Spjd size_t nvsize = 0; 6399168404Spjd dmu_buf_t *db; 6400168404Spjd 6401168404Spjd VERIFY(nvlist_size(nv, &nvsize, NV_ENCODE_XDR) == 0); 6402168404Spjd 6403185029Spjd /* 6404185029Spjd * Write full (SPA_CONFIG_BLOCKSIZE) blocks of configuration 6405260150Sdelphij * information. This avoids the dmu_buf_will_dirty() path and 6406185029Spjd * saves us a pre-read to get data we don't actually care about. 
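 *
 * As a worked example (assuming SPA_CONFIG_BLOCKSIZE is 1 << 14): a
 * packed nvlist of nvsize 20000 bytes is written as a single
 * P2ROUNDUP(20000, 16384) == 32768-byte buffer, with the remaining
 * 12768 bytes zeroed by the bzero() below.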
6407185029Spjd */ 6408236884Smm bufsize = P2ROUNDUP((uint64_t)nvsize, SPA_CONFIG_BLOCKSIZE); 6409185029Spjd packed = kmem_alloc(bufsize, KM_SLEEP); 6410168404Spjd 6411168404Spjd VERIFY(nvlist_pack(nv, &packed, &nvsize, NV_ENCODE_XDR, 6412168404Spjd KM_SLEEP) == 0); 6413185029Spjd bzero(packed + nvsize, bufsize - nvsize); 6414168404Spjd 6415185029Spjd dmu_write(spa->spa_meta_objset, obj, 0, bufsize, packed, tx); 6416168404Spjd 6417185029Spjd kmem_free(packed, bufsize); 6418168404Spjd 6419168404Spjd VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db)); 6420168404Spjd dmu_buf_will_dirty(db, tx); 6421168404Spjd *(uint64_t *)db->db_data = nvsize; 6422168404Spjd dmu_buf_rele(db, FTAG); 6423168404Spjd} 6424168404Spjd 6425168404Spjdstatic void 6426185029Spjdspa_sync_aux_dev(spa_t *spa, spa_aux_vdev_t *sav, dmu_tx_t *tx, 6427185029Spjd const char *config, const char *entry) 6428168404Spjd{ 6429168404Spjd nvlist_t *nvroot; 6430185029Spjd nvlist_t **list; 6431168404Spjd int i; 6432168404Spjd 6433185029Spjd if (!sav->sav_sync) 6434168404Spjd return; 6435168404Spjd 6436168404Spjd /* 6437185029Spjd * Update the MOS nvlist describing the list of available devices. 6438185029Spjd * spa_validate_aux() will have already made sure this nvlist is 6439185029Spjd * valid and the vdevs are labeled appropriately. 6440168404Spjd */ 6441185029Spjd if (sav->sav_object == 0) { 6442185029Spjd sav->sav_object = dmu_object_alloc(spa->spa_meta_objset, 6443185029Spjd DMU_OT_PACKED_NVLIST, 1 << 14, DMU_OT_PACKED_NVLIST_SIZE, 6444185029Spjd sizeof (uint64_t), tx); 6445168404Spjd VERIFY(zap_update(spa->spa_meta_objset, 6446185029Spjd DMU_POOL_DIRECTORY_OBJECT, entry, sizeof (uint64_t), 1, 6447185029Spjd &sav->sav_object, tx) == 0); 6448168404Spjd } 6449168404Spjd 6450168404Spjd VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0); 6451185029Spjd if (sav->sav_count == 0) { 6452185029Spjd VERIFY(nvlist_add_nvlist_array(nvroot, config, NULL, 0) == 0); 6453168404Spjd } else { 6454185029Spjd list = kmem_alloc(sav->sav_count * sizeof (void *), KM_SLEEP); 6455185029Spjd for (i = 0; i < sav->sav_count; i++) 6456185029Spjd list[i] = vdev_config_generate(spa, sav->sav_vdevs[i], 6457219089Spjd B_FALSE, VDEV_CONFIG_L2CACHE); 6458185029Spjd VERIFY(nvlist_add_nvlist_array(nvroot, config, list, 6459185029Spjd sav->sav_count) == 0); 6460185029Spjd for (i = 0; i < sav->sav_count; i++) 6461185029Spjd nvlist_free(list[i]); 6462185029Spjd kmem_free(list, sav->sav_count * sizeof (void *)); 6463168404Spjd } 6464168404Spjd 6465185029Spjd spa_sync_nvlist(spa, sav->sav_object, nvroot, tx); 6466168404Spjd nvlist_free(nvroot); 6467168404Spjd 6468185029Spjd sav->sav_sync = B_FALSE; 6469168404Spjd} 6470168404Spjd 6471299441Smav/* 6472299441Smav * Rebuild spa's all-vdev ZAP from the vdev ZAPs indicated in each vdev_t. 6473299441Smav * The all-vdev ZAP must be empty. 
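 *
 * A rough worked example (assuming every vdev already has its per-vdev
 * ZAP allocated): a pool with two top-level mirrors of two leaf disks
 * each contributes 2 top-level ZAPs and 4 leaf ZAPs, so the rebuilt AVZ
 * ends up holding 6 entries.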
6474299441Smav */ 6475168404Spjdstatic void 6476299441Smavspa_avz_build(vdev_t *vd, uint64_t avz, dmu_tx_t *tx) 6477299441Smav{ 6478299441Smav spa_t *spa = vd->vdev_spa; 6479299441Smav if (vd->vdev_top_zap != 0) { 6480299441Smav VERIFY0(zap_add_int(spa->spa_meta_objset, avz, 6481299441Smav vd->vdev_top_zap, tx)); 6482299441Smav } 6483299441Smav if (vd->vdev_leaf_zap != 0) { 6484299441Smav VERIFY0(zap_add_int(spa->spa_meta_objset, avz, 6485299441Smav vd->vdev_leaf_zap, tx)); 6486299441Smav } 6487299441Smav for (uint64_t i = 0; i < vd->vdev_children; i++) { 6488299441Smav spa_avz_build(vd->vdev_child[i], avz, tx); 6489299441Smav } 6490299441Smav} 6491299441Smav 6492299441Smavstatic void 6493168404Spjdspa_sync_config_object(spa_t *spa, dmu_tx_t *tx) 6494168404Spjd{ 6495168404Spjd nvlist_t *config; 6496168404Spjd 6497299441Smav /* 6498299441Smav * If the pool is being imported from a pre-per-vdev-ZAP version of ZFS, 6499299441Smav * its config may not be dirty but we still need to build per-vdev ZAPs. 6500299441Smav * Similarly, if the pool is being assembled (e.g. after a split), we 6501299441Smav * need to rebuild the AVZ although the config may not be dirty. 6502299441Smav */ 6503299441Smav if (list_is_empty(&spa->spa_config_dirty_list) && 6504299441Smav spa->spa_avz_action == AVZ_ACTION_NONE) 6505168404Spjd return; 6506168404Spjd 6507185029Spjd spa_config_enter(spa, SCL_STATE, FTAG, RW_READER); 6508168404Spjd 6509299441Smav ASSERT(spa->spa_avz_action == AVZ_ACTION_NONE || 6510321540Smav spa->spa_avz_action == AVZ_ACTION_INITIALIZE || 6511299441Smav spa->spa_all_vdev_zaps != 0); 6512299441Smav 6513299441Smav if (spa->spa_avz_action == AVZ_ACTION_REBUILD) { 6514299441Smav /* Make and build the new AVZ */ 6515299441Smav uint64_t new_avz = zap_create(spa->spa_meta_objset, 6516299441Smav DMU_OTN_ZAP_METADATA, DMU_OT_NONE, 0, tx); 6517299441Smav spa_avz_build(spa->spa_root_vdev, new_avz, tx); 6518299441Smav 6519299441Smav /* Diff old AVZ with new one */ 6520299441Smav zap_cursor_t zc; 6521299441Smav zap_attribute_t za; 6522299441Smav 6523299441Smav for (zap_cursor_init(&zc, spa->spa_meta_objset, 6524299441Smav spa->spa_all_vdev_zaps); 6525299441Smav zap_cursor_retrieve(&zc, &za) == 0; 6526299441Smav zap_cursor_advance(&zc)) { 6527299441Smav uint64_t vdzap = za.za_first_integer; 6528299441Smav if (zap_lookup_int(spa->spa_meta_objset, new_avz, 6529299441Smav vdzap) == ENOENT) { 6530299441Smav /* 6531299441Smav * ZAP is listed in old AVZ but not in new one; 6532299441Smav * destroy it 6533299441Smav */ 6534299441Smav VERIFY0(zap_destroy(spa->spa_meta_objset, vdzap, 6535299441Smav tx)); 6536299441Smav } 6537299441Smav } 6538299441Smav 6539299441Smav zap_cursor_fini(&zc); 6540299441Smav 6541299441Smav /* Destroy the old AVZ */ 6542299441Smav VERIFY0(zap_destroy(spa->spa_meta_objset, 6543299441Smav spa->spa_all_vdev_zaps, tx)); 6544299441Smav 6545299441Smav /* Replace the old AVZ in the dir obj with the new one */ 6546299441Smav VERIFY0(zap_update(spa->spa_meta_objset, 6547299441Smav DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_VDEV_ZAP_MAP, 6548299441Smav sizeof (new_avz), 1, &new_avz, tx)); 6549299441Smav 6550299441Smav spa->spa_all_vdev_zaps = new_avz; 6551299441Smav } else if (spa->spa_avz_action == AVZ_ACTION_DESTROY) { 6552299441Smav zap_cursor_t zc; 6553299441Smav zap_attribute_t za; 6554299441Smav 6555299441Smav /* Walk through the AVZ and destroy all listed ZAPs */ 6556299441Smav for (zap_cursor_init(&zc, spa->spa_meta_objset, 6557299441Smav spa->spa_all_vdev_zaps); 6558299441Smav 
zap_cursor_retrieve(&zc, &za) == 0; 6559299441Smav zap_cursor_advance(&zc)) { 6560299441Smav uint64_t zap = za.za_first_integer; 6561299441Smav VERIFY0(zap_destroy(spa->spa_meta_objset, zap, tx)); 6562299441Smav } 6563299441Smav 6564299441Smav zap_cursor_fini(&zc); 6565299441Smav 6566299441Smav /* Destroy and unlink the AVZ itself */ 6567299441Smav VERIFY0(zap_destroy(spa->spa_meta_objset, 6568299441Smav spa->spa_all_vdev_zaps, tx)); 6569299441Smav VERIFY0(zap_remove(spa->spa_meta_objset, 6570299441Smav DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_VDEV_ZAP_MAP, tx)); 6571299441Smav spa->spa_all_vdev_zaps = 0; 6572299441Smav } 6573299441Smav 6574299441Smav if (spa->spa_all_vdev_zaps == 0) { 6575299441Smav spa->spa_all_vdev_zaps = zap_create_link(spa->spa_meta_objset, 6576299441Smav DMU_OTN_ZAP_METADATA, DMU_POOL_DIRECTORY_OBJECT, 6577299441Smav DMU_POOL_VDEV_ZAP_MAP, tx); 6578299441Smav } 6579299441Smav spa->spa_avz_action = AVZ_ACTION_NONE; 6580299441Smav 6581299441Smav /* Create ZAPs for vdevs that don't have them. */ 6582299441Smav vdev_construct_zaps(spa->spa_root_vdev, tx); 6583299441Smav 6584185029Spjd config = spa_config_generate(spa, spa->spa_root_vdev, 6585185029Spjd dmu_tx_get_txg(tx), B_FALSE); 6586185029Spjd 6587243505Smm /* 6588243505Smm * If we're upgrading the spa version then make sure that 6589243505Smm * the config object gets updated with the correct version. 6590243505Smm */ 6591243505Smm if (spa->spa_ubsync.ub_version < spa->spa_uberblock.ub_version) 6592243505Smm fnvlist_add_uint64(config, ZPOOL_CONFIG_VERSION, 6593243505Smm spa->spa_uberblock.ub_version); 6594243505Smm 6595185029Spjd spa_config_exit(spa, SCL_STATE, FTAG); 6596185029Spjd 6597296528Smav nvlist_free(spa->spa_config_syncing); 6598168404Spjd spa->spa_config_syncing = config; 6599168404Spjd 6600168404Spjd spa_sync_nvlist(spa, spa->spa_config_object, config, tx); 6601168404Spjd} 6602168404Spjd 6603236884Smmstatic void 6604248571Smmspa_sync_version(void *arg, dmu_tx_t *tx) 6605236884Smm{ 6606248571Smm uint64_t *versionp = arg; 6607248571Smm uint64_t version = *versionp; 6608248571Smm spa_t *spa = dmu_tx_pool(tx)->dp_spa; 6609236884Smm 6610236884Smm /* 6611236884Smm * Setting the version is special cased when first creating the pool. 6612236884Smm */ 6613236884Smm ASSERT(tx->tx_txg != TXG_INITIAL); 6614236884Smm 6615247592Sdelphij ASSERT(SPA_VERSION_IS_SUPPORTED(version)); 6616236884Smm ASSERT(version >= spa_version(spa)); 6617236884Smm 6618236884Smm spa->spa_uberblock.ub_version = version; 6619236884Smm vdev_config_dirty(spa->spa_root_vdev); 6620248571Smm spa_history_log_internal(spa, "set", tx, "version=%lld", version); 6621236884Smm} 6622236884Smm 6623185029Spjd/* 6624185029Spjd * Set zpool properties. 
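 *
 * A hedged sketch of how updates typically reach this sync task: a
 * caller such as spa_prop_set() (earlier in this file) packs the new
 * values into an nvlist and submits this function as a sync task; the
 * parameters below are illustrative only:
 *
 *	nvlist_t *nvp = fnvlist_alloc();
 *	fnvlist_add_uint64(nvp,
 *	    zpool_prop_to_name(ZPOOL_PROP_AUTOEXPAND), 1);
 *	error = dsl_sync_task(spa_name(spa), NULL, spa_sync_props,
 *	    nvp, 6, ZFS_SPACE_CHECK_RESERVED);
 *	fnvlist_free(nvp);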
6625185029Spjd */
6626168404Spjdstatic void
6627248571Smmspa_sync_props(void *arg, dmu_tx_t *tx)
6628168404Spjd{
6629248571Smm	nvlist_t *nvp = arg;
6630248571Smm	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
6631185029Spjd	objset_t *mos = spa->spa_meta_objset;
6632236884Smm	nvpair_t *elem = NULL;
6633168404Spjd
6634168404Spjd	mutex_enter(&spa->spa_props_lock);
6635168404Spjd
6636185029Spjd	while ((elem = nvlist_next_nvpair(nvp, elem))) {
6637236884Smm		uint64_t intval;
6638236884Smm		char *strval, *fname;
6639236884Smm		zpool_prop_t prop;
6640236884Smm		const char *propname;
6641236884Smm		zprop_type_t proptype;
6642259813Sdelphij		spa_feature_t fid;
6643236884Smm
6644185029Spjd		switch (prop = zpool_name_to_prop(nvpair_name(elem))) {
6645236884Smm		case ZPROP_INVAL:
6646236884Smm			/*
6647236884Smm			 * We checked this earlier in spa_prop_validate().
6648236884Smm			 */
6649236884Smm			ASSERT(zpool_prop_feature(nvpair_name(elem)));
6650236884Smm
6651236884Smm			fname = strchr(nvpair_name(elem), '@') + 1;
6652259813Sdelphij			VERIFY0(zfeature_lookup_name(fname, &fid));
6653236884Smm
6654259813Sdelphij			spa_feature_enable(spa, fid, tx);
6655248571Smm			spa_history_log_internal(spa, "set", tx,
6656248571Smm			    "%s=enabled", nvpair_name(elem));
6657236884Smm			break;
6658236884Smm
6659185029Spjd		case ZPOOL_PROP_VERSION:
6660258717Savg			intval = fnvpair_value_uint64(elem);
6661185029Spjd			/*
6662236884Smm			 * The version is synced separately before other
6663236884Smm			 * properties and should be correct by now.
6664185029Spjd			 */
6665236884Smm			ASSERT3U(spa_version(spa), >=, intval);
6666185029Spjd			break;
6667168404Spjd
6668185029Spjd		case ZPOOL_PROP_ALTROOT:
6669185029Spjd			/*
6670185029Spjd			 * 'altroot' is a non-persistent property. It should
6671185029Spjd			 * have been set temporarily at creation or import time.
6672185029Spjd			 */
6673185029Spjd			ASSERT(spa->spa_root != NULL);
6674185029Spjd			break;
6675168404Spjd
6676219089Spjd		case ZPOOL_PROP_READONLY:
6677185029Spjd		case ZPOOL_PROP_CACHEFILE:
6678185029Spjd			/*
6679219089Spjd			 * 'readonly' and 'cachefile' are also non-persistent
6680219089Spjd			 * properties.
6681185029Spjd			 */
6682168404Spjd			break;
6683228103Smm		case ZPOOL_PROP_COMMENT:
6684258717Savg			strval = fnvpair_value_string(elem);
6685228103Smm			if (spa->spa_comment != NULL)
6686228103Smm				spa_strfree(spa->spa_comment);
6687228103Smm			spa->spa_comment = spa_strdup(strval);
6688228103Smm			/*
6689228103Smm			 * We need to dirty the configuration on all the vdevs
6690228103Smm			 * so that their labels get updated. It's unnecessary
6691228103Smm			 * to do this for pool creation since the vdev's
6692228103Smm			 * configuration has already been dirtied.
6693228103Smm			 */
6694228103Smm			if (tx->tx_txg != TXG_INITIAL)
6695228103Smm				vdev_config_dirty(spa->spa_root_vdev);
6696248571Smm			spa_history_log_internal(spa, "set", tx,
6697248571Smm			    "%s=%s", nvpair_name(elem), strval);
6698228103Smm			break;
6699185029Spjd		default:
6700185029Spjd			/*
6701185029Spjd			 * Set pool property values in the poolprops mos object.
6702185029Spjd */ 6703185029Spjd if (spa->spa_pool_props_object == 0) { 6704236884Smm spa->spa_pool_props_object = 6705236884Smm zap_create_link(mos, DMU_OT_POOL_PROPS, 6706185029Spjd DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_PROPS, 6707236884Smm tx); 6708185029Spjd } 6709185029Spjd 6710185029Spjd /* normalize the property name */ 6711185029Spjd propname = zpool_prop_to_name(prop); 6712185029Spjd proptype = zpool_prop_get_type(prop); 6713185029Spjd 6714185029Spjd if (nvpair_type(elem) == DATA_TYPE_STRING) { 6715185029Spjd ASSERT(proptype == PROP_TYPE_STRING); 6716258717Savg strval = fnvpair_value_string(elem); 6717258717Savg VERIFY0(zap_update(mos, 6718185029Spjd spa->spa_pool_props_object, propname, 6719258717Savg 1, strlen(strval) + 1, strval, tx)); 6720248571Smm spa_history_log_internal(spa, "set", tx, 6721248571Smm "%s=%s", nvpair_name(elem), strval); 6722185029Spjd } else if (nvpair_type(elem) == DATA_TYPE_UINT64) { 6723258717Savg intval = fnvpair_value_uint64(elem); 6724185029Spjd 6725185029Spjd if (proptype == PROP_TYPE_INDEX) { 6726185029Spjd const char *unused; 6727258717Savg VERIFY0(zpool_prop_index_to_string( 6728258717Savg prop, intval, &unused)); 6729185029Spjd } 6730258717Savg VERIFY0(zap_update(mos, 6731185029Spjd spa->spa_pool_props_object, propname, 6732258717Savg 8, 1, &intval, tx)); 6733248571Smm spa_history_log_internal(spa, "set", tx, 6734248571Smm "%s=%lld", nvpair_name(elem), intval); 6735185029Spjd } else { 6736185029Spjd ASSERT(0); /* not allowed */ 6737185029Spjd } 6738185029Spjd 6739185029Spjd switch (prop) { 6740185029Spjd case ZPOOL_PROP_DELEGATION: 6741185029Spjd spa->spa_delegation = intval; 6742185029Spjd break; 6743185029Spjd case ZPOOL_PROP_BOOTFS: 6744185029Spjd spa->spa_bootfs = intval; 6745185029Spjd break; 6746185029Spjd case ZPOOL_PROP_FAILUREMODE: 6747185029Spjd spa->spa_failmode = intval; 6748185029Spjd break; 6749219089Spjd case ZPOOL_PROP_AUTOEXPAND: 6750219089Spjd spa->spa_autoexpand = intval; 6751219089Spjd if (tx->tx_txg != TXG_INITIAL) 6752219089Spjd spa_async_request(spa, 6753219089Spjd SPA_ASYNC_AUTOEXPAND); 6754219089Spjd break; 6755219089Spjd case ZPOOL_PROP_DEDUPDITTO: 6756219089Spjd spa->spa_dedup_ditto = intval; 6757219089Spjd break; 6758185029Spjd default: 6759185029Spjd break; 6760185029Spjd } 6761168404Spjd } 6762185029Spjd 6763168404Spjd } 6764185029Spjd 6765185029Spjd mutex_exit(&spa->spa_props_lock); 6766168404Spjd} 6767168404Spjd 6768168404Spjd/* 6769219089Spjd * Perform one-time upgrade on-disk changes. spa_version() does not 6770219089Spjd * reflect the new version this txg, so there must be no changes this 6771219089Spjd * txg to anything that the upgrade code depends on after it executes. 6772219089Spjd * Therefore this must be called after dsl_pool_sync() does the sync 6773219089Spjd * tasks. 
6774219089Spjd */
6775219089Spjdstatic void
6776219089Spjdspa_sync_upgrades(spa_t *spa, dmu_tx_t *tx)
6777219089Spjd{
6778219089Spjd	dsl_pool_t *dp = spa->spa_dsl_pool;
6779219089Spjd
6780219089Spjd	ASSERT(spa->spa_sync_pass == 1);
6781219089Spjd
6782248571Smm	rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG);
6783248571Smm
6784219089Spjd	if (spa->spa_ubsync.ub_version < SPA_VERSION_ORIGIN &&
6785219089Spjd	    spa->spa_uberblock.ub_version >= SPA_VERSION_ORIGIN) {
6786219089Spjd		dsl_pool_create_origin(dp, tx);
6787219089Spjd
6788219089Spjd		/* Keeping the origin open increases spa_minref */
6789219089Spjd		spa->spa_minref += 3;
6790219089Spjd	}
6791219089Spjd
6792219089Spjd	if (spa->spa_ubsync.ub_version < SPA_VERSION_NEXT_CLONES &&
6793219089Spjd	    spa->spa_uberblock.ub_version >= SPA_VERSION_NEXT_CLONES) {
6794219089Spjd		dsl_pool_upgrade_clones(dp, tx);
6795219089Spjd	}
6796219089Spjd
6797219089Spjd	if (spa->spa_ubsync.ub_version < SPA_VERSION_DIR_CLONES &&
6798219089Spjd	    spa->spa_uberblock.ub_version >= SPA_VERSION_DIR_CLONES) {
6799219089Spjd		dsl_pool_upgrade_dir_clones(dp, tx);
6800219089Spjd
6801219089Spjd		/* Keeping the freedir open increases spa_minref */
6802219089Spjd		spa->spa_minref += 3;
6803219089Spjd	}
6804236884Smm
6805236884Smm	if (spa->spa_ubsync.ub_version < SPA_VERSION_FEATURES &&
6806236884Smm	    spa->spa_uberblock.ub_version >= SPA_VERSION_FEATURES) {
6807236884Smm		spa_feature_create_zap_objects(spa, tx);
6808236884Smm	}
6809268126Sdelphij
6810268126Sdelphij	/*
6811268126Sdelphij	 * The LZ4_COMPRESS feature's behaviour was changed to
6812268126Sdelphij	 * activate_on_enable when the possibility to use lz4 compression
6813268126Sdelphij	 * for metadata was added. Old pools that have this feature enabled
6814268126Sdelphij	 * must be upgraded to have this feature active.
6815268126Sdelphij	 */
6816268126Sdelphij	if (spa->spa_uberblock.ub_version >= SPA_VERSION_FEATURES) {
6817268126Sdelphij		boolean_t lz4_en = spa_feature_is_enabled(spa,
6818268126Sdelphij		    SPA_FEATURE_LZ4_COMPRESS);
6819268126Sdelphij		boolean_t lz4_ac = spa_feature_is_active(spa,
6820268126Sdelphij		    SPA_FEATURE_LZ4_COMPRESS);
6821268126Sdelphij
6822268126Sdelphij		if (lz4_en && !lz4_ac)
6823268126Sdelphij			spa_feature_incr(spa, SPA_FEATURE_LZ4_COMPRESS, tx);
6824268126Sdelphij	}
6825289422Smav
6826289422Smav	/*
6827289422Smav	 * If we haven't written the salt, do so now. Note that the
6828289422Smav	 * feature may not be activated yet, but that's fine since
6829289422Smav	 * the presence of this ZAP entry is backwards compatible.
6830289422Smav	 */
6831289422Smav	if (zap_contains(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
6832289422Smav	    DMU_POOL_CHECKSUM_SALT) == ENOENT) {
6833289422Smav		VERIFY0(zap_add(spa->spa_meta_objset,
6834289422Smav		    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CHECKSUM_SALT, 1,
6835289422Smav		    sizeof (spa->spa_cksum_salt.zcs_bytes),
6836289422Smav		    spa->spa_cksum_salt.zcs_bytes, tx));
6837289422Smav	}
6838289422Smav
6839248571Smm	rrw_exit(&dp->dp_config_rwlock, FTAG);
6840219089Spjd}
6841219089Spjd
6842219089Spjd/*
6843168404Spjd * Sync the specified transaction group. New blocks may be dirtied as
6844168404Spjd * part of the process, so we iterate until it converges.
6845168404Spjd */ 6846168404Spjdvoid 6847168404Spjdspa_sync(spa_t *spa, uint64_t txg) 6848168404Spjd{ 6849168404Spjd dsl_pool_t *dp = spa->spa_dsl_pool; 6850168404Spjd objset_t *mos = spa->spa_meta_objset; 6851219089Spjd bplist_t *free_bpl = &spa->spa_free_bplist[txg & TXG_MASK]; 6852168404Spjd vdev_t *rvd = spa->spa_root_vdev; 6853168404Spjd vdev_t *vd; 6854168404Spjd dmu_tx_t *tx; 6855185029Spjd int error; 6856307277Smav uint32_t max_queue_depth = zfs_vdev_async_write_max_active * 6857307277Smav zfs_vdev_queue_depth_pct / 100; 6858168404Spjd 6859219089Spjd VERIFY(spa_writeable(spa)); 6860219089Spjd 6861168404Spjd /* 6862168404Spjd * Lock out configuration changes. 6863168404Spjd */ 6864185029Spjd spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); 6865168404Spjd 6866168404Spjd spa->spa_syncing_txg = txg; 6867168404Spjd spa->spa_sync_pass = 0; 6868168404Spjd 6869307277Smav mutex_enter(&spa->spa_alloc_lock); 6870307277Smav VERIFY0(avl_numnodes(&spa->spa_alloc_tree)); 6871307277Smav mutex_exit(&spa->spa_alloc_lock); 6872307277Smav 6873185029Spjd /* 6874185029Spjd * If there are any pending vdev state changes, convert them 6875185029Spjd * into config changes that go out with this transaction group. 6876185029Spjd */ 6877185029Spjd spa_config_enter(spa, SCL_STATE, FTAG, RW_READER); 6878209962Smm while (list_head(&spa->spa_state_dirty_list) != NULL) { 6879209962Smm /* 6880209962Smm * We need the write lock here because, for aux vdevs, 6881209962Smm * calling vdev_config_dirty() modifies sav_config. 6882209962Smm * This is ugly and will become unnecessary when we 6883209962Smm * eliminate the aux vdev wart by integrating all vdevs 6884209962Smm * into the root vdev tree. 6885209962Smm */ 6886209962Smm spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG); 6887209962Smm spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_WRITER); 6888209962Smm while ((vd = list_head(&spa->spa_state_dirty_list)) != NULL) { 6889209962Smm vdev_state_clean(vd); 6890209962Smm vdev_config_dirty(vd); 6891209962Smm } 6892209962Smm spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG); 6893209962Smm spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER); 6894185029Spjd } 6895185029Spjd spa_config_exit(spa, SCL_STATE, FTAG); 6896185029Spjd 6897168404Spjd tx = dmu_tx_create_assigned(dp, txg); 6898168404Spjd 6899247265Smm spa->spa_sync_starttime = gethrtime(); 6900247265Smm#ifdef illumos 6901247265Smm VERIFY(cyclic_reprogram(spa->spa_deadman_cycid, 6902247265Smm spa->spa_sync_starttime + spa->spa_deadman_synctime)); 6903277300Ssmh#else /* !illumos */ 6904247265Smm#ifdef _KERNEL 6905314665Savg callout_schedule(&spa->spa_deadman_cycid, 6906314665Savg hz * spa->spa_deadman_synctime / NANOSEC); 6907247265Smm#endif 6908277300Ssmh#endif /* illumos */ 6909247265Smm 6910168404Spjd /* 6911185029Spjd * If we are upgrading to SPA_VERSION_RAIDZ_DEFLATE this txg, 6912168404Spjd * set spa_deflate if we have no raid-z vdevs. 
6913168404Spjd */ 6914185029Spjd if (spa->spa_ubsync.ub_version < SPA_VERSION_RAIDZ_DEFLATE && 6915185029Spjd spa->spa_uberblock.ub_version >= SPA_VERSION_RAIDZ_DEFLATE) { 6916168404Spjd int i; 6917168404Spjd 6918168404Spjd for (i = 0; i < rvd->vdev_children; i++) { 6919168404Spjd vd = rvd->vdev_child[i]; 6920168404Spjd if (vd->vdev_deflate_ratio != SPA_MINBLOCKSIZE) 6921168404Spjd break; 6922168404Spjd } 6923168404Spjd if (i == rvd->vdev_children) { 6924168404Spjd spa->spa_deflate = TRUE; 6925168404Spjd VERIFY(0 == zap_add(spa->spa_meta_objset, 6926168404Spjd DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE, 6927168404Spjd sizeof (uint64_t), 1, &spa->spa_deflate, tx)); 6928168404Spjd } 6929168404Spjd } 6930168404Spjd 6931168404Spjd /* 6932307277Smav * Set the top-level vdev's max queue depth. Evaluate each 6933307277Smav * top-level's async write queue depth in case it changed. 6934307277Smav * The max queue depth will not change in the middle of syncing 6935307277Smav * out this txg. 6936307277Smav */ 6937307277Smav uint64_t queue_depth_total = 0; 6938307277Smav for (int c = 0; c < rvd->vdev_children; c++) { 6939307277Smav vdev_t *tvd = rvd->vdev_child[c]; 6940307277Smav metaslab_group_t *mg = tvd->vdev_mg; 6941307277Smav 6942307277Smav if (mg == NULL || mg->mg_class != spa_normal_class(spa) || 6943307277Smav !metaslab_group_initialized(mg)) 6944307277Smav continue; 6945307277Smav 6946307277Smav /* 6947307277Smav * It is safe to do a lock-free check here because only async 6948307277Smav * allocations look at mg_max_alloc_queue_depth, and async 6949307277Smav * allocations all happen from spa_sync(). 6950307277Smav */ 6951307277Smav ASSERT0(refcount_count(&mg->mg_alloc_queue_depth)); 6952307277Smav mg->mg_max_alloc_queue_depth = max_queue_depth; 6953307277Smav queue_depth_total += mg->mg_max_alloc_queue_depth; 6954307277Smav } 6955307277Smav metaslab_class_t *mc = spa_normal_class(spa); 6956307277Smav ASSERT0(refcount_count(&mc->mc_alloc_slots)); 6957307277Smav mc->mc_alloc_max_slots = queue_depth_total; 6958307277Smav mc->mc_alloc_throttle_enabled = zio_dva_throttle_enabled; 6959307277Smav 6960307277Smav ASSERT3U(mc->mc_alloc_max_slots, <=, 6961307277Smav max_queue_depth * rvd->vdev_children); 6962307277Smav 6963307277Smav /* 6964168404Spjd * Iterate to convergence. 6965168404Spjd */ 6966168404Spjd do { 6967219089Spjd int pass = ++spa->spa_sync_pass; 6968168404Spjd 6969168404Spjd spa_sync_config_object(spa, tx); 6970185029Spjd spa_sync_aux_dev(spa, &spa->spa_spares, tx, 6971185029Spjd ZPOOL_CONFIG_SPARES, DMU_POOL_SPARES); 6972185029Spjd spa_sync_aux_dev(spa, &spa->spa_l2cache, tx, 6973185029Spjd ZPOOL_CONFIG_L2CACHE, DMU_POOL_L2CACHE); 6974168404Spjd spa_errlog_sync(spa, txg); 6975168404Spjd dsl_pool_sync(dp, txg); 6976168404Spjd 6977243503Smm if (pass < zfs_sync_pass_deferred_free) { 6978258632Savg spa_sync_frees(spa, free_bpl, tx); 6979219089Spjd } else { 6980275781Sdelphij /* 6981275781Sdelphij * We can not defer frees in pass 1, because 6982275781Sdelphij * we sync the deferred frees later in pass 1. 
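 *
 * A hedged sketch of how the passes interact, assuming the default
 * zfs_sync_pass_deferred_free value of 2: pass 1 issues frees
 * immediately via spa_sync_frees(); passes 2 and later instead enqueue
 * them onto spa_deferred_bpobj through bpobj_enqueue_cb(), and those
 * deferred frees are only issued by spa_sync_deferred_frees() during
 * pass 1 of a later txg.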
6983275781Sdelphij */ 6984275781Sdelphij ASSERT3U(pass, >, 1); 6985219089Spjd bplist_iterate(free_bpl, bpobj_enqueue_cb, 6986258632Savg &spa->spa_deferred_bpobj, tx); 6987168404Spjd } 6988168404Spjd 6989219089Spjd ddt_sync(spa, txg); 6990219089Spjd dsl_scan_sync(dp, tx); 6991168404Spjd 6992219089Spjd while (vd = txg_list_remove(&spa->spa_vdev_txg_list, txg)) 6993219089Spjd vdev_sync(vd, txg); 6994168404Spjd 6995275781Sdelphij if (pass == 1) { 6996219089Spjd spa_sync_upgrades(spa, tx); 6997275781Sdelphij ASSERT3U(txg, >=, 6998275781Sdelphij spa->spa_uberblock.ub_rootbp.blk_birth); 6999275781Sdelphij /* 7000275781Sdelphij * Note: We need to check if the MOS is dirty 7001275781Sdelphij * because we could have marked the MOS dirty 7002275781Sdelphij * without updating the uberblock (e.g. if we 7003275781Sdelphij * have sync tasks but no dirty user data). We 7004275781Sdelphij * need to check the uberblock's rootbp because 7005275781Sdelphij * it is updated if we have synced out dirty 7006275781Sdelphij * data (though in this case the MOS will most 7007275781Sdelphij * likely also be dirty due to second order 7008275781Sdelphij * effects, we don't want to rely on that here). 7009275781Sdelphij */ 7010275781Sdelphij if (spa->spa_uberblock.ub_rootbp.blk_birth < txg && 7011275781Sdelphij !dmu_objset_is_dirty(mos, txg)) { 7012275781Sdelphij /* 7013275781Sdelphij * Nothing changed on the first pass, 7014275781Sdelphij * therefore this TXG is a no-op. Avoid 7015275781Sdelphij * syncing deferred frees, so that we 7016275781Sdelphij * can keep this TXG as a no-op. 7017275781Sdelphij */ 7018275781Sdelphij ASSERT(txg_list_empty(&dp->dp_dirty_datasets, 7019275781Sdelphij txg)); 7020275781Sdelphij ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg)); 7021275781Sdelphij ASSERT(txg_list_empty(&dp->dp_sync_tasks, txg)); 7022275781Sdelphij break; 7023275781Sdelphij } 7024275781Sdelphij spa_sync_deferred_frees(spa, tx); 7025275781Sdelphij } 7026168404Spjd 7027219089Spjd } while (dmu_objset_is_dirty(mos, txg)); 7028219089Spjd 7029299441Smav if (!list_is_empty(&spa->spa_config_dirty_list)) { 7030299441Smav /* 7031299441Smav * Make sure that the number of ZAPs for all the vdevs matches 7032299441Smav * the number of ZAPs in the per-vdev ZAP list. This only gets 7033299441Smav * called if the config is dirty; otherwise there may be 7034299441Smav * outstanding AVZ operations that weren't completed in 7035299441Smav * spa_sync_config_object. 7036299441Smav */ 7037299441Smav uint64_t all_vdev_zap_entry_count; 7038299441Smav ASSERT0(zap_count(spa->spa_meta_objset, 7039299441Smav spa->spa_all_vdev_zaps, &all_vdev_zap_entry_count)); 7040299441Smav ASSERT3U(vdev_count_verify_zaps(spa->spa_root_vdev), ==, 7041299441Smav all_vdev_zap_entry_count); 7042299441Smav } 7043299441Smav 7044168404Spjd /* 7045168404Spjd * Rewrite the vdev configuration (which includes the uberblock) 7046168404Spjd * to commit the transaction group. 7047168404Spjd * 7048185029Spjd * If there are no dirty vdevs, we sync the uberblock to a few 7049185029Spjd * random top-level vdevs that are known to be visible in the 7050185029Spjd * config cache (see spa_vdev_add() for a complete description). 7051185029Spjd * If there *are* dirty vdevs, sync the uberblock to all vdevs. 7052168404Spjd */ 7053185029Spjd for (;;) { 7054185029Spjd /* 7055185029Spjd * We hold SCL_STATE to prevent vdev open/close/etc. 7056185029Spjd * while we're attempting to write the vdev labels. 
7057185029Spjd */ 7058185029Spjd spa_config_enter(spa, SCL_STATE, FTAG, RW_READER); 7059168404Spjd 7060185029Spjd if (list_is_empty(&spa->spa_config_dirty_list)) { 7061185029Spjd vdev_t *svd[SPA_DVAS_PER_BP]; 7062185029Spjd int svdcount = 0; 7063185029Spjd int children = rvd->vdev_children; 7064185029Spjd int c0 = spa_get_random(children); 7065185029Spjd 7066219089Spjd for (int c = 0; c < children; c++) { 7067185029Spjd vd = rvd->vdev_child[(c0 + c) % children]; 7068185029Spjd if (vd->vdev_ms_array == 0 || vd->vdev_islog) 7069185029Spjd continue; 7070185029Spjd svd[svdcount++] = vd; 7071185029Spjd if (svdcount == SPA_DVAS_PER_BP) 7072185029Spjd break; 7073185029Spjd } 7074294811Smav error = vdev_config_sync(svd, svdcount, txg); 7075185029Spjd } else { 7076185029Spjd error = vdev_config_sync(rvd->vdev_child, 7077294811Smav rvd->vdev_children, txg); 7078168404Spjd } 7079185029Spjd 7080239620Smm if (error == 0) 7081239620Smm spa->spa_last_synced_guid = rvd->vdev_guid; 7082239620Smm 7083185029Spjd spa_config_exit(spa, SCL_STATE, FTAG); 7084185029Spjd 7085185029Spjd if (error == 0) 7086185029Spjd break; 7087185029Spjd zio_suspend(spa, NULL); 7088185029Spjd zio_resume_wait(spa); 7089168404Spjd } 7090168404Spjd dmu_tx_commit(tx); 7091168404Spjd 7092247265Smm#ifdef illumos 7093247265Smm VERIFY(cyclic_reprogram(spa->spa_deadman_cycid, CY_INFINITY)); 7094277300Ssmh#else /* !illumos */ 7095247265Smm#ifdef _KERNEL 7096247265Smm callout_drain(&spa->spa_deadman_cycid); 7097247265Smm#endif 7098277300Ssmh#endif /* illumos */ 7099247265Smm 7100168404Spjd /* 7101168404Spjd * Clear the dirty config list. 7102168404Spjd */ 7103185029Spjd while ((vd = list_head(&spa->spa_config_dirty_list)) != NULL) 7104168404Spjd vdev_config_clean(vd); 7105168404Spjd 7106168404Spjd /* 7107168404Spjd * Now that the new config has synced transactionally, 7108168404Spjd * let it become visible to the config cache. 7109168404Spjd */ 7110168404Spjd if (spa->spa_config_syncing != NULL) { 7111168404Spjd spa_config_set(spa, spa->spa_config_syncing); 7112168404Spjd spa->spa_config_txg = txg; 7113168404Spjd spa->spa_config_syncing = NULL; 7114168404Spjd } 7115168404Spjd 7116219089Spjd dsl_pool_sync_done(dp, txg); 7117168404Spjd 7118307277Smav mutex_enter(&spa->spa_alloc_lock); 7119307277Smav VERIFY0(avl_numnodes(&spa->spa_alloc_tree)); 7120307277Smav mutex_exit(&spa->spa_alloc_lock); 7121307277Smav 7122168404Spjd /* 7123168404Spjd * Update usable space statistics. 7124168404Spjd */ 7125168404Spjd while (vd = txg_list_remove(&spa->spa_vdev_txg_list, TXG_CLEAN(txg))) 7126168404Spjd vdev_sync_done(vd, txg); 7127168404Spjd 7128219089Spjd spa_update_dspace(spa); 7129219089Spjd 7130168404Spjd /* 7131168404Spjd * It had better be the case that we didn't dirty anything 7132168404Spjd * since vdev_config_sync(). 7133168404Spjd */ 7134168404Spjd ASSERT(txg_list_empty(&dp->dp_dirty_datasets, txg)); 7135168404Spjd ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg)); 7136168404Spjd ASSERT(txg_list_empty(&spa->spa_vdev_txg_list, txg)); 7137168404Spjd 7138219089Spjd spa->spa_sync_pass = 0; 7139219089Spjd 7140310515Savg /* 7141310515Savg * Update the last synced uberblock here. We want to do this at 7142310515Savg * the end of spa_sync() so that consumers of spa_last_synced_txg() 7143310515Savg * will be guaranteed that all the processing associated with 7144310515Savg * that txg has been completed. 
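 *
 * A minimal illustrative sketch of the guarantee this gives callers
 * (using existing interfaces; not part of the original code):
 *
 *	uint64_t txg = spa_last_synced_txg(spa);
 *	txg_wait_synced(spa_get_dsl(spa), txg + 1);
 *	ASSERT3U(spa_last_synced_txg(spa), >=, txg + 1);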
7145310515Savg */ 7146310515Savg spa->spa_ubsync = spa->spa_uberblock; 7147185029Spjd spa_config_exit(spa, SCL_CONFIG, FTAG); 7148168404Spjd 7149219089Spjd spa_handle_ignored_writes(spa); 7150219089Spjd 7151168404Spjd /* 7152168404Spjd * If any async tasks have been requested, kick them off. 7153168404Spjd */ 7154168404Spjd spa_async_dispatch(spa); 7155253990Smav spa_async_dispatch_vd(spa); 7156168404Spjd} 7157168404Spjd 7158168404Spjd/* 7159168404Spjd * Sync all pools. We don't want to hold the namespace lock across these 7160168404Spjd * operations, so we take a reference on the spa_t and drop the lock during the 7161168404Spjd * sync. 7162168404Spjd */ 7163168404Spjdvoid 7164168404Spjdspa_sync_allpools(void) 7165168404Spjd{ 7166168404Spjd spa_t *spa = NULL; 7167168404Spjd mutex_enter(&spa_namespace_lock); 7168168404Spjd while ((spa = spa_next(spa)) != NULL) { 7169219089Spjd if (spa_state(spa) != POOL_STATE_ACTIVE || 7170219089Spjd !spa_writeable(spa) || spa_suspended(spa)) 7171168404Spjd continue; 7172168404Spjd spa_open_ref(spa, FTAG); 7173168404Spjd mutex_exit(&spa_namespace_lock); 7174168404Spjd txg_wait_synced(spa_get_dsl(spa), 0); 7175168404Spjd mutex_enter(&spa_namespace_lock); 7176168404Spjd spa_close(spa, FTAG); 7177168404Spjd } 7178168404Spjd mutex_exit(&spa_namespace_lock); 7179168404Spjd} 7180168404Spjd 7181168404Spjd/* 7182168404Spjd * ========================================================================== 7183168404Spjd * Miscellaneous routines 7184168404Spjd * ========================================================================== 7185168404Spjd */ 7186168404Spjd 7187168404Spjd/* 7188168404Spjd * Remove all pools in the system. 7189168404Spjd */ 7190168404Spjdvoid 7191168404Spjdspa_evict_all(void) 7192168404Spjd{ 7193168404Spjd spa_t *spa; 7194168404Spjd 7195168404Spjd /* 7196168404Spjd * Remove all cached state. All pools should be closed now, 7197168404Spjd * so every spa in the AVL tree should be unreferenced. 7198168404Spjd */ 7199168404Spjd mutex_enter(&spa_namespace_lock); 7200168404Spjd while ((spa = spa_next(NULL)) != NULL) { 7201168404Spjd /* 7202168404Spjd * Stop async tasks. The async thread may need to detach 7203168404Spjd * a device that's been replaced, which requires grabbing 7204168404Spjd * spa_namespace_lock, so we must drop it here. 
7205168404Spjd */ 7206168404Spjd spa_open_ref(spa, FTAG); 7207168404Spjd mutex_exit(&spa_namespace_lock); 7208168404Spjd spa_async_suspend(spa); 7209168404Spjd mutex_enter(&spa_namespace_lock); 7210168404Spjd spa_close(spa, FTAG); 7211168404Spjd 7212168404Spjd if (spa->spa_state != POOL_STATE_UNINITIALIZED) { 7213168404Spjd spa_unload(spa); 7214168404Spjd spa_deactivate(spa); 7215168404Spjd } 7216168404Spjd spa_remove(spa); 7217168404Spjd } 7218168404Spjd mutex_exit(&spa_namespace_lock); 7219168404Spjd} 7220168404Spjd 7221168404Spjdvdev_t * 7222209962Smmspa_lookup_by_guid(spa_t *spa, uint64_t guid, boolean_t aux) 7223168404Spjd{ 7224185029Spjd vdev_t *vd; 7225185029Spjd int i; 7226185029Spjd 7227185029Spjd if ((vd = vdev_lookup_by_guid(spa->spa_root_vdev, guid)) != NULL) 7228185029Spjd return (vd); 7229185029Spjd 7230209962Smm if (aux) { 7231185029Spjd for (i = 0; i < spa->spa_l2cache.sav_count; i++) { 7232185029Spjd vd = spa->spa_l2cache.sav_vdevs[i]; 7233185029Spjd if (vd->vdev_guid == guid) 7234185029Spjd return (vd); 7235185029Spjd } 7236209962Smm 7237209962Smm for (i = 0; i < spa->spa_spares.sav_count; i++) { 7238209962Smm vd = spa->spa_spares.sav_vdevs[i]; 7239209962Smm if (vd->vdev_guid == guid) 7240209962Smm return (vd); 7241209962Smm } 7242185029Spjd } 7243185029Spjd 7244185029Spjd return (NULL); 7245168404Spjd} 7246168404Spjd 7247168404Spjdvoid 7248185029Spjdspa_upgrade(spa_t *spa, uint64_t version) 7249168404Spjd{ 7250219089Spjd ASSERT(spa_writeable(spa)); 7251219089Spjd 7252185029Spjd spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 7253168404Spjd 7254168404Spjd /* 7255168404Spjd * This should only be called for a non-faulted pool, and since a 7256168404Spjd * future version would result in an unopenable pool, this shouldn't be 7257168404Spjd * possible. 7258168404Spjd */ 7259247592Sdelphij ASSERT(SPA_VERSION_IS_SUPPORTED(spa->spa_uberblock.ub_version)); 7260268075Sdelphij ASSERT3U(version, >=, spa->spa_uberblock.ub_version); 7261168404Spjd 7262185029Spjd spa->spa_uberblock.ub_version = version; 7263168404Spjd vdev_config_dirty(spa->spa_root_vdev); 7264168404Spjd 7265185029Spjd spa_config_exit(spa, SCL_ALL, FTAG); 7266168404Spjd 7267168404Spjd txg_wait_synced(spa_get_dsl(spa), 0); 7268168404Spjd} 7269168404Spjd 7270168404Spjdboolean_t 7271168404Spjdspa_has_spare(spa_t *spa, uint64_t guid) 7272168404Spjd{ 7273168404Spjd int i; 7274168404Spjd uint64_t spareguid; 7275185029Spjd spa_aux_vdev_t *sav = &spa->spa_spares; 7276168404Spjd 7277185029Spjd for (i = 0; i < sav->sav_count; i++) 7278185029Spjd if (sav->sav_vdevs[i]->vdev_guid == guid) 7279168404Spjd return (B_TRUE); 7280168404Spjd 7281185029Spjd for (i = 0; i < sav->sav_npending; i++) { 7282185029Spjd if (nvlist_lookup_uint64(sav->sav_pending[i], ZPOOL_CONFIG_GUID, 7283185029Spjd &spareguid) == 0 && spareguid == guid) 7284168404Spjd return (B_TRUE); 7285168404Spjd } 7286168404Spjd 7287168404Spjd return (B_FALSE); 7288168404Spjd} 7289168404Spjd 7290185029Spjd/* 7291185029Spjd * Check if a pool has an active shared spare device. 
7292185029Spjd * Note: reference count of an active spare is 2, as a spare and as a replace 7293185029Spjd */ 7294185029Spjdstatic boolean_t 7295185029Spjdspa_has_active_shared_spare(spa_t *spa) 7296168404Spjd{ 7297185029Spjd int i, refcnt; 7298185029Spjd uint64_t pool; 7299185029Spjd spa_aux_vdev_t *sav = &spa->spa_spares; 7300185029Spjd 7301185029Spjd for (i = 0; i < sav->sav_count; i++) { 7302185029Spjd if (spa_spare_exists(sav->sav_vdevs[i]->vdev_guid, &pool, 7303185029Spjd &refcnt) && pool != 0ULL && pool == spa_guid(spa) && 7304185029Spjd refcnt > 2) 7305185029Spjd return (B_TRUE); 7306185029Spjd } 7307185029Spjd 7308185029Spjd return (B_FALSE); 7309168404Spjd} 7310168404Spjd 7311307113Smavstatic sysevent_t * 7312307113Smavspa_event_create(spa_t *spa, vdev_t *vd, const char *name) 7313168404Spjd{ 7314307113Smav sysevent_t *ev = NULL; 7315185029Spjd#ifdef _KERNEL 7316185029Spjd sysevent_attr_list_t *attr = NULL; 7317185029Spjd sysevent_value_t value; 7318168404Spjd 7319185029Spjd ev = sysevent_alloc(EC_ZFS, (char *)name, SUNW_KERN_PUB "zfs", 7320185029Spjd SE_SLEEP); 7321307113Smav ASSERT(ev != NULL); 7322168404Spjd 7323185029Spjd value.value_type = SE_DATA_TYPE_STRING; 7324185029Spjd value.value.sv_string = spa_name(spa); 7325185029Spjd if (sysevent_add_attr(&attr, ZFS_EV_POOL_NAME, &value, SE_SLEEP) != 0) 7326185029Spjd goto done; 7327168404Spjd 7328185029Spjd value.value_type = SE_DATA_TYPE_UINT64; 7329185029Spjd value.value.sv_uint64 = spa_guid(spa); 7330185029Spjd if (sysevent_add_attr(&attr, ZFS_EV_POOL_GUID, &value, SE_SLEEP) != 0) 7331185029Spjd goto done; 7332168404Spjd 7333185029Spjd if (vd) { 7334185029Spjd value.value_type = SE_DATA_TYPE_UINT64; 7335185029Spjd value.value.sv_uint64 = vd->vdev_guid; 7336185029Spjd if (sysevent_add_attr(&attr, ZFS_EV_VDEV_GUID, &value, 7337185029Spjd SE_SLEEP) != 0) 7338185029Spjd goto done; 7339168404Spjd 7340185029Spjd if (vd->vdev_path) { 7341185029Spjd value.value_type = SE_DATA_TYPE_STRING; 7342185029Spjd value.value.sv_string = vd->vdev_path; 7343185029Spjd if (sysevent_add_attr(&attr, ZFS_EV_VDEV_PATH, 7344185029Spjd &value, SE_SLEEP) != 0) 7345185029Spjd goto done; 7346168404Spjd } 7347168404Spjd } 7348168404Spjd 7349185029Spjd if (sysevent_attach_attributes(ev, attr) != 0) 7350185029Spjd goto done; 7351185029Spjd attr = NULL; 7352168404Spjd 7353185029Spjddone: 7354185029Spjd if (attr) 7355185029Spjd sysevent_free_attr(attr); 7356307113Smav 7357307113Smav#endif 7358307113Smav return (ev); 7359307113Smav} 7360307113Smav 7361307113Smavstatic void 7362307113Smavspa_event_post(sysevent_t *ev) 7363307113Smav{ 7364307113Smav#ifdef _KERNEL 7365307113Smav sysevent_id_t eid; 7366307113Smav 7367307113Smav (void) log_sysevent(ev, SE_SLEEP, &eid); 7368185029Spjd sysevent_free(ev); 7369185029Spjd#endif 7370168404Spjd} 7371307113Smav 7372307113Smav/* 7373307113Smav * Post a sysevent corresponding to the given event. The 'name' must be one of 7374307113Smav * the event definitions in sys/sysevent/eventdefs.h. The payload will be 7375307113Smav * filled in from the spa and (optionally) the vdev. This doesn't do anything 7376307113Smav * in the userland libzpool, as we don't want consumers to misinterpret ztest 7377307113Smav * or zdb as real changes. 7378307113Smav */ 7379307113Smavvoid 7380307113Smavspa_event_notify(spa_t *spa, vdev_t *vd, const char *name) 7381307113Smav{ 7382307113Smav spa_event_post(spa_event_create(spa, vd, name)); 7383307113Smav} 7384
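/*
 * Illustrative usage sketch for the sysevent helpers above; the event
 * name must come from sys/sysevent/eventdefs.h, as noted in the
 * spa_event_notify() comment:
 *
 *	spa_event_notify(spa, vd, ESC_ZFS_VDEV_REMOVE_DEV);
 *
 * or, when the event should only be posted after locks are dropped,
 * the two-step form used by spa_vdev_remove():
 *
 *	sysevent_t *ev = spa_event_create(spa, vd, ESC_ZFS_VDEV_REMOVE_AUX);
 *	...
 *	if (ev)
 *		spa_event_post(ev);
 */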