spa.c revision 269118
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2014 by Delphix. All rights reserved.
 * Copyright (c) 2013, 2014, Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2013 Martin Matuska <mm@FreeBSD.org>. All rights reserved.
 */

/*
 * SPA: Storage Pool Allocator
 *
 * This file contains all the routines used when modifying on-disk SPA state.
 * This includes opening, importing, destroying, exporting a pool, and syncing
 * a pool.
 */

#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/ddt.h>
#include <sys/vdev_impl.h>
#include <sys/metaslab.h>
#include <sys/metaslab_impl.h>
#include <sys/uberblock_impl.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/dmu_traverse.h>
#include <sys/dmu_objset.h>
#include <sys/unique.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_synctask.h>
#include <sys/fs/zfs.h>
#include <sys/arc.h>
#include <sys/callb.h>
#include <sys/spa_boot.h>
#include <sys/zfs_ioctl.h>
#include <sys/dsl_scan.h>
#include <sys/dmu_send.h>
#include <sys/dsl_destroy.h>
#include <sys/dsl_userhold.h>
#include <sys/zfeature.h>
#include <sys/zvol.h>
#include <sys/trim_map.h>

#ifdef _KERNEL
#include <sys/callb.h>
#include <sys/cpupart.h>
#include <sys/zone.h>
#endif	/* _KERNEL */

#include "zfs_prop.h"
#include "zfs_comutil.h"

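/*
 * The check_hostid knob below is exported to userland as the
 * vfs.zfs.check_hostid sysctl.  Because it is declared with CTLFLAG_RWTUN it
 * can be changed at runtime with sysctl(8) or preset from loader.conf, e.g.:
 *
 *	sysctl vfs.zfs.check_hostid=0
 */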
/* Check hostid on import? */
static int check_hostid = 1;

SYSCTL_DECL(_vfs_zfs);
SYSCTL_INT(_vfs_zfs, OID_AUTO, check_hostid, CTLFLAG_RWTUN, &check_hostid, 0,
    "Check hostid on import?");

/*
 * The interval, in seconds, at which failed configuration cache file writes
 * should be retried.
 */
static int zfs_ccw_retry_interval = 300;

typedef enum zti_modes {
	ZTI_MODE_FIXED,		/* value is # of threads (min 1) */
	ZTI_MODE_BATCH,		/* cpu-intensive; value is ignored */
	ZTI_MODE_NULL,		/* don't create a taskq */
	ZTI_NMODES
} zti_modes_t;

#define	ZTI_P(n, q)	{ ZTI_MODE_FIXED, (n), (q) }
#define	ZTI_BATCH	{ ZTI_MODE_BATCH, 0, 1 }
#define	ZTI_NULL	{ ZTI_MODE_NULL, 0, 0 }

#define	ZTI_N(n)	ZTI_P(n, 1)
#define	ZTI_ONE		ZTI_N(1)

typedef struct zio_taskq_info {
	zti_modes_t zti_mode;
	uint_t zti_value;
	uint_t zti_count;
} zio_taskq_info_t;

static const char *const zio_taskq_types[ZIO_TASKQ_TYPES] = {
	"issue", "issue_high", "intr", "intr_high"
};

/*
 * This table defines the taskq settings for each ZFS I/O type.  When
 * initializing a pool, we use this table to create an appropriately sized
 * taskq.  Some operations are low volume and therefore have a small, static
 * number of threads assigned to their taskqs using the ZTI_N(#) or ZTI_ONE
 * macros.  Other operations process a large amount of data; the ZTI_BATCH
 * macro causes us to create a taskq oriented for throughput.  Some operations
 * are so high frequency and short-lived that the taskq itself can become a
 * point of lock contention.  The ZTI_P(#, #) macro indicates that we need an
 * additional degree of parallelism specified by the number of threads per-
 * taskq and the number of taskqs; when dispatching an event in this case, the
 * particular taskq is chosen at random.
 *
 * The different taskq priorities are to handle the different contexts (issue
 * and interrupt) and then to reserve threads for ZIO_PRIORITY_NOW I/Os that
 * need to be handled with minimum delay.
 */
const zio_taskq_info_t zio_taskqs[ZIO_TYPES][ZIO_TASKQ_TYPES] = {
	/* ISSUE	ISSUE_HIGH	INTR		INTR_HIGH */
	{ ZTI_ONE,	ZTI_NULL,	ZTI_ONE,	ZTI_NULL }, /* NULL */
	{ ZTI_N(8),	ZTI_NULL,	ZTI_P(12, 8),	ZTI_NULL }, /* READ */
	{ ZTI_BATCH,	ZTI_N(5),	ZTI_N(8),	ZTI_N(5) }, /* WRITE */
	{ ZTI_P(12, 8),	ZTI_NULL,	ZTI_ONE,	ZTI_NULL }, /* FREE */
	{ ZTI_ONE,	ZTI_NULL,	ZTI_ONE,	ZTI_NULL }, /* CLAIM */
	{ ZTI_ONE,	ZTI_NULL,	ZTI_ONE,	ZTI_NULL }, /* IOCTL */
};

static void spa_sync_version(void *arg, dmu_tx_t *tx);
static void spa_sync_props(void *arg, dmu_tx_t *tx);
static boolean_t spa_has_active_shared_spare(spa_t *spa);
static int spa_load_impl(spa_t *spa, uint64_t, nvlist_t *config,
    spa_load_state_t state, spa_import_type_t type, boolean_t mosconfig,
    char **ereport);
static void spa_vdev_resilver_done(spa_t *spa);

uint_t		zio_taskq_batch_pct = 75;	/* 1 thread per cpu in pset */
#ifdef PSRSET_BIND
id_t		zio_taskq_psrset_bind = PS_NONE;
#endif
#ifdef SYSDC
boolean_t	zio_taskq_sysdc = B_TRUE;	/* use SDC scheduling class */
#endif
uint_t		zio_taskq_basedc = 80;		/* base duty cycle */

boolean_t	spa_create_process = B_TRUE;	/* no process ==> no sysdc */
extern int	zfs_sync_pass_deferred_free;

#ifndef illumos
extern void spa_deadman(void *arg);
#endif

/*
 * This (illegal) pool name is used when temporarily importing a spa_t in order
 * to get the vdev stats associated with the imported devices.
 */
#define	TRYIMPORT_NAME	"$import"

/*
 * ==========================================================================
 * SPA properties routines
 * ==========================================================================
 */

/*
 * Add a (source=src, propname=propval) list to an nvlist.
 */
static void
spa_prop_add_list(nvlist_t *nvl, zpool_prop_t prop, char *strval,
    uint64_t intval, zprop_source_t src)
{
	const char *propname = zpool_prop_to_name(prop);
	nvlist_t *propval;

	VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP) == 0);
	VERIFY(nvlist_add_uint64(propval, ZPROP_SOURCE, src) == 0);

	if (strval != NULL)
		VERIFY(nvlist_add_string(propval, ZPROP_VALUE, strval) == 0);
	else
		VERIFY(nvlist_add_uint64(propval, ZPROP_VALUE, intval) == 0);

	VERIFY(nvlist_add_nvlist(nvl, propname, propval) == 0);
	nvlist_free(propval);
}

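/*
 * The helper above gives the result nvlist a regular shape: it is keyed by
 * property name, and each value is itself an nvlist of the form
 *
 *	{ ZPROP_SOURCE = src, ZPROP_VALUE = strval or intval }
 *
 * spa_prop_get_config() and spa_prop_get() below build their result lists
 * through spa_prop_add_list().
 */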
/*
 * Get property values from the spa configuration.
 */
static void
spa_prop_get_config(spa_t *spa, nvlist_t **nvp)
{
	vdev_t *rvd = spa->spa_root_vdev;
	dsl_pool_t *pool = spa->spa_dsl_pool;
	uint64_t size, alloc, cap, version;
	zprop_source_t src = ZPROP_SRC_NONE;
	spa_config_dirent_t *dp;
	metaslab_class_t *mc = spa_normal_class(spa);

	ASSERT(MUTEX_HELD(&spa->spa_props_lock));

	if (rvd != NULL) {
		alloc = metaslab_class_get_alloc(spa_normal_class(spa));
		size = metaslab_class_get_space(spa_normal_class(spa));
		spa_prop_add_list(*nvp, ZPOOL_PROP_NAME, spa_name(spa), 0, src);
		spa_prop_add_list(*nvp, ZPOOL_PROP_SIZE, NULL, size, src);
		spa_prop_add_list(*nvp, ZPOOL_PROP_ALLOCATED, NULL, alloc, src);
		spa_prop_add_list(*nvp, ZPOOL_PROP_FREE, NULL,
		    size - alloc, src);

		spa_prop_add_list(*nvp, ZPOOL_PROP_FRAGMENTATION, NULL,
		    metaslab_class_fragmentation(mc), src);
		spa_prop_add_list(*nvp, ZPOOL_PROP_EXPANDSZ, NULL,
		    metaslab_class_expandable_space(mc), src);
		spa_prop_add_list(*nvp, ZPOOL_PROP_READONLY, NULL,
		    (spa_mode(spa) == FREAD), src);

		cap = (size == 0) ? 0 : (alloc * 100 / size);
		spa_prop_add_list(*nvp, ZPOOL_PROP_CAPACITY, NULL, cap, src);

		spa_prop_add_list(*nvp, ZPOOL_PROP_DEDUPRATIO, NULL,
		    ddt_get_pool_dedup_ratio(spa), src);

		spa_prop_add_list(*nvp, ZPOOL_PROP_HEALTH, NULL,
		    rvd->vdev_state, src);

		version = spa_version(spa);
		if (version == zpool_prop_default_numeric(ZPOOL_PROP_VERSION))
			src = ZPROP_SRC_DEFAULT;
		else
			src = ZPROP_SRC_LOCAL;
		spa_prop_add_list(*nvp, ZPOOL_PROP_VERSION, NULL, version, src);
	}

	if (pool != NULL) {
		/*
		 * The $FREE directory was introduced in
		 * SPA_VERSION_DEADLISTS; when opening pools created before
		 * this version, freedir will be NULL.
		 */
		if (pool->dp_free_dir != NULL) {
			spa_prop_add_list(*nvp, ZPOOL_PROP_FREEING, NULL,
			    pool->dp_free_dir->dd_phys->dd_used_bytes, src);
		} else {
			spa_prop_add_list(*nvp, ZPOOL_PROP_FREEING,
			    NULL, 0, src);
		}

		if (pool->dp_leak_dir != NULL) {
			spa_prop_add_list(*nvp, ZPOOL_PROP_LEAKED, NULL,
			    pool->dp_leak_dir->dd_phys->dd_used_bytes, src);
		} else {
			spa_prop_add_list(*nvp, ZPOOL_PROP_LEAKED,
			    NULL, 0, src);
		}
	}

	spa_prop_add_list(*nvp, ZPOOL_PROP_GUID, NULL, spa_guid(spa), src);

	if (spa->spa_comment != NULL) {
		spa_prop_add_list(*nvp, ZPOOL_PROP_COMMENT, spa->spa_comment,
		    0, ZPROP_SRC_LOCAL);
	}

	if (spa->spa_root != NULL)
		spa_prop_add_list(*nvp, ZPOOL_PROP_ALTROOT, spa->spa_root,
		    0, ZPROP_SRC_LOCAL);

	if ((dp = list_head(&spa->spa_config_list)) != NULL) {
		if (dp->scd_path == NULL) {
			spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
			    "none", 0, ZPROP_SRC_LOCAL);
		} else if (strcmp(dp->scd_path, spa_config_path) != 0) {
			spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
			    dp->scd_path, 0, ZPROP_SRC_LOCAL);
		}
	}
}

/*
 * Get zpool property values.
 */
int
spa_prop_get(spa_t *spa, nvlist_t **nvp)
{
	objset_t *mos = spa->spa_meta_objset;
	zap_cursor_t zc;
	zap_attribute_t za;
	int err;

	VERIFY(nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP) == 0);

	mutex_enter(&spa->spa_props_lock);

	/*
	 * Get properties from the spa config.
	 */
	spa_prop_get_config(spa, nvp);

	/* If no pool property object, there are no more props to get. */
	if (mos == NULL || spa->spa_pool_props_object == 0) {
		mutex_exit(&spa->spa_props_lock);
		return (0);
	}

	/*
	 * Get properties from the MOS pool property object.
	 */
	for (zap_cursor_init(&zc, mos, spa->spa_pool_props_object);
	    (err = zap_cursor_retrieve(&zc, &za)) == 0;
	    zap_cursor_advance(&zc)) {
		uint64_t intval = 0;
		char *strval = NULL;
		zprop_source_t src = ZPROP_SRC_DEFAULT;
		zpool_prop_t prop;

		if ((prop = zpool_name_to_prop(za.za_name)) == ZPROP_INVAL)
			continue;

		switch (za.za_integer_length) {
		case 8:
			/* integer property */
			if (za.za_first_integer !=
			    zpool_prop_default_numeric(prop))
				src = ZPROP_SRC_LOCAL;

			if (prop == ZPOOL_PROP_BOOTFS) {
				dsl_pool_t *dp;
				dsl_dataset_t *ds = NULL;

				dp = spa_get_dsl(spa);
				dsl_pool_config_enter(dp, FTAG);
				if (err = dsl_dataset_hold_obj(dp,
				    za.za_first_integer, FTAG, &ds)) {
					dsl_pool_config_exit(dp, FTAG);
					break;
				}

				strval = kmem_alloc(
				    MAXNAMELEN + strlen(MOS_DIR_NAME) + 1,
				    KM_SLEEP);
				dsl_dataset_name(ds, strval);
				dsl_dataset_rele(ds, FTAG);
				dsl_pool_config_exit(dp, FTAG);
			} else {
				strval = NULL;
				intval = za.za_first_integer;
			}

			spa_prop_add_list(*nvp, prop, strval, intval, src);

			if (strval != NULL)
				kmem_free(strval,
				    MAXNAMELEN + strlen(MOS_DIR_NAME) + 1);

			break;

		case 1:
			/* string property */
			strval = kmem_alloc(za.za_num_integers, KM_SLEEP);
			err = zap_lookup(mos, spa->spa_pool_props_object,
			    za.za_name, 1, za.za_num_integers, strval);
			if (err) {
				kmem_free(strval, za.za_num_integers);
				break;
			}
			spa_prop_add_list(*nvp, prop, strval, 0, src);
			kmem_free(strval, za.za_num_integers);
			break;

		default:
			break;
		}
	}
	zap_cursor_fini(&zc);
	mutex_exit(&spa->spa_props_lock);
out:
	if (err && err != ENOENT) {
		nvlist_free(*nvp);
		*nvp = NULL;
		return (err);
	}

	return (0);
}

/*
 * Validate the given pool properties nvlist and modify the list
 * for the property values to be set.
 */
static int
spa_prop_validate(spa_t *spa, nvlist_t *props)
{
	nvpair_t *elem;
	int error = 0, reset_bootfs = 0;
	uint64_t objnum = 0;
	boolean_t has_feature = B_FALSE;

	elem = NULL;
	while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
		uint64_t intval;
		char *strval, *slash, *check, *fname;
		const char *propname = nvpair_name(elem);
		zpool_prop_t prop = zpool_name_to_prop(propname);

		switch (prop) {
		case ZPROP_INVAL:
			if (!zpool_prop_feature(propname)) {
				error = SET_ERROR(EINVAL);
				break;
			}

			/*
			 * Sanitize the input.
			 */
			if (nvpair_type(elem) != DATA_TYPE_UINT64) {
				error = SET_ERROR(EINVAL);
				break;
			}

			if (nvpair_value_uint64(elem, &intval) != 0) {
				error = SET_ERROR(EINVAL);
				break;
			}

			if (intval != 0) {
				error = SET_ERROR(EINVAL);
				break;
			}

			fname = strchr(propname, '@') + 1;
			if (zfeature_lookup_name(fname, NULL) != 0) {
				error = SET_ERROR(EINVAL);
				break;
			}

			has_feature = B_TRUE;
			break;

		case ZPOOL_PROP_VERSION:
			error = nvpair_value_uint64(elem, &intval);
			if (!error &&
			    (intval < spa_version(spa) ||
			    intval > SPA_VERSION_BEFORE_FEATURES ||
			    has_feature))
				error = SET_ERROR(EINVAL);
			break;

		case ZPOOL_PROP_DELEGATION:
		case ZPOOL_PROP_AUTOREPLACE:
		case ZPOOL_PROP_LISTSNAPS:
		case ZPOOL_PROP_AUTOEXPAND:
			error = nvpair_value_uint64(elem, &intval);
			if (!error && intval > 1)
				error = SET_ERROR(EINVAL);
			break;

		case ZPOOL_PROP_BOOTFS:
			/*
			 * If the pool version is less than SPA_VERSION_BOOTFS,
			 * or the pool is still being created (version == 0),
			 * the bootfs property cannot be set.
			 */
			if (spa_version(spa) < SPA_VERSION_BOOTFS) {
				error = SET_ERROR(ENOTSUP);
				break;
			}

			/*
			 * Make sure the vdev config is bootable
			 */
			if (!vdev_is_bootable(spa->spa_root_vdev)) {
				error = SET_ERROR(ENOTSUP);
				break;
			}

			reset_bootfs = 1;

			error = nvpair_value_string(elem, &strval);

			if (!error) {
				objset_t *os;
				uint64_t compress;

				if (strval == NULL || strval[0] == '\0') {
					objnum = zpool_prop_default_numeric(
					    ZPOOL_PROP_BOOTFS);
					break;
				}

				if (error = dmu_objset_hold(strval, FTAG, &os))
					break;

				/* Must be ZPL and not gzip compressed. */

				if (dmu_objset_type(os) != DMU_OST_ZFS) {
					error = SET_ERROR(ENOTSUP);
				} else if ((error =
				    dsl_prop_get_int_ds(dmu_objset_ds(os),
				    zfs_prop_to_name(ZFS_PROP_COMPRESSION),
				    &compress)) == 0 &&
				    !BOOTFS_COMPRESS_VALID(compress)) {
					error = SET_ERROR(ENOTSUP);
				} else {
					objnum = dmu_objset_id(os);
				}
				dmu_objset_rele(os, FTAG);
			}
			break;

		case ZPOOL_PROP_FAILUREMODE:
			error = nvpair_value_uint64(elem, &intval);
			if (!error && (intval < ZIO_FAILURE_MODE_WAIT ||
			    intval > ZIO_FAILURE_MODE_PANIC))
				error = SET_ERROR(EINVAL);

			/*
			 * This is a special case which only occurs when
			 * the pool has completely failed. This allows
			 * the user to change the in-core failmode property
			 * without syncing it out to disk (I/Os might
			 * currently be blocked). We do this by returning
			 * EIO to the caller (spa_prop_set) to trick it
			 * into thinking we encountered a property validation
			 * error.
			 */
			if (!error && spa_suspended(spa)) {
				spa->spa_failmode = intval;
				error = SET_ERROR(EIO);
			}
			break;

		case ZPOOL_PROP_CACHEFILE:
			if ((error = nvpair_value_string(elem, &strval)) != 0)
				break;

			if (strval[0] == '\0')
				break;

			if (strcmp(strval, "none") == 0)
				break;

			if (strval[0] != '/') {
				error = SET_ERROR(EINVAL);
				break;
			}

			slash = strrchr(strval, '/');
			ASSERT(slash != NULL);

			if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
			    strcmp(slash, "/..") == 0)
				error = SET_ERROR(EINVAL);
			break;

		case ZPOOL_PROP_COMMENT:
			if ((error = nvpair_value_string(elem, &strval)) != 0)
				break;
			for (check = strval; *check != '\0'; check++) {
				/*
				 * The kernel doesn't have an easy isprint()
				 * check.  For this kernel check, we merely
				 * check ASCII apart from DEL.  Fix this if
				 * there is an easy-to-use kernel isprint().
				 */
				if (*check >= 0x7f) {
					error = SET_ERROR(EINVAL);
					break;
				}
			}
			if (strlen(strval) > ZPROP_MAX_COMMENT)
				error = E2BIG;
			break;

		case ZPOOL_PROP_DEDUPDITTO:
			if (spa_version(spa) < SPA_VERSION_DEDUP)
				error = SET_ERROR(ENOTSUP);
			else
				error = nvpair_value_uint64(elem, &intval);
			if (error == 0 &&
			    intval != 0 && intval < ZIO_DEDUPDITTO_MIN)
				error = SET_ERROR(EINVAL);
			break;
		}

		if (error)
			break;
	}

	if (!error && reset_bootfs) {
		error = nvlist_remove(props,
		    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), DATA_TYPE_STRING);

		if (!error) {
			error = nvlist_add_uint64(props,
			    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), objnum);
		}
	}

	return (error);
}

void
spa_configfile_set(spa_t *spa, nvlist_t *nvp, boolean_t need_sync)
{
	char *cachefile;
	spa_config_dirent_t *dp;

	if (nvlist_lookup_string(nvp, zpool_prop_to_name(ZPOOL_PROP_CACHEFILE),
	    &cachefile) != 0)
		return;

	dp = kmem_alloc(sizeof (spa_config_dirent_t),
	    KM_SLEEP);

	if (cachefile[0] == '\0')
		dp->scd_path = spa_strdup(spa_config_path);
	else if (strcmp(cachefile, "none") == 0)
		dp->scd_path = NULL;
	else
		dp->scd_path = spa_strdup(cachefile);

	list_insert_head(&spa->spa_config_list, dp);
	if (need_sync)
		spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
}

int
spa_prop_set(spa_t *spa, nvlist_t *nvp)
{
	int error;
	nvpair_t *elem = NULL;
	boolean_t need_sync = B_FALSE;

	if ((error = spa_prop_validate(spa, nvp)) != 0)
		return (error);

	while ((elem = nvlist_next_nvpair(nvp, elem)) != NULL) {
		zpool_prop_t prop = zpool_name_to_prop(nvpair_name(elem));

		if (prop == ZPOOL_PROP_CACHEFILE ||
		    prop == ZPOOL_PROP_ALTROOT ||
		    prop == ZPOOL_PROP_READONLY)
			continue;

		if (prop == ZPOOL_PROP_VERSION || prop == ZPROP_INVAL) {
			uint64_t ver;

			if (prop == ZPOOL_PROP_VERSION) {
				VERIFY(nvpair_value_uint64(elem, &ver) == 0);
			} else {
				ASSERT(zpool_prop_feature(nvpair_name(elem)));
				ver = SPA_VERSION_FEATURES;
				need_sync = B_TRUE;
			}

			/* Save time if the version is already set. */
			if (ver == spa_version(spa))
				continue;

			/*
			 * In addition to the pool directory object, we might
			 * create the pool properties object, the features for
			 * read object, the features for write object, or the
			 * feature descriptions object.
			 */
			error = dsl_sync_task(spa->spa_name, NULL,
			    spa_sync_version, &ver,
			    6, ZFS_SPACE_CHECK_RESERVED);
			if (error)
				return (error);
			continue;
		}

		need_sync = B_TRUE;
		break;
	}

	if (need_sync) {
		return (dsl_sync_task(spa->spa_name, NULL, spa_sync_props,
		    nvp, 6, ZFS_SPACE_CHECK_RESERVED));
	}

	return (0);
}

/*
 * If the bootfs property value is dsobj, clear it.
 */
void
spa_prop_clear_bootfs(spa_t *spa, uint64_t dsobj, dmu_tx_t *tx)
{
	if (spa->spa_bootfs == dsobj && spa->spa_pool_props_object != 0) {
		VERIFY(zap_remove(spa->spa_meta_objset,
		    spa->spa_pool_props_object,
		    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), tx) == 0);
		spa->spa_bootfs = 0;
	}
}

/*ARGSUSED*/
static int
spa_change_guid_check(void *arg, dmu_tx_t *tx)
{
	uint64_t *newguid = arg;
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
	vdev_t *rvd = spa->spa_root_vdev;
	uint64_t vdev_state;

	spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
	vdev_state = rvd->vdev_state;
	spa_config_exit(spa, SCL_STATE, FTAG);

	if (vdev_state != VDEV_STATE_HEALTHY)
		return (SET_ERROR(ENXIO));

	ASSERT3U(spa_guid(spa), !=, *newguid);

	return (0);
}

static void
spa_change_guid_sync(void *arg, dmu_tx_t *tx)
{
	uint64_t *newguid = arg;
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
	uint64_t oldguid;
	vdev_t *rvd = spa->spa_root_vdev;

	oldguid = spa_guid(spa);

	spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
	rvd->vdev_guid = *newguid;
	rvd->vdev_guid_sum += (*newguid - oldguid);
	vdev_config_dirty(rvd);
	spa_config_exit(spa, SCL_STATE, FTAG);

	spa_history_log_internal(spa, "guid change", tx, "old=%llu new=%llu",
	    oldguid, *newguid);
}

/*
 * Change the GUID for the pool.  This is done so that we can later
 * re-import a pool built from a clone of our own vdevs.  We will modify
 * the root vdev's guid, our own pool guid, and then mark all of our
 * vdevs dirty.  Note that we must make sure that all our vdevs are
 * online when we do this, or else any vdevs that weren't present
 * would be orphaned from our pool.  We are also going to issue a
 * sysevent to update any watchers.
 */
int
spa_change_guid(spa_t *spa)
{
	int error;
	uint64_t guid;

	mutex_enter(&spa->spa_vdev_top_lock);
	mutex_enter(&spa_namespace_lock);
	guid = spa_generate_guid(NULL);

	error = dsl_sync_task(spa->spa_name, spa_change_guid_check,
	    spa_change_guid_sync, &guid, 5, ZFS_SPACE_CHECK_RESERVED);

	if (error == 0) {
		spa_config_sync(spa, B_FALSE, B_TRUE);
		spa_event_notify(spa, NULL, ESC_ZFS_POOL_REGUID);
	}

	mutex_exit(&spa_namespace_lock);
	mutex_exit(&spa->spa_vdev_top_lock);

	return (error);
}

/*
 * ==========================================================================
 * SPA state manipulation (open/create/destroy/import/export)
 * ==========================================================================
 */

static int
spa_error_entry_compare(const void *a, const void *b)
{
	spa_error_entry_t *sa = (spa_error_entry_t *)a;
	spa_error_entry_t *sb = (spa_error_entry_t *)b;
	int ret;

	ret = bcmp(&sa->se_bookmark, &sb->se_bookmark,
	    sizeof (zbookmark_phys_t));

	if (ret < 0)
		return (-1);
	else if (ret > 0)
		return (1);
	else
		return (0);
}

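/*
 * spa_error_entry_compare() above provides the ordering for the
 * spa_errlist_last and spa_errlist_scrub AVL trees created in spa_activate()
 * and spa_get_errlists() below: entries are ordered by a byte-wise comparison
 * of their zbookmark_phys_t, so the ordering is arbitrary but consistent, and
 * entries with identical bookmarks compare equal.
 */
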
/*
 * Utility function which retrieves copies of the current logs and
 * re-initializes them in the process.
 */
void
spa_get_errlists(spa_t *spa, avl_tree_t *last, avl_tree_t *scrub)
{
	ASSERT(MUTEX_HELD(&spa->spa_errlist_lock));

	bcopy(&spa->spa_errlist_last, last, sizeof (avl_tree_t));
	bcopy(&spa->spa_errlist_scrub, scrub, sizeof (avl_tree_t));

	avl_create(&spa->spa_errlist_scrub,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
	avl_create(&spa->spa_errlist_last,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
}

/*
 * Create the taskq(s) backing one (zio type, taskq type) pair, sized
 * according to the corresponding zio_taskqs[][] entry.
 */
static void
spa_taskqs_init(spa_t *spa, zio_type_t t, zio_taskq_type_t q)
{
	const zio_taskq_info_t *ztip = &zio_taskqs[t][q];
	enum zti_modes mode = ztip->zti_mode;
	uint_t value = ztip->zti_value;
	uint_t count = ztip->zti_count;
	spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
	char name[32];
	uint_t flags = 0;
	boolean_t batch = B_FALSE;

	if (mode == ZTI_MODE_NULL) {
		tqs->stqs_count = 0;
		tqs->stqs_taskq = NULL;
		return;
	}

	ASSERT3U(count, >, 0);

	tqs->stqs_count = count;
	tqs->stqs_taskq = kmem_alloc(count * sizeof (taskq_t *), KM_SLEEP);

	switch (mode) {
	case ZTI_MODE_FIXED:
		ASSERT3U(value, >=, 1);
		value = MAX(value, 1);
		break;

	case ZTI_MODE_BATCH:
		batch = B_TRUE;
		flags |= TASKQ_THREADS_CPU_PCT;
		value = zio_taskq_batch_pct;
		break;

	default:
		panic("unrecognized mode for %s_%s taskq (%u:%u) in "
		    "spa_activate()",
		    zio_type_name[t], zio_taskq_types[q], mode, value);
		break;
	}

	for (uint_t i = 0; i < count; i++) {
		taskq_t *tq;

		if (count > 1) {
			(void) snprintf(name, sizeof (name), "%s_%s_%u",
			    zio_type_name[t], zio_taskq_types[q], i);
		} else {
			(void) snprintf(name, sizeof (name), "%s_%s",
			    zio_type_name[t], zio_taskq_types[q]);
		}

#ifdef SYSDC
		if (zio_taskq_sysdc && spa->spa_proc != &p0) {
			if (batch)
				flags |= TASKQ_DC_BATCH;

			tq = taskq_create_sysdc(name, value, 50, INT_MAX,
			    spa->spa_proc, zio_taskq_basedc, flags);
		} else {
#endif
			pri_t pri = maxclsyspri;
			/*
			 * The write issue taskq can be extremely CPU
			 * intensive.  Run it at slightly lower priority
			 * than the other taskqs.
			 */
			if (t == ZIO_TYPE_WRITE && q == ZIO_TASKQ_ISSUE)
				pri--;

			tq = taskq_create_proc(name, value, pri, 50,
			    INT_MAX, spa->spa_proc, flags);
#ifdef SYSDC
		}
#endif

		tqs->stqs_taskq[i] = tq;
	}
}

static void
spa_taskqs_fini(spa_t *spa, zio_type_t t, zio_taskq_type_t q)
{
	spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];

	if (tqs->stqs_taskq == NULL) {
		ASSERT0(tqs->stqs_count);
		return;
	}

	for (uint_t i = 0; i < tqs->stqs_count; i++) {
		ASSERT3P(tqs->stqs_taskq[i], !=, NULL);
		taskq_destroy(tqs->stqs_taskq[i]);
	}

	kmem_free(tqs->stqs_taskq, tqs->stqs_count * sizeof (taskq_t *));
	tqs->stqs_taskq = NULL;
}

/*
 * Dispatch a task to the appropriate taskq for the ZFS I/O type and priority.
 * Note that a type may have multiple discrete taskqs to avoid lock contention
 * on the taskq itself.  In that case we choose which taskq to use at random
 * by using the low bits of a fast clock (cpu_ticks() in the kernel,
 * gethrtime() otherwise).
 */
void
spa_taskq_dispatch_ent(spa_t *spa, zio_type_t t, zio_taskq_type_t q,
    task_func_t *func, void *arg, uint_t flags, taskq_ent_t *ent)
{
	spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
	taskq_t *tq;

	ASSERT3P(tqs->stqs_taskq, !=, NULL);
	ASSERT3U(tqs->stqs_count, !=, 0);

	if (tqs->stqs_count == 1) {
		tq = tqs->stqs_taskq[0];
	} else {
#ifdef _KERNEL
		tq = tqs->stqs_taskq[cpu_ticks() % tqs->stqs_count];
#else
		tq = tqs->stqs_taskq[gethrtime() % tqs->stqs_count];
#endif
	}

	taskq_dispatch_ent(tq, func, arg, flags, ent);
}

static void
spa_create_zio_taskqs(spa_t *spa)
{
	for (int t = 0; t < ZIO_TYPES; t++) {
		for (int q = 0; q < ZIO_TASKQ_TYPES; q++) {
			spa_taskqs_init(spa, t, q);
		}
	}
}

#ifdef _KERNEL
#ifdef SPA_PROCESS
static void
spa_thread(void *arg)
{
	callb_cpr_t cprinfo;

	spa_t *spa = arg;
	user_t *pu = PTOU(curproc);

	CALLB_CPR_INIT(&cprinfo, &spa->spa_proc_lock, callb_generic_cpr,
	    spa->spa_name);

	ASSERT(curproc != &p0);
	(void) snprintf(pu->u_psargs, sizeof (pu->u_psargs),
	    "zpool-%s", spa->spa_name);
	(void) strlcpy(pu->u_comm, pu->u_psargs, sizeof (pu->u_comm));

#ifdef PSRSET_BIND
	/* bind this thread to the requested psrset */
	if (zio_taskq_psrset_bind != PS_NONE) {
		pool_lock();
		mutex_enter(&cpu_lock);
		mutex_enter(&pidlock);
		mutex_enter(&curproc->p_lock);

		if (cpupart_bind_thread(curthread, zio_taskq_psrset_bind,
		    0, NULL, NULL) == 0) {
			curthread->t_bind_pset = zio_taskq_psrset_bind;
		} else {
			cmn_err(CE_WARN,
			    "Couldn't bind process for zfs pool \"%s\" to "
			    "pset %d\n", spa->spa_name, zio_taskq_psrset_bind);
		}

		mutex_exit(&curproc->p_lock);
		mutex_exit(&pidlock);
		mutex_exit(&cpu_lock);
		pool_unlock();
	}
#endif

#ifdef SYSDC
	if (zio_taskq_sysdc) {
		sysdc_thread_enter(curthread, 100, 0);
	}
#endif

	spa->spa_proc = curproc;
	spa->spa_did = curthread->t_did;

	spa_create_zio_taskqs(spa);

	mutex_enter(&spa->spa_proc_lock);
	ASSERT(spa->spa_proc_state == SPA_PROC_CREATED);

	spa->spa_proc_state = SPA_PROC_ACTIVE;
	cv_broadcast(&spa->spa_proc_cv);

	CALLB_CPR_SAFE_BEGIN(&cprinfo);
	while (spa->spa_proc_state == SPA_PROC_ACTIVE)
		cv_wait(&spa->spa_proc_cv, &spa->spa_proc_lock);
	CALLB_CPR_SAFE_END(&cprinfo, &spa->spa_proc_lock);

	ASSERT(spa->spa_proc_state == SPA_PROC_DEACTIVATE);
	spa->spa_proc_state = SPA_PROC_GONE;
	spa->spa_proc = &p0;
	cv_broadcast(&spa->spa_proc_cv);
	CALLB_CPR_EXIT(&cprinfo);	/* drops spa_proc_lock */

	mutex_enter(&curproc->p_lock);
	lwp_exit();
}
#endif	/* SPA_PROCESS */
#endif

/*
 * Activate an uninitialized pool.
 */
static void
spa_activate(spa_t *spa, int mode)
{
	ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);

	spa->spa_state = POOL_STATE_ACTIVE;
	spa->spa_mode = mode;

	spa->spa_normal_class = metaslab_class_create(spa, zfs_metaslab_ops);
	spa->spa_log_class = metaslab_class_create(spa, zfs_metaslab_ops);

	/* Try to create a covering process */
	mutex_enter(&spa->spa_proc_lock);
	ASSERT(spa->spa_proc_state == SPA_PROC_NONE);
	ASSERT(spa->spa_proc == &p0);
	spa->spa_did = 0;

#ifdef SPA_PROCESS
	/* Only create a process if we're going to be around a while. */
	if (spa_create_process && strcmp(spa->spa_name, TRYIMPORT_NAME) != 0) {
		if (newproc(spa_thread, (caddr_t)spa, syscid, maxclsyspri,
		    NULL, 0) == 0) {
			spa->spa_proc_state = SPA_PROC_CREATED;
			while (spa->spa_proc_state == SPA_PROC_CREATED) {
				cv_wait(&spa->spa_proc_cv,
				    &spa->spa_proc_lock);
			}
			ASSERT(spa->spa_proc_state == SPA_PROC_ACTIVE);
			ASSERT(spa->spa_proc != &p0);
			ASSERT(spa->spa_did != 0);
		} else {
#ifdef _KERNEL
			cmn_err(CE_WARN,
			    "Couldn't create process for zfs pool \"%s\"\n",
			    spa->spa_name);
#endif
		}
	}
#endif	/* SPA_PROCESS */
	mutex_exit(&spa->spa_proc_lock);

	/* If we didn't create a process, we need to create our taskqs. */
	ASSERT(spa->spa_proc == &p0);
	if (spa->spa_proc == &p0) {
		spa_create_zio_taskqs(spa);
	}

	/*
	 * Start TRIM thread.
	 */
	trim_thread_create(spa);

	list_create(&spa->spa_config_dirty_list, sizeof (vdev_t),
	    offsetof(vdev_t, vdev_config_dirty_node));
	list_create(&spa->spa_state_dirty_list, sizeof (vdev_t),
	    offsetof(vdev_t, vdev_state_dirty_node));

	txg_list_create(&spa->spa_vdev_txg_list,
	    offsetof(struct vdev, vdev_txg_node));

	avl_create(&spa->spa_errlist_scrub,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
	avl_create(&spa->spa_errlist_last,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
}

/*
 * Opposite of spa_activate().
 */
static void
spa_deactivate(spa_t *spa)
{
	ASSERT(spa->spa_sync_on == B_FALSE);
	ASSERT(spa->spa_dsl_pool == NULL);
	ASSERT(spa->spa_root_vdev == NULL);
	ASSERT(spa->spa_async_zio_root == NULL);
	ASSERT(spa->spa_state != POOL_STATE_UNINITIALIZED);

	/*
	 * Stop TRIM thread in case spa_unload() wasn't called directly
	 * before spa_deactivate().
	 */
	trim_thread_destroy(spa);

	txg_list_destroy(&spa->spa_vdev_txg_list);

	list_destroy(&spa->spa_config_dirty_list);
	list_destroy(&spa->spa_state_dirty_list);

	for (int t = 0; t < ZIO_TYPES; t++) {
		for (int q = 0; q < ZIO_TASKQ_TYPES; q++) {
			spa_taskqs_fini(spa, t, q);
		}
	}

	metaslab_class_destroy(spa->spa_normal_class);
	spa->spa_normal_class = NULL;

	metaslab_class_destroy(spa->spa_log_class);
	spa->spa_log_class = NULL;

	/*
	 * If this was part of an import or the open otherwise failed, we may
	 * still have errors left in the queues.  Empty them just in case.
	 */
	spa_errlog_drain(spa);

	avl_destroy(&spa->spa_errlist_scrub);
	avl_destroy(&spa->spa_errlist_last);

	spa->spa_state = POOL_STATE_UNINITIALIZED;

	mutex_enter(&spa->spa_proc_lock);
	if (spa->spa_proc_state != SPA_PROC_NONE) {
		ASSERT(spa->spa_proc_state == SPA_PROC_ACTIVE);
		spa->spa_proc_state = SPA_PROC_DEACTIVATE;
		cv_broadcast(&spa->spa_proc_cv);
		while (spa->spa_proc_state == SPA_PROC_DEACTIVATE) {
			ASSERT(spa->spa_proc != &p0);
			cv_wait(&spa->spa_proc_cv, &spa->spa_proc_lock);
		}
		ASSERT(spa->spa_proc_state == SPA_PROC_GONE);
		spa->spa_proc_state = SPA_PROC_NONE;
	}
	ASSERT(spa->spa_proc == &p0);
	mutex_exit(&spa->spa_proc_lock);

#ifdef SPA_PROCESS
	/*
	 * We want to make sure spa_thread() has actually exited the ZFS
	 * module, so that the module can't be unloaded out from underneath
	 * it.
	 */
	if (spa->spa_did != 0) {
		thread_join(spa->spa_did);
		spa->spa_did = 0;
	}
#endif	/* SPA_PROCESS */
}

/*
 * Verify a pool configuration, and construct the vdev tree appropriately. This
 * will create all the necessary vdevs in the appropriate layout, with each vdev
 * in the CLOSED state. This will prep the pool before open/creation/import.
 * All vdev validation is done by the vdev_alloc() routine.
 */
static int
spa_config_parse(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent,
    uint_t id, int atype)
{
	nvlist_t **child;
	uint_t children;
	int error;

	if ((error = vdev_alloc(spa, vdp, nv, parent, id, atype)) != 0)
		return (error);

	if ((*vdp)->vdev_ops->vdev_op_leaf)
		return (0);

	error = nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children);

	if (error == ENOENT)
		return (0);

	if (error) {
		vdev_free(*vdp);
		*vdp = NULL;
		return (SET_ERROR(EINVAL));
	}

	for (int c = 0; c < children; c++) {
		vdev_t *vd;
		if ((error = spa_config_parse(spa, &vd, child[c], *vdp, c,
		    atype)) != 0) {
			vdev_free(*vdp);
			*vdp = NULL;
			return (error);
		}
	}

	ASSERT(*vdp != NULL);

	return (0);
}

/*
 * Opposite of spa_load().
 */
static void
spa_unload(spa_t *spa)
{
	int i;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	/*
	 * Stop TRIM thread.
	 */
	trim_thread_destroy(spa);

	/*
	 * Stop async tasks.
	 */
	spa_async_suspend(spa);

	/*
	 * Stop syncing.
	 */
	if (spa->spa_sync_on) {
		txg_sync_stop(spa->spa_dsl_pool);
		spa->spa_sync_on = B_FALSE;
	}

	/*
	 * Wait for any outstanding async I/O to complete.
	 */
	if (spa->spa_async_zio_root != NULL) {
		(void) zio_wait(spa->spa_async_zio_root);
		spa->spa_async_zio_root = NULL;
	}

	bpobj_close(&spa->spa_deferred_bpobj);

	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);

	/*
	 * Close all vdevs.
	 */
	if (spa->spa_root_vdev)
		vdev_free(spa->spa_root_vdev);
	ASSERT(spa->spa_root_vdev == NULL);

	/*
	 * Close the dsl pool.
	 */
	if (spa->spa_dsl_pool) {
		dsl_pool_close(spa->spa_dsl_pool);
		spa->spa_dsl_pool = NULL;
		spa->spa_meta_objset = NULL;
	}

	ddt_unload(spa);

	/*
	 * Drop and purge level 2 cache
	 */
	spa_l2cache_drop(spa);

	for (i = 0; i < spa->spa_spares.sav_count; i++)
		vdev_free(spa->spa_spares.sav_vdevs[i]);
	if (spa->spa_spares.sav_vdevs) {
		kmem_free(spa->spa_spares.sav_vdevs,
		    spa->spa_spares.sav_count * sizeof (void *));
		spa->spa_spares.sav_vdevs = NULL;
	}
	if (spa->spa_spares.sav_config) {
		nvlist_free(spa->spa_spares.sav_config);
		spa->spa_spares.sav_config = NULL;
	}
	spa->spa_spares.sav_count = 0;

	for (i = 0; i < spa->spa_l2cache.sav_count; i++) {
		vdev_clear_stats(spa->spa_l2cache.sav_vdevs[i]);
		vdev_free(spa->spa_l2cache.sav_vdevs[i]);
	}
	if (spa->spa_l2cache.sav_vdevs) {
		kmem_free(spa->spa_l2cache.sav_vdevs,
		    spa->spa_l2cache.sav_count * sizeof (void *));
		spa->spa_l2cache.sav_vdevs = NULL;
	}
	if (spa->spa_l2cache.sav_config) {
		nvlist_free(spa->spa_l2cache.sav_config);
		spa->spa_l2cache.sav_config = NULL;
	}
	spa->spa_l2cache.sav_count = 0;

	spa->spa_async_suspended = 0;

	if (spa->spa_comment != NULL) {
		spa_strfree(spa->spa_comment);
		spa->spa_comment = NULL;
	}

	spa_config_exit(spa, SCL_ALL, FTAG);
}

/*
 * Load (or re-load) the current list of vdevs describing the active spares for
 * this pool.  When this is called, we have some form of basic information in
 * 'spa_spares.sav_config'.  We parse this into vdevs, try to open them, and
 * then re-generate a more complete list including status information.
 */
static void
spa_load_spares(spa_t *spa)
{
	nvlist_t **spares;
	uint_t nspares;
	int i;
	vdev_t *vd, *tvd;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	/*
	 * First, close and free any existing spare vdevs.
	 */
	for (i = 0; i < spa->spa_spares.sav_count; i++) {
		vd = spa->spa_spares.sav_vdevs[i];

		/* Undo the call to spa_activate() below */
		if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
		    B_FALSE)) != NULL && tvd->vdev_isspare)
			spa_spare_remove(tvd);
		vdev_close(vd);
		vdev_free(vd);
	}

	if (spa->spa_spares.sav_vdevs)
		kmem_free(spa->spa_spares.sav_vdevs,
		    spa->spa_spares.sav_count * sizeof (void *));

	if (spa->spa_spares.sav_config == NULL)
		nspares = 0;
	else
		VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
		    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);

	spa->spa_spares.sav_count = (int)nspares;
	spa->spa_spares.sav_vdevs = NULL;

	if (nspares == 0)
		return;

	/*
	 * Construct the array of vdevs, opening them to get status in the
	 * process.  For each spare, there are potentially two different vdev_t
	 * structures associated with it: one in the list of spares (used only
	 * for basic validation purposes) and one in the active vdev
	 * configuration (if it's spared in).  During this phase we open and
	 * validate each vdev on the spare list.  If the vdev also exists in
	 * the active configuration, then we also mark this vdev as an active
	 * spare.
	 */
	spa->spa_spares.sav_vdevs = kmem_alloc(nspares * sizeof (void *),
	    KM_SLEEP);
	for (i = 0; i < spa->spa_spares.sav_count; i++) {
		VERIFY(spa_config_parse(spa, &vd, spares[i], NULL, 0,
		    VDEV_ALLOC_SPARE) == 0);
		ASSERT(vd != NULL);

		spa->spa_spares.sav_vdevs[i] = vd;

		if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
		    B_FALSE)) != NULL) {
			if (!tvd->vdev_isspare)
				spa_spare_add(tvd);

			/*
			 * We only mark the spare active if we were successfully
			 * able to load the vdev.  Otherwise, importing a pool
			 * with a bad active spare would result in strange
			 * behavior, because multiple pools would think the
			 * spare is actively in use.
			 *
			 * There is a vulnerability here to an equally bizarre
			 * circumstance, where a dead active spare is later
			 * brought back to life (onlined or otherwise).  Given
			 * the rarity of this scenario, and the extra complexity
			 * it adds, we ignore the possibility.
			 */
			if (!vdev_is_dead(tvd))
				spa_spare_activate(tvd);
		}

		vd->vdev_top = vd;
		vd->vdev_aux = &spa->spa_spares;

		if (vdev_open(vd) != 0)
			continue;

		if (vdev_validate_aux(vd) == 0)
			spa_spare_add(vd);
	}

	/*
	 * Recompute the stashed list of spares, with status information
	 * this time.
	 */
	VERIFY(nvlist_remove(spa->spa_spares.sav_config, ZPOOL_CONFIG_SPARES,
	    DATA_TYPE_NVLIST_ARRAY) == 0);

	spares = kmem_alloc(spa->spa_spares.sav_count * sizeof (void *),
	    KM_SLEEP);
	for (i = 0; i < spa->spa_spares.sav_count; i++)
		spares[i] = vdev_config_generate(spa,
		    spa->spa_spares.sav_vdevs[i], B_TRUE, VDEV_CONFIG_SPARE);
	VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
	    ZPOOL_CONFIG_SPARES, spares, spa->spa_spares.sav_count) == 0);
	for (i = 0; i < spa->spa_spares.sav_count; i++)
		nvlist_free(spares[i]);
	kmem_free(spares, spa->spa_spares.sav_count * sizeof (void *));
}

/*
 * Load (or re-load) the current list of vdevs describing the active l2cache for
 * this pool.  When this is called, we have some form of basic information in
 * 'spa_l2cache.sav_config'.  We parse this into vdevs, try to open them, and
 * then re-generate a more complete list including status information.
 * Devices which are already active have their details maintained, and are
 * not re-opened.
 */
static void
spa_load_l2cache(spa_t *spa)
{
	nvlist_t **l2cache;
	uint_t nl2cache;
	int i, j, oldnvdevs;
	uint64_t guid;
	vdev_t *vd, **oldvdevs, **newvdevs;
	spa_aux_vdev_t *sav = &spa->spa_l2cache;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	if (sav->sav_config != NULL) {
		VERIFY(nvlist_lookup_nvlist_array(sav->sav_config,
		    ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
		newvdevs = kmem_alloc(nl2cache * sizeof (void *), KM_SLEEP);
	} else {
		nl2cache = 0;
		newvdevs = NULL;
	}

	oldvdevs = sav->sav_vdevs;
	oldnvdevs = sav->sav_count;
	sav->sav_vdevs = NULL;
	sav->sav_count = 0;

	/*
	 * Process new nvlist of vdevs.
	 */
	for (i = 0; i < nl2cache; i++) {
		VERIFY(nvlist_lookup_uint64(l2cache[i], ZPOOL_CONFIG_GUID,
		    &guid) == 0);

		newvdevs[i] = NULL;
		for (j = 0; j < oldnvdevs; j++) {
			vd = oldvdevs[j];
			if (vd != NULL && guid == vd->vdev_guid) {
				/*
				 * Retain previous vdev for add/remove ops.
				 */
1508185029Spjd */ 1509185029Spjd newvdevs[i] = vd; 1510185029Spjd oldvdevs[j] = NULL; 1511185029Spjd break; 1512185029Spjd } 1513185029Spjd } 1514185029Spjd 1515185029Spjd if (newvdevs[i] == NULL) { 1516185029Spjd /* 1517185029Spjd * Create new vdev 1518185029Spjd */ 1519185029Spjd VERIFY(spa_config_parse(spa, &vd, l2cache[i], NULL, 0, 1520185029Spjd VDEV_ALLOC_L2CACHE) == 0); 1521185029Spjd ASSERT(vd != NULL); 1522185029Spjd newvdevs[i] = vd; 1523185029Spjd 1524185029Spjd /* 1525185029Spjd * Commit this vdev as an l2cache device, 1526185029Spjd * even if it fails to open. 1527185029Spjd */ 1528185029Spjd spa_l2cache_add(vd); 1529185029Spjd 1530185029Spjd vd->vdev_top = vd; 1531185029Spjd vd->vdev_aux = sav; 1532185029Spjd 1533185029Spjd spa_l2cache_activate(vd); 1534185029Spjd 1535185029Spjd if (vdev_open(vd) != 0) 1536185029Spjd continue; 1537185029Spjd 1538185029Spjd (void) vdev_validate_aux(vd); 1539185029Spjd 1540219089Spjd if (!vdev_is_dead(vd)) 1541219089Spjd l2arc_add_vdev(spa, vd); 1542185029Spjd } 1543185029Spjd } 1544185029Spjd 1545185029Spjd /* 1546185029Spjd * Purge vdevs that were dropped 1547185029Spjd */ 1548185029Spjd for (i = 0; i < oldnvdevs; i++) { 1549185029Spjd uint64_t pool; 1550185029Spjd 1551185029Spjd vd = oldvdevs[i]; 1552185029Spjd if (vd != NULL) { 1553230514Smm ASSERT(vd->vdev_isl2cache); 1554230514Smm 1555209962Smm if (spa_l2cache_exists(vd->vdev_guid, &pool) && 1556209962Smm pool != 0ULL && l2arc_vdev_present(vd)) 1557185029Spjd l2arc_remove_vdev(vd); 1558230514Smm vdev_clear_stats(vd); 1559230514Smm vdev_free(vd); 1560185029Spjd } 1561185029Spjd } 1562185029Spjd 1563185029Spjd if (oldvdevs) 1564185029Spjd kmem_free(oldvdevs, oldnvdevs * sizeof (void *)); 1565185029Spjd 1566185029Spjd if (sav->sav_config == NULL) 1567185029Spjd goto out; 1568185029Spjd 1569185029Spjd sav->sav_vdevs = newvdevs; 1570185029Spjd sav->sav_count = (int)nl2cache; 1571185029Spjd 1572185029Spjd /* 1573185029Spjd * Recompute the stashed list of l2cache devices, with status 1574185029Spjd * information this time. 
1575185029Spjd */ 1576185029Spjd VERIFY(nvlist_remove(sav->sav_config, ZPOOL_CONFIG_L2CACHE, 1577185029Spjd DATA_TYPE_NVLIST_ARRAY) == 0); 1578185029Spjd 1579185029Spjd l2cache = kmem_alloc(sav->sav_count * sizeof (void *), KM_SLEEP); 1580185029Spjd for (i = 0; i < sav->sav_count; i++) 1581185029Spjd l2cache[i] = vdev_config_generate(spa, 1582219089Spjd sav->sav_vdevs[i], B_TRUE, VDEV_CONFIG_L2CACHE); 1583185029Spjd VERIFY(nvlist_add_nvlist_array(sav->sav_config, 1584185029Spjd ZPOOL_CONFIG_L2CACHE, l2cache, sav->sav_count) == 0); 1585185029Spjdout: 1586185029Spjd for (i = 0; i < sav->sav_count; i++) 1587185029Spjd nvlist_free(l2cache[i]); 1588185029Spjd if (sav->sav_count) 1589185029Spjd kmem_free(l2cache, sav->sav_count * sizeof (void *)); 1590185029Spjd} 1591185029Spjd 1592168404Spjdstatic int 1593168404Spjdload_nvlist(spa_t *spa, uint64_t obj, nvlist_t **value) 1594168404Spjd{ 1595168404Spjd dmu_buf_t *db; 1596168404Spjd char *packed = NULL; 1597168404Spjd size_t nvsize = 0; 1598168404Spjd int error; 1599168404Spjd *value = NULL; 1600168404Spjd 1601262676Sdelphij error = dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db); 1602262676Sdelphij if (error != 0) 1603262676Sdelphij return (error); 1604168404Spjd nvsize = *(uint64_t *)db->db_data; 1605168404Spjd dmu_buf_rele(db, FTAG); 1606168404Spjd 1607168404Spjd packed = kmem_alloc(nvsize, KM_SLEEP); 1608209962Smm error = dmu_read(spa->spa_meta_objset, obj, 0, nvsize, packed, 1609209962Smm DMU_READ_PREFETCH); 1610168404Spjd if (error == 0) 1611168404Spjd error = nvlist_unpack(packed, nvsize, value, 0); 1612168404Spjd kmem_free(packed, nvsize); 1613168404Spjd 1614168404Spjd return (error); 1615168404Spjd} 1616168404Spjd 1617168404Spjd/* 1618185029Spjd * Checks to see if the given vdev could not be opened, in which case we post a 1619185029Spjd * sysevent to notify the autoreplace code that the device has been removed. 1620185029Spjd */ 1621185029Spjdstatic void 1622185029Spjdspa_check_removed(vdev_t *vd) 1623185029Spjd{ 1624219089Spjd for (int c = 0; c < vd->vdev_children; c++) 1625185029Spjd spa_check_removed(vd->vdev_child[c]); 1626185029Spjd 1627249188Smm if (vd->vdev_ops->vdev_op_leaf && vdev_is_dead(vd) && 1628249188Smm !vd->vdev_ishole) { 1629185029Spjd zfs_post_autoreplace(vd->vdev_spa, vd); 1630185029Spjd spa_event_notify(vd->vdev_spa, vd, ESC_ZFS_VDEV_CHECK); 1631185029Spjd } 1632185029Spjd} 1633185029Spjd 1634185029Spjd/* 1635219089Spjd * Validate the current config against the MOS config 1636213197Smm */ 1637219089Spjdstatic boolean_t 1638219089Spjdspa_config_valid(spa_t *spa, nvlist_t *config) 1639213197Smm{ 1640219089Spjd vdev_t *mrvd, *rvd = spa->spa_root_vdev; 1641219089Spjd nvlist_t *nv; 1642213197Smm 1643219089Spjd VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nv) == 0); 1644213197Smm 1645219089Spjd spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 1646219089Spjd VERIFY(spa_config_parse(spa, &mrvd, nv, NULL, 0, VDEV_ALLOC_LOAD) == 0); 1647219089Spjd 1648219089Spjd ASSERT3U(rvd->vdev_children, ==, mrvd->vdev_children); 1649219089Spjd 1650219089Spjd /* 1651219089Spjd * If we're doing a normal import, then build up any additional 1652219089Spjd * diagnostic information about missing devices in this config. 1653219089Spjd * We'll pass this up to the user for further processing. 
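 *
 * Only top-level vdevs that are missing from the in-core tree but
 * present as log devices in the MOS copy are collected by the loop
 * below; the resulting list is attached to spa_load_info as
 * ZPOOL_CONFIG_MISSING_DEVICES.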
1654219089Spjd */ 1655219089Spjd if (!(spa->spa_import_flags & ZFS_IMPORT_MISSING_LOG)) { 1656219089Spjd nvlist_t **child, *nv; 1657219089Spjd uint64_t idx = 0; 1658219089Spjd 1659219089Spjd child = kmem_alloc(rvd->vdev_children * sizeof (nvlist_t **), 1660219089Spjd KM_SLEEP); 1661219089Spjd VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0); 1662219089Spjd 1663219089Spjd for (int c = 0; c < rvd->vdev_children; c++) { 1664219089Spjd vdev_t *tvd = rvd->vdev_child[c]; 1665219089Spjd vdev_t *mtvd = mrvd->vdev_child[c]; 1666219089Spjd 1667219089Spjd if (tvd->vdev_ops == &vdev_missing_ops && 1668219089Spjd mtvd->vdev_ops != &vdev_missing_ops && 1669219089Spjd mtvd->vdev_islog) 1670219089Spjd child[idx++] = vdev_config_generate(spa, mtvd, 1671219089Spjd B_FALSE, 0); 1672219089Spjd } 1673219089Spjd 1674219089Spjd if (idx) { 1675219089Spjd VERIFY(nvlist_add_nvlist_array(nv, 1676219089Spjd ZPOOL_CONFIG_CHILDREN, child, idx) == 0); 1677219089Spjd VERIFY(nvlist_add_nvlist(spa->spa_load_info, 1678219089Spjd ZPOOL_CONFIG_MISSING_DEVICES, nv) == 0); 1679219089Spjd 1680219089Spjd for (int i = 0; i < idx; i++) 1681219089Spjd nvlist_free(child[i]); 1682219089Spjd } 1683219089Spjd nvlist_free(nv); 1684219089Spjd kmem_free(child, rvd->vdev_children * sizeof (char **)); 1685219089Spjd } 1686219089Spjd 1687219089Spjd /* 1688219089Spjd * Compare the root vdev tree with the information we have 1689219089Spjd * from the MOS config (mrvd). Check each top-level vdev 1690219089Spjd * with the corresponding MOS config top-level (mtvd). 1691219089Spjd */ 1692219089Spjd for (int c = 0; c < rvd->vdev_children; c++) { 1693213197Smm vdev_t *tvd = rvd->vdev_child[c]; 1694219089Spjd vdev_t *mtvd = mrvd->vdev_child[c]; 1695213197Smm 1696219089Spjd /* 1697219089Spjd * Resolve any "missing" vdevs in the current configuration. 1698219089Spjd * If we find that the MOS config has more accurate information 1699219089Spjd * about the top-level vdev then use that vdev instead. 1700219089Spjd */ 1701219089Spjd if (tvd->vdev_ops == &vdev_missing_ops && 1702219089Spjd mtvd->vdev_ops != &vdev_missing_ops) { 1703219089Spjd 1704219089Spjd if (!(spa->spa_import_flags & ZFS_IMPORT_MISSING_LOG)) 1705219089Spjd continue; 1706219089Spjd 1707219089Spjd /* 1708219089Spjd * Device specific actions. 1709219089Spjd */ 1710219089Spjd if (mtvd->vdev_islog) { 1711219089Spjd spa_set_log_state(spa, SPA_LOG_CLEAR); 1712219089Spjd } else { 1713219089Spjd /* 1714219089Spjd * XXX - once we have 'readonly' pool 1715219089Spjd * support we should be able to handle 1716219089Spjd * missing data devices by transitioning 1717219089Spjd * the pool to readonly. 1718219089Spjd */ 1719219089Spjd continue; 1720219089Spjd } 1721219089Spjd 1722219089Spjd /* 1723219089Spjd * Swap the missing vdev with the data we were 1724219089Spjd * able to obtain from the MOS config. 1725219089Spjd */ 1726219089Spjd vdev_remove_child(rvd, tvd); 1727219089Spjd vdev_remove_child(mrvd, mtvd); 1728219089Spjd 1729219089Spjd vdev_add_child(rvd, mtvd); 1730219089Spjd vdev_add_child(mrvd, tvd); 1731219089Spjd 1732219089Spjd spa_config_exit(spa, SCL_ALL, FTAG); 1733219089Spjd vdev_load(mtvd); 1734219089Spjd spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 1735219089Spjd 1736219089Spjd vdev_reopen(rvd); 1737219089Spjd } else if (mtvd->vdev_islog) { 1738219089Spjd /* 1739219089Spjd * Load the slog device's state from the MOS config 1740219089Spjd * since it's possible that the label does not 1741219089Spjd * contain the most up-to-date information. 
1742219089Spjd */ 1743219089Spjd vdev_load_log_state(tvd, mtvd); 1744219089Spjd vdev_reopen(tvd); 1745219089Spjd } 1746213197Smm } 1747219089Spjd vdev_free(mrvd); 1748219089Spjd spa_config_exit(spa, SCL_ALL, FTAG); 1749219089Spjd 1750219089Spjd /* 1751219089Spjd * Ensure we were able to validate the config. 1752219089Spjd */ 1753219089Spjd return (rvd->vdev_guid_sum == spa->spa_uberblock.ub_guid_sum); 1754213197Smm} 1755213197Smm 1756213197Smm/* 1757185029Spjd * Check for missing log devices 1758185029Spjd */ 1759248571Smmstatic boolean_t 1760185029Spjdspa_check_logs(spa_t *spa) 1761185029Spjd{ 1762248571Smm boolean_t rv = B_FALSE; 1763248571Smm 1764185029Spjd switch (spa->spa_log_state) { 1765185029Spjd case SPA_LOG_MISSING: 1766185029Spjd /* need to recheck in case slog has been restored */ 1767185029Spjd case SPA_LOG_UNKNOWN: 1768248571Smm rv = (dmu_objset_find(spa->spa_name, zil_check_log_chain, 1769248571Smm NULL, DS_FIND_CHILDREN) != 0); 1770248571Smm if (rv) 1771219089Spjd spa_set_log_state(spa, SPA_LOG_MISSING); 1772185029Spjd break; 1773185029Spjd } 1774248571Smm return (rv); 1775185029Spjd} 1776185029Spjd 1777219089Spjdstatic boolean_t 1778219089Spjdspa_passivate_log(spa_t *spa) 1779219089Spjd{ 1780219089Spjd vdev_t *rvd = spa->spa_root_vdev; 1781219089Spjd boolean_t slog_found = B_FALSE; 1782219089Spjd 1783219089Spjd ASSERT(spa_config_held(spa, SCL_ALLOC, RW_WRITER)); 1784219089Spjd 1785219089Spjd if (!spa_has_slogs(spa)) 1786219089Spjd return (B_FALSE); 1787219089Spjd 1788219089Spjd for (int c = 0; c < rvd->vdev_children; c++) { 1789219089Spjd vdev_t *tvd = rvd->vdev_child[c]; 1790219089Spjd metaslab_group_t *mg = tvd->vdev_mg; 1791219089Spjd 1792219089Spjd if (tvd->vdev_islog) { 1793219089Spjd metaslab_group_passivate(mg); 1794219089Spjd slog_found = B_TRUE; 1795219089Spjd } 1796219089Spjd } 1797219089Spjd 1798219089Spjd return (slog_found); 1799219089Spjd} 1800219089Spjd 1801219089Spjdstatic void 1802219089Spjdspa_activate_log(spa_t *spa) 1803219089Spjd{ 1804219089Spjd vdev_t *rvd = spa->spa_root_vdev; 1805219089Spjd 1806219089Spjd ASSERT(spa_config_held(spa, SCL_ALLOC, RW_WRITER)); 1807219089Spjd 1808219089Spjd for (int c = 0; c < rvd->vdev_children; c++) { 1809219089Spjd vdev_t *tvd = rvd->vdev_child[c]; 1810219089Spjd metaslab_group_t *mg = tvd->vdev_mg; 1811219089Spjd 1812219089Spjd if (tvd->vdev_islog) 1813219089Spjd metaslab_group_activate(mg); 1814219089Spjd } 1815219089Spjd} 1816219089Spjd 1817219089Spjdint 1818219089Spjdspa_offline_log(spa_t *spa) 1819219089Spjd{ 1820248571Smm int error; 1821219089Spjd 1822248571Smm error = dmu_objset_find(spa_name(spa), zil_vdev_offline, 1823248571Smm NULL, DS_FIND_CHILDREN); 1824248571Smm if (error == 0) { 1825219089Spjd /* 1826219089Spjd * We successfully offlined the log device, sync out the 1827219089Spjd * current txg so that the "stubby" block can be removed 1828219089Spjd * by zil_sync(). 
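 *
 * (zil_vdev_offline was run on every dataset in the pool via the
 * dmu_objset_find() call above; we only reach this point, and wait
 * for the txg to sync, when all of those calls returned success.)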
1829219089Spjd */ 1830219089Spjd txg_wait_synced(spa->spa_dsl_pool, 0); 1831219089Spjd } 1832219089Spjd return (error); 1833219089Spjd} 1834219089Spjd 1835219089Spjdstatic void 1836219089Spjdspa_aux_check_removed(spa_aux_vdev_t *sav) 1837219089Spjd{ 1838219089Spjd int i; 1839219089Spjd 1840219089Spjd for (i = 0; i < sav->sav_count; i++) 1841219089Spjd spa_check_removed(sav->sav_vdevs[i]); 1842219089Spjd} 1843219089Spjd 1844219089Spjdvoid 1845219089Spjdspa_claim_notify(zio_t *zio) 1846219089Spjd{ 1847219089Spjd spa_t *spa = zio->io_spa; 1848219089Spjd 1849219089Spjd if (zio->io_error) 1850219089Spjd return; 1851219089Spjd 1852219089Spjd mutex_enter(&spa->spa_props_lock); /* any mutex will do */ 1853219089Spjd if (spa->spa_claim_max_txg < zio->io_bp->blk_birth) 1854219089Spjd spa->spa_claim_max_txg = zio->io_bp->blk_birth; 1855219089Spjd mutex_exit(&spa->spa_props_lock); 1856219089Spjd} 1857219089Spjd 1858219089Spjdtypedef struct spa_load_error { 1859219089Spjd uint64_t sle_meta_count; 1860219089Spjd uint64_t sle_data_count; 1861219089Spjd} spa_load_error_t; 1862219089Spjd 1863219089Spjdstatic void 1864219089Spjdspa_load_verify_done(zio_t *zio) 1865219089Spjd{ 1866219089Spjd blkptr_t *bp = zio->io_bp; 1867219089Spjd spa_load_error_t *sle = zio->io_private; 1868219089Spjd dmu_object_type_t type = BP_GET_TYPE(bp); 1869219089Spjd int error = zio->io_error; 1870268720Sdelphij spa_t *spa = zio->io_spa; 1871219089Spjd 1872219089Spjd if (error) { 1873236884Smm if ((BP_GET_LEVEL(bp) != 0 || DMU_OT_IS_METADATA(type)) && 1874219089Spjd type != DMU_OT_INTENT_LOG) 1875219089Spjd atomic_add_64(&sle->sle_meta_count, 1); 1876219089Spjd else 1877219089Spjd atomic_add_64(&sle->sle_data_count, 1); 1878219089Spjd } 1879219089Spjd zio_data_buf_free(zio->io_data, zio->io_size); 1880268720Sdelphij 1881268720Sdelphij mutex_enter(&spa->spa_scrub_lock); 1882268720Sdelphij spa->spa_scrub_inflight--; 1883268720Sdelphij cv_broadcast(&spa->spa_scrub_io_cv); 1884268720Sdelphij mutex_exit(&spa->spa_scrub_lock); 1885219089Spjd} 1886219089Spjd 1887268720Sdelphij/* 1888268720Sdelphij * Maximum number of concurrent scrub i/os to create while verifying 1889268720Sdelphij * a pool while importing it. 
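 *
 * The limit is enforced in spa_load_verify_cb(), which cv_wait()s on
 * spa_scrub_io_cv while spa_scrub_inflight is at the cap;
 * spa_load_verify_done() decrements the count and broadcasts the cv.
 * The knobs below are exported as vfs.zfs.* sysctls (CTLFLAG_RWTUN, so
 * they should also be settable as loader tunables), e.g.:
 *
 *	# sysctl vfs.zfs.spa_load_verify_maxinflight=1000
 *	# sysctl vfs.zfs.spa_load_verify_metadata=0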
1890268720Sdelphij */ 1891268720Sdelphijint spa_load_verify_maxinflight = 10000; 1892268720Sdelphijboolean_t spa_load_verify_metadata = B_TRUE; 1893268720Sdelphijboolean_t spa_load_verify_data = B_TRUE; 1894268720Sdelphij 1895268720SdelphijSYSCTL_INT(_vfs_zfs, OID_AUTO, spa_load_verify_maxinflight, CTLFLAG_RWTUN, 1896268720Sdelphij &spa_load_verify_maxinflight, 0, 1897268720Sdelphij "Maximum number of concurrent scrub I/Os to create while verifying a " 1898268720Sdelphij "pool while importing it"); 1899268720Sdelphij 1900268720SdelphijSYSCTL_INT(_vfs_zfs, OID_AUTO, spa_load_verify_metadata, CTLFLAG_RWTUN, 1901268720Sdelphij &spa_load_verify_metadata, 0, 1902268720Sdelphij "Check metadata on import?"); 1903268720Sdelphij 1904268720SdelphijSYSCTL_INT(_vfs_zfs, OID_AUTO, spa_load_verify_data, CTLFLAG_RWTUN, 1905268720Sdelphij &spa_load_verify_data, 0, 1906268720Sdelphij "Check user data on import?"); 1907268720Sdelphij 1908219089Spjd/*ARGSUSED*/ 1909219089Spjdstatic int 1910219089Spjdspa_load_verify_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp, 1911268123Sdelphij const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg) 1912219089Spjd{ 1913268720Sdelphij if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp)) 1914268720Sdelphij return (0); 1915268720Sdelphij /* 1916268720Sdelphij * Note: normally this routine will not be called if 1917268720Sdelphij * spa_load_verify_metadata is not set. However, it may be useful 1918268720Sdelphij * to manually set the flag after the traversal has begun. 1919268720Sdelphij */ 1920268720Sdelphij if (!spa_load_verify_metadata) 1921268720Sdelphij return (0); 1922268720Sdelphij if (BP_GET_BUFC_TYPE(bp) == ARC_BUFC_DATA && !spa_load_verify_data) 1923268720Sdelphij return (0); 1924219089Spjd 1925268720Sdelphij zio_t *rio = arg; 1926268720Sdelphij size_t size = BP_GET_PSIZE(bp); 1927268720Sdelphij void *data = zio_data_buf_alloc(size); 1928268720Sdelphij 1929268720Sdelphij mutex_enter(&spa->spa_scrub_lock); 1930268720Sdelphij while (spa->spa_scrub_inflight >= spa_load_verify_maxinflight) 1931268720Sdelphij cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock); 1932268720Sdelphij spa->spa_scrub_inflight++; 1933268720Sdelphij mutex_exit(&spa->spa_scrub_lock); 1934268720Sdelphij 1935268720Sdelphij zio_nowait(zio_read(rio, spa, bp, data, size, 1936268720Sdelphij spa_load_verify_done, rio->io_private, ZIO_PRIORITY_SCRUB, 1937268720Sdelphij ZIO_FLAG_SPECULATIVE | ZIO_FLAG_CANFAIL | 1938268720Sdelphij ZIO_FLAG_SCRUB | ZIO_FLAG_RAW, zb)); 1939219089Spjd return (0); 1940219089Spjd} 1941219089Spjd 1942219089Spjdstatic int 1943219089Spjdspa_load_verify(spa_t *spa) 1944219089Spjd{ 1945219089Spjd zio_t *rio; 1946219089Spjd spa_load_error_t sle = { 0 }; 1947219089Spjd zpool_rewind_policy_t policy; 1948219089Spjd boolean_t verify_ok = B_FALSE; 1949268720Sdelphij int error = 0; 1950219089Spjd 1951219089Spjd zpool_get_rewind_policy(spa->spa_config, &policy); 1952219089Spjd 1953219089Spjd if (policy.zrp_request & ZPOOL_NEVER_REWIND) 1954219089Spjd return (0); 1955219089Spjd 1956219089Spjd rio = zio_root(spa, NULL, &sle, 1957219089Spjd ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE); 1958219089Spjd 1959268720Sdelphij if (spa_load_verify_metadata) { 1960268720Sdelphij error = traverse_pool(spa, spa->spa_verify_min_txg, 1961268720Sdelphij TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA, 1962268720Sdelphij spa_load_verify_cb, rio); 1963268720Sdelphij } 1964219089Spjd 1965219089Spjd (void) zio_wait(rio); 1966219089Spjd 1967219089Spjd spa->spa_load_meta_errors = sle.sle_meta_count; 1968219089Spjd 
spa->spa_load_data_errors = sle.sle_data_count; 1969219089Spjd 1970219089Spjd if (!error && sle.sle_meta_count <= policy.zrp_maxmeta && 1971219089Spjd sle.sle_data_count <= policy.zrp_maxdata) { 1972219089Spjd int64_t loss = 0; 1973219089Spjd 1974219089Spjd verify_ok = B_TRUE; 1975219089Spjd spa->spa_load_txg = spa->spa_uberblock.ub_txg; 1976219089Spjd spa->spa_load_txg_ts = spa->spa_uberblock.ub_timestamp; 1977219089Spjd 1978219089Spjd loss = spa->spa_last_ubsync_txg_ts - spa->spa_load_txg_ts; 1979219089Spjd VERIFY(nvlist_add_uint64(spa->spa_load_info, 1980219089Spjd ZPOOL_CONFIG_LOAD_TIME, spa->spa_load_txg_ts) == 0); 1981219089Spjd VERIFY(nvlist_add_int64(spa->spa_load_info, 1982219089Spjd ZPOOL_CONFIG_REWIND_TIME, loss) == 0); 1983219089Spjd VERIFY(nvlist_add_uint64(spa->spa_load_info, 1984219089Spjd ZPOOL_CONFIG_LOAD_DATA_ERRORS, sle.sle_data_count) == 0); 1985219089Spjd } else { 1986219089Spjd spa->spa_load_max_txg = spa->spa_uberblock.ub_txg; 1987219089Spjd } 1988219089Spjd 1989219089Spjd if (error) { 1990219089Spjd if (error != ENXIO && error != EIO) 1991249195Smm error = SET_ERROR(EIO); 1992219089Spjd return (error); 1993219089Spjd } 1994219089Spjd 1995219089Spjd return (verify_ok ? 0 : EIO); 1996219089Spjd} 1997219089Spjd 1998185029Spjd/* 1999219089Spjd * Find a value in the pool props object. 2000168404Spjd */ 2001219089Spjdstatic void 2002219089Spjdspa_prop_find(spa_t *spa, zpool_prop_t prop, uint64_t *val) 2003219089Spjd{ 2004219089Spjd (void) zap_lookup(spa->spa_meta_objset, spa->spa_pool_props_object, 2005219089Spjd zpool_prop_to_name(prop), sizeof (uint64_t), 1, val); 2006219089Spjd} 2007219089Spjd 2008219089Spjd/* 2009219089Spjd * Find a value in the pool directory object. 2010219089Spjd */ 2011168404Spjdstatic int 2012219089Spjdspa_dir_prop(spa_t *spa, const char *name, uint64_t *val) 2013168404Spjd{ 2014219089Spjd return (zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT, 2015219089Spjd name, sizeof (uint64_t), 1, val)); 2016219089Spjd} 2017168404Spjd 2018219089Spjdstatic int 2019219089Spjdspa_vdev_err(vdev_t *vdev, vdev_aux_t aux, int err) 2020219089Spjd{ 2021219089Spjd vdev_set_state(vdev, B_TRUE, VDEV_STATE_CANT_OPEN, aux); 2022219089Spjd return (err); 2023219089Spjd} 2024219089Spjd 2025219089Spjd/* 2026219089Spjd * Fix up config after a partly-completed split. This is done with the 2027219089Spjd * ZPOOL_CONFIG_SPLIT nvlist. Both the splitting pool and the split-off 2028219089Spjd * pool have that entry in their config, but only the splitting one contains 2029219089Spjd * a list of all the guids of the vdevs that are being split off. 2030219089Spjd * 2031219089Spjd * This function determines what to do with that list: either rejoin 2032219089Spjd * all the disks to the pool, or complete the splitting process. To attempt 2033219089Spjd * the rejoin, each disk that is offlined is marked online again, and 2034219089Spjd * we do a reopen() call. If the vdev label for every disk that was 2035219089Spjd * marked online indicates it was successfully split off (VDEV_AUX_SPLIT_POOL) 2036219089Spjd * then we call vdev_split() on each disk, and complete the split. 2037219089Spjd * 2038219089Spjd * Otherwise we leave the config alone, with all the vdevs in place in 2039219089Spjd * the original pool. 
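 *
 * For example, if the split list holds two guids and, after the reopen,
 * both of the corresponding vdevs report vs_aux == VDEV_AUX_SPLIT_POOL
 * (or are no longer found in this pool at all), vdev_split() is called
 * on each of them and the split completes; if any of them reports
 * anything else, none are split off and the pool keeps them all.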
2040219089Spjd */ 2041219089Spjdstatic void 2042219089Spjdspa_try_repair(spa_t *spa, nvlist_t *config) 2043219089Spjd{ 2044219089Spjd uint_t extracted; 2045219089Spjd uint64_t *glist; 2046219089Spjd uint_t i, gcount; 2047219089Spjd nvlist_t *nvl; 2048219089Spjd vdev_t **vd; 2049219089Spjd boolean_t attempt_reopen; 2050219089Spjd 2051219089Spjd if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_SPLIT, &nvl) != 0) 2052219089Spjd return; 2053219089Spjd 2054219089Spjd /* check that the config is complete */ 2055219089Spjd if (nvlist_lookup_uint64_array(nvl, ZPOOL_CONFIG_SPLIT_LIST, 2056219089Spjd &glist, &gcount) != 0) 2057219089Spjd return; 2058219089Spjd 2059219089Spjd vd = kmem_zalloc(gcount * sizeof (vdev_t *), KM_SLEEP); 2060219089Spjd 2061219089Spjd /* attempt to online all the vdevs & validate */ 2062219089Spjd attempt_reopen = B_TRUE; 2063219089Spjd for (i = 0; i < gcount; i++) { 2064219089Spjd if (glist[i] == 0) /* vdev is hole */ 2065219089Spjd continue; 2066219089Spjd 2067219089Spjd vd[i] = spa_lookup_by_guid(spa, glist[i], B_FALSE); 2068219089Spjd if (vd[i] == NULL) { 2069219089Spjd /* 2070219089Spjd * Don't bother attempting to reopen the disks; 2071219089Spjd * just do the split. 2072219089Spjd */ 2073219089Spjd attempt_reopen = B_FALSE; 2074219089Spjd } else { 2075219089Spjd /* attempt to re-online it */ 2076219089Spjd vd[i]->vdev_offline = B_FALSE; 2077219089Spjd } 2078219089Spjd } 2079219089Spjd 2080219089Spjd if (attempt_reopen) { 2081219089Spjd vdev_reopen(spa->spa_root_vdev); 2082219089Spjd 2083219089Spjd /* check each device to see what state it's in */ 2084219089Spjd for (extracted = 0, i = 0; i < gcount; i++) { 2085219089Spjd if (vd[i] != NULL && 2086219089Spjd vd[i]->vdev_stat.vs_aux != VDEV_AUX_SPLIT_POOL) 2087219089Spjd break; 2088219089Spjd ++extracted; 2089219089Spjd } 2090219089Spjd } 2091219089Spjd 2092209962Smm /* 2093219089Spjd * If every disk has been moved to the new pool, or if we never 2094219089Spjd * even attempted to look at them, then we split them off for 2095219089Spjd * good. 2096209962Smm */ 2097219089Spjd if (!attempt_reopen || gcount == extracted) { 2098219089Spjd for (i = 0; i < gcount; i++) 2099219089Spjd if (vd[i] != NULL) 2100219089Spjd vdev_split(vd[i]); 2101219089Spjd vdev_reopen(spa->spa_root_vdev); 2102219089Spjd } 2103209962Smm 2104219089Spjd kmem_free(vd, gcount * sizeof (vdev_t *)); 2105219089Spjd} 2106185029Spjd 2107219089Spjdstatic int 2108219089Spjdspa_load(spa_t *spa, spa_load_state_t state, spa_import_type_t type, 2109219089Spjd boolean_t mosconfig) 2110219089Spjd{ 2111219089Spjd nvlist_t *config = spa->spa_config; 2112219089Spjd char *ereport = FM_EREPORT_ZFS_POOL; 2113228103Smm char *comment; 2114219089Spjd int error; 2115219089Spjd uint64_t pool_guid; 2116219089Spjd nvlist_t *nvl; 2117168404Spjd 2118219089Spjd if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &pool_guid)) 2119249195Smm return (SET_ERROR(EINVAL)); 2120168404Spjd 2121228103Smm ASSERT(spa->spa_comment == NULL); 2122228103Smm if (nvlist_lookup_string(config, ZPOOL_CONFIG_COMMENT, &comment) == 0) 2123228103Smm spa->spa_comment = spa_strdup(comment); 2124228103Smm 2125168404Spjd /* 2126168404Spjd * Versioning wasn't explicitly added to the label until later, so if 2127168404Spjd * it's not present treat it as the initial version. 
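 *
 * (A failed ZPOOL_CONFIG_VERSION lookup below simply sets ub_version to
 * SPA_VERSION_INITIAL; it is not treated as an error.)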
2128168404Spjd */ 2129219089Spjd if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, 2130219089Spjd &spa->spa_ubsync.ub_version) != 0) 2131219089Spjd spa->spa_ubsync.ub_version = SPA_VERSION_INITIAL; 2132168404Spjd 2133168404Spjd (void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG, 2134168404Spjd &spa->spa_config_txg); 2135168404Spjd 2136168404Spjd if ((state == SPA_LOAD_IMPORT || state == SPA_LOAD_TRYIMPORT) && 2137168404Spjd spa_guid_exists(pool_guid, 0)) { 2138249195Smm error = SET_ERROR(EEXIST); 2139219089Spjd } else { 2140228103Smm spa->spa_config_guid = pool_guid; 2141219089Spjd 2142219089Spjd if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_SPLIT, 2143219089Spjd &nvl) == 0) { 2144219089Spjd VERIFY(nvlist_dup(nvl, &spa->spa_config_splitting, 2145219089Spjd KM_SLEEP) == 0); 2146219089Spjd } 2147219089Spjd 2148236884Smm nvlist_free(spa->spa_load_info); 2149236884Smm spa->spa_load_info = fnvlist_alloc(); 2150236884Smm 2151219089Spjd gethrestime(&spa->spa_loaded_ts); 2152219089Spjd error = spa_load_impl(spa, pool_guid, config, state, type, 2153219089Spjd mosconfig, &ereport); 2154168404Spjd } 2155168404Spjd 2156219089Spjd spa->spa_minref = refcount_count(&spa->spa_refcount); 2157219089Spjd if (error) { 2158219089Spjd if (error != EEXIST) { 2159219089Spjd spa->spa_loaded_ts.tv_sec = 0; 2160219089Spjd spa->spa_loaded_ts.tv_nsec = 0; 2161219089Spjd } 2162219089Spjd if (error != EBADF) { 2163219089Spjd zfs_ereport_post(ereport, spa, NULL, NULL, 0, 0); 2164219089Spjd } 2165219089Spjd } 2166219089Spjd spa->spa_load_state = error ? SPA_LOAD_ERROR : SPA_LOAD_NONE; 2167219089Spjd spa->spa_ena = 0; 2168168404Spjd 2169219089Spjd return (error); 2170219089Spjd} 2171219089Spjd 2172219089Spjd/* 2173219089Spjd * Load an existing storage pool, using the pool's builtin spa_config as a 2174219089Spjd * source of configuration information. 2175219089Spjd */ 2176219089Spjdstatic int 2177219089Spjdspa_load_impl(spa_t *spa, uint64_t pool_guid, nvlist_t *config, 2178219089Spjd spa_load_state_t state, spa_import_type_t type, boolean_t mosconfig, 2179219089Spjd char **ereport) 2180219089Spjd{ 2181219089Spjd int error = 0; 2182219089Spjd nvlist_t *nvroot = NULL; 2183236884Smm nvlist_t *label; 2184219089Spjd vdev_t *rvd; 2185219089Spjd uberblock_t *ub = &spa->spa_uberblock; 2186219089Spjd uint64_t children, config_cache_txg = spa->spa_config_txg; 2187219089Spjd int orig_mode = spa->spa_mode; 2188219089Spjd int parse; 2189219089Spjd uint64_t obj; 2190236884Smm boolean_t missing_feat_write = B_FALSE; 2191219089Spjd 2192168404Spjd /* 2193219089Spjd * If this is an untrusted config, access the pool in read-only mode. 2194219089Spjd * This prevents things like resilvering recently removed devices. 2195219089Spjd */ 2196219089Spjd if (!mosconfig) 2197219089Spjd spa->spa_mode = FREAD; 2198219089Spjd 2199219089Spjd ASSERT(MUTEX_HELD(&spa_namespace_lock)); 2200219089Spjd 2201219089Spjd spa->spa_load_state = state; 2202219089Spjd 2203219089Spjd if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvroot)) 2204249195Smm return (SET_ERROR(EINVAL)); 2205219089Spjd 2206219089Spjd parse = (type == SPA_IMPORT_EXISTING ? 2207219089Spjd VDEV_ALLOC_LOAD : VDEV_ALLOC_SPLIT); 2208219089Spjd 2209219089Spjd /* 2210209962Smm * Create "The Godfather" zio to hold all async IOs 2211209962Smm */ 2212209962Smm spa->spa_async_zio_root = zio_root(spa, NULL, NULL, 2213209962Smm ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | ZIO_FLAG_GODFATHER); 2214209962Smm 2215209962Smm /* 2216168404Spjd * Parse the configuration into a vdev tree. 
We explicitly set the 2217168404Spjd * value that will be returned by spa_version() since parsing the 2218168404Spjd * configuration requires knowing the version number. 2219168404Spjd */ 2220185029Spjd spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 2221219089Spjd error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, parse); 2222185029Spjd spa_config_exit(spa, SCL_ALL, FTAG); 2223168404Spjd 2224168404Spjd if (error != 0) 2225219089Spjd return (error); 2226168404Spjd 2227168404Spjd ASSERT(spa->spa_root_vdev == rvd); 2228168404Spjd 2229219089Spjd if (type != SPA_IMPORT_ASSEMBLE) { 2230219089Spjd ASSERT(spa_guid(spa) == pool_guid); 2231219089Spjd } 2232219089Spjd 2233168404Spjd /* 2234168404Spjd * Try to open all vdevs, loading each label in the process. 2235168404Spjd */ 2236185029Spjd spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 2237168926Spjd error = vdev_open(rvd); 2238185029Spjd spa_config_exit(spa, SCL_ALL, FTAG); 2239168926Spjd if (error != 0) 2240219089Spjd return (error); 2241168404Spjd 2242168404Spjd /* 2243209962Smm * We need to validate the vdev labels against the configuration that 2244209962Smm * we have in hand, which is dependent on the setting of mosconfig. If 2245209962Smm * mosconfig is true then we're validating the vdev labels based on 2246219089Spjd * that config. Otherwise, we're validating against the cached config 2247209962Smm * (zpool.cache) that was read when we loaded the zfs module, and then 2248209962Smm * later we will recursively call spa_load() and validate against 2249209962Smm * the vdev config. 2250219089Spjd * 2251219089Spjd * If we're assembling a new pool that's been split off from an 2252219089Spjd * existing pool, the labels haven't yet been updated so we skip 2253219089Spjd * validation for now. 2254168404Spjd */ 2255219089Spjd if (type != SPA_IMPORT_ASSEMBLE) { 2256219089Spjd spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 2257230514Smm error = vdev_validate(rvd, mosconfig); 2258219089Spjd spa_config_exit(spa, SCL_ALL, FTAG); 2259168404Spjd 2260219089Spjd if (error != 0) 2261219089Spjd return (error); 2262219089Spjd 2263219089Spjd if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN) 2264249195Smm return (SET_ERROR(ENXIO)); 2265168404Spjd } 2266168404Spjd 2267168404Spjd /* 2268168404Spjd * Find the best uberblock. 2269168404Spjd */ 2270236884Smm vdev_uberblock_load(rvd, ub, &label); 2271168404Spjd 2272168404Spjd /* 2273168404Spjd * If we weren't able to find a single valid uberblock, return failure. 2274168404Spjd */ 2275236884Smm if (ub->ub_txg == 0) { 2276236884Smm nvlist_free(label); 2277219089Spjd return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, ENXIO)); 2278236884Smm } 2279168404Spjd 2280168404Spjd /* 2281236884Smm * If the pool has an unsupported version we can't open it. 2282168404Spjd */ 2283236884Smm if (!SPA_VERSION_IS_SUPPORTED(ub->ub_version)) { 2284236884Smm nvlist_free(label); 2285219089Spjd return (spa_vdev_err(rvd, VDEV_AUX_VERSION_NEWER, ENOTSUP)); 2286236884Smm } 2287168404Spjd 2288236884Smm if (ub->ub_version >= SPA_VERSION_FEATURES) { 2289236884Smm nvlist_t *features; 2290236884Smm 2291236884Smm /* 2292236884Smm * If we weren't able to find what's necessary for reading the 2293236884Smm * MOS in the label, return failure. 
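 *
 * (In other words, a SPA_VERSION_FEATURES pool whose label lacks a
 * ZPOOL_CONFIG_FEATURES_FOR_READ nvlist is treated the same way as
 * corrupt data: VDEV_AUX_CORRUPT_DATA / ENXIO.)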
2294236884Smm */ 2295236884Smm if (label == NULL || nvlist_lookup_nvlist(label, 2296236884Smm ZPOOL_CONFIG_FEATURES_FOR_READ, &features) != 0) { 2297236884Smm nvlist_free(label); 2298236884Smm return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, 2299236884Smm ENXIO)); 2300236884Smm } 2301236884Smm 2302236884Smm /* 2303236884Smm * Update our in-core representation with the definitive values 2304236884Smm * from the label. 2305236884Smm */ 2306236884Smm nvlist_free(spa->spa_label_features); 2307236884Smm VERIFY(nvlist_dup(features, &spa->spa_label_features, 0) == 0); 2308236884Smm } 2309236884Smm 2310236884Smm nvlist_free(label); 2311236884Smm 2312168404Spjd /* 2313236884Smm * Look through entries in the label nvlist's features_for_read. If 2314236884Smm * there is a feature listed there which we don't understand then we 2315236884Smm * cannot open a pool. 2316236884Smm */ 2317236884Smm if (ub->ub_version >= SPA_VERSION_FEATURES) { 2318236884Smm nvlist_t *unsup_feat; 2319236884Smm 2320236884Smm VERIFY(nvlist_alloc(&unsup_feat, NV_UNIQUE_NAME, KM_SLEEP) == 2321236884Smm 0); 2322236884Smm 2323236884Smm for (nvpair_t *nvp = nvlist_next_nvpair(spa->spa_label_features, 2324236884Smm NULL); nvp != NULL; 2325236884Smm nvp = nvlist_next_nvpair(spa->spa_label_features, nvp)) { 2326236884Smm if (!zfeature_is_supported(nvpair_name(nvp))) { 2327236884Smm VERIFY(nvlist_add_string(unsup_feat, 2328236884Smm nvpair_name(nvp), "") == 0); 2329236884Smm } 2330236884Smm } 2331236884Smm 2332236884Smm if (!nvlist_empty(unsup_feat)) { 2333236884Smm VERIFY(nvlist_add_nvlist(spa->spa_load_info, 2334236884Smm ZPOOL_CONFIG_UNSUP_FEAT, unsup_feat) == 0); 2335236884Smm nvlist_free(unsup_feat); 2336236884Smm return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT, 2337236884Smm ENOTSUP)); 2338236884Smm } 2339236884Smm 2340236884Smm nvlist_free(unsup_feat); 2341236884Smm } 2342236884Smm 2343236884Smm /* 2344168404Spjd * If the vdev guid sum doesn't match the uberblock, we have an 2345219089Spjd * incomplete configuration. We first check to see if the pool 2346219089Spjd * is aware of the complete config (i.e ZPOOL_CONFIG_VDEV_CHILDREN). 2347219089Spjd * If it is, defer the vdev_guid_sum check till later so we 2348219089Spjd * can handle missing vdevs. 2349168404Spjd */ 2350219089Spjd if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VDEV_CHILDREN, 2351219089Spjd &children) != 0 && mosconfig && type != SPA_IMPORT_ASSEMBLE && 2352219089Spjd rvd->vdev_guid_sum != ub->ub_guid_sum) 2353219089Spjd return (spa_vdev_err(rvd, VDEV_AUX_BAD_GUID_SUM, ENXIO)); 2354219089Spjd 2355219089Spjd if (type != SPA_IMPORT_ASSEMBLE && spa->spa_config_splitting) { 2356219089Spjd spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 2357219089Spjd spa_try_repair(spa, config); 2358219089Spjd spa_config_exit(spa, SCL_ALL, FTAG); 2359219089Spjd nvlist_free(spa->spa_config_splitting); 2360219089Spjd spa->spa_config_splitting = NULL; 2361168404Spjd } 2362168404Spjd 2363168404Spjd /* 2364168404Spjd * Initialize internal SPA structures. 2365168404Spjd */ 2366168404Spjd spa->spa_state = POOL_STATE_ACTIVE; 2367168404Spjd spa->spa_ubsync = spa->spa_uberblock; 2368219089Spjd spa->spa_verify_min_txg = spa->spa_extreme_rewind ? 2369219089Spjd TXG_INITIAL - 1 : spa_last_synced_txg(spa) - TXG_DEFER_SIZE - 1; 2370219089Spjd spa->spa_first_txg = spa->spa_last_ubsync_txg ? 
2371219089Spjd spa->spa_last_ubsync_txg : spa_last_synced_txg(spa) + 1; 2372219089Spjd spa->spa_claim_max_txg = spa->spa_first_txg; 2373219089Spjd spa->spa_prev_software_version = ub->ub_software_version; 2374219089Spjd 2375236884Smm error = dsl_pool_init(spa, spa->spa_first_txg, &spa->spa_dsl_pool); 2376219089Spjd if (error) 2377219089Spjd return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2378168404Spjd spa->spa_meta_objset = spa->spa_dsl_pool->dp_meta_objset; 2379168404Spjd 2380219089Spjd if (spa_dir_prop(spa, DMU_POOL_CONFIG, &spa->spa_config_object) != 0) 2381219089Spjd return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2382168404Spjd 2383236884Smm if (spa_version(spa) >= SPA_VERSION_FEATURES) { 2384236884Smm boolean_t missing_feat_read = B_FALSE; 2385238926Smm nvlist_t *unsup_feat, *enabled_feat; 2386236884Smm 2387236884Smm if (spa_dir_prop(spa, DMU_POOL_FEATURES_FOR_READ, 2388236884Smm &spa->spa_feat_for_read_obj) != 0) { 2389236884Smm return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2390236884Smm } 2391236884Smm 2392236884Smm if (spa_dir_prop(spa, DMU_POOL_FEATURES_FOR_WRITE, 2393236884Smm &spa->spa_feat_for_write_obj) != 0) { 2394236884Smm return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2395236884Smm } 2396236884Smm 2397236884Smm if (spa_dir_prop(spa, DMU_POOL_FEATURE_DESCRIPTIONS, 2398236884Smm &spa->spa_feat_desc_obj) != 0) { 2399236884Smm return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2400236884Smm } 2401236884Smm 2402238926Smm enabled_feat = fnvlist_alloc(); 2403238926Smm unsup_feat = fnvlist_alloc(); 2404236884Smm 2405259813Sdelphij if (!spa_features_check(spa, B_FALSE, 2406238926Smm unsup_feat, enabled_feat)) 2407236884Smm missing_feat_read = B_TRUE; 2408236884Smm 2409236884Smm if (spa_writeable(spa) || state == SPA_LOAD_TRYIMPORT) { 2410259813Sdelphij if (!spa_features_check(spa, B_TRUE, 2411238926Smm unsup_feat, enabled_feat)) { 2412236884Smm missing_feat_write = B_TRUE; 2413238926Smm } 2414236884Smm } 2415236884Smm 2416238926Smm fnvlist_add_nvlist(spa->spa_load_info, 2417238926Smm ZPOOL_CONFIG_ENABLED_FEAT, enabled_feat); 2418238926Smm 2419236884Smm if (!nvlist_empty(unsup_feat)) { 2420238926Smm fnvlist_add_nvlist(spa->spa_load_info, 2421238926Smm ZPOOL_CONFIG_UNSUP_FEAT, unsup_feat); 2422236884Smm } 2423236884Smm 2424238926Smm fnvlist_free(enabled_feat); 2425238926Smm fnvlist_free(unsup_feat); 2426236884Smm 2427236884Smm if (!missing_feat_read) { 2428236884Smm fnvlist_add_boolean(spa->spa_load_info, 2429236884Smm ZPOOL_CONFIG_CAN_RDONLY); 2430236884Smm } 2431236884Smm 2432236884Smm /* 2433236884Smm * If the state is SPA_LOAD_TRYIMPORT, our objective is 2434236884Smm * twofold: to determine whether the pool is available for 2435236884Smm * import in read-write mode and (if it is not) whether the 2436236884Smm * pool is available for import in read-only mode. If the pool 2437236884Smm * is available for import in read-write mode, it is displayed 2438236884Smm * as available in userland; if it is not available for import 2439236884Smm * in read-only mode, it is displayed as unavailable in 2440236884Smm * userland. If the pool is available for import in read-only 2441236884Smm * mode but not read-write mode, it is displayed as unavailable 2442236884Smm * in userland with a special note that the pool is actually 2443236884Smm * available for open in read-only mode. 
2444236884Smm * 2445236884Smm * As a result, if the state is SPA_LOAD_TRYIMPORT and we are 2446236884Smm * missing a feature for write, we must first determine whether 2447236884Smm * the pool can be opened read-only before returning to 2448236884Smm * userland in order to know whether to display the 2449236884Smm * abovementioned note. 2450236884Smm */ 2451236884Smm if (missing_feat_read || (missing_feat_write && 2452236884Smm spa_writeable(spa))) { 2453236884Smm return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT, 2454236884Smm ENOTSUP)); 2455236884Smm } 2456260150Sdelphij 2457260150Sdelphij /* 2458260150Sdelphij * Load refcounts for ZFS features from disk into an in-memory 2459260150Sdelphij * cache during SPA initialization. 2460260150Sdelphij */ 2461260150Sdelphij for (spa_feature_t i = 0; i < SPA_FEATURES; i++) { 2462260150Sdelphij uint64_t refcount; 2463260150Sdelphij 2464260150Sdelphij error = feature_get_refcount_from_disk(spa, 2465260150Sdelphij &spa_feature_table[i], &refcount); 2466260150Sdelphij if (error == 0) { 2467260150Sdelphij spa->spa_feat_refcount_cache[i] = refcount; 2468260150Sdelphij } else if (error == ENOTSUP) { 2469260150Sdelphij spa->spa_feat_refcount_cache[i] = 2470260150Sdelphij SPA_FEATURE_DISABLED; 2471260150Sdelphij } else { 2472260150Sdelphij return (spa_vdev_err(rvd, 2473260150Sdelphij VDEV_AUX_CORRUPT_DATA, EIO)); 2474260150Sdelphij } 2475260150Sdelphij } 2476236884Smm } 2477236884Smm 2478260150Sdelphij if (spa_feature_is_active(spa, SPA_FEATURE_ENABLED_TXG)) { 2479260150Sdelphij if (spa_dir_prop(spa, DMU_POOL_FEATURE_ENABLED_TXG, 2480268075Sdelphij &spa->spa_feat_enabled_txg_obj) != 0) 2481260150Sdelphij return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2482260150Sdelphij } 2483260150Sdelphij 2484236884Smm spa->spa_is_initializing = B_TRUE; 2485236884Smm error = dsl_pool_open(spa->spa_dsl_pool); 2486236884Smm spa->spa_is_initializing = B_FALSE; 2487236884Smm if (error != 0) 2488236884Smm return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2489236884Smm 2490168404Spjd if (!mosconfig) { 2491168498Spjd uint64_t hostid; 2492219089Spjd nvlist_t *policy = NULL, *nvconfig; 2493168404Spjd 2494219089Spjd if (load_nvlist(spa, spa->spa_config_object, &nvconfig) != 0) 2495219089Spjd return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2496168404Spjd 2497219089Spjd if (!spa_is_root(spa) && nvlist_lookup_uint64(nvconfig, 2498185029Spjd ZPOOL_CONFIG_HOSTID, &hostid) == 0) { 2499168498Spjd char *hostname; 2500168498Spjd unsigned long myhostid = 0; 2501168498Spjd 2502219089Spjd VERIFY(nvlist_lookup_string(nvconfig, 2503168498Spjd ZPOOL_CONFIG_HOSTNAME, &hostname) == 0); 2504168498Spjd 2505219089Spjd#ifdef _KERNEL 2506219089Spjd myhostid = zone_get_hostid(NULL); 2507219089Spjd#else /* _KERNEL */ 2508219089Spjd /* 2509219089Spjd * We're emulating the system's hostid in userland, so 2510219089Spjd * we can't use zone_get_hostid(). 2511219089Spjd */ 2512168498Spjd (void) ddi_strtoul(hw_serial, NULL, 10, &myhostid); 2513219089Spjd#endif /* _KERNEL */ 2514204073Spjd if (check_hostid && hostid != 0 && myhostid != 0 && 2515219089Spjd hostid != myhostid) { 2516219089Spjd nvlist_free(nvconfig); 2517168498Spjd cmn_err(CE_WARN, "pool '%s' could not be " 2518168498Spjd "loaded as it was last accessed by " 2519185029Spjd "another system (host: %s hostid: 0x%lx). 
" 2520236146Smm "See: http://illumos.org/msg/ZFS-8000-EY", 2521185029Spjd spa_name(spa), hostname, 2522168498Spjd (unsigned long)hostid); 2523249195Smm return (SET_ERROR(EBADF)); 2524168498Spjd } 2525168498Spjd } 2526219089Spjd if (nvlist_lookup_nvlist(spa->spa_config, 2527219089Spjd ZPOOL_REWIND_POLICY, &policy) == 0) 2528219089Spjd VERIFY(nvlist_add_nvlist(nvconfig, 2529219089Spjd ZPOOL_REWIND_POLICY, policy) == 0); 2530168498Spjd 2531219089Spjd spa_config_set(spa, nvconfig); 2532168404Spjd spa_unload(spa); 2533168404Spjd spa_deactivate(spa); 2534209962Smm spa_activate(spa, orig_mode); 2535168404Spjd 2536219089Spjd return (spa_load(spa, state, SPA_IMPORT_EXISTING, B_TRUE)); 2537168404Spjd } 2538168404Spjd 2539219089Spjd if (spa_dir_prop(spa, DMU_POOL_SYNC_BPOBJ, &obj) != 0) 2540219089Spjd return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2541219089Spjd error = bpobj_open(&spa->spa_deferred_bpobj, spa->spa_meta_objset, obj); 2542219089Spjd if (error != 0) 2543219089Spjd return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2544168404Spjd 2545168404Spjd /* 2546168404Spjd * Load the bit that tells us to use the new accounting function 2547168404Spjd * (raid-z deflation). If we have an older pool, this will not 2548168404Spjd * be present. 2549168404Spjd */ 2550219089Spjd error = spa_dir_prop(spa, DMU_POOL_DEFLATE, &spa->spa_deflate); 2551219089Spjd if (error != 0 && error != ENOENT) 2552219089Spjd return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2553168404Spjd 2554219089Spjd error = spa_dir_prop(spa, DMU_POOL_CREATION_VERSION, 2555219089Spjd &spa->spa_creation_version); 2556219089Spjd if (error != 0 && error != ENOENT) 2557219089Spjd return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2558219089Spjd 2559168404Spjd /* 2560168404Spjd * Load the persistent error log. If we have an older pool, this will 2561168404Spjd * not be present. 2562168404Spjd */ 2563219089Spjd error = spa_dir_prop(spa, DMU_POOL_ERRLOG_LAST, &spa->spa_errlog_last); 2564219089Spjd if (error != 0 && error != ENOENT) 2565219089Spjd return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2566168404Spjd 2567219089Spjd error = spa_dir_prop(spa, DMU_POOL_ERRLOG_SCRUB, 2568219089Spjd &spa->spa_errlog_scrub); 2569219089Spjd if (error != 0 && error != ENOENT) 2570219089Spjd return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2571168404Spjd 2572168404Spjd /* 2573168404Spjd * Load the history object. If we have an older pool, this 2574168404Spjd * will not be present. 2575168404Spjd */ 2576219089Spjd error = spa_dir_prop(spa, DMU_POOL_HISTORY, &spa->spa_history); 2577219089Spjd if (error != 0 && error != ENOENT) 2578219089Spjd return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2579168404Spjd 2580168404Spjd /* 2581219089Spjd * If we're assembling the pool from the split-off vdevs of 2582219089Spjd * an existing pool, we don't want to attach the spares & cache 2583219089Spjd * devices. 2584219089Spjd */ 2585219089Spjd 2586219089Spjd /* 2587168404Spjd * Load any hot spares for this pool. 
2588168404Spjd */ 2589219089Spjd error = spa_dir_prop(spa, DMU_POOL_SPARES, &spa->spa_spares.sav_object); 2590219089Spjd if (error != 0 && error != ENOENT) 2591219089Spjd return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2592219089Spjd if (error == 0 && type != SPA_IMPORT_ASSEMBLE) { 2593185029Spjd ASSERT(spa_version(spa) >= SPA_VERSION_SPARES); 2594185029Spjd if (load_nvlist(spa, spa->spa_spares.sav_object, 2595219089Spjd &spa->spa_spares.sav_config) != 0) 2596219089Spjd return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2597168404Spjd 2598185029Spjd spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 2599168404Spjd spa_load_spares(spa); 2600185029Spjd spa_config_exit(spa, SCL_ALL, FTAG); 2601219089Spjd } else if (error == 0) { 2602219089Spjd spa->spa_spares.sav_sync = B_TRUE; 2603168404Spjd } 2604168404Spjd 2605185029Spjd /* 2606185029Spjd * Load any level 2 ARC devices for this pool. 2607185029Spjd */ 2608219089Spjd error = spa_dir_prop(spa, DMU_POOL_L2CACHE, 2609185029Spjd &spa->spa_l2cache.sav_object); 2610219089Spjd if (error != 0 && error != ENOENT) 2611219089Spjd return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2612219089Spjd if (error == 0 && type != SPA_IMPORT_ASSEMBLE) { 2613185029Spjd ASSERT(spa_version(spa) >= SPA_VERSION_L2CACHE); 2614185029Spjd if (load_nvlist(spa, spa->spa_l2cache.sav_object, 2615219089Spjd &spa->spa_l2cache.sav_config) != 0) 2616219089Spjd return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2617185029Spjd 2618185029Spjd spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 2619185029Spjd spa_load_l2cache(spa); 2620185029Spjd spa_config_exit(spa, SCL_ALL, FTAG); 2621219089Spjd } else if (error == 0) { 2622219089Spjd spa->spa_l2cache.sav_sync = B_TRUE; 2623185029Spjd } 2624185029Spjd 2625219089Spjd spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION); 2626213197Smm 2627219089Spjd error = spa_dir_prop(spa, DMU_POOL_PROPS, &spa->spa_pool_props_object); 2628219089Spjd if (error && error != ENOENT) 2629219089Spjd return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2630185029Spjd 2631219089Spjd if (error == 0) { 2632219089Spjd uint64_t autoreplace; 2633185029Spjd 2634219089Spjd spa_prop_find(spa, ZPOOL_PROP_BOOTFS, &spa->spa_bootfs); 2635219089Spjd spa_prop_find(spa, ZPOOL_PROP_AUTOREPLACE, &autoreplace); 2636219089Spjd spa_prop_find(spa, ZPOOL_PROP_DELEGATION, &spa->spa_delegation); 2637219089Spjd spa_prop_find(spa, ZPOOL_PROP_FAILUREMODE, &spa->spa_failmode); 2638219089Spjd spa_prop_find(spa, ZPOOL_PROP_AUTOEXPAND, &spa->spa_autoexpand); 2639219089Spjd spa_prop_find(spa, ZPOOL_PROP_DEDUPDITTO, 2640219089Spjd &spa->spa_dedup_ditto); 2641185029Spjd 2642219089Spjd spa->spa_autoreplace = (autoreplace != 0); 2643168404Spjd } 2644168404Spjd 2645168404Spjd /* 2646185029Spjd * If the 'autoreplace' property is set, then post a resource notifying 2647185029Spjd * the ZFS DE that it should not issue any faults for unopenable 2648185029Spjd * devices. We also iterate over the vdevs, and post a sysevent for any 2649185029Spjd * unopenable vdevs so that the normal autoreplace handler can take 2650185029Spjd * over. 2651185029Spjd */ 2652219089Spjd if (spa->spa_autoreplace && state != SPA_LOAD_TRYIMPORT) { 2653185029Spjd spa_check_removed(spa->spa_root_vdev); 2654219089Spjd /* 2655219089Spjd * For the import case, this is done in spa_import(), because 2656219089Spjd * at this point we're using the spare definitions from 2657219089Spjd * the MOS config, not necessarily from the userland config. 
2658219089Spjd */ 2659219089Spjd if (state != SPA_LOAD_IMPORT) { 2660219089Spjd spa_aux_check_removed(&spa->spa_spares); 2661219089Spjd spa_aux_check_removed(&spa->spa_l2cache); 2662219089Spjd } 2663219089Spjd } 2664185029Spjd 2665185029Spjd /* 2666168404Spjd * Load the vdev state for all toplevel vdevs. 2667168404Spjd */ 2668168404Spjd vdev_load(rvd); 2669168404Spjd 2670168404Spjd /* 2671168404Spjd * Propagate the leaf DTLs we just loaded all the way up the tree. 2672168404Spjd */ 2673185029Spjd spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 2674168404Spjd vdev_dtl_reassess(rvd, 0, 0, B_FALSE); 2675185029Spjd spa_config_exit(spa, SCL_ALL, FTAG); 2676168404Spjd 2677168404Spjd /* 2678219089Spjd * Load the DDTs (dedup tables). 2679168404Spjd */ 2680219089Spjd error = ddt_load(spa); 2681219089Spjd if (error != 0) 2682219089Spjd return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2683219089Spjd 2684219089Spjd spa_update_dspace(spa); 2685219089Spjd 2686219089Spjd /* 2687219089Spjd * Validate the config, using the MOS config to fill in any 2688219089Spjd * information which might be missing. If we fail to validate 2689219089Spjd * the config then declare the pool unfit for use. If we're 2690219089Spjd * assembling a pool from a split, the log is not transferred 2691219089Spjd * over. 2692219089Spjd */ 2693219089Spjd if (type != SPA_IMPORT_ASSEMBLE) { 2694219089Spjd nvlist_t *nvconfig; 2695219089Spjd 2696219089Spjd if (load_nvlist(spa, spa->spa_config_object, &nvconfig) != 0) 2697219089Spjd return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2698219089Spjd 2699219089Spjd if (!spa_config_valid(spa, nvconfig)) { 2700219089Spjd nvlist_free(nvconfig); 2701219089Spjd return (spa_vdev_err(rvd, VDEV_AUX_BAD_GUID_SUM, 2702219089Spjd ENXIO)); 2703219089Spjd } 2704219089Spjd nvlist_free(nvconfig); 2705219089Spjd 2706219089Spjd /* 2707236884Smm * Now that we've validated the config, check the state of the 2708219089Spjd * root vdev. If it can't be opened, it indicates one or 2709219089Spjd * more toplevel vdevs are faulted. 2710219089Spjd */ 2711219089Spjd if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN) 2712249195Smm return (SET_ERROR(ENXIO)); 2713219089Spjd 2714219089Spjd if (spa_check_logs(spa)) { 2715219089Spjd *ereport = FM_EREPORT_ZFS_LOG_REPLAY; 2716219089Spjd return (spa_vdev_err(rvd, VDEV_AUX_BAD_LOG, ENXIO)); 2717219089Spjd } 2718168404Spjd } 2719168404Spjd 2720236884Smm if (missing_feat_write) { 2721236884Smm ASSERT(state == SPA_LOAD_TRYIMPORT); 2722236884Smm 2723236884Smm /* 2724236884Smm * At this point, we know that we can open the pool in 2725236884Smm * read-only mode but not read-write mode. We now have enough 2726236884Smm * information and can return to userland. 2727236884Smm */ 2728236884Smm return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT, ENOTSUP)); 2729236884Smm } 2730236884Smm 2731219089Spjd /* 2732219089Spjd * We've successfully opened the pool, verify that we're ready 2733219089Spjd * to start pushing transactions. 
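 *
 * spa_load_verify(), defined earlier in this file, traverses pool
 * metadata (and optionally user data) subject to the pool's rewind
 * policy and fails the load with EIO if more errors are found than the
 * policy allows.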
2734219089Spjd */
2735219089Spjd if (state != SPA_LOAD_TRYIMPORT) {
2736219089Spjd if (error = spa_load_verify(spa))
2737219089Spjd return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA,
2738219089Spjd error));
2739219089Spjd }
2740219089Spjd
2741219089Spjd if (spa_writeable(spa) && (state == SPA_LOAD_RECOVER ||
2742219089Spjd spa->spa_load_max_txg == UINT64_MAX)) {
2743168404Spjd dmu_tx_t *tx;
2744168404Spjd int need_update = B_FALSE;
2745168404Spjd
2746209962Smm ASSERT(state != SPA_LOAD_TRYIMPORT);
2747209962Smm
2748168404Spjd /*
2749168404Spjd * Claim log blocks that haven't been committed yet.
2750168404Spjd * This must all happen in a single txg.
2751219089Spjd * Note: spa_claim_max_txg is updated by spa_claim_notify(),
2752219089Spjd * invoked from zil_claim_log_block()'s i/o done callback.
2753219089Spjd * Price of rollback is that we abandon the log.
2754168404Spjd */
2755219089Spjd spa->spa_claiming = B_TRUE;
2756219089Spjd
2757168404Spjd tx = dmu_tx_create_assigned(spa_get_dsl(spa),
2758168404Spjd spa_first_txg(spa));
2759185029Spjd (void) dmu_objset_find(spa_name(spa),
2760168404Spjd zil_claim, tx, DS_FIND_CHILDREN);
2761168404Spjd dmu_tx_commit(tx);
2762168404Spjd
2763219089Spjd spa->spa_claiming = B_FALSE;
2764219089Spjd
2765219089Spjd spa_set_log_state(spa, SPA_LOG_GOOD);
2766168404Spjd spa->spa_sync_on = B_TRUE;
2767168404Spjd txg_sync_start(spa->spa_dsl_pool);
2768168404Spjd
2769168404Spjd /*
2770219089Spjd * Wait for all claims to sync. We sync up to the highest
2771219089Spjd * claimed log block birth time so that claimed log blocks
2772219089Spjd * don't appear to be from the future. spa_claim_max_txg
2773219089Spjd * will have been set for us by either zil_check_log_chain()
2774219089Spjd * (invoked from spa_check_logs()) or zil_claim() above.
2775168404Spjd */
2776219089Spjd txg_wait_synced(spa->spa_dsl_pool, spa->spa_claim_max_txg);
2777168404Spjd
2778168404Spjd /*
2779168404Spjd * If the config cache is stale, or we have uninitialized
2780168404Spjd * metaslabs (see spa_vdev_add()), then update the config.
2781209962Smm *
2782219089Spjd * If this is a verbatim import, trust the current
2783209962Smm * in-core spa_config and update the disk labels.
2784168404Spjd */
2785168404Spjd if (config_cache_txg != spa->spa_config_txg ||
2786219089Spjd state == SPA_LOAD_IMPORT ||
2787219089Spjd state == SPA_LOAD_RECOVER ||
2788219089Spjd (spa->spa_import_flags & ZFS_IMPORT_VERBATIM))
2789168404Spjd need_update = B_TRUE;
2790168404Spjd
2791209962Smm for (int c = 0; c < rvd->vdev_children; c++)
2792168404Spjd if (rvd->vdev_child[c]->vdev_ms_array == 0)
2793168404Spjd need_update = B_TRUE;
2794168404Spjd
2795168404Spjd /*
2796168404Spjd * Update the config cache asynchronously in case we're the
2797168404Spjd * root pool, in which case the config cache isn't writable yet.
2798168404Spjd */
2799168404Spjd if (need_update)
2800168404Spjd spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
2801208683Spjd
2802208683Spjd /*
2803208683Spjd * Check all DTLs to see if anything needs resilvering.
2804208683Spjd */
2805219089Spjd if (!dsl_scan_resilvering(spa->spa_dsl_pool) &&
2806219089Spjd vdev_resilver_needed(rvd, NULL, NULL))
2807208683Spjd spa_async_request(spa, SPA_ASYNC_RESILVER);
2808219089Spjd
2809219089Spjd /*
2810248571Smm * Log the fact that we booted up (so that we can detect if
2811248571Smm * we rebooted in the middle of an operation).
2812248571Smm */
2813248571Smm spa_history_log_version(spa, "open");
2814248571Smm
2815248571Smm /*
2816219089Spjd * Delete any inconsistent datasets.
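 * (Typically these are datasets left half-received by an interrupted
 * 'zfs receive'; dsl_destroy_inconsistent() is applied to every dataset
 * in the pool and destroys any it finds in that state.)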
2817219089Spjd */ 2818219089Spjd (void) dmu_objset_find(spa_name(spa), 2819219089Spjd dsl_destroy_inconsistent, NULL, DS_FIND_CHILDREN); 2820219089Spjd 2821219089Spjd /* 2822219089Spjd * Clean up any stale temporary dataset userrefs. 2823219089Spjd */ 2824219089Spjd dsl_pool_clean_tmp_userrefs(spa->spa_dsl_pool); 2825168404Spjd } 2826168404Spjd 2827219089Spjd return (0); 2828219089Spjd} 2829168404Spjd 2830219089Spjdstatic int 2831219089Spjdspa_load_retry(spa_t *spa, spa_load_state_t state, int mosconfig) 2832219089Spjd{ 2833219089Spjd int mode = spa->spa_mode; 2834219089Spjd 2835219089Spjd spa_unload(spa); 2836219089Spjd spa_deactivate(spa); 2837219089Spjd 2838268720Sdelphij spa->spa_load_max_txg = spa->spa_uberblock.ub_txg - 1; 2839219089Spjd 2840219089Spjd spa_activate(spa, mode); 2841219089Spjd spa_async_suspend(spa); 2842219089Spjd 2843219089Spjd return (spa_load(spa, state, SPA_IMPORT_EXISTING, mosconfig)); 2844168404Spjd} 2845168404Spjd 2846236884Smm/* 2847236884Smm * If spa_load() fails this function will try loading prior txg's. If 2848236884Smm * 'state' is SPA_LOAD_RECOVER and one of these loads succeeds the pool 2849236884Smm * will be rewound to that txg. If 'state' is not SPA_LOAD_RECOVER this 2850236884Smm * function will not rewind the pool and will return the same error as 2851236884Smm * spa_load(). 2852236884Smm */ 2853219089Spjdstatic int 2854219089Spjdspa_load_best(spa_t *spa, spa_load_state_t state, int mosconfig, 2855219089Spjd uint64_t max_request, int rewind_flags) 2856219089Spjd{ 2857236884Smm nvlist_t *loadinfo = NULL; 2858219089Spjd nvlist_t *config = NULL; 2859219089Spjd int load_error, rewind_error; 2860219089Spjd uint64_t safe_rewind_txg; 2861219089Spjd uint64_t min_txg; 2862219089Spjd 2863219089Spjd if (spa->spa_load_txg && state == SPA_LOAD_RECOVER) { 2864219089Spjd spa->spa_load_max_txg = spa->spa_load_txg; 2865219089Spjd spa_set_log_state(spa, SPA_LOG_CLEAR); 2866219089Spjd } else { 2867219089Spjd spa->spa_load_max_txg = max_request; 2868268720Sdelphij if (max_request != UINT64_MAX) 2869268720Sdelphij spa->spa_extreme_rewind = B_TRUE; 2870219089Spjd } 2871219089Spjd 2872219089Spjd load_error = rewind_error = spa_load(spa, state, SPA_IMPORT_EXISTING, 2873219089Spjd mosconfig); 2874219089Spjd if (load_error == 0) 2875219089Spjd return (0); 2876219089Spjd 2877219089Spjd if (spa->spa_root_vdev != NULL) 2878219089Spjd config = spa_config_generate(spa, NULL, -1ULL, B_TRUE); 2879219089Spjd 2880219089Spjd spa->spa_last_ubsync_txg = spa->spa_uberblock.ub_txg; 2881219089Spjd spa->spa_last_ubsync_txg_ts = spa->spa_uberblock.ub_timestamp; 2882219089Spjd 2883219089Spjd if (rewind_flags & ZPOOL_NEVER_REWIND) { 2884219089Spjd nvlist_free(config); 2885219089Spjd return (load_error); 2886219089Spjd } 2887219089Spjd 2888236884Smm if (state == SPA_LOAD_RECOVER) { 2889236884Smm /* Price of rolling back is discarding txgs, including log */ 2890219089Spjd spa_set_log_state(spa, SPA_LOG_CLEAR); 2891236884Smm } else { 2892236884Smm /* 2893236884Smm * If we aren't rolling back save the load info from our first 2894236884Smm * import attempt so that we can restore it after attempting 2895236884Smm * to rewind. 2896236884Smm */ 2897236884Smm loadinfo = spa->spa_load_info; 2898236884Smm spa->spa_load_info = fnvlist_alloc(); 2899236884Smm } 2900219089Spjd 2901219089Spjd spa->spa_load_max_txg = spa->spa_last_ubsync_txg; 2902219089Spjd safe_rewind_txg = spa->spa_last_ubsync_txg - TXG_DEFER_SIZE; 2903219089Spjd min_txg = (rewind_flags & ZPOOL_EXTREME_REWIND) ? 
2904219089Spjd TXG_INITIAL : safe_rewind_txg; 2905219089Spjd 2906219089Spjd /* 2907219089Spjd * Continue as long as we're finding errors, we're still within 2908219089Spjd * the acceptable rewind range, and we're still finding uberblocks 2909219089Spjd */ 2910219089Spjd while (rewind_error && spa->spa_uberblock.ub_txg >= min_txg && 2911219089Spjd spa->spa_uberblock.ub_txg <= spa->spa_load_max_txg) { 2912219089Spjd if (spa->spa_load_max_txg < safe_rewind_txg) 2913219089Spjd spa->spa_extreme_rewind = B_TRUE; 2914219089Spjd rewind_error = spa_load_retry(spa, state, mosconfig); 2915219089Spjd } 2916219089Spjd 2917219089Spjd spa->spa_extreme_rewind = B_FALSE; 2918219089Spjd spa->spa_load_max_txg = UINT64_MAX; 2919219089Spjd 2920219089Spjd if (config && (rewind_error || state != SPA_LOAD_RECOVER)) 2921219089Spjd spa_config_set(spa, config); 2922219089Spjd 2923236884Smm if (state == SPA_LOAD_RECOVER) { 2924236884Smm ASSERT3P(loadinfo, ==, NULL); 2925236884Smm return (rewind_error); 2926236884Smm } else { 2927236884Smm /* Store the rewind info as part of the initial load info */ 2928236884Smm fnvlist_add_nvlist(loadinfo, ZPOOL_CONFIG_REWIND_INFO, 2929236884Smm spa->spa_load_info); 2930236884Smm 2931236884Smm /* Restore the initial load info */ 2932236884Smm fnvlist_free(spa->spa_load_info); 2933236884Smm spa->spa_load_info = loadinfo; 2934236884Smm 2935236884Smm return (load_error); 2936236884Smm } 2937219089Spjd} 2938219089Spjd 2939168404Spjd/* 2940168404Spjd * Pool Open/Import 2941168404Spjd * 2942168404Spjd * The import case is identical to an open except that the configuration is sent 2943168404Spjd * down from userland, instead of grabbed from the configuration cache. For the 2944168404Spjd * case of an open, the pool configuration will exist in the 2945185029Spjd * POOL_STATE_UNINITIALIZED state. 2946168404Spjd * 2947168404Spjd * The stats information (gen/count/ustats) is used to gather vdev statistics at 2948168404Spjd * the same time open the pool, without having to keep around the spa_t in some 2949168404Spjd * ambiguous state. 2950168404Spjd */ 2951168404Spjdstatic int 2952219089Spjdspa_open_common(const char *pool, spa_t **spapp, void *tag, nvlist_t *nvpolicy, 2953219089Spjd nvlist_t **config) 2954168404Spjd{ 2955168404Spjd spa_t *spa; 2956219089Spjd spa_load_state_t state = SPA_LOAD_OPEN; 2957168404Spjd int error; 2958168404Spjd int locked = B_FALSE; 2959219089Spjd int firstopen = B_FALSE; 2960168404Spjd 2961168404Spjd *spapp = NULL; 2962168404Spjd 2963168404Spjd /* 2964168404Spjd * As disgusting as this is, we need to support recursive calls to this 2965168404Spjd * function because dsl_dir_open() is called during spa_load(), and ends 2966168404Spjd * up calling spa_open() again. The real fix is to figure out how to 2967168404Spjd * avoid dsl_dir_open() calling this in the first place. 2968168404Spjd */ 2969168404Spjd if (mutex_owner(&spa_namespace_lock) != curthread) { 2970168404Spjd mutex_enter(&spa_namespace_lock); 2971168404Spjd locked = B_TRUE; 2972168404Spjd } 2973168404Spjd 2974168404Spjd if ((spa = spa_lookup(pool)) == NULL) { 2975168404Spjd if (locked) 2976168404Spjd mutex_exit(&spa_namespace_lock); 2977249195Smm return (SET_ERROR(ENOENT)); 2978168404Spjd } 2979219089Spjd 2980168404Spjd if (spa->spa_state == POOL_STATE_UNINITIALIZED) { 2981219089Spjd zpool_rewind_policy_t policy; 2982168404Spjd 2983219089Spjd firstopen = B_TRUE; 2984219089Spjd 2985219089Spjd zpool_get_rewind_policy(nvpolicy ? 
nvpolicy : spa->spa_config, 2986219089Spjd &policy); 2987219089Spjd if (policy.zrp_request & ZPOOL_DO_REWIND) 2988219089Spjd state = SPA_LOAD_RECOVER; 2989219089Spjd 2990209962Smm spa_activate(spa, spa_mode_global); 2991168404Spjd 2992219089Spjd if (state != SPA_LOAD_RECOVER) 2993219089Spjd spa->spa_last_ubsync_txg = spa->spa_load_txg = 0; 2994168404Spjd 2995219089Spjd error = spa_load_best(spa, state, B_FALSE, policy.zrp_txg, 2996219089Spjd policy.zrp_request); 2997219089Spjd 2998168404Spjd if (error == EBADF) { 2999168404Spjd /* 3000168404Spjd * If vdev_validate() returns failure (indicated by 3001168404Spjd * EBADF), it indicates that one of the vdevs indicates 3002168404Spjd * that the pool has been exported or destroyed. If 3003168404Spjd * this is the case, the config cache is out of sync and 3004168404Spjd * we should remove the pool from the namespace. 3005168404Spjd */ 3006168404Spjd spa_unload(spa); 3007168404Spjd spa_deactivate(spa); 3008185029Spjd spa_config_sync(spa, B_TRUE, B_TRUE); 3009168404Spjd spa_remove(spa); 3010168404Spjd if (locked) 3011168404Spjd mutex_exit(&spa_namespace_lock); 3012249195Smm return (SET_ERROR(ENOENT)); 3013168404Spjd } 3014168404Spjd 3015168404Spjd if (error) { 3016168404Spjd /* 3017168404Spjd * We can't open the pool, but we still have useful 3018168404Spjd * information: the state of each vdev after the 3019168404Spjd * attempted vdev_open(). Return this to the user. 3020168404Spjd */ 3021219089Spjd if (config != NULL && spa->spa_config) { 3022219089Spjd VERIFY(nvlist_dup(spa->spa_config, config, 3023219089Spjd KM_SLEEP) == 0); 3024219089Spjd VERIFY(nvlist_add_nvlist(*config, 3025219089Spjd ZPOOL_CONFIG_LOAD_INFO, 3026219089Spjd spa->spa_load_info) == 0); 3027219089Spjd } 3028168404Spjd spa_unload(spa); 3029168404Spjd spa_deactivate(spa); 3030219089Spjd spa->spa_last_open_failed = error; 3031168404Spjd if (locked) 3032168404Spjd mutex_exit(&spa_namespace_lock); 3033168404Spjd *spapp = NULL; 3034168404Spjd return (error); 3035168404Spjd } 3036168404Spjd } 3037168404Spjd 3038168404Spjd spa_open_ref(spa, tag); 3039185029Spjd 3040219089Spjd if (config != NULL) 3041219089Spjd *config = spa_config_generate(spa, NULL, -1ULL, B_TRUE); 3042219089Spjd 3043219089Spjd /* 3044219089Spjd * If we've recovered the pool, pass back any information we 3045219089Spjd * gathered while doing the load. 
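 *
 * A minimal sketch of the rewind walk performed by spa_load_best() above
 * (illustrative only; the short names stand for the spa_t fields used in
 * that function).  Each spa_load_retry() call lowers the txg ceiling to
 * one less than the uberblock that just failed, so successive loads pick
 * progressively older uberblocks until one succeeds, the txg falls below
 * the acceptable rewind window, or no usable uberblock remains:
 *
 *	min_txg = extreme_rewind ? TXG_INITIAL :
 *	    last_ubsync_txg - TXG_DEFER_SIZE;
 *	while (rewind_error && ub_txg >= min_txg && ub_txg <= max_txg)
 *		rewind_error = spa_load_retry(spa, state, mosconfig);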
3046219089Spjd */ 3047219089Spjd if (state == SPA_LOAD_RECOVER) { 3048219089Spjd VERIFY(nvlist_add_nvlist(*config, ZPOOL_CONFIG_LOAD_INFO, 3049219089Spjd spa->spa_load_info) == 0); 3050219089Spjd } 3051219089Spjd 3052219089Spjd if (locked) { 3053219089Spjd spa->spa_last_open_failed = 0; 3054219089Spjd spa->spa_last_ubsync_txg = 0; 3055219089Spjd spa->spa_load_txg = 0; 3056168404Spjd mutex_exit(&spa_namespace_lock); 3057219089Spjd#ifdef __FreeBSD__ 3058219089Spjd#ifdef _KERNEL 3059219089Spjd if (firstopen) 3060249047Savg zvol_create_minors(spa->spa_name); 3061219089Spjd#endif 3062219089Spjd#endif 3063219089Spjd } 3064168404Spjd 3065168404Spjd *spapp = spa; 3066168404Spjd 3067168404Spjd return (0); 3068168404Spjd} 3069168404Spjd 3070168404Spjdint 3071219089Spjdspa_open_rewind(const char *name, spa_t **spapp, void *tag, nvlist_t *policy, 3072219089Spjd nvlist_t **config) 3073219089Spjd{ 3074219089Spjd return (spa_open_common(name, spapp, tag, policy, config)); 3075219089Spjd} 3076219089Spjd 3077219089Spjdint 3078168404Spjdspa_open(const char *name, spa_t **spapp, void *tag) 3079168404Spjd{ 3080219089Spjd return (spa_open_common(name, spapp, tag, NULL, NULL)); 3081168404Spjd} 3082168404Spjd 3083168404Spjd/* 3084168404Spjd * Lookup the given spa_t, incrementing the inject count in the process, 3085168404Spjd * preventing it from being exported or destroyed. 3086168404Spjd */ 3087168404Spjdspa_t * 3088168404Spjdspa_inject_addref(char *name) 3089168404Spjd{ 3090168404Spjd spa_t *spa; 3091168404Spjd 3092168404Spjd mutex_enter(&spa_namespace_lock); 3093168404Spjd if ((spa = spa_lookup(name)) == NULL) { 3094168404Spjd mutex_exit(&spa_namespace_lock); 3095168404Spjd return (NULL); 3096168404Spjd } 3097168404Spjd spa->spa_inject_ref++; 3098168404Spjd mutex_exit(&spa_namespace_lock); 3099168404Spjd 3100168404Spjd return (spa); 3101168404Spjd} 3102168404Spjd 3103168404Spjdvoid 3104168404Spjdspa_inject_delref(spa_t *spa) 3105168404Spjd{ 3106168404Spjd mutex_enter(&spa_namespace_lock); 3107168404Spjd spa->spa_inject_ref--; 3108168404Spjd mutex_exit(&spa_namespace_lock); 3109168404Spjd} 3110168404Spjd 3111185029Spjd/* 3112185029Spjd * Add spares device information to the nvlist. 3113185029Spjd */ 3114168404Spjdstatic void 3115168404Spjdspa_add_spares(spa_t *spa, nvlist_t *config) 3116168404Spjd{ 3117168404Spjd nvlist_t **spares; 3118168404Spjd uint_t i, nspares; 3119168404Spjd nvlist_t *nvroot; 3120168404Spjd uint64_t guid; 3121168404Spjd vdev_stat_t *vs; 3122168404Spjd uint_t vsc; 3123168404Spjd uint64_t pool; 3124168404Spjd 3125209962Smm ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER)); 3126209962Smm 3127185029Spjd if (spa->spa_spares.sav_count == 0) 3128168404Spjd return; 3129168404Spjd 3130168404Spjd VERIFY(nvlist_lookup_nvlist(config, 3131168404Spjd ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0); 3132185029Spjd VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config, 3133168404Spjd ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0); 3134168404Spjd if (nspares != 0) { 3135168404Spjd VERIFY(nvlist_add_nvlist_array(nvroot, 3136168404Spjd ZPOOL_CONFIG_SPARES, spares, nspares) == 0); 3137168404Spjd VERIFY(nvlist_lookup_nvlist_array(nvroot, 3138168404Spjd ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0); 3139168404Spjd 3140168404Spjd /* 3141168404Spjd * Go through and find any spares which have since been 3142168404Spjd * repurposed as an active spare. If this is the case, update 3143168404Spjd * their status appropriately. 
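 *
 * A spare counts as "in use" when spa_spare_exists() reports a non-zero
 * pool guid for it, i.e. some pool has activated it as a replacement
 * device.  Marking such an entry VDEV_STATE_CANT_OPEN with
 * VDEV_AUX_SPARED in the loop below is what lets consumers of this
 * config (zpool status, for example) flag the spare as currently in use.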
3144168404Spjd */ 3145168404Spjd for (i = 0; i < nspares; i++) { 3146168404Spjd VERIFY(nvlist_lookup_uint64(spares[i], 3147168404Spjd ZPOOL_CONFIG_GUID, &guid) == 0); 3148185029Spjd if (spa_spare_exists(guid, &pool, NULL) && 3149185029Spjd pool != 0ULL) { 3150168404Spjd VERIFY(nvlist_lookup_uint64_array( 3151219089Spjd spares[i], ZPOOL_CONFIG_VDEV_STATS, 3152168404Spjd (uint64_t **)&vs, &vsc) == 0); 3153168404Spjd vs->vs_state = VDEV_STATE_CANT_OPEN; 3154168404Spjd vs->vs_aux = VDEV_AUX_SPARED; 3155168404Spjd } 3156168404Spjd } 3157168404Spjd } 3158168404Spjd} 3159168404Spjd 3160185029Spjd/* 3161185029Spjd * Add l2cache device information to the nvlist, including vdev stats. 3162185029Spjd */ 3163185029Spjdstatic void 3164185029Spjdspa_add_l2cache(spa_t *spa, nvlist_t *config) 3165185029Spjd{ 3166185029Spjd nvlist_t **l2cache; 3167185029Spjd uint_t i, j, nl2cache; 3168185029Spjd nvlist_t *nvroot; 3169185029Spjd uint64_t guid; 3170185029Spjd vdev_t *vd; 3171185029Spjd vdev_stat_t *vs; 3172185029Spjd uint_t vsc; 3173185029Spjd 3174209962Smm ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER)); 3175209962Smm 3176185029Spjd if (spa->spa_l2cache.sav_count == 0) 3177185029Spjd return; 3178185029Spjd 3179185029Spjd VERIFY(nvlist_lookup_nvlist(config, 3180185029Spjd ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0); 3181185029Spjd VERIFY(nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config, 3182185029Spjd ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0); 3183185029Spjd if (nl2cache != 0) { 3184185029Spjd VERIFY(nvlist_add_nvlist_array(nvroot, 3185185029Spjd ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0); 3186185029Spjd VERIFY(nvlist_lookup_nvlist_array(nvroot, 3187185029Spjd ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0); 3188185029Spjd 3189185029Spjd /* 3190185029Spjd * Update level 2 cache device stats. 3191185029Spjd */ 3192185029Spjd 3193185029Spjd for (i = 0; i < nl2cache; i++) { 3194185029Spjd VERIFY(nvlist_lookup_uint64(l2cache[i], 3195185029Spjd ZPOOL_CONFIG_GUID, &guid) == 0); 3196185029Spjd 3197185029Spjd vd = NULL; 3198185029Spjd for (j = 0; j < spa->spa_l2cache.sav_count; j++) { 3199185029Spjd if (guid == 3200185029Spjd spa->spa_l2cache.sav_vdevs[j]->vdev_guid) { 3201185029Spjd vd = spa->spa_l2cache.sav_vdevs[j]; 3202185029Spjd break; 3203185029Spjd } 3204185029Spjd } 3205185029Spjd ASSERT(vd != NULL); 3206185029Spjd 3207185029Spjd VERIFY(nvlist_lookup_uint64_array(l2cache[i], 3208219089Spjd ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc) 3209219089Spjd == 0); 3210185029Spjd vdev_get_stats(vd, vs); 3211185029Spjd } 3212185029Spjd } 3213185029Spjd} 3214185029Spjd 3215236884Smmstatic void 3216236884Smmspa_add_feature_stats(spa_t *spa, nvlist_t *config) 3217236884Smm{ 3218236884Smm nvlist_t *features; 3219236884Smm zap_cursor_t zc; 3220236884Smm zap_attribute_t za; 3221236884Smm 3222236884Smm ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER)); 3223236884Smm VERIFY(nvlist_alloc(&features, NV_UNIQUE_NAME, KM_SLEEP) == 0); 3224236884Smm 3225253993Smav /* We may be unable to read features if pool is suspended. 
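 * (The feature ZAPs live in the MOS, so reading them would issue I/O
 * against the pool; on a suspended pool that I/O could block
 * indefinitely, which is why the feature nvlist is returned as-is,
 * possibly empty, in that case.)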
*/ 3226253993Smav if (spa_suspended(spa)) 3227253993Smav goto out; 3228253993Smav 3229236884Smm if (spa->spa_feat_for_read_obj != 0) { 3230236884Smm for (zap_cursor_init(&zc, spa->spa_meta_objset, 3231236884Smm spa->spa_feat_for_read_obj); 3232236884Smm zap_cursor_retrieve(&zc, &za) == 0; 3233236884Smm zap_cursor_advance(&zc)) { 3234236884Smm ASSERT(za.za_integer_length == sizeof (uint64_t) && 3235236884Smm za.za_num_integers == 1); 3236236884Smm VERIFY3U(0, ==, nvlist_add_uint64(features, za.za_name, 3237236884Smm za.za_first_integer)); 3238236884Smm } 3239236884Smm zap_cursor_fini(&zc); 3240236884Smm } 3241236884Smm 3242236884Smm if (spa->spa_feat_for_write_obj != 0) { 3243236884Smm for (zap_cursor_init(&zc, spa->spa_meta_objset, 3244236884Smm spa->spa_feat_for_write_obj); 3245236884Smm zap_cursor_retrieve(&zc, &za) == 0; 3246236884Smm zap_cursor_advance(&zc)) { 3247236884Smm ASSERT(za.za_integer_length == sizeof (uint64_t) && 3248236884Smm za.za_num_integers == 1); 3249236884Smm VERIFY3U(0, ==, nvlist_add_uint64(features, za.za_name, 3250236884Smm za.za_first_integer)); 3251236884Smm } 3252236884Smm zap_cursor_fini(&zc); 3253236884Smm } 3254236884Smm 3255253993Smavout: 3256236884Smm VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_FEATURE_STATS, 3257236884Smm features) == 0); 3258236884Smm nvlist_free(features); 3259236884Smm} 3260236884Smm 3261168404Spjdint 3262236884Smmspa_get_stats(const char *name, nvlist_t **config, 3263236884Smm char *altroot, size_t buflen) 3264168404Spjd{ 3265168404Spjd int error; 3266168404Spjd spa_t *spa; 3267168404Spjd 3268168404Spjd *config = NULL; 3269219089Spjd error = spa_open_common(name, &spa, FTAG, NULL, config); 3270168404Spjd 3271209962Smm if (spa != NULL) { 3272209962Smm /* 3273209962Smm * This still leaves a window of inconsistency where the spares 3274209962Smm * or l2cache devices could change and the config would be 3275209962Smm * self-inconsistent. 3276209962Smm */ 3277209962Smm spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); 3278168404Spjd 3279209962Smm if (*config != NULL) { 3280219089Spjd uint64_t loadtimes[2]; 3281219089Spjd 3282219089Spjd loadtimes[0] = spa->spa_loaded_ts.tv_sec; 3283219089Spjd loadtimes[1] = spa->spa_loaded_ts.tv_nsec; 3284219089Spjd VERIFY(nvlist_add_uint64_array(*config, 3285219089Spjd ZPOOL_CONFIG_LOADED_TIME, loadtimes, 2) == 0); 3286219089Spjd 3287185029Spjd VERIFY(nvlist_add_uint64(*config, 3288209962Smm ZPOOL_CONFIG_ERRCOUNT, 3289209962Smm spa_get_errlog_size(spa)) == 0); 3290185029Spjd 3291209962Smm if (spa_suspended(spa)) 3292209962Smm VERIFY(nvlist_add_uint64(*config, 3293209962Smm ZPOOL_CONFIG_SUSPENDED, 3294209962Smm spa->spa_failmode) == 0); 3295209962Smm 3296209962Smm spa_add_spares(spa, *config); 3297209962Smm spa_add_l2cache(spa, *config); 3298236884Smm spa_add_feature_stats(spa, *config); 3299209962Smm } 3300168404Spjd } 3301168404Spjd 3302168404Spjd /* 3303168404Spjd * We want to get the alternate root even for faulted pools, so we cheat 3304168404Spjd * and call spa_lookup() directly. 
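 *
 * A minimal sketch of how a caller consumes spa_get_stats(); the pool
 * name "tank" is hypothetical and error handling is abbreviated:
 *
 *	nvlist_t *config;
 *	char altroot[MAXPATHLEN];
 *	uint64_t errcount;
 *
 *	if (spa_get_stats("tank", &config, altroot,
 *	    sizeof (altroot)) == 0 && config != NULL) {
 *		(void) nvlist_lookup_uint64(config,
 *		    ZPOOL_CONFIG_ERRCOUNT, &errcount);
 *		nvlist_free(config);
 *	}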
3305168404Spjd */ 3306168404Spjd if (altroot) { 3307168404Spjd if (spa == NULL) { 3308168404Spjd mutex_enter(&spa_namespace_lock); 3309168404Spjd spa = spa_lookup(name); 3310168404Spjd if (spa) 3311168404Spjd spa_altroot(spa, altroot, buflen); 3312168404Spjd else 3313168404Spjd altroot[0] = '\0'; 3314168404Spjd spa = NULL; 3315168404Spjd mutex_exit(&spa_namespace_lock); 3316168404Spjd } else { 3317168404Spjd spa_altroot(spa, altroot, buflen); 3318168404Spjd } 3319168404Spjd } 3320168404Spjd 3321209962Smm if (spa != NULL) { 3322209962Smm spa_config_exit(spa, SCL_CONFIG, FTAG); 3323168404Spjd spa_close(spa, FTAG); 3324209962Smm } 3325168404Spjd 3326168404Spjd return (error); 3327168404Spjd} 3328168404Spjd 3329168404Spjd/* 3330185029Spjd * Validate that the auxiliary device array is well formed. We must have an 3331185029Spjd * array of nvlists, each which describes a valid leaf vdev. If this is an 3332185029Spjd * import (mode is VDEV_ALLOC_SPARE), then we allow corrupted spares to be 3333185029Spjd * specified, as long as they are well-formed. 3334168404Spjd */ 3335168404Spjdstatic int 3336185029Spjdspa_validate_aux_devs(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode, 3337185029Spjd spa_aux_vdev_t *sav, const char *config, uint64_t version, 3338185029Spjd vdev_labeltype_t label) 3339168404Spjd{ 3340185029Spjd nvlist_t **dev; 3341185029Spjd uint_t i, ndev; 3342168404Spjd vdev_t *vd; 3343168404Spjd int error; 3344168404Spjd 3345185029Spjd ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL); 3346185029Spjd 3347168404Spjd /* 3348185029Spjd * It's acceptable to have no devs specified. 3349168404Spjd */ 3350185029Spjd if (nvlist_lookup_nvlist_array(nvroot, config, &dev, &ndev) != 0) 3351168404Spjd return (0); 3352168404Spjd 3353185029Spjd if (ndev == 0) 3354249195Smm return (SET_ERROR(EINVAL)); 3355168404Spjd 3356168404Spjd /* 3357185029Spjd * Make sure the pool is formatted with a version that supports this 3358185029Spjd * device type. 3359168404Spjd */ 3360185029Spjd if (spa_version(spa) < version) 3361249195Smm return (SET_ERROR(ENOTSUP)); 3362168404Spjd 3363168404Spjd /* 3364185029Spjd * Set the pending device list so we correctly handle device in-use 3365168404Spjd * checking. 3366168404Spjd */ 3367185029Spjd sav->sav_pending = dev; 3368185029Spjd sav->sav_npending = ndev; 3369168404Spjd 3370185029Spjd for (i = 0; i < ndev; i++) { 3371185029Spjd if ((error = spa_config_parse(spa, &vd, dev[i], NULL, 0, 3372168404Spjd mode)) != 0) 3373168404Spjd goto out; 3374168404Spjd 3375168404Spjd if (!vd->vdev_ops->vdev_op_leaf) { 3376168404Spjd vdev_free(vd); 3377249195Smm error = SET_ERROR(EINVAL); 3378168404Spjd goto out; 3379168404Spjd } 3380168404Spjd 3381185029Spjd /* 3382185029Spjd * The L2ARC currently only supports disk devices in 3383185029Spjd * kernel context. For user-level testing, we allow it. 
3384185029Spjd */ 3385185029Spjd#ifdef _KERNEL 3386185029Spjd if ((strcmp(config, ZPOOL_CONFIG_L2CACHE) == 0) && 3387185029Spjd strcmp(vd->vdev_ops->vdev_op_type, VDEV_TYPE_DISK) != 0) { 3388249195Smm error = SET_ERROR(ENOTBLK); 3389230514Smm vdev_free(vd); 3390185029Spjd goto out; 3391185029Spjd } 3392185029Spjd#endif 3393168404Spjd vd->vdev_top = vd; 3394168404Spjd 3395168404Spjd if ((error = vdev_open(vd)) == 0 && 3396185029Spjd (error = vdev_label_init(vd, crtxg, label)) == 0) { 3397185029Spjd VERIFY(nvlist_add_uint64(dev[i], ZPOOL_CONFIG_GUID, 3398168404Spjd vd->vdev_guid) == 0); 3399168404Spjd } 3400168404Spjd 3401168404Spjd vdev_free(vd); 3402168404Spjd 3403185029Spjd if (error && 3404185029Spjd (mode != VDEV_ALLOC_SPARE && mode != VDEV_ALLOC_L2CACHE)) 3405168404Spjd goto out; 3406168404Spjd else 3407168404Spjd error = 0; 3408168404Spjd } 3409168404Spjd 3410168404Spjdout: 3411185029Spjd sav->sav_pending = NULL; 3412185029Spjd sav->sav_npending = 0; 3413168404Spjd return (error); 3414168404Spjd} 3415168404Spjd 3416185029Spjdstatic int 3417185029Spjdspa_validate_aux(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode) 3418185029Spjd{ 3419185029Spjd int error; 3420185029Spjd 3421185029Spjd ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL); 3422185029Spjd 3423185029Spjd if ((error = spa_validate_aux_devs(spa, nvroot, crtxg, mode, 3424185029Spjd &spa->spa_spares, ZPOOL_CONFIG_SPARES, SPA_VERSION_SPARES, 3425185029Spjd VDEV_LABEL_SPARE)) != 0) { 3426185029Spjd return (error); 3427185029Spjd } 3428185029Spjd 3429185029Spjd return (spa_validate_aux_devs(spa, nvroot, crtxg, mode, 3430185029Spjd &spa->spa_l2cache, ZPOOL_CONFIG_L2CACHE, SPA_VERSION_L2CACHE, 3431185029Spjd VDEV_LABEL_L2CACHE)); 3432185029Spjd} 3433185029Spjd 3434185029Spjdstatic void 3435185029Spjdspa_set_aux_vdevs(spa_aux_vdev_t *sav, nvlist_t **devs, int ndevs, 3436185029Spjd const char *config) 3437185029Spjd{ 3438185029Spjd int i; 3439185029Spjd 3440185029Spjd if (sav->sav_config != NULL) { 3441185029Spjd nvlist_t **olddevs; 3442185029Spjd uint_t oldndevs; 3443185029Spjd nvlist_t **newdevs; 3444185029Spjd 3445185029Spjd /* 3446185029Spjd * Generate new dev list by concatentating with the 3447185029Spjd * current dev list. 3448185029Spjd */ 3449185029Spjd VERIFY(nvlist_lookup_nvlist_array(sav->sav_config, config, 3450185029Spjd &olddevs, &oldndevs) == 0); 3451185029Spjd 3452185029Spjd newdevs = kmem_alloc(sizeof (void *) * 3453185029Spjd (ndevs + oldndevs), KM_SLEEP); 3454185029Spjd for (i = 0; i < oldndevs; i++) 3455185029Spjd VERIFY(nvlist_dup(olddevs[i], &newdevs[i], 3456185029Spjd KM_SLEEP) == 0); 3457185029Spjd for (i = 0; i < ndevs; i++) 3458185029Spjd VERIFY(nvlist_dup(devs[i], &newdevs[i + oldndevs], 3459185029Spjd KM_SLEEP) == 0); 3460185029Spjd 3461185029Spjd VERIFY(nvlist_remove(sav->sav_config, config, 3462185029Spjd DATA_TYPE_NVLIST_ARRAY) == 0); 3463185029Spjd 3464185029Spjd VERIFY(nvlist_add_nvlist_array(sav->sav_config, 3465185029Spjd config, newdevs, ndevs + oldndevs) == 0); 3466185029Spjd for (i = 0; i < oldndevs + ndevs; i++) 3467185029Spjd nvlist_free(newdevs[i]); 3468185029Spjd kmem_free(newdevs, (oldndevs + ndevs) * sizeof (void *)); 3469185029Spjd } else { 3470185029Spjd /* 3471185029Spjd * Generate a new dev list. 
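 *
 * For reference, the nvroot consumed by spa_validate_aux_devs() above
 * carries each aux class as an array of leaf vdev nvlists.  A minimal
 * sketch of a single spare entry (the device path is hypothetical):
 *
 *	nvlist_t *spare, *nvroot;
 *
 *	VERIFY(nvlist_alloc(&spare, NV_UNIQUE_NAME, KM_SLEEP) == 0);
 *	VERIFY(nvlist_add_string(spare, ZPOOL_CONFIG_TYPE,
 *	    VDEV_TYPE_DISK) == 0);
 *	VERIFY(nvlist_add_string(spare, ZPOOL_CONFIG_PATH,
 *	    "/dev/da1") == 0);
 *	VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0);
 *	VERIFY(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
 *	    &spare, 1) == 0);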
3472185029Spjd */ 3473185029Spjd VERIFY(nvlist_alloc(&sav->sav_config, NV_UNIQUE_NAME, 3474185029Spjd KM_SLEEP) == 0); 3475185029Spjd VERIFY(nvlist_add_nvlist_array(sav->sav_config, config, 3476185029Spjd devs, ndevs) == 0); 3477185029Spjd } 3478185029Spjd} 3479185029Spjd 3480168404Spjd/* 3481185029Spjd * Stop and drop level 2 ARC devices 3482185029Spjd */ 3483185029Spjdvoid 3484185029Spjdspa_l2cache_drop(spa_t *spa) 3485185029Spjd{ 3486185029Spjd vdev_t *vd; 3487185029Spjd int i; 3488185029Spjd spa_aux_vdev_t *sav = &spa->spa_l2cache; 3489185029Spjd 3490185029Spjd for (i = 0; i < sav->sav_count; i++) { 3491185029Spjd uint64_t pool; 3492185029Spjd 3493185029Spjd vd = sav->sav_vdevs[i]; 3494185029Spjd ASSERT(vd != NULL); 3495185029Spjd 3496209962Smm if (spa_l2cache_exists(vd->vdev_guid, &pool) && 3497209962Smm pool != 0ULL && l2arc_vdev_present(vd)) 3498185029Spjd l2arc_remove_vdev(vd); 3499185029Spjd } 3500185029Spjd} 3501185029Spjd 3502185029Spjd/* 3503168404Spjd * Pool Creation 3504168404Spjd */ 3505168404Spjdint 3506185029Spjdspa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props, 3507248571Smm nvlist_t *zplprops) 3508168404Spjd{ 3509168404Spjd spa_t *spa; 3510185029Spjd char *altroot = NULL; 3511168404Spjd vdev_t *rvd; 3512168404Spjd dsl_pool_t *dp; 3513168404Spjd dmu_tx_t *tx; 3514219089Spjd int error = 0; 3515168404Spjd uint64_t txg = TXG_INITIAL; 3516185029Spjd nvlist_t **spares, **l2cache; 3517185029Spjd uint_t nspares, nl2cache; 3518219089Spjd uint64_t version, obj; 3519236884Smm boolean_t has_features; 3520168404Spjd 3521168404Spjd /* 3522168404Spjd * If this pool already exists, return failure. 3523168404Spjd */ 3524168404Spjd mutex_enter(&spa_namespace_lock); 3525168404Spjd if (spa_lookup(pool) != NULL) { 3526168404Spjd mutex_exit(&spa_namespace_lock); 3527249195Smm return (SET_ERROR(EEXIST)); 3528168404Spjd } 3529168404Spjd 3530168404Spjd /* 3531168404Spjd * Allocate a new spa_t structure. 3532168404Spjd */ 3533185029Spjd (void) nvlist_lookup_string(props, 3534185029Spjd zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot); 3535219089Spjd spa = spa_add(pool, NULL, altroot); 3536209962Smm spa_activate(spa, spa_mode_global); 3537168404Spjd 3538185029Spjd if (props && (error = spa_prop_validate(spa, props))) { 3539185029Spjd spa_deactivate(spa); 3540185029Spjd spa_remove(spa); 3541185029Spjd mutex_exit(&spa_namespace_lock); 3542185029Spjd return (error); 3543185029Spjd } 3544185029Spjd 3545236884Smm has_features = B_FALSE; 3546236884Smm for (nvpair_t *elem = nvlist_next_nvpair(props, NULL); 3547236884Smm elem != NULL; elem = nvlist_next_nvpair(props, elem)) { 3548236884Smm if (zpool_prop_feature(nvpair_name(elem))) 3549236884Smm has_features = B_TRUE; 3550236884Smm } 3551236884Smm 3552236884Smm if (has_features || nvlist_lookup_uint64(props, 3553236884Smm zpool_prop_to_name(ZPOOL_PROP_VERSION), &version) != 0) { 3554185029Spjd version = SPA_VERSION; 3555236884Smm } 3556236884Smm ASSERT(SPA_VERSION_IS_SUPPORTED(version)); 3557219089Spjd 3558219089Spjd spa->spa_first_txg = txg; 3559219089Spjd spa->spa_uberblock.ub_txg = txg - 1; 3560185029Spjd spa->spa_uberblock.ub_version = version; 3561168404Spjd spa->spa_ubsync = spa->spa_uberblock; 3562168404Spjd 3563168404Spjd /* 3564209962Smm * Create "The Godfather" zio to hold all async IOs 3565209962Smm */ 3566209962Smm spa->spa_async_zio_root = zio_root(spa, NULL, NULL, 3567209962Smm ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | ZIO_FLAG_GODFATHER); 3568209962Smm 3569209962Smm /* 3570168404Spjd * Create the root vdev. 
3571168404Spjd */ 3572185029Spjd spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 3573168404Spjd 3574168404Spjd error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, VDEV_ALLOC_ADD); 3575168404Spjd 3576168404Spjd ASSERT(error != 0 || rvd != NULL); 3577168404Spjd ASSERT(error != 0 || spa->spa_root_vdev == rvd); 3578168404Spjd 3579185029Spjd if (error == 0 && !zfs_allocatable_devs(nvroot)) 3580249195Smm error = SET_ERROR(EINVAL); 3581168404Spjd 3582168404Spjd if (error == 0 && 3583168404Spjd (error = vdev_create(rvd, txg, B_FALSE)) == 0 && 3584185029Spjd (error = spa_validate_aux(spa, nvroot, txg, 3585168404Spjd VDEV_ALLOC_ADD)) == 0) { 3586219089Spjd for (int c = 0; c < rvd->vdev_children; c++) { 3587254591Sgibbs vdev_ashift_optimize(rvd->vdev_child[c]); 3588219089Spjd vdev_metaslab_set_size(rvd->vdev_child[c]); 3589219089Spjd vdev_expand(rvd->vdev_child[c], txg); 3590219089Spjd } 3591168404Spjd } 3592168404Spjd 3593185029Spjd spa_config_exit(spa, SCL_ALL, FTAG); 3594168404Spjd 3595168404Spjd if (error != 0) { 3596168404Spjd spa_unload(spa); 3597168404Spjd spa_deactivate(spa); 3598168404Spjd spa_remove(spa); 3599168404Spjd mutex_exit(&spa_namespace_lock); 3600168404Spjd return (error); 3601168404Spjd } 3602168404Spjd 3603168404Spjd /* 3604168404Spjd * Get the list of spares, if specified. 3605168404Spjd */ 3606168404Spjd if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, 3607168404Spjd &spares, &nspares) == 0) { 3608185029Spjd VERIFY(nvlist_alloc(&spa->spa_spares.sav_config, NV_UNIQUE_NAME, 3609168404Spjd KM_SLEEP) == 0); 3610185029Spjd VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config, 3611168404Spjd ZPOOL_CONFIG_SPARES, spares, nspares) == 0); 3612185029Spjd spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 3613168404Spjd spa_load_spares(spa); 3614185029Spjd spa_config_exit(spa, SCL_ALL, FTAG); 3615185029Spjd spa->spa_spares.sav_sync = B_TRUE; 3616168404Spjd } 3617168404Spjd 3618185029Spjd /* 3619185029Spjd * Get the list of level 2 cache devices, if specified. 3620185029Spjd */ 3621185029Spjd if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, 3622185029Spjd &l2cache, &nl2cache) == 0) { 3623185029Spjd VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config, 3624185029Spjd NV_UNIQUE_NAME, KM_SLEEP) == 0); 3625185029Spjd VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config, 3626185029Spjd ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0); 3627185029Spjd spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 3628185029Spjd spa_load_l2cache(spa); 3629185029Spjd spa_config_exit(spa, SCL_ALL, FTAG); 3630185029Spjd spa->spa_l2cache.sav_sync = B_TRUE; 3631185029Spjd } 3632185029Spjd 3633236884Smm spa->spa_is_initializing = B_TRUE; 3634185029Spjd spa->spa_dsl_pool = dp = dsl_pool_create(spa, zplprops, txg); 3635168404Spjd spa->spa_meta_objset = dp->dp_meta_objset; 3636236884Smm spa->spa_is_initializing = B_FALSE; 3637168404Spjd 3638219089Spjd /* 3639219089Spjd * Create DDTs (dedup tables). 3640219089Spjd */ 3641219089Spjd ddt_create(spa); 3642219089Spjd 3643219089Spjd spa_update_dspace(spa); 3644219089Spjd 3645168404Spjd tx = dmu_tx_create_assigned(dp, txg); 3646168404Spjd 3647168404Spjd /* 3648168404Spjd * Create the pool config object. 
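 *
 * The object allocated below, like the others hung off the MOS directory
 * in this function, is found again by name at pool-load time, roughly:
 *
 *	uint64_t obj;
 *
 *	error = zap_lookup(spa->spa_meta_objset,
 *	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CONFIG,
 *	    sizeof (uint64_t), 1, &obj);
 *
 * (Illustrative only; spa_load() performs the real lookups.)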
3649168404Spjd */ 3650168404Spjd spa->spa_config_object = dmu_object_alloc(spa->spa_meta_objset, 3651185029Spjd DMU_OT_PACKED_NVLIST, SPA_CONFIG_BLOCKSIZE, 3652168404Spjd DMU_OT_PACKED_NVLIST_SIZE, sizeof (uint64_t), tx); 3653168404Spjd 3654168404Spjd if (zap_add(spa->spa_meta_objset, 3655168404Spjd DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CONFIG, 3656168404Spjd sizeof (uint64_t), 1, &spa->spa_config_object, tx) != 0) { 3657168404Spjd cmn_err(CE_PANIC, "failed to add pool config"); 3658168404Spjd } 3659168404Spjd 3660236884Smm if (spa_version(spa) >= SPA_VERSION_FEATURES) 3661236884Smm spa_feature_create_zap_objects(spa, tx); 3662236884Smm 3663219089Spjd if (zap_add(spa->spa_meta_objset, 3664219089Spjd DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CREATION_VERSION, 3665219089Spjd sizeof (uint64_t), 1, &version, tx) != 0) { 3666219089Spjd cmn_err(CE_PANIC, "failed to add pool version"); 3667219089Spjd } 3668219089Spjd 3669185029Spjd /* Newly created pools with the right version are always deflated. */ 3670185029Spjd if (version >= SPA_VERSION_RAIDZ_DEFLATE) { 3671185029Spjd spa->spa_deflate = TRUE; 3672185029Spjd if (zap_add(spa->spa_meta_objset, 3673185029Spjd DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE, 3674185029Spjd sizeof (uint64_t), 1, &spa->spa_deflate, tx) != 0) { 3675185029Spjd cmn_err(CE_PANIC, "failed to add deflate"); 3676185029Spjd } 3677168404Spjd } 3678168404Spjd 3679168404Spjd /* 3680219089Spjd * Create the deferred-free bpobj. Turn off compression 3681168404Spjd * because sync-to-convergence takes longer if the blocksize 3682168404Spjd * keeps changing. 3683168404Spjd */ 3684219089Spjd obj = bpobj_alloc(spa->spa_meta_objset, 1 << 14, tx); 3685219089Spjd dmu_object_set_compress(spa->spa_meta_objset, obj, 3686168404Spjd ZIO_COMPRESS_OFF, tx); 3687168404Spjd if (zap_add(spa->spa_meta_objset, 3688219089Spjd DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SYNC_BPOBJ, 3689219089Spjd sizeof (uint64_t), 1, &obj, tx) != 0) { 3690219089Spjd cmn_err(CE_PANIC, "failed to add bpobj"); 3691168404Spjd } 3692219089Spjd VERIFY3U(0, ==, bpobj_open(&spa->spa_deferred_bpobj, 3693219089Spjd spa->spa_meta_objset, obj)); 3694168404Spjd 3695168404Spjd /* 3696168404Spjd * Create the pool's history object. 3697168404Spjd */ 3698185029Spjd if (version >= SPA_VERSION_ZPOOL_HISTORY) 3699185029Spjd spa_history_create_obj(spa, tx); 3700168404Spjd 3701185029Spjd /* 3702185029Spjd * Set pool properties. 3703185029Spjd */ 3704185029Spjd spa->spa_bootfs = zpool_prop_default_numeric(ZPOOL_PROP_BOOTFS); 3705185029Spjd spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION); 3706185029Spjd spa->spa_failmode = zpool_prop_default_numeric(ZPOOL_PROP_FAILUREMODE); 3707219089Spjd spa->spa_autoexpand = zpool_prop_default_numeric(ZPOOL_PROP_AUTOEXPAND); 3708219089Spjd 3709209962Smm if (props != NULL) { 3710209962Smm spa_configfile_set(spa, props, B_FALSE); 3711248571Smm spa_sync_props(props, tx); 3712209962Smm } 3713185029Spjd 3714168404Spjd dmu_tx_commit(tx); 3715168404Spjd 3716168404Spjd spa->spa_sync_on = B_TRUE; 3717168404Spjd txg_sync_start(spa->spa_dsl_pool); 3718168404Spjd 3719168404Spjd /* 3720168404Spjd * We explicitly wait for the first transaction to complete so that our 3721168404Spjd * bean counters are appropriately updated. 
3722168404Spjd */ 3723168404Spjd txg_wait_synced(spa->spa_dsl_pool, txg); 3724168404Spjd 3725185029Spjd spa_config_sync(spa, B_FALSE, B_TRUE); 3726168404Spjd 3727248571Smm spa_history_log_version(spa, "create"); 3728185029Spjd 3729208442Smm spa->spa_minref = refcount_count(&spa->spa_refcount); 3730208442Smm 3731168404Spjd mutex_exit(&spa_namespace_lock); 3732168404Spjd 3733168404Spjd return (0); 3734168404Spjd} 3735168404Spjd 3736241286Savg#ifdef _KERNEL 3737219089Spjd#if defined(sun) 3738185029Spjd/* 3739219089Spjd * Get the root pool information from the root disk, then import the root pool 3740219089Spjd * during the system boot up time. 3741185029Spjd */ 3742219089Spjdextern int vdev_disk_read_rootlabel(char *, char *, nvlist_t **); 3743219089Spjd 3744219089Spjdstatic nvlist_t * 3745219089Spjdspa_generate_rootconf(char *devpath, char *devid, uint64_t *guid) 3746185029Spjd{ 3747219089Spjd nvlist_t *config; 3748185029Spjd nvlist_t *nvtop, *nvroot; 3749185029Spjd uint64_t pgid; 3750185029Spjd 3751219089Spjd if (vdev_disk_read_rootlabel(devpath, devid, &config) != 0) 3752219089Spjd return (NULL); 3753219089Spjd 3754168404Spjd /* 3755185029Spjd * Add this top-level vdev to the child array. 3756168404Spjd */ 3757219089Spjd VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 3758219089Spjd &nvtop) == 0); 3759219089Spjd VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, 3760219089Spjd &pgid) == 0); 3761219089Spjd VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID, guid) == 0); 3762168404Spjd 3763185029Spjd /* 3764185029Spjd * Put this pool's top-level vdevs into a root vdev. 3765185029Spjd */ 3766185029Spjd VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0); 3767219089Spjd VERIFY(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE, 3768219089Spjd VDEV_TYPE_ROOT) == 0); 3769185029Spjd VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) == 0); 3770185029Spjd VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, pgid) == 0); 3771185029Spjd VERIFY(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, 3772185029Spjd &nvtop, 1) == 0); 3773168404Spjd 3774168404Spjd /* 3775185029Spjd * Replace the existing vdev_tree with the new root vdev in 3776185029Spjd * this pool's configuration (remove the old, add the new). 3777168404Spjd */ 3778185029Spjd VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, nvroot) == 0); 3779185029Spjd nvlist_free(nvroot); 3780219089Spjd return (config); 3781185029Spjd} 3782168404Spjd 3783185029Spjd/* 3784219089Spjd * Walk the vdev tree and see if we can find a device with "better" 3785219089Spjd * configuration. A configuration is "better" if the label on that 3786219089Spjd * device has a more recent txg. 3787185029Spjd */ 3788219089Spjdstatic void 3789219089Spjdspa_alt_rootvdev(vdev_t *vd, vdev_t **avd, uint64_t *txg) 3790185029Spjd{ 3791219089Spjd for (int c = 0; c < vd->vdev_children; c++) 3792219089Spjd spa_alt_rootvdev(vd->vdev_child[c], avd, txg); 3793185029Spjd 3794219089Spjd if (vd->vdev_ops->vdev_op_leaf) { 3795219089Spjd nvlist_t *label; 3796219089Spjd uint64_t label_txg; 3797185029Spjd 3798219089Spjd if (vdev_disk_read_rootlabel(vd->vdev_physpath, vd->vdev_devid, 3799219089Spjd &label) != 0) 3800219089Spjd return; 3801185029Spjd 3802219089Spjd VERIFY(nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_TXG, 3803219089Spjd &label_txg) == 0); 3804168404Spjd 3805219089Spjd /* 3806219089Spjd * Do we have a better boot device? 
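 *
 * Referring back to spa_create() above: a minimal caller-side sketch.
 * The pool name and device path are hypothetical, and props/zplprops
 * are normally assembled by the ioctl layer (zfs_ioc_pool_create)
 * rather than built here:
 *
 *	nvlist_t *disk, *nvroot;
 *	int error;
 *
 *	VERIFY(nvlist_alloc(&disk, NV_UNIQUE_NAME, KM_SLEEP) == 0);
 *	VERIFY(nvlist_add_string(disk, ZPOOL_CONFIG_TYPE,
 *	    VDEV_TYPE_DISK) == 0);
 *	VERIFY(nvlist_add_string(disk, ZPOOL_CONFIG_PATH,
 *	    "/dev/da0") == 0);
 *	VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0);
 *	VERIFY(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
 *	    VDEV_TYPE_ROOT) == 0);
 *	VERIFY(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
 *	    &disk, 1) == 0);
 *	error = spa_create("tank", nvroot, props, zplprops);
 *	nvlist_free(nvroot);
 *	nvlist_free(disk);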
3807219089Spjd */ 3808219089Spjd if (label_txg > *txg) { 3809219089Spjd *txg = label_txg; 3810219089Spjd *avd = vd; 3811185029Spjd } 3812219089Spjd nvlist_free(label); 3813185029Spjd } 3814185029Spjd} 3815185029Spjd 3816185029Spjd/* 3817185029Spjd * Import a root pool. 3818185029Spjd * 3819185029Spjd * For x86. devpath_list will consist of devid and/or physpath name of 3820185029Spjd * the vdev (e.g. "id1,sd@SSEAGATE..." or "/pci@1f,0/ide@d/disk@0,0:a"). 3821185029Spjd * The GRUB "findroot" command will return the vdev we should boot. 3822185029Spjd * 3823185029Spjd * For Sparc, devpath_list consists the physpath name of the booting device 3824185029Spjd * no matter the rootpool is a single device pool or a mirrored pool. 3825185029Spjd * e.g. 3826185029Spjd * "/pci@1f,0/ide@d/disk@0,0:a" 3827185029Spjd */ 3828185029Spjdint 3829185029Spjdspa_import_rootpool(char *devpath, char *devid) 3830185029Spjd{ 3831219089Spjd spa_t *spa; 3832219089Spjd vdev_t *rvd, *bvd, *avd = NULL; 3833219089Spjd nvlist_t *config, *nvtop; 3834219089Spjd uint64_t guid, txg; 3835185029Spjd char *pname; 3836185029Spjd int error; 3837185029Spjd 3838185029Spjd /* 3839219089Spjd * Read the label from the boot device and generate a configuration. 3840185029Spjd */ 3841219089Spjd config = spa_generate_rootconf(devpath, devid, &guid); 3842219089Spjd#if defined(_OBP) && defined(_KERNEL) 3843219089Spjd if (config == NULL) { 3844219089Spjd if (strstr(devpath, "/iscsi/ssd") != NULL) { 3845219089Spjd /* iscsi boot */ 3846219089Spjd get_iscsi_bootpath_phy(devpath); 3847219089Spjd config = spa_generate_rootconf(devpath, devid, &guid); 3848219089Spjd } 3849219089Spjd } 3850219089Spjd#endif 3851219089Spjd if (config == NULL) { 3852236884Smm cmn_err(CE_NOTE, "Cannot read the pool label from '%s'", 3853219089Spjd devpath); 3854249195Smm return (SET_ERROR(EIO)); 3855219089Spjd } 3856185029Spjd 3857219089Spjd VERIFY(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME, 3858219089Spjd &pname) == 0); 3859219089Spjd VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG, &txg) == 0); 3860185029Spjd 3861209962Smm mutex_enter(&spa_namespace_lock); 3862209962Smm if ((spa = spa_lookup(pname)) != NULL) { 3863209962Smm /* 3864209962Smm * Remove the existing root pool from the namespace so that we 3865209962Smm * can replace it with the correct config we just read in. 3866209962Smm */ 3867209962Smm spa_remove(spa); 3868209962Smm } 3869185029Spjd 3870219089Spjd spa = spa_add(pname, config, NULL); 3871209962Smm spa->spa_is_root = B_TRUE; 3872219089Spjd spa->spa_import_flags = ZFS_IMPORT_VERBATIM; 3873209962Smm 3874219089Spjd /* 3875219089Spjd * Build up a vdev tree based on the boot device's label config. 3876219089Spjd */ 3877219089Spjd VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 3878219089Spjd &nvtop) == 0); 3879219089Spjd spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 3880219089Spjd error = spa_config_parse(spa, &rvd, nvtop, NULL, 0, 3881219089Spjd VDEV_ALLOC_ROOTPOOL); 3882219089Spjd spa_config_exit(spa, SCL_ALL, FTAG); 3883219089Spjd if (error) { 3884209962Smm mutex_exit(&spa_namespace_lock); 3885219089Spjd nvlist_free(config); 3886219089Spjd cmn_err(CE_NOTE, "Can not parse the config for pool '%s'", 3887219089Spjd pname); 3888219089Spjd return (error); 3889209962Smm } 3890209962Smm 3891219089Spjd /* 3892219089Spjd * Get the boot vdev. 
3893219089Spjd */ 3894219089Spjd if ((bvd = vdev_lookup_by_guid(rvd, guid)) == NULL) { 3895219089Spjd cmn_err(CE_NOTE, "Can not find the boot vdev for guid %llu", 3896219089Spjd (u_longlong_t)guid); 3897249195Smm error = SET_ERROR(ENOENT); 3898219089Spjd goto out; 3899219089Spjd } 3900209962Smm 3901219089Spjd /* 3902219089Spjd * Determine if there is a better boot device. 3903219089Spjd */ 3904219089Spjd avd = bvd; 3905219089Spjd spa_alt_rootvdev(rvd, &avd, &txg); 3906219089Spjd if (avd != bvd) { 3907219089Spjd cmn_err(CE_NOTE, "The boot device is 'degraded'. Please " 3908219089Spjd "try booting from '%s'", avd->vdev_path); 3909249195Smm error = SET_ERROR(EINVAL); 3910219089Spjd goto out; 3911219089Spjd } 3912209962Smm 3913219089Spjd /* 3914219089Spjd * If the boot device is part of a spare vdev then ensure that 3915219089Spjd * we're booting off the active spare. 3916219089Spjd */ 3917219089Spjd if (bvd->vdev_parent->vdev_ops == &vdev_spare_ops && 3918219089Spjd !bvd->vdev_isspare) { 3919219089Spjd cmn_err(CE_NOTE, "The boot device is currently spared. Please " 3920219089Spjd "try booting from '%s'", 3921219089Spjd bvd->vdev_parent-> 3922219089Spjd vdev_child[bvd->vdev_parent->vdev_children - 1]->vdev_path); 3923249195Smm error = SET_ERROR(EINVAL); 3924219089Spjd goto out; 3925219089Spjd } 3926209962Smm 3927219089Spjd error = 0; 3928219089Spjdout: 3929219089Spjd spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 3930219089Spjd vdev_free(rvd); 3931219089Spjd spa_config_exit(spa, SCL_ALL, FTAG); 3932209962Smm mutex_exit(&spa_namespace_lock); 3933209962Smm 3934219089Spjd nvlist_free(config); 3935219089Spjd return (error); 3936185029Spjd} 3937185029Spjd 3938241286Savg#else 3939241286Savg 3940243502Savgextern int vdev_geom_read_pool_label(const char *name, nvlist_t ***configs, 3941243502Savg uint64_t *count); 3942241286Savg 3943241286Savgstatic nvlist_t * 3944241286Savgspa_generate_rootconf(const char *name) 3945241286Savg{ 3946243502Savg nvlist_t **configs, **tops; 3947241286Savg nvlist_t *config; 3948243502Savg nvlist_t *best_cfg, *nvtop, *nvroot; 3949243502Savg uint64_t *holes; 3950243502Savg uint64_t best_txg; 3951243213Savg uint64_t nchildren; 3952241286Savg uint64_t pgid; 3953243502Savg uint64_t count; 3954243502Savg uint64_t i; 3955243502Savg uint_t nholes; 3956241286Savg 3957243502Savg if (vdev_geom_read_pool_label(name, &configs, &count) != 0) 3958241286Savg return (NULL); 3959241286Savg 3960243502Savg ASSERT3U(count, !=, 0); 3961243502Savg best_txg = 0; 3962243502Savg for (i = 0; i < count; i++) { 3963243502Savg uint64_t txg; 3964243502Savg 3965243502Savg VERIFY(nvlist_lookup_uint64(configs[i], ZPOOL_CONFIG_POOL_TXG, 3966243502Savg &txg) == 0); 3967243502Savg if (txg > best_txg) { 3968243502Savg best_txg = txg; 3969243502Savg best_cfg = configs[i]; 3970243502Savg } 3971243502Savg } 3972243502Savg 3973245945Savg nchildren = 1; 3974245945Savg nvlist_lookup_uint64(best_cfg, ZPOOL_CONFIG_VDEV_CHILDREN, &nchildren); 3975243502Savg holes = NULL; 3976243502Savg nvlist_lookup_uint64_array(best_cfg, ZPOOL_CONFIG_HOLE_ARRAY, 3977243502Savg &holes, &nholes); 3978243502Savg 3979244635Savg tops = kmem_zalloc(nchildren * sizeof(void *), KM_SLEEP); 3980243502Savg for (i = 0; i < nchildren; i++) { 3981243502Savg if (i >= count) 3982243502Savg break; 3983243502Savg if (configs[i] == NULL) 3984243502Savg continue; 3985243502Savg VERIFY(nvlist_lookup_nvlist(configs[i], ZPOOL_CONFIG_VDEV_TREE, 3986243502Savg &nvtop) == 0); 3987243502Savg nvlist_dup(nvtop, &tops[i], KM_SLEEP); 3988243213Savg } 
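/*
 * The two loops below pad out the tops[] array assembled above: any
 * top-level vdev slot recorded as a hole gets a VDEV_TYPE_HOLE
 * placeholder, and any slot for which no label was found at all gets a
 * VDEV_TYPE_MISSING placeholder.  This keeps the child indices in the
 * generated root config consistent with ZPOOL_CONFIG_VDEV_CHILDREN.
 */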
3989243502Savg for (i = 0; holes != NULL && i < nholes; i++) { 3990243502Savg if (i >= nchildren) 3991243502Savg continue; 3992243502Savg if (tops[holes[i]] != NULL) 3993243502Savg continue; 3994243502Savg nvlist_alloc(&tops[holes[i]], NV_UNIQUE_NAME, KM_SLEEP); 3995243502Savg VERIFY(nvlist_add_string(tops[holes[i]], ZPOOL_CONFIG_TYPE, 3996243502Savg VDEV_TYPE_HOLE) == 0); 3997243502Savg VERIFY(nvlist_add_uint64(tops[holes[i]], ZPOOL_CONFIG_ID, 3998243502Savg holes[i]) == 0); 3999243502Savg VERIFY(nvlist_add_uint64(tops[holes[i]], ZPOOL_CONFIG_GUID, 4000243502Savg 0) == 0); 4001243502Savg } 4002243502Savg for (i = 0; i < nchildren; i++) { 4003243502Savg if (tops[i] != NULL) 4004243502Savg continue; 4005243502Savg nvlist_alloc(&tops[i], NV_UNIQUE_NAME, KM_SLEEP); 4006243502Savg VERIFY(nvlist_add_string(tops[i], ZPOOL_CONFIG_TYPE, 4007243502Savg VDEV_TYPE_MISSING) == 0); 4008243502Savg VERIFY(nvlist_add_uint64(tops[i], ZPOOL_CONFIG_ID, 4009243502Savg i) == 0); 4010243502Savg VERIFY(nvlist_add_uint64(tops[i], ZPOOL_CONFIG_GUID, 4011243502Savg 0) == 0); 4012243502Savg } 4013243213Savg 4014243213Savg /* 4015243502Savg * Create pool config based on the best vdev config. 4016241286Savg */ 4017243502Savg nvlist_dup(best_cfg, &config, KM_SLEEP); 4018241286Savg 4019241286Savg /* 4020241286Savg * Put this pool's top-level vdevs into a root vdev. 4021241286Savg */ 4022243502Savg VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, 4023243502Savg &pgid) == 0); 4024241286Savg VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0); 4025241286Savg VERIFY(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE, 4026241286Savg VDEV_TYPE_ROOT) == 0); 4027241286Savg VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) == 0); 4028241286Savg VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, pgid) == 0); 4029241286Savg VERIFY(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, 4030243502Savg tops, nchildren) == 0); 4031241286Savg 4032241286Savg /* 4033241286Savg * Replace the existing vdev_tree with the new root vdev in 4034241286Savg * this pool's configuration (remove the old, add the new). 4035241286Savg */ 4036241286Savg VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, nvroot) == 0); 4037243502Savg 4038243502Savg /* 4039243502Savg * Drop vdev config elements that should not be present at pool level. 4040243502Savg */ 4041243502Savg nvlist_remove(config, ZPOOL_CONFIG_GUID, DATA_TYPE_UINT64); 4042243502Savg nvlist_remove(config, ZPOOL_CONFIG_TOP_GUID, DATA_TYPE_UINT64); 4043243502Savg 4044243502Savg for (i = 0; i < count; i++) 4045243502Savg nvlist_free(configs[i]); 4046243502Savg kmem_free(configs, count * sizeof(void *)); 4047243502Savg for (i = 0; i < nchildren; i++) 4048243502Savg nvlist_free(tops[i]); 4049243502Savg kmem_free(tops, nchildren * sizeof(void *)); 4050241286Savg nvlist_free(nvroot); 4051241286Savg return (config); 4052241286Savg} 4053241286Savg 4054241286Savgint 4055241286Savgspa_import_rootpool(const char *name) 4056241286Savg{ 4057241286Savg spa_t *spa; 4058241286Savg vdev_t *rvd, *bvd, *avd = NULL; 4059241286Savg nvlist_t *config, *nvtop; 4060241286Savg uint64_t txg; 4061241286Savg char *pname; 4062241286Savg int error; 4063241286Savg 4064241286Savg /* 4065241286Savg * Read the label from the boot device and generate a configuration. 
4066241286Savg */ 4067241286Savg config = spa_generate_rootconf(name); 4068243213Savg 4069243213Savg mutex_enter(&spa_namespace_lock); 4070243213Savg if (config != NULL) { 4071243213Savg VERIFY(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME, 4072243213Savg &pname) == 0 && strcmp(name, pname) == 0); 4073243213Savg VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG, &txg) 4074243213Savg == 0); 4075243213Savg 4076243213Savg if ((spa = spa_lookup(pname)) != NULL) { 4077243213Savg /* 4078243213Savg * Remove the existing root pool from the namespace so 4079243213Savg * that we can replace it with the correct config 4080243213Savg * we just read in. 4081243213Savg */ 4082243213Savg spa_remove(spa); 4083243213Savg } 4084243213Savg spa = spa_add(pname, config, NULL); 4085243501Savg 4086243501Savg /* 4087243501Savg * Set spa_ubsync.ub_version as it can be used in vdev_alloc() 4088243501Savg * via spa_version(). 4089243501Savg */ 4090243501Savg if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, 4091243501Savg &spa->spa_ubsync.ub_version) != 0) 4092243501Savg spa->spa_ubsync.ub_version = SPA_VERSION_INITIAL; 4093243213Savg } else if ((spa = spa_lookup(name)) == NULL) { 4094241286Savg cmn_err(CE_NOTE, "Cannot find the pool label for '%s'", 4095241286Savg name); 4096241286Savg return (EIO); 4097243213Savg } else { 4098243213Savg VERIFY(nvlist_dup(spa->spa_config, &config, KM_SLEEP) == 0); 4099241286Savg } 4100241286Savg spa->spa_is_root = B_TRUE; 4101241286Savg spa->spa_import_flags = ZFS_IMPORT_VERBATIM; 4102241286Savg 4103241286Savg /* 4104241286Savg * Build up a vdev tree based on the boot device's label config. 4105241286Savg */ 4106241286Savg VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 4107241286Savg &nvtop) == 0); 4108241286Savg spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 4109241286Savg error = spa_config_parse(spa, &rvd, nvtop, NULL, 0, 4110241286Savg VDEV_ALLOC_ROOTPOOL); 4111241286Savg spa_config_exit(spa, SCL_ALL, FTAG); 4112241286Savg if (error) { 4113241286Savg mutex_exit(&spa_namespace_lock); 4114241286Savg nvlist_free(config); 4115241286Savg cmn_err(CE_NOTE, "Can not parse the config for pool '%s'", 4116241286Savg pname); 4117241286Savg return (error); 4118241286Savg } 4119241286Savg 4120241286Savg spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 4121241286Savg vdev_free(rvd); 4122241286Savg spa_config_exit(spa, SCL_ALL, FTAG); 4123241286Savg mutex_exit(&spa_namespace_lock); 4124241286Savg 4125243213Savg nvlist_free(config); 4126243213Savg return (0); 4127241286Savg} 4128241286Savg 4129241286Savg#endif /* sun */ 4130219089Spjd#endif 4131219089Spjd 4132209962Smm/* 4133209962Smm * Import a non-root pool into the system. 4134209962Smm */ 4135185029Spjdint 4136219089Spjdspa_import(const char *pool, nvlist_t *config, nvlist_t *props, uint64_t flags) 4137185029Spjd{ 4138209962Smm spa_t *spa; 4139209962Smm char *altroot = NULL; 4140219089Spjd spa_load_state_t state = SPA_LOAD_IMPORT; 4141219089Spjd zpool_rewind_policy_t policy; 4142219089Spjd uint64_t mode = spa_mode_global; 4143219089Spjd uint64_t readonly = B_FALSE; 4144209962Smm int error; 4145209962Smm nvlist_t *nvroot; 4146209962Smm nvlist_t **spares, **l2cache; 4147209962Smm uint_t nspares, nl2cache; 4148209962Smm 4149209962Smm /* 4150209962Smm * If a pool with this name exists, return failure. 
4151209962Smm */ 4152209962Smm mutex_enter(&spa_namespace_lock); 4153219089Spjd if (spa_lookup(pool) != NULL) { 4154209962Smm mutex_exit(&spa_namespace_lock); 4155249195Smm return (SET_ERROR(EEXIST)); 4156209962Smm } 4157209962Smm 4158209962Smm /* 4159209962Smm * Create and initialize the spa structure. 4160209962Smm */ 4161209962Smm (void) nvlist_lookup_string(props, 4162209962Smm zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot); 4163219089Spjd (void) nvlist_lookup_uint64(props, 4164219089Spjd zpool_prop_to_name(ZPOOL_PROP_READONLY), &readonly); 4165219089Spjd if (readonly) 4166219089Spjd mode = FREAD; 4167219089Spjd spa = spa_add(pool, config, altroot); 4168219089Spjd spa->spa_import_flags = flags; 4169209962Smm 4170209962Smm /* 4171219089Spjd * Verbatim import - Take a pool and insert it into the namespace 4172219089Spjd * as if it had been loaded at boot. 4173219089Spjd */ 4174219089Spjd if (spa->spa_import_flags & ZFS_IMPORT_VERBATIM) { 4175219089Spjd if (props != NULL) 4176219089Spjd spa_configfile_set(spa, props, B_FALSE); 4177219089Spjd 4178219089Spjd spa_config_sync(spa, B_FALSE, B_TRUE); 4179219089Spjd 4180219089Spjd mutex_exit(&spa_namespace_lock); 4181219089Spjd return (0); 4182219089Spjd } 4183219089Spjd 4184219089Spjd spa_activate(spa, mode); 4185219089Spjd 4186219089Spjd /* 4187209962Smm * Don't start async tasks until we know everything is healthy. 4188209962Smm */ 4189209962Smm spa_async_suspend(spa); 4190209962Smm 4191219089Spjd zpool_get_rewind_policy(config, &policy); 4192219089Spjd if (policy.zrp_request & ZPOOL_DO_REWIND) 4193219089Spjd state = SPA_LOAD_RECOVER; 4194219089Spjd 4195209962Smm /* 4196209962Smm * Pass off the heavy lifting to spa_load(). Pass TRUE for mosconfig 4197209962Smm * because the user-supplied config is actually the one to trust when 4198209962Smm * doing an import. 4199209962Smm */ 4200219089Spjd if (state != SPA_LOAD_RECOVER) 4201219089Spjd spa->spa_last_ubsync_txg = spa->spa_load_txg = 0; 4202209962Smm 4203219089Spjd error = spa_load_best(spa, state, B_TRUE, policy.zrp_txg, 4204219089Spjd policy.zrp_request); 4205219089Spjd 4206219089Spjd /* 4207219089Spjd * Propagate anything learned while loading the pool and pass it 4208219089Spjd * back to caller (i.e. rewind info, missing devices, etc). 4209219089Spjd */ 4210219089Spjd VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, 4211219089Spjd spa->spa_load_info) == 0); 4212219089Spjd 4213209962Smm spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 4214209962Smm /* 4215209962Smm * Toss any existing sparelist, as it doesn't have any validity 4216209962Smm * anymore, and conflicts with spa_has_spare(). 
4217209962Smm */ 4218209962Smm if (spa->spa_spares.sav_config) { 4219209962Smm nvlist_free(spa->spa_spares.sav_config); 4220209962Smm spa->spa_spares.sav_config = NULL; 4221209962Smm spa_load_spares(spa); 4222209962Smm } 4223209962Smm if (spa->spa_l2cache.sav_config) { 4224209962Smm nvlist_free(spa->spa_l2cache.sav_config); 4225209962Smm spa->spa_l2cache.sav_config = NULL; 4226209962Smm spa_load_l2cache(spa); 4227209962Smm } 4228209962Smm 4229209962Smm VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 4230209962Smm &nvroot) == 0); 4231209962Smm if (error == 0) 4232209962Smm error = spa_validate_aux(spa, nvroot, -1ULL, 4233209962Smm VDEV_ALLOC_SPARE); 4234209962Smm if (error == 0) 4235209962Smm error = spa_validate_aux(spa, nvroot, -1ULL, 4236209962Smm VDEV_ALLOC_L2CACHE); 4237209962Smm spa_config_exit(spa, SCL_ALL, FTAG); 4238209962Smm 4239209962Smm if (props != NULL) 4240209962Smm spa_configfile_set(spa, props, B_FALSE); 4241209962Smm 4242209962Smm if (error != 0 || (props && spa_writeable(spa) && 4243209962Smm (error = spa_prop_set(spa, props)))) { 4244209962Smm spa_unload(spa); 4245209962Smm spa_deactivate(spa); 4246209962Smm spa_remove(spa); 4247209962Smm mutex_exit(&spa_namespace_lock); 4248209962Smm return (error); 4249209962Smm } 4250209962Smm 4251209962Smm spa_async_resume(spa); 4252209962Smm 4253209962Smm /* 4254209962Smm * Override any spares and level 2 cache devices as specified by 4255209962Smm * the user, as these may have correct device names/devids, etc. 4256209962Smm */ 4257209962Smm if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, 4258209962Smm &spares, &nspares) == 0) { 4259209962Smm if (spa->spa_spares.sav_config) 4260209962Smm VERIFY(nvlist_remove(spa->spa_spares.sav_config, 4261209962Smm ZPOOL_CONFIG_SPARES, DATA_TYPE_NVLIST_ARRAY) == 0); 4262209962Smm else 4263209962Smm VERIFY(nvlist_alloc(&spa->spa_spares.sav_config, 4264209962Smm NV_UNIQUE_NAME, KM_SLEEP) == 0); 4265209962Smm VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config, 4266209962Smm ZPOOL_CONFIG_SPARES, spares, nspares) == 0); 4267209962Smm spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 4268209962Smm spa_load_spares(spa); 4269209962Smm spa_config_exit(spa, SCL_ALL, FTAG); 4270209962Smm spa->spa_spares.sav_sync = B_TRUE; 4271209962Smm } 4272209962Smm if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, 4273209962Smm &l2cache, &nl2cache) == 0) { 4274209962Smm if (spa->spa_l2cache.sav_config) 4275209962Smm VERIFY(nvlist_remove(spa->spa_l2cache.sav_config, 4276209962Smm ZPOOL_CONFIG_L2CACHE, DATA_TYPE_NVLIST_ARRAY) == 0); 4277209962Smm else 4278209962Smm VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config, 4279209962Smm NV_UNIQUE_NAME, KM_SLEEP) == 0); 4280209962Smm VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config, 4281209962Smm ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0); 4282209962Smm spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 4283209962Smm spa_load_l2cache(spa); 4284209962Smm spa_config_exit(spa, SCL_ALL, FTAG); 4285209962Smm spa->spa_l2cache.sav_sync = B_TRUE; 4286209962Smm } 4287209962Smm 4288219089Spjd /* 4289219089Spjd * Check for any removed devices. 4290219089Spjd */ 4291219089Spjd if (spa->spa_autoreplace) { 4292219089Spjd spa_aux_check_removed(&spa->spa_spares); 4293219089Spjd spa_aux_check_removed(&spa->spa_l2cache); 4294219089Spjd } 4295219089Spjd 4296209962Smm if (spa_writeable(spa)) { 4297209962Smm /* 4298209962Smm * Update the config cache to include the newly-imported pool. 
4299209962Smm */ 4300209962Smm spa_config_update(spa, SPA_CONFIG_UPDATE_POOL); 4301209962Smm } 4302209962Smm 4303219089Spjd /* 4304219089Spjd * It's possible that the pool was expanded while it was exported. 4305219089Spjd * We kick off an async task to handle this for us. 4306219089Spjd */ 4307219089Spjd spa_async_request(spa, SPA_ASYNC_AUTOEXPAND); 4308219089Spjd 4309209962Smm mutex_exit(&spa_namespace_lock); 4310248571Smm spa_history_log_version(spa, "import"); 4311209962Smm 4312219089Spjd#ifdef __FreeBSD__ 4313219089Spjd#ifdef _KERNEL 4314219089Spjd zvol_create_minors(pool); 4315219089Spjd#endif 4316219089Spjd#endif 4317209962Smm return (0); 4318185029Spjd} 4319185029Spjd 4320168404Spjdnvlist_t * 4321168404Spjdspa_tryimport(nvlist_t *tryconfig) 4322168404Spjd{ 4323168404Spjd nvlist_t *config = NULL; 4324168404Spjd char *poolname; 4325168404Spjd spa_t *spa; 4326168404Spjd uint64_t state; 4327208443Smm int error; 4328168404Spjd 4329168404Spjd if (nvlist_lookup_string(tryconfig, ZPOOL_CONFIG_POOL_NAME, &poolname)) 4330168404Spjd return (NULL); 4331168404Spjd 4332168404Spjd if (nvlist_lookup_uint64(tryconfig, ZPOOL_CONFIG_POOL_STATE, &state)) 4333168404Spjd return (NULL); 4334168404Spjd 4335168404Spjd /* 4336168404Spjd * Create and initialize the spa structure. 4337168404Spjd */ 4338168404Spjd mutex_enter(&spa_namespace_lock); 4339219089Spjd spa = spa_add(TRYIMPORT_NAME, tryconfig, NULL); 4340209962Smm spa_activate(spa, FREAD); 4341168404Spjd 4342168404Spjd /* 4343168404Spjd * Pass off the heavy lifting to spa_load(). 4344168404Spjd * Pass TRUE for mosconfig because the user-supplied config 4345168404Spjd * is actually the one to trust when doing an import. 4346168404Spjd */ 4347219089Spjd error = spa_load(spa, SPA_LOAD_TRYIMPORT, SPA_IMPORT_EXISTING, B_TRUE); 4348168404Spjd 4349168404Spjd /* 4350168404Spjd * If 'tryconfig' was at least parsable, return the current config. 4351168404Spjd */ 4352168404Spjd if (spa->spa_root_vdev != NULL) { 4353168404Spjd config = spa_config_generate(spa, NULL, -1ULL, B_TRUE); 4354168404Spjd VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME, 4355168404Spjd poolname) == 0); 4356168404Spjd VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE, 4357168404Spjd state) == 0); 4358168498Spjd VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_TIMESTAMP, 4359168498Spjd spa->spa_uberblock.ub_timestamp) == 0); 4360236884Smm VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, 4361236884Smm spa->spa_load_info) == 0); 4362168404Spjd 4363168404Spjd /* 4364185029Spjd * If the bootfs property exists on this pool then we 4365185029Spjd * copy it out so that external consumers can tell which 4366185029Spjd * pools are bootable. 4367168404Spjd */ 4368208443Smm if ((!error || error == EEXIST) && spa->spa_bootfs) { 4369185029Spjd char *tmpname = kmem_alloc(MAXPATHLEN, KM_SLEEP); 4370185029Spjd 4371185029Spjd /* 4372185029Spjd * We have to play games with the name since the 4373185029Spjd * pool was opened as TRYIMPORT_NAME. 
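 *
 * For context, the usual userland-driven import sequence is a probe
 * followed by the real import.  A rough sketch, where "tank" and
 * label_config (a config assembled from the on-disk labels) are both
 * hypothetical and the nvlists in practice travel through the zfs
 * ioctl layer rather than being passed directly:
 *
 *	nvlist_t *config = spa_tryimport(label_config);
 *	if (config != NULL) {
 *		error = spa_import("tank", config, NULL,
 *		    ZFS_IMPORT_NORMAL);
 *		nvlist_free(config);
 *	}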
4374185029Spjd */ 4375185029Spjd if (dsl_dsobj_to_dsname(spa_name(spa), 4376185029Spjd spa->spa_bootfs, tmpname) == 0) { 4377185029Spjd char *cp; 4378185029Spjd char *dsname = kmem_alloc(MAXPATHLEN, KM_SLEEP); 4379185029Spjd 4380185029Spjd cp = strchr(tmpname, '/'); 4381185029Spjd if (cp == NULL) { 4382185029Spjd (void) strlcpy(dsname, tmpname, 4383185029Spjd MAXPATHLEN); 4384185029Spjd } else { 4385185029Spjd (void) snprintf(dsname, MAXPATHLEN, 4386185029Spjd "%s/%s", poolname, ++cp); 4387185029Spjd } 4388185029Spjd VERIFY(nvlist_add_string(config, 4389185029Spjd ZPOOL_CONFIG_BOOTFS, dsname) == 0); 4390185029Spjd kmem_free(dsname, MAXPATHLEN); 4391185029Spjd } 4392185029Spjd kmem_free(tmpname, MAXPATHLEN); 4393185029Spjd } 4394185029Spjd 4395185029Spjd /* 4396185029Spjd * Add the list of hot spares and level 2 cache devices. 4397185029Spjd */ 4398209962Smm spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); 4399168404Spjd spa_add_spares(spa, config); 4400185029Spjd spa_add_l2cache(spa, config); 4401209962Smm spa_config_exit(spa, SCL_CONFIG, FTAG); 4402168404Spjd } 4403168404Spjd 4404168404Spjd spa_unload(spa); 4405168404Spjd spa_deactivate(spa); 4406168404Spjd spa_remove(spa); 4407168404Spjd mutex_exit(&spa_namespace_lock); 4408168404Spjd 4409168404Spjd return (config); 4410168404Spjd} 4411168404Spjd 4412168404Spjd/* 4413168404Spjd * Pool export/destroy 4414168404Spjd * 4415168404Spjd * The act of destroying or exporting a pool is very simple. We make sure there 4416168404Spjd * is no more pending I/O and any references to the pool are gone. Then, we 4417168404Spjd * update the pool state and sync all the labels to disk, removing the 4418207670Smm * configuration from the cache afterwards. If the 'hardforce' flag is set, then 4419207670Smm * we don't sync the labels or remove the configuration cache. 4420168404Spjd */ 4421168404Spjdstatic int 4422185029Spjdspa_export_common(char *pool, int new_state, nvlist_t **oldconfig, 4423207670Smm boolean_t force, boolean_t hardforce) 4424168404Spjd{ 4425168404Spjd spa_t *spa; 4426168404Spjd 4427168404Spjd if (oldconfig) 4428168404Spjd *oldconfig = NULL; 4429168404Spjd 4430209962Smm if (!(spa_mode_global & FWRITE)) 4431249195Smm return (SET_ERROR(EROFS)); 4432168404Spjd 4433168404Spjd mutex_enter(&spa_namespace_lock); 4434168404Spjd if ((spa = spa_lookup(pool)) == NULL) { 4435168404Spjd mutex_exit(&spa_namespace_lock); 4436249195Smm return (SET_ERROR(ENOENT)); 4437168404Spjd } 4438168404Spjd 4439168404Spjd /* 4440168404Spjd * Put a hold on the pool, drop the namespace lock, stop async tasks, 4441168404Spjd * reacquire the namespace lock, and see if we can export. 4442168404Spjd */ 4443168404Spjd spa_open_ref(spa, FTAG); 4444168404Spjd mutex_exit(&spa_namespace_lock); 4445168404Spjd spa_async_suspend(spa); 4446168404Spjd mutex_enter(&spa_namespace_lock); 4447168404Spjd spa_close(spa, FTAG); 4448168404Spjd 4449168404Spjd /* 4450168404Spjd * The pool will be in core if it's openable, 4451168404Spjd * in which case we can modify its state. 4452168404Spjd */ 4453168404Spjd if (spa->spa_state != POOL_STATE_UNINITIALIZED && spa->spa_sync_on) { 4454168404Spjd /* 4455168404Spjd * Objsets may be open only because they're dirty, so we 4456168404Spjd * have to force it to sync before checking spa_refcnt. 4457168404Spjd */ 4458168404Spjd txg_wait_synced(spa->spa_dsl_pool, 0); 4459168404Spjd 4460168404Spjd /* 4461168404Spjd * A pool cannot be exported or destroyed if there are active 4462168404Spjd * references. 
If we are resetting a pool, allow references by 4463168404Spjd * fault injection handlers. 4464168404Spjd */ 4465168404Spjd if (!spa_refcount_zero(spa) || 4466168404Spjd (spa->spa_inject_ref != 0 && 4467168404Spjd new_state != POOL_STATE_UNINITIALIZED)) { 4468168404Spjd spa_async_resume(spa); 4469168404Spjd mutex_exit(&spa_namespace_lock); 4470249195Smm return (SET_ERROR(EBUSY)); 4471168404Spjd } 4472168404Spjd 4473185029Spjd /* 4474185029Spjd * A pool cannot be exported if it has an active shared spare. 4475185029Spjd * This is to prevent other pools stealing the active spare 4476185029Spjd * from an exported pool. At user's own will, such pool can 4477185029Spjd * be forcedly exported. 4478185029Spjd */ 4479185029Spjd if (!force && new_state == POOL_STATE_EXPORTED && 4480185029Spjd spa_has_active_shared_spare(spa)) { 4481185029Spjd spa_async_resume(spa); 4482185029Spjd mutex_exit(&spa_namespace_lock); 4483249195Smm return (SET_ERROR(EXDEV)); 4484185029Spjd } 4485168404Spjd 4486168404Spjd /* 4487168404Spjd * We want this to be reflected on every label, 4488168404Spjd * so mark them all dirty. spa_unload() will do the 4489168404Spjd * final sync that pushes these changes out. 4490168404Spjd */ 4491207670Smm if (new_state != POOL_STATE_UNINITIALIZED && !hardforce) { 4492185029Spjd spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 4493168404Spjd spa->spa_state = new_state; 4494219089Spjd spa->spa_final_txg = spa_last_synced_txg(spa) + 4495219089Spjd TXG_DEFER_SIZE + 1; 4496168404Spjd vdev_config_dirty(spa->spa_root_vdev); 4497185029Spjd spa_config_exit(spa, SCL_ALL, FTAG); 4498168404Spjd } 4499168404Spjd } 4500168404Spjd 4501185029Spjd spa_event_notify(spa, NULL, ESC_ZFS_POOL_DESTROY); 4502185029Spjd 4503168404Spjd if (spa->spa_state != POOL_STATE_UNINITIALIZED) { 4504168404Spjd spa_unload(spa); 4505168404Spjd spa_deactivate(spa); 4506168404Spjd } 4507168404Spjd 4508168404Spjd if (oldconfig && spa->spa_config) 4509168404Spjd VERIFY(nvlist_dup(spa->spa_config, oldconfig, 0) == 0); 4510168404Spjd 4511168404Spjd if (new_state != POOL_STATE_UNINITIALIZED) { 4512207670Smm if (!hardforce) 4513207670Smm spa_config_sync(spa, B_TRUE, B_TRUE); 4514168404Spjd spa_remove(spa); 4515168404Spjd } 4516168404Spjd mutex_exit(&spa_namespace_lock); 4517168404Spjd 4518168404Spjd return (0); 4519168404Spjd} 4520168404Spjd 4521168404Spjd/* 4522168404Spjd * Destroy a storage pool. 4523168404Spjd */ 4524168404Spjdint 4525168404Spjdspa_destroy(char *pool) 4526168404Spjd{ 4527207670Smm return (spa_export_common(pool, POOL_STATE_DESTROYED, NULL, 4528207670Smm B_FALSE, B_FALSE)); 4529168404Spjd} 4530168404Spjd 4531168404Spjd/* 4532168404Spjd * Export a storage pool. 4533168404Spjd */ 4534168404Spjdint 4535207670Smmspa_export(char *pool, nvlist_t **oldconfig, boolean_t force, 4536207670Smm boolean_t hardforce) 4537168404Spjd{ 4538207670Smm return (spa_export_common(pool, POOL_STATE_EXPORTED, oldconfig, 4539207670Smm force, hardforce)); 4540168404Spjd} 4541168404Spjd 4542168404Spjd/* 4543168404Spjd * Similar to spa_export(), this unloads the spa_t without actually removing it 4544168404Spjd * from the namespace in any way. 
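 *
 * A quick map of the three public entry points in this block, all thin
 * wrappers around spa_export_common() (the pool name is illustrative):
 *
 *	spa_destroy("tank")
 *	    -> spa_export_common("tank", POOL_STATE_DESTROYED, NULL,
 *	       B_FALSE, B_FALSE)
 *	spa_export("tank", &oldconfig, force, hardforce)
 *	    -> spa_export_common("tank", POOL_STATE_EXPORTED, &oldconfig,
 *	       force, hardforce)
 *	spa_reset("tank")
 *	    -> spa_export_common("tank", POOL_STATE_UNINITIALIZED, NULL,
 *	       B_FALSE, B_FALSE)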
4545168404Spjd */ 4546168404Spjdint 4547168404Spjdspa_reset(char *pool) 4548168404Spjd{ 4549185029Spjd return (spa_export_common(pool, POOL_STATE_UNINITIALIZED, NULL, 4550207670Smm B_FALSE, B_FALSE)); 4551168404Spjd} 4552168404Spjd 4553168404Spjd/* 4554168404Spjd * ========================================================================== 4555168404Spjd * Device manipulation 4556168404Spjd * ========================================================================== 4557168404Spjd */ 4558168404Spjd 4559168404Spjd/* 4560185029Spjd * Add a device to a storage pool. 4561168404Spjd */ 4562168404Spjdint 4563168404Spjdspa_vdev_add(spa_t *spa, nvlist_t *nvroot) 4564168404Spjd{ 4565219089Spjd uint64_t txg, id; 4566209962Smm int error; 4567168404Spjd vdev_t *rvd = spa->spa_root_vdev; 4568168404Spjd vdev_t *vd, *tvd; 4569185029Spjd nvlist_t **spares, **l2cache; 4570185029Spjd uint_t nspares, nl2cache; 4571168404Spjd 4572219089Spjd ASSERT(spa_writeable(spa)); 4573219089Spjd 4574168404Spjd txg = spa_vdev_enter(spa); 4575168404Spjd 4576168404Spjd if ((error = spa_config_parse(spa, &vd, nvroot, NULL, 0, 4577168404Spjd VDEV_ALLOC_ADD)) != 0) 4578168404Spjd return (spa_vdev_exit(spa, NULL, txg, error)); 4579168404Spjd 4580185029Spjd spa->spa_pending_vdev = vd; /* spa_vdev_exit() will clear this */ 4581168404Spjd 4582185029Spjd if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, &spares, 4583185029Spjd &nspares) != 0) 4584168404Spjd nspares = 0; 4585168404Spjd 4586185029Spjd if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, &l2cache, 4587185029Spjd &nl2cache) != 0) 4588185029Spjd nl2cache = 0; 4589185029Spjd 4590185029Spjd if (vd->vdev_children == 0 && nspares == 0 && nl2cache == 0) 4591168404Spjd return (spa_vdev_exit(spa, vd, txg, EINVAL)); 4592168404Spjd 4593185029Spjd if (vd->vdev_children != 0 && 4594185029Spjd (error = vdev_create(vd, txg, B_FALSE)) != 0) 4595185029Spjd return (spa_vdev_exit(spa, vd, txg, error)); 4596168404Spjd 4597168404Spjd /* 4598185029Spjd * We must validate the spares and l2cache devices after checking the 4599185029Spjd * children. Otherwise, vdev_inuse() will blindly overwrite the spare. 4600168404Spjd */ 4601185029Spjd if ((error = spa_validate_aux(spa, nvroot, txg, VDEV_ALLOC_ADD)) != 0) 4602168404Spjd return (spa_vdev_exit(spa, vd, txg, error)); 4603168404Spjd 4604168404Spjd /* 4605168404Spjd * Transfer each new top-level vdev from vd to rvd. 4606168404Spjd */ 4607209962Smm for (int c = 0; c < vd->vdev_children; c++) { 4608219089Spjd 4609219089Spjd /* 4610219089Spjd * Set the vdev id to the first hole, if one exists. 
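 * For example, if the root vdev's children are { d0, d1, <hole>, d3 },
 * the hole at index 2 is freed and the incoming top-level vdev takes
 * id 2; if there is no hole, the loop ends with id equal to
 * rvd->vdev_children and the new vdev goes on the end.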
4611219089Spjd */ 4612219089Spjd for (id = 0; id < rvd->vdev_children; id++) { 4613219089Spjd if (rvd->vdev_child[id]->vdev_ishole) { 4614219089Spjd vdev_free(rvd->vdev_child[id]); 4615219089Spjd break; 4616219089Spjd } 4617219089Spjd } 4618168404Spjd tvd = vd->vdev_child[c]; 4619168404Spjd vdev_remove_child(vd, tvd); 4620219089Spjd tvd->vdev_id = id; 4621168404Spjd vdev_add_child(rvd, tvd); 4622168404Spjd vdev_config_dirty(tvd); 4623168404Spjd } 4624168404Spjd 4625168404Spjd if (nspares != 0) { 4626185029Spjd spa_set_aux_vdevs(&spa->spa_spares, spares, nspares, 4627185029Spjd ZPOOL_CONFIG_SPARES); 4628168404Spjd spa_load_spares(spa); 4629185029Spjd spa->spa_spares.sav_sync = B_TRUE; 4630168404Spjd } 4631168404Spjd 4632185029Spjd if (nl2cache != 0) { 4633185029Spjd spa_set_aux_vdevs(&spa->spa_l2cache, l2cache, nl2cache, 4634185029Spjd ZPOOL_CONFIG_L2CACHE); 4635185029Spjd spa_load_l2cache(spa); 4636185029Spjd spa->spa_l2cache.sav_sync = B_TRUE; 4637185029Spjd } 4638185029Spjd 4639168404Spjd /* 4640168404Spjd * We have to be careful when adding new vdevs to an existing pool. 4641168404Spjd * If other threads start allocating from these vdevs before we 4642168404Spjd * sync the config cache, and we lose power, then upon reboot we may 4643168404Spjd * fail to open the pool because there are DVAs that the config cache 4644168404Spjd * can't translate. Therefore, we first add the vdevs without 4645168404Spjd * initializing metaslabs; sync the config cache (via spa_vdev_exit()); 4646168404Spjd * and then let spa_config_update() initialize the new metaslabs. 4647168404Spjd * 4648168404Spjd * spa_load() checks for added-but-not-initialized vdevs, so that 4649168404Spjd * if we lose power at any point in this sequence, the remaining 4650168404Spjd * steps will be completed the next time we load the pool. 4651168404Spjd */ 4652168404Spjd (void) spa_vdev_exit(spa, vd, txg, 0); 4653168404Spjd 4654168404Spjd mutex_enter(&spa_namespace_lock); 4655168404Spjd spa_config_update(spa, SPA_CONFIG_UPDATE_POOL); 4656168404Spjd mutex_exit(&spa_namespace_lock); 4657168404Spjd 4658168404Spjd return (0); 4659168404Spjd} 4660168404Spjd 4661168404Spjd/* 4662168404Spjd * Attach a device to a mirror. The arguments are the path to any device 4663168404Spjd * in the mirror, and the nvroot for the new device. If the path specifies 4664168404Spjd * a device that is not mirrored, we automatically insert the mirror vdev. 4665168404Spjd * 4666168404Spjd * If 'replacing' is specified, the new device is intended to replace the 4667168404Spjd * existing device; in this case the two devices are made into their own 4668185029Spjd * mirror using the 'replacing' vdev, which is functionally identical to 4669168404Spjd * the mirror vdev (it actually reuses all the same ops) but has a few 4670168404Spjd * extra rules: you can't attach to it after it's been created, and upon 4671168404Spjd * completion of resilvering, the first disk (the one being replaced) 4672168404Spjd * is automatically detached. 
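 *
 * A minimal sketch of the calling convention, as a caller such as the
 * ioctl layer might use it ('nvroot' is built from a user nvlist; the
 * pool name and guid here are illustrative):
 *
 *	spa_t *spa;
 *	int error;
 *
 *	error = spa_open("tank", &spa, FTAG);
 *	if (error == 0) {
 *		error = spa_vdev_attach(spa, old_leaf_guid, nvroot,
 *		    B_TRUE);
 *		spa_close(spa, FTAG);
 *	}
 *
 * Passing B_TRUE for 'replacing' asks for a replace; B_FALSE asks for a
 * plain mirror attach.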
4673168404Spjd */ 4674168404Spjdint 4675168404Spjdspa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot, int replacing) 4676168404Spjd{ 4677219089Spjd uint64_t txg, dtl_max_txg; 4678168404Spjd vdev_t *rvd = spa->spa_root_vdev; 4679168404Spjd vdev_t *oldvd, *newvd, *newrootvd, *pvd, *tvd; 4680168404Spjd vdev_ops_t *pvops; 4681185029Spjd char *oldvdpath, *newvdpath; 4682185029Spjd int newvd_isspare; 4683185029Spjd int error; 4684168404Spjd 4685219089Spjd ASSERT(spa_writeable(spa)); 4686219089Spjd 4687168404Spjd txg = spa_vdev_enter(spa); 4688168404Spjd 4689185029Spjd oldvd = spa_lookup_by_guid(spa, guid, B_FALSE); 4690168404Spjd 4691168404Spjd if (oldvd == NULL) 4692168404Spjd return (spa_vdev_exit(spa, NULL, txg, ENODEV)); 4693168404Spjd 4694168404Spjd if (!oldvd->vdev_ops->vdev_op_leaf) 4695168404Spjd return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 4696168404Spjd 4697168404Spjd pvd = oldvd->vdev_parent; 4698168404Spjd 4699168404Spjd if ((error = spa_config_parse(spa, &newrootvd, nvroot, NULL, 0, 4700230514Smm VDEV_ALLOC_ATTACH)) != 0) 4701185029Spjd return (spa_vdev_exit(spa, NULL, txg, EINVAL)); 4702185029Spjd 4703185029Spjd if (newrootvd->vdev_children != 1) 4704168404Spjd return (spa_vdev_exit(spa, newrootvd, txg, EINVAL)); 4705168404Spjd 4706168404Spjd newvd = newrootvd->vdev_child[0]; 4707168404Spjd 4708168404Spjd if (!newvd->vdev_ops->vdev_op_leaf) 4709168404Spjd return (spa_vdev_exit(spa, newrootvd, txg, EINVAL)); 4710168404Spjd 4711168404Spjd if ((error = vdev_create(newrootvd, txg, replacing)) != 0) 4712168404Spjd return (spa_vdev_exit(spa, newrootvd, txg, error)); 4713168404Spjd 4714185029Spjd /* 4715185029Spjd * Spares can't replace logs 4716185029Spjd */ 4717185029Spjd if (oldvd->vdev_top->vdev_islog && newvd->vdev_isspare) 4718185029Spjd return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 4719185029Spjd 4720168404Spjd if (!replacing) { 4721168404Spjd /* 4722168404Spjd * For attach, the only allowable parent is a mirror or the root 4723168404Spjd * vdev. 4724168404Spjd */ 4725168404Spjd if (pvd->vdev_ops != &vdev_mirror_ops && 4726168404Spjd pvd->vdev_ops != &vdev_root_ops) 4727168404Spjd return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 4728168404Spjd 4729168404Spjd pvops = &vdev_mirror_ops; 4730168404Spjd } else { 4731168404Spjd /* 4732168404Spjd * Active hot spares can only be replaced by inactive hot 4733168404Spjd * spares. 4734168404Spjd */ 4735168404Spjd if (pvd->vdev_ops == &vdev_spare_ops && 4736219089Spjd oldvd->vdev_isspare && 4737168404Spjd !spa_has_spare(spa, newvd->vdev_guid)) 4738168404Spjd return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 4739168404Spjd 4740168404Spjd /* 4741168404Spjd * If the source is a hot spare, and the parent isn't already a 4742168404Spjd * spare, then we want to create a new hot spare. Otherwise, we 4743168404Spjd * want to create a replacing vdev. The user is not allowed to 4744168404Spjd * attach to a spared vdev child unless the 'isspare' state is 4745168404Spjd * the same (spare replaces spare, non-spare replaces 4746168404Spjd * non-spare). 
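 *
 * Summarized, the parent vdev ops selected by this function are:
 *
 *	!replacing			-> vdev_mirror_ops
 *	replacing, newvd is a spare	-> vdev_spare_ops
 *	replacing, newvd not a spare	-> vdev_replacing_ops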
4747168404Spjd */ 4748219089Spjd if (pvd->vdev_ops == &vdev_replacing_ops && 4749219089Spjd spa_version(spa) < SPA_VERSION_MULTI_REPLACE) { 4750168404Spjd return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 4751219089Spjd } else if (pvd->vdev_ops == &vdev_spare_ops && 4752219089Spjd newvd->vdev_isspare != oldvd->vdev_isspare) { 4753168404Spjd return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 4754219089Spjd } 4755219089Spjd 4756219089Spjd if (newvd->vdev_isspare) 4757168404Spjd pvops = &vdev_spare_ops; 4758168404Spjd else 4759168404Spjd pvops = &vdev_replacing_ops; 4760168404Spjd } 4761168404Spjd 4762168404Spjd /* 4763219089Spjd * Make sure the new device is big enough. 4764168404Spjd */ 4765219089Spjd if (newvd->vdev_asize < vdev_get_min_asize(oldvd)) 4766168404Spjd return (spa_vdev_exit(spa, newrootvd, txg, EOVERFLOW)); 4767168404Spjd 4768168404Spjd /* 4769168404Spjd * The new device cannot have a higher alignment requirement 4770168404Spjd * than the top-level vdev. 4771168404Spjd */ 4772168404Spjd if (newvd->vdev_ashift > oldvd->vdev_top->vdev_ashift) 4773168404Spjd return (spa_vdev_exit(spa, newrootvd, txg, EDOM)); 4774168404Spjd 4775168404Spjd /* 4776168404Spjd * If this is an in-place replacement, update oldvd's path and devid 4777168404Spjd * to make it distinguishable from newvd, and unopenable from now on. 4778168404Spjd */ 4779168404Spjd if (strcmp(oldvd->vdev_path, newvd->vdev_path) == 0) { 4780168404Spjd spa_strfree(oldvd->vdev_path); 4781168404Spjd oldvd->vdev_path = kmem_alloc(strlen(newvd->vdev_path) + 5, 4782168404Spjd KM_SLEEP); 4783168404Spjd (void) sprintf(oldvd->vdev_path, "%s/%s", 4784168404Spjd newvd->vdev_path, "old"); 4785168404Spjd if (oldvd->vdev_devid != NULL) { 4786168404Spjd spa_strfree(oldvd->vdev_devid); 4787168404Spjd oldvd->vdev_devid = NULL; 4788168404Spjd } 4789168404Spjd } 4790168404Spjd 4791219089Spjd /* mark the device being resilvered */ 4792254112Sdelphij newvd->vdev_resilver_txg = txg; 4793219089Spjd 4794168404Spjd /* 4795168404Spjd * If the parent is not a mirror, or if we're replacing, insert the new 4796168404Spjd * mirror/replacing/spare vdev above oldvd. 4797168404Spjd */ 4798168404Spjd if (pvd->vdev_ops != pvops) 4799168404Spjd pvd = vdev_add_parent(oldvd, pvops); 4800168404Spjd 4801168404Spjd ASSERT(pvd->vdev_top->vdev_parent == rvd); 4802168404Spjd ASSERT(pvd->vdev_ops == pvops); 4803168404Spjd ASSERT(oldvd->vdev_parent == pvd); 4804168404Spjd 4805168404Spjd /* 4806168404Spjd * Extract the new device from its root and add it to pvd. 4807168404Spjd */ 4808168404Spjd vdev_remove_child(newrootvd, newvd); 4809168404Spjd newvd->vdev_id = pvd->vdev_children; 4810219089Spjd newvd->vdev_crtxg = oldvd->vdev_crtxg; 4811168404Spjd vdev_add_child(pvd, newvd); 4812168404Spjd 4813168404Spjd tvd = newvd->vdev_top; 4814168404Spjd ASSERT(pvd->vdev_top == tvd); 4815168404Spjd ASSERT(tvd->vdev_parent == rvd); 4816168404Spjd 4817168404Spjd vdev_config_dirty(tvd); 4818168404Spjd 4819168404Spjd /* 4820219089Spjd * Set newvd's DTL to [TXG_INITIAL, dtl_max_txg) so that we account 4821219089Spjd * for any dmu_sync-ed blocks. It will propagate upward when 4822219089Spjd * spa_vdev_exit() calls vdev_dtl_reassess(). 
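 *
 * Put differently, the new vdev starts out with
 *
 *	DTL_MISSING(newvd) = [TXG_INITIAL, txg + TXG_CONCURRENT_STATES)
 *
 * so every txg that could still be open or syncing when the attach
 * commits is treated as missing on newvd until the resilver repairs it.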
4823168404Spjd */ 4824219089Spjd dtl_max_txg = txg + TXG_CONCURRENT_STATES; 4825168404Spjd 4826219089Spjd vdev_dtl_dirty(newvd, DTL_MISSING, TXG_INITIAL, 4827219089Spjd dtl_max_txg - TXG_INITIAL); 4828168404Spjd 4829209962Smm if (newvd->vdev_isspare) { 4830168404Spjd spa_spare_activate(newvd); 4831209962Smm spa_event_notify(spa, newvd, ESC_ZFS_VDEV_SPARE); 4832209962Smm } 4833209962Smm 4834185029Spjd oldvdpath = spa_strdup(oldvd->vdev_path); 4835185029Spjd newvdpath = spa_strdup(newvd->vdev_path); 4836185029Spjd newvd_isspare = newvd->vdev_isspare; 4837168404Spjd 4838168404Spjd /* 4839168404Spjd * Mark newvd's DTL dirty in this txg. 4840168404Spjd */ 4841168404Spjd vdev_dirty(tvd, VDD_DTL, newvd, txg); 4842168404Spjd 4843219089Spjd /* 4844258717Savg * Schedule the resilver to restart in the future. We do this to 4845258717Savg * ensure that dmu_sync-ed blocks have been stitched into the 4846258717Savg * respective datasets. 4847219089Spjd */ 4848219089Spjd dsl_resilver_restart(spa->spa_dsl_pool, dtl_max_txg); 4849168404Spjd 4850219089Spjd /* 4851219089Spjd * Commit the config 4852219089Spjd */ 4853219089Spjd (void) spa_vdev_exit(spa, newrootvd, dtl_max_txg, 0); 4854185029Spjd 4855248571Smm spa_history_log_internal(spa, "vdev attach", NULL, 4856219089Spjd "%s vdev=%s %s vdev=%s", 4857219089Spjd replacing && newvd_isspare ? "spare in" : 4858219089Spjd replacing ? "replace" : "attach", newvdpath, 4859219089Spjd replacing ? "for" : "to", oldvdpath); 4860219089Spjd 4861185029Spjd spa_strfree(oldvdpath); 4862185029Spjd spa_strfree(newvdpath); 4863185029Spjd 4864219089Spjd if (spa->spa_bootfs) 4865219089Spjd spa_event_notify(spa, newvd, ESC_ZFS_BOOTFS_VDEV_ATTACH); 4866168404Spjd 4867168404Spjd return (0); 4868168404Spjd} 4869168404Spjd 4870168404Spjd/* 4871168404Spjd * Detach a device from a mirror or replacing vdev. 4872251631Sdelphij * 4873168404Spjd * If 'replace_done' is specified, only detach if the parent 4874168404Spjd * is a replacing vdev. 4875168404Spjd */ 4876168404Spjdint 4877209962Smmspa_vdev_detach(spa_t *spa, uint64_t guid, uint64_t pguid, int replace_done) 4878168404Spjd{ 4879168404Spjd uint64_t txg; 4880209962Smm int error; 4881168404Spjd vdev_t *rvd = spa->spa_root_vdev; 4882168404Spjd vdev_t *vd, *pvd, *cvd, *tvd; 4883168404Spjd boolean_t unspare = B_FALSE; 4884247187Smm uint64_t unspare_guid = 0; 4885219089Spjd char *vdpath; 4886168404Spjd 4887219089Spjd ASSERT(spa_writeable(spa)); 4888219089Spjd 4889168404Spjd txg = spa_vdev_enter(spa); 4890168404Spjd 4891185029Spjd vd = spa_lookup_by_guid(spa, guid, B_FALSE); 4892168404Spjd 4893168404Spjd if (vd == NULL) 4894168404Spjd return (spa_vdev_exit(spa, NULL, txg, ENODEV)); 4895168404Spjd 4896168404Spjd if (!vd->vdev_ops->vdev_op_leaf) 4897168404Spjd return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 4898168404Spjd 4899168404Spjd pvd = vd->vdev_parent; 4900168404Spjd 4901168404Spjd /* 4902209962Smm * If the parent/child relationship is not as expected, don't do it. 4903209962Smm * Consider M(A,R(B,C)) -- that is, a mirror of A with a replacing 4904209962Smm * vdev that's replacing B with C. The user's intent in replacing 4905209962Smm * is to go from M(A,B) to M(A,C). If the user decides to cancel 4906209962Smm * the replace by detaching C, the expected behavior is to end up 4907209962Smm * M(A,B). But suppose that right after deciding to detach C, 4908209962Smm * the replacement of B completes. 
We would have M(A,C), and then 4909209962Smm * ask to detach C, which would leave us with just A -- not what 4910209962Smm * the user wanted. To prevent this, we make sure that the 4911209962Smm * parent/child relationship hasn't changed -- in this example, 4912209962Smm * that C's parent is still the replacing vdev R. 4913209962Smm */ 4914209962Smm if (pvd->vdev_guid != pguid && pguid != 0) 4915209962Smm return (spa_vdev_exit(spa, NULL, txg, EBUSY)); 4916209962Smm 4917209962Smm /* 4918219089Spjd * Only 'replacing' or 'spare' vdevs can be replaced. 4919168404Spjd */ 4920219089Spjd if (replace_done && pvd->vdev_ops != &vdev_replacing_ops && 4921219089Spjd pvd->vdev_ops != &vdev_spare_ops) 4922219089Spjd return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 4923168404Spjd 4924168404Spjd ASSERT(pvd->vdev_ops != &vdev_spare_ops || 4925185029Spjd spa_version(spa) >= SPA_VERSION_SPARES); 4926168404Spjd 4927168404Spjd /* 4928168404Spjd * Only mirror, replacing, and spare vdevs support detach. 4929168404Spjd */ 4930168404Spjd if (pvd->vdev_ops != &vdev_replacing_ops && 4931168404Spjd pvd->vdev_ops != &vdev_mirror_ops && 4932168404Spjd pvd->vdev_ops != &vdev_spare_ops) 4933168404Spjd return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 4934168404Spjd 4935168404Spjd /* 4936209962Smm * If this device has the only valid copy of some data, 4937209962Smm * we cannot safely detach it. 4938168404Spjd */ 4939209962Smm if (vdev_dtl_required(vd)) 4940168404Spjd return (spa_vdev_exit(spa, NULL, txg, EBUSY)); 4941168404Spjd 4942209962Smm ASSERT(pvd->vdev_children >= 2); 4943168404Spjd 4944168404Spjd /* 4945185029Spjd * If we are detaching the second disk from a replacing vdev, then 4946185029Spjd * check to see if we changed the original vdev's path to have "/old" 4947185029Spjd * at the end in spa_vdev_attach(). If so, undo that change now. 4948168404Spjd */ 4949219089Spjd if (pvd->vdev_ops == &vdev_replacing_ops && vd->vdev_id > 0 && 4950219089Spjd vd->vdev_path != NULL) { 4951219089Spjd size_t len = strlen(vd->vdev_path); 4952219089Spjd 4953219089Spjd for (int c = 0; c < pvd->vdev_children; c++) { 4954219089Spjd cvd = pvd->vdev_child[c]; 4955219089Spjd 4956219089Spjd if (cvd == vd || cvd->vdev_path == NULL) 4957219089Spjd continue; 4958219089Spjd 4959219089Spjd if (strncmp(cvd->vdev_path, vd->vdev_path, len) == 0 && 4960219089Spjd strcmp(cvd->vdev_path + len, "/old") == 0) { 4961219089Spjd spa_strfree(cvd->vdev_path); 4962219089Spjd cvd->vdev_path = spa_strdup(vd->vdev_path); 4963219089Spjd break; 4964219089Spjd } 4965185029Spjd } 4966185029Spjd } 4967168404Spjd 4968168404Spjd /* 4969168404Spjd * If we are detaching the original disk from a spare, then it implies 4970168404Spjd * that the spare should become a real disk, and be removed from the 4971168404Spjd * active spare list for the pool. 4972168404Spjd */ 4973168404Spjd if (pvd->vdev_ops == &vdev_spare_ops && 4974219089Spjd vd->vdev_id == 0 && 4975219089Spjd pvd->vdev_child[pvd->vdev_children - 1]->vdev_isspare) 4976168404Spjd unspare = B_TRUE; 4977168404Spjd 4978168404Spjd /* 4979168404Spjd * Erase the disk labels so the disk can be used for other things. 4980168404Spjd * This must be done after all other error cases are handled, 4981168404Spjd * but before we disembowel vd (so we can still do I/O to it). 4982168404Spjd * But if we can't do it, don't treat the error as fatal -- 4983168404Spjd * it may be that the unwritability of the disk is the reason 4984168404Spjd * it's being detached! 
4985168404Spjd */ 4986168404Spjd error = vdev_label_init(vd, 0, VDEV_LABEL_REMOVE); 4987168404Spjd 4988168404Spjd /* 4989168404Spjd * Remove vd from its parent and compact the parent's children. 4990168404Spjd */ 4991168404Spjd vdev_remove_child(pvd, vd); 4992168404Spjd vdev_compact_children(pvd); 4993168404Spjd 4994168404Spjd /* 4995168404Spjd * Remember one of the remaining children so we can get tvd below. 4996168404Spjd */ 4997219089Spjd cvd = pvd->vdev_child[pvd->vdev_children - 1]; 4998168404Spjd 4999168404Spjd /* 5000168404Spjd * If we need to remove the remaining child from the list of hot spares, 5001209962Smm * do it now, marking the vdev as no longer a spare in the process. 5002209962Smm * We must do this before vdev_remove_parent(), because that can 5003209962Smm * change the GUID if it creates a new toplevel GUID. For a similar 5004209962Smm * reason, we must remove the spare now, in the same txg as the detach; 5005209962Smm * otherwise someone could attach a new sibling, change the GUID, and 5006209962Smm * the subsequent attempt to spa_vdev_remove(unspare_guid) would fail. 5007168404Spjd */ 5008168404Spjd if (unspare) { 5009168404Spjd ASSERT(cvd->vdev_isspare); 5010168404Spjd spa_spare_remove(cvd); 5011168404Spjd unspare_guid = cvd->vdev_guid; 5012209962Smm (void) spa_vdev_remove(spa, unspare_guid, B_TRUE); 5013219089Spjd cvd->vdev_unspare = B_TRUE; 5014168404Spjd } 5015168404Spjd 5016168404Spjd /* 5017168404Spjd * If the parent mirror/replacing vdev only has one child, 5018168404Spjd * the parent is no longer needed. Remove it from the tree. 5019168404Spjd */ 5020219089Spjd if (pvd->vdev_children == 1) { 5021219089Spjd if (pvd->vdev_ops == &vdev_spare_ops) 5022219089Spjd cvd->vdev_unspare = B_FALSE; 5023168404Spjd vdev_remove_parent(cvd); 5024219089Spjd } 5025168404Spjd 5026219089Spjd 5027168404Spjd /* 5028168404Spjd * We don't set tvd until now because the parent we just removed 5029168404Spjd * may have been the previous top-level vdev. 5030168404Spjd */ 5031168404Spjd tvd = cvd->vdev_top; 5032168404Spjd ASSERT(tvd->vdev_parent == rvd); 5033168404Spjd 5034168404Spjd /* 5035168404Spjd * Reevaluate the parent vdev state. 5036168404Spjd */ 5037185029Spjd vdev_propagate_state(cvd); 5038168404Spjd 5039168404Spjd /* 5040219089Spjd * If the 'autoexpand' property is set on the pool then automatically 5041219089Spjd * try to expand the size of the pool. For example if the device we 5042219089Spjd * just detached was smaller than the others, it may be possible to 5043219089Spjd * add metaslabs (i.e. grow the pool). We need to reopen the vdev 5044219089Spjd * first so that we can obtain the updated sizes of the leaf vdevs. 5045168404Spjd */ 5046219089Spjd if (spa->spa_autoexpand) { 5047219089Spjd vdev_reopen(tvd); 5048219089Spjd vdev_expand(tvd, txg); 5049219089Spjd } 5050168404Spjd 5051168404Spjd vdev_config_dirty(tvd); 5052168404Spjd 5053168404Spjd /* 5054168404Spjd * Mark vd's DTL as dirty in this txg. vdev_dtl_sync() will see that 5055168404Spjd * vd->vdev_detached is set and free vd's DTL object in syncing context. 5056168404Spjd * But first make sure we're not on any *other* txg's DTL list, to 5057168404Spjd * prevent vd from being accessed after it's freed. 
5058168404Spjd */ 5059219089Spjd vdpath = spa_strdup(vd->vdev_path); 5060209962Smm for (int t = 0; t < TXG_SIZE; t++) 5061168404Spjd (void) txg_list_remove_this(&tvd->vdev_dtl_list, vd, t); 5062168404Spjd vd->vdev_detached = B_TRUE; 5063168404Spjd vdev_dirty(tvd, VDD_DTL, vd, txg); 5064168404Spjd 5065185029Spjd spa_event_notify(spa, vd, ESC_ZFS_VDEV_REMOVE); 5066185029Spjd 5067219089Spjd /* hang on to the spa before we release the lock */ 5068219089Spjd spa_open_ref(spa, FTAG); 5069219089Spjd 5070168404Spjd error = spa_vdev_exit(spa, vd, txg, 0); 5071168404Spjd 5072248571Smm spa_history_log_internal(spa, "detach", NULL, 5073219089Spjd "vdev=%s", vdpath); 5074219089Spjd spa_strfree(vdpath); 5075219089Spjd 5076168404Spjd /* 5077168404Spjd * If this was the removal of the original device in a hot spare vdev, 5078168404Spjd * then we want to go through and remove the device from the hot spare 5079168404Spjd * list of every other pool. 5080168404Spjd */ 5081168404Spjd if (unspare) { 5082219089Spjd spa_t *altspa = NULL; 5083219089Spjd 5084168404Spjd mutex_enter(&spa_namespace_lock); 5085219089Spjd while ((altspa = spa_next(altspa)) != NULL) { 5086219089Spjd if (altspa->spa_state != POOL_STATE_ACTIVE || 5087219089Spjd altspa == spa) 5088168404Spjd continue; 5089219089Spjd 5090219089Spjd spa_open_ref(altspa, FTAG); 5091185029Spjd mutex_exit(&spa_namespace_lock); 5092219089Spjd (void) spa_vdev_remove(altspa, unspare_guid, B_TRUE); 5093185029Spjd mutex_enter(&spa_namespace_lock); 5094219089Spjd spa_close(altspa, FTAG); 5095168404Spjd } 5096168404Spjd mutex_exit(&spa_namespace_lock); 5097219089Spjd 5098219089Spjd /* search the rest of the vdevs for spares to remove */ 5099219089Spjd spa_vdev_resilver_done(spa); 5100168404Spjd } 5101168404Spjd 5102219089Spjd /* all done with the spa; OK to release */ 5103219089Spjd mutex_enter(&spa_namespace_lock); 5104219089Spjd spa_close(spa, FTAG); 5105219089Spjd mutex_exit(&spa_namespace_lock); 5106219089Spjd 5107168404Spjd return (error); 5108168404Spjd} 5109168404Spjd 5110219089Spjd/* 5111219089Spjd * Split a set of devices from their mirrors, and create a new pool from them. 
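 *
 * A sketch of the 'config' nvlist this expects, as inferred from the
 * validation below (the ZPOOL_CONFIG_* names are the usual config keys;
 * the values shown are illustrative):
 *
 *	ZPOOL_CONFIG_VDEV_TREE:
 *	    ZPOOL_CONFIG_CHILDREN:  one entry per top-level vdev, in order
 *		{ ZPOOL_CONFIG_GUID = <leaf guid to take from mirror 0> }
 *		{ ZPOOL_CONFIG_GUID = <leaf guid to take from mirror 1> }
 *		{ ZPOOL_CONFIG_IS_HOLE = 1 }   placeholder for a hole/log slot
 *
 * 'newname' must not collide with an existing pool, 'props' may carry
 * properties such as altroot for the new pool, and 'exp' asks for the
 * new pool to be exported once the split completes.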
5112219089Spjd */ 5113219089Spjdint 5114219089Spjdspa_vdev_split_mirror(spa_t *spa, char *newname, nvlist_t *config, 5115219089Spjd nvlist_t *props, boolean_t exp) 5116219089Spjd{ 5117219089Spjd int error = 0; 5118219089Spjd uint64_t txg, *glist; 5119219089Spjd spa_t *newspa; 5120219089Spjd uint_t c, children, lastlog; 5121219089Spjd nvlist_t **child, *nvl, *tmp; 5122219089Spjd dmu_tx_t *tx; 5123219089Spjd char *altroot = NULL; 5124219089Spjd vdev_t *rvd, **vml = NULL; /* vdev modify list */ 5125219089Spjd boolean_t activate_slog; 5126219089Spjd 5127219089Spjd ASSERT(spa_writeable(spa)); 5128219089Spjd 5129219089Spjd txg = spa_vdev_enter(spa); 5130219089Spjd 5131219089Spjd /* clear the log and flush everything up to now */ 5132219089Spjd activate_slog = spa_passivate_log(spa); 5133219089Spjd (void) spa_vdev_config_exit(spa, NULL, txg, 0, FTAG); 5134219089Spjd error = spa_offline_log(spa); 5135219089Spjd txg = spa_vdev_config_enter(spa); 5136219089Spjd 5137219089Spjd if (activate_slog) 5138219089Spjd spa_activate_log(spa); 5139219089Spjd 5140219089Spjd if (error != 0) 5141219089Spjd return (spa_vdev_exit(spa, NULL, txg, error)); 5142219089Spjd 5143219089Spjd /* check new spa name before going any further */ 5144219089Spjd if (spa_lookup(newname) != NULL) 5145219089Spjd return (spa_vdev_exit(spa, NULL, txg, EEXIST)); 5146219089Spjd 5147219089Spjd /* 5148219089Spjd * scan through all the children to ensure they're all mirrors 5149219089Spjd */ 5150219089Spjd if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvl) != 0 || 5151219089Spjd nvlist_lookup_nvlist_array(nvl, ZPOOL_CONFIG_CHILDREN, &child, 5152219089Spjd &children) != 0) 5153219089Spjd return (spa_vdev_exit(spa, NULL, txg, EINVAL)); 5154219089Spjd 5155219089Spjd /* first, check to ensure we've got the right child count */ 5156219089Spjd rvd = spa->spa_root_vdev; 5157219089Spjd lastlog = 0; 5158219089Spjd for (c = 0; c < rvd->vdev_children; c++) { 5159219089Spjd vdev_t *vd = rvd->vdev_child[c]; 5160219089Spjd 5161219089Spjd /* don't count the holes & logs as children */ 5162219089Spjd if (vd->vdev_islog || vd->vdev_ishole) { 5163219089Spjd if (lastlog == 0) 5164219089Spjd lastlog = c; 5165219089Spjd continue; 5166219089Spjd } 5167219089Spjd 5168219089Spjd lastlog = 0; 5169219089Spjd } 5170219089Spjd if (children != (lastlog != 0 ? 
lastlog : rvd->vdev_children)) 5171219089Spjd return (spa_vdev_exit(spa, NULL, txg, EINVAL)); 5172219089Spjd 5173219089Spjd /* next, ensure no spare or cache devices are part of the split */ 5174219089Spjd if (nvlist_lookup_nvlist(nvl, ZPOOL_CONFIG_SPARES, &tmp) == 0 || 5175219089Spjd nvlist_lookup_nvlist(nvl, ZPOOL_CONFIG_L2CACHE, &tmp) == 0) 5176219089Spjd return (spa_vdev_exit(spa, NULL, txg, EINVAL)); 5177219089Spjd 5178219089Spjd vml = kmem_zalloc(children * sizeof (vdev_t *), KM_SLEEP); 5179219089Spjd glist = kmem_zalloc(children * sizeof (uint64_t), KM_SLEEP); 5180219089Spjd 5181219089Spjd /* then, loop over each vdev and validate it */ 5182219089Spjd for (c = 0; c < children; c++) { 5183219089Spjd uint64_t is_hole = 0; 5184219089Spjd 5185219089Spjd (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE, 5186219089Spjd &is_hole); 5187219089Spjd 5188219089Spjd if (is_hole != 0) { 5189219089Spjd if (spa->spa_root_vdev->vdev_child[c]->vdev_ishole || 5190219089Spjd spa->spa_root_vdev->vdev_child[c]->vdev_islog) { 5191219089Spjd continue; 5192219089Spjd } else { 5193249195Smm error = SET_ERROR(EINVAL); 5194219089Spjd break; 5195219089Spjd } 5196219089Spjd } 5197219089Spjd 5198219089Spjd /* which disk is going to be split? */ 5199219089Spjd if (nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_GUID, 5200219089Spjd &glist[c]) != 0) { 5201249195Smm error = SET_ERROR(EINVAL); 5202219089Spjd break; 5203219089Spjd } 5204219089Spjd 5205219089Spjd /* look it up in the spa */ 5206219089Spjd vml[c] = spa_lookup_by_guid(spa, glist[c], B_FALSE); 5207219089Spjd if (vml[c] == NULL) { 5208249195Smm error = SET_ERROR(ENODEV); 5209219089Spjd break; 5210219089Spjd } 5211219089Spjd 5212219089Spjd /* make sure there's nothing stopping the split */ 5213219089Spjd if (vml[c]->vdev_parent->vdev_ops != &vdev_mirror_ops || 5214219089Spjd vml[c]->vdev_islog || 5215219089Spjd vml[c]->vdev_ishole || 5216219089Spjd vml[c]->vdev_isspare || 5217219089Spjd vml[c]->vdev_isl2cache || 5218219089Spjd !vdev_writeable(vml[c]) || 5219219089Spjd vml[c]->vdev_children != 0 || 5220219089Spjd vml[c]->vdev_state != VDEV_STATE_HEALTHY || 5221219089Spjd c != spa->spa_root_vdev->vdev_child[c]->vdev_id) { 5222249195Smm error = SET_ERROR(EINVAL); 5223219089Spjd break; 5224219089Spjd } 5225219089Spjd 5226219089Spjd if (vdev_dtl_required(vml[c])) { 5227249195Smm error = SET_ERROR(EBUSY); 5228219089Spjd break; 5229219089Spjd } 5230219089Spjd 5231219089Spjd /* we need certain info from the top level */ 5232219089Spjd VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_METASLAB_ARRAY, 5233219089Spjd vml[c]->vdev_top->vdev_ms_array) == 0); 5234219089Spjd VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_METASLAB_SHIFT, 5235219089Spjd vml[c]->vdev_top->vdev_ms_shift) == 0); 5236219089Spjd VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_ASIZE, 5237219089Spjd vml[c]->vdev_top->vdev_asize) == 0); 5238219089Spjd VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_ASHIFT, 5239219089Spjd vml[c]->vdev_top->vdev_ashift) == 0); 5240219089Spjd } 5241219089Spjd 5242219089Spjd if (error != 0) { 5243219089Spjd kmem_free(vml, children * sizeof (vdev_t *)); 5244219089Spjd kmem_free(glist, children * sizeof (uint64_t)); 5245219089Spjd return (spa_vdev_exit(spa, NULL, txg, error)); 5246219089Spjd } 5247219089Spjd 5248219089Spjd /* stop writers from using the disks */ 5249219089Spjd for (c = 0; c < children; c++) { 5250219089Spjd if (vml[c] != NULL) 5251219089Spjd vml[c]->vdev_offline = B_TRUE; 5252219089Spjd } 5253219089Spjd vdev_reopen(spa->spa_root_vdev); 
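	/*
	 * At this point the leaves chosen for the split are marked offline
	 * and the tree has been reopened, so no new writes land on them
	 * while the new pool is assembled from them below.
	 */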
5254219089Spjd 5255219089Spjd /* 5256219089Spjd * Temporarily record the splitting vdevs in the spa config. This 5257219089Spjd * will disappear once the config is regenerated. 5258219089Spjd */ 5259219089Spjd VERIFY(nvlist_alloc(&nvl, NV_UNIQUE_NAME, KM_SLEEP) == 0); 5260219089Spjd VERIFY(nvlist_add_uint64_array(nvl, ZPOOL_CONFIG_SPLIT_LIST, 5261219089Spjd glist, children) == 0); 5262219089Spjd kmem_free(glist, children * sizeof (uint64_t)); 5263219089Spjd 5264219089Spjd mutex_enter(&spa->spa_props_lock); 5265219089Spjd VERIFY(nvlist_add_nvlist(spa->spa_config, ZPOOL_CONFIG_SPLIT, 5266219089Spjd nvl) == 0); 5267219089Spjd mutex_exit(&spa->spa_props_lock); 5268219089Spjd spa->spa_config_splitting = nvl; 5269219089Spjd vdev_config_dirty(spa->spa_root_vdev); 5270219089Spjd 5271219089Spjd /* configure and create the new pool */ 5272219089Spjd VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME, newname) == 0); 5273219089Spjd VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE, 5274219089Spjd exp ? POOL_STATE_EXPORTED : POOL_STATE_ACTIVE) == 0); 5275219089Spjd VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_VERSION, 5276219089Spjd spa_version(spa)) == 0); 5277219089Spjd VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_TXG, 5278219089Spjd spa->spa_config_txg) == 0); 5279219089Spjd VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_GUID, 5280219089Spjd spa_generate_guid(NULL)) == 0); 5281219089Spjd (void) nvlist_lookup_string(props, 5282219089Spjd zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot); 5283219089Spjd 5284219089Spjd /* add the new pool to the namespace */ 5285219089Spjd newspa = spa_add(newname, config, altroot); 5286219089Spjd newspa->spa_config_txg = spa->spa_config_txg; 5287219089Spjd spa_set_log_state(newspa, SPA_LOG_CLEAR); 5288219089Spjd 5289219089Spjd /* release the spa config lock, retaining the namespace lock */ 5290219089Spjd spa_vdev_config_exit(spa, NULL, txg, 0, FTAG); 5291219089Spjd 5292219089Spjd if (zio_injection_enabled) 5293219089Spjd zio_handle_panic_injection(spa, FTAG, 1); 5294219089Spjd 5295219089Spjd spa_activate(newspa, spa_mode_global); 5296219089Spjd spa_async_suspend(newspa); 5297219089Spjd 5298219089Spjd#ifndef sun 5299219089Spjd /* mark that we are creating new spa by splitting */ 5300219089Spjd newspa->spa_splitting_newspa = B_TRUE; 5301219089Spjd#endif 5302219089Spjd /* create the new pool from the disks of the original pool */ 5303219089Spjd error = spa_load(newspa, SPA_LOAD_IMPORT, SPA_IMPORT_ASSEMBLE, B_TRUE); 5304219089Spjd#ifndef sun 5305219089Spjd newspa->spa_splitting_newspa = B_FALSE; 5306219089Spjd#endif 5307219089Spjd if (error) 5308219089Spjd goto out; 5309219089Spjd 5310219089Spjd /* if that worked, generate a real config for the new pool */ 5311219089Spjd if (newspa->spa_root_vdev != NULL) { 5312219089Spjd VERIFY(nvlist_alloc(&newspa->spa_config_splitting, 5313219089Spjd NV_UNIQUE_NAME, KM_SLEEP) == 0); 5314219089Spjd VERIFY(nvlist_add_uint64(newspa->spa_config_splitting, 5315219089Spjd ZPOOL_CONFIG_SPLIT_GUID, spa_guid(spa)) == 0); 5316219089Spjd spa_config_set(newspa, spa_config_generate(newspa, NULL, -1ULL, 5317219089Spjd B_TRUE)); 5318219089Spjd } 5319219089Spjd 5320219089Spjd /* set the props */ 5321219089Spjd if (props != NULL) { 5322219089Spjd spa_configfile_set(newspa, props, B_FALSE); 5323219089Spjd error = spa_prop_set(newspa, props); 5324219089Spjd if (error) 5325219089Spjd goto out; 5326219089Spjd } 5327219089Spjd 5328219089Spjd /* flush everything */ 5329219089Spjd txg = spa_vdev_config_enter(newspa); 5330219089Spjd 
vdev_config_dirty(newspa->spa_root_vdev); 5331219089Spjd (void) spa_vdev_config_exit(newspa, NULL, txg, 0, FTAG); 5332219089Spjd 5333219089Spjd if (zio_injection_enabled) 5334219089Spjd zio_handle_panic_injection(spa, FTAG, 2); 5335219089Spjd 5336219089Spjd spa_async_resume(newspa); 5337219089Spjd 5338219089Spjd /* finally, update the original pool's config */ 5339219089Spjd txg = spa_vdev_config_enter(spa); 5340219089Spjd tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir); 5341219089Spjd error = dmu_tx_assign(tx, TXG_WAIT); 5342219089Spjd if (error != 0) 5343219089Spjd dmu_tx_abort(tx); 5344219089Spjd for (c = 0; c < children; c++) { 5345219089Spjd if (vml[c] != NULL) { 5346219089Spjd vdev_split(vml[c]); 5347219089Spjd if (error == 0) 5348248571Smm spa_history_log_internal(spa, "detach", tx, 5349248571Smm "vdev=%s", vml[c]->vdev_path); 5350219089Spjd vdev_free(vml[c]); 5351219089Spjd } 5352219089Spjd } 5353219089Spjd vdev_config_dirty(spa->spa_root_vdev); 5354219089Spjd spa->spa_config_splitting = NULL; 5355219089Spjd nvlist_free(nvl); 5356219089Spjd if (error == 0) 5357219089Spjd dmu_tx_commit(tx); 5358219089Spjd (void) spa_vdev_exit(spa, NULL, txg, 0); 5359219089Spjd 5360219089Spjd if (zio_injection_enabled) 5361219089Spjd zio_handle_panic_injection(spa, FTAG, 3); 5362219089Spjd 5363219089Spjd /* split is complete; log a history record */ 5364248571Smm spa_history_log_internal(newspa, "split", NULL, 5365248571Smm "from pool %s", spa_name(spa)); 5366219089Spjd 5367219089Spjd kmem_free(vml, children * sizeof (vdev_t *)); 5368219089Spjd 5369219089Spjd /* if we're not going to mount the filesystems in userland, export */ 5370219089Spjd if (exp) 5371219089Spjd error = spa_export_common(newname, POOL_STATE_EXPORTED, NULL, 5372219089Spjd B_FALSE, B_FALSE); 5373219089Spjd 5374219089Spjd return (error); 5375219089Spjd 5376219089Spjdout: 5377219089Spjd spa_unload(newspa); 5378219089Spjd spa_deactivate(newspa); 5379219089Spjd spa_remove(newspa); 5380219089Spjd 5381219089Spjd txg = spa_vdev_config_enter(spa); 5382219089Spjd 5383219089Spjd /* re-online all offlined disks */ 5384219089Spjd for (c = 0; c < children; c++) { 5385219089Spjd if (vml[c] != NULL) 5386219089Spjd vml[c]->vdev_offline = B_FALSE; 5387219089Spjd } 5388219089Spjd vdev_reopen(spa->spa_root_vdev); 5389219089Spjd 5390219089Spjd nvlist_free(spa->spa_config_splitting); 5391219089Spjd spa->spa_config_splitting = NULL; 5392219089Spjd (void) spa_vdev_exit(spa, NULL, txg, error); 5393219089Spjd 5394219089Spjd kmem_free(vml, children * sizeof (vdev_t *)); 5395219089Spjd return (error); 5396219089Spjd} 5397219089Spjd 5398185029Spjdstatic nvlist_t * 5399185029Spjdspa_nvlist_lookup_by_guid(nvlist_t **nvpp, int count, uint64_t target_guid) 5400185029Spjd{ 5401185029Spjd for (int i = 0; i < count; i++) { 5402185029Spjd uint64_t guid; 5403185029Spjd 5404185029Spjd VERIFY(nvlist_lookup_uint64(nvpp[i], ZPOOL_CONFIG_GUID, 5405185029Spjd &guid) == 0); 5406185029Spjd 5407185029Spjd if (guid == target_guid) 5408185029Spjd return (nvpp[i]); 5409185029Spjd } 5410185029Spjd 5411185029Spjd return (NULL); 5412185029Spjd} 5413185029Spjd 5414185029Spjdstatic void 5415185029Spjdspa_vdev_remove_aux(nvlist_t *config, char *name, nvlist_t **dev, int count, 5416185029Spjd nvlist_t *dev_to_remove) 5417185029Spjd{ 5418185029Spjd nvlist_t **newdev = NULL; 5419185029Spjd 5420185029Spjd if (count > 1) 5421185029Spjd newdev = kmem_alloc((count - 1) * sizeof (void *), KM_SLEEP); 5422185029Spjd 5423185029Spjd for (int i = 0, j = 0; i < count; i++) { 5424185029Spjd if 
(dev[i] == dev_to_remove) 5425185029Spjd continue; 5426185029Spjd VERIFY(nvlist_dup(dev[i], &newdev[j++], KM_SLEEP) == 0); 5427185029Spjd } 5428185029Spjd 5429185029Spjd VERIFY(nvlist_remove(config, name, DATA_TYPE_NVLIST_ARRAY) == 0); 5430185029Spjd VERIFY(nvlist_add_nvlist_array(config, name, newdev, count - 1) == 0); 5431185029Spjd 5432185029Spjd for (int i = 0; i < count - 1; i++) 5433185029Spjd nvlist_free(newdev[i]); 5434185029Spjd 5435185029Spjd if (count > 1) 5436185029Spjd kmem_free(newdev, (count - 1) * sizeof (void *)); 5437185029Spjd} 5438185029Spjd 5439168404Spjd/* 5440219089Spjd * Evacuate the device. 5441219089Spjd */ 5442219089Spjdstatic int 5443219089Spjdspa_vdev_remove_evacuate(spa_t *spa, vdev_t *vd) 5444219089Spjd{ 5445219089Spjd uint64_t txg; 5446219089Spjd int error = 0; 5447219089Spjd 5448219089Spjd ASSERT(MUTEX_HELD(&spa_namespace_lock)); 5449219089Spjd ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0); 5450219089Spjd ASSERT(vd == vd->vdev_top); 5451219089Spjd 5452219089Spjd /* 5453219089Spjd * Evacuate the device. We don't hold the config lock as writer 5454219089Spjd * since we need to do I/O but we do keep the 5455219089Spjd * spa_namespace_lock held. Once this completes the device 5456219089Spjd * should no longer have any blocks allocated on it. 5457219089Spjd */ 5458219089Spjd if (vd->vdev_islog) { 5459219089Spjd if (vd->vdev_stat.vs_alloc != 0) 5460219089Spjd error = spa_offline_log(spa); 5461219089Spjd } else { 5462249195Smm error = SET_ERROR(ENOTSUP); 5463219089Spjd } 5464219089Spjd 5465219089Spjd if (error) 5466219089Spjd return (error); 5467219089Spjd 5468219089Spjd /* 5469219089Spjd * The evacuation succeeded. Remove any remaining MOS metadata 5470219089Spjd * associated with this vdev, and wait for these changes to sync. 5471219089Spjd */ 5472240415Smm ASSERT0(vd->vdev_stat.vs_alloc); 5473219089Spjd txg = spa_vdev_config_enter(spa); 5474219089Spjd vd->vdev_removing = B_TRUE; 5475258717Savg vdev_dirty_leaves(vd, VDD_DTL, txg); 5476219089Spjd vdev_config_dirty(vd); 5477219089Spjd spa_vdev_config_exit(spa, NULL, txg, 0, FTAG); 5478219089Spjd 5479219089Spjd return (0); 5480219089Spjd} 5481219089Spjd 5482219089Spjd/* 5483219089Spjd * Complete the removal by cleaning up the namespace. 5484219089Spjd */ 5485219089Spjdstatic void 5486219089Spjdspa_vdev_remove_from_namespace(spa_t *spa, vdev_t *vd) 5487219089Spjd{ 5488219089Spjd vdev_t *rvd = spa->spa_root_vdev; 5489219089Spjd uint64_t id = vd->vdev_id; 5490219089Spjd boolean_t last_vdev = (id == (rvd->vdev_children - 1)); 5491219089Spjd 5492219089Spjd ASSERT(MUTEX_HELD(&spa_namespace_lock)); 5493219089Spjd ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL); 5494219089Spjd ASSERT(vd == vd->vdev_top); 5495219089Spjd 5496219089Spjd /* 5497219089Spjd * Only remove any devices which are empty. 
5498219089Spjd */ 5499219089Spjd if (vd->vdev_stat.vs_alloc != 0) 5500219089Spjd return; 5501219089Spjd 5502219089Spjd (void) vdev_label_init(vd, 0, VDEV_LABEL_REMOVE); 5503219089Spjd 5504219089Spjd if (list_link_active(&vd->vdev_state_dirty_node)) 5505219089Spjd vdev_state_clean(vd); 5506219089Spjd if (list_link_active(&vd->vdev_config_dirty_node)) 5507219089Spjd vdev_config_clean(vd); 5508219089Spjd 5509219089Spjd vdev_free(vd); 5510219089Spjd 5511219089Spjd if (last_vdev) { 5512219089Spjd vdev_compact_children(rvd); 5513219089Spjd } else { 5514219089Spjd vd = vdev_alloc_common(spa, id, 0, &vdev_hole_ops); 5515219089Spjd vdev_add_child(rvd, vd); 5516219089Spjd } 5517219089Spjd vdev_config_dirty(rvd); 5518219089Spjd 5519219089Spjd /* 5520219089Spjd * Reassess the health of our root vdev. 5521219089Spjd */ 5522219089Spjd vdev_reopen(rvd); 5523219089Spjd} 5524219089Spjd 5525219089Spjd/* 5526219089Spjd * Remove a device from the pool - 5527219089Spjd * 5528219089Spjd * Removing a device from the vdev namespace requires several steps 5529219089Spjd * and can take a significant amount of time. As a result we use 5530219089Spjd * the spa_vdev_config_[enter/exit] functions which allow us to 5531219089Spjd * grab and release the spa_config_lock while still holding the namespace 5532219089Spjd * lock. During each step the configuration is synced out. 5533251631Sdelphij * 5534251631Sdelphij * Currently, this supports removing only hot spares, slogs, and level 2 ARC 5535251631Sdelphij * devices. 5536219089Spjd */ 5537168404Spjdint 5538168404Spjdspa_vdev_remove(spa_t *spa, uint64_t guid, boolean_t unspare) 5539168404Spjd{ 5540168404Spjd vdev_t *vd; 5541219089Spjd metaslab_group_t *mg; 5542185029Spjd nvlist_t **spares, **l2cache, *nv; 5543219089Spjd uint64_t txg = 0; 5544185029Spjd uint_t nspares, nl2cache; 5545185029Spjd int error = 0; 5546209962Smm boolean_t locked = MUTEX_HELD(&spa_namespace_lock); 5547168404Spjd 5548219089Spjd ASSERT(spa_writeable(spa)); 5549219089Spjd 5550209962Smm if (!locked) 5551209962Smm txg = spa_vdev_enter(spa); 5552168404Spjd 5553185029Spjd vd = spa_lookup_by_guid(spa, guid, B_FALSE); 5554168404Spjd 5555185029Spjd if (spa->spa_spares.sav_vdevs != NULL && 5556185029Spjd nvlist_lookup_nvlist_array(spa->spa_spares.sav_config, 5557185029Spjd ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0 && 5558185029Spjd (nv = spa_nvlist_lookup_by_guid(spares, nspares, guid)) != NULL) { 5559185029Spjd /* 5560185029Spjd * Only remove the hot spare if it's not currently in use 5561185029Spjd * in this pool. 5562185029Spjd */ 5563185029Spjd if (vd == NULL || unspare) { 5564185029Spjd spa_vdev_remove_aux(spa->spa_spares.sav_config, 5565185029Spjd ZPOOL_CONFIG_SPARES, spares, nspares, nv); 5566185029Spjd spa_load_spares(spa); 5567185029Spjd spa->spa_spares.sav_sync = B_TRUE; 5568185029Spjd } else { 5569249195Smm error = SET_ERROR(EBUSY); 5570168404Spjd } 5571185029Spjd } else if (spa->spa_l2cache.sav_vdevs != NULL && 5572185029Spjd nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config, 5573185029Spjd ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0 && 5574185029Spjd (nv = spa_nvlist_lookup_by_guid(l2cache, nl2cache, guid)) != NULL) { 5575185029Spjd /* 5576185029Spjd * Cache devices can always be removed. 
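 * The spa_vdev_remove_aux() call below rebuilds the ZPOOL_CONFIG_L2CACHE
 * nvlist array without the removed entry (for example { c0, c1, c2 }
 * minus c1 leaves { c0, c2 }) before the in-core l2cache state is
 * reloaded.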
5577185029Spjd */ 5578185029Spjd spa_vdev_remove_aux(spa->spa_l2cache.sav_config, 5579185029Spjd ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache, nv); 5580185029Spjd spa_load_l2cache(spa); 5581185029Spjd spa->spa_l2cache.sav_sync = B_TRUE; 5582219089Spjd } else if (vd != NULL && vd->vdev_islog) { 5583219089Spjd ASSERT(!locked); 5584219089Spjd ASSERT(vd == vd->vdev_top); 5585219089Spjd 5586219089Spjd mg = vd->vdev_mg; 5587219089Spjd 5588219089Spjd /* 5589219089Spjd * Stop allocating from this vdev. 5590219089Spjd */ 5591219089Spjd metaslab_group_passivate(mg); 5592219089Spjd 5593219089Spjd /* 5594219089Spjd * Wait for the youngest allocations and frees to sync, 5595219089Spjd * and then wait for the deferral of those frees to finish. 5596219089Spjd */ 5597219089Spjd spa_vdev_config_exit(spa, NULL, 5598219089Spjd txg + TXG_CONCURRENT_STATES + TXG_DEFER_SIZE, 0, FTAG); 5599219089Spjd 5600219089Spjd /* 5601219089Spjd * Attempt to evacuate the vdev. 5602219089Spjd */ 5603219089Spjd error = spa_vdev_remove_evacuate(spa, vd); 5604219089Spjd 5605219089Spjd txg = spa_vdev_config_enter(spa); 5606219089Spjd 5607219089Spjd /* 5608219089Spjd * If we couldn't evacuate the vdev, unwind. 5609219089Spjd */ 5610219089Spjd if (error) { 5611219089Spjd metaslab_group_activate(mg); 5612219089Spjd return (spa_vdev_exit(spa, NULL, txg, error)); 5613219089Spjd } 5614219089Spjd 5615219089Spjd /* 5616219089Spjd * Clean up the vdev namespace. 5617219089Spjd */ 5618219089Spjd spa_vdev_remove_from_namespace(spa, vd); 5619219089Spjd 5620185029Spjd } else if (vd != NULL) { 5621185029Spjd /* 5622185029Spjd * Normal vdevs cannot be removed (yet). 5623185029Spjd */ 5624249195Smm error = SET_ERROR(ENOTSUP); 5625168404Spjd } else { 5626185029Spjd /* 5627185029Spjd * There is no vdev of any kind with the specified guid. 5628185029Spjd */ 5629249195Smm error = SET_ERROR(ENOENT); 5630168404Spjd } 5631168404Spjd 5632209962Smm if (!locked) 5633209962Smm return (spa_vdev_exit(spa, NULL, txg, error)); 5634209962Smm 5635209962Smm return (error); 5636168404Spjd} 5637168404Spjd 5638168404Spjd/* 5639185029Spjd * Find any device that's done replacing, or a vdev marked 'unspare' that's 5640251631Sdelphij * currently spared, so we can detach it. 5641168404Spjd */ 5642168404Spjdstatic vdev_t * 5643185029Spjdspa_vdev_resilver_done_hunt(vdev_t *vd) 5644168404Spjd{ 5645168404Spjd vdev_t *newvd, *oldvd; 5646168404Spjd 5647219089Spjd for (int c = 0; c < vd->vdev_children; c++) { 5648185029Spjd oldvd = spa_vdev_resilver_done_hunt(vd->vdev_child[c]); 5649168404Spjd if (oldvd != NULL) 5650168404Spjd return (oldvd); 5651168404Spjd } 5652168404Spjd 5653185029Spjd /* 5654219089Spjd * Check for a completed replacement. We always consider the first 5655219089Spjd * vdev in the list to be the oldest vdev, and the last one to be 5656219089Spjd * the newest (see spa_vdev_attach() for how that works). In 5657219089Spjd * the case where the newest vdev is faulted, we will not automatically 5658219089Spjd * remove it after a resilver completes. This is OK as it will require 5659219089Spjd * user intervention to determine which disk the admin wishes to keep. 
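 *
 * For example, with a replacing vdev R(old, new): once DTL_MISSING and
 * DTL_OUTAGE are empty on 'new' and 'old' is no longer required, this
 * returns 'old' so that spa_vdev_resilver_done() can detach it and the
 * replacing vdev collapses back to a plain leaf.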
5660185029Spjd */ 5661219089Spjd if (vd->vdev_ops == &vdev_replacing_ops) { 5662219089Spjd ASSERT(vd->vdev_children > 1); 5663219089Spjd 5664219089Spjd newvd = vd->vdev_child[vd->vdev_children - 1]; 5665168404Spjd oldvd = vd->vdev_child[0]; 5666168404Spjd 5667209962Smm if (vdev_dtl_empty(newvd, DTL_MISSING) && 5668219089Spjd vdev_dtl_empty(newvd, DTL_OUTAGE) && 5669209962Smm !vdev_dtl_required(oldvd)) 5670168404Spjd return (oldvd); 5671168404Spjd } 5672168404Spjd 5673185029Spjd /* 5674185029Spjd * Check for a completed resilver with the 'unspare' flag set. 5675185029Spjd */ 5676219089Spjd if (vd->vdev_ops == &vdev_spare_ops) { 5677219089Spjd vdev_t *first = vd->vdev_child[0]; 5678219089Spjd vdev_t *last = vd->vdev_child[vd->vdev_children - 1]; 5679185029Spjd 5680219089Spjd if (last->vdev_unspare) { 5681219089Spjd oldvd = first; 5682219089Spjd newvd = last; 5683219089Spjd } else if (first->vdev_unspare) { 5684219089Spjd oldvd = last; 5685219089Spjd newvd = first; 5686219089Spjd } else { 5687219089Spjd oldvd = NULL; 5688219089Spjd } 5689219089Spjd 5690219089Spjd if (oldvd != NULL && 5691209962Smm vdev_dtl_empty(newvd, DTL_MISSING) && 5692219089Spjd vdev_dtl_empty(newvd, DTL_OUTAGE) && 5693219089Spjd !vdev_dtl_required(oldvd)) 5694185029Spjd return (oldvd); 5695219089Spjd 5696219089Spjd /* 5697219089Spjd * If there are more than two spares attached to a disk, 5698219089Spjd * and those spares are not required, then we want to 5699219089Spjd * attempt to free them up now so that they can be used 5700219089Spjd * by other pools. Once we're back down to a single 5701219089Spjd * disk+spare, we stop removing them. 5702219089Spjd */ 5703219089Spjd if (vd->vdev_children > 2) { 5704219089Spjd newvd = vd->vdev_child[1]; 5705219089Spjd 5706219089Spjd if (newvd->vdev_isspare && last->vdev_isspare && 5707219089Spjd vdev_dtl_empty(last, DTL_MISSING) && 5708219089Spjd vdev_dtl_empty(last, DTL_OUTAGE) && 5709219089Spjd !vdev_dtl_required(newvd)) 5710219089Spjd return (newvd); 5711185029Spjd } 5712185029Spjd } 5713185029Spjd 5714168404Spjd return (NULL); 5715168404Spjd} 5716168404Spjd 5717168404Spjdstatic void 5718185029Spjdspa_vdev_resilver_done(spa_t *spa) 5719168404Spjd{ 5720209962Smm vdev_t *vd, *pvd, *ppvd; 5721209962Smm uint64_t guid, sguid, pguid, ppguid; 5722168404Spjd 5723209962Smm spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 5724168404Spjd 5725185029Spjd while ((vd = spa_vdev_resilver_done_hunt(spa->spa_root_vdev)) != NULL) { 5726209962Smm pvd = vd->vdev_parent; 5727209962Smm ppvd = pvd->vdev_parent; 5728168404Spjd guid = vd->vdev_guid; 5729209962Smm pguid = pvd->vdev_guid; 5730209962Smm ppguid = ppvd->vdev_guid; 5731209962Smm sguid = 0; 5732168404Spjd /* 5733168404Spjd * If we have just finished replacing a hot spared device, then 5734168404Spjd * we need to detach the parent's first child (the original hot 5735168404Spjd * spare) as well. 
5736168404Spjd */ 5737219089Spjd if (ppvd->vdev_ops == &vdev_spare_ops && pvd->vdev_id == 0 && 5738219089Spjd ppvd->vdev_children == 2) { 5739168404Spjd ASSERT(pvd->vdev_ops == &vdev_replacing_ops); 5740209962Smm sguid = ppvd->vdev_child[1]->vdev_guid; 5741168404Spjd } 5742254112Sdelphij ASSERT(vd->vdev_resilver_txg == 0 || !vdev_dtl_required(vd)); 5743254112Sdelphij 5744209962Smm spa_config_exit(spa, SCL_ALL, FTAG); 5745209962Smm if (spa_vdev_detach(spa, guid, pguid, B_TRUE) != 0) 5746168404Spjd return; 5747209962Smm if (sguid && spa_vdev_detach(spa, sguid, ppguid, B_TRUE) != 0) 5748168404Spjd return; 5749209962Smm spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 5750168404Spjd } 5751168404Spjd 5752209962Smm spa_config_exit(spa, SCL_ALL, FTAG); 5753168404Spjd} 5754168404Spjd 5755168404Spjd/* 5756219089Spjd * Update the stored path or FRU for this vdev. 5757168404Spjd */ 5758168404Spjdint 5759209962Smmspa_vdev_set_common(spa_t *spa, uint64_t guid, const char *value, 5760209962Smm boolean_t ispath) 5761168404Spjd{ 5762185029Spjd vdev_t *vd; 5763219089Spjd boolean_t sync = B_FALSE; 5764168404Spjd 5765219089Spjd ASSERT(spa_writeable(spa)); 5766168404Spjd 5767219089Spjd spa_vdev_state_enter(spa, SCL_ALL); 5768219089Spjd 5769209962Smm if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL) 5770219089Spjd return (spa_vdev_state_exit(spa, NULL, ENOENT)); 5771168404Spjd 5772168404Spjd if (!vd->vdev_ops->vdev_op_leaf) 5773219089Spjd return (spa_vdev_state_exit(spa, NULL, ENOTSUP)); 5774168404Spjd 5775209962Smm if (ispath) { 5776219089Spjd if (strcmp(value, vd->vdev_path) != 0) { 5777219089Spjd spa_strfree(vd->vdev_path); 5778219089Spjd vd->vdev_path = spa_strdup(value); 5779219089Spjd sync = B_TRUE; 5780219089Spjd } 5781209962Smm } else { 5782219089Spjd if (vd->vdev_fru == NULL) { 5783219089Spjd vd->vdev_fru = spa_strdup(value); 5784219089Spjd sync = B_TRUE; 5785219089Spjd } else if (strcmp(value, vd->vdev_fru) != 0) { 5786209962Smm spa_strfree(vd->vdev_fru); 5787219089Spjd vd->vdev_fru = spa_strdup(value); 5788219089Spjd sync = B_TRUE; 5789219089Spjd } 5790209962Smm } 5791168404Spjd 5792219089Spjd return (spa_vdev_state_exit(spa, sync ? 
vd : NULL, 0)); 5793168404Spjd} 5794168404Spjd 5795209962Smmint 5796209962Smmspa_vdev_setpath(spa_t *spa, uint64_t guid, const char *newpath) 5797209962Smm{ 5798209962Smm return (spa_vdev_set_common(spa, guid, newpath, B_TRUE)); 5799209962Smm} 5800209962Smm 5801209962Smmint 5802209962Smmspa_vdev_setfru(spa_t *spa, uint64_t guid, const char *newfru) 5803209962Smm{ 5804209962Smm return (spa_vdev_set_common(spa, guid, newfru, B_FALSE)); 5805209962Smm} 5806209962Smm 5807168404Spjd/* 5808168404Spjd * ========================================================================== 5809219089Spjd * SPA Scanning 5810168404Spjd * ========================================================================== 5811168404Spjd */ 5812168404Spjd 5813168404Spjdint 5814219089Spjdspa_scan_stop(spa_t *spa) 5815168404Spjd{ 5816185029Spjd ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0); 5817219089Spjd if (dsl_scan_resilvering(spa->spa_dsl_pool)) 5818249195Smm return (SET_ERROR(EBUSY)); 5819219089Spjd return (dsl_scan_cancel(spa->spa_dsl_pool)); 5820219089Spjd} 5821168404Spjd 5822219089Spjdint 5823219089Spjdspa_scan(spa_t *spa, pool_scan_func_t func) 5824219089Spjd{ 5825219089Spjd ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0); 5826219089Spjd 5827219089Spjd if (func >= POOL_SCAN_FUNCS || func == POOL_SCAN_NONE) 5828249195Smm return (SET_ERROR(ENOTSUP)); 5829168404Spjd 5830168404Spjd /* 5831185029Spjd * If a resilver was requested, but there is no DTL on a 5832185029Spjd * writeable leaf device, we have nothing to do. 5833168404Spjd */ 5834219089Spjd if (func == POOL_SCAN_RESILVER && 5835185029Spjd !vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL)) { 5836185029Spjd spa_async_request(spa, SPA_ASYNC_RESILVER_DONE); 5837168404Spjd return (0); 5838168404Spjd } 5839168404Spjd 5840219089Spjd return (dsl_scan(spa->spa_dsl_pool, func)); 5841168404Spjd} 5842168404Spjd 5843168404Spjd/* 5844168404Spjd * ========================================================================== 5845168404Spjd * SPA async task processing 5846168404Spjd * ========================================================================== 5847168404Spjd */ 5848168404Spjd 5849168404Spjdstatic void 5850185029Spjdspa_async_remove(spa_t *spa, vdev_t *vd) 5851168404Spjd{ 5852185029Spjd if (vd->vdev_remove_wanted) { 5853219089Spjd vd->vdev_remove_wanted = B_FALSE; 5854219089Spjd vd->vdev_delayed_close = B_FALSE; 5855185029Spjd vdev_set_state(vd, B_FALSE, VDEV_STATE_REMOVED, VDEV_AUX_NONE); 5856209962Smm 5857209962Smm /* 5858209962Smm * We want to clear the stats, but we don't want to do a full 5859209962Smm * vdev_clear() as that will cause us to throw away 5860209962Smm * degraded/faulted state as well as attempt to reopen the 5861209962Smm * device, all of which is a waste. 
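 * Instead, just reset the read/write/checksum error counters by hand.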
5862209962Smm */ 5863209962Smm vd->vdev_stat.vs_read_errors = 0; 5864209962Smm vd->vdev_stat.vs_write_errors = 0; 5865209962Smm vd->vdev_stat.vs_checksum_errors = 0; 5866209962Smm 5867185029Spjd vdev_state_dirty(vd->vdev_top); 5868185029Spjd } 5869168404Spjd 5870185029Spjd for (int c = 0; c < vd->vdev_children; c++) 5871185029Spjd spa_async_remove(spa, vd->vdev_child[c]); 5872185029Spjd} 5873168404Spjd 5874185029Spjdstatic void 5875185029Spjdspa_async_probe(spa_t *spa, vdev_t *vd) 5876185029Spjd{ 5877185029Spjd if (vd->vdev_probe_wanted) { 5878219089Spjd vd->vdev_probe_wanted = B_FALSE; 5879185029Spjd vdev_reopen(vd); /* vdev_open() does the actual probe */ 5880168404Spjd } 5881168404Spjd 5882185029Spjd for (int c = 0; c < vd->vdev_children; c++) 5883185029Spjd spa_async_probe(spa, vd->vdev_child[c]); 5884168404Spjd} 5885168404Spjd 5886168404Spjdstatic void 5887219089Spjdspa_async_autoexpand(spa_t *spa, vdev_t *vd) 5888219089Spjd{ 5889219089Spjd sysevent_id_t eid; 5890219089Spjd nvlist_t *attr; 5891219089Spjd char *physpath; 5892219089Spjd 5893219089Spjd if (!spa->spa_autoexpand) 5894219089Spjd return; 5895219089Spjd 5896219089Spjd for (int c = 0; c < vd->vdev_children; c++) { 5897219089Spjd vdev_t *cvd = vd->vdev_child[c]; 5898219089Spjd spa_async_autoexpand(spa, cvd); 5899219089Spjd } 5900219089Spjd 5901219089Spjd if (!vd->vdev_ops->vdev_op_leaf || vd->vdev_physpath == NULL) 5902219089Spjd return; 5903219089Spjd 5904219089Spjd physpath = kmem_zalloc(MAXPATHLEN, KM_SLEEP); 5905219089Spjd (void) snprintf(physpath, MAXPATHLEN, "/devices%s", vd->vdev_physpath); 5906219089Spjd 5907219089Spjd VERIFY(nvlist_alloc(&attr, NV_UNIQUE_NAME, KM_SLEEP) == 0); 5908219089Spjd VERIFY(nvlist_add_string(attr, DEV_PHYS_PATH, physpath) == 0); 5909219089Spjd 5910219089Spjd (void) ddi_log_sysevent(zfs_dip, SUNW_VENDOR, EC_DEV_STATUS, 5911219089Spjd ESC_ZFS_VDEV_AUTOEXPAND, attr, &eid, DDI_SLEEP); 5912219089Spjd 5913219089Spjd nvlist_free(attr); 5914219089Spjd kmem_free(physpath, MAXPATHLEN); 5915219089Spjd} 5916219089Spjd 5917219089Spjdstatic void 5918168404Spjdspa_async_thread(void *arg) 5919168404Spjd{ 5920168404Spjd spa_t *spa = arg; 5921168404Spjd int tasks; 5922168404Spjd 5923168404Spjd ASSERT(spa->spa_sync_on); 5924168404Spjd 5925168404Spjd mutex_enter(&spa->spa_async_lock); 5926168404Spjd tasks = spa->spa_async_tasks; 5927253990Smav spa->spa_async_tasks &= SPA_ASYNC_REMOVE; 5928168404Spjd mutex_exit(&spa->spa_async_lock); 5929168404Spjd 5930168404Spjd /* 5931168404Spjd * See if the config needs to be updated. 5932168404Spjd */ 5933168404Spjd if (tasks & SPA_ASYNC_CONFIG_UPDATE) { 5934219089Spjd uint64_t old_space, new_space; 5935219089Spjd 5936168404Spjd mutex_enter(&spa_namespace_lock); 5937219089Spjd old_space = metaslab_class_get_space(spa_normal_class(spa)); 5938168404Spjd spa_config_update(spa, SPA_CONFIG_UPDATE_POOL); 5939219089Spjd new_space = metaslab_class_get_space(spa_normal_class(spa)); 5940168404Spjd mutex_exit(&spa_namespace_lock); 5941219089Spjd 5942219089Spjd /* 5943219089Spjd * If the pool grew as a result of the config update, 5944219089Spjd * then log an internal history event. 
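 * (old_space/new_space are the normal metaslab class sizes sampled
 * before and after spa_config_update().)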
5945219089Spjd */ 5946219089Spjd if (new_space != old_space) { 5947248571Smm spa_history_log_internal(spa, "vdev online", NULL, 5948219089Spjd "pool '%s' size: %llu(+%llu)", 5949219089Spjd spa_name(spa), new_space, new_space - old_space); 5950219089Spjd } 5951168404Spjd } 5952168404Spjd 5953219089Spjd if ((tasks & SPA_ASYNC_AUTOEXPAND) && !spa_suspended(spa)) { 5954219089Spjd spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); 5955219089Spjd spa_async_autoexpand(spa, spa->spa_root_vdev); 5956219089Spjd spa_config_exit(spa, SCL_CONFIG, FTAG); 5957219089Spjd } 5958219089Spjd 5959168404Spjd /* 5960185029Spjd * See if any devices need to be probed. 5961168404Spjd */ 5962185029Spjd if (tasks & SPA_ASYNC_PROBE) { 5963219089Spjd spa_vdev_state_enter(spa, SCL_NONE); 5964185029Spjd spa_async_probe(spa, spa->spa_root_vdev); 5965185029Spjd (void) spa_vdev_state_exit(spa, NULL, 0); 5966185029Spjd } 5967168404Spjd 5968168404Spjd /* 5969185029Spjd * If any devices are done replacing, detach them. 5970168404Spjd */ 5971185029Spjd if (tasks & SPA_ASYNC_RESILVER_DONE) 5972185029Spjd spa_vdev_resilver_done(spa); 5973168404Spjd 5974168404Spjd /* 5975168404Spjd * Kick off a resilver. 5976168404Spjd */ 5977168404Spjd if (tasks & SPA_ASYNC_RESILVER) 5978219089Spjd dsl_resilver_restart(spa->spa_dsl_pool, 0); 5979168404Spjd 5980168404Spjd /* 5981168404Spjd * Let the world know that we're done. 5982168404Spjd */ 5983168404Spjd mutex_enter(&spa->spa_async_lock); 5984168404Spjd spa->spa_async_thread = NULL; 5985168404Spjd cv_broadcast(&spa->spa_async_cv); 5986168404Spjd mutex_exit(&spa->spa_async_lock); 5987168404Spjd thread_exit(); 5988168404Spjd} 5989168404Spjd 5990253990Smavstatic void 5991253990Smavspa_async_thread_vd(void *arg) 5992253990Smav{ 5993253990Smav spa_t *spa = arg; 5994253990Smav int tasks; 5995253990Smav 5996253990Smav ASSERT(spa->spa_sync_on); 5997253990Smav 5998253990Smav mutex_enter(&spa->spa_async_lock); 5999253990Smav tasks = spa->spa_async_tasks; 6000253990Smavretry: 6001253990Smav spa->spa_async_tasks &= ~SPA_ASYNC_REMOVE; 6002253990Smav mutex_exit(&spa->spa_async_lock); 6003253990Smav 6004253990Smav /* 6005253990Smav * See if any devices need to be marked REMOVED. 6006253990Smav */ 6007253990Smav if (tasks & SPA_ASYNC_REMOVE) { 6008253990Smav spa_vdev_state_enter(spa, SCL_NONE); 6009253990Smav spa_async_remove(spa, spa->spa_root_vdev); 6010253990Smav for (int i = 0; i < spa->spa_l2cache.sav_count; i++) 6011253990Smav spa_async_remove(spa, spa->spa_l2cache.sav_vdevs[i]); 6012253990Smav for (int i = 0; i < spa->spa_spares.sav_count; i++) 6013253990Smav spa_async_remove(spa, spa->spa_spares.sav_vdevs[i]); 6014253990Smav (void) spa_vdev_state_exit(spa, NULL, 0); 6015253990Smav } 6016253990Smav 6017253990Smav /* 6018253990Smav * Let the world know that we're done. 
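 * If another SPA_ASYNC_REMOVE request arrived while we were busy,
 * loop back and handle it before letting the thread exit.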
6019253990Smav */ 6020253990Smav mutex_enter(&spa->spa_async_lock); 6021253990Smav tasks = spa->spa_async_tasks; 6022253990Smav if ((tasks & SPA_ASYNC_REMOVE) != 0) 6023253990Smav goto retry; 6024253990Smav spa->spa_async_thread_vd = NULL; 6025253990Smav cv_broadcast(&spa->spa_async_cv); 6026253990Smav mutex_exit(&spa->spa_async_lock); 6027253990Smav thread_exit(); 6028253990Smav} 6029253990Smav 6030168404Spjdvoid 6031168404Spjdspa_async_suspend(spa_t *spa) 6032168404Spjd{ 6033168404Spjd mutex_enter(&spa->spa_async_lock); 6034168404Spjd spa->spa_async_suspended++; 6035253990Smav while (spa->spa_async_thread != NULL && 6036253990Smav spa->spa_async_thread_vd != NULL) 6037168404Spjd cv_wait(&spa->spa_async_cv, &spa->spa_async_lock); 6038168404Spjd mutex_exit(&spa->spa_async_lock); 6039168404Spjd} 6040168404Spjd 6041168404Spjdvoid 6042168404Spjdspa_async_resume(spa_t *spa) 6043168404Spjd{ 6044168404Spjd mutex_enter(&spa->spa_async_lock); 6045168404Spjd ASSERT(spa->spa_async_suspended != 0); 6046168404Spjd spa->spa_async_suspended--; 6047168404Spjd mutex_exit(&spa->spa_async_lock); 6048168404Spjd} 6049168404Spjd 6050251636Sdelphijstatic boolean_t 6051251636Sdelphijspa_async_tasks_pending(spa_t *spa) 6052251636Sdelphij{ 6053251636Sdelphij uint_t non_config_tasks; 6054251636Sdelphij uint_t config_task; 6055251636Sdelphij boolean_t config_task_suspended; 6056251636Sdelphij 6057253990Smav non_config_tasks = spa->spa_async_tasks & ~(SPA_ASYNC_CONFIG_UPDATE | 6058253990Smav SPA_ASYNC_REMOVE); 6059251636Sdelphij config_task = spa->spa_async_tasks & SPA_ASYNC_CONFIG_UPDATE; 6060251636Sdelphij if (spa->spa_ccw_fail_time == 0) { 6061251636Sdelphij config_task_suspended = B_FALSE; 6062251636Sdelphij } else { 6063251636Sdelphij config_task_suspended = 6064251636Sdelphij (gethrtime() - spa->spa_ccw_fail_time) < 6065251636Sdelphij (zfs_ccw_retry_interval * NANOSEC); 6066251636Sdelphij } 6067251636Sdelphij 6068251636Sdelphij return (non_config_tasks || (config_task && !config_task_suspended)); 6069251636Sdelphij} 6070251636Sdelphij 6071168404Spjdstatic void 6072168404Spjdspa_async_dispatch(spa_t *spa) 6073168404Spjd{ 6074168404Spjd mutex_enter(&spa->spa_async_lock); 6075251636Sdelphij if (spa_async_tasks_pending(spa) && 6076251636Sdelphij !spa->spa_async_suspended && 6077168404Spjd spa->spa_async_thread == NULL && 6078251636Sdelphij rootdir != NULL) 6079168404Spjd spa->spa_async_thread = thread_create(NULL, 0, 6080168404Spjd spa_async_thread, spa, 0, &p0, TS_RUN, maxclsyspri); 6081168404Spjd mutex_exit(&spa->spa_async_lock); 6082168404Spjd} 6083168404Spjd 6084253990Smavstatic void 6085253990Smavspa_async_dispatch_vd(spa_t *spa) 6086253990Smav{ 6087253990Smav mutex_enter(&spa->spa_async_lock); 6088253990Smav if ((spa->spa_async_tasks & SPA_ASYNC_REMOVE) != 0 && 6089253990Smav !spa->spa_async_suspended && 6090253990Smav spa->spa_async_thread_vd == NULL && 6091253990Smav rootdir != NULL) 6092253990Smav spa->spa_async_thread_vd = thread_create(NULL, 0, 6093253990Smav spa_async_thread_vd, spa, 0, &p0, TS_RUN, maxclsyspri); 6094253990Smav mutex_exit(&spa->spa_async_lock); 6095253990Smav} 6096253990Smav 6097168404Spjdvoid 6098168404Spjdspa_async_request(spa_t *spa, int task) 6099168404Spjd{ 6100219089Spjd zfs_dbgmsg("spa=%s async request task=%u", spa->spa_name, task); 6101168404Spjd mutex_enter(&spa->spa_async_lock); 6102168404Spjd spa->spa_async_tasks |= task; 6103168404Spjd mutex_exit(&spa->spa_async_lock); 6104253990Smav spa_async_dispatch_vd(spa); 6105168404Spjd} 6106168404Spjd 6107168404Spjd/* 6108168404Spjd 
* ========================================================================== 6109168404Spjd * SPA syncing routines 6110168404Spjd * ========================================================================== 6111168404Spjd */ 6112168404Spjd 6113219089Spjdstatic int 6114219089Spjdbpobj_enqueue_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx) 6115168404Spjd{ 6116219089Spjd bpobj_t *bpo = arg; 6117219089Spjd bpobj_enqueue(bpo, bp, tx); 6118219089Spjd return (0); 6119219089Spjd} 6120168404Spjd 6121219089Spjdstatic int 6122219089Spjdspa_free_sync_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx) 6123219089Spjd{ 6124219089Spjd zio_t *zio = arg; 6125168404Spjd 6126219089Spjd zio_nowait(zio_free_sync(zio, zio->io_spa, dmu_tx_get_txg(tx), bp, 6127240868Spjd BP_GET_PSIZE(bp), zio->io_flags)); 6128219089Spjd return (0); 6129168404Spjd} 6130168404Spjd 6131258632Savg/* 6132258632Savg * Note: this simple function is not inlined to make it easier to dtrace the 6133258632Savg * amount of time spent syncing frees. 6134258632Savg */ 6135168404Spjdstatic void 6136258632Savgspa_sync_frees(spa_t *spa, bplist_t *bpl, dmu_tx_t *tx) 6137258632Savg{ 6138258632Savg zio_t *zio = zio_root(spa, NULL, NULL, 0); 6139258632Savg bplist_iterate(bpl, spa_free_sync_cb, zio, tx); 6140258632Savg VERIFY(zio_wait(zio) == 0); 6141258632Savg} 6142258632Savg 6143258632Savg/* 6144258632Savg * Note: this simple function is not inlined to make it easier to dtrace the 6145258632Savg * amount of time spent syncing deferred frees. 6146258632Savg */ 6147258632Savgstatic void 6148258632Savgspa_sync_deferred_frees(spa_t *spa, dmu_tx_t *tx) 6149258632Savg{ 6150258632Savg zio_t *zio = zio_root(spa, NULL, NULL, 0); 6151258632Savg VERIFY3U(bpobj_iterate(&spa->spa_deferred_bpobj, 6152258632Savg spa_free_sync_cb, zio, tx), ==, 0); 6153258632Savg VERIFY0(zio_wait(zio)); 6154258632Savg} 6155258632Savg 6156258632Savg 6157258632Savgstatic void 6158168404Spjdspa_sync_nvlist(spa_t *spa, uint64_t obj, nvlist_t *nv, dmu_tx_t *tx) 6159168404Spjd{ 6160168404Spjd char *packed = NULL; 6161185029Spjd size_t bufsize; 6162168404Spjd size_t nvsize = 0; 6163168404Spjd dmu_buf_t *db; 6164168404Spjd 6165168404Spjd VERIFY(nvlist_size(nv, &nvsize, NV_ENCODE_XDR) == 0); 6166168404Spjd 6167185029Spjd /* 6168185029Spjd * Write full (SPA_CONFIG_BLOCKSIZE) blocks of configuration 6169260150Sdelphij * information. This avoids the dmu_buf_will_dirty() path and 6170185029Spjd * saves us a pre-read to get data we don't actually care about. 
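 * The packed size itself is recorded in the object's bonus buffer
 * below, so a reader knows how many of the written bytes are valid
 * nvlist data.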
6171185029Spjd */ 6172236884Smm bufsize = P2ROUNDUP((uint64_t)nvsize, SPA_CONFIG_BLOCKSIZE); 6173185029Spjd packed = kmem_alloc(bufsize, KM_SLEEP); 6174168404Spjd 6175168404Spjd VERIFY(nvlist_pack(nv, &packed, &nvsize, NV_ENCODE_XDR, 6176168404Spjd KM_SLEEP) == 0); 6177185029Spjd bzero(packed + nvsize, bufsize - nvsize); 6178168404Spjd 6179185029Spjd dmu_write(spa->spa_meta_objset, obj, 0, bufsize, packed, tx); 6180168404Spjd 6181185029Spjd kmem_free(packed, bufsize); 6182168404Spjd 6183168404Spjd VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db)); 6184168404Spjd dmu_buf_will_dirty(db, tx); 6185168404Spjd *(uint64_t *)db->db_data = nvsize; 6186168404Spjd dmu_buf_rele(db, FTAG); 6187168404Spjd} 6188168404Spjd 6189168404Spjdstatic void 6190185029Spjdspa_sync_aux_dev(spa_t *spa, spa_aux_vdev_t *sav, dmu_tx_t *tx, 6191185029Spjd const char *config, const char *entry) 6192168404Spjd{ 6193168404Spjd nvlist_t *nvroot; 6194185029Spjd nvlist_t **list; 6195168404Spjd int i; 6196168404Spjd 6197185029Spjd if (!sav->sav_sync) 6198168404Spjd return; 6199168404Spjd 6200168404Spjd /* 6201185029Spjd * Update the MOS nvlist describing the list of available devices. 6202185029Spjd * spa_validate_aux() will have already made sure this nvlist is 6203185029Spjd * valid and the vdevs are labeled appropriately. 6204168404Spjd */ 6205185029Spjd if (sav->sav_object == 0) { 6206185029Spjd sav->sav_object = dmu_object_alloc(spa->spa_meta_objset, 6207185029Spjd DMU_OT_PACKED_NVLIST, 1 << 14, DMU_OT_PACKED_NVLIST_SIZE, 6208185029Spjd sizeof (uint64_t), tx); 6209168404Spjd VERIFY(zap_update(spa->spa_meta_objset, 6210185029Spjd DMU_POOL_DIRECTORY_OBJECT, entry, sizeof (uint64_t), 1, 6211185029Spjd &sav->sav_object, tx) == 0); 6212168404Spjd } 6213168404Spjd 6214168404Spjd VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0); 6215185029Spjd if (sav->sav_count == 0) { 6216185029Spjd VERIFY(nvlist_add_nvlist_array(nvroot, config, NULL, 0) == 0); 6217168404Spjd } else { 6218185029Spjd list = kmem_alloc(sav->sav_count * sizeof (void *), KM_SLEEP); 6219185029Spjd for (i = 0; i < sav->sav_count; i++) 6220185029Spjd list[i] = vdev_config_generate(spa, sav->sav_vdevs[i], 6221219089Spjd B_FALSE, VDEV_CONFIG_L2CACHE); 6222185029Spjd VERIFY(nvlist_add_nvlist_array(nvroot, config, list, 6223185029Spjd sav->sav_count) == 0); 6224185029Spjd for (i = 0; i < sav->sav_count; i++) 6225185029Spjd nvlist_free(list[i]); 6226185029Spjd kmem_free(list, sav->sav_count * sizeof (void *)); 6227168404Spjd } 6228168404Spjd 6229185029Spjd spa_sync_nvlist(spa, sav->sav_object, nvroot, tx); 6230168404Spjd nvlist_free(nvroot); 6231168404Spjd 6232185029Spjd sav->sav_sync = B_FALSE; 6233168404Spjd} 6234168404Spjd 6235168404Spjdstatic void 6236168404Spjdspa_sync_config_object(spa_t *spa, dmu_tx_t *tx) 6237168404Spjd{ 6238168404Spjd nvlist_t *config; 6239168404Spjd 6240185029Spjd if (list_is_empty(&spa->spa_config_dirty_list)) 6241168404Spjd return; 6242168404Spjd 6243185029Spjd spa_config_enter(spa, SCL_STATE, FTAG, RW_READER); 6244168404Spjd 6245185029Spjd config = spa_config_generate(spa, spa->spa_root_vdev, 6246185029Spjd dmu_tx_get_txg(tx), B_FALSE); 6247185029Spjd 6248243505Smm /* 6249243505Smm * If we're upgrading the spa version then make sure that 6250243505Smm * the config object gets updated with the correct version. 
6251243505Smm */
6252243505Smm if (spa->spa_ubsync.ub_version < spa->spa_uberblock.ub_version)
6253243505Smm fnvlist_add_uint64(config, ZPOOL_CONFIG_VERSION,
6254243505Smm spa->spa_uberblock.ub_version);
6255243505Smm
6256185029Spjd spa_config_exit(spa, SCL_STATE, FTAG);
6257185029Spjd
6258168404Spjd if (spa->spa_config_syncing)
6259168404Spjd nvlist_free(spa->spa_config_syncing);
6260168404Spjd spa->spa_config_syncing = config;
6261168404Spjd
6262168404Spjd spa_sync_nvlist(spa, spa->spa_config_object, config, tx);
6263168404Spjd}
6264168404Spjd
6265236884Smmstatic void
6266248571Smmspa_sync_version(void *arg, dmu_tx_t *tx)
6267236884Smm{
6268248571Smm uint64_t *versionp = arg;
6269248571Smm uint64_t version = *versionp;
6270248571Smm spa_t *spa = dmu_tx_pool(tx)->dp_spa;
6271236884Smm
6272236884Smm /*
6273236884Smm * Setting the version is special cased when first creating the pool.
6274236884Smm */
6275236884Smm ASSERT(tx->tx_txg != TXG_INITIAL);
6276236884Smm
6277247592Sdelphij ASSERT(SPA_VERSION_IS_SUPPORTED(version));
6278236884Smm ASSERT(version >= spa_version(spa));
6279236884Smm
6280236884Smm spa->spa_uberblock.ub_version = version;
6281236884Smm vdev_config_dirty(spa->spa_root_vdev);
6282248571Smm spa_history_log_internal(spa, "set", tx, "version=%lld", version);
6283236884Smm}
6284236884Smm
6285185029Spjd/*
6286185029Spjd * Set zpool properties.
6287185029Spjd */
6288168404Spjdstatic void
6289248571Smmspa_sync_props(void *arg, dmu_tx_t *tx)
6290168404Spjd{
6291248571Smm nvlist_t *nvp = arg;
6292248571Smm spa_t *spa = dmu_tx_pool(tx)->dp_spa;
6293185029Spjd objset_t *mos = spa->spa_meta_objset;
6294236884Smm nvpair_t *elem = NULL;
6295168404Spjd
6296168404Spjd mutex_enter(&spa->spa_props_lock);
6297168404Spjd
6298185029Spjd while ((elem = nvlist_next_nvpair(nvp, elem))) {
6299236884Smm uint64_t intval;
6300236884Smm char *strval, *fname;
6301236884Smm zpool_prop_t prop;
6302236884Smm const char *propname;
6303236884Smm zprop_type_t proptype;
6304259813Sdelphij spa_feature_t fid;
6305236884Smm
6306185029Spjd switch (prop = zpool_name_to_prop(nvpair_name(elem))) {
6307236884Smm case ZPROP_INVAL:
6308236884Smm /*
6309236884Smm * We checked this earlier in spa_prop_validate().
6310236884Smm */
6311236884Smm ASSERT(zpool_prop_feature(nvpair_name(elem)));
6312236884Smm
6313236884Smm fname = strchr(nvpair_name(elem), '@') + 1;
6314259813Sdelphij VERIFY0(zfeature_lookup_name(fname, &fid));
6315236884Smm
6316259813Sdelphij spa_feature_enable(spa, fid, tx);
6317248571Smm spa_history_log_internal(spa, "set", tx,
6318248571Smm "%s=enabled", nvpair_name(elem));
6319236884Smm break;
6320236884Smm
6321185029Spjd case ZPOOL_PROP_VERSION:
6322258717Savg intval = fnvpair_value_uint64(elem);
6323185029Spjd /*
6324236884Smm * The version is synced separately before other
6325236884Smm * properties and should be correct by now.
6326185029Spjd */
6327236884Smm ASSERT3U(spa_version(spa), >=, intval);
6328185029Spjd break;
6329168404Spjd
6330185029Spjd case ZPOOL_PROP_ALTROOT:
6331185029Spjd /*
6332185029Spjd * 'altroot' is a non-persistent property. It should
6333185029Spjd * have been set temporarily at creation or import time.
6334185029Spjd */
6335185029Spjd ASSERT(spa->spa_root != NULL);
6336185029Spjd break;
6337168404Spjd
6338219089Spjd case ZPOOL_PROP_READONLY:
6339185029Spjd case ZPOOL_PROP_CACHEFILE:
6340185029Spjd /*
6341219089Spjd * 'readonly' and 'cachefile' are also non-persistent
6342219089Spjd * properties.
6343185029Spjd */
6344168404Spjd break;
6345228103Smm case ZPOOL_PROP_COMMENT:
6346258717Savg strval = fnvpair_value_string(elem);
6347228103Smm if (spa->spa_comment != NULL)
6348228103Smm spa_strfree(spa->spa_comment);
6349228103Smm spa->spa_comment = spa_strdup(strval);
6350228103Smm /*
6351228103Smm * We need to dirty the configuration on all the vdevs
6352228103Smm * so that their labels get updated. It's unnecessary
6353228103Smm * to do this for pool creation since the vdev's
6354228103Smm * configuration has already been dirtied.
6355228103Smm */
6356228103Smm if (tx->tx_txg != TXG_INITIAL)
6357228103Smm vdev_config_dirty(spa->spa_root_vdev);
6358248571Smm spa_history_log_internal(spa, "set", tx,
6359248571Smm "%s=%s", nvpair_name(elem), strval);
6360228103Smm break;
6361185029Spjd default:
6362185029Spjd /*
6363185029Spjd * Set pool property values in the poolprops mos object.
6364185029Spjd */
6365185029Spjd if (spa->spa_pool_props_object == 0) {
6366236884Smm spa->spa_pool_props_object =
6367236884Smm zap_create_link(mos, DMU_OT_POOL_PROPS,
6368185029Spjd DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_PROPS,
6369236884Smm tx);
6370185029Spjd }
6371185029Spjd
6372185029Spjd /* normalize the property name */
6373185029Spjd propname = zpool_prop_to_name(prop);
6374185029Spjd proptype = zpool_prop_get_type(prop);
6375185029Spjd
6376185029Spjd if (nvpair_type(elem) == DATA_TYPE_STRING) {
6377185029Spjd ASSERT(proptype == PROP_TYPE_STRING);
6378258717Savg strval = fnvpair_value_string(elem);
6379258717Savg VERIFY0(zap_update(mos,
6380185029Spjd spa->spa_pool_props_object, propname,
6381258717Savg 1, strlen(strval) + 1, strval, tx));
6382248571Smm spa_history_log_internal(spa, "set", tx,
6383248571Smm "%s=%s", nvpair_name(elem), strval);
6384185029Spjd } else if (nvpair_type(elem) == DATA_TYPE_UINT64) {
6385258717Savg intval = fnvpair_value_uint64(elem);
6386185029Spjd
6387185029Spjd if (proptype == PROP_TYPE_INDEX) {
6388185029Spjd const char *unused;
6389258717Savg VERIFY0(zpool_prop_index_to_string(
6390258717Savg prop, intval, &unused));
6391185029Spjd }
6392258717Savg VERIFY0(zap_update(mos,
6393185029Spjd spa->spa_pool_props_object, propname,
6394258717Savg 8, 1, &intval, tx));
6395248571Smm spa_history_log_internal(spa, "set", tx,
6396248571Smm "%s=%lld", nvpair_name(elem), intval);
6397185029Spjd } else {
6398185029Spjd ASSERT(0); /* not allowed */
6399185029Spjd }
6400185029Spjd
6401185029Spjd switch (prop) {
6402185029Spjd case ZPOOL_PROP_DELEGATION:
6403185029Spjd spa->spa_delegation = intval;
6404185029Spjd break;
6405185029Spjd case ZPOOL_PROP_BOOTFS:
6406185029Spjd spa->spa_bootfs = intval;
6407185029Spjd break;
6408185029Spjd case ZPOOL_PROP_FAILUREMODE:
6409185029Spjd spa->spa_failmode = intval;
6410185029Spjd break;
6411219089Spjd case ZPOOL_PROP_AUTOEXPAND:
6412219089Spjd spa->spa_autoexpand = intval;
6413219089Spjd if (tx->tx_txg != TXG_INITIAL)
6414219089Spjd spa_async_request(spa,
6415219089Spjd SPA_ASYNC_AUTOEXPAND);
6416219089Spjd break;
6417219089Spjd case ZPOOL_PROP_DEDUPDITTO:
6418219089Spjd spa->spa_dedup_ditto = intval;
6419219089Spjd break;
6420185029Spjd default:
6421185029Spjd break;
6422185029Spjd }
6423168404Spjd }
6424185029Spjd
6425168404Spjd }
6426185029Spjd
6427185029Spjd mutex_exit(&spa->spa_props_lock);
6428168404Spjd}
6429168404Spjd
6430168404Spjd/*
6431219089Spjd * Perform one-time upgrade on-disk changes. spa_version() does not
6432219089Spjd * reflect the new version this txg, so there must be no changes this
6433219089Spjd * txg to anything that the upgrade code depends on after it executes.
6434219089Spjd * Therefore this must be called after dsl_pool_sync() does the sync
6435219089Spjd * tasks.
6436219089Spjd */
6437219089Spjdstatic void
6438219089Spjdspa_sync_upgrades(spa_t *spa, dmu_tx_t *tx)
6439219089Spjd{
6440219089Spjd dsl_pool_t *dp = spa->spa_dsl_pool;
6441219089Spjd
6442219089Spjd ASSERT(spa->spa_sync_pass == 1);
6443219089Spjd
6444248571Smm rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG);
6445248571Smm
6446219089Spjd if (spa->spa_ubsync.ub_version < SPA_VERSION_ORIGIN &&
6447219089Spjd spa->spa_uberblock.ub_version >= SPA_VERSION_ORIGIN) {
6448219089Spjd dsl_pool_create_origin(dp, tx);
6449219089Spjd
6450219089Spjd /* Keeping the origin open increases spa_minref */
6451219089Spjd spa->spa_minref += 3;
6452219089Spjd }
6453219089Spjd
6454219089Spjd if (spa->spa_ubsync.ub_version < SPA_VERSION_NEXT_CLONES &&
6455219089Spjd spa->spa_uberblock.ub_version >= SPA_VERSION_NEXT_CLONES) {
6456219089Spjd dsl_pool_upgrade_clones(dp, tx);
6457219089Spjd }
6458219089Spjd
6459219089Spjd if (spa->spa_ubsync.ub_version < SPA_VERSION_DIR_CLONES &&
6460219089Spjd spa->spa_uberblock.ub_version >= SPA_VERSION_DIR_CLONES) {
6461219089Spjd dsl_pool_upgrade_dir_clones(dp, tx);
6462219089Spjd
6463219089Spjd /* Keeping the freedir open increases spa_minref */
6464219089Spjd spa->spa_minref += 3;
6465219089Spjd }
6466236884Smm
6467236884Smm if (spa->spa_ubsync.ub_version < SPA_VERSION_FEATURES &&
6468236884Smm spa->spa_uberblock.ub_version >= SPA_VERSION_FEATURES) {
6469236884Smm spa_feature_create_zap_objects(spa, tx);
6470236884Smm }
6471268126Sdelphij
6472268126Sdelphij /*
6473268126Sdelphij * The LZ4_COMPRESS feature's behaviour was changed to activate_on_enable
6474268126Sdelphij * when the ability to use lz4 compression for metadata was added.
6475268126Sdelphij * Old pools that have this feature enabled must be upgraded to have
6476268126Sdelphij * this feature active.
6477268126Sdelphij */
6478268126Sdelphij if (spa->spa_uberblock.ub_version >= SPA_VERSION_FEATURES) {
6479268126Sdelphij boolean_t lz4_en = spa_feature_is_enabled(spa,
6480268126Sdelphij SPA_FEATURE_LZ4_COMPRESS);
6481268126Sdelphij boolean_t lz4_ac = spa_feature_is_active(spa,
6482268126Sdelphij SPA_FEATURE_LZ4_COMPRESS);
6483268126Sdelphij
6484268126Sdelphij if (lz4_en && !lz4_ac)
6485268126Sdelphij spa_feature_incr(spa, SPA_FEATURE_LZ4_COMPRESS, tx);
6486268126Sdelphij }
6487248571Smm rrw_exit(&dp->dp_config_rwlock, FTAG);
6488219089Spjd}
6489219089Spjd
6490219089Spjd/*
6491168404Spjd * Sync the specified transaction group. New blocks may be dirtied as
6492168404Spjd * part of the process, so we iterate until it converges.
6493168404Spjd */
6494168404Spjdvoid
6495168404Spjdspa_sync(spa_t *spa, uint64_t txg)
6496168404Spjd{
6497168404Spjd dsl_pool_t *dp = spa->spa_dsl_pool;
6498168404Spjd objset_t *mos = spa->spa_meta_objset;
6499219089Spjd bplist_t *free_bpl = &spa->spa_free_bplist[txg & TXG_MASK];
6500168404Spjd vdev_t *rvd = spa->spa_root_vdev;
6501168404Spjd vdev_t *vd;
6502168404Spjd dmu_tx_t *tx;
6503185029Spjd int error;
6504168404Spjd
6505219089Spjd VERIFY(spa_writeable(spa));
6506219089Spjd
6507168404Spjd /*
6508168404Spjd * Lock out configuration changes.
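 * (SCL_CONFIG is held as reader for the entire sync and dropped near
 * the bottom of this function.)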
6509168404Spjd */ 6510185029Spjd spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); 6511168404Spjd 6512168404Spjd spa->spa_syncing_txg = txg; 6513168404Spjd spa->spa_sync_pass = 0; 6514168404Spjd 6515185029Spjd /* 6516185029Spjd * If there are any pending vdev state changes, convert them 6517185029Spjd * into config changes that go out with this transaction group. 6518185029Spjd */ 6519185029Spjd spa_config_enter(spa, SCL_STATE, FTAG, RW_READER); 6520209962Smm while (list_head(&spa->spa_state_dirty_list) != NULL) { 6521209962Smm /* 6522209962Smm * We need the write lock here because, for aux vdevs, 6523209962Smm * calling vdev_config_dirty() modifies sav_config. 6524209962Smm * This is ugly and will become unnecessary when we 6525209962Smm * eliminate the aux vdev wart by integrating all vdevs 6526209962Smm * into the root vdev tree. 6527209962Smm */ 6528209962Smm spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG); 6529209962Smm spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_WRITER); 6530209962Smm while ((vd = list_head(&spa->spa_state_dirty_list)) != NULL) { 6531209962Smm vdev_state_clean(vd); 6532209962Smm vdev_config_dirty(vd); 6533209962Smm } 6534209962Smm spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG); 6535209962Smm spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER); 6536185029Spjd } 6537185029Spjd spa_config_exit(spa, SCL_STATE, FTAG); 6538185029Spjd 6539168404Spjd tx = dmu_tx_create_assigned(dp, txg); 6540168404Spjd 6541247265Smm spa->spa_sync_starttime = gethrtime(); 6542247265Smm#ifdef illumos 6543247265Smm VERIFY(cyclic_reprogram(spa->spa_deadman_cycid, 6544247265Smm spa->spa_sync_starttime + spa->spa_deadman_synctime)); 6545247265Smm#else /* FreeBSD */ 6546247265Smm#ifdef _KERNEL 6547247265Smm callout_reset(&spa->spa_deadman_cycid, 6548247265Smm hz * spa->spa_deadman_synctime / NANOSEC, spa_deadman, spa); 6549247265Smm#endif 6550247265Smm#endif 6551247265Smm 6552168404Spjd /* 6553185029Spjd * If we are upgrading to SPA_VERSION_RAIDZ_DEFLATE this txg, 6554168404Spjd * set spa_deflate if we have no raid-z vdevs. 6555168404Spjd */ 6556185029Spjd if (spa->spa_ubsync.ub_version < SPA_VERSION_RAIDZ_DEFLATE && 6557185029Spjd spa->spa_uberblock.ub_version >= SPA_VERSION_RAIDZ_DEFLATE) { 6558168404Spjd int i; 6559168404Spjd 6560168404Spjd for (i = 0; i < rvd->vdev_children; i++) { 6561168404Spjd vd = rvd->vdev_child[i]; 6562168404Spjd if (vd->vdev_deflate_ratio != SPA_MINBLOCKSIZE) 6563168404Spjd break; 6564168404Spjd } 6565168404Spjd if (i == rvd->vdev_children) { 6566168404Spjd spa->spa_deflate = TRUE; 6567168404Spjd VERIFY(0 == zap_add(spa->spa_meta_objset, 6568168404Spjd DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE, 6569168404Spjd sizeof (uint64_t), 1, &spa->spa_deflate, tx)); 6570168404Spjd } 6571168404Spjd } 6572168404Spjd 6573168404Spjd /* 6574219089Spjd * If anything has changed in this txg, or if someone is waiting 6575219089Spjd * for this txg to sync (eg, spa_vdev_remove()), push the 6576219089Spjd * deferred frees from the previous txg. If not, leave them 6577219089Spjd * alone so that we don't generate work on an otherwise idle 6578219089Spjd * system. 
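 * (spa_sync_deferred_frees() walks spa_deferred_bpobj and frees
 * everything that earlier txgs deferred.)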
6579168404Spjd */ 6580168404Spjd if (!txg_list_empty(&dp->dp_dirty_datasets, txg) || 6581168404Spjd !txg_list_empty(&dp->dp_dirty_dirs, txg) || 6582219089Spjd !txg_list_empty(&dp->dp_sync_tasks, txg) || 6583219089Spjd ((dsl_scan_active(dp->dp_scan) || 6584219089Spjd txg_sync_waiting(dp)) && !spa_shutting_down(spa))) { 6585258632Savg spa_sync_deferred_frees(spa, tx); 6586219089Spjd } 6587168404Spjd 6588168404Spjd /* 6589168404Spjd * Iterate to convergence. 6590168404Spjd */ 6591168404Spjd do { 6592219089Spjd int pass = ++spa->spa_sync_pass; 6593168404Spjd 6594168404Spjd spa_sync_config_object(spa, tx); 6595185029Spjd spa_sync_aux_dev(spa, &spa->spa_spares, tx, 6596185029Spjd ZPOOL_CONFIG_SPARES, DMU_POOL_SPARES); 6597185029Spjd spa_sync_aux_dev(spa, &spa->spa_l2cache, tx, 6598185029Spjd ZPOOL_CONFIG_L2CACHE, DMU_POOL_L2CACHE); 6599168404Spjd spa_errlog_sync(spa, txg); 6600168404Spjd dsl_pool_sync(dp, txg); 6601168404Spjd 6602243503Smm if (pass < zfs_sync_pass_deferred_free) { 6603258632Savg spa_sync_frees(spa, free_bpl, tx); 6604219089Spjd } else { 6605219089Spjd bplist_iterate(free_bpl, bpobj_enqueue_cb, 6606258632Savg &spa->spa_deferred_bpobj, tx); 6607168404Spjd } 6608168404Spjd 6609219089Spjd ddt_sync(spa, txg); 6610219089Spjd dsl_scan_sync(dp, tx); 6611168404Spjd 6612219089Spjd while (vd = txg_list_remove(&spa->spa_vdev_txg_list, txg)) 6613219089Spjd vdev_sync(vd, txg); 6614168404Spjd 6615219089Spjd if (pass == 1) 6616219089Spjd spa_sync_upgrades(spa, tx); 6617168404Spjd 6618219089Spjd } while (dmu_objset_is_dirty(mos, txg)); 6619219089Spjd 6620168404Spjd /* 6621168404Spjd * Rewrite the vdev configuration (which includes the uberblock) 6622168404Spjd * to commit the transaction group. 6623168404Spjd * 6624185029Spjd * If there are no dirty vdevs, we sync the uberblock to a few 6625185029Spjd * random top-level vdevs that are known to be visible in the 6626185029Spjd * config cache (see spa_vdev_add() for a complete description). 6627185029Spjd * If there *are* dirty vdevs, sync the uberblock to all vdevs. 6628168404Spjd */ 6629185029Spjd for (;;) { 6630185029Spjd /* 6631185029Spjd * We hold SCL_STATE to prevent vdev open/close/etc. 6632185029Spjd * while we're attempting to write the vdev labels. 
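 * If the writes fail, retry once; if the retry also fails, suspend
 * the pool and wait for I/O to resume before attempting the label
 * writes again.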
6633185029Spjd */ 6634185029Spjd spa_config_enter(spa, SCL_STATE, FTAG, RW_READER); 6635168404Spjd 6636185029Spjd if (list_is_empty(&spa->spa_config_dirty_list)) { 6637185029Spjd vdev_t *svd[SPA_DVAS_PER_BP]; 6638185029Spjd int svdcount = 0; 6639185029Spjd int children = rvd->vdev_children; 6640185029Spjd int c0 = spa_get_random(children); 6641185029Spjd 6642219089Spjd for (int c = 0; c < children; c++) { 6643185029Spjd vd = rvd->vdev_child[(c0 + c) % children]; 6644185029Spjd if (vd->vdev_ms_array == 0 || vd->vdev_islog) 6645185029Spjd continue; 6646185029Spjd svd[svdcount++] = vd; 6647185029Spjd if (svdcount == SPA_DVAS_PER_BP) 6648185029Spjd break; 6649185029Spjd } 6650213198Smm error = vdev_config_sync(svd, svdcount, txg, B_FALSE); 6651213198Smm if (error != 0) 6652213198Smm error = vdev_config_sync(svd, svdcount, txg, 6653213198Smm B_TRUE); 6654185029Spjd } else { 6655185029Spjd error = vdev_config_sync(rvd->vdev_child, 6656213198Smm rvd->vdev_children, txg, B_FALSE); 6657213198Smm if (error != 0) 6658213198Smm error = vdev_config_sync(rvd->vdev_child, 6659213198Smm rvd->vdev_children, txg, B_TRUE); 6660168404Spjd } 6661185029Spjd 6662239620Smm if (error == 0) 6663239620Smm spa->spa_last_synced_guid = rvd->vdev_guid; 6664239620Smm 6665185029Spjd spa_config_exit(spa, SCL_STATE, FTAG); 6666185029Spjd 6667185029Spjd if (error == 0) 6668185029Spjd break; 6669185029Spjd zio_suspend(spa, NULL); 6670185029Spjd zio_resume_wait(spa); 6671168404Spjd } 6672168404Spjd dmu_tx_commit(tx); 6673168404Spjd 6674247265Smm#ifdef illumos 6675247265Smm VERIFY(cyclic_reprogram(spa->spa_deadman_cycid, CY_INFINITY)); 6676247265Smm#else /* FreeBSD */ 6677247265Smm#ifdef _KERNEL 6678247265Smm callout_drain(&spa->spa_deadman_cycid); 6679247265Smm#endif 6680247265Smm#endif 6681247265Smm 6682168404Spjd /* 6683168404Spjd * Clear the dirty config list. 6684168404Spjd */ 6685185029Spjd while ((vd = list_head(&spa->spa_config_dirty_list)) != NULL) 6686168404Spjd vdev_config_clean(vd); 6687168404Spjd 6688168404Spjd /* 6689168404Spjd * Now that the new config has synced transactionally, 6690168404Spjd * let it become visible to the config cache. 6691168404Spjd */ 6692168404Spjd if (spa->spa_config_syncing != NULL) { 6693168404Spjd spa_config_set(spa, spa->spa_config_syncing); 6694168404Spjd spa->spa_config_txg = txg; 6695168404Spjd spa->spa_config_syncing = NULL; 6696168404Spjd } 6697168404Spjd 6698168404Spjd spa->spa_ubsync = spa->spa_uberblock; 6699168404Spjd 6700219089Spjd dsl_pool_sync_done(dp, txg); 6701168404Spjd 6702168404Spjd /* 6703168404Spjd * Update usable space statistics. 6704168404Spjd */ 6705168404Spjd while (vd = txg_list_remove(&spa->spa_vdev_txg_list, TXG_CLEAN(txg))) 6706168404Spjd vdev_sync_done(vd, txg); 6707168404Spjd 6708219089Spjd spa_update_dspace(spa); 6709219089Spjd 6710168404Spjd /* 6711168404Spjd * It had better be the case that we didn't dirty anything 6712168404Spjd * since vdev_config_sync(). 6713168404Spjd */ 6714168404Spjd ASSERT(txg_list_empty(&dp->dp_dirty_datasets, txg)); 6715168404Spjd ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg)); 6716168404Spjd ASSERT(txg_list_empty(&spa->spa_vdev_txg_list, txg)); 6717168404Spjd 6718219089Spjd spa->spa_sync_pass = 0; 6719219089Spjd 6720185029Spjd spa_config_exit(spa, SCL_CONFIG, FTAG); 6721168404Spjd 6722219089Spjd spa_handle_ignored_writes(spa); 6723219089Spjd 6724168404Spjd /* 6725168404Spjd * If any async tasks have been requested, kick them off. 
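 * (Both dispatchers run: spa_async_dispatch() for the general task
 * thread and spa_async_dispatch_vd() for the REMOVE handling thread.)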
6726168404Spjd */ 6727168404Spjd spa_async_dispatch(spa); 6728253990Smav spa_async_dispatch_vd(spa); 6729168404Spjd} 6730168404Spjd 6731168404Spjd/* 6732168404Spjd * Sync all pools. We don't want to hold the namespace lock across these 6733168404Spjd * operations, so we take a reference on the spa_t and drop the lock during the 6734168404Spjd * sync. 6735168404Spjd */ 6736168404Spjdvoid 6737168404Spjdspa_sync_allpools(void) 6738168404Spjd{ 6739168404Spjd spa_t *spa = NULL; 6740168404Spjd mutex_enter(&spa_namespace_lock); 6741168404Spjd while ((spa = spa_next(spa)) != NULL) { 6742219089Spjd if (spa_state(spa) != POOL_STATE_ACTIVE || 6743219089Spjd !spa_writeable(spa) || spa_suspended(spa)) 6744168404Spjd continue; 6745168404Spjd spa_open_ref(spa, FTAG); 6746168404Spjd mutex_exit(&spa_namespace_lock); 6747168404Spjd txg_wait_synced(spa_get_dsl(spa), 0); 6748168404Spjd mutex_enter(&spa_namespace_lock); 6749168404Spjd spa_close(spa, FTAG); 6750168404Spjd } 6751168404Spjd mutex_exit(&spa_namespace_lock); 6752168404Spjd} 6753168404Spjd 6754168404Spjd/* 6755168404Spjd * ========================================================================== 6756168404Spjd * Miscellaneous routines 6757168404Spjd * ========================================================================== 6758168404Spjd */ 6759168404Spjd 6760168404Spjd/* 6761168404Spjd * Remove all pools in the system. 6762168404Spjd */ 6763168404Spjdvoid 6764168404Spjdspa_evict_all(void) 6765168404Spjd{ 6766168404Spjd spa_t *spa; 6767168404Spjd 6768168404Spjd /* 6769168404Spjd * Remove all cached state. All pools should be closed now, 6770168404Spjd * so every spa in the AVL tree should be unreferenced. 6771168404Spjd */ 6772168404Spjd mutex_enter(&spa_namespace_lock); 6773168404Spjd while ((spa = spa_next(NULL)) != NULL) { 6774168404Spjd /* 6775168404Spjd * Stop async tasks. The async thread may need to detach 6776168404Spjd * a device that's been replaced, which requires grabbing 6777168404Spjd * spa_namespace_lock, so we must drop it here. 
6778168404Spjd */ 6779168404Spjd spa_open_ref(spa, FTAG); 6780168404Spjd mutex_exit(&spa_namespace_lock); 6781168404Spjd spa_async_suspend(spa); 6782168404Spjd mutex_enter(&spa_namespace_lock); 6783168404Spjd spa_close(spa, FTAG); 6784168404Spjd 6785168404Spjd if (spa->spa_state != POOL_STATE_UNINITIALIZED) { 6786168404Spjd spa_unload(spa); 6787168404Spjd spa_deactivate(spa); 6788168404Spjd } 6789168404Spjd spa_remove(spa); 6790168404Spjd } 6791168404Spjd mutex_exit(&spa_namespace_lock); 6792168404Spjd} 6793168404Spjd 6794168404Spjdvdev_t * 6795209962Smmspa_lookup_by_guid(spa_t *spa, uint64_t guid, boolean_t aux) 6796168404Spjd{ 6797185029Spjd vdev_t *vd; 6798185029Spjd int i; 6799185029Spjd 6800185029Spjd if ((vd = vdev_lookup_by_guid(spa->spa_root_vdev, guid)) != NULL) 6801185029Spjd return (vd); 6802185029Spjd 6803209962Smm if (aux) { 6804185029Spjd for (i = 0; i < spa->spa_l2cache.sav_count; i++) { 6805185029Spjd vd = spa->spa_l2cache.sav_vdevs[i]; 6806185029Spjd if (vd->vdev_guid == guid) 6807185029Spjd return (vd); 6808185029Spjd } 6809209962Smm 6810209962Smm for (i = 0; i < spa->spa_spares.sav_count; i++) { 6811209962Smm vd = spa->spa_spares.sav_vdevs[i]; 6812209962Smm if (vd->vdev_guid == guid) 6813209962Smm return (vd); 6814209962Smm } 6815185029Spjd } 6816185029Spjd 6817185029Spjd return (NULL); 6818168404Spjd} 6819168404Spjd 6820168404Spjdvoid 6821185029Spjdspa_upgrade(spa_t *spa, uint64_t version) 6822168404Spjd{ 6823219089Spjd ASSERT(spa_writeable(spa)); 6824219089Spjd 6825185029Spjd spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 6826168404Spjd 6827168404Spjd /* 6828168404Spjd * This should only be called for a non-faulted pool, and since a 6829168404Spjd * future version would result in an unopenable pool, this shouldn't be 6830168404Spjd * possible. 6831168404Spjd */ 6832247592Sdelphij ASSERT(SPA_VERSION_IS_SUPPORTED(spa->spa_uberblock.ub_version)); 6833268075Sdelphij ASSERT3U(version, >=, spa->spa_uberblock.ub_version); 6834168404Spjd 6835185029Spjd spa->spa_uberblock.ub_version = version; 6836168404Spjd vdev_config_dirty(spa->spa_root_vdev); 6837168404Spjd 6838185029Spjd spa_config_exit(spa, SCL_ALL, FTAG); 6839168404Spjd 6840168404Spjd txg_wait_synced(spa_get_dsl(spa), 0); 6841168404Spjd} 6842168404Spjd 6843168404Spjdboolean_t 6844168404Spjdspa_has_spare(spa_t *spa, uint64_t guid) 6845168404Spjd{ 6846168404Spjd int i; 6847168404Spjd uint64_t spareguid; 6848185029Spjd spa_aux_vdev_t *sav = &spa->spa_spares; 6849168404Spjd 6850185029Spjd for (i = 0; i < sav->sav_count; i++) 6851185029Spjd if (sav->sav_vdevs[i]->vdev_guid == guid) 6852168404Spjd return (B_TRUE); 6853168404Spjd 6854185029Spjd for (i = 0; i < sav->sav_npending; i++) { 6855185029Spjd if (nvlist_lookup_uint64(sav->sav_pending[i], ZPOOL_CONFIG_GUID, 6856185029Spjd &spareguid) == 0 && spareguid == guid) 6857168404Spjd return (B_TRUE); 6858168404Spjd } 6859168404Spjd 6860168404Spjd return (B_FALSE); 6861168404Spjd} 6862168404Spjd 6863185029Spjd/* 6864185029Spjd * Check if a pool has an active shared spare device. 
6865185029Spjd * Note: reference count of an active spare is 2, as a spare and as a replace 6866185029Spjd */ 6867185029Spjdstatic boolean_t 6868185029Spjdspa_has_active_shared_spare(spa_t *spa) 6869168404Spjd{ 6870185029Spjd int i, refcnt; 6871185029Spjd uint64_t pool; 6872185029Spjd spa_aux_vdev_t *sav = &spa->spa_spares; 6873185029Spjd 6874185029Spjd for (i = 0; i < sav->sav_count; i++) { 6875185029Spjd if (spa_spare_exists(sav->sav_vdevs[i]->vdev_guid, &pool, 6876185029Spjd &refcnt) && pool != 0ULL && pool == spa_guid(spa) && 6877185029Spjd refcnt > 2) 6878185029Spjd return (B_TRUE); 6879185029Spjd } 6880185029Spjd 6881185029Spjd return (B_FALSE); 6882168404Spjd} 6883168404Spjd 6884185029Spjd/* 6885185029Spjd * Post a sysevent corresponding to the given event. The 'name' must be one of 6886185029Spjd * the event definitions in sys/sysevent/eventdefs.h. The payload will be 6887185029Spjd * filled in from the spa and (optionally) the vdev. This doesn't do anything 6888185029Spjd * in the userland libzpool, as we don't want consumers to misinterpret ztest 6889185029Spjd * or zdb as real changes. 6890185029Spjd */ 6891185029Spjdvoid 6892185029Spjdspa_event_notify(spa_t *spa, vdev_t *vd, const char *name) 6893168404Spjd{ 6894185029Spjd#ifdef _KERNEL 6895185029Spjd sysevent_t *ev; 6896185029Spjd sysevent_attr_list_t *attr = NULL; 6897185029Spjd sysevent_value_t value; 6898185029Spjd sysevent_id_t eid; 6899168404Spjd 6900185029Spjd ev = sysevent_alloc(EC_ZFS, (char *)name, SUNW_KERN_PUB "zfs", 6901185029Spjd SE_SLEEP); 6902168404Spjd 6903185029Spjd value.value_type = SE_DATA_TYPE_STRING; 6904185029Spjd value.value.sv_string = spa_name(spa); 6905185029Spjd if (sysevent_add_attr(&attr, ZFS_EV_POOL_NAME, &value, SE_SLEEP) != 0) 6906185029Spjd goto done; 6907168404Spjd 6908185029Spjd value.value_type = SE_DATA_TYPE_UINT64; 6909185029Spjd value.value.sv_uint64 = spa_guid(spa); 6910185029Spjd if (sysevent_add_attr(&attr, ZFS_EV_POOL_GUID, &value, SE_SLEEP) != 0) 6911185029Spjd goto done; 6912168404Spjd 6913185029Spjd if (vd) { 6914185029Spjd value.value_type = SE_DATA_TYPE_UINT64; 6915185029Spjd value.value.sv_uint64 = vd->vdev_guid; 6916185029Spjd if (sysevent_add_attr(&attr, ZFS_EV_VDEV_GUID, &value, 6917185029Spjd SE_SLEEP) != 0) 6918185029Spjd goto done; 6919168404Spjd 6920185029Spjd if (vd->vdev_path) { 6921185029Spjd value.value_type = SE_DATA_TYPE_STRING; 6922185029Spjd value.value.sv_string = vd->vdev_path; 6923185029Spjd if (sysevent_add_attr(&attr, ZFS_EV_VDEV_PATH, 6924185029Spjd &value, SE_SLEEP) != 0) 6925185029Spjd goto done; 6926168404Spjd } 6927168404Spjd } 6928168404Spjd 6929185029Spjd if (sysevent_attach_attributes(ev, attr) != 0) 6930185029Spjd goto done; 6931185029Spjd attr = NULL; 6932168404Spjd 6933185029Spjd (void) log_sysevent(ev, SE_SLEEP, &eid); 6934185029Spjd 6935185029Spjddone: 6936185029Spjd if (attr) 6937185029Spjd sysevent_free_attr(attr); 6938185029Spjd sysevent_free(ev); 6939185029Spjd#endif 6940168404Spjd} 6941