spa.c (321610) spa.c (323746)
1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22/*
23 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Copyright (c) 2011, 2017 by Delphix. All rights reserved.
25 * Copyright (c) 2015, Nexenta Systems, Inc. All rights reserved.
26 * Copyright (c) 2013 Martin Matuska <mm@FreeBSD.org>. All rights reserved.
27 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
28 * Copyright 2013 Saso Kiselkov. All rights reserved.
29 * Copyright (c) 2014 Integros [integros.com]
30 */
31
32/*
33 * SPA: Storage Pool Allocator
34 *
35 * This file contains all the routines used when modifying on-disk SPA state.
36 * This includes opening, importing, destroying, exporting a pool, and syncing a
37 * pool.
38 */
39
40#include <sys/zfs_context.h>
41#include <sys/fm/fs/zfs.h>
42#include <sys/spa_impl.h>
43#include <sys/zio.h>
44#include <sys/zio_checksum.h>
45#include <sys/dmu.h>
46#include <sys/dmu_tx.h>
47#include <sys/zap.h>
48#include <sys/zil.h>
49#include <sys/ddt.h>
50#include <sys/vdev_impl.h>
51#include <sys/metaslab.h>
52#include <sys/metaslab_impl.h>
53#include <sys/uberblock_impl.h>
54#include <sys/txg.h>
55#include <sys/avl.h>
56#include <sys/dmu_traverse.h>
57#include <sys/dmu_objset.h>
58#include <sys/unique.h>
59#include <sys/dsl_pool.h>
60#include <sys/dsl_dataset.h>
61#include <sys/dsl_dir.h>
62#include <sys/dsl_prop.h>
63#include <sys/dsl_synctask.h>
64#include <sys/fs/zfs.h>
65#include <sys/arc.h>
66#include <sys/callb.h>
67#include <sys/spa_boot.h>
68#include <sys/zfs_ioctl.h>
69#include <sys/dsl_scan.h>
70#include <sys/dmu_send.h>
71#include <sys/dsl_destroy.h>
72#include <sys/dsl_userhold.h>
73#include <sys/zfeature.h>
74#include <sys/zvol.h>
75#include <sys/trim_map.h>
76#include <sys/abd.h>
77
78#ifdef _KERNEL
79#include <sys/callb.h>
80#include <sys/cpupart.h>
81#include <sys/zone.h>
82#endif /* _KERNEL */
83
84#include "zfs_prop.h"
85#include "zfs_comutil.h"
86
87/* Check hostid on import? */
88static int check_hostid = 1;
89
90/*
91 * The interval, in seconds, at which failed configuration cache file writes
92 * should be retried.
93 */
94static int zfs_ccw_retry_interval = 300;
95
96SYSCTL_DECL(_vfs_zfs);
97SYSCTL_INT(_vfs_zfs, OID_AUTO, check_hostid, CTLFLAG_RWTUN, &check_hostid, 0,
98 "Check hostid on import?");
99TUNABLE_INT("vfs.zfs.ccw_retry_interval", &zfs_ccw_retry_interval);
100SYSCTL_INT(_vfs_zfs, OID_AUTO, ccw_retry_interval, CTLFLAG_RW,
101 &zfs_ccw_retry_interval, 0,
102 "Configuration cache file write, retry after failure, interval (seconds)");
103
104typedef enum zti_modes {
105 ZTI_MODE_FIXED, /* value is # of threads (min 1) */
106 ZTI_MODE_BATCH, /* cpu-intensive; value is ignored */
107 ZTI_MODE_NULL, /* don't create a taskq */
108 ZTI_NMODES
109} zti_modes_t;
110
111#define ZTI_P(n, q) { ZTI_MODE_FIXED, (n), (q) }
112#define ZTI_BATCH { ZTI_MODE_BATCH, 0, 1 }
113#define ZTI_NULL { ZTI_MODE_NULL, 0, 0 }
114
115#define ZTI_N(n) ZTI_P(n, 1)
116#define ZTI_ONE ZTI_N(1)
117
118typedef struct zio_taskq_info {
119 zti_modes_t zti_mode;
120 uint_t zti_value;
121 uint_t zti_count;
122} zio_taskq_info_t;
123
124static const char *const zio_taskq_types[ZIO_TASKQ_TYPES] = {
125 "issue", "issue_high", "intr", "intr_high"
126};
127
128/*
129 * This table defines the taskq settings for each ZFS I/O type. When
130 * initializing a pool, we use this table to create an appropriately sized
131 * taskq. Some operations are low volume and therefore have a small, static
132 * number of threads assigned to their taskqs using the ZTI_N(#) or ZTI_ONE
133 * macros. Other operations process a large amount of data; the ZTI_BATCH
134 * macro causes us to create a taskq oriented for throughput. Some operations
135 * are so high frequency and short-lived that the taskq itself can become a
136 * point of lock contention. The ZTI_P(#, #) macro indicates that we need an
137 * additional degree of parallelism specified by the number of threads per-
138 * taskq and the number of taskqs; when dispatching an event in this case, the
139 * particular taskq is chosen at random.
140 *
141 * The different taskq priorities are to handle the different contexts (issue
142 * and interrupt) and then to reserve threads for ZIO_PRIORITY_NOW I/Os that
143 * need to be handled with minimum delay.
144 */
145const zio_taskq_info_t zio_taskqs[ZIO_TYPES][ZIO_TASKQ_TYPES] = {
146 /* ISSUE ISSUE_HIGH INTR INTR_HIGH */
147 { ZTI_ONE, ZTI_NULL, ZTI_ONE, ZTI_NULL }, /* NULL */
148 { ZTI_N(8), ZTI_NULL, ZTI_P(12, 8), ZTI_NULL }, /* READ */
149 { ZTI_BATCH, ZTI_N(5), ZTI_N(8), ZTI_N(5) }, /* WRITE */
150 { ZTI_P(12, 8), ZTI_NULL, ZTI_ONE, ZTI_NULL }, /* FREE */
151 { ZTI_ONE, ZTI_NULL, ZTI_ONE, ZTI_NULL }, /* CLAIM */
152 { ZTI_ONE, ZTI_NULL, ZTI_ONE, ZTI_NULL }, /* IOCTL */
153};
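/*
 * Reading the table: each entry expands to (mode, value, count).  For
 * example, ZTI_P(12, 8) for READ/INTR asks for 8 discrete taskqs of 12
 * threads each, ZTI_N(8) for READ/ISSUE asks for a single 8-thread taskq,
 * and ZTI_BATCH for WRITE/ISSUE asks for one throughput-oriented taskq
 * whose thread count is derived from zio_taskq_batch_pct in
 * spa_taskqs_init() below.
 */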
154
155static sysevent_t *spa_event_create(spa_t *spa, vdev_t *vd, const char *name);
156static void spa_event_post(sysevent_t *ev);
157static void spa_sync_version(void *arg, dmu_tx_t *tx);
158static void spa_sync_props(void *arg, dmu_tx_t *tx);
159static boolean_t spa_has_active_shared_spare(spa_t *spa);
160static int spa_load_impl(spa_t *spa, uint64_t, nvlist_t *config,
161 spa_load_state_t state, spa_import_type_t type, boolean_t mosconfig,
162 char **ereport);
163static void spa_vdev_resilver_done(spa_t *spa);
164
165uint_t zio_taskq_batch_pct = 75; /* 1 thread per cpu in pset */
166#ifdef PSRSET_BIND
167id_t zio_taskq_psrset_bind = PS_NONE;
168#endif
169#ifdef SYSDC
170boolean_t zio_taskq_sysdc = B_TRUE; /* use SDC scheduling class */
171uint_t zio_taskq_basedc = 80; /* base duty cycle */
172#endif
173
174boolean_t spa_create_process = B_TRUE; /* no process ==> no sysdc */
175extern int zfs_sync_pass_deferred_free;
176
177/*
178 * This (illegal) pool name is used when temporarily importing a spa_t in order
179 * to get the vdev stats associated with the imported devices.
180 */
181#define TRYIMPORT_NAME "$import"
182
183/*
184 * ==========================================================================
185 * SPA properties routines
186 * ==========================================================================
187 */
188
189/*
190 * Add a (source=src, propname=propval) list to an nvlist.
191 */
192static void
193spa_prop_add_list(nvlist_t *nvl, zpool_prop_t prop, char *strval,
194 uint64_t intval, zprop_source_t src)
195{
196 const char *propname = zpool_prop_to_name(prop);
197 nvlist_t *propval;
198
199 VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP) == 0);
200 VERIFY(nvlist_add_uint64(propval, ZPROP_SOURCE, src) == 0);
201
202 if (strval != NULL)
203 VERIFY(nvlist_add_string(propval, ZPROP_VALUE, strval) == 0);
204 else
205 VERIFY(nvlist_add_uint64(propval, ZPROP_VALUE, intval) == 0);
206
207 VERIFY(nvlist_add_nvlist(nvl, propname, propval) == 0);
208 nvlist_free(propval);
209}
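/*
 * For example (illustrative only), a call such as
 *	spa_prop_add_list(*nvp, ZPOOL_PROP_SIZE, NULL, size, ZPROP_SRC_NONE);
 * adds an entry of roughly the form
 *	"size" -> { ZPROP_SOURCE = ZPROP_SRC_NONE, ZPROP_VALUE = size }
 * to the caller's nvlist; string-valued properties carry ZPROP_VALUE as a
 * string instead of a uint64.
 */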
210
211/*
212 * Get property values from the spa configuration.
213 */
214static void
215spa_prop_get_config(spa_t *spa, nvlist_t **nvp)
216{
217 vdev_t *rvd = spa->spa_root_vdev;
218 dsl_pool_t *pool = spa->spa_dsl_pool;
219 uint64_t size, alloc, cap, version;
220 zprop_source_t src = ZPROP_SRC_NONE;
221 spa_config_dirent_t *dp;
222 metaslab_class_t *mc = spa_normal_class(spa);
223
224 ASSERT(MUTEX_HELD(&spa->spa_props_lock));
225
226 if (rvd != NULL) {
227 alloc = metaslab_class_get_alloc(spa_normal_class(spa));
228 size = metaslab_class_get_space(spa_normal_class(spa));
229 spa_prop_add_list(*nvp, ZPOOL_PROP_NAME, spa_name(spa), 0, src);
230 spa_prop_add_list(*nvp, ZPOOL_PROP_SIZE, NULL, size, src);
231 spa_prop_add_list(*nvp, ZPOOL_PROP_ALLOCATED, NULL, alloc, src);
232 spa_prop_add_list(*nvp, ZPOOL_PROP_FREE, NULL,
233 size - alloc, src);
234
235 spa_prop_add_list(*nvp, ZPOOL_PROP_FRAGMENTATION, NULL,
236 metaslab_class_fragmentation(mc), src);
237 spa_prop_add_list(*nvp, ZPOOL_PROP_EXPANDSZ, NULL,
238 metaslab_class_expandable_space(mc), src);
239 spa_prop_add_list(*nvp, ZPOOL_PROP_READONLY, NULL,
240 (spa_mode(spa) == FREAD), src);
241
242 cap = (size == 0) ? 0 : (alloc * 100 / size);
243 spa_prop_add_list(*nvp, ZPOOL_PROP_CAPACITY, NULL, cap, src);
244
245 spa_prop_add_list(*nvp, ZPOOL_PROP_DEDUPRATIO, NULL,
246 ddt_get_pool_dedup_ratio(spa), src);
247
248 spa_prop_add_list(*nvp, ZPOOL_PROP_HEALTH, NULL,
249 rvd->vdev_state, src);
250
251 version = spa_version(spa);
252 if (version == zpool_prop_default_numeric(ZPOOL_PROP_VERSION))
253 src = ZPROP_SRC_DEFAULT;
254 else
255 src = ZPROP_SRC_LOCAL;
256 spa_prop_add_list(*nvp, ZPOOL_PROP_VERSION, NULL, version, src);
257 }
258
259 if (pool != NULL) {
260 /*
261 * The $FREE directory was introduced in SPA_VERSION_DEADLISTS;
262 * when opening pools created before this version, freedir will be NULL.
263 */
264 if (pool->dp_free_dir != NULL) {
265 spa_prop_add_list(*nvp, ZPOOL_PROP_FREEING, NULL,
266 dsl_dir_phys(pool->dp_free_dir)->dd_used_bytes,
267 src);
268 } else {
269 spa_prop_add_list(*nvp, ZPOOL_PROP_FREEING,
270 NULL, 0, src);
271 }
272
273 if (pool->dp_leak_dir != NULL) {
274 spa_prop_add_list(*nvp, ZPOOL_PROP_LEAKED, NULL,
275 dsl_dir_phys(pool->dp_leak_dir)->dd_used_bytes,
276 src);
277 } else {
278 spa_prop_add_list(*nvp, ZPOOL_PROP_LEAKED,
279 NULL, 0, src);
280 }
281 }
282
283 spa_prop_add_list(*nvp, ZPOOL_PROP_GUID, NULL, spa_guid(spa), src);
284
285 if (spa->spa_comment != NULL) {
286 spa_prop_add_list(*nvp, ZPOOL_PROP_COMMENT, spa->spa_comment,
287 0, ZPROP_SRC_LOCAL);
288 }
289
290 if (spa->spa_root != NULL)
291 spa_prop_add_list(*nvp, ZPOOL_PROP_ALTROOT, spa->spa_root,
292 0, ZPROP_SRC_LOCAL);
293
294 if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_BLOCKS)) {
295 spa_prop_add_list(*nvp, ZPOOL_PROP_MAXBLOCKSIZE, NULL,
296 MIN(zfs_max_recordsize, SPA_MAXBLOCKSIZE), ZPROP_SRC_NONE);
297 } else {
298 spa_prop_add_list(*nvp, ZPOOL_PROP_MAXBLOCKSIZE, NULL,
299 SPA_OLD_MAXBLOCKSIZE, ZPROP_SRC_NONE);
300 }
301
302 if ((dp = list_head(&spa->spa_config_list)) != NULL) {
303 if (dp->scd_path == NULL) {
304 spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
305 "none", 0, ZPROP_SRC_LOCAL);
306 } else if (strcmp(dp->scd_path, spa_config_path) != 0) {
307 spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
308 dp->scd_path, 0, ZPROP_SRC_LOCAL);
309 }
310 }
311}
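/*
 * Note on the cachefile property above: it is only reported when it differs
 * from the default -- "none" if the pool has no cache file (scd_path is
 * NULL), or the explicit path if it is not spa_config_path.  A pool using
 * the default cache file gets no ZPOOL_PROP_CACHEFILE entry at all.
 */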
312
313/*
314 * Get zpool property values.
315 */
316int
317spa_prop_get(spa_t *spa, nvlist_t **nvp)
318{
319 objset_t *mos = spa->spa_meta_objset;
320 zap_cursor_t zc;
321 zap_attribute_t za;
322 int err;
323
324 VERIFY(nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP) == 0);
325
326 mutex_enter(&spa->spa_props_lock);
327
328 /*
329 * Get properties from the spa config.
330 */
331 spa_prop_get_config(spa, nvp);
332
333 /* If no pool property object, there are no more props to get. */
334 if (mos == NULL || spa->spa_pool_props_object == 0) {
335 mutex_exit(&spa->spa_props_lock);
336 return (0);
337 }
338
339 /*
340 * Get properties from the MOS pool property object.
341 */
342 for (zap_cursor_init(&zc, mos, spa->spa_pool_props_object);
343 (err = zap_cursor_retrieve(&zc, &za)) == 0;
344 zap_cursor_advance(&zc)) {
345 uint64_t intval = 0;
346 char *strval = NULL;
347 zprop_source_t src = ZPROP_SRC_DEFAULT;
348 zpool_prop_t prop;
349
350 if ((prop = zpool_name_to_prop(za.za_name)) == ZPROP_INVAL)
351 continue;
352
353 switch (za.za_integer_length) {
354 case 8:
355 /* integer property */
356 if (za.za_first_integer !=
357 zpool_prop_default_numeric(prop))
358 src = ZPROP_SRC_LOCAL;
359
360 if (prop == ZPOOL_PROP_BOOTFS) {
361 dsl_pool_t *dp;
362 dsl_dataset_t *ds = NULL;
363
364 dp = spa_get_dsl(spa);
365 dsl_pool_config_enter(dp, FTAG);
366 if (err = dsl_dataset_hold_obj(dp,
367 za.za_first_integer, FTAG, &ds)) {
368 dsl_pool_config_exit(dp, FTAG);
369 break;
370 }
371
372 strval = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN,
373 KM_SLEEP);
374 dsl_dataset_name(ds, strval);
375 dsl_dataset_rele(ds, FTAG);
376 dsl_pool_config_exit(dp, FTAG);
377 } else {
378 strval = NULL;
379 intval = za.za_first_integer;
380 }
381
382 spa_prop_add_list(*nvp, prop, strval, intval, src);
383
384 if (strval != NULL)
385 kmem_free(strval, ZFS_MAX_DATASET_NAME_LEN);
386
387 break;
388
389 case 1:
390 /* string property */
391 strval = kmem_alloc(za.za_num_integers, KM_SLEEP);
392 err = zap_lookup(mos, spa->spa_pool_props_object,
393 za.za_name, 1, za.za_num_integers, strval);
394 if (err) {
395 kmem_free(strval, za.za_num_integers);
396 break;
397 }
398 spa_prop_add_list(*nvp, prop, strval, 0, src);
399 kmem_free(strval, za.za_num_integers);
400 break;
401
402 default:
403 break;
404 }
405 }
406 zap_cursor_fini(&zc);
407 mutex_exit(&spa->spa_props_lock);
408out:
409 if (err && err != ENOENT) {
410 nvlist_free(*nvp);
411 *nvp = NULL;
412 return (err);
413 }
414
415 return (0);
416}
417
418/*
419 * Validate the given pool properties nvlist and modify the list
420 * for the property values to be set.
421 */
422static int
423spa_prop_validate(spa_t *spa, nvlist_t *props)
424{
425 nvpair_t *elem;
426 int error = 0, reset_bootfs = 0;
427 uint64_t objnum = 0;
428 boolean_t has_feature = B_FALSE;
429
430 elem = NULL;
431 while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
432 uint64_t intval;
433 char *strval, *slash, *check, *fname;
434 const char *propname = nvpair_name(elem);
435 zpool_prop_t prop = zpool_name_to_prop(propname);
436
437 switch (prop) {
438 case ZPROP_INVAL:
439 if (!zpool_prop_feature(propname)) {
440 error = SET_ERROR(EINVAL);
441 break;
442 }
443
444 /*
445 * Sanitize the input.
446 */
447 if (nvpair_type(elem) != DATA_TYPE_UINT64) {
448 error = SET_ERROR(EINVAL);
449 break;
450 }
451
452 if (nvpair_value_uint64(elem, &intval) != 0) {
453 error = SET_ERROR(EINVAL);
454 break;
455 }
456
457 if (intval != 0) {
458 error = SET_ERROR(EINVAL);
459 break;
460 }
461
462 fname = strchr(propname, '@') + 1;
463 if (zfeature_lookup_name(fname, NULL) != 0) {
464 error = SET_ERROR(EINVAL);
465 break;
466 }
467
468 has_feature = B_TRUE;
469 break;
470
471 case ZPOOL_PROP_VERSION:
472 error = nvpair_value_uint64(elem, &intval);
473 if (!error &&
474 (intval < spa_version(spa) ||
475 intval > SPA_VERSION_BEFORE_FEATURES ||
476 has_feature))
477 error = SET_ERROR(EINVAL);
478 break;
479
480 case ZPOOL_PROP_DELEGATION:
481 case ZPOOL_PROP_AUTOREPLACE:
482 case ZPOOL_PROP_LISTSNAPS:
483 case ZPOOL_PROP_AUTOEXPAND:
484 error = nvpair_value_uint64(elem, &intval);
485 if (!error && intval > 1)
486 error = SET_ERROR(EINVAL);
487 break;
488
489 case ZPOOL_PROP_BOOTFS:
490 /*
491 * If the pool version is less than SPA_VERSION_BOOTFS,
492 * or the pool is still being created (version == 0),
493 * the bootfs property cannot be set.
494 */
495 if (spa_version(spa) < SPA_VERSION_BOOTFS) {
496 error = SET_ERROR(ENOTSUP);
497 break;
498 }
499
500 /*
501 * Make sure the vdev config is bootable
502 */
503 if (!vdev_is_bootable(spa->spa_root_vdev)) {
504 error = SET_ERROR(ENOTSUP);
505 break;
506 }
507
508 reset_bootfs = 1;
509
510 error = nvpair_value_string(elem, &strval);
511
512 if (!error) {
513 objset_t *os;
514 uint64_t propval;
515
516 if (strval == NULL || strval[0] == '\0') {
517 objnum = zpool_prop_default_numeric(
518 ZPOOL_PROP_BOOTFS);
519 break;
520 }
521
522 if (error = dmu_objset_hold(strval, FTAG, &os))
523 break;
524
525 /*
526 * Must be ZPL, and its property settings
527 * must be supported by GRUB (compression
528 * is not gzip, and large blocks are not used).
529 */
530
531 if (dmu_objset_type(os) != DMU_OST_ZFS) {
532 error = SET_ERROR(ENOTSUP);
533 } else if ((error =
534 dsl_prop_get_int_ds(dmu_objset_ds(os),
535 zfs_prop_to_name(ZFS_PROP_COMPRESSION),
536 &propval)) == 0 &&
537 !BOOTFS_COMPRESS_VALID(propval)) {
538 error = SET_ERROR(ENOTSUP);
539 } else if ((error =
540 dsl_prop_get_int_ds(dmu_objset_ds(os),
541 zfs_prop_to_name(ZFS_PROP_RECORDSIZE),
542 &propval)) == 0 &&
543 propval > SPA_OLD_MAXBLOCKSIZE) {
544 error = SET_ERROR(ENOTSUP);
545 } else {
546 objnum = dmu_objset_id(os);
547 }
548 dmu_objset_rele(os, FTAG);
549 }
550 break;
551
552 case ZPOOL_PROP_FAILUREMODE:
553 error = nvpair_value_uint64(elem, &intval);
554 if (!error && (intval < ZIO_FAILURE_MODE_WAIT ||
555 intval > ZIO_FAILURE_MODE_PANIC))
556 error = SET_ERROR(EINVAL);
557
558 /*
559 * This is a special case which only occurs when
560 * the pool has completely failed. This allows
561 * the user to change the in-core failmode property
562 * without syncing it out to disk (I/Os might
563 * currently be blocked). We do this by returning
564 * EIO to the caller (spa_prop_set) to trick it
565 * into thinking we encountered a property validation
566 * error.
567 */
568 if (!error && spa_suspended(spa)) {
569 spa->spa_failmode = intval;
570 error = SET_ERROR(EIO);
571 }
572 break;
573
574 case ZPOOL_PROP_CACHEFILE:
575 if ((error = nvpair_value_string(elem, &strval)) != 0)
576 break;
577
578 if (strval[0] == '\0')
579 break;
580
581 if (strcmp(strval, "none") == 0)
582 break;
583
584 if (strval[0] != '/') {
585 error = SET_ERROR(EINVAL);
586 break;
587 }
588
589 slash = strrchr(strval, '/');
590 ASSERT(slash != NULL);
591
592 if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
593 strcmp(slash, "/..") == 0)
594 error = SET_ERROR(EINVAL);
595 break;
596
597 case ZPOOL_PROP_COMMENT:
598 if ((error = nvpair_value_string(elem, &strval)) != 0)
599 break;
600 for (check = strval; *check != '\0'; check++) {
601 /*
602 * The kernel doesn't have an easy isprint()
603 * check. For this kernel check, we merely
604 * check ASCII apart from DEL. Fix this if
605 * there is an easy-to-use kernel isprint().
606 */
607 if (*check >= 0x7f) {
608 error = SET_ERROR(EINVAL);
609 break;
610 }
611 }
612 if (strlen(strval) > ZPROP_MAX_COMMENT)
613 error = E2BIG;
614 break;
615
616 case ZPOOL_PROP_DEDUPDITTO:
617 if (spa_version(spa) < SPA_VERSION_DEDUP)
618 error = SET_ERROR(ENOTSUP);
619 else
620 error = nvpair_value_uint64(elem, &intval);
621 if (error == 0 &&
622 intval != 0 && intval < ZIO_DEDUPDITTO_MIN)
623 error = SET_ERROR(EINVAL);
624 break;
625 }
626
627 if (error)
628 break;
629 }
630
631 if (!error && reset_bootfs) {
632 error = nvlist_remove(props,
633 zpool_prop_to_name(ZPOOL_PROP_BOOTFS), DATA_TYPE_STRING);
634
635 if (!error) {
636 error = nvlist_add_uint64(props,
637 zpool_prop_to_name(ZPOOL_PROP_BOOTFS), objnum);
638 }
639 }
640
641 return (error);
642}
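/*
 * Note that for bootfs the validation above rewrites the property in place:
 * a string such as "tank/ROOT/default" (hypothetical name) is checked
 * against the GRUB constraints (ZPL dataset, bootable vdev layout, no gzip
 * compression, recordsize <= SPA_OLD_MAXBLOCKSIZE) and is then replaced in
 * the nvlist by the dataset's object number before the properties are
 * synced out.
 */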
643
644void
645spa_configfile_set(spa_t *spa, nvlist_t *nvp, boolean_t need_sync)
646{
647 char *cachefile;
648 spa_config_dirent_t *dp;
649
650 if (nvlist_lookup_string(nvp, zpool_prop_to_name(ZPOOL_PROP_CACHEFILE),
651 &cachefile) != 0)
652 return;
653
654 dp = kmem_alloc(sizeof (spa_config_dirent_t),
655 KM_SLEEP);
656
657 if (cachefile[0] == '\0')
658 dp->scd_path = spa_strdup(spa_config_path);
659 else if (strcmp(cachefile, "none") == 0)
660 dp->scd_path = NULL;
661 else
662 dp->scd_path = spa_strdup(cachefile);
663
664 list_insert_head(&spa->spa_config_list, dp);
665 if (need_sync)
666 spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
667}
668
669int
670spa_prop_set(spa_t *spa, nvlist_t *nvp)
671{
672 int error;
673 nvpair_t *elem = NULL;
674 boolean_t need_sync = B_FALSE;
675
676 if ((error = spa_prop_validate(spa, nvp)) != 0)
677 return (error);
678
679 while ((elem = nvlist_next_nvpair(nvp, elem)) != NULL) {
680 zpool_prop_t prop = zpool_name_to_prop(nvpair_name(elem));
681
682 if (prop == ZPOOL_PROP_CACHEFILE ||
683 prop == ZPOOL_PROP_ALTROOT ||
684 prop == ZPOOL_PROP_READONLY)
685 continue;
686
687 if (prop == ZPOOL_PROP_VERSION || prop == ZPROP_INVAL) {
688 uint64_t ver;
689
690 if (prop == ZPOOL_PROP_VERSION) {
691 VERIFY(nvpair_value_uint64(elem, &ver) == 0);
692 } else {
693 ASSERT(zpool_prop_feature(nvpair_name(elem)));
694 ver = SPA_VERSION_FEATURES;
695 need_sync = B_TRUE;
696 }
697
698 /* Save time if the version is already set. */
699 if (ver == spa_version(spa))
700 continue;
701
702 /*
703 * In addition to the pool directory object, we might
704 * create the pool properties object, the features for
705 * read object, the features for write object, or the
706 * feature descriptions object.
707 */
708 error = dsl_sync_task(spa->spa_name, NULL,
709 spa_sync_version, &ver,
710 6, ZFS_SPACE_CHECK_RESERVED);
711 if (error)
712 return (error);
713 continue;
714 }
715
716 need_sync = B_TRUE;
717 break;
718 }
719
720 if (need_sync) {
721 return (dsl_sync_task(spa->spa_name, NULL, spa_sync_props,
722 nvp, 6, ZFS_SPACE_CHECK_RESERVED));
723 }
724
725 return (0);
726}
727
728/*
729 * If the bootfs property value is dsobj, clear it.
730 */
731void
732spa_prop_clear_bootfs(spa_t *spa, uint64_t dsobj, dmu_tx_t *tx)
733{
734 if (spa->spa_bootfs == dsobj && spa->spa_pool_props_object != 0) {
735 VERIFY(zap_remove(spa->spa_meta_objset,
736 spa->spa_pool_props_object,
737 zpool_prop_to_name(ZPOOL_PROP_BOOTFS), tx) == 0);
738 spa->spa_bootfs = 0;
739 }
740}
741
742/*ARGSUSED*/
743static int
744spa_change_guid_check(void *arg, dmu_tx_t *tx)
745{
746 uint64_t *newguid = arg;
747 spa_t *spa = dmu_tx_pool(tx)->dp_spa;
748 vdev_t *rvd = spa->spa_root_vdev;
749 uint64_t vdev_state;
750
751 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
752 vdev_state = rvd->vdev_state;
753 spa_config_exit(spa, SCL_STATE, FTAG);
754
755 if (vdev_state != VDEV_STATE_HEALTHY)
756 return (SET_ERROR(ENXIO));
757
758 ASSERT3U(spa_guid(spa), !=, *newguid);
759
760 return (0);
761}
762
763static void
764spa_change_guid_sync(void *arg, dmu_tx_t *tx)
765{
766 uint64_t *newguid = arg;
767 spa_t *spa = dmu_tx_pool(tx)->dp_spa;
768 uint64_t oldguid;
769 vdev_t *rvd = spa->spa_root_vdev;
770
771 oldguid = spa_guid(spa);
772
773 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
774 rvd->vdev_guid = *newguid;
775 rvd->vdev_guid_sum += (*newguid - oldguid);
776 vdev_config_dirty(rvd);
777 spa_config_exit(spa, SCL_STATE, FTAG);
778
779 spa_history_log_internal(spa, "guid change", tx, "old=%llu new=%llu",
780 oldguid, *newguid);
781}
782
783/*
784 * Change the GUID for the pool. This is done so that we can later
785 * re-import a pool built from a clone of our own vdevs. We will modify
786 * the root vdev's guid, our own pool guid, and then mark all of our
787 * vdevs dirty. Note that we must make sure that all our vdevs are
788 * online when we do this, or else any vdevs that weren't present
789 * would be orphaned from our pool. We are also going to issue a
790 * sysevent to update any watchers.
791 */
792int
793spa_change_guid(spa_t *spa)
794{
795 int error;
796 uint64_t guid;
797
798 mutex_enter(&spa->spa_vdev_top_lock);
799 mutex_enter(&spa_namespace_lock);
800 guid = spa_generate_guid(NULL);
801
802 error = dsl_sync_task(spa->spa_name, spa_change_guid_check,
803 spa_change_guid_sync, &guid, 5, ZFS_SPACE_CHECK_RESERVED);
804
805 if (error == 0) {
806 spa_config_sync(spa, B_FALSE, B_TRUE);
807 spa_event_notify(spa, NULL, ESC_ZFS_POOL_REGUID);
808 }
809
810 mutex_exit(&spa_namespace_lock);
811 mutex_exit(&spa->spa_vdev_top_lock);
812
813 return (error);
814}
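/*
 * The guid change above is a two-step dsl_sync_task: spa_change_guid_check()
 * refuses the change unless the root vdev is healthy, and
 * spa_change_guid_sync() then rewrites the root vdev guid and guid sum and
 * dirties the config; on success the config cache is rewritten and an
 * ESC_ZFS_POOL_REGUID sysevent is posted.
 */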
815
816/*
817 * ==========================================================================
818 * SPA state manipulation (open/create/destroy/import/export)
819 * ==========================================================================
820 */
821
822static int
823spa_error_entry_compare(const void *a, const void *b)
824{
825 spa_error_entry_t *sa = (spa_error_entry_t *)a;
826 spa_error_entry_t *sb = (spa_error_entry_t *)b;
827 int ret;
828
829 ret = bcmp(&sa->se_bookmark, &sb->se_bookmark,
830 sizeof (zbookmark_phys_t));
831
832 if (ret < 0)
833 return (-1);
834 else if (ret > 0)
835 return (1);
836 else
837 return (0);
838}
839
840/*
841 * Utility function which retrieves copies of the current logs and
842 * re-initializes them in the process.
843 */
844void
845spa_get_errlists(spa_t *spa, avl_tree_t *last, avl_tree_t *scrub)
846{
847 ASSERT(MUTEX_HELD(&spa->spa_errlist_lock));
848
849 bcopy(&spa->spa_errlist_last, last, sizeof (avl_tree_t));
850 bcopy(&spa->spa_errlist_scrub, scrub, sizeof (avl_tree_t));
851
852 avl_create(&spa->spa_errlist_scrub,
853 spa_error_entry_compare, sizeof (spa_error_entry_t),
854 offsetof(spa_error_entry_t, se_avl));
855 avl_create(&spa->spa_errlist_last,
856 spa_error_entry_compare, sizeof (spa_error_entry_t),
857 offsetof(spa_error_entry_t, se_avl));
858}
859
860static void
861spa_taskqs_init(spa_t *spa, zio_type_t t, zio_taskq_type_t q)
862{
863 const zio_taskq_info_t *ztip = &zio_taskqs[t][q];
864 enum zti_modes mode = ztip->zti_mode;
865 uint_t value = ztip->zti_value;
866 uint_t count = ztip->zti_count;
867 spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
868 char name[32];
869 uint_t flags = 0;
870 boolean_t batch = B_FALSE;
871
872 if (mode == ZTI_MODE_NULL) {
873 tqs->stqs_count = 0;
874 tqs->stqs_taskq = NULL;
875 return;
876 }
877
878 ASSERT3U(count, >, 0);
879
880 tqs->stqs_count = count;
881 tqs->stqs_taskq = kmem_alloc(count * sizeof (taskq_t *), KM_SLEEP);
882
883 switch (mode) {
884 case ZTI_MODE_FIXED:
885 ASSERT3U(value, >=, 1);
886 value = MAX(value, 1);
887 break;
888
889 case ZTI_MODE_BATCH:
890 batch = B_TRUE;
891 flags |= TASKQ_THREADS_CPU_PCT;
892 value = zio_taskq_batch_pct;
893 break;
894
895 default:
896 panic("unrecognized mode for %s_%s taskq (%u:%u) in "
897 "spa_activate()",
898 zio_type_name[t], zio_taskq_types[q], mode, value);
899 break;
900 }
901
902 for (uint_t i = 0; i < count; i++) {
903 taskq_t *tq;
904
905 if (count > 1) {
906 (void) snprintf(name, sizeof (name), "%s_%s_%u",
907 zio_type_name[t], zio_taskq_types[q], i);
908 } else {
909 (void) snprintf(name, sizeof (name), "%s_%s",
910 zio_type_name[t], zio_taskq_types[q]);
911 }
912
913#ifdef SYSDC
914 if (zio_taskq_sysdc && spa->spa_proc != &p0) {
915 if (batch)
916 flags |= TASKQ_DC_BATCH;
917
918 tq = taskq_create_sysdc(name, value, 50, INT_MAX,
919 spa->spa_proc, zio_taskq_basedc, flags);
920 } else {
921#endif
922 pri_t pri = maxclsyspri;
923 /*
924 * The write issue taskq can be extremely CPU
925 * intensive. Run it at slightly lower priority
926 * than the other taskqs.
927 * FreeBSD notes:
928 * - numerically higher priorities are lower priorities;
929 * - if priorities divided by four (RQ_PPQ) are equal
930 * then a difference between them is insignificant.
931 */
932 if (t == ZIO_TYPE_WRITE && q == ZIO_TASKQ_ISSUE)
933#ifdef illumos
934 pri--;
935#else
936 pri += 4;
937#endif
938
939 tq = taskq_create_proc(name, value, pri, 50,
940 INT_MAX, spa->spa_proc, flags);
941#ifdef SYSDC
942 }
943#endif
944
945 tqs->stqs_taskq[i] = tq;
946 }
947}
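/*
 * The snprintf() above names each taskq "<type>_<queue>" when a type/queue
 * pair has a single taskq and "<type>_<queue>_<i>" when it has several,
 * using the zio_type_name[] and zio_taskq_types[] strings.  On FreeBSD the
 * write-issue taskq also runs at a numerically higher (i.e. lower) priority
 * than the others, as noted in the comment above.
 */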
948
949static void
950spa_taskqs_fini(spa_t *spa, zio_type_t t, zio_taskq_type_t q)
951{
952 spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
953
954 if (tqs->stqs_taskq == NULL) {
955 ASSERT0(tqs->stqs_count);
956 return;
957 }
958
959 for (uint_t i = 0; i < tqs->stqs_count; i++) {
960 ASSERT3P(tqs->stqs_taskq[i], !=, NULL);
961 taskq_destroy(tqs->stqs_taskq[i]);
962 }
963
964 kmem_free(tqs->stqs_taskq, tqs->stqs_count * sizeof (taskq_t *));
965 tqs->stqs_taskq = NULL;
966}
967
968/*
969 * Dispatch a task to the appropriate taskq for the ZFS I/O type and priority.
970 * Note that a type may have multiple discrete taskqs to avoid lock contention
971 * on the taskq itself. In that case we choose which taskq at random by using
972 * the low bits of gethrtime().
973 */
974void
975spa_taskq_dispatch_ent(spa_t *spa, zio_type_t t, zio_taskq_type_t q,
976 task_func_t *func, void *arg, uint_t flags, taskq_ent_t *ent)
977{
978 spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
979 taskq_t *tq;
980
981 ASSERT3P(tqs->stqs_taskq, !=, NULL);
982 ASSERT3U(tqs->stqs_count, !=, 0);
983
984 if (tqs->stqs_count == 1) {
985 tq = tqs->stqs_taskq[0];
986 } else {
987#ifdef _KERNEL
988 tq = tqs->stqs_taskq[cpu_ticks() % tqs->stqs_count];
989#else
990 tq = tqs->stqs_taskq[gethrtime() % tqs->stqs_count];
991#endif
992 }
993
994 taskq_dispatch_ent(tq, func, arg, flags, ent);
995}
996
997static void
998spa_create_zio_taskqs(spa_t *spa)
999{
1000 for (int t = 0; t < ZIO_TYPES; t++) {
1001 for (int q = 0; q < ZIO_TASKQ_TYPES; q++) {
1002 spa_taskqs_init(spa, t, q);
1003 }
1004 }
1005}
1006
1007#ifdef _KERNEL
1008#ifdef SPA_PROCESS
1009static void
1010spa_thread(void *arg)
1011{
1012 callb_cpr_t cprinfo;
1013
1014 spa_t *spa = arg;
1015 user_t *pu = PTOU(curproc);
1016
1017 CALLB_CPR_INIT(&cprinfo, &spa->spa_proc_lock, callb_generic_cpr,
1018 spa->spa_name);
1019
1020 ASSERT(curproc != &p0);
1021 (void) snprintf(pu->u_psargs, sizeof (pu->u_psargs),
1022 "zpool-%s", spa->spa_name);
1023 (void) strlcpy(pu->u_comm, pu->u_psargs, sizeof (pu->u_comm));
1024
1025#ifdef PSRSET_BIND
1026 /* bind this thread to the requested psrset */
1027 if (zio_taskq_psrset_bind != PS_NONE) {
1028 pool_lock();
1029 mutex_enter(&cpu_lock);
1030 mutex_enter(&pidlock);
1031 mutex_enter(&curproc->p_lock);
1032
1033 if (cpupart_bind_thread(curthread, zio_taskq_psrset_bind,
1034 0, NULL, NULL) == 0) {
1035 curthread->t_bind_pset = zio_taskq_psrset_bind;
1036 } else {
1037 cmn_err(CE_WARN,
1038 "Couldn't bind process for zfs pool \"%s\" to "
1039 "pset %d\n", spa->spa_name, zio_taskq_psrset_bind);
1040 }
1041
1042 mutex_exit(&curproc->p_lock);
1043 mutex_exit(&pidlock);
1044 mutex_exit(&cpu_lock);
1045 pool_unlock();
1046 }
1047#endif
1048
1049#ifdef SYSDC
1050 if (zio_taskq_sysdc) {
1051 sysdc_thread_enter(curthread, 100, 0);
1052 }
1053#endif
1054
1055 spa->spa_proc = curproc;
1056 spa->spa_did = curthread->t_did;
1057
1058 spa_create_zio_taskqs(spa);
1059
1060 mutex_enter(&spa->spa_proc_lock);
1061 ASSERT(spa->spa_proc_state == SPA_PROC_CREATED);
1062
1063 spa->spa_proc_state = SPA_PROC_ACTIVE;
1064 cv_broadcast(&spa->spa_proc_cv);
1065
1066 CALLB_CPR_SAFE_BEGIN(&cprinfo);
1067 while (spa->spa_proc_state == SPA_PROC_ACTIVE)
1068 cv_wait(&spa->spa_proc_cv, &spa->spa_proc_lock);
1069 CALLB_CPR_SAFE_END(&cprinfo, &spa->spa_proc_lock);
1070
1071 ASSERT(spa->spa_proc_state == SPA_PROC_DEACTIVATE);
1072 spa->spa_proc_state = SPA_PROC_GONE;
1073 spa->spa_proc = &p0;
1074 cv_broadcast(&spa->spa_proc_cv);
1075 CALLB_CPR_EXIT(&cprinfo); /* drops spa_proc_lock */
1076
1077 mutex_enter(&curproc->p_lock);
1078 lwp_exit();
1079}
1080#endif /* SPA_PROCESS */
1081#endif
1082
1083/*
1084 * Activate an uninitialized pool.
1085 */
1086static void
1087spa_activate(spa_t *spa, int mode)
1088{
1089 ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);
1090
1091 spa->spa_state = POOL_STATE_ACTIVE;
1092 spa->spa_mode = mode;
1093
1094 spa->spa_normal_class = metaslab_class_create(spa, zfs_metaslab_ops);
1095 spa->spa_log_class = metaslab_class_create(spa, zfs_metaslab_ops);
1096
1097 /* Try to create a covering process */
1098 mutex_enter(&spa->spa_proc_lock);
1099 ASSERT(spa->spa_proc_state == SPA_PROC_NONE);
1100 ASSERT(spa->spa_proc == &p0);
1101 spa->spa_did = 0;
1102
1103#ifdef SPA_PROCESS
1104 /* Only create a process if we're going to be around a while. */
1105 if (spa_create_process && strcmp(spa->spa_name, TRYIMPORT_NAME) != 0) {
1106 if (newproc(spa_thread, (caddr_t)spa, syscid, maxclsyspri,
1107 NULL, 0) == 0) {
1108 spa->spa_proc_state = SPA_PROC_CREATED;
1109 while (spa->spa_proc_state == SPA_PROC_CREATED) {
1110 cv_wait(&spa->spa_proc_cv,
1111 &spa->spa_proc_lock);
1112 }
1113 ASSERT(spa->spa_proc_state == SPA_PROC_ACTIVE);
1114 ASSERT(spa->spa_proc != &p0);
1115 ASSERT(spa->spa_did != 0);
1116 } else {
1117#ifdef _KERNEL
1118 cmn_err(CE_WARN,
1119 "Couldn't create process for zfs pool \"%s\"\n",
1120 spa->spa_name);
1121#endif
1122 }
1123 }
1124#endif /* SPA_PROCESS */
1125 mutex_exit(&spa->spa_proc_lock);
1126
1127 /* If we didn't create a process, we need to create our taskqs. */
1128 ASSERT(spa->spa_proc == &p0);
1129 if (spa->spa_proc == &p0) {
1130 spa_create_zio_taskqs(spa);
1131 }
1132
1133 /*
1134 * Start TRIM thread.
1135 */
1136 trim_thread_create(spa);
1137
1138 list_create(&spa->spa_config_dirty_list, sizeof (vdev_t),
1139 offsetof(vdev_t, vdev_config_dirty_node));
1140 list_create(&spa->spa_evicting_os_list, sizeof (objset_t),
1141 offsetof(objset_t, os_evicting_node));
1142 list_create(&spa->spa_state_dirty_list, sizeof (vdev_t),
1143 offsetof(vdev_t, vdev_state_dirty_node));
1144
1145 txg_list_create(&spa->spa_vdev_txg_list, spa,
1146 offsetof(struct vdev, vdev_txg_node));
1147
1148 avl_create(&spa->spa_errlist_scrub,
1149 spa_error_entry_compare, sizeof (spa_error_entry_t),
1150 offsetof(spa_error_entry_t, se_avl));
1151 avl_create(&spa->spa_errlist_last,
1152 spa_error_entry_compare, sizeof (spa_error_entry_t),
1153 offsetof(spa_error_entry_t, se_avl));
1154}
1155
1156/*
1157 * Opposite of spa_activate().
1158 */
1159static void
1160spa_deactivate(spa_t *spa)
1161{
1162 ASSERT(spa->spa_sync_on == B_FALSE);
1163 ASSERT(spa->spa_dsl_pool == NULL);
1164 ASSERT(spa->spa_root_vdev == NULL);
1165 ASSERT(spa->spa_async_zio_root == NULL);
1166 ASSERT(spa->spa_state != POOL_STATE_UNINITIALIZED);
1167
1168 /*
1169 * Stop TRIM thread in case spa_unload() wasn't called directly
1170 * before spa_deactivate().
1171 */
1172 trim_thread_destroy(spa);
1173
1174 spa_evicting_os_wait(spa);
1175
1176 txg_list_destroy(&spa->spa_vdev_txg_list);
1177
1178 list_destroy(&spa->spa_config_dirty_list);
1179 list_destroy(&spa->spa_evicting_os_list);
1180 list_destroy(&spa->spa_state_dirty_list);
1181
1182 for (int t = 0; t < ZIO_TYPES; t++) {
1183 for (int q = 0; q < ZIO_TASKQ_TYPES; q++) {
1184 spa_taskqs_fini(spa, t, q);
1185 }
1186 }
1187
1188 metaslab_class_destroy(spa->spa_normal_class);
1189 spa->spa_normal_class = NULL;
1190
1191 metaslab_class_destroy(spa->spa_log_class);
1192 spa->spa_log_class = NULL;
1193
1194 /*
1195 * If this was part of an import or the open otherwise failed, we may
1196 * still have errors left in the queues. Empty them just in case.
1197 */
1198 spa_errlog_drain(spa);
1199
1200 avl_destroy(&spa->spa_errlist_scrub);
1201 avl_destroy(&spa->spa_errlist_last);
1202
1203 spa->spa_state = POOL_STATE_UNINITIALIZED;
1204
1205 mutex_enter(&spa->spa_proc_lock);
1206 if (spa->spa_proc_state != SPA_PROC_NONE) {
1207 ASSERT(spa->spa_proc_state == SPA_PROC_ACTIVE);
1208 spa->spa_proc_state = SPA_PROC_DEACTIVATE;
1209 cv_broadcast(&spa->spa_proc_cv);
1210 while (spa->spa_proc_state == SPA_PROC_DEACTIVATE) {
1211 ASSERT(spa->spa_proc != &p0);
1212 cv_wait(&spa->spa_proc_cv, &spa->spa_proc_lock);
1213 }
1214 ASSERT(spa->spa_proc_state == SPA_PROC_GONE);
1215 spa->spa_proc_state = SPA_PROC_NONE;
1216 }
1217 ASSERT(spa->spa_proc == &p0);
1218 mutex_exit(&spa->spa_proc_lock);
1219
1220#ifdef SPA_PROCESS
1221 /*
1222 * We want to make sure spa_thread() has actually exited the ZFS
1223 * module, so that the module can't be unloaded out from underneath
1224 * it.
1225 */
1226 if (spa->spa_did != 0) {
1227 thread_join(spa->spa_did);
1228 spa->spa_did = 0;
1229 }
1230#endif /* SPA_PROCESS */
1231}
1232
1233/*
1234 * Verify a pool configuration, and construct the vdev tree appropriately. This
1235 * will create all the necessary vdevs in the appropriate layout, with each vdev
1236 * in the CLOSED state. This will prep the pool before open/creation/import.
1237 * All vdev validation is done by the vdev_alloc() routine.
1238 */
1239static int
1240spa_config_parse(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent,
1241 uint_t id, int atype)
1242{
1243 nvlist_t **child;
1244 uint_t children;
1245 int error;
1246
1247 if ((error = vdev_alloc(spa, vdp, nv, parent, id, atype)) != 0)
1248 return (error);
1249
1250 if ((*vdp)->vdev_ops->vdev_op_leaf)
1251 return (0);
1252
1253 error = nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
1254 &child, &children);
1255
1256 if (error == ENOENT)
1257 return (0);
1258
1259 if (error) {
1260 vdev_free(*vdp);
1261 *vdp = NULL;
1262 return (SET_ERROR(EINVAL));
1263 }
1264
1265 for (int c = 0; c < children; c++) {
1266 vdev_t *vd;
1267 if ((error = spa_config_parse(spa, &vd, child[c], *vdp, c,
1268 atype)) != 0) {
1269 vdev_free(*vdp);
1270 *vdp = NULL;
1271 return (error);
1272 }
1273 }
1274
1275 ASSERT(*vdp != NULL);
1276
1277 return (0);
1278}
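/*
 * spa_config_parse() mirrors the nvlist layout of the config: a root vdev
 * nvlist whose ZPOOL_CONFIG_CHILDREN array holds, say, mirror nvlists,
 * which in turn hold leaf disk nvlists, becomes the corresponding vdev_t
 * tree by recursing on each child with its array index as the vdev id.
 * A leaf nvlist simply becomes a leaf vdev with no further recursion.
 */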
1279
1280/*
1281 * Opposite of spa_load().
1282 */
1283static void
1284spa_unload(spa_t *spa)
1285{
1286 int i;
1287
1288 ASSERT(MUTEX_HELD(&spa_namespace_lock));
1289
1290 /*
1291 * Stop TRIM thread.
1292 */
1293 trim_thread_destroy(spa);
1294
1295 /*
1296 * Stop async tasks.
1297 */
1298 spa_async_suspend(spa);
1299
1300 /*
1301 * Stop syncing.
1302 */
1303 if (spa->spa_sync_on) {
1304 txg_sync_stop(spa->spa_dsl_pool);
1305 spa->spa_sync_on = B_FALSE;
1306 }
1307
1308 /*
1309 * Even though vdev_free() also calls vdev_metaslab_fini, we need
1310 * to call it earlier, before we wait for async i/o to complete.
1311 * This ensures that there is no async metaslab prefetching, by
1312 * calling taskq_wait(mg_taskq).
1313 */
1314 if (spa->spa_root_vdev != NULL) {
1315 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
1316 for (int c = 0; c < spa->spa_root_vdev->vdev_children; c++)
1317 vdev_metaslab_fini(spa->spa_root_vdev->vdev_child[c]);
1318 spa_config_exit(spa, SCL_ALL, FTAG);
1319 }
1320
1321 /*
1322 * Wait for any outstanding async I/O to complete.
1323 */
1324 if (spa->spa_async_zio_root != NULL) {
1325 for (int i = 0; i < max_ncpus; i++)
1326 (void) zio_wait(spa->spa_async_zio_root[i]);
1327 kmem_free(spa->spa_async_zio_root, max_ncpus * sizeof (void *));
1328 spa->spa_async_zio_root = NULL;
1329 }
1330
1331 bpobj_close(&spa->spa_deferred_bpobj);
1332
1333 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
1334
1335 /*
1336 * Close all vdevs.
1337 */
1338 if (spa->spa_root_vdev)
1339 vdev_free(spa->spa_root_vdev);
1340 ASSERT(spa->spa_root_vdev == NULL);
1341
1342 /*
1343 * Close the dsl pool.
1344 */
1345 if (spa->spa_dsl_pool) {
1346 dsl_pool_close(spa->spa_dsl_pool);
1347 spa->spa_dsl_pool = NULL;
1348 spa->spa_meta_objset = NULL;
1349 }
1350
1351 ddt_unload(spa);
1352
1353 /*
1354 * Drop and purge level 2 cache
1355 */
1356 spa_l2cache_drop(spa);
1357
1358 for (i = 0; i < spa->spa_spares.sav_count; i++)
1359 vdev_free(spa->spa_spares.sav_vdevs[i]);
1360 if (spa->spa_spares.sav_vdevs) {
1361 kmem_free(spa->spa_spares.sav_vdevs,
1362 spa->spa_spares.sav_count * sizeof (void *));
1363 spa->spa_spares.sav_vdevs = NULL;
1364 }
1365 if (spa->spa_spares.sav_config) {
1366 nvlist_free(spa->spa_spares.sav_config);
1367 spa->spa_spares.sav_config = NULL;
1368 }
1369 spa->spa_spares.sav_count = 0;
1370
1371 for (i = 0; i < spa->spa_l2cache.sav_count; i++) {
1372 vdev_clear_stats(spa->spa_l2cache.sav_vdevs[i]);
1373 vdev_free(spa->spa_l2cache.sav_vdevs[i]);
1374 }
1375 if (spa->spa_l2cache.sav_vdevs) {
1376 kmem_free(spa->spa_l2cache.sav_vdevs,
1377 spa->spa_l2cache.sav_count * sizeof (void *));
1378 spa->spa_l2cache.sav_vdevs = NULL;
1379 }
1380 if (spa->spa_l2cache.sav_config) {
1381 nvlist_free(spa->spa_l2cache.sav_config);
1382 spa->spa_l2cache.sav_config = NULL;
1383 }
1384 spa->spa_l2cache.sav_count = 0;
1385
1386 spa->spa_async_suspended = 0;
1387
1388 if (spa->spa_comment != NULL) {
1389 spa_strfree(spa->spa_comment);
1390 spa->spa_comment = NULL;
1391 }
1392
1393 spa_config_exit(spa, SCL_ALL, FTAG);
1394}
1395
1396/*
1397 * Load (or re-load) the current list of vdevs describing the active spares for
1398 * this pool. When this is called, we have some form of basic information in
1399 * 'spa_spares.sav_config'. We parse this into vdevs, try to open them, and
1400 * then re-generate a more complete list including status information.
1401 */
1402static void
1403spa_load_spares(spa_t *spa)
1404{
1405 nvlist_t **spares;
1406 uint_t nspares;
1407 int i;
1408 vdev_t *vd, *tvd;
1409
1410 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
1411
1412 /*
1413 * First, close and free any existing spare vdevs.
1414 */
1415 for (i = 0; i < spa->spa_spares.sav_count; i++) {
1416 vd = spa->spa_spares.sav_vdevs[i];
1417
1418 /* Undo the call to spa_activate() below */
1419 if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
1420 B_FALSE)) != NULL && tvd->vdev_isspare)
1421 spa_spare_remove(tvd);
1422 vdev_close(vd);
1423 vdev_free(vd);
1424 }
1425
1426 if (spa->spa_spares.sav_vdevs)
1427 kmem_free(spa->spa_spares.sav_vdevs,
1428 spa->spa_spares.sav_count * sizeof (void *));
1429
1430 if (spa->spa_spares.sav_config == NULL)
1431 nspares = 0;
1432 else
1433 VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
1434 ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);
1435
1436 spa->spa_spares.sav_count = (int)nspares;
1437 spa->spa_spares.sav_vdevs = NULL;
1438
1439 if (nspares == 0)
1440 return;
1441
1442 /*
1443 * Construct the array of vdevs, opening them to get status in the
1444 * process. For each spare, there are potentially two different vdev_t
1445 * structures associated with it: one in the list of spares (used only
1446 * for basic validation purposes) and one in the active vdev
1447 * configuration (if it's spared in). During this phase we open and
1448 * validate each vdev on the spare list. If the vdev also exists in the
1449 * active configuration, then we also mark this vdev as an active spare.
1450 */
1451 spa->spa_spares.sav_vdevs = kmem_alloc(nspares * sizeof (void *),
1452 KM_SLEEP);
1453 for (i = 0; i < spa->spa_spares.sav_count; i++) {
1454 VERIFY(spa_config_parse(spa, &vd, spares[i], NULL, 0,
1455 VDEV_ALLOC_SPARE) == 0);
1456 ASSERT(vd != NULL);
1457
1458 spa->spa_spares.sav_vdevs[i] = vd;
1459
1460 if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
1461 B_FALSE)) != NULL) {
1462 if (!tvd->vdev_isspare)
1463 spa_spare_add(tvd);
1464
1465 /*
1466 * We only mark the spare active if we were successfully
1467 * able to load the vdev. Otherwise, importing a pool
1468 * with a bad active spare would result in strange
1469 * behavior, because multiple pool would think the spare
1470 * is actively in use.
1471 *
1472 * There is a vulnerability here to an equally bizarre
1473 * circumstance, where a dead active spare is later
1474 * brought back to life (onlined or otherwise). Given
1475 * the rarity of this scenario, and the extra complexity
1476 * it adds, we ignore the possibility.
1477 */
1478 if (!vdev_is_dead(tvd))
1479 spa_spare_activate(tvd);
1480 }
1481
1482 vd->vdev_top = vd;
1483 vd->vdev_aux = &spa->spa_spares;
1484
1485 if (vdev_open(vd) != 0)
1486 continue;
1487
1488 if (vdev_validate_aux(vd) == 0)
1489 spa_spare_add(vd);
1490 }
1491
1492 /*
1493 * Recompute the stashed list of spares, with status information
1494 * this time.
1495 */
1496 VERIFY(nvlist_remove(spa->spa_spares.sav_config, ZPOOL_CONFIG_SPARES,
1497 DATA_TYPE_NVLIST_ARRAY) == 0);
1498
1499 spares = kmem_alloc(spa->spa_spares.sav_count * sizeof (void *),
1500 KM_SLEEP);
1501 for (i = 0; i < spa->spa_spares.sav_count; i++)
1502 spares[i] = vdev_config_generate(spa,
1503 spa->spa_spares.sav_vdevs[i], B_TRUE, VDEV_CONFIG_SPARE);
1504 VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
1505 ZPOOL_CONFIG_SPARES, spares, spa->spa_spares.sav_count) == 0);
1506 for (i = 0; i < spa->spa_spares.sav_count; i++)
1507 nvlist_free(spares[i]);
1508 kmem_free(spares, spa->spa_spares.sav_count * sizeof (void *));
1509}
1510
1511/*
1512 * Load (or re-load) the current list of vdevs describing the active l2cache for
1513 * this pool. When this is called, we have some form of basic information in
1514 * 'spa_l2cache.sav_config'. We parse this into vdevs, try to open them, and
1515 * then re-generate a more complete list including status information.
1516 * Devices which are already active have their details maintained, and are
1517 * not re-opened.
1518 */
1519static void
1520spa_load_l2cache(spa_t *spa)
1521{
1522 nvlist_t **l2cache;
1523 uint_t nl2cache;
1524 int i, j, oldnvdevs;
1525 uint64_t guid;
1526 vdev_t *vd, **oldvdevs, **newvdevs;
1527 spa_aux_vdev_t *sav = &spa->spa_l2cache;
1528
1529 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
1530
1531 if (sav->sav_config != NULL) {
1532 VERIFY(nvlist_lookup_nvlist_array(sav->sav_config,
1533 ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
1534 newvdevs = kmem_alloc(nl2cache * sizeof (void *), KM_SLEEP);
1535 } else {
1536 nl2cache = 0;
1537 newvdevs = NULL;
1538 }
1539
1540 oldvdevs = sav->sav_vdevs;
1541 oldnvdevs = sav->sav_count;
1542 sav->sav_vdevs = NULL;
1543 sav->sav_count = 0;
1544
1545 /*
1546 * Process new nvlist of vdevs.
1547 */
1548 for (i = 0; i < nl2cache; i++) {
1549 VERIFY(nvlist_lookup_uint64(l2cache[i], ZPOOL_CONFIG_GUID,
1550 &guid) == 0);
1551
1552 newvdevs[i] = NULL;
1553 for (j = 0; j < oldnvdevs; j++) {
1554 vd = oldvdevs[j];
1555 if (vd != NULL && guid == vd->vdev_guid) {
1556 /*
1557 * Retain previous vdev for add/remove ops.
1558 */
1559 newvdevs[i] = vd;
1560 oldvdevs[j] = NULL;
1561 break;
1562 }
1563 }
1564
1565 if (newvdevs[i] == NULL) {
1566 /*
1567 * Create new vdev
1568 */
1569 VERIFY(spa_config_parse(spa, &vd, l2cache[i], NULL, 0,
1570 VDEV_ALLOC_L2CACHE) == 0);
1571 ASSERT(vd != NULL);
1572 newvdevs[i] = vd;
1573
1574 /*
1575 * Commit this vdev as an l2cache device,
1576 * even if it fails to open.
1577 */
1578 spa_l2cache_add(vd);
1579
1580 vd->vdev_top = vd;
1581 vd->vdev_aux = sav;
1582
1583 spa_l2cache_activate(vd);
1584
1585 if (vdev_open(vd) != 0)
1586 continue;
1587
1588 (void) vdev_validate_aux(vd);
1589
1590 if (!vdev_is_dead(vd))
1591 l2arc_add_vdev(spa, vd);
1592 }
1593 }
1594
1595 /*
1596 * Purge vdevs that were dropped
1597 */
1598 for (i = 0; i < oldnvdevs; i++) {
1599 uint64_t pool;
1600
1601 vd = oldvdevs[i];
1602 if (vd != NULL) {
1603 ASSERT(vd->vdev_isl2cache);
1604
1605 if (spa_l2cache_exists(vd->vdev_guid, &pool) &&
1606 pool != 0ULL && l2arc_vdev_present(vd))
1607 l2arc_remove_vdev(vd);
1608 vdev_clear_stats(vd);
1609 vdev_free(vd);
1610 }
1611 }
1612
1613 if (oldvdevs)
1614 kmem_free(oldvdevs, oldnvdevs * sizeof (void *));
1615
1616 if (sav->sav_config == NULL)
1617 goto out;
1618
1619 sav->sav_vdevs = newvdevs;
1620 sav->sav_count = (int)nl2cache;
1621
1622 /*
1623 * Recompute the stashed list of l2cache devices, with status
1624 * information this time.
1625 */
1626 VERIFY(nvlist_remove(sav->sav_config, ZPOOL_CONFIG_L2CACHE,
1627 DATA_TYPE_NVLIST_ARRAY) == 0);
1628
1629 l2cache = kmem_alloc(sav->sav_count * sizeof (void *), KM_SLEEP);
1630 for (i = 0; i < sav->sav_count; i++)
1631 l2cache[i] = vdev_config_generate(spa,
1632 sav->sav_vdevs[i], B_TRUE, VDEV_CONFIG_L2CACHE);
1633 VERIFY(nvlist_add_nvlist_array(sav->sav_config,
1634 ZPOOL_CONFIG_L2CACHE, l2cache, sav->sav_count) == 0);
1635out:
1636 for (i = 0; i < sav->sav_count; i++)
1637 nvlist_free(l2cache[i]);
1638 if (sav->sav_count)
1639 kmem_free(l2cache, sav->sav_count * sizeof (void *));
1640}
1641
1642static int
1643load_nvlist(spa_t *spa, uint64_t obj, nvlist_t **value)
1644{
1645 dmu_buf_t *db;
1646 char *packed = NULL;
1647 size_t nvsize = 0;
1648 int error;
1649 *value = NULL;
1650
1651 error = dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db);
1652 if (error != 0)
1653 return (error);
1654
1655 nvsize = *(uint64_t *)db->db_data;
1656 dmu_buf_rele(db, FTAG);
1657
1658 packed = kmem_alloc(nvsize, KM_SLEEP);
1659 error = dmu_read(spa->spa_meta_objset, obj, 0, nvsize, packed,
1660 DMU_READ_PREFETCH);
1661 if (error == 0)
1662 error = nvlist_unpack(packed, nvsize, value, 0);
1663 kmem_free(packed, nvsize);
1664
1665 return (error);
1666}
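/*
 * The nvlist objects read above have a simple on-disk layout: the object's
 * bonus buffer holds a single uint64_t giving the size of the packed
 * nvlist, and the object data itself holds the packed bytes, which are
 * then decoded with nvlist_unpack().
 */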
1667
1668/*
1669 * Checks to see if the given vdev could not be opened, in which case we post a
1670 * sysevent to notify the autoreplace code that the device has been removed.
1671 */
1672static void
1673spa_check_removed(vdev_t *vd)
1674{
1675 for (int c = 0; c < vd->vdev_children; c++)
1676 spa_check_removed(vd->vdev_child[c]);
1677
1678 if (vd->vdev_ops->vdev_op_leaf && vdev_is_dead(vd) &&
1679 !vd->vdev_ishole) {
1680 zfs_post_autoreplace(vd->vdev_spa, vd);
1681 spa_event_notify(vd->vdev_spa, vd, ESC_ZFS_VDEV_CHECK);
1682 }
1683}
1684
1685static void
1686spa_config_valid_zaps(vdev_t *vd, vdev_t *mvd)
1687{
1688 ASSERT3U(vd->vdev_children, ==, mvd->vdev_children);
1689
1690 vd->vdev_top_zap = mvd->vdev_top_zap;
1691 vd->vdev_leaf_zap = mvd->vdev_leaf_zap;
1692
1693 for (uint64_t i = 0; i < vd->vdev_children; i++) {
1694 spa_config_valid_zaps(vd->vdev_child[i], mvd->vdev_child[i]);
1695 }
1696}
1697
1698/*
1699 * Validate the current config against the MOS config
1700 */
1701static boolean_t
1702spa_config_valid(spa_t *spa, nvlist_t *config)
1703{
1704 vdev_t *mrvd, *rvd = spa->spa_root_vdev;
1705 nvlist_t *nv;
1706
1707 VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nv) == 0);
1708
1709 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
1710 VERIFY(spa_config_parse(spa, &mrvd, nv, NULL, 0, VDEV_ALLOC_LOAD) == 0);
1711
1712 ASSERT3U(rvd->vdev_children, ==, mrvd->vdev_children);
1713
1714 /*
1715 * If we're doing a normal import, then build up any additional
1716 * diagnostic information about missing devices in this config.
1717 * We'll pass this up to the user for further processing.
1718 */
1719 if (!(spa->spa_import_flags & ZFS_IMPORT_MISSING_LOG)) {
1720 nvlist_t **child, *nv;
1721 uint64_t idx = 0;
1722
1723 child = kmem_alloc(rvd->vdev_children * sizeof (nvlist_t **),
1724 KM_SLEEP);
1725 VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);
1726
1727 for (int c = 0; c < rvd->vdev_children; c++) {
1728 vdev_t *tvd = rvd->vdev_child[c];
1729 vdev_t *mtvd = mrvd->vdev_child[c];
1730
1731 if (tvd->vdev_ops == &vdev_missing_ops &&
1732 mtvd->vdev_ops != &vdev_missing_ops &&
1733 mtvd->vdev_islog)
1734 child[idx++] = vdev_config_generate(spa, mtvd,
1735 B_FALSE, 0);
1736 }
1737
1738 if (idx) {
1739 VERIFY(nvlist_add_nvlist_array(nv,
1740 ZPOOL_CONFIG_CHILDREN, child, idx) == 0);
1741 VERIFY(nvlist_add_nvlist(spa->spa_load_info,
1742 ZPOOL_CONFIG_MISSING_DEVICES, nv) == 0);
1743
1744 for (int i = 0; i < idx; i++)
1745 nvlist_free(child[i]);
1746 }
1747 nvlist_free(nv);
1748 kmem_free(child, rvd->vdev_children * sizeof (char **));
1749 }
1750
1751 /*
1752 * Compare the root vdev tree with the information we have
1753 * from the MOS config (mrvd). Check each top-level vdev
1754 * with the corresponding MOS config top-level (mtvd).
1755 */
1756 for (int c = 0; c < rvd->vdev_children; c++) {
1757 vdev_t *tvd = rvd->vdev_child[c];
1758 vdev_t *mtvd = mrvd->vdev_child[c];
1759
1760 /*
1761 * Resolve any "missing" vdevs in the current configuration.
1762 * If we find that the MOS config has more accurate information
1763 * about the top-level vdev then use that vdev instead.
1764 */
1765 if (tvd->vdev_ops == &vdev_missing_ops &&
1766 mtvd->vdev_ops != &vdev_missing_ops) {
1767
1768 if (!(spa->spa_import_flags & ZFS_IMPORT_MISSING_LOG))
1769 continue;
1770
1771 /*
1772 * Device specific actions.
1773 */
1774 if (mtvd->vdev_islog) {
1775 spa_set_log_state(spa, SPA_LOG_CLEAR);
1776 } else {
1777 /*
1778 * XXX - once we have 'readonly' pool
1779 * support we should be able to handle
1780 * missing data devices by transitioning
1781 * the pool to readonly.
1782 */
1783 continue;
1784 }
1785
1786 /*
1787 * Swap the missing vdev with the data we were
1788 * able to obtain from the MOS config.
1789 */
1790 vdev_remove_child(rvd, tvd);
1791 vdev_remove_child(mrvd, mtvd);
1792
1793 vdev_add_child(rvd, mtvd);
1794 vdev_add_child(mrvd, tvd);
1795
1796 spa_config_exit(spa, SCL_ALL, FTAG);
1797 vdev_load(mtvd);
1798 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
1799
1800 vdev_reopen(rvd);
1801 } else {
1802 if (mtvd->vdev_islog) {
1803 /*
1804 * Load the slog device's state from the MOS
1805 * config since it's possible that the label
1806 * does not contain the most up-to-date
1807 * information.
1808 */
1809 vdev_load_log_state(tvd, mtvd);
1810 vdev_reopen(tvd);
1811 }
1812
1813 /*
1814 * Per-vdev ZAP info is stored exclusively in the MOS.
1815 */
1816 spa_config_valid_zaps(tvd, mtvd);
1817 }
1818 }
1819
1820 vdev_free(mrvd);
1821 spa_config_exit(spa, SCL_ALL, FTAG);
1822
1823 /*
1824 * Ensure we were able to validate the config.
1825 */
1826 return (rvd->vdev_guid_sum == spa->spa_uberblock.ub_guid_sum);
1827}
1828
1829/*
1830 * Check for missing log devices
1831 */
1832static boolean_t
1833spa_check_logs(spa_t *spa)
1834{
1835 boolean_t rv = B_FALSE;
1836 dsl_pool_t *dp = spa_get_dsl(spa);
1837
1838 switch (spa->spa_log_state) {
1839 case SPA_LOG_MISSING:
1840 /* need to recheck in case slog has been restored */
1841 case SPA_LOG_UNKNOWN:
1842 rv = (dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
1843 zil_check_log_chain, NULL, DS_FIND_CHILDREN) != 0);
1844 if (rv)
1845 spa_set_log_state(spa, SPA_LOG_MISSING);
1846 break;
1847 }
1848 return (rv);
1849}
1850
1851static boolean_t
1852spa_passivate_log(spa_t *spa)
1853{
1854 vdev_t *rvd = spa->spa_root_vdev;
1855 boolean_t slog_found = B_FALSE;
1856
1857 ASSERT(spa_config_held(spa, SCL_ALLOC, RW_WRITER));
1858
1859 if (!spa_has_slogs(spa))
1860 return (B_FALSE);
1861
1862 for (int c = 0; c < rvd->vdev_children; c++) {
1863 vdev_t *tvd = rvd->vdev_child[c];
1864 metaslab_group_t *mg = tvd->vdev_mg;
1865
1866 if (tvd->vdev_islog) {
1867 metaslab_group_passivate(mg);
1868 slog_found = B_TRUE;
1869 }
1870 }
1871
1872 return (slog_found);
1873}
1874
1875static void
1876spa_activate_log(spa_t *spa)
1877{
1878 vdev_t *rvd = spa->spa_root_vdev;
1879
1880 ASSERT(spa_config_held(spa, SCL_ALLOC, RW_WRITER));
1881
1882 for (int c = 0; c < rvd->vdev_children; c++) {
1883 vdev_t *tvd = rvd->vdev_child[c];
1884 metaslab_group_t *mg = tvd->vdev_mg;
1885
1886 if (tvd->vdev_islog)
1887 metaslab_group_activate(mg);
1888 }
1889}
1890
1891int
1892spa_offline_log(spa_t *spa)
1893{
1894 int error;
1895
1896 error = dmu_objset_find(spa_name(spa), zil_vdev_offline,
1897 NULL, DS_FIND_CHILDREN);
1898 if (error == 0) {
1899 /*
1900 * We successfully offlined the log device, sync out the
1901 * current txg so that the "stubby" block can be removed
1902 * by zil_sync().
1903 */
1904 txg_wait_synced(spa->spa_dsl_pool, 0);
1905 }
1906 return (error);
1907}
1908
1909static void
1910spa_aux_check_removed(spa_aux_vdev_t *sav)
1911{
1912 int i;
1913
1914 for (i = 0; i < sav->sav_count; i++)
1915 spa_check_removed(sav->sav_vdevs[i]);
1916}
1917
1918void
1919spa_claim_notify(zio_t *zio)
1920{
1921 spa_t *spa = zio->io_spa;
1922
1923 if (zio->io_error)
1924 return;
1925
1926 mutex_enter(&spa->spa_props_lock); /* any mutex will do */
1927 if (spa->spa_claim_max_txg < zio->io_bp->blk_birth)
1928 spa->spa_claim_max_txg = zio->io_bp->blk_birth;
1929 mutex_exit(&spa->spa_props_lock);
1930}
1931
1932typedef struct spa_load_error {
1933 uint64_t sle_meta_count;
1934 uint64_t sle_data_count;
1935} spa_load_error_t;
1936
1937static void
1938spa_load_verify_done(zio_t *zio)
1939{
1940 blkptr_t *bp = zio->io_bp;
1941 spa_load_error_t *sle = zio->io_private;
1942 dmu_object_type_t type = BP_GET_TYPE(bp);
1943 int error = zio->io_error;
1944 spa_t *spa = zio->io_spa;
1945
1946 abd_free(zio->io_abd);
1947 if (error) {
1948 if ((BP_GET_LEVEL(bp) != 0 || DMU_OT_IS_METADATA(type)) &&
1949 type != DMU_OT_INTENT_LOG)
1950 atomic_inc_64(&sle->sle_meta_count);
1951 else
1952 atomic_inc_64(&sle->sle_data_count);
1953 }
1954
1955 mutex_enter(&spa->spa_scrub_lock);
1956 spa->spa_scrub_inflight--;
1957 cv_broadcast(&spa->spa_scrub_io_cv);
1958 mutex_exit(&spa->spa_scrub_lock);
1959}
1960
1961/*
1962 * Maximum number of concurrent scrub i/os to create while verifying
1963 * a pool while importing it.
1964 */
1965int spa_load_verify_maxinflight = 10000;
1966boolean_t spa_load_verify_metadata = B_TRUE;
1967boolean_t spa_load_verify_data = B_TRUE;
1968
1969SYSCTL_INT(_vfs_zfs, OID_AUTO, spa_load_verify_maxinflight, CTLFLAG_RWTUN,
1970 &spa_load_verify_maxinflight, 0,
1971 "Maximum number of concurrent scrub I/Os to create while verifying a "
1972 "pool while importing it");
1973
1974SYSCTL_INT(_vfs_zfs, OID_AUTO, spa_load_verify_metadata, CTLFLAG_RWTUN,
1975 &spa_load_verify_metadata, 0,
1976 "Check metadata on import?");
1977
1978SYSCTL_INT(_vfs_zfs, OID_AUTO, spa_load_verify_data, CTLFLAG_RWTUN,
1979 &spa_load_verify_data, 0,
1980 "Check user data on import?");
1981
1982/*ARGSUSED*/
1983static int
1984spa_load_verify_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
1985 const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
1986{
1987 if (bp == NULL || BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp))
1988 return (0);
1989 /*
1990 * Note: normally this routine will not be called if
1991 * spa_load_verify_metadata is not set. However, it may be useful
1992 * to manually set the flag after the traversal has begun.
1993 */
1994 if (!spa_load_verify_metadata)
1995 return (0);
1996 if (!BP_IS_METADATA(bp) && !spa_load_verify_data)
1997 return (0);
1998
1999 zio_t *rio = arg;
2000 size_t size = BP_GET_PSIZE(bp);
2001
2002 mutex_enter(&spa->spa_scrub_lock);
2003 while (spa->spa_scrub_inflight >= spa_load_verify_maxinflight)
2004 cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
2005 spa->spa_scrub_inflight++;
2006 mutex_exit(&spa->spa_scrub_lock);
2007
2008 zio_nowait(zio_read(rio, spa, bp, abd_alloc_for_io(size, B_FALSE), size,
2009 spa_load_verify_done, rio->io_private, ZIO_PRIORITY_SCRUB,
2010 ZIO_FLAG_SPECULATIVE | ZIO_FLAG_CANFAIL |
2011 ZIO_FLAG_SCRUB | ZIO_FLAG_RAW, zb));
2012 return (0);
2013}
2014
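/*
 * dmu_objset_find_dp() callback that rejects dataset names that are too
 * long (>= ZFS_MAX_DATASET_NAME_LEN).
 */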
2015/* ARGSUSED */
2016int
2017verify_dataset_name_len(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg)
2018{
2019 if (dsl_dataset_namelen(ds) >= ZFS_MAX_DATASET_NAME_LEN)
2020 return (SET_ERROR(ENAMETOOLONG));
2021
2022 return (0);
2023}
2024
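/*
 * Verify the pool as part of load: check dataset name lengths, then (if
 * spa_load_verify_metadata is set) traverse the pool issuing scrub reads.
 * The error counts gathered here feed the rewind decision; if the rewind
 * policy is ZPOOL_NEVER_REWIND the verification is skipped entirely.
 */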
2025static int
2026spa_load_verify(spa_t *spa)
2027{
2028 zio_t *rio;
2029 spa_load_error_t sle = { 0 };
2030 zpool_rewind_policy_t policy;
2031 boolean_t verify_ok = B_FALSE;
2032 int error = 0;
2033
2034 zpool_get_rewind_policy(spa->spa_config, &policy);
2035
2036 if (policy.zrp_request & ZPOOL_NEVER_REWIND)
2037 return (0);
2038
2039 dsl_pool_config_enter(spa->spa_dsl_pool, FTAG);
2040 error = dmu_objset_find_dp(spa->spa_dsl_pool,
2041 spa->spa_dsl_pool->dp_root_dir_obj, verify_dataset_name_len, NULL,
2042 DS_FIND_CHILDREN);
2043 dsl_pool_config_exit(spa->spa_dsl_pool, FTAG);
2044 if (error != 0)
2045 return (error);
2046
2047 rio = zio_root(spa, NULL, &sle,
2048 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE);
2049
2050 if (spa_load_verify_metadata) {
2051 error = traverse_pool(spa, spa->spa_verify_min_txg,
2052 TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA,
2053 spa_load_verify_cb, rio);
2054 }
2055
2056 (void) zio_wait(rio);
2057
2058 spa->spa_load_meta_errors = sle.sle_meta_count;
2059 spa->spa_load_data_errors = sle.sle_data_count;
2060
2061 if (!error && sle.sle_meta_count <= policy.zrp_maxmeta &&
2062 sle.sle_data_count <= policy.zrp_maxdata) {
2063 int64_t loss = 0;
2064
2065 verify_ok = B_TRUE;
2066 spa->spa_load_txg = spa->spa_uberblock.ub_txg;
2067 spa->spa_load_txg_ts = spa->spa_uberblock.ub_timestamp;
2068
2069 loss = spa->spa_last_ubsync_txg_ts - spa->spa_load_txg_ts;
2070 VERIFY(nvlist_add_uint64(spa->spa_load_info,
2071 ZPOOL_CONFIG_LOAD_TIME, spa->spa_load_txg_ts) == 0);
2072 VERIFY(nvlist_add_int64(spa->spa_load_info,
2073 ZPOOL_CONFIG_REWIND_TIME, loss) == 0);
2074 VERIFY(nvlist_add_uint64(spa->spa_load_info,
2075 ZPOOL_CONFIG_LOAD_DATA_ERRORS, sle.sle_data_count) == 0);
2076 } else {
2077 spa->spa_load_max_txg = spa->spa_uberblock.ub_txg;
2078 }
2079
2080 if (error) {
2081 if (error != ENXIO && error != EIO)
2082 error = SET_ERROR(EIO);
2083 return (error);
2084 }
2085
2086 return (verify_ok ? 0 : EIO);
2087}
2088
2089/*
2090 * Find a value in the pool props object.
2091 */
2092static void
2093spa_prop_find(spa_t *spa, zpool_prop_t prop, uint64_t *val)
2094{
2095 (void) zap_lookup(spa->spa_meta_objset, spa->spa_pool_props_object,
2096 zpool_prop_to_name(prop), sizeof (uint64_t), 1, val);
2097}
2098
2099/*
2100 * Find a value in the pool directory object.
2101 */
2102static int
2103spa_dir_prop(spa_t *spa, const char *name, uint64_t *val)
2104{
2105 return (zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
2106 name, sizeof (uint64_t), 1, val));
2107}
2108
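/*
 * Convenience wrapper: mark the vdev as unopenable with the given aux
 * state and return the supplied error.
 */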
2109static int
2110spa_vdev_err(vdev_t *vdev, vdev_aux_t aux, int err)
2111{
2112 vdev_set_state(vdev, B_TRUE, VDEV_STATE_CANT_OPEN, aux);
2113 return (err);
2114}
2115
2116/*
2117 * Fix up config after a partly-completed split. This is done with the
2118 * ZPOOL_CONFIG_SPLIT nvlist. Both the splitting pool and the split-off
2119 * pool have that entry in their config, but only the splitting one contains
2120 * a list of all the guids of the vdevs that are being split off.
2121 *
2122 * This function determines what to do with that list: either rejoin
2123 * all the disks to the pool, or complete the splitting process. To attempt
2124 * the rejoin, each disk that is offlined is marked online again, and
2125 * we do a reopen() call. If the vdev label for every disk that was
2126 * marked online indicates it was successfully split off (VDEV_AUX_SPLIT_POOL)
2127 * then we call vdev_split() on each disk, and complete the split.
2128 *
2129 * Otherwise we leave the config alone, with all the vdevs in place in
2130 * the original pool.
2131 */
2132static void
2133spa_try_repair(spa_t *spa, nvlist_t *config)
2134{
2135 uint_t extracted;
2136 uint64_t *glist;
2137 uint_t i, gcount;
2138 nvlist_t *nvl;
2139 vdev_t **vd;
2140 boolean_t attempt_reopen;
2141
2142 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_SPLIT, &nvl) != 0)
2143 return;
2144
2145 /* check that the config is complete */
2146 if (nvlist_lookup_uint64_array(nvl, ZPOOL_CONFIG_SPLIT_LIST,
2147 &glist, &gcount) != 0)
2148 return;
2149
2150 vd = kmem_zalloc(gcount * sizeof (vdev_t *), KM_SLEEP);
2151
2152 /* attempt to online all the vdevs & validate */
2153 attempt_reopen = B_TRUE;
2154 for (i = 0; i < gcount; i++) {
2155 if (glist[i] == 0) /* vdev is hole */
2156 continue;
2157
2158 vd[i] = spa_lookup_by_guid(spa, glist[i], B_FALSE);
2159 if (vd[i] == NULL) {
2160 /*
2161 * Don't bother attempting to reopen the disks;
2162 * just do the split.
2163 */
2164 attempt_reopen = B_FALSE;
2165 } else {
2166 /* attempt to re-online it */
2167 vd[i]->vdev_offline = B_FALSE;
2168 }
2169 }
2170
2171 if (attempt_reopen) {
2172 vdev_reopen(spa->spa_root_vdev);
2173
2174 /* check each device to see what state it's in */
2175 for (extracted = 0, i = 0; i < gcount; i++) {
2176 if (vd[i] != NULL &&
2177 vd[i]->vdev_stat.vs_aux != VDEV_AUX_SPLIT_POOL)
2178 break;
2179 ++extracted;
2180 }
2181 }
2182
2183 /*
2184 * If every disk has been moved to the new pool, or if we never
2185 * even attempted to look at them, then we split them off for
2186 * good.
2187 */
2188 if (!attempt_reopen || gcount == extracted) {
2189 for (i = 0; i < gcount; i++)
2190 if (vd[i] != NULL)
2191 vdev_split(vd[i]);
2192 vdev_reopen(spa->spa_root_vdev);
2193 }
2194
2195 kmem_free(vd, gcount * sizeof (vdev_t *));
2196}
2197
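/*
 * Load an existing pool: pull the guid, version and txg out of the given
 * config, delegate the heavy lifting to spa_load_impl(), and post an
 * ereport on most failures.
 */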
2198static int
2199spa_load(spa_t *spa, spa_load_state_t state, spa_import_type_t type,
2200 boolean_t mosconfig)
2201{
2202 nvlist_t *config = spa->spa_config;
2203 char *ereport = FM_EREPORT_ZFS_POOL;
2204 char *comment;
2205 int error;
2206 uint64_t pool_guid;
2207 nvlist_t *nvl;
2208
2209 if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &pool_guid))
2210 return (SET_ERROR(EINVAL));
2211
2212 ASSERT(spa->spa_comment == NULL);
2213 if (nvlist_lookup_string(config, ZPOOL_CONFIG_COMMENT, &comment) == 0)
2214 spa->spa_comment = spa_strdup(comment);
2215
2216 /*
2217 * Versioning wasn't explicitly added to the label until later, so if
2218 * it's not present treat it as the initial version.
2219 */
2220 if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
2221 &spa->spa_ubsync.ub_version) != 0)
2222 spa->spa_ubsync.ub_version = SPA_VERSION_INITIAL;
2223
2224 (void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
2225 &spa->spa_config_txg);
2226
2227 if ((state == SPA_LOAD_IMPORT || state == SPA_LOAD_TRYIMPORT) &&
2228 spa_guid_exists(pool_guid, 0)) {
2229 error = SET_ERROR(EEXIST);
2230 } else {
2231 spa->spa_config_guid = pool_guid;
2232
2233 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_SPLIT,
2234 &nvl) == 0) {
2235 VERIFY(nvlist_dup(nvl, &spa->spa_config_splitting,
2236 KM_SLEEP) == 0);
2237 }
2238
2239 nvlist_free(spa->spa_load_info);
2240 spa->spa_load_info = fnvlist_alloc();
2241
2242 gethrestime(&spa->spa_loaded_ts);
2243 error = spa_load_impl(spa, pool_guid, config, state, type,
2244 mosconfig, &ereport);
2245 }
2246
2247 /*
2248 * Don't count references from objsets that are already closed
2249 * and are making their way through the eviction process.
2250 */
2251 spa_evicting_os_wait(spa);
2252 spa->spa_minref = refcount_count(&spa->spa_refcount);
2253 if (error) {
2254 if (error != EEXIST) {
2255 spa->spa_loaded_ts.tv_sec = 0;
2256 spa->spa_loaded_ts.tv_nsec = 0;
2257 }
2258 if (error != EBADF) {
2259 zfs_ereport_post(ereport, spa, NULL, NULL, 0, 0);
2260 }
2261 }
2262 spa->spa_load_state = error ? SPA_LOAD_ERROR : SPA_LOAD_NONE;
2263 spa->spa_ena = 0;
2264
2265 return (error);
2266}
2267
2268/*
2269 * Count the number of per-vdev ZAPs associated with all of the vdevs in the
2270 * vdev tree rooted in the given vd, and ensure that each ZAP is present in the
2271 * spa's per-vdev ZAP list.
2272 */
2273static uint64_t
2274vdev_count_verify_zaps(vdev_t *vd)
2275{
2276 spa_t *spa = vd->vdev_spa;
2277 uint64_t total = 0;
2278 if (vd->vdev_top_zap != 0) {
2279 total++;
2280 ASSERT0(zap_lookup_int(spa->spa_meta_objset,
2281 spa->spa_all_vdev_zaps, vd->vdev_top_zap));
2282 }
2283 if (vd->vdev_leaf_zap != 0) {
2284 total++;
2285 ASSERT0(zap_lookup_int(spa->spa_meta_objset,
2286 spa->spa_all_vdev_zaps, vd->vdev_leaf_zap));
2287 }
2288
2289 for (uint64_t i = 0; i < vd->vdev_children; i++) {
2290 total += vdev_count_verify_zaps(vd->vdev_child[i]);
2291 }
2292
2293 return (total);
2294}
2295
2296/*
2297 * Load an existing storage pool, using the pool's builtin spa_config as a
2298 * source of configuration information.
2299 */
2300static int
2301spa_load_impl(spa_t *spa, uint64_t pool_guid, nvlist_t *config,
2302 spa_load_state_t state, spa_import_type_t type, boolean_t mosconfig,
2303 char **ereport)
2304{
2305 int error = 0;
2306 nvlist_t *nvroot = NULL;
2307 nvlist_t *label;
2308 vdev_t *rvd;
2309 uberblock_t *ub = &spa->spa_uberblock;
2310 uint64_t children, config_cache_txg = spa->spa_config_txg;
2311 int orig_mode = spa->spa_mode;
2312 int parse;
2313 uint64_t obj;
2314 boolean_t missing_feat_write = B_FALSE;
2315
2316 /*
2317 * If this is an untrusted config, access the pool in read-only mode.
2318 * This prevents things like resilvering recently removed devices.
2319 */
2320 if (!mosconfig)
2321 spa->spa_mode = FREAD;
2322
2323 ASSERT(MUTEX_HELD(&spa_namespace_lock));
2324
2325 spa->spa_load_state = state;
2326
2327 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvroot))
2328 return (SET_ERROR(EINVAL));
2329
2330 parse = (type == SPA_IMPORT_EXISTING ?
2331 VDEV_ALLOC_LOAD : VDEV_ALLOC_SPLIT);
2332
2333 /*
2334 * Create "The Godfather" zio to hold all async IOs
2335 */
2336 spa->spa_async_zio_root = kmem_alloc(max_ncpus * sizeof (void *),
2337 KM_SLEEP);
2338 for (int i = 0; i < max_ncpus; i++) {
2339 spa->spa_async_zio_root[i] = zio_root(spa, NULL, NULL,
2340 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
2341 ZIO_FLAG_GODFATHER);
2342 }
2343
2344 /*
2345 * Parse the configuration into a vdev tree. We explicitly set the
2346 * value that will be returned by spa_version() since parsing the
2347 * configuration requires knowing the version number.
2348 */
2349 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
2350 error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, parse);
2351 spa_config_exit(spa, SCL_ALL, FTAG);
2352
2353 if (error != 0)
2354 return (error);
2355
2356 ASSERT(spa->spa_root_vdev == rvd);
2357 ASSERT3U(spa->spa_min_ashift, >=, SPA_MINBLOCKSHIFT);
2358 ASSERT3U(spa->spa_max_ashift, <=, SPA_MAXBLOCKSHIFT);
2359
2360 if (type != SPA_IMPORT_ASSEMBLE) {
2361 ASSERT(spa_guid(spa) == pool_guid);
2362 }
2363
2364 /*
2365 * Try to open all vdevs, loading each label in the process.
2366 */
2367 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
2368 error = vdev_open(rvd);
2369 spa_config_exit(spa, SCL_ALL, FTAG);
2370 if (error != 0)
2371 return (error);
2372
2373 /*
2374 * We need to validate the vdev labels against the configuration that
2375 * we have in hand, which is dependent on the setting of mosconfig. If
2376 * mosconfig is true then we're validating the vdev labels based on
2377 * that config. Otherwise, we're validating against the cached config
2378 * (zpool.cache) that was read when we loaded the zfs module, and then
2379 * later we will recursively call spa_load() and validate against
2380 * the vdev config.
2381 *
2382 * If we're assembling a new pool that's been split off from an
2383 * existing pool, the labels haven't yet been updated so we skip
2384 * validation for now.
2385 */
2386 if (type != SPA_IMPORT_ASSEMBLE) {
2387 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
2388 error = vdev_validate(rvd, mosconfig);
2389 spa_config_exit(spa, SCL_ALL, FTAG);
2390
2391 if (error != 0)
2392 return (error);
2393
2394 if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN)
2395 return (SET_ERROR(ENXIO));
2396 }
2397
2398 /*
2399 * Find the best uberblock.
2400 */
2401 vdev_uberblock_load(rvd, ub, &label);
2402
2403 /*
2404 * If we weren't able to find a single valid uberblock, return failure.
2405 */
2406 if (ub->ub_txg == 0) {
2407 nvlist_free(label);
2408 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, ENXIO));
2409 }
2410
2411 /*
2412 * If the pool has an unsupported version we can't open it.
2413 */
2414 if (!SPA_VERSION_IS_SUPPORTED(ub->ub_version)) {
2415 nvlist_free(label);
2416 return (spa_vdev_err(rvd, VDEV_AUX_VERSION_NEWER, ENOTSUP));
2417 }
2418
2419 if (ub->ub_version >= SPA_VERSION_FEATURES) {
2420 nvlist_t *features;
2421
2422 /*
2423 * If we weren't able to find what's necessary for reading the
2424 * MOS in the label, return failure.
2425 */
2426 if (label == NULL || nvlist_lookup_nvlist(label,
2427 ZPOOL_CONFIG_FEATURES_FOR_READ, &features) != 0) {
2428 nvlist_free(label);
2429 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA,
2430 ENXIO));
2431 }
2432
2433 /*
2434 * Update our in-core representation with the definitive values
2435 * from the label.
2436 */
2437 nvlist_free(spa->spa_label_features);
2438 VERIFY(nvlist_dup(features, &spa->spa_label_features, 0) == 0);
2439 }
2440
2441 nvlist_free(label);
2442
2443 /*
2444 * Look through entries in the label nvlist's features_for_read. If
2445 * there is a feature listed there which we don't understand then we
2446	 * cannot open the pool.
2447 */
2448 if (ub->ub_version >= SPA_VERSION_FEATURES) {
2449 nvlist_t *unsup_feat;
2450
2451 VERIFY(nvlist_alloc(&unsup_feat, NV_UNIQUE_NAME, KM_SLEEP) ==
2452 0);
2453
2454 for (nvpair_t *nvp = nvlist_next_nvpair(spa->spa_label_features,
2455 NULL); nvp != NULL;
2456 nvp = nvlist_next_nvpair(spa->spa_label_features, nvp)) {
2457 if (!zfeature_is_supported(nvpair_name(nvp))) {
2458 VERIFY(nvlist_add_string(unsup_feat,
2459 nvpair_name(nvp), "") == 0);
2460 }
2461 }
2462
2463 if (!nvlist_empty(unsup_feat)) {
2464 VERIFY(nvlist_add_nvlist(spa->spa_load_info,
2465 ZPOOL_CONFIG_UNSUP_FEAT, unsup_feat) == 0);
2466 nvlist_free(unsup_feat);
2467 return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT,
2468 ENOTSUP));
2469 }
2470
2471 nvlist_free(unsup_feat);
2472 }
2473
2474 /*
2475 * If the vdev guid sum doesn't match the uberblock, we have an
2476 * incomplete configuration. We first check to see if the pool
2477	 * is aware of the complete config (i.e. ZPOOL_CONFIG_VDEV_CHILDREN).
2478 * If it is, defer the vdev_guid_sum check till later so we
2479 * can handle missing vdevs.
2480 */
2481 if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VDEV_CHILDREN,
2482 &children) != 0 && mosconfig && type != SPA_IMPORT_ASSEMBLE &&
2483 rvd->vdev_guid_sum != ub->ub_guid_sum)
2484 return (spa_vdev_err(rvd, VDEV_AUX_BAD_GUID_SUM, ENXIO));
2485
2486 if (type != SPA_IMPORT_ASSEMBLE && spa->spa_config_splitting) {
2487 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
2488 spa_try_repair(spa, config);
2489 spa_config_exit(spa, SCL_ALL, FTAG);
2490 nvlist_free(spa->spa_config_splitting);
2491 spa->spa_config_splitting = NULL;
2492 }
2493
2494 /*
2495 * Initialize internal SPA structures.
2496 */
2497 spa->spa_state = POOL_STATE_ACTIVE;
2498 spa->spa_ubsync = spa->spa_uberblock;
2499 spa->spa_verify_min_txg = spa->spa_extreme_rewind ?
2500 TXG_INITIAL - 1 : spa_last_synced_txg(spa) - TXG_DEFER_SIZE - 1;
2501 spa->spa_first_txg = spa->spa_last_ubsync_txg ?
2502 spa->spa_last_ubsync_txg : spa_last_synced_txg(spa) + 1;
2503 spa->spa_claim_max_txg = spa->spa_first_txg;
2504 spa->spa_prev_software_version = ub->ub_software_version;
2505
2506 error = dsl_pool_init(spa, spa->spa_first_txg, &spa->spa_dsl_pool);
2507 if (error)
2508 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2509 spa->spa_meta_objset = spa->spa_dsl_pool->dp_meta_objset;
2510
2511 if (spa_dir_prop(spa, DMU_POOL_CONFIG, &spa->spa_config_object) != 0)
2512 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2513
2514 if (spa_version(spa) >= SPA_VERSION_FEATURES) {
2515 boolean_t missing_feat_read = B_FALSE;
2516 nvlist_t *unsup_feat, *enabled_feat;
2517
2518 if (spa_dir_prop(spa, DMU_POOL_FEATURES_FOR_READ,
2519 &spa->spa_feat_for_read_obj) != 0) {
2520 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2521 }
2522
2523 if (spa_dir_prop(spa, DMU_POOL_FEATURES_FOR_WRITE,
2524 &spa->spa_feat_for_write_obj) != 0) {
2525 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2526 }
2527
2528 if (spa_dir_prop(spa, DMU_POOL_FEATURE_DESCRIPTIONS,
2529 &spa->spa_feat_desc_obj) != 0) {
2530 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2531 }
2532
2533 enabled_feat = fnvlist_alloc();
2534 unsup_feat = fnvlist_alloc();
2535
2536 if (!spa_features_check(spa, B_FALSE,
2537 unsup_feat, enabled_feat))
2538 missing_feat_read = B_TRUE;
2539
2540 if (spa_writeable(spa) || state == SPA_LOAD_TRYIMPORT) {
2541 if (!spa_features_check(spa, B_TRUE,
2542 unsup_feat, enabled_feat)) {
2543 missing_feat_write = B_TRUE;
2544 }
2545 }
2546
2547 fnvlist_add_nvlist(spa->spa_load_info,
2548 ZPOOL_CONFIG_ENABLED_FEAT, enabled_feat);
2549
2550 if (!nvlist_empty(unsup_feat)) {
2551 fnvlist_add_nvlist(spa->spa_load_info,
2552 ZPOOL_CONFIG_UNSUP_FEAT, unsup_feat);
2553 }
2554
2555 fnvlist_free(enabled_feat);
2556 fnvlist_free(unsup_feat);
2557
2558 if (!missing_feat_read) {
2559 fnvlist_add_boolean(spa->spa_load_info,
2560 ZPOOL_CONFIG_CAN_RDONLY);
2561 }
2562
2563 /*
2564 * If the state is SPA_LOAD_TRYIMPORT, our objective is
2565 * twofold: to determine whether the pool is available for
2566 * import in read-write mode and (if it is not) whether the
2567 * pool is available for import in read-only mode. If the pool
2568 * is available for import in read-write mode, it is displayed
2569 * as available in userland; if it is not available for import
2570 * in read-only mode, it is displayed as unavailable in
2571 * userland. If the pool is available for import in read-only
2572 * mode but not read-write mode, it is displayed as unavailable
2573 * in userland with a special note that the pool is actually
2574 * available for open in read-only mode.
2575 *
2576 * As a result, if the state is SPA_LOAD_TRYIMPORT and we are
2577 * missing a feature for write, we must first determine whether
2578 * the pool can be opened read-only before returning to
2579 * userland in order to know whether to display the
2580 * abovementioned note.
2581 */
2582 if (missing_feat_read || (missing_feat_write &&
2583 spa_writeable(spa))) {
2584 return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT,
2585 ENOTSUP));
2586 }
2587
2588 /*
2589 * Load refcounts for ZFS features from disk into an in-memory
2590 * cache during SPA initialization.
2591 */
2592 for (spa_feature_t i = 0; i < SPA_FEATURES; i++) {
2593 uint64_t refcount;
2594
2595 error = feature_get_refcount_from_disk(spa,
2596 &spa_feature_table[i], &refcount);
2597 if (error == 0) {
2598 spa->spa_feat_refcount_cache[i] = refcount;
2599 } else if (error == ENOTSUP) {
2600 spa->spa_feat_refcount_cache[i] =
2601 SPA_FEATURE_DISABLED;
2602 } else {
2603 return (spa_vdev_err(rvd,
2604 VDEV_AUX_CORRUPT_DATA, EIO));
2605 }
2606 }
2607 }
2608
2609 if (spa_feature_is_active(spa, SPA_FEATURE_ENABLED_TXG)) {
2610 if (spa_dir_prop(spa, DMU_POOL_FEATURE_ENABLED_TXG,
2611 &spa->spa_feat_enabled_txg_obj) != 0)
2612 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2613 }
2614
2615 spa->spa_is_initializing = B_TRUE;
2616 error = dsl_pool_open(spa->spa_dsl_pool);
2617 spa->spa_is_initializing = B_FALSE;
2618 if (error != 0)
2619 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2620
2621 if (!mosconfig) {
2622 uint64_t hostid;
2623 nvlist_t *policy = NULL, *nvconfig;
2624
2625 if (load_nvlist(spa, spa->spa_config_object, &nvconfig) != 0)
2626 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2627
2628 if (!spa_is_root(spa) && nvlist_lookup_uint64(nvconfig,
2629 ZPOOL_CONFIG_HOSTID, &hostid) == 0) {
2630 char *hostname;
2631 unsigned long myhostid = 0;
2632
2633 VERIFY(nvlist_lookup_string(nvconfig,
2634 ZPOOL_CONFIG_HOSTNAME, &hostname) == 0);
2635
2636#ifdef _KERNEL
2637 myhostid = zone_get_hostid(NULL);
2638#else /* _KERNEL */
2639 /*
2640 * We're emulating the system's hostid in userland, so
2641 * we can't use zone_get_hostid().
2642 */
2643 (void) ddi_strtoul(hw_serial, NULL, 10, &myhostid);
2644#endif /* _KERNEL */
2645 if (check_hostid && hostid != 0 && myhostid != 0 &&
2646 hostid != myhostid) {
2647 nvlist_free(nvconfig);
2648 cmn_err(CE_WARN, "pool '%s' could not be "
2649 "loaded as it was last accessed by "
2650 "another system (host: %s hostid: 0x%lx). "
2651 "See: http://illumos.org/msg/ZFS-8000-EY",
2652 spa_name(spa), hostname,
2653 (unsigned long)hostid);
2654 return (SET_ERROR(EBADF));
2655 }
2656 }
2657 if (nvlist_lookup_nvlist(spa->spa_config,
2658 ZPOOL_REWIND_POLICY, &policy) == 0)
2659 VERIFY(nvlist_add_nvlist(nvconfig,
2660 ZPOOL_REWIND_POLICY, policy) == 0);
2661
2662 spa_config_set(spa, nvconfig);
2663 spa_unload(spa);
2664 spa_deactivate(spa);
2665 spa_activate(spa, orig_mode);
2666
2667 return (spa_load(spa, state, SPA_IMPORT_EXISTING, B_TRUE));
2668 }
2669
2670 /* Grab the secret checksum salt from the MOS. */
2671 error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
2672 DMU_POOL_CHECKSUM_SALT, 1,
2673 sizeof (spa->spa_cksum_salt.zcs_bytes),
2674 spa->spa_cksum_salt.zcs_bytes);
2675 if (error == ENOENT) {
2676 /* Generate a new salt for subsequent use */
2677 (void) random_get_pseudo_bytes(spa->spa_cksum_salt.zcs_bytes,
2678 sizeof (spa->spa_cksum_salt.zcs_bytes));
2679 } else if (error != 0) {
2680 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2681 }
2682
2683 if (spa_dir_prop(spa, DMU_POOL_SYNC_BPOBJ, &obj) != 0)
2684 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2685 error = bpobj_open(&spa->spa_deferred_bpobj, spa->spa_meta_objset, obj);
2686 if (error != 0)
2687 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2688
2689 /*
2690 * Load the bit that tells us to use the new accounting function
2691 * (raid-z deflation). If we have an older pool, this will not
2692 * be present.
2693 */
2694 error = spa_dir_prop(spa, DMU_POOL_DEFLATE, &spa->spa_deflate);
2695 if (error != 0 && error != ENOENT)
2696 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2697
2698 error = spa_dir_prop(spa, DMU_POOL_CREATION_VERSION,
2699 &spa->spa_creation_version);
2700 if (error != 0 && error != ENOENT)
2701 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2702
2703 /*
2704 * Load the persistent error log. If we have an older pool, this will
2705 * not be present.
2706 */
2707 error = spa_dir_prop(spa, DMU_POOL_ERRLOG_LAST, &spa->spa_errlog_last);
2708 if (error != 0 && error != ENOENT)
2709 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2710
2711 error = spa_dir_prop(spa, DMU_POOL_ERRLOG_SCRUB,
2712 &spa->spa_errlog_scrub);
2713 if (error != 0 && error != ENOENT)
2714 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2715
2716 /*
2717 * Load the history object. If we have an older pool, this
2718 * will not be present.
2719 */
2720 error = spa_dir_prop(spa, DMU_POOL_HISTORY, &spa->spa_history);
2721 if (error != 0 && error != ENOENT)
2722 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2723
2724 /*
2725 * Load the per-vdev ZAP map. If we have an older pool, this will not
2726 * be present; in this case, defer its creation to a later time to
2727 * avoid dirtying the MOS this early / out of sync context. See
2728 * spa_sync_config_object.
2729 */
2730
2731 /* The sentinel is only available in the MOS config. */
2732 nvlist_t *mos_config;
2733 if (load_nvlist(spa, spa->spa_config_object, &mos_config) != 0)
2734 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2735
2736 error = spa_dir_prop(spa, DMU_POOL_VDEV_ZAP_MAP,
2737 &spa->spa_all_vdev_zaps);
2738
2739 if (error == ENOENT) {
2740 VERIFY(!nvlist_exists(mos_config,
2741 ZPOOL_CONFIG_HAS_PER_VDEV_ZAPS));
2742 spa->spa_avz_action = AVZ_ACTION_INITIALIZE;
2743 ASSERT0(vdev_count_verify_zaps(spa->spa_root_vdev));
2744 } else if (error != 0) {
2745 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2746 } else if (!nvlist_exists(mos_config, ZPOOL_CONFIG_HAS_PER_VDEV_ZAPS)) {
2747 /*
2748 * An older version of ZFS overwrote the sentinel value, so
2749 * we have orphaned per-vdev ZAPs in the MOS. Defer their
2750 * destruction to later; see spa_sync_config_object.
2751 */
2752 spa->spa_avz_action = AVZ_ACTION_DESTROY;
2753 /*
2754 * We're assuming that no vdevs have had their ZAPs created
2755 * before this. Better be sure of it.
2756 */
2757 ASSERT0(vdev_count_verify_zaps(spa->spa_root_vdev));
2758 }
2759 nvlist_free(mos_config);
2760
2761 /*
2762 * If we're assembling the pool from the split-off vdevs of
2763 * an existing pool, we don't want to attach the spares & cache
2764 * devices.
2765 */
2766
2767 /*
2768 * Load any hot spares for this pool.
2769 */
2770 error = spa_dir_prop(spa, DMU_POOL_SPARES, &spa->spa_spares.sav_object);
2771 if (error != 0 && error != ENOENT)
2772 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2773 if (error == 0 && type != SPA_IMPORT_ASSEMBLE) {
2774 ASSERT(spa_version(spa) >= SPA_VERSION_SPARES);
2775 if (load_nvlist(spa, spa->spa_spares.sav_object,
2776 &spa->spa_spares.sav_config) != 0)
2777 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2778
2779 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
2780 spa_load_spares(spa);
2781 spa_config_exit(spa, SCL_ALL, FTAG);
2782 } else if (error == 0) {
2783 spa->spa_spares.sav_sync = B_TRUE;
2784 }
2785
2786 /*
2787 * Load any level 2 ARC devices for this pool.
2788 */
2789 error = spa_dir_prop(spa, DMU_POOL_L2CACHE,
2790 &spa->spa_l2cache.sav_object);
2791 if (error != 0 && error != ENOENT)
2792 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2793 if (error == 0 && type != SPA_IMPORT_ASSEMBLE) {
2794 ASSERT(spa_version(spa) >= SPA_VERSION_L2CACHE);
2795 if (load_nvlist(spa, spa->spa_l2cache.sav_object,
2796 &spa->spa_l2cache.sav_config) != 0)
2797 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2798
2799 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
2800 spa_load_l2cache(spa);
2801 spa_config_exit(spa, SCL_ALL, FTAG);
2802 } else if (error == 0) {
2803 spa->spa_l2cache.sav_sync = B_TRUE;
2804 }
2805
2806 spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION);
2807
2808 error = spa_dir_prop(spa, DMU_POOL_PROPS, &spa->spa_pool_props_object);
2809 if (error && error != ENOENT)
2810 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2811
2812 if (error == 0) {
2813 uint64_t autoreplace;
2814
2815 spa_prop_find(spa, ZPOOL_PROP_BOOTFS, &spa->spa_bootfs);
2816 spa_prop_find(spa, ZPOOL_PROP_AUTOREPLACE, &autoreplace);
2817 spa_prop_find(spa, ZPOOL_PROP_DELEGATION, &spa->spa_delegation);
2818 spa_prop_find(spa, ZPOOL_PROP_FAILUREMODE, &spa->spa_failmode);
2819 spa_prop_find(spa, ZPOOL_PROP_AUTOEXPAND, &spa->spa_autoexpand);
2820 spa_prop_find(spa, ZPOOL_PROP_DEDUPDITTO,
2821 &spa->spa_dedup_ditto);
2822
2823 spa->spa_autoreplace = (autoreplace != 0);
2824 }
2825
2826 /*
2827 * If the 'autoreplace' property is set, then post a resource notifying
2828 * the ZFS DE that it should not issue any faults for unopenable
2829 * devices. We also iterate over the vdevs, and post a sysevent for any
2830 * unopenable vdevs so that the normal autoreplace handler can take
2831 * over.
2832 */
2833 if (spa->spa_autoreplace && state != SPA_LOAD_TRYIMPORT) {
2834 spa_check_removed(spa->spa_root_vdev);
2835 /*
2836 * For the import case, this is done in spa_import(), because
2837 * at this point we're using the spare definitions from
2838 * the MOS config, not necessarily from the userland config.
2839 */
2840 if (state != SPA_LOAD_IMPORT) {
2841 spa_aux_check_removed(&spa->spa_spares);
2842 spa_aux_check_removed(&spa->spa_l2cache);
2843 }
2844 }
2845
2846 /*
2847 * Load the vdev state for all toplevel vdevs.
2848 */
2849 vdev_load(rvd);
2850
2851 /*
2852 * Propagate the leaf DTLs we just loaded all the way up the tree.
2853 */
2854 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
2855 vdev_dtl_reassess(rvd, 0, 0, B_FALSE);
2856 spa_config_exit(spa, SCL_ALL, FTAG);
2857
2858 /*
2859 * Load the DDTs (dedup tables).
2860 */
2861 error = ddt_load(spa);
2862 if (error != 0)
2863 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2864
2865 spa_update_dspace(spa);
2866
2867 /*
2868 * Validate the config, using the MOS config to fill in any
2869 * information which might be missing. If we fail to validate
2870 * the config then declare the pool unfit for use. If we're
2871 * assembling a pool from a split, the log is not transferred
2872 * over.
2873 */
2874 if (type != SPA_IMPORT_ASSEMBLE) {
2875 nvlist_t *nvconfig;
2876
2877 if (load_nvlist(spa, spa->spa_config_object, &nvconfig) != 0)
2878 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2879
2880 if (!spa_config_valid(spa, nvconfig)) {
2881 nvlist_free(nvconfig);
2882 return (spa_vdev_err(rvd, VDEV_AUX_BAD_GUID_SUM,
2883 ENXIO));
2884 }
2885 nvlist_free(nvconfig);
2886
2887 /*
2888 * Now that we've validated the config, check the state of the
2889 * root vdev. If it can't be opened, it indicates one or
2890 * more toplevel vdevs are faulted.
2891 */
2892 if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN)
2893 return (SET_ERROR(ENXIO));
2894
2895 if (spa_writeable(spa) && spa_check_logs(spa)) {
2896 *ereport = FM_EREPORT_ZFS_LOG_REPLAY;
2897 return (spa_vdev_err(rvd, VDEV_AUX_BAD_LOG, ENXIO));
2898 }
2899 }
2900
2901 if (missing_feat_write) {
2902 ASSERT(state == SPA_LOAD_TRYIMPORT);
2903
2904 /*
2905 * At this point, we know that we can open the pool in
2906 * read-only mode but not read-write mode. We now have enough
2907 * information and can return to userland.
2908 */
2909 return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT, ENOTSUP));
2910 }
2911
2912 /*
2913 * We've successfully opened the pool, verify that we're ready
2914 * to start pushing transactions.
2915 */
2916 if (state != SPA_LOAD_TRYIMPORT) {
2917 if (error = spa_load_verify(spa))
2918 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA,
2919 error));
2920 }
2921
2922 if (spa_writeable(spa) && (state == SPA_LOAD_RECOVER ||
2923 spa->spa_load_max_txg == UINT64_MAX)) {
2924 dmu_tx_t *tx;
2925 int need_update = B_FALSE;
2926 dsl_pool_t *dp = spa_get_dsl(spa);
2927
2928 ASSERT(state != SPA_LOAD_TRYIMPORT);
2929
2930 /*
2931 * Claim log blocks that haven't been committed yet.
2932 * This must all happen in a single txg.
2933 * Note: spa_claim_max_txg is updated by spa_claim_notify(),
2934 * invoked from zil_claim_log_block()'s i/o done callback.
2935 * Price of rollback is that we abandon the log.
2936 */
2937 spa->spa_claiming = B_TRUE;
2938
2939 tx = dmu_tx_create_assigned(dp, spa_first_txg(spa));
2940 (void) dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
2941 zil_claim, tx, DS_FIND_CHILDREN);
2942 dmu_tx_commit(tx);
2943
2944 spa->spa_claiming = B_FALSE;
2945
2946 spa_set_log_state(spa, SPA_LOG_GOOD);
2947 spa->spa_sync_on = B_TRUE;
2948 txg_sync_start(spa->spa_dsl_pool);
2949
2950 /*
2951 * Wait for all claims to sync. We sync up to the highest
2952 * claimed log block birth time so that claimed log blocks
2953 * don't appear to be from the future. spa_claim_max_txg
2954 * will have been set for us by either zil_check_log_chain()
2955 * (invoked from spa_check_logs()) or zil_claim() above.
2956 */
2957 txg_wait_synced(spa->spa_dsl_pool, spa->spa_claim_max_txg);
2958
2959 /*
2960 * If the config cache is stale, or we have uninitialized
2961 * metaslabs (see spa_vdev_add()), then update the config.
2962 *
2963 * If this is a verbatim import, trust the current
2964 * in-core spa_config and update the disk labels.
2965 */
2966 if (config_cache_txg != spa->spa_config_txg ||
2967 state == SPA_LOAD_IMPORT ||
2968 state == SPA_LOAD_RECOVER ||
2969 (spa->spa_import_flags & ZFS_IMPORT_VERBATIM))
2970 need_update = B_TRUE;
2971
2972 for (int c = 0; c < rvd->vdev_children; c++)
2973 if (rvd->vdev_child[c]->vdev_ms_array == 0)
2974 need_update = B_TRUE;
2975
2976 /*
2977		 * Update the config cache asynchronously in case we're the
2978 * root pool, in which case the config cache isn't writable yet.
2979 */
2980 if (need_update)
2981 spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
2982
2983 /*
2984 * Check all DTLs to see if anything needs resilvering.
2985 */
2986 if (!dsl_scan_resilvering(spa->spa_dsl_pool) &&
2987 vdev_resilver_needed(rvd, NULL, NULL))
2988 spa_async_request(spa, SPA_ASYNC_RESILVER);
2989
2990 /*
2991 * Log the fact that we booted up (so that we can detect if
2992 * we rebooted in the middle of an operation).
2993 */
2994 spa_history_log_version(spa, "open");
2995
2996 /*
2997 * Delete any inconsistent datasets.
2998 */
2999 (void) dmu_objset_find(spa_name(spa),
3000 dsl_destroy_inconsistent, NULL, DS_FIND_CHILDREN);
3001
3002 /*
3003 * Clean up any stale temporary dataset userrefs.
3004 */
3005 dsl_pool_clean_tmp_userrefs(spa->spa_dsl_pool);
3006 }
3007
3008 return (0);
3009}
3010
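/*
 * Unload the pool and retry spa_load() against the transaction group just
 * before the current uberblock; used by spa_load_best() while rewinding.
 */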
3011static int
3012spa_load_retry(spa_t *spa, spa_load_state_t state, int mosconfig)
3013{
3014 int mode = spa->spa_mode;
3015
3016 spa_unload(spa);
3017 spa_deactivate(spa);
3018
3019 spa->spa_load_max_txg = spa->spa_uberblock.ub_txg - 1;
3020
3021 spa_activate(spa, mode);
3022 spa_async_suspend(spa);
3023
3024 return (spa_load(spa, state, SPA_IMPORT_EXISTING, mosconfig));
3025}
3026
3027/*
3028 * If spa_load() fails this function will try loading prior txg's. If
3029 * 'state' is SPA_LOAD_RECOVER and one of these loads succeeds the pool
3030 * will be rewound to that txg. If 'state' is not SPA_LOAD_RECOVER this
3031 * function will not rewind the pool and will return the same error as
3032 * spa_load().
3033 */
3034static int
3035spa_load_best(spa_t *spa, spa_load_state_t state, int mosconfig,
3036 uint64_t max_request, int rewind_flags)
3037{
3038 nvlist_t *loadinfo = NULL;
3039 nvlist_t *config = NULL;
3040 int load_error, rewind_error;
3041 uint64_t safe_rewind_txg;
3042 uint64_t min_txg;
3043
3044 if (spa->spa_load_txg && state == SPA_LOAD_RECOVER) {
3045 spa->spa_load_max_txg = spa->spa_load_txg;
3046 spa_set_log_state(spa, SPA_LOG_CLEAR);
3047 } else {
3048 spa->spa_load_max_txg = max_request;
3049 if (max_request != UINT64_MAX)
3050 spa->spa_extreme_rewind = B_TRUE;
3051 }
3052
3053 load_error = rewind_error = spa_load(spa, state, SPA_IMPORT_EXISTING,
3054 mosconfig);
3055 if (load_error == 0)
3056 return (0);
3057
3058 if (spa->spa_root_vdev != NULL)
3059 config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
3060
3061 spa->spa_last_ubsync_txg = spa->spa_uberblock.ub_txg;
3062 spa->spa_last_ubsync_txg_ts = spa->spa_uberblock.ub_timestamp;
3063
3064 if (rewind_flags & ZPOOL_NEVER_REWIND) {
3065 nvlist_free(config);
3066 return (load_error);
3067 }
3068
3069 if (state == SPA_LOAD_RECOVER) {
3070 /* Price of rolling back is discarding txgs, including log */
3071 spa_set_log_state(spa, SPA_LOG_CLEAR);
3072 } else {
3073 /*
3074 * If we aren't rolling back save the load info from our first
3075 * import attempt so that we can restore it after attempting
3076 * to rewind.
3077 */
3078 loadinfo = spa->spa_load_info;
3079 spa->spa_load_info = fnvlist_alloc();
3080 }
3081
3082 spa->spa_load_max_txg = spa->spa_last_ubsync_txg;
3083 safe_rewind_txg = spa->spa_last_ubsync_txg - TXG_DEFER_SIZE;
3084 min_txg = (rewind_flags & ZPOOL_EXTREME_REWIND) ?
3085 TXG_INITIAL : safe_rewind_txg;
3086
3087 /*
3088 * Continue as long as we're finding errors, we're still within
3089 * the acceptable rewind range, and we're still finding uberblocks
3090 */
3091 while (rewind_error && spa->spa_uberblock.ub_txg >= min_txg &&
3092 spa->spa_uberblock.ub_txg <= spa->spa_load_max_txg) {
3093 if (spa->spa_load_max_txg < safe_rewind_txg)
3094 spa->spa_extreme_rewind = B_TRUE;
3095 rewind_error = spa_load_retry(spa, state, mosconfig);
3096 }
3097
3098 spa->spa_extreme_rewind = B_FALSE;
3099 spa->spa_load_max_txg = UINT64_MAX;
3100
3101 if (config && (rewind_error || state != SPA_LOAD_RECOVER))
3102 spa_config_set(spa, config);
3103
3104 if (state == SPA_LOAD_RECOVER) {
3105 ASSERT3P(loadinfo, ==, NULL);
3106 return (rewind_error);
3107 } else {
3108 /* Store the rewind info as part of the initial load info */
3109 fnvlist_add_nvlist(loadinfo, ZPOOL_CONFIG_REWIND_INFO,
3110 spa->spa_load_info);
3111
3112 /* Restore the initial load info */
3113 fnvlist_free(spa->spa_load_info);
3114 spa->spa_load_info = loadinfo;
3115
3116 return (load_error);
3117 }
3118}
3119
3120/*
3121 * Pool Open/Import
3122 *
3123 * The import case is identical to an open except that the configuration is sent
3124 * down from userland, instead of grabbed from the configuration cache. For the
3125 * case of an open, the pool configuration will exist in the
3126 * POOL_STATE_UNINITIALIZED state.
3127 *
3128 * The stats information (gen/count/ustats) is used to gather vdev statistics at
3129 * the same time we open the pool, without having to keep the spa_t around in
3130 * some ambiguous state.
3131 */
3132static int
3133spa_open_common(const char *pool, spa_t **spapp, void *tag, nvlist_t *nvpolicy,
3134 nvlist_t **config)
3135{
3136 spa_t *spa;
3137 spa_load_state_t state = SPA_LOAD_OPEN;
3138 int error;
3139 int locked = B_FALSE;
3140 int firstopen = B_FALSE;
3141
3142 *spapp = NULL;
3143
3144 /*
3145 * As disgusting as this is, we need to support recursive calls to this
3146 * function because dsl_dir_open() is called during spa_load(), and ends
3147 * up calling spa_open() again. The real fix is to figure out how to
3148 * avoid dsl_dir_open() calling this in the first place.
3149 */
3150 if (mutex_owner(&spa_namespace_lock) != curthread) {
3151 mutex_enter(&spa_namespace_lock);
3152 locked = B_TRUE;
3153 }
3154
3155 if ((spa = spa_lookup(pool)) == NULL) {
3156 if (locked)
3157 mutex_exit(&spa_namespace_lock);
3158 return (SET_ERROR(ENOENT));
3159 }
3160
3161 if (spa->spa_state == POOL_STATE_UNINITIALIZED) {
3162 zpool_rewind_policy_t policy;
3163
3164 firstopen = B_TRUE;
3165
3166 zpool_get_rewind_policy(nvpolicy ? nvpolicy : spa->spa_config,
3167 &policy);
3168 if (policy.zrp_request & ZPOOL_DO_REWIND)
3169 state = SPA_LOAD_RECOVER;
3170
3171 spa_activate(spa, spa_mode_global);
3172
3173 if (state != SPA_LOAD_RECOVER)
3174 spa->spa_last_ubsync_txg = spa->spa_load_txg = 0;
3175
3176 error = spa_load_best(spa, state, B_FALSE, policy.zrp_txg,
3177 policy.zrp_request);
3178
3179 if (error == EBADF) {
3180 /*
3181 * If vdev_validate() returns failure (indicated by
3182			 * EBADF), then one of the vdev labels indicates
3183 * that the pool has been exported or destroyed. If
3184 * this is the case, the config cache is out of sync and
3185 * we should remove the pool from the namespace.
3186 */
3187 spa_unload(spa);
3188 spa_deactivate(spa);
3189 spa_config_sync(spa, B_TRUE, B_TRUE);
3190 spa_remove(spa);
3191 if (locked)
3192 mutex_exit(&spa_namespace_lock);
3193 return (SET_ERROR(ENOENT));
3194 }
3195
3196 if (error) {
3197 /*
3198 * We can't open the pool, but we still have useful
3199 * information: the state of each vdev after the
3200 * attempted vdev_open(). Return this to the user.
3201 */
3202 if (config != NULL && spa->spa_config) {
3203 VERIFY(nvlist_dup(spa->spa_config, config,
3204 KM_SLEEP) == 0);
3205 VERIFY(nvlist_add_nvlist(*config,
3206 ZPOOL_CONFIG_LOAD_INFO,
3207 spa->spa_load_info) == 0);
3208 }
3209 spa_unload(spa);
3210 spa_deactivate(spa);
3211 spa->spa_last_open_failed = error;
3212 if (locked)
3213 mutex_exit(&spa_namespace_lock);
3214 *spapp = NULL;
3215 return (error);
3216 }
3217 }
3218
3219 spa_open_ref(spa, tag);
3220
3221 if (config != NULL)
3222 *config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
3223
3224 /*
3225 * If we've recovered the pool, pass back any information we
3226 * gathered while doing the load.
3227 */
3228 if (state == SPA_LOAD_RECOVER) {
3229 VERIFY(nvlist_add_nvlist(*config, ZPOOL_CONFIG_LOAD_INFO,
3230 spa->spa_load_info) == 0);
3231 }
3232
3233 if (locked) {
3234 spa->spa_last_open_failed = 0;
3235 spa->spa_last_ubsync_txg = 0;
3236 spa->spa_load_txg = 0;
3237 mutex_exit(&spa_namespace_lock);
3238#ifdef __FreeBSD__
3239#ifdef _KERNEL
3240 if (firstopen)
3241 zvol_create_minors(spa->spa_name);
3242#endif
3243#endif
3244 }
3245
3246 *spapp = spa;
3247
3248 return (0);
3249}
3250
3251int
3252spa_open_rewind(const char *name, spa_t **spapp, void *tag, nvlist_t *policy,
3253 nvlist_t **config)
3254{
3255 return (spa_open_common(name, spapp, tag, policy, config));
3256}
3257
3258int
3259spa_open(const char *name, spa_t **spapp, void *tag)
3260{
3261 return (spa_open_common(name, spapp, tag, NULL, NULL));
3262}
3263
3264/*
3265 * Look up the given spa_t, incrementing the inject count in the process,
3266 * preventing it from being exported or destroyed.
3267 */
3268spa_t *
3269spa_inject_addref(char *name)
3270{
3271 spa_t *spa;
3272
3273 mutex_enter(&spa_namespace_lock);
3274 if ((spa = spa_lookup(name)) == NULL) {
3275 mutex_exit(&spa_namespace_lock);
3276 return (NULL);
3277 }
3278 spa->spa_inject_ref++;
3279 mutex_exit(&spa_namespace_lock);
3280
3281 return (spa);
3282}
3283
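/*
 * Drop a reference obtained via spa_inject_addref().
 */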
3284void
3285spa_inject_delref(spa_t *spa)
3286{
3287 mutex_enter(&spa_namespace_lock);
3288 spa->spa_inject_ref--;
3289 mutex_exit(&spa_namespace_lock);
3290}
3291
3292/*
3293 * Add spares device information to the nvlist.
3294 */
3295static void
3296spa_add_spares(spa_t *spa, nvlist_t *config)
3297{
3298 nvlist_t **spares;
3299 uint_t i, nspares;
3300 nvlist_t *nvroot;
3301 uint64_t guid;
3302 vdev_stat_t *vs;
3303 uint_t vsc;
3304 uint64_t pool;
3305
3306 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
3307
3308 if (spa->spa_spares.sav_count == 0)
3309 return;
3310
3311 VERIFY(nvlist_lookup_nvlist(config,
3312 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
3313 VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
3314 ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);
3315 if (nspares != 0) {
3316 VERIFY(nvlist_add_nvlist_array(nvroot,
3317 ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
3318 VERIFY(nvlist_lookup_nvlist_array(nvroot,
3319 ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);
3320
3321 /*
3322 * Go through and find any spares which have since been
3323 * repurposed as an active spare. If this is the case, update
3324 * their status appropriately.
3325 */
3326 for (i = 0; i < nspares; i++) {
3327 VERIFY(nvlist_lookup_uint64(spares[i],
3328 ZPOOL_CONFIG_GUID, &guid) == 0);
3329 if (spa_spare_exists(guid, &pool, NULL) &&
3330 pool != 0ULL) {
3331 VERIFY(nvlist_lookup_uint64_array(
3332 spares[i], ZPOOL_CONFIG_VDEV_STATS,
3333 (uint64_t **)&vs, &vsc) == 0);
3334 vs->vs_state = VDEV_STATE_CANT_OPEN;
3335 vs->vs_aux = VDEV_AUX_SPARED;
3336 }
3337 }
3338 }
3339}
3340
3341/*
3342 * Add l2cache device information to the nvlist, including vdev stats.
3343 */
3344static void
3345spa_add_l2cache(spa_t *spa, nvlist_t *config)
3346{
3347 nvlist_t **l2cache;
3348 uint_t i, j, nl2cache;
3349 nvlist_t *nvroot;
3350 uint64_t guid;
3351 vdev_t *vd;
3352 vdev_stat_t *vs;
3353 uint_t vsc;
3354
3355 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
3356
3357 if (spa->spa_l2cache.sav_count == 0)
3358 return;
3359
3360 VERIFY(nvlist_lookup_nvlist(config,
3361 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
3362 VERIFY(nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config,
3363 ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
3364 if (nl2cache != 0) {
3365 VERIFY(nvlist_add_nvlist_array(nvroot,
3366 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
3367 VERIFY(nvlist_lookup_nvlist_array(nvroot,
3368 ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
3369
3370 /*
3371 * Update level 2 cache device stats.
3372 */
3373
3374 for (i = 0; i < nl2cache; i++) {
3375 VERIFY(nvlist_lookup_uint64(l2cache[i],
3376 ZPOOL_CONFIG_GUID, &guid) == 0);
3377
3378 vd = NULL;
3379 for (j = 0; j < spa->spa_l2cache.sav_count; j++) {
3380 if (guid ==
3381 spa->spa_l2cache.sav_vdevs[j]->vdev_guid) {
3382 vd = spa->spa_l2cache.sav_vdevs[j];
3383 break;
3384 }
3385 }
3386 ASSERT(vd != NULL);
3387
3388 VERIFY(nvlist_lookup_uint64_array(l2cache[i],
3389 ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc)
3390 == 0);
3391 vdev_get_stats(vd, vs);
3392 }
3393 }
3394}
3395
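/*
 * Add the pool's feature reference counts (for-read and for-write) to the
 * config nvlist under ZPOOL_CONFIG_FEATURE_STATS.
 */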
3396static void
3397spa_add_feature_stats(spa_t *spa, nvlist_t *config)
3398{
3399 nvlist_t *features;
3400 zap_cursor_t zc;
3401 zap_attribute_t za;
3402
3403 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
3404 VERIFY(nvlist_alloc(&features, NV_UNIQUE_NAME, KM_SLEEP) == 0);
3405
3406	/* We may be unable to read features if the pool is suspended. */
3407 if (spa_suspended(spa))
3408 goto out;
3409
3410 if (spa->spa_feat_for_read_obj != 0) {
3411 for (zap_cursor_init(&zc, spa->spa_meta_objset,
3412 spa->spa_feat_for_read_obj);
3413 zap_cursor_retrieve(&zc, &za) == 0;
3414 zap_cursor_advance(&zc)) {
3415 ASSERT(za.za_integer_length == sizeof (uint64_t) &&
3416 za.za_num_integers == 1);
3417 VERIFY3U(0, ==, nvlist_add_uint64(features, za.za_name,
3418 za.za_first_integer));
3419 }
3420 zap_cursor_fini(&zc);
3421 }
3422
3423 if (spa->spa_feat_for_write_obj != 0) {
3424 for (zap_cursor_init(&zc, spa->spa_meta_objset,
3425 spa->spa_feat_for_write_obj);
3426 zap_cursor_retrieve(&zc, &za) == 0;
3427 zap_cursor_advance(&zc)) {
3428 ASSERT(za.za_integer_length == sizeof (uint64_t) &&
3429 za.za_num_integers == 1);
3430 VERIFY3U(0, ==, nvlist_add_uint64(features, za.za_name,
3431 za.za_first_integer));
3432 }
3433 zap_cursor_fini(&zc);
3434 }
3435
3436out:
3437 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_FEATURE_STATS,
3438 features) == 0);
3439 nvlist_free(features);
3440}
3441
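/*
 * Generate the configuration and statistics (spares, l2cache, feature
 * stats, error counts) for the named pool, and report its altroot even if
 * the pool itself cannot be opened.
 */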
3442int
3443spa_get_stats(const char *name, nvlist_t **config,
3444 char *altroot, size_t buflen)
3445{
3446 int error;
3447 spa_t *spa;
3448
3449 *config = NULL;
3450 error = spa_open_common(name, &spa, FTAG, NULL, config);
3451
3452 if (spa != NULL) {
3453 /*
3454 * This still leaves a window of inconsistency where the spares
3455 * or l2cache devices could change and the config would be
3456 * self-inconsistent.
3457 */
3458 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
3459
3460 if (*config != NULL) {
3461 uint64_t loadtimes[2];
3462
3463 loadtimes[0] = spa->spa_loaded_ts.tv_sec;
3464 loadtimes[1] = spa->spa_loaded_ts.tv_nsec;
3465 VERIFY(nvlist_add_uint64_array(*config,
3466 ZPOOL_CONFIG_LOADED_TIME, loadtimes, 2) == 0);
3467
3468 VERIFY(nvlist_add_uint64(*config,
3469 ZPOOL_CONFIG_ERRCOUNT,
3470 spa_get_errlog_size(spa)) == 0);
3471
3472 if (spa_suspended(spa))
3473 VERIFY(nvlist_add_uint64(*config,
3474 ZPOOL_CONFIG_SUSPENDED,
3475 spa->spa_failmode) == 0);
3476
3477 spa_add_spares(spa, *config);
3478 spa_add_l2cache(spa, *config);
3479 spa_add_feature_stats(spa, *config);
3480 }
3481 }
3482
3483 /*
3484 * We want to get the alternate root even for faulted pools, so we cheat
3485 * and call spa_lookup() directly.
3486 */
3487 if (altroot) {
3488 if (spa == NULL) {
3489 mutex_enter(&spa_namespace_lock);
3490 spa = spa_lookup(name);
3491 if (spa)
3492 spa_altroot(spa, altroot, buflen);
3493 else
3494 altroot[0] = '\0';
3495 spa = NULL;
3496 mutex_exit(&spa_namespace_lock);
3497 } else {
3498 spa_altroot(spa, altroot, buflen);
3499 }
3500 }
3501
3502 if (spa != NULL) {
3503 spa_config_exit(spa, SCL_CONFIG, FTAG);
3504 spa_close(spa, FTAG);
3505 }
3506
3507 return (error);
3508}
3509
3510/*
3511 * Validate that the auxiliary device array is well formed. We must have an
3512 * array of nvlists, each of which describes a valid leaf vdev.  If this is an
3513 * import (mode is VDEV_ALLOC_SPARE), then we allow corrupted spares to be
3514 * specified, as long as they are well-formed.
3515 */
3516static int
3517spa_validate_aux_devs(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode,
3518 spa_aux_vdev_t *sav, const char *config, uint64_t version,
3519 vdev_labeltype_t label)
3520{
3521 nvlist_t **dev;
3522 uint_t i, ndev;
3523 vdev_t *vd;
3524 int error;
3525
3526 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
3527
3528 /*
3529 * It's acceptable to have no devs specified.
3530 */
3531 if (nvlist_lookup_nvlist_array(nvroot, config, &dev, &ndev) != 0)
3532 return (0);
3533
3534 if (ndev == 0)
3535 return (SET_ERROR(EINVAL));
3536
3537 /*
3538 * Make sure the pool is formatted with a version that supports this
3539 * device type.
3540 */
3541 if (spa_version(spa) < version)
3542 return (SET_ERROR(ENOTSUP));
3543
3544 /*
3545 * Set the pending device list so we correctly handle device in-use
3546 * checking.
3547 */
3548 sav->sav_pending = dev;
3549 sav->sav_npending = ndev;
3550
3551 for (i = 0; i < ndev; i++) {
3552 if ((error = spa_config_parse(spa, &vd, dev[i], NULL, 0,
3553 mode)) != 0)
3554 goto out;
3555
3556 if (!vd->vdev_ops->vdev_op_leaf) {
3557 vdev_free(vd);
3558 error = SET_ERROR(EINVAL);
3559 goto out;
3560 }
3561
3562 /*
3563 * The L2ARC currently only supports disk devices in
3564 * kernel context. For user-level testing, we allow it.
3565 */
3566#ifdef _KERNEL
3567 if ((strcmp(config, ZPOOL_CONFIG_L2CACHE) == 0) &&
3568 strcmp(vd->vdev_ops->vdev_op_type, VDEV_TYPE_DISK) != 0) {
3569 error = SET_ERROR(ENOTBLK);
3570 vdev_free(vd);
3571 goto out;
3572 }
3573#endif
3574 vd->vdev_top = vd;
3575
3576 if ((error = vdev_open(vd)) == 0 &&
3577 (error = vdev_label_init(vd, crtxg, label)) == 0) {
3578 VERIFY(nvlist_add_uint64(dev[i], ZPOOL_CONFIG_GUID,
3579 vd->vdev_guid) == 0);
3580 }
3581
3582 vdev_free(vd);
3583
3584 if (error &&
3585 (mode != VDEV_ALLOC_SPARE && mode != VDEV_ALLOC_L2CACHE))
3586 goto out;
3587 else
3588 error = 0;
3589 }
3590
3591out:
3592 sav->sav_pending = NULL;
3593 sav->sav_npending = 0;
3594 return (error);
3595}
3596
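/*
 * Validate both auxiliary device arrays (spares and l2cache) in the given
 * vdev nvlist.
 */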
3597static int
3598spa_validate_aux(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode)
3599{
3600 int error;
3601
3602 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
3603
3604 if ((error = spa_validate_aux_devs(spa, nvroot, crtxg, mode,
3605 &spa->spa_spares, ZPOOL_CONFIG_SPARES, SPA_VERSION_SPARES,
3606 VDEV_LABEL_SPARE)) != 0) {
3607 return (error);
3608 }
3609
3610 return (spa_validate_aux_devs(spa, nvroot, crtxg, mode,
3611 &spa->spa_l2cache, ZPOOL_CONFIG_L2CACHE, SPA_VERSION_L2CACHE,
3612 VDEV_LABEL_L2CACHE));
3613}
3614
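/*
 * Replace the named device array in sav->sav_config with the concatenation
 * of the existing entries and 'devs', creating the config nvlist if it
 * doesn't exist yet.
 */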
3615static void
3616spa_set_aux_vdevs(spa_aux_vdev_t *sav, nvlist_t **devs, int ndevs,
3617 const char *config)
3618{
3619 int i;
3620
3621 if (sav->sav_config != NULL) {
3622 nvlist_t **olddevs;
3623 uint_t oldndevs;
3624 nvlist_t **newdevs;
3625
3626 /*
3627		 * Generate new dev list by concatenating with the
3628 * current dev list.
3629 */
3630 VERIFY(nvlist_lookup_nvlist_array(sav->sav_config, config,
3631 &olddevs, &oldndevs) == 0);
3632
3633 newdevs = kmem_alloc(sizeof (void *) *
3634 (ndevs + oldndevs), KM_SLEEP);
3635 for (i = 0; i < oldndevs; i++)
3636 VERIFY(nvlist_dup(olddevs[i], &newdevs[i],
3637 KM_SLEEP) == 0);
3638 for (i = 0; i < ndevs; i++)
3639 VERIFY(nvlist_dup(devs[i], &newdevs[i + oldndevs],
3640 KM_SLEEP) == 0);
3641
3642 VERIFY(nvlist_remove(sav->sav_config, config,
3643 DATA_TYPE_NVLIST_ARRAY) == 0);
3644
3645 VERIFY(nvlist_add_nvlist_array(sav->sav_config,
3646 config, newdevs, ndevs + oldndevs) == 0);
3647 for (i = 0; i < oldndevs + ndevs; i++)
3648 nvlist_free(newdevs[i]);
3649 kmem_free(newdevs, (oldndevs + ndevs) * sizeof (void *));
3650 } else {
3651 /*
3652 * Generate a new dev list.
3653 */
3654 VERIFY(nvlist_alloc(&sav->sav_config, NV_UNIQUE_NAME,
3655 KM_SLEEP) == 0);
3656 VERIFY(nvlist_add_nvlist_array(sav->sav_config, config,
3657 devs, ndevs) == 0);
3658 }
3659}
3660
3661/*
3662 * Stop and drop level 2 ARC devices
3663 */
3664void
3665spa_l2cache_drop(spa_t *spa)
3666{
3667 vdev_t *vd;
3668 int i;
3669 spa_aux_vdev_t *sav = &spa->spa_l2cache;
3670
3671 for (i = 0; i < sav->sav_count; i++) {
3672 uint64_t pool;
3673
3674 vd = sav->sav_vdevs[i];
3675 ASSERT(vd != NULL);
3676
3677 if (spa_l2cache_exists(vd->vdev_guid, &pool) &&
3678 pool != 0ULL && l2arc_vdev_present(vd))
3679 l2arc_remove_vdev(vd);
3680 }
3681}
3682
3683/*
3684 * Pool Creation
3685 */
3686int
3687spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props,
3688 nvlist_t *zplprops)
3689{
3690 spa_t *spa;
3691 char *altroot = NULL;
3692 vdev_t *rvd;
3693 dsl_pool_t *dp;
3694 dmu_tx_t *tx;
3695 int error = 0;
3696 uint64_t txg = TXG_INITIAL;
3697 nvlist_t **spares, **l2cache;
3698 uint_t nspares, nl2cache;
3699 uint64_t version, obj;
3700 boolean_t has_features;
3701
3702 /*
3703 * If this pool already exists, return failure.
3704 */
3705 mutex_enter(&spa_namespace_lock);
3706 if (spa_lookup(pool) != NULL) {
3707 mutex_exit(&spa_namespace_lock);
3708 return (SET_ERROR(EEXIST));
3709 }
3710
3711 /*
3712 * Allocate a new spa_t structure.
3713 */
3714 (void) nvlist_lookup_string(props,
3715 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
3716 spa = spa_add(pool, NULL, altroot);
3717 spa_activate(spa, spa_mode_global);
3718
3719 if (props && (error = spa_prop_validate(spa, props))) {
3720 spa_deactivate(spa);
3721 spa_remove(spa);
3722 mutex_exit(&spa_namespace_lock);
3723 return (error);
3724 }
3725
3726 has_features = B_FALSE;
3727 for (nvpair_t *elem = nvlist_next_nvpair(props, NULL);
3728 elem != NULL; elem = nvlist_next_nvpair(props, elem)) {
3729 if (zpool_prop_feature(nvpair_name(elem)))
3730 has_features = B_TRUE;
3731 }
3732
3733 if (has_features || nvlist_lookup_uint64(props,
3734 zpool_prop_to_name(ZPOOL_PROP_VERSION), &version) != 0) {
3735 version = SPA_VERSION;
3736 }
3737 ASSERT(SPA_VERSION_IS_SUPPORTED(version));
3738
3739 spa->spa_first_txg = txg;
3740 spa->spa_uberblock.ub_txg = txg - 1;
3741 spa->spa_uberblock.ub_version = version;
3742 spa->spa_ubsync = spa->spa_uberblock;
3743 spa->spa_load_state = SPA_LOAD_CREATE;
3744
3745 /*
3746 * Create "The Godfather" zio to hold all async IOs
3747 */
3748 spa->spa_async_zio_root = kmem_alloc(max_ncpus * sizeof (void *),
3749 KM_SLEEP);
3750 for (int i = 0; i < max_ncpus; i++) {
3751 spa->spa_async_zio_root[i] = zio_root(spa, NULL, NULL,
3752 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
3753 ZIO_FLAG_GODFATHER);
3754 }
3755
3756 /*
3757 * Create the root vdev.
3758 */
3759 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
3760
3761 error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, VDEV_ALLOC_ADD);
3762
3763 ASSERT(error != 0 || rvd != NULL);
3764 ASSERT(error != 0 || spa->spa_root_vdev == rvd);
3765
3766 if (error == 0 && !zfs_allocatable_devs(nvroot))
3767 error = SET_ERROR(EINVAL);
3768
3769 if (error == 0 &&
3770 (error = vdev_create(rvd, txg, B_FALSE)) == 0 &&
3771 (error = spa_validate_aux(spa, nvroot, txg,
3772 VDEV_ALLOC_ADD)) == 0) {
3773 for (int c = 0; c < rvd->vdev_children; c++) {
3774 vdev_ashift_optimize(rvd->vdev_child[c]);
3775 vdev_metaslab_set_size(rvd->vdev_child[c]);
3776 vdev_expand(rvd->vdev_child[c], txg);
3777 }
3778 }
3779
3780 spa_config_exit(spa, SCL_ALL, FTAG);
3781
3782 if (error != 0) {
3783 spa_unload(spa);
3784 spa_deactivate(spa);
3785 spa_remove(spa);
3786 mutex_exit(&spa_namespace_lock);
3787 return (error);
3788 }
3789
3790 /*
3791 * Get the list of spares, if specified.
3792 */
3793 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
3794 &spares, &nspares) == 0) {
3795 VERIFY(nvlist_alloc(&spa->spa_spares.sav_config, NV_UNIQUE_NAME,
3796 KM_SLEEP) == 0);
3797 VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
3798 ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
3799 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
3800 spa_load_spares(spa);
3801 spa_config_exit(spa, SCL_ALL, FTAG);
3802 spa->spa_spares.sav_sync = B_TRUE;
3803 }
3804
3805 /*
3806 * Get the list of level 2 cache devices, if specified.
3807 */
3808 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
3809 &l2cache, &nl2cache) == 0) {
3810 VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config,
3811 NV_UNIQUE_NAME, KM_SLEEP) == 0);
3812 VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config,
3813 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
3814 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
3815 spa_load_l2cache(spa);
3816 spa_config_exit(spa, SCL_ALL, FTAG);
3817 spa->spa_l2cache.sav_sync = B_TRUE;
3818 }
3819
3820 spa->spa_is_initializing = B_TRUE;
3821 spa->spa_dsl_pool = dp = dsl_pool_create(spa, zplprops, txg);
3822 spa->spa_meta_objset = dp->dp_meta_objset;
3823 spa->spa_is_initializing = B_FALSE;
3824
3825 /*
3826 * Create DDTs (dedup tables).
3827 */
3828 ddt_create(spa);
3829
3830 spa_update_dspace(spa);
3831
3832 tx = dmu_tx_create_assigned(dp, txg);
3833
3834 /*
3835 * Create the pool config object.
3836 */
3837 spa->spa_config_object = dmu_object_alloc(spa->spa_meta_objset,
3838 DMU_OT_PACKED_NVLIST, SPA_CONFIG_BLOCKSIZE,
3839 DMU_OT_PACKED_NVLIST_SIZE, sizeof (uint64_t), tx);
3840
3841 if (zap_add(spa->spa_meta_objset,
3842 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CONFIG,
3843 sizeof (uint64_t), 1, &spa->spa_config_object, tx) != 0) {
3844 cmn_err(CE_PANIC, "failed to add pool config");
3845 }
3846
3847 if (spa_version(spa) >= SPA_VERSION_FEATURES)
3848 spa_feature_create_zap_objects(spa, tx);
3849
3850 if (zap_add(spa->spa_meta_objset,
3851 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CREATION_VERSION,
3852 sizeof (uint64_t), 1, &version, tx) != 0) {
3853 cmn_err(CE_PANIC, "failed to add pool version");
3854 }
3855
3856 /* Newly created pools with the right version are always deflated. */
3857 if (version >= SPA_VERSION_RAIDZ_DEFLATE) {
3858 spa->spa_deflate = TRUE;
3859 if (zap_add(spa->spa_meta_objset,
3860 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
3861 sizeof (uint64_t), 1, &spa->spa_deflate, tx) != 0) {
3862 cmn_err(CE_PANIC, "failed to add deflate");
3863 }
3864 }
3865
3866 /*
3867 * Create the deferred-free bpobj. Turn off compression
3868 * because sync-to-convergence takes longer if the blocksize
3869 * keeps changing.
3870 */
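	/*
	 * The bpobj is allocated with 16K (1 << 14) blocks and linked from
	 * the pool directory object under DMU_POOL_SYNC_BPOBJ.
	 */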
3871 obj = bpobj_alloc(spa->spa_meta_objset, 1 << 14, tx);
3872 dmu_object_set_compress(spa->spa_meta_objset, obj,
3873 ZIO_COMPRESS_OFF, tx);
3874 if (zap_add(spa->spa_meta_objset,
3875 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SYNC_BPOBJ,
3876 sizeof (uint64_t), 1, &obj, tx) != 0) {
3877 cmn_err(CE_PANIC, "failed to add bpobj");
3878 }
3879 VERIFY3U(0, ==, bpobj_open(&spa->spa_deferred_bpobj,
3880 spa->spa_meta_objset, obj));
3881
3882 /*
3883 * Create the pool's history object.
3884 */
3885 if (version >= SPA_VERSION_ZPOOL_HISTORY)
3886 spa_history_create_obj(spa, tx);
3887
3888 /*
3889 * Generate some random noise for salted checksums to operate on.
3890 */
3891 (void) random_get_pseudo_bytes(spa->spa_cksum_salt.zcs_bytes,
3892 sizeof (spa->spa_cksum_salt.zcs_bytes));
3893
3894 /*
3895 * Set pool properties.
3896 */
3897 spa->spa_bootfs = zpool_prop_default_numeric(ZPOOL_PROP_BOOTFS);
3898 spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION);
3899 spa->spa_failmode = zpool_prop_default_numeric(ZPOOL_PROP_FAILUREMODE);
3900 spa->spa_autoexpand = zpool_prop_default_numeric(ZPOOL_PROP_AUTOEXPAND);
3901
3902 if (props != NULL) {
3903 spa_configfile_set(spa, props, B_FALSE);
3904 spa_sync_props(props, tx);
3905 }
3906
3907 dmu_tx_commit(tx);
3908
3909 spa->spa_sync_on = B_TRUE;
3910 txg_sync_start(spa->spa_dsl_pool);
3911
3912 /*
3913 * We explicitly wait for the first transaction to complete so that our
3914 * bean counters are appropriately updated.
3915 */
3916 txg_wait_synced(spa->spa_dsl_pool, txg);
3917
3918 spa_config_sync(spa, B_FALSE, B_TRUE);
3919 spa_event_notify(spa, NULL, ESC_ZFS_POOL_CREATE);
3920
3921 spa_history_log_version(spa, "create");
3922
3923 /*
3924 * Don't count references from objsets that are already closed
3925 * and are making their way through the eviction process.
3926 */
3927 spa_evicting_os_wait(spa);
3928 spa->spa_minref = refcount_count(&spa->spa_refcount);
3929 spa->spa_load_state = SPA_LOAD_NONE;
3930
3931 mutex_exit(&spa_namespace_lock);
3932
3933 return (0);
3934}
3935
3936#ifdef _KERNEL
3937#ifdef illumos
3938/*
3939 * Get the root pool information from the root disk, then import the root pool
3940 * during the system boot up time.
3941 */
3942extern int vdev_disk_read_rootlabel(char *, char *, nvlist_t **);
3943
3944static nvlist_t *
3945spa_generate_rootconf(char *devpath, char *devid, uint64_t *guid)
3946{
3947 nvlist_t *config;
3948 nvlist_t *nvtop, *nvroot;
3949 uint64_t pgid;
3950
3951 if (vdev_disk_read_rootlabel(devpath, devid, &config) != 0)
3952 return (NULL);
3953
3954 /*
3955 * Add this top-level vdev to the child array.
3956 */
3957 VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
3958 &nvtop) == 0);
3959 VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
3960 &pgid) == 0);
3961 VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID, guid) == 0);
3962
3963 /*
3964 * Put this pool's top-level vdevs into a root vdev.
3965 */
3966 VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0);
3967 VERIFY(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
3968 VDEV_TYPE_ROOT) == 0);
3969 VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) == 0);
3970 VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, pgid) == 0);
3971 VERIFY(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
3972 &nvtop, 1) == 0);
3973
3974 /*
3975 * Replace the existing vdev_tree with the new root vdev in
3976 * this pool's configuration (remove the old, add the new).
3977 */
3978 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, nvroot) == 0);
3979 nvlist_free(nvroot);
3980 return (config);
3981}
3982
3983/*
3984 * Walk the vdev tree and see if we can find a device with "better"
3985 * configuration. A configuration is "better" if the label on that
3986 * device has a more recent txg.
3987 */
3988static void
3989spa_alt_rootvdev(vdev_t *vd, vdev_t **avd, uint64_t *txg)
3990{
3991 for (int c = 0; c < vd->vdev_children; c++)
3992 spa_alt_rootvdev(vd->vdev_child[c], avd, txg);
3993
3994 if (vd->vdev_ops->vdev_op_leaf) {
3995 nvlist_t *label;
3996 uint64_t label_txg;
3997
3998 if (vdev_disk_read_rootlabel(vd->vdev_physpath, vd->vdev_devid,
3999 &label) != 0)
4000 return;
4001
4002 VERIFY(nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_TXG,
4003 &label_txg) == 0);
4004
4005 /*
4006 * Do we have a better boot device?
4007 */
4008 if (label_txg > *txg) {
4009 *txg = label_txg;
4010 *avd = vd;
4011 }
4012 nvlist_free(label);
4013 }
4014}
4015
4016/*
4017 * Import a root pool.
4018 *
4019 * For x86, devpath_list will consist of the devid and/or physpath name of
4020 * the vdev (e.g. "id1,sd@SSEAGATE..." or "/pci@1f,0/ide@d/disk@0,0:a").
4021 * The GRUB "findroot" command will return the vdev we should boot.
4022 *
4023 * For Sparc, devpath_list consists of the physpath name of the booting
4024 * device, whether the root pool is a single-device pool or a mirrored pool.
4025 * e.g.
4026 * "/pci@1f,0/ide@d/disk@0,0:a"
4027 */
4028int
4029spa_import_rootpool(char *devpath, char *devid)
4030{
4031 spa_t *spa;
4032 vdev_t *rvd, *bvd, *avd = NULL;
4033 nvlist_t *config, *nvtop;
4034 uint64_t guid, txg;
4035 char *pname;
4036 int error;
4037
4038 /*
4039 * Read the label from the boot device and generate a configuration.
4040 */
4041 config = spa_generate_rootconf(devpath, devid, &guid);
4042#if defined(_OBP) && defined(_KERNEL)
4043 if (config == NULL) {
4044 if (strstr(devpath, "/iscsi/ssd") != NULL) {
4045 /* iscsi boot */
4046 get_iscsi_bootpath_phy(devpath);
4047 config = spa_generate_rootconf(devpath, devid, &guid);
4048 }
4049 }
4050#endif
4051 if (config == NULL) {
4052 cmn_err(CE_NOTE, "Cannot read the pool label from '%s'",
4053 devpath);
4054 return (SET_ERROR(EIO));
4055 }
4056
4057 VERIFY(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
4058 &pname) == 0);
4059 VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG, &txg) == 0);
4060
4061 mutex_enter(&spa_namespace_lock);
4062 if ((spa = spa_lookup(pname)) != NULL) {
4063 /*
4064 * Remove the existing root pool from the namespace so that we
4065 * can replace it with the correct config we just read in.
4066 */
4067 spa_remove(spa);
4068 }
4069
4070 spa = spa_add(pname, config, NULL);
4071 spa->spa_is_root = B_TRUE;
4072 spa->spa_import_flags = ZFS_IMPORT_VERBATIM;
4073
4074 /*
4075 * Build up a vdev tree based on the boot device's label config.
4076 */
4077 VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
4078 &nvtop) == 0);
4079 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
4080 error = spa_config_parse(spa, &rvd, nvtop, NULL, 0,
4081 VDEV_ALLOC_ROOTPOOL);
4082 spa_config_exit(spa, SCL_ALL, FTAG);
4083 if (error) {
4084 mutex_exit(&spa_namespace_lock);
4085 nvlist_free(config);
4086 cmn_err(CE_NOTE, "Can not parse the config for pool '%s'",
4087 pname);
4088 return (error);
4089 }
4090
4091 /*
4092 * Get the boot vdev.
4093 */
4094 if ((bvd = vdev_lookup_by_guid(rvd, guid)) == NULL) {
4095 cmn_err(CE_NOTE, "Can not find the boot vdev for guid %llu",
4096 (u_longlong_t)guid);
4097 error = SET_ERROR(ENOENT);
4098 goto out;
4099 }
4100
4101 /*
4102 * Determine if there is a better boot device.
4103 */
4104 avd = bvd;
4105 spa_alt_rootvdev(rvd, &avd, &txg);
4106 if (avd != bvd) {
4107 cmn_err(CE_NOTE, "The boot device is 'degraded'. Please "
4108 "try booting from '%s'", avd->vdev_path);
4109 error = SET_ERROR(EINVAL);
4110 goto out;
4111 }
4112
4113 /*
4114 * If the boot device is part of a spare vdev then ensure that
4115 * we're booting off the active spare.
4116 */
4117 if (bvd->vdev_parent->vdev_ops == &vdev_spare_ops &&
4118 !bvd->vdev_isspare) {
4119 cmn_err(CE_NOTE, "The boot device is currently spared. Please "
4120 "try booting from '%s'",
4121 bvd->vdev_parent->
4122 vdev_child[bvd->vdev_parent->vdev_children - 1]->vdev_path);
4123 error = SET_ERROR(EINVAL);
4124 goto out;
4125 }
4126
4127 error = 0;
4128out:
4129 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
4130 vdev_free(rvd);
4131 spa_config_exit(spa, SCL_ALL, FTAG);
4132 mutex_exit(&spa_namespace_lock);
4133
4134 nvlist_free(config);
4135 return (error);
4136}
4137
4138#else /* !illumos */
4139
4140extern int vdev_geom_read_pool_label(const char *name, nvlist_t ***configs,
4141 uint64_t *count);
4142
4143static nvlist_t *
4144spa_generate_rootconf(const char *name)
4145{
4146 nvlist_t **configs, **tops;
4147 nvlist_t *config;
4148 nvlist_t *best_cfg, *nvtop, *nvroot;
4149 uint64_t *holes;
4150 uint64_t best_txg;
4151 uint64_t nchildren;
4152 uint64_t pgid;
4153 uint64_t count;
4154 uint64_t i;
4155 uint_t nholes;
4156
4157 if (vdev_geom_read_pool_label(name, &configs, &count) != 0)
4158 return (NULL);
4159
4160 ASSERT3U(count, !=, 0);
4161 best_txg = 0;
4162 for (i = 0; i < count; i++) {
4163 uint64_t txg;
4164
4165 VERIFY(nvlist_lookup_uint64(configs[i], ZPOOL_CONFIG_POOL_TXG,
4166 &txg) == 0);
4167 if (txg > best_txg) {
4168 best_txg = txg;
4169 best_cfg = configs[i];
4170 }
4171 }
4172
4173 nchildren = 1;
4174 nvlist_lookup_uint64(best_cfg, ZPOOL_CONFIG_VDEV_CHILDREN, &nchildren);
4175 holes = NULL;
4176 nvlist_lookup_uint64_array(best_cfg, ZPOOL_CONFIG_HOLE_ARRAY,
4177 &holes, &nholes);
4178
4179 tops = kmem_zalloc(nchildren * sizeof(void *), KM_SLEEP);
4180 for (i = 0; i < nchildren; i++) {
4181 if (i >= count)
4182 break;
4183 if (configs[i] == NULL)
4184 continue;
4185 VERIFY(nvlist_lookup_nvlist(configs[i], ZPOOL_CONFIG_VDEV_TREE,
4186 &nvtop) == 0);
4187 nvlist_dup(nvtop, &tops[i], KM_SLEEP);
4188 }
4189 for (i = 0; holes != NULL && i < nholes; i++) {
4190 if (i >= nchildren)
4191 continue;
4192 if (tops[holes[i]] != NULL)
4193 continue;
4194 nvlist_alloc(&tops[holes[i]], NV_UNIQUE_NAME, KM_SLEEP);
4195 VERIFY(nvlist_add_string(tops[holes[i]], ZPOOL_CONFIG_TYPE,
4196 VDEV_TYPE_HOLE) == 0);
4197 VERIFY(nvlist_add_uint64(tops[holes[i]], ZPOOL_CONFIG_ID,
4198 holes[i]) == 0);
4199 VERIFY(nvlist_add_uint64(tops[holes[i]], ZPOOL_CONFIG_GUID,
4200 0) == 0);
4201 }
4202 for (i = 0; i < nchildren; i++) {
4203 if (tops[i] != NULL)
4204 continue;
4205 nvlist_alloc(&tops[i], NV_UNIQUE_NAME, KM_SLEEP);
4206 VERIFY(nvlist_add_string(tops[i], ZPOOL_CONFIG_TYPE,
4207 VDEV_TYPE_MISSING) == 0);
4208 VERIFY(nvlist_add_uint64(tops[i], ZPOOL_CONFIG_ID,
4209 i) == 0);
4210 VERIFY(nvlist_add_uint64(tops[i], ZPOOL_CONFIG_GUID,
4211 0) == 0);
4212 }
4213
4214 /*
4215 * Create pool config based on the best vdev config.
4216 */
4217 nvlist_dup(best_cfg, &config, KM_SLEEP);
4218
4219 /*
4220 * Put this pool's top-level vdevs into a root vdev.
4221 */
4222 VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
4223 &pgid) == 0);
4224 VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0);
4225 VERIFY(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
4226 VDEV_TYPE_ROOT) == 0);
4227 VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) == 0);
4228 VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, pgid) == 0);
4229 VERIFY(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
4230 tops, nchildren) == 0);
4231
4232 /*
4233 * Replace the existing vdev_tree with the new root vdev in
4234 * this pool's configuration (remove the old, add the new).
4235 */
4236 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, nvroot) == 0);
4237
4238 /*
4239 * Drop vdev config elements that should not be present at pool level.
4240 */
4241 nvlist_remove(config, ZPOOL_CONFIG_GUID, DATA_TYPE_UINT64);
4242 nvlist_remove(config, ZPOOL_CONFIG_TOP_GUID, DATA_TYPE_UINT64);
4243
4244 for (i = 0; i < count; i++)
4245 nvlist_free(configs[i]);
4246 kmem_free(configs, count * sizeof(void *));
4247 for (i = 0; i < nchildren; i++)
4248 nvlist_free(tops[i]);
4249 kmem_free(tops, nchildren * sizeof(void *));
4250 nvlist_free(nvroot);
4251 return (config);
4252}
4253
4254int
4255spa_import_rootpool(const char *name)
4256{
4257 spa_t *spa;
4258 vdev_t *rvd, *bvd, *avd = NULL;
4259 nvlist_t *config, *nvtop;
4260 uint64_t txg;
4261 char *pname;
4262 int error;
4263
4264 /*
4265 * Read the label from the boot device and generate a configuration.
4266 */
4267 config = spa_generate_rootconf(name);
4268
4269 mutex_enter(&spa_namespace_lock);
4270 if (config != NULL) {
4271 VERIFY(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
4272 &pname) == 0 && strcmp(name, pname) == 0);
4273 VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG, &txg)
4274 == 0);
4275
4276 if ((spa = spa_lookup(pname)) != NULL) {
4277 /*
107 ZTI_MODE_NULL, /* don't create a taskq */
108 ZTI_NMODES
109} zti_modes_t;
110
111#define ZTI_P(n, q) { ZTI_MODE_FIXED, (n), (q) }
112#define ZTI_BATCH { ZTI_MODE_BATCH, 0, 1 }
113#define ZTI_NULL { ZTI_MODE_NULL, 0, 0 }
114
115#define ZTI_N(n) ZTI_P(n, 1)
116#define ZTI_ONE ZTI_N(1)
117
118typedef struct zio_taskq_info {
119 zti_modes_t zti_mode;
120 uint_t zti_value;
121 uint_t zti_count;
122} zio_taskq_info_t;
123
124static const char *const zio_taskq_types[ZIO_TASKQ_TYPES] = {
125 "issue", "issue_high", "intr", "intr_high"
126};
127
128/*
129 * This table defines the taskq settings for each ZFS I/O type. When
130 * initializing a pool, we use this table to create an appropriately sized
131 * taskq. Some operations are low volume and therefore have a small, static
132 * number of threads assigned to their taskqs using the ZTI_N(#) or ZTI_ONE
133 * macros. Other operations process a large amount of data; the ZTI_BATCH
134 * macro causes us to create a taskq oriented for throughput. Some operations
135 * are so high frequency and short-lived that the taskq itself can become a
136 * point of lock contention. The ZTI_P(#, #) macro indicates that we need an
137 * additional degree of parallelism specified by the number of threads per-
138 * taskq and the number of taskqs; when dispatching an event in this case, the
139 * particular taskq is chosen at random.
140 *
141 * The different taskq priorities are to handle the different contexts (issue
142 * and interrupt) and then to reserve threads for ZIO_PRIORITY_NOW I/Os that
143 * need to be handled with minimum delay.
144 */
145const zio_taskq_info_t zio_taskqs[ZIO_TYPES][ZIO_TASKQ_TYPES] = {
146 /* ISSUE ISSUE_HIGH INTR INTR_HIGH */
147 { ZTI_ONE, ZTI_NULL, ZTI_ONE, ZTI_NULL }, /* NULL */
148 { ZTI_N(8), ZTI_NULL, ZTI_P(12, 8), ZTI_NULL }, /* READ */
149 { ZTI_BATCH, ZTI_N(5), ZTI_N(8), ZTI_N(5) }, /* WRITE */
150 { ZTI_P(12, 8), ZTI_NULL, ZTI_ONE, ZTI_NULL }, /* FREE */
151 { ZTI_ONE, ZTI_NULL, ZTI_ONE, ZTI_NULL }, /* CLAIM */
152 { ZTI_ONE, ZTI_NULL, ZTI_ONE, ZTI_NULL }, /* IOCTL */
153};
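/*
 * Example of reading a row above: the READ/INTR entry ZTI_P(12, 8) expands to
 * { ZTI_MODE_FIXED, 12, 8 }, i.e. eight taskqs of twelve threads each, while
 * ZTI_ONE is a single one-thread taskq and ZTI_NULL creates no taskq at all.
 */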
154
155static sysevent_t *spa_event_create(spa_t *spa, vdev_t *vd, const char *name);
156static void spa_event_post(sysevent_t *ev);
157static void spa_sync_version(void *arg, dmu_tx_t *tx);
158static void spa_sync_props(void *arg, dmu_tx_t *tx);
159static boolean_t spa_has_active_shared_spare(spa_t *spa);
160static int spa_load_impl(spa_t *spa, uint64_t, nvlist_t *config,
161 spa_load_state_t state, spa_import_type_t type, boolean_t mosconfig,
162 char **ereport);
163static void spa_vdev_resilver_done(spa_t *spa);
164
165uint_t zio_taskq_batch_pct = 75; /* 1 thread per cpu in pset */
166#ifdef PSRSET_BIND
167id_t zio_taskq_psrset_bind = PS_NONE;
168#endif
169#ifdef SYSDC
170boolean_t zio_taskq_sysdc = B_TRUE; /* use SDC scheduling class */
171uint_t zio_taskq_basedc = 80; /* base duty cycle */
172#endif
173
174boolean_t spa_create_process = B_TRUE; /* no process ==> no sysdc */
175extern int zfs_sync_pass_deferred_free;
176
177/*
178 * This (illegal) pool name is used when temporarily importing a spa_t in order
179 * to get the vdev stats associated with the imported devices.
180 */
181#define TRYIMPORT_NAME "$import"
182
183/*
184 * ==========================================================================
185 * SPA properties routines
186 * ==========================================================================
187 */
188
189/*
190 * Add a (source=src, propname=propval) list to an nvlist.
191 */
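/*
 * Each entry added is keyed by the property name and holds a nested nvlist
 * of the form { ZPROP_SOURCE = src, ZPROP_VALUE = <string or uint64> }.
 */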
192static void
193spa_prop_add_list(nvlist_t *nvl, zpool_prop_t prop, char *strval,
194 uint64_t intval, zprop_source_t src)
195{
196 const char *propname = zpool_prop_to_name(prop);
197 nvlist_t *propval;
198
199 VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP) == 0);
200 VERIFY(nvlist_add_uint64(propval, ZPROP_SOURCE, src) == 0);
201
202 if (strval != NULL)
203 VERIFY(nvlist_add_string(propval, ZPROP_VALUE, strval) == 0);
204 else
205 VERIFY(nvlist_add_uint64(propval, ZPROP_VALUE, intval) == 0);
206
207 VERIFY(nvlist_add_nvlist(nvl, propname, propval) == 0);
208 nvlist_free(propval);
209}
210
211/*
212 * Get property values from the spa configuration.
213 */
214static void
215spa_prop_get_config(spa_t *spa, nvlist_t **nvp)
216{
217 vdev_t *rvd = spa->spa_root_vdev;
218 dsl_pool_t *pool = spa->spa_dsl_pool;
219 uint64_t size, alloc, cap, version;
220 zprop_source_t src = ZPROP_SRC_NONE;
221 spa_config_dirent_t *dp;
222 metaslab_class_t *mc = spa_normal_class(spa);
223
224 ASSERT(MUTEX_HELD(&spa->spa_props_lock));
225
226 if (rvd != NULL) {
227 alloc = metaslab_class_get_alloc(spa_normal_class(spa));
228 size = metaslab_class_get_space(spa_normal_class(spa));
229 spa_prop_add_list(*nvp, ZPOOL_PROP_NAME, spa_name(spa), 0, src);
230 spa_prop_add_list(*nvp, ZPOOL_PROP_SIZE, NULL, size, src);
231 spa_prop_add_list(*nvp, ZPOOL_PROP_ALLOCATED, NULL, alloc, src);
232 spa_prop_add_list(*nvp, ZPOOL_PROP_FREE, NULL,
233 size - alloc, src);
234
235 spa_prop_add_list(*nvp, ZPOOL_PROP_FRAGMENTATION, NULL,
236 metaslab_class_fragmentation(mc), src);
237 spa_prop_add_list(*nvp, ZPOOL_PROP_EXPANDSZ, NULL,
238 metaslab_class_expandable_space(mc), src);
239 spa_prop_add_list(*nvp, ZPOOL_PROP_READONLY, NULL,
240 (spa_mode(spa) == FREAD), src);
241
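		/*
		 * Capacity is an integer percentage of allocated space,
		 * e.g. alloc = 600G on a 1000G pool yields cap = 60.
		 */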
242 cap = (size == 0) ? 0 : (alloc * 100 / size);
243 spa_prop_add_list(*nvp, ZPOOL_PROP_CAPACITY, NULL, cap, src);
244
245 spa_prop_add_list(*nvp, ZPOOL_PROP_DEDUPRATIO, NULL,
246 ddt_get_pool_dedup_ratio(spa), src);
247
248 spa_prop_add_list(*nvp, ZPOOL_PROP_HEALTH, NULL,
249 rvd->vdev_state, src);
250
251 version = spa_version(spa);
252 if (version == zpool_prop_default_numeric(ZPOOL_PROP_VERSION))
253 src = ZPROP_SRC_DEFAULT;
254 else
255 src = ZPROP_SRC_LOCAL;
256 spa_prop_add_list(*nvp, ZPOOL_PROP_VERSION, NULL, version, src);
257 }
258
259 if (pool != NULL) {
260 /*
261		 * The $FREE directory was introduced in SPA_VERSION_DEADLISTS;
262		 * when opening pools created before this version, freedir will be NULL.
263 */
264 if (pool->dp_free_dir != NULL) {
265 spa_prop_add_list(*nvp, ZPOOL_PROP_FREEING, NULL,
266 dsl_dir_phys(pool->dp_free_dir)->dd_used_bytes,
267 src);
268 } else {
269 spa_prop_add_list(*nvp, ZPOOL_PROP_FREEING,
270 NULL, 0, src);
271 }
272
273 if (pool->dp_leak_dir != NULL) {
274 spa_prop_add_list(*nvp, ZPOOL_PROP_LEAKED, NULL,
275 dsl_dir_phys(pool->dp_leak_dir)->dd_used_bytes,
276 src);
277 } else {
278 spa_prop_add_list(*nvp, ZPOOL_PROP_LEAKED,
279 NULL, 0, src);
280 }
281 }
282
283 spa_prop_add_list(*nvp, ZPOOL_PROP_GUID, NULL, spa_guid(spa), src);
284
285 if (spa->spa_comment != NULL) {
286 spa_prop_add_list(*nvp, ZPOOL_PROP_COMMENT, spa->spa_comment,
287 0, ZPROP_SRC_LOCAL);
288 }
289
290 if (spa->spa_root != NULL)
291 spa_prop_add_list(*nvp, ZPOOL_PROP_ALTROOT, spa->spa_root,
292 0, ZPROP_SRC_LOCAL);
293
294 if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_BLOCKS)) {
295 spa_prop_add_list(*nvp, ZPOOL_PROP_MAXBLOCKSIZE, NULL,
296 MIN(zfs_max_recordsize, SPA_MAXBLOCKSIZE), ZPROP_SRC_NONE);
297 } else {
298 spa_prop_add_list(*nvp, ZPOOL_PROP_MAXBLOCKSIZE, NULL,
299 SPA_OLD_MAXBLOCKSIZE, ZPROP_SRC_NONE);
300 }
301
302 if ((dp = list_head(&spa->spa_config_list)) != NULL) {
303 if (dp->scd_path == NULL) {
304 spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
305 "none", 0, ZPROP_SRC_LOCAL);
306 } else if (strcmp(dp->scd_path, spa_config_path) != 0) {
307 spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
308 dp->scd_path, 0, ZPROP_SRC_LOCAL);
309 }
310 }
311}
312
313/*
314 * Get zpool property values.
315 */
316int
317spa_prop_get(spa_t *spa, nvlist_t **nvp)
318{
319 objset_t *mos = spa->spa_meta_objset;
320 zap_cursor_t zc;
321 zap_attribute_t za;
322 int err;
323
324 VERIFY(nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP) == 0);
325
326 mutex_enter(&spa->spa_props_lock);
327
328 /*
329 * Get properties from the spa config.
330 */
331 spa_prop_get_config(spa, nvp);
332
333	/* If no pool property object, there are no more props to get. */
334 if (mos == NULL || spa->spa_pool_props_object == 0) {
335 mutex_exit(&spa->spa_props_lock);
336 return (0);
337 }
338
339 /*
340 * Get properties from the MOS pool property object.
341 */
342 for (zap_cursor_init(&zc, mos, spa->spa_pool_props_object);
343 (err = zap_cursor_retrieve(&zc, &za)) == 0;
344 zap_cursor_advance(&zc)) {
345 uint64_t intval = 0;
346 char *strval = NULL;
347 zprop_source_t src = ZPROP_SRC_DEFAULT;
348 zpool_prop_t prop;
349
350 if ((prop = zpool_name_to_prop(za.za_name)) == ZPROP_INVAL)
351 continue;
352
353 switch (za.za_integer_length) {
354 case 8:
355 /* integer property */
356 if (za.za_first_integer !=
357 zpool_prop_default_numeric(prop))
358 src = ZPROP_SRC_LOCAL;
359
360 if (prop == ZPOOL_PROP_BOOTFS) {
361 dsl_pool_t *dp;
362 dsl_dataset_t *ds = NULL;
363
364 dp = spa_get_dsl(spa);
365 dsl_pool_config_enter(dp, FTAG);
366 if (err = dsl_dataset_hold_obj(dp,
367 za.za_first_integer, FTAG, &ds)) {
368 dsl_pool_config_exit(dp, FTAG);
369 break;
370 }
371
372 strval = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN,
373 KM_SLEEP);
374 dsl_dataset_name(ds, strval);
375 dsl_dataset_rele(ds, FTAG);
376 dsl_pool_config_exit(dp, FTAG);
377 } else {
378 strval = NULL;
379 intval = za.za_first_integer;
380 }
381
382 spa_prop_add_list(*nvp, prop, strval, intval, src);
383
384 if (strval != NULL)
385 kmem_free(strval, ZFS_MAX_DATASET_NAME_LEN);
386
387 break;
388
389 case 1:
390 /* string property */
391 strval = kmem_alloc(za.za_num_integers, KM_SLEEP);
392 err = zap_lookup(mos, spa->spa_pool_props_object,
393 za.za_name, 1, za.za_num_integers, strval);
394 if (err) {
395 kmem_free(strval, za.za_num_integers);
396 break;
397 }
398 spa_prop_add_list(*nvp, prop, strval, 0, src);
399 kmem_free(strval, za.za_num_integers);
400 break;
401
402 default:
403 break;
404 }
405 }
406 zap_cursor_fini(&zc);
407 mutex_exit(&spa->spa_props_lock);
408out:
409 if (err && err != ENOENT) {
410 nvlist_free(*nvp);
411 *nvp = NULL;
412 return (err);
413 }
414
415 return (0);
416}
417
418/*
419 * Validate the given pool properties nvlist and modify the list
420 * for the property values to be set.
421 */
422static int
423spa_prop_validate(spa_t *spa, nvlist_t *props)
424{
425 nvpair_t *elem;
426 int error = 0, reset_bootfs = 0;
427 uint64_t objnum = 0;
428 boolean_t has_feature = B_FALSE;
429
430 elem = NULL;
431 while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
432 uint64_t intval;
433 char *strval, *slash, *check, *fname;
434 const char *propname = nvpair_name(elem);
435 zpool_prop_t prop = zpool_name_to_prop(propname);
436
437 switch (prop) {
438 case ZPROP_INVAL:
439 if (!zpool_prop_feature(propname)) {
440 error = SET_ERROR(EINVAL);
441 break;
442 }
443
444 /*
445 * Sanitize the input.
446 */
447 if (nvpair_type(elem) != DATA_TYPE_UINT64) {
448 error = SET_ERROR(EINVAL);
449 break;
450 }
451
452 if (nvpair_value_uint64(elem, &intval) != 0) {
453 error = SET_ERROR(EINVAL);
454 break;
455 }
456
457 if (intval != 0) {
458 error = SET_ERROR(EINVAL);
459 break;
460 }
461
462 fname = strchr(propname, '@') + 1;
463 if (zfeature_lookup_name(fname, NULL) != 0) {
464 error = SET_ERROR(EINVAL);
465 break;
466 }
467
468 has_feature = B_TRUE;
469 break;
470
471 case ZPOOL_PROP_VERSION:
472 error = nvpair_value_uint64(elem, &intval);
473 if (!error &&
474 (intval < spa_version(spa) ||
475 intval > SPA_VERSION_BEFORE_FEATURES ||
476 has_feature))
477 error = SET_ERROR(EINVAL);
478 break;
479
480 case ZPOOL_PROP_DELEGATION:
481 case ZPOOL_PROP_AUTOREPLACE:
482 case ZPOOL_PROP_LISTSNAPS:
483 case ZPOOL_PROP_AUTOEXPAND:
484 error = nvpair_value_uint64(elem, &intval);
485 if (!error && intval > 1)
486 error = SET_ERROR(EINVAL);
487 break;
488
489 case ZPOOL_PROP_BOOTFS:
490 /*
491 * If the pool version is less than SPA_VERSION_BOOTFS,
492 * or the pool is still being created (version == 0),
493 * the bootfs property cannot be set.
494 */
495 if (spa_version(spa) < SPA_VERSION_BOOTFS) {
496 error = SET_ERROR(ENOTSUP);
497 break;
498 }
499
500 /*
501 * Make sure the vdev config is bootable
502 */
503 if (!vdev_is_bootable(spa->spa_root_vdev)) {
504 error = SET_ERROR(ENOTSUP);
505 break;
506 }
507
508 reset_bootfs = 1;
509
510 error = nvpair_value_string(elem, &strval);
511
512 if (!error) {
513 objset_t *os;
514 uint64_t propval;
515
516 if (strval == NULL || strval[0] == '\0') {
517 objnum = zpool_prop_default_numeric(
518 ZPOOL_PROP_BOOTFS);
519 break;
520 }
521
522 if (error = dmu_objset_hold(strval, FTAG, &os))
523 break;
524
525 /*
526 * Must be ZPL, and its property settings
527 * must be supported by GRUB (compression
528 * is not gzip, and large blocks are not used).
529 */
530
531 if (dmu_objset_type(os) != DMU_OST_ZFS) {
532 error = SET_ERROR(ENOTSUP);
533 } else if ((error =
534 dsl_prop_get_int_ds(dmu_objset_ds(os),
535 zfs_prop_to_name(ZFS_PROP_COMPRESSION),
536 &propval)) == 0 &&
537 !BOOTFS_COMPRESS_VALID(propval)) {
538 error = SET_ERROR(ENOTSUP);
539 } else if ((error =
540 dsl_prop_get_int_ds(dmu_objset_ds(os),
541 zfs_prop_to_name(ZFS_PROP_RECORDSIZE),
542 &propval)) == 0 &&
543 propval > SPA_OLD_MAXBLOCKSIZE) {
544 error = SET_ERROR(ENOTSUP);
545 } else {
546 objnum = dmu_objset_id(os);
547 }
548 dmu_objset_rele(os, FTAG);
549 }
550 break;
551
552 case ZPOOL_PROP_FAILUREMODE:
553 error = nvpair_value_uint64(elem, &intval);
554 if (!error && (intval < ZIO_FAILURE_MODE_WAIT ||
555 intval > ZIO_FAILURE_MODE_PANIC))
556 error = SET_ERROR(EINVAL);
557
558 /*
559 * This is a special case which only occurs when
560 * the pool has completely failed. This allows
561 * the user to change the in-core failmode property
562 * without syncing it out to disk (I/Os might
563 * currently be blocked). We do this by returning
564 * EIO to the caller (spa_prop_set) to trick it
565 * into thinking we encountered a property validation
566 * error.
567 */
568 if (!error && spa_suspended(spa)) {
569 spa->spa_failmode = intval;
570 error = SET_ERROR(EIO);
571 }
572 break;
573
574 case ZPOOL_PROP_CACHEFILE:
575 if ((error = nvpair_value_string(elem, &strval)) != 0)
576 break;
577
578 if (strval[0] == '\0')
579 break;
580
581 if (strcmp(strval, "none") == 0)
582 break;
583
584 if (strval[0] != '/') {
585 error = SET_ERROR(EINVAL);
586 break;
587 }
588
589 slash = strrchr(strval, '/');
590 ASSERT(slash != NULL);
591
592 if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
593 strcmp(slash, "/..") == 0)
594 error = SET_ERROR(EINVAL);
595 break;
596
597 case ZPOOL_PROP_COMMENT:
598 if ((error = nvpair_value_string(elem, &strval)) != 0)
599 break;
600 for (check = strval; *check != '\0'; check++) {
601 /*
602 * The kernel doesn't have an easy isprint()
603 * check. For this kernel check, we merely
604 * check ASCII apart from DEL. Fix this if
605 * there is an easy-to-use kernel isprint().
606 */
607 if (*check >= 0x7f) {
608 error = SET_ERROR(EINVAL);
609 break;
610 }
611 }
612 if (strlen(strval) > ZPROP_MAX_COMMENT)
613 error = E2BIG;
614 break;
615
616 case ZPOOL_PROP_DEDUPDITTO:
617 if (spa_version(spa) < SPA_VERSION_DEDUP)
618 error = SET_ERROR(ENOTSUP);
619 else
620 error = nvpair_value_uint64(elem, &intval);
621 if (error == 0 &&
622 intval != 0 && intval < ZIO_DEDUPDITTO_MIN)
623 error = SET_ERROR(EINVAL);
624 break;
625 }
626
627 if (error)
628 break;
629 }
630
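	/*
	 * If bootfs was set, replace its string value with the dataset
	 * object number looked up above before the props are synced out.
	 */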
631 if (!error && reset_bootfs) {
632 error = nvlist_remove(props,
633 zpool_prop_to_name(ZPOOL_PROP_BOOTFS), DATA_TYPE_STRING);
634
635 if (!error) {
636 error = nvlist_add_uint64(props,
637 zpool_prop_to_name(ZPOOL_PROP_BOOTFS), objnum);
638 }
639 }
640
641 return (error);
642}
643
644void
645spa_configfile_set(spa_t *spa, nvlist_t *nvp, boolean_t need_sync)
646{
647 char *cachefile;
648 spa_config_dirent_t *dp;
649
650 if (nvlist_lookup_string(nvp, zpool_prop_to_name(ZPOOL_PROP_CACHEFILE),
651 &cachefile) != 0)
652 return;
653
654 dp = kmem_alloc(sizeof (spa_config_dirent_t),
655 KM_SLEEP);
656
657 if (cachefile[0] == '\0')
658 dp->scd_path = spa_strdup(spa_config_path);
659 else if (strcmp(cachefile, "none") == 0)
660 dp->scd_path = NULL;
661 else
662 dp->scd_path = spa_strdup(cachefile);
663
664 list_insert_head(&spa->spa_config_list, dp);
665 if (need_sync)
666 spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
667}
668
669int
670spa_prop_set(spa_t *spa, nvlist_t *nvp)
671{
672 int error;
673 nvpair_t *elem = NULL;
674 boolean_t need_sync = B_FALSE;
675
676 if ((error = spa_prop_validate(spa, nvp)) != 0)
677 return (error);
678
679 while ((elem = nvlist_next_nvpair(nvp, elem)) != NULL) {
680 zpool_prop_t prop = zpool_name_to_prop(nvpair_name(elem));
681
682 if (prop == ZPOOL_PROP_CACHEFILE ||
683 prop == ZPOOL_PROP_ALTROOT ||
684 prop == ZPOOL_PROP_READONLY)
685 continue;
686
687 if (prop == ZPOOL_PROP_VERSION || prop == ZPROP_INVAL) {
688 uint64_t ver;
689
690 if (prop == ZPOOL_PROP_VERSION) {
691 VERIFY(nvpair_value_uint64(elem, &ver) == 0);
692 } else {
693 ASSERT(zpool_prop_feature(nvpair_name(elem)));
694 ver = SPA_VERSION_FEATURES;
695 need_sync = B_TRUE;
696 }
697
698 /* Save time if the version is already set. */
699 if (ver == spa_version(spa))
700 continue;
701
702 /*
703 * In addition to the pool directory object, we might
704 * create the pool properties object, the features for
705 * read object, the features for write object, or the
706 * feature descriptions object.
707 */
708 error = dsl_sync_task(spa->spa_name, NULL,
709 spa_sync_version, &ver,
710 6, ZFS_SPACE_CHECK_RESERVED);
711 if (error)
712 return (error);
713 continue;
714 }
715
716 need_sync = B_TRUE;
717 break;
718 }
719
720 if (need_sync) {
721 return (dsl_sync_task(spa->spa_name, NULL, spa_sync_props,
722 nvp, 6, ZFS_SPACE_CHECK_RESERVED));
723 }
724
725 return (0);
726}
727
728/*
729 * If the bootfs property value is dsobj, clear it.
730 */
731void
732spa_prop_clear_bootfs(spa_t *spa, uint64_t dsobj, dmu_tx_t *tx)
733{
734 if (spa->spa_bootfs == dsobj && spa->spa_pool_props_object != 0) {
735 VERIFY(zap_remove(spa->spa_meta_objset,
736 spa->spa_pool_props_object,
737 zpool_prop_to_name(ZPOOL_PROP_BOOTFS), tx) == 0);
738 spa->spa_bootfs = 0;
739 }
740}
741
742/*ARGSUSED*/
743static int
744spa_change_guid_check(void *arg, dmu_tx_t *tx)
745{
746 uint64_t *newguid = arg;
747 spa_t *spa = dmu_tx_pool(tx)->dp_spa;
748 vdev_t *rvd = spa->spa_root_vdev;
749 uint64_t vdev_state;
750
751 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
752 vdev_state = rvd->vdev_state;
753 spa_config_exit(spa, SCL_STATE, FTAG);
754
755 if (vdev_state != VDEV_STATE_HEALTHY)
756 return (SET_ERROR(ENXIO));
757
758 ASSERT3U(spa_guid(spa), !=, *newguid);
759
760 return (0);
761}
762
763static void
764spa_change_guid_sync(void *arg, dmu_tx_t *tx)
765{
766 uint64_t *newguid = arg;
767 spa_t *spa = dmu_tx_pool(tx)->dp_spa;
768 uint64_t oldguid;
769 vdev_t *rvd = spa->spa_root_vdev;
770
771 oldguid = spa_guid(spa);
772
773 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
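	/*
	 * Update the root vdev's guid in place and adjust the cached guid
	 * sum by the same delta so the two stay consistent.
	 */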
774 rvd->vdev_guid = *newguid;
775 rvd->vdev_guid_sum += (*newguid - oldguid);
776 vdev_config_dirty(rvd);
777 spa_config_exit(spa, SCL_STATE, FTAG);
778
779 spa_history_log_internal(spa, "guid change", tx, "old=%llu new=%llu",
780 oldguid, *newguid);
781}
782
783/*
784 * Change the GUID for the pool. This is done so that we can later
785 * re-import a pool built from a clone of our own vdevs. We will modify
786 * the root vdev's guid, our own pool guid, and then mark all of our
787 * vdevs dirty. Note that we must make sure that all our vdevs are
788 * online when we do this, or else any vdevs that weren't present
789 * would be orphaned from our pool. We are also going to issue a
790 * sysevent to update any watchers.
791 */
792int
793spa_change_guid(spa_t *spa)
794{
795 int error;
796 uint64_t guid;
797
798 mutex_enter(&spa->spa_vdev_top_lock);
799 mutex_enter(&spa_namespace_lock);
800 guid = spa_generate_guid(NULL);
801
802 error = dsl_sync_task(spa->spa_name, spa_change_guid_check,
803 spa_change_guid_sync, &guid, 5, ZFS_SPACE_CHECK_RESERVED);
804
805 if (error == 0) {
806 spa_config_sync(spa, B_FALSE, B_TRUE);
807 spa_event_notify(spa, NULL, ESC_ZFS_POOL_REGUID);
808 }
809
810 mutex_exit(&spa_namespace_lock);
811 mutex_exit(&spa->spa_vdev_top_lock);
812
813 return (error);
814}
815
816/*
817 * ==========================================================================
818 * SPA state manipulation (open/create/destroy/import/export)
819 * ==========================================================================
820 */
821
822static int
823spa_error_entry_compare(const void *a, const void *b)
824{
825 spa_error_entry_t *sa = (spa_error_entry_t *)a;
826 spa_error_entry_t *sb = (spa_error_entry_t *)b;
827 int ret;
828
829 ret = bcmp(&sa->se_bookmark, &sb->se_bookmark,
830 sizeof (zbookmark_phys_t));
831
832 if (ret < 0)
833 return (-1);
834 else if (ret > 0)
835 return (1);
836 else
837 return (0);
838}
839
840/*
841 * Utility function which retrieves copies of the current logs and
842 * re-initializes them in the process.
843 */
844void
845spa_get_errlists(spa_t *spa, avl_tree_t *last, avl_tree_t *scrub)
846{
847 ASSERT(MUTEX_HELD(&spa->spa_errlist_lock));
848
849 bcopy(&spa->spa_errlist_last, last, sizeof (avl_tree_t));
850 bcopy(&spa->spa_errlist_scrub, scrub, sizeof (avl_tree_t));
851
852 avl_create(&spa->spa_errlist_scrub,
853 spa_error_entry_compare, sizeof (spa_error_entry_t),
854 offsetof(spa_error_entry_t, se_avl));
855 avl_create(&spa->spa_errlist_last,
856 spa_error_entry_compare, sizeof (spa_error_entry_t),
857 offsetof(spa_error_entry_t, se_avl));
858}
859
860static void
861spa_taskqs_init(spa_t *spa, zio_type_t t, zio_taskq_type_t q)
862{
863 const zio_taskq_info_t *ztip = &zio_taskqs[t][q];
864 enum zti_modes mode = ztip->zti_mode;
865 uint_t value = ztip->zti_value;
866 uint_t count = ztip->zti_count;
867 spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
868 char name[32];
869 uint_t flags = 0;
870 boolean_t batch = B_FALSE;
871
872 if (mode == ZTI_MODE_NULL) {
873 tqs->stqs_count = 0;
874 tqs->stqs_taskq = NULL;
875 return;
876 }
877
878 ASSERT3U(count, >, 0);
879
880 tqs->stqs_count = count;
881 tqs->stqs_taskq = kmem_alloc(count * sizeof (taskq_t *), KM_SLEEP);
882
883 switch (mode) {
884 case ZTI_MODE_FIXED:
885 ASSERT3U(value, >=, 1);
886 value = MAX(value, 1);
887 break;
888
889 case ZTI_MODE_BATCH:
890 batch = B_TRUE;
891 flags |= TASKQ_THREADS_CPU_PCT;
892 value = zio_taskq_batch_pct;
893 break;
894
895 default:
896 panic("unrecognized mode for %s_%s taskq (%u:%u) in "
897 "spa_activate()",
898 zio_type_name[t], zio_taskq_types[q], mode, value);
899 break;
900 }
901
902 for (uint_t i = 0; i < count; i++) {
903 taskq_t *tq;
904
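		/*
		 * Name each taskq "<type>_<queue>", adding a numeric suffix
		 * when a type/queue pair has more than one taskq.
		 */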
905 if (count > 1) {
906 (void) snprintf(name, sizeof (name), "%s_%s_%u",
907 zio_type_name[t], zio_taskq_types[q], i);
908 } else {
909 (void) snprintf(name, sizeof (name), "%s_%s",
910 zio_type_name[t], zio_taskq_types[q]);
911 }
912
913#ifdef SYSDC
914 if (zio_taskq_sysdc && spa->spa_proc != &p0) {
915 if (batch)
916 flags |= TASKQ_DC_BATCH;
917
918 tq = taskq_create_sysdc(name, value, 50, INT_MAX,
919 spa->spa_proc, zio_taskq_basedc, flags);
920 } else {
921#endif
922 pri_t pri = maxclsyspri;
923 /*
924 * The write issue taskq can be extremely CPU
925 * intensive. Run it at slightly lower priority
926 * than the other taskqs.
927 * FreeBSD notes:
928 * - numerically higher priorities are lower priorities;
929 * - if priorities divided by four (RQ_PPQ) are equal
930 * then a difference between them is insignificant.
931 */
932 if (t == ZIO_TYPE_WRITE && q == ZIO_TASKQ_ISSUE)
933#ifdef illumos
934 pri--;
935#else
936 pri += 4;
937#endif
938
939 tq = taskq_create_proc(name, value, pri, 50,
940 INT_MAX, spa->spa_proc, flags);
941#ifdef SYSDC
942 }
943#endif
944
945 tqs->stqs_taskq[i] = tq;
946 }
947}
948
949static void
950spa_taskqs_fini(spa_t *spa, zio_type_t t, zio_taskq_type_t q)
951{
952 spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
953
954 if (tqs->stqs_taskq == NULL) {
955 ASSERT0(tqs->stqs_count);
956 return;
957 }
958
959 for (uint_t i = 0; i < tqs->stqs_count; i++) {
960 ASSERT3P(tqs->stqs_taskq[i], !=, NULL);
961 taskq_destroy(tqs->stqs_taskq[i]);
962 }
963
964 kmem_free(tqs->stqs_taskq, tqs->stqs_count * sizeof (taskq_t *));
965 tqs->stqs_taskq = NULL;
966}
967
968/*
969 * Dispatch a task to the appropriate taskq for the ZFS I/O type and priority.
970 * Note that a type may have multiple discrete taskqs to avoid lock contention
971 * on the taskq itself. In that case we choose which taskq at random by using
972 * the low bits of cpu_ticks() (or gethrtime() outside the kernel).
973 */
974void
975spa_taskq_dispatch_ent(spa_t *spa, zio_type_t t, zio_taskq_type_t q,
976 task_func_t *func, void *arg, uint_t flags, taskq_ent_t *ent)
977{
978 spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
979 taskq_t *tq;
980
981 ASSERT3P(tqs->stqs_taskq, !=, NULL);
982 ASSERT3U(tqs->stqs_count, !=, 0);
983
984 if (tqs->stqs_count == 1) {
985 tq = tqs->stqs_taskq[0];
986 } else {
987#ifdef _KERNEL
988 tq = tqs->stqs_taskq[cpu_ticks() % tqs->stqs_count];
989#else
990 tq = tqs->stqs_taskq[gethrtime() % tqs->stqs_count];
991#endif
992 }
993
994 taskq_dispatch_ent(tq, func, arg, flags, ent);
995}
996
997static void
998spa_create_zio_taskqs(spa_t *spa)
999{
1000 for (int t = 0; t < ZIO_TYPES; t++) {
1001 for (int q = 0; q < ZIO_TASKQ_TYPES; q++) {
1002 spa_taskqs_init(spa, t, q);
1003 }
1004 }
1005}
1006
1007#ifdef _KERNEL
1008#ifdef SPA_PROCESS
1009static void
1010spa_thread(void *arg)
1011{
1012 callb_cpr_t cprinfo;
1013
1014 spa_t *spa = arg;
1015 user_t *pu = PTOU(curproc);
1016
1017 CALLB_CPR_INIT(&cprinfo, &spa->spa_proc_lock, callb_generic_cpr,
1018 spa->spa_name);
1019
1020 ASSERT(curproc != &p0);
1021 (void) snprintf(pu->u_psargs, sizeof (pu->u_psargs),
1022 "zpool-%s", spa->spa_name);
1023 (void) strlcpy(pu->u_comm, pu->u_psargs, sizeof (pu->u_comm));
1024
1025#ifdef PSRSET_BIND
1026 /* bind this thread to the requested psrset */
1027 if (zio_taskq_psrset_bind != PS_NONE) {
1028 pool_lock();
1029 mutex_enter(&cpu_lock);
1030 mutex_enter(&pidlock);
1031 mutex_enter(&curproc->p_lock);
1032
1033 if (cpupart_bind_thread(curthread, zio_taskq_psrset_bind,
1034 0, NULL, NULL) == 0) {
1035 curthread->t_bind_pset = zio_taskq_psrset_bind;
1036 } else {
1037 cmn_err(CE_WARN,
1038 "Couldn't bind process for zfs pool \"%s\" to "
1039 "pset %d\n", spa->spa_name, zio_taskq_psrset_bind);
1040 }
1041
1042 mutex_exit(&curproc->p_lock);
1043 mutex_exit(&pidlock);
1044 mutex_exit(&cpu_lock);
1045 pool_unlock();
1046 }
1047#endif
1048
1049#ifdef SYSDC
1050 if (zio_taskq_sysdc) {
1051 sysdc_thread_enter(curthread, 100, 0);
1052 }
1053#endif
1054
1055 spa->spa_proc = curproc;
1056 spa->spa_did = curthread->t_did;
1057
1058 spa_create_zio_taskqs(spa);
1059
1060 mutex_enter(&spa->spa_proc_lock);
1061 ASSERT(spa->spa_proc_state == SPA_PROC_CREATED);
1062
1063 spa->spa_proc_state = SPA_PROC_ACTIVE;
1064 cv_broadcast(&spa->spa_proc_cv);
1065
1066 CALLB_CPR_SAFE_BEGIN(&cprinfo);
1067 while (spa->spa_proc_state == SPA_PROC_ACTIVE)
1068 cv_wait(&spa->spa_proc_cv, &spa->spa_proc_lock);
1069 CALLB_CPR_SAFE_END(&cprinfo, &spa->spa_proc_lock);
1070
1071 ASSERT(spa->spa_proc_state == SPA_PROC_DEACTIVATE);
1072 spa->spa_proc_state = SPA_PROC_GONE;
1073 spa->spa_proc = &p0;
1074 cv_broadcast(&spa->spa_proc_cv);
1075 CALLB_CPR_EXIT(&cprinfo); /* drops spa_proc_lock */
1076
1077 mutex_enter(&curproc->p_lock);
1078 lwp_exit();
1079}
1080#endif /* SPA_PROCESS */
1081#endif
1082
1083/*
1084 * Activate an uninitialized pool.
1085 */
1086static void
1087spa_activate(spa_t *spa, int mode)
1088{
1089 ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);
1090
1091 spa->spa_state = POOL_STATE_ACTIVE;
1092 spa->spa_mode = mode;
1093
1094 spa->spa_normal_class = metaslab_class_create(spa, zfs_metaslab_ops);
1095 spa->spa_log_class = metaslab_class_create(spa, zfs_metaslab_ops);
1096
1097 /* Try to create a covering process */
1098 mutex_enter(&spa->spa_proc_lock);
1099 ASSERT(spa->spa_proc_state == SPA_PROC_NONE);
1100 ASSERT(spa->spa_proc == &p0);
1101 spa->spa_did = 0;
1102
1103#ifdef SPA_PROCESS
1104 /* Only create a process if we're going to be around a while. */
1105 if (spa_create_process && strcmp(spa->spa_name, TRYIMPORT_NAME) != 0) {
1106 if (newproc(spa_thread, (caddr_t)spa, syscid, maxclsyspri,
1107 NULL, 0) == 0) {
1108 spa->spa_proc_state = SPA_PROC_CREATED;
1109 while (spa->spa_proc_state == SPA_PROC_CREATED) {
1110 cv_wait(&spa->spa_proc_cv,
1111 &spa->spa_proc_lock);
1112 }
1113 ASSERT(spa->spa_proc_state == SPA_PROC_ACTIVE);
1114 ASSERT(spa->spa_proc != &p0);
1115 ASSERT(spa->spa_did != 0);
1116 } else {
1117#ifdef _KERNEL
1118 cmn_err(CE_WARN,
1119 "Couldn't create process for zfs pool \"%s\"\n",
1120 spa->spa_name);
1121#endif
1122 }
1123 }
1124#endif /* SPA_PROCESS */
1125 mutex_exit(&spa->spa_proc_lock);
1126
1127 /* If we didn't create a process, we need to create our taskqs. */
1128 ASSERT(spa->spa_proc == &p0);
1129 if (spa->spa_proc == &p0) {
1130 spa_create_zio_taskqs(spa);
1131 }
1132
1133 /*
1134 * Start TRIM thread.
1135 */
1136 trim_thread_create(spa);
1137
1138 list_create(&spa->spa_config_dirty_list, sizeof (vdev_t),
1139 offsetof(vdev_t, vdev_config_dirty_node));
1140 list_create(&spa->spa_evicting_os_list, sizeof (objset_t),
1141 offsetof(objset_t, os_evicting_node));
1142 list_create(&spa->spa_state_dirty_list, sizeof (vdev_t),
1143 offsetof(vdev_t, vdev_state_dirty_node));
1144
1145 txg_list_create(&spa->spa_vdev_txg_list, spa,
1146 offsetof(struct vdev, vdev_txg_node));
1147
1148 avl_create(&spa->spa_errlist_scrub,
1149 spa_error_entry_compare, sizeof (spa_error_entry_t),
1150 offsetof(spa_error_entry_t, se_avl));
1151 avl_create(&spa->spa_errlist_last,
1152 spa_error_entry_compare, sizeof (spa_error_entry_t),
1153 offsetof(spa_error_entry_t, se_avl));
1154}
1155
1156/*
1157 * Opposite of spa_activate().
1158 */
1159static void
1160spa_deactivate(spa_t *spa)
1161{
1162 ASSERT(spa->spa_sync_on == B_FALSE);
1163 ASSERT(spa->spa_dsl_pool == NULL);
1164 ASSERT(spa->spa_root_vdev == NULL);
1165 ASSERT(spa->spa_async_zio_root == NULL);
1166 ASSERT(spa->spa_state != POOL_STATE_UNINITIALIZED);
1167
1168 /*
1169 * Stop TRIM thread in case spa_unload() wasn't called directly
1170 * before spa_deactivate().
1171 */
1172 trim_thread_destroy(spa);
1173
1174 spa_evicting_os_wait(spa);
1175
1176 txg_list_destroy(&spa->spa_vdev_txg_list);
1177
1178 list_destroy(&spa->spa_config_dirty_list);
1179 list_destroy(&spa->spa_evicting_os_list);
1180 list_destroy(&spa->spa_state_dirty_list);
1181
1182 for (int t = 0; t < ZIO_TYPES; t++) {
1183 for (int q = 0; q < ZIO_TASKQ_TYPES; q++) {
1184 spa_taskqs_fini(spa, t, q);
1185 }
1186 }
1187
1188 metaslab_class_destroy(spa->spa_normal_class);
1189 spa->spa_normal_class = NULL;
1190
1191 metaslab_class_destroy(spa->spa_log_class);
1192 spa->spa_log_class = NULL;
1193
1194 /*
1195 * If this was part of an import or the open otherwise failed, we may
1196 * still have errors left in the queues. Empty them just in case.
1197 */
1198 spa_errlog_drain(spa);
1199
1200 avl_destroy(&spa->spa_errlist_scrub);
1201 avl_destroy(&spa->spa_errlist_last);
1202
1203 spa->spa_state = POOL_STATE_UNINITIALIZED;
1204
1205 mutex_enter(&spa->spa_proc_lock);
1206 if (spa->spa_proc_state != SPA_PROC_NONE) {
1207 ASSERT(spa->spa_proc_state == SPA_PROC_ACTIVE);
1208 spa->spa_proc_state = SPA_PROC_DEACTIVATE;
1209 cv_broadcast(&spa->spa_proc_cv);
1210 while (spa->spa_proc_state == SPA_PROC_DEACTIVATE) {
1211 ASSERT(spa->spa_proc != &p0);
1212 cv_wait(&spa->spa_proc_cv, &spa->spa_proc_lock);
1213 }
1214 ASSERT(spa->spa_proc_state == SPA_PROC_GONE);
1215 spa->spa_proc_state = SPA_PROC_NONE;
1216 }
1217 ASSERT(spa->spa_proc == &p0);
1218 mutex_exit(&spa->spa_proc_lock);
1219
1220#ifdef SPA_PROCESS
1221 /*
1222 * We want to make sure spa_thread() has actually exited the ZFS
1223 * module, so that the module can't be unloaded out from underneath
1224 * it.
1225 */
1226 if (spa->spa_did != 0) {
1227 thread_join(spa->spa_did);
1228 spa->spa_did = 0;
1229 }
1230#endif /* SPA_PROCESS */
1231}
1232
1233/*
1234 * Verify a pool configuration, and construct the vdev tree appropriately. This
1235 * will create all the necessary vdevs in the appropriate layout, with each vdev
1236 * in the CLOSED state. This will prep the pool before open/creation/import.
1237 * All vdev validation is done by the vdev_alloc() routine.
1238 */
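/*
 * The parse is recursive: interior vdevs descend into their
 * ZPOOL_CONFIG_CHILDREN arrays, while leaf vdevs return immediately.
 */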
1239static int
1240spa_config_parse(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent,
1241 uint_t id, int atype)
1242{
1243 nvlist_t **child;
1244 uint_t children;
1245 int error;
1246
1247 if ((error = vdev_alloc(spa, vdp, nv, parent, id, atype)) != 0)
1248 return (error);
1249
1250 if ((*vdp)->vdev_ops->vdev_op_leaf)
1251 return (0);
1252
1253 error = nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
1254 &child, &children);
1255
1256 if (error == ENOENT)
1257 return (0);
1258
1259 if (error) {
1260 vdev_free(*vdp);
1261 *vdp = NULL;
1262 return (SET_ERROR(EINVAL));
1263 }
1264
1265 for (int c = 0; c < children; c++) {
1266 vdev_t *vd;
1267 if ((error = spa_config_parse(spa, &vd, child[c], *vdp, c,
1268 atype)) != 0) {
1269 vdev_free(*vdp);
1270 *vdp = NULL;
1271 return (error);
1272 }
1273 }
1274
1275 ASSERT(*vdp != NULL);
1276
1277 return (0);
1278}
1279
1280/*
1281 * Opposite of spa_load().
1282 */
1283static void
1284spa_unload(spa_t *spa)
1285{
1286 int i;
1287
1288 ASSERT(MUTEX_HELD(&spa_namespace_lock));
1289
1290 /*
1291 * Stop TRIM thread.
1292 */
1293 trim_thread_destroy(spa);
1294
1295 /*
1296 * Stop async tasks.
1297 */
1298 spa_async_suspend(spa);
1299
1300 /*
1301 * Stop syncing.
1302 */
1303 if (spa->spa_sync_on) {
1304 txg_sync_stop(spa->spa_dsl_pool);
1305 spa->spa_sync_on = B_FALSE;
1306 }
1307
1308 /*
1309 * Even though vdev_free() also calls vdev_metaslab_fini, we need
1310 * to call it earlier, before we wait for async i/o to complete.
1311 * This ensures that there is no async metaslab prefetching, by
1312 * calling taskq_wait(mg_taskq).
1313 */
1314 if (spa->spa_root_vdev != NULL) {
1315 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
1316 for (int c = 0; c < spa->spa_root_vdev->vdev_children; c++)
1317 vdev_metaslab_fini(spa->spa_root_vdev->vdev_child[c]);
1318 spa_config_exit(spa, SCL_ALL, FTAG);
1319 }
1320
1321 /*
1322 * Wait for any outstanding async I/O to complete.
1323 */
1324 if (spa->spa_async_zio_root != NULL) {
1325 for (int i = 0; i < max_ncpus; i++)
1326 (void) zio_wait(spa->spa_async_zio_root[i]);
1327 kmem_free(spa->spa_async_zio_root, max_ncpus * sizeof (void *));
1328 spa->spa_async_zio_root = NULL;
1329 }
1330
1331 bpobj_close(&spa->spa_deferred_bpobj);
1332
1333 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
1334
1335 /*
1336 * Close all vdevs.
1337 */
1338 if (spa->spa_root_vdev)
1339 vdev_free(spa->spa_root_vdev);
1340 ASSERT(spa->spa_root_vdev == NULL);
1341
1342 /*
1343 * Close the dsl pool.
1344 */
1345 if (spa->spa_dsl_pool) {
1346 dsl_pool_close(spa->spa_dsl_pool);
1347 spa->spa_dsl_pool = NULL;
1348 spa->spa_meta_objset = NULL;
1349 }
1350
1351 ddt_unload(spa);
1352
1353 /*
1354 * Drop and purge level 2 cache
1355 */
1356 spa_l2cache_drop(spa);
1357
1358 for (i = 0; i < spa->spa_spares.sav_count; i++)
1359 vdev_free(spa->spa_spares.sav_vdevs[i]);
1360 if (spa->spa_spares.sav_vdevs) {
1361 kmem_free(spa->spa_spares.sav_vdevs,
1362 spa->spa_spares.sav_count * sizeof (void *));
1363 spa->spa_spares.sav_vdevs = NULL;
1364 }
1365 if (spa->spa_spares.sav_config) {
1366 nvlist_free(spa->spa_spares.sav_config);
1367 spa->spa_spares.sav_config = NULL;
1368 }
1369 spa->spa_spares.sav_count = 0;
1370
1371 for (i = 0; i < spa->spa_l2cache.sav_count; i++) {
1372 vdev_clear_stats(spa->spa_l2cache.sav_vdevs[i]);
1373 vdev_free(spa->spa_l2cache.sav_vdevs[i]);
1374 }
1375 if (spa->spa_l2cache.sav_vdevs) {
1376 kmem_free(spa->spa_l2cache.sav_vdevs,
1377 spa->spa_l2cache.sav_count * sizeof (void *));
1378 spa->spa_l2cache.sav_vdevs = NULL;
1379 }
1380 if (spa->spa_l2cache.sav_config) {
1381 nvlist_free(spa->spa_l2cache.sav_config);
1382 spa->spa_l2cache.sav_config = NULL;
1383 }
1384 spa->spa_l2cache.sav_count = 0;
1385
1386 spa->spa_async_suspended = 0;
1387
1388 if (spa->spa_comment != NULL) {
1389 spa_strfree(spa->spa_comment);
1390 spa->spa_comment = NULL;
1391 }
1392
1393 spa_config_exit(spa, SCL_ALL, FTAG);
1394}
1395
1396/*
1397 * Load (or re-load) the current list of vdevs describing the active spares for
1398 * this pool. When this is called, we have some form of basic information in
1399 * 'spa_spares.sav_config'. We parse this into vdevs, try to open them, and
1400 * then re-generate a more complete list including status information.
1401 */
1402static void
1403spa_load_spares(spa_t *spa)
1404{
1405 nvlist_t **spares;
1406 uint_t nspares;
1407 int i;
1408 vdev_t *vd, *tvd;
1409
1410 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
1411
1412 /*
1413 * First, close and free any existing spare vdevs.
1414 */
1415 for (i = 0; i < spa->spa_spares.sav_count; i++) {
1416 vd = spa->spa_spares.sav_vdevs[i];
1417
1418 /* Undo the call to spa_activate() below */
1419 if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
1420 B_FALSE)) != NULL && tvd->vdev_isspare)
1421 spa_spare_remove(tvd);
1422 vdev_close(vd);
1423 vdev_free(vd);
1424 }
1425
1426 if (spa->spa_spares.sav_vdevs)
1427 kmem_free(spa->spa_spares.sav_vdevs,
1428 spa->spa_spares.sav_count * sizeof (void *));
1429
1430 if (spa->spa_spares.sav_config == NULL)
1431 nspares = 0;
1432 else
1433 VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
1434 ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);
1435
1436 spa->spa_spares.sav_count = (int)nspares;
1437 spa->spa_spares.sav_vdevs = NULL;
1438
1439 if (nspares == 0)
1440 return;
1441
1442 /*
1443 * Construct the array of vdevs, opening them to get status in the
1444 * process. For each spare, there are potentially two different vdev_t
1445 * structures associated with it: one in the list of spares (used only
1446 * for basic validation purposes) and one in the active vdev
1447 * configuration (if it's spared in). During this phase we open and
1448 * validate each vdev on the spare list. If the vdev also exists in the
1449 * active configuration, then we also mark this vdev as an active spare.
1450 */
1451 spa->spa_spares.sav_vdevs = kmem_alloc(nspares * sizeof (void *),
1452 KM_SLEEP);
1453 for (i = 0; i < spa->spa_spares.sav_count; i++) {
1454 VERIFY(spa_config_parse(spa, &vd, spares[i], NULL, 0,
1455 VDEV_ALLOC_SPARE) == 0);
1456 ASSERT(vd != NULL);
1457
1458 spa->spa_spares.sav_vdevs[i] = vd;
1459
1460 if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
1461 B_FALSE)) != NULL) {
1462 if (!tvd->vdev_isspare)
1463 spa_spare_add(tvd);
1464
1465 /*
1466 * We only mark the spare active if we were successfully
1467 * able to load the vdev. Otherwise, importing a pool
1468 * with a bad active spare would result in strange
1469 * behavior, because multiple pools would think the spare
1470 * is actively in use.
1471 *
1472 * There is a vulnerability here to an equally bizarre
1473 * circumstance, where a dead active spare is later
1474 * brought back to life (onlined or otherwise). Given
1475 * the rarity of this scenario, and the extra complexity
1476 * it adds, we ignore the possibility.
1477 */
1478 if (!vdev_is_dead(tvd))
1479 spa_spare_activate(tvd);
1480 }
1481
1482 vd->vdev_top = vd;
1483 vd->vdev_aux = &spa->spa_spares;
1484
1485 if (vdev_open(vd) != 0)
1486 continue;
1487
1488 if (vdev_validate_aux(vd) == 0)
1489 spa_spare_add(vd);
1490 }
1491
1492 /*
1493 * Recompute the stashed list of spares, with status information
1494 * this time.
1495 */
1496 VERIFY(nvlist_remove(spa->spa_spares.sav_config, ZPOOL_CONFIG_SPARES,
1497 DATA_TYPE_NVLIST_ARRAY) == 0);
1498
1499 spares = kmem_alloc(spa->spa_spares.sav_count * sizeof (void *),
1500 KM_SLEEP);
1501 for (i = 0; i < spa->spa_spares.sav_count; i++)
1502 spares[i] = vdev_config_generate(spa,
1503 spa->spa_spares.sav_vdevs[i], B_TRUE, VDEV_CONFIG_SPARE);
1504 VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
1505 ZPOOL_CONFIG_SPARES, spares, spa->spa_spares.sav_count) == 0);
1506 for (i = 0; i < spa->spa_spares.sav_count; i++)
1507 nvlist_free(spares[i]);
1508 kmem_free(spares, spa->spa_spares.sav_count * sizeof (void *));
1509}
1510
1511/*
1512 * Load (or re-load) the current list of vdevs describing the active l2cache for
1513 * this pool. When this is called, we have some form of basic information in
1514 * 'spa_l2cache.sav_config'. We parse this into vdevs, try to open them, and
1515 * then re-generate a more complete list including status information.
1516 * Devices which are already active have their details maintained, and are
1517 * not re-opened.
1518 */
1519static void
1520spa_load_l2cache(spa_t *spa)
1521{
1522 nvlist_t **l2cache;
1523 uint_t nl2cache;
1524 int i, j, oldnvdevs;
1525 uint64_t guid;
1526 vdev_t *vd, **oldvdevs, **newvdevs;
1527 spa_aux_vdev_t *sav = &spa->spa_l2cache;
1528
1529 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
1530
1531 if (sav->sav_config != NULL) {
1532 VERIFY(nvlist_lookup_nvlist_array(sav->sav_config,
1533 ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
1534 newvdevs = kmem_alloc(nl2cache * sizeof (void *), KM_SLEEP);
1535 } else {
1536 nl2cache = 0;
1537 newvdevs = NULL;
1538 }
1539
1540 oldvdevs = sav->sav_vdevs;
1541 oldnvdevs = sav->sav_count;
1542 sav->sav_vdevs = NULL;
1543 sav->sav_count = 0;
1544
1545 /*
1546 * Process new nvlist of vdevs.
1547 */
1548 for (i = 0; i < nl2cache; i++) {
1549 VERIFY(nvlist_lookup_uint64(l2cache[i], ZPOOL_CONFIG_GUID,
1550 &guid) == 0);
1551
1552 newvdevs[i] = NULL;
1553 for (j = 0; j < oldnvdevs; j++) {
1554 vd = oldvdevs[j];
1555 if (vd != NULL && guid == vd->vdev_guid) {
1556 /*
1557 * Retain previous vdev for add/remove ops.
1558 */
1559 newvdevs[i] = vd;
1560 oldvdevs[j] = NULL;
1561 break;
1562 }
1563 }
1564
1565 if (newvdevs[i] == NULL) {
1566 /*
1567 * Create new vdev
1568 */
1569 VERIFY(spa_config_parse(spa, &vd, l2cache[i], NULL, 0,
1570 VDEV_ALLOC_L2CACHE) == 0);
1571 ASSERT(vd != NULL);
1572 newvdevs[i] = vd;
1573
1574 /*
1575 * Commit this vdev as an l2cache device,
1576 * even if it fails to open.
1577 */
1578 spa_l2cache_add(vd);
1579
1580 vd->vdev_top = vd;
1581 vd->vdev_aux = sav;
1582
1583 spa_l2cache_activate(vd);
1584
1585 if (vdev_open(vd) != 0)
1586 continue;
1587
1588 (void) vdev_validate_aux(vd);
1589
1590 if (!vdev_is_dead(vd))
1591 l2arc_add_vdev(spa, vd);
1592 }
1593 }
1594
1595 /*
1596 * Purge vdevs that were dropped
1597 */
1598 for (i = 0; i < oldnvdevs; i++) {
1599 uint64_t pool;
1600
1601 vd = oldvdevs[i];
1602 if (vd != NULL) {
1603 ASSERT(vd->vdev_isl2cache);
1604
1605 if (spa_l2cache_exists(vd->vdev_guid, &pool) &&
1606 pool != 0ULL && l2arc_vdev_present(vd))
1607 l2arc_remove_vdev(vd);
1608 vdev_clear_stats(vd);
1609 vdev_free(vd);
1610 }
1611 }
1612
1613 if (oldvdevs)
1614 kmem_free(oldvdevs, oldnvdevs * sizeof (void *));
1615
1616 if (sav->sav_config == NULL)
1617 goto out;
1618
1619 sav->sav_vdevs = newvdevs;
1620 sav->sav_count = (int)nl2cache;
1621
1622 /*
1623 * Recompute the stashed list of l2cache devices, with status
1624 * information this time.
1625 */
1626 VERIFY(nvlist_remove(sav->sav_config, ZPOOL_CONFIG_L2CACHE,
1627 DATA_TYPE_NVLIST_ARRAY) == 0);
1628
1629 l2cache = kmem_alloc(sav->sav_count * sizeof (void *), KM_SLEEP);
1630 for (i = 0; i < sav->sav_count; i++)
1631 l2cache[i] = vdev_config_generate(spa,
1632 sav->sav_vdevs[i], B_TRUE, VDEV_CONFIG_L2CACHE);
1633 VERIFY(nvlist_add_nvlist_array(sav->sav_config,
1634 ZPOOL_CONFIG_L2CACHE, l2cache, sav->sav_count) == 0);
1635out:
1636 for (i = 0; i < sav->sav_count; i++)
1637 nvlist_free(l2cache[i]);
1638 if (sav->sav_count)
1639 kmem_free(l2cache, sav->sav_count * sizeof (void *));
1640}
1641
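/*
 * Read a packed nvlist out of the MOS.  The bonus buffer of 'obj' holds the
 * packed size, and the object data holds the packed nvlist itself; on
 * success the unpacked nvlist is returned in 'value'.
 */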
1642static int
1643load_nvlist(spa_t *spa, uint64_t obj, nvlist_t **value)
1644{
1645 dmu_buf_t *db;
1646 char *packed = NULL;
1647 size_t nvsize = 0;
1648 int error;
1649 *value = NULL;
1650
1651 error = dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db);
1652 if (error != 0)
1653 return (error);
1654
1655 nvsize = *(uint64_t *)db->db_data;
1656 dmu_buf_rele(db, FTAG);
1657
1658 packed = kmem_alloc(nvsize, KM_SLEEP);
1659 error = dmu_read(spa->spa_meta_objset, obj, 0, nvsize, packed,
1660 DMU_READ_PREFETCH);
1661 if (error == 0)
1662 error = nvlist_unpack(packed, nvsize, value, 0);
1663 kmem_free(packed, nvsize);
1664
1665 return (error);
1666}
1667
1668/*
1669 * Checks to see if the given vdev could not be opened, in which case we post a
1670 * sysevent to notify the autoreplace code that the device has been removed.
1671 */
1672static void
1673spa_check_removed(vdev_t *vd)
1674{
1675 for (int c = 0; c < vd->vdev_children; c++)
1676 spa_check_removed(vd->vdev_child[c]);
1677
1678 if (vd->vdev_ops->vdev_op_leaf && vdev_is_dead(vd) &&
1679 !vd->vdev_ishole) {
1680 zfs_post_autoreplace(vd->vdev_spa, vd);
1681 spa_event_notify(vd->vdev_spa, vd, ESC_ZFS_VDEV_CHECK);
1682 }
1683}
1684
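/*
 * Copy the per-vdev ZAP object numbers from the MOS-derived vdev tree (mvd)
 * into the matching vdevs of the in-core tree (vd); the two trees are
 * expected to have the same shape.
 */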
1685static void
1686spa_config_valid_zaps(vdev_t *vd, vdev_t *mvd)
1687{
1688 ASSERT3U(vd->vdev_children, ==, mvd->vdev_children);
1689
1690 vd->vdev_top_zap = mvd->vdev_top_zap;
1691 vd->vdev_leaf_zap = mvd->vdev_leaf_zap;
1692
1693 for (uint64_t i = 0; i < vd->vdev_children; i++) {
1694 spa_config_valid_zaps(vd->vdev_child[i], mvd->vdev_child[i]);
1695 }
1696}
1697
1698/*
1699 * Validate the current config against the MOS config
1700 */
1701static boolean_t
1702spa_config_valid(spa_t *spa, nvlist_t *config)
1703{
1704 vdev_t *mrvd, *rvd = spa->spa_root_vdev;
1705 nvlist_t *nv;
1706
1707 VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nv) == 0);
1708
1709 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
1710 VERIFY(spa_config_parse(spa, &mrvd, nv, NULL, 0, VDEV_ALLOC_LOAD) == 0);
1711
1712 ASSERT3U(rvd->vdev_children, ==, mrvd->vdev_children);
1713
1714 /*
1715 * If we're doing a normal import, then build up any additional
1716 * diagnostic information about missing devices in this config.
1717 * We'll pass this up to the user for further processing.
1718 */
1719 if (!(spa->spa_import_flags & ZFS_IMPORT_MISSING_LOG)) {
1720 nvlist_t **child, *nv;
1721 uint64_t idx = 0;
1722
1723 child = kmem_alloc(rvd->vdev_children * sizeof (nvlist_t **),
1724 KM_SLEEP);
1725 VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);
1726
1727 for (int c = 0; c < rvd->vdev_children; c++) {
1728 vdev_t *tvd = rvd->vdev_child[c];
1729 vdev_t *mtvd = mrvd->vdev_child[c];
1730
1731 if (tvd->vdev_ops == &vdev_missing_ops &&
1732 mtvd->vdev_ops != &vdev_missing_ops &&
1733 mtvd->vdev_islog)
1734 child[idx++] = vdev_config_generate(spa, mtvd,
1735 B_FALSE, 0);
1736 }
1737
1738 if (idx) {
1739 VERIFY(nvlist_add_nvlist_array(nv,
1740 ZPOOL_CONFIG_CHILDREN, child, idx) == 0);
1741 VERIFY(nvlist_add_nvlist(spa->spa_load_info,
1742 ZPOOL_CONFIG_MISSING_DEVICES, nv) == 0);
1743
1744 for (int i = 0; i < idx; i++)
1745 nvlist_free(child[i]);
1746 }
1747 nvlist_free(nv);
1748 kmem_free(child, rvd->vdev_children * sizeof (char **));
1749 }
1750
1751 /*
1752 * Compare the root vdev tree with the information we have
1753 * from the MOS config (mrvd). Check each top-level vdev
1754 * with the corresponding MOS config top-level (mtvd).
1755 */
1756 for (int c = 0; c < rvd->vdev_children; c++) {
1757 vdev_t *tvd = rvd->vdev_child[c];
1758 vdev_t *mtvd = mrvd->vdev_child[c];
1759
1760 /*
1761 * Resolve any "missing" vdevs in the current configuration.
1762 * If we find that the MOS config has more accurate information
1763 * about the top-level vdev, then use that vdev instead.
1764 */
1765 if (tvd->vdev_ops == &vdev_missing_ops &&
1766 mtvd->vdev_ops != &vdev_missing_ops) {
1767
1768 if (!(spa->spa_import_flags & ZFS_IMPORT_MISSING_LOG))
1769 continue;
1770
1771 /*
1772 * Device specific actions.
1773 */
1774 if (mtvd->vdev_islog) {
1775 spa_set_log_state(spa, SPA_LOG_CLEAR);
1776 } else {
1777 /*
1778 * XXX - once we have 'readonly' pool
1779 * support we should be able to handle
1780 * missing data devices by transitioning
1781 * the pool to readonly.
1782 */
1783 continue;
1784 }
1785
1786 /*
1787 * Swap the missing vdev with the data we were
1788 * able to obtain from the MOS config.
1789 */
1790 vdev_remove_child(rvd, tvd);
1791 vdev_remove_child(mrvd, mtvd);
1792
1793 vdev_add_child(rvd, mtvd);
1794 vdev_add_child(mrvd, tvd);
1795
1796 spa_config_exit(spa, SCL_ALL, FTAG);
1797 vdev_load(mtvd);
1798 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
1799
1800 vdev_reopen(rvd);
1801 } else {
1802 if (mtvd->vdev_islog) {
1803 /*
1804 * Load the slog device's state from the MOS
1805 * config since it's possible that the label
1806 * does not contain the most up-to-date
1807 * information.
1808 */
1809 vdev_load_log_state(tvd, mtvd);
1810 vdev_reopen(tvd);
1811 }
1812
1813 /*
1814 * Per-vdev ZAP info is stored exclusively in the MOS.
1815 */
1816 spa_config_valid_zaps(tvd, mtvd);
1817 }
1818 }
1819
1820 vdev_free(mrvd);
1821 spa_config_exit(spa, SCL_ALL, FTAG);
1822
1823 /*
1824 * Ensure we were able to validate the config.
1825 */
1826 return (rvd->vdev_guid_sum == spa->spa_uberblock.ub_guid_sum);
1827}
1828
1829/*
1830 * Check for missing log devices
1831 */
1832static boolean_t
1833spa_check_logs(spa_t *spa)
1834{
1835 boolean_t rv = B_FALSE;
1836 dsl_pool_t *dp = spa_get_dsl(spa);
1837
1838 switch (spa->spa_log_state) {
1839 case SPA_LOG_MISSING:
1840 /* need to recheck in case slog has been restored */
1841 case SPA_LOG_UNKNOWN:
1842 rv = (dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
1843 zil_check_log_chain, NULL, DS_FIND_CHILDREN) != 0);
1844 if (rv)
1845 spa_set_log_state(spa, SPA_LOG_MISSING);
1846 break;
1847 }
1848 return (rv);
1849}
1850
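/*
 * Passivate the metaslab groups of all log (slog) top-level vdevs so that no
 * new allocations are made from them.  Returns B_TRUE if at least one slog
 * device was found.
 */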
1851static boolean_t
1852spa_passivate_log(spa_t *spa)
1853{
1854 vdev_t *rvd = spa->spa_root_vdev;
1855 boolean_t slog_found = B_FALSE;
1856
1857 ASSERT(spa_config_held(spa, SCL_ALLOC, RW_WRITER));
1858
1859 if (!spa_has_slogs(spa))
1860 return (B_FALSE);
1861
1862 for (int c = 0; c < rvd->vdev_children; c++) {
1863 vdev_t *tvd = rvd->vdev_child[c];
1864 metaslab_group_t *mg = tvd->vdev_mg;
1865
1866 if (tvd->vdev_islog) {
1867 metaslab_group_passivate(mg);
1868 slog_found = B_TRUE;
1869 }
1870 }
1871
1872 return (slog_found);
1873}
1874
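/*
 * Reactivate the metaslab groups of all log top-level vdevs, undoing
 * spa_passivate_log().
 */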
1875static void
1876spa_activate_log(spa_t *spa)
1877{
1878 vdev_t *rvd = spa->spa_root_vdev;
1879
1880 ASSERT(spa_config_held(spa, SCL_ALLOC, RW_WRITER));
1881
1882 for (int c = 0; c < rvd->vdev_children; c++) {
1883 vdev_t *tvd = rvd->vdev_child[c];
1884 metaslab_group_t *mg = tvd->vdev_mg;
1885
1886 if (tvd->vdev_islog)
1887 metaslab_group_activate(mg);
1888 }
1889}
1890
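/*
 * Offline the ZIL for every dataset in the pool (so that the log devices can
 * be removed) and, on success, sync out the current txg so that the ZIL
 * "stubby" blocks are cleaned up by zil_sync().
 */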
1891int
1892spa_offline_log(spa_t *spa)
1893{
1894 int error;
1895
1896 error = dmu_objset_find(spa_name(spa), zil_vdev_offline,
1897 NULL, DS_FIND_CHILDREN);
1898 if (error == 0) {
1899 /*
1900 * We successfully offlined the log device, sync out the
1901 * current txg so that the "stubby" block can be removed
1902 * by zil_sync().
1903 */
1904 txg_wait_synced(spa->spa_dsl_pool, 0);
1905 }
1906 return (error);
1907}
1908
1909static void
1910spa_aux_check_removed(spa_aux_vdev_t *sav)
1911{
1912 int i;
1913
1914 for (i = 0; i < sav->sav_count; i++)
1915 spa_check_removed(sav->sav_vdevs[i]);
1916}
1917
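/*
 * Called from the i/o done path while claiming log blocks during pool load;
 * records the highest birth txg seen among claimed blocks so that the load
 * code knows how far to sync (see spa_claim_max_txg usage in spa_load_impl()).
 */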
1918void
1919spa_claim_notify(zio_t *zio)
1920{
1921 spa_t *spa = zio->io_spa;
1922
1923 if (zio->io_error)
1924 return;
1925
1926 mutex_enter(&spa->spa_props_lock); /* any mutex will do */
1927 if (spa->spa_claim_max_txg < zio->io_bp->blk_birth)
1928 spa->spa_claim_max_txg = zio->io_bp->blk_birth;
1929 mutex_exit(&spa->spa_props_lock);
1930}
1931
1932typedef struct spa_load_error {
1933 uint64_t sle_meta_count;
1934 uint64_t sle_data_count;
1935} spa_load_error_t;
1936
1937static void
1938spa_load_verify_done(zio_t *zio)
1939{
1940 blkptr_t *bp = zio->io_bp;
1941 spa_load_error_t *sle = zio->io_private;
1942 dmu_object_type_t type = BP_GET_TYPE(bp);
1943 int error = zio->io_error;
1944 spa_t *spa = zio->io_spa;
1945
1946 abd_free(zio->io_abd);
1947 if (error) {
1948 if ((BP_GET_LEVEL(bp) != 0 || DMU_OT_IS_METADATA(type)) &&
1949 type != DMU_OT_INTENT_LOG)
1950 atomic_inc_64(&sle->sle_meta_count);
1951 else
1952 atomic_inc_64(&sle->sle_data_count);
1953 }
1954
1955 mutex_enter(&spa->spa_scrub_lock);
1956 spa->spa_scrub_inflight--;
1957 cv_broadcast(&spa->spa_scrub_io_cv);
1958 mutex_exit(&spa->spa_scrub_lock);
1959}
1960
1961/*
1962 * Maximum number of concurrent scrub I/Os to create while verifying
1963 * a pool during import.
1964 */
1965int spa_load_verify_maxinflight = 10000;
1966boolean_t spa_load_verify_metadata = B_TRUE;
1967boolean_t spa_load_verify_data = B_TRUE;
1968
1969SYSCTL_INT(_vfs_zfs, OID_AUTO, spa_load_verify_maxinflight, CTLFLAG_RWTUN,
1970 &spa_load_verify_maxinflight, 0,
1971 "Maximum number of concurrent scrub I/Os to create while verifying a "
1972 "pool while importing it");
1973
1974SYSCTL_INT(_vfs_zfs, OID_AUTO, spa_load_verify_metadata, CTLFLAG_RWTUN,
1975 &spa_load_verify_metadata, 0,
1976 "Check metadata on import?");
1977
1978SYSCTL_INT(_vfs_zfs, OID_AUTO, spa_load_verify_data, CTLFLAG_RWTUN,
1979 &spa_load_verify_data, 0,
1980 "Check user data on import?");
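/*
 * Illustrative example (not authoritative): with the OIDs declared above,
 * user-data verification during import could be disabled at runtime with
 * something like "sysctl vfs.zfs.spa_load_verify_data=0"; because the OIDs
 * are CTLFLAG_RWTUN they may also be preset as loader tunables.
 */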
1981
1982/*ARGSUSED*/
1983static int
1984spa_load_verify_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
1985 const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
1986{
1987 if (bp == NULL || BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp))
1988 return (0);
1989 /*
1990 * Note: normally this routine will not be called if
1991 * spa_load_verify_metadata is not set. However, it may be useful
1992 * to manually set the flag after the traversal has begun.
1993 */
1994 if (!spa_load_verify_metadata)
1995 return (0);
1996 if (!BP_IS_METADATA(bp) && !spa_load_verify_data)
1997 return (0);
1998
1999 zio_t *rio = arg;
2000 size_t size = BP_GET_PSIZE(bp);
2001
2002 mutex_enter(&spa->spa_scrub_lock);
2003 while (spa->spa_scrub_inflight >= spa_load_verify_maxinflight)
2004 cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
2005 spa->spa_scrub_inflight++;
2006 mutex_exit(&spa->spa_scrub_lock);
2007
2008 zio_nowait(zio_read(rio, spa, bp, abd_alloc_for_io(size, B_FALSE), size,
2009 spa_load_verify_done, rio->io_private, ZIO_PRIORITY_SCRUB,
2010 ZIO_FLAG_SPECULATIVE | ZIO_FLAG_CANFAIL |
2011 ZIO_FLAG_SCRUB | ZIO_FLAG_RAW, zb));
2012 return (0);
2013}
2014
2015/* ARGSUSED */
2016int
2017verify_dataset_name_len(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg)
2018{
2019 if (dsl_dataset_namelen(ds) >= ZFS_MAX_DATASET_NAME_LEN)
2020 return (SET_ERROR(ENAMETOOLONG));
2021
2022 return (0);
2023}
2024
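/*
 * Verify a pool being imported according to its rewind policy: check that
 * dataset names fit within limits, optionally traverse the pool to count
 * metadata and data read errors, and report whether the load is acceptable.
 */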
2025static int
2026spa_load_verify(spa_t *spa)
2027{
2028 zio_t *rio;
2029 spa_load_error_t sle = { 0 };
2030 zpool_rewind_policy_t policy;
2031 boolean_t verify_ok = B_FALSE;
2032 int error = 0;
2033
2034 zpool_get_rewind_policy(spa->spa_config, &policy);
2035
2036 if (policy.zrp_request & ZPOOL_NEVER_REWIND)
2037 return (0);
2038
2039 dsl_pool_config_enter(spa->spa_dsl_pool, FTAG);
2040 error = dmu_objset_find_dp(spa->spa_dsl_pool,
2041 spa->spa_dsl_pool->dp_root_dir_obj, verify_dataset_name_len, NULL,
2042 DS_FIND_CHILDREN);
2043 dsl_pool_config_exit(spa->spa_dsl_pool, FTAG);
2044 if (error != 0)
2045 return (error);
2046
2047 rio = zio_root(spa, NULL, &sle,
2048 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE);
2049
2050 if (spa_load_verify_metadata) {
2051 error = traverse_pool(spa, spa->spa_verify_min_txg,
2052 TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA,
2053 spa_load_verify_cb, rio);
2054 }
2055
2056 (void) zio_wait(rio);
2057
2058 spa->spa_load_meta_errors = sle.sle_meta_count;
2059 spa->spa_load_data_errors = sle.sle_data_count;
2060
2061 if (!error && sle.sle_meta_count <= policy.zrp_maxmeta &&
2062 sle.sle_data_count <= policy.zrp_maxdata) {
2063 int64_t loss = 0;
2064
2065 verify_ok = B_TRUE;
2066 spa->spa_load_txg = spa->spa_uberblock.ub_txg;
2067 spa->spa_load_txg_ts = spa->spa_uberblock.ub_timestamp;
2068
2069 loss = spa->spa_last_ubsync_txg_ts - spa->spa_load_txg_ts;
2070 VERIFY(nvlist_add_uint64(spa->spa_load_info,
2071 ZPOOL_CONFIG_LOAD_TIME, spa->spa_load_txg_ts) == 0);
2072 VERIFY(nvlist_add_int64(spa->spa_load_info,
2073 ZPOOL_CONFIG_REWIND_TIME, loss) == 0);
2074 VERIFY(nvlist_add_uint64(spa->spa_load_info,
2075 ZPOOL_CONFIG_LOAD_DATA_ERRORS, sle.sle_data_count) == 0);
2076 } else {
2077 spa->spa_load_max_txg = spa->spa_uberblock.ub_txg;
2078 }
2079
2080 if (error) {
2081 if (error != ENXIO && error != EIO)
2082 error = SET_ERROR(EIO);
2083 return (error);
2084 }
2085
2086 return (verify_ok ? 0 : EIO);
2087}
2088
2089/*
2090 * Find a value in the pool props object.
2091 */
2092static void
2093spa_prop_find(spa_t *spa, zpool_prop_t prop, uint64_t *val)
2094{
2095 (void) zap_lookup(spa->spa_meta_objset, spa->spa_pool_props_object,
2096 zpool_prop_to_name(prop), sizeof (uint64_t), 1, val);
2097}
2098
2099/*
2100 * Find a value in the pool directory object.
2101 */
2102static int
2103spa_dir_prop(spa_t *spa, const char *name, uint64_t *val)
2104{
2105 return (zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
2106 name, sizeof (uint64_t), 1, val));
2107}
2108
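/*
 * Convenience helper used throughout the load path: mark the vdev as
 * CANT_OPEN with the given aux reason and return the supplied error.
 */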
2109static int
2110spa_vdev_err(vdev_t *vdev, vdev_aux_t aux, int err)
2111{
2112 vdev_set_state(vdev, B_TRUE, VDEV_STATE_CANT_OPEN, aux);
2113 return (err);
2114}
2115
2116/*
2117 * Fix up config after a partly-completed split. This is done with the
2118 * ZPOOL_CONFIG_SPLIT nvlist. Both the splitting pool and the split-off
2119 * pool have that entry in their config, but only the splitting one contains
2120 * a list of all the guids of the vdevs that are being split off.
2121 *
2122 * This function determines what to do with that list: either rejoin
2123 * all the disks to the pool, or complete the splitting process. To attempt
2124 * the rejoin, each disk that is offlined is marked online again, and
2125 * we do a reopen() call. If the vdev label for every disk that was
2126 * marked online indicates it was successfully split off (VDEV_AUX_SPLIT_POOL)
2127 * then we call vdev_split() on each disk, and complete the split.
2128 *
2129 * Otherwise we leave the config alone, with all the vdevs in place in
2130 * the original pool.
2131 */
2132static void
2133spa_try_repair(spa_t *spa, nvlist_t *config)
2134{
2135 uint_t extracted;
2136 uint64_t *glist;
2137 uint_t i, gcount;
2138 nvlist_t *nvl;
2139 vdev_t **vd;
2140 boolean_t attempt_reopen;
2141
2142 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_SPLIT, &nvl) != 0)
2143 return;
2144
2145 /* check that the config is complete */
2146 if (nvlist_lookup_uint64_array(nvl, ZPOOL_CONFIG_SPLIT_LIST,
2147 &glist, &gcount) != 0)
2148 return;
2149
2150 vd = kmem_zalloc(gcount * sizeof (vdev_t *), KM_SLEEP);
2151
2152 /* attempt to online all the vdevs & validate */
2153 attempt_reopen = B_TRUE;
2154 for (i = 0; i < gcount; i++) {
2155 if (glist[i] == 0) /* vdev is hole */
2156 continue;
2157
2158 vd[i] = spa_lookup_by_guid(spa, glist[i], B_FALSE);
2159 if (vd[i] == NULL) {
2160 /*
2161 * Don't bother attempting to reopen the disks;
2162 * just do the split.
2163 */
2164 attempt_reopen = B_FALSE;
2165 } else {
2166 /* attempt to re-online it */
2167 vd[i]->vdev_offline = B_FALSE;
2168 }
2169 }
2170
2171 if (attempt_reopen) {
2172 vdev_reopen(spa->spa_root_vdev);
2173
2174 /* check each device to see what state it's in */
2175 for (extracted = 0, i = 0; i < gcount; i++) {
2176 if (vd[i] != NULL &&
2177 vd[i]->vdev_stat.vs_aux != VDEV_AUX_SPLIT_POOL)
2178 break;
2179 ++extracted;
2180 }
2181 }
2182
2183 /*
2184 * If every disk has been moved to the new pool, or if we never
2185 * even attempted to look at them, then we split them off for
2186 * good.
2187 */
2188 if (!attempt_reopen || gcount == extracted) {
2189 for (i = 0; i < gcount; i++)
2190 if (vd[i] != NULL)
2191 vdev_split(vd[i]);
2192 vdev_reopen(spa->spa_root_vdev);
2193 }
2194
2195 kmem_free(vd, gcount * sizeof (vdev_t *));
2196}
2197
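/*
 * Pull identifying information (pool guid, comment, version, split state)
 * out of the supplied config, then call spa_load_impl() to do the real work,
 * posting an ereport on failure (other than EBADF).
 */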
2198static int
2199spa_load(spa_t *spa, spa_load_state_t state, spa_import_type_t type,
2200 boolean_t mosconfig)
2201{
2202 nvlist_t *config = spa->spa_config;
2203 char *ereport = FM_EREPORT_ZFS_POOL;
2204 char *comment;
2205 int error;
2206 uint64_t pool_guid;
2207 nvlist_t *nvl;
2208
2209 if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &pool_guid))
2210 return (SET_ERROR(EINVAL));
2211
2212 ASSERT(spa->spa_comment == NULL);
2213 if (nvlist_lookup_string(config, ZPOOL_CONFIG_COMMENT, &comment) == 0)
2214 spa->spa_comment = spa_strdup(comment);
2215
2216 /*
2217 * Versioning wasn't explicitly added to the label until later, so if
2218 * it's not present treat it as the initial version.
2219 */
2220 if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
2221 &spa->spa_ubsync.ub_version) != 0)
2222 spa->spa_ubsync.ub_version = SPA_VERSION_INITIAL;
2223
2224 (void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
2225 &spa->spa_config_txg);
2226
2227 if ((state == SPA_LOAD_IMPORT || state == SPA_LOAD_TRYIMPORT) &&
2228 spa_guid_exists(pool_guid, 0)) {
2229 error = SET_ERROR(EEXIST);
2230 } else {
2231 spa->spa_config_guid = pool_guid;
2232
2233 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_SPLIT,
2234 &nvl) == 0) {
2235 VERIFY(nvlist_dup(nvl, &spa->spa_config_splitting,
2236 KM_SLEEP) == 0);
2237 }
2238
2239 nvlist_free(spa->spa_load_info);
2240 spa->spa_load_info = fnvlist_alloc();
2241
2242 gethrestime(&spa->spa_loaded_ts);
2243 error = spa_load_impl(spa, pool_guid, config, state, type,
2244 mosconfig, &ereport);
2245 }
2246
2247 /*
2248 * Don't count references from objsets that are already closed
2249 * and are making their way through the eviction process.
2250 */
2251 spa_evicting_os_wait(spa);
2252 spa->spa_minref = refcount_count(&spa->spa_refcount);
2253 if (error) {
2254 if (error != EEXIST) {
2255 spa->spa_loaded_ts.tv_sec = 0;
2256 spa->spa_loaded_ts.tv_nsec = 0;
2257 }
2258 if (error != EBADF) {
2259 zfs_ereport_post(ereport, spa, NULL, NULL, 0, 0);
2260 }
2261 }
2262 spa->spa_load_state = error ? SPA_LOAD_ERROR : SPA_LOAD_NONE;
2263 spa->spa_ena = 0;
2264
2265 return (error);
2266}
2267
2268/*
2269 * Count the number of per-vdev ZAPs associated with all of the vdevs in the
2270 * vdev tree rooted in the given vd, and ensure that each ZAP is present in the
2271 * spa's per-vdev ZAP list.
2272 */
2273static uint64_t
2274vdev_count_verify_zaps(vdev_t *vd)
2275{
2276 spa_t *spa = vd->vdev_spa;
2277 uint64_t total = 0;
2278 if (vd->vdev_top_zap != 0) {
2279 total++;
2280 ASSERT0(zap_lookup_int(spa->spa_meta_objset,
2281 spa->spa_all_vdev_zaps, vd->vdev_top_zap));
2282 }
2283 if (vd->vdev_leaf_zap != 0) {
2284 total++;
2285 ASSERT0(zap_lookup_int(spa->spa_meta_objset,
2286 spa->spa_all_vdev_zaps, vd->vdev_leaf_zap));
2287 }
2288
2289 for (uint64_t i = 0; i < vd->vdev_children; i++) {
2290 total += vdev_count_verify_zaps(vd->vdev_child[i]);
2291 }
2292
2293 return (total);
2294}
2295
2296/*
2297 * Load an existing storage pool, using the pool's builtin spa_config as a
2298 * source of configuration information.
2299 */
2300static int
2301spa_load_impl(spa_t *spa, uint64_t pool_guid, nvlist_t *config,
2302 spa_load_state_t state, spa_import_type_t type, boolean_t mosconfig,
2303 char **ereport)
2304{
2305 int error = 0;
2306 nvlist_t *nvroot = NULL;
2307 nvlist_t *label;
2308 vdev_t *rvd;
2309 uberblock_t *ub = &spa->spa_uberblock;
2310 uint64_t children, config_cache_txg = spa->spa_config_txg;
2311 int orig_mode = spa->spa_mode;
2312 int parse;
2313 uint64_t obj;
2314 boolean_t missing_feat_write = B_FALSE;
2315
2316 /*
2317 * If this is an untrusted config, access the pool in read-only mode.
2318 * This prevents things like resilvering recently removed devices.
2319 */
2320 if (!mosconfig)
2321 spa->spa_mode = FREAD;
2322
2323 ASSERT(MUTEX_HELD(&spa_namespace_lock));
2324
2325 spa->spa_load_state = state;
2326
2327 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvroot))
2328 return (SET_ERROR(EINVAL));
2329
2330 parse = (type == SPA_IMPORT_EXISTING ?
2331 VDEV_ALLOC_LOAD : VDEV_ALLOC_SPLIT);
2332
2333 /*
2334 * Create "The Godfather" zio to hold all async IOs
2335 */
2336 spa->spa_async_zio_root = kmem_alloc(max_ncpus * sizeof (void *),
2337 KM_SLEEP);
2338 for (int i = 0; i < max_ncpus; i++) {
2339 spa->spa_async_zio_root[i] = zio_root(spa, NULL, NULL,
2340 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
2341 ZIO_FLAG_GODFATHER);
2342 }
2343
2344 /*
2345 * Parse the configuration into a vdev tree. We explicitly set the
2346 * value that will be returned by spa_version() since parsing the
2347 * configuration requires knowing the version number.
2348 */
2349 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
2350 error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, parse);
2351 spa_config_exit(spa, SCL_ALL, FTAG);
2352
2353 if (error != 0)
2354 return (error);
2355
2356 ASSERT(spa->spa_root_vdev == rvd);
2357 ASSERT3U(spa->spa_min_ashift, >=, SPA_MINBLOCKSHIFT);
2358 ASSERT3U(spa->spa_max_ashift, <=, SPA_MAXBLOCKSHIFT);
2359
2360 if (type != SPA_IMPORT_ASSEMBLE) {
2361 ASSERT(spa_guid(spa) == pool_guid);
2362 }
2363
2364 /*
2365 * Try to open all vdevs, loading each label in the process.
2366 */
2367 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
2368 error = vdev_open(rvd);
2369 spa_config_exit(spa, SCL_ALL, FTAG);
2370 if (error != 0)
2371 return (error);
2372
2373 /*
2374 * We need to validate the vdev labels against the configuration that
2375 * we have in hand, which is dependent on the setting of mosconfig. If
2376 * mosconfig is true then we're validating the vdev labels based on
2377 * that config. Otherwise, we're validating against the cached config
2378 * (zpool.cache) that was read when we loaded the zfs module, and then
2379 * later we will recursively call spa_load() and validate against
2380 * the vdev config.
2381 *
2382 * If we're assembling a new pool that's been split off from an
2383 * existing pool, the labels haven't yet been updated so we skip
2384 * validation for now.
2385 */
2386 if (type != SPA_IMPORT_ASSEMBLE) {
2387 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
2388 error = vdev_validate(rvd, mosconfig);
2389 spa_config_exit(spa, SCL_ALL, FTAG);
2390
2391 if (error != 0)
2392 return (error);
2393
2394 if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN)
2395 return (SET_ERROR(ENXIO));
2396 }
2397
2398 /*
2399 * Find the best uberblock.
2400 */
2401 vdev_uberblock_load(rvd, ub, &label);
2402
2403 /*
2404 * If we weren't able to find a single valid uberblock, return failure.
2405 */
2406 if (ub->ub_txg == 0) {
2407 nvlist_free(label);
2408 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, ENXIO));
2409 }
2410
2411 /*
2412 * If the pool has an unsupported version we can't open it.
2413 */
2414 if (!SPA_VERSION_IS_SUPPORTED(ub->ub_version)) {
2415 nvlist_free(label);
2416 return (spa_vdev_err(rvd, VDEV_AUX_VERSION_NEWER, ENOTSUP));
2417 }
2418
2419 if (ub->ub_version >= SPA_VERSION_FEATURES) {
2420 nvlist_t *features;
2421
2422 /*
2423 * If we weren't able to find what's necessary for reading the
2424 * MOS in the label, return failure.
2425 */
2426 if (label == NULL || nvlist_lookup_nvlist(label,
2427 ZPOOL_CONFIG_FEATURES_FOR_READ, &features) != 0) {
2428 nvlist_free(label);
2429 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA,
2430 ENXIO));
2431 }
2432
2433 /*
2434 * Update our in-core representation with the definitive values
2435 * from the label.
2436 */
2437 nvlist_free(spa->spa_label_features);
2438 VERIFY(nvlist_dup(features, &spa->spa_label_features, 0) == 0);
2439 }
2440
2441 nvlist_free(label);
2442
2443 /*
2444 * Look through entries in the label nvlist's features_for_read. If
2445 * there is a feature listed there which we don't understand then we
2446 * cannot open a pool.
2447 */
2448 if (ub->ub_version >= SPA_VERSION_FEATURES) {
2449 nvlist_t *unsup_feat;
2450
2451 VERIFY(nvlist_alloc(&unsup_feat, NV_UNIQUE_NAME, KM_SLEEP) ==
2452 0);
2453
2454 for (nvpair_t *nvp = nvlist_next_nvpair(spa->spa_label_features,
2455 NULL); nvp != NULL;
2456 nvp = nvlist_next_nvpair(spa->spa_label_features, nvp)) {
2457 if (!zfeature_is_supported(nvpair_name(nvp))) {
2458 VERIFY(nvlist_add_string(unsup_feat,
2459 nvpair_name(nvp), "") == 0);
2460 }
2461 }
2462
2463 if (!nvlist_empty(unsup_feat)) {
2464 VERIFY(nvlist_add_nvlist(spa->spa_load_info,
2465 ZPOOL_CONFIG_UNSUP_FEAT, unsup_feat) == 0);
2466 nvlist_free(unsup_feat);
2467 return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT,
2468 ENOTSUP));
2469 }
2470
2471 nvlist_free(unsup_feat);
2472 }
2473
2474 /*
2475 * If the vdev guid sum doesn't match the uberblock, we have an
2476 * incomplete configuration. We first check to see if the pool
2477 * is aware of the complete config (i.e. ZPOOL_CONFIG_VDEV_CHILDREN).
2478 * If it is, defer the vdev_guid_sum check until later so we
2479 * can handle missing vdevs.
2480 */
2481 if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VDEV_CHILDREN,
2482 &children) != 0 && mosconfig && type != SPA_IMPORT_ASSEMBLE &&
2483 rvd->vdev_guid_sum != ub->ub_guid_sum)
2484 return (spa_vdev_err(rvd, VDEV_AUX_BAD_GUID_SUM, ENXIO));
2485
2486 if (type != SPA_IMPORT_ASSEMBLE && spa->spa_config_splitting) {
2487 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
2488 spa_try_repair(spa, config);
2489 spa_config_exit(spa, SCL_ALL, FTAG);
2490 nvlist_free(spa->spa_config_splitting);
2491 spa->spa_config_splitting = NULL;
2492 }
2493
2494 /*
2495 * Initialize internal SPA structures.
2496 */
2497 spa->spa_state = POOL_STATE_ACTIVE;
2498 spa->spa_ubsync = spa->spa_uberblock;
2499 spa->spa_verify_min_txg = spa->spa_extreme_rewind ?
2500 TXG_INITIAL - 1 : spa_last_synced_txg(spa) - TXG_DEFER_SIZE - 1;
2501 spa->spa_first_txg = spa->spa_last_ubsync_txg ?
2502 spa->spa_last_ubsync_txg : spa_last_synced_txg(spa) + 1;
2503 spa->spa_claim_max_txg = spa->spa_first_txg;
2504 spa->spa_prev_software_version = ub->ub_software_version;
2505
2506 error = dsl_pool_init(spa, spa->spa_first_txg, &spa->spa_dsl_pool);
2507 if (error)
2508 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2509 spa->spa_meta_objset = spa->spa_dsl_pool->dp_meta_objset;
2510
2511 if (spa_dir_prop(spa, DMU_POOL_CONFIG, &spa->spa_config_object) != 0)
2512 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2513
2514 if (spa_version(spa) >= SPA_VERSION_FEATURES) {
2515 boolean_t missing_feat_read = B_FALSE;
2516 nvlist_t *unsup_feat, *enabled_feat;
2517
2518 if (spa_dir_prop(spa, DMU_POOL_FEATURES_FOR_READ,
2519 &spa->spa_feat_for_read_obj) != 0) {
2520 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2521 }
2522
2523 if (spa_dir_prop(spa, DMU_POOL_FEATURES_FOR_WRITE,
2524 &spa->spa_feat_for_write_obj) != 0) {
2525 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2526 }
2527
2528 if (spa_dir_prop(spa, DMU_POOL_FEATURE_DESCRIPTIONS,
2529 &spa->spa_feat_desc_obj) != 0) {
2530 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2531 }
2532
2533 enabled_feat = fnvlist_alloc();
2534 unsup_feat = fnvlist_alloc();
2535
2536 if (!spa_features_check(spa, B_FALSE,
2537 unsup_feat, enabled_feat))
2538 missing_feat_read = B_TRUE;
2539
2540 if (spa_writeable(spa) || state == SPA_LOAD_TRYIMPORT) {
2541 if (!spa_features_check(spa, B_TRUE,
2542 unsup_feat, enabled_feat)) {
2543 missing_feat_write = B_TRUE;
2544 }
2545 }
2546
2547 fnvlist_add_nvlist(spa->spa_load_info,
2548 ZPOOL_CONFIG_ENABLED_FEAT, enabled_feat);
2549
2550 if (!nvlist_empty(unsup_feat)) {
2551 fnvlist_add_nvlist(spa->spa_load_info,
2552 ZPOOL_CONFIG_UNSUP_FEAT, unsup_feat);
2553 }
2554
2555 fnvlist_free(enabled_feat);
2556 fnvlist_free(unsup_feat);
2557
2558 if (!missing_feat_read) {
2559 fnvlist_add_boolean(spa->spa_load_info,
2560 ZPOOL_CONFIG_CAN_RDONLY);
2561 }
2562
2563 /*
2564 * If the state is SPA_LOAD_TRYIMPORT, our objective is
2565 * twofold: to determine whether the pool is available for
2566 * import in read-write mode and (if it is not) whether the
2567 * pool is available for import in read-only mode. If the pool
2568 * is available for import in read-write mode, it is displayed
2569 * as available in userland; if it is not available for import
2570 * in read-only mode, it is displayed as unavailable in
2571 * userland. If the pool is available for import in read-only
2572 * mode but not read-write mode, it is displayed as unavailable
2573 * in userland with a special note that the pool is actually
2574 * available for open in read-only mode.
2575 *
2576 * As a result, if the state is SPA_LOAD_TRYIMPORT and we are
2577 * missing a feature for write, we must first determine whether
2578 * the pool can be opened read-only before returning to
2579 * userland in order to know whether to display the
2580 * abovementioned note.
2581 */
2582 if (missing_feat_read || (missing_feat_write &&
2583 spa_writeable(spa))) {
2584 return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT,
2585 ENOTSUP));
2586 }
2587
2588 /*
2589 * Load refcounts for ZFS features from disk into an in-memory
2590 * cache during SPA initialization.
2591 */
2592 for (spa_feature_t i = 0; i < SPA_FEATURES; i++) {
2593 uint64_t refcount;
2594
2595 error = feature_get_refcount_from_disk(spa,
2596 &spa_feature_table[i], &refcount);
2597 if (error == 0) {
2598 spa->spa_feat_refcount_cache[i] = refcount;
2599 } else if (error == ENOTSUP) {
2600 spa->spa_feat_refcount_cache[i] =
2601 SPA_FEATURE_DISABLED;
2602 } else {
2603 return (spa_vdev_err(rvd,
2604 VDEV_AUX_CORRUPT_DATA, EIO));
2605 }
2606 }
2607 }
2608
2609 if (spa_feature_is_active(spa, SPA_FEATURE_ENABLED_TXG)) {
2610 if (spa_dir_prop(spa, DMU_POOL_FEATURE_ENABLED_TXG,
2611 &spa->spa_feat_enabled_txg_obj) != 0)
2612 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2613 }
2614
2615 spa->spa_is_initializing = B_TRUE;
2616 error = dsl_pool_open(spa->spa_dsl_pool);
2617 spa->spa_is_initializing = B_FALSE;
2618 if (error != 0)
2619 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2620
2621 if (!mosconfig) {
2622 uint64_t hostid;
2623 nvlist_t *policy = NULL, *nvconfig;
2624
2625 if (load_nvlist(spa, spa->spa_config_object, &nvconfig) != 0)
2626 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2627
2628 if (!spa_is_root(spa) && nvlist_lookup_uint64(nvconfig,
2629 ZPOOL_CONFIG_HOSTID, &hostid) == 0) {
2630 char *hostname;
2631 unsigned long myhostid = 0;
2632
2633 VERIFY(nvlist_lookup_string(nvconfig,
2634 ZPOOL_CONFIG_HOSTNAME, &hostname) == 0);
2635
2636#ifdef _KERNEL
2637 myhostid = zone_get_hostid(NULL);
2638#else /* _KERNEL */
2639 /*
2640 * We're emulating the system's hostid in userland, so
2641 * we can't use zone_get_hostid().
2642 */
2643 (void) ddi_strtoul(hw_serial, NULL, 10, &myhostid);
2644#endif /* _KERNEL */
2645 if (check_hostid && hostid != 0 && myhostid != 0 &&
2646 hostid != myhostid) {
2647 nvlist_free(nvconfig);
2648 cmn_err(CE_WARN, "pool '%s' could not be "
2649 "loaded as it was last accessed by "
2650 "another system (host: %s hostid: 0x%lx). "
2651 "See: http://illumos.org/msg/ZFS-8000-EY",
2652 spa_name(spa), hostname,
2653 (unsigned long)hostid);
2654 return (SET_ERROR(EBADF));
2655 }
2656 }
2657 if (nvlist_lookup_nvlist(spa->spa_config,
2658 ZPOOL_REWIND_POLICY, &policy) == 0)
2659 VERIFY(nvlist_add_nvlist(nvconfig,
2660 ZPOOL_REWIND_POLICY, policy) == 0);
2661
2662 spa_config_set(spa, nvconfig);
2663 spa_unload(spa);
2664 spa_deactivate(spa);
2665 spa_activate(spa, orig_mode);
2666
2667 return (spa_load(spa, state, SPA_IMPORT_EXISTING, B_TRUE));
2668 }
2669
2670 /* Grab the secret checksum salt from the MOS. */
2671 error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
2672 DMU_POOL_CHECKSUM_SALT, 1,
2673 sizeof (spa->spa_cksum_salt.zcs_bytes),
2674 spa->spa_cksum_salt.zcs_bytes);
2675 if (error == ENOENT) {
2676 /* Generate a new salt for subsequent use */
2677 (void) random_get_pseudo_bytes(spa->spa_cksum_salt.zcs_bytes,
2678 sizeof (spa->spa_cksum_salt.zcs_bytes));
2679 } else if (error != 0) {
2680 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2681 }
2682
2683 if (spa_dir_prop(spa, DMU_POOL_SYNC_BPOBJ, &obj) != 0)
2684 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2685 error = bpobj_open(&spa->spa_deferred_bpobj, spa->spa_meta_objset, obj);
2686 if (error != 0)
2687 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2688
2689 /*
2690 * Load the bit that tells us to use the new accounting function
2691 * (raid-z deflation). If we have an older pool, this will not
2692 * be present.
2693 */
2694 error = spa_dir_prop(spa, DMU_POOL_DEFLATE, &spa->spa_deflate);
2695 if (error != 0 && error != ENOENT)
2696 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2697
2698 error = spa_dir_prop(spa, DMU_POOL_CREATION_VERSION,
2699 &spa->spa_creation_version);
2700 if (error != 0 && error != ENOENT)
2701 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2702
2703 /*
2704 * Load the persistent error log. If we have an older pool, this will
2705 * not be present.
2706 */
2707 error = spa_dir_prop(spa, DMU_POOL_ERRLOG_LAST, &spa->spa_errlog_last);
2708 if (error != 0 && error != ENOENT)
2709 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2710
2711 error = spa_dir_prop(spa, DMU_POOL_ERRLOG_SCRUB,
2712 &spa->spa_errlog_scrub);
2713 if (error != 0 && error != ENOENT)
2714 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2715
2716 /*
2717 * Load the history object. If we have an older pool, this
2718 * will not be present.
2719 */
2720 error = spa_dir_prop(spa, DMU_POOL_HISTORY, &spa->spa_history);
2721 if (error != 0 && error != ENOENT)
2722 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2723
2724 /*
2725 * Load the per-vdev ZAP map. If we have an older pool, this will not
2726 * be present; in this case, defer its creation to a later time to
2727 * avoid dirtying the MOS this early / out of sync context. See
2728 * spa_sync_config_object.
2729 */
2730
2731 /* The sentinel is only available in the MOS config. */
2732 nvlist_t *mos_config;
2733 if (load_nvlist(spa, spa->spa_config_object, &mos_config) != 0)
2734 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2735
2736 error = spa_dir_prop(spa, DMU_POOL_VDEV_ZAP_MAP,
2737 &spa->spa_all_vdev_zaps);
2738
2739 if (error == ENOENT) {
2740 VERIFY(!nvlist_exists(mos_config,
2741 ZPOOL_CONFIG_HAS_PER_VDEV_ZAPS));
2742 spa->spa_avz_action = AVZ_ACTION_INITIALIZE;
2743 ASSERT0(vdev_count_verify_zaps(spa->spa_root_vdev));
2744 } else if (error != 0) {
2745 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2746 } else if (!nvlist_exists(mos_config, ZPOOL_CONFIG_HAS_PER_VDEV_ZAPS)) {
2747 /*
2748 * An older version of ZFS overwrote the sentinel value, so
2749 * we have orphaned per-vdev ZAPs in the MOS. Defer their
2750 * destruction to later; see spa_sync_config_object.
2751 */
2752 spa->spa_avz_action = AVZ_ACTION_DESTROY;
2753 /*
2754 * We're assuming that no vdevs have had their ZAPs created
2755 * before this. Better be sure of it.
2756 */
2757 ASSERT0(vdev_count_verify_zaps(spa->spa_root_vdev));
2758 }
2759 nvlist_free(mos_config);
2760
2761 /*
2762 * If we're assembling the pool from the split-off vdevs of
2763 * an existing pool, we don't want to attach the spares & cache
2764 * devices.
2765 */
2766
2767 /*
2768 * Load any hot spares for this pool.
2769 */
2770 error = spa_dir_prop(spa, DMU_POOL_SPARES, &spa->spa_spares.sav_object);
2771 if (error != 0 && error != ENOENT)
2772 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2773 if (error == 0 && type != SPA_IMPORT_ASSEMBLE) {
2774 ASSERT(spa_version(spa) >= SPA_VERSION_SPARES);
2775 if (load_nvlist(spa, spa->spa_spares.sav_object,
2776 &spa->spa_spares.sav_config) != 0)
2777 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2778
2779 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
2780 spa_load_spares(spa);
2781 spa_config_exit(spa, SCL_ALL, FTAG);
2782 } else if (error == 0) {
2783 spa->spa_spares.sav_sync = B_TRUE;
2784 }
2785
2786 /*
2787 * Load any level 2 ARC devices for this pool.
2788 */
2789 error = spa_dir_prop(spa, DMU_POOL_L2CACHE,
2790 &spa->spa_l2cache.sav_object);
2791 if (error != 0 && error != ENOENT)
2792 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2793 if (error == 0 && type != SPA_IMPORT_ASSEMBLE) {
2794 ASSERT(spa_version(spa) >= SPA_VERSION_L2CACHE);
2795 if (load_nvlist(spa, spa->spa_l2cache.sav_object,
2796 &spa->spa_l2cache.sav_config) != 0)
2797 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2798
2799 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
2800 spa_load_l2cache(spa);
2801 spa_config_exit(spa, SCL_ALL, FTAG);
2802 } else if (error == 0) {
2803 spa->spa_l2cache.sav_sync = B_TRUE;
2804 }
2805
2806 spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION);
2807
2808 error = spa_dir_prop(spa, DMU_POOL_PROPS, &spa->spa_pool_props_object);
2809 if (error && error != ENOENT)
2810 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2811
2812 if (error == 0) {
2813 uint64_t autoreplace;
2814
2815 spa_prop_find(spa, ZPOOL_PROP_BOOTFS, &spa->spa_bootfs);
2816 spa_prop_find(spa, ZPOOL_PROP_AUTOREPLACE, &autoreplace);
2817 spa_prop_find(spa, ZPOOL_PROP_DELEGATION, &spa->spa_delegation);
2818 spa_prop_find(spa, ZPOOL_PROP_FAILUREMODE, &spa->spa_failmode);
2819 spa_prop_find(spa, ZPOOL_PROP_AUTOEXPAND, &spa->spa_autoexpand);
2820 spa_prop_find(spa, ZPOOL_PROP_DEDUPDITTO,
2821 &spa->spa_dedup_ditto);
2822
2823 spa->spa_autoreplace = (autoreplace != 0);
2824 }
2825
2826 /*
2827 * If the 'autoreplace' property is set, then post a resource notifying
2828 * the ZFS DE that it should not issue any faults for unopenable
2829 * devices. We also iterate over the vdevs, and post a sysevent for any
2830 * unopenable vdevs so that the normal autoreplace handler can take
2831 * over.
2832 */
2833 if (spa->spa_autoreplace && state != SPA_LOAD_TRYIMPORT) {
2834 spa_check_removed(spa->spa_root_vdev);
2835 /*
2836 * For the import case, this is done in spa_import(), because
2837 * at this point we're using the spare definitions from
2838 * the MOS config, not necessarily from the userland config.
2839 */
2840 if (state != SPA_LOAD_IMPORT) {
2841 spa_aux_check_removed(&spa->spa_spares);
2842 spa_aux_check_removed(&spa->spa_l2cache);
2843 }
2844 }
2845
2846 /*
2847 * Load the vdev state for all toplevel vdevs.
2848 */
2849 vdev_load(rvd);
2850
2851 /*
2852 * Propagate the leaf DTLs we just loaded all the way up the tree.
2853 */
2854 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
2855 vdev_dtl_reassess(rvd, 0, 0, B_FALSE);
2856 spa_config_exit(spa, SCL_ALL, FTAG);
2857
2858 /*
2859 * Load the DDTs (dedup tables).
2860 */
2861 error = ddt_load(spa);
2862 if (error != 0)
2863 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2864
2865 spa_update_dspace(spa);
2866
2867 /*
2868 * Validate the config, using the MOS config to fill in any
2869 * information which might be missing. If we fail to validate
2870 * the config then declare the pool unfit for use. If we're
2871 * assembling a pool from a split, the log is not transferred
2872 * over.
2873 */
2874 if (type != SPA_IMPORT_ASSEMBLE) {
2875 nvlist_t *nvconfig;
2876
2877 if (load_nvlist(spa, spa->spa_config_object, &nvconfig) != 0)
2878 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2879
2880 if (!spa_config_valid(spa, nvconfig)) {
2881 nvlist_free(nvconfig);
2882 return (spa_vdev_err(rvd, VDEV_AUX_BAD_GUID_SUM,
2883 ENXIO));
2884 }
2885 nvlist_free(nvconfig);
2886
2887 /*
2888 * Now that we've validated the config, check the state of the
2889 * root vdev. If it can't be opened, it indicates one or
2890 * more toplevel vdevs are faulted.
2891 */
2892 if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN)
2893 return (SET_ERROR(ENXIO));
2894
2895 if (spa_writeable(spa) && spa_check_logs(spa)) {
2896 *ereport = FM_EREPORT_ZFS_LOG_REPLAY;
2897 return (spa_vdev_err(rvd, VDEV_AUX_BAD_LOG, ENXIO));
2898 }
2899 }
2900
2901 if (missing_feat_write) {
2902 ASSERT(state == SPA_LOAD_TRYIMPORT);
2903
2904 /*
2905 * At this point, we know that we can open the pool in
2906 * read-only mode but not read-write mode. We now have enough
2907 * information and can return to userland.
2908 */
2909 return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT, ENOTSUP));
2910 }
2911
2912 /*
2913 * We've successfully opened the pool, verify that we're ready
2914 * to start pushing transactions.
2915 */
2916 if (state != SPA_LOAD_TRYIMPORT) {
2917 if (error = spa_load_verify(spa))
2918 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA,
2919 error));
2920 }
2921
2922 if (spa_writeable(spa) && (state == SPA_LOAD_RECOVER ||
2923 spa->spa_load_max_txg == UINT64_MAX)) {
2924 dmu_tx_t *tx;
2925 int need_update = B_FALSE;
2926 dsl_pool_t *dp = spa_get_dsl(spa);
2927
2928 ASSERT(state != SPA_LOAD_TRYIMPORT);
2929
2930 /*
2931 * Claim log blocks that haven't been committed yet.
2932 * This must all happen in a single txg.
2933 * Note: spa_claim_max_txg is updated by spa_claim_notify(),
2934 * invoked from zil_claim_log_block()'s i/o done callback.
2935 * Price of rollback is that we abandon the log.
2936 */
2937 spa->spa_claiming = B_TRUE;
2938
2939 tx = dmu_tx_create_assigned(dp, spa_first_txg(spa));
2940 (void) dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
2941 zil_claim, tx, DS_FIND_CHILDREN);
2942 dmu_tx_commit(tx);
2943
2944 spa->spa_claiming = B_FALSE;
2945
2946 spa_set_log_state(spa, SPA_LOG_GOOD);
2947 spa->spa_sync_on = B_TRUE;
2948 txg_sync_start(spa->spa_dsl_pool);
2949
2950 /*
2951 * Wait for all claims to sync. We sync up to the highest
2952 * claimed log block birth time so that claimed log blocks
2953 * don't appear to be from the future. spa_claim_max_txg
2954 * will have been set for us by either zil_check_log_chain()
2955 * (invoked from spa_check_logs()) or zil_claim() above.
2956 */
2957 txg_wait_synced(spa->spa_dsl_pool, spa->spa_claim_max_txg);
2958
2959 /*
2960 * If the config cache is stale, or we have uninitialized
2961 * metaslabs (see spa_vdev_add()), then update the config.
2962 *
2963 * If this is a verbatim import, trust the current
2964 * in-core spa_config and update the disk labels.
2965 */
2966 if (config_cache_txg != spa->spa_config_txg ||
2967 state == SPA_LOAD_IMPORT ||
2968 state == SPA_LOAD_RECOVER ||
2969 (spa->spa_import_flags & ZFS_IMPORT_VERBATIM))
2970 need_update = B_TRUE;
2971
2972 for (int c = 0; c < rvd->vdev_children; c++)
2973 if (rvd->vdev_child[c]->vdev_ms_array == 0)
2974 need_update = B_TRUE;
2975
2976 /*
2977 * Update the config cache asynchronously in case we're the
2978 * root pool, in which case the config cache isn't writable yet.
2979 */
2980 if (need_update)
2981 spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
2982
2983 /*
2984 * Check all DTLs to see if anything needs resilvering.
2985 */
2986 if (!dsl_scan_resilvering(spa->spa_dsl_pool) &&
2987 vdev_resilver_needed(rvd, NULL, NULL))
2988 spa_async_request(spa, SPA_ASYNC_RESILVER);
2989
2990 /*
2991 * Log the fact that we booted up (so that we can detect if
2992 * we rebooted in the middle of an operation).
2993 */
2994 spa_history_log_version(spa, "open");
2995
2996 /*
2997 * Delete any inconsistent datasets.
2998 */
2999 (void) dmu_objset_find(spa_name(spa),
3000 dsl_destroy_inconsistent, NULL, DS_FIND_CHILDREN);
3001
3002 /*
3003 * Clean up any stale temporary dataset userrefs.
3004 */
3005 dsl_pool_clean_tmp_userrefs(spa->spa_dsl_pool);
3006 }
3007
3008 return (0);
3009}
3010
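/*
 * Used by spa_load_best() to retry the load at an earlier txg: tear the pool
 * down, cap spa_load_max_txg just below the current uberblock, and reload.
 */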
3011static int
3012spa_load_retry(spa_t *spa, spa_load_state_t state, int mosconfig)
3013{
3014 int mode = spa->spa_mode;
3015
3016 spa_unload(spa);
3017 spa_deactivate(spa);
3018
3019 spa->spa_load_max_txg = spa->spa_uberblock.ub_txg - 1;
3020
3021 spa_activate(spa, mode);
3022 spa_async_suspend(spa);
3023
3024 return (spa_load(spa, state, SPA_IMPORT_EXISTING, mosconfig));
3025}
3026
3027/*
3028 * If spa_load() fails this function will try loading prior txg's. If
3029 * 'state' is SPA_LOAD_RECOVER and one of these loads succeeds the pool
3030 * will be rewound to that txg. If 'state' is not SPA_LOAD_RECOVER this
3031 * function will not rewind the pool and will return the same error as
3032 * spa_load().
3033 */
3034static int
3035spa_load_best(spa_t *spa, spa_load_state_t state, int mosconfig,
3036 uint64_t max_request, int rewind_flags)
3037{
3038 nvlist_t *loadinfo = NULL;
3039 nvlist_t *config = NULL;
3040 int load_error, rewind_error;
3041 uint64_t safe_rewind_txg;
3042 uint64_t min_txg;
3043
3044 if (spa->spa_load_txg && state == SPA_LOAD_RECOVER) {
3045 spa->spa_load_max_txg = spa->spa_load_txg;
3046 spa_set_log_state(spa, SPA_LOG_CLEAR);
3047 } else {
3048 spa->spa_load_max_txg = max_request;
3049 if (max_request != UINT64_MAX)
3050 spa->spa_extreme_rewind = B_TRUE;
3051 }
3052
3053 load_error = rewind_error = spa_load(spa, state, SPA_IMPORT_EXISTING,
3054 mosconfig);
3055 if (load_error == 0)
3056 return (0);
3057
3058 if (spa->spa_root_vdev != NULL)
3059 config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
3060
3061 spa->spa_last_ubsync_txg = spa->spa_uberblock.ub_txg;
3062 spa->spa_last_ubsync_txg_ts = spa->spa_uberblock.ub_timestamp;
3063
3064 if (rewind_flags & ZPOOL_NEVER_REWIND) {
3065 nvlist_free(config);
3066 return (load_error);
3067 }
3068
3069 if (state == SPA_LOAD_RECOVER) {
3070 /* Price of rolling back is discarding txgs, including log */
3071 spa_set_log_state(spa, SPA_LOG_CLEAR);
3072 } else {
3073 /*
3074 * If we aren't rolling back save the load info from our first
3075 * import attempt so that we can restore it after attempting
3076 * to rewind.
3077 */
3078 loadinfo = spa->spa_load_info;
3079 spa->spa_load_info = fnvlist_alloc();
3080 }
3081
3082 spa->spa_load_max_txg = spa->spa_last_ubsync_txg;
3083 safe_rewind_txg = spa->spa_last_ubsync_txg - TXG_DEFER_SIZE;
3084 min_txg = (rewind_flags & ZPOOL_EXTREME_REWIND) ?
3085 TXG_INITIAL : safe_rewind_txg;
3086
3087 /*
3088 * Continue as long as we're finding errors, we're still within
3089 * the acceptable rewind range, and we're still finding uberblocks
3090 */
3091 while (rewind_error && spa->spa_uberblock.ub_txg >= min_txg &&
3092 spa->spa_uberblock.ub_txg <= spa->spa_load_max_txg) {
3093 if (spa->spa_load_max_txg < safe_rewind_txg)
3094 spa->spa_extreme_rewind = B_TRUE;
3095 rewind_error = spa_load_retry(spa, state, mosconfig);
3096 }
3097
3098 spa->spa_extreme_rewind = B_FALSE;
3099 spa->spa_load_max_txg = UINT64_MAX;
3100
3101 if (config && (rewind_error || state != SPA_LOAD_RECOVER))
3102 spa_config_set(spa, config);
3103
3104 if (state == SPA_LOAD_RECOVER) {
3105 ASSERT3P(loadinfo, ==, NULL);
3106 return (rewind_error);
3107 } else {
3108 /* Store the rewind info as part of the initial load info */
3109 fnvlist_add_nvlist(loadinfo, ZPOOL_CONFIG_REWIND_INFO,
3110 spa->spa_load_info);
3111
3112 /* Restore the initial load info */
3113 fnvlist_free(spa->spa_load_info);
3114 spa->spa_load_info = loadinfo;
3115
3116 return (load_error);
3117 }
3118}
3119
3120/*
3121 * Pool Open/Import
3122 *
3123 * The import case is identical to an open except that the configuration is sent
3124 * down from userland, instead of being grabbed from the configuration cache. For the
3125 * case of an open, the pool configuration will exist in the
3126 * POOL_STATE_UNINITIALIZED state.
3127 *
3128 * The stats information (gen/count/ustats) is used to gather vdev statistics at
3129 * the same time we open the pool, without having to keep around the spa_t in some
3130 * ambiguous state.
3131 */
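/*
 * Illustrative usage sketch (not part of the pool code itself):
 *
 *	spa_t *spa;
 *	int error = spa_open("tank", &spa, FTAG);
 *	if (error == 0) {
 *		... operate on the opened pool ...
 *		spa_close(spa, FTAG);
 *	}
 *
 * "tank" is a hypothetical pool name; spa_close() drops the reference taken
 * by spa_open_ref() below.
 */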
3132static int
3133spa_open_common(const char *pool, spa_t **spapp, void *tag, nvlist_t *nvpolicy,
3134 nvlist_t **config)
3135{
3136 spa_t *spa;
3137 spa_load_state_t state = SPA_LOAD_OPEN;
3138 int error;
3139 int locked = B_FALSE;
3140 int firstopen = B_FALSE;
3141
3142 *spapp = NULL;
3143
3144 /*
3145 * As disgusting as this is, we need to support recursive calls to this
3146 * function because dsl_dir_open() is called during spa_load(), and ends
3147 * up calling spa_open() again. The real fix is to figure out how to
3148 * avoid dsl_dir_open() calling this in the first place.
3149 */
3150 if (mutex_owner(&spa_namespace_lock) != curthread) {
3151 mutex_enter(&spa_namespace_lock);
3152 locked = B_TRUE;
3153 }
3154
3155 if ((spa = spa_lookup(pool)) == NULL) {
3156 if (locked)
3157 mutex_exit(&spa_namespace_lock);
3158 return (SET_ERROR(ENOENT));
3159 }
3160
3161 if (spa->spa_state == POOL_STATE_UNINITIALIZED) {
3162 zpool_rewind_policy_t policy;
3163
3164 firstopen = B_TRUE;
3165
3166 zpool_get_rewind_policy(nvpolicy ? nvpolicy : spa->spa_config,
3167 &policy);
3168 if (policy.zrp_request & ZPOOL_DO_REWIND)
3169 state = SPA_LOAD_RECOVER;
3170
3171 spa_activate(spa, spa_mode_global);
3172
3173 if (state != SPA_LOAD_RECOVER)
3174 spa->spa_last_ubsync_txg = spa->spa_load_txg = 0;
3175
3176 error = spa_load_best(spa, state, B_FALSE, policy.zrp_txg,
3177 policy.zrp_request);
3178
3179 if (error == EBADF) {
3180 /*
3181 * If vdev_validate() returns failure (indicated by
3182			 * EBADF), it means that one of the vdev labels indicates
3183 * that the pool has been exported or destroyed. If
3184 * this is the case, the config cache is out of sync and
3185 * we should remove the pool from the namespace.
3186 */
3187 spa_unload(spa);
3188 spa_deactivate(spa);
3189 spa_config_sync(spa, B_TRUE, B_TRUE);
3190 spa_remove(spa);
3191 if (locked)
3192 mutex_exit(&spa_namespace_lock);
3193 return (SET_ERROR(ENOENT));
3194 }
3195
3196 if (error) {
3197 /*
3198 * We can't open the pool, but we still have useful
3199 * information: the state of each vdev after the
3200 * attempted vdev_open(). Return this to the user.
3201 */
3202 if (config != NULL && spa->spa_config) {
3203 VERIFY(nvlist_dup(spa->spa_config, config,
3204 KM_SLEEP) == 0);
3205 VERIFY(nvlist_add_nvlist(*config,
3206 ZPOOL_CONFIG_LOAD_INFO,
3207 spa->spa_load_info) == 0);
3208 }
3209 spa_unload(spa);
3210 spa_deactivate(spa);
3211 spa->spa_last_open_failed = error;
3212 if (locked)
3213 mutex_exit(&spa_namespace_lock);
3214 *spapp = NULL;
3215 return (error);
3216 }
3217 }
3218
3219 spa_open_ref(spa, tag);
3220
3221 if (config != NULL)
3222 *config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
3223
3224 /*
3225 * If we've recovered the pool, pass back any information we
3226 * gathered while doing the load.
3227 */
3228 if (state == SPA_LOAD_RECOVER) {
3229 VERIFY(nvlist_add_nvlist(*config, ZPOOL_CONFIG_LOAD_INFO,
3230 spa->spa_load_info) == 0);
3231 }
3232
3233 if (locked) {
3234 spa->spa_last_open_failed = 0;
3235 spa->spa_last_ubsync_txg = 0;
3236 spa->spa_load_txg = 0;
3237 mutex_exit(&spa_namespace_lock);
3238#ifdef __FreeBSD__
3239#ifdef _KERNEL
3240 if (firstopen)
3241 zvol_create_minors(spa->spa_name);
3242#endif
3243#endif
3244 }
3245
3246 *spapp = spa;
3247
3248 return (0);
3249}
3250
3251int
3252spa_open_rewind(const char *name, spa_t **spapp, void *tag, nvlist_t *policy,
3253 nvlist_t **config)
3254{
3255 return (spa_open_common(name, spapp, tag, policy, config));
3256}
3257
3258int
3259spa_open(const char *name, spa_t **spapp, void *tag)
3260{
3261 return (spa_open_common(name, spapp, tag, NULL, NULL));
3262}
3263
3264/*
3265 * Look up the given spa_t, incrementing the inject count in the process,
3266 * thereby preventing it from being exported or destroyed.
3267 */
3268spa_t *
3269spa_inject_addref(char *name)
3270{
3271 spa_t *spa;
3272
3273 mutex_enter(&spa_namespace_lock);
3274 if ((spa = spa_lookup(name)) == NULL) {
3275 mutex_exit(&spa_namespace_lock);
3276 return (NULL);
3277 }
3278 spa->spa_inject_ref++;
3279 mutex_exit(&spa_namespace_lock);
3280
3281 return (spa);
3282}
3283
3284void
3285spa_inject_delref(spa_t *spa)
3286{
3287 mutex_enter(&spa_namespace_lock);
3288 spa->spa_inject_ref--;
3289 mutex_exit(&spa_namespace_lock);
3290}
3291
3292/*
3293 * Add spares device information to the nvlist.
3294 */
3295static void
3296spa_add_spares(spa_t *spa, nvlist_t *config)
3297{
3298 nvlist_t **spares;
3299 uint_t i, nspares;
3300 nvlist_t *nvroot;
3301 uint64_t guid;
3302 vdev_stat_t *vs;
3303 uint_t vsc;
3304 uint64_t pool;
3305
3306 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
3307
3308 if (spa->spa_spares.sav_count == 0)
3309 return;
3310
3311 VERIFY(nvlist_lookup_nvlist(config,
3312 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
3313 VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
3314 ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);
3315 if (nspares != 0) {
3316 VERIFY(nvlist_add_nvlist_array(nvroot,
3317 ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
3318 VERIFY(nvlist_lookup_nvlist_array(nvroot,
3319 ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);
3320
3321 /*
3322 		 * Go through and find any spares that have since been
3323 		 * repurposed as active spares. If this is the case, update
3324 * their status appropriately.
3325 */
3326 for (i = 0; i < nspares; i++) {
3327 VERIFY(nvlist_lookup_uint64(spares[i],
3328 ZPOOL_CONFIG_GUID, &guid) == 0);
3329 if (spa_spare_exists(guid, &pool, NULL) &&
3330 pool != 0ULL) {
3331 VERIFY(nvlist_lookup_uint64_array(
3332 spares[i], ZPOOL_CONFIG_VDEV_STATS,
3333 (uint64_t **)&vs, &vsc) == 0);
3334 vs->vs_state = VDEV_STATE_CANT_OPEN;
3335 vs->vs_aux = VDEV_AUX_SPARED;
3336 }
3337 }
3338 }
3339}
3340
3341/*
3342 * Add l2cache device information to the nvlist, including vdev stats.
3343 */
3344static void
3345spa_add_l2cache(spa_t *spa, nvlist_t *config)
3346{
3347 nvlist_t **l2cache;
3348 uint_t i, j, nl2cache;
3349 nvlist_t *nvroot;
3350 uint64_t guid;
3351 vdev_t *vd;
3352 vdev_stat_t *vs;
3353 uint_t vsc;
3354
3355 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
3356
3357 if (spa->spa_l2cache.sav_count == 0)
3358 return;
3359
3360 VERIFY(nvlist_lookup_nvlist(config,
3361 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
3362 VERIFY(nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config,
3363 ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
3364 if (nl2cache != 0) {
3365 VERIFY(nvlist_add_nvlist_array(nvroot,
3366 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
3367 VERIFY(nvlist_lookup_nvlist_array(nvroot,
3368 ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
3369
3370 /*
3371 * Update level 2 cache device stats.
3372 */
3373
3374 for (i = 0; i < nl2cache; i++) {
3375 VERIFY(nvlist_lookup_uint64(l2cache[i],
3376 ZPOOL_CONFIG_GUID, &guid) == 0);
3377
3378 vd = NULL;
3379 for (j = 0; j < spa->spa_l2cache.sav_count; j++) {
3380 if (guid ==
3381 spa->spa_l2cache.sav_vdevs[j]->vdev_guid) {
3382 vd = spa->spa_l2cache.sav_vdevs[j];
3383 break;
3384 }
3385 }
3386 ASSERT(vd != NULL);
3387
3388 VERIFY(nvlist_lookup_uint64_array(l2cache[i],
3389 ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc)
3390 == 0);
3391 vdev_get_stats(vd, vs);
3392 }
3393 }
3394}
3395
3396static void
3397spa_add_feature_stats(spa_t *spa, nvlist_t *config)
3398{
3399 nvlist_t *features;
3400 zap_cursor_t zc;
3401 zap_attribute_t za;
3402
3403 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
3404 VERIFY(nvlist_alloc(&features, NV_UNIQUE_NAME, KM_SLEEP) == 0);
3405
3406 /* We may be unable to read features if pool is suspended. */
3407 if (spa_suspended(spa))
3408 goto out;
3409
3410 if (spa->spa_feat_for_read_obj != 0) {
3411 for (zap_cursor_init(&zc, spa->spa_meta_objset,
3412 spa->spa_feat_for_read_obj);
3413 zap_cursor_retrieve(&zc, &za) == 0;
3414 zap_cursor_advance(&zc)) {
3415 ASSERT(za.za_integer_length == sizeof (uint64_t) &&
3416 za.za_num_integers == 1);
3417 VERIFY3U(0, ==, nvlist_add_uint64(features, za.za_name,
3418 za.za_first_integer));
3419 }
3420 zap_cursor_fini(&zc);
3421 }
3422
3423 if (spa->spa_feat_for_write_obj != 0) {
3424 for (zap_cursor_init(&zc, spa->spa_meta_objset,
3425 spa->spa_feat_for_write_obj);
3426 zap_cursor_retrieve(&zc, &za) == 0;
3427 zap_cursor_advance(&zc)) {
3428 ASSERT(za.za_integer_length == sizeof (uint64_t) &&
3429 za.za_num_integers == 1);
3430 VERIFY3U(0, ==, nvlist_add_uint64(features, za.za_name,
3431 za.za_first_integer));
3432 }
3433 zap_cursor_fini(&zc);
3434 }
3435
3436out:
3437 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_FEATURE_STATS,
3438 features) == 0);
3439 nvlist_free(features);
3440}
3441
3442int
3443spa_get_stats(const char *name, nvlist_t **config,
3444 char *altroot, size_t buflen)
3445{
3446 int error;
3447 spa_t *spa;
3448
3449 *config = NULL;
3450 error = spa_open_common(name, &spa, FTAG, NULL, config);
3451
3452 if (spa != NULL) {
3453 /*
3454 * This still leaves a window of inconsistency where the spares
3455 * or l2cache devices could change and the config would be
3456 * self-inconsistent.
3457 */
3458 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
3459
3460 if (*config != NULL) {
3461 uint64_t loadtimes[2];
3462
3463 loadtimes[0] = spa->spa_loaded_ts.tv_sec;
3464 loadtimes[1] = spa->spa_loaded_ts.tv_nsec;
3465 VERIFY(nvlist_add_uint64_array(*config,
3466 ZPOOL_CONFIG_LOADED_TIME, loadtimes, 2) == 0);
3467
3468 VERIFY(nvlist_add_uint64(*config,
3469 ZPOOL_CONFIG_ERRCOUNT,
3470 spa_get_errlog_size(spa)) == 0);
3471
3472 if (spa_suspended(spa))
3473 VERIFY(nvlist_add_uint64(*config,
3474 ZPOOL_CONFIG_SUSPENDED,
3475 spa->spa_failmode) == 0);
3476
3477 spa_add_spares(spa, *config);
3478 spa_add_l2cache(spa, *config);
3479 spa_add_feature_stats(spa, *config);
3480 }
3481 }
3482
3483 /*
3484 * We want to get the alternate root even for faulted pools, so we cheat
3485 * and call spa_lookup() directly.
3486 */
3487 if (altroot) {
3488 if (spa == NULL) {
3489 mutex_enter(&spa_namespace_lock);
3490 spa = spa_lookup(name);
3491 if (spa)
3492 spa_altroot(spa, altroot, buflen);
3493 else
3494 altroot[0] = '\0';
3495 spa = NULL;
3496 mutex_exit(&spa_namespace_lock);
3497 } else {
3498 spa_altroot(spa, altroot, buflen);
3499 }
3500 }
3501
3502 if (spa != NULL) {
3503 spa_config_exit(spa, SCL_CONFIG, FTAG);
3504 spa_close(spa, FTAG);
3505 }
3506
3507 return (error);
3508}
3509
3510/*
3511 * Validate that the auxiliary device array is well formed. We must have an
3512 * array of nvlists, each of which describes a valid leaf vdev. If this is an
3513 * import (mode is VDEV_ALLOC_SPARE), then we allow corrupted spares to be
3514 * specified, as long as they are well-formed.
3515 */
3516static int
3517spa_validate_aux_devs(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode,
3518 spa_aux_vdev_t *sav, const char *config, uint64_t version,
3519 vdev_labeltype_t label)
3520{
3521 nvlist_t **dev;
3522 uint_t i, ndev;
3523 vdev_t *vd;
3524 int error;
3525
3526 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
3527
3528 /*
3529 * It's acceptable to have no devs specified.
3530 */
3531 if (nvlist_lookup_nvlist_array(nvroot, config, &dev, &ndev) != 0)
3532 return (0);
3533
3534 if (ndev == 0)
3535 return (SET_ERROR(EINVAL));
3536
3537 /*
3538 * Make sure the pool is formatted with a version that supports this
3539 * device type.
3540 */
3541 if (spa_version(spa) < version)
3542 return (SET_ERROR(ENOTSUP));
3543
3544 /*
3545 * Set the pending device list so we correctly handle device in-use
3546 * checking.
3547 */
3548 sav->sav_pending = dev;
3549 sav->sav_npending = ndev;
3550
3551 for (i = 0; i < ndev; i++) {
3552 if ((error = spa_config_parse(spa, &vd, dev[i], NULL, 0,
3553 mode)) != 0)
3554 goto out;
3555
3556 if (!vd->vdev_ops->vdev_op_leaf) {
3557 vdev_free(vd);
3558 error = SET_ERROR(EINVAL);
3559 goto out;
3560 }
3561
3562 /*
3563 * The L2ARC currently only supports disk devices in
3564 * kernel context. For user-level testing, we allow it.
3565 */
3566#ifdef _KERNEL
3567 if ((strcmp(config, ZPOOL_CONFIG_L2CACHE) == 0) &&
3568 strcmp(vd->vdev_ops->vdev_op_type, VDEV_TYPE_DISK) != 0) {
3569 error = SET_ERROR(ENOTBLK);
3570 vdev_free(vd);
3571 goto out;
3572 }
3573#endif
3574 vd->vdev_top = vd;
3575
3576 if ((error = vdev_open(vd)) == 0 &&
3577 (error = vdev_label_init(vd, crtxg, label)) == 0) {
3578 VERIFY(nvlist_add_uint64(dev[i], ZPOOL_CONFIG_GUID,
3579 vd->vdev_guid) == 0);
3580 }
3581
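		/*
		 * The vdev constructed above was needed only to validate and
		 * label the device; it is never linked into the pool's vdev
		 * tree, so free it here.
		 */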
3582 vdev_free(vd);
3583
3584 if (error &&
3585 (mode != VDEV_ALLOC_SPARE && mode != VDEV_ALLOC_L2CACHE))
3586 goto out;
3587 else
3588 error = 0;
3589 }
3590
3591out:
3592 sav->sav_pending = NULL;
3593 sav->sav_npending = 0;
3594 return (error);
3595}
3596
3597static int
3598spa_validate_aux(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode)
3599{
3600 int error;
3601
3602 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
3603
3604 if ((error = spa_validate_aux_devs(spa, nvroot, crtxg, mode,
3605 &spa->spa_spares, ZPOOL_CONFIG_SPARES, SPA_VERSION_SPARES,
3606 VDEV_LABEL_SPARE)) != 0) {
3607 return (error);
3608 }
3609
3610 return (spa_validate_aux_devs(spa, nvroot, crtxg, mode,
3611 &spa->spa_l2cache, ZPOOL_CONFIG_L2CACHE, SPA_VERSION_L2CACHE,
3612 VDEV_LABEL_L2CACHE));
3613}
3614
3615static void
3616spa_set_aux_vdevs(spa_aux_vdev_t *sav, nvlist_t **devs, int ndevs,
3617 const char *config)
3618{
3619 int i;
3620
3621 if (sav->sav_config != NULL) {
3622 nvlist_t **olddevs;
3623 uint_t oldndevs;
3624 nvlist_t **newdevs;
3625
3626 /*
3627 		 * Generate a new dev list by concatenating with the
3628 * current dev list.
3629 */
3630 VERIFY(nvlist_lookup_nvlist_array(sav->sav_config, config,
3631 &olddevs, &oldndevs) == 0);
3632
3633 newdevs = kmem_alloc(sizeof (void *) *
3634 (ndevs + oldndevs), KM_SLEEP);
3635 for (i = 0; i < oldndevs; i++)
3636 VERIFY(nvlist_dup(olddevs[i], &newdevs[i],
3637 KM_SLEEP) == 0);
3638 for (i = 0; i < ndevs; i++)
3639 VERIFY(nvlist_dup(devs[i], &newdevs[i + oldndevs],
3640 KM_SLEEP) == 0);
3641
3642 VERIFY(nvlist_remove(sav->sav_config, config,
3643 DATA_TYPE_NVLIST_ARRAY) == 0);
3644
3645 VERIFY(nvlist_add_nvlist_array(sav->sav_config,
3646 config, newdevs, ndevs + oldndevs) == 0);
3647 for (i = 0; i < oldndevs + ndevs; i++)
3648 nvlist_free(newdevs[i]);
3649 kmem_free(newdevs, (oldndevs + ndevs) * sizeof (void *));
3650 } else {
3651 /*
3652 * Generate a new dev list.
3653 */
3654 VERIFY(nvlist_alloc(&sav->sav_config, NV_UNIQUE_NAME,
3655 KM_SLEEP) == 0);
3656 VERIFY(nvlist_add_nvlist_array(sav->sav_config, config,
3657 devs, ndevs) == 0);
3658 }
3659}
3660
3661/*
3662 * Stop and drop level 2 ARC devices
3663 */
3664void
3665spa_l2cache_drop(spa_t *spa)
3666{
3667 vdev_t *vd;
3668 int i;
3669 spa_aux_vdev_t *sav = &spa->spa_l2cache;
3670
3671 for (i = 0; i < sav->sav_count; i++) {
3672 uint64_t pool;
3673
3674 vd = sav->sav_vdevs[i];
3675 ASSERT(vd != NULL);
3676
3677 if (spa_l2cache_exists(vd->vdev_guid, &pool) &&
3678 pool != 0ULL && l2arc_vdev_present(vd))
3679 l2arc_remove_vdev(vd);
3680 }
3681}
3682
3683/*
3684 * Pool Creation
3685 */
3686int
3687spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props,
3688 nvlist_t *zplprops)
3689{
3690 spa_t *spa;
3691 char *altroot = NULL;
3692 vdev_t *rvd;
3693 dsl_pool_t *dp;
3694 dmu_tx_t *tx;
3695 int error = 0;
3696 uint64_t txg = TXG_INITIAL;
3697 nvlist_t **spares, **l2cache;
3698 uint_t nspares, nl2cache;
3699 uint64_t version, obj;
3700 boolean_t has_features;
3701
3702 /*
3703 * If this pool already exists, return failure.
3704 */
3705 mutex_enter(&spa_namespace_lock);
3706 if (spa_lookup(pool) != NULL) {
3707 mutex_exit(&spa_namespace_lock);
3708 return (SET_ERROR(EEXIST));
3709 }
3710
3711 /*
3712 * Allocate a new spa_t structure.
3713 */
3714 (void) nvlist_lookup_string(props,
3715 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
3716 spa = spa_add(pool, NULL, altroot);
3717 spa_activate(spa, spa_mode_global);
3718
3719 if (props && (error = spa_prop_validate(spa, props))) {
3720 spa_deactivate(spa);
3721 spa_remove(spa);
3722 mutex_exit(&spa_namespace_lock);
3723 return (error);
3724 }
3725
3726 has_features = B_FALSE;
3727 for (nvpair_t *elem = nvlist_next_nvpair(props, NULL);
3728 elem != NULL; elem = nvlist_next_nvpair(props, elem)) {
3729 if (zpool_prop_feature(nvpair_name(elem)))
3730 has_features = B_TRUE;
3731 }
3732
3733 if (has_features || nvlist_lookup_uint64(props,
3734 zpool_prop_to_name(ZPOOL_PROP_VERSION), &version) != 0) {
3735 version = SPA_VERSION;
3736 }
3737 ASSERT(SPA_VERSION_IS_SUPPORTED(version));
3738
3739 spa->spa_first_txg = txg;
3740 spa->spa_uberblock.ub_txg = txg - 1;
3741 spa->spa_uberblock.ub_version = version;
3742 spa->spa_ubsync = spa->spa_uberblock;
3743 spa->spa_load_state = SPA_LOAD_CREATE;
3744
3745 /*
3746 * Create "The Godfather" zio to hold all async IOs
3747 */
3748 spa->spa_async_zio_root = kmem_alloc(max_ncpus * sizeof (void *),
3749 KM_SLEEP);
3750 for (int i = 0; i < max_ncpus; i++) {
3751 spa->spa_async_zio_root[i] = zio_root(spa, NULL, NULL,
3752 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
3753 ZIO_FLAG_GODFATHER);
3754 }
3755
3756 /*
3757 * Create the root vdev.
3758 */
3759 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
3760
3761 error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, VDEV_ALLOC_ADD);
3762
3763 ASSERT(error != 0 || rvd != NULL);
3764 ASSERT(error != 0 || spa->spa_root_vdev == rvd);
3765
3766 if (error == 0 && !zfs_allocatable_devs(nvroot))
3767 error = SET_ERROR(EINVAL);
3768
3769 if (error == 0 &&
3770 (error = vdev_create(rvd, txg, B_FALSE)) == 0 &&
3771 (error = spa_validate_aux(spa, nvroot, txg,
3772 VDEV_ALLOC_ADD)) == 0) {
3773 for (int c = 0; c < rvd->vdev_children; c++) {
3774 vdev_ashift_optimize(rvd->vdev_child[c]);
3775 vdev_metaslab_set_size(rvd->vdev_child[c]);
3776 vdev_expand(rvd->vdev_child[c], txg);
3777 }
3778 }
3779
3780 spa_config_exit(spa, SCL_ALL, FTAG);
3781
3782 if (error != 0) {
3783 spa_unload(spa);
3784 spa_deactivate(spa);
3785 spa_remove(spa);
3786 mutex_exit(&spa_namespace_lock);
3787 return (error);
3788 }
3789
3790 /*
3791 * Get the list of spares, if specified.
3792 */
3793 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
3794 &spares, &nspares) == 0) {
3795 VERIFY(nvlist_alloc(&spa->spa_spares.sav_config, NV_UNIQUE_NAME,
3796 KM_SLEEP) == 0);
3797 VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
3798 ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
3799 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
3800 spa_load_spares(spa);
3801 spa_config_exit(spa, SCL_ALL, FTAG);
3802 spa->spa_spares.sav_sync = B_TRUE;
3803 }
3804
3805 /*
3806 * Get the list of level 2 cache devices, if specified.
3807 */
3808 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
3809 &l2cache, &nl2cache) == 0) {
3810 VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config,
3811 NV_UNIQUE_NAME, KM_SLEEP) == 0);
3812 VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config,
3813 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
3814 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
3815 spa_load_l2cache(spa);
3816 spa_config_exit(spa, SCL_ALL, FTAG);
3817 spa->spa_l2cache.sav_sync = B_TRUE;
3818 }
3819
3820 spa->spa_is_initializing = B_TRUE;
3821 spa->spa_dsl_pool = dp = dsl_pool_create(spa, zplprops, txg);
3822 spa->spa_meta_objset = dp->dp_meta_objset;
3823 spa->spa_is_initializing = B_FALSE;
3824
3825 /*
3826 * Create DDTs (dedup tables).
3827 */
3828 ddt_create(spa);
3829
3830 spa_update_dspace(spa);
3831
3832 tx = dmu_tx_create_assigned(dp, txg);
3833
3834 /*
3835 * Create the pool config object.
3836 */
3837 spa->spa_config_object = dmu_object_alloc(spa->spa_meta_objset,
3838 DMU_OT_PACKED_NVLIST, SPA_CONFIG_BLOCKSIZE,
3839 DMU_OT_PACKED_NVLIST_SIZE, sizeof (uint64_t), tx);
3840
3841 if (zap_add(spa->spa_meta_objset,
3842 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CONFIG,
3843 sizeof (uint64_t), 1, &spa->spa_config_object, tx) != 0) {
3844 cmn_err(CE_PANIC, "failed to add pool config");
3845 }
3846
3847 if (spa_version(spa) >= SPA_VERSION_FEATURES)
3848 spa_feature_create_zap_objects(spa, tx);
3849
3850 if (zap_add(spa->spa_meta_objset,
3851 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CREATION_VERSION,
3852 sizeof (uint64_t), 1, &version, tx) != 0) {
3853 cmn_err(CE_PANIC, "failed to add pool version");
3854 }
3855
3856 /* Newly created pools with the right version are always deflated. */
3857 if (version >= SPA_VERSION_RAIDZ_DEFLATE) {
3858 spa->spa_deflate = TRUE;
3859 if (zap_add(spa->spa_meta_objset,
3860 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
3861 sizeof (uint64_t), 1, &spa->spa_deflate, tx) != 0) {
3862 cmn_err(CE_PANIC, "failed to add deflate");
3863 }
3864 }
3865
3866 /*
3867 * Create the deferred-free bpobj. Turn off compression
3868 * because sync-to-convergence takes longer if the blocksize
3869 * keeps changing.
3870 */
3871 obj = bpobj_alloc(spa->spa_meta_objset, 1 << 14, tx);
3872 dmu_object_set_compress(spa->spa_meta_objset, obj,
3873 ZIO_COMPRESS_OFF, tx);
3874 if (zap_add(spa->spa_meta_objset,
3875 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SYNC_BPOBJ,
3876 sizeof (uint64_t), 1, &obj, tx) != 0) {
3877 cmn_err(CE_PANIC, "failed to add bpobj");
3878 }
3879 VERIFY3U(0, ==, bpobj_open(&spa->spa_deferred_bpobj,
3880 spa->spa_meta_objset, obj));
3881
3882 /*
3883 * Create the pool's history object.
3884 */
3885 if (version >= SPA_VERSION_ZPOOL_HISTORY)
3886 spa_history_create_obj(spa, tx);
3887
3888 /*
3889 * Generate some random noise for salted checksums to operate on.
3890 */
3891 (void) random_get_pseudo_bytes(spa->spa_cksum_salt.zcs_bytes,
3892 sizeof (spa->spa_cksum_salt.zcs_bytes));
3893
3894 /*
3895 * Set pool properties.
3896 */
3897 spa->spa_bootfs = zpool_prop_default_numeric(ZPOOL_PROP_BOOTFS);
3898 spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION);
3899 spa->spa_failmode = zpool_prop_default_numeric(ZPOOL_PROP_FAILUREMODE);
3900 spa->spa_autoexpand = zpool_prop_default_numeric(ZPOOL_PROP_AUTOEXPAND);
3901
3902 if (props != NULL) {
3903 spa_configfile_set(spa, props, B_FALSE);
3904 spa_sync_props(props, tx);
3905 }
3906
3907 dmu_tx_commit(tx);
3908
3909 spa->spa_sync_on = B_TRUE;
3910 txg_sync_start(spa->spa_dsl_pool);
3911
3912 /*
3913 * We explicitly wait for the first transaction to complete so that our
3914 * bean counters are appropriately updated.
3915 */
3916 txg_wait_synced(spa->spa_dsl_pool, txg);
3917
3918 spa_config_sync(spa, B_FALSE, B_TRUE);
3919 spa_event_notify(spa, NULL, ESC_ZFS_POOL_CREATE);
3920
3921 spa_history_log_version(spa, "create");
3922
3923 /*
3924 * Don't count references from objsets that are already closed
3925 * and are making their way through the eviction process.
3926 */
3927 spa_evicting_os_wait(spa);
3928 spa->spa_minref = refcount_count(&spa->spa_refcount);
3929 spa->spa_load_state = SPA_LOAD_NONE;
3930
3931 mutex_exit(&spa_namespace_lock);
3932
3933 return (0);
3934}
3935
3936#ifdef _KERNEL
3937#ifdef illumos
3938/*
3939 * Get the root pool information from the root disk, then import the root pool
3940 * during the system boot up time.
3941 */
3942extern int vdev_disk_read_rootlabel(char *, char *, nvlist_t **);
3943
3944static nvlist_t *
3945spa_generate_rootconf(char *devpath, char *devid, uint64_t *guid)
3946{
3947 nvlist_t *config;
3948 nvlist_t *nvtop, *nvroot;
3949 uint64_t pgid;
3950
3951 if (vdev_disk_read_rootlabel(devpath, devid, &config) != 0)
3952 return (NULL);
3953
3954 /*
3955 * Add this top-level vdev to the child array.
3956 */
3957 VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
3958 &nvtop) == 0);
3959 VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
3960 &pgid) == 0);
3961 VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID, guid) == 0);
3962
3963 /*
3964 * Put this pool's top-level vdevs into a root vdev.
3965 */
3966 VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0);
3967 VERIFY(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
3968 VDEV_TYPE_ROOT) == 0);
3969 VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) == 0);
3970 VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, pgid) == 0);
3971 VERIFY(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
3972 &nvtop, 1) == 0);
3973
3974 /*
3975 * Replace the existing vdev_tree with the new root vdev in
3976 * this pool's configuration (remove the old, add the new).
3977 */
3978 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, nvroot) == 0);
3979 nvlist_free(nvroot);
3980 return (config);
3981}
3982
3983/*
3984 * Walk the vdev tree and see if we can find a device with "better"
3985 * configuration. A configuration is "better" if the label on that
3986 * device has a more recent txg.
3987 */
3988static void
3989spa_alt_rootvdev(vdev_t *vd, vdev_t **avd, uint64_t *txg)
3990{
3991 for (int c = 0; c < vd->vdev_children; c++)
3992 spa_alt_rootvdev(vd->vdev_child[c], avd, txg);
3993
3994 if (vd->vdev_ops->vdev_op_leaf) {
3995 nvlist_t *label;
3996 uint64_t label_txg;
3997
3998 if (vdev_disk_read_rootlabel(vd->vdev_physpath, vd->vdev_devid,
3999 &label) != 0)
4000 return;
4001
4002 VERIFY(nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_TXG,
4003 &label_txg) == 0);
4004
4005 /*
4006 * Do we have a better boot device?
4007 */
4008 if (label_txg > *txg) {
4009 *txg = label_txg;
4010 *avd = vd;
4011 }
4012 nvlist_free(label);
4013 }
4014}
4015
4016/*
4017 * Import a root pool.
4018 *
4019 * For x86, devpath_list will consist of the devid and/or physpath name of
4020 * the vdev (e.g. "id1,sd@SSEAGATE..." or "/pci@1f,0/ide@d/disk@0,0:a").
4021 * The GRUB "findroot" command will return the vdev we should boot.
4022 *
4023 * For SPARC, devpath_list consists of the physpath name of the booting device,
4024 * whether the root pool is a single-device pool or a mirrored pool.
4025 * e.g.
4026 * "/pci@1f,0/ide@d/disk@0,0:a"
4027 */
4028int
4029spa_import_rootpool(char *devpath, char *devid)
4030{
4031 spa_t *spa;
4032 vdev_t *rvd, *bvd, *avd = NULL;
4033 nvlist_t *config, *nvtop;
4034 uint64_t guid, txg;
4035 char *pname;
4036 int error;
4037
4038 /*
4039 * Read the label from the boot device and generate a configuration.
4040 */
4041 config = spa_generate_rootconf(devpath, devid, &guid);
4042#if defined(_OBP) && defined(_KERNEL)
4043 if (config == NULL) {
4044 if (strstr(devpath, "/iscsi/ssd") != NULL) {
4045 /* iscsi boot */
4046 get_iscsi_bootpath_phy(devpath);
4047 config = spa_generate_rootconf(devpath, devid, &guid);
4048 }
4049 }
4050#endif
4051 if (config == NULL) {
4052 cmn_err(CE_NOTE, "Cannot read the pool label from '%s'",
4053 devpath);
4054 return (SET_ERROR(EIO));
4055 }
4056
4057 VERIFY(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
4058 &pname) == 0);
4059 VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG, &txg) == 0);
4060
4061 mutex_enter(&spa_namespace_lock);
4062 if ((spa = spa_lookup(pname)) != NULL) {
4063 /*
4064 * Remove the existing root pool from the namespace so that we
4065 * can replace it with the correct config we just read in.
4066 */
4067 spa_remove(spa);
4068 }
4069
4070 spa = spa_add(pname, config, NULL);
4071 spa->spa_is_root = B_TRUE;
4072 spa->spa_import_flags = ZFS_IMPORT_VERBATIM;
4073
4074 /*
4075 * Build up a vdev tree based on the boot device's label config.
4076 */
4077 VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
4078 &nvtop) == 0);
4079 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
4080 error = spa_config_parse(spa, &rvd, nvtop, NULL, 0,
4081 VDEV_ALLOC_ROOTPOOL);
4082 spa_config_exit(spa, SCL_ALL, FTAG);
4083 if (error) {
4084 mutex_exit(&spa_namespace_lock);
4085 nvlist_free(config);
4086 cmn_err(CE_NOTE, "Can not parse the config for pool '%s'",
4087 pname);
4088 return (error);
4089 }
4090
4091 /*
4092 * Get the boot vdev.
4093 */
4094 if ((bvd = vdev_lookup_by_guid(rvd, guid)) == NULL) {
4095 cmn_err(CE_NOTE, "Can not find the boot vdev for guid %llu",
4096 (u_longlong_t)guid);
4097 error = SET_ERROR(ENOENT);
4098 goto out;
4099 }
4100
4101 /*
4102 * Determine if there is a better boot device.
4103 */
4104 avd = bvd;
4105 spa_alt_rootvdev(rvd, &avd, &txg);
4106 if (avd != bvd) {
4107 cmn_err(CE_NOTE, "The boot device is 'degraded'. Please "
4108 "try booting from '%s'", avd->vdev_path);
4109 error = SET_ERROR(EINVAL);
4110 goto out;
4111 }
4112
4113 /*
4114 * If the boot device is part of a spare vdev then ensure that
4115 * we're booting off the active spare.
4116 */
4117 if (bvd->vdev_parent->vdev_ops == &vdev_spare_ops &&
4118 !bvd->vdev_isspare) {
4119 cmn_err(CE_NOTE, "The boot device is currently spared. Please "
4120 "try booting from '%s'",
4121 bvd->vdev_parent->
4122 vdev_child[bvd->vdev_parent->vdev_children - 1]->vdev_path);
4123 error = SET_ERROR(EINVAL);
4124 goto out;
4125 }
4126
4127 error = 0;
4128out:
4129 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
4130 vdev_free(rvd);
4131 spa_config_exit(spa, SCL_ALL, FTAG);
4132 mutex_exit(&spa_namespace_lock);
4133
4134 nvlist_free(config);
4135 return (error);
4136}
4137
4138#else /* !illumos */
4139
4140extern int vdev_geom_read_pool_label(const char *name, nvlist_t ***configs,
4141 uint64_t *count);
4142
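/*
 * Generate a root pool configuration for the named pool from the vdev
 * labels that GEOM can find. The label with the highest txg provides the
 * base config; hole and missing top-level vdevs are filled in with
 * placeholder entries so the resulting vdev tree has the expected layout.
 */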
4143static nvlist_t *
4144spa_generate_rootconf(const char *name)
4145{
4146 nvlist_t **configs, **tops;
4147 nvlist_t *config;
4148 nvlist_t *best_cfg, *nvtop, *nvroot;
4149 uint64_t *holes;
4150 uint64_t best_txg;
4151 uint64_t nchildren;
4152 uint64_t pgid;
4153 uint64_t count;
4154 uint64_t i;
4155 uint_t nholes;
4156
4157 if (vdev_geom_read_pool_label(name, &configs, &count) != 0)
4158 return (NULL);
4159
4160 ASSERT3U(count, !=, 0);
4161 best_txg = 0;
4162 for (i = 0; i < count; i++) {
4163 uint64_t txg;
4164
4165 VERIFY(nvlist_lookup_uint64(configs[i], ZPOOL_CONFIG_POOL_TXG,
4166 &txg) == 0);
4167 if (txg > best_txg) {
4168 best_txg = txg;
4169 best_cfg = configs[i];
4170 }
4171 }
4172
4173 nchildren = 1;
4174 nvlist_lookup_uint64(best_cfg, ZPOOL_CONFIG_VDEV_CHILDREN, &nchildren);
4175 holes = NULL;
4176 nvlist_lookup_uint64_array(best_cfg, ZPOOL_CONFIG_HOLE_ARRAY,
4177 &holes, &nholes);
4178
4179 	tops = kmem_zalloc(nchildren * sizeof (void *), KM_SLEEP);
4180 for (i = 0; i < nchildren; i++) {
4181 if (i >= count)
4182 break;
4183 if (configs[i] == NULL)
4184 continue;
4185 VERIFY(nvlist_lookup_nvlist(configs[i], ZPOOL_CONFIG_VDEV_TREE,
4186 &nvtop) == 0);
4187 nvlist_dup(nvtop, &tops[i], KM_SLEEP);
4188 }
4189 for (i = 0; holes != NULL && i < nholes; i++) {
4190 if (i >= nchildren)
4191 continue;
4192 if (tops[holes[i]] != NULL)
4193 continue;
4194 nvlist_alloc(&tops[holes[i]], NV_UNIQUE_NAME, KM_SLEEP);
4195 VERIFY(nvlist_add_string(tops[holes[i]], ZPOOL_CONFIG_TYPE,
4196 VDEV_TYPE_HOLE) == 0);
4197 VERIFY(nvlist_add_uint64(tops[holes[i]], ZPOOL_CONFIG_ID,
4198 holes[i]) == 0);
4199 VERIFY(nvlist_add_uint64(tops[holes[i]], ZPOOL_CONFIG_GUID,
4200 0) == 0);
4201 }
4202 for (i = 0; i < nchildren; i++) {
4203 if (tops[i] != NULL)
4204 continue;
4205 nvlist_alloc(&tops[i], NV_UNIQUE_NAME, KM_SLEEP);
4206 VERIFY(nvlist_add_string(tops[i], ZPOOL_CONFIG_TYPE,
4207 VDEV_TYPE_MISSING) == 0);
4208 VERIFY(nvlist_add_uint64(tops[i], ZPOOL_CONFIG_ID,
4209 i) == 0);
4210 VERIFY(nvlist_add_uint64(tops[i], ZPOOL_CONFIG_GUID,
4211 0) == 0);
4212 }
4213
4214 /*
4215 * Create pool config based on the best vdev config.
4216 */
4217 nvlist_dup(best_cfg, &config, KM_SLEEP);
4218
4219 /*
4220 * Put this pool's top-level vdevs into a root vdev.
4221 */
4222 VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
4223 &pgid) == 0);
4224 VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0);
4225 VERIFY(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
4226 VDEV_TYPE_ROOT) == 0);
4227 VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) == 0);
4228 VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, pgid) == 0);
4229 VERIFY(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
4230 tops, nchildren) == 0);
4231
4232 /*
4233 * Replace the existing vdev_tree with the new root vdev in
4234 * this pool's configuration (remove the old, add the new).
4235 */
4236 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, nvroot) == 0);
4237
4238 /*
4239 * Drop vdev config elements that should not be present at pool level.
4240 */
4241 nvlist_remove(config, ZPOOL_CONFIG_GUID, DATA_TYPE_UINT64);
4242 nvlist_remove(config, ZPOOL_CONFIG_TOP_GUID, DATA_TYPE_UINT64);
4243
4244 for (i = 0; i < count; i++)
4245 nvlist_free(configs[i]);
4246 	kmem_free(configs, count * sizeof (void *));
4247 for (i = 0; i < nchildren; i++)
4248 nvlist_free(tops[i]);
4249 	kmem_free(tops, nchildren * sizeof (void *));
4250 nvlist_free(nvroot);
4251 return (config);
4252}
4253
4254int
4255spa_import_rootpool(const char *name)
4256{
4257 spa_t *spa;
4258 vdev_t *rvd, *bvd, *avd = NULL;
4259 nvlist_t *config, *nvtop;
4260 uint64_t txg;
4261 char *pname;
4262 int error;
4263
4264 /*
4265 * Read the label from the boot device and generate a configuration.
4266 */
4267 config = spa_generate_rootconf(name);
4268
4269 mutex_enter(&spa_namespace_lock);
4270 if (config != NULL) {
4271 VERIFY(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
4272 &pname) == 0 && strcmp(name, pname) == 0);
4273 VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG, &txg)
4274 == 0);
4275
4276 if ((spa = spa_lookup(pname)) != NULL) {
4277 /*
4278 * The pool could already be imported,
4279 * e.g., after reboot -r.
4280 */
4281 if (spa->spa_state == POOL_STATE_ACTIVE) {
4282 mutex_exit(&spa_namespace_lock);
4283 nvlist_free(config);
4284 return (0);
4285 }
4286
4287 /*
4278 * Remove the existing root pool from the namespace so
4279 * that we can replace it with the correct config
4280 * we just read in.
4281 */
4282 spa_remove(spa);
4283 }
4284 spa = spa_add(pname, config, NULL);
4285
4286 /*
4287 * Set spa_ubsync.ub_version as it can be used in vdev_alloc()
4288 * via spa_version().
4289 */
4290 if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
4291 &spa->spa_ubsync.ub_version) != 0)
4292 spa->spa_ubsync.ub_version = SPA_VERSION_INITIAL;
4293 } else if ((spa = spa_lookup(name)) == NULL) {
4294 mutex_exit(&spa_namespace_lock);
4295 nvlist_free(config);
4296 cmn_err(CE_NOTE, "Cannot find the pool label for '%s'",
4297 name);
4298 return (EIO);
4299 } else {
4300 VERIFY(nvlist_dup(spa->spa_config, &config, KM_SLEEP) == 0);
4301 }
4302 spa->spa_is_root = B_TRUE;
4303 spa->spa_import_flags = ZFS_IMPORT_VERBATIM;
4304
4305 /*
4306 * Build up a vdev tree based on the boot device's label config.
4307 */
4308 VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
4309 &nvtop) == 0);
4310 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
4311 error = spa_config_parse(spa, &rvd, nvtop, NULL, 0,
4312 VDEV_ALLOC_ROOTPOOL);
4313 spa_config_exit(spa, SCL_ALL, FTAG);
4314 if (error) {
4315 mutex_exit(&spa_namespace_lock);
4316 nvlist_free(config);
4317 cmn_err(CE_NOTE, "Can not parse the config for pool '%s'",
4318 pname);
4319 return (error);
4320 }
4321
4322 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
4323 vdev_free(rvd);
4324 spa_config_exit(spa, SCL_ALL, FTAG);
4325 mutex_exit(&spa_namespace_lock);
4326
4327 nvlist_free(config);
4328 return (0);
4329}
4330
4331#endif /* illumos */
4332#endif /* _KERNEL */
4333
4334/*
4335 * Import a non-root pool into the system.
4336 */
4337int
4338spa_import(const char *pool, nvlist_t *config, nvlist_t *props, uint64_t flags)
4339{
4340 spa_t *spa;
4341 char *altroot = NULL;
4342 spa_load_state_t state = SPA_LOAD_IMPORT;
4343 zpool_rewind_policy_t policy;
4344 uint64_t mode = spa_mode_global;
4345 uint64_t readonly = B_FALSE;
4346 int error;
4347 nvlist_t *nvroot;
4348 nvlist_t **spares, **l2cache;
4349 uint_t nspares, nl2cache;
4350
4351 /*
4352 * If a pool with this name exists, return failure.
4353 */
4354 mutex_enter(&spa_namespace_lock);
4355 if (spa_lookup(pool) != NULL) {
4356 mutex_exit(&spa_namespace_lock);
4357 return (SET_ERROR(EEXIST));
4358 }
4359
4360 /*
4361 * Create and initialize the spa structure.
4362 */
4363 (void) nvlist_lookup_string(props,
4364 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
4365 (void) nvlist_lookup_uint64(props,
4366 zpool_prop_to_name(ZPOOL_PROP_READONLY), &readonly);
4367 if (readonly)
4368 mode = FREAD;
4369 spa = spa_add(pool, config, altroot);
4370 spa->spa_import_flags = flags;
4371
4372 /*
4373 * Verbatim import - Take a pool and insert it into the namespace
4374 * as if it had been loaded at boot.
4375 */
4376 if (spa->spa_import_flags & ZFS_IMPORT_VERBATIM) {
4377 if (props != NULL)
4378 spa_configfile_set(spa, props, B_FALSE);
4379
4380 spa_config_sync(spa, B_FALSE, B_TRUE);
4381 spa_event_notify(spa, NULL, ESC_ZFS_POOL_IMPORT);
4382
4383 mutex_exit(&spa_namespace_lock);
4384 return (0);
4385 }
4386
4387 spa_activate(spa, mode);
4388
4389 /*
4390 * Don't start async tasks until we know everything is healthy.
4391 */
4392 spa_async_suspend(spa);
4393
4394 zpool_get_rewind_policy(config, &policy);
4395 if (policy.zrp_request & ZPOOL_DO_REWIND)
4396 state = SPA_LOAD_RECOVER;
4397
4398 /*
4399 * Pass off the heavy lifting to spa_load(). Pass TRUE for mosconfig
4400 * because the user-supplied config is actually the one to trust when
4401 * doing an import.
4402 */
4403 if (state != SPA_LOAD_RECOVER)
4404 spa->spa_last_ubsync_txg = spa->spa_load_txg = 0;
4405
4406 error = spa_load_best(spa, state, B_TRUE, policy.zrp_txg,
4407 policy.zrp_request);
4408
4409 /*
4410 * Propagate anything learned while loading the pool and pass it
4411 	 * back to the caller (e.g. rewind info, missing devices, etc.).
4412 */
4413 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_LOAD_INFO,
4414 spa->spa_load_info) == 0);
4415
4416 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
4417 /*
4418 * Toss any existing sparelist, as it doesn't have any validity
4419 * anymore, and conflicts with spa_has_spare().
4420 */
4421 if (spa->spa_spares.sav_config) {
4422 nvlist_free(spa->spa_spares.sav_config);
4423 spa->spa_spares.sav_config = NULL;
4424 spa_load_spares(spa);
4425 }
4426 if (spa->spa_l2cache.sav_config) {
4427 nvlist_free(spa->spa_l2cache.sav_config);
4428 spa->spa_l2cache.sav_config = NULL;
4429 spa_load_l2cache(spa);
4430 }
4431
4432 VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
4433 &nvroot) == 0);
4434 if (error == 0)
4435 error = spa_validate_aux(spa, nvroot, -1ULL,
4436 VDEV_ALLOC_SPARE);
4437 if (error == 0)
4438 error = spa_validate_aux(spa, nvroot, -1ULL,
4439 VDEV_ALLOC_L2CACHE);
4440 spa_config_exit(spa, SCL_ALL, FTAG);
4441
4442 if (props != NULL)
4443 spa_configfile_set(spa, props, B_FALSE);
4444
4445 if (error != 0 || (props && spa_writeable(spa) &&
4446 (error = spa_prop_set(spa, props)))) {
4447 spa_unload(spa);
4448 spa_deactivate(spa);
4449 spa_remove(spa);
4450 mutex_exit(&spa_namespace_lock);
4451 return (error);
4452 }
4453
4454 spa_async_resume(spa);
4455
4456 /*
4457 * Override any spares and level 2 cache devices as specified by
4458 * the user, as these may have correct device names/devids, etc.
4459 */
4460 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
4461 &spares, &nspares) == 0) {
4462 if (spa->spa_spares.sav_config)
4463 VERIFY(nvlist_remove(spa->spa_spares.sav_config,
4464 ZPOOL_CONFIG_SPARES, DATA_TYPE_NVLIST_ARRAY) == 0);
4465 else
4466 VERIFY(nvlist_alloc(&spa->spa_spares.sav_config,
4467 NV_UNIQUE_NAME, KM_SLEEP) == 0);
4468 VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
4469 ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
4470 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
4471 spa_load_spares(spa);
4472 spa_config_exit(spa, SCL_ALL, FTAG);
4473 spa->spa_spares.sav_sync = B_TRUE;
4474 }
4475 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
4476 &l2cache, &nl2cache) == 0) {
4477 if (spa->spa_l2cache.sav_config)
4478 VERIFY(nvlist_remove(spa->spa_l2cache.sav_config,
4479 ZPOOL_CONFIG_L2CACHE, DATA_TYPE_NVLIST_ARRAY) == 0);
4480 else
4481 VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config,
4482 NV_UNIQUE_NAME, KM_SLEEP) == 0);
4483 VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config,
4484 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
4485 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
4486 spa_load_l2cache(spa);
4487 spa_config_exit(spa, SCL_ALL, FTAG);
4488 spa->spa_l2cache.sav_sync = B_TRUE;
4489 }
4490
4491 /*
4492 * Check for any removed devices.
4493 */
4494 if (spa->spa_autoreplace) {
4495 spa_aux_check_removed(&spa->spa_spares);
4496 spa_aux_check_removed(&spa->spa_l2cache);
4497 }
4498
4499 if (spa_writeable(spa)) {
4500 /*
4501 * Update the config cache to include the newly-imported pool.
4502 */
4503 spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
4504 }
4505
4506 /*
4507 * It's possible that the pool was expanded while it was exported.
4508 * We kick off an async task to handle this for us.
4509 */
4510 spa_async_request(spa, SPA_ASYNC_AUTOEXPAND);
4511
4512 spa_history_log_version(spa, "import");
4513
4514 spa_event_notify(spa, NULL, ESC_ZFS_POOL_IMPORT);
4515
4516 mutex_exit(&spa_namespace_lock);
4517
4518#ifdef __FreeBSD__
4519#ifdef _KERNEL
4520 zvol_create_minors(pool);
4521#endif
4522#endif
4523 return (0);
4524}
4525
4526nvlist_t *
4527spa_tryimport(nvlist_t *tryconfig)
4528{
4529 nvlist_t *config = NULL;
4530 char *poolname;
4531 spa_t *spa;
4532 uint64_t state;
4533 int error;
4534
4535 if (nvlist_lookup_string(tryconfig, ZPOOL_CONFIG_POOL_NAME, &poolname))
4536 return (NULL);
4537
4538 if (nvlist_lookup_uint64(tryconfig, ZPOOL_CONFIG_POOL_STATE, &state))
4539 return (NULL);
4540
4541 /*
4542 * Create and initialize the spa structure.
4543 */
4544 mutex_enter(&spa_namespace_lock);
4545 spa = spa_add(TRYIMPORT_NAME, tryconfig, NULL);
4546 spa_activate(spa, FREAD);
4547
4548 /*
4549 * Pass off the heavy lifting to spa_load().
4550 * Pass TRUE for mosconfig because the user-supplied config
4551 * is actually the one to trust when doing an import.
4552 */
4553 error = spa_load(spa, SPA_LOAD_TRYIMPORT, SPA_IMPORT_EXISTING, B_TRUE);
4554
4555 /*
4556 * If 'tryconfig' was at least parsable, return the current config.
4557 */
4558 if (spa->spa_root_vdev != NULL) {
4559 config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
4560 VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME,
4561 poolname) == 0);
4562 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE,
4563 state) == 0);
4564 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_TIMESTAMP,
4565 spa->spa_uberblock.ub_timestamp) == 0);
4566 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_LOAD_INFO,
4567 spa->spa_load_info) == 0);
4568
4569 /*
4570 * If the bootfs property exists on this pool then we
4571 * copy it out so that external consumers can tell which
4572 * pools are bootable.
4573 */
4574 if ((!error || error == EEXIST) && spa->spa_bootfs) {
4575 char *tmpname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
4576
4577 /*
4578 * We have to play games with the name since the
4579 * pool was opened as TRYIMPORT_NAME.
4580 */
4581 if (dsl_dsobj_to_dsname(spa_name(spa),
4582 spa->spa_bootfs, tmpname) == 0) {
4583 char *cp;
4584 char *dsname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
4585
4586 cp = strchr(tmpname, '/');
4587 if (cp == NULL) {
4588 (void) strlcpy(dsname, tmpname,
4589 MAXPATHLEN);
4590 } else {
4591 (void) snprintf(dsname, MAXPATHLEN,
4592 "%s/%s", poolname, ++cp);
4593 }
4594 VERIFY(nvlist_add_string(config,
4595 ZPOOL_CONFIG_BOOTFS, dsname) == 0);
4596 kmem_free(dsname, MAXPATHLEN);
4597 }
4598 kmem_free(tmpname, MAXPATHLEN);
4599 }
4600
4601 /*
4602 * Add the list of hot spares and level 2 cache devices.
4603 */
4604 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
4605 spa_add_spares(spa, config);
4606 spa_add_l2cache(spa, config);
4607 spa_config_exit(spa, SCL_CONFIG, FTAG);
4608 }
4609
4610 spa_unload(spa);
4611 spa_deactivate(spa);
4612 spa_remove(spa);
4613 mutex_exit(&spa_namespace_lock);
4614
4615 return (config);
4616}
4617
4618/*
4619 * Pool export/destroy
4620 *
4621 * The act of destroying or exporting a pool is very simple. We make sure there
4622 * is no more pending I/O and that any references to the pool are gone. Then, we
4623 * update the pool state and sync all the labels to disk, removing the
4624 * configuration from the cache afterwards. If the 'hardforce' flag is set, then
4625 * we don't sync the labels or remove the configuration cache.
4626 */
4627static int
4628spa_export_common(char *pool, int new_state, nvlist_t **oldconfig,
4629 boolean_t force, boolean_t hardforce)
4630{
4631 spa_t *spa;
4632
4633 if (oldconfig)
4634 *oldconfig = NULL;
4635
4636 if (!(spa_mode_global & FWRITE))
4637 return (SET_ERROR(EROFS));
4638
4639 mutex_enter(&spa_namespace_lock);
4640 if ((spa = spa_lookup(pool)) == NULL) {
4641 mutex_exit(&spa_namespace_lock);
4642 return (SET_ERROR(ENOENT));
4643 }
4644
4645 /*
4646 * Put a hold on the pool, drop the namespace lock, stop async tasks,
4647 * reacquire the namespace lock, and see if we can export.
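	 * We cannot suspend the async tasks while holding the namespace
	 * lock, since the async thread may itself need that lock.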
4648 */
4649 spa_open_ref(spa, FTAG);
4650 mutex_exit(&spa_namespace_lock);
4651 spa_async_suspend(spa);
4652 mutex_enter(&spa_namespace_lock);
4653 spa_close(spa, FTAG);
4654
4655 /*
4656 * The pool will be in core if it's openable,
4657 * in which case we can modify its state.
4658 */
4659 if (spa->spa_state != POOL_STATE_UNINITIALIZED && spa->spa_sync_on) {
4660 /*
4661 * Objsets may be open only because they're dirty, so we
4662 		 * have to force the pool to sync before checking spa_refcnt.
4663 */
4664 txg_wait_synced(spa->spa_dsl_pool, 0);
4665 spa_evicting_os_wait(spa);
4666
4667 /*
4668 * A pool cannot be exported or destroyed if there are active
4669 * references. If we are resetting a pool, allow references by
4670 * fault injection handlers.
4671 */
4672 if (!spa_refcount_zero(spa) ||
4673 (spa->spa_inject_ref != 0 &&
4674 new_state != POOL_STATE_UNINITIALIZED)) {
4675 spa_async_resume(spa);
4676 mutex_exit(&spa_namespace_lock);
4677 return (SET_ERROR(EBUSY));
4678 }
4679
4680 /*
4681 * A pool cannot be exported if it has an active shared spare.
4682 * This is to prevent other pools stealing the active spare
4683 			 * from an exported pool. At the user's request, such a pool
4684 			 * can be forcibly exported.
4685 */
4686 if (!force && new_state == POOL_STATE_EXPORTED &&
4687 spa_has_active_shared_spare(spa)) {
4688 spa_async_resume(spa);
4689 mutex_exit(&spa_namespace_lock);
4690 return (SET_ERROR(EXDEV));
4691 }
4692
4693 /*
4694 * We want this to be reflected on every label,
4695 * so mark them all dirty. spa_unload() will do the
4696 * final sync that pushes these changes out.
4697 */
4698 if (new_state != POOL_STATE_UNINITIALIZED && !hardforce) {
4699 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
4700 spa->spa_state = new_state;
4701 spa->spa_final_txg = spa_last_synced_txg(spa) +
4702 TXG_DEFER_SIZE + 1;
4703 vdev_config_dirty(spa->spa_root_vdev);
4704 spa_config_exit(spa, SCL_ALL, FTAG);
4705 }
4706 }
4707
4708 spa_event_notify(spa, NULL, ESC_ZFS_POOL_DESTROY);
4709
4710 if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
4711 spa_unload(spa);
4712 spa_deactivate(spa);
4713 }
4714
4715 if (oldconfig && spa->spa_config)
4716 VERIFY(nvlist_dup(spa->spa_config, oldconfig, 0) == 0);
4717
4718 if (new_state != POOL_STATE_UNINITIALIZED) {
4719 if (!hardforce)
4720 spa_config_sync(spa, B_TRUE, B_TRUE);
4721 spa_remove(spa);
4722 }
4723 mutex_exit(&spa_namespace_lock);
4724
4725 return (0);
4726}
4727
4728/*
4729 * Destroy a storage pool.
4730 */
4731int
4732spa_destroy(char *pool)
4733{
4734 return (spa_export_common(pool, POOL_STATE_DESTROYED, NULL,
4735 B_FALSE, B_FALSE));
4736}
4737
4738/*
4739 * Export a storage pool.
4740 */
4741int
4742spa_export(char *pool, nvlist_t **oldconfig, boolean_t force,
4743 boolean_t hardforce)
4744{
4745 return (spa_export_common(pool, POOL_STATE_EXPORTED, oldconfig,
4746 force, hardforce));
4747}
4748
4749/*
4750 * Similar to spa_export(), this unloads the spa_t without actually removing it
4751 * from the namespace in any way.
4752 */
4753int
4754spa_reset(char *pool)
4755{
4756 return (spa_export_common(pool, POOL_STATE_UNINITIALIZED, NULL,
4757 B_FALSE, B_FALSE));
4758}
4759
4760/*
4761 * ==========================================================================
4762 * Device manipulation
4763 * ==========================================================================
4764 */
4765
4766/*
4767 * Add a device to a storage pool.
4768 */
4769int
4770spa_vdev_add(spa_t *spa, nvlist_t *nvroot)
4771{
4772 uint64_t txg, id;
4773 int error;
4774 vdev_t *rvd = spa->spa_root_vdev;
4775 vdev_t *vd, *tvd;
4776 nvlist_t **spares, **l2cache;
4777 uint_t nspares, nl2cache;
4778
4779 ASSERT(spa_writeable(spa));
4780
4781 txg = spa_vdev_enter(spa);
4782
4783 if ((error = spa_config_parse(spa, &vd, nvroot, NULL, 0,
4784 VDEV_ALLOC_ADD)) != 0)
4785 return (spa_vdev_exit(spa, NULL, txg, error));
4786
4787 spa->spa_pending_vdev = vd; /* spa_vdev_exit() will clear this */
4788
4789 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, &spares,
4790 &nspares) != 0)
4791 nspares = 0;
4792
4793 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, &l2cache,
4794 &nl2cache) != 0)
4795 nl2cache = 0;
4796
4797 if (vd->vdev_children == 0 && nspares == 0 && nl2cache == 0)
4798 return (spa_vdev_exit(spa, vd, txg, EINVAL));
4799
4800 if (vd->vdev_children != 0 &&
4801 (error = vdev_create(vd, txg, B_FALSE)) != 0)
4802 return (spa_vdev_exit(spa, vd, txg, error));
4803
4804 /*
4805 * We must validate the spares and l2cache devices after checking the
4806 * children. Otherwise, vdev_inuse() will blindly overwrite the spare.
4807 */
4808 if ((error = spa_validate_aux(spa, nvroot, txg, VDEV_ALLOC_ADD)) != 0)
4809 return (spa_vdev_exit(spa, vd, txg, error));
4810
4811 /*
4812 * Transfer each new top-level vdev from vd to rvd.
4813 */
4814 for (int c = 0; c < vd->vdev_children; c++) {
4815
4816 /*
4817 * Set the vdev id to the first hole, if one exists.
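		 * If no hole is found, 'id' ends up equal to
		 * rvd->vdev_children and the new vdev is appended at the end.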
4818 */
4819 for (id = 0; id < rvd->vdev_children; id++) {
4820 if (rvd->vdev_child[id]->vdev_ishole) {
4821 vdev_free(rvd->vdev_child[id]);
4822 break;
4823 }
4824 }
4825 tvd = vd->vdev_child[c];
4826 vdev_remove_child(vd, tvd);
4827 tvd->vdev_id = id;
4828 vdev_add_child(rvd, tvd);
4829 vdev_config_dirty(tvd);
4830 }
4831
4832 if (nspares != 0) {
4833 spa_set_aux_vdevs(&spa->spa_spares, spares, nspares,
4834 ZPOOL_CONFIG_SPARES);
4835 spa_load_spares(spa);
4836 spa->spa_spares.sav_sync = B_TRUE;
4837 }
4838
4839 if (nl2cache != 0) {
4840 spa_set_aux_vdevs(&spa->spa_l2cache, l2cache, nl2cache,
4841 ZPOOL_CONFIG_L2CACHE);
4842 spa_load_l2cache(spa);
4843 spa->spa_l2cache.sav_sync = B_TRUE;
4844 }
4845
4846 /*
4847 * We have to be careful when adding new vdevs to an existing pool.
4848 * If other threads start allocating from these vdevs before we
4849 * sync the config cache, and we lose power, then upon reboot we may
4850 * fail to open the pool because there are DVAs that the config cache
4851 * can't translate. Therefore, we first add the vdevs without
4852 * initializing metaslabs; sync the config cache (via spa_vdev_exit());
4853 * and then let spa_config_update() initialize the new metaslabs.
4854 *
4855 * spa_load() checks for added-but-not-initialized vdevs, so that
4856 * if we lose power at any point in this sequence, the remaining
4857 * steps will be completed the next time we load the pool.
4858 */
4859 (void) spa_vdev_exit(spa, vd, txg, 0);
4860
4861 mutex_enter(&spa_namespace_lock);
4862 spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
4863 spa_event_notify(spa, NULL, ESC_ZFS_VDEV_ADD);
4864 mutex_exit(&spa_namespace_lock);
4865
4866 return (0);
4867}
4868
4869/*
4870 * Attach a device to a mirror. The arguments are the path to any device
4871 * in the mirror, and the nvroot for the new device. If the path specifies
4872 * a device that is not mirrored, we automatically insert the mirror vdev.
4873 *
4874 * If 'replacing' is specified, the new device is intended to replace the
4875 * existing device; in this case the two devices are made into their own
4876 * mirror using the 'replacing' vdev, which is functionally identical to
4877 * the mirror vdev (it actually reuses all the same ops) but has a few
4878 * extra rules: you can't attach to it after it's been created, and upon
4879 * completion of resilvering, the first disk (the one being replaced)
4880 * is automatically detached.
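 *
 * For example, attaching a new disk D to a plain (non-mirrored) disk A
 * turns A into mirror(A, D); with 'replacing', the pair becomes
 * replacing(A, D) and A is detached automatically once D has finished
 * resilvering.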
4881 */
4882int
4883spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot, int replacing)
4884{
4885 uint64_t txg, dtl_max_txg;
4886 vdev_t *rvd = spa->spa_root_vdev;
4887 vdev_t *oldvd, *newvd, *newrootvd, *pvd, *tvd;
4888 vdev_ops_t *pvops;
4889 char *oldvdpath, *newvdpath;
4890 int newvd_isspare;
4891 int error;
4892
4893 ASSERT(spa_writeable(spa));
4894
4895 txg = spa_vdev_enter(spa);
4896
4897 oldvd = spa_lookup_by_guid(spa, guid, B_FALSE);
4898
4899 if (oldvd == NULL)
4900 return (spa_vdev_exit(spa, NULL, txg, ENODEV));
4901
4902 if (!oldvd->vdev_ops->vdev_op_leaf)
4903 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
4904
4905 pvd = oldvd->vdev_parent;
4906
4907 if ((error = spa_config_parse(spa, &newrootvd, nvroot, NULL, 0,
4908 VDEV_ALLOC_ATTACH)) != 0)
4909 return (spa_vdev_exit(spa, NULL, txg, EINVAL));
4910
4911 if (newrootvd->vdev_children != 1)
4912 return (spa_vdev_exit(spa, newrootvd, txg, EINVAL));
4913
4914 newvd = newrootvd->vdev_child[0];
4915
4916 if (!newvd->vdev_ops->vdev_op_leaf)
4917 return (spa_vdev_exit(spa, newrootvd, txg, EINVAL));
4918
4919 if ((error = vdev_create(newrootvd, txg, replacing)) != 0)
4920 return (spa_vdev_exit(spa, newrootvd, txg, error));
4921
4922 /*
4923 * Spares can't replace logs
4924 */
4925 if (oldvd->vdev_top->vdev_islog && newvd->vdev_isspare)
4926 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
4927
4928 if (!replacing) {
4929 /*
4930 * For attach, the only allowable parent is a mirror or the root
4931 * vdev.
4932 */
4933 if (pvd->vdev_ops != &vdev_mirror_ops &&
4934 pvd->vdev_ops != &vdev_root_ops)
4935 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
4936
4937 pvops = &vdev_mirror_ops;
4938 } else {
4939 /*
4940 * Active hot spares can only be replaced by inactive hot
4941 * spares.
4942 */
4943 if (pvd->vdev_ops == &vdev_spare_ops &&
4944 oldvd->vdev_isspare &&
4945 !spa_has_spare(spa, newvd->vdev_guid))
4946 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
4947
4948 /*
4949 * If the source is a hot spare, and the parent isn't already a
4950 * spare, then we want to create a new hot spare. Otherwise, we
4951 * want to create a replacing vdev. The user is not allowed to
4952 * attach to a spared vdev child unless the 'isspare' state is
4953 * the same (spare replaces spare, non-spare replaces
4954 * non-spare).
4955 */
4956 if (pvd->vdev_ops == &vdev_replacing_ops &&
4957 spa_version(spa) < SPA_VERSION_MULTI_REPLACE) {
4958 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
4959 } else if (pvd->vdev_ops == &vdev_spare_ops &&
4960 newvd->vdev_isspare != oldvd->vdev_isspare) {
4961 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
4962 }
4963
4964 if (newvd->vdev_isspare)
4965 pvops = &vdev_spare_ops;
4966 else
4967 pvops = &vdev_replacing_ops;
4968 }
4969
4970 /*
4971 * Make sure the new device is big enough.
4972 */
4973 if (newvd->vdev_asize < vdev_get_min_asize(oldvd))
4974 return (spa_vdev_exit(spa, newrootvd, txg, EOVERFLOW));
4975
4976 /*
4977 * The new device cannot have a higher alignment requirement
4978 * than the top-level vdev.
4979 */
4980 if (newvd->vdev_ashift > oldvd->vdev_top->vdev_ashift)
4981 return (spa_vdev_exit(spa, newrootvd, txg, EDOM));
4982
4983 /*
4984 * If this is an in-place replacement, update oldvd's path and devid
4985 * to make it distinguishable from newvd, and unopenable from now on.
4986 */
4987 if (strcmp(oldvd->vdev_path, newvd->vdev_path) == 0) {
4988 spa_strfree(oldvd->vdev_path);
4989 oldvd->vdev_path = kmem_alloc(strlen(newvd->vdev_path) + 5,
4990 KM_SLEEP);
4991 (void) sprintf(oldvd->vdev_path, "%s/%s",
4992 newvd->vdev_path, "old");
4993 if (oldvd->vdev_devid != NULL) {
4994 spa_strfree(oldvd->vdev_devid);
4995 oldvd->vdev_devid = NULL;
4996 }
4997 }
4998
4999 /* mark the device being resilvered */
5000 newvd->vdev_resilver_txg = txg;
5001
5002 /*
5003 * If the parent is not a mirror, or if we're replacing, insert the new
5004 * mirror/replacing/spare vdev above oldvd.
5005 */
5006 if (pvd->vdev_ops != pvops)
5007 pvd = vdev_add_parent(oldvd, pvops);
5008
5009 ASSERT(pvd->vdev_top->vdev_parent == rvd);
5010 ASSERT(pvd->vdev_ops == pvops);
5011 ASSERT(oldvd->vdev_parent == pvd);
5012
5013 /*
5014 * Extract the new device from its root and add it to pvd.
5015 */
5016 vdev_remove_child(newrootvd, newvd);
5017 newvd->vdev_id = pvd->vdev_children;
5018 newvd->vdev_crtxg = oldvd->vdev_crtxg;
5019 vdev_add_child(pvd, newvd);
5020
5021 tvd = newvd->vdev_top;
5022 ASSERT(pvd->vdev_top == tvd);
5023 ASSERT(tvd->vdev_parent == rvd);
5024
5025 vdev_config_dirty(tvd);
5026
5027 /*
5028 * Set newvd's DTL to [TXG_INITIAL, dtl_max_txg) so that we account
5029 * for any dmu_sync-ed blocks. It will propagate upward when
5030 * spa_vdev_exit() calls vdev_dtl_reassess().
5031 */
5032 dtl_max_txg = txg + TXG_CONCURRENT_STATES;
5033
5034 vdev_dtl_dirty(newvd, DTL_MISSING, TXG_INITIAL,
5035 dtl_max_txg - TXG_INITIAL);
5036
5037 if (newvd->vdev_isspare) {
5038 spa_spare_activate(newvd);
5039 spa_event_notify(spa, newvd, ESC_ZFS_VDEV_SPARE);
5040 }
5041
5042 oldvdpath = spa_strdup(oldvd->vdev_path);
5043 newvdpath = spa_strdup(newvd->vdev_path);
5044 newvd_isspare = newvd->vdev_isspare;
5045
5046 /*
5047 * Mark newvd's DTL dirty in this txg.
5048 */
5049 vdev_dirty(tvd, VDD_DTL, newvd, txg);
5050
5051 /*
5052 * Schedule the resilver to restart in the future. We do this to
5053 * ensure that dmu_sync-ed blocks have been stitched into the
5054 * respective datasets.
5055 */
5056 dsl_resilver_restart(spa->spa_dsl_pool, dtl_max_txg);
5057
5058 if (spa->spa_bootfs)
5059 spa_event_notify(spa, newvd, ESC_ZFS_BOOTFS_VDEV_ATTACH);
5060
5061 spa_event_notify(spa, newvd, ESC_ZFS_VDEV_ATTACH);
5062
5063 /*
5064 * Commit the config
5065 */
5066 (void) spa_vdev_exit(spa, newrootvd, dtl_max_txg, 0);
5067
5068 spa_history_log_internal(spa, "vdev attach", NULL,
5069 "%s vdev=%s %s vdev=%s",
5070 replacing && newvd_isspare ? "spare in" :
5071 replacing ? "replace" : "attach", newvdpath,
5072 replacing ? "for" : "to", oldvdpath);
5073
5074 spa_strfree(oldvdpath);
5075 spa_strfree(newvdpath);
5076
5077 return (0);
5078}
5079
5080/*
5081 * Detach a device from a mirror or replacing vdev.
5082 *
5083 * If 'replace_done' is specified, only detach if the parent
5084 * is a replacing vdev.
5085 */
5086int
5087spa_vdev_detach(spa_t *spa, uint64_t guid, uint64_t pguid, int replace_done)
5088{
5089 uint64_t txg;
5090 int error;
5091 vdev_t *rvd = spa->spa_root_vdev;
5092 vdev_t *vd, *pvd, *cvd, *tvd;
5093 boolean_t unspare = B_FALSE;
5094 uint64_t unspare_guid = 0;
5095 char *vdpath;
5096
5097 ASSERT(spa_writeable(spa));
5098
5099 txg = spa_vdev_enter(spa);
5100
5101 vd = spa_lookup_by_guid(spa, guid, B_FALSE);
5102
5103 if (vd == NULL)
5104 return (spa_vdev_exit(spa, NULL, txg, ENODEV));
5105
5106 if (!vd->vdev_ops->vdev_op_leaf)
5107 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
5108
5109 pvd = vd->vdev_parent;
5110
5111 /*
5112 * If the parent/child relationship is not as expected, don't do it.
5113 * Consider M(A,R(B,C)) -- that is, a mirror of A with a replacing
5114 * vdev that's replacing B with C. The user's intent in replacing
5115 * is to go from M(A,B) to M(A,C). If the user decides to cancel
5116 * the replace by detaching C, the expected behavior is to end up
5117 * M(A,B). But suppose that right after deciding to detach C,
5118 * the replacement of B completes. We would have M(A,C), and then
5119 * ask to detach C, which would leave us with just A -- not what
5120 * the user wanted. To prevent this, we make sure that the
5121 * parent/child relationship hasn't changed -- in this example,
5122 * that C's parent is still the replacing vdev R.
5123 */
5124 if (pvd->vdev_guid != pguid && pguid != 0)
5125 return (spa_vdev_exit(spa, NULL, txg, EBUSY));
5126
5127 /*
5128 * Only 'replacing' or 'spare' vdevs can be replaced.
5129 */
5130 if (replace_done && pvd->vdev_ops != &vdev_replacing_ops &&
5131 pvd->vdev_ops != &vdev_spare_ops)
5132 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
5133
5134 ASSERT(pvd->vdev_ops != &vdev_spare_ops ||
5135 spa_version(spa) >= SPA_VERSION_SPARES);
5136
5137 /*
5138 * Only mirror, replacing, and spare vdevs support detach.
5139 */
5140 if (pvd->vdev_ops != &vdev_replacing_ops &&
5141 pvd->vdev_ops != &vdev_mirror_ops &&
5142 pvd->vdev_ops != &vdev_spare_ops)
5143 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
5144
5145 /*
5146 * If this device has the only valid copy of some data,
5147 * we cannot safely detach it.
5148 */
5149 if (vdev_dtl_required(vd))
5150 return (spa_vdev_exit(spa, NULL, txg, EBUSY));
5151
5152 ASSERT(pvd->vdev_children >= 2);
5153
5154 /*
5155 * If we are detaching the second disk from a replacing vdev, then
5156 * check to see if we changed the original vdev's path to have "/old"
5157 * at the end in spa_vdev_attach(). If so, undo that change now.
5158 */
5159 if (pvd->vdev_ops == &vdev_replacing_ops && vd->vdev_id > 0 &&
5160 vd->vdev_path != NULL) {
5161 size_t len = strlen(vd->vdev_path);
5162
5163 for (int c = 0; c < pvd->vdev_children; c++) {
5164 cvd = pvd->vdev_child[c];
5165
5166 if (cvd == vd || cvd->vdev_path == NULL)
5167 continue;
5168
5169 if (strncmp(cvd->vdev_path, vd->vdev_path, len) == 0 &&
5170 strcmp(cvd->vdev_path + len, "/old") == 0) {
5171 spa_strfree(cvd->vdev_path);
5172 cvd->vdev_path = spa_strdup(vd->vdev_path);
5173 break;
5174 }
5175 }
5176 }
5177
5178 /*
5179 * If we are detaching the original disk from a spare, then it implies
5180 * that the spare should become a real disk, and be removed from the
5181 * active spare list for the pool.
5182 */
5183 if (pvd->vdev_ops == &vdev_spare_ops &&
5184 vd->vdev_id == 0 &&
5185 pvd->vdev_child[pvd->vdev_children - 1]->vdev_isspare)
5186 unspare = B_TRUE;
5187
5188 /*
5189 * Erase the disk labels so the disk can be used for other things.
5190 * This must be done after all other error cases are handled,
5191 * but before we disembowel vd (so we can still do I/O to it).
5192 * But if we can't do it, don't treat the error as fatal --
5193 * it may be that the unwritability of the disk is the reason
5194 * it's being detached!
5195 */
5196 error = vdev_label_init(vd, 0, VDEV_LABEL_REMOVE);
5197
5198 /*
5199 * Remove vd from its parent and compact the parent's children.
5200 */
5201 vdev_remove_child(pvd, vd);
5202 vdev_compact_children(pvd);
5203
5204 /*
5205 * Remember one of the remaining children so we can get tvd below.
5206 */
5207 cvd = pvd->vdev_child[pvd->vdev_children - 1];
5208
5209 /*
5210 * If we need to remove the remaining child from the list of hot spares,
5211 * do it now, marking the vdev as no longer a spare in the process.
5212 * We must do this before vdev_remove_parent(), because that can
5213 * change the GUID if it creates a new toplevel GUID. For a similar
5214 * reason, we must remove the spare now, in the same txg as the detach;
5215 * otherwise someone could attach a new sibling, change the GUID, and
5216 * the subsequent attempt to spa_vdev_remove(unspare_guid) would fail.
5217 */
5218 if (unspare) {
5219 ASSERT(cvd->vdev_isspare);
5220 spa_spare_remove(cvd);
5221 unspare_guid = cvd->vdev_guid;
5222 (void) spa_vdev_remove(spa, unspare_guid, B_TRUE);
5223 cvd->vdev_unspare = B_TRUE;
5224 }
5225
5226 /*
5227 * If the parent mirror/replacing vdev only has one child,
5228 * the parent is no longer needed. Remove it from the tree.
5229 */
5230 if (pvd->vdev_children == 1) {
5231 if (pvd->vdev_ops == &vdev_spare_ops)
5232 cvd->vdev_unspare = B_FALSE;
5233 vdev_remove_parent(cvd);
5234 }
5235
5236
5237 /*
5238 * We don't set tvd until now because the parent we just removed
5239 * may have been the previous top-level vdev.
5240 */
5241 tvd = cvd->vdev_top;
5242 ASSERT(tvd->vdev_parent == rvd);
5243
5244 /*
5245 * Reevaluate the parent vdev state.
5246 */
5247 vdev_propagate_state(cvd);
5248
5249 /*
5250 * If the 'autoexpand' property is set on the pool then automatically
5251 * try to expand the size of the pool. For example if the device we
5252 * just detached was smaller than the others, it may be possible to
5253 * add metaslabs (i.e. grow the pool). We need to reopen the vdev
5254 * first so that we can obtain the updated sizes of the leaf vdevs.
5255 */
5256 if (spa->spa_autoexpand) {
5257 vdev_reopen(tvd);
5258 vdev_expand(tvd, txg);
5259 }
5260
5261 vdev_config_dirty(tvd);
5262
5263 /*
5264 * Mark vd's DTL as dirty in this txg. vdev_dtl_sync() will see that
5265 * vd->vdev_detached is set and free vd's DTL object in syncing context.
5266 * But first make sure we're not on any *other* txg's DTL list, to
5267 * prevent vd from being accessed after it's freed.
5268 */
5269 vdpath = spa_strdup(vd->vdev_path);
5270 for (int t = 0; t < TXG_SIZE; t++)
5271 (void) txg_list_remove_this(&tvd->vdev_dtl_list, vd, t);
5272 vd->vdev_detached = B_TRUE;
5273 vdev_dirty(tvd, VDD_DTL, vd, txg);
5274
5275 spa_event_notify(spa, vd, ESC_ZFS_VDEV_REMOVE);
5276
5277 /* hang on to the spa before we release the lock */
5278 spa_open_ref(spa, FTAG);
5279
5280 error = spa_vdev_exit(spa, vd, txg, 0);
5281
5282 spa_history_log_internal(spa, "detach", NULL,
5283 "vdev=%s", vdpath);
5284 spa_strfree(vdpath);
5285
5286 /*
5287 * If this was the removal of the original device in a hot spare vdev,
5288 * then we want to go through and remove the device from the hot spare
5289 * list of every other pool.
5290 */
5291 if (unspare) {
5292 spa_t *altspa = NULL;
5293
5294 mutex_enter(&spa_namespace_lock);
5295 while ((altspa = spa_next(altspa)) != NULL) {
5296 if (altspa->spa_state != POOL_STATE_ACTIVE ||
5297 altspa == spa)
5298 continue;
5299
5300 spa_open_ref(altspa, FTAG);
5301 mutex_exit(&spa_namespace_lock);
5302 (void) spa_vdev_remove(altspa, unspare_guid, B_TRUE);
5303 mutex_enter(&spa_namespace_lock);
5304 spa_close(altspa, FTAG);
5305 }
5306 mutex_exit(&spa_namespace_lock);
5307
5308 /* search the rest of the vdevs for spares to remove */
5309 spa_vdev_resilver_done(spa);
5310 }
5311
5312 /* all done with the spa; OK to release */
5313 mutex_enter(&spa_namespace_lock);
5314 spa_close(spa, FTAG);
5315 mutex_exit(&spa_namespace_lock);
5316
5317 return (error);
5318}
5319
5320/*
5321 * Split a set of devices from their mirrors, and create a new pool from them.
5322 */
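/*
 * This is the back end of "zpool split": the caller supplies a config
 * naming one healthy, writable leaf per top-level mirror; those leaves
 * are detached from this pool and assembled into the new pool 'newname'.
 */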
5323int
5324spa_vdev_split_mirror(spa_t *spa, char *newname, nvlist_t *config,
5325 nvlist_t *props, boolean_t exp)
5326{
5327 int error = 0;
5328 uint64_t txg, *glist;
5329 spa_t *newspa;
5330 uint_t c, children, lastlog;
5331 nvlist_t **child, *nvl, *tmp;
5332 dmu_tx_t *tx;
5333 char *altroot = NULL;
5334 vdev_t *rvd, **vml = NULL; /* vdev modify list */
5335 boolean_t activate_slog;
5336
5337 ASSERT(spa_writeable(spa));
5338
5339 txg = spa_vdev_enter(spa);
5340
5341 /* clear the log and flush everything up to now */
5342 activate_slog = spa_passivate_log(spa);
5343 (void) spa_vdev_config_exit(spa, NULL, txg, 0, FTAG);
5344 error = spa_offline_log(spa);
5345 txg = spa_vdev_config_enter(spa);
5346
5347 if (activate_slog)
5348 spa_activate_log(spa);
5349
5350 if (error != 0)
5351 return (spa_vdev_exit(spa, NULL, txg, error));
5352
5353 /* check new spa name before going any further */
5354 if (spa_lookup(newname) != NULL)
5355 return (spa_vdev_exit(spa, NULL, txg, EEXIST));
5356
5357 /*
5358 * scan through all the children to ensure they're all mirrors
5359 */
5360 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvl) != 0 ||
5361 nvlist_lookup_nvlist_array(nvl, ZPOOL_CONFIG_CHILDREN, &child,
5362 &children) != 0)
5363 return (spa_vdev_exit(spa, NULL, txg, EINVAL));
5364
5365 /* first, check to ensure we've got the right child count */
5366 rvd = spa->spa_root_vdev;
5367 lastlog = 0;
5368 for (c = 0; c < rvd->vdev_children; c++) {
5369 vdev_t *vd = rvd->vdev_child[c];
5370
5371 /* don't count the holes & logs as children */
5372 if (vd->vdev_islog || vd->vdev_ishole) {
5373 if (lastlog == 0)
5374 lastlog = c;
5375 continue;
5376 }
5377
5378 lastlog = 0;
5379 }
5380 if (children != (lastlog != 0 ? lastlog : rvd->vdev_children))
5381 return (spa_vdev_exit(spa, NULL, txg, EINVAL));
5382
5383 /* next, ensure no spare or cache devices are part of the split */
5384 if (nvlist_lookup_nvlist(nvl, ZPOOL_CONFIG_SPARES, &tmp) == 0 ||
5385 nvlist_lookup_nvlist(nvl, ZPOOL_CONFIG_L2CACHE, &tmp) == 0)
5386 return (spa_vdev_exit(spa, NULL, txg, EINVAL));
5387
5388 vml = kmem_zalloc(children * sizeof (vdev_t *), KM_SLEEP);
5389 glist = kmem_zalloc(children * sizeof (uint64_t), KM_SLEEP);
5390
5391 /* then, loop over each vdev and validate it */
5392 for (c = 0; c < children; c++) {
5393 uint64_t is_hole = 0;
5394
5395 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
5396 &is_hole);
5397
5398 if (is_hole != 0) {
5399 if (spa->spa_root_vdev->vdev_child[c]->vdev_ishole ||
5400 spa->spa_root_vdev->vdev_child[c]->vdev_islog) {
5401 continue;
5402 } else {
5403 error = SET_ERROR(EINVAL);
5404 break;
5405 }
5406 }
5407
5408 /* which disk is going to be split? */
5409 if (nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_GUID,
5410 &glist[c]) != 0) {
5411 error = SET_ERROR(EINVAL);
5412 break;
5413 }
5414
5415 /* look it up in the spa */
5416 vml[c] = spa_lookup_by_guid(spa, glist[c], B_FALSE);
5417 if (vml[c] == NULL) {
5418 error = SET_ERROR(ENODEV);
5419 break;
5420 }
5421
5422 /* make sure there's nothing stopping the split */
5423 if (vml[c]->vdev_parent->vdev_ops != &vdev_mirror_ops ||
5424 vml[c]->vdev_islog ||
5425 vml[c]->vdev_ishole ||
5426 vml[c]->vdev_isspare ||
5427 vml[c]->vdev_isl2cache ||
5428 !vdev_writeable(vml[c]) ||
5429 vml[c]->vdev_children != 0 ||
5430 vml[c]->vdev_state != VDEV_STATE_HEALTHY ||
5431 c != spa->spa_root_vdev->vdev_child[c]->vdev_id) {
5432 error = SET_ERROR(EINVAL);
5433 break;
5434 }
5435
5436 if (vdev_dtl_required(vml[c])) {
5437 error = SET_ERROR(EBUSY);
5438 break;
5439 }
5440
5441 /* we need certain info from the top level */
5442 VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_METASLAB_ARRAY,
5443 vml[c]->vdev_top->vdev_ms_array) == 0);
5444 VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_METASLAB_SHIFT,
5445 vml[c]->vdev_top->vdev_ms_shift) == 0);
5446 VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_ASIZE,
5447 vml[c]->vdev_top->vdev_asize) == 0);
5448 VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_ASHIFT,
5449 vml[c]->vdev_top->vdev_ashift) == 0);
5450
5451 /* transfer per-vdev ZAPs */
5452 ASSERT3U(vml[c]->vdev_leaf_zap, !=, 0);
5453 VERIFY0(nvlist_add_uint64(child[c],
5454 ZPOOL_CONFIG_VDEV_LEAF_ZAP, vml[c]->vdev_leaf_zap));
5455
5456 ASSERT3U(vml[c]->vdev_top->vdev_top_zap, !=, 0);
5457 VERIFY0(nvlist_add_uint64(child[c],
5458 ZPOOL_CONFIG_VDEV_TOP_ZAP,
5459 vml[c]->vdev_parent->vdev_top_zap));
5460 }
5461
5462 if (error != 0) {
5463 kmem_free(vml, children * sizeof (vdev_t *));
5464 kmem_free(glist, children * sizeof (uint64_t));
5465 return (spa_vdev_exit(spa, NULL, txg, error));
5466 }
5467
5468 /* stop writers from using the disks */
5469 for (c = 0; c < children; c++) {
5470 if (vml[c] != NULL)
5471 vml[c]->vdev_offline = B_TRUE;
5472 }
5473 vdev_reopen(spa->spa_root_vdev);
5474
5475 /*
5476 * Temporarily record the splitting vdevs in the spa config. This
5477 * will disappear once the config is regenerated.
5478 */
5479 VERIFY(nvlist_alloc(&nvl, NV_UNIQUE_NAME, KM_SLEEP) == 0);
5480 VERIFY(nvlist_add_uint64_array(nvl, ZPOOL_CONFIG_SPLIT_LIST,
5481 glist, children) == 0);
5482 kmem_free(glist, children * sizeof (uint64_t));
5483
5484 mutex_enter(&spa->spa_props_lock);
5485 VERIFY(nvlist_add_nvlist(spa->spa_config, ZPOOL_CONFIG_SPLIT,
5486 nvl) == 0);
5487 mutex_exit(&spa->spa_props_lock);
5488 spa->spa_config_splitting = nvl;
5489 vdev_config_dirty(spa->spa_root_vdev);
5490
5491 /* configure and create the new pool */
5492 VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME, newname) == 0);
5493 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE,
5494 exp ? POOL_STATE_EXPORTED : POOL_STATE_ACTIVE) == 0);
5495 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_VERSION,
5496 spa_version(spa)) == 0);
5497 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_TXG,
5498 spa->spa_config_txg) == 0);
5499 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_GUID,
5500 spa_generate_guid(NULL)) == 0);
5501 VERIFY0(nvlist_add_boolean(config, ZPOOL_CONFIG_HAS_PER_VDEV_ZAPS));
5502 (void) nvlist_lookup_string(props,
5503 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
5504
5505 /* add the new pool to the namespace */
5506 newspa = spa_add(newname, config, altroot);
5507 newspa->spa_avz_action = AVZ_ACTION_REBUILD;
5508 newspa->spa_config_txg = spa->spa_config_txg;
5509 spa_set_log_state(newspa, SPA_LOG_CLEAR);
5510
5511 /* release the spa config lock, retaining the namespace lock */
5512 spa_vdev_config_exit(spa, NULL, txg, 0, FTAG);
5513
5514 if (zio_injection_enabled)
5515 zio_handle_panic_injection(spa, FTAG, 1);
5516
5517 spa_activate(newspa, spa_mode_global);
5518 spa_async_suspend(newspa);
5519
5520#ifndef illumos
5521 /* mark that we are creating new spa by splitting */
5522 newspa->spa_splitting_newspa = B_TRUE;
5523#endif
5524 /* create the new pool from the disks of the original pool */
5525 error = spa_load(newspa, SPA_LOAD_IMPORT, SPA_IMPORT_ASSEMBLE, B_TRUE);
5526#ifndef illumos
5527 newspa->spa_splitting_newspa = B_FALSE;
5528#endif
5529 if (error)
5530 goto out;
5531
5532 /* if that worked, generate a real config for the new pool */
5533 if (newspa->spa_root_vdev != NULL) {
5534 VERIFY(nvlist_alloc(&newspa->spa_config_splitting,
5535 NV_UNIQUE_NAME, KM_SLEEP) == 0);
5536 VERIFY(nvlist_add_uint64(newspa->spa_config_splitting,
5537 ZPOOL_CONFIG_SPLIT_GUID, spa_guid(spa)) == 0);
5538 spa_config_set(newspa, spa_config_generate(newspa, NULL, -1ULL,
5539 B_TRUE));
5540 }
5541
5542 /* set the props */
5543 if (props != NULL) {
5544 spa_configfile_set(newspa, props, B_FALSE);
5545 error = spa_prop_set(newspa, props);
5546 if (error)
5547 goto out;
5548 }
5549
5550 /* flush everything */
5551 txg = spa_vdev_config_enter(newspa);
5552 vdev_config_dirty(newspa->spa_root_vdev);
5553 (void) spa_vdev_config_exit(newspa, NULL, txg, 0, FTAG);
5554
5555 if (zio_injection_enabled)
5556 zio_handle_panic_injection(spa, FTAG, 2);
5557
5558 spa_async_resume(newspa);
5559
5560 /* finally, update the original pool's config */
5561 txg = spa_vdev_config_enter(spa);
5562 tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
5563 error = dmu_tx_assign(tx, TXG_WAIT);
5564 if (error != 0)
5565 dmu_tx_abort(tx);
5566 for (c = 0; c < children; c++) {
5567 if (vml[c] != NULL) {
5568 vdev_split(vml[c]);
5569 if (error == 0)
5570 spa_history_log_internal(spa, "detach", tx,
5571 "vdev=%s", vml[c]->vdev_path);
5572
5573 vdev_free(vml[c]);
5574 }
5575 }
5576 spa->spa_avz_action = AVZ_ACTION_REBUILD;
5577 vdev_config_dirty(spa->spa_root_vdev);
5578 spa->spa_config_splitting = NULL;
5579 nvlist_free(nvl);
5580 if (error == 0)
5581 dmu_tx_commit(tx);
5582 (void) spa_vdev_exit(spa, NULL, txg, 0);
5583
5584 if (zio_injection_enabled)
5585 zio_handle_panic_injection(spa, FTAG, 3);
5586
5587 /* split is complete; log a history record */
5588 spa_history_log_internal(newspa, "split", NULL,
5589 "from pool %s", spa_name(spa));
5590
5591 kmem_free(vml, children * sizeof (vdev_t *));
5592
5593 /* if we're not going to mount the filesystems in userland, export */
5594 if (exp)
5595 error = spa_export_common(newname, POOL_STATE_EXPORTED, NULL,
5596 B_FALSE, B_FALSE);
5597
5598 return (error);
5599
5600out:
5601 spa_unload(newspa);
5602 spa_deactivate(newspa);
5603 spa_remove(newspa);
5604
5605 txg = spa_vdev_config_enter(spa);
5606
5607 /* re-online all offlined disks */
5608 for (c = 0; c < children; c++) {
5609 if (vml[c] != NULL)
5610 vml[c]->vdev_offline = B_FALSE;
5611 }
5612 vdev_reopen(spa->spa_root_vdev);
5613
5614 nvlist_free(spa->spa_config_splitting);
5615 spa->spa_config_splitting = NULL;
5616 (void) spa_vdev_exit(spa, NULL, txg, error);
5617
5618 kmem_free(vml, children * sizeof (vdev_t *));
5619 return (error);
5620}
5621
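/*
 * Return the nvlist in 'nvpp' whose ZPOOL_CONFIG_GUID matches 'target_guid',
 * or NULL if no such entry exists.
 */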
5622static nvlist_t *
5623spa_nvlist_lookup_by_guid(nvlist_t **nvpp, int count, uint64_t target_guid)
5624{
5625 for (int i = 0; i < count; i++) {
5626 uint64_t guid;
5627
5628 VERIFY(nvlist_lookup_uint64(nvpp[i], ZPOOL_CONFIG_GUID,
5629 &guid) == 0);
5630
5631 if (guid == target_guid)
5632 return (nvpp[i]);
5633 }
5634
5635 return (NULL);
5636}
5637
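/*
 * Rebuild the named nvlist array in 'config' (the spare or l2cache list)
 * with 'dev_to_remove' omitted.
 */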
5638static void
5639spa_vdev_remove_aux(nvlist_t *config, char *name, nvlist_t **dev, int count,
5640 nvlist_t *dev_to_remove)
5641{
5642 nvlist_t **newdev = NULL;
5643
5644 if (count > 1)
5645 newdev = kmem_alloc((count - 1) * sizeof (void *), KM_SLEEP);
5646
5647 for (int i = 0, j = 0; i < count; i++) {
5648 if (dev[i] == dev_to_remove)
5649 continue;
5650 VERIFY(nvlist_dup(dev[i], &newdev[j++], KM_SLEEP) == 0);
5651 }
5652
5653 VERIFY(nvlist_remove(config, name, DATA_TYPE_NVLIST_ARRAY) == 0);
5654 VERIFY(nvlist_add_nvlist_array(config, name, newdev, count - 1) == 0);
5655
5656 for (int i = 0; i < count - 1; i++)
5657 nvlist_free(newdev[i]);
5658
5659 if (count > 1)
5660 kmem_free(newdev, (count - 1) * sizeof (void *));
5661}
5662
5663/*
5664 * Evacuate the device.
5665 */
5666static int
5667spa_vdev_remove_evacuate(spa_t *spa, vdev_t *vd)
5668{
5669 uint64_t txg;
5670 int error = 0;
5671
5672 ASSERT(MUTEX_HELD(&spa_namespace_lock));
5673 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
5674 ASSERT(vd == vd->vdev_top);
5675
5676 /*
5677 * Evacuate the device. We don't hold the config lock as writer
5678 * since we need to do I/O but we do keep the
5679 * spa_namespace_lock held. Once this completes the device
5680 * should no longer have any blocks allocated on it.
5681 */
5682 if (vd->vdev_islog) {
5683 if (vd->vdev_stat.vs_alloc != 0)
5684 error = spa_offline_log(spa);
5685 } else {
5686 error = SET_ERROR(ENOTSUP);
5687 }
5688
5689 if (error)
5690 return (error);
5691
5692 /*
5693 * The evacuation succeeded. Remove any remaining MOS metadata
5694 * associated with this vdev, and wait for these changes to sync.
5695 */
5696 ASSERT0(vd->vdev_stat.vs_alloc);
5697 txg = spa_vdev_config_enter(spa);
5698 vd->vdev_removing = B_TRUE;
5699 vdev_dirty_leaves(vd, VDD_DTL, txg);
5700 vdev_config_dirty(vd);
5701 spa_vdev_config_exit(spa, NULL, txg, 0, FTAG);
5702
5703 return (0);
5704}
5705
5706/*
5707 * Complete the removal by cleaning up the namespace.
5708 */
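/*
 * Unless the vdev being removed is the last top-level child, a hole vdev
 * is put in its slot so the ids of the remaining top-level vdevs are
 * preserved.
 */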
5709static void
5710spa_vdev_remove_from_namespace(spa_t *spa, vdev_t *vd)
5711{
5712 vdev_t *rvd = spa->spa_root_vdev;
5713 uint64_t id = vd->vdev_id;
5714 boolean_t last_vdev = (id == (rvd->vdev_children - 1));
5715
5716 ASSERT(MUTEX_HELD(&spa_namespace_lock));
5717 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
5718 ASSERT(vd == vd->vdev_top);
5719
5720 /*
5721 * Only remove any devices which are empty.
5722 */
5723 if (vd->vdev_stat.vs_alloc != 0)
5724 return;
5725
5726 (void) vdev_label_init(vd, 0, VDEV_LABEL_REMOVE);
5727
5728 if (list_link_active(&vd->vdev_state_dirty_node))
5729 vdev_state_clean(vd);
5730 if (list_link_active(&vd->vdev_config_dirty_node))
5731 vdev_config_clean(vd);
5732
5733 vdev_free(vd);
5734
5735 if (last_vdev) {
5736 vdev_compact_children(rvd);
5737 } else {
5738 vd = vdev_alloc_common(spa, id, 0, &vdev_hole_ops);
5739 vdev_add_child(rvd, vd);
5740 }
5741 vdev_config_dirty(rvd);
5742
5743 /*
5744 * Reassess the health of our root vdev.
5745 */
5746 vdev_reopen(rvd);
5747}
5748
5749/*
5750 * Remove a device from the pool -
5751 *
5752 * Removing a device from the vdev namespace requires several steps
5753 * and can take a significant amount of time. As a result we use
5754 * the spa_vdev_config_[enter/exit] functions which allow us to
5755 * grab and release the spa_config_lock while still holding the namespace
5756 * lock. During each step the configuration is synced out.
5757 *
5758 * Currently, this supports removing only hot spares, slogs, and level 2 ARC
5759 * devices.
5760 */
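/*
 * For example, "zpool remove <pool> <device>" for a hot spare, slog, or
 * cache device ends up here; removing an ordinary top-level data vdev is
 * rejected with ENOTSUP below.
 */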
5761int
5762spa_vdev_remove(spa_t *spa, uint64_t guid, boolean_t unspare)
5763{
5764 vdev_t *vd;
5765 sysevent_t *ev = NULL;
5766 metaslab_group_t *mg;
5767 nvlist_t **spares, **l2cache, *nv;
5768 uint64_t txg = 0;
5769 uint_t nspares, nl2cache;
5770 int error = 0;
5771 boolean_t locked = MUTEX_HELD(&spa_namespace_lock);
5772
5773 ASSERT(spa_writeable(spa));
5774
5775 if (!locked)
5776 txg = spa_vdev_enter(spa);
5777
5778 vd = spa_lookup_by_guid(spa, guid, B_FALSE);
5779
5780 if (spa->spa_spares.sav_vdevs != NULL &&
5781 nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
5782 ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0 &&
5783 (nv = spa_nvlist_lookup_by_guid(spares, nspares, guid)) != NULL) {
5784 /*
5785 * Only remove the hot spare if it's not currently in use
5786 * in this pool.
5787 */
5788 if (vd == NULL || unspare) {
5789 if (vd == NULL)
5790 vd = spa_lookup_by_guid(spa, guid, B_TRUE);
5791 ev = spa_event_create(spa, vd, ESC_ZFS_VDEV_REMOVE_AUX);
5792 spa_vdev_remove_aux(spa->spa_spares.sav_config,
5793 ZPOOL_CONFIG_SPARES, spares, nspares, nv);
5794 spa_load_spares(spa);
5795 spa->spa_spares.sav_sync = B_TRUE;
5796 } else {
5797 error = SET_ERROR(EBUSY);
5798 }
5799 } else if (spa->spa_l2cache.sav_vdevs != NULL &&
5800 nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config,
5801 ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0 &&
5802 (nv = spa_nvlist_lookup_by_guid(l2cache, nl2cache, guid)) != NULL) {
5803 /*
5804 * Cache devices can always be removed.
5805 */
5806 vd = spa_lookup_by_guid(spa, guid, B_TRUE);
5807 ev = spa_event_create(spa, vd, ESC_ZFS_VDEV_REMOVE_AUX);
5808 spa_vdev_remove_aux(spa->spa_l2cache.sav_config,
5809 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache, nv);
5810 spa_load_l2cache(spa);
5811 spa->spa_l2cache.sav_sync = B_TRUE;
5812 } else if (vd != NULL && vd->vdev_islog) {
5813 ASSERT(!locked);
5814 ASSERT(vd == vd->vdev_top);
5815
5816 mg = vd->vdev_mg;
5817
5818 /*
5819 * Stop allocating from this vdev.
5820 */
5821 metaslab_group_passivate(mg);
5822
5823 /*
5824 * Wait for the youngest allocations and frees to sync,
5825 * and then wait for the deferral of those frees to finish.
5826 */
5827 spa_vdev_config_exit(spa, NULL,
5828 txg + TXG_CONCURRENT_STATES + TXG_DEFER_SIZE, 0, FTAG);
5829
5830 /*
5831 * Attempt to evacuate the vdev.
5832 */
5833 error = spa_vdev_remove_evacuate(spa, vd);
5834
5835 txg = spa_vdev_config_enter(spa);
5836
5837 /*
5838 * If we couldn't evacuate the vdev, unwind.
5839 */
5840 if (error) {
5841 metaslab_group_activate(mg);
5842 return (spa_vdev_exit(spa, NULL, txg, error));
5843 }
5844
5845 /*
5846 * Clean up the vdev namespace.
5847 */
5848 ev = spa_event_create(spa, vd, ESC_ZFS_VDEV_REMOVE_DEV);
5849 spa_vdev_remove_from_namespace(spa, vd);
5850
5851 } else if (vd != NULL) {
5852 /*
5853 * Normal vdevs cannot be removed (yet).
5854 */
5855 error = SET_ERROR(ENOTSUP);
5856 } else {
5857 /*
5858 * There is no vdev of any kind with the specified guid.
5859 */
5860 error = SET_ERROR(ENOENT);
5861 }
5862
5863 if (!locked)
5864 error = spa_vdev_exit(spa, NULL, txg, error);
5865
5866 if (ev)
5867 spa_event_post(ev);
5868
5869 return (error);
5870}
5871
5872/*
5873 * Find any device that's done replacing, or a vdev marked 'unspare' that's
5874 * currently spared, so we can detach it.
5875 */
5876static vdev_t *
5877spa_vdev_resilver_done_hunt(vdev_t *vd)
5878{
5879 vdev_t *newvd, *oldvd;
5880
5881 for (int c = 0; c < vd->vdev_children; c++) {
5882 oldvd = spa_vdev_resilver_done_hunt(vd->vdev_child[c]);
5883 if (oldvd != NULL)
5884 return (oldvd);
5885 }
5886
5887 /*
5888 * Check for a completed replacement. We always consider the first
5889 * vdev in the list to be the oldest vdev, and the last one to be
5890 * the newest (see spa_vdev_attach() for how that works). In
5891 * the case where the newest vdev is faulted, we will not automatically
5892 * remove it after a resilver completes. This is OK as it will require
5893 * user intervention to determine which disk the admin wishes to keep.
5894 */
5895 if (vd->vdev_ops == &vdev_replacing_ops) {
5896 ASSERT(vd->vdev_children > 1);
5897
5898 newvd = vd->vdev_child[vd->vdev_children - 1];
5899 oldvd = vd->vdev_child[0];
5900
5901 if (vdev_dtl_empty(newvd, DTL_MISSING) &&
5902 vdev_dtl_empty(newvd, DTL_OUTAGE) &&
5903 !vdev_dtl_required(oldvd))
5904 return (oldvd);
5905 }
5906
5907 /*
5908 * Check for a completed resilver with the 'unspare' flag set.
5909 */
5910 if (vd->vdev_ops == &vdev_spare_ops) {
5911 vdev_t *first = vd->vdev_child[0];
5912 vdev_t *last = vd->vdev_child[vd->vdev_children - 1];
5913
5914 if (last->vdev_unspare) {
5915 oldvd = first;
5916 newvd = last;
5917 } else if (first->vdev_unspare) {
5918 oldvd = last;
5919 newvd = first;
5920 } else {
5921 oldvd = NULL;
5922 }
5923
5924 if (oldvd != NULL &&
5925 vdev_dtl_empty(newvd, DTL_MISSING) &&
5926 vdev_dtl_empty(newvd, DTL_OUTAGE) &&
5927 !vdev_dtl_required(oldvd))
5928 return (oldvd);
5929
5930 /*
5931 * If there are more than two spares attached to a disk,
5932 * and those spares are not required, then we want to
5933 * attempt to free them up now so that they can be used
5934 * by other pools. Once we're back down to a single
5935 * disk+spare, we stop removing them.
5936 */
5937 if (vd->vdev_children > 2) {
5938 newvd = vd->vdev_child[1];
5939
5940 if (newvd->vdev_isspare && last->vdev_isspare &&
5941 vdev_dtl_empty(last, DTL_MISSING) &&
5942 vdev_dtl_empty(last, DTL_OUTAGE) &&
5943 !vdev_dtl_required(newvd))
5944 return (newvd);
5945 }
5946 }
5947
5948 return (NULL);
5949}
5950
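/*
 * Detach each vdev identified by spa_vdev_resilver_done_hunt().  If the
 * completed replacement sat underneath an in-use hot spare, detach the
 * spare as well so it returns to the available spare list.
 */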
5951static void
5952spa_vdev_resilver_done(spa_t *spa)
5953{
5954 vdev_t *vd, *pvd, *ppvd;
5955 uint64_t guid, sguid, pguid, ppguid;
5956
5957 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
5958
5959 while ((vd = spa_vdev_resilver_done_hunt(spa->spa_root_vdev)) != NULL) {
5960 pvd = vd->vdev_parent;
5961 ppvd = pvd->vdev_parent;
5962 guid = vd->vdev_guid;
5963 pguid = pvd->vdev_guid;
5964 ppguid = ppvd->vdev_guid;
5965 sguid = 0;
5966 /*
5967 * If we have just finished replacing a hot spared device, then
5968		 * we need to detach the spare (the other child of the parent
5969		 * spare vdev) as well.
5970 */
5971 if (ppvd->vdev_ops == &vdev_spare_ops && pvd->vdev_id == 0 &&
5972 ppvd->vdev_children == 2) {
5973 ASSERT(pvd->vdev_ops == &vdev_replacing_ops);
5974 sguid = ppvd->vdev_child[1]->vdev_guid;
5975 }
5976 ASSERT(vd->vdev_resilver_txg == 0 || !vdev_dtl_required(vd));
5977
5978 spa_config_exit(spa, SCL_ALL, FTAG);
5979 if (spa_vdev_detach(spa, guid, pguid, B_TRUE) != 0)
5980 return;
5981 if (sguid && spa_vdev_detach(spa, sguid, ppguid, B_TRUE) != 0)
5982 return;
5983 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
5984 }
5985
5986 spa_config_exit(spa, SCL_ALL, FTAG);
5987}
5988
5989/*
5990 * Update the stored path or FRU for this vdev.
5991 */
5992int
5993spa_vdev_set_common(spa_t *spa, uint64_t guid, const char *value,
5994 boolean_t ispath)
5995{
5996 vdev_t *vd;
5997 boolean_t sync = B_FALSE;
5998
5999 ASSERT(spa_writeable(spa));
6000
6001 spa_vdev_state_enter(spa, SCL_ALL);
6002
6003 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
6004 return (spa_vdev_state_exit(spa, NULL, ENOENT));
6005
6006 if (!vd->vdev_ops->vdev_op_leaf)
6007 return (spa_vdev_state_exit(spa, NULL, ENOTSUP));
6008
6009 if (ispath) {
6010 if (strcmp(value, vd->vdev_path) != 0) {
6011 spa_strfree(vd->vdev_path);
6012 vd->vdev_path = spa_strdup(value);
6013 sync = B_TRUE;
6014 }
6015 } else {
6016 if (vd->vdev_fru == NULL) {
6017 vd->vdev_fru = spa_strdup(value);
6018 sync = B_TRUE;
6019 } else if (strcmp(value, vd->vdev_fru) != 0) {
6020 spa_strfree(vd->vdev_fru);
6021 vd->vdev_fru = spa_strdup(value);
6022 sync = B_TRUE;
6023 }
6024 }
6025
6026 return (spa_vdev_state_exit(spa, sync ? vd : NULL, 0));
6027}
6028
6029int
6030spa_vdev_setpath(spa_t *spa, uint64_t guid, const char *newpath)
6031{
6032 return (spa_vdev_set_common(spa, guid, newpath, B_TRUE));
6033}
6034
6035int
6036spa_vdev_setfru(spa_t *spa, uint64_t guid, const char *newfru)
6037{
6038 return (spa_vdev_set_common(spa, guid, newfru, B_FALSE));
6039}
6040
6041/*
6042 * ==========================================================================
6043 * SPA Scanning
6044 * ==========================================================================
6045 */
6046
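/*
 * Userland entry points: e.g. "zpool scrub" reaches
 * spa_scan(spa, POOL_SCAN_SCRUB), and "zpool scrub -s" reaches
 * spa_scan_stop().
 */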
6047int
6048spa_scan_stop(spa_t *spa)
6049{
6050 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
6051 if (dsl_scan_resilvering(spa->spa_dsl_pool))
6052 return (SET_ERROR(EBUSY));
6053 return (dsl_scan_cancel(spa->spa_dsl_pool));
6054}
6055
6056int
6057spa_scan(spa_t *spa, pool_scan_func_t func)
6058{
6059 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
6060
6061 if (func >= POOL_SCAN_FUNCS || func == POOL_SCAN_NONE)
6062 return (SET_ERROR(ENOTSUP));
6063
6064 /*
6065 * If a resilver was requested, but there is no DTL on a
6066 * writeable leaf device, we have nothing to do.
6067 */
6068 if (func == POOL_SCAN_RESILVER &&
6069 !vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL)) {
6070 spa_async_request(spa, SPA_ASYNC_RESILVER_DONE);
6071 return (0);
6072 }
6073
6074 return (dsl_scan(spa->spa_dsl_pool, func));
6075}
6076
6077/*
6078 * ==========================================================================
6079 * SPA async task processing
6080 * ==========================================================================
6081 */
6082
6083static void
6084spa_async_remove(spa_t *spa, vdev_t *vd)
6085{
6086 if (vd->vdev_remove_wanted) {
6087 vd->vdev_remove_wanted = B_FALSE;
6088 vd->vdev_delayed_close = B_FALSE;
6089 vdev_set_state(vd, B_FALSE, VDEV_STATE_REMOVED, VDEV_AUX_NONE);
6090
6091 /*
6092 * We want to clear the stats, but we don't want to do a full
6093 * vdev_clear() as that will cause us to throw away
6094 * degraded/faulted state as well as attempt to reopen the
6095 * device, all of which is a waste.
6096 */
6097 vd->vdev_stat.vs_read_errors = 0;
6098 vd->vdev_stat.vs_write_errors = 0;
6099 vd->vdev_stat.vs_checksum_errors = 0;
6100
6101 vdev_state_dirty(vd->vdev_top);
6102 /* Tell userspace that the vdev is gone. */
6103 zfs_post_remove(spa, vd);
6104 }
6105
6106 for (int c = 0; c < vd->vdev_children; c++)
6107 spa_async_remove(spa, vd->vdev_child[c]);
6108}
6109
6110static void
6111spa_async_probe(spa_t *spa, vdev_t *vd)
6112{
6113 if (vd->vdev_probe_wanted) {
6114 vd->vdev_probe_wanted = B_FALSE;
6115 vdev_reopen(vd); /* vdev_open() does the actual probe */
6116 }
6117
6118 for (int c = 0; c < vd->vdev_children; c++)
6119 spa_async_probe(spa, vd->vdev_child[c]);
6120}
6121
6122static void
6123spa_async_autoexpand(spa_t *spa, vdev_t *vd)
6124{
6125 sysevent_id_t eid;
6126 nvlist_t *attr;
6127 char *physpath;
6128
6129 if (!spa->spa_autoexpand)
6130 return;
6131
6132 for (int c = 0; c < vd->vdev_children; c++) {
6133 vdev_t *cvd = vd->vdev_child[c];
6134 spa_async_autoexpand(spa, cvd);
6135 }
6136
6137 if (!vd->vdev_ops->vdev_op_leaf || vd->vdev_physpath == NULL)
6138 return;
6139
6140 physpath = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
6141 (void) snprintf(physpath, MAXPATHLEN, "/devices%s", vd->vdev_physpath);
6142
6143 VERIFY(nvlist_alloc(&attr, NV_UNIQUE_NAME, KM_SLEEP) == 0);
6144 VERIFY(nvlist_add_string(attr, DEV_PHYS_PATH, physpath) == 0);
6145
6146 (void) ddi_log_sysevent(zfs_dip, SUNW_VENDOR, EC_DEV_STATUS,
6147 ESC_ZFS_VDEV_AUTOEXPAND, attr, &eid, DDI_SLEEP);
6148
6149 nvlist_free(attr);
6150 kmem_free(physpath, MAXPATHLEN);
6151}
6152
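/*
 * Worker thread for most asynchronous tasks: config updates, autoexpand,
 * device probes, and resilver start/completion.  SPA_ASYNC_REMOVE requests
 * are left pending here and serviced by spa_async_thread_vd() instead.
 */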
6153static void
6154spa_async_thread(void *arg)
6155{
6156 spa_t *spa = arg;
6157 int tasks;
6158
6159 ASSERT(spa->spa_sync_on);
6160
6161 mutex_enter(&spa->spa_async_lock);
6162 tasks = spa->spa_async_tasks;
6163 spa->spa_async_tasks &= SPA_ASYNC_REMOVE;
6164 mutex_exit(&spa->spa_async_lock);
6165
6166 /*
6167 * See if the config needs to be updated.
6168 */
6169 if (tasks & SPA_ASYNC_CONFIG_UPDATE) {
6170 uint64_t old_space, new_space;
6171
6172 mutex_enter(&spa_namespace_lock);
6173 old_space = metaslab_class_get_space(spa_normal_class(spa));
6174 spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
6175 new_space = metaslab_class_get_space(spa_normal_class(spa));
6176 mutex_exit(&spa_namespace_lock);
6177
6178 /*
6179 * If the pool grew as a result of the config update,
6180 * then log an internal history event.
6181 */
6182 if (new_space != old_space) {
6183 spa_history_log_internal(spa, "vdev online", NULL,
6184 "pool '%s' size: %llu(+%llu)",
6185 spa_name(spa), new_space, new_space - old_space);
6186 }
6187 }
6188
6189 if ((tasks & SPA_ASYNC_AUTOEXPAND) && !spa_suspended(spa)) {
6190 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
6191 spa_async_autoexpand(spa, spa->spa_root_vdev);
6192 spa_config_exit(spa, SCL_CONFIG, FTAG);
6193 }
6194
6195 /*
6196 * See if any devices need to be probed.
6197 */
6198 if (tasks & SPA_ASYNC_PROBE) {
6199 spa_vdev_state_enter(spa, SCL_NONE);
6200 spa_async_probe(spa, spa->spa_root_vdev);
6201 (void) spa_vdev_state_exit(spa, NULL, 0);
6202 }
6203
6204 /*
6205 * If any devices are done replacing, detach them.
6206 */
6207 if (tasks & SPA_ASYNC_RESILVER_DONE)
6208 spa_vdev_resilver_done(spa);
6209
6210 /*
6211 * Kick off a resilver.
6212 */
6213 if (tasks & SPA_ASYNC_RESILVER)
6214 dsl_resilver_restart(spa->spa_dsl_pool, 0);
6215
6216 /*
6217 * Let the world know that we're done.
6218 */
6219 mutex_enter(&spa->spa_async_lock);
6220 spa->spa_async_thread = NULL;
6221 cv_broadcast(&spa->spa_async_cv);
6222 mutex_exit(&spa->spa_async_lock);
6223 thread_exit();
6224}
6225
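/*
 * Worker thread for SPA_ASYNC_REMOVE: mark the requested vdevs (including
 * spares and l2cache devices) REMOVED, retrying until no further remove
 * requests are pending.
 */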
6226static void
6227spa_async_thread_vd(void *arg)
6228{
6229 spa_t *spa = arg;
6230 int tasks;
6231
6232 ASSERT(spa->spa_sync_on);
6233
6234 mutex_enter(&spa->spa_async_lock);
6235 tasks = spa->spa_async_tasks;
6236retry:
6237 spa->spa_async_tasks &= ~SPA_ASYNC_REMOVE;
6238 mutex_exit(&spa->spa_async_lock);
6239
6240 /*
6241 * See if any devices need to be marked REMOVED.
6242 */
6243 if (tasks & SPA_ASYNC_REMOVE) {
6244 spa_vdev_state_enter(spa, SCL_NONE);
6245 spa_async_remove(spa, spa->spa_root_vdev);
6246 for (int i = 0; i < spa->spa_l2cache.sav_count; i++)
6247 spa_async_remove(spa, spa->spa_l2cache.sav_vdevs[i]);
6248 for (int i = 0; i < spa->spa_spares.sav_count; i++)
6249 spa_async_remove(spa, spa->spa_spares.sav_vdevs[i]);
6250 (void) spa_vdev_state_exit(spa, NULL, 0);
6251 }
6252
6253 /*
6254 * Let the world know that we're done.
6255 */
6256 mutex_enter(&spa->spa_async_lock);
6257 tasks = spa->spa_async_tasks;
6258 if ((tasks & SPA_ASYNC_REMOVE) != 0)
6259 goto retry;
6260 spa->spa_async_thread_vd = NULL;
6261 cv_broadcast(&spa->spa_async_cv);
6262 mutex_exit(&spa->spa_async_lock);
6263 thread_exit();
6264}
6265
6266void
6267spa_async_suspend(spa_t *spa)
6268{
6269 mutex_enter(&spa->spa_async_lock);
6270 spa->spa_async_suspended++;
6271	while (spa->spa_async_thread != NULL ||
6272	    spa->spa_async_thread_vd != NULL)
6273 cv_wait(&spa->spa_async_cv, &spa->spa_async_lock);
6274 mutex_exit(&spa->spa_async_lock);
6275}
6276
6277void
6278spa_async_resume(spa_t *spa)
6279{
6280 mutex_enter(&spa->spa_async_lock);
6281 ASSERT(spa->spa_async_suspended != 0);
6282 spa->spa_async_suspended--;
6283 mutex_exit(&spa->spa_async_lock);
6284}
6285
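/*
 * Return B_TRUE if spa_async_thread() has work to do.  A failed config
 * cache write is retried only after zfs_ccw_retry_interval seconds have
 * elapsed; SPA_ASYNC_REMOVE is excluded since it has a dedicated thread.
 */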
6286static boolean_t
6287spa_async_tasks_pending(spa_t *spa)
6288{
6289 uint_t non_config_tasks;
6290 uint_t config_task;
6291 boolean_t config_task_suspended;
6292
6293 non_config_tasks = spa->spa_async_tasks & ~(SPA_ASYNC_CONFIG_UPDATE |
6294 SPA_ASYNC_REMOVE);
6295 config_task = spa->spa_async_tasks & SPA_ASYNC_CONFIG_UPDATE;
6296 if (spa->spa_ccw_fail_time == 0) {
6297 config_task_suspended = B_FALSE;
6298 } else {
6299 config_task_suspended =
6300 (gethrtime() - spa->spa_ccw_fail_time) <
6301 (zfs_ccw_retry_interval * NANOSEC);
6302 }
6303
6304 return (non_config_tasks || (config_task && !config_task_suspended));
6305}
6306
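/*
 * Kick off spa_async_thread() if there is eligible work and async
 * processing is not suspended.
 */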
6307static void
6308spa_async_dispatch(spa_t *spa)
6309{
6310 mutex_enter(&spa->spa_async_lock);
6311 if (spa_async_tasks_pending(spa) &&
6312 !spa->spa_async_suspended &&
6313 spa->spa_async_thread == NULL &&
6314 rootdir != NULL)
6315 spa->spa_async_thread = thread_create(NULL, 0,
6316 spa_async_thread, spa, 0, &p0, TS_RUN, maxclsyspri);
6317 mutex_exit(&spa->spa_async_lock);
6318}
6319
6320static void
6321spa_async_dispatch_vd(spa_t *spa)
6322{
6323 mutex_enter(&spa->spa_async_lock);
6324 if ((spa->spa_async_tasks & SPA_ASYNC_REMOVE) != 0 &&
6325 !spa->spa_async_suspended &&
6326 spa->spa_async_thread_vd == NULL &&
6327 rootdir != NULL)
6328 spa->spa_async_thread_vd = thread_create(NULL, 0,
6329 spa_async_thread_vd, spa, 0, &p0, TS_RUN, maxclsyspri);
6330 mutex_exit(&spa->spa_async_lock);
6331}
6332
6333void
6334spa_async_request(spa_t *spa, int task)
6335{
6336 zfs_dbgmsg("spa=%s async request task=%u", spa->spa_name, task);
6337 mutex_enter(&spa->spa_async_lock);
6338 spa->spa_async_tasks |= task;
6339 mutex_exit(&spa->spa_async_lock);
6340 spa_async_dispatch_vd(spa);
6341}
6342
6343/*
6344 * ==========================================================================
6345 * SPA syncing routines
6346 * ==========================================================================
6347 */
6348
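/*
 * bplist_iterate() callback used when frees are deferred: append the block
 * pointer to the bpobj passed in 'arg' (here, spa_deferred_bpobj).
 */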
6349static int
6350bpobj_enqueue_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
6351{
6352 bpobj_t *bpo = arg;
6353 bpobj_enqueue(bpo, bp, tx);
6354 return (0);
6355}
6356
6357static int
6358spa_free_sync_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
6359{
6360 zio_t *zio = arg;
6361
6362 zio_nowait(zio_free_sync(zio, zio->io_spa, dmu_tx_get_txg(tx), bp,
6363 BP_GET_PSIZE(bp), zio->io_flags));
6364 return (0);
6365}
6366
6367/*
6368 * Note: this simple function is not inlined to make it easier to dtrace the
6369 * amount of time spent syncing frees.
6370 */
6371static void
6372spa_sync_frees(spa_t *spa, bplist_t *bpl, dmu_tx_t *tx)
6373{
6374 zio_t *zio = zio_root(spa, NULL, NULL, 0);
6375 bplist_iterate(bpl, spa_free_sync_cb, zio, tx);
6376 VERIFY(zio_wait(zio) == 0);
6377}
6378
6379/*
6380 * Note: this simple function is not inlined to make it easier to dtrace the
6381 * amount of time spent syncing deferred frees.
6382 */
6383static void
6384spa_sync_deferred_frees(spa_t *spa, dmu_tx_t *tx)
6385{
6386 zio_t *zio = zio_root(spa, NULL, NULL, 0);
6387 VERIFY3U(bpobj_iterate(&spa->spa_deferred_bpobj,
6388 spa_free_sync_cb, zio, tx), ==, 0);
6389 VERIFY0(zio_wait(zio));
6390}
6391
6392
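/*
 * Pack 'nv' using XDR encoding and write it to MOS object 'obj', padded to
 * a multiple of SPA_CONFIG_BLOCKSIZE; the packed size is recorded in the
 * object's bonus buffer.
 */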
6393static void
6394spa_sync_nvlist(spa_t *spa, uint64_t obj, nvlist_t *nv, dmu_tx_t *tx)
6395{
6396 char *packed = NULL;
6397 size_t bufsize;
6398 size_t nvsize = 0;
6399 dmu_buf_t *db;
6400
6401 VERIFY(nvlist_size(nv, &nvsize, NV_ENCODE_XDR) == 0);
6402
6403 /*
6404 * Write full (SPA_CONFIG_BLOCKSIZE) blocks of configuration
6405 * information. This avoids the dmu_buf_will_dirty() path and
6406 * saves us a pre-read to get data we don't actually care about.
6407 */
6408 bufsize = P2ROUNDUP((uint64_t)nvsize, SPA_CONFIG_BLOCKSIZE);
6409 packed = kmem_alloc(bufsize, KM_SLEEP);
6410
6411 VERIFY(nvlist_pack(nv, &packed, &nvsize, NV_ENCODE_XDR,
6412 KM_SLEEP) == 0);
6413 bzero(packed + nvsize, bufsize - nvsize);
6414
6415 dmu_write(spa->spa_meta_objset, obj, 0, bufsize, packed, tx);
6416
6417 kmem_free(packed, bufsize);
6418
6419 VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db));
6420 dmu_buf_will_dirty(db, tx);
6421 *(uint64_t *)db->db_data = nvsize;
6422 dmu_buf_rele(db, FTAG);
6423}
6424
6425static void
6426spa_sync_aux_dev(spa_t *spa, spa_aux_vdev_t *sav, dmu_tx_t *tx,
6427 const char *config, const char *entry)
6428{
6429 nvlist_t *nvroot;
6430 nvlist_t **list;
6431 int i;
6432
6433 if (!sav->sav_sync)
6434 return;
6435
6436 /*
6437 * Update the MOS nvlist describing the list of available devices.
6438 * spa_validate_aux() will have already made sure this nvlist is
6439 * valid and the vdevs are labeled appropriately.
6440 */
6441 if (sav->sav_object == 0) {
6442 sav->sav_object = dmu_object_alloc(spa->spa_meta_objset,
6443 DMU_OT_PACKED_NVLIST, 1 << 14, DMU_OT_PACKED_NVLIST_SIZE,
6444 sizeof (uint64_t), tx);
6445 VERIFY(zap_update(spa->spa_meta_objset,
6446 DMU_POOL_DIRECTORY_OBJECT, entry, sizeof (uint64_t), 1,
6447 &sav->sav_object, tx) == 0);
6448 }
6449
6450 VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0);
6451 if (sav->sav_count == 0) {
6452 VERIFY(nvlist_add_nvlist_array(nvroot, config, NULL, 0) == 0);
6453 } else {
6454 list = kmem_alloc(sav->sav_count * sizeof (void *), KM_SLEEP);
6455 for (i = 0; i < sav->sav_count; i++)
6456 list[i] = vdev_config_generate(spa, sav->sav_vdevs[i],
6457 B_FALSE, VDEV_CONFIG_L2CACHE);
6458 VERIFY(nvlist_add_nvlist_array(nvroot, config, list,
6459 sav->sav_count) == 0);
6460 for (i = 0; i < sav->sav_count; i++)
6461 nvlist_free(list[i]);
6462 kmem_free(list, sav->sav_count * sizeof (void *));
6463 }
6464
6465 spa_sync_nvlist(spa, sav->sav_object, nvroot, tx);
6466 nvlist_free(nvroot);
6467
6468 sav->sav_sync = B_FALSE;
6469}
6470
6471/*
6472 * Rebuild spa's all-vdev ZAP from the vdev ZAPs indicated in each vdev_t.
6473 * The all-vdev ZAP must be empty.
6474 */
6475static void
6476spa_avz_build(vdev_t *vd, uint64_t avz, dmu_tx_t *tx)
6477{
6478 spa_t *spa = vd->vdev_spa;
6479 if (vd->vdev_top_zap != 0) {
6480 VERIFY0(zap_add_int(spa->spa_meta_objset, avz,
6481 vd->vdev_top_zap, tx));
6482 }
6483 if (vd->vdev_leaf_zap != 0) {
6484 VERIFY0(zap_add_int(spa->spa_meta_objset, avz,
6485 vd->vdev_leaf_zap, tx));
6486 }
6487 for (uint64_t i = 0; i < vd->vdev_children; i++) {
6488 spa_avz_build(vd->vdev_child[i], avz, tx);
6489 }
6490}
6491
6492static void
6493spa_sync_config_object(spa_t *spa, dmu_tx_t *tx)
6494{
6495 nvlist_t *config;
6496
6497 /*
6498 * If the pool is being imported from a pre-per-vdev-ZAP version of ZFS,
6499 * its config may not be dirty but we still need to build per-vdev ZAPs.
6500 * Similarly, if the pool is being assembled (e.g. after a split), we
6501 * need to rebuild the AVZ although the config may not be dirty.
6502 */
6503 if (list_is_empty(&spa->spa_config_dirty_list) &&
6504 spa->spa_avz_action == AVZ_ACTION_NONE)
6505 return;
6506
6507 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
6508
6509 ASSERT(spa->spa_avz_action == AVZ_ACTION_NONE ||
6510 spa->spa_avz_action == AVZ_ACTION_INITIALIZE ||
6511 spa->spa_all_vdev_zaps != 0);
6512
6513 if (spa->spa_avz_action == AVZ_ACTION_REBUILD) {
6514 /* Make and build the new AVZ */
6515 uint64_t new_avz = zap_create(spa->spa_meta_objset,
6516 DMU_OTN_ZAP_METADATA, DMU_OT_NONE, 0, tx);
6517 spa_avz_build(spa->spa_root_vdev, new_avz, tx);
6518
6519 /* Diff old AVZ with new one */
6520 zap_cursor_t zc;
6521 zap_attribute_t za;
6522
6523 for (zap_cursor_init(&zc, spa->spa_meta_objset,
6524 spa->spa_all_vdev_zaps);
6525 zap_cursor_retrieve(&zc, &za) == 0;
6526 zap_cursor_advance(&zc)) {
6527 uint64_t vdzap = za.za_first_integer;
6528 if (zap_lookup_int(spa->spa_meta_objset, new_avz,
6529 vdzap) == ENOENT) {
6530 /*
6531 * ZAP is listed in old AVZ but not in new one;
6532 * destroy it
6533 */
6534 VERIFY0(zap_destroy(spa->spa_meta_objset, vdzap,
6535 tx));
6536 }
6537 }
6538
6539 zap_cursor_fini(&zc);
6540
6541 /* Destroy the old AVZ */
6542 VERIFY0(zap_destroy(spa->spa_meta_objset,
6543 spa->spa_all_vdev_zaps, tx));
6544
6545 /* Replace the old AVZ in the dir obj with the new one */
6546 VERIFY0(zap_update(spa->spa_meta_objset,
6547 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_VDEV_ZAP_MAP,
6548 sizeof (new_avz), 1, &new_avz, tx));
6549
6550 spa->spa_all_vdev_zaps = new_avz;
6551 } else if (spa->spa_avz_action == AVZ_ACTION_DESTROY) {
6552 zap_cursor_t zc;
6553 zap_attribute_t za;
6554
6555 /* Walk through the AVZ and destroy all listed ZAPs */
6556 for (zap_cursor_init(&zc, spa->spa_meta_objset,
6557 spa->spa_all_vdev_zaps);
6558 zap_cursor_retrieve(&zc, &za) == 0;
6559 zap_cursor_advance(&zc)) {
6560 uint64_t zap = za.za_first_integer;
6561 VERIFY0(zap_destroy(spa->spa_meta_objset, zap, tx));
6562 }
6563
6564 zap_cursor_fini(&zc);
6565
6566 /* Destroy and unlink the AVZ itself */
6567 VERIFY0(zap_destroy(spa->spa_meta_objset,
6568 spa->spa_all_vdev_zaps, tx));
6569 VERIFY0(zap_remove(spa->spa_meta_objset,
6570 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_VDEV_ZAP_MAP, tx));
6571 spa->spa_all_vdev_zaps = 0;
6572 }
6573
6574 if (spa->spa_all_vdev_zaps == 0) {
6575 spa->spa_all_vdev_zaps = zap_create_link(spa->spa_meta_objset,
6576 DMU_OTN_ZAP_METADATA, DMU_POOL_DIRECTORY_OBJECT,
6577 DMU_POOL_VDEV_ZAP_MAP, tx);
6578 }
6579 spa->spa_avz_action = AVZ_ACTION_NONE;
6580
6581 /* Create ZAPs for vdevs that don't have them. */
6582 vdev_construct_zaps(spa->spa_root_vdev, tx);
6583
6584 config = spa_config_generate(spa, spa->spa_root_vdev,
6585 dmu_tx_get_txg(tx), B_FALSE);
6586
6587 /*
6588 * If we're upgrading the spa version then make sure that
6589 * the config object gets updated with the correct version.
6590 */
6591 if (spa->spa_ubsync.ub_version < spa->spa_uberblock.ub_version)
6592 fnvlist_add_uint64(config, ZPOOL_CONFIG_VERSION,
6593 spa->spa_uberblock.ub_version);
6594
6595 spa_config_exit(spa, SCL_STATE, FTAG);
6596
6597 nvlist_free(spa->spa_config_syncing);
6598 spa->spa_config_syncing = config;
6599
6600 spa_sync_nvlist(spa, spa->spa_config_object, config, tx);
6601}
6602
6603static void
6604spa_sync_version(void *arg, dmu_tx_t *tx)
6605{
6606 uint64_t *versionp = arg;
6607 uint64_t version = *versionp;
6608 spa_t *spa = dmu_tx_pool(tx)->dp_spa;
6609
6610 /*
6611 * Setting the version is special cased when first creating the pool.
6612 */
6613 ASSERT(tx->tx_txg != TXG_INITIAL);
6614
6615 ASSERT(SPA_VERSION_IS_SUPPORTED(version));
6616 ASSERT(version >= spa_version(spa));
6617
6618 spa->spa_uberblock.ub_version = version;
6619 vdev_config_dirty(spa->spa_root_vdev);
6620 spa_history_log_internal(spa, "set", tx, "version=%lld", version);
6621}
6622
6623/*
6624 * Set zpool properties.
6625 */
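/*
 * Runs in syncing context.  "feature@..." entries enable the named feature,
 * non-persistent properties (altroot, readonly, cachefile) are skipped,
 * version and comment are handled specially, and the remaining properties
 * are written to the pool properties ZAP object.
 */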
6626static void
6627spa_sync_props(void *arg, dmu_tx_t *tx)
6628{
6629 nvlist_t *nvp = arg;
6630 spa_t *spa = dmu_tx_pool(tx)->dp_spa;
6631 objset_t *mos = spa->spa_meta_objset;
6632 nvpair_t *elem = NULL;
6633
6634 mutex_enter(&spa->spa_props_lock);
6635
6636 while ((elem = nvlist_next_nvpair(nvp, elem))) {
6637 uint64_t intval;
6638 char *strval, *fname;
6639 zpool_prop_t prop;
6640 const char *propname;
6641 zprop_type_t proptype;
6642 spa_feature_t fid;
6643
6644 switch (prop = zpool_name_to_prop(nvpair_name(elem))) {
6645 case ZPROP_INVAL:
6646 /*
6647 * We checked this earlier in spa_prop_validate().
6648 */
6649 ASSERT(zpool_prop_feature(nvpair_name(elem)));
6650
6651 fname = strchr(nvpair_name(elem), '@') + 1;
6652 VERIFY0(zfeature_lookup_name(fname, &fid));
6653
6654 spa_feature_enable(spa, fid, tx);
6655 spa_history_log_internal(spa, "set", tx,
6656 "%s=enabled", nvpair_name(elem));
6657 break;
6658
6659 case ZPOOL_PROP_VERSION:
6660 intval = fnvpair_value_uint64(elem);
6661 /*
6662			 * The version is synced separately before other
6663 * properties and should be correct by now.
6664 */
6665 ASSERT3U(spa_version(spa), >=, intval);
6666 break;
6667
6668 case ZPOOL_PROP_ALTROOT:
6669 /*
6670 * 'altroot' is a non-persistent property. It should
6671 * have been set temporarily at creation or import time.
6672 */
6673 ASSERT(spa->spa_root != NULL);
6674 break;
6675
6676 case ZPOOL_PROP_READONLY:
6677 case ZPOOL_PROP_CACHEFILE:
6678 /*
6679			 * 'readonly' and 'cachefile' are also non-persistent
6680 * properties.
6681 */
6682 break;
6683 case ZPOOL_PROP_COMMENT:
6684 strval = fnvpair_value_string(elem);
6685 if (spa->spa_comment != NULL)
6686 spa_strfree(spa->spa_comment);
6687 spa->spa_comment = spa_strdup(strval);
6688 /*
6689 * We need to dirty the configuration on all the vdevs
6690 * so that their labels get updated. It's unnecessary
6691 * to do this for pool creation since the vdev's
6692			 * configuration has already been dirtied.
6693 */
6694 if (tx->tx_txg != TXG_INITIAL)
6695 vdev_config_dirty(spa->spa_root_vdev);
6696 spa_history_log_internal(spa, "set", tx,
6697 "%s=%s", nvpair_name(elem), strval);
6698 break;
6699 default:
6700 /*
6701 * Set pool property values in the poolprops mos object.
6702 */
6703 if (spa->spa_pool_props_object == 0) {
6704 spa->spa_pool_props_object =
6705 zap_create_link(mos, DMU_OT_POOL_PROPS,
6706 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_PROPS,
6707 tx);
6708 }
6709
6710 /* normalize the property name */
6711 propname = zpool_prop_to_name(prop);
6712 proptype = zpool_prop_get_type(prop);
6713
6714 if (nvpair_type(elem) == DATA_TYPE_STRING) {
6715 ASSERT(proptype == PROP_TYPE_STRING);
6716 strval = fnvpair_value_string(elem);
6717 VERIFY0(zap_update(mos,
6718 spa->spa_pool_props_object, propname,
6719 1, strlen(strval) + 1, strval, tx));
6720 spa_history_log_internal(spa, "set", tx,
6721 "%s=%s", nvpair_name(elem), strval);
6722 } else if (nvpair_type(elem) == DATA_TYPE_UINT64) {
6723 intval = fnvpair_value_uint64(elem);
6724
6725 if (proptype == PROP_TYPE_INDEX) {
6726 const char *unused;
6727 VERIFY0(zpool_prop_index_to_string(
6728 prop, intval, &unused));
6729 }
6730 VERIFY0(zap_update(mos,
6731 spa->spa_pool_props_object, propname,
6732 8, 1, &intval, tx));
6733 spa_history_log_internal(spa, "set", tx,
6734 "%s=%lld", nvpair_name(elem), intval);
6735 } else {
6736 ASSERT(0); /* not allowed */
6737 }
6738
6739 switch (prop) {
6740 case ZPOOL_PROP_DELEGATION:
6741 spa->spa_delegation = intval;
6742 break;
6743 case ZPOOL_PROP_BOOTFS:
6744 spa->spa_bootfs = intval;
6745 break;
6746 case ZPOOL_PROP_FAILUREMODE:
6747 spa->spa_failmode = intval;
6748 break;
6749 case ZPOOL_PROP_AUTOEXPAND:
6750 spa->spa_autoexpand = intval;
6751 if (tx->tx_txg != TXG_INITIAL)
6752 spa_async_request(spa,
6753 SPA_ASYNC_AUTOEXPAND);
6754 break;
6755 case ZPOOL_PROP_DEDUPDITTO:
6756 spa->spa_dedup_ditto = intval;
6757 break;
6758 default:
6759 break;
6760 }
6761 }
6762
6763 }
6764
6765 mutex_exit(&spa->spa_props_lock);
6766}
6767
6768/*
6769 * Perform one-time upgrade on-disk changes. spa_version() does not
6770 * reflect the new version this txg, so there must be no changes this
6771 * txg to anything that the upgrade code depends on after it executes.
6772 * Therefore this must be called after dsl_pool_sync() does the sync
6773 * tasks.
6774 */
6775static void
6776spa_sync_upgrades(spa_t *spa, dmu_tx_t *tx)
6777{
6778 dsl_pool_t *dp = spa->spa_dsl_pool;
6779
6780 ASSERT(spa->spa_sync_pass == 1);
6781
6782 rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG);
6783
6784 if (spa->spa_ubsync.ub_version < SPA_VERSION_ORIGIN &&
6785 spa->spa_uberblock.ub_version >= SPA_VERSION_ORIGIN) {
6786 dsl_pool_create_origin(dp, tx);
6787
6788 /* Keeping the origin open increases spa_minref */
6789 spa->spa_minref += 3;
6790 }
6791
6792 if (spa->spa_ubsync.ub_version < SPA_VERSION_NEXT_CLONES &&
6793 spa->spa_uberblock.ub_version >= SPA_VERSION_NEXT_CLONES) {
6794 dsl_pool_upgrade_clones(dp, tx);
6795 }
6796
6797 if (spa->spa_ubsync.ub_version < SPA_VERSION_DIR_CLONES &&
6798 spa->spa_uberblock.ub_version >= SPA_VERSION_DIR_CLONES) {
6799 dsl_pool_upgrade_dir_clones(dp, tx);
6800
6801 /* Keeping the freedir open increases spa_minref */
6802 spa->spa_minref += 3;
6803 }
6804
6805 if (spa->spa_ubsync.ub_version < SPA_VERSION_FEATURES &&
6806 spa->spa_uberblock.ub_version >= SPA_VERSION_FEATURES) {
6807 spa_feature_create_zap_objects(spa, tx);
6808 }
6809
6810 /*
6811	 * The LZ4_COMPRESS feature's behavior was changed to activate_on_enable
6812	 * when the ability to use lz4 compression for metadata was added.
6813	 * Old pools that have this feature enabled must be upgraded to have
6814	 * this feature active.
6815 */
6816 if (spa->spa_uberblock.ub_version >= SPA_VERSION_FEATURES) {
6817 boolean_t lz4_en = spa_feature_is_enabled(spa,
6818 SPA_FEATURE_LZ4_COMPRESS);
6819 boolean_t lz4_ac = spa_feature_is_active(spa,
6820 SPA_FEATURE_LZ4_COMPRESS);
6821
6822 if (lz4_en && !lz4_ac)
6823 spa_feature_incr(spa, SPA_FEATURE_LZ4_COMPRESS, tx);
6824 }
6825
6826 /*
6827 * If we haven't written the salt, do so now. Note that the
6828 * feature may not be activated yet, but that's fine since
6829 * the presence of this ZAP entry is backwards compatible.
6830 */
6831 if (zap_contains(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
6832 DMU_POOL_CHECKSUM_SALT) == ENOENT) {
6833 VERIFY0(zap_add(spa->spa_meta_objset,
6834 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CHECKSUM_SALT, 1,
6835 sizeof (spa->spa_cksum_salt.zcs_bytes),
6836 spa->spa_cksum_salt.zcs_bytes, tx));
6837 }
6838
6839 rrw_exit(&dp->dp_config_rwlock, FTAG);
6840}
6841
6842/*
6843 * Sync the specified transaction group. New blocks may be dirtied as
6844 * part of the process, so we iterate until it converges.
6845 */
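/*
 * Roughly: convert pending vdev state changes into config changes, run
 * sync passes until the MOS is no longer dirty, rewrite the vdev labels
 * and uberblock to commit the txg, and finally update spa_ubsync.
 */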
6846void
6847spa_sync(spa_t *spa, uint64_t txg)
6848{
6849 dsl_pool_t *dp = spa->spa_dsl_pool;
6850 objset_t *mos = spa->spa_meta_objset;
6851 bplist_t *free_bpl = &spa->spa_free_bplist[txg & TXG_MASK];
6852 vdev_t *rvd = spa->spa_root_vdev;
6853 vdev_t *vd;
6854 dmu_tx_t *tx;
6855 int error;
6856 uint32_t max_queue_depth = zfs_vdev_async_write_max_active *
6857 zfs_vdev_queue_depth_pct / 100;
6858
6859 VERIFY(spa_writeable(spa));
6860
6861 /*
6862 * Lock out configuration changes.
6863 */
6864 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
6865
6866 spa->spa_syncing_txg = txg;
6867 spa->spa_sync_pass = 0;
6868
6869 mutex_enter(&spa->spa_alloc_lock);
6870 VERIFY0(avl_numnodes(&spa->spa_alloc_tree));
6871 mutex_exit(&spa->spa_alloc_lock);
6872
6873 /*
6874 * If there are any pending vdev state changes, convert them
6875 * into config changes that go out with this transaction group.
6876 */
6877 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
6878 while (list_head(&spa->spa_state_dirty_list) != NULL) {
6879 /*
6880 * We need the write lock here because, for aux vdevs,
6881 * calling vdev_config_dirty() modifies sav_config.
6882 * This is ugly and will become unnecessary when we
6883 * eliminate the aux vdev wart by integrating all vdevs
6884 * into the root vdev tree.
6885 */
6886 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
6887 spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_WRITER);
6888 while ((vd = list_head(&spa->spa_state_dirty_list)) != NULL) {
6889 vdev_state_clean(vd);
6890 vdev_config_dirty(vd);
6891 }
6892 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
6893 spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
6894 }
6895 spa_config_exit(spa, SCL_STATE, FTAG);
6896
6897 tx = dmu_tx_create_assigned(dp, txg);
6898
6899 spa->spa_sync_starttime = gethrtime();
6900#ifdef illumos
6901 VERIFY(cyclic_reprogram(spa->spa_deadman_cycid,
6902 spa->spa_sync_starttime + spa->spa_deadman_synctime));
6903#else /* !illumos */
6904#ifdef _KERNEL
6905 callout_schedule(&spa->spa_deadman_cycid,
6906 hz * spa->spa_deadman_synctime / NANOSEC);
6907#endif
6908#endif /* illumos */
6909
6910 /*
6911 * If we are upgrading to SPA_VERSION_RAIDZ_DEFLATE this txg,
6912 * set spa_deflate if we have no raid-z vdevs.
6913 */
6914 if (spa->spa_ubsync.ub_version < SPA_VERSION_RAIDZ_DEFLATE &&
6915 spa->spa_uberblock.ub_version >= SPA_VERSION_RAIDZ_DEFLATE) {
6916 int i;
6917
6918 for (i = 0; i < rvd->vdev_children; i++) {
6919 vd = rvd->vdev_child[i];
6920 if (vd->vdev_deflate_ratio != SPA_MINBLOCKSIZE)
6921 break;
6922 }
6923 if (i == rvd->vdev_children) {
6924 spa->spa_deflate = TRUE;
6925 VERIFY(0 == zap_add(spa->spa_meta_objset,
6926 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
6927 sizeof (uint64_t), 1, &spa->spa_deflate, tx));
6928 }
6929 }
6930
6931 /*
6932 * Set the top-level vdev's max queue depth. Evaluate each
6933 * top-level's async write queue depth in case it changed.
6934 * The max queue depth will not change in the middle of syncing
6935 * out this txg.
6936 */
6937 uint64_t queue_depth_total = 0;
6938 for (int c = 0; c < rvd->vdev_children; c++) {
6939 vdev_t *tvd = rvd->vdev_child[c];
6940 metaslab_group_t *mg = tvd->vdev_mg;
6941
6942 if (mg == NULL || mg->mg_class != spa_normal_class(spa) ||
6943 !metaslab_group_initialized(mg))
6944 continue;
6945
6946 /*
6947 * It is safe to do a lock-free check here because only async
6948 * allocations look at mg_max_alloc_queue_depth, and async
6949 * allocations all happen from spa_sync().
6950 */
6951 ASSERT0(refcount_count(&mg->mg_alloc_queue_depth));
6952 mg->mg_max_alloc_queue_depth = max_queue_depth;
6953 queue_depth_total += mg->mg_max_alloc_queue_depth;
6954 }
6955 metaslab_class_t *mc = spa_normal_class(spa);
6956 ASSERT0(refcount_count(&mc->mc_alloc_slots));
6957 mc->mc_alloc_max_slots = queue_depth_total;
6958 mc->mc_alloc_throttle_enabled = zio_dva_throttle_enabled;
6959
6960 ASSERT3U(mc->mc_alloc_max_slots, <=,
6961 max_queue_depth * rvd->vdev_children);
6962
6963 /*
6964 * Iterate to convergence.
6965 */
6966 do {
6967 int pass = ++spa->spa_sync_pass;
6968
6969 spa_sync_config_object(spa, tx);
6970 spa_sync_aux_dev(spa, &spa->spa_spares, tx,
6971 ZPOOL_CONFIG_SPARES, DMU_POOL_SPARES);
6972 spa_sync_aux_dev(spa, &spa->spa_l2cache, tx,
6973 ZPOOL_CONFIG_L2CACHE, DMU_POOL_L2CACHE);
6974 spa_errlog_sync(spa, txg);
6975 dsl_pool_sync(dp, txg);
6976
6977 if (pass < zfs_sync_pass_deferred_free) {
6978 spa_sync_frees(spa, free_bpl, tx);
6979 } else {
6980 /*
6981			 * We cannot defer frees in pass 1, because
6982			 * we sync the deferred frees later in pass 1.
6983 */
6984 ASSERT3U(pass, >, 1);
6985 bplist_iterate(free_bpl, bpobj_enqueue_cb,
6986 &spa->spa_deferred_bpobj, tx);
6987 }
6988
6989 ddt_sync(spa, txg);
6990 dsl_scan_sync(dp, tx);
6991
6992 while (vd = txg_list_remove(&spa->spa_vdev_txg_list, txg))
6993 vdev_sync(vd, txg);
6994
6995 if (pass == 1) {
6996 spa_sync_upgrades(spa, tx);
6997 ASSERT3U(txg, >=,
6998 spa->spa_uberblock.ub_rootbp.blk_birth);
6999 /*
7000 * Note: We need to check if the MOS is dirty
7001 * because we could have marked the MOS dirty
7002 * without updating the uberblock (e.g. if we
7003 * have sync tasks but no dirty user data). We
7004 * need to check the uberblock's rootbp because
7005 * it is updated if we have synced out dirty
7006 * data (though in this case the MOS will most
7007 * likely also be dirty due to second order
7008 * effects, we don't want to rely on that here).
7009 */
7010 if (spa->spa_uberblock.ub_rootbp.blk_birth < txg &&
7011 !dmu_objset_is_dirty(mos, txg)) {
7012 /*
7013 * Nothing changed on the first pass,
7014 * therefore this TXG is a no-op. Avoid
7015 * syncing deferred frees, so that we
7016 * can keep this TXG as a no-op.
7017 */
7018 ASSERT(txg_list_empty(&dp->dp_dirty_datasets,
7019 txg));
7020 ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg));
7021 ASSERT(txg_list_empty(&dp->dp_sync_tasks, txg));
7022 break;
7023 }
7024 spa_sync_deferred_frees(spa, tx);
7025 }
7026
7027 } while (dmu_objset_is_dirty(mos, txg));
7028
7029 if (!list_is_empty(&spa->spa_config_dirty_list)) {
7030 /*
7031 * Make sure that the number of ZAPs for all the vdevs matches
7032 * the number of ZAPs in the per-vdev ZAP list. This only gets
7033 * called if the config is dirty; otherwise there may be
7034 * outstanding AVZ operations that weren't completed in
7035 * spa_sync_config_object.
7036 */
7037 uint64_t all_vdev_zap_entry_count;
7038 ASSERT0(zap_count(spa->spa_meta_objset,
7039 spa->spa_all_vdev_zaps, &all_vdev_zap_entry_count));
7040 ASSERT3U(vdev_count_verify_zaps(spa->spa_root_vdev), ==,
7041 all_vdev_zap_entry_count);
7042 }
7043
7044 /*
7045 * Rewrite the vdev configuration (which includes the uberblock)
7046 * to commit the transaction group.
7047 *
7048 * If there are no dirty vdevs, we sync the uberblock to a few
7049 * random top-level vdevs that are known to be visible in the
7050 * config cache (see spa_vdev_add() for a complete description).
7051 * If there *are* dirty vdevs, sync the uberblock to all vdevs.
7052 */
7053 for (;;) {
7054 /*
7055 * We hold SCL_STATE to prevent vdev open/close/etc.
7056 * while we're attempting to write the vdev labels.
7057 */
7058 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
7059
7060 if (list_is_empty(&spa->spa_config_dirty_list)) {
7061 vdev_t *svd[SPA_DVAS_PER_BP];
7062 int svdcount = 0;
7063 int children = rvd->vdev_children;
7064 int c0 = spa_get_random(children);
7065
7066 for (int c = 0; c < children; c++) {
7067 vd = rvd->vdev_child[(c0 + c) % children];
7068 if (vd->vdev_ms_array == 0 || vd->vdev_islog)
7069 continue;
7070 svd[svdcount++] = vd;
7071 if (svdcount == SPA_DVAS_PER_BP)
7072 break;
7073 }
7074 error = vdev_config_sync(svd, svdcount, txg);
7075 } else {
7076 error = vdev_config_sync(rvd->vdev_child,
7077 rvd->vdev_children, txg);
7078 }
7079
7080 if (error == 0)
7081 spa->spa_last_synced_guid = rvd->vdev_guid;
7082
7083 spa_config_exit(spa, SCL_STATE, FTAG);
7084
7085 if (error == 0)
7086 break;
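		/*
		 * The label/uberblock write failed; suspend pool I/O and
		 * wait for it to be resumed before retrying the sync.
		 */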
7087 zio_suspend(spa, NULL);
7088 zio_resume_wait(spa);
7089 }
7090 dmu_tx_commit(tx);
7091
7092#ifdef illumos
7093 VERIFY(cyclic_reprogram(spa->spa_deadman_cycid, CY_INFINITY));
7094#else /* !illumos */
7095#ifdef _KERNEL
7096 callout_drain(&spa->spa_deadman_cycid);
7097#endif
7098#endif /* illumos */
7099
7100 /*
7101 * Clear the dirty config list.
7102 */
7103 while ((vd = list_head(&spa->spa_config_dirty_list)) != NULL)
7104 vdev_config_clean(vd);
7105
7106 /*
7107 * Now that the new config has synced transactionally,
7108 * let it become visible to the config cache.
7109 */
7110 if (spa->spa_config_syncing != NULL) {
7111 spa_config_set(spa, spa->spa_config_syncing);
7112 spa->spa_config_txg = txg;
7113 spa->spa_config_syncing = NULL;
7114 }
7115
7116 dsl_pool_sync_done(dp, txg);
7117
7118 mutex_enter(&spa->spa_alloc_lock);
7119 VERIFY0(avl_numnodes(&spa->spa_alloc_tree));
7120 mutex_exit(&spa->spa_alloc_lock);
7121
7122 /*
7123 * Update usable space statistics.
7124 */
7125	while ((vd = txg_list_remove(&spa->spa_vdev_txg_list, TXG_CLEAN(txg))) != NULL)
7126 vdev_sync_done(vd, txg);
7127
7128 spa_update_dspace(spa);
7129
7130 /*
7131 * It had better be the case that we didn't dirty anything
7132 * since vdev_config_sync().
7133 */
7134 ASSERT(txg_list_empty(&dp->dp_dirty_datasets, txg));
7135 ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg));
7136 ASSERT(txg_list_empty(&spa->spa_vdev_txg_list, txg));
7137
7138 spa->spa_sync_pass = 0;
7139
7140 /*
7141 * Update the last synced uberblock here. We want to do this at
7142 * the end of spa_sync() so that consumers of spa_last_synced_txg()
7143 * will be guaranteed that all the processing associated with
7144 * that txg has been completed.
7145 */
7146 spa->spa_ubsync = spa->spa_uberblock;
7147 spa_config_exit(spa, SCL_CONFIG, FTAG);
7148
7149 spa_handle_ignored_writes(spa);
7150
7151 /*
7152 * If any async tasks have been requested, kick them off.
7153 */
7154 spa_async_dispatch(spa);
7155 spa_async_dispatch_vd(spa);
7156}
7157
7158/*
7159 * Sync all pools. We don't want to hold the namespace lock across these
7160 * operations, so we take a reference on the spa_t and drop the lock during the
7161 * sync.
7162 */
7163void
7164spa_sync_allpools(void)
7165{
7166 spa_t *spa = NULL;
7167 mutex_enter(&spa_namespace_lock);
7168 while ((spa = spa_next(spa)) != NULL) {
7169 if (spa_state(spa) != POOL_STATE_ACTIVE ||
7170 !spa_writeable(spa) || spa_suspended(spa))
7171 continue;
7172 spa_open_ref(spa, FTAG);
7173 mutex_exit(&spa_namespace_lock);
7174 txg_wait_synced(spa_get_dsl(spa), 0);
7175 mutex_enter(&spa_namespace_lock);
7176 spa_close(spa, FTAG);
7177 }
7178 mutex_exit(&spa_namespace_lock);
7179}
7180
7181/*
7182 * ==========================================================================
7183 * Miscellaneous routines
7184 * ==========================================================================
7185 */
7186
7187/*
7188 * Remove all pools in the system.
7189 */
7190void
7191spa_evict_all(void)
7192{
7193 spa_t *spa;
7194
7195 /*
7196 * Remove all cached state. All pools should be closed now,
7197 * so every spa in the AVL tree should be unreferenced.
7198 */
7199 mutex_enter(&spa_namespace_lock);
7200 while ((spa = spa_next(NULL)) != NULL) {
7201 /*
7202 * Stop async tasks. The async thread may need to detach
7203 * a device that's been replaced, which requires grabbing
7204 * spa_namespace_lock, so we must drop it here.
7205 */
7206 spa_open_ref(spa, FTAG);
7207 mutex_exit(&spa_namespace_lock);
7208 spa_async_suspend(spa);
7209 mutex_enter(&spa_namespace_lock);
7210 spa_close(spa, FTAG);
7211
7212 if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
7213 spa_unload(spa);
7214 spa_deactivate(spa);
7215 }
7216 spa_remove(spa);
7217 }
7218 mutex_exit(&spa_namespace_lock);
7219}
7220
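/*
 * Look up a vdev in this pool by guid. If 'aux' is set, the l2cache and
 * spare auxiliary vdevs are searched as well.
 */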
7221vdev_t *
7222spa_lookup_by_guid(spa_t *spa, uint64_t guid, boolean_t aux)
7223{
7224 vdev_t *vd;
7225 int i;
7226
7227 if ((vd = vdev_lookup_by_guid(spa->spa_root_vdev, guid)) != NULL)
7228 return (vd);
7229
7230 if (aux) {
7231 for (i = 0; i < spa->spa_l2cache.sav_count; i++) {
7232 vd = spa->spa_l2cache.sav_vdevs[i];
7233 if (vd->vdev_guid == guid)
7234 return (vd);
7235 }
7236
7237 for (i = 0; i < spa->spa_spares.sav_count; i++) {
7238 vd = spa->spa_spares.sav_vdevs[i];
7239 if (vd->vdev_guid == guid)
7240 return (vd);
7241 }
7242 }
7243
7244 return (NULL);
7245}
7246
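/*
 * Upgrade the pool to the requested on-disk version. The new version is
 * recorded in the in-core uberblock, the vdev config is dirtied so the
 * labels get rewritten, and we wait for the change to sync out.
 */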
7247void
7248spa_upgrade(spa_t *spa, uint64_t version)
7249{
7250 ASSERT(spa_writeable(spa));
7251
7252 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
7253
7254 /*
7255 * This should only be called for a non-faulted pool, and since a
7256 * future version would result in an unopenable pool, this shouldn't be
7257 * possible.
7258 */
7259 ASSERT(SPA_VERSION_IS_SUPPORTED(spa->spa_uberblock.ub_version));
7260 ASSERT3U(version, >=, spa->spa_uberblock.ub_version);
7261
7262 spa->spa_uberblock.ub_version = version;
7263 vdev_config_dirty(spa->spa_root_vdev);
7264
7265 spa_config_exit(spa, SCL_ALL, FTAG);
7266
7267 txg_wait_synced(spa_get_dsl(spa), 0);
7268}
7269
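/*
 * Check whether the given guid refers to one of this pool's hot spares,
 * including spares whose addition is still pending.
 */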
7270boolean_t
7271spa_has_spare(spa_t *spa, uint64_t guid)
7272{
7273 int i;
7274 uint64_t spareguid;
7275 spa_aux_vdev_t *sav = &spa->spa_spares;
7276
7277 for (i = 0; i < sav->sav_count; i++)
7278 if (sav->sav_vdevs[i]->vdev_guid == guid)
7279 return (B_TRUE);
7280
7281 for (i = 0; i < sav->sav_npending; i++) {
7282 if (nvlist_lookup_uint64(sav->sav_pending[i], ZPOOL_CONFIG_GUID,
7283 &spareguid) == 0 && spareguid == guid)
7284 return (B_TRUE);
7285 }
7286
7287 return (B_FALSE);
7288}
7289
7290/*
7291 * Check if a pool has an active shared spare device.
7292 * Note: an active spare's reference count is 2 (as a spare and as a replacement)
7293 */
7294static boolean_t
7295spa_has_active_shared_spare(spa_t *spa)
7296{
7297 int i, refcnt;
7298 uint64_t pool;
7299 spa_aux_vdev_t *sav = &spa->spa_spares;
7300
7301 for (i = 0; i < sav->sav_count; i++) {
7302 if (spa_spare_exists(sav->sav_vdevs[i]->vdev_guid, &pool,
7303 &refcnt) && pool != 0ULL && pool == spa_guid(spa) &&
7304 refcnt > 2)
7305 return (B_TRUE);
7306 }
7307
7308 return (B_FALSE);
7309}
7310
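/*
 * Allocate a sysevent describing this pool and, optionally, a vdev.
 * This is only built in the kernel; userland callers simply get NULL.
 */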
7311static sysevent_t *
7312spa_event_create(spa_t *spa, vdev_t *vd, const char *name)
7313{
7314 sysevent_t *ev = NULL;
7315#ifdef _KERNEL
7316 sysevent_attr_list_t *attr = NULL;
7317 sysevent_value_t value;
7318
7319 ev = sysevent_alloc(EC_ZFS, (char *)name, SUNW_KERN_PUB "zfs",
7320 SE_SLEEP);
7321 ASSERT(ev != NULL);
7322
7323 value.value_type = SE_DATA_TYPE_STRING;
7324 value.value.sv_string = spa_name(spa);
7325 if (sysevent_add_attr(&attr, ZFS_EV_POOL_NAME, &value, SE_SLEEP) != 0)
7326 goto done;
7327
7328 value.value_type = SE_DATA_TYPE_UINT64;
7329 value.value.sv_uint64 = spa_guid(spa);
7330 if (sysevent_add_attr(&attr, ZFS_EV_POOL_GUID, &value, SE_SLEEP) != 0)
7331 goto done;
7332
7333 if (vd) {
7334 value.value_type = SE_DATA_TYPE_UINT64;
7335 value.value.sv_uint64 = vd->vdev_guid;
7336 if (sysevent_add_attr(&attr, ZFS_EV_VDEV_GUID, &value,
7337 SE_SLEEP) != 0)
7338 goto done;
7339
7340 if (vd->vdev_path) {
7341 value.value_type = SE_DATA_TYPE_STRING;
7342 value.value.sv_string = vd->vdev_path;
7343 if (sysevent_add_attr(&attr, ZFS_EV_VDEV_PATH,
7344 &value, SE_SLEEP) != 0)
7345 goto done;
7346 }
7347 }
7348
7349 if (sysevent_attach_attributes(ev, attr) != 0)
7350 goto done;
7351 attr = NULL;
7352
7353done:
7354 if (attr)
7355 sysevent_free_attr(attr);
7356
7357#endif
7358 return (ev);
7359}
7360
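/*
 * Log a previously created sysevent and free it.
 */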
7361static void
7362spa_event_post(sysevent_t *ev)
7363{
7364#ifdef _KERNEL
7365 sysevent_id_t eid;
7366
7367 (void) log_sysevent(ev, SE_SLEEP, &eid);
7368 sysevent_free(ev);
7369#endif
7370}
7371
7372/*
7373 * Post a sysevent corresponding to the given event. The 'name' must be one of
7374 * the event definitions in sys/sysevent/eventdefs.h. The payload will be
7375 * filled in from the spa and (optionally) the vdev. This doesn't do anything
7376 * in the userland libzpool, as we don't want consumers to misinterpret ztest
7377 * or zdb as real changes.
7378 */
7379void
7380spa_event_notify(spa_t *spa, vdev_t *vd, const char *name)
7381{
7382 spa_event_post(spa_event_create(spa, vd, name));
7383}
4288 * Remove the existing root pool from the namespace so
4289 * that we can replace it with the correct config
4290 * we just read in.
4291 */
4292 spa_remove(spa);
4293 }
4294 spa = spa_add(pname, config, NULL);
4295
4296 /*
4297 * Set spa_ubsync.ub_version as it can be used in vdev_alloc()
4298 * via spa_version().
4299 */
4300 if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
4301 &spa->spa_ubsync.ub_version) != 0)
4302 spa->spa_ubsync.ub_version = SPA_VERSION_INITIAL;
4303 } else if ((spa = spa_lookup(name)) == NULL) {
4304 mutex_exit(&spa_namespace_lock);
4305 nvlist_free(config);
4306 cmn_err(CE_NOTE, "Cannot find the pool label for '%s'",
4307 name);
4308 return (EIO);
4309 } else {
4310 VERIFY(nvlist_dup(spa->spa_config, &config, KM_SLEEP) == 0);
4311 }
4312 spa->spa_is_root = B_TRUE;
4313 spa->spa_import_flags = ZFS_IMPORT_VERBATIM;
4314
4315 /*
4316 * Build up a vdev tree based on the boot device's label config.
4317 */
4318 VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
4319 &nvtop) == 0);
4320 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
4321 error = spa_config_parse(spa, &rvd, nvtop, NULL, 0,
4322 VDEV_ALLOC_ROOTPOOL);
4323 spa_config_exit(spa, SCL_ALL, FTAG);
4324 if (error) {
4325 mutex_exit(&spa_namespace_lock);
4326 nvlist_free(config);
4327		cmn_err(CE_NOTE, "Cannot parse the config for pool '%s'",
4328 pname);
4329 return (error);
4330 }
4331
4332 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
4333 vdev_free(rvd);
4334 spa_config_exit(spa, SCL_ALL, FTAG);
4335 mutex_exit(&spa_namespace_lock);
4336
4337 nvlist_free(config);
4338 return (0);
4339}
4340
4341#endif /* illumos */
4342#endif /* _KERNEL */
4343
4344/*
4345 * Import a non-root pool into the system.
4346 */
4347int
4348spa_import(const char *pool, nvlist_t *config, nvlist_t *props, uint64_t flags)
4349{
4350 spa_t *spa;
4351 char *altroot = NULL;
4352 spa_load_state_t state = SPA_LOAD_IMPORT;
4353 zpool_rewind_policy_t policy;
4354 uint64_t mode = spa_mode_global;
4355 uint64_t readonly = B_FALSE;
4356 int error;
4357 nvlist_t *nvroot;
4358 nvlist_t **spares, **l2cache;
4359 uint_t nspares, nl2cache;
4360
4361 /*
4362 * If a pool with this name exists, return failure.
4363 */
4364 mutex_enter(&spa_namespace_lock);
4365 if (spa_lookup(pool) != NULL) {
4366 mutex_exit(&spa_namespace_lock);
4367 return (SET_ERROR(EEXIST));
4368 }
4369
4370 /*
4371 * Create and initialize the spa structure.
4372 */
4373 (void) nvlist_lookup_string(props,
4374 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
4375 (void) nvlist_lookup_uint64(props,
4376 zpool_prop_to_name(ZPOOL_PROP_READONLY), &readonly);
4377 if (readonly)
4378 mode = FREAD;
4379 spa = spa_add(pool, config, altroot);
4380 spa->spa_import_flags = flags;
4381
4382 /*
4383 * Verbatim import - Take a pool and insert it into the namespace
4384 * as if it had been loaded at boot.
4385 */
4386 if (spa->spa_import_flags & ZFS_IMPORT_VERBATIM) {
4387 if (props != NULL)
4388 spa_configfile_set(spa, props, B_FALSE);
4389
4390 spa_config_sync(spa, B_FALSE, B_TRUE);
4391 spa_event_notify(spa, NULL, ESC_ZFS_POOL_IMPORT);
4392
4393 mutex_exit(&spa_namespace_lock);
4394 return (0);
4395 }
4396
4397 spa_activate(spa, mode);
4398
4399 /*
4400 * Don't start async tasks until we know everything is healthy.
4401 */
4402 spa_async_suspend(spa);
4403
4404 zpool_get_rewind_policy(config, &policy);
4405 if (policy.zrp_request & ZPOOL_DO_REWIND)
4406 state = SPA_LOAD_RECOVER;
4407
4408 /*
4409 * Pass off the heavy lifting to spa_load(). Pass TRUE for mosconfig
4410 * because the user-supplied config is actually the one to trust when
4411 * doing an import.
4412 */
4413 if (state != SPA_LOAD_RECOVER)
4414 spa->spa_last_ubsync_txg = spa->spa_load_txg = 0;
4415
4416 error = spa_load_best(spa, state, B_TRUE, policy.zrp_txg,
4417 policy.zrp_request);
4418
4419 /*
4420 * Propagate anything learned while loading the pool and pass it
4421 * back to caller (i.e. rewind info, missing devices, etc).
4422 */
4423 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_LOAD_INFO,
4424 spa->spa_load_info) == 0);
4425
4426 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
4427 /*
4428 * Toss any existing sparelist, as it doesn't have any validity
4429 * anymore, and conflicts with spa_has_spare().
4430 */
4431 if (spa->spa_spares.sav_config) {
4432 nvlist_free(spa->spa_spares.sav_config);
4433 spa->spa_spares.sav_config = NULL;
4434 spa_load_spares(spa);
4435 }
4436 if (spa->spa_l2cache.sav_config) {
4437 nvlist_free(spa->spa_l2cache.sav_config);
4438 spa->spa_l2cache.sav_config = NULL;
4439 spa_load_l2cache(spa);
4440 }
4441
4442 VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
4443 &nvroot) == 0);
4444 if (error == 0)
4445 error = spa_validate_aux(spa, nvroot, -1ULL,
4446 VDEV_ALLOC_SPARE);
4447 if (error == 0)
4448 error = spa_validate_aux(spa, nvroot, -1ULL,
4449 VDEV_ALLOC_L2CACHE);
4450 spa_config_exit(spa, SCL_ALL, FTAG);
4451
4452 if (props != NULL)
4453 spa_configfile_set(spa, props, B_FALSE);
4454
4455 if (error != 0 || (props && spa_writeable(spa) &&
4456 (error = spa_prop_set(spa, props)))) {
4457 spa_unload(spa);
4458 spa_deactivate(spa);
4459 spa_remove(spa);
4460 mutex_exit(&spa_namespace_lock);
4461 return (error);
4462 }
4463
4464 spa_async_resume(spa);
4465
4466 /*
4467 * Override any spares and level 2 cache devices as specified by
4468 * the user, as these may have correct device names/devids, etc.
4469 */
4470 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
4471 &spares, &nspares) == 0) {
4472 if (spa->spa_spares.sav_config)
4473 VERIFY(nvlist_remove(spa->spa_spares.sav_config,
4474 ZPOOL_CONFIG_SPARES, DATA_TYPE_NVLIST_ARRAY) == 0);
4475 else
4476 VERIFY(nvlist_alloc(&spa->spa_spares.sav_config,
4477 NV_UNIQUE_NAME, KM_SLEEP) == 0);
4478 VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
4479 ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
4480 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
4481 spa_load_spares(spa);
4482 spa_config_exit(spa, SCL_ALL, FTAG);
4483 spa->spa_spares.sav_sync = B_TRUE;
4484 }
4485 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
4486 &l2cache, &nl2cache) == 0) {
4487 if (spa->spa_l2cache.sav_config)
4488 VERIFY(nvlist_remove(spa->spa_l2cache.sav_config,
4489 ZPOOL_CONFIG_L2CACHE, DATA_TYPE_NVLIST_ARRAY) == 0);
4490 else
4491 VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config,
4492 NV_UNIQUE_NAME, KM_SLEEP) == 0);
4493 VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config,
4494 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
4495 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
4496 spa_load_l2cache(spa);
4497 spa_config_exit(spa, SCL_ALL, FTAG);
4498 spa->spa_l2cache.sav_sync = B_TRUE;
4499 }
4500
4501 /*
4502 * Check for any removed devices.
4503 */
4504 if (spa->spa_autoreplace) {
4505 spa_aux_check_removed(&spa->spa_spares);
4506 spa_aux_check_removed(&spa->spa_l2cache);
4507 }
4508
4509 if (spa_writeable(spa)) {
4510 /*
4511 * Update the config cache to include the newly-imported pool.
4512 */
4513 spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
4514 }
4515
4516 /*
4517 * It's possible that the pool was expanded while it was exported.
4518 * We kick off an async task to handle this for us.
4519 */
4520 spa_async_request(spa, SPA_ASYNC_AUTOEXPAND);
4521
4522 spa_history_log_version(spa, "import");
4523
4524 spa_event_notify(spa, NULL, ESC_ZFS_POOL_IMPORT);
4525
4526 mutex_exit(&spa_namespace_lock);
4527
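	/*
	 * On FreeBSD, create the /dev/zvol minor nodes for any volumes
	 * in the newly imported pool.
	 */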
4528#ifdef __FreeBSD__
4529#ifdef _KERNEL
4530 zvol_create_minors(pool);
4531#endif
4532#endif
4533 return (0);
4534}
4535
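/*
 * Probe the pool described by 'tryconfig' without really importing it.
 * A temporary spa named TRYIMPORT_NAME is created, loaded read-only, and
 * torn down again; if the config was at least parsable, the generated
 * config (including spares, l2cache, and bootfs info) is returned.
 */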
4536nvlist_t *
4537spa_tryimport(nvlist_t *tryconfig)
4538{
4539 nvlist_t *config = NULL;
4540 char *poolname;
4541 spa_t *spa;
4542 uint64_t state;
4543 int error;
4544
4545 if (nvlist_lookup_string(tryconfig, ZPOOL_CONFIG_POOL_NAME, &poolname))
4546 return (NULL);
4547
4548 if (nvlist_lookup_uint64(tryconfig, ZPOOL_CONFIG_POOL_STATE, &state))
4549 return (NULL);
4550
4551 /*
4552 * Create and initialize the spa structure.
4553 */
4554 mutex_enter(&spa_namespace_lock);
4555 spa = spa_add(TRYIMPORT_NAME, tryconfig, NULL);
4556 spa_activate(spa, FREAD);
4557
4558 /*
4559 * Pass off the heavy lifting to spa_load().
4560 * Pass TRUE for mosconfig because the user-supplied config
4561 * is actually the one to trust when doing an import.
4562 */
4563 error = spa_load(spa, SPA_LOAD_TRYIMPORT, SPA_IMPORT_EXISTING, B_TRUE);
4564
4565 /*
4566 * If 'tryconfig' was at least parsable, return the current config.
4567 */
4568 if (spa->spa_root_vdev != NULL) {
4569 config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
4570 VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME,
4571 poolname) == 0);
4572 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE,
4573 state) == 0);
4574 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_TIMESTAMP,
4575 spa->spa_uberblock.ub_timestamp) == 0);
4576 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_LOAD_INFO,
4577 spa->spa_load_info) == 0);
4578
4579 /*
4580 * If the bootfs property exists on this pool then we
4581 * copy it out so that external consumers can tell which
4582 * pools are bootable.
4583 */
4584 if ((!error || error == EEXIST) && spa->spa_bootfs) {
4585 char *tmpname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
4586
4587 /*
4588 * We have to play games with the name since the
4589 * pool was opened as TRYIMPORT_NAME.
4590 */
4591 if (dsl_dsobj_to_dsname(spa_name(spa),
4592 spa->spa_bootfs, tmpname) == 0) {
4593 char *cp;
4594 char *dsname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
4595
4596 cp = strchr(tmpname, '/');
4597 if (cp == NULL) {
4598 (void) strlcpy(dsname, tmpname,
4599 MAXPATHLEN);
4600 } else {
4601 (void) snprintf(dsname, MAXPATHLEN,
4602 "%s/%s", poolname, ++cp);
4603 }
4604 VERIFY(nvlist_add_string(config,
4605 ZPOOL_CONFIG_BOOTFS, dsname) == 0);
4606 kmem_free(dsname, MAXPATHLEN);
4607 }
4608 kmem_free(tmpname, MAXPATHLEN);
4609 }
4610
4611 /*
4612 * Add the list of hot spares and level 2 cache devices.
4613 */
4614 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
4615 spa_add_spares(spa, config);
4616 spa_add_l2cache(spa, config);
4617 spa_config_exit(spa, SCL_CONFIG, FTAG);
4618 }
4619
4620 spa_unload(spa);
4621 spa_deactivate(spa);
4622 spa_remove(spa);
4623 mutex_exit(&spa_namespace_lock);
4624
4625 return (config);
4626}
4627
4628/*
4629 * Pool export/destroy
4630 *
4631 * The act of destroying or exporting a pool is very simple. We make sure there
4632 * is no more pending I/O and any references to the pool are gone. Then, we
4633 * update the pool state and sync all the labels to disk, removing the
4634 * configuration from the cache afterwards. If the 'hardforce' flag is set, then
4635 * we don't sync the labels or remove the configuration cache.
4636 */
4637static int
4638spa_export_common(char *pool, int new_state, nvlist_t **oldconfig,
4639 boolean_t force, boolean_t hardforce)
4640{
4641 spa_t *spa;
4642
4643 if (oldconfig)
4644 *oldconfig = NULL;
4645
4646 if (!(spa_mode_global & FWRITE))
4647 return (SET_ERROR(EROFS));
4648
4649 mutex_enter(&spa_namespace_lock);
4650 if ((spa = spa_lookup(pool)) == NULL) {
4651 mutex_exit(&spa_namespace_lock);
4652 return (SET_ERROR(ENOENT));
4653 }
4654
4655 /*
4656 * Put a hold on the pool, drop the namespace lock, stop async tasks,
4657 * reacquire the namespace lock, and see if we can export.
4658 */
4659 spa_open_ref(spa, FTAG);
4660 mutex_exit(&spa_namespace_lock);
4661 spa_async_suspend(spa);
4662 mutex_enter(&spa_namespace_lock);
4663 spa_close(spa, FTAG);
4664
4665 /*
4666 * The pool will be in core if it's openable,
4667 * in which case we can modify its state.
4668 */
4669 if (spa->spa_state != POOL_STATE_UNINITIALIZED && spa->spa_sync_on) {
4670 /*
4671 * Objsets may be open only because they're dirty, so we
4672 * have to force it to sync before checking spa_refcnt.
4673 */
4674 txg_wait_synced(spa->spa_dsl_pool, 0);
4675 spa_evicting_os_wait(spa);
4676
4677 /*
4678 * A pool cannot be exported or destroyed if there are active
4679 * references. If we are resetting a pool, allow references by
4680 * fault injection handlers.
4681 */
4682 if (!spa_refcount_zero(spa) ||
4683 (spa->spa_inject_ref != 0 &&
4684 new_state != POOL_STATE_UNINITIALIZED)) {
4685 spa_async_resume(spa);
4686 mutex_exit(&spa_namespace_lock);
4687 return (SET_ERROR(EBUSY));
4688 }
4689
4690 /*
4691 * A pool cannot be exported if it has an active shared spare.
4692 * This is to prevent other pools stealing the active spare
4693		 * from an exported pool. The user may still force the export
4694		 * if desired.
4695 */
4696 if (!force && new_state == POOL_STATE_EXPORTED &&
4697 spa_has_active_shared_spare(spa)) {
4698 spa_async_resume(spa);
4699 mutex_exit(&spa_namespace_lock);
4700 return (SET_ERROR(EXDEV));
4701 }
4702
4703 /*
4704 * We want this to be reflected on every label,
4705 * so mark them all dirty. spa_unload() will do the
4706 * final sync that pushes these changes out.
4707 */
4708 if (new_state != POOL_STATE_UNINITIALIZED && !hardforce) {
4709 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
4710 spa->spa_state = new_state;
4711 spa->spa_final_txg = spa_last_synced_txg(spa) +
4712 TXG_DEFER_SIZE + 1;
4713 vdev_config_dirty(spa->spa_root_vdev);
4714 spa_config_exit(spa, SCL_ALL, FTAG);
4715 }
4716 }
4717
4718 spa_event_notify(spa, NULL, ESC_ZFS_POOL_DESTROY);
4719
4720 if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
4721 spa_unload(spa);
4722 spa_deactivate(spa);
4723 }
4724
4725 if (oldconfig && spa->spa_config)
4726 VERIFY(nvlist_dup(spa->spa_config, oldconfig, 0) == 0);
4727
4728 if (new_state != POOL_STATE_UNINITIALIZED) {
4729 if (!hardforce)
4730 spa_config_sync(spa, B_TRUE, B_TRUE);
4731 spa_remove(spa);
4732 }
4733 mutex_exit(&spa_namespace_lock);
4734
4735 return (0);
4736}
4737
4738/*
4739 * Destroy a storage pool.
4740 */
4741int
4742spa_destroy(char *pool)
4743{
4744 return (spa_export_common(pool, POOL_STATE_DESTROYED, NULL,
4745 B_FALSE, B_FALSE));
4746}
4747
4748/*
4749 * Export a storage pool.
4750 */
4751int
4752spa_export(char *pool, nvlist_t **oldconfig, boolean_t force,
4753 boolean_t hardforce)
4754{
4755 return (spa_export_common(pool, POOL_STATE_EXPORTED, oldconfig,
4756 force, hardforce));
4757}
4758
4759/*
4760 * Similar to spa_export(), this unloads the spa_t without actually removing it
4761 * from the namespace in any way.
4762 */
4763int
4764spa_reset(char *pool)
4765{
4766 return (spa_export_common(pool, POOL_STATE_UNINITIALIZED, NULL,
4767 B_FALSE, B_FALSE));
4768}
4769
4770/*
4771 * ==========================================================================
4772 * Device manipulation
4773 * ==========================================================================
4774 */
4775
4776/*
4777 * Add a device to a storage pool.
4778 */
4779int
4780spa_vdev_add(spa_t *spa, nvlist_t *nvroot)
4781{
4782 uint64_t txg, id;
4783 int error;
4784 vdev_t *rvd = spa->spa_root_vdev;
4785 vdev_t *vd, *tvd;
4786 nvlist_t **spares, **l2cache;
4787 uint_t nspares, nl2cache;
4788
4789 ASSERT(spa_writeable(spa));
4790
4791 txg = spa_vdev_enter(spa);
4792
4793 if ((error = spa_config_parse(spa, &vd, nvroot, NULL, 0,
4794 VDEV_ALLOC_ADD)) != 0)
4795 return (spa_vdev_exit(spa, NULL, txg, error));
4796
4797 spa->spa_pending_vdev = vd; /* spa_vdev_exit() will clear this */
4798
4799 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, &spares,
4800 &nspares) != 0)
4801 nspares = 0;
4802
4803 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, &l2cache,
4804 &nl2cache) != 0)
4805 nl2cache = 0;
4806
4807 if (vd->vdev_children == 0 && nspares == 0 && nl2cache == 0)
4808 return (spa_vdev_exit(spa, vd, txg, EINVAL));
4809
4810 if (vd->vdev_children != 0 &&
4811 (error = vdev_create(vd, txg, B_FALSE)) != 0)
4812 return (spa_vdev_exit(spa, vd, txg, error));
4813
4814 /*
4815 * We must validate the spares and l2cache devices after checking the
4816 * children. Otherwise, vdev_inuse() will blindly overwrite the spare.
4817 */
4818 if ((error = spa_validate_aux(spa, nvroot, txg, VDEV_ALLOC_ADD)) != 0)
4819 return (spa_vdev_exit(spa, vd, txg, error));
4820
4821 /*
4822 * Transfer each new top-level vdev from vd to rvd.
4823 */
4824 for (int c = 0; c < vd->vdev_children; c++) {
4825
4826 /*
4827 * Set the vdev id to the first hole, if one exists.
4828 */
4829 for (id = 0; id < rvd->vdev_children; id++) {
4830 if (rvd->vdev_child[id]->vdev_ishole) {
4831 vdev_free(rvd->vdev_child[id]);
4832 break;
4833 }
4834 }
4835 tvd = vd->vdev_child[c];
4836 vdev_remove_child(vd, tvd);
4837 tvd->vdev_id = id;
4838 vdev_add_child(rvd, tvd);
4839 vdev_config_dirty(tvd);
4840 }
4841
4842 if (nspares != 0) {
4843 spa_set_aux_vdevs(&spa->spa_spares, spares, nspares,
4844 ZPOOL_CONFIG_SPARES);
4845 spa_load_spares(spa);
4846 spa->spa_spares.sav_sync = B_TRUE;
4847 }
4848
4849 if (nl2cache != 0) {
4850 spa_set_aux_vdevs(&spa->spa_l2cache, l2cache, nl2cache,
4851 ZPOOL_CONFIG_L2CACHE);
4852 spa_load_l2cache(spa);
4853 spa->spa_l2cache.sav_sync = B_TRUE;
4854 }
4855
4856 /*
4857 * We have to be careful when adding new vdevs to an existing pool.
4858 * If other threads start allocating from these vdevs before we
4859 * sync the config cache, and we lose power, then upon reboot we may
4860 * fail to open the pool because there are DVAs that the config cache
4861 * can't translate. Therefore, we first add the vdevs without
4862 * initializing metaslabs; sync the config cache (via spa_vdev_exit());
4863 * and then let spa_config_update() initialize the new metaslabs.
4864 *
4865 * spa_load() checks for added-but-not-initialized vdevs, so that
4866 * if we lose power at any point in this sequence, the remaining
4867 * steps will be completed the next time we load the pool.
4868 */
4869 (void) spa_vdev_exit(spa, vd, txg, 0);
4870
4871 mutex_enter(&spa_namespace_lock);
4872 spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
4873 spa_event_notify(spa, NULL, ESC_ZFS_VDEV_ADD);
4874 mutex_exit(&spa_namespace_lock);
4875
4876 return (0);
4877}
4878
4879/*
4880 * Attach a device to a mirror. The arguments are the path to any device
4881 * in the mirror, and the nvroot for the new device. If the path specifies
4882 * a device that is not mirrored, we automatically insert the mirror vdev.
4883 *
4884 * If 'replacing' is specified, the new device is intended to replace the
4885 * existing device; in this case the two devices are made into their own
4886 * mirror using the 'replacing' vdev, which is functionally identical to
4887 * the mirror vdev (it actually reuses all the same ops) but has a few
4888 * extra rules: you can't attach to it after it's been created, and upon
4889 * completion of resilvering, the first disk (the one being replaced)
4890 * is automatically detached.
4891 */
4892int
4893spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot, int replacing)
4894{
4895 uint64_t txg, dtl_max_txg;
4896 vdev_t *rvd = spa->spa_root_vdev;
4897 vdev_t *oldvd, *newvd, *newrootvd, *pvd, *tvd;
4898 vdev_ops_t *pvops;
4899 char *oldvdpath, *newvdpath;
4900 int newvd_isspare;
4901 int error;
4902
4903 ASSERT(spa_writeable(spa));
4904
4905 txg = spa_vdev_enter(spa);
4906
4907 oldvd = spa_lookup_by_guid(spa, guid, B_FALSE);
4908
4909 if (oldvd == NULL)
4910 return (spa_vdev_exit(spa, NULL, txg, ENODEV));
4911
4912 if (!oldvd->vdev_ops->vdev_op_leaf)
4913 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
4914
4915 pvd = oldvd->vdev_parent;
4916
4917 if ((error = spa_config_parse(spa, &newrootvd, nvroot, NULL, 0,
4918 VDEV_ALLOC_ATTACH)) != 0)
4919 return (spa_vdev_exit(spa, NULL, txg, EINVAL));
4920
4921 if (newrootvd->vdev_children != 1)
4922 return (spa_vdev_exit(spa, newrootvd, txg, EINVAL));
4923
4924 newvd = newrootvd->vdev_child[0];
4925
4926 if (!newvd->vdev_ops->vdev_op_leaf)
4927 return (spa_vdev_exit(spa, newrootvd, txg, EINVAL));
4928
4929 if ((error = vdev_create(newrootvd, txg, replacing)) != 0)
4930 return (spa_vdev_exit(spa, newrootvd, txg, error));
4931
4932 /*
4933 * Spares can't replace logs
4934 */
4935 if (oldvd->vdev_top->vdev_islog && newvd->vdev_isspare)
4936 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
4937
4938 if (!replacing) {
4939 /*
4940 * For attach, the only allowable parent is a mirror or the root
4941 * vdev.
4942 */
4943 if (pvd->vdev_ops != &vdev_mirror_ops &&
4944 pvd->vdev_ops != &vdev_root_ops)
4945 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
4946
4947 pvops = &vdev_mirror_ops;
4948 } else {
4949 /*
4950 * Active hot spares can only be replaced by inactive hot
4951 * spares.
4952 */
4953 if (pvd->vdev_ops == &vdev_spare_ops &&
4954 oldvd->vdev_isspare &&
4955 !spa_has_spare(spa, newvd->vdev_guid))
4956 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
4957
4958 /*
4959 * If the source is a hot spare, and the parent isn't already a
4960 * spare, then we want to create a new hot spare. Otherwise, we
4961 * want to create a replacing vdev. The user is not allowed to
4962 * attach to a spared vdev child unless the 'isspare' state is
4963 * the same (spare replaces spare, non-spare replaces
4964 * non-spare).
4965 */
4966 if (pvd->vdev_ops == &vdev_replacing_ops &&
4967 spa_version(spa) < SPA_VERSION_MULTI_REPLACE) {
4968 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
4969 } else if (pvd->vdev_ops == &vdev_spare_ops &&
4970 newvd->vdev_isspare != oldvd->vdev_isspare) {
4971 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
4972 }
4973
4974 if (newvd->vdev_isspare)
4975 pvops = &vdev_spare_ops;
4976 else
4977 pvops = &vdev_replacing_ops;
4978 }
4979
4980 /*
4981 * Make sure the new device is big enough.
4982 */
4983 if (newvd->vdev_asize < vdev_get_min_asize(oldvd))
4984 return (spa_vdev_exit(spa, newrootvd, txg, EOVERFLOW));
4985
4986 /*
4987 * The new device cannot have a higher alignment requirement
4988 * than the top-level vdev.
4989 */
4990 if (newvd->vdev_ashift > oldvd->vdev_top->vdev_ashift)
4991 return (spa_vdev_exit(spa, newrootvd, txg, EDOM));
4992
4993 /*
4994 * If this is an in-place replacement, update oldvd's path and devid
4995 * to make it distinguishable from newvd, and unopenable from now on.
4996 */
4997 if (strcmp(oldvd->vdev_path, newvd->vdev_path) == 0) {
4998 spa_strfree(oldvd->vdev_path);
4999 oldvd->vdev_path = kmem_alloc(strlen(newvd->vdev_path) + 5,
5000 KM_SLEEP);
5001 (void) sprintf(oldvd->vdev_path, "%s/%s",
5002 newvd->vdev_path, "old");
5003 if (oldvd->vdev_devid != NULL) {
5004 spa_strfree(oldvd->vdev_devid);
5005 oldvd->vdev_devid = NULL;
5006 }
5007 }
5008
5009 /* mark the device being resilvered */
5010 newvd->vdev_resilver_txg = txg;
5011
5012 /*
5013 * If the parent is not a mirror, or if we're replacing, insert the new
5014 * mirror/replacing/spare vdev above oldvd.
5015 */
5016 if (pvd->vdev_ops != pvops)
5017 pvd = vdev_add_parent(oldvd, pvops);
5018
5019 ASSERT(pvd->vdev_top->vdev_parent == rvd);
5020 ASSERT(pvd->vdev_ops == pvops);
5021 ASSERT(oldvd->vdev_parent == pvd);
5022
5023 /*
5024 * Extract the new device from its root and add it to pvd.
5025 */
5026 vdev_remove_child(newrootvd, newvd);
5027 newvd->vdev_id = pvd->vdev_children;
5028 newvd->vdev_crtxg = oldvd->vdev_crtxg;
5029 vdev_add_child(pvd, newvd);
5030
5031 tvd = newvd->vdev_top;
5032 ASSERT(pvd->vdev_top == tvd);
5033 ASSERT(tvd->vdev_parent == rvd);
5034
5035 vdev_config_dirty(tvd);
5036
5037 /*
5038 * Set newvd's DTL to [TXG_INITIAL, dtl_max_txg) so that we account
5039 * for any dmu_sync-ed blocks. It will propagate upward when
5040 * spa_vdev_exit() calls vdev_dtl_reassess().
5041 */
5042 dtl_max_txg = txg + TXG_CONCURRENT_STATES;
5043
5044 vdev_dtl_dirty(newvd, DTL_MISSING, TXG_INITIAL,
5045 dtl_max_txg - TXG_INITIAL);
5046
5047 if (newvd->vdev_isspare) {
5048 spa_spare_activate(newvd);
5049 spa_event_notify(spa, newvd, ESC_ZFS_VDEV_SPARE);
5050 }
5051
5052 oldvdpath = spa_strdup(oldvd->vdev_path);
5053 newvdpath = spa_strdup(newvd->vdev_path);
5054 newvd_isspare = newvd->vdev_isspare;
5055
5056 /*
5057 * Mark newvd's DTL dirty in this txg.
5058 */
5059 vdev_dirty(tvd, VDD_DTL, newvd, txg);
5060
5061 /*
5062 * Schedule the resilver to restart in the future. We do this to
5063 * ensure that dmu_sync-ed blocks have been stitched into the
5064 * respective datasets.
5065 */
5066 dsl_resilver_restart(spa->spa_dsl_pool, dtl_max_txg);
5067
5068 if (spa->spa_bootfs)
5069 spa_event_notify(spa, newvd, ESC_ZFS_BOOTFS_VDEV_ATTACH);
5070
5071 spa_event_notify(spa, newvd, ESC_ZFS_VDEV_ATTACH);
5072
5073 /*
5074 * Commit the config
5075 */
5076 (void) spa_vdev_exit(spa, newrootvd, dtl_max_txg, 0);
5077
5078 spa_history_log_internal(spa, "vdev attach", NULL,
5079 "%s vdev=%s %s vdev=%s",
5080 replacing && newvd_isspare ? "spare in" :
5081 replacing ? "replace" : "attach", newvdpath,
5082 replacing ? "for" : "to", oldvdpath);
5083
5084 spa_strfree(oldvdpath);
5085 spa_strfree(newvdpath);
5086
5087 return (0);
5088}
5089
5090/*
5091 * Detach a device from a mirror or replacing vdev.
5092 *
5093 * If 'replace_done' is specified, only detach if the parent
5094 * is a replacing vdev.
5095 */
5096int
5097spa_vdev_detach(spa_t *spa, uint64_t guid, uint64_t pguid, int replace_done)
5098{
5099 uint64_t txg;
5100 int error;
5101 vdev_t *rvd = spa->spa_root_vdev;
5102 vdev_t *vd, *pvd, *cvd, *tvd;
5103 boolean_t unspare = B_FALSE;
5104 uint64_t unspare_guid = 0;
5105 char *vdpath;
5106
5107 ASSERT(spa_writeable(spa));
5108
5109 txg = spa_vdev_enter(spa);
5110
5111 vd = spa_lookup_by_guid(spa, guid, B_FALSE);
5112
5113 if (vd == NULL)
5114 return (spa_vdev_exit(spa, NULL, txg, ENODEV));
5115
5116 if (!vd->vdev_ops->vdev_op_leaf)
5117 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
5118
5119 pvd = vd->vdev_parent;
5120
5121 /*
5122 * If the parent/child relationship is not as expected, don't do it.
5123 * Consider M(A,R(B,C)) -- that is, a mirror of A with a replacing
5124 * vdev that's replacing B with C. The user's intent in replacing
5125 * is to go from M(A,B) to M(A,C). If the user decides to cancel
5126 * the replace by detaching C, the expected behavior is to end up
5127 * M(A,B). But suppose that right after deciding to detach C,
5128 * the replacement of B completes. We would have M(A,C), and then
5129 * ask to detach C, which would leave us with just A -- not what
5130 * the user wanted. To prevent this, we make sure that the
5131 * parent/child relationship hasn't changed -- in this example,
5132 * that C's parent is still the replacing vdev R.
5133 */
5134 if (pvd->vdev_guid != pguid && pguid != 0)
5135 return (spa_vdev_exit(spa, NULL, txg, EBUSY));
5136
5137 /*
5138 * Only 'replacing' or 'spare' vdevs can be replaced.
5139 */
5140 if (replace_done && pvd->vdev_ops != &vdev_replacing_ops &&
5141 pvd->vdev_ops != &vdev_spare_ops)
5142 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
5143
5144 ASSERT(pvd->vdev_ops != &vdev_spare_ops ||
5145 spa_version(spa) >= SPA_VERSION_SPARES);
5146
5147 /*
5148 * Only mirror, replacing, and spare vdevs support detach.
5149 */
5150 if (pvd->vdev_ops != &vdev_replacing_ops &&
5151 pvd->vdev_ops != &vdev_mirror_ops &&
5152 pvd->vdev_ops != &vdev_spare_ops)
5153 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
5154
5155 /*
5156 * If this device has the only valid copy of some data,
5157 * we cannot safely detach it.
5158 */
5159 if (vdev_dtl_required(vd))
5160 return (spa_vdev_exit(spa, NULL, txg, EBUSY));
5161
5162 ASSERT(pvd->vdev_children >= 2);
5163
5164 /*
5165 * If we are detaching the second disk from a replacing vdev, then
5166 * check to see if we changed the original vdev's path to have "/old"
5167 * at the end in spa_vdev_attach(). If so, undo that change now.
5168 */
5169 if (pvd->vdev_ops == &vdev_replacing_ops && vd->vdev_id > 0 &&
5170 vd->vdev_path != NULL) {
5171 size_t len = strlen(vd->vdev_path);
5172
5173 for (int c = 0; c < pvd->vdev_children; c++) {
5174 cvd = pvd->vdev_child[c];
5175
5176 if (cvd == vd || cvd->vdev_path == NULL)
5177 continue;
5178
5179 if (strncmp(cvd->vdev_path, vd->vdev_path, len) == 0 &&
5180 strcmp(cvd->vdev_path + len, "/old") == 0) {
5181 spa_strfree(cvd->vdev_path);
5182 cvd->vdev_path = spa_strdup(vd->vdev_path);
5183 break;
5184 }
5185 }
5186 }
5187
5188 /*
5189 * If we are detaching the original disk from a spare, then it implies
5190 * that the spare should become a real disk, and be removed from the
5191 * active spare list for the pool.
5192 */
5193 if (pvd->vdev_ops == &vdev_spare_ops &&
5194 vd->vdev_id == 0 &&
5195 pvd->vdev_child[pvd->vdev_children - 1]->vdev_isspare)
5196 unspare = B_TRUE;
5197
5198 /*
5199 * Erase the disk labels so the disk can be used for other things.
5200 * This must be done after all other error cases are handled,
5201 * but before we disembowel vd (so we can still do I/O to it).
5202 * But if we can't do it, don't treat the error as fatal --
5203 * it may be that the unwritability of the disk is the reason
5204 * it's being detached!
5205 */
5206 error = vdev_label_init(vd, 0, VDEV_LABEL_REMOVE);
5207
5208 /*
5209 * Remove vd from its parent and compact the parent's children.
5210 */
5211 vdev_remove_child(pvd, vd);
5212 vdev_compact_children(pvd);
5213
5214 /*
5215 * Remember one of the remaining children so we can get tvd below.
5216 */
5217 cvd = pvd->vdev_child[pvd->vdev_children - 1];
5218
5219 /*
5220 * If we need to remove the remaining child from the list of hot spares,
5221 * do it now, marking the vdev as no longer a spare in the process.
5222 * We must do this before vdev_remove_parent(), because that can
5223 * change the GUID if it creates a new toplevel GUID. For a similar
5224 * reason, we must remove the spare now, in the same txg as the detach;
5225 * otherwise someone could attach a new sibling, change the GUID, and
5226 * the subsequent attempt to spa_vdev_remove(unspare_guid) would fail.
5227 */
5228 if (unspare) {
5229 ASSERT(cvd->vdev_isspare);
5230 spa_spare_remove(cvd);
5231 unspare_guid = cvd->vdev_guid;
5232 (void) spa_vdev_remove(spa, unspare_guid, B_TRUE);
5233 cvd->vdev_unspare = B_TRUE;
5234 }
5235
5236 /*
5237 * If the parent mirror/replacing vdev only has one child,
5238 * the parent is no longer needed. Remove it from the tree.
5239 */
5240 if (pvd->vdev_children == 1) {
5241 if (pvd->vdev_ops == &vdev_spare_ops)
5242 cvd->vdev_unspare = B_FALSE;
5243 vdev_remove_parent(cvd);
5244 }
5245
5246
5247 /*
5248 * We don't set tvd until now because the parent we just removed
5249 * may have been the previous top-level vdev.
5250 */
5251 tvd = cvd->vdev_top;
5252 ASSERT(tvd->vdev_parent == rvd);
5253
5254 /*
5255 * Reevaluate the parent vdev state.
5256 */
5257 vdev_propagate_state(cvd);
5258
5259 /*
5260 * If the 'autoexpand' property is set on the pool then automatically
5261 * try to expand the size of the pool. For example if the device we
5262 * just detached was smaller than the others, it may be possible to
5263 * add metaslabs (i.e. grow the pool). We need to reopen the vdev
5264 * first so that we can obtain the updated sizes of the leaf vdevs.
5265 */
5266 if (spa->spa_autoexpand) {
5267 vdev_reopen(tvd);
5268 vdev_expand(tvd, txg);
5269 }
5270
5271 vdev_config_dirty(tvd);
5272
5273 /*
5274 * Mark vd's DTL as dirty in this txg. vdev_dtl_sync() will see that
5275 * vd->vdev_detached is set and free vd's DTL object in syncing context.
5276 * But first make sure we're not on any *other* txg's DTL list, to
5277 * prevent vd from being accessed after it's freed.
5278 */
5279 vdpath = spa_strdup(vd->vdev_path);
5280 for (int t = 0; t < TXG_SIZE; t++)
5281 (void) txg_list_remove_this(&tvd->vdev_dtl_list, vd, t);
5282 vd->vdev_detached = B_TRUE;
5283 vdev_dirty(tvd, VDD_DTL, vd, txg);
5284
5285 spa_event_notify(spa, vd, ESC_ZFS_VDEV_REMOVE);
5286
5287 /* hang on to the spa before we release the lock */
5288 spa_open_ref(spa, FTAG);
5289
5290 error = spa_vdev_exit(spa, vd, txg, 0);
5291
5292 spa_history_log_internal(spa, "detach", NULL,
5293 "vdev=%s", vdpath);
5294 spa_strfree(vdpath);
5295
5296 /*
5297 * If this was the removal of the original device in a hot spare vdev,
5298 * then we want to go through and remove the device from the hot spare
5299 * list of every other pool.
5300 */
5301 if (unspare) {
5302 spa_t *altspa = NULL;
5303
5304 mutex_enter(&spa_namespace_lock);
5305 while ((altspa = spa_next(altspa)) != NULL) {
5306 if (altspa->spa_state != POOL_STATE_ACTIVE ||
5307 altspa == spa)
5308 continue;
5309
5310 spa_open_ref(altspa, FTAG);
5311 mutex_exit(&spa_namespace_lock);
5312 (void) spa_vdev_remove(altspa, unspare_guid, B_TRUE);
5313 mutex_enter(&spa_namespace_lock);
5314 spa_close(altspa, FTAG);
5315 }
5316 mutex_exit(&spa_namespace_lock);
5317
5318 /* search the rest of the vdevs for spares to remove */
5319 spa_vdev_resilver_done(spa);
5320 }
5321
5322 /* all done with the spa; OK to release */
5323 mutex_enter(&spa_namespace_lock);
5324 spa_close(spa, FTAG);
5325 mutex_exit(&spa_namespace_lock);
5326
5327 return (error);
5328}
5329
5330/*
5331 * Split a set of devices from their mirrors, and create a new pool from them.
5332 */
5333int
5334spa_vdev_split_mirror(spa_t *spa, char *newname, nvlist_t *config,
5335 nvlist_t *props, boolean_t exp)
5336{
5337 int error = 0;
5338 uint64_t txg, *glist;
5339 spa_t *newspa;
5340 uint_t c, children, lastlog;
5341 nvlist_t **child, *nvl, *tmp;
5342 dmu_tx_t *tx;
5343 char *altroot = NULL;
5344 vdev_t *rvd, **vml = NULL; /* vdev modify list */
5345 boolean_t activate_slog;
5346
5347 ASSERT(spa_writeable(spa));
5348
5349 txg = spa_vdev_enter(spa);
5350
5351 /* clear the log and flush everything up to now */
5352 activate_slog = spa_passivate_log(spa);
5353 (void) spa_vdev_config_exit(spa, NULL, txg, 0, FTAG);
5354 error = spa_offline_log(spa);
5355 txg = spa_vdev_config_enter(spa);
5356
5357 if (activate_slog)
5358 spa_activate_log(spa);
5359
5360 if (error != 0)
5361 return (spa_vdev_exit(spa, NULL, txg, error));
5362
5363 /* check new spa name before going any further */
5364 if (spa_lookup(newname) != NULL)
5365 return (spa_vdev_exit(spa, NULL, txg, EEXIST));
5366
5367 /*
5368 * scan through all the children to ensure they're all mirrors
5369 */
5370 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvl) != 0 ||
5371 nvlist_lookup_nvlist_array(nvl, ZPOOL_CONFIG_CHILDREN, &child,
5372 &children) != 0)
5373 return (spa_vdev_exit(spa, NULL, txg, EINVAL));
5374
5375 /* first, check to ensure we've got the right child count */
5376 rvd = spa->spa_root_vdev;
5377 lastlog = 0;
5378 for (c = 0; c < rvd->vdev_children; c++) {
5379 vdev_t *vd = rvd->vdev_child[c];
5380
5381 /* don't count the holes & logs as children */
5382 if (vd->vdev_islog || vd->vdev_ishole) {
5383 if (lastlog == 0)
5384 lastlog = c;
5385 continue;
5386 }
5387
5388 lastlog = 0;
5389 }
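	/*
	 * 'lastlog' is now the index of the first vdev in a trailing run of
	 * log/hole vdevs (or 0 if there is no such run), so the caller must
	 * have specified exactly one disk for each leading non-log
	 * top-level vdev.
	 */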
5390 if (children != (lastlog != 0 ? lastlog : rvd->vdev_children))
5391 return (spa_vdev_exit(spa, NULL, txg, EINVAL));
5392
5393 /* next, ensure no spare or cache devices are part of the split */
5394 if (nvlist_lookup_nvlist(nvl, ZPOOL_CONFIG_SPARES, &tmp) == 0 ||
5395 nvlist_lookup_nvlist(nvl, ZPOOL_CONFIG_L2CACHE, &tmp) == 0)
5396 return (spa_vdev_exit(spa, NULL, txg, EINVAL));
5397
5398 vml = kmem_zalloc(children * sizeof (vdev_t *), KM_SLEEP);
5399 glist = kmem_zalloc(children * sizeof (uint64_t), KM_SLEEP);
5400
5401 /* then, loop over each vdev and validate it */
5402 for (c = 0; c < children; c++) {
5403 uint64_t is_hole = 0;
5404
5405 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
5406 &is_hole);
5407
5408 if (is_hole != 0) {
5409 if (spa->spa_root_vdev->vdev_child[c]->vdev_ishole ||
5410 spa->spa_root_vdev->vdev_child[c]->vdev_islog) {
5411 continue;
5412 } else {
5413 error = SET_ERROR(EINVAL);
5414 break;
5415 }
5416 }
5417
5418 /* which disk is going to be split? */
5419 if (nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_GUID,
5420 &glist[c]) != 0) {
5421 error = SET_ERROR(EINVAL);
5422 break;
5423 }
5424
5425 /* look it up in the spa */
5426 vml[c] = spa_lookup_by_guid(spa, glist[c], B_FALSE);
5427 if (vml[c] == NULL) {
5428 error = SET_ERROR(ENODEV);
5429 break;
5430 }
5431
5432 /* make sure there's nothing stopping the split */
5433 if (vml[c]->vdev_parent->vdev_ops != &vdev_mirror_ops ||
5434 vml[c]->vdev_islog ||
5435 vml[c]->vdev_ishole ||
5436 vml[c]->vdev_isspare ||
5437 vml[c]->vdev_isl2cache ||
5438 !vdev_writeable(vml[c]) ||
5439 vml[c]->vdev_children != 0 ||
5440 vml[c]->vdev_state != VDEV_STATE_HEALTHY ||
5441 c != spa->spa_root_vdev->vdev_child[c]->vdev_id) {
5442 error = SET_ERROR(EINVAL);
5443 break;
5444 }
5445
5446 if (vdev_dtl_required(vml[c])) {
5447 error = SET_ERROR(EBUSY);
5448 break;
5449 }
5450
5451 /* we need certain info from the top level */
5452 VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_METASLAB_ARRAY,
5453 vml[c]->vdev_top->vdev_ms_array) == 0);
5454 VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_METASLAB_SHIFT,
5455 vml[c]->vdev_top->vdev_ms_shift) == 0);
5456 VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_ASIZE,
5457 vml[c]->vdev_top->vdev_asize) == 0);
5458 VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_ASHIFT,
5459 vml[c]->vdev_top->vdev_ashift) == 0);
5460
5461 /* transfer per-vdev ZAPs */
5462 ASSERT3U(vml[c]->vdev_leaf_zap, !=, 0);
5463 VERIFY0(nvlist_add_uint64(child[c],
5464 ZPOOL_CONFIG_VDEV_LEAF_ZAP, vml[c]->vdev_leaf_zap));
5465
5466 ASSERT3U(vml[c]->vdev_top->vdev_top_zap, !=, 0);
5467 VERIFY0(nvlist_add_uint64(child[c],
5468 ZPOOL_CONFIG_VDEV_TOP_ZAP,
5469 vml[c]->vdev_parent->vdev_top_zap));
5470 }
5471
5472 if (error != 0) {
5473 kmem_free(vml, children * sizeof (vdev_t *));
5474 kmem_free(glist, children * sizeof (uint64_t));
5475 return (spa_vdev_exit(spa, NULL, txg, error));
5476 }
5477
5478 /* stop writers from using the disks */
5479 for (c = 0; c < children; c++) {
5480 if (vml[c] != NULL)
5481 vml[c]->vdev_offline = B_TRUE;
5482 }
5483 vdev_reopen(spa->spa_root_vdev);
5484
5485 /*
5486 * Temporarily record the splitting vdevs in the spa config. This
5487 * will disappear once the config is regenerated.
5488 */
5489 VERIFY(nvlist_alloc(&nvl, NV_UNIQUE_NAME, KM_SLEEP) == 0);
5490 VERIFY(nvlist_add_uint64_array(nvl, ZPOOL_CONFIG_SPLIT_LIST,
5491 glist, children) == 0);
5492 kmem_free(glist, children * sizeof (uint64_t));
5493
5494 mutex_enter(&spa->spa_props_lock);
5495 VERIFY(nvlist_add_nvlist(spa->spa_config, ZPOOL_CONFIG_SPLIT,
5496 nvl) == 0);
5497 mutex_exit(&spa->spa_props_lock);
5498 spa->spa_config_splitting = nvl;
5499 vdev_config_dirty(spa->spa_root_vdev);
5500
5501 /* configure and create the new pool */
5502 VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME, newname) == 0);
5503 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE,
5504 exp ? POOL_STATE_EXPORTED : POOL_STATE_ACTIVE) == 0);
5505 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_VERSION,
5506 spa_version(spa)) == 0);
5507 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_TXG,
5508 spa->spa_config_txg) == 0);
5509 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_GUID,
5510 spa_generate_guid(NULL)) == 0);
5511 VERIFY0(nvlist_add_boolean(config, ZPOOL_CONFIG_HAS_PER_VDEV_ZAPS));
5512 (void) nvlist_lookup_string(props,
5513 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
5514
5515 /* add the new pool to the namespace */
5516 newspa = spa_add(newname, config, altroot);
5517 newspa->spa_avz_action = AVZ_ACTION_REBUILD;
5518 newspa->spa_config_txg = spa->spa_config_txg;
5519 spa_set_log_state(newspa, SPA_LOG_CLEAR);
5520
5521 /* release the spa config lock, retaining the namespace lock */
5522 spa_vdev_config_exit(spa, NULL, txg, 0, FTAG);
5523
5524 if (zio_injection_enabled)
5525 zio_handle_panic_injection(spa, FTAG, 1);
5526
5527 spa_activate(newspa, spa_mode_global);
5528 spa_async_suspend(newspa);
5529
5530#ifndef illumos
5531	/* mark that we are creating a new spa by splitting */
5532 newspa->spa_splitting_newspa = B_TRUE;
5533#endif
5534 /* create the new pool from the disks of the original pool */
5535 error = spa_load(newspa, SPA_LOAD_IMPORT, SPA_IMPORT_ASSEMBLE, B_TRUE);
5536#ifndef illumos
5537 newspa->spa_splitting_newspa = B_FALSE;
5538#endif
5539 if (error)
5540 goto out;
5541
5542 /* if that worked, generate a real config for the new pool */
5543 if (newspa->spa_root_vdev != NULL) {
5544 VERIFY(nvlist_alloc(&newspa->spa_config_splitting,
5545 NV_UNIQUE_NAME, KM_SLEEP) == 0);
5546 VERIFY(nvlist_add_uint64(newspa->spa_config_splitting,
5547 ZPOOL_CONFIG_SPLIT_GUID, spa_guid(spa)) == 0);
5548 spa_config_set(newspa, spa_config_generate(newspa, NULL, -1ULL,
5549 B_TRUE));
5550 }
5551
5552 /* set the props */
5553 if (props != NULL) {
5554 spa_configfile_set(newspa, props, B_FALSE);
5555 error = spa_prop_set(newspa, props);
5556 if (error)
5557 goto out;
5558 }
5559
5560 /* flush everything */
5561 txg = spa_vdev_config_enter(newspa);
5562 vdev_config_dirty(newspa->spa_root_vdev);
5563 (void) spa_vdev_config_exit(newspa, NULL, txg, 0, FTAG);
5564
5565 if (zio_injection_enabled)
5566 zio_handle_panic_injection(spa, FTAG, 2);
5567
5568 spa_async_resume(newspa);
5569
5570 /* finally, update the original pool's config */
5571 txg = spa_vdev_config_enter(spa);
5572 tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
5573 error = dmu_tx_assign(tx, TXG_WAIT);
5574 if (error != 0)
5575 dmu_tx_abort(tx);
5576 for (c = 0; c < children; c++) {
5577 if (vml[c] != NULL) {
5578 vdev_split(vml[c]);
5579 if (error == 0)
5580 spa_history_log_internal(spa, "detach", tx,
5581 "vdev=%s", vml[c]->vdev_path);
5582
5583 vdev_free(vml[c]);
5584 }
5585 }
5586 spa->spa_avz_action = AVZ_ACTION_REBUILD;
5587 vdev_config_dirty(spa->spa_root_vdev);
5588 spa->spa_config_splitting = NULL;
5589 nvlist_free(nvl);
5590 if (error == 0)
5591 dmu_tx_commit(tx);
5592 (void) spa_vdev_exit(spa, NULL, txg, 0);
5593
5594 if (zio_injection_enabled)
5595 zio_handle_panic_injection(spa, FTAG, 3);
5596
5597 /* split is complete; log a history record */
5598 spa_history_log_internal(newspa, "split", NULL,
5599 "from pool %s", spa_name(spa));
5600
5601 kmem_free(vml, children * sizeof (vdev_t *));
5602
5603 /* if we're not going to mount the filesystems in userland, export */
5604 if (exp)
5605 error = spa_export_common(newname, POOL_STATE_EXPORTED, NULL,
5606 B_FALSE, B_FALSE);
5607
5608 return (error);
5609
5610out:
5611 spa_unload(newspa);
5612 spa_deactivate(newspa);
5613 spa_remove(newspa);
5614
5615 txg = spa_vdev_config_enter(spa);
5616
5617 /* re-online all offlined disks */
5618 for (c = 0; c < children; c++) {
5619 if (vml[c] != NULL)
5620 vml[c]->vdev_offline = B_FALSE;
5621 }
5622 vdev_reopen(spa->spa_root_vdev);
5623
5624 nvlist_free(spa->spa_config_splitting);
5625 spa->spa_config_splitting = NULL;
5626 (void) spa_vdev_exit(spa, NULL, txg, error);
5627
5628 kmem_free(vml, children * sizeof (vdev_t *));
5629 return (error);
5630}
5631
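/*
 * Return the nvlist in the 'nvpp' array whose ZPOOL_CONFIG_GUID matches
 * 'target_guid', or NULL if there is no match.
 */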
5632static nvlist_t *
5633spa_nvlist_lookup_by_guid(nvlist_t **nvpp, int count, uint64_t target_guid)
5634{
5635 for (int i = 0; i < count; i++) {
5636 uint64_t guid;
5637
5638 VERIFY(nvlist_lookup_uint64(nvpp[i], ZPOOL_CONFIG_GUID,
5639 &guid) == 0);
5640
5641 if (guid == target_guid)
5642 return (nvpp[i]);
5643 }
5644
5645 return (NULL);
5646}
5647
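/*
 * Replace the named nvlist array in 'config' with a copy that omits
 * 'dev_to_remove'; used when removing a hot spare or cache device.
 */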
5648static void
5649spa_vdev_remove_aux(nvlist_t *config, char *name, nvlist_t **dev, int count,
5650 nvlist_t *dev_to_remove)
5651{
5652 nvlist_t **newdev = NULL;
5653
5654 if (count > 1)
5655 newdev = kmem_alloc((count - 1) * sizeof (void *), KM_SLEEP);
5656
5657 for (int i = 0, j = 0; i < count; i++) {
5658 if (dev[i] == dev_to_remove)
5659 continue;
5660 VERIFY(nvlist_dup(dev[i], &newdev[j++], KM_SLEEP) == 0);
5661 }
5662
5663 VERIFY(nvlist_remove(config, name, DATA_TYPE_NVLIST_ARRAY) == 0);
5664 VERIFY(nvlist_add_nvlist_array(config, name, newdev, count - 1) == 0);
5665
5666 for (int i = 0; i < count - 1; i++)
5667 nvlist_free(newdev[i]);
5668
5669 if (count > 1)
5670 kmem_free(newdev, (count - 1) * sizeof (void *));
5671}
5672
5673/*
5674 * Evacuate the device.
5675 */
5676static int
5677spa_vdev_remove_evacuate(spa_t *spa, vdev_t *vd)
5678{
5679 uint64_t txg;
5680 int error = 0;
5681
5682 ASSERT(MUTEX_HELD(&spa_namespace_lock));
5683 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
5684 ASSERT(vd == vd->vdev_top);
5685
5686 /*
5687 * Evacuate the device. We don't hold the config lock as writer
5688 * since we need to do I/O but we do keep the
5689 * spa_namespace_lock held. Once this completes the device
5690 * should no longer have any blocks allocated on it.
5691 */
5692 if (vd->vdev_islog) {
5693 if (vd->vdev_stat.vs_alloc != 0)
5694 error = spa_offline_log(spa);
5695 } else {
5696 error = SET_ERROR(ENOTSUP);
5697 }
5698
5699 if (error)
5700 return (error);
5701
5702 /*
5703 * The evacuation succeeded. Remove any remaining MOS metadata
5704 * associated with this vdev, and wait for these changes to sync.
5705 */
5706 ASSERT0(vd->vdev_stat.vs_alloc);
5707 txg = spa_vdev_config_enter(spa);
5708 vd->vdev_removing = B_TRUE;
5709 vdev_dirty_leaves(vd, VDD_DTL, txg);
5710 vdev_config_dirty(vd);
5711 spa_vdev_config_exit(spa, NULL, txg, 0, FTAG);
5712
5713 return (0);
5714}
5715
5716/*
5717 * Complete the removal by cleaning up the namespace.
5718 */
5719static void
5720spa_vdev_remove_from_namespace(spa_t *spa, vdev_t *vd)
5721{
5722 vdev_t *rvd = spa->spa_root_vdev;
5723 uint64_t id = vd->vdev_id;
5724 boolean_t last_vdev = (id == (rvd->vdev_children - 1));
5725
5726 ASSERT(MUTEX_HELD(&spa_namespace_lock));
5727 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
5728 ASSERT(vd == vd->vdev_top);
5729
5730 /*
5731 * Only remove any devices which are empty.
5732 */
5733 if (vd->vdev_stat.vs_alloc != 0)
5734 return;
5735
5736 (void) vdev_label_init(vd, 0, VDEV_LABEL_REMOVE);
5737
5738 if (list_link_active(&vd->vdev_state_dirty_node))
5739 vdev_state_clean(vd);
5740 if (list_link_active(&vd->vdev_config_dirty_node))
5741 vdev_config_clean(vd);
5742
5743 vdev_free(vd);
5744
5745 if (last_vdev) {
5746 vdev_compact_children(rvd);
5747 } else {
5748 vd = vdev_alloc_common(spa, id, 0, &vdev_hole_ops);
5749 vdev_add_child(rvd, vd);
5750 }
5751 vdev_config_dirty(rvd);
5752
5753 /*
5754 * Reassess the health of our root vdev.
5755 */
5756 vdev_reopen(rvd);
5757}
5758
5759/*
5760 * Remove a device from the pool -
5761 *
5762 * Removing a device from the vdev namespace requires several steps
5763 * and can take a significant amount of time. As a result we use
5764 * the spa_vdev_config_[enter/exit] functions which allow us to
5765 * grab and release the spa_config_lock while still holding the namespace
5766 * lock. During each step the configuration is synced out.
5767 *
5768 * Currently, this supports removing only hot spares, slogs, and level 2 ARC
5769 * devices.
5770 */
5771int
5772spa_vdev_remove(spa_t *spa, uint64_t guid, boolean_t unspare)
5773{
5774 vdev_t *vd;
5775 sysevent_t *ev = NULL;
5776 metaslab_group_t *mg;
5777 nvlist_t **spares, **l2cache, *nv;
5778 uint64_t txg = 0;
5779 uint_t nspares, nl2cache;
5780 int error = 0;
5781 boolean_t locked = MUTEX_HELD(&spa_namespace_lock);
5782
5783 ASSERT(spa_writeable(spa));
5784
5785 if (!locked)
5786 txg = spa_vdev_enter(spa);
5787
5788 vd = spa_lookup_by_guid(spa, guid, B_FALSE);
5789
5790 if (spa->spa_spares.sav_vdevs != NULL &&
5791 nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
5792 ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0 &&
5793 (nv = spa_nvlist_lookup_by_guid(spares, nspares, guid)) != NULL) {
5794 /*
5795 * Only remove the hot spare if it's not currently in use
5796 * in this pool.
5797 */
5798 if (vd == NULL || unspare) {
5799 if (vd == NULL)
5800 vd = spa_lookup_by_guid(spa, guid, B_TRUE);
5801 ev = spa_event_create(spa, vd, ESC_ZFS_VDEV_REMOVE_AUX);
5802 spa_vdev_remove_aux(spa->spa_spares.sav_config,
5803 ZPOOL_CONFIG_SPARES, spares, nspares, nv);
5804 spa_load_spares(spa);
5805 spa->spa_spares.sav_sync = B_TRUE;
5806 } else {
5807 error = SET_ERROR(EBUSY);
5808 }
5809 } else if (spa->spa_l2cache.sav_vdevs != NULL &&
5810 nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config,
5811 ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0 &&
5812 (nv = spa_nvlist_lookup_by_guid(l2cache, nl2cache, guid)) != NULL) {
5813 /*
5814 * Cache devices can always be removed.
5815 */
5816 vd = spa_lookup_by_guid(spa, guid, B_TRUE);
5817 ev = spa_event_create(spa, vd, ESC_ZFS_VDEV_REMOVE_AUX);
5818 spa_vdev_remove_aux(spa->spa_l2cache.sav_config,
5819 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache, nv);
5820 spa_load_l2cache(spa);
5821 spa->spa_l2cache.sav_sync = B_TRUE;
5822 } else if (vd != NULL && vd->vdev_islog) {
5823 ASSERT(!locked);
5824 ASSERT(vd == vd->vdev_top);
5825
5826 mg = vd->vdev_mg;
5827
5828 /*
5829 * Stop allocating from this vdev.
5830 */
5831 metaslab_group_passivate(mg);
5832
5833 /*
5834 * Wait for the youngest allocations and frees to sync,
5835 * and then wait for the deferral of those frees to finish.
5836 */
5837 spa_vdev_config_exit(spa, NULL,
5838 txg + TXG_CONCURRENT_STATES + TXG_DEFER_SIZE, 0, FTAG);
5839
5840 /*
5841 * Attempt to evacuate the vdev.
5842 */
5843 error = spa_vdev_remove_evacuate(spa, vd);
5844
5845 txg = spa_vdev_config_enter(spa);
5846
5847 /*
5848 * If we couldn't evacuate the vdev, unwind.
5849 */
5850 if (error) {
5851 metaslab_group_activate(mg);
5852 return (spa_vdev_exit(spa, NULL, txg, error));
5853 }
5854
5855 /*
5856 * Clean up the vdev namespace.
5857 */
5858 ev = spa_event_create(spa, vd, ESC_ZFS_VDEV_REMOVE_DEV);
5859 spa_vdev_remove_from_namespace(spa, vd);
5860
5861 } else if (vd != NULL) {
5862 /*
5863 * Normal vdevs cannot be removed (yet).
5864 */
5865 error = SET_ERROR(ENOTSUP);
5866 } else {
5867 /*
5868 * There is no vdev of any kind with the specified guid.
5869 */
5870 error = SET_ERROR(ENOENT);
5871 }
5872
5873 if (!locked)
5874 error = spa_vdev_exit(spa, NULL, txg, error);
5875
5876 if (ev)
5877 spa_event_post(ev);
5878
5879 return (error);
5880}
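
/*
 * Illustrative sketch, not part of the build: a caller that does not already
 * hold spa_namespace_lock can let spa_vdev_remove() run the whole vdev
 * transaction itself, e.g. to pull a cache device or an unused spare by guid:
 *
 *	error = spa_vdev_remove(spa, guid, B_FALSE);
 *
 * Passing unspare = B_TRUE additionally allows removal of a hot spare that is
 * currently in use, as handled by the first branch above.
 */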
5881
5882/*
5883 * Find any device that's done replacing, or a vdev marked 'unspare' that's
5884 * currently spared, so we can detach it.
5885 */
5886static vdev_t *
5887spa_vdev_resilver_done_hunt(vdev_t *vd)
5888{
5889 vdev_t *newvd, *oldvd;
5890
5891 for (int c = 0; c < vd->vdev_children; c++) {
5892 oldvd = spa_vdev_resilver_done_hunt(vd->vdev_child[c]);
5893 if (oldvd != NULL)
5894 return (oldvd);
5895 }
5896
5897 /*
5898 * Check for a completed replacement. We always consider the first
5899 * vdev in the list to be the oldest vdev, and the last one to be
5900 * the newest (see spa_vdev_attach() for how that works). In
5901 * the case where the newest vdev is faulted, we will not automatically
5902 * remove it after a resilver completes. This is OK as it will require
5903 * user intervention to determine which disk the admin wishes to keep.
5904 */
5905 if (vd->vdev_ops == &vdev_replacing_ops) {
5906 ASSERT(vd->vdev_children > 1);
5907
5908 newvd = vd->vdev_child[vd->vdev_children - 1];
5909 oldvd = vd->vdev_child[0];
5910
5911 if (vdev_dtl_empty(newvd, DTL_MISSING) &&
5912 vdev_dtl_empty(newvd, DTL_OUTAGE) &&
5913 !vdev_dtl_required(oldvd))
5914 return (oldvd);
5915 }
5916
5917 /*
5918 * Check for a completed resilver with the 'unspare' flag set.
5919 */
5920 if (vd->vdev_ops == &vdev_spare_ops) {
5921 vdev_t *first = vd->vdev_child[0];
5922 vdev_t *last = vd->vdev_child[vd->vdev_children - 1];
5923
5924 if (last->vdev_unspare) {
5925 oldvd = first;
5926 newvd = last;
5927 } else if (first->vdev_unspare) {
5928 oldvd = last;
5929 newvd = first;
5930 } else {
5931 oldvd = NULL;
5932 }
5933
5934 if (oldvd != NULL &&
5935 vdev_dtl_empty(newvd, DTL_MISSING) &&
5936 vdev_dtl_empty(newvd, DTL_OUTAGE) &&
5937 !vdev_dtl_required(oldvd))
5938 return (oldvd);
5939
5940 /*
5941 * If there are more than two spares attached to a disk,
5942 * and those spares are not required, then we want to
5943 * attempt to free them up now so that they can be used
5944 * by other pools. Once we're back down to a single
5945 * disk+spare, we stop removing them.
5946 */
5947 if (vd->vdev_children > 2) {
5948 newvd = vd->vdev_child[1];
5949
5950 if (newvd->vdev_isspare && last->vdev_isspare &&
5951 vdev_dtl_empty(last, DTL_MISSING) &&
5952 vdev_dtl_empty(last, DTL_OUTAGE) &&
5953 !vdev_dtl_required(newvd))
5954 return (newvd);
5955 }
5956 }
5957
5958 return (NULL);
5959}
5960
5961static void
5962spa_vdev_resilver_done(spa_t *spa)
5963{
5964 vdev_t *vd, *pvd, *ppvd;
5965 uint64_t guid, sguid, pguid, ppguid;
5966
5967 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
5968
5969 while ((vd = spa_vdev_resilver_done_hunt(spa->spa_root_vdev)) != NULL) {
5970 pvd = vd->vdev_parent;
5971 ppvd = pvd->vdev_parent;
5972 guid = vd->vdev_guid;
5973 pguid = pvd->vdev_guid;
5974 ppguid = ppvd->vdev_guid;
5975 sguid = 0;
5976 /*
5977 * If we have just finished replacing a hot spared device, then
5978 * we need to detach the parent's first child (the original hot
5979 * spare) as well.
5980 */
5981 if (ppvd->vdev_ops == &vdev_spare_ops && pvd->vdev_id == 0 &&
5982 ppvd->vdev_children == 2) {
5983 ASSERT(pvd->vdev_ops == &vdev_replacing_ops);
5984 sguid = ppvd->vdev_child[1]->vdev_guid;
5985 }
5986 ASSERT(vd->vdev_resilver_txg == 0 || !vdev_dtl_required(vd));
5987
5988 spa_config_exit(spa, SCL_ALL, FTAG);
5989 if (spa_vdev_detach(spa, guid, pguid, B_TRUE) != 0)
5990 return;
5991 if (sguid && spa_vdev_detach(spa, sguid, ppguid, B_TRUE) != 0)
5992 return;
5993 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
5994 }
5995
5996 spa_config_exit(spa, SCL_ALL, FTAG);
5997}
5998
5999/*
6000 * Update the stored path or FRU for this vdev.
6001 */
6002int
6003spa_vdev_set_common(spa_t *spa, uint64_t guid, const char *value,
6004 boolean_t ispath)
6005{
6006 vdev_t *vd;
6007 boolean_t sync = B_FALSE;
6008
6009 ASSERT(spa_writeable(spa));
6010
6011 spa_vdev_state_enter(spa, SCL_ALL);
6012
6013 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
6014 return (spa_vdev_state_exit(spa, NULL, ENOENT));
6015
6016 if (!vd->vdev_ops->vdev_op_leaf)
6017 return (spa_vdev_state_exit(spa, NULL, ENOTSUP));
6018
6019 if (ispath) {
6020 if (strcmp(value, vd->vdev_path) != 0) {
6021 spa_strfree(vd->vdev_path);
6022 vd->vdev_path = spa_strdup(value);
6023 sync = B_TRUE;
6024 }
6025 } else {
6026 if (vd->vdev_fru == NULL) {
6027 vd->vdev_fru = spa_strdup(value);
6028 sync = B_TRUE;
6029 } else if (strcmp(value, vd->vdev_fru) != 0) {
6030 spa_strfree(vd->vdev_fru);
6031 vd->vdev_fru = spa_strdup(value);
6032 sync = B_TRUE;
6033 }
6034 }
6035
6036 return (spa_vdev_state_exit(spa, sync ? vd : NULL, 0));
6037}
6038
6039int
6040spa_vdev_setpath(spa_t *spa, uint64_t guid, const char *newpath)
6041{
6042 return (spa_vdev_set_common(spa, guid, newpath, B_TRUE));
6043}
6044
6045int
6046spa_vdev_setfru(spa_t *spa, uint64_t guid, const char *newfru)
6047{
6048 return (spa_vdev_set_common(spa, guid, newfru, B_FALSE));
6049}
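
/*
 * Illustrative sketch, not part of the build: both wrappers funnel into
 * spa_vdev_set_common(); e.g. recording a new device path for a leaf vdev
 * (the path string is an example only):
 *
 *	error = spa_vdev_setpath(spa, guid, "/dev/da3");
 *
 * The new value is duplicated with spa_strdup(), and the vdev is handed to
 * spa_vdev_state_exit() for a config sync only if something actually changed.
 */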
6050
6051/*
6052 * ==========================================================================
6053 * SPA Scanning
6054 * ==========================================================================
6055 */
6056
6057int
6058spa_scan_stop(spa_t *spa)
6059{
6060 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
6061 if (dsl_scan_resilvering(spa->spa_dsl_pool))
6062 return (SET_ERROR(EBUSY));
6063 return (dsl_scan_cancel(spa->spa_dsl_pool));
6064}
6065
6066int
6067spa_scan(spa_t *spa, pool_scan_func_t func)
6068{
6069 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
6070
6071 if (func >= POOL_SCAN_FUNCS || func == POOL_SCAN_NONE)
6072 return (SET_ERROR(ENOTSUP));
6073
6074 /*
6075 * If a resilver was requested, but there is no DTL on a
6076 * writeable leaf device, we have nothing to do.
6077 */
6078 if (func == POOL_SCAN_RESILVER &&
6079 !vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL)) {
6080 spa_async_request(spa, SPA_ASYNC_RESILVER_DONE);
6081 return (0);
6082 }
6083
6084 return (dsl_scan(spa->spa_dsl_pool, func));
6085}
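
/*
 * Illustrative sketch, not part of the build: starting and cancelling a scrub
 * go through the two entry points above, e.g.:
 *
 *	error = spa_scan(spa, POOL_SCAN_SCRUB);
 *	...
 *	error = spa_scan_stop(spa);
 *
 * spa_scan_stop() refuses with EBUSY while a resilver is in progress, since
 * a resilver must be allowed to finish.
 */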
6086
6087/*
6088 * ==========================================================================
6089 * SPA async task processing
6090 * ==========================================================================
6091 */
6092
6093static void
6094spa_async_remove(spa_t *spa, vdev_t *vd)
6095{
6096 if (vd->vdev_remove_wanted) {
6097 vd->vdev_remove_wanted = B_FALSE;
6098 vd->vdev_delayed_close = B_FALSE;
6099 vdev_set_state(vd, B_FALSE, VDEV_STATE_REMOVED, VDEV_AUX_NONE);
6100
6101 /*
6102 * We want to clear the stats, but we don't want to do a full
6103 * vdev_clear() as that will cause us to throw away
6104 * degraded/faulted state as well as attempt to reopen the
6105 * device, all of which is a waste.
6106 */
6107 vd->vdev_stat.vs_read_errors = 0;
6108 vd->vdev_stat.vs_write_errors = 0;
6109 vd->vdev_stat.vs_checksum_errors = 0;
6110
6111 vdev_state_dirty(vd->vdev_top);
6112 /* Tell userspace that the vdev is gone. */
6113 zfs_post_remove(spa, vd);
6114 }
6115
6116 for (int c = 0; c < vd->vdev_children; c++)
6117 spa_async_remove(spa, vd->vdev_child[c]);
6118}
6119
6120static void
6121spa_async_probe(spa_t *spa, vdev_t *vd)
6122{
6123 if (vd->vdev_probe_wanted) {
6124 vd->vdev_probe_wanted = B_FALSE;
6125 vdev_reopen(vd); /* vdev_open() does the actual probe */
6126 }
6127
6128 for (int c = 0; c < vd->vdev_children; c++)
6129 spa_async_probe(spa, vd->vdev_child[c]);
6130}
6131
6132static void
6133spa_async_autoexpand(spa_t *spa, vdev_t *vd)
6134{
6135 sysevent_id_t eid;
6136 nvlist_t *attr;
6137 char *physpath;
6138
6139 if (!spa->spa_autoexpand)
6140 return;
6141
6142 for (int c = 0; c < vd->vdev_children; c++) {
6143 vdev_t *cvd = vd->vdev_child[c];
6144 spa_async_autoexpand(spa, cvd);
6145 }
6146
6147 if (!vd->vdev_ops->vdev_op_leaf || vd->vdev_physpath == NULL)
6148 return;
6149
6150 physpath = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
6151 (void) snprintf(physpath, MAXPATHLEN, "/devices%s", vd->vdev_physpath);
6152
6153 VERIFY(nvlist_alloc(&attr, NV_UNIQUE_NAME, KM_SLEEP) == 0);
6154 VERIFY(nvlist_add_string(attr, DEV_PHYS_PATH, physpath) == 0);
6155
6156 (void) ddi_log_sysevent(zfs_dip, SUNW_VENDOR, EC_DEV_STATUS,
6157 ESC_ZFS_VDEV_AUTOEXPAND, attr, &eid, DDI_SLEEP);
6158
6159 nvlist_free(attr);
6160 kmem_free(physpath, MAXPATHLEN);
6161}
6162
6163static void
6164spa_async_thread(void *arg)
6165{
6166 spa_t *spa = arg;
6167 int tasks;
6168
6169 ASSERT(spa->spa_sync_on);
6170
6171 mutex_enter(&spa->spa_async_lock);
6172 tasks = spa->spa_async_tasks;
6173 spa->spa_async_tasks &= SPA_ASYNC_REMOVE;
6174 mutex_exit(&spa->spa_async_lock);
6175
6176 /*
6177 * See if the config needs to be updated.
6178 */
6179 if (tasks & SPA_ASYNC_CONFIG_UPDATE) {
6180 uint64_t old_space, new_space;
6181
6182 mutex_enter(&spa_namespace_lock);
6183 old_space = metaslab_class_get_space(spa_normal_class(spa));
6184 spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
6185 new_space = metaslab_class_get_space(spa_normal_class(spa));
6186 mutex_exit(&spa_namespace_lock);
6187
6188 /*
6189 * If the pool grew as a result of the config update,
6190 * then log an internal history event.
6191 */
6192 if (new_space != old_space) {
6193 spa_history_log_internal(spa, "vdev online", NULL,
6194 "pool '%s' size: %llu(+%llu)",
6195 spa_name(spa), new_space, new_space - old_space);
6196 }
6197 }
6198
6199 if ((tasks & SPA_ASYNC_AUTOEXPAND) && !spa_suspended(spa)) {
6200 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
6201 spa_async_autoexpand(spa, spa->spa_root_vdev);
6202 spa_config_exit(spa, SCL_CONFIG, FTAG);
6203 }
6204
6205 /*
6206 * See if any devices need to be probed.
6207 */
6208 if (tasks & SPA_ASYNC_PROBE) {
6209 spa_vdev_state_enter(spa, SCL_NONE);
6210 spa_async_probe(spa, spa->spa_root_vdev);
6211 (void) spa_vdev_state_exit(spa, NULL, 0);
6212 }
6213
6214 /*
6215 * If any devices are done replacing, detach them.
6216 */
6217 if (tasks & SPA_ASYNC_RESILVER_DONE)
6218 spa_vdev_resilver_done(spa);
6219
6220 /*
6221 * Kick off a resilver.
6222 */
6223 if (tasks & SPA_ASYNC_RESILVER)
6224 dsl_resilver_restart(spa->spa_dsl_pool, 0);
6225
6226 /*
6227 * Let the world know that we're done.
6228 */
6229 mutex_enter(&spa->spa_async_lock);
6230 spa->spa_async_thread = NULL;
6231 cv_broadcast(&spa->spa_async_cv);
6232 mutex_exit(&spa->spa_async_lock);
6233 thread_exit();
6234}
6235
6236static void
6237spa_async_thread_vd(void *arg)
6238{
6239 spa_t *spa = arg;
6240 int tasks;
6241
6242 ASSERT(spa->spa_sync_on);
6243
6244 mutex_enter(&spa->spa_async_lock);
6245 tasks = spa->spa_async_tasks;
6246retry:
6247 spa->spa_async_tasks &= ~SPA_ASYNC_REMOVE;
6248 mutex_exit(&spa->spa_async_lock);
6249
6250 /*
6251 * See if any devices need to be marked REMOVED.
6252 */
6253 if (tasks & SPA_ASYNC_REMOVE) {
6254 spa_vdev_state_enter(spa, SCL_NONE);
6255 spa_async_remove(spa, spa->spa_root_vdev);
6256 for (int i = 0; i < spa->spa_l2cache.sav_count; i++)
6257 spa_async_remove(spa, spa->spa_l2cache.sav_vdevs[i]);
6258 for (int i = 0; i < spa->spa_spares.sav_count; i++)
6259 spa_async_remove(spa, spa->spa_spares.sav_vdevs[i]);
6260 (void) spa_vdev_state_exit(spa, NULL, 0);
6261 }
6262
6263 /*
6264 * Let the world know that we're done.
6265 */
6266 mutex_enter(&spa->spa_async_lock);
6267 tasks = spa->spa_async_tasks;
6268 if ((tasks & SPA_ASYNC_REMOVE) != 0)
6269 goto retry;
6270 spa->spa_async_thread_vd = NULL;
6271 cv_broadcast(&spa->spa_async_cv);
6272 mutex_exit(&spa->spa_async_lock);
6273 thread_exit();
6274}
6275
6276void
6277spa_async_suspend(spa_t *spa)
6278{
6279 mutex_enter(&spa->spa_async_lock);
6280 spa->spa_async_suspended++;
6281	while (spa->spa_async_thread != NULL ||
6282	    spa->spa_async_thread_vd != NULL)
6283 cv_wait(&spa->spa_async_cv, &spa->spa_async_lock);
6284 mutex_exit(&spa->spa_async_lock);
6285}
6286
6287void
6288spa_async_resume(spa_t *spa)
6289{
6290 mutex_enter(&spa->spa_async_lock);
6291 ASSERT(spa->spa_async_suspended != 0);
6292 spa->spa_async_suspended--;
6293 mutex_exit(&spa->spa_async_lock);
6294}
6295
6296static boolean_t
6297spa_async_tasks_pending(spa_t *spa)
6298{
6299 uint_t non_config_tasks;
6300 uint_t config_task;
6301 boolean_t config_task_suspended;
6302
6303 non_config_tasks = spa->spa_async_tasks & ~(SPA_ASYNC_CONFIG_UPDATE |
6304 SPA_ASYNC_REMOVE);
6305 config_task = spa->spa_async_tasks & SPA_ASYNC_CONFIG_UPDATE;
6306 if (spa->spa_ccw_fail_time == 0) {
6307 config_task_suspended = B_FALSE;
6308 } else {
6309 config_task_suspended =
6310 (gethrtime() - spa->spa_ccw_fail_time) <
6311 (zfs_ccw_retry_interval * NANOSEC);
6312 }
6313
6314 return (non_config_tasks || (config_task && !config_task_suspended));
6315}
6316
6317static void
6318spa_async_dispatch(spa_t *spa)
6319{
6320 mutex_enter(&spa->spa_async_lock);
6321 if (spa_async_tasks_pending(spa) &&
6322 !spa->spa_async_suspended &&
6323 spa->spa_async_thread == NULL &&
6324 rootdir != NULL)
6325 spa->spa_async_thread = thread_create(NULL, 0,
6326 spa_async_thread, spa, 0, &p0, TS_RUN, maxclsyspri);
6327 mutex_exit(&spa->spa_async_lock);
6328}
6329
6330static void
6331spa_async_dispatch_vd(spa_t *spa)
6332{
6333 mutex_enter(&spa->spa_async_lock);
6334 if ((spa->spa_async_tasks & SPA_ASYNC_REMOVE) != 0 &&
6335 !spa->spa_async_suspended &&
6336 spa->spa_async_thread_vd == NULL &&
6337 rootdir != NULL)
6338 spa->spa_async_thread_vd = thread_create(NULL, 0,
6339 spa_async_thread_vd, spa, 0, &p0, TS_RUN, maxclsyspri);
6340 mutex_exit(&spa->spa_async_lock);
6341}
6342
6343void
6344spa_async_request(spa_t *spa, int task)
6345{
6346 zfs_dbgmsg("spa=%s async request task=%u", spa->spa_name, task);
6347 mutex_enter(&spa->spa_async_lock);
6348 spa->spa_async_tasks |= task;
6349 mutex_exit(&spa->spa_async_lock);
6350 spa_async_dispatch_vd(spa);
6351}
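
/*
 * Illustrative sketch, not part of the build: other parts of the SPA queue
 * deferred work by OR-ing a task bit into spa_async_tasks, e.g.:
 *
 *	spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
 *
 * The request latches the bit and kicks spa_async_dispatch_vd(); tasks other
 * than SPA_ASYNC_REMOVE run once spa_async_dispatch() is called (e.g. at the
 * end of spa_sync()) and async processing is not suspended.
 */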
6352
6353/*
6354 * ==========================================================================
6355 * SPA syncing routines
6356 * ==========================================================================
6357 */
6358
6359static int
6360bpobj_enqueue_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
6361{
6362 bpobj_t *bpo = arg;
6363 bpobj_enqueue(bpo, bp, tx);
6364 return (0);
6365}
6366
6367static int
6368spa_free_sync_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
6369{
6370 zio_t *zio = arg;
6371
6372 zio_nowait(zio_free_sync(zio, zio->io_spa, dmu_tx_get_txg(tx), bp,
6373 BP_GET_PSIZE(bp), zio->io_flags));
6374 return (0);
6375}
6376
6377/*
6378 * Note: this simple function is not inlined to make it easier to dtrace the
6379 * amount of time spent syncing frees.
6380 */
6381static void
6382spa_sync_frees(spa_t *spa, bplist_t *bpl, dmu_tx_t *tx)
6383{
6384 zio_t *zio = zio_root(spa, NULL, NULL, 0);
6385 bplist_iterate(bpl, spa_free_sync_cb, zio, tx);
6386 VERIFY(zio_wait(zio) == 0);
6387}
6388
6389/*
6390 * Note: this simple function is not inlined to make it easier to dtrace the
6391 * amount of time spent syncing deferred frees.
6392 */
6393static void
6394spa_sync_deferred_frees(spa_t *spa, dmu_tx_t *tx)
6395{
6396 zio_t *zio = zio_root(spa, NULL, NULL, 0);
6397 VERIFY3U(bpobj_iterate(&spa->spa_deferred_bpobj,
6398 spa_free_sync_cb, zio, tx), ==, 0);
6399 VERIFY0(zio_wait(zio));
6400}
6401
6402
6403static void
6404spa_sync_nvlist(spa_t *spa, uint64_t obj, nvlist_t *nv, dmu_tx_t *tx)
6405{
6406 char *packed = NULL;
6407 size_t bufsize;
6408 size_t nvsize = 0;
6409 dmu_buf_t *db;
6410
6411 VERIFY(nvlist_size(nv, &nvsize, NV_ENCODE_XDR) == 0);
6412
6413 /*
6414 * Write full (SPA_CONFIG_BLOCKSIZE) blocks of configuration
6415 * information. This avoids the dmu_buf_will_dirty() path and
6416 * saves us a pre-read to get data we don't actually care about.
6417 */
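	/*
	 * Illustrative example: P2ROUNDUP() pads the packed size up to the
	 * next multiple of the block size, e.g. P2ROUNDUP(5000, 4096) is
	 * 8192; the tail beyond nvsize is then explicitly zeroed below.
	 */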
6418 bufsize = P2ROUNDUP((uint64_t)nvsize, SPA_CONFIG_BLOCKSIZE);
6419 packed = kmem_alloc(bufsize, KM_SLEEP);
6420
6421 VERIFY(nvlist_pack(nv, &packed, &nvsize, NV_ENCODE_XDR,
6422 KM_SLEEP) == 0);
6423 bzero(packed + nvsize, bufsize - nvsize);
6424
6425 dmu_write(spa->spa_meta_objset, obj, 0, bufsize, packed, tx);
6426
6427 kmem_free(packed, bufsize);
6428
6429 VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db));
6430 dmu_buf_will_dirty(db, tx);
6431 *(uint64_t *)db->db_data = nvsize;
6432 dmu_buf_rele(db, FTAG);
6433}
6434
6435static void
6436spa_sync_aux_dev(spa_t *spa, spa_aux_vdev_t *sav, dmu_tx_t *tx,
6437 const char *config, const char *entry)
6438{
6439 nvlist_t *nvroot;
6440 nvlist_t **list;
6441 int i;
6442
6443 if (!sav->sav_sync)
6444 return;
6445
6446 /*
6447 * Update the MOS nvlist describing the list of available devices.
6448 * spa_validate_aux() will have already made sure this nvlist is
6449 * valid and the vdevs are labeled appropriately.
6450 */
6451 if (sav->sav_object == 0) {
6452 sav->sav_object = dmu_object_alloc(spa->spa_meta_objset,
6453 DMU_OT_PACKED_NVLIST, 1 << 14, DMU_OT_PACKED_NVLIST_SIZE,
6454 sizeof (uint64_t), tx);
6455 VERIFY(zap_update(spa->spa_meta_objset,
6456 DMU_POOL_DIRECTORY_OBJECT, entry, sizeof (uint64_t), 1,
6457 &sav->sav_object, tx) == 0);
6458 }
6459
6460 VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0);
6461 if (sav->sav_count == 0) {
6462 VERIFY(nvlist_add_nvlist_array(nvroot, config, NULL, 0) == 0);
6463 } else {
6464 list = kmem_alloc(sav->sav_count * sizeof (void *), KM_SLEEP);
6465 for (i = 0; i < sav->sav_count; i++)
6466 list[i] = vdev_config_generate(spa, sav->sav_vdevs[i],
6467 B_FALSE, VDEV_CONFIG_L2CACHE);
6468 VERIFY(nvlist_add_nvlist_array(nvroot, config, list,
6469 sav->sav_count) == 0);
6470 for (i = 0; i < sav->sav_count; i++)
6471 nvlist_free(list[i]);
6472 kmem_free(list, sav->sav_count * sizeof (void *));
6473 }
6474
6475 spa_sync_nvlist(spa, sav->sav_object, nvroot, tx);
6476 nvlist_free(nvroot);
6477
6478 sav->sav_sync = B_FALSE;
6479}
6480
6481/*
6482 * Rebuild spa's all-vdev ZAP from the vdev ZAPs indicated in each vdev_t.
6483 * The all-vdev ZAP must be empty.
6484 */
6485static void
6486spa_avz_build(vdev_t *vd, uint64_t avz, dmu_tx_t *tx)
6487{
6488 spa_t *spa = vd->vdev_spa;
6489 if (vd->vdev_top_zap != 0) {
6490 VERIFY0(zap_add_int(spa->spa_meta_objset, avz,
6491 vd->vdev_top_zap, tx));
6492 }
6493 if (vd->vdev_leaf_zap != 0) {
6494 VERIFY0(zap_add_int(spa->spa_meta_objset, avz,
6495 vd->vdev_leaf_zap, tx));
6496 }
6497 for (uint64_t i = 0; i < vd->vdev_children; i++) {
6498 spa_avz_build(vd->vdev_child[i], avz, tx);
6499 }
6500}
6501
6502static void
6503spa_sync_config_object(spa_t *spa, dmu_tx_t *tx)
6504{
6505 nvlist_t *config;
6506
6507 /*
6508 * If the pool is being imported from a pre-per-vdev-ZAP version of ZFS,
6509 * its config may not be dirty but we still need to build per-vdev ZAPs.
6510 * Similarly, if the pool is being assembled (e.g. after a split), we
6511 * need to rebuild the AVZ although the config may not be dirty.
6512 */
6513 if (list_is_empty(&spa->spa_config_dirty_list) &&
6514 spa->spa_avz_action == AVZ_ACTION_NONE)
6515 return;
6516
6517 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
6518
6519 ASSERT(spa->spa_avz_action == AVZ_ACTION_NONE ||
6520 spa->spa_avz_action == AVZ_ACTION_INITIALIZE ||
6521 spa->spa_all_vdev_zaps != 0);
6522
6523 if (spa->spa_avz_action == AVZ_ACTION_REBUILD) {
6524 /* Make and build the new AVZ */
6525 uint64_t new_avz = zap_create(spa->spa_meta_objset,
6526 DMU_OTN_ZAP_METADATA, DMU_OT_NONE, 0, tx);
6527 spa_avz_build(spa->spa_root_vdev, new_avz, tx);
6528
6529 /* Diff old AVZ with new one */
6530 zap_cursor_t zc;
6531 zap_attribute_t za;
6532
6533 for (zap_cursor_init(&zc, spa->spa_meta_objset,
6534 spa->spa_all_vdev_zaps);
6535 zap_cursor_retrieve(&zc, &za) == 0;
6536 zap_cursor_advance(&zc)) {
6537 uint64_t vdzap = za.za_first_integer;
6538 if (zap_lookup_int(spa->spa_meta_objset, new_avz,
6539 vdzap) == ENOENT) {
6540 /*
6541 * ZAP is listed in old AVZ but not in new one;
6542 * destroy it
6543 */
6544 VERIFY0(zap_destroy(spa->spa_meta_objset, vdzap,
6545 tx));
6546 }
6547 }
6548
6549 zap_cursor_fini(&zc);
6550
6551 /* Destroy the old AVZ */
6552 VERIFY0(zap_destroy(spa->spa_meta_objset,
6553 spa->spa_all_vdev_zaps, tx));
6554
6555 /* Replace the old AVZ in the dir obj with the new one */
6556 VERIFY0(zap_update(spa->spa_meta_objset,
6557 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_VDEV_ZAP_MAP,
6558 sizeof (new_avz), 1, &new_avz, tx));
6559
6560 spa->spa_all_vdev_zaps = new_avz;
6561 } else if (spa->spa_avz_action == AVZ_ACTION_DESTROY) {
6562 zap_cursor_t zc;
6563 zap_attribute_t za;
6564
6565 /* Walk through the AVZ and destroy all listed ZAPs */
6566 for (zap_cursor_init(&zc, spa->spa_meta_objset,
6567 spa->spa_all_vdev_zaps);
6568 zap_cursor_retrieve(&zc, &za) == 0;
6569 zap_cursor_advance(&zc)) {
6570 uint64_t zap = za.za_first_integer;
6571 VERIFY0(zap_destroy(spa->spa_meta_objset, zap, tx));
6572 }
6573
6574 zap_cursor_fini(&zc);
6575
6576 /* Destroy and unlink the AVZ itself */
6577 VERIFY0(zap_destroy(spa->spa_meta_objset,
6578 spa->spa_all_vdev_zaps, tx));
6579 VERIFY0(zap_remove(spa->spa_meta_objset,
6580 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_VDEV_ZAP_MAP, tx));
6581 spa->spa_all_vdev_zaps = 0;
6582 }
6583
6584 if (spa->spa_all_vdev_zaps == 0) {
6585 spa->spa_all_vdev_zaps = zap_create_link(spa->spa_meta_objset,
6586 DMU_OTN_ZAP_METADATA, DMU_POOL_DIRECTORY_OBJECT,
6587 DMU_POOL_VDEV_ZAP_MAP, tx);
6588 }
6589 spa->spa_avz_action = AVZ_ACTION_NONE;
6590
6591 /* Create ZAPs for vdevs that don't have them. */
6592 vdev_construct_zaps(spa->spa_root_vdev, tx);
6593
6594 config = spa_config_generate(spa, spa->spa_root_vdev,
6595 dmu_tx_get_txg(tx), B_FALSE);
6596
6597 /*
6598 * If we're upgrading the spa version then make sure that
6599 * the config object gets updated with the correct version.
6600 */
6601 if (spa->spa_ubsync.ub_version < spa->spa_uberblock.ub_version)
6602 fnvlist_add_uint64(config, ZPOOL_CONFIG_VERSION,
6603 spa->spa_uberblock.ub_version);
6604
6605 spa_config_exit(spa, SCL_STATE, FTAG);
6606
6607 nvlist_free(spa->spa_config_syncing);
6608 spa->spa_config_syncing = config;
6609
6610 spa_sync_nvlist(spa, spa->spa_config_object, config, tx);
6611}
6612
6613static void
6614spa_sync_version(void *arg, dmu_tx_t *tx)
6615{
6616 uint64_t *versionp = arg;
6617 uint64_t version = *versionp;
6618 spa_t *spa = dmu_tx_pool(tx)->dp_spa;
6619
6620 /*
6621 * Setting the version is special cased when first creating the pool.
6622 */
6623 ASSERT(tx->tx_txg != TXG_INITIAL);
6624
6625 ASSERT(SPA_VERSION_IS_SUPPORTED(version));
6626 ASSERT(version >= spa_version(spa));
6627
6628 spa->spa_uberblock.ub_version = version;
6629 vdev_config_dirty(spa->spa_root_vdev);
6630 spa_history_log_internal(spa, "set", tx, "version=%lld", version);
6631}
6632
6633/*
6634 * Set zpool properties.
6635 */
6636static void
6637spa_sync_props(void *arg, dmu_tx_t *tx)
6638{
6639 nvlist_t *nvp = arg;
6640 spa_t *spa = dmu_tx_pool(tx)->dp_spa;
6641 objset_t *mos = spa->spa_meta_objset;
6642 nvpair_t *elem = NULL;
6643
6644 mutex_enter(&spa->spa_props_lock);
6645
6646 while ((elem = nvlist_next_nvpair(nvp, elem))) {
6647 uint64_t intval;
6648 char *strval, *fname;
6649 zpool_prop_t prop;
6650 const char *propname;
6651 zprop_type_t proptype;
6652 spa_feature_t fid;
6653
6654 switch (prop = zpool_name_to_prop(nvpair_name(elem))) {
6655 case ZPROP_INVAL:
6656 /*
6657 * We checked this earlier in spa_prop_validate().
6658 */
6659 ASSERT(zpool_prop_feature(nvpair_name(elem)));
6660
6661 fname = strchr(nvpair_name(elem), '@') + 1;
6662 VERIFY0(zfeature_lookup_name(fname, &fid));
6663
6664 spa_feature_enable(spa, fid, tx);
6665 spa_history_log_internal(spa, "set", tx,
6666 "%s=enabled", nvpair_name(elem));
6667 break;
6668
6669 case ZPOOL_PROP_VERSION:
6670 intval = fnvpair_value_uint64(elem);
6671 /*
6672			 * The version is synced separately before other
6673 * properties and should be correct by now.
6674 */
6675 ASSERT3U(spa_version(spa), >=, intval);
6676 break;
6677
6678 case ZPOOL_PROP_ALTROOT:
6679 /*
6680 * 'altroot' is a non-persistent property. It should
6681 * have been set temporarily at creation or import time.
6682 */
6683 ASSERT(spa->spa_root != NULL);
6684 break;
6685
6686 case ZPOOL_PROP_READONLY:
6687 case ZPOOL_PROP_CACHEFILE:
6688 /*
6689			 * 'readonly' and 'cachefile' are also non-persistent
6690 * properties.
6691 */
6692 break;
6693 case ZPOOL_PROP_COMMENT:
6694 strval = fnvpair_value_string(elem);
6695 if (spa->spa_comment != NULL)
6696 spa_strfree(spa->spa_comment);
6697 spa->spa_comment = spa_strdup(strval);
6698 /*
6699 * We need to dirty the configuration on all the vdevs
6700 * so that their labels get updated. It's unnecessary
6701 * to do this for pool creation since the vdev's
6702			 * configuration has already been dirtied.
6703 */
6704 if (tx->tx_txg != TXG_INITIAL)
6705 vdev_config_dirty(spa->spa_root_vdev);
6706 spa_history_log_internal(spa, "set", tx,
6707 "%s=%s", nvpair_name(elem), strval);
6708 break;
6709 default:
6710 /*
6711 * Set pool property values in the poolprops mos object.
6712 */
6713 if (spa->spa_pool_props_object == 0) {
6714 spa->spa_pool_props_object =
6715 zap_create_link(mos, DMU_OT_POOL_PROPS,
6716 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_PROPS,
6717 tx);
6718 }
6719
6720 /* normalize the property name */
6721 propname = zpool_prop_to_name(prop);
6722 proptype = zpool_prop_get_type(prop);
6723
6724 if (nvpair_type(elem) == DATA_TYPE_STRING) {
6725 ASSERT(proptype == PROP_TYPE_STRING);
6726 strval = fnvpair_value_string(elem);
6727 VERIFY0(zap_update(mos,
6728 spa->spa_pool_props_object, propname,
6729 1, strlen(strval) + 1, strval, tx));
6730 spa_history_log_internal(spa, "set", tx,
6731 "%s=%s", nvpair_name(elem), strval);
6732 } else if (nvpair_type(elem) == DATA_TYPE_UINT64) {
6733 intval = fnvpair_value_uint64(elem);
6734
6735 if (proptype == PROP_TYPE_INDEX) {
6736 const char *unused;
6737 VERIFY0(zpool_prop_index_to_string(
6738 prop, intval, &unused));
6739 }
6740 VERIFY0(zap_update(mos,
6741 spa->spa_pool_props_object, propname,
6742 8, 1, &intval, tx));
6743 spa_history_log_internal(spa, "set", tx,
6744 "%s=%lld", nvpair_name(elem), intval);
6745 } else {
6746 ASSERT(0); /* not allowed */
6747 }
6748
6749 switch (prop) {
6750 case ZPOOL_PROP_DELEGATION:
6751 spa->spa_delegation = intval;
6752 break;
6753 case ZPOOL_PROP_BOOTFS:
6754 spa->spa_bootfs = intval;
6755 break;
6756 case ZPOOL_PROP_FAILUREMODE:
6757 spa->spa_failmode = intval;
6758 break;
6759 case ZPOOL_PROP_AUTOEXPAND:
6760 spa->spa_autoexpand = intval;
6761 if (tx->tx_txg != TXG_INITIAL)
6762 spa_async_request(spa,
6763 SPA_ASYNC_AUTOEXPAND);
6764 break;
6765 case ZPOOL_PROP_DEDUPDITTO:
6766 spa->spa_dedup_ditto = intval;
6767 break;
6768 default:
6769 break;
6770 }
6771 }
6772
6773 }
6774
6775 mutex_exit(&spa->spa_props_lock);
6776}
6777
6778/*
6779 * Perform one-time on-disk changes for an upgrade.  spa_version() does not
6780 * reflect the new version this txg, so there must be no changes this
6781 * txg to anything that the upgrade code depends on after it executes.
6782 * Therefore this must be called after dsl_pool_sync() does the sync
6783 * tasks.
6784 */
6785static void
6786spa_sync_upgrades(spa_t *spa, dmu_tx_t *tx)
6787{
6788 dsl_pool_t *dp = spa->spa_dsl_pool;
6789
6790 ASSERT(spa->spa_sync_pass == 1);
6791
6792 rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG);
6793
6794 if (spa->spa_ubsync.ub_version < SPA_VERSION_ORIGIN &&
6795 spa->spa_uberblock.ub_version >= SPA_VERSION_ORIGIN) {
6796 dsl_pool_create_origin(dp, tx);
6797
6798 /* Keeping the origin open increases spa_minref */
6799 spa->spa_minref += 3;
6800 }
6801
6802 if (spa->spa_ubsync.ub_version < SPA_VERSION_NEXT_CLONES &&
6803 spa->spa_uberblock.ub_version >= SPA_VERSION_NEXT_CLONES) {
6804 dsl_pool_upgrade_clones(dp, tx);
6805 }
6806
6807 if (spa->spa_ubsync.ub_version < SPA_VERSION_DIR_CLONES &&
6808 spa->spa_uberblock.ub_version >= SPA_VERSION_DIR_CLONES) {
6809 dsl_pool_upgrade_dir_clones(dp, tx);
6810
6811 /* Keeping the freedir open increases spa_minref */
6812 spa->spa_minref += 3;
6813 }
6814
6815 if (spa->spa_ubsync.ub_version < SPA_VERSION_FEATURES &&
6816 spa->spa_uberblock.ub_version >= SPA_VERSION_FEATURES) {
6817 spa_feature_create_zap_objects(spa, tx);
6818 }
6819
6820 /*
6821	 * The LZ4_COMPRESS feature's behavior was changed to activate_on_enable
6822	 * when the ability to use lz4 compression for metadata was added.
6823	 * Old pools that have this feature enabled must be upgraded to have
6824	 * this feature active.
6825 */
6826 if (spa->spa_uberblock.ub_version >= SPA_VERSION_FEATURES) {
6827 boolean_t lz4_en = spa_feature_is_enabled(spa,
6828 SPA_FEATURE_LZ4_COMPRESS);
6829 boolean_t lz4_ac = spa_feature_is_active(spa,
6830 SPA_FEATURE_LZ4_COMPRESS);
6831
6832 if (lz4_en && !lz4_ac)
6833 spa_feature_incr(spa, SPA_FEATURE_LZ4_COMPRESS, tx);
6834 }
6835
6836 /*
6837 * If we haven't written the salt, do so now. Note that the
6838 * feature may not be activated yet, but that's fine since
6839 * the presence of this ZAP entry is backwards compatible.
6840 */
6841 if (zap_contains(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
6842 DMU_POOL_CHECKSUM_SALT) == ENOENT) {
6843 VERIFY0(zap_add(spa->spa_meta_objset,
6844 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CHECKSUM_SALT, 1,
6845 sizeof (spa->spa_cksum_salt.zcs_bytes),
6846 spa->spa_cksum_salt.zcs_bytes, tx));
6847 }
6848
6849 rrw_exit(&dp->dp_config_rwlock, FTAG);
6850}
6851
6852/*
6853 * Sync the specified transaction group. New blocks may be dirtied as
6854 * part of the process, so we iterate until it converges.
6855 */
6856void
6857spa_sync(spa_t *spa, uint64_t txg)
6858{
6859 dsl_pool_t *dp = spa->spa_dsl_pool;
6860 objset_t *mos = spa->spa_meta_objset;
6861 bplist_t *free_bpl = &spa->spa_free_bplist[txg & TXG_MASK];
6862 vdev_t *rvd = spa->spa_root_vdev;
6863 vdev_t *vd;
6864 dmu_tx_t *tx;
6865 int error;
6866 uint32_t max_queue_depth = zfs_vdev_async_write_max_active *
6867 zfs_vdev_queue_depth_pct / 100;
6868
6869 VERIFY(spa_writeable(spa));
6870
6871 /*
6872 * Lock out configuration changes.
6873 */
6874 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
6875
6876 spa->spa_syncing_txg = txg;
6877 spa->spa_sync_pass = 0;
6878
6879 mutex_enter(&spa->spa_alloc_lock);
6880 VERIFY0(avl_numnodes(&spa->spa_alloc_tree));
6881 mutex_exit(&spa->spa_alloc_lock);
6882
6883 /*
6884 * If there are any pending vdev state changes, convert them
6885 * into config changes that go out with this transaction group.
6886 */
6887 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
6888 while (list_head(&spa->spa_state_dirty_list) != NULL) {
6889 /*
6890 * We need the write lock here because, for aux vdevs,
6891 * calling vdev_config_dirty() modifies sav_config.
6892 * This is ugly and will become unnecessary when we
6893 * eliminate the aux vdev wart by integrating all vdevs
6894 * into the root vdev tree.
6895 */
6896 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
6897 spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_WRITER);
6898 while ((vd = list_head(&spa->spa_state_dirty_list)) != NULL) {
6899 vdev_state_clean(vd);
6900 vdev_config_dirty(vd);
6901 }
6902 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
6903 spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
6904 }
6905 spa_config_exit(spa, SCL_STATE, FTAG);
6906
6907 tx = dmu_tx_create_assigned(dp, txg);
6908
6909 spa->spa_sync_starttime = gethrtime();
6910#ifdef illumos
6911 VERIFY(cyclic_reprogram(spa->spa_deadman_cycid,
6912 spa->spa_sync_starttime + spa->spa_deadman_synctime));
6913#else /* !illumos */
6914#ifdef _KERNEL
6915 callout_schedule(&spa->spa_deadman_cycid,
6916 hz * spa->spa_deadman_synctime / NANOSEC);
6917#endif
6918#endif /* illumos */
6919
6920 /*
6921 * If we are upgrading to SPA_VERSION_RAIDZ_DEFLATE this txg,
6922 * set spa_deflate if we have no raid-z vdevs.
6923 */
6924 if (spa->spa_ubsync.ub_version < SPA_VERSION_RAIDZ_DEFLATE &&
6925 spa->spa_uberblock.ub_version >= SPA_VERSION_RAIDZ_DEFLATE) {
6926 int i;
6927
6928 for (i = 0; i < rvd->vdev_children; i++) {
6929 vd = rvd->vdev_child[i];
6930 if (vd->vdev_deflate_ratio != SPA_MINBLOCKSIZE)
6931 break;
6932 }
6933 if (i == rvd->vdev_children) {
6934 spa->spa_deflate = TRUE;
6935 VERIFY(0 == zap_add(spa->spa_meta_objset,
6936 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
6937 sizeof (uint64_t), 1, &spa->spa_deflate, tx));
6938 }
6939 }
6940
6941 /*
6942 * Set the top-level vdev's max queue depth. Evaluate each
6943 * top-level's async write queue depth in case it changed.
6944 * The max queue depth will not change in the middle of syncing
6945 * out this txg.
6946 */
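	/*
	 * Illustrative example (the tunable values below are assumptions, not
	 * taken from this file): with zfs_vdev_async_write_max_active = 10
	 * and zfs_vdev_queue_depth_pct = 1000, max_queue_depth computed above
	 * is 10 * 1000 / 100 = 100 queued allocations per top-level vdev.
	 */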
6947 uint64_t queue_depth_total = 0;
6948 for (int c = 0; c < rvd->vdev_children; c++) {
6949 vdev_t *tvd = rvd->vdev_child[c];
6950 metaslab_group_t *mg = tvd->vdev_mg;
6951
6952 if (mg == NULL || mg->mg_class != spa_normal_class(spa) ||
6953 !metaslab_group_initialized(mg))
6954 continue;
6955
6956 /*
6957 * It is safe to do a lock-free check here because only async
6958 * allocations look at mg_max_alloc_queue_depth, and async
6959 * allocations all happen from spa_sync().
6960 */
6961 ASSERT0(refcount_count(&mg->mg_alloc_queue_depth));
6962 mg->mg_max_alloc_queue_depth = max_queue_depth;
6963 queue_depth_total += mg->mg_max_alloc_queue_depth;
6964 }
6965 metaslab_class_t *mc = spa_normal_class(spa);
6966 ASSERT0(refcount_count(&mc->mc_alloc_slots));
6967 mc->mc_alloc_max_slots = queue_depth_total;
6968 mc->mc_alloc_throttle_enabled = zio_dva_throttle_enabled;
6969
6970 ASSERT3U(mc->mc_alloc_max_slots, <=,
6971 max_queue_depth * rvd->vdev_children);
6972
6973 /*
6974 * Iterate to convergence.
6975 */
6976 do {
6977 int pass = ++spa->spa_sync_pass;
6978
6979 spa_sync_config_object(spa, tx);
6980 spa_sync_aux_dev(spa, &spa->spa_spares, tx,
6981 ZPOOL_CONFIG_SPARES, DMU_POOL_SPARES);
6982 spa_sync_aux_dev(spa, &spa->spa_l2cache, tx,
6983 ZPOOL_CONFIG_L2CACHE, DMU_POOL_L2CACHE);
6984 spa_errlog_sync(spa, txg);
6985 dsl_pool_sync(dp, txg);
6986
6987 if (pass < zfs_sync_pass_deferred_free) {
6988 spa_sync_frees(spa, free_bpl, tx);
6989 } else {
6990 /*
6991			 * We cannot defer frees in pass 1, because
6992 * we sync the deferred frees later in pass 1.
6993 */
6994 ASSERT3U(pass, >, 1);
6995 bplist_iterate(free_bpl, bpobj_enqueue_cb,
6996 &spa->spa_deferred_bpobj, tx);
6997 }
6998
6999 ddt_sync(spa, txg);
7000 dsl_scan_sync(dp, tx);
7001
7002 while (vd = txg_list_remove(&spa->spa_vdev_txg_list, txg))
7003 vdev_sync(vd, txg);
7004
7005 if (pass == 1) {
7006 spa_sync_upgrades(spa, tx);
7007 ASSERT3U(txg, >=,
7008 spa->spa_uberblock.ub_rootbp.blk_birth);
7009 /*
7010 * Note: We need to check if the MOS is dirty
7011 * because we could have marked the MOS dirty
7012 * without updating the uberblock (e.g. if we
7013 * have sync tasks but no dirty user data). We
7014 * need to check the uberblock's rootbp because
7015 * it is updated if we have synced out dirty
7016 * data (though in this case the MOS will most
7017 * likely also be dirty due to second order
7018 * effects, we don't want to rely on that here).
7019 */
7020 if (spa->spa_uberblock.ub_rootbp.blk_birth < txg &&
7021 !dmu_objset_is_dirty(mos, txg)) {
7022 /*
7023 * Nothing changed on the first pass,
7024 * therefore this TXG is a no-op. Avoid
7025 * syncing deferred frees, so that we
7026 * can keep this TXG as a no-op.
7027 */
7028 ASSERT(txg_list_empty(&dp->dp_dirty_datasets,
7029 txg));
7030 ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg));
7031 ASSERT(txg_list_empty(&dp->dp_sync_tasks, txg));
7032 break;
7033 }
7034 spa_sync_deferred_frees(spa, tx);
7035 }
7036
7037 } while (dmu_objset_is_dirty(mos, txg));
7038
7039 if (!list_is_empty(&spa->spa_config_dirty_list)) {
7040 /*
7041 * Make sure that the number of ZAPs for all the vdevs matches
7042 * the number of ZAPs in the per-vdev ZAP list. This only gets
7043 * called if the config is dirty; otherwise there may be
7044 * outstanding AVZ operations that weren't completed in
7045 * spa_sync_config_object.
7046 */
7047 uint64_t all_vdev_zap_entry_count;
7048 ASSERT0(zap_count(spa->spa_meta_objset,
7049 spa->spa_all_vdev_zaps, &all_vdev_zap_entry_count));
7050 ASSERT3U(vdev_count_verify_zaps(spa->spa_root_vdev), ==,
7051 all_vdev_zap_entry_count);
7052 }
7053
7054 /*
7055 * Rewrite the vdev configuration (which includes the uberblock)
7056 * to commit the transaction group.
7057 *
7058 * If there are no dirty vdevs, we sync the uberblock to a few
7059 * random top-level vdevs that are known to be visible in the
7060 * config cache (see spa_vdev_add() for a complete description).
7061 * If there *are* dirty vdevs, sync the uberblock to all vdevs.
7062 */
7063 for (;;) {
7064 /*
7065 * We hold SCL_STATE to prevent vdev open/close/etc.
7066 * while we're attempting to write the vdev labels.
7067 */
7068 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
7069
7070 if (list_is_empty(&spa->spa_config_dirty_list)) {
7071 vdev_t *svd[SPA_DVAS_PER_BP];
7072 int svdcount = 0;
7073 int children = rvd->vdev_children;
7074 int c0 = spa_get_random(children);
7075
7076 for (int c = 0; c < children; c++) {
7077 vd = rvd->vdev_child[(c0 + c) % children];
7078 if (vd->vdev_ms_array == 0 || vd->vdev_islog)
7079 continue;
7080 svd[svdcount++] = vd;
7081 if (svdcount == SPA_DVAS_PER_BP)
7082 break;
7083 }
7084 error = vdev_config_sync(svd, svdcount, txg);
7085 } else {
7086 error = vdev_config_sync(rvd->vdev_child,
7087 rvd->vdev_children, txg);
7088 }
7089
7090 if (error == 0)
7091 spa->spa_last_synced_guid = rvd->vdev_guid;
7092
7093 spa_config_exit(spa, SCL_STATE, FTAG);
7094
7095 if (error == 0)
7096 break;
7097 zio_suspend(spa, NULL);
7098 zio_resume_wait(spa);
7099 }
7100 dmu_tx_commit(tx);
7101
7102#ifdef illumos
7103 VERIFY(cyclic_reprogram(spa->spa_deadman_cycid, CY_INFINITY));
7104#else /* !illumos */
7105#ifdef _KERNEL
7106 callout_drain(&spa->spa_deadman_cycid);
7107#endif
7108#endif /* illumos */
7109
7110 /*
7111 * Clear the dirty config list.
7112 */
7113 while ((vd = list_head(&spa->spa_config_dirty_list)) != NULL)
7114 vdev_config_clean(vd);
7115
7116 /*
7117 * Now that the new config has synced transactionally,
7118 * let it become visible to the config cache.
7119 */
7120 if (spa->spa_config_syncing != NULL) {
7121 spa_config_set(spa, spa->spa_config_syncing);
7122 spa->spa_config_txg = txg;
7123 spa->spa_config_syncing = NULL;
7124 }
7125
7126 dsl_pool_sync_done(dp, txg);
7127
7128 mutex_enter(&spa->spa_alloc_lock);
7129 VERIFY0(avl_numnodes(&spa->spa_alloc_tree));
7130 mutex_exit(&spa->spa_alloc_lock);
7131
7132 /*
7133 * Update usable space statistics.
7134 */
7135 while (vd = txg_list_remove(&spa->spa_vdev_txg_list, TXG_CLEAN(txg)))
7136 vdev_sync_done(vd, txg);
7137
7138 spa_update_dspace(spa);
7139
7140 /*
7141 * It had better be the case that we didn't dirty anything
7142 * since vdev_config_sync().
7143 */
7144 ASSERT(txg_list_empty(&dp->dp_dirty_datasets, txg));
7145 ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg));
7146 ASSERT(txg_list_empty(&spa->spa_vdev_txg_list, txg));
7147
7148 spa->spa_sync_pass = 0;
7149
7150 /*
7151 * Update the last synced uberblock here. We want to do this at
7152 * the end of spa_sync() so that consumers of spa_last_synced_txg()
7153 * will be guaranteed that all the processing associated with
7154 * that txg has been completed.
7155 */
7156 spa->spa_ubsync = spa->spa_uberblock;
7157 spa_config_exit(spa, SCL_CONFIG, FTAG);
7158
7159 spa_handle_ignored_writes(spa);
7160
7161 /*
7162 * If any async tasks have been requested, kick them off.
7163 */
7164 spa_async_dispatch(spa);
7165 spa_async_dispatch_vd(spa);
7166}
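
/*
 * Illustrative sketch, not part of the build: spa_sync() is driven by the
 * pool's txg machinery rather than called directly; code that needs its
 * changes on stable storage waits on the transaction group instead, e.g.:
 *
 *	txg_wait_synced(spa_get_dsl(spa), txg);
 *
 * which is the same call spa_sync_allpools() below makes (with a txg of 0)
 * for every writeable, active, non-suspended pool.
 */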
7167
7168/*
7169 * Sync all pools. We don't want to hold the namespace lock across these
7170 * operations, so we take a reference on the spa_t and drop the lock during the
7171 * sync.
7172 */
7173void
7174spa_sync_allpools(void)
7175{
7176 spa_t *spa = NULL;
7177 mutex_enter(&spa_namespace_lock);
7178 while ((spa = spa_next(spa)) != NULL) {
7179 if (spa_state(spa) != POOL_STATE_ACTIVE ||
7180 !spa_writeable(spa) || spa_suspended(spa))
7181 continue;
7182 spa_open_ref(spa, FTAG);
7183 mutex_exit(&spa_namespace_lock);
7184 txg_wait_synced(spa_get_dsl(spa), 0);
7185 mutex_enter(&spa_namespace_lock);
7186 spa_close(spa, FTAG);
7187 }
7188 mutex_exit(&spa_namespace_lock);
7189}
7190
7191/*
7192 * ==========================================================================
7193 * Miscellaneous routines
7194 * ==========================================================================
7195 */
7196
7197/*
7198 * Remove all pools in the system.
7199 */
7200void
7201spa_evict_all(void)
7202{
7203 spa_t *spa;
7204
7205 /*
7206 * Remove all cached state. All pools should be closed now,
7207 * so every spa in the AVL tree should be unreferenced.
7208 */
7209 mutex_enter(&spa_namespace_lock);
7210 while ((spa = spa_next(NULL)) != NULL) {
7211 /*
7212 * Stop async tasks. The async thread may need to detach
7213 * a device that's been replaced, which requires grabbing
7214 * spa_namespace_lock, so we must drop it here.
7215 */
7216 spa_open_ref(spa, FTAG);
7217 mutex_exit(&spa_namespace_lock);
7218 spa_async_suspend(spa);
7219 mutex_enter(&spa_namespace_lock);
7220 spa_close(spa, FTAG);
7221
7222 if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
7223 spa_unload(spa);
7224 spa_deactivate(spa);
7225 }
7226 spa_remove(spa);
7227 }
7228 mutex_exit(&spa_namespace_lock);
7229}
7230
7231vdev_t *
7232spa_lookup_by_guid(spa_t *spa, uint64_t guid, boolean_t aux)
7233{
7234 vdev_t *vd;
7235 int i;
7236
7237 if ((vd = vdev_lookup_by_guid(spa->spa_root_vdev, guid)) != NULL)
7238 return (vd);
7239
7240 if (aux) {
7241 for (i = 0; i < spa->spa_l2cache.sav_count; i++) {
7242 vd = spa->spa_l2cache.sav_vdevs[i];
7243 if (vd->vdev_guid == guid)
7244 return (vd);
7245 }
7246
7247 for (i = 0; i < spa->spa_spares.sav_count; i++) {
7248 vd = spa->spa_spares.sav_vdevs[i];
7249 if (vd->vdev_guid == guid)
7250 return (vd);
7251 }
7252 }
7253
7254 return (NULL);
7255}
7256
7257void
7258spa_upgrade(spa_t *spa, uint64_t version)
7259{
7260 ASSERT(spa_writeable(spa));
7261
7262 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
7263
7264 /*
7265 * This should only be called for a non-faulted pool, and since a
7266 * future version would result in an unopenable pool, this shouldn't be
7267 * possible.
7268 */
7269 ASSERT(SPA_VERSION_IS_SUPPORTED(spa->spa_uberblock.ub_version));
7270 ASSERT3U(version, >=, spa->spa_uberblock.ub_version);
7271
7272 spa->spa_uberblock.ub_version = version;
7273 vdev_config_dirty(spa->spa_root_vdev);
7274
7275 spa_config_exit(spa, SCL_ALL, FTAG);
7276
7277 txg_wait_synced(spa_get_dsl(spa), 0);
7278}
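
/*
 * Illustrative sketch, not part of the build: upgrading a pool to the newest
 * supported on-disk version and making the change durable:
 *
 *	spa_upgrade(spa, SPA_VERSION);
 *
 * The trailing txg_wait_synced() above is what ensures the new ub_version has
 * been synced out by the time the call returns.
 */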
7279
7280boolean_t
7281spa_has_spare(spa_t *spa, uint64_t guid)
7282{
7283 int i;
7284 uint64_t spareguid;
7285 spa_aux_vdev_t *sav = &spa->spa_spares;
7286
7287 for (i = 0; i < sav->sav_count; i++)
7288 if (sav->sav_vdevs[i]->vdev_guid == guid)
7289 return (B_TRUE);
7290
7291 for (i = 0; i < sav->sav_npending; i++) {
7292 if (nvlist_lookup_uint64(sav->sav_pending[i], ZPOOL_CONFIG_GUID,
7293 &spareguid) == 0 && spareguid == guid)
7294 return (B_TRUE);
7295 }
7296
7297 return (B_FALSE);
7298}
7299
7300/*
7301 * Check if a pool has an active shared spare device.
7302 * Note: the reference count of an active spare is 2: as a spare and as a replacing vdev
7303 */
7304static boolean_t
7305spa_has_active_shared_spare(spa_t *spa)
7306{
7307 int i, refcnt;
7308 uint64_t pool;
7309 spa_aux_vdev_t *sav = &spa->spa_spares;
7310
7311 for (i = 0; i < sav->sav_count; i++) {
7312 if (spa_spare_exists(sav->sav_vdevs[i]->vdev_guid, &pool,
7313 &refcnt) && pool != 0ULL && pool == spa_guid(spa) &&
7314 refcnt > 2)
7315 return (B_TRUE);
7316 }
7317
7318 return (B_FALSE);
7319}
7320
7321static sysevent_t *
7322spa_event_create(spa_t *spa, vdev_t *vd, const char *name)
7323{
7324 sysevent_t *ev = NULL;
7325#ifdef _KERNEL
7326 sysevent_attr_list_t *attr = NULL;
7327 sysevent_value_t value;
7328
7329 ev = sysevent_alloc(EC_ZFS, (char *)name, SUNW_KERN_PUB "zfs",
7330 SE_SLEEP);
7331 ASSERT(ev != NULL);
7332
7333 value.value_type = SE_DATA_TYPE_STRING;
7334 value.value.sv_string = spa_name(spa);
7335 if (sysevent_add_attr(&attr, ZFS_EV_POOL_NAME, &value, SE_SLEEP) != 0)
7336 goto done;
7337
7338 value.value_type = SE_DATA_TYPE_UINT64;
7339 value.value.sv_uint64 = spa_guid(spa);
7340 if (sysevent_add_attr(&attr, ZFS_EV_POOL_GUID, &value, SE_SLEEP) != 0)
7341 goto done;
7342
7343 if (vd) {
7344 value.value_type = SE_DATA_TYPE_UINT64;
7345 value.value.sv_uint64 = vd->vdev_guid;
7346 if (sysevent_add_attr(&attr, ZFS_EV_VDEV_GUID, &value,
7347 SE_SLEEP) != 0)
7348 goto done;
7349
7350 if (vd->vdev_path) {
7351 value.value_type = SE_DATA_TYPE_STRING;
7352 value.value.sv_string = vd->vdev_path;
7353 if (sysevent_add_attr(&attr, ZFS_EV_VDEV_PATH,
7354 &value, SE_SLEEP) != 0)
7355 goto done;
7356 }
7357 }
7358
7359 if (sysevent_attach_attributes(ev, attr) != 0)
7360 goto done;
7361 attr = NULL;
7362
7363done:
7364 if (attr)
7365 sysevent_free_attr(attr);
7366
7367#endif
7368 return (ev);
7369}
7370
7371static void
7372spa_event_post(sysevent_t *ev)
7373{
7374#ifdef _KERNEL
7375 sysevent_id_t eid;
7376
7377 (void) log_sysevent(ev, SE_SLEEP, &eid);
7378 sysevent_free(ev);
7379#endif
7380}
7381
7382/*
7383 * Post a sysevent corresponding to the given event. The 'name' must be one of
7384 * the event definitions in sys/sysevent/eventdefs.h. The payload will be
7385 * filled in from the spa and (optionally) the vdev. This doesn't do anything
7386 * in the userland libzpool, as we don't want consumers to misinterpret ztest
7387 * or zdb as real changes.
7388 */
7389void
7390spa_event_notify(spa_t *spa, vdev_t *vd, const char *name)
7391{
7392 spa_event_post(spa_event_create(spa, vd, name));
7393}
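
/*
 * Illustrative sketch, not part of the build: callers scope an event to a
 * particular device by passing its vdev, e.g.:
 *
 *	spa_event_notify(spa, vd, ESC_ZFS_VDEV_AUTOEXPAND);
 *
 * Passing a NULL vdev omits the vdev guid/path attributes and scopes the
 * event to the pool as a whole.
 */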