spa.c: diff between revisions 321529 and 321540
1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22/*
23 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Copyright (c) 2011, 2014 by Delphix. All rights reserved.
25 * Copyright (c) 2015, Nexenta Systems, Inc. All rights reserved.
26 * Copyright (c) 2013 Martin Matuska <mm@FreeBSD.org>. All rights reserved.
27 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
28 * Copyright 2013 Saso Kiselkov. All rights reserved.
29 * Copyright (c) 2014 Integros [integros.com]
30 */
31
32/*
33 * SPA: Storage Pool Allocator
34 *
35 * This file contains all the routines used when modifying on-disk SPA state.
36 * This includes opening, importing, destroying, exporting a pool, and syncing a
37 * pool.
38 */
39
40#include <sys/zfs_context.h>
41#include <sys/fm/fs/zfs.h>
42#include <sys/spa_impl.h>
43#include <sys/zio.h>
44#include <sys/zio_checksum.h>
45#include <sys/dmu.h>
46#include <sys/dmu_tx.h>
47#include <sys/zap.h>
48#include <sys/zil.h>
49#include <sys/ddt.h>
50#include <sys/vdev_impl.h>
51#include <sys/metaslab.h>
52#include <sys/metaslab_impl.h>
53#include <sys/uberblock_impl.h>
54#include <sys/txg.h>
55#include <sys/avl.h>
56#include <sys/dmu_traverse.h>
57#include <sys/dmu_objset.h>
58#include <sys/unique.h>
59#include <sys/dsl_pool.h>
60#include <sys/dsl_dataset.h>
61#include <sys/dsl_dir.h>
62#include <sys/dsl_prop.h>
63#include <sys/dsl_synctask.h>
64#include <sys/fs/zfs.h>
65#include <sys/arc.h>
66#include <sys/callb.h>
67#include <sys/spa_boot.h>
68#include <sys/zfs_ioctl.h>
69#include <sys/dsl_scan.h>
70#include <sys/dmu_send.h>
71#include <sys/dsl_destroy.h>
72#include <sys/dsl_userhold.h>
73#include <sys/zfeature.h>
74#include <sys/zvol.h>
75#include <sys/trim_map.h>
76
77#ifdef _KERNEL
78#include <sys/callb.h>
79#include <sys/cpupart.h>
80#include <sys/zone.h>
81#endif /* _KERNEL */
82
83#include "zfs_prop.h"
84#include "zfs_comutil.h"
85
86/* Check hostid on import? */
87static int check_hostid = 1;
88
89/*
90 * The interval, in seconds, at which failed configuration cache file writes
91 * should be retried.
92 */
93static int zfs_ccw_retry_interval = 300;
94
95SYSCTL_DECL(_vfs_zfs);
96SYSCTL_INT(_vfs_zfs, OID_AUTO, check_hostid, CTLFLAG_RWTUN, &check_hostid, 0,
97 "Check hostid on import?");
98TUNABLE_INT("vfs.zfs.ccw_retry_interval", &zfs_ccw_retry_interval);
99SYSCTL_INT(_vfs_zfs, OID_AUTO, ccw_retry_interval, CTLFLAG_RW,
100 &zfs_ccw_retry_interval, 0,
101 "Configuration cache file write, retry after failure, interval (seconds)");
102
103typedef enum zti_modes {
104 ZTI_MODE_FIXED, /* value is # of threads (min 1) */
105 ZTI_MODE_BATCH, /* cpu-intensive; value is ignored */
106 ZTI_MODE_NULL, /* don't create a taskq */
107 ZTI_NMODES
108} zti_modes_t;
109
110#define ZTI_P(n, q) { ZTI_MODE_FIXED, (n), (q) }
111#define ZTI_BATCH { ZTI_MODE_BATCH, 0, 1 }
112#define ZTI_NULL { ZTI_MODE_NULL, 0, 0 }
113
114#define ZTI_N(n) ZTI_P(n, 1)
115#define ZTI_ONE ZTI_N(1)
116
117typedef struct zio_taskq_info {
118 zti_modes_t zti_mode;
119 uint_t zti_value;
120 uint_t zti_count;
121} zio_taskq_info_t;
122
123static const char *const zio_taskq_types[ZIO_TASKQ_TYPES] = {
124 "issue", "issue_high", "intr", "intr_high"
125};
126
127/*
128 * This table defines the taskq settings for each ZFS I/O type. When
129 * initializing a pool, we use this table to create an appropriately sized
130 * taskq. Some operations are low volume and therefore have a small, static
131 * number of threads assigned to their taskqs using the ZTI_N(#) or ZTI_ONE
132 * macros. Other operations process a large amount of data; the ZTI_BATCH
133 * macro causes us to create a taskq oriented for throughput. Some operations
 134 * are so high frequency and short-lived that the taskq itself can become a
135 * point of lock contention. The ZTI_P(#, #) macro indicates that we need an
136 * additional degree of parallelism specified by the number of threads per-
137 * taskq and the number of taskqs; when dispatching an event in this case, the
138 * particular taskq is chosen at random.
139 *
140 * The different taskq priorities are to handle the different contexts (issue
141 * and interrupt) and then to reserve threads for ZIO_PRIORITY_NOW I/Os that
142 * need to be handled with minimum delay.
143 */
144const zio_taskq_info_t zio_taskqs[ZIO_TYPES][ZIO_TASKQ_TYPES] = {
145 /* ISSUE ISSUE_HIGH INTR INTR_HIGH */
146 { ZTI_ONE, ZTI_NULL, ZTI_ONE, ZTI_NULL }, /* NULL */
147 { ZTI_N(8), ZTI_NULL, ZTI_P(12, 8), ZTI_NULL }, /* READ */
148 { ZTI_BATCH, ZTI_N(5), ZTI_N(8), ZTI_N(5) }, /* WRITE */
149 { ZTI_P(12, 8), ZTI_NULL, ZTI_ONE, ZTI_NULL }, /* FREE */
150 { ZTI_ONE, ZTI_NULL, ZTI_ONE, ZTI_NULL }, /* CLAIM */
151 { ZTI_ONE, ZTI_NULL, ZTI_ONE, ZTI_NULL }, /* IOCTL */
152};
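/*
 * Worked example (illustrative reading of the table above, not additional
 * configuration): for READ, ZTI_N(8) creates a single "issue" taskq with
 * 8 threads, while ZTI_P(12, 8) creates eight "intr" taskqs of 12 threads
 * each (spa_taskqs_init() appends a _0.._7 suffix to their names);
 * spa_taskq_dispatch_ent() then picks one of the eight pseudo-randomly,
 * spreading lock contention across them.
 */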
153
154static sysevent_t *spa_event_create(spa_t *spa, vdev_t *vd, const char *name);
155static void spa_event_post(sysevent_t *ev);
156static void spa_sync_version(void *arg, dmu_tx_t *tx);
157static void spa_sync_props(void *arg, dmu_tx_t *tx);
158static boolean_t spa_has_active_shared_spare(spa_t *spa);
159static int spa_load_impl(spa_t *spa, uint64_t, nvlist_t *config,
160 spa_load_state_t state, spa_import_type_t type, boolean_t mosconfig,
161 char **ereport);
162static void spa_vdev_resilver_done(spa_t *spa);
163
164uint_t zio_taskq_batch_pct = 75; /* 1 thread per cpu in pset */
165#ifdef PSRSET_BIND
166id_t zio_taskq_psrset_bind = PS_NONE;
167#endif
168#ifdef SYSDC
169boolean_t zio_taskq_sysdc = B_TRUE; /* use SDC scheduling class */
170uint_t zio_taskq_basedc = 80; /* base duty cycle */
171#endif
172
173boolean_t spa_create_process = B_TRUE; /* no process ==> no sysdc */
174extern int zfs_sync_pass_deferred_free;
175
176/*
177 * This (illegal) pool name is used when temporarily importing a spa_t in order
178 * to get the vdev stats associated with the imported devices.
179 */
180#define TRYIMPORT_NAME "$import"
181
182/*
183 * ==========================================================================
184 * SPA properties routines
185 * ==========================================================================
186 */
187
188/*
189 * Add a (source=src, propname=propval) list to an nvlist.
190 */
191static void
192spa_prop_add_list(nvlist_t *nvl, zpool_prop_t prop, char *strval,
193 uint64_t intval, zprop_source_t src)
194{
195 const char *propname = zpool_prop_to_name(prop);
196 nvlist_t *propval;
197
198 VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP) == 0);
199 VERIFY(nvlist_add_uint64(propval, ZPROP_SOURCE, src) == 0);
200
201 if (strval != NULL)
202 VERIFY(nvlist_add_string(propval, ZPROP_VALUE, strval) == 0);
203 else
204 VERIFY(nvlist_add_uint64(propval, ZPROP_VALUE, intval) == 0);
205
206 VERIFY(nvlist_add_nvlist(nvl, propname, propval) == 0);
207 nvlist_free(propval);
208}
209
210/*
211 * Get property values from the spa configuration.
212 */
213static void
214spa_prop_get_config(spa_t *spa, nvlist_t **nvp)
215{
216 vdev_t *rvd = spa->spa_root_vdev;
217 dsl_pool_t *pool = spa->spa_dsl_pool;
218 uint64_t size, alloc, cap, version;
219 zprop_source_t src = ZPROP_SRC_NONE;
220 spa_config_dirent_t *dp;
221 metaslab_class_t *mc = spa_normal_class(spa);
222
223 ASSERT(MUTEX_HELD(&spa->spa_props_lock));
224
225 if (rvd != NULL) {
226 alloc = metaslab_class_get_alloc(spa_normal_class(spa));
227 size = metaslab_class_get_space(spa_normal_class(spa));
228 spa_prop_add_list(*nvp, ZPOOL_PROP_NAME, spa_name(spa), 0, src);
229 spa_prop_add_list(*nvp, ZPOOL_PROP_SIZE, NULL, size, src);
230 spa_prop_add_list(*nvp, ZPOOL_PROP_ALLOCATED, NULL, alloc, src);
231 spa_prop_add_list(*nvp, ZPOOL_PROP_FREE, NULL,
232 size - alloc, src);
233
234 spa_prop_add_list(*nvp, ZPOOL_PROP_FRAGMENTATION, NULL,
235 metaslab_class_fragmentation(mc), src);
236 spa_prop_add_list(*nvp, ZPOOL_PROP_EXPANDSZ, NULL,
237 metaslab_class_expandable_space(mc), src);
238 spa_prop_add_list(*nvp, ZPOOL_PROP_READONLY, NULL,
239 (spa_mode(spa) == FREAD), src);
240
241 cap = (size == 0) ? 0 : (alloc * 100 / size);
242 spa_prop_add_list(*nvp, ZPOOL_PROP_CAPACITY, NULL, cap, src);
243
244 spa_prop_add_list(*nvp, ZPOOL_PROP_DEDUPRATIO, NULL,
245 ddt_get_pool_dedup_ratio(spa), src);
246
247 spa_prop_add_list(*nvp, ZPOOL_PROP_HEALTH, NULL,
248 rvd->vdev_state, src);
249
250 version = spa_version(spa);
251 if (version == zpool_prop_default_numeric(ZPOOL_PROP_VERSION))
252 src = ZPROP_SRC_DEFAULT;
253 else
254 src = ZPROP_SRC_LOCAL;
255 spa_prop_add_list(*nvp, ZPOOL_PROP_VERSION, NULL, version, src);
256 }
257
258 if (pool != NULL) {
259 /*
 260 * The $FREE directory was introduced in SPA_VERSION_DEADLISTS;
 261 * when opening pools before this version, freedir will be NULL.
262 */
263 if (pool->dp_free_dir != NULL) {
264 spa_prop_add_list(*nvp, ZPOOL_PROP_FREEING, NULL,
265 dsl_dir_phys(pool->dp_free_dir)->dd_used_bytes,
266 src);
267 } else {
268 spa_prop_add_list(*nvp, ZPOOL_PROP_FREEING,
269 NULL, 0, src);
270 }
271
272 if (pool->dp_leak_dir != NULL) {
273 spa_prop_add_list(*nvp, ZPOOL_PROP_LEAKED, NULL,
274 dsl_dir_phys(pool->dp_leak_dir)->dd_used_bytes,
275 src);
276 } else {
277 spa_prop_add_list(*nvp, ZPOOL_PROP_LEAKED,
278 NULL, 0, src);
279 }
280 }
281
282 spa_prop_add_list(*nvp, ZPOOL_PROP_GUID, NULL, spa_guid(spa), src);
283
284 if (spa->spa_comment != NULL) {
285 spa_prop_add_list(*nvp, ZPOOL_PROP_COMMENT, spa->spa_comment,
286 0, ZPROP_SRC_LOCAL);
287 }
288
289 if (spa->spa_root != NULL)
290 spa_prop_add_list(*nvp, ZPOOL_PROP_ALTROOT, spa->spa_root,
291 0, ZPROP_SRC_LOCAL);
292
293 if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_BLOCKS)) {
294 spa_prop_add_list(*nvp, ZPOOL_PROP_MAXBLOCKSIZE, NULL,
295 MIN(zfs_max_recordsize, SPA_MAXBLOCKSIZE), ZPROP_SRC_NONE);
296 } else {
297 spa_prop_add_list(*nvp, ZPOOL_PROP_MAXBLOCKSIZE, NULL,
298 SPA_OLD_MAXBLOCKSIZE, ZPROP_SRC_NONE);
299 }
300
301 if ((dp = list_head(&spa->spa_config_list)) != NULL) {
302 if (dp->scd_path == NULL) {
303 spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
304 "none", 0, ZPROP_SRC_LOCAL);
305 } else if (strcmp(dp->scd_path, spa_config_path) != 0) {
306 spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
307 dp->scd_path, 0, ZPROP_SRC_LOCAL);
308 }
309 }
310}
311
312/*
313 * Get zpool property values.
314 */
315int
316spa_prop_get(spa_t *spa, nvlist_t **nvp)
317{
318 objset_t *mos = spa->spa_meta_objset;
319 zap_cursor_t zc;
320 zap_attribute_t za;
321 int err;
322
323 VERIFY(nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP) == 0);
324
325 mutex_enter(&spa->spa_props_lock);
326
327 /*
328 * Get properties from the spa config.
329 */
330 spa_prop_get_config(spa, nvp);
331
332 /* If no pool property object, no more prop to get. */
333 if (mos == NULL || spa->spa_pool_props_object == 0) {
334 mutex_exit(&spa->spa_props_lock);
335 return (0);
336 }
337
338 /*
339 * Get properties from the MOS pool property object.
340 */
341 for (zap_cursor_init(&zc, mos, spa->spa_pool_props_object);
342 (err = zap_cursor_retrieve(&zc, &za)) == 0;
343 zap_cursor_advance(&zc)) {
344 uint64_t intval = 0;
345 char *strval = NULL;
346 zprop_source_t src = ZPROP_SRC_DEFAULT;
347 zpool_prop_t prop;
348
349 if ((prop = zpool_name_to_prop(za.za_name)) == ZPROP_INVAL)
350 continue;
351
352 switch (za.za_integer_length) {
353 case 8:
354 /* integer property */
355 if (za.za_first_integer !=
356 zpool_prop_default_numeric(prop))
357 src = ZPROP_SRC_LOCAL;
358
359 if (prop == ZPOOL_PROP_BOOTFS) {
360 dsl_pool_t *dp;
361 dsl_dataset_t *ds = NULL;
362
363 dp = spa_get_dsl(spa);
364 dsl_pool_config_enter(dp, FTAG);
365 if (err = dsl_dataset_hold_obj(dp,
366 za.za_first_integer, FTAG, &ds)) {
367 dsl_pool_config_exit(dp, FTAG);
368 break;
369 }
370
371 strval = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN,
372 KM_SLEEP);
373 dsl_dataset_name(ds, strval);
374 dsl_dataset_rele(ds, FTAG);
375 dsl_pool_config_exit(dp, FTAG);
376 } else {
377 strval = NULL;
378 intval = za.za_first_integer;
379 }
380
381 spa_prop_add_list(*nvp, prop, strval, intval, src);
382
383 if (strval != NULL)
384 kmem_free(strval, ZFS_MAX_DATASET_NAME_LEN);
385
386 break;
387
388 case 1:
389 /* string property */
390 strval = kmem_alloc(za.za_num_integers, KM_SLEEP);
391 err = zap_lookup(mos, spa->spa_pool_props_object,
392 za.za_name, 1, za.za_num_integers, strval);
393 if (err) {
394 kmem_free(strval, za.za_num_integers);
395 break;
396 }
397 spa_prop_add_list(*nvp, prop, strval, 0, src);
398 kmem_free(strval, za.za_num_integers);
399 break;
400
401 default:
402 break;
403 }
404 }
405 zap_cursor_fini(&zc);
406 mutex_exit(&spa->spa_props_lock);
407out:
408 if (err && err != ENOENT) {
409 nvlist_free(*nvp);
410 *nvp = NULL;
411 return (err);
412 }
413
414 return (0);
415}
416
417/*
418 * Validate the given pool properties nvlist and modify the list
419 * for the property values to be set.
420 */
421static int
422spa_prop_validate(spa_t *spa, nvlist_t *props)
423{
424 nvpair_t *elem;
425 int error = 0, reset_bootfs = 0;
426 uint64_t objnum = 0;
427 boolean_t has_feature = B_FALSE;
428
429 elem = NULL;
430 while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
431 uint64_t intval;
432 char *strval, *slash, *check, *fname;
433 const char *propname = nvpair_name(elem);
434 zpool_prop_t prop = zpool_name_to_prop(propname);
435
436 switch (prop) {
437 case ZPROP_INVAL:
438 if (!zpool_prop_feature(propname)) {
439 error = SET_ERROR(EINVAL);
440 break;
441 }
442
443 /*
444 * Sanitize the input.
445 */
446 if (nvpair_type(elem) != DATA_TYPE_UINT64) {
447 error = SET_ERROR(EINVAL);
448 break;
449 }
450
451 if (nvpair_value_uint64(elem, &intval) != 0) {
452 error = SET_ERROR(EINVAL);
453 break;
454 }
455
456 if (intval != 0) {
457 error = SET_ERROR(EINVAL);
458 break;
459 }
460
461 fname = strchr(propname, '@') + 1;
462 if (zfeature_lookup_name(fname, NULL) != 0) {
463 error = SET_ERROR(EINVAL);
464 break;
465 }
466
467 has_feature = B_TRUE;
468 break;
469
470 case ZPOOL_PROP_VERSION:
471 error = nvpair_value_uint64(elem, &intval);
472 if (!error &&
473 (intval < spa_version(spa) ||
474 intval > SPA_VERSION_BEFORE_FEATURES ||
475 has_feature))
476 error = SET_ERROR(EINVAL);
477 break;
478
479 case ZPOOL_PROP_DELEGATION:
480 case ZPOOL_PROP_AUTOREPLACE:
481 case ZPOOL_PROP_LISTSNAPS:
482 case ZPOOL_PROP_AUTOEXPAND:
483 error = nvpair_value_uint64(elem, &intval);
484 if (!error && intval > 1)
485 error = SET_ERROR(EINVAL);
486 break;
487
488 case ZPOOL_PROP_BOOTFS:
489 /*
490 * If the pool version is less than SPA_VERSION_BOOTFS,
491 * or the pool is still being created (version == 0),
492 * the bootfs property cannot be set.
493 */
494 if (spa_version(spa) < SPA_VERSION_BOOTFS) {
495 error = SET_ERROR(ENOTSUP);
496 break;
497 }
498
499 /*
500 * Make sure the vdev config is bootable
501 */
502 if (!vdev_is_bootable(spa->spa_root_vdev)) {
503 error = SET_ERROR(ENOTSUP);
504 break;
505 }
506
507 reset_bootfs = 1;
508
509 error = nvpair_value_string(elem, &strval);
510
511 if (!error) {
512 objset_t *os;
513 uint64_t propval;
514
515 if (strval == NULL || strval[0] == '\0') {
516 objnum = zpool_prop_default_numeric(
517 ZPOOL_PROP_BOOTFS);
518 break;
519 }
520
521 if (error = dmu_objset_hold(strval, FTAG, &os))
522 break;
523
524 /*
525 * Must be ZPL, and its property settings
526 * must be supported by GRUB (compression
527 * is not gzip, and large blocks are not used).
528 */
529
530 if (dmu_objset_type(os) != DMU_OST_ZFS) {
531 error = SET_ERROR(ENOTSUP);
532 } else if ((error =
533 dsl_prop_get_int_ds(dmu_objset_ds(os),
534 zfs_prop_to_name(ZFS_PROP_COMPRESSION),
535 &propval)) == 0 &&
536 !BOOTFS_COMPRESS_VALID(propval)) {
537 error = SET_ERROR(ENOTSUP);
538 } else if ((error =
539 dsl_prop_get_int_ds(dmu_objset_ds(os),
540 zfs_prop_to_name(ZFS_PROP_RECORDSIZE),
541 &propval)) == 0 &&
542 propval > SPA_OLD_MAXBLOCKSIZE) {
543 error = SET_ERROR(ENOTSUP);
544 } else {
545 objnum = dmu_objset_id(os);
546 }
547 dmu_objset_rele(os, FTAG);
548 }
549 break;
550
551 case ZPOOL_PROP_FAILUREMODE:
552 error = nvpair_value_uint64(elem, &intval);
553 if (!error && (intval < ZIO_FAILURE_MODE_WAIT ||
554 intval > ZIO_FAILURE_MODE_PANIC))
555 error = SET_ERROR(EINVAL);
556
557 /*
558 * This is a special case which only occurs when
559 * the pool has completely failed. This allows
560 * the user to change the in-core failmode property
561 * without syncing it out to disk (I/Os might
562 * currently be blocked). We do this by returning
563 * EIO to the caller (spa_prop_set) to trick it
564 * into thinking we encountered a property validation
565 * error.
566 */
567 if (!error && spa_suspended(spa)) {
568 spa->spa_failmode = intval;
569 error = SET_ERROR(EIO);
570 }
571 break;
572
573 case ZPOOL_PROP_CACHEFILE:
574 if ((error = nvpair_value_string(elem, &strval)) != 0)
575 break;
576
577 if (strval[0] == '\0')
578 break;
579
580 if (strcmp(strval, "none") == 0)
581 break;
582
583 if (strval[0] != '/') {
584 error = SET_ERROR(EINVAL);
585 break;
586 }
587
588 slash = strrchr(strval, '/');
589 ASSERT(slash != NULL);
590
591 if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
592 strcmp(slash, "/..") == 0)
593 error = SET_ERROR(EINVAL);
594 break;
595
596 case ZPOOL_PROP_COMMENT:
597 if ((error = nvpair_value_string(elem, &strval)) != 0)
598 break;
599 for (check = strval; *check != '\0'; check++) {
600 /*
601 * The kernel doesn't have an easy isprint()
602 * check. For this kernel check, we merely
603 * check ASCII apart from DEL. Fix this if
604 * there is an easy-to-use kernel isprint().
605 */
606 if (*check >= 0x7f) {
607 error = SET_ERROR(EINVAL);
608 break;
609 }
610 }
611 if (strlen(strval) > ZPROP_MAX_COMMENT)
612 error = E2BIG;
613 break;
614
615 case ZPOOL_PROP_DEDUPDITTO:
616 if (spa_version(spa) < SPA_VERSION_DEDUP)
617 error = SET_ERROR(ENOTSUP);
618 else
619 error = nvpair_value_uint64(elem, &intval);
620 if (error == 0 &&
621 intval != 0 && intval < ZIO_DEDUPDITTO_MIN)
622 error = SET_ERROR(EINVAL);
623 break;
624 }
625
626 if (error)
627 break;
628 }
629
630 if (!error && reset_bootfs) {
631 error = nvlist_remove(props,
632 zpool_prop_to_name(ZPOOL_PROP_BOOTFS), DATA_TYPE_STRING);
633
634 if (!error) {
635 error = nvlist_add_uint64(props,
636 zpool_prop_to_name(ZPOOL_PROP_BOOTFS), objnum);
637 }
638 }
639
640 return (error);
641}
642
643void
644spa_configfile_set(spa_t *spa, nvlist_t *nvp, boolean_t need_sync)
645{
646 char *cachefile;
647 spa_config_dirent_t *dp;
648
649 if (nvlist_lookup_string(nvp, zpool_prop_to_name(ZPOOL_PROP_CACHEFILE),
650 &cachefile) != 0)
651 return;
652
653 dp = kmem_alloc(sizeof (spa_config_dirent_t),
654 KM_SLEEP);
655
656 if (cachefile[0] == '\0')
657 dp->scd_path = spa_strdup(spa_config_path);
658 else if (strcmp(cachefile, "none") == 0)
659 dp->scd_path = NULL;
660 else
661 dp->scd_path = spa_strdup(cachefile);
662
663 list_insert_head(&spa->spa_config_list, dp);
664 if (need_sync)
665 spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
666}
667
668int
669spa_prop_set(spa_t *spa, nvlist_t *nvp)
670{
671 int error;
672 nvpair_t *elem = NULL;
673 boolean_t need_sync = B_FALSE;
674
675 if ((error = spa_prop_validate(spa, nvp)) != 0)
676 return (error);
677
678 while ((elem = nvlist_next_nvpair(nvp, elem)) != NULL) {
679 zpool_prop_t prop = zpool_name_to_prop(nvpair_name(elem));
680
681 if (prop == ZPOOL_PROP_CACHEFILE ||
682 prop == ZPOOL_PROP_ALTROOT ||
683 prop == ZPOOL_PROP_READONLY)
684 continue;
685
686 if (prop == ZPOOL_PROP_VERSION || prop == ZPROP_INVAL) {
687 uint64_t ver;
688
689 if (prop == ZPOOL_PROP_VERSION) {
690 VERIFY(nvpair_value_uint64(elem, &ver) == 0);
691 } else {
692 ASSERT(zpool_prop_feature(nvpair_name(elem)));
693 ver = SPA_VERSION_FEATURES;
694 need_sync = B_TRUE;
695 }
696
697 /* Save time if the version is already set. */
698 if (ver == spa_version(spa))
699 continue;
700
701 /*
702 * In addition to the pool directory object, we might
703 * create the pool properties object, the features for
704 * read object, the features for write object, or the
705 * feature descriptions object.
706 */
707 error = dsl_sync_task(spa->spa_name, NULL,
708 spa_sync_version, &ver,
709 6, ZFS_SPACE_CHECK_RESERVED);
710 if (error)
711 return (error);
712 continue;
713 }
714
715 need_sync = B_TRUE;
716 break;
717 }
718
719 if (need_sync) {
720 return (dsl_sync_task(spa->spa_name, NULL, spa_sync_props,
721 nvp, 6, ZFS_SPACE_CHECK_RESERVED));
722 }
723
724 return (0);
725}
726
727/*
728 * If the bootfs property value is dsobj, clear it.
729 */
730void
731spa_prop_clear_bootfs(spa_t *spa, uint64_t dsobj, dmu_tx_t *tx)
732{
733 if (spa->spa_bootfs == dsobj && spa->spa_pool_props_object != 0) {
734 VERIFY(zap_remove(spa->spa_meta_objset,
735 spa->spa_pool_props_object,
736 zpool_prop_to_name(ZPOOL_PROP_BOOTFS), tx) == 0);
737 spa->spa_bootfs = 0;
738 }
739}
740
741/*ARGSUSED*/
742static int
743spa_change_guid_check(void *arg, dmu_tx_t *tx)
744{
745 uint64_t *newguid = arg;
746 spa_t *spa = dmu_tx_pool(tx)->dp_spa;
747 vdev_t *rvd = spa->spa_root_vdev;
748 uint64_t vdev_state;
749
750 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
751 vdev_state = rvd->vdev_state;
752 spa_config_exit(spa, SCL_STATE, FTAG);
753
754 if (vdev_state != VDEV_STATE_HEALTHY)
755 return (SET_ERROR(ENXIO));
756
757 ASSERT3U(spa_guid(spa), !=, *newguid);
758
759 return (0);
760}
761
762static void
763spa_change_guid_sync(void *arg, dmu_tx_t *tx)
764{
765 uint64_t *newguid = arg;
766 spa_t *spa = dmu_tx_pool(tx)->dp_spa;
767 uint64_t oldguid;
768 vdev_t *rvd = spa->spa_root_vdev;
769
770 oldguid = spa_guid(spa);
771
772 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
773 rvd->vdev_guid = *newguid;
774 rvd->vdev_guid_sum += (*newguid - oldguid);
775 vdev_config_dirty(rvd);
776 spa_config_exit(spa, SCL_STATE, FTAG);
777
778 spa_history_log_internal(spa, "guid change", tx, "old=%llu new=%llu",
779 oldguid, *newguid);
780}
781
782/*
783 * Change the GUID for the pool. This is done so that we can later
784 * re-import a pool built from a clone of our own vdevs. We will modify
785 * the root vdev's guid, our own pool guid, and then mark all of our
786 * vdevs dirty. Note that we must make sure that all our vdevs are
787 * online when we do this, or else any vdevs that weren't present
788 * would be orphaned from our pool. We are also going to issue a
789 * sysevent to update any watchers.
790 */
791int
792spa_change_guid(spa_t *spa)
793{
794 int error;
795 uint64_t guid;
796
797 mutex_enter(&spa->spa_vdev_top_lock);
798 mutex_enter(&spa_namespace_lock);
799 guid = spa_generate_guid(NULL);
800
801 error = dsl_sync_task(spa->spa_name, spa_change_guid_check,
802 spa_change_guid_sync, &guid, 5, ZFS_SPACE_CHECK_RESERVED);
803
804 if (error == 0) {
805 spa_config_sync(spa, B_FALSE, B_TRUE);
806 spa_event_notify(spa, NULL, ESC_ZFS_POOL_REGUID);
807 }
808
809 mutex_exit(&spa_namespace_lock);
810 mutex_exit(&spa->spa_vdev_top_lock);
811
812 return (error);
813}
814
815/*
816 * ==========================================================================
817 * SPA state manipulation (open/create/destroy/import/export)
818 * ==========================================================================
819 */
820
821static int
822spa_error_entry_compare(const void *a, const void *b)
823{
824 spa_error_entry_t *sa = (spa_error_entry_t *)a;
825 spa_error_entry_t *sb = (spa_error_entry_t *)b;
826 int ret;
827
828 ret = bcmp(&sa->se_bookmark, &sb->se_bookmark,
829 sizeof (zbookmark_phys_t));
830
831 if (ret < 0)
832 return (-1);
833 else if (ret > 0)
834 return (1);
835 else
836 return (0);
837}
838
839/*
840 * Utility function which retrieves copies of the current logs and
841 * re-initializes them in the process.
842 */
843void
844spa_get_errlists(spa_t *spa, avl_tree_t *last, avl_tree_t *scrub)
845{
846 ASSERT(MUTEX_HELD(&spa->spa_errlist_lock));
847
848 bcopy(&spa->spa_errlist_last, last, sizeof (avl_tree_t));
849 bcopy(&spa->spa_errlist_scrub, scrub, sizeof (avl_tree_t));
850
851 avl_create(&spa->spa_errlist_scrub,
852 spa_error_entry_compare, sizeof (spa_error_entry_t),
853 offsetof(spa_error_entry_t, se_avl));
854 avl_create(&spa->spa_errlist_last,
855 spa_error_entry_compare, sizeof (spa_error_entry_t),
856 offsetof(spa_error_entry_t, se_avl));
857}
858
859static void
860spa_taskqs_init(spa_t *spa, zio_type_t t, zio_taskq_type_t q)
861{
862 const zio_taskq_info_t *ztip = &zio_taskqs[t][q];
863 enum zti_modes mode = ztip->zti_mode;
864 uint_t value = ztip->zti_value;
865 uint_t count = ztip->zti_count;
866 spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
867 char name[32];
868 uint_t flags = 0;
869 boolean_t batch = B_FALSE;
870
871 if (mode == ZTI_MODE_NULL) {
872 tqs->stqs_count = 0;
873 tqs->stqs_taskq = NULL;
874 return;
875 }
876
877 ASSERT3U(count, >, 0);
878
879 tqs->stqs_count = count;
880 tqs->stqs_taskq = kmem_alloc(count * sizeof (taskq_t *), KM_SLEEP);
881
882 switch (mode) {
883 case ZTI_MODE_FIXED:
884 ASSERT3U(value, >=, 1);
885 value = MAX(value, 1);
886 break;
887
888 case ZTI_MODE_BATCH:
889 batch = B_TRUE;
890 flags |= TASKQ_THREADS_CPU_PCT;
891 value = zio_taskq_batch_pct;
892 break;
893
894 default:
895 panic("unrecognized mode for %s_%s taskq (%u:%u) in "
896 "spa_activate()",
897 zio_type_name[t], zio_taskq_types[q], mode, value);
898 break;
899 }
900
901 for (uint_t i = 0; i < count; i++) {
902 taskq_t *tq;
903
904 if (count > 1) {
905 (void) snprintf(name, sizeof (name), "%s_%s_%u",
906 zio_type_name[t], zio_taskq_types[q], i);
907 } else {
908 (void) snprintf(name, sizeof (name), "%s_%s",
909 zio_type_name[t], zio_taskq_types[q]);
910 }
911
912#ifdef SYSDC
913 if (zio_taskq_sysdc && spa->spa_proc != &p0) {
914 if (batch)
915 flags |= TASKQ_DC_BATCH;
916
917 tq = taskq_create_sysdc(name, value, 50, INT_MAX,
918 spa->spa_proc, zio_taskq_basedc, flags);
919 } else {
920#endif
921 pri_t pri = maxclsyspri;
922 /*
923 * The write issue taskq can be extremely CPU
924 * intensive. Run it at slightly lower priority
925 * than the other taskqs.
926 * FreeBSD notes:
927 * - numerically higher priorities are lower priorities;
928 * - if priorities divided by four (RQ_PPQ) are equal
929 * then a difference between them is insignificant.
930 */
931 if (t == ZIO_TYPE_WRITE && q == ZIO_TASKQ_ISSUE)
932#ifdef illumos
933 pri--;
934#else
935 pri += 4;
936#endif
937
938 tq = taskq_create_proc(name, value, pri, 50,
939 INT_MAX, spa->spa_proc, flags);
940#ifdef SYSDC
941 }
942#endif
943
944 tqs->stqs_taskq[i] = tq;
945 }
946}
947
948static void
949spa_taskqs_fini(spa_t *spa, zio_type_t t, zio_taskq_type_t q)
950{
951 spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
952
953 if (tqs->stqs_taskq == NULL) {
954 ASSERT0(tqs->stqs_count);
955 return;
956 }
957
958 for (uint_t i = 0; i < tqs->stqs_count; i++) {
959 ASSERT3P(tqs->stqs_taskq[i], !=, NULL);
960 taskq_destroy(tqs->stqs_taskq[i]);
961 }
962
963 kmem_free(tqs->stqs_taskq, tqs->stqs_count * sizeof (taskq_t *));
964 tqs->stqs_taskq = NULL;
965}
966
967/*
968 * Dispatch a task to the appropriate taskq for the ZFS I/O type and priority.
969 * Note that a type may have multiple discrete taskqs to avoid lock contention
970 * on the taskq itself. In that case we choose which taskq at random by using
971 * the low bits of gethrtime().
972 */
973void
974spa_taskq_dispatch_ent(spa_t *spa, zio_type_t t, zio_taskq_type_t q,
975 task_func_t *func, void *arg, uint_t flags, taskq_ent_t *ent)
976{
977 spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
978 taskq_t *tq;
979
980 ASSERT3P(tqs->stqs_taskq, !=, NULL);
981 ASSERT3U(tqs->stqs_count, !=, 0);
982
983 if (tqs->stqs_count == 1) {
984 tq = tqs->stqs_taskq[0];
985 } else {
986#ifdef _KERNEL
987 tq = tqs->stqs_taskq[cpu_ticks() % tqs->stqs_count];
988#else
989 tq = tqs->stqs_taskq[gethrtime() % tqs->stqs_count];
990#endif
991 }
992
993 taskq_dispatch_ent(tq, func, arg, flags, ent);
994}
995
996static void
997spa_create_zio_taskqs(spa_t *spa)
998{
999 for (int t = 0; t < ZIO_TYPES; t++) {
1000 for (int q = 0; q < ZIO_TASKQ_TYPES; q++) {
1001 spa_taskqs_init(spa, t, q);
1002 }
1003 }
1004}
1005
1006#ifdef _KERNEL
1007#ifdef SPA_PROCESS
1008static void
1009spa_thread(void *arg)
1010{
1011 callb_cpr_t cprinfo;
1012
1013 spa_t *spa = arg;
1014 user_t *pu = PTOU(curproc);
1015
1016 CALLB_CPR_INIT(&cprinfo, &spa->spa_proc_lock, callb_generic_cpr,
1017 spa->spa_name);
1018
1019 ASSERT(curproc != &p0);
1020 (void) snprintf(pu->u_psargs, sizeof (pu->u_psargs),
1021 "zpool-%s", spa->spa_name);
1022 (void) strlcpy(pu->u_comm, pu->u_psargs, sizeof (pu->u_comm));
1023
1024#ifdef PSRSET_BIND
1025 /* bind this thread to the requested psrset */
1026 if (zio_taskq_psrset_bind != PS_NONE) {
1027 pool_lock();
1028 mutex_enter(&cpu_lock);
1029 mutex_enter(&pidlock);
1030 mutex_enter(&curproc->p_lock);
1031
1032 if (cpupart_bind_thread(curthread, zio_taskq_psrset_bind,
1033 0, NULL, NULL) == 0) {
1034 curthread->t_bind_pset = zio_taskq_psrset_bind;
1035 } else {
1036 cmn_err(CE_WARN,
1037 "Couldn't bind process for zfs pool \"%s\" to "
1038 "pset %d\n", spa->spa_name, zio_taskq_psrset_bind);
1039 }
1040
1041 mutex_exit(&curproc->p_lock);
1042 mutex_exit(&pidlock);
1043 mutex_exit(&cpu_lock);
1044 pool_unlock();
1045 }
1046#endif
1047
1048#ifdef SYSDC
1049 if (zio_taskq_sysdc) {
1050 sysdc_thread_enter(curthread, 100, 0);
1051 }
1052#endif
1053
1054 spa->spa_proc = curproc;
1055 spa->spa_did = curthread->t_did;
1056
1057 spa_create_zio_taskqs(spa);
1058
1059 mutex_enter(&spa->spa_proc_lock);
1060 ASSERT(spa->spa_proc_state == SPA_PROC_CREATED);
1061
1062 spa->spa_proc_state = SPA_PROC_ACTIVE;
1063 cv_broadcast(&spa->spa_proc_cv);
1064
1065 CALLB_CPR_SAFE_BEGIN(&cprinfo);
1066 while (spa->spa_proc_state == SPA_PROC_ACTIVE)
1067 cv_wait(&spa->spa_proc_cv, &spa->spa_proc_lock);
1068 CALLB_CPR_SAFE_END(&cprinfo, &spa->spa_proc_lock);
1069
1070 ASSERT(spa->spa_proc_state == SPA_PROC_DEACTIVATE);
1071 spa->spa_proc_state = SPA_PROC_GONE;
1072 spa->spa_proc = &p0;
1073 cv_broadcast(&spa->spa_proc_cv);
1074 CALLB_CPR_EXIT(&cprinfo); /* drops spa_proc_lock */
1075
1076 mutex_enter(&curproc->p_lock);
1077 lwp_exit();
1078}
1079#endif /* SPA_PROCESS */
1080#endif
1081
1082/*
1083 * Activate an uninitialized pool.
1084 */
1085static void
1086spa_activate(spa_t *spa, int mode)
1087{
1088 ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);
1089
1090 spa->spa_state = POOL_STATE_ACTIVE;
1091 spa->spa_mode = mode;
1092
1093 spa->spa_normal_class = metaslab_class_create(spa, zfs_metaslab_ops);
1094 spa->spa_log_class = metaslab_class_create(spa, zfs_metaslab_ops);
1095
1096 /* Try to create a covering process */
1097 mutex_enter(&spa->spa_proc_lock);
1098 ASSERT(spa->spa_proc_state == SPA_PROC_NONE);
1099 ASSERT(spa->spa_proc == &p0);
1100 spa->spa_did = 0;
1101
1102#ifdef SPA_PROCESS
1103 /* Only create a process if we're going to be around a while. */
1104 if (spa_create_process && strcmp(spa->spa_name, TRYIMPORT_NAME) != 0) {
1105 if (newproc(spa_thread, (caddr_t)spa, syscid, maxclsyspri,
1106 NULL, 0) == 0) {
1107 spa->spa_proc_state = SPA_PROC_CREATED;
1108 while (spa->spa_proc_state == SPA_PROC_CREATED) {
1109 cv_wait(&spa->spa_proc_cv,
1110 &spa->spa_proc_lock);
1111 }
1112 ASSERT(spa->spa_proc_state == SPA_PROC_ACTIVE);
1113 ASSERT(spa->spa_proc != &p0);
1114 ASSERT(spa->spa_did != 0);
1115 } else {
1116#ifdef _KERNEL
1117 cmn_err(CE_WARN,
1118 "Couldn't create process for zfs pool \"%s\"\n",
1119 spa->spa_name);
1120#endif
1121 }
1122 }
1123#endif /* SPA_PROCESS */
1124 mutex_exit(&spa->spa_proc_lock);
1125
1126 /* If we didn't create a process, we need to create our taskqs. */
1127 ASSERT(spa->spa_proc == &p0);
1128 if (spa->spa_proc == &p0) {
1129 spa_create_zio_taskqs(spa);
1130 }
1131
1132 /*
1133 * Start TRIM thread.
1134 */
1135 trim_thread_create(spa);
1136
1137 list_create(&spa->spa_config_dirty_list, sizeof (vdev_t),
1138 offsetof(vdev_t, vdev_config_dirty_node));
1139 list_create(&spa->spa_evicting_os_list, sizeof (objset_t),
1140 offsetof(objset_t, os_evicting_node));
1141 list_create(&spa->spa_state_dirty_list, sizeof (vdev_t),
1142 offsetof(vdev_t, vdev_state_dirty_node));
1143
1144 txg_list_create(&spa->spa_vdev_txg_list,
1145 offsetof(struct vdev, vdev_txg_node));
1146
1147 avl_create(&spa->spa_errlist_scrub,
1148 spa_error_entry_compare, sizeof (spa_error_entry_t),
1149 offsetof(spa_error_entry_t, se_avl));
1150 avl_create(&spa->spa_errlist_last,
1151 spa_error_entry_compare, sizeof (spa_error_entry_t),
1152 offsetof(spa_error_entry_t, se_avl));
1153}
1154
1155/*
1156 * Opposite of spa_activate().
1157 */
1158static void
1159spa_deactivate(spa_t *spa)
1160{
1161 ASSERT(spa->spa_sync_on == B_FALSE);
1162 ASSERT(spa->spa_dsl_pool == NULL);
1163 ASSERT(spa->spa_root_vdev == NULL);
1164 ASSERT(spa->spa_async_zio_root == NULL);
1165 ASSERT(spa->spa_state != POOL_STATE_UNINITIALIZED);
1166
1167 /*
1168 * Stop TRIM thread in case spa_unload() wasn't called directly
1169 * before spa_deactivate().
1170 */
1171 trim_thread_destroy(spa);
1172
1173 spa_evicting_os_wait(spa);
1174
1175 txg_list_destroy(&spa->spa_vdev_txg_list);
1176
1177 list_destroy(&spa->spa_config_dirty_list);
1178 list_destroy(&spa->spa_evicting_os_list);
1179 list_destroy(&spa->spa_state_dirty_list);
1180
1181 for (int t = 0; t < ZIO_TYPES; t++) {
1182 for (int q = 0; q < ZIO_TASKQ_TYPES; q++) {
1183 spa_taskqs_fini(spa, t, q);
1184 }
1185 }
1186
1187 metaslab_class_destroy(spa->spa_normal_class);
1188 spa->spa_normal_class = NULL;
1189
1190 metaslab_class_destroy(spa->spa_log_class);
1191 spa->spa_log_class = NULL;
1192
1193 /*
1194 * If this was part of an import or the open otherwise failed, we may
1195 * still have errors left in the queues. Empty them just in case.
1196 */
1197 spa_errlog_drain(spa);
1198
1199 avl_destroy(&spa->spa_errlist_scrub);
1200 avl_destroy(&spa->spa_errlist_last);
1201
1202 spa->spa_state = POOL_STATE_UNINITIALIZED;
1203
1204 mutex_enter(&spa->spa_proc_lock);
1205 if (spa->spa_proc_state != SPA_PROC_NONE) {
1206 ASSERT(spa->spa_proc_state == SPA_PROC_ACTIVE);
1207 spa->spa_proc_state = SPA_PROC_DEACTIVATE;
1208 cv_broadcast(&spa->spa_proc_cv);
1209 while (spa->spa_proc_state == SPA_PROC_DEACTIVATE) {
1210 ASSERT(spa->spa_proc != &p0);
1211 cv_wait(&spa->spa_proc_cv, &spa->spa_proc_lock);
1212 }
1213 ASSERT(spa->spa_proc_state == SPA_PROC_GONE);
1214 spa->spa_proc_state = SPA_PROC_NONE;
1215 }
1216 ASSERT(spa->spa_proc == &p0);
1217 mutex_exit(&spa->spa_proc_lock);
1218
1219#ifdef SPA_PROCESS
1220 /*
1221 * We want to make sure spa_thread() has actually exited the ZFS
1222 * module, so that the module can't be unloaded out from underneath
1223 * it.
1224 */
1225 if (spa->spa_did != 0) {
1226 thread_join(spa->spa_did);
1227 spa->spa_did = 0;
1228 }
1229#endif /* SPA_PROCESS */
1230}
1231
1232/*
1233 * Verify a pool configuration, and construct the vdev tree appropriately. This
1234 * will create all the necessary vdevs in the appropriate layout, with each vdev
1235 * in the CLOSED state. This will prep the pool before open/creation/import.
1236 * All vdev validation is done by the vdev_alloc() routine.
1237 */
1238static int
1239spa_config_parse(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent,
1240 uint_t id, int atype)
1241{
1242 nvlist_t **child;
1243 uint_t children;
1244 int error;
1245
1246 if ((error = vdev_alloc(spa, vdp, nv, parent, id, atype)) != 0)
1247 return (error);
1248
1249 if ((*vdp)->vdev_ops->vdev_op_leaf)
1250 return (0);
1251
1252 error = nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
1253 &child, &children);
1254
1255 if (error == ENOENT)
1256 return (0);
1257
1258 if (error) {
1259 vdev_free(*vdp);
1260 *vdp = NULL;
1261 return (SET_ERROR(EINVAL));
1262 }
1263
1264 for (int c = 0; c < children; c++) {
1265 vdev_t *vd;
1266 if ((error = spa_config_parse(spa, &vd, child[c], *vdp, c,
1267 atype)) != 0) {
1268 vdev_free(*vdp);
1269 *vdp = NULL;
1270 return (error);
1271 }
1272 }
1273
1274 ASSERT(*vdp != NULL);
1275
1276 return (0);
1277}
1278
1279/*
1280 * Opposite of spa_load().
1281 */
1282static void
1283spa_unload(spa_t *spa)
1284{
1285 int i;
1286
1287 ASSERT(MUTEX_HELD(&spa_namespace_lock));
1288
1289 /*
1290 * Stop TRIM thread.
1291 */
1292 trim_thread_destroy(spa);
1293
1294 /*
1295 * Stop async tasks.
1296 */
1297 spa_async_suspend(spa);
1298
1299 /*
1300 * Stop syncing.
1301 */
1302 if (spa->spa_sync_on) {
1303 txg_sync_stop(spa->spa_dsl_pool);
1304 spa->spa_sync_on = B_FALSE;
1305 }
1306
1307 /*
1308 * Even though vdev_free() also calls vdev_metaslab_fini, we need
1309 * to call it earlier, before we wait for async i/o to complete.
1310 * This ensures that there is no async metaslab prefetching, by
1311 * calling taskq_wait(mg_taskq).
1312 */
1313 if (spa->spa_root_vdev != NULL) {
1314 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
1315 for (int c = 0; c < spa->spa_root_vdev->vdev_children; c++)
1316 vdev_metaslab_fini(spa->spa_root_vdev->vdev_child[c]);
1317 spa_config_exit(spa, SCL_ALL, FTAG);
1318 }
1319
1320 /*
1321 * Wait for any outstanding async I/O to complete.
1322 */
1323 if (spa->spa_async_zio_root != NULL) {
1324 for (int i = 0; i < max_ncpus; i++)
1325 (void) zio_wait(spa->spa_async_zio_root[i]);
1326 kmem_free(spa->spa_async_zio_root, max_ncpus * sizeof (void *));
1327 spa->spa_async_zio_root = NULL;
1328 }
1329
1330 bpobj_close(&spa->spa_deferred_bpobj);
1331
1332 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
1333
1334 /*
1335 * Close all vdevs.
1336 */
1337 if (spa->spa_root_vdev)
1338 vdev_free(spa->spa_root_vdev);
1339 ASSERT(spa->spa_root_vdev == NULL);
1340
1341 /*
1342 * Close the dsl pool.
1343 */
1344 if (spa->spa_dsl_pool) {
1345 dsl_pool_close(spa->spa_dsl_pool);
1346 spa->spa_dsl_pool = NULL;
1347 spa->spa_meta_objset = NULL;
1348 }
1349
1350 ddt_unload(spa);
1351
1352 /*
1353 * Drop and purge level 2 cache
1354 */
1355 spa_l2cache_drop(spa);
1356
1357 for (i = 0; i < spa->spa_spares.sav_count; i++)
1358 vdev_free(spa->spa_spares.sav_vdevs[i]);
1359 if (spa->spa_spares.sav_vdevs) {
1360 kmem_free(spa->spa_spares.sav_vdevs,
1361 spa->spa_spares.sav_count * sizeof (void *));
1362 spa->spa_spares.sav_vdevs = NULL;
1363 }
1364 if (spa->spa_spares.sav_config) {
1365 nvlist_free(spa->spa_spares.sav_config);
1366 spa->spa_spares.sav_config = NULL;
1367 }
1368 spa->spa_spares.sav_count = 0;
1369
1370 for (i = 0; i < spa->spa_l2cache.sav_count; i++) {
1371 vdev_clear_stats(spa->spa_l2cache.sav_vdevs[i]);
1372 vdev_free(spa->spa_l2cache.sav_vdevs[i]);
1373 }
1374 if (spa->spa_l2cache.sav_vdevs) {
1375 kmem_free(spa->spa_l2cache.sav_vdevs,
1376 spa->spa_l2cache.sav_count * sizeof (void *));
1377 spa->spa_l2cache.sav_vdevs = NULL;
1378 }
1379 if (spa->spa_l2cache.sav_config) {
1380 nvlist_free(spa->spa_l2cache.sav_config);
1381 spa->spa_l2cache.sav_config = NULL;
1382 }
1383 spa->spa_l2cache.sav_count = 0;
1384
1385 spa->spa_async_suspended = 0;
1386
1387 if (spa->spa_comment != NULL) {
1388 spa_strfree(spa->spa_comment);
1389 spa->spa_comment = NULL;
1390 }
1391
1392 spa_config_exit(spa, SCL_ALL, FTAG);
1393}
1394
1395/*
1396 * Load (or re-load) the current list of vdevs describing the active spares for
1397 * this pool. When this is called, we have some form of basic information in
1398 * 'spa_spares.sav_config'. We parse this into vdevs, try to open them, and
1399 * then re-generate a more complete list including status information.
1400 */
1401static void
1402spa_load_spares(spa_t *spa)
1403{
1404 nvlist_t **spares;
1405 uint_t nspares;
1406 int i;
1407 vdev_t *vd, *tvd;
1408
1409 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
1410
1411 /*
1412 * First, close and free any existing spare vdevs.
1413 */
1414 for (i = 0; i < spa->spa_spares.sav_count; i++) {
1415 vd = spa->spa_spares.sav_vdevs[i];
1416
1417 /* Undo the call to spa_activate() below */
1418 if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
1419 B_FALSE)) != NULL && tvd->vdev_isspare)
1420 spa_spare_remove(tvd);
1421 vdev_close(vd);
1422 vdev_free(vd);
1423 }
1424
1425 if (spa->spa_spares.sav_vdevs)
1426 kmem_free(spa->spa_spares.sav_vdevs,
1427 spa->spa_spares.sav_count * sizeof (void *));
1428
1429 if (spa->spa_spares.sav_config == NULL)
1430 nspares = 0;
1431 else
1432 VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
1433 ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);
1434
1435 spa->spa_spares.sav_count = (int)nspares;
1436 spa->spa_spares.sav_vdevs = NULL;
1437
1438 if (nspares == 0)
1439 return;
1440
1441 /*
1442 * Construct the array of vdevs, opening them to get status in the
1443 * process. For each spare, there are potentially two different vdev_t
1444 * structures associated with it: one in the list of spares (used only
1445 * for basic validation purposes) and one in the active vdev
1446 * configuration (if it's spared in). During this phase we open and
1447 * validate each vdev on the spare list. If the vdev also exists in the
1448 * active configuration, then we also mark this vdev as an active spare.
1449 */
1450 spa->spa_spares.sav_vdevs = kmem_alloc(nspares * sizeof (void *),
1451 KM_SLEEP);
1452 for (i = 0; i < spa->spa_spares.sav_count; i++) {
1453 VERIFY(spa_config_parse(spa, &vd, spares[i], NULL, 0,
1454 VDEV_ALLOC_SPARE) == 0);
1455 ASSERT(vd != NULL);
1456
1457 spa->spa_spares.sav_vdevs[i] = vd;
1458
1459 if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
1460 B_FALSE)) != NULL) {
1461 if (!tvd->vdev_isspare)
1462 spa_spare_add(tvd);
1463
1464 /*
1465 * We only mark the spare active if we were successfully
1466 * able to load the vdev. Otherwise, importing a pool
1467 * with a bad active spare would result in strange
1468 * behavior, because multiple pools would think the spare
1469 * is actively in use.
1470 *
1471 * There is a vulnerability here to an equally bizarre
1472 * circumstance, where a dead active spare is later
1473 * brought back to life (onlined or otherwise). Given
1474 * the rarity of this scenario, and the extra complexity
1475 * it adds, we ignore the possibility.
1476 */
1477 if (!vdev_is_dead(tvd))
1478 spa_spare_activate(tvd);
1479 }
1480
1481 vd->vdev_top = vd;
1482 vd->vdev_aux = &spa->spa_spares;
1483
1484 if (vdev_open(vd) != 0)
1485 continue;
1486
1487 if (vdev_validate_aux(vd) == 0)
1488 spa_spare_add(vd);
1489 }
1490
1491 /*
1492 * Recompute the stashed list of spares, with status information
1493 * this time.
1494 */
1495 VERIFY(nvlist_remove(spa->spa_spares.sav_config, ZPOOL_CONFIG_SPARES,
1496 DATA_TYPE_NVLIST_ARRAY) == 0);
1497
1498 spares = kmem_alloc(spa->spa_spares.sav_count * sizeof (void *),
1499 KM_SLEEP);
1500 for (i = 0; i < spa->spa_spares.sav_count; i++)
1501 spares[i] = vdev_config_generate(spa,
1502 spa->spa_spares.sav_vdevs[i], B_TRUE, VDEV_CONFIG_SPARE);
1503 VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
1504 ZPOOL_CONFIG_SPARES, spares, spa->spa_spares.sav_count) == 0);
1505 for (i = 0; i < spa->spa_spares.sav_count; i++)
1506 nvlist_free(spares[i]);
1507 kmem_free(spares, spa->spa_spares.sav_count * sizeof (void *));
1508}
1509
1510/*
1511 * Load (or re-load) the current list of vdevs describing the active l2cache for
1512 * this pool. When this is called, we have some form of basic information in
1513 * 'spa_l2cache.sav_config'. We parse this into vdevs, try to open them, and
1514 * then re-generate a more complete list including status information.
1515 * Devices which are already active have their details maintained, and are
1516 * not re-opened.
1517 */
1518static void
1519spa_load_l2cache(spa_t *spa)
1520{
1521 nvlist_t **l2cache;
1522 uint_t nl2cache;
1523 int i, j, oldnvdevs;
1524 uint64_t guid;
1525 vdev_t *vd, **oldvdevs, **newvdevs;
1526 spa_aux_vdev_t *sav = &spa->spa_l2cache;
1527
1528 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
1529
1530 if (sav->sav_config != NULL) {
1531 VERIFY(nvlist_lookup_nvlist_array(sav->sav_config,
1532 ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
1533 newvdevs = kmem_alloc(nl2cache * sizeof (void *), KM_SLEEP);
1534 } else {
1535 nl2cache = 0;
1536 newvdevs = NULL;
1537 }
1538
1539 oldvdevs = sav->sav_vdevs;
1540 oldnvdevs = sav->sav_count;
1541 sav->sav_vdevs = NULL;
1542 sav->sav_count = 0;
1543
1544 /*
1545 * Process new nvlist of vdevs.
1546 */
1547 for (i = 0; i < nl2cache; i++) {
1548 VERIFY(nvlist_lookup_uint64(l2cache[i], ZPOOL_CONFIG_GUID,
1549 &guid) == 0);
1550
1551 newvdevs[i] = NULL;
1552 for (j = 0; j < oldnvdevs; j++) {
1553 vd = oldvdevs[j];
1554 if (vd != NULL && guid == vd->vdev_guid) {
1555 /*
1556 * Retain previous vdev for add/remove ops.
1557 */
1558 newvdevs[i] = vd;
1559 oldvdevs[j] = NULL;
1560 break;
1561 }
1562 }
1563
1564 if (newvdevs[i] == NULL) {
1565 /*
1566 * Create new vdev
1567 */
1568 VERIFY(spa_config_parse(spa, &vd, l2cache[i], NULL, 0,
1569 VDEV_ALLOC_L2CACHE) == 0);
1570 ASSERT(vd != NULL);
1571 newvdevs[i] = vd;
1572
1573 /*
1574 * Commit this vdev as an l2cache device,
1575 * even if it fails to open.
1576 */
1577 spa_l2cache_add(vd);
1578
1579 vd->vdev_top = vd;
1580 vd->vdev_aux = sav;
1581
1582 spa_l2cache_activate(vd);
1583
1584 if (vdev_open(vd) != 0)
1585 continue;
1586
1587 (void) vdev_validate_aux(vd);
1588
1589 if (!vdev_is_dead(vd))
1590 l2arc_add_vdev(spa, vd);
1591 }
1592 }
1593
1594 /*
1595 * Purge vdevs that were dropped
1596 */
1597 for (i = 0; i < oldnvdevs; i++) {
1598 uint64_t pool;
1599
1600 vd = oldvdevs[i];
1601 if (vd != NULL) {
1602 ASSERT(vd->vdev_isl2cache);
1603
1604 if (spa_l2cache_exists(vd->vdev_guid, &pool) &&
1605 pool != 0ULL && l2arc_vdev_present(vd))
1606 l2arc_remove_vdev(vd);
1607 vdev_clear_stats(vd);
1608 vdev_free(vd);
1609 }
1610 }
1611
1612 if (oldvdevs)
1613 kmem_free(oldvdevs, oldnvdevs * sizeof (void *));
1614
1615 if (sav->sav_config == NULL)
1616 goto out;
1617
1618 sav->sav_vdevs = newvdevs;
1619 sav->sav_count = (int)nl2cache;
1620
1621 /*
1622 * Recompute the stashed list of l2cache devices, with status
1623 * information this time.
1624 */
1625 VERIFY(nvlist_remove(sav->sav_config, ZPOOL_CONFIG_L2CACHE,
1626 DATA_TYPE_NVLIST_ARRAY) == 0);
1627
1628 l2cache = kmem_alloc(sav->sav_count * sizeof (void *), KM_SLEEP);
1629 for (i = 0; i < sav->sav_count; i++)
1630 l2cache[i] = vdev_config_generate(spa,
1631 sav->sav_vdevs[i], B_TRUE, VDEV_CONFIG_L2CACHE);
1632 VERIFY(nvlist_add_nvlist_array(sav->sav_config,
1633 ZPOOL_CONFIG_L2CACHE, l2cache, sav->sav_count) == 0);
1634out:
1635 for (i = 0; i < sav->sav_count; i++)
1636 nvlist_free(l2cache[i]);
1637 if (sav->sav_count)
1638 kmem_free(l2cache, sav->sav_count * sizeof (void *));
1639}
1640
1641static int
1642load_nvlist(spa_t *spa, uint64_t obj, nvlist_t **value)
1643{
1644 dmu_buf_t *db;
1645 char *packed = NULL;
1646 size_t nvsize = 0;
1647 int error;
1648 *value = NULL;
1649
1650 error = dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db);
1651 if (error != 0)
1652 return (error);
1653
1654 nvsize = *(uint64_t *)db->db_data;
1655 dmu_buf_rele(db, FTAG);
1656
1657 packed = kmem_alloc(nvsize, KM_SLEEP);
1658 error = dmu_read(spa->spa_meta_objset, obj, 0, nvsize, packed,
1659 DMU_READ_PREFETCH);
1660 if (error == 0)
1661 error = nvlist_unpack(packed, nvsize, value, 0);
1662 kmem_free(packed, nvsize);
1663
1664 return (error);
1665}
1666
1667/*
1668 * Checks to see if the given vdev could not be opened, in which case we post a
1669 * sysevent to notify the autoreplace code that the device has been removed.
1670 */
1671static void
1672spa_check_removed(vdev_t *vd)
1673{
1674 for (int c = 0; c < vd->vdev_children; c++)
1675 spa_check_removed(vd->vdev_child[c]);
1676
1677 if (vd->vdev_ops->vdev_op_leaf && vdev_is_dead(vd) &&
1678 !vd->vdev_ishole) {
1679 zfs_post_autoreplace(vd->vdev_spa, vd);
1680 spa_event_notify(vd->vdev_spa, vd, ESC_ZFS_VDEV_CHECK);
1681 }
1682}
1683
1684static void
1685spa_config_valid_zaps(vdev_t *vd, vdev_t *mvd)
1686{
1687 ASSERT3U(vd->vdev_children, ==, mvd->vdev_children);
1688
1689 vd->vdev_top_zap = mvd->vdev_top_zap;
1690 vd->vdev_leaf_zap = mvd->vdev_leaf_zap;
1691
1692 for (uint64_t i = 0; i < vd->vdev_children; i++) {
1693 spa_config_valid_zaps(vd->vdev_child[i], mvd->vdev_child[i]);
1694 }
1695}
1696
1697/*
1698 * Validate the current config against the MOS config
1699 */
1700static boolean_t
1701spa_config_valid(spa_t *spa, nvlist_t *config)
1702{
1703 vdev_t *mrvd, *rvd = spa->spa_root_vdev;
1704 nvlist_t *nv;
1705
1706 VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nv) == 0);
1707
1708 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
1709 VERIFY(spa_config_parse(spa, &mrvd, nv, NULL, 0, VDEV_ALLOC_LOAD) == 0);
1710
1711 ASSERT3U(rvd->vdev_children, ==, mrvd->vdev_children);
1712
1713 /*
1714 * If we're doing a normal import, then build up any additional
1715 * diagnostic information about missing devices in this config.
1716 * We'll pass this up to the user for further processing.
1717 */
1718 if (!(spa->spa_import_flags & ZFS_IMPORT_MISSING_LOG)) {
1719 nvlist_t **child, *nv;
1720 uint64_t idx = 0;
1721
1722 child = kmem_alloc(rvd->vdev_children * sizeof (nvlist_t **),
1723 KM_SLEEP);
1724 VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);
1725
1726 for (int c = 0; c < rvd->vdev_children; c++) {
1727 vdev_t *tvd = rvd->vdev_child[c];
1728 vdev_t *mtvd = mrvd->vdev_child[c];
1729
1730 if (tvd->vdev_ops == &vdev_missing_ops &&
1731 mtvd->vdev_ops != &vdev_missing_ops &&
1732 mtvd->vdev_islog)
1733 child[idx++] = vdev_config_generate(spa, mtvd,
1734 B_FALSE, 0);
1735 }
1736
1737 if (idx) {
1738 VERIFY(nvlist_add_nvlist_array(nv,
1739 ZPOOL_CONFIG_CHILDREN, child, idx) == 0);
1740 VERIFY(nvlist_add_nvlist(spa->spa_load_info,
1741 ZPOOL_CONFIG_MISSING_DEVICES, nv) == 0);
1742
1743 for (int i = 0; i < idx; i++)
1744 nvlist_free(child[i]);
1745 }
1746 nvlist_free(nv);
1747 kmem_free(child, rvd->vdev_children * sizeof (char **));
1748 }
1749
1750 /*
1751 * Compare the root vdev tree with the information we have
1752 * from the MOS config (mrvd). Check each top-level vdev
1753 * with the corresponding MOS config top-level (mtvd).
1754 */
1755 for (int c = 0; c < rvd->vdev_children; c++) {
1756 vdev_t *tvd = rvd->vdev_child[c];
1757 vdev_t *mtvd = mrvd->vdev_child[c];
1758
1759 /*
1760 * Resolve any "missing" vdevs in the current configuration.
1761 * If we find that the MOS config has more accurate information
1762 * about the top-level vdev then use that vdev instead.
1763 */
1764 if (tvd->vdev_ops == &vdev_missing_ops &&
1765 mtvd->vdev_ops != &vdev_missing_ops) {
1766
1767 if (!(spa->spa_import_flags & ZFS_IMPORT_MISSING_LOG))
1768 continue;
1769
1770 /*
1771 * Device specific actions.
1772 */
1773 if (mtvd->vdev_islog) {
1774 spa_set_log_state(spa, SPA_LOG_CLEAR);
1775 } else {
1776 /*
1777 * XXX - once we have 'readonly' pool
1778 * support we should be able to handle
1779 * missing data devices by transitioning
1780 * the pool to readonly.
1781 */
1782 continue;
1783 }
1784
1785 /*
1786 * Swap the missing vdev with the data we were
1787 * able to obtain from the MOS config.
1788 */
1789 vdev_remove_child(rvd, tvd);
1790 vdev_remove_child(mrvd, mtvd);
1791
1792 vdev_add_child(rvd, mtvd);
1793 vdev_add_child(mrvd, tvd);
1794
1795 spa_config_exit(spa, SCL_ALL, FTAG);
1796 vdev_load(mtvd);
1797 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
1798
1799 vdev_reopen(rvd);
1800 } else {
1801 if (mtvd->vdev_islog) {
1802 /*
1803 * Load the slog device's state from the MOS
1804 * config since it's possible that the label
1805 * does not contain the most up-to-date
1806 * information.
1807 */
1808 vdev_load_log_state(tvd, mtvd);
1809 vdev_reopen(tvd);
1810 }
1811
1812 /*
1813 * Per-vdev ZAP info is stored exclusively in the MOS.
1814 */
1815 spa_config_valid_zaps(tvd, mtvd);
1816 }
1817 }
1818
1819 vdev_free(mrvd);
1820 spa_config_exit(spa, SCL_ALL, FTAG);
1821
1822 /*
1823 * Ensure we were able to validate the config.
1824 */
1825 return (rvd->vdev_guid_sum == spa->spa_uberblock.ub_guid_sum);
1826}
1827
1828/*
1829 * Check for missing log devices
1830 */
1831static boolean_t
1832spa_check_logs(spa_t *spa)
1833{
1834 boolean_t rv = B_FALSE;
1835 dsl_pool_t *dp = spa_get_dsl(spa);
1836
1837 switch (spa->spa_log_state) {
1838 case SPA_LOG_MISSING:
1839 /* need to recheck in case slog has been restored */
1840 case SPA_LOG_UNKNOWN:
1841 rv = (dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
1842 zil_check_log_chain, NULL, DS_FIND_CHILDREN) != 0);
1843 if (rv)
1844 spa_set_log_state(spa, SPA_LOG_MISSING);
1845 break;
1846 }
1847 return (rv);
1848}
1849
1850static boolean_t
1851spa_passivate_log(spa_t *spa)
1852{
1853 vdev_t *rvd = spa->spa_root_vdev;
1854 boolean_t slog_found = B_FALSE;
1855
1856 ASSERT(spa_config_held(spa, SCL_ALLOC, RW_WRITER));
1857
1858 if (!spa_has_slogs(spa))
1859 return (B_FALSE);
1860
1861 for (int c = 0; c < rvd->vdev_children; c++) {
1862 vdev_t *tvd = rvd->vdev_child[c];
1863 metaslab_group_t *mg = tvd->vdev_mg;
1864
1865 if (tvd->vdev_islog) {
1866 metaslab_group_passivate(mg);
1867 slog_found = B_TRUE;
1868 }
1869 }
1870
1871 return (slog_found);
1872}
1873
1874static void
1875spa_activate_log(spa_t *spa)
1876{
1877 vdev_t *rvd = spa->spa_root_vdev;
1878
1879 ASSERT(spa_config_held(spa, SCL_ALLOC, RW_WRITER));
1880
1881 for (int c = 0; c < rvd->vdev_children; c++) {
1882 vdev_t *tvd = rvd->vdev_child[c];
1883 metaslab_group_t *mg = tvd->vdev_mg;
1884
1885 if (tvd->vdev_islog)
1886 metaslab_group_activate(mg);
1887 }
1888}
1889
1890int
1891spa_offline_log(spa_t *spa)
1892{
1893 int error;
1894
1895 error = dmu_objset_find(spa_name(spa), zil_vdev_offline,
1896 NULL, DS_FIND_CHILDREN);
1897 if (error == 0) {
1898 /*
1899 * We successfully offlined the log device, sync out the
1900 * current txg so that the "stubby" block can be removed
1901 * by zil_sync().
1902 */
1903 txg_wait_synced(spa->spa_dsl_pool, 0);
1904 }
1905 return (error);
1906}
1907
1908static void
1909spa_aux_check_removed(spa_aux_vdev_t *sav)
1910{
1911 int i;
1912
1913 for (i = 0; i < sav->sav_count; i++)
1914 spa_check_removed(sav->sav_vdevs[i]);
1915}
1916
1917void
1918spa_claim_notify(zio_t *zio)
1919{
1920 spa_t *spa = zio->io_spa;
1921
1922 if (zio->io_error)
1923 return;
1924
1925 mutex_enter(&spa->spa_props_lock); /* any mutex will do */
1926 if (spa->spa_claim_max_txg < zio->io_bp->blk_birth)
1927 spa->spa_claim_max_txg = zio->io_bp->blk_birth;
1928 mutex_exit(&spa->spa_props_lock);
1929}
1930
1931typedef struct spa_load_error {
1932 uint64_t sle_meta_count;
1933 uint64_t sle_data_count;
1934} spa_load_error_t;
1935
1936static void
1937spa_load_verify_done(zio_t *zio)
1938{
1939 blkptr_t *bp = zio->io_bp;
1940 spa_load_error_t *sle = zio->io_private;
1941 dmu_object_type_t type = BP_GET_TYPE(bp);
1942 int error = zio->io_error;
1943 spa_t *spa = zio->io_spa;
1944
1945 if (error) {
1946 if ((BP_GET_LEVEL(bp) != 0 || DMU_OT_IS_METADATA(type)) &&
1947 type != DMU_OT_INTENT_LOG)
1948 atomic_inc_64(&sle->sle_meta_count);
1949 else
1950 atomic_inc_64(&sle->sle_data_count);
1951 }
1952 zio_data_buf_free(zio->io_data, zio->io_size);
1953
1954 mutex_enter(&spa->spa_scrub_lock);
1955 spa->spa_scrub_inflight--;
1956 cv_broadcast(&spa->spa_scrub_io_cv);
1957 mutex_exit(&spa->spa_scrub_lock);
1958}
1959
1960/*
1961 * Maximum number of concurrent scrub i/os to create while verifying
1962 * a pool during import.
1963 */
1964int spa_load_verify_maxinflight = 10000;
1965boolean_t spa_load_verify_metadata = B_TRUE;
1966boolean_t spa_load_verify_data = B_TRUE;
1967
1968SYSCTL_INT(_vfs_zfs, OID_AUTO, spa_load_verify_maxinflight, CTLFLAG_RWTUN,
1969 &spa_load_verify_maxinflight, 0,
1970 "Maximum number of concurrent scrub I/Os to create while verifying a "
1971 "pool while importing it");
1972
1973SYSCTL_INT(_vfs_zfs, OID_AUTO, spa_load_verify_metadata, CTLFLAG_RWTUN,
1974 &spa_load_verify_metadata, 0,
1975 "Check metadata on import?");
1976
1977SYSCTL_INT(_vfs_zfs, OID_AUTO, spa_load_verify_data, CTLFLAG_RWTUN,
1978 &spa_load_verify_data, 0,
1979 "Check user data on import?");
1980
1981/*ARGSUSED*/
1982static int
1983spa_load_verify_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
1984 const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
1985{
1986 if (bp == NULL || BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp))
1987 return (0);
1988 /*
1989 * Note: normally this routine will not be called if
1990 * spa_load_verify_metadata is not set. However, it may be useful
1991 * to manually set the flag after the traversal has begun.
1992 */
1993 if (!spa_load_verify_metadata)
1994 return (0);
1995 if (BP_GET_BUFC_TYPE(bp) == ARC_BUFC_DATA && !spa_load_verify_data)
1996 return (0);
1997
1998 zio_t *rio = arg;
1999 size_t size = BP_GET_PSIZE(bp);
2000 void *data = zio_data_buf_alloc(size);
2001
2002 mutex_enter(&spa->spa_scrub_lock);
2003 while (spa->spa_scrub_inflight >= spa_load_verify_maxinflight)
2004 cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
2005 spa->spa_scrub_inflight++;
2006 mutex_exit(&spa->spa_scrub_lock);
2007
2008 zio_nowait(zio_read(rio, spa, bp, data, size,
2009 spa_load_verify_done, rio->io_private, ZIO_PRIORITY_SCRUB,
2010 ZIO_FLAG_SPECULATIVE | ZIO_FLAG_CANFAIL |
2011 ZIO_FLAG_SCRUB | ZIO_FLAG_RAW, zb));
2012 return (0);
2013}
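/*
 * spa_load_verify_cb() and spa_load_verify_done() form a simple
 * producer/consumer throttle: the callback blocks on spa_scrub_io_cv once
 * spa_load_verify_maxinflight reads are outstanding, and the completion
 * routine decrements spa_scrub_inflight and broadcasts to wake the next
 * issuer.
 */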
2014
2015/* ARGSUSED */
2016int
2017verify_dataset_name_len(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg)
2018{
2019 if (dsl_dataset_namelen(ds) >= ZFS_MAX_DATASET_NAME_LEN)
2020 return (SET_ERROR(ENAMETOOLONG));
2021
2022 return (0);
2023}
2024
2025static int
2026spa_load_verify(spa_t *spa)
2027{
2028 zio_t *rio;
2029 spa_load_error_t sle = { 0 };
2030 zpool_rewind_policy_t policy;
2031 boolean_t verify_ok = B_FALSE;
2032 int error = 0;
2033
2034 zpool_get_rewind_policy(spa->spa_config, &policy);
2035
2036 if (policy.zrp_request & ZPOOL_NEVER_REWIND)
2037 return (0);
2038
2039 dsl_pool_config_enter(spa->spa_dsl_pool, FTAG);
2040 error = dmu_objset_find_dp(spa->spa_dsl_pool,
2041 spa->spa_dsl_pool->dp_root_dir_obj, verify_dataset_name_len, NULL,
2042 DS_FIND_CHILDREN);
2043 dsl_pool_config_exit(spa->spa_dsl_pool, FTAG);
2044 if (error != 0)
2045 return (error);
2046
2047 rio = zio_root(spa, NULL, &sle,
2048 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE);
2049
2050 if (spa_load_verify_metadata) {
2051 error = traverse_pool(spa, spa->spa_verify_min_txg,
2052 TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA,
2053 spa_load_verify_cb, rio);
2054 }
2055
2056 (void) zio_wait(rio);
2057
2058 spa->spa_load_meta_errors = sle.sle_meta_count;
2059 spa->spa_load_data_errors = sle.sle_data_count;
2060
2061 if (!error && sle.sle_meta_count <= policy.zrp_maxmeta &&
2062 sle.sle_data_count <= policy.zrp_maxdata) {
2063 int64_t loss = 0;
2064
2065 verify_ok = B_TRUE;
2066 spa->spa_load_txg = spa->spa_uberblock.ub_txg;
2067 spa->spa_load_txg_ts = spa->spa_uberblock.ub_timestamp;
2068
2069 loss = spa->spa_last_ubsync_txg_ts - spa->spa_load_txg_ts;
2070 VERIFY(nvlist_add_uint64(spa->spa_load_info,
2071 ZPOOL_CONFIG_LOAD_TIME, spa->spa_load_txg_ts) == 0);
2072 VERIFY(nvlist_add_int64(spa->spa_load_info,
2073 ZPOOL_CONFIG_REWIND_TIME, loss) == 0);
2074 VERIFY(nvlist_add_uint64(spa->spa_load_info,
2075 ZPOOL_CONFIG_LOAD_DATA_ERRORS, sle.sle_data_count) == 0);
2076 } else {
2077 spa->spa_load_max_txg = spa->spa_uberblock.ub_txg;
2078 }
2079
2080 if (error) {
2081 if (error != ENXIO && error != EIO)
2082 error = SET_ERROR(EIO);
2083 return (error);
2084 }
2085
2086 return (verify_ok ? 0 : EIO);
2087}
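/*
 * Note that the traversal errors are compared against the caller-supplied
 * rewind policy: if sle_meta_count or sle_data_count exceeds its limit,
 * verify_ok stays false and spa_load_verify() returns EIO even though the
 * traversal itself may have completed, while spa_load_max_txg is recorded
 * so that a rewind to an earlier uberblock can be attempted.
 */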
2088
2089/*
2090 * Find a value in the pool props object.
2091 */
2092static void
2093spa_prop_find(spa_t *spa, zpool_prop_t prop, uint64_t *val)
2094{
2095 (void) zap_lookup(spa->spa_meta_objset, spa->spa_pool_props_object,
2096 zpool_prop_to_name(prop), sizeof (uint64_t), 1, val);
2097}
2098
2099/*
2100 * Find a value in the pool directory object.
2101 */
2102static int
2103spa_dir_prop(spa_t *spa, const char *name, uint64_t *val)
2104{
2105 return (zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
2106 name, sizeof (uint64_t), 1, val));
2107}
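/*
 * Typical usage, as seen throughout spa_load_impl() below:
 *
 *	if (spa_dir_prop(spa, DMU_POOL_CONFIG, &spa->spa_config_object) != 0)
 *		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
 */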
2108
2109static int
2110spa_vdev_err(vdev_t *vdev, vdev_aux_t aux, int err)
2111{
2112 vdev_set_state(vdev, B_TRUE, VDEV_STATE_CANT_OPEN, aux);
2113 return (err);
2114}
2115
2116/*
2117 * Fix up config after a partly-completed split. This is done with the
2118 * ZPOOL_CONFIG_SPLIT nvlist. Both the splitting pool and the split-off
2119 * pool have that entry in their config, but only the splitting one contains
2120 * a list of all the guids of the vdevs that are being split off.
2121 *
2122 * This function determines what to do with that list: either rejoin
2123 * all the disks to the pool, or complete the splitting process. To attempt
2124 * the rejoin, each disk that is offlined is marked online again, and
2125 * we do a reopen() call. If the vdev label for every disk that was
2126 * marked online indicates it was successfully split off (VDEV_AUX_SPLIT_POOL)
2127 * then we call vdev_split() on each disk, and complete the split.
2128 *
2129 * Otherwise we leave the config alone, with all the vdevs in place in
2130 * the original pool.
2131 */
2132static void
2133spa_try_repair(spa_t *spa, nvlist_t *config)
2134{
2135 uint_t extracted;
2136 uint64_t *glist;
2137 uint_t i, gcount;
2138 nvlist_t *nvl;
2139 vdev_t **vd;
2140 boolean_t attempt_reopen;
2141
2142 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_SPLIT, &nvl) != 0)
2143 return;
2144
2145 /* check that the config is complete */
2146 if (nvlist_lookup_uint64_array(nvl, ZPOOL_CONFIG_SPLIT_LIST,
2147 &glist, &gcount) != 0)
2148 return;
2149
2150 vd = kmem_zalloc(gcount * sizeof (vdev_t *), KM_SLEEP);
2151
2152 /* attempt to online all the vdevs & validate */
2153 attempt_reopen = B_TRUE;
2154 for (i = 0; i < gcount; i++) {
2155 if (glist[i] == 0) /* vdev is hole */
2156 continue;
2157
2158 vd[i] = spa_lookup_by_guid(spa, glist[i], B_FALSE);
2159 if (vd[i] == NULL) {
2160 /*
2161 * Don't bother attempting to reopen the disks;
2162 * just do the split.
2163 */
2164 attempt_reopen = B_FALSE;
2165 } else {
2166 /* attempt to re-online it */
2167 vd[i]->vdev_offline = B_FALSE;
2168 }
2169 }
2170
2171 if (attempt_reopen) {
2172 vdev_reopen(spa->spa_root_vdev);
2173
2174 /* check each device to see what state it's in */
2175 for (extracted = 0, i = 0; i < gcount; i++) {
2176 if (vd[i] != NULL &&
2177 vd[i]->vdev_stat.vs_aux != VDEV_AUX_SPLIT_POOL)
2178 break;
2179 ++extracted;
2180 }
2181 }
2182
2183 /*
2184 * If every disk has been moved to the new pool, or if we never
2185 * even attempted to look at them, then we split them off for
2186 * good.
2187 */
2188 if (!attempt_reopen || gcount == extracted) {
2189 for (i = 0; i < gcount; i++)
2190 if (vd[i] != NULL)
2191 vdev_split(vd[i]);
2192 vdev_reopen(spa->spa_root_vdev);
2193 }
2194
2195 kmem_free(vd, gcount * sizeof (vdev_t *));
2196}
2197
2198static int
2199spa_load(spa_t *spa, spa_load_state_t state, spa_import_type_t type,
2200 boolean_t mosconfig)
2201{
2202 nvlist_t *config = spa->spa_config;
2203 char *ereport = FM_EREPORT_ZFS_POOL;
2204 char *comment;
2205 int error;
2206 uint64_t pool_guid;
2207 nvlist_t *nvl;
2208
2209 if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &pool_guid))
2210 return (SET_ERROR(EINVAL));
2211
2212 ASSERT(spa->spa_comment == NULL);
2213 if (nvlist_lookup_string(config, ZPOOL_CONFIG_COMMENT, &comment) == 0)
2214 spa->spa_comment = spa_strdup(comment);
2215
2216 /*
2217 * Versioning wasn't explicitly added to the label until later, so if
2218 * it's not present treat it as the initial version.
2219 */
2220 if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
2221 &spa->spa_ubsync.ub_version) != 0)
2222 spa->spa_ubsync.ub_version = SPA_VERSION_INITIAL;
2223
2224 (void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
2225 &spa->spa_config_txg);
2226
2227 if ((state == SPA_LOAD_IMPORT || state == SPA_LOAD_TRYIMPORT) &&
2228 spa_guid_exists(pool_guid, 0)) {
2229 error = SET_ERROR(EEXIST);
2230 } else {
2231 spa->spa_config_guid = pool_guid;
2232
2233 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_SPLIT,
2234 &nvl) == 0) {
2235 VERIFY(nvlist_dup(nvl, &spa->spa_config_splitting,
2236 KM_SLEEP) == 0);
2237 }
2238
2239 nvlist_free(spa->spa_load_info);
2240 spa->spa_load_info = fnvlist_alloc();
2241
2242 gethrestime(&spa->spa_loaded_ts);
2243 error = spa_load_impl(spa, pool_guid, config, state, type,
2244 mosconfig, &ereport);
2245 }
2246
2247 /*
2248 * Don't count references from objsets that are already closed
2249 * and are making their way through the eviction process.
2250 */
2251 spa_evicting_os_wait(spa);
2252 spa->spa_minref = refcount_count(&spa->spa_refcount);
2253 if (error) {
2254 if (error != EEXIST) {
2255 spa->spa_loaded_ts.tv_sec = 0;
2256 spa->spa_loaded_ts.tv_nsec = 0;
2257 }
2258 if (error != EBADF) {
2259 zfs_ereport_post(ereport, spa, NULL, NULL, 0, 0);
2260 }
2261 }
2262 spa->spa_load_state = error ? SPA_LOAD_ERROR : SPA_LOAD_NONE;
2263 spa->spa_ena = 0;
2264
2265 return (error);
2266}
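/*
 * Note the indirect recursion here: when spa_load_impl() is entered with an
 * untrusted (cached) config, it reads the trusted config out of the MOS and
 * calls back into spa_load() with mosconfig set to B_TRUE, so a successful
 * load is always validated against the on-disk configuration.
 */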
2267
2268/*
2269 * Count the number of per-vdev ZAPs associated with all of the vdevs in the
2270 * vdev tree rooted in the given vd, and ensure that each ZAP is present in the
2271 * spa's per-vdev ZAP list.
2272 */
2273static uint64_t
2274vdev_count_verify_zaps(vdev_t *vd)
2275{
2276 spa_t *spa = vd->vdev_spa;
2277 uint64_t total = 0;
2278 if (vd->vdev_top_zap != 0) {
2279 total++;
2280 ASSERT0(zap_lookup_int(spa->spa_meta_objset,
2281 spa->spa_all_vdev_zaps, vd->vdev_top_zap));
2282 }
2283 if (vd->vdev_leaf_zap != 0) {
2284 total++;
2285 ASSERT0(zap_lookup_int(spa->spa_meta_objset,
2286 spa->spa_all_vdev_zaps, vd->vdev_leaf_zap));
2287 }
2288
2289 for (uint64_t i = 0; i < vd->vdev_children; i++) {
2290 total += vdev_count_verify_zaps(vd->vdev_child[i]);
2291 }
2292
2293 return (total);
2294}
2295
2296/*
2297 * Load an existing storage pool, using the pool's builtin spa_config as a
2298 * source of configuration information.
2299 */
2300static int
2301spa_load_impl(spa_t *spa, uint64_t pool_guid, nvlist_t *config,
2302 spa_load_state_t state, spa_import_type_t type, boolean_t mosconfig,
2303 char **ereport)
2304{
2305 int error = 0;
2306 nvlist_t *nvroot = NULL;
2307 nvlist_t *label;
2308 vdev_t *rvd;
2309 uberblock_t *ub = &spa->spa_uberblock;
2310 uint64_t children, config_cache_txg = spa->spa_config_txg;
2311 int orig_mode = spa->spa_mode;
2312 int parse;
2313 uint64_t obj;
2314 boolean_t missing_feat_write = B_FALSE;
2315
2316 /*
2317 * If this is an untrusted config, access the pool in read-only mode.
2318 * This prevents things like resilvering recently removed devices.
2319 */
2320 if (!mosconfig)
2321 spa->spa_mode = FREAD;
2322
2323 ASSERT(MUTEX_HELD(&spa_namespace_lock));
2324
2325 spa->spa_load_state = state;
2326
2327 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvroot))
2328 return (SET_ERROR(EINVAL));
2329
2330 parse = (type == SPA_IMPORT_EXISTING ?
2331 VDEV_ALLOC_LOAD : VDEV_ALLOC_SPLIT);
2332
2333 /*
2334 * Create "The Godfather" zio to hold all async IOs
2335 */
2336 spa->spa_async_zio_root = kmem_alloc(max_ncpus * sizeof (void *),
2337 KM_SLEEP);
2338 for (int i = 0; i < max_ncpus; i++) {
2339 spa->spa_async_zio_root[i] = zio_root(spa, NULL, NULL,
2340 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
2341 ZIO_FLAG_GODFATHER);
2342 }
2343
2344 /*
2345 * Parse the configuration into a vdev tree. We explicitly set the
2346 * value that will be returned by spa_version() since parsing the
2347 * configuration requires knowing the version number.
2348 */
2349 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
2350 error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, parse);
2351 spa_config_exit(spa, SCL_ALL, FTAG);
2352
2353 if (error != 0)
2354 return (error);
2355
2356 ASSERT(spa->spa_root_vdev == rvd);
2357 ASSERT3U(spa->spa_min_ashift, >=, SPA_MINBLOCKSHIFT);
2358 ASSERT3U(spa->spa_max_ashift, <=, SPA_MAXBLOCKSHIFT);
2359
2360 if (type != SPA_IMPORT_ASSEMBLE) {
2361 ASSERT(spa_guid(spa) == pool_guid);
2362 }
2363
2364 /*
2365 * Try to open all vdevs, loading each label in the process.
2366 */
2367 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
2368 error = vdev_open(rvd);
2369 spa_config_exit(spa, SCL_ALL, FTAG);
2370 if (error != 0)
2371 return (error);
2372
2373 /*
2374 * We need to validate the vdev labels against the configuration that
2375 * we have in hand, which is dependent on the setting of mosconfig. If
2376 * mosconfig is true then we're validating the vdev labels based on
2377 * that config. Otherwise, we're validating against the cached config
2378 * (zpool.cache) that was read when we loaded the zfs module, and then
2379 * later we will recursively call spa_load() and validate against
2380 * the vdev config.
2381 *
2382 * If we're assembling a new pool that's been split off from an
2383 * existing pool, the labels haven't yet been updated so we skip
2384 * validation for now.
2385 */
2386 if (type != SPA_IMPORT_ASSEMBLE) {
2387 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
2388 error = vdev_validate(rvd, mosconfig);
2389 spa_config_exit(spa, SCL_ALL, FTAG);
2390
2391 if (error != 0)
2392 return (error);
2393
2394 if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN)
2395 return (SET_ERROR(ENXIO));
2396 }
2397
2398 /*
2399 * Find the best uberblock.
2400 */
2401 vdev_uberblock_load(rvd, ub, &label);
2402
2403 /*
2404 * If we weren't able to find a single valid uberblock, return failure.
2405 */
2406 if (ub->ub_txg == 0) {
2407 nvlist_free(label);
2408 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, ENXIO));
2409 }
2410
2411 /*
2412 * If the pool has an unsupported version we can't open it.
2413 */
2414 if (!SPA_VERSION_IS_SUPPORTED(ub->ub_version)) {
2415 nvlist_free(label);
2416 return (spa_vdev_err(rvd, VDEV_AUX_VERSION_NEWER, ENOTSUP));
2417 }
2418
2419 if (ub->ub_version >= SPA_VERSION_FEATURES) {
2420 nvlist_t *features;
2421
2422 /*
2423 * If we weren't able to find what's necessary for reading the
2424 * MOS in the label, return failure.
2425 */
2426 if (label == NULL || nvlist_lookup_nvlist(label,
2427 ZPOOL_CONFIG_FEATURES_FOR_READ, &features) != 0) {
2428 nvlist_free(label);
2429 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA,
2430 ENXIO));
2431 }
2432
2433 /*
2434 * Update our in-core representation with the definitive values
2435 * from the label.
2436 */
2437 nvlist_free(spa->spa_label_features);
2438 VERIFY(nvlist_dup(features, &spa->spa_label_features, 0) == 0);
2439 }
2440
2441 nvlist_free(label);
2442
2443 /*
2444 * Look through entries in the label nvlist's features_for_read. If
2445  * there is a feature listed there which we don't understand, then we
2446  * cannot open the pool.
2447 */
2448 if (ub->ub_version >= SPA_VERSION_FEATURES) {
2449 nvlist_t *unsup_feat;
2450
2451 VERIFY(nvlist_alloc(&unsup_feat, NV_UNIQUE_NAME, KM_SLEEP) ==
2452 0);
2453
2454 for (nvpair_t *nvp = nvlist_next_nvpair(spa->spa_label_features,
2455 NULL); nvp != NULL;
2456 nvp = nvlist_next_nvpair(spa->spa_label_features, nvp)) {
2457 if (!zfeature_is_supported(nvpair_name(nvp))) {
2458 VERIFY(nvlist_add_string(unsup_feat,
2459 nvpair_name(nvp), "") == 0);
2460 }
2461 }
2462
2463 if (!nvlist_empty(unsup_feat)) {
2464 VERIFY(nvlist_add_nvlist(spa->spa_load_info,
2465 ZPOOL_CONFIG_UNSUP_FEAT, unsup_feat) == 0);
2466 nvlist_free(unsup_feat);
2467 return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT,
2468 ENOTSUP));
2469 }
2470
2471 nvlist_free(unsup_feat);
2472 }
2473
2474 /*
2475 * If the vdev guid sum doesn't match the uberblock, we have an
2476 * incomplete configuration. We first check to see if the pool
2477  * is aware of the complete config (i.e. ZPOOL_CONFIG_VDEV_CHILDREN).
2478 * If it is, defer the vdev_guid_sum check till later so we
2479 * can handle missing vdevs.
2480 */
2481 if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VDEV_CHILDREN,
2482 &children) != 0 && mosconfig && type != SPA_IMPORT_ASSEMBLE &&
2483 rvd->vdev_guid_sum != ub->ub_guid_sum)
2484 return (spa_vdev_err(rvd, VDEV_AUX_BAD_GUID_SUM, ENXIO));
2485
2486 if (type != SPA_IMPORT_ASSEMBLE && spa->spa_config_splitting) {
2487 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
2488 spa_try_repair(spa, config);
2489 spa_config_exit(spa, SCL_ALL, FTAG);
2490 nvlist_free(spa->spa_config_splitting);
2491 spa->spa_config_splitting = NULL;
2492 }
2493
2494 /*
2495 * Initialize internal SPA structures.
2496 */
2497 spa->spa_state = POOL_STATE_ACTIVE;
2498 spa->spa_ubsync = spa->spa_uberblock;
2499 spa->spa_verify_min_txg = spa->spa_extreme_rewind ?
2500 TXG_INITIAL - 1 : spa_last_synced_txg(spa) - TXG_DEFER_SIZE - 1;
2501 spa->spa_first_txg = spa->spa_last_ubsync_txg ?
2502 spa->spa_last_ubsync_txg : spa_last_synced_txg(spa) + 1;
2503 spa->spa_claim_max_txg = spa->spa_first_txg;
2504 spa->spa_prev_software_version = ub->ub_software_version;
2505
2506 error = dsl_pool_init(spa, spa->spa_first_txg, &spa->spa_dsl_pool);
2507 if (error)
2508 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2509 spa->spa_meta_objset = spa->spa_dsl_pool->dp_meta_objset;
2510
2511 if (spa_dir_prop(spa, DMU_POOL_CONFIG, &spa->spa_config_object) != 0)
2512 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2513
2514 if (spa_version(spa) >= SPA_VERSION_FEATURES) {
2515 boolean_t missing_feat_read = B_FALSE;
2516 nvlist_t *unsup_feat, *enabled_feat;
2517
2518 if (spa_dir_prop(spa, DMU_POOL_FEATURES_FOR_READ,
2519 &spa->spa_feat_for_read_obj) != 0) {
2520 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2521 }
2522
2523 if (spa_dir_prop(spa, DMU_POOL_FEATURES_FOR_WRITE,
2524 &spa->spa_feat_for_write_obj) != 0) {
2525 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2526 }
2527
2528 if (spa_dir_prop(spa, DMU_POOL_FEATURE_DESCRIPTIONS,
2529 &spa->spa_feat_desc_obj) != 0) {
2530 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2531 }
2532
2533 enabled_feat = fnvlist_alloc();
2534 unsup_feat = fnvlist_alloc();
2535
2536 if (!spa_features_check(spa, B_FALSE,
2537 unsup_feat, enabled_feat))
2538 missing_feat_read = B_TRUE;
2539
2540 if (spa_writeable(spa) || state == SPA_LOAD_TRYIMPORT) {
2541 if (!spa_features_check(spa, B_TRUE,
2542 unsup_feat, enabled_feat)) {
2543 missing_feat_write = B_TRUE;
2544 }
2545 }
2546
2547 fnvlist_add_nvlist(spa->spa_load_info,
2548 ZPOOL_CONFIG_ENABLED_FEAT, enabled_feat);
2549
2550 if (!nvlist_empty(unsup_feat)) {
2551 fnvlist_add_nvlist(spa->spa_load_info,
2552 ZPOOL_CONFIG_UNSUP_FEAT, unsup_feat);
2553 }
2554
2555 fnvlist_free(enabled_feat);
2556 fnvlist_free(unsup_feat);
2557
2558 if (!missing_feat_read) {
2559 fnvlist_add_boolean(spa->spa_load_info,
2560 ZPOOL_CONFIG_CAN_RDONLY);
2561 }
2562
2563 /*
2564 * If the state is SPA_LOAD_TRYIMPORT, our objective is
2565 * twofold: to determine whether the pool is available for
2566 * import in read-write mode and (if it is not) whether the
2567 * pool is available for import in read-only mode. If the pool
2568 * is available for import in read-write mode, it is displayed
2569 * as available in userland; if it is not available for import
2570 * in read-only mode, it is displayed as unavailable in
2571 * userland. If the pool is available for import in read-only
2572 * mode but not read-write mode, it is displayed as unavailable
2573 * in userland with a special note that the pool is actually
2574 * available for open in read-only mode.
2575 *
2576 * As a result, if the state is SPA_LOAD_TRYIMPORT and we are
2577 * missing a feature for write, we must first determine whether
2578 * the pool can be opened read-only before returning to
2579 * userland in order to know whether to display the
2580 * abovementioned note.
2581 */
2582 if (missing_feat_read || (missing_feat_write &&
2583 spa_writeable(spa))) {
2584 return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT,
2585 ENOTSUP));
2586 }
2587
2588 /*
2589 * Load refcounts for ZFS features from disk into an in-memory
2590 * cache during SPA initialization.
2591 */
2592 for (spa_feature_t i = 0; i < SPA_FEATURES; i++) {
2593 uint64_t refcount;
2594
2595 error = feature_get_refcount_from_disk(spa,
2596 &spa_feature_table[i], &refcount);
2597 if (error == 0) {
2598 spa->spa_feat_refcount_cache[i] = refcount;
2599 } else if (error == ENOTSUP) {
2600 spa->spa_feat_refcount_cache[i] =
2601 SPA_FEATURE_DISABLED;
2602 } else {
2603 return (spa_vdev_err(rvd,
2604 VDEV_AUX_CORRUPT_DATA, EIO));
2605 }
2606 }
2607 }
2608
2609 if (spa_feature_is_active(spa, SPA_FEATURE_ENABLED_TXG)) {
2610 if (spa_dir_prop(spa, DMU_POOL_FEATURE_ENABLED_TXG,
2611 &spa->spa_feat_enabled_txg_obj) != 0)
2612 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2613 }
2614
2615 spa->spa_is_initializing = B_TRUE;
2616 error = dsl_pool_open(spa->spa_dsl_pool);
2617 spa->spa_is_initializing = B_FALSE;
2618 if (error != 0)
2619 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2620
2621 if (!mosconfig) {
2622 uint64_t hostid;
2623 nvlist_t *policy = NULL, *nvconfig;
2624
2625 if (load_nvlist(spa, spa->spa_config_object, &nvconfig) != 0)
2626 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2627
2628 if (!spa_is_root(spa) && nvlist_lookup_uint64(nvconfig,
2629 ZPOOL_CONFIG_HOSTID, &hostid) == 0) {
2630 char *hostname;
2631 unsigned long myhostid = 0;
2632
2633 VERIFY(nvlist_lookup_string(nvconfig,
2634 ZPOOL_CONFIG_HOSTNAME, &hostname) == 0);
2635
2636#ifdef _KERNEL
2637 myhostid = zone_get_hostid(NULL);
2638#else /* _KERNEL */
2639 /*
2640 * We're emulating the system's hostid in userland, so
2641 * we can't use zone_get_hostid().
2642 */
2643 (void) ddi_strtoul(hw_serial, NULL, 10, &myhostid);
2644#endif /* _KERNEL */
2645 if (check_hostid && hostid != 0 && myhostid != 0 &&
2646 hostid != myhostid) {
2647 nvlist_free(nvconfig);
2648 cmn_err(CE_WARN, "pool '%s' could not be "
2649 "loaded as it was last accessed by "
2650 "another system (host: %s hostid: 0x%lx). "
2651 "See: http://illumos.org/msg/ZFS-8000-EY",
2652 spa_name(spa), hostname,
2653 (unsigned long)hostid);
2654 return (SET_ERROR(EBADF));
2655 }
2656 }
2657 if (nvlist_lookup_nvlist(spa->spa_config,
2658 ZPOOL_REWIND_POLICY, &policy) == 0)
2659 VERIFY(nvlist_add_nvlist(nvconfig,
2660 ZPOOL_REWIND_POLICY, policy) == 0);
2661
2662 spa_config_set(spa, nvconfig);
2663 spa_unload(spa);
2664 spa_deactivate(spa);
2665 spa_activate(spa, orig_mode);
2666
2667 return (spa_load(spa, state, SPA_IMPORT_EXISTING, B_TRUE));
2668 }
2669
2670 /* Grab the secret checksum salt from the MOS. */
2671 error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
2672 DMU_POOL_CHECKSUM_SALT, 1,
2673 sizeof (spa->spa_cksum_salt.zcs_bytes),
2674 spa->spa_cksum_salt.zcs_bytes);
2675 if (error == ENOENT) {
2676 /* Generate a new salt for subsequent use */
2677 (void) random_get_pseudo_bytes(spa->spa_cksum_salt.zcs_bytes,
2678 sizeof (spa->spa_cksum_salt.zcs_bytes));
2679 } else if (error != 0) {
2680 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2681 }
2682
2683 if (spa_dir_prop(spa, DMU_POOL_SYNC_BPOBJ, &obj) != 0)
2684 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2685 error = bpobj_open(&spa->spa_deferred_bpobj, spa->spa_meta_objset, obj);
2686 if (error != 0)
2687 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2688
2689 /*
2690 * Load the bit that tells us to use the new accounting function
2691 * (raid-z deflation). If we have an older pool, this will not
2692 * be present.
2693 */
2694 error = spa_dir_prop(spa, DMU_POOL_DEFLATE, &spa->spa_deflate);
2695 if (error != 0 && error != ENOENT)
2696 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2697
2698 error = spa_dir_prop(spa, DMU_POOL_CREATION_VERSION,
2699 &spa->spa_creation_version);
2700 if (error != 0 && error != ENOENT)
2701 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2702
2703 /*
2704 * Load the persistent error log. If we have an older pool, this will
2705 * not be present.
2706 */
2707 error = spa_dir_prop(spa, DMU_POOL_ERRLOG_LAST, &spa->spa_errlog_last);
2708 if (error != 0 && error != ENOENT)
2709 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2710
2711 error = spa_dir_prop(spa, DMU_POOL_ERRLOG_SCRUB,
2712 &spa->spa_errlog_scrub);
2713 if (error != 0 && error != ENOENT)
2714 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2715
2716 /*
2717 * Load the history object. If we have an older pool, this
2718 * will not be present.
2719 */
2720 error = spa_dir_prop(spa, DMU_POOL_HISTORY, &spa->spa_history);
2721 if (error != 0 && error != ENOENT)
2722 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2723
2724 /*
2725 * Load the per-vdev ZAP map. If we have an older pool, this will not
2726 * be present; in this case, defer its creation to a later time to
2727 * avoid dirtying the MOS this early / out of sync context. See
2728 * spa_sync_config_object.
2729 */
2730
2731 /* The sentinel is only available in the MOS config. */
2732 nvlist_t *mos_config;
2733 if (load_nvlist(spa, spa->spa_config_object, &mos_config) != 0)
2734 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2735
2736 error = spa_dir_prop(spa, DMU_POOL_VDEV_ZAP_MAP,
2737 &spa->spa_all_vdev_zaps);
2738
107 ZTI_NMODES
108} zti_modes_t;
109
110#define ZTI_P(n, q) { ZTI_MODE_FIXED, (n), (q) }
111#define ZTI_BATCH { ZTI_MODE_BATCH, 0, 1 }
112#define ZTI_NULL { ZTI_MODE_NULL, 0, 0 }
113
114#define ZTI_N(n) ZTI_P(n, 1)
115#define ZTI_ONE ZTI_N(1)
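/*
 * For reference, ZTI_P(12, 8) expands to { ZTI_MODE_FIXED, 12, 8 }, i.e.
 * eight taskqs of twelve threads each, and ZTI_ONE expands to
 * { ZTI_MODE_FIXED, 1, 1 }, a single single-threaded taskq.
 */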
116
117typedef struct zio_taskq_info {
118 zti_modes_t zti_mode;
119 uint_t zti_value;
120 uint_t zti_count;
121} zio_taskq_info_t;
122
123static const char *const zio_taskq_types[ZIO_TASKQ_TYPES] = {
124 "issue", "issue_high", "intr", "intr_high"
125};
126
127/*
128 * This table defines the taskq settings for each ZFS I/O type. When
129 * initializing a pool, we use this table to create an appropriately sized
130 * taskq. Some operations are low volume and therefore have a small, static
131 * number of threads assigned to their taskqs using the ZTI_N(#) or ZTI_ONE
132 * macros. Other operations process a large amount of data; the ZTI_BATCH
133 * macro causes us to create a taskq oriented for throughput. Some operations
134 * are so high frequency and short-lived that the taskq itself can become a
135 * point of lock contention. The ZTI_P(#, #) macro indicates that we need an
136 * additional degree of parallelism specified by the number of threads per-
137 * taskq and the number of taskqs; when dispatching an event in this case, the
138 * particular taskq is chosen at random.
139 *
140 * The different taskq priorities are to handle the different contexts (issue
141 * and interrupt) and then to reserve threads for ZIO_PRIORITY_NOW I/Os that
142 * need to be handled with minimum delay.
143 */
144const zio_taskq_info_t zio_taskqs[ZIO_TYPES][ZIO_TASKQ_TYPES] = {
145 /* ISSUE ISSUE_HIGH INTR INTR_HIGH */
146 { ZTI_ONE, ZTI_NULL, ZTI_ONE, ZTI_NULL }, /* NULL */
147 { ZTI_N(8), ZTI_NULL, ZTI_P(12, 8), ZTI_NULL }, /* READ */
148 { ZTI_BATCH, ZTI_N(5), ZTI_N(8), ZTI_N(5) }, /* WRITE */
149 { ZTI_P(12, 8), ZTI_NULL, ZTI_ONE, ZTI_NULL }, /* FREE */
150 { ZTI_ONE, ZTI_NULL, ZTI_ONE, ZTI_NULL }, /* CLAIM */
151 { ZTI_ONE, ZTI_NULL, ZTI_ONE, ZTI_NULL }, /* IOCTL */
152};
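/*
 * Reading one row of the table above: for ZIO_TYPE_READ, issue work runs on
 * a single 8-thread taskq, while interrupt-side completions are spread
 * across eight 12-thread taskqs (ZTI_P(12, 8)) to keep any one taskq from
 * becoming a lock-contention hot spot.
 */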
153
154static sysevent_t *spa_event_create(spa_t *spa, vdev_t *vd, const char *name);
155static void spa_event_post(sysevent_t *ev);
156static void spa_sync_version(void *arg, dmu_tx_t *tx);
157static void spa_sync_props(void *arg, dmu_tx_t *tx);
158static boolean_t spa_has_active_shared_spare(spa_t *spa);
159static int spa_load_impl(spa_t *spa, uint64_t, nvlist_t *config,
160 spa_load_state_t state, spa_import_type_t type, boolean_t mosconfig,
161 char **ereport);
162static void spa_vdev_resilver_done(spa_t *spa);
163
164uint_t zio_taskq_batch_pct = 75; /* 1 thread per cpu in pset */
165#ifdef PSRSET_BIND
166id_t zio_taskq_psrset_bind = PS_NONE;
167#endif
168#ifdef SYSDC
169boolean_t zio_taskq_sysdc = B_TRUE; /* use SDC scheduling class */
170uint_t zio_taskq_basedc = 80; /* base duty cycle */
171#endif
172
173boolean_t spa_create_process = B_TRUE; /* no process ==> no sysdc */
174extern int zfs_sync_pass_deferred_free;
175
176/*
177 * This (illegal) pool name is used when temporarily importing a spa_t in order
178 * to get the vdev stats associated with the imported devices.
179 */
180#define TRYIMPORT_NAME "$import"
181
182/*
183 * ==========================================================================
184 * SPA properties routines
185 * ==========================================================================
186 */
187
188/*
189 * Add a (source=src, propname=propval) list to an nvlist.
190 */
191static void
192spa_prop_add_list(nvlist_t *nvl, zpool_prop_t prop, char *strval,
193 uint64_t intval, zprop_source_t src)
194{
195 const char *propname = zpool_prop_to_name(prop);
196 nvlist_t *propval;
197
198 VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP) == 0);
199 VERIFY(nvlist_add_uint64(propval, ZPROP_SOURCE, src) == 0);
200
201 if (strval != NULL)
202 VERIFY(nvlist_add_string(propval, ZPROP_VALUE, strval) == 0);
203 else
204 VERIFY(nvlist_add_uint64(propval, ZPROP_VALUE, intval) == 0);
205
206 VERIFY(nvlist_add_nvlist(nvl, propname, propval) == 0);
207 nvlist_free(propval);
208}
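/*
 * The entry added above has, loosely, the shape:
 *
 *	"<propname>" -> { ZPROP_SOURCE = src, ZPROP_VALUE = strval | intval }
 *
 * where the string form of ZPROP_VALUE is used whenever strval is non-NULL.
 */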
209
210/*
211 * Get property values from the spa configuration.
212 */
213static void
214spa_prop_get_config(spa_t *spa, nvlist_t **nvp)
215{
216 vdev_t *rvd = spa->spa_root_vdev;
217 dsl_pool_t *pool = spa->spa_dsl_pool;
218 uint64_t size, alloc, cap, version;
219 zprop_source_t src = ZPROP_SRC_NONE;
220 spa_config_dirent_t *dp;
221 metaslab_class_t *mc = spa_normal_class(spa);
222
223 ASSERT(MUTEX_HELD(&spa->spa_props_lock));
224
225 if (rvd != NULL) {
226 alloc = metaslab_class_get_alloc(spa_normal_class(spa));
227 size = metaslab_class_get_space(spa_normal_class(spa));
228 spa_prop_add_list(*nvp, ZPOOL_PROP_NAME, spa_name(spa), 0, src);
229 spa_prop_add_list(*nvp, ZPOOL_PROP_SIZE, NULL, size, src);
230 spa_prop_add_list(*nvp, ZPOOL_PROP_ALLOCATED, NULL, alloc, src);
231 spa_prop_add_list(*nvp, ZPOOL_PROP_FREE, NULL,
232 size - alloc, src);
233
234 spa_prop_add_list(*nvp, ZPOOL_PROP_FRAGMENTATION, NULL,
235 metaslab_class_fragmentation(mc), src);
236 spa_prop_add_list(*nvp, ZPOOL_PROP_EXPANDSZ, NULL,
237 metaslab_class_expandable_space(mc), src);
238 spa_prop_add_list(*nvp, ZPOOL_PROP_READONLY, NULL,
239 (spa_mode(spa) == FREAD), src);
240
241 cap = (size == 0) ? 0 : (alloc * 100 / size);
242 spa_prop_add_list(*nvp, ZPOOL_PROP_CAPACITY, NULL, cap, src);
243
244 spa_prop_add_list(*nvp, ZPOOL_PROP_DEDUPRATIO, NULL,
245 ddt_get_pool_dedup_ratio(spa), src);
246
247 spa_prop_add_list(*nvp, ZPOOL_PROP_HEALTH, NULL,
248 rvd->vdev_state, src);
249
250 version = spa_version(spa);
251 if (version == zpool_prop_default_numeric(ZPOOL_PROP_VERSION))
252 src = ZPROP_SRC_DEFAULT;
253 else
254 src = ZPROP_SRC_LOCAL;
255 spa_prop_add_list(*nvp, ZPOOL_PROP_VERSION, NULL, version, src);
256 }
257
258 if (pool != NULL) {
259 /*
260		 * The $FREE directory was introduced in SPA_VERSION_DEADLISTS;
261		 * when opening pools created before this version, freedir will be NULL.
262 */
263 if (pool->dp_free_dir != NULL) {
264 spa_prop_add_list(*nvp, ZPOOL_PROP_FREEING, NULL,
265 dsl_dir_phys(pool->dp_free_dir)->dd_used_bytes,
266 src);
267 } else {
268 spa_prop_add_list(*nvp, ZPOOL_PROP_FREEING,
269 NULL, 0, src);
270 }
271
272 if (pool->dp_leak_dir != NULL) {
273 spa_prop_add_list(*nvp, ZPOOL_PROP_LEAKED, NULL,
274 dsl_dir_phys(pool->dp_leak_dir)->dd_used_bytes,
275 src);
276 } else {
277 spa_prop_add_list(*nvp, ZPOOL_PROP_LEAKED,
278 NULL, 0, src);
279 }
280 }
281
282 spa_prop_add_list(*nvp, ZPOOL_PROP_GUID, NULL, spa_guid(spa), src);
283
284 if (spa->spa_comment != NULL) {
285 spa_prop_add_list(*nvp, ZPOOL_PROP_COMMENT, spa->spa_comment,
286 0, ZPROP_SRC_LOCAL);
287 }
288
289 if (spa->spa_root != NULL)
290 spa_prop_add_list(*nvp, ZPOOL_PROP_ALTROOT, spa->spa_root,
291 0, ZPROP_SRC_LOCAL);
292
293 if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_BLOCKS)) {
294 spa_prop_add_list(*nvp, ZPOOL_PROP_MAXBLOCKSIZE, NULL,
295 MIN(zfs_max_recordsize, SPA_MAXBLOCKSIZE), ZPROP_SRC_NONE);
296 } else {
297 spa_prop_add_list(*nvp, ZPOOL_PROP_MAXBLOCKSIZE, NULL,
298 SPA_OLD_MAXBLOCKSIZE, ZPROP_SRC_NONE);
299 }
300
301 if ((dp = list_head(&spa->spa_config_list)) != NULL) {
302 if (dp->scd_path == NULL) {
303 spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
304 "none", 0, ZPROP_SRC_LOCAL);
305 } else if (strcmp(dp->scd_path, spa_config_path) != 0) {
306 spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
307 dp->scd_path, 0, ZPROP_SRC_LOCAL);
308 }
309 }
310}
311
312/*
313 * Get zpool property values.
314 */
315int
316spa_prop_get(spa_t *spa, nvlist_t **nvp)
317{
318 objset_t *mos = spa->spa_meta_objset;
319 zap_cursor_t zc;
320 zap_attribute_t za;
321 int err;
322
323 VERIFY(nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP) == 0);
324
325 mutex_enter(&spa->spa_props_lock);
326
327 /*
328 * Get properties from the spa config.
329 */
330 spa_prop_get_config(spa, nvp);
331
332	/* If no pool property object, no more props to get. */
333 if (mos == NULL || spa->spa_pool_props_object == 0) {
334 mutex_exit(&spa->spa_props_lock);
335 return (0);
336 }
337
338 /*
339 * Get properties from the MOS pool property object.
340 */
341 for (zap_cursor_init(&zc, mos, spa->spa_pool_props_object);
342 (err = zap_cursor_retrieve(&zc, &za)) == 0;
343 zap_cursor_advance(&zc)) {
344 uint64_t intval = 0;
345 char *strval = NULL;
346 zprop_source_t src = ZPROP_SRC_DEFAULT;
347 zpool_prop_t prop;
348
349 if ((prop = zpool_name_to_prop(za.za_name)) == ZPROP_INVAL)
350 continue;
351
352 switch (za.za_integer_length) {
353 case 8:
354 /* integer property */
355 if (za.za_first_integer !=
356 zpool_prop_default_numeric(prop))
357 src = ZPROP_SRC_LOCAL;
358
359 if (prop == ZPOOL_PROP_BOOTFS) {
360 dsl_pool_t *dp;
361 dsl_dataset_t *ds = NULL;
362
363 dp = spa_get_dsl(spa);
364 dsl_pool_config_enter(dp, FTAG);
365 if (err = dsl_dataset_hold_obj(dp,
366 za.za_first_integer, FTAG, &ds)) {
367 dsl_pool_config_exit(dp, FTAG);
368 break;
369 }
370
371 strval = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN,
372 KM_SLEEP);
373 dsl_dataset_name(ds, strval);
374 dsl_dataset_rele(ds, FTAG);
375 dsl_pool_config_exit(dp, FTAG);
376 } else {
377 strval = NULL;
378 intval = za.za_first_integer;
379 }
380
381 spa_prop_add_list(*nvp, prop, strval, intval, src);
382
383 if (strval != NULL)
384 kmem_free(strval, ZFS_MAX_DATASET_NAME_LEN);
385
386 break;
387
388 case 1:
389 /* string property */
390 strval = kmem_alloc(za.za_num_integers, KM_SLEEP);
391 err = zap_lookup(mos, spa->spa_pool_props_object,
392 za.za_name, 1, za.za_num_integers, strval);
393 if (err) {
394 kmem_free(strval, za.za_num_integers);
395 break;
396 }
397 spa_prop_add_list(*nvp, prop, strval, 0, src);
398 kmem_free(strval, za.za_num_integers);
399 break;
400
401 default:
402 break;
403 }
404 }
405 zap_cursor_fini(&zc);
406 mutex_exit(&spa->spa_props_lock);
407out:
408 if (err && err != ENOENT) {
409 nvlist_free(*nvp);
410 *nvp = NULL;
411 return (err);
412 }
413
414 return (0);
415}
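/*
 * On success the caller owns *nvp and is expected to release it with
 * nvlist_free(); on any failure other than ENOENT the list has already been
 * freed and *nvp reset to NULL above.
 */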
416
417/*
418 * Validate the given pool properties nvlist and modify the list
419 * for the property values to be set.
420 */
421static int
422spa_prop_validate(spa_t *spa, nvlist_t *props)
423{
424 nvpair_t *elem;
425 int error = 0, reset_bootfs = 0;
426 uint64_t objnum = 0;
427 boolean_t has_feature = B_FALSE;
428
429 elem = NULL;
430 while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
431 uint64_t intval;
432 char *strval, *slash, *check, *fname;
433 const char *propname = nvpair_name(elem);
434 zpool_prop_t prop = zpool_name_to_prop(propname);
435
436 switch (prop) {
437 case ZPROP_INVAL:
438 if (!zpool_prop_feature(propname)) {
439 error = SET_ERROR(EINVAL);
440 break;
441 }
442
443 /*
444 * Sanitize the input.
445 */
446 if (nvpair_type(elem) != DATA_TYPE_UINT64) {
447 error = SET_ERROR(EINVAL);
448 break;
449 }
450
451 if (nvpair_value_uint64(elem, &intval) != 0) {
452 error = SET_ERROR(EINVAL);
453 break;
454 }
455
456 if (intval != 0) {
457 error = SET_ERROR(EINVAL);
458 break;
459 }
460
461 fname = strchr(propname, '@') + 1;
462 if (zfeature_lookup_name(fname, NULL) != 0) {
463 error = SET_ERROR(EINVAL);
464 break;
465 }
466
467 has_feature = B_TRUE;
468 break;
469
470 case ZPOOL_PROP_VERSION:
471 error = nvpair_value_uint64(elem, &intval);
472 if (!error &&
473 (intval < spa_version(spa) ||
474 intval > SPA_VERSION_BEFORE_FEATURES ||
475 has_feature))
476 error = SET_ERROR(EINVAL);
477 break;
478
479 case ZPOOL_PROP_DELEGATION:
480 case ZPOOL_PROP_AUTOREPLACE:
481 case ZPOOL_PROP_LISTSNAPS:
482 case ZPOOL_PROP_AUTOEXPAND:
483 error = nvpair_value_uint64(elem, &intval);
484 if (!error && intval > 1)
485 error = SET_ERROR(EINVAL);
486 break;
487
488 case ZPOOL_PROP_BOOTFS:
489 /*
490 * If the pool version is less than SPA_VERSION_BOOTFS,
491 * or the pool is still being created (version == 0),
492 * the bootfs property cannot be set.
493 */
494 if (spa_version(spa) < SPA_VERSION_BOOTFS) {
495 error = SET_ERROR(ENOTSUP);
496 break;
497 }
498
499 /*
500 * Make sure the vdev config is bootable
501 */
502 if (!vdev_is_bootable(spa->spa_root_vdev)) {
503 error = SET_ERROR(ENOTSUP);
504 break;
505 }
506
507 reset_bootfs = 1;
508
509 error = nvpair_value_string(elem, &strval);
510
511 if (!error) {
512 objset_t *os;
513 uint64_t propval;
514
515 if (strval == NULL || strval[0] == '\0') {
516 objnum = zpool_prop_default_numeric(
517 ZPOOL_PROP_BOOTFS);
518 break;
519 }
520
521 if (error = dmu_objset_hold(strval, FTAG, &os))
522 break;
523
524 /*
525 * Must be ZPL, and its property settings
526 * must be supported by GRUB (compression
527 * is not gzip, and large blocks are not used).
528 */
529
530 if (dmu_objset_type(os) != DMU_OST_ZFS) {
531 error = SET_ERROR(ENOTSUP);
532 } else if ((error =
533 dsl_prop_get_int_ds(dmu_objset_ds(os),
534 zfs_prop_to_name(ZFS_PROP_COMPRESSION),
535 &propval)) == 0 &&
536 !BOOTFS_COMPRESS_VALID(propval)) {
537 error = SET_ERROR(ENOTSUP);
538 } else if ((error =
539 dsl_prop_get_int_ds(dmu_objset_ds(os),
540 zfs_prop_to_name(ZFS_PROP_RECORDSIZE),
541 &propval)) == 0 &&
542 propval > SPA_OLD_MAXBLOCKSIZE) {
543 error = SET_ERROR(ENOTSUP);
544 } else {
545 objnum = dmu_objset_id(os);
546 }
547 dmu_objset_rele(os, FTAG);
548 }
549 break;
550
551 case ZPOOL_PROP_FAILUREMODE:
552 error = nvpair_value_uint64(elem, &intval);
553 if (!error && (intval < ZIO_FAILURE_MODE_WAIT ||
554 intval > ZIO_FAILURE_MODE_PANIC))
555 error = SET_ERROR(EINVAL);
556
557 /*
558 * This is a special case which only occurs when
559 * the pool has completely failed. This allows
560 * the user to change the in-core failmode property
561 * without syncing it out to disk (I/Os might
562 * currently be blocked). We do this by returning
563 * EIO to the caller (spa_prop_set) to trick it
564 * into thinking we encountered a property validation
565 * error.
566 */
567 if (!error && spa_suspended(spa)) {
568 spa->spa_failmode = intval;
569 error = SET_ERROR(EIO);
570 }
571 break;
572
573 case ZPOOL_PROP_CACHEFILE:
574 if ((error = nvpair_value_string(elem, &strval)) != 0)
575 break;
576
577 if (strval[0] == '\0')
578 break;
579
580 if (strcmp(strval, "none") == 0)
581 break;
582
583 if (strval[0] != '/') {
584 error = SET_ERROR(EINVAL);
585 break;
586 }
587
588 slash = strrchr(strval, '/');
589 ASSERT(slash != NULL);
590
591 if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
592 strcmp(slash, "/..") == 0)
593 error = SET_ERROR(EINVAL);
594 break;
595
596 case ZPOOL_PROP_COMMENT:
597 if ((error = nvpair_value_string(elem, &strval)) != 0)
598 break;
599 for (check = strval; *check != '\0'; check++) {
600 /*
601 * The kernel doesn't have an easy isprint()
602 * check. For this kernel check, we merely
603 * check ASCII apart from DEL. Fix this if
604 * there is an easy-to-use kernel isprint().
605 */
606 if (*check >= 0x7f) {
607 error = SET_ERROR(EINVAL);
608 break;
609 }
610 }
611 if (strlen(strval) > ZPROP_MAX_COMMENT)
612 error = E2BIG;
613 break;
614
615 case ZPOOL_PROP_DEDUPDITTO:
616 if (spa_version(spa) < SPA_VERSION_DEDUP)
617 error = SET_ERROR(ENOTSUP);
618 else
619 error = nvpair_value_uint64(elem, &intval);
620 if (error == 0 &&
621 intval != 0 && intval < ZIO_DEDUPDITTO_MIN)
622 error = SET_ERROR(EINVAL);
623 break;
624 }
625
626 if (error)
627 break;
628 }
629
630 if (!error && reset_bootfs) {
631 error = nvlist_remove(props,
632 zpool_prop_to_name(ZPOOL_PROP_BOOTFS), DATA_TYPE_STRING);
633
634 if (!error) {
635 error = nvlist_add_uint64(props,
636 zpool_prop_to_name(ZPOOL_PROP_BOOTFS), objnum);
637 }
638 }
639
640 return (error);
641}
642
643void
644spa_configfile_set(spa_t *spa, nvlist_t *nvp, boolean_t need_sync)
645{
646 char *cachefile;
647 spa_config_dirent_t *dp;
648
649 if (nvlist_lookup_string(nvp, zpool_prop_to_name(ZPOOL_PROP_CACHEFILE),
650 &cachefile) != 0)
651 return;
652
653 dp = kmem_alloc(sizeof (spa_config_dirent_t),
654 KM_SLEEP);
655
656 if (cachefile[0] == '\0')
657 dp->scd_path = spa_strdup(spa_config_path);
658 else if (strcmp(cachefile, "none") == 0)
659 dp->scd_path = NULL;
660 else
661 dp->scd_path = spa_strdup(cachefile);
662
663 list_insert_head(&spa->spa_config_list, dp);
664 if (need_sync)
665 spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
666}
667
668int
669spa_prop_set(spa_t *spa, nvlist_t *nvp)
670{
671 int error;
672 nvpair_t *elem = NULL;
673 boolean_t need_sync = B_FALSE;
674
675 if ((error = spa_prop_validate(spa, nvp)) != 0)
676 return (error);
677
678 while ((elem = nvlist_next_nvpair(nvp, elem)) != NULL) {
679 zpool_prop_t prop = zpool_name_to_prop(nvpair_name(elem));
680
681 if (prop == ZPOOL_PROP_CACHEFILE ||
682 prop == ZPOOL_PROP_ALTROOT ||
683 prop == ZPOOL_PROP_READONLY)
684 continue;
685
686 if (prop == ZPOOL_PROP_VERSION || prop == ZPROP_INVAL) {
687 uint64_t ver;
688
689 if (prop == ZPOOL_PROP_VERSION) {
690 VERIFY(nvpair_value_uint64(elem, &ver) == 0);
691 } else {
692 ASSERT(zpool_prop_feature(nvpair_name(elem)));
693 ver = SPA_VERSION_FEATURES;
694 need_sync = B_TRUE;
695 }
696
697 /* Save time if the version is already set. */
698 if (ver == spa_version(spa))
699 continue;
700
701 /*
702 * In addition to the pool directory object, we might
703 * create the pool properties object, the features for
704 * read object, the features for write object, or the
705 * feature descriptions object.
706 */
707 error = dsl_sync_task(spa->spa_name, NULL,
708 spa_sync_version, &ver,
709 6, ZFS_SPACE_CHECK_RESERVED);
710 if (error)
711 return (error);
712 continue;
713 }
714
715 need_sync = B_TRUE;
716 break;
717 }
718
719 if (need_sync) {
720 return (dsl_sync_task(spa->spa_name, NULL, spa_sync_props,
721 nvp, 6, ZFS_SPACE_CHECK_RESERVED));
722 }
723
724 return (0);
725}
726
727/*
728 * If the bootfs property value is dsobj, clear it.
729 */
730void
731spa_prop_clear_bootfs(spa_t *spa, uint64_t dsobj, dmu_tx_t *tx)
732{
733 if (spa->spa_bootfs == dsobj && spa->spa_pool_props_object != 0) {
734 VERIFY(zap_remove(spa->spa_meta_objset,
735 spa->spa_pool_props_object,
736 zpool_prop_to_name(ZPOOL_PROP_BOOTFS), tx) == 0);
737 spa->spa_bootfs = 0;
738 }
739}
740
741/*ARGSUSED*/
742static int
743spa_change_guid_check(void *arg, dmu_tx_t *tx)
744{
745 uint64_t *newguid = arg;
746 spa_t *spa = dmu_tx_pool(tx)->dp_spa;
747 vdev_t *rvd = spa->spa_root_vdev;
748 uint64_t vdev_state;
749
750 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
751 vdev_state = rvd->vdev_state;
752 spa_config_exit(spa, SCL_STATE, FTAG);
753
754 if (vdev_state != VDEV_STATE_HEALTHY)
755 return (SET_ERROR(ENXIO));
756
757 ASSERT3U(spa_guid(spa), !=, *newguid);
758
759 return (0);
760}
761
762static void
763spa_change_guid_sync(void *arg, dmu_tx_t *tx)
764{
765 uint64_t *newguid = arg;
766 spa_t *spa = dmu_tx_pool(tx)->dp_spa;
767 uint64_t oldguid;
768 vdev_t *rvd = spa->spa_root_vdev;
769
770 oldguid = spa_guid(spa);
771
772 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
773 rvd->vdev_guid = *newguid;
774 rvd->vdev_guid_sum += (*newguid - oldguid);
775 vdev_config_dirty(rvd);
776 spa_config_exit(spa, SCL_STATE, FTAG);
777
778 spa_history_log_internal(spa, "guid change", tx, "old=%llu new=%llu",
779 oldguid, *newguid);
780}
781
782/*
783 * Change the GUID for the pool. This is done so that we can later
784 * re-import a pool built from a clone of our own vdevs. We will modify
785 * the root vdev's guid, our own pool guid, and then mark all of our
786 * vdevs dirty. Note that we must make sure that all our vdevs are
787 * online when we do this, or else any vdevs that weren't present
788 * would be orphaned from our pool. We are also going to issue a
789 * sysevent to update any watchers.
790 */
791int
792spa_change_guid(spa_t *spa)
793{
794 int error;
795 uint64_t guid;
796
797 mutex_enter(&spa->spa_vdev_top_lock);
798 mutex_enter(&spa_namespace_lock);
799 guid = spa_generate_guid(NULL);
800
801 error = dsl_sync_task(spa->spa_name, spa_change_guid_check,
802 spa_change_guid_sync, &guid, 5, ZFS_SPACE_CHECK_RESERVED);
803
804 if (error == 0) {
805 spa_config_sync(spa, B_FALSE, B_TRUE);
806 spa_event_notify(spa, NULL, ESC_ZFS_POOL_REGUID);
807 }
808
809 mutex_exit(&spa_namespace_lock);
810 mutex_exit(&spa->spa_vdev_top_lock);
811
812 return (error);
813}
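/*
 * This is the backend of "zpool reguid".  Both spa_vdev_top_lock and the
 * namespace lock are held across the sync task, presumably so that the
 * vdev tree cannot change while the guid sum is being rewritten.
 */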
814
815/*
816 * ==========================================================================
817 * SPA state manipulation (open/create/destroy/import/export)
818 * ==========================================================================
819 */
820
821static int
822spa_error_entry_compare(const void *a, const void *b)
823{
824 spa_error_entry_t *sa = (spa_error_entry_t *)a;
825 spa_error_entry_t *sb = (spa_error_entry_t *)b;
826 int ret;
827
828 ret = bcmp(&sa->se_bookmark, &sb->se_bookmark,
829 sizeof (zbookmark_phys_t));
830
831 if (ret < 0)
832 return (-1);
833 else if (ret > 0)
834 return (1);
835 else
836 return (0);
837}
838
839/*
840 * Utility function which retrieves copies of the current logs and
841 * re-initializes them in the process.
842 */
843void
844spa_get_errlists(spa_t *spa, avl_tree_t *last, avl_tree_t *scrub)
845{
846 ASSERT(MUTEX_HELD(&spa->spa_errlist_lock));
847
848 bcopy(&spa->spa_errlist_last, last, sizeof (avl_tree_t));
849 bcopy(&spa->spa_errlist_scrub, scrub, sizeof (avl_tree_t));
850
851 avl_create(&spa->spa_errlist_scrub,
852 spa_error_entry_compare, sizeof (spa_error_entry_t),
853 offsetof(spa_error_entry_t, se_avl));
854 avl_create(&spa->spa_errlist_last,
855 spa_error_entry_compare, sizeof (spa_error_entry_t),
856 offsetof(spa_error_entry_t, se_avl));
857}
858
859static void
860spa_taskqs_init(spa_t *spa, zio_type_t t, zio_taskq_type_t q)
861{
862 const zio_taskq_info_t *ztip = &zio_taskqs[t][q];
863 enum zti_modes mode = ztip->zti_mode;
864 uint_t value = ztip->zti_value;
865 uint_t count = ztip->zti_count;
866 spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
867 char name[32];
868 uint_t flags = 0;
869 boolean_t batch = B_FALSE;
870
871 if (mode == ZTI_MODE_NULL) {
872 tqs->stqs_count = 0;
873 tqs->stqs_taskq = NULL;
874 return;
875 }
876
877 ASSERT3U(count, >, 0);
878
879 tqs->stqs_count = count;
880 tqs->stqs_taskq = kmem_alloc(count * sizeof (taskq_t *), KM_SLEEP);
881
882 switch (mode) {
883 case ZTI_MODE_FIXED:
884 ASSERT3U(value, >=, 1);
885 value = MAX(value, 1);
886 break;
887
888 case ZTI_MODE_BATCH:
889 batch = B_TRUE;
890 flags |= TASKQ_THREADS_CPU_PCT;
891 value = zio_taskq_batch_pct;
892 break;
893
894 default:
895 panic("unrecognized mode for %s_%s taskq (%u:%u) in "
896 "spa_activate()",
897 zio_type_name[t], zio_taskq_types[q], mode, value);
898 break;
899 }
900
901 for (uint_t i = 0; i < count; i++) {
902 taskq_t *tq;
903
904 if (count > 1) {
905 (void) snprintf(name, sizeof (name), "%s_%s_%u",
906 zio_type_name[t], zio_taskq_types[q], i);
907 } else {
908 (void) snprintf(name, sizeof (name), "%s_%s",
909 zio_type_name[t], zio_taskq_types[q]);
910 }
911
912#ifdef SYSDC
913 if (zio_taskq_sysdc && spa->spa_proc != &p0) {
914 if (batch)
915 flags |= TASKQ_DC_BATCH;
916
917 tq = taskq_create_sysdc(name, value, 50, INT_MAX,
918 spa->spa_proc, zio_taskq_basedc, flags);
919 } else {
920#endif
921 pri_t pri = maxclsyspri;
922 /*
923 * The write issue taskq can be extremely CPU
924 * intensive. Run it at slightly lower priority
925 * than the other taskqs.
926 * FreeBSD notes:
927 * - numerically higher priorities are lower priorities;
928 * - if priorities divided by four (RQ_PPQ) are equal
929 * then a difference between them is insignificant.
930 */
931 if (t == ZIO_TYPE_WRITE && q == ZIO_TASKQ_ISSUE)
932#ifdef illumos
933 pri--;
934#else
935 pri += 4;
936#endif
937
938 tq = taskq_create_proc(name, value, pri, 50,
939 INT_MAX, spa->spa_proc, flags);
940#ifdef SYSDC
941 }
942#endif
943
944 tqs->stqs_taskq[i] = tq;
945 }
946}
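/*
 * For a ZTI_MODE_BATCH entry (the WRITE issue taskq in the table above),
 * "value" becomes a CPU percentage rather than a thread count: with the
 * default zio_taskq_batch_pct of 75, TASKQ_THREADS_CPU_PCT sizes the taskq
 * to roughly three quarters of the CPUs.
 */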
947
948static void
949spa_taskqs_fini(spa_t *spa, zio_type_t t, zio_taskq_type_t q)
950{
951 spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
952
953 if (tqs->stqs_taskq == NULL) {
954 ASSERT0(tqs->stqs_count);
955 return;
956 }
957
958 for (uint_t i = 0; i < tqs->stqs_count; i++) {
959 ASSERT3P(tqs->stqs_taskq[i], !=, NULL);
960 taskq_destroy(tqs->stqs_taskq[i]);
961 }
962
963 kmem_free(tqs->stqs_taskq, tqs->stqs_count * sizeof (taskq_t *));
964 tqs->stqs_taskq = NULL;
965}
966
967/*
968 * Dispatch a task to the appropriate taskq for the ZFS I/O type and priority.
969 * Note that a type may have multiple discrete taskqs to avoid lock contention
970 * on the taskq itself. In that case we choose which taskq to use at random
971 * using the low bits of gethrtime() (or cpu_ticks() in the FreeBSD kernel).
972 */
973void
974spa_taskq_dispatch_ent(spa_t *spa, zio_type_t t, zio_taskq_type_t q,
975 task_func_t *func, void *arg, uint_t flags, taskq_ent_t *ent)
976{
977 spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
978 taskq_t *tq;
979
980 ASSERT3P(tqs->stqs_taskq, !=, NULL);
981 ASSERT3U(tqs->stqs_count, !=, 0);
982
983 if (tqs->stqs_count == 1) {
984 tq = tqs->stqs_taskq[0];
985 } else {
986#ifdef _KERNEL
987 tq = tqs->stqs_taskq[cpu_ticks() % tqs->stqs_count];
988#else
989 tq = tqs->stqs_taskq[gethrtime() % tqs->stqs_count];
990#endif
991 }
992
993 taskq_dispatch_ent(tq, func, arg, flags, ent);
994}
995
996static void
997spa_create_zio_taskqs(spa_t *spa)
998{
999 for (int t = 0; t < ZIO_TYPES; t++) {
1000 for (int q = 0; q < ZIO_TASKQ_TYPES; q++) {
1001 spa_taskqs_init(spa, t, q);
1002 }
1003 }
1004}
1005
1006#ifdef _KERNEL
1007#ifdef SPA_PROCESS
1008static void
1009spa_thread(void *arg)
1010{
1011 callb_cpr_t cprinfo;
1012
1013 spa_t *spa = arg;
1014 user_t *pu = PTOU(curproc);
1015
1016 CALLB_CPR_INIT(&cprinfo, &spa->spa_proc_lock, callb_generic_cpr,
1017 spa->spa_name);
1018
1019 ASSERT(curproc != &p0);
1020 (void) snprintf(pu->u_psargs, sizeof (pu->u_psargs),
1021 "zpool-%s", spa->spa_name);
1022 (void) strlcpy(pu->u_comm, pu->u_psargs, sizeof (pu->u_comm));
1023
1024#ifdef PSRSET_BIND
1025 /* bind this thread to the requested psrset */
1026 if (zio_taskq_psrset_bind != PS_NONE) {
1027 pool_lock();
1028 mutex_enter(&cpu_lock);
1029 mutex_enter(&pidlock);
1030 mutex_enter(&curproc->p_lock);
1031
1032 if (cpupart_bind_thread(curthread, zio_taskq_psrset_bind,
1033 0, NULL, NULL) == 0) {
1034 curthread->t_bind_pset = zio_taskq_psrset_bind;
1035 } else {
1036 cmn_err(CE_WARN,
1037 "Couldn't bind process for zfs pool \"%s\" to "
1038 "pset %d\n", spa->spa_name, zio_taskq_psrset_bind);
1039 }
1040
1041 mutex_exit(&curproc->p_lock);
1042 mutex_exit(&pidlock);
1043 mutex_exit(&cpu_lock);
1044 pool_unlock();
1045 }
1046#endif
1047
1048#ifdef SYSDC
1049 if (zio_taskq_sysdc) {
1050 sysdc_thread_enter(curthread, 100, 0);
1051 }
1052#endif
1053
1054 spa->spa_proc = curproc;
1055 spa->spa_did = curthread->t_did;
1056
1057 spa_create_zio_taskqs(spa);
1058
1059 mutex_enter(&spa->spa_proc_lock);
1060 ASSERT(spa->spa_proc_state == SPA_PROC_CREATED);
1061
1062 spa->spa_proc_state = SPA_PROC_ACTIVE;
1063 cv_broadcast(&spa->spa_proc_cv);
1064
1065 CALLB_CPR_SAFE_BEGIN(&cprinfo);
1066 while (spa->spa_proc_state == SPA_PROC_ACTIVE)
1067 cv_wait(&spa->spa_proc_cv, &spa->spa_proc_lock);
1068 CALLB_CPR_SAFE_END(&cprinfo, &spa->spa_proc_lock);
1069
1070 ASSERT(spa->spa_proc_state == SPA_PROC_DEACTIVATE);
1071 spa->spa_proc_state = SPA_PROC_GONE;
1072 spa->spa_proc = &p0;
1073 cv_broadcast(&spa->spa_proc_cv);
1074 CALLB_CPR_EXIT(&cprinfo); /* drops spa_proc_lock */
1075
1076 mutex_enter(&curproc->p_lock);
1077 lwp_exit();
1078}
1079#endif /* SPA_PROCESS */
1080#endif
1081
1082/*
1083 * Activate an uninitialized pool.
1084 */
1085static void
1086spa_activate(spa_t *spa, int mode)
1087{
1088 ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);
1089
1090 spa->spa_state = POOL_STATE_ACTIVE;
1091 spa->spa_mode = mode;
1092
1093 spa->spa_normal_class = metaslab_class_create(spa, zfs_metaslab_ops);
1094 spa->spa_log_class = metaslab_class_create(spa, zfs_metaslab_ops);
1095
1096 /* Try to create a covering process */
1097 mutex_enter(&spa->spa_proc_lock);
1098 ASSERT(spa->spa_proc_state == SPA_PROC_NONE);
1099 ASSERT(spa->spa_proc == &p0);
1100 spa->spa_did = 0;
1101
1102#ifdef SPA_PROCESS
1103	/* Only create a process if we're going to be around for a while. */
1104 if (spa_create_process && strcmp(spa->spa_name, TRYIMPORT_NAME) != 0) {
1105 if (newproc(spa_thread, (caddr_t)spa, syscid, maxclsyspri,
1106 NULL, 0) == 0) {
1107 spa->spa_proc_state = SPA_PROC_CREATED;
1108 while (spa->spa_proc_state == SPA_PROC_CREATED) {
1109 cv_wait(&spa->spa_proc_cv,
1110 &spa->spa_proc_lock);
1111 }
1112 ASSERT(spa->spa_proc_state == SPA_PROC_ACTIVE);
1113 ASSERT(spa->spa_proc != &p0);
1114 ASSERT(spa->spa_did != 0);
1115 } else {
1116#ifdef _KERNEL
1117 cmn_err(CE_WARN,
1118 "Couldn't create process for zfs pool \"%s\"\n",
1119 spa->spa_name);
1120#endif
1121 }
1122 }
1123#endif /* SPA_PROCESS */
1124 mutex_exit(&spa->spa_proc_lock);
1125
1126 /* If we didn't create a process, we need to create our taskqs. */
1127 ASSERT(spa->spa_proc == &p0);
1128 if (spa->spa_proc == &p0) {
1129 spa_create_zio_taskqs(spa);
1130 }
1131
1132 /*
1133 * Start TRIM thread.
1134 */
1135 trim_thread_create(spa);
1136
1137 list_create(&spa->spa_config_dirty_list, sizeof (vdev_t),
1138 offsetof(vdev_t, vdev_config_dirty_node));
1139 list_create(&spa->spa_evicting_os_list, sizeof (objset_t),
1140 offsetof(objset_t, os_evicting_node));
1141 list_create(&spa->spa_state_dirty_list, sizeof (vdev_t),
1142 offsetof(vdev_t, vdev_state_dirty_node));
1143
1144 txg_list_create(&spa->spa_vdev_txg_list,
1145 offsetof(struct vdev, vdev_txg_node));
1146
1147 avl_create(&spa->spa_errlist_scrub,
1148 spa_error_entry_compare, sizeof (spa_error_entry_t),
1149 offsetof(spa_error_entry_t, se_avl));
1150 avl_create(&spa->spa_errlist_last,
1151 spa_error_entry_compare, sizeof (spa_error_entry_t),
1152 offsetof(spa_error_entry_t, se_avl));
1153}
1154
1155/*
1156 * Opposite of spa_activate().
1157 */
1158static void
1159spa_deactivate(spa_t *spa)
1160{
1161 ASSERT(spa->spa_sync_on == B_FALSE);
1162 ASSERT(spa->spa_dsl_pool == NULL);
1163 ASSERT(spa->spa_root_vdev == NULL);
1164 ASSERT(spa->spa_async_zio_root == NULL);
1165 ASSERT(spa->spa_state != POOL_STATE_UNINITIALIZED);
1166
1167 /*
1168 * Stop TRIM thread in case spa_unload() wasn't called directly
1169 * before spa_deactivate().
1170 */
1171 trim_thread_destroy(spa);
1172
1173 spa_evicting_os_wait(spa);
1174
1175 txg_list_destroy(&spa->spa_vdev_txg_list);
1176
1177 list_destroy(&spa->spa_config_dirty_list);
1178 list_destroy(&spa->spa_evicting_os_list);
1179 list_destroy(&spa->spa_state_dirty_list);
1180
1181 for (int t = 0; t < ZIO_TYPES; t++) {
1182 for (int q = 0; q < ZIO_TASKQ_TYPES; q++) {
1183 spa_taskqs_fini(spa, t, q);
1184 }
1185 }
1186
1187 metaslab_class_destroy(spa->spa_normal_class);
1188 spa->spa_normal_class = NULL;
1189
1190 metaslab_class_destroy(spa->spa_log_class);
1191 spa->spa_log_class = NULL;
1192
1193 /*
1194 * If this was part of an import or the open otherwise failed, we may
1195 * still have errors left in the queues. Empty them just in case.
1196 */
1197 spa_errlog_drain(spa);
1198
1199 avl_destroy(&spa->spa_errlist_scrub);
1200 avl_destroy(&spa->spa_errlist_last);
1201
1202 spa->spa_state = POOL_STATE_UNINITIALIZED;
1203
1204 mutex_enter(&spa->spa_proc_lock);
1205 if (spa->spa_proc_state != SPA_PROC_NONE) {
1206 ASSERT(spa->spa_proc_state == SPA_PROC_ACTIVE);
1207 spa->spa_proc_state = SPA_PROC_DEACTIVATE;
1208 cv_broadcast(&spa->spa_proc_cv);
1209 while (spa->spa_proc_state == SPA_PROC_DEACTIVATE) {
1210 ASSERT(spa->spa_proc != &p0);
1211 cv_wait(&spa->spa_proc_cv, &spa->spa_proc_lock);
1212 }
1213 ASSERT(spa->spa_proc_state == SPA_PROC_GONE);
1214 spa->spa_proc_state = SPA_PROC_NONE;
1215 }
1216 ASSERT(spa->spa_proc == &p0);
1217 mutex_exit(&spa->spa_proc_lock);
1218
1219#ifdef SPA_PROCESS
1220 /*
1221 * We want to make sure spa_thread() has actually exited the ZFS
1222 * module, so that the module can't be unloaded out from underneath
1223 * it.
1224 */
1225 if (spa->spa_did != 0) {
1226 thread_join(spa->spa_did);
1227 spa->spa_did = 0;
1228 }
1229#endif /* SPA_PROCESS */
1230}
1231
1232/*
1233 * Verify a pool configuration, and construct the vdev tree appropriately. This
1234 * will create all the necessary vdevs in the appropriate layout, with each vdev
1235 * in the CLOSED state. This will prep the pool before open/creation/import.
1236 * All vdev validation is done by the vdev_alloc() routine.
1237 */
1238static int
1239spa_config_parse(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent,
1240 uint_t id, int atype)
1241{
1242 nvlist_t **child;
1243 uint_t children;
1244 int error;
1245
1246 if ((error = vdev_alloc(spa, vdp, nv, parent, id, atype)) != 0)
1247 return (error);
1248
1249 if ((*vdp)->vdev_ops->vdev_op_leaf)
1250 return (0);
1251
1252 error = nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
1253 &child, &children);
1254
1255 if (error == ENOENT)
1256 return (0);
1257
1258 if (error) {
1259 vdev_free(*vdp);
1260 *vdp = NULL;
1261 return (SET_ERROR(EINVAL));
1262 }
1263
1264 for (int c = 0; c < children; c++) {
1265 vdev_t *vd;
1266 if ((error = spa_config_parse(spa, &vd, child[c], *vdp, c,
1267 atype)) != 0) {
1268 vdev_free(*vdp);
1269 *vdp = NULL;
1270 return (error);
1271 }
1272 }
1273
1274 ASSERT(*vdp != NULL);
1275
1276 return (0);
1277}
1278
1279/*
1280 * Opposite of spa_load().
1281 */
1282static void
1283spa_unload(spa_t *spa)
1284{
1285 int i;
1286
1287 ASSERT(MUTEX_HELD(&spa_namespace_lock));
1288
1289 /*
1290 * Stop TRIM thread.
1291 */
1292 trim_thread_destroy(spa);
1293
1294 /*
1295 * Stop async tasks.
1296 */
1297 spa_async_suspend(spa);
1298
1299 /*
1300 * Stop syncing.
1301 */
1302 if (spa->spa_sync_on) {
1303 txg_sync_stop(spa->spa_dsl_pool);
1304 spa->spa_sync_on = B_FALSE;
1305 }
1306
1307 /*
1308 * Even though vdev_free() also calls vdev_metaslab_fini, we need
1309 * to call it earlier, before we wait for async i/o to complete.
1310 * This ensures that there is no async metaslab prefetching, by
1311 * calling taskq_wait(mg_taskq).
1312 */
1313 if (spa->spa_root_vdev != NULL) {
1314 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
1315 for (int c = 0; c < spa->spa_root_vdev->vdev_children; c++)
1316 vdev_metaslab_fini(spa->spa_root_vdev->vdev_child[c]);
1317 spa_config_exit(spa, SCL_ALL, FTAG);
1318 }
1319
1320 /*
1321 * Wait for any outstanding async I/O to complete.
1322 */
1323 if (spa->spa_async_zio_root != NULL) {
1324 for (int i = 0; i < max_ncpus; i++)
1325 (void) zio_wait(spa->spa_async_zio_root[i]);
1326 kmem_free(spa->spa_async_zio_root, max_ncpus * sizeof (void *));
1327 spa->spa_async_zio_root = NULL;
1328 }
1329
1330 bpobj_close(&spa->spa_deferred_bpobj);
1331
1332 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
1333
1334 /*
1335 * Close all vdevs.
1336 */
1337 if (spa->spa_root_vdev)
1338 vdev_free(spa->spa_root_vdev);
1339 ASSERT(spa->spa_root_vdev == NULL);
1340
1341 /*
1342 * Close the dsl pool.
1343 */
1344 if (spa->spa_dsl_pool) {
1345 dsl_pool_close(spa->spa_dsl_pool);
1346 spa->spa_dsl_pool = NULL;
1347 spa->spa_meta_objset = NULL;
1348 }
1349
1350 ddt_unload(spa);
1351
1352 /*
1353 * Drop and purge level 2 cache
1354 */
1355 spa_l2cache_drop(spa);
1356
1357 for (i = 0; i < spa->spa_spares.sav_count; i++)
1358 vdev_free(spa->spa_spares.sav_vdevs[i]);
1359 if (spa->spa_spares.sav_vdevs) {
1360 kmem_free(spa->spa_spares.sav_vdevs,
1361 spa->spa_spares.sav_count * sizeof (void *));
1362 spa->spa_spares.sav_vdevs = NULL;
1363 }
1364 if (spa->spa_spares.sav_config) {
1365 nvlist_free(spa->spa_spares.sav_config);
1366 spa->spa_spares.sav_config = NULL;
1367 }
1368 spa->spa_spares.sav_count = 0;
1369
1370 for (i = 0; i < spa->spa_l2cache.sav_count; i++) {
1371 vdev_clear_stats(spa->spa_l2cache.sav_vdevs[i]);
1372 vdev_free(spa->spa_l2cache.sav_vdevs[i]);
1373 }
1374 if (spa->spa_l2cache.sav_vdevs) {
1375 kmem_free(spa->spa_l2cache.sav_vdevs,
1376 spa->spa_l2cache.sav_count * sizeof (void *));
1377 spa->spa_l2cache.sav_vdevs = NULL;
1378 }
1379 if (spa->spa_l2cache.sav_config) {
1380 nvlist_free(spa->spa_l2cache.sav_config);
1381 spa->spa_l2cache.sav_config = NULL;
1382 }
1383 spa->spa_l2cache.sav_count = 0;
1384
1385 spa->spa_async_suspended = 0;
1386
1387 if (spa->spa_comment != NULL) {
1388 spa_strfree(spa->spa_comment);
1389 spa->spa_comment = NULL;
1390 }
1391
1392 spa_config_exit(spa, SCL_ALL, FTAG);
1393}
1394
1395/*
1396 * Load (or re-load) the current list of vdevs describing the active spares for
1397 * this pool. When this is called, we have some form of basic information in
1398 * 'spa_spares.sav_config'. We parse this into vdevs, try to open them, and
1399 * then re-generate a more complete list including status information.
1400 */
1401static void
1402spa_load_spares(spa_t *spa)
1403{
1404 nvlist_t **spares;
1405 uint_t nspares;
1406 int i;
1407 vdev_t *vd, *tvd;
1408
1409 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
1410
1411 /*
1412 * First, close and free any existing spare vdevs.
1413 */
1414 for (i = 0; i < spa->spa_spares.sav_count; i++) {
1415 vd = spa->spa_spares.sav_vdevs[i];
1416
1417 /* Undo the call to spa_activate() below */
1418 if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
1419 B_FALSE)) != NULL && tvd->vdev_isspare)
1420 spa_spare_remove(tvd);
1421 vdev_close(vd);
1422 vdev_free(vd);
1423 }
1424
1425 if (spa->spa_spares.sav_vdevs)
1426 kmem_free(spa->spa_spares.sav_vdevs,
1427 spa->spa_spares.sav_count * sizeof (void *));
1428
1429 if (spa->spa_spares.sav_config == NULL)
1430 nspares = 0;
1431 else
1432 VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
1433 ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);
1434
1435 spa->spa_spares.sav_count = (int)nspares;
1436 spa->spa_spares.sav_vdevs = NULL;
1437
1438 if (nspares == 0)
1439 return;
1440
1441 /*
1442 * Construct the array of vdevs, opening them to get status in the
1443	 * process. For each spare, there are potentially two different vdev_t
1444 * structures associated with it: one in the list of spares (used only
1445 * for basic validation purposes) and one in the active vdev
1446 * configuration (if it's spared in). During this phase we open and
1447 * validate each vdev on the spare list. If the vdev also exists in the
1448 * active configuration, then we also mark this vdev as an active spare.
1449 */
1450 spa->spa_spares.sav_vdevs = kmem_alloc(nspares * sizeof (void *),
1451 KM_SLEEP);
1452 for (i = 0; i < spa->spa_spares.sav_count; i++) {
1453 VERIFY(spa_config_parse(spa, &vd, spares[i], NULL, 0,
1454 VDEV_ALLOC_SPARE) == 0);
1455 ASSERT(vd != NULL);
1456
1457 spa->spa_spares.sav_vdevs[i] = vd;
1458
1459 if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
1460 B_FALSE)) != NULL) {
1461 if (!tvd->vdev_isspare)
1462 spa_spare_add(tvd);
1463
1464 /*
1465 * We only mark the spare active if we were successfully
1466 * able to load the vdev. Otherwise, importing a pool
1467 * with a bad active spare would result in strange
1468 * behavior, because multiple pool would think the spare
1469	 * behavior, because multiple pools would think the spare
1470 *
1471 * There is a vulnerability here to an equally bizarre
1472 * circumstance, where a dead active spare is later
1473 * brought back to life (onlined or otherwise). Given
1474 * the rarity of this scenario, and the extra complexity
1475 * it adds, we ignore the possibility.
1476 */
1477 if (!vdev_is_dead(tvd))
1478 spa_spare_activate(tvd);
1479 }
1480
1481 vd->vdev_top = vd;
1482 vd->vdev_aux = &spa->spa_spares;
1483
1484 if (vdev_open(vd) != 0)
1485 continue;
1486
1487 if (vdev_validate_aux(vd) == 0)
1488 spa_spare_add(vd);
1489 }
1490
1491 /*
1492 * Recompute the stashed list of spares, with status information
1493 * this time.
1494 */
1495 VERIFY(nvlist_remove(spa->spa_spares.sav_config, ZPOOL_CONFIG_SPARES,
1496 DATA_TYPE_NVLIST_ARRAY) == 0);
1497
1498 spares = kmem_alloc(spa->spa_spares.sav_count * sizeof (void *),
1499 KM_SLEEP);
1500 for (i = 0; i < spa->spa_spares.sav_count; i++)
1501 spares[i] = vdev_config_generate(spa,
1502 spa->spa_spares.sav_vdevs[i], B_TRUE, VDEV_CONFIG_SPARE);
1503 VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
1504 ZPOOL_CONFIG_SPARES, spares, spa->spa_spares.sav_count) == 0);
1505 for (i = 0; i < spa->spa_spares.sav_count; i++)
1506 nvlist_free(spares[i]);
1507 kmem_free(spares, spa->spa_spares.sav_count * sizeof (void *));
1508}
1509
1510/*
1511 * Load (or re-load) the current list of vdevs describing the active l2cache for
1512 * this pool. When this is called, we have some form of basic information in
1513 * 'spa_l2cache.sav_config'. We parse this into vdevs, try to open them, and
1514 * then re-generate a more complete list including status information.
1515 * Devices which are already active have their details maintained, and are
1516 * not re-opened.
1517 */
1518static void
1519spa_load_l2cache(spa_t *spa)
1520{
1521 nvlist_t **l2cache;
1522 uint_t nl2cache;
1523 int i, j, oldnvdevs;
1524 uint64_t guid;
1525 vdev_t *vd, **oldvdevs, **newvdevs;
1526 spa_aux_vdev_t *sav = &spa->spa_l2cache;
1527
1528 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
1529
1530 if (sav->sav_config != NULL) {
1531 VERIFY(nvlist_lookup_nvlist_array(sav->sav_config,
1532 ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
1533 newvdevs = kmem_alloc(nl2cache * sizeof (void *), KM_SLEEP);
1534 } else {
1535 nl2cache = 0;
1536 newvdevs = NULL;
1537 }
1538
1539 oldvdevs = sav->sav_vdevs;
1540 oldnvdevs = sav->sav_count;
1541 sav->sav_vdevs = NULL;
1542 sav->sav_count = 0;
1543
1544 /*
1545 * Process new nvlist of vdevs.
1546 */
1547 for (i = 0; i < nl2cache; i++) {
1548 VERIFY(nvlist_lookup_uint64(l2cache[i], ZPOOL_CONFIG_GUID,
1549 &guid) == 0);
1550
1551 newvdevs[i] = NULL;
1552 for (j = 0; j < oldnvdevs; j++) {
1553 vd = oldvdevs[j];
1554 if (vd != NULL && guid == vd->vdev_guid) {
1555 /*
1556 * Retain previous vdev for add/remove ops.
1557 */
1558 newvdevs[i] = vd;
1559 oldvdevs[j] = NULL;
1560 break;
1561 }
1562 }
1563
1564 if (newvdevs[i] == NULL) {
1565 /*
1566 * Create new vdev
1567 */
1568 VERIFY(spa_config_parse(spa, &vd, l2cache[i], NULL, 0,
1569 VDEV_ALLOC_L2CACHE) == 0);
1570 ASSERT(vd != NULL);
1571 newvdevs[i] = vd;
1572
1573 /*
1574 * Commit this vdev as an l2cache device,
1575 * even if it fails to open.
1576 */
1577 spa_l2cache_add(vd);
1578
1579 vd->vdev_top = vd;
1580 vd->vdev_aux = sav;
1581
1582 spa_l2cache_activate(vd);
1583
1584 if (vdev_open(vd) != 0)
1585 continue;
1586
1587 (void) vdev_validate_aux(vd);
1588
1589 if (!vdev_is_dead(vd))
1590 l2arc_add_vdev(spa, vd);
1591 }
1592 }
1593
1594 /*
1595 * Purge vdevs that were dropped
1596 */
1597 for (i = 0; i < oldnvdevs; i++) {
1598 uint64_t pool;
1599
1600 vd = oldvdevs[i];
1601 if (vd != NULL) {
1602 ASSERT(vd->vdev_isl2cache);
1603
1604 if (spa_l2cache_exists(vd->vdev_guid, &pool) &&
1605 pool != 0ULL && l2arc_vdev_present(vd))
1606 l2arc_remove_vdev(vd);
1607 vdev_clear_stats(vd);
1608 vdev_free(vd);
1609 }
1610 }
1611
1612 if (oldvdevs)
1613 kmem_free(oldvdevs, oldnvdevs * sizeof (void *));
1614
1615 if (sav->sav_config == NULL)
1616 goto out;
1617
1618 sav->sav_vdevs = newvdevs;
1619 sav->sav_count = (int)nl2cache;
1620
1621 /*
1622 * Recompute the stashed list of l2cache devices, with status
1623 * information this time.
1624 */
1625 VERIFY(nvlist_remove(sav->sav_config, ZPOOL_CONFIG_L2CACHE,
1626 DATA_TYPE_NVLIST_ARRAY) == 0);
1627
1628 l2cache = kmem_alloc(sav->sav_count * sizeof (void *), KM_SLEEP);
1629 for (i = 0; i < sav->sav_count; i++)
1630 l2cache[i] = vdev_config_generate(spa,
1631 sav->sav_vdevs[i], B_TRUE, VDEV_CONFIG_L2CACHE);
1632 VERIFY(nvlist_add_nvlist_array(sav->sav_config,
1633 ZPOOL_CONFIG_L2CACHE, l2cache, sav->sav_count) == 0);
1634out:
1635 for (i = 0; i < sav->sav_count; i++)
1636 nvlist_free(l2cache[i]);
1637 if (sav->sav_count)
1638 kmem_free(l2cache, sav->sav_count * sizeof (void *));
1639}
1640
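/*
 * Read the packed nvlist stored in MOS object 'obj' (whose bonus buffer
 * holds the packed size) and unpack it into *value.  The caller is
 * responsible for freeing the returned nvlist.
 */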
1641static int
1642load_nvlist(spa_t *spa, uint64_t obj, nvlist_t **value)
1643{
1644 dmu_buf_t *db;
1645 char *packed = NULL;
1646 size_t nvsize = 0;
1647 int error;
1648 *value = NULL;
1649
1650 error = dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db);
1651 if (error != 0)
1652 return (error);
1653
1654 nvsize = *(uint64_t *)db->db_data;
1655 dmu_buf_rele(db, FTAG);
1656
1657 packed = kmem_alloc(nvsize, KM_SLEEP);
1658 error = dmu_read(spa->spa_meta_objset, obj, 0, nvsize, packed,
1659 DMU_READ_PREFETCH);
1660 if (error == 0)
1661 error = nvlist_unpack(packed, nvsize, value, 0);
1662 kmem_free(packed, nvsize);
1663
1664 return (error);
1665}
1666
1667/*
1668 * Checks to see if the given vdev could not be opened, in which case we post a
1669 * sysevent to notify the autoreplace code that the device has been removed.
1670 */
1671static void
1672spa_check_removed(vdev_t *vd)
1673{
1674 for (int c = 0; c < vd->vdev_children; c++)
1675 spa_check_removed(vd->vdev_child[c]);
1676
1677 if (vd->vdev_ops->vdev_op_leaf && vdev_is_dead(vd) &&
1678 !vd->vdev_ishole) {
1679 zfs_post_autoreplace(vd->vdev_spa, vd);
1680 spa_event_notify(vd->vdev_spa, vd, ESC_ZFS_VDEV_CHECK);
1681 }
1682}
1683
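/*
 * Recursively copy the per-vdev ZAP object numbers (top-level and leaf)
 * from the MOS-derived vdev tree 'mvd' into the in-core tree 'vd'.
 */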
1684static void
1685spa_config_valid_zaps(vdev_t *vd, vdev_t *mvd)
1686{
1687 ASSERT3U(vd->vdev_children, ==, mvd->vdev_children);
1688
1689 vd->vdev_top_zap = mvd->vdev_top_zap;
1690 vd->vdev_leaf_zap = mvd->vdev_leaf_zap;
1691
1692 for (uint64_t i = 0; i < vd->vdev_children; i++) {
1693 spa_config_valid_zaps(vd->vdev_child[i], mvd->vdev_child[i]);
1694 }
1695}
1696
1697/*
1698 * Validate the current config against the MOS config
1699 */
1700static boolean_t
1701spa_config_valid(spa_t *spa, nvlist_t *config)
1702{
1703 vdev_t *mrvd, *rvd = spa->spa_root_vdev;
1704 nvlist_t *nv;
1705
1706 VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nv) == 0);
1707
1708 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
1709 VERIFY(spa_config_parse(spa, &mrvd, nv, NULL, 0, VDEV_ALLOC_LOAD) == 0);
1710
1711 ASSERT3U(rvd->vdev_children, ==, mrvd->vdev_children);
1712
1713 /*
1714 * If we're doing a normal import, then build up any additional
1715 * diagnostic information about missing devices in this config.
1716 * We'll pass this up to the user for further processing.
1717 */
1718 if (!(spa->spa_import_flags & ZFS_IMPORT_MISSING_LOG)) {
1719 nvlist_t **child, *nv;
1720 uint64_t idx = 0;
1721
1722 child = kmem_alloc(rvd->vdev_children * sizeof (nvlist_t **),
1723 KM_SLEEP);
1724 VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);
1725
1726 for (int c = 0; c < rvd->vdev_children; c++) {
1727 vdev_t *tvd = rvd->vdev_child[c];
1728 vdev_t *mtvd = mrvd->vdev_child[c];
1729
1730 if (tvd->vdev_ops == &vdev_missing_ops &&
1731 mtvd->vdev_ops != &vdev_missing_ops &&
1732 mtvd->vdev_islog)
1733 child[idx++] = vdev_config_generate(spa, mtvd,
1734 B_FALSE, 0);
1735 }
1736
1737 if (idx) {
1738 VERIFY(nvlist_add_nvlist_array(nv,
1739 ZPOOL_CONFIG_CHILDREN, child, idx) == 0);
1740 VERIFY(nvlist_add_nvlist(spa->spa_load_info,
1741 ZPOOL_CONFIG_MISSING_DEVICES, nv) == 0);
1742
1743 for (int i = 0; i < idx; i++)
1744 nvlist_free(child[i]);
1745 }
1746 nvlist_free(nv);
1747 kmem_free(child, rvd->vdev_children * sizeof (char **));
1748 }
1749
1750 /*
1751 * Compare the root vdev tree with the information we have
1752 * from the MOS config (mrvd). Check each top-level vdev
1753 * with the corresponding MOS config top-level (mtvd).
1754 */
1755 for (int c = 0; c < rvd->vdev_children; c++) {
1756 vdev_t *tvd = rvd->vdev_child[c];
1757 vdev_t *mtvd = mrvd->vdev_child[c];
1758
1759 /*
1760 * Resolve any "missing" vdevs in the current configuration.
1761 * If we find that the MOS config has more accurate information
1762 * about the top-level vdev then use that vdev instead.
1763 */
1764 if (tvd->vdev_ops == &vdev_missing_ops &&
1765 mtvd->vdev_ops != &vdev_missing_ops) {
1766
1767 if (!(spa->spa_import_flags & ZFS_IMPORT_MISSING_LOG))
1768 continue;
1769
1770 /*
1771 * Device specific actions.
1772 */
1773 if (mtvd->vdev_islog) {
1774 spa_set_log_state(spa, SPA_LOG_CLEAR);
1775 } else {
1776 /*
1777 * XXX - once we have 'readonly' pool
1778 * support we should be able to handle
1779 * missing data devices by transitioning
1780 * the pool to readonly.
1781 */
1782 continue;
1783 }
1784
1785 /*
1786 * Swap the missing vdev with the data we were
1787 * able to obtain from the MOS config.
1788 */
1789 vdev_remove_child(rvd, tvd);
1790 vdev_remove_child(mrvd, mtvd);
1791
1792 vdev_add_child(rvd, mtvd);
1793 vdev_add_child(mrvd, tvd);
1794
1795 spa_config_exit(spa, SCL_ALL, FTAG);
1796 vdev_load(mtvd);
1797 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
1798
1799 vdev_reopen(rvd);
1800 } else {
1801 if (mtvd->vdev_islog) {
1802 /*
1803 * Load the slog device's state from the MOS
1804 * config since it's possible that the label
1805 * does not contain the most up-to-date
1806 * information.
1807 */
1808 vdev_load_log_state(tvd, mtvd);
1809 vdev_reopen(tvd);
1810 }
1811
1812 /*
1813 * Per-vdev ZAP info is stored exclusively in the MOS.
1814 */
1815 spa_config_valid_zaps(tvd, mtvd);
1816 }
1817 }
1818
1819 vdev_free(mrvd);
1820 spa_config_exit(spa, SCL_ALL, FTAG);
1821
1822 /*
1823 * Ensure we were able to validate the config.
1824 */
1825 return (rvd->vdev_guid_sum == spa->spa_uberblock.ub_guid_sum);
1826}
1827
1828/*
1829 * Check for missing log devices
1830 */
1831static boolean_t
1832spa_check_logs(spa_t *spa)
1833{
1834 boolean_t rv = B_FALSE;
1835 dsl_pool_t *dp = spa_get_dsl(spa);
1836
1837 switch (spa->spa_log_state) {
1838 case SPA_LOG_MISSING:
1839 /* need to recheck in case slog has been restored */
1840 case SPA_LOG_UNKNOWN:
1841 rv = (dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
1842 zil_check_log_chain, NULL, DS_FIND_CHILDREN) != 0);
1843 if (rv)
1844 spa_set_log_state(spa, SPA_LOG_MISSING);
1845 break;
1846 }
1847 return (rv);
1848}
1849
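/*
 * Passivate the metaslab groups of all log (slog) top-level vdevs so no
 * further allocations come from them; returns B_TRUE if any slog was found.
 */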
1850static boolean_t
1851spa_passivate_log(spa_t *spa)
1852{
1853 vdev_t *rvd = spa->spa_root_vdev;
1854 boolean_t slog_found = B_FALSE;
1855
1856 ASSERT(spa_config_held(spa, SCL_ALLOC, RW_WRITER));
1857
1858 if (!spa_has_slogs(spa))
1859 return (B_FALSE);
1860
1861 for (int c = 0; c < rvd->vdev_children; c++) {
1862 vdev_t *tvd = rvd->vdev_child[c];
1863 metaslab_group_t *mg = tvd->vdev_mg;
1864
1865 if (tvd->vdev_islog) {
1866 metaslab_group_passivate(mg);
1867 slog_found = B_TRUE;
1868 }
1869 }
1870
1871 return (slog_found);
1872}
1873
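/*
 * Reactivate the metaslab groups of all log top-level vdevs; the reverse of
 * spa_passivate_log().
 */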
1874static void
1875spa_activate_log(spa_t *spa)
1876{
1877 vdev_t *rvd = spa->spa_root_vdev;
1878
1879 ASSERT(spa_config_held(spa, SCL_ALLOC, RW_WRITER));
1880
1881 for (int c = 0; c < rvd->vdev_children; c++) {
1882 vdev_t *tvd = rvd->vdev_child[c];
1883 metaslab_group_t *mg = tvd->vdev_mg;
1884
1885 if (tvd->vdev_islog)
1886 metaslab_group_activate(mg);
1887 }
1888}
1889
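/*
 * Offline the ZIL on every dataset in the pool via zil_vdev_offline() and,
 * on success, sync out the current txg so the "stubby" log blocks can be
 * removed by zil_sync().
 */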
1890int
1891spa_offline_log(spa_t *spa)
1892{
1893 int error;
1894
1895 error = dmu_objset_find(spa_name(spa), zil_vdev_offline,
1896 NULL, DS_FIND_CHILDREN);
1897 if (error == 0) {
1898 /*
1899 * We successfully offlined the log device, sync out the
1900 * current txg so that the "stubby" block can be removed
1901 * by zil_sync().
1902 */
1903 txg_wait_synced(spa->spa_dsl_pool, 0);
1904 }
1905 return (error);
1906}
1907
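/*
 * Run spa_check_removed() over every vdev in an aux (spare/l2cache) list.
 */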
1908static void
1909spa_aux_check_removed(spa_aux_vdev_t *sav)
1910{
1911 int i;
1912
1913 for (i = 0; i < sav->sav_count; i++)
1914 spa_check_removed(sav->sav_vdevs[i]);
1915}
1916
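/*
 * Called when a log-claim zio completes; tracks the highest block birth txg
 * seen in spa_claim_max_txg so spa_load() can wait for all claims to sync.
 */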
1917void
1918spa_claim_notify(zio_t *zio)
1919{
1920 spa_t *spa = zio->io_spa;
1921
1922 if (zio->io_error)
1923 return;
1924
1925 mutex_enter(&spa->spa_props_lock); /* any mutex will do */
1926 if (spa->spa_claim_max_txg < zio->io_bp->blk_birth)
1927 spa->spa_claim_max_txg = zio->io_bp->blk_birth;
1928 mutex_exit(&spa->spa_props_lock);
1929}
1930
1931typedef struct spa_load_error {
1932 uint64_t sle_meta_count;
1933 uint64_t sle_data_count;
1934} spa_load_error_t;
1935
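/*
 * Completion callback for the verification reads issued by
 * spa_load_verify_cb(): count the failure as a metadata or data error and
 * release the inflight slot.
 */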
1936static void
1937spa_load_verify_done(zio_t *zio)
1938{
1939 blkptr_t *bp = zio->io_bp;
1940 spa_load_error_t *sle = zio->io_private;
1941 dmu_object_type_t type = BP_GET_TYPE(bp);
1942 int error = zio->io_error;
1943 spa_t *spa = zio->io_spa;
1944
1945 if (error) {
1946 if ((BP_GET_LEVEL(bp) != 0 || DMU_OT_IS_METADATA(type)) &&
1947 type != DMU_OT_INTENT_LOG)
1948 atomic_inc_64(&sle->sle_meta_count);
1949 else
1950 atomic_inc_64(&sle->sle_data_count);
1951 }
1952 zio_data_buf_free(zio->io_data, zio->io_size);
1953
1954 mutex_enter(&spa->spa_scrub_lock);
1955 spa->spa_scrub_inflight--;
1956 cv_broadcast(&spa->spa_scrub_io_cv);
1957 mutex_exit(&spa->spa_scrub_lock);
1958}
1959
1960/*
1961 * Maximum number of concurrent scrub i/os to create while verifying
1962 * a pool during import.
1963 */
1964int spa_load_verify_maxinflight = 10000;
1965boolean_t spa_load_verify_metadata = B_TRUE;
1966boolean_t spa_load_verify_data = B_TRUE;
1967
1968SYSCTL_INT(_vfs_zfs, OID_AUTO, spa_load_verify_maxinflight, CTLFLAG_RWTUN,
1969 &spa_load_verify_maxinflight, 0,
1970 "Maximum number of concurrent scrub I/Os to create while verifying a "
1971    "pool during import");
1972
1973SYSCTL_INT(_vfs_zfs, OID_AUTO, spa_load_verify_metadata, CTLFLAG_RWTUN,
1974 &spa_load_verify_metadata, 0,
1975 "Check metadata on import?");
1976
1977SYSCTL_INT(_vfs_zfs, OID_AUTO, spa_load_verify_data, CTLFLAG_RWTUN,
1978 &spa_load_verify_data, 0,
1979 "Check user data on import?");
1980
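/*
 * traverse_pool() callback for spa_load_verify(): issue an asynchronous
 * scrub-style read for each block pointer, throttled to
 * spa_load_verify_maxinflight outstanding i/os.
 */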
1981/*ARGSUSED*/
1982static int
1983spa_load_verify_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
1984 const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
1985{
1986 if (bp == NULL || BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp))
1987 return (0);
1988 /*
1989 * Note: normally this routine will not be called if
1990 * spa_load_verify_metadata is not set. However, it may be useful
1991 * to manually set the flag after the traversal has begun.
1992 */
1993 if (!spa_load_verify_metadata)
1994 return (0);
1995 if (BP_GET_BUFC_TYPE(bp) == ARC_BUFC_DATA && !spa_load_verify_data)
1996 return (0);
1997
1998 zio_t *rio = arg;
1999 size_t size = BP_GET_PSIZE(bp);
2000 void *data = zio_data_buf_alloc(size);
2001
2002 mutex_enter(&spa->spa_scrub_lock);
2003 while (spa->spa_scrub_inflight >= spa_load_verify_maxinflight)
2004 cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
2005 spa->spa_scrub_inflight++;
2006 mutex_exit(&spa->spa_scrub_lock);
2007
2008 zio_nowait(zio_read(rio, spa, bp, data, size,
2009 spa_load_verify_done, rio->io_private, ZIO_PRIORITY_SCRUB,
2010 ZIO_FLAG_SPECULATIVE | ZIO_FLAG_CANFAIL |
2011 ZIO_FLAG_SCRUB | ZIO_FLAG_RAW, zb));
2012 return (0);
2013}
2014
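/*
 * dmu_objset_find_dp() callback used by spa_load_verify() to reject pools
 * containing dataset names longer than ZFS_MAX_DATASET_NAME_LEN.
 */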
2015/* ARGSUSED */
2016int
2017verify_dataset_name_len(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg)
2018{
2019 if (dsl_dataset_namelen(ds) >= ZFS_MAX_DATASET_NAME_LEN)
2020 return (SET_ERROR(ENAMETOOLONG));
2021
2022 return (0);
2023}
2024
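/*
 * Verify the pool as part of load/import: check dataset name lengths, then
 * (subject to the rewind policy and the spa_load_verify_* tunables) traverse
 * the pool issuing scrub reads and decide whether the resulting error counts
 * fall within the policy's limits.
 */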
2025static int
2026spa_load_verify(spa_t *spa)
2027{
2028 zio_t *rio;
2029 spa_load_error_t sle = { 0 };
2030 zpool_rewind_policy_t policy;
2031 boolean_t verify_ok = B_FALSE;
2032 int error = 0;
2033
2034 zpool_get_rewind_policy(spa->spa_config, &policy);
2035
2036 if (policy.zrp_request & ZPOOL_NEVER_REWIND)
2037 return (0);
2038
2039 dsl_pool_config_enter(spa->spa_dsl_pool, FTAG);
2040 error = dmu_objset_find_dp(spa->spa_dsl_pool,
2041 spa->spa_dsl_pool->dp_root_dir_obj, verify_dataset_name_len, NULL,
2042 DS_FIND_CHILDREN);
2043 dsl_pool_config_exit(spa->spa_dsl_pool, FTAG);
2044 if (error != 0)
2045 return (error);
2046
2047 rio = zio_root(spa, NULL, &sle,
2048 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE);
2049
2050 if (spa_load_verify_metadata) {
2051 error = traverse_pool(spa, spa->spa_verify_min_txg,
2052 TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA,
2053 spa_load_verify_cb, rio);
2054 }
2055
2056 (void) zio_wait(rio);
2057
2058 spa->spa_load_meta_errors = sle.sle_meta_count;
2059 spa->spa_load_data_errors = sle.sle_data_count;
2060
2061 if (!error && sle.sle_meta_count <= policy.zrp_maxmeta &&
2062 sle.sle_data_count <= policy.zrp_maxdata) {
2063 int64_t loss = 0;
2064
2065 verify_ok = B_TRUE;
2066 spa->spa_load_txg = spa->spa_uberblock.ub_txg;
2067 spa->spa_load_txg_ts = spa->spa_uberblock.ub_timestamp;
2068
2069 loss = spa->spa_last_ubsync_txg_ts - spa->spa_load_txg_ts;
2070 VERIFY(nvlist_add_uint64(spa->spa_load_info,
2071 ZPOOL_CONFIG_LOAD_TIME, spa->spa_load_txg_ts) == 0);
2072 VERIFY(nvlist_add_int64(spa->spa_load_info,
2073 ZPOOL_CONFIG_REWIND_TIME, loss) == 0);
2074 VERIFY(nvlist_add_uint64(spa->spa_load_info,
2075 ZPOOL_CONFIG_LOAD_DATA_ERRORS, sle.sle_data_count) == 0);
2076 } else {
2077 spa->spa_load_max_txg = spa->spa_uberblock.ub_txg;
2078 }
2079
2080 if (error) {
2081 if (error != ENXIO && error != EIO)
2082 error = SET_ERROR(EIO);
2083 return (error);
2084 }
2085
2086 return (verify_ok ? 0 : EIO);
2087}
2088
2089/*
2090 * Find a value in the pool props object.
2091 */
2092static void
2093spa_prop_find(spa_t *spa, zpool_prop_t prop, uint64_t *val)
2094{
2095 (void) zap_lookup(spa->spa_meta_objset, spa->spa_pool_props_object,
2096 zpool_prop_to_name(prop), sizeof (uint64_t), 1, val);
2097}
2098
2099/*
2100 * Find a value in the pool directory object.
2101 */
2102static int
2103spa_dir_prop(spa_t *spa, const char *name, uint64_t *val)
2104{
2105 return (zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
2106 name, sizeof (uint64_t), 1, val));
2107}
2108
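/*
 * Helper for spa_load_impl(): mark the given vdev (typically the root vdev)
 * CANT_OPEN for reason 'aux' and return 'err'.
 */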
2109static int
2110spa_vdev_err(vdev_t *vdev, vdev_aux_t aux, int err)
2111{
2112 vdev_set_state(vdev, B_TRUE, VDEV_STATE_CANT_OPEN, aux);
2113 return (err);
2114}
2115
2116/*
2117 * Fix up config after a partly-completed split. This is done with the
2118 * ZPOOL_CONFIG_SPLIT nvlist. Both the splitting pool and the split-off
2119 * pool have that entry in their config, but only the splitting one contains
2120 * a list of all the guids of the vdevs that are being split off.
2121 *
2122 * This function determines what to do with that list: either rejoin
2123 * all the disks to the pool, or complete the splitting process. To attempt
2124 * the rejoin, each disk that is offlined is marked online again, and
2125 * we do a reopen() call. If the vdev label for every disk that was
2126 * marked online indicates it was successfully split off (VDEV_AUX_SPLIT_POOL)
2127 * then we call vdev_split() on each disk, and complete the split.
2128 *
2129 * Otherwise we leave the config alone, with all the vdevs in place in
2130 * the original pool.
2131 */
2132static void
2133spa_try_repair(spa_t *spa, nvlist_t *config)
2134{
2135 uint_t extracted;
2136 uint64_t *glist;
2137 uint_t i, gcount;
2138 nvlist_t *nvl;
2139 vdev_t **vd;
2140 boolean_t attempt_reopen;
2141
2142 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_SPLIT, &nvl) != 0)
2143 return;
2144
2145 /* check that the config is complete */
2146 if (nvlist_lookup_uint64_array(nvl, ZPOOL_CONFIG_SPLIT_LIST,
2147 &glist, &gcount) != 0)
2148 return;
2149
2150 vd = kmem_zalloc(gcount * sizeof (vdev_t *), KM_SLEEP);
2151
2152 /* attempt to online all the vdevs & validate */
2153 attempt_reopen = B_TRUE;
2154 for (i = 0; i < gcount; i++) {
2155 if (glist[i] == 0) /* vdev is hole */
2156 continue;
2157
2158 vd[i] = spa_lookup_by_guid(spa, glist[i], B_FALSE);
2159 if (vd[i] == NULL) {
2160 /*
2161 * Don't bother attempting to reopen the disks;
2162 * just do the split.
2163 */
2164 attempt_reopen = B_FALSE;
2165 } else {
2166 /* attempt to re-online it */
2167 vd[i]->vdev_offline = B_FALSE;
2168 }
2169 }
2170
2171 if (attempt_reopen) {
2172 vdev_reopen(spa->spa_root_vdev);
2173
2174 /* check each device to see what state it's in */
2175 for (extracted = 0, i = 0; i < gcount; i++) {
2176 if (vd[i] != NULL &&
2177 vd[i]->vdev_stat.vs_aux != VDEV_AUX_SPLIT_POOL)
2178 break;
2179 ++extracted;
2180 }
2181 }
2182
2183 /*
2184 * If every disk has been moved to the new pool, or if we never
2185 * even attempted to look at them, then we split them off for
2186 * good.
2187 */
2188 if (!attempt_reopen || gcount == extracted) {
2189 for (i = 0; i < gcount; i++)
2190 if (vd[i] != NULL)
2191 vdev_split(vd[i]);
2192 vdev_reopen(spa->spa_root_vdev);
2193 }
2194
2195 kmem_free(vd, gcount * sizeof (vdev_t *));
2196}
2197
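/*
 * Load a pool from its spa_config: pull out the pool guid, comment, version,
 * txg and split information, hand off to spa_load_impl(), and record the
 * outcome (load state, minref, error report) on the spa.
 */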
2198static int
2199spa_load(spa_t *spa, spa_load_state_t state, spa_import_type_t type,
2200 boolean_t mosconfig)
2201{
2202 nvlist_t *config = spa->spa_config;
2203 char *ereport = FM_EREPORT_ZFS_POOL;
2204 char *comment;
2205 int error;
2206 uint64_t pool_guid;
2207 nvlist_t *nvl;
2208
2209 if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &pool_guid))
2210 return (SET_ERROR(EINVAL));
2211
2212 ASSERT(spa->spa_comment == NULL);
2213 if (nvlist_lookup_string(config, ZPOOL_CONFIG_COMMENT, &comment) == 0)
2214 spa->spa_comment = spa_strdup(comment);
2215
2216 /*
2217 * Versioning wasn't explicitly added to the label until later, so if
2218 * it's not present treat it as the initial version.
2219 */
2220 if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
2221 &spa->spa_ubsync.ub_version) != 0)
2222 spa->spa_ubsync.ub_version = SPA_VERSION_INITIAL;
2223
2224 (void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
2225 &spa->spa_config_txg);
2226
2227 if ((state == SPA_LOAD_IMPORT || state == SPA_LOAD_TRYIMPORT) &&
2228 spa_guid_exists(pool_guid, 0)) {
2229 error = SET_ERROR(EEXIST);
2230 } else {
2231 spa->spa_config_guid = pool_guid;
2232
2233 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_SPLIT,
2234 &nvl) == 0) {
2235 VERIFY(nvlist_dup(nvl, &spa->spa_config_splitting,
2236 KM_SLEEP) == 0);
2237 }
2238
2239 nvlist_free(spa->spa_load_info);
2240 spa->spa_load_info = fnvlist_alloc();
2241
2242 gethrestime(&spa->spa_loaded_ts);
2243 error = spa_load_impl(spa, pool_guid, config, state, type,
2244 mosconfig, &ereport);
2245 }
2246
2247 /*
2248 * Don't count references from objsets that are already closed
2249 * and are making their way through the eviction process.
2250 */
2251 spa_evicting_os_wait(spa);
2252 spa->spa_minref = refcount_count(&spa->spa_refcount);
2253 if (error) {
2254 if (error != EEXIST) {
2255 spa->spa_loaded_ts.tv_sec = 0;
2256 spa->spa_loaded_ts.tv_nsec = 0;
2257 }
2258 if (error != EBADF) {
2259 zfs_ereport_post(ereport, spa, NULL, NULL, 0, 0);
2260 }
2261 }
2262 spa->spa_load_state = error ? SPA_LOAD_ERROR : SPA_LOAD_NONE;
2263 spa->spa_ena = 0;
2264
2265 return (error);
2266}
2267
2268/*
2269 * Count the number of per-vdev ZAPs associated with all of the vdevs in the
2270 * vdev tree rooted in the given vd, and ensure that each ZAP is present in the
2271 * spa's per-vdev ZAP list.
2272 */
2273static uint64_t
2274vdev_count_verify_zaps(vdev_t *vd)
2275{
2276 spa_t *spa = vd->vdev_spa;
2277 uint64_t total = 0;
2278 if (vd->vdev_top_zap != 0) {
2279 total++;
2280 ASSERT0(zap_lookup_int(spa->spa_meta_objset,
2281 spa->spa_all_vdev_zaps, vd->vdev_top_zap));
2282 }
2283 if (vd->vdev_leaf_zap != 0) {
2284 total++;
2285 ASSERT0(zap_lookup_int(spa->spa_meta_objset,
2286 spa->spa_all_vdev_zaps, vd->vdev_leaf_zap));
2287 }
2288
2289 for (uint64_t i = 0; i < vd->vdev_children; i++) {
2290 total += vdev_count_verify_zaps(vd->vdev_child[i]);
2291 }
2292
2293 return (total);
2294}
2295
2296/*
2297 * Load an existing storage pool, using the pool's builtin spa_config as a
2298 * source of configuration information.
2299 */
2300static int
2301spa_load_impl(spa_t *spa, uint64_t pool_guid, nvlist_t *config,
2302 spa_load_state_t state, spa_import_type_t type, boolean_t mosconfig,
2303 char **ereport)
2304{
2305 int error = 0;
2306 nvlist_t *nvroot = NULL;
2307 nvlist_t *label;
2308 vdev_t *rvd;
2309 uberblock_t *ub = &spa->spa_uberblock;
2310 uint64_t children, config_cache_txg = spa->spa_config_txg;
2311 int orig_mode = spa->spa_mode;
2312 int parse;
2313 uint64_t obj;
2314 boolean_t missing_feat_write = B_FALSE;
2315
2316 /*
2317 * If this is an untrusted config, access the pool in read-only mode.
2318 * This prevents things like resilvering recently removed devices.
2319 */
2320 if (!mosconfig)
2321 spa->spa_mode = FREAD;
2322
2323 ASSERT(MUTEX_HELD(&spa_namespace_lock));
2324
2325 spa->spa_load_state = state;
2326
2327 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvroot))
2328 return (SET_ERROR(EINVAL));
2329
2330 parse = (type == SPA_IMPORT_EXISTING ?
2331 VDEV_ALLOC_LOAD : VDEV_ALLOC_SPLIT);
2332
2333 /*
2334 * Create "The Godfather" zio to hold all async IOs
2335 */
2336 spa->spa_async_zio_root = kmem_alloc(max_ncpus * sizeof (void *),
2337 KM_SLEEP);
2338 for (int i = 0; i < max_ncpus; i++) {
2339 spa->spa_async_zio_root[i] = zio_root(spa, NULL, NULL,
2340 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
2341 ZIO_FLAG_GODFATHER);
2342 }
2343
2344 /*
2345 * Parse the configuration into a vdev tree. We explicitly set the
2346 * value that will be returned by spa_version() since parsing the
2347 * configuration requires knowing the version number.
2348 */
2349 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
2350 error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, parse);
2351 spa_config_exit(spa, SCL_ALL, FTAG);
2352
2353 if (error != 0)
2354 return (error);
2355
2356 ASSERT(spa->spa_root_vdev == rvd);
2357 ASSERT3U(spa->spa_min_ashift, >=, SPA_MINBLOCKSHIFT);
2358 ASSERT3U(spa->spa_max_ashift, <=, SPA_MAXBLOCKSHIFT);
2359
2360 if (type != SPA_IMPORT_ASSEMBLE) {
2361 ASSERT(spa_guid(spa) == pool_guid);
2362 }
2363
2364 /*
2365 * Try to open all vdevs, loading each label in the process.
2366 */
2367 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
2368 error = vdev_open(rvd);
2369 spa_config_exit(spa, SCL_ALL, FTAG);
2370 if (error != 0)
2371 return (error);
2372
2373 /*
2374 * We need to validate the vdev labels against the configuration that
2375 * we have in hand, which is dependent on the setting of mosconfig. If
2376 * mosconfig is true then we're validating the vdev labels based on
2377 * that config. Otherwise, we're validating against the cached config
2378 * (zpool.cache) that was read when we loaded the zfs module, and then
2379 * later we will recursively call spa_load() and validate against
2380 * the vdev config.
2381 *
2382 * If we're assembling a new pool that's been split off from an
2383 * existing pool, the labels haven't yet been updated so we skip
2384 * validation for now.
2385 */
2386 if (type != SPA_IMPORT_ASSEMBLE) {
2387 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
2388 error = vdev_validate(rvd, mosconfig);
2389 spa_config_exit(spa, SCL_ALL, FTAG);
2390
2391 if (error != 0)
2392 return (error);
2393
2394 if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN)
2395 return (SET_ERROR(ENXIO));
2396 }
2397
2398 /*
2399 * Find the best uberblock.
2400 */
2401 vdev_uberblock_load(rvd, ub, &label);
2402
2403 /*
2404 * If we weren't able to find a single valid uberblock, return failure.
2405 */
2406 if (ub->ub_txg == 0) {
2407 nvlist_free(label);
2408 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, ENXIO));
2409 }
2410
2411 /*
2412 * If the pool has an unsupported version we can't open it.
2413 */
2414 if (!SPA_VERSION_IS_SUPPORTED(ub->ub_version)) {
2415 nvlist_free(label);
2416 return (spa_vdev_err(rvd, VDEV_AUX_VERSION_NEWER, ENOTSUP));
2417 }
2418
2419 if (ub->ub_version >= SPA_VERSION_FEATURES) {
2420 nvlist_t *features;
2421
2422 /*
2423 * If we weren't able to find what's necessary for reading the
2424 * MOS in the label, return failure.
2425 */
2426 if (label == NULL || nvlist_lookup_nvlist(label,
2427 ZPOOL_CONFIG_FEATURES_FOR_READ, &features) != 0) {
2428 nvlist_free(label);
2429 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA,
2430 ENXIO));
2431 }
2432
2433 /*
2434 * Update our in-core representation with the definitive values
2435 * from the label.
2436 */
2437 nvlist_free(spa->spa_label_features);
2438 VERIFY(nvlist_dup(features, &spa->spa_label_features, 0) == 0);
2439 }
2440
2441 nvlist_free(label);
2442
2443 /*
2444 * Look through entries in the label nvlist's features_for_read. If
2445 * there is a feature listed there which we don't understand then we
2446 * cannot open a pool.
2447 */
2448 if (ub->ub_version >= SPA_VERSION_FEATURES) {
2449 nvlist_t *unsup_feat;
2450
2451 VERIFY(nvlist_alloc(&unsup_feat, NV_UNIQUE_NAME, KM_SLEEP) ==
2452 0);
2453
2454 for (nvpair_t *nvp = nvlist_next_nvpair(spa->spa_label_features,
2455 NULL); nvp != NULL;
2456 nvp = nvlist_next_nvpair(spa->spa_label_features, nvp)) {
2457 if (!zfeature_is_supported(nvpair_name(nvp))) {
2458 VERIFY(nvlist_add_string(unsup_feat,
2459 nvpair_name(nvp), "") == 0);
2460 }
2461 }
2462
2463 if (!nvlist_empty(unsup_feat)) {
2464 VERIFY(nvlist_add_nvlist(spa->spa_load_info,
2465 ZPOOL_CONFIG_UNSUP_FEAT, unsup_feat) == 0);
2466 nvlist_free(unsup_feat);
2467 return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT,
2468 ENOTSUP));
2469 }
2470
2471 nvlist_free(unsup_feat);
2472 }
2473
2474 /*
2475 * If the vdev guid sum doesn't match the uberblock, we have an
2476 * incomplete configuration. We first check to see if the pool
2477	 * is aware of the complete config (i.e. ZPOOL_CONFIG_VDEV_CHILDREN).
2478 * If it is, defer the vdev_guid_sum check till later so we
2479 * can handle missing vdevs.
2480 */
2481 if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VDEV_CHILDREN,
2482 &children) != 0 && mosconfig && type != SPA_IMPORT_ASSEMBLE &&
2483 rvd->vdev_guid_sum != ub->ub_guid_sum)
2484 return (spa_vdev_err(rvd, VDEV_AUX_BAD_GUID_SUM, ENXIO));
2485
2486 if (type != SPA_IMPORT_ASSEMBLE && spa->spa_config_splitting) {
2487 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
2488 spa_try_repair(spa, config);
2489 spa_config_exit(spa, SCL_ALL, FTAG);
2490 nvlist_free(spa->spa_config_splitting);
2491 spa->spa_config_splitting = NULL;
2492 }
2493
2494 /*
2495 * Initialize internal SPA structures.
2496 */
2497 spa->spa_state = POOL_STATE_ACTIVE;
2498 spa->spa_ubsync = spa->spa_uberblock;
2499 spa->spa_verify_min_txg = spa->spa_extreme_rewind ?
2500 TXG_INITIAL - 1 : spa_last_synced_txg(spa) - TXG_DEFER_SIZE - 1;
2501 spa->spa_first_txg = spa->spa_last_ubsync_txg ?
2502 spa->spa_last_ubsync_txg : spa_last_synced_txg(spa) + 1;
2503 spa->spa_claim_max_txg = spa->spa_first_txg;
2504 spa->spa_prev_software_version = ub->ub_software_version;
2505
2506 error = dsl_pool_init(spa, spa->spa_first_txg, &spa->spa_dsl_pool);
2507 if (error)
2508 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2509 spa->spa_meta_objset = spa->spa_dsl_pool->dp_meta_objset;
2510
2511 if (spa_dir_prop(spa, DMU_POOL_CONFIG, &spa->spa_config_object) != 0)
2512 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2513
2514 if (spa_version(spa) >= SPA_VERSION_FEATURES) {
2515 boolean_t missing_feat_read = B_FALSE;
2516 nvlist_t *unsup_feat, *enabled_feat;
2517
2518 if (spa_dir_prop(spa, DMU_POOL_FEATURES_FOR_READ,
2519 &spa->spa_feat_for_read_obj) != 0) {
2520 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2521 }
2522
2523 if (spa_dir_prop(spa, DMU_POOL_FEATURES_FOR_WRITE,
2524 &spa->spa_feat_for_write_obj) != 0) {
2525 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2526 }
2527
2528 if (spa_dir_prop(spa, DMU_POOL_FEATURE_DESCRIPTIONS,
2529 &spa->spa_feat_desc_obj) != 0) {
2530 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2531 }
2532
2533 enabled_feat = fnvlist_alloc();
2534 unsup_feat = fnvlist_alloc();
2535
2536 if (!spa_features_check(spa, B_FALSE,
2537 unsup_feat, enabled_feat))
2538 missing_feat_read = B_TRUE;
2539
2540 if (spa_writeable(spa) || state == SPA_LOAD_TRYIMPORT) {
2541 if (!spa_features_check(spa, B_TRUE,
2542 unsup_feat, enabled_feat)) {
2543 missing_feat_write = B_TRUE;
2544 }
2545 }
2546
2547 fnvlist_add_nvlist(spa->spa_load_info,
2548 ZPOOL_CONFIG_ENABLED_FEAT, enabled_feat);
2549
2550 if (!nvlist_empty(unsup_feat)) {
2551 fnvlist_add_nvlist(spa->spa_load_info,
2552 ZPOOL_CONFIG_UNSUP_FEAT, unsup_feat);
2553 }
2554
2555 fnvlist_free(enabled_feat);
2556 fnvlist_free(unsup_feat);
2557
2558 if (!missing_feat_read) {
2559 fnvlist_add_boolean(spa->spa_load_info,
2560 ZPOOL_CONFIG_CAN_RDONLY);
2561 }
2562
2563 /*
2564 * If the state is SPA_LOAD_TRYIMPORT, our objective is
2565 * twofold: to determine whether the pool is available for
2566 * import in read-write mode and (if it is not) whether the
2567 * pool is available for import in read-only mode. If the pool
2568 * is available for import in read-write mode, it is displayed
2569 * as available in userland; if it is not available for import
2570 * in read-only mode, it is displayed as unavailable in
2571 * userland. If the pool is available for import in read-only
2572 * mode but not read-write mode, it is displayed as unavailable
2573 * in userland with a special note that the pool is actually
2574 * available for open in read-only mode.
2575 *
2576 * As a result, if the state is SPA_LOAD_TRYIMPORT and we are
2577 * missing a feature for write, we must first determine whether
2578 * the pool can be opened read-only before returning to
2579 * userland in order to know whether to display the
2580 * abovementioned note.
2581 */
2582 if (missing_feat_read || (missing_feat_write &&
2583 spa_writeable(spa))) {
2584 return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT,
2585 ENOTSUP));
2586 }
2587
2588 /*
2589 * Load refcounts for ZFS features from disk into an in-memory
2590 * cache during SPA initialization.
2591 */
2592 for (spa_feature_t i = 0; i < SPA_FEATURES; i++) {
2593 uint64_t refcount;
2594
2595 error = feature_get_refcount_from_disk(spa,
2596 &spa_feature_table[i], &refcount);
2597 if (error == 0) {
2598 spa->spa_feat_refcount_cache[i] = refcount;
2599 } else if (error == ENOTSUP) {
2600 spa->spa_feat_refcount_cache[i] =
2601 SPA_FEATURE_DISABLED;
2602 } else {
2603 return (spa_vdev_err(rvd,
2604 VDEV_AUX_CORRUPT_DATA, EIO));
2605 }
2606 }
2607 }
2608
2609 if (spa_feature_is_active(spa, SPA_FEATURE_ENABLED_TXG)) {
2610 if (spa_dir_prop(spa, DMU_POOL_FEATURE_ENABLED_TXG,
2611 &spa->spa_feat_enabled_txg_obj) != 0)
2612 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2613 }
2614
2615 spa->spa_is_initializing = B_TRUE;
2616 error = dsl_pool_open(spa->spa_dsl_pool);
2617 spa->spa_is_initializing = B_FALSE;
2618 if (error != 0)
2619 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2620
2621 if (!mosconfig) {
2622 uint64_t hostid;
2623 nvlist_t *policy = NULL, *nvconfig;
2624
2625 if (load_nvlist(spa, spa->spa_config_object, &nvconfig) != 0)
2626 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2627
2628 if (!spa_is_root(spa) && nvlist_lookup_uint64(nvconfig,
2629 ZPOOL_CONFIG_HOSTID, &hostid) == 0) {
2630 char *hostname;
2631 unsigned long myhostid = 0;
2632
2633 VERIFY(nvlist_lookup_string(nvconfig,
2634 ZPOOL_CONFIG_HOSTNAME, &hostname) == 0);
2635
2636#ifdef _KERNEL
2637 myhostid = zone_get_hostid(NULL);
2638#else /* _KERNEL */
2639 /*
2640 * We're emulating the system's hostid in userland, so
2641 * we can't use zone_get_hostid().
2642 */
2643 (void) ddi_strtoul(hw_serial, NULL, 10, &myhostid);
2644#endif /* _KERNEL */
2645 if (check_hostid && hostid != 0 && myhostid != 0 &&
2646 hostid != myhostid) {
2647 nvlist_free(nvconfig);
2648 cmn_err(CE_WARN, "pool '%s' could not be "
2649 "loaded as it was last accessed by "
2650 "another system (host: %s hostid: 0x%lx). "
2651 "See: http://illumos.org/msg/ZFS-8000-EY",
2652 spa_name(spa), hostname,
2653 (unsigned long)hostid);
2654 return (SET_ERROR(EBADF));
2655 }
2656 }
2657 if (nvlist_lookup_nvlist(spa->spa_config,
2658 ZPOOL_REWIND_POLICY, &policy) == 0)
2659 VERIFY(nvlist_add_nvlist(nvconfig,
2660 ZPOOL_REWIND_POLICY, policy) == 0);
2661
2662 spa_config_set(spa, nvconfig);
2663 spa_unload(spa);
2664 spa_deactivate(spa);
2665 spa_activate(spa, orig_mode);
2666
2667 return (spa_load(spa, state, SPA_IMPORT_EXISTING, B_TRUE));
2668 }
2669
2670 /* Grab the secret checksum salt from the MOS. */
2671 error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
2672 DMU_POOL_CHECKSUM_SALT, 1,
2673 sizeof (spa->spa_cksum_salt.zcs_bytes),
2674 spa->spa_cksum_salt.zcs_bytes);
2675 if (error == ENOENT) {
2676 /* Generate a new salt for subsequent use */
2677 (void) random_get_pseudo_bytes(spa->spa_cksum_salt.zcs_bytes,
2678 sizeof (spa->spa_cksum_salt.zcs_bytes));
2679 } else if (error != 0) {
2680 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2681 }
2682
2683 if (spa_dir_prop(spa, DMU_POOL_SYNC_BPOBJ, &obj) != 0)
2684 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2685 error = bpobj_open(&spa->spa_deferred_bpobj, spa->spa_meta_objset, obj);
2686 if (error != 0)
2687 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2688
2689 /*
2690 * Load the bit that tells us to use the new accounting function
2691 * (raid-z deflation). If we have an older pool, this will not
2692 * be present.
2693 */
2694 error = spa_dir_prop(spa, DMU_POOL_DEFLATE, &spa->spa_deflate);
2695 if (error != 0 && error != ENOENT)
2696 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2697
2698 error = spa_dir_prop(spa, DMU_POOL_CREATION_VERSION,
2699 &spa->spa_creation_version);
2700 if (error != 0 && error != ENOENT)
2701 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2702
2703 /*
2704 * Load the persistent error log. If we have an older pool, this will
2705 * not be present.
2706 */
2707 error = spa_dir_prop(spa, DMU_POOL_ERRLOG_LAST, &spa->spa_errlog_last);
2708 if (error != 0 && error != ENOENT)
2709 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2710
2711 error = spa_dir_prop(spa, DMU_POOL_ERRLOG_SCRUB,
2712 &spa->spa_errlog_scrub);
2713 if (error != 0 && error != ENOENT)
2714 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2715
2716 /*
2717 * Load the history object. If we have an older pool, this
2718 * will not be present.
2719 */
2720 error = spa_dir_prop(spa, DMU_POOL_HISTORY, &spa->spa_history);
2721 if (error != 0 && error != ENOENT)
2722 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2723
2724 /*
2725 * Load the per-vdev ZAP map. If we have an older pool, this will not
2726	 * be present; in this case, defer its creation to a later time so we
2727	 * avoid dirtying the MOS this early, outside of syncing context. See
2728 * spa_sync_config_object.
2729 */
2730
2731 /* The sentinel is only available in the MOS config. */
2732 nvlist_t *mos_config;
2733 if (load_nvlist(spa, spa->spa_config_object, &mos_config) != 0)
2734 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2735
2736 error = spa_dir_prop(spa, DMU_POOL_VDEV_ZAP_MAP,
2737 &spa->spa_all_vdev_zaps);
2738
2739	if (error == ENOENT) {
2740		VERIFY(!nvlist_exists(mos_config,
2741		    ZPOOL_CONFIG_HAS_PER_VDEV_ZAPS));
2742		spa->spa_avz_action = AVZ_ACTION_INITIALIZE;
2743		ASSERT0(vdev_count_verify_zaps(spa->spa_root_vdev));
2744	} else if (error != 0) {
2745		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2746	} else if (!nvlist_exists(mos_config, ZPOOL_CONFIG_HAS_PER_VDEV_ZAPS)) {
2743 /*
2744 * An older version of ZFS overwrote the sentinel value, so
2745 * we have orphaned per-vdev ZAPs in the MOS. Defer their
2746 * destruction to later; see spa_sync_config_object.
2747 */
2748 spa->spa_avz_action = AVZ_ACTION_DESTROY;
2749 /*
2750 * We're assuming that no vdevs have had their ZAPs created
2751 * before this. Better be sure of it.
2752 */
2753 ASSERT0(vdev_count_verify_zaps(spa->spa_root_vdev));
2754 }
2755 nvlist_free(mos_config);
2756
2757 /*
2758 * If we're assembling the pool from the split-off vdevs of
2759 * an existing pool, we don't want to attach the spares & cache
2760 * devices.
2761 */
2762
2763 /*
2764 * Load any hot spares for this pool.
2765 */
2766 error = spa_dir_prop(spa, DMU_POOL_SPARES, &spa->spa_spares.sav_object);
2767 if (error != 0 && error != ENOENT)
2768 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2769 if (error == 0 && type != SPA_IMPORT_ASSEMBLE) {
2770 ASSERT(spa_version(spa) >= SPA_VERSION_SPARES);
2771 if (load_nvlist(spa, spa->spa_spares.sav_object,
2772 &spa->spa_spares.sav_config) != 0)
2773 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2774
2775 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
2776 spa_load_spares(spa);
2777 spa_config_exit(spa, SCL_ALL, FTAG);
2778 } else if (error == 0) {
2779 spa->spa_spares.sav_sync = B_TRUE;
2780 }
2781
2782 /*
2783 * Load any level 2 ARC devices for this pool.
2784 */
2785 error = spa_dir_prop(spa, DMU_POOL_L2CACHE,
2786 &spa->spa_l2cache.sav_object);
2787 if (error != 0 && error != ENOENT)
2788 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2789 if (error == 0 && type != SPA_IMPORT_ASSEMBLE) {
2790 ASSERT(spa_version(spa) >= SPA_VERSION_L2CACHE);
2791 if (load_nvlist(spa, spa->spa_l2cache.sav_object,
2792 &spa->spa_l2cache.sav_config) != 0)
2793 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2794
2795 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
2796 spa_load_l2cache(spa);
2797 spa_config_exit(spa, SCL_ALL, FTAG);
2798 } else if (error == 0) {
2799 spa->spa_l2cache.sav_sync = B_TRUE;
2800 }
2801
2802 spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION);
2803
2804 error = spa_dir_prop(spa, DMU_POOL_PROPS, &spa->spa_pool_props_object);
2805 if (error && error != ENOENT)
2806 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2807
2808 if (error == 0) {
2809 uint64_t autoreplace;
2810
2811 spa_prop_find(spa, ZPOOL_PROP_BOOTFS, &spa->spa_bootfs);
2812 spa_prop_find(spa, ZPOOL_PROP_AUTOREPLACE, &autoreplace);
2813 spa_prop_find(spa, ZPOOL_PROP_DELEGATION, &spa->spa_delegation);
2814 spa_prop_find(spa, ZPOOL_PROP_FAILUREMODE, &spa->spa_failmode);
2815 spa_prop_find(spa, ZPOOL_PROP_AUTOEXPAND, &spa->spa_autoexpand);
2816 spa_prop_find(spa, ZPOOL_PROP_DEDUPDITTO,
2817 &spa->spa_dedup_ditto);
2818
2819 spa->spa_autoreplace = (autoreplace != 0);
2820 }
2821
2822 /*
2823 * If the 'autoreplace' property is set, then post a resource notifying
2824 * the ZFS DE that it should not issue any faults for unopenable
2825 * devices. We also iterate over the vdevs, and post a sysevent for any
2826 * unopenable vdevs so that the normal autoreplace handler can take
2827 * over.
2828 */
2829 if (spa->spa_autoreplace && state != SPA_LOAD_TRYIMPORT) {
2830 spa_check_removed(spa->spa_root_vdev);
2831 /*
2832 * For the import case, this is done in spa_import(), because
2833 * at this point we're using the spare definitions from
2834 * the MOS config, not necessarily from the userland config.
2835 */
2836 if (state != SPA_LOAD_IMPORT) {
2837 spa_aux_check_removed(&spa->spa_spares);
2838 spa_aux_check_removed(&spa->spa_l2cache);
2839 }
2840 }
2841
2842 /*
2843 * Load the vdev state for all toplevel vdevs.
2844 */
2845 vdev_load(rvd);
2846
2847 /*
2848 * Propagate the leaf DTLs we just loaded all the way up the tree.
2849 */
2850 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
2851 vdev_dtl_reassess(rvd, 0, 0, B_FALSE);
2852 spa_config_exit(spa, SCL_ALL, FTAG);
2853
2854 /*
2855 * Load the DDTs (dedup tables).
2856 */
2857 error = ddt_load(spa);
2858 if (error != 0)
2859 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2860
2861 spa_update_dspace(spa);
2862
2863 /*
2864 * Validate the config, using the MOS config to fill in any
2865 * information which might be missing. If we fail to validate
2866 * the config then declare the pool unfit for use. If we're
2867 * assembling a pool from a split, the log is not transferred
2868 * over.
2869 */
2870 if (type != SPA_IMPORT_ASSEMBLE) {
2871 nvlist_t *nvconfig;
2872
2873 if (load_nvlist(spa, spa->spa_config_object, &nvconfig) != 0)
2874 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2875
2876 if (!spa_config_valid(spa, nvconfig)) {
2877 nvlist_free(nvconfig);
2878 return (spa_vdev_err(rvd, VDEV_AUX_BAD_GUID_SUM,
2879 ENXIO));
2880 }
2881 nvlist_free(nvconfig);
2882
2883 /*
2884 * Now that we've validated the config, check the state of the
2885 * root vdev. If it can't be opened, it indicates one or
2886 * more toplevel vdevs are faulted.
2887 */
2888 if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN)
2889 return (SET_ERROR(ENXIO));
2890
2891 if (spa_writeable(spa) && spa_check_logs(spa)) {
2892 *ereport = FM_EREPORT_ZFS_LOG_REPLAY;
2893 return (spa_vdev_err(rvd, VDEV_AUX_BAD_LOG, ENXIO));
2894 }
2895 }
2896
2897 if (missing_feat_write) {
2898 ASSERT(state == SPA_LOAD_TRYIMPORT);
2899
2900 /*
2901 * At this point, we know that we can open the pool in
2902 * read-only mode but not read-write mode. We now have enough
2903 * information and can return to userland.
2904 */
2905 return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT, ENOTSUP));
2906 }
2907
2908 /*
2909 * We've successfully opened the pool, verify that we're ready
2910 * to start pushing transactions.
2911 */
2912 if (state != SPA_LOAD_TRYIMPORT) {
2913		if ((error = spa_load_verify(spa)) != 0)
2914 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA,
2915 error));
2916 }
2917
2918 if (spa_writeable(spa) && (state == SPA_LOAD_RECOVER ||
2919 spa->spa_load_max_txg == UINT64_MAX)) {
2920 dmu_tx_t *tx;
2921 int need_update = B_FALSE;
2922 dsl_pool_t *dp = spa_get_dsl(spa);
2923
2924 ASSERT(state != SPA_LOAD_TRYIMPORT);
2925
2926 /*
2927 * Claim log blocks that haven't been committed yet.
2928 * This must all happen in a single txg.
2929 * Note: spa_claim_max_txg is updated by spa_claim_notify(),
2930 * invoked from zil_claim_log_block()'s i/o done callback.
2931 * Price of rollback is that we abandon the log.
2932 */
2933 spa->spa_claiming = B_TRUE;
2934
2935 tx = dmu_tx_create_assigned(dp, spa_first_txg(spa));
2936 (void) dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
2937 zil_claim, tx, DS_FIND_CHILDREN);
2938 dmu_tx_commit(tx);
2939
2940 spa->spa_claiming = B_FALSE;
2941
2942 spa_set_log_state(spa, SPA_LOG_GOOD);
2943 spa->spa_sync_on = B_TRUE;
2944 txg_sync_start(spa->spa_dsl_pool);
2945
2946 /*
2947 * Wait for all claims to sync. We sync up to the highest
2948 * claimed log block birth time so that claimed log blocks
2949 * don't appear to be from the future. spa_claim_max_txg
2950 * will have been set for us by either zil_check_log_chain()
2951 * (invoked from spa_check_logs()) or zil_claim() above.
2952 */
2953 txg_wait_synced(spa->spa_dsl_pool, spa->spa_claim_max_txg);
2954
2955 /*
2956 * If the config cache is stale, or we have uninitialized
2957 * metaslabs (see spa_vdev_add()), then update the config.
2958 *
2959 * If this is a verbatim import, trust the current
2960 * in-core spa_config and update the disk labels.
2961 */
2962 if (config_cache_txg != spa->spa_config_txg ||
2963 state == SPA_LOAD_IMPORT ||
2964 state == SPA_LOAD_RECOVER ||
2965 (spa->spa_import_flags & ZFS_IMPORT_VERBATIM))
2966 need_update = B_TRUE;
2967
2968 for (int c = 0; c < rvd->vdev_children; c++)
2969 if (rvd->vdev_child[c]->vdev_ms_array == 0)
2970 need_update = B_TRUE;
2971
2972 /*
2973		 * Update the config cache asynchronously in case we're the
2974 * root pool, in which case the config cache isn't writable yet.
2975 */
2976 if (need_update)
2977 spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
2978
2979 /*
2980 * Check all DTLs to see if anything needs resilvering.
2981 */
2982 if (!dsl_scan_resilvering(spa->spa_dsl_pool) &&
2983 vdev_resilver_needed(rvd, NULL, NULL))
2984 spa_async_request(spa, SPA_ASYNC_RESILVER);
2985
2986 /*
2987 * Log the fact that we booted up (so that we can detect if
2988 * we rebooted in the middle of an operation).
2989 */
2990 spa_history_log_version(spa, "open");
2991
2992 /*
2993 * Delete any inconsistent datasets.
2994 */
2995 (void) dmu_objset_find(spa_name(spa),
2996 dsl_destroy_inconsistent, NULL, DS_FIND_CHILDREN);
2997
2998 /*
2999 * Clean up any stale temporary dataset userrefs.
3000 */
3001 dsl_pool_clean_tmp_userrefs(spa->spa_dsl_pool);
3002 }
3003
3004 return (0);
3005}
3006
3007static int
3008spa_load_retry(spa_t *spa, spa_load_state_t state, int mosconfig)
3009{
3010 int mode = spa->spa_mode;
3011
3012 spa_unload(spa);
3013 spa_deactivate(spa);
3014
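	/*
	 * Cap the next attempt at the txg just below the uberblock that
	 * failed, so the retry selects an older uberblock.
	 */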
3015 spa->spa_load_max_txg = spa->spa_uberblock.ub_txg - 1;
3016
3017 spa_activate(spa, mode);
3018 spa_async_suspend(spa);
3019
3020 return (spa_load(spa, state, SPA_IMPORT_EXISTING, mosconfig));
3021}
3022
3023/*
3024 * If spa_load() fails this function will try loading prior txg's. If
3025 * 'state' is SPA_LOAD_RECOVER and one of these loads succeeds the pool
3026 * will be rewound to that txg. If 'state' is not SPA_LOAD_RECOVER this
3027 * function will not rewind the pool and will return the same error as
3028 * spa_load().
3029 */
3030static int
3031spa_load_best(spa_t *spa, spa_load_state_t state, int mosconfig,
3032 uint64_t max_request, int rewind_flags)
3033{
3034 nvlist_t *loadinfo = NULL;
3035 nvlist_t *config = NULL;
3036 int load_error, rewind_error;
3037 uint64_t safe_rewind_txg;
3038 uint64_t min_txg;
3039
3040 if (spa->spa_load_txg && state == SPA_LOAD_RECOVER) {
3041 spa->spa_load_max_txg = spa->spa_load_txg;
3042 spa_set_log_state(spa, SPA_LOG_CLEAR);
3043 } else {
3044 spa->spa_load_max_txg = max_request;
3045 if (max_request != UINT64_MAX)
3046 spa->spa_extreme_rewind = B_TRUE;
3047 }
3048
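	/*
	 * Initial load attempt; rewind_error tracks load_error until a
	 * retry below revises it.
	 */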
3049 load_error = rewind_error = spa_load(spa, state, SPA_IMPORT_EXISTING,
3050 mosconfig);
3051 if (load_error == 0)
3052 return (0);
3053
3054 if (spa->spa_root_vdev != NULL)
3055 config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
3056
3057 spa->spa_last_ubsync_txg = spa->spa_uberblock.ub_txg;
3058 spa->spa_last_ubsync_txg_ts = spa->spa_uberblock.ub_timestamp;
3059
3060 if (rewind_flags & ZPOOL_NEVER_REWIND) {
3061 nvlist_free(config);
3062 return (load_error);
3063 }
3064
3065 if (state == SPA_LOAD_RECOVER) {
3066 /* Price of rolling back is discarding txgs, including log */
3067 spa_set_log_state(spa, SPA_LOG_CLEAR);
3068 } else {
3069 /*
3070		 * If we aren't rolling back, save the load info from our first
3071 * import attempt so that we can restore it after attempting
3072 * to rewind.
3073 */
3074 loadinfo = spa->spa_load_info;
3075 spa->spa_load_info = fnvlist_alloc();
3076 }
3077
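	/*
	 * Establish the rewind window: it starts at the last synced
	 * uberblock and, unless extreme rewind is requested, reaches back
	 * no further than TXG_DEFER_SIZE txgs.
	 */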
3078 spa->spa_load_max_txg = spa->spa_last_ubsync_txg;
3079 safe_rewind_txg = spa->spa_last_ubsync_txg - TXG_DEFER_SIZE;
3080 min_txg = (rewind_flags & ZPOOL_EXTREME_REWIND) ?
3081 TXG_INITIAL : safe_rewind_txg;
3082
3083 /*
3084 * Continue as long as we're finding errors, we're still within
3085	 * the acceptable rewind range, and we're still finding uberblocks.
3086 */
3087 while (rewind_error && spa->spa_uberblock.ub_txg >= min_txg &&
3088 spa->spa_uberblock.ub_txg <= spa->spa_load_max_txg) {
3089 if (spa->spa_load_max_txg < safe_rewind_txg)
3090 spa->spa_extreme_rewind = B_TRUE;
3091 rewind_error = spa_load_retry(spa, state, mosconfig);
3092 }
3093
3094 spa->spa_extreme_rewind = B_FALSE;
3095 spa->spa_load_max_txg = UINT64_MAX;
3096
3097 if (config && (rewind_error || state != SPA_LOAD_RECOVER))
3098 spa_config_set(spa, config);
3099
3100 if (state == SPA_LOAD_RECOVER) {
3101 ASSERT3P(loadinfo, ==, NULL);
3102 return (rewind_error);
3103 } else {
3104 /* Store the rewind info as part of the initial load info */
3105 fnvlist_add_nvlist(loadinfo, ZPOOL_CONFIG_REWIND_INFO,
3106 spa->spa_load_info);
3107
3108 /* Restore the initial load info */
3109 fnvlist_free(spa->spa_load_info);
3110 spa->spa_load_info = loadinfo;
3111
3112 return (load_error);
3113 }
3114}
3115
3116/*
3117 * Pool Open/Import
3118 *
3119 * The import case is identical to an open except that the configuration is sent
3120 * down from userland, instead of grabbed from the configuration cache. For the
3121 * case of an open, the pool configuration will exist in the
3122 * POOL_STATE_UNINITIALIZED state.
3123 *
3124 * The stats information (gen/count/ustats) is used to gather vdev statistics at
3125 * the same time we open the pool, without having to keep around the spa_t in
3126 * some ambiguous state.
3127 */
3128static int
3129spa_open_common(const char *pool, spa_t **spapp, void *tag, nvlist_t *nvpolicy,
3130 nvlist_t **config)
3131{
3132 spa_t *spa;
3133 spa_load_state_t state = SPA_LOAD_OPEN;
3134 int error;
3135 int locked = B_FALSE;
3136 int firstopen = B_FALSE;
3137
3138 *spapp = NULL;
3139
3140 /*
3141 * As disgusting as this is, we need to support recursive calls to this
3142 * function because dsl_dir_open() is called during spa_load(), and ends
3143 * up calling spa_open() again. The real fix is to figure out how to
3144 * avoid dsl_dir_open() calling this in the first place.
3145 */
3146 if (mutex_owner(&spa_namespace_lock) != curthread) {
3147 mutex_enter(&spa_namespace_lock);
3148 locked = B_TRUE;
3149 }
3150
3151 if ((spa = spa_lookup(pool)) == NULL) {
3152 if (locked)
3153 mutex_exit(&spa_namespace_lock);
3154 return (SET_ERROR(ENOENT));
3155 }
3156
3157 if (spa->spa_state == POOL_STATE_UNINITIALIZED) {
3158 zpool_rewind_policy_t policy;
3159
3160 firstopen = B_TRUE;
3161
3162 zpool_get_rewind_policy(nvpolicy ? nvpolicy : spa->spa_config,
3163 &policy);
3164 if (policy.zrp_request & ZPOOL_DO_REWIND)
3165 state = SPA_LOAD_RECOVER;
3166
3167 spa_activate(spa, spa_mode_global);
3168
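		/*
		 * Unless we are recovering, clear any rewind bookkeeping
		 * left over from a previous load attempt.
		 */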
3169 if (state != SPA_LOAD_RECOVER)
3170 spa->spa_last_ubsync_txg = spa->spa_load_txg = 0;
3171
3172 error = spa_load_best(spa, state, B_FALSE, policy.zrp_txg,
3173 policy.zrp_request);
3174
3175 if (error == EBADF) {
3176 /*
3177			 * If vdev_validate() returns failure (indicated by
3178			 * EBADF), one of the vdevs indicates that the pool
3179			 * has been exported or destroyed. If
3180 * this is the case, the config cache is out of sync and
3181 * we should remove the pool from the namespace.
3182 */
3183 spa_unload(spa);
3184 spa_deactivate(spa);
3185 spa_config_sync(spa, B_TRUE, B_TRUE);
3186 spa_remove(spa);
3187 if (locked)
3188 mutex_exit(&spa_namespace_lock);
3189 return (SET_ERROR(ENOENT));
3190 }
3191
3192 if (error) {
3193 /*
3194 * We can't open the pool, but we still have useful
3195 * information: the state of each vdev after the
3196 * attempted vdev_open(). Return this to the user.
3197 */
3198 if (config != NULL && spa->spa_config) {
3199 VERIFY(nvlist_dup(spa->spa_config, config,
3200 KM_SLEEP) == 0);
3201 VERIFY(nvlist_add_nvlist(*config,
3202 ZPOOL_CONFIG_LOAD_INFO,
3203 spa->spa_load_info) == 0);
3204 }
3205 spa_unload(spa);
3206 spa_deactivate(spa);
3207 spa->spa_last_open_failed = error;
3208 if (locked)
3209 mutex_exit(&spa_namespace_lock);
3210 *spapp = NULL;
3211 return (error);
3212 }
3213 }
3214
3215 spa_open_ref(spa, tag);
3216
3217 if (config != NULL)
3218 *config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
3219
3220 /*
3221 * If we've recovered the pool, pass back any information we
3222 * gathered while doing the load.
3223 */
3224 if (state == SPA_LOAD_RECOVER) {
3225 VERIFY(nvlist_add_nvlist(*config, ZPOOL_CONFIG_LOAD_INFO,
3226 spa->spa_load_info) == 0);
3227 }
3228
3229 if (locked) {
3230 spa->spa_last_open_failed = 0;
3231 spa->spa_last_ubsync_txg = 0;
3232 spa->spa_load_txg = 0;
3233 mutex_exit(&spa_namespace_lock);
3234#ifdef __FreeBSD__
3235#ifdef _KERNEL
3236 if (firstopen)
3237 zvol_create_minors(spa->spa_name);
3238#endif
3239#endif
3240 }
3241
3242 *spapp = spa;
3243
3244 return (0);
3245}
3246
3247int
3248spa_open_rewind(const char *name, spa_t **spapp, void *tag, nvlist_t *policy,
3249 nvlist_t **config)
3250{
3251 return (spa_open_common(name, spapp, tag, policy, config));
3252}
3253
3254int
3255spa_open(const char *name, spa_t **spapp, void *tag)
3256{
3257 return (spa_open_common(name, spapp, tag, NULL, NULL));
3258}
3259
3260/*
3261 * Look up the given spa_t, incrementing the inject count in the process,
3262 * preventing it from being exported or destroyed.
3263 */
3264spa_t *
3265spa_inject_addref(char *name)
3266{
3267 spa_t *spa;
3268
3269 mutex_enter(&spa_namespace_lock);
3270 if ((spa = spa_lookup(name)) == NULL) {
3271 mutex_exit(&spa_namespace_lock);
3272 return (NULL);
3273 }
3274 spa->spa_inject_ref++;
3275 mutex_exit(&spa_namespace_lock);
3276
3277 return (spa);
3278}
3279
3280void
3281spa_inject_delref(spa_t *spa)
3282{
3283 mutex_enter(&spa_namespace_lock);
3284 spa->spa_inject_ref--;
3285 mutex_exit(&spa_namespace_lock);
3286}
3287
3288/*
3289 * Add spares device information to the nvlist.
3290 */
3291static void
3292spa_add_spares(spa_t *spa, nvlist_t *config)
3293{
3294 nvlist_t **spares;
3295 uint_t i, nspares;
3296 nvlist_t *nvroot;
3297 uint64_t guid;
3298 vdev_stat_t *vs;
3299 uint_t vsc;
3300 uint64_t pool;
3301
3302 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
3303
3304 if (spa->spa_spares.sav_count == 0)
3305 return;
3306
3307 VERIFY(nvlist_lookup_nvlist(config,
3308 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
3309 VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
3310 ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);
3311 if (nspares != 0) {
3312 VERIFY(nvlist_add_nvlist_array(nvroot,
3313 ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
3314 VERIFY(nvlist_lookup_nvlist_array(nvroot,
3315 ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);
3316
3317 /*
3318 * Go through and find any spares which have since been
3319			 * repurposed as active spares. If this is the case, update
3320 * their status appropriately.
3321 */
3322 for (i = 0; i < nspares; i++) {
3323 VERIFY(nvlist_lookup_uint64(spares[i],
3324 ZPOOL_CONFIG_GUID, &guid) == 0);
3325 if (spa_spare_exists(guid, &pool, NULL) &&
3326 pool != 0ULL) {
3327 VERIFY(nvlist_lookup_uint64_array(
3328 spares[i], ZPOOL_CONFIG_VDEV_STATS,
3329 (uint64_t **)&vs, &vsc) == 0);
3330 vs->vs_state = VDEV_STATE_CANT_OPEN;
3331 vs->vs_aux = VDEV_AUX_SPARED;
3332 }
3333 }
3334 }
3335}
3336
3337/*
3338 * Add l2cache device information to the nvlist, including vdev stats.
3339 */
3340static void
3341spa_add_l2cache(spa_t *spa, nvlist_t *config)
3342{
3343 nvlist_t **l2cache;
3344 uint_t i, j, nl2cache;
3345 nvlist_t *nvroot;
3346 uint64_t guid;
3347 vdev_t *vd;
3348 vdev_stat_t *vs;
3349 uint_t vsc;
3350
3351 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
3352
3353 if (spa->spa_l2cache.sav_count == 0)
3354 return;
3355
3356 VERIFY(nvlist_lookup_nvlist(config,
3357 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
3358 VERIFY(nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config,
3359 ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
3360 if (nl2cache != 0) {
3361 VERIFY(nvlist_add_nvlist_array(nvroot,
3362 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
3363 VERIFY(nvlist_lookup_nvlist_array(nvroot,
3364 ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
3365
3366 /*
3367 * Update level 2 cache device stats.
3368 */
3369
3370 for (i = 0; i < nl2cache; i++) {
3371 VERIFY(nvlist_lookup_uint64(l2cache[i],
3372 ZPOOL_CONFIG_GUID, &guid) == 0);
3373
3374 vd = NULL;
3375 for (j = 0; j < spa->spa_l2cache.sav_count; j++) {
3376 if (guid ==
3377 spa->spa_l2cache.sav_vdevs[j]->vdev_guid) {
3378 vd = spa->spa_l2cache.sav_vdevs[j];
3379 break;
3380 }
3381 }
3382 ASSERT(vd != NULL);
3383
3384 VERIFY(nvlist_lookup_uint64_array(l2cache[i],
3385 ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc)
3386 == 0);
3387 vdev_get_stats(vd, vs);
3388 }
3389 }
3390}
3391
3392static void
3393spa_add_feature_stats(spa_t *spa, nvlist_t *config)
3394{
3395 nvlist_t *features;
3396 zap_cursor_t zc;
3397 zap_attribute_t za;
3398
3399 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
3400 VERIFY(nvlist_alloc(&features, NV_UNIQUE_NAME, KM_SLEEP) == 0);
3401
3402 /* We may be unable to read features if pool is suspended. */
3403 if (spa_suspended(spa))
3404 goto out;
3405
3406 if (spa->spa_feat_for_read_obj != 0) {
3407 for (zap_cursor_init(&zc, spa->spa_meta_objset,
3408 spa->spa_feat_for_read_obj);
3409 zap_cursor_retrieve(&zc, &za) == 0;
3410 zap_cursor_advance(&zc)) {
3411 ASSERT(za.za_integer_length == sizeof (uint64_t) &&
3412 za.za_num_integers == 1);
3413 VERIFY3U(0, ==, nvlist_add_uint64(features, za.za_name,
3414 za.za_first_integer));
3415 }
3416 zap_cursor_fini(&zc);
3417 }
3418
3419 if (spa->spa_feat_for_write_obj != 0) {
3420 for (zap_cursor_init(&zc, spa->spa_meta_objset,
3421 spa->spa_feat_for_write_obj);
3422 zap_cursor_retrieve(&zc, &za) == 0;
3423 zap_cursor_advance(&zc)) {
3424 ASSERT(za.za_integer_length == sizeof (uint64_t) &&
3425 za.za_num_integers == 1);
3426 VERIFY3U(0, ==, nvlist_add_uint64(features, za.za_name,
3427 za.za_first_integer));
3428 }
3429 zap_cursor_fini(&zc);
3430 }
3431
3432out:
3433 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_FEATURE_STATS,
3434 features) == 0);
3435 nvlist_free(features);
3436}
3437
3438int
3439spa_get_stats(const char *name, nvlist_t **config,
3440 char *altroot, size_t buflen)
3441{
3442 int error;
3443 spa_t *spa;
3444
3445 *config = NULL;
3446 error = spa_open_common(name, &spa, FTAG, NULL, config);
3447
3448 if (spa != NULL) {
3449 /*
3450 * This still leaves a window of inconsistency where the spares
3451 * or l2cache devices could change and the config would be
3452 * self-inconsistent.
3453 */
3454 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
3455
3456 if (*config != NULL) {
3457 uint64_t loadtimes[2];
3458
3459 loadtimes[0] = spa->spa_loaded_ts.tv_sec;
3460 loadtimes[1] = spa->spa_loaded_ts.tv_nsec;
3461 VERIFY(nvlist_add_uint64_array(*config,
3462 ZPOOL_CONFIG_LOADED_TIME, loadtimes, 2) == 0);
3463
3464 VERIFY(nvlist_add_uint64(*config,
3465 ZPOOL_CONFIG_ERRCOUNT,
3466 spa_get_errlog_size(spa)) == 0);
3467
3468 if (spa_suspended(spa))
3469 VERIFY(nvlist_add_uint64(*config,
3470 ZPOOL_CONFIG_SUSPENDED,
3471 spa->spa_failmode) == 0);
3472
3473 spa_add_spares(spa, *config);
3474 spa_add_l2cache(spa, *config);
3475 spa_add_feature_stats(spa, *config);
3476 }
3477 }
3478
3479 /*
3480 * We want to get the alternate root even for faulted pools, so we cheat
3481 * and call spa_lookup() directly.
3482 */
3483 if (altroot) {
3484 if (spa == NULL) {
3485 mutex_enter(&spa_namespace_lock);
3486 spa = spa_lookup(name);
3487 if (spa)
3488 spa_altroot(spa, altroot, buflen);
3489 else
3490 altroot[0] = '\0';
3491 spa = NULL;
3492 mutex_exit(&spa_namespace_lock);
3493 } else {
3494 spa_altroot(spa, altroot, buflen);
3495 }
3496 }
3497
3498 if (spa != NULL) {
3499 spa_config_exit(spa, SCL_CONFIG, FTAG);
3500 spa_close(spa, FTAG);
3501 }
3502
3503 return (error);
3504}
3505
3506/*
3507 * Validate that the auxiliary device array is well formed. We must have an
3508 * array of nvlists, each of which describes a valid leaf vdev. If this is an
3509 * import (mode is VDEV_ALLOC_SPARE), then we allow corrupted spares to be
3510 * specified, as long as they are well-formed.
3511 */
3512static int
3513spa_validate_aux_devs(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode,
3514 spa_aux_vdev_t *sav, const char *config, uint64_t version,
3515 vdev_labeltype_t label)
3516{
3517 nvlist_t **dev;
3518 uint_t i, ndev;
3519 vdev_t *vd;
3520 int error;
3521
3522 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
3523
3524 /*
3525 * It's acceptable to have no devs specified.
3526 */
3527 if (nvlist_lookup_nvlist_array(nvroot, config, &dev, &ndev) != 0)
3528 return (0);
3529
3530 if (ndev == 0)
3531 return (SET_ERROR(EINVAL));
3532
3533 /*
3534 * Make sure the pool is formatted with a version that supports this
3535 * device type.
3536 */
3537 if (spa_version(spa) < version)
3538 return (SET_ERROR(ENOTSUP));
3539
3540 /*
3541 * Set the pending device list so we correctly handle device in-use
3542 * checking.
3543 */
3544 sav->sav_pending = dev;
3545 sav->sav_npending = ndev;
3546
3547 for (i = 0; i < ndev; i++) {
3548 if ((error = spa_config_parse(spa, &vd, dev[i], NULL, 0,
3549 mode)) != 0)
3550 goto out;
3551
3552 if (!vd->vdev_ops->vdev_op_leaf) {
3553 vdev_free(vd);
3554 error = SET_ERROR(EINVAL);
3555 goto out;
3556 }
3557
3558 /*
3559 * The L2ARC currently only supports disk devices in
3560		 * kernel context. For user-level testing, any device type is allowed.
3561 */
3562#ifdef _KERNEL
3563 if ((strcmp(config, ZPOOL_CONFIG_L2CACHE) == 0) &&
3564 strcmp(vd->vdev_ops->vdev_op_type, VDEV_TYPE_DISK) != 0) {
3565 error = SET_ERROR(ENOTBLK);
3566 vdev_free(vd);
3567 goto out;
3568 }
3569#endif
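		/*
		 * Each aux device is a single leaf, so treat it as its own
		 * top-level vdev for the open and label init below.
		 */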
3570 vd->vdev_top = vd;
3571
3572 if ((error = vdev_open(vd)) == 0 &&
3573 (error = vdev_label_init(vd, crtxg, label)) == 0) {
3574 VERIFY(nvlist_add_uint64(dev[i], ZPOOL_CONFIG_GUID,
3575 vd->vdev_guid) == 0);
3576 }
3577
3578 vdev_free(vd);
3579
3580 if (error &&
3581 (mode != VDEV_ALLOC_SPARE && mode != VDEV_ALLOC_L2CACHE))
3582 goto out;
3583 else
3584 error = 0;
3585 }
3586
3587out:
3588 sav->sav_pending = NULL;
3589 sav->sav_npending = 0;
3590 return (error);
3591}
3592
3593static int
3594spa_validate_aux(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode)
3595{
3596 int error;
3597
3598 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
3599
3600 if ((error = spa_validate_aux_devs(spa, nvroot, crtxg, mode,
3601 &spa->spa_spares, ZPOOL_CONFIG_SPARES, SPA_VERSION_SPARES,
3602 VDEV_LABEL_SPARE)) != 0) {
3603 return (error);
3604 }
3605
3606 return (spa_validate_aux_devs(spa, nvroot, crtxg, mode,
3607 &spa->spa_l2cache, ZPOOL_CONFIG_L2CACHE, SPA_VERSION_L2CACHE,
3608 VDEV_LABEL_L2CACHE));
3609}
3610
3611static void
3612spa_set_aux_vdevs(spa_aux_vdev_t *sav, nvlist_t **devs, int ndevs,
3613 const char *config)
3614{
3615 int i;
3616
3617 if (sav->sav_config != NULL) {
3618 nvlist_t **olddevs;
3619 uint_t oldndevs;
3620 nvlist_t **newdevs;
3621
3622 /*
3623		 * Generate new dev list by concatenating with the
3624 * current dev list.
3625 */
3626 VERIFY(nvlist_lookup_nvlist_array(sav->sav_config, config,
3627 &olddevs, &oldndevs) == 0);
3628
3629 newdevs = kmem_alloc(sizeof (void *) *
3630 (ndevs + oldndevs), KM_SLEEP);
3631 for (i = 0; i < oldndevs; i++)
3632 VERIFY(nvlist_dup(olddevs[i], &newdevs[i],
3633 KM_SLEEP) == 0);
3634 for (i = 0; i < ndevs; i++)
3635 VERIFY(nvlist_dup(devs[i], &newdevs[i + oldndevs],
3636 KM_SLEEP) == 0);
3637
3638 VERIFY(nvlist_remove(sav->sav_config, config,
3639 DATA_TYPE_NVLIST_ARRAY) == 0);
3640
3641 VERIFY(nvlist_add_nvlist_array(sav->sav_config,
3642 config, newdevs, ndevs + oldndevs) == 0);
3643 for (i = 0; i < oldndevs + ndevs; i++)
3644 nvlist_free(newdevs[i]);
3645 kmem_free(newdevs, (oldndevs + ndevs) * sizeof (void *));
3646 } else {
3647 /*
3648 * Generate a new dev list.
3649 */
3650 VERIFY(nvlist_alloc(&sav->sav_config, NV_UNIQUE_NAME,
3651 KM_SLEEP) == 0);
3652 VERIFY(nvlist_add_nvlist_array(sav->sav_config, config,
3653 devs, ndevs) == 0);
3654 }
3655}
3656
3657/*
3658 * Stop and drop level 2 ARC devices
3659 */
3660void
3661spa_l2cache_drop(spa_t *spa)
3662{
3663 vdev_t *vd;
3664 int i;
3665 spa_aux_vdev_t *sav = &spa->spa_l2cache;
3666
3667 for (i = 0; i < sav->sav_count; i++) {
3668 uint64_t pool;
3669
3670 vd = sav->sav_vdevs[i];
3671 ASSERT(vd != NULL);
3672
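		/*
		 * Only drop the device from the ARC if it is still
		 * registered as an L2ARC vdev and belongs to a pool.
		 */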
3673 if (spa_l2cache_exists(vd->vdev_guid, &pool) &&
3674 pool != 0ULL && l2arc_vdev_present(vd))
3675 l2arc_remove_vdev(vd);
3676 }
3677}
3678
3679/*
3680 * Pool Creation
3681 */
3682int
3683spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props,
3684 nvlist_t *zplprops)
3685{
3686 spa_t *spa;
3687 char *altroot = NULL;
3688 vdev_t *rvd;
3689 dsl_pool_t *dp;
3690 dmu_tx_t *tx;
3691 int error = 0;
3692 uint64_t txg = TXG_INITIAL;
3693 nvlist_t **spares, **l2cache;
3694 uint_t nspares, nl2cache;
3695 uint64_t version, obj;
3696 boolean_t has_features;
3697
3698 /*
3699 * If this pool already exists, return failure.
3700 */
3701 mutex_enter(&spa_namespace_lock);
3702 if (spa_lookup(pool) != NULL) {
3703 mutex_exit(&spa_namespace_lock);
3704 return (SET_ERROR(EEXIST));
3705 }
3706
3707 /*
3708 * Allocate a new spa_t structure.
3709 */
3710 (void) nvlist_lookup_string(props,
3711 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
3712 spa = spa_add(pool, NULL, altroot);
3713 spa_activate(spa, spa_mode_global);
3714
3715 if (props && (error = spa_prop_validate(spa, props))) {
3716 spa_deactivate(spa);
3717 spa_remove(spa);
3718 mutex_exit(&spa_namespace_lock);
3719 return (error);
3720 }
3721
3722 has_features = B_FALSE;
3723 for (nvpair_t *elem = nvlist_next_nvpair(props, NULL);
3724 elem != NULL; elem = nvlist_next_nvpair(props, elem)) {
3725 if (zpool_prop_feature(nvpair_name(elem)))
3726 has_features = B_TRUE;
3727 }
3728
3729 if (has_features || nvlist_lookup_uint64(props,
3730 zpool_prop_to_name(ZPOOL_PROP_VERSION), &version) != 0) {
3731 version = SPA_VERSION;
3732 }
3733 ASSERT(SPA_VERSION_IS_SUPPORTED(version));
3734
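	/*
	 * Starting the uberblock at txg - 1 means the first txg actually
	 * synced will be spa_first_txg itself.
	 */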
3735 spa->spa_first_txg = txg;
3736 spa->spa_uberblock.ub_txg = txg - 1;
3737 spa->spa_uberblock.ub_version = version;
3738 spa->spa_ubsync = spa->spa_uberblock;
3739 spa->spa_load_state = SPA_LOAD_CREATE;
3740
3741 /*
3742 * Create "The Godfather" zio to hold all async IOs
3743 */
3744 spa->spa_async_zio_root = kmem_alloc(max_ncpus * sizeof (void *),
3745 KM_SLEEP);
3746 for (int i = 0; i < max_ncpus; i++) {
3747 spa->spa_async_zio_root[i] = zio_root(spa, NULL, NULL,
3748 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
3749 ZIO_FLAG_GODFATHER);
3750 }
3751
3752 /*
3753 * Create the root vdev.
3754 */
3755 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
3756
3757 error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, VDEV_ALLOC_ADD);
3758
3759 ASSERT(error != 0 || rvd != NULL);
3760 ASSERT(error != 0 || spa->spa_root_vdev == rvd);
3761
3762 if (error == 0 && !zfs_allocatable_devs(nvroot))
3763 error = SET_ERROR(EINVAL);
3764
3765 if (error == 0 &&
3766 (error = vdev_create(rvd, txg, B_FALSE)) == 0 &&
3767 (error = spa_validate_aux(spa, nvroot, txg,
3768 VDEV_ALLOC_ADD)) == 0) {
3769 for (int c = 0; c < rvd->vdev_children; c++) {
3770 vdev_ashift_optimize(rvd->vdev_child[c]);
3771 vdev_metaslab_set_size(rvd->vdev_child[c]);
3772 vdev_expand(rvd->vdev_child[c], txg);
3773 }
3774 }
3775
3776 spa_config_exit(spa, SCL_ALL, FTAG);
3777
3778 if (error != 0) {
3779 spa_unload(spa);
3780 spa_deactivate(spa);
3781 spa_remove(spa);
3782 mutex_exit(&spa_namespace_lock);
3783 return (error);
3784 }
3785
3786 /*
3787 * Get the list of spares, if specified.
3788 */
3789 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
3790 &spares, &nspares) == 0) {
3791 VERIFY(nvlist_alloc(&spa->spa_spares.sav_config, NV_UNIQUE_NAME,
3792 KM_SLEEP) == 0);
3793 VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
3794 ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
3795 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
3796 spa_load_spares(spa);
3797 spa_config_exit(spa, SCL_ALL, FTAG);
3798 spa->spa_spares.sav_sync = B_TRUE;
3799 }
3800
3801 /*
3802 * Get the list of level 2 cache devices, if specified.
3803 */
3804 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
3805 &l2cache, &nl2cache) == 0) {
3806 VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config,
3807 NV_UNIQUE_NAME, KM_SLEEP) == 0);
3808 VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config,
3809 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
3810 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
3811 spa_load_l2cache(spa);
3812 spa_config_exit(spa, SCL_ALL, FTAG);
3813 spa->spa_l2cache.sav_sync = B_TRUE;
3814 }
3815
3816 spa->spa_is_initializing = B_TRUE;
3817 spa->spa_dsl_pool = dp = dsl_pool_create(spa, zplprops, txg);
3818 spa->spa_meta_objset = dp->dp_meta_objset;
3819 spa->spa_is_initializing = B_FALSE;
3820
3821 /*
3822 * Create DDTs (dedup tables).
3823 */
3824 ddt_create(spa);
3825
3826 spa_update_dspace(spa);
3827
3828 tx = dmu_tx_create_assigned(dp, txg);
3829
3830 /*
3831 * Create the pool config object.
3832 */
3833 spa->spa_config_object = dmu_object_alloc(spa->spa_meta_objset,
3834 DMU_OT_PACKED_NVLIST, SPA_CONFIG_BLOCKSIZE,
3835 DMU_OT_PACKED_NVLIST_SIZE, sizeof (uint64_t), tx);
3836
3837 if (zap_add(spa->spa_meta_objset,
3838 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CONFIG,
3839 sizeof (uint64_t), 1, &spa->spa_config_object, tx) != 0) {
3840 cmn_err(CE_PANIC, "failed to add pool config");
3841 }
3842
3843 if (spa_version(spa) >= SPA_VERSION_FEATURES)
3844 spa_feature_create_zap_objects(spa, tx);
3845
3846 if (zap_add(spa->spa_meta_objset,
3847 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CREATION_VERSION,
3848 sizeof (uint64_t), 1, &version, tx) != 0) {
3849 cmn_err(CE_PANIC, "failed to add pool version");
3850 }
3851
3852 /* Newly created pools with the right version are always deflated. */
3853 if (version >= SPA_VERSION_RAIDZ_DEFLATE) {
3854 spa->spa_deflate = TRUE;
3855 if (zap_add(spa->spa_meta_objset,
3856 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
3857 sizeof (uint64_t), 1, &spa->spa_deflate, tx) != 0) {
3858 cmn_err(CE_PANIC, "failed to add deflate");
3859 }
3860 }
3861
3862 /*
3863 * Create the deferred-free bpobj. Turn off compression
3864 * because sync-to-convergence takes longer if the blocksize
3865 * keeps changing.
3866 */
3867 obj = bpobj_alloc(spa->spa_meta_objset, 1 << 14, tx);
3868 dmu_object_set_compress(spa->spa_meta_objset, obj,
3869 ZIO_COMPRESS_OFF, tx);
3870 if (zap_add(spa->spa_meta_objset,
3871 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SYNC_BPOBJ,
3872 sizeof (uint64_t), 1, &obj, tx) != 0) {
3873 cmn_err(CE_PANIC, "failed to add bpobj");
3874 }
3875 VERIFY3U(0, ==, bpobj_open(&spa->spa_deferred_bpobj,
3876 spa->spa_meta_objset, obj));
3877
3878 /*
3879 * Create the pool's history object.
3880 */
3881 if (version >= SPA_VERSION_ZPOOL_HISTORY)
3882 spa_history_create_obj(spa, tx);
3883
3884 /*
3885 * Generate some random noise for salted checksums to operate on.
3886 */
3887 (void) random_get_pseudo_bytes(spa->spa_cksum_salt.zcs_bytes,
3888 sizeof (spa->spa_cksum_salt.zcs_bytes));
3889
3890 /*
3891 * Set pool properties.
3892 */
3893 spa->spa_bootfs = zpool_prop_default_numeric(ZPOOL_PROP_BOOTFS);
3894 spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION);
3895 spa->spa_failmode = zpool_prop_default_numeric(ZPOOL_PROP_FAILUREMODE);
3896 spa->spa_autoexpand = zpool_prop_default_numeric(ZPOOL_PROP_AUTOEXPAND);
3897
3898 if (props != NULL) {
3899 spa_configfile_set(spa, props, B_FALSE);
3900 spa_sync_props(props, tx);
3901 }
3902
3903 dmu_tx_commit(tx);
3904
3905 spa->spa_sync_on = B_TRUE;
3906 txg_sync_start(spa->spa_dsl_pool);
3907
3908 /*
3909 * We explicitly wait for the first transaction to complete so that our
3910 * bean counters are appropriately updated.
3911 */
3912 txg_wait_synced(spa->spa_dsl_pool, txg);
3913
3914 spa_config_sync(spa, B_FALSE, B_TRUE);
3915 spa_event_notify(spa, NULL, ESC_ZFS_POOL_CREATE);
3916
3917 spa_history_log_version(spa, "create");
3918
3919 /*
3920 * Don't count references from objsets that are already closed
3921 * and are making their way through the eviction process.
3922 */
3923 spa_evicting_os_wait(spa);
3924 spa->spa_minref = refcount_count(&spa->spa_refcount);
3925 spa->spa_load_state = SPA_LOAD_NONE;
3926
3927 mutex_exit(&spa_namespace_lock);
3928
3929 return (0);
3930}
3931
3932#ifdef _KERNEL
3933#ifdef illumos
3934/*
3935 * Get the root pool information from the root disk, then import the root pool
3936 * during the system boot up time.
3937 */
3938extern int vdev_disk_read_rootlabel(char *, char *, nvlist_t **);
3939
3940static nvlist_t *
3941spa_generate_rootconf(char *devpath, char *devid, uint64_t *guid)
3942{
3943 nvlist_t *config;
3944 nvlist_t *nvtop, *nvroot;
3945 uint64_t pgid;
3946
3947 if (vdev_disk_read_rootlabel(devpath, devid, &config) != 0)
3948 return (NULL);
3949
3950 /*
3951 * Add this top-level vdev to the child array.
3952 */
3953 VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
3954 &nvtop) == 0);
3955 VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
3956 &pgid) == 0);
3957 VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID, guid) == 0);
3958
3959 /*
3960 * Put this pool's top-level vdevs into a root vdev.
3961 */
3962 VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0);
3963 VERIFY(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
3964 VDEV_TYPE_ROOT) == 0);
3965 VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) == 0);
3966 VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, pgid) == 0);
3967 VERIFY(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
3968 &nvtop, 1) == 0);
3969
3970 /*
3971 * Replace the existing vdev_tree with the new root vdev in
3972 * this pool's configuration (remove the old, add the new).
3973 */
3974 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, nvroot) == 0);
3975 nvlist_free(nvroot);
3976 return (config);
3977}
3978
3979/*
3980 * Walk the vdev tree and see if we can find a device with "better"
3981 * configuration. A configuration is "better" if the label on that
3982 * device has a more recent txg.
3983 */
3984static void
3985spa_alt_rootvdev(vdev_t *vd, vdev_t **avd, uint64_t *txg)
3986{
3987 for (int c = 0; c < vd->vdev_children; c++)
3988 spa_alt_rootvdev(vd->vdev_child[c], avd, txg);
3989
3990 if (vd->vdev_ops->vdev_op_leaf) {
3991 nvlist_t *label;
3992 uint64_t label_txg;
3993
3994 if (vdev_disk_read_rootlabel(vd->vdev_physpath, vd->vdev_devid,
3995 &label) != 0)
3996 return;
3997
3998 VERIFY(nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_TXG,
3999 &label_txg) == 0);
4000
4001 /*
4002 * Do we have a better boot device?
4003 */
4004 if (label_txg > *txg) {
4005 *txg = label_txg;
4006 *avd = vd;
4007 }
4008 nvlist_free(label);
4009 }
4010}
4011
4012/*
4013 * Import a root pool.
4014 *
4015 * For x86, devpath_list will consist of the devid and/or physpath name of
4016 * the vdev (e.g. "id1,sd@SSEAGATE..." or "/pci@1f,0/ide@d/disk@0,0:a").
4017 * The GRUB "findroot" command will return the vdev we should boot.
4018 *
4019 * For SPARC, devpath_list consists of the physpath name of the booting device,
4020 * no matter whether the root pool is a single-device pool or a mirrored pool.
4021 * e.g.
4022 * "/pci@1f,0/ide@d/disk@0,0:a"
4023 */
4024int
4025spa_import_rootpool(char *devpath, char *devid)
4026{
4027 spa_t *spa;
4028 vdev_t *rvd, *bvd, *avd = NULL;
4029 nvlist_t *config, *nvtop;
4030 uint64_t guid, txg;
4031 char *pname;
4032 int error;
4033
4034 /*
4035 * Read the label from the boot device and generate a configuration.
4036 */
4037 config = spa_generate_rootconf(devpath, devid, &guid);
4038#if defined(_OBP) && defined(_KERNEL)
4039 if (config == NULL) {
4040 if (strstr(devpath, "/iscsi/ssd") != NULL) {
4041 /* iscsi boot */
4042 get_iscsi_bootpath_phy(devpath);
4043 config = spa_generate_rootconf(devpath, devid, &guid);
4044 }
4045 }
4046#endif
4047 if (config == NULL) {
4048 cmn_err(CE_NOTE, "Cannot read the pool label from '%s'",
4049 devpath);
4050 return (SET_ERROR(EIO));
4051 }
4052
4053 VERIFY(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
4054 &pname) == 0);
4055 VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG, &txg) == 0);
4056
4057 mutex_enter(&spa_namespace_lock);
4058 if ((spa = spa_lookup(pname)) != NULL) {
4059 /*
4060 * Remove the existing root pool from the namespace so that we
4061 * can replace it with the correct config we just read in.
4062 */
4063 spa_remove(spa);
4064 }
4065
4066 spa = spa_add(pname, config, NULL);
4067 spa->spa_is_root = B_TRUE;
4068 spa->spa_import_flags = ZFS_IMPORT_VERBATIM;
4069
4070 /*
4071 * Build up a vdev tree based on the boot device's label config.
4072 */
4073 VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
4074 &nvtop) == 0);
4075 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
4076 error = spa_config_parse(spa, &rvd, nvtop, NULL, 0,
4077 VDEV_ALLOC_ROOTPOOL);
4078 spa_config_exit(spa, SCL_ALL, FTAG);
4079 if (error) {
4080 mutex_exit(&spa_namespace_lock);
4081 nvlist_free(config);
4082 cmn_err(CE_NOTE, "Can not parse the config for pool '%s'",
4083 pname);
4084 return (error);
4085 }
4086
4087 /*
4088 * Get the boot vdev.
4089 */
4090 if ((bvd = vdev_lookup_by_guid(rvd, guid)) == NULL) {
4091 cmn_err(CE_NOTE, "Can not find the boot vdev for guid %llu",
4092 (u_longlong_t)guid);
4093 error = SET_ERROR(ENOENT);
4094 goto out;
4095 }
4096
4097 /*
4098 * Determine if there is a better boot device.
4099 */
4100 avd = bvd;
4101 spa_alt_rootvdev(rvd, &avd, &txg);
4102 if (avd != bvd) {
4103 cmn_err(CE_NOTE, "The boot device is 'degraded'. Please "
4104 "try booting from '%s'", avd->vdev_path);
4105 error = SET_ERROR(EINVAL);
4106 goto out;
4107 }
4108
4109 /*
4110 * If the boot device is part of a spare vdev then ensure that
4111 * we're booting off the active spare.
4112 */
4113 if (bvd->vdev_parent->vdev_ops == &vdev_spare_ops &&
4114 !bvd->vdev_isspare) {
4115 cmn_err(CE_NOTE, "The boot device is currently spared. Please "
4116 "try booting from '%s'",
4117 bvd->vdev_parent->
4118 vdev_child[bvd->vdev_parent->vdev_children - 1]->vdev_path);
4119 error = SET_ERROR(EINVAL);
4120 goto out;
4121 }
4122
4123 error = 0;
4124out:
4125 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
4126 vdev_free(rvd);
4127 spa_config_exit(spa, SCL_ALL, FTAG);
4128 mutex_exit(&spa_namespace_lock);
4129
4130 nvlist_free(config);
4131 return (error);
4132}
4133
4134#else /* !illumos */
4135
4136extern int vdev_geom_read_pool_label(const char *name, nvlist_t ***configs,
4137 uint64_t *count);
4138
4139static nvlist_t *
4140spa_generate_rootconf(const char *name)
4141{
4142 nvlist_t **configs, **tops;
4143 nvlist_t *config;
4144 nvlist_t *best_cfg, *nvtop, *nvroot;
4145 uint64_t *holes;
4146 uint64_t best_txg;
4147 uint64_t nchildren;
4148 uint64_t pgid;
4149 uint64_t count;
4150 uint64_t i;
4151 uint_t nholes;
4152
4153 if (vdev_geom_read_pool_label(name, &configs, &count) != 0)
4154 return (NULL);
4155
4156 ASSERT3U(count, !=, 0);
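	/*
	 * Pick the config whose label carries the highest txg; it is the
	 * most recently updated copy.
	 */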
4157 best_txg = 0;
4158 for (i = 0; i < count; i++) {
4159 uint64_t txg;
4160
4161 VERIFY(nvlist_lookup_uint64(configs[i], ZPOOL_CONFIG_POOL_TXG,
4162 &txg) == 0);
4163 if (txg > best_txg) {
4164 best_txg = txg;
4165 best_cfg = configs[i];
4166 }
4167 }
4168
4169 nchildren = 1;
4170 nvlist_lookup_uint64(best_cfg, ZPOOL_CONFIG_VDEV_CHILDREN, &nchildren);
4171 holes = NULL;
4172 nvlist_lookup_uint64_array(best_cfg, ZPOOL_CONFIG_HOLE_ARRAY,
4173 &holes, &nholes);
4174
4175 tops = kmem_zalloc(nchildren * sizeof(void *), KM_SLEEP);
4176 for (i = 0; i < nchildren; i++) {
4177 if (i >= count)
4178 break;
4179 if (configs[i] == NULL)
4180 continue;
4181 VERIFY(nvlist_lookup_nvlist(configs[i], ZPOOL_CONFIG_VDEV_TREE,
4182 &nvtop) == 0);
4183 nvlist_dup(nvtop, &tops[i], KM_SLEEP);
4184 }
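	/*
	 * Fill in HOLE vdevs for any recorded holes, and MISSING vdevs for
	 * top-level children we found no label for.
	 */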
4185 for (i = 0; holes != NULL && i < nholes; i++) {
4186 if (i >= nchildren)
4187 continue;
4188 if (tops[holes[i]] != NULL)
4189 continue;
4190 nvlist_alloc(&tops[holes[i]], NV_UNIQUE_NAME, KM_SLEEP);
4191 VERIFY(nvlist_add_string(tops[holes[i]], ZPOOL_CONFIG_TYPE,
4192 VDEV_TYPE_HOLE) == 0);
4193 VERIFY(nvlist_add_uint64(tops[holes[i]], ZPOOL_CONFIG_ID,
4194 holes[i]) == 0);
4195 VERIFY(nvlist_add_uint64(tops[holes[i]], ZPOOL_CONFIG_GUID,
4196 0) == 0);
4197 }
4198 for (i = 0; i < nchildren; i++) {
4199 if (tops[i] != NULL)
4200 continue;
4201 nvlist_alloc(&tops[i], NV_UNIQUE_NAME, KM_SLEEP);
4202 VERIFY(nvlist_add_string(tops[i], ZPOOL_CONFIG_TYPE,
4203 VDEV_TYPE_MISSING) == 0);
4204 VERIFY(nvlist_add_uint64(tops[i], ZPOOL_CONFIG_ID,
4205 i) == 0);
4206 VERIFY(nvlist_add_uint64(tops[i], ZPOOL_CONFIG_GUID,
4207 0) == 0);
4208 }
4209
4210 /*
4211 * Create pool config based on the best vdev config.
4212 */
4213 nvlist_dup(best_cfg, &config, KM_SLEEP);
4214
4215 /*
4216 * Put this pool's top-level vdevs into a root vdev.
4217 */
4218 VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
4219 &pgid) == 0);
4220 VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0);
4221 VERIFY(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
4222 VDEV_TYPE_ROOT) == 0);
4223 VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) == 0);
4224 VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, pgid) == 0);
4225 VERIFY(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
4226 tops, nchildren) == 0);
4227
4228 /*
4229 * Replace the existing vdev_tree with the new root vdev in
4230 * this pool's configuration (remove the old, add the new).
4231 */
4232 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, nvroot) == 0);
4233
4234 /*
4235 * Drop vdev config elements that should not be present at pool level.
4236 */
4237 nvlist_remove(config, ZPOOL_CONFIG_GUID, DATA_TYPE_UINT64);
4238 nvlist_remove(config, ZPOOL_CONFIG_TOP_GUID, DATA_TYPE_UINT64);
4239
4240 for (i = 0; i < count; i++)
4241 nvlist_free(configs[i]);
4242 kmem_free(configs, count * sizeof(void *));
4243 for (i = 0; i < nchildren; i++)
4244 nvlist_free(tops[i]);
4245 kmem_free(tops, nchildren * sizeof(void *));
4246 nvlist_free(nvroot);
4247 return (config);
4248}
4249
4250int
4251spa_import_rootpool(const char *name)
4252{
4253 spa_t *spa;
4254 vdev_t *rvd, *bvd, *avd = NULL;
4255 nvlist_t *config, *nvtop;
4256 uint64_t txg;
4257 char *pname;
4258 int error;
4259
4260 /*
4261 * Read the label from the boot device and generate a configuration.
4262 */
4263 config = spa_generate_rootconf(name);
4264
4265 mutex_enter(&spa_namespace_lock);
4266 if (config != NULL) {
4267 VERIFY(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
4268 &pname) == 0 && strcmp(name, pname) == 0);
4269 VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG, &txg)
4270 == 0);
4271
4272 if ((spa = spa_lookup(pname)) != NULL) {
4273 /*
4274 * Remove the existing root pool from the namespace so
4275 * that we can replace it with the correct config
4276 * we just read in.
4277 */
4278 spa_remove(spa);
4279 }
4280 spa = spa_add(pname, config, NULL);
4281
4282 /*
4283 * Set spa_ubsync.ub_version as it can be used in vdev_alloc()
4284 * via spa_version().
4285 */
4286 if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
4287 &spa->spa_ubsync.ub_version) != 0)
4288 spa->spa_ubsync.ub_version = SPA_VERSION_INITIAL;
4289 } else if ((spa = spa_lookup(name)) == NULL) {
4290 mutex_exit(&spa_namespace_lock);
4291 nvlist_free(config);
4292 cmn_err(CE_NOTE, "Cannot find the pool label for '%s'",
4293 name);
4294 return (EIO);
4295 } else {
4296 VERIFY(nvlist_dup(spa->spa_config, &config, KM_SLEEP) == 0);
4297 }
4298 spa->spa_is_root = B_TRUE;
4299 spa->spa_import_flags = ZFS_IMPORT_VERBATIM;
4300
4301 /*
4302 * Build up a vdev tree based on the boot device's label config.
4303 */
4304 VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
4305 &nvtop) == 0);
4306 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
4307 error = spa_config_parse(spa, &rvd, nvtop, NULL, 0,
4308 VDEV_ALLOC_ROOTPOOL);
4309 spa_config_exit(spa, SCL_ALL, FTAG);
4310 if (error) {
4311 mutex_exit(&spa_namespace_lock);
4312 nvlist_free(config);
4313 cmn_err(CE_NOTE, "Can not parse the config for pool '%s'",
4314 pname);
4315 return (error);
4316 }
4317
4318 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
4319 vdev_free(rvd);
4320 spa_config_exit(spa, SCL_ALL, FTAG);
4321 mutex_exit(&spa_namespace_lock);
4322
4323 nvlist_free(config);
4324 return (0);
4325}
4326
4327#endif /* illumos */
4328#endif /* _KERNEL */
4329
4330/*
4331 * Import a non-root pool into the system.
4332 */
4333int
4334spa_import(const char *pool, nvlist_t *config, nvlist_t *props, uint64_t flags)
4335{
4336 spa_t *spa;
4337 char *altroot = NULL;
4338 spa_load_state_t state = SPA_LOAD_IMPORT;
4339 zpool_rewind_policy_t policy;
4340 uint64_t mode = spa_mode_global;
4341 uint64_t readonly = B_FALSE;
4342 int error;
4343 nvlist_t *nvroot;
4344 nvlist_t **spares, **l2cache;
4345 uint_t nspares, nl2cache;
4346
4347 /*
4348 * If a pool with this name exists, return failure.
4349 */
4350 mutex_enter(&spa_namespace_lock);
4351 if (spa_lookup(pool) != NULL) {
4352 mutex_exit(&spa_namespace_lock);
4353 return (SET_ERROR(EEXIST));
4354 }
4355
4356 /*
4357 * Create and initialize the spa structure.
4358 */
4359 (void) nvlist_lookup_string(props,
4360 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
4361 (void) nvlist_lookup_uint64(props,
4362 zpool_prop_to_name(ZPOOL_PROP_READONLY), &readonly);
4363 if (readonly)
4364 mode = FREAD;
4365 spa = spa_add(pool, config, altroot);
4366 spa->spa_import_flags = flags;
4367
4368 /*
4369 * Verbatim import - Take a pool and insert it into the namespace
4370 * as if it had been loaded at boot.
4371 */
4372 if (spa->spa_import_flags & ZFS_IMPORT_VERBATIM) {
4373 if (props != NULL)
4374 spa_configfile_set(spa, props, B_FALSE);
4375
4376 spa_config_sync(spa, B_FALSE, B_TRUE);
4377 spa_event_notify(spa, NULL, ESC_ZFS_POOL_IMPORT);
4378
4379 mutex_exit(&spa_namespace_lock);
4380 return (0);
4381 }
4382
4383 spa_activate(spa, mode);
4384
4385 /*
4386 * Don't start async tasks until we know everything is healthy.
4387 */
4388 spa_async_suspend(spa);
4389
4390 zpool_get_rewind_policy(config, &policy);
4391 if (policy.zrp_request & ZPOOL_DO_REWIND)
4392 state = SPA_LOAD_RECOVER;
4393
4394 /*
4395 * Pass off the heavy lifting to spa_load(). Pass TRUE for mosconfig
4396 * because the user-supplied config is actually the one to trust when
4397 * doing an import.
4398 */
4399 if (state != SPA_LOAD_RECOVER)
4400 spa->spa_last_ubsync_txg = spa->spa_load_txg = 0;
4401
4402 error = spa_load_best(spa, state, B_TRUE, policy.zrp_txg,
4403 policy.zrp_request);
4404
4405 /*
4406 * Propagate anything learned while loading the pool and pass it
4407 * back to caller (i.e. rewind info, missing devices, etc).
4408 */
4409 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_LOAD_INFO,
4410 spa->spa_load_info) == 0);
4411
4412 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
4413 /*
4414 * Toss any existing sparelist, as it doesn't have any validity
4415 * anymore, and conflicts with spa_has_spare().
4416 */
4417 if (spa->spa_spares.sav_config) {
4418 nvlist_free(spa->spa_spares.sav_config);
4419 spa->spa_spares.sav_config = NULL;
4420 spa_load_spares(spa);
4421 }
4422 if (spa->spa_l2cache.sav_config) {
4423 nvlist_free(spa->spa_l2cache.sav_config);
4424 spa->spa_l2cache.sav_config = NULL;
4425 spa_load_l2cache(spa);
4426 }
4427
4428 VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
4429 &nvroot) == 0);
4430 if (error == 0)
4431 error = spa_validate_aux(spa, nvroot, -1ULL,
4432 VDEV_ALLOC_SPARE);
4433 if (error == 0)
4434 error = spa_validate_aux(spa, nvroot, -1ULL,
4435 VDEV_ALLOC_L2CACHE);
4436 spa_config_exit(spa, SCL_ALL, FTAG);
4437
4438 if (props != NULL)
4439 spa_configfile_set(spa, props, B_FALSE);
4440
4441 if (error != 0 || (props && spa_writeable(spa) &&
4442 (error = spa_prop_set(spa, props)))) {
4443 spa_unload(spa);
4444 spa_deactivate(spa);
4445 spa_remove(spa);
4446 mutex_exit(&spa_namespace_lock);
4447 return (error);
4448 }
4449
4450 spa_async_resume(spa);
4451
4452 /*
4453 * Override any spares and level 2 cache devices as specified by
4454 * the user, as these may have correct device names/devids, etc.
4455 */
4456 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
4457 &spares, &nspares) == 0) {
4458 if (spa->spa_spares.sav_config)
4459 VERIFY(nvlist_remove(spa->spa_spares.sav_config,
4460 ZPOOL_CONFIG_SPARES, DATA_TYPE_NVLIST_ARRAY) == 0);
4461 else
4462 VERIFY(nvlist_alloc(&spa->spa_spares.sav_config,
4463 NV_UNIQUE_NAME, KM_SLEEP) == 0);
4464 VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
4465 ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
4466 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
4467 spa_load_spares(spa);
4468 spa_config_exit(spa, SCL_ALL, FTAG);
4469 spa->spa_spares.sav_sync = B_TRUE;
4470 }
4471 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
4472 &l2cache, &nl2cache) == 0) {
4473 if (spa->spa_l2cache.sav_config)
4474 VERIFY(nvlist_remove(spa->spa_l2cache.sav_config,
4475 ZPOOL_CONFIG_L2CACHE, DATA_TYPE_NVLIST_ARRAY) == 0);
4476 else
4477 VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config,
4478 NV_UNIQUE_NAME, KM_SLEEP) == 0);
4479 VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config,
4480 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
4481 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
4482 spa_load_l2cache(spa);
4483 spa_config_exit(spa, SCL_ALL, FTAG);
4484 spa->spa_l2cache.sav_sync = B_TRUE;
4485 }
4486
4487 /*
4488 * Check for any removed devices.
4489 */
4490 if (spa->spa_autoreplace) {
4491 spa_aux_check_removed(&spa->spa_spares);
4492 spa_aux_check_removed(&spa->spa_l2cache);
4493 }
4494
4495 if (spa_writeable(spa)) {
4496 /*
4497 * Update the config cache to include the newly-imported pool.
4498 */
4499 spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
4500 }
4501
4502 /*
4503 * It's possible that the pool was expanded while it was exported.
4504 * We kick off an async task to handle this for us.
4505 */
4506 spa_async_request(spa, SPA_ASYNC_AUTOEXPAND);
4507
4508 spa_history_log_version(spa, "import");
4509
4510 spa_event_notify(spa, NULL, ESC_ZFS_POOL_IMPORT);
4511
4512 mutex_exit(&spa_namespace_lock);
4513
4514#ifdef __FreeBSD__
4515#ifdef _KERNEL
4516 zvol_create_minors(pool);
4517#endif
4518#endif
4519 return (0);
4520}
4521
4522nvlist_t *
4523spa_tryimport(nvlist_t *tryconfig)
4524{
4525 nvlist_t *config = NULL;
4526 char *poolname;
4527 spa_t *spa;
4528 uint64_t state;
4529 int error;
4530
4531 if (nvlist_lookup_string(tryconfig, ZPOOL_CONFIG_POOL_NAME, &poolname))
4532 return (NULL);
4533
4534 if (nvlist_lookup_uint64(tryconfig, ZPOOL_CONFIG_POOL_STATE, &state))
4535 return (NULL);
4536
4537 /*
4538 * Create and initialize the spa structure.
4539 */
4540 mutex_enter(&spa_namespace_lock);
4541 spa = spa_add(TRYIMPORT_NAME, tryconfig, NULL);
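	/*
	 * Activate read-only (FREAD); a tryimport only needs to read the
	 * on-disk state.
	 */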
4542 spa_activate(spa, FREAD);
4543
4544 /*
4545 * Pass off the heavy lifting to spa_load().
4546 * Pass TRUE for mosconfig because the user-supplied config
4547 * is actually the one to trust when doing an import.
4548 */
4549 error = spa_load(spa, SPA_LOAD_TRYIMPORT, SPA_IMPORT_EXISTING, B_TRUE);
4550
4551 /*
4552 * If 'tryconfig' was at least parsable, return the current config.
4553 */
4554 if (spa->spa_root_vdev != NULL) {
4555 config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
4556 VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME,
4557 poolname) == 0);
4558 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE,
4559 state) == 0);
4560 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_TIMESTAMP,
4561 spa->spa_uberblock.ub_timestamp) == 0);
4562 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_LOAD_INFO,
4563 spa->spa_load_info) == 0);
4564
4565 /*
4566 * If the bootfs property exists on this pool then we
4567 * copy it out so that external consumers can tell which
4568 * pools are bootable.
4569 */
4570 if ((!error || error == EEXIST) && spa->spa_bootfs) {
4571 char *tmpname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
4572
4573 /*
4574 * We have to play games with the name since the
4575 * pool was opened as TRYIMPORT_NAME.
4576 */
4577 if (dsl_dsobj_to_dsname(spa_name(spa),
4578 spa->spa_bootfs, tmpname) == 0) {
4579 char *cp;
4580 char *dsname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
4581
4582 cp = strchr(tmpname, '/');
4583 if (cp == NULL) {
4584 (void) strlcpy(dsname, tmpname,
4585 MAXPATHLEN);
4586 } else {
4587 (void) snprintf(dsname, MAXPATHLEN,
4588 "%s/%s", poolname, ++cp);
4589 }
4590 VERIFY(nvlist_add_string(config,
4591 ZPOOL_CONFIG_BOOTFS, dsname) == 0);
4592 kmem_free(dsname, MAXPATHLEN);
4593 }
4594 kmem_free(tmpname, MAXPATHLEN);
4595 }
4596
4597 /*
4598 * Add the list of hot spares and level 2 cache devices.
4599 */
4600 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
4601 spa_add_spares(spa, config);
4602 spa_add_l2cache(spa, config);
4603 spa_config_exit(spa, SCL_CONFIG, FTAG);
4604 }
4605
4606 spa_unload(spa);
4607 spa_deactivate(spa);
4608 spa_remove(spa);
4609 mutex_exit(&spa_namespace_lock);
4610
4611 return (config);
4612}
4613
4614/*
4615 * Pool export/destroy
4616 *
4617 * The act of destroying or exporting a pool is very simple. We make sure there
4618 * is no more pending I/O and any references to the pool are gone. Then, we
4619 * update the pool state and sync all the labels to disk, removing the
4620 * configuration from the cache afterwards. If the 'hardforce' flag is set, then
4621 * we don't sync the labels or remove the configuration cache.
4622 */
4623static int
4624spa_export_common(char *pool, int new_state, nvlist_t **oldconfig,
4625 boolean_t force, boolean_t hardforce)
4626{
4627 spa_t *spa;
4628
4629 if (oldconfig)
4630 *oldconfig = NULL;
4631
4632 if (!(spa_mode_global & FWRITE))
4633 return (SET_ERROR(EROFS));
4634
4635 mutex_enter(&spa_namespace_lock);
4636 if ((spa = spa_lookup(pool)) == NULL) {
4637 mutex_exit(&spa_namespace_lock);
4638 return (SET_ERROR(ENOENT));
4639 }
4640
4641 /*
4642 * Put a hold on the pool, drop the namespace lock, stop async tasks,
4643 * reacquire the namespace lock, and see if we can export.
4644 */
4645 spa_open_ref(spa, FTAG);
4646 mutex_exit(&spa_namespace_lock);
4647 spa_async_suspend(spa);
4648 mutex_enter(&spa_namespace_lock);
4649 spa_close(spa, FTAG);
4650
4651 /*
4652 * The pool will be in core if it's openable,
4653 * in which case we can modify its state.
4654 */
4655 if (spa->spa_state != POOL_STATE_UNINITIALIZED && spa->spa_sync_on) {
4656 /*
4657 * Objsets may be open only because they're dirty, so we
4658 * have to force it to sync before checking spa_refcnt.
4659 */
4660 txg_wait_synced(spa->spa_dsl_pool, 0);
4661 spa_evicting_os_wait(spa);
4662
4663 /*
4664 * A pool cannot be exported or destroyed if there are active
4665 * references. If we are resetting a pool, allow references by
4666 * fault injection handlers.
4667 */
4668 if (!spa_refcount_zero(spa) ||
4669 (spa->spa_inject_ref != 0 &&
4670 new_state != POOL_STATE_UNINITIALIZED)) {
4671 spa_async_resume(spa);
4672 mutex_exit(&spa_namespace_lock);
4673 return (SET_ERROR(EBUSY));
4674 }
4675
4676 /*
4677 * A pool cannot be exported if it has an active shared spare.
4678		 * This is to prevent other pools from stealing the active spare
4679		 * from an exported pool. The user may still force the export
4680		 * if desired.
4681 */
4682 if (!force && new_state == POOL_STATE_EXPORTED &&
4683 spa_has_active_shared_spare(spa)) {
4684 spa_async_resume(spa);
4685 mutex_exit(&spa_namespace_lock);
4686 return (SET_ERROR(EXDEV));
4687 }
4688
4689 /*
4690 * We want this to be reflected on every label,
4691 * so mark them all dirty. spa_unload() will do the
4692 * final sync that pushes these changes out.
4693 */
4694 if (new_state != POOL_STATE_UNINITIALIZED && !hardforce) {
4695 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
4696 spa->spa_state = new_state;
4697 spa->spa_final_txg = spa_last_synced_txg(spa) +
4698 TXG_DEFER_SIZE + 1;
4699 vdev_config_dirty(spa->spa_root_vdev);
4700 spa_config_exit(spa, SCL_ALL, FTAG);
4701 }
4702 }
4703
4704 spa_event_notify(spa, NULL, ESC_ZFS_POOL_DESTROY);
4705
4706 if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
4707 spa_unload(spa);
4708 spa_deactivate(spa);
4709 }
4710
4711 if (oldconfig && spa->spa_config)
4712 VERIFY(nvlist_dup(spa->spa_config, oldconfig, 0) == 0);
4713
4714 if (new_state != POOL_STATE_UNINITIALIZED) {
4715 if (!hardforce)
4716 spa_config_sync(spa, B_TRUE, B_TRUE);
4717 spa_remove(spa);
4718 }
4719 mutex_exit(&spa_namespace_lock);
4720
4721 return (0);
4722}
4723
4724/*
4725 * Destroy a storage pool.
4726 */
4727int
4728spa_destroy(char *pool)
4729{
4730 return (spa_export_common(pool, POOL_STATE_DESTROYED, NULL,
4731 B_FALSE, B_FALSE));
4732}
4733
4734/*
4735 * Export a storage pool.
4736 */
4737int
4738spa_export(char *pool, nvlist_t **oldconfig, boolean_t force,
4739 boolean_t hardforce)
4740{
4741 return (spa_export_common(pool, POOL_STATE_EXPORTED, oldconfig,
4742 force, hardforce));
4743}
4744
4745/*
4746 * Similar to spa_export(), this unloads the spa_t without actually removing it
4747 * from the namespace in any way.
4748 */
4749int
4750spa_reset(char *pool)
4751{
4752 return (spa_export_common(pool, POOL_STATE_UNINITIALIZED, NULL,
4753 B_FALSE, B_FALSE));
4754}
4755
4756/*
4757 * ==========================================================================
4758 * Device manipulation
4759 * ==========================================================================
4760 */
4761
4762/*
4763 * Add a device to a storage pool.
4764 */
4765int
4766spa_vdev_add(spa_t *spa, nvlist_t *nvroot)
4767{
4768 uint64_t txg, id;
4769 int error;
4770 vdev_t *rvd = spa->spa_root_vdev;
4771 vdev_t *vd, *tvd;
4772 nvlist_t **spares, **l2cache;
4773 uint_t nspares, nl2cache;
4774
4775 ASSERT(spa_writeable(spa));
4776
4777 txg = spa_vdev_enter(spa);
4778
4779 if ((error = spa_config_parse(spa, &vd, nvroot, NULL, 0,
4780 VDEV_ALLOC_ADD)) != 0)
4781 return (spa_vdev_exit(spa, NULL, txg, error));
4782
4783 spa->spa_pending_vdev = vd; /* spa_vdev_exit() will clear this */
4784
4785 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, &spares,
4786 &nspares) != 0)
4787 nspares = 0;
4788
4789 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, &l2cache,
4790 &nl2cache) != 0)
4791 nl2cache = 0;
4792
4793 if (vd->vdev_children == 0 && nspares == 0 && nl2cache == 0)
4794 return (spa_vdev_exit(spa, vd, txg, EINVAL));
4795
4796 if (vd->vdev_children != 0 &&
4797 (error = vdev_create(vd, txg, B_FALSE)) != 0)
4798 return (spa_vdev_exit(spa, vd, txg, error));
4799
4800 /*
4801 * We must validate the spares and l2cache devices after checking the
4802 * children. Otherwise, vdev_inuse() will blindly overwrite the spare.
4803 */
4804 if ((error = spa_validate_aux(spa, nvroot, txg, VDEV_ALLOC_ADD)) != 0)
4805 return (spa_vdev_exit(spa, vd, txg, error));
4806
4807 /*
4808 * Transfer each new top-level vdev from vd to rvd.
4809 */
4810 for (int c = 0; c < vd->vdev_children; c++) {
4811
4812 /*
4813 * Set the vdev id to the first hole, if one exists.
4814 */
4815 for (id = 0; id < rvd->vdev_children; id++) {
4816 if (rvd->vdev_child[id]->vdev_ishole) {
4817 vdev_free(rvd->vdev_child[id]);
4818 break;
4819 }
4820 }
4821 tvd = vd->vdev_child[c];
4822 vdev_remove_child(vd, tvd);
4823 tvd->vdev_id = id;
4824 vdev_add_child(rvd, tvd);
4825 vdev_config_dirty(tvd);
4826 }
4827
4828 if (nspares != 0) {
4829 spa_set_aux_vdevs(&spa->spa_spares, spares, nspares,
4830 ZPOOL_CONFIG_SPARES);
4831 spa_load_spares(spa);
4832 spa->spa_spares.sav_sync = B_TRUE;
4833 }
4834
4835 if (nl2cache != 0) {
4836 spa_set_aux_vdevs(&spa->spa_l2cache, l2cache, nl2cache,
4837 ZPOOL_CONFIG_L2CACHE);
4838 spa_load_l2cache(spa);
4839 spa->spa_l2cache.sav_sync = B_TRUE;
4840 }
4841
4842 /*
4843 * We have to be careful when adding new vdevs to an existing pool.
4844 * If other threads start allocating from these vdevs before we
4845 * sync the config cache, and we lose power, then upon reboot we may
4846 * fail to open the pool because there are DVAs that the config cache
4847 * can't translate. Therefore, we first add the vdevs without
4848 * initializing metaslabs; sync the config cache (via spa_vdev_exit());
4849 * and then let spa_config_update() initialize the new metaslabs.
4850 *
4851 * spa_load() checks for added-but-not-initialized vdevs, so that
4852 * if we lose power at any point in this sequence, the remaining
4853 * steps will be completed the next time we load the pool.
4854 */
4855 (void) spa_vdev_exit(spa, vd, txg, 0);
4856
4857 mutex_enter(&spa_namespace_lock);
4858 spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
4859 spa_event_notify(spa, NULL, ESC_ZFS_VDEV_ADD);
4860 mutex_exit(&spa_namespace_lock);
4861
4862 return (0);
4863}
4864
4865/*
4866 * Attach a device to a mirror. The arguments are the GUID of any device
4867 * in the mirror, and the nvroot for the new device. If the GUID refers to
4868 * a device that is not mirrored, we automatically insert the mirror vdev.
4869 *
4870 * If 'replacing' is specified, the new device is intended to replace the
4871 * existing device; in this case the two devices are made into their own
4872 * mirror using the 'replacing' vdev, which is functionally identical to
4873 * the mirror vdev (it actually reuses all the same ops) but has a few
4874 * extra rules: you can't attach to it after it's been created, and upon
4875 * completion of resilvering, the first disk (the one being replaced)
4876 * is automatically detached.
4877 */
4878int
4879spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot, int replacing)
4880{
4881 uint64_t txg, dtl_max_txg;
4882 vdev_t *rvd = spa->spa_root_vdev;
4883 vdev_t *oldvd, *newvd, *newrootvd, *pvd, *tvd;
4884 vdev_ops_t *pvops;
4885 char *oldvdpath, *newvdpath;
4886 int newvd_isspare;
4887 int error;
4888
4889 ASSERT(spa_writeable(spa));
4890
4891 txg = spa_vdev_enter(spa);
4892
4893 oldvd = spa_lookup_by_guid(spa, guid, B_FALSE);
4894
4895 if (oldvd == NULL)
4896 return (spa_vdev_exit(spa, NULL, txg, ENODEV));
4897
4898 if (!oldvd->vdev_ops->vdev_op_leaf)
4899 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
4900
4901 pvd = oldvd->vdev_parent;
4902
4903 if ((error = spa_config_parse(spa, &newrootvd, nvroot, NULL, 0,
4904 VDEV_ALLOC_ATTACH)) != 0)
4905 return (spa_vdev_exit(spa, NULL, txg, EINVAL));
4906
4907 if (newrootvd->vdev_children != 1)
4908 return (spa_vdev_exit(spa, newrootvd, txg, EINVAL));
4909
4910 newvd = newrootvd->vdev_child[0];
4911
4912 if (!newvd->vdev_ops->vdev_op_leaf)
4913 return (spa_vdev_exit(spa, newrootvd, txg, EINVAL));
4914
4915 if ((error = vdev_create(newrootvd, txg, replacing)) != 0)
4916 return (spa_vdev_exit(spa, newrootvd, txg, error));
4917
4918 /*
4919 * Spares can't replace logs
4920 */
4921 if (oldvd->vdev_top->vdev_islog && newvd->vdev_isspare)
4922 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
4923
4924 if (!replacing) {
4925 /*
4926 * For attach, the only allowable parent is a mirror or the root
4927 * vdev.
4928 */
4929 if (pvd->vdev_ops != &vdev_mirror_ops &&
4930 pvd->vdev_ops != &vdev_root_ops)
4931 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
4932
4933 pvops = &vdev_mirror_ops;
4934 } else {
4935 /*
4936 * Active hot spares can only be replaced by inactive hot
4937 * spares.
4938 */
4939 if (pvd->vdev_ops == &vdev_spare_ops &&
4940 oldvd->vdev_isspare &&
4941 !spa_has_spare(spa, newvd->vdev_guid))
4942 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
4943
4944 /*
4945 * If the source is a hot spare, and the parent isn't already a
4946 * spare, then we want to create a new hot spare. Otherwise, we
4947 * want to create a replacing vdev. The user is not allowed to
4948 * attach to a spared vdev child unless the 'isspare' state is
4949 * the same (spare replaces spare, non-spare replaces
4950 * non-spare).
4951 */
4952 if (pvd->vdev_ops == &vdev_replacing_ops &&
4953 spa_version(spa) < SPA_VERSION_MULTI_REPLACE) {
4954 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
4955 } else if (pvd->vdev_ops == &vdev_spare_ops &&
4956 newvd->vdev_isspare != oldvd->vdev_isspare) {
4957 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
4958 }
4959
4960 if (newvd->vdev_isspare)
4961 pvops = &vdev_spare_ops;
4962 else
4963 pvops = &vdev_replacing_ops;
4964 }
4965
4966 /*
4967 * Make sure the new device is big enough.
4968 */
4969 if (newvd->vdev_asize < vdev_get_min_asize(oldvd))
4970 return (spa_vdev_exit(spa, newrootvd, txg, EOVERFLOW));
4971
4972 /*
4973 * The new device cannot have a higher alignment requirement
4974 * than the top-level vdev.
4975 */
4976 if (newvd->vdev_ashift > oldvd->vdev_top->vdev_ashift)
4977 return (spa_vdev_exit(spa, newrootvd, txg, EDOM));
4978
4979 /*
4980 * If this is an in-place replacement, update oldvd's path and devid
4981 * to make it distinguishable from newvd, and unopenable from now on.
4982 */
4983 if (strcmp(oldvd->vdev_path, newvd->vdev_path) == 0) {
4984 spa_strfree(oldvd->vdev_path);
4985 oldvd->vdev_path = kmem_alloc(strlen(newvd->vdev_path) + 5,
4986 KM_SLEEP);
4987 (void) sprintf(oldvd->vdev_path, "%s/%s",
4988 newvd->vdev_path, "old");
4989 if (oldvd->vdev_devid != NULL) {
4990 spa_strfree(oldvd->vdev_devid);
4991 oldvd->vdev_devid = NULL;
4992 }
4993 }
4994
4995 /* mark the device being resilvered */
4996 newvd->vdev_resilver_txg = txg;
4997
4998 /*
4999 * If the parent is not a mirror, or if we're replacing, insert the new
5000 * mirror/replacing/spare vdev above oldvd.
5001 */
5002 if (pvd->vdev_ops != pvops)
5003 pvd = vdev_add_parent(oldvd, pvops);
5004
5005 ASSERT(pvd->vdev_top->vdev_parent == rvd);
5006 ASSERT(pvd->vdev_ops == pvops);
5007 ASSERT(oldvd->vdev_parent == pvd);
5008
5009 /*
5010 * Extract the new device from its root and add it to pvd.
5011 */
5012 vdev_remove_child(newrootvd, newvd);
5013 newvd->vdev_id = pvd->vdev_children;
5014 newvd->vdev_crtxg = oldvd->vdev_crtxg;
5015 vdev_add_child(pvd, newvd);
5016
5017 tvd = newvd->vdev_top;
5018 ASSERT(pvd->vdev_top == tvd);
5019 ASSERT(tvd->vdev_parent == rvd);
5020
5021 vdev_config_dirty(tvd);
5022
5023 /*
5024 * Set newvd's DTL to [TXG_INITIAL, dtl_max_txg) so that we account
5025 * for any dmu_sync-ed blocks. It will propagate upward when
5026 * spa_vdev_exit() calls vdev_dtl_reassess().
5027 */
5028 dtl_max_txg = txg + TXG_CONCURRENT_STATES;
5029
5030 vdev_dtl_dirty(newvd, DTL_MISSING, TXG_INITIAL,
5031 dtl_max_txg - TXG_INITIAL);
5032
5033 if (newvd->vdev_isspare) {
5034 spa_spare_activate(newvd);
5035 spa_event_notify(spa, newvd, ESC_ZFS_VDEV_SPARE);
5036 }
5037
5038 oldvdpath = spa_strdup(oldvd->vdev_path);
5039 newvdpath = spa_strdup(newvd->vdev_path);
5040 newvd_isspare = newvd->vdev_isspare;
5041
5042 /*
5043 * Mark newvd's DTL dirty in this txg.
5044 */
5045 vdev_dirty(tvd, VDD_DTL, newvd, txg);
5046
5047 /*
5048 * Schedule the resilver to restart in the future. We do this to
5049 * ensure that dmu_sync-ed blocks have been stitched into the
5050 * respective datasets.
5051 */
5052 dsl_resilver_restart(spa->spa_dsl_pool, dtl_max_txg);
5053
5054 if (spa->spa_bootfs)
5055 spa_event_notify(spa, newvd, ESC_ZFS_BOOTFS_VDEV_ATTACH);
5056
5057 spa_event_notify(spa, newvd, ESC_ZFS_VDEV_ATTACH);
5058
5059 /*
5060 * Commit the config
5061 */
5062 (void) spa_vdev_exit(spa, newrootvd, dtl_max_txg, 0);
5063
5064 spa_history_log_internal(spa, "vdev attach", NULL,
5065 "%s vdev=%s %s vdev=%s",
5066 replacing && newvd_isspare ? "spare in" :
5067 replacing ? "replace" : "attach", newvdpath,
5068 replacing ? "for" : "to", oldvdpath);
5069
5070 spa_strfree(oldvdpath);
5071 spa_strfree(newvdpath);
5072
5073 return (0);
5074}
5075
5076/*
5077 * Detach a device from a mirror or replacing vdev.
5078 *
5079 * If 'replace_done' is specified, only detach if the parent
5080 * is a replacing vdev.
5081 */
5082int
5083spa_vdev_detach(spa_t *spa, uint64_t guid, uint64_t pguid, int replace_done)
5084{
5085 uint64_t txg;
5086 int error;
5087 vdev_t *rvd = spa->spa_root_vdev;
5088 vdev_t *vd, *pvd, *cvd, *tvd;
5089 boolean_t unspare = B_FALSE;
5090 uint64_t unspare_guid = 0;
5091 char *vdpath;
5092
5093 ASSERT(spa_writeable(spa));
5094
5095 txg = spa_vdev_enter(spa);
5096
5097 vd = spa_lookup_by_guid(spa, guid, B_FALSE);
5098
5099 if (vd == NULL)
5100 return (spa_vdev_exit(spa, NULL, txg, ENODEV));
5101
5102 if (!vd->vdev_ops->vdev_op_leaf)
5103 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
5104
5105 pvd = vd->vdev_parent;
5106
5107 /*
5108 * If the parent/child relationship is not as expected, don't do it.
5109 * Consider M(A,R(B,C)) -- that is, a mirror of A with a replacing
5110 * vdev that's replacing B with C. The user's intent in replacing
5111 * is to go from M(A,B) to M(A,C). If the user decides to cancel
5112 * the replace by detaching C, the expected behavior is to end up
5113 * M(A,B). But suppose that right after deciding to detach C,
5114 * the replacement of B completes. We would have M(A,C), and then
5115 * ask to detach C, which would leave us with just A -- not what
5116 * the user wanted. To prevent this, we make sure that the
5117 * parent/child relationship hasn't changed -- in this example,
5118 * that C's parent is still the replacing vdev R.
5119 */
5120 if (pvd->vdev_guid != pguid && pguid != 0)
5121 return (spa_vdev_exit(spa, NULL, txg, EBUSY));
5122
5123 /*
5124 * Only 'replacing' or 'spare' vdevs can be replaced.
5125 */
5126 if (replace_done && pvd->vdev_ops != &vdev_replacing_ops &&
5127 pvd->vdev_ops != &vdev_spare_ops)
5128 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
5129
5130 ASSERT(pvd->vdev_ops != &vdev_spare_ops ||
5131 spa_version(spa) >= SPA_VERSION_SPARES);
5132
5133 /*
5134 * Only mirror, replacing, and spare vdevs support detach.
5135 */
5136 if (pvd->vdev_ops != &vdev_replacing_ops &&
5137 pvd->vdev_ops != &vdev_mirror_ops &&
5138 pvd->vdev_ops != &vdev_spare_ops)
5139 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
5140
5141 /*
5142 * If this device has the only valid copy of some data,
5143 * we cannot safely detach it.
5144 */
5145 if (vdev_dtl_required(vd))
5146 return (spa_vdev_exit(spa, NULL, txg, EBUSY));
5147
5148 ASSERT(pvd->vdev_children >= 2);
5149
5150 /*
5151 * If we are detaching the second disk from a replacing vdev, then
5152 * check to see if we changed the original vdev's path to have "/old"
5153 * at the end in spa_vdev_attach(). If so, undo that change now.
5154 */
5155 if (pvd->vdev_ops == &vdev_replacing_ops && vd->vdev_id > 0 &&
5156 vd->vdev_path != NULL) {
5157 size_t len = strlen(vd->vdev_path);
5158
5159 for (int c = 0; c < pvd->vdev_children; c++) {
5160 cvd = pvd->vdev_child[c];
5161
5162 if (cvd == vd || cvd->vdev_path == NULL)
5163 continue;
5164
5165 if (strncmp(cvd->vdev_path, vd->vdev_path, len) == 0 &&
5166 strcmp(cvd->vdev_path + len, "/old") == 0) {
5167 spa_strfree(cvd->vdev_path);
5168 cvd->vdev_path = spa_strdup(vd->vdev_path);
5169 break;
5170 }
5171 }
5172 }
5173
5174 /*
5175 * If we are detaching the original disk from a spare, then it implies
5176 * that the spare should become a real disk, and be removed from the
5177 * active spare list for the pool.
5178 */
5179 if (pvd->vdev_ops == &vdev_spare_ops &&
5180 vd->vdev_id == 0 &&
5181 pvd->vdev_child[pvd->vdev_children - 1]->vdev_isspare)
5182 unspare = B_TRUE;
5183
5184 /*
5185 * Erase the disk labels so the disk can be used for other things.
5186 * This must be done after all other error cases are handled,
5187 * but before we disembowel vd (so we can still do I/O to it).
5188 * But if we can't do it, don't treat the error as fatal --
5189 * it may be that the unwritability of the disk is the reason
5190 * it's being detached!
5191 */
5192 error = vdev_label_init(vd, 0, VDEV_LABEL_REMOVE);
5193
5194 /*
5195 * Remove vd from its parent and compact the parent's children.
5196 */
5197 vdev_remove_child(pvd, vd);
5198 vdev_compact_children(pvd);
5199
5200 /*
5201 * Remember one of the remaining children so we can get tvd below.
5202 */
5203 cvd = pvd->vdev_child[pvd->vdev_children - 1];
5204
5205 /*
5206 * If we need to remove the remaining child from the list of hot spares,
5207 * do it now, marking the vdev as no longer a spare in the process.
5208 * We must do this before vdev_remove_parent(), because that can
5209 * change the GUID if it creates a new toplevel GUID. For a similar
5210 * reason, we must remove the spare now, in the same txg as the detach;
5211 * otherwise someone could attach a new sibling, change the GUID, and
5212 * the subsequent attempt to spa_vdev_remove(unspare_guid) would fail.
5213 */
5214 if (unspare) {
5215 ASSERT(cvd->vdev_isspare);
5216 spa_spare_remove(cvd);
5217 unspare_guid = cvd->vdev_guid;
5218 (void) spa_vdev_remove(spa, unspare_guid, B_TRUE);
5219 cvd->vdev_unspare = B_TRUE;
5220 }
5221
5222 /*
5223 * If the parent mirror/replacing vdev only has one child,
5224 * the parent is no longer needed. Remove it from the tree.
5225 */
5226 if (pvd->vdev_children == 1) {
5227 if (pvd->vdev_ops == &vdev_spare_ops)
5228 cvd->vdev_unspare = B_FALSE;
5229 vdev_remove_parent(cvd);
5230 }
5231
5233 /*
5234 * We don't set tvd until now because the parent we just removed
5235 * may have been the previous top-level vdev.
5236 */
5237 tvd = cvd->vdev_top;
5238 ASSERT(tvd->vdev_parent == rvd);
5239
5240 /*
5241 * Reevaluate the parent vdev state.
5242 */
5243 vdev_propagate_state(cvd);
5244
5245 /*
5246 * If the 'autoexpand' property is set on the pool then automatically
5247 * try to expand the size of the pool. For example if the device we
5248 * just detached was smaller than the others, it may be possible to
5249 * add metaslabs (i.e. grow the pool). We need to reopen the vdev
5250 * first so that we can obtain the updated sizes of the leaf vdevs.
5251 */
5252 if (spa->spa_autoexpand) {
5253 vdev_reopen(tvd);
5254 vdev_expand(tvd, txg);
5255 }
5256
5257 vdev_config_dirty(tvd);
5258
5259 /*
5260 * Mark vd's DTL as dirty in this txg. vdev_dtl_sync() will see that
5261 * vd->vdev_detached is set and free vd's DTL object in syncing context.
5262 * But first make sure we're not on any *other* txg's DTL list, to
5263 * prevent vd from being accessed after it's freed.
5264 */
5265 vdpath = spa_strdup(vd->vdev_path);
5266 for (int t = 0; t < TXG_SIZE; t++)
5267 (void) txg_list_remove_this(&tvd->vdev_dtl_list, vd, t);
5268 vd->vdev_detached = B_TRUE;
5269 vdev_dirty(tvd, VDD_DTL, vd, txg);
5270
5271 spa_event_notify(spa, vd, ESC_ZFS_VDEV_REMOVE);
5272
5273 /* hang on to the spa before we release the lock */
5274 spa_open_ref(spa, FTAG);
5275
5276 error = spa_vdev_exit(spa, vd, txg, 0);
5277
5278 spa_history_log_internal(spa, "detach", NULL,
5279 "vdev=%s", vdpath);
5280 spa_strfree(vdpath);
5281
5282 /*
5283 * If this was the removal of the original device in a hot spare vdev,
5284 * then we want to go through and remove the device from the hot spare
5285 * list of every other pool.
5286 */
5287 if (unspare) {
5288 spa_t *altspa = NULL;
5289
5290 mutex_enter(&spa_namespace_lock);
5291 while ((altspa = spa_next(altspa)) != NULL) {
5292 if (altspa->spa_state != POOL_STATE_ACTIVE ||
5293 altspa == spa)
5294 continue;
5295
5296 spa_open_ref(altspa, FTAG);
5297 mutex_exit(&spa_namespace_lock);
5298 (void) spa_vdev_remove(altspa, unspare_guid, B_TRUE);
5299 mutex_enter(&spa_namespace_lock);
5300 spa_close(altspa, FTAG);
5301 }
5302 mutex_exit(&spa_namespace_lock);
5303
5304 /* search the rest of the vdevs for spares to remove */
5305 spa_vdev_resilver_done(spa);
5306 }
5307
5308 /* all done with the spa; OK to release */
5309 mutex_enter(&spa_namespace_lock);
5310 spa_close(spa, FTAG);
5311 mutex_exit(&spa_namespace_lock);
5312
5313 return (error);
5314}
5315
5316/*
5317 * Split a set of devices from their mirrors, and create a new pool from them.
5318 */
5319int
5320spa_vdev_split_mirror(spa_t *spa, char *newname, nvlist_t *config,
5321 nvlist_t *props, boolean_t exp)
5322{
5323 int error = 0;
5324 uint64_t txg, *glist;
5325 spa_t *newspa;
5326 uint_t c, children, lastlog;
5327 nvlist_t **child, *nvl, *tmp;
5328 dmu_tx_t *tx;
5329 char *altroot = NULL;
5330 vdev_t *rvd, **vml = NULL; /* vdev modify list */
5331 boolean_t activate_slog;
5332
5333 ASSERT(spa_writeable(spa));
5334
5335 txg = spa_vdev_enter(spa);
5336
5337 /* clear the log and flush everything up to now */
5338 activate_slog = spa_passivate_log(spa);
5339 (void) spa_vdev_config_exit(spa, NULL, txg, 0, FTAG);
5340 error = spa_offline_log(spa);
5341 txg = spa_vdev_config_enter(spa);
5342
5343 if (activate_slog)
5344 spa_activate_log(spa);
5345
5346 if (error != 0)
5347 return (spa_vdev_exit(spa, NULL, txg, error));
5348
5349 /* check new spa name before going any further */
5350 if (spa_lookup(newname) != NULL)
5351 return (spa_vdev_exit(spa, NULL, txg, EEXIST));
5352
5353 /*
5354 * scan through all the children to ensure they're all mirrors
5355 */
5356 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvl) != 0 ||
5357 nvlist_lookup_nvlist_array(nvl, ZPOOL_CONFIG_CHILDREN, &child,
5358 &children) != 0)
5359 return (spa_vdev_exit(spa, NULL, txg, EINVAL));
5360
5361 /* first, check to ensure we've got the right child count */
5362 rvd = spa->spa_root_vdev;
5363 lastlog = 0;
5364 for (c = 0; c < rvd->vdev_children; c++) {
5365 vdev_t *vd = rvd->vdev_child[c];
5366
5367 /* don't count the holes & logs as children */
5368 if (vd->vdev_islog || vd->vdev_ishole) {
5369 if (lastlog == 0)
5370 lastlog = c;
5371 continue;
5372 }
5373
5374 lastlog = 0;
5375 }
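	/*
	 * The split request must name exactly one disk per top-level vdev,
	 * not counting any logs or holes at the tail of the vdev list
	 * (lastlog marks where that trailing run begins).
	 */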
5376 if (children != (lastlog != 0 ? lastlog : rvd->vdev_children))
5377 return (spa_vdev_exit(spa, NULL, txg, EINVAL));
5378
5379 /* next, ensure no spare or cache devices are part of the split */
5380 if (nvlist_lookup_nvlist(nvl, ZPOOL_CONFIG_SPARES, &tmp) == 0 ||
5381 nvlist_lookup_nvlist(nvl, ZPOOL_CONFIG_L2CACHE, &tmp) == 0)
5382 return (spa_vdev_exit(spa, NULL, txg, EINVAL));
5383
5384 vml = kmem_zalloc(children * sizeof (vdev_t *), KM_SLEEP);
5385 glist = kmem_zalloc(children * sizeof (uint64_t), KM_SLEEP);
5386
5387 /* then, loop over each vdev and validate it */
5388 for (c = 0; c < children; c++) {
5389 uint64_t is_hole = 0;
5390
5391 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
5392 &is_hole);
5393
5394 if (is_hole != 0) {
5395 if (spa->spa_root_vdev->vdev_child[c]->vdev_ishole ||
5396 spa->spa_root_vdev->vdev_child[c]->vdev_islog) {
5397 continue;
5398 } else {
5399 error = SET_ERROR(EINVAL);
5400 break;
5401 }
5402 }
5403
5404 /* which disk is going to be split? */
5405 if (nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_GUID,
5406 &glist[c]) != 0) {
5407 error = SET_ERROR(EINVAL);
5408 break;
5409 }
5410
5411 /* look it up in the spa */
5412 vml[c] = spa_lookup_by_guid(spa, glist[c], B_FALSE);
5413 if (vml[c] == NULL) {
5414 error = SET_ERROR(ENODEV);
5415 break;
5416 }
5417
5418 /* make sure there's nothing stopping the split */
5419 if (vml[c]->vdev_parent->vdev_ops != &vdev_mirror_ops ||
5420 vml[c]->vdev_islog ||
5421 vml[c]->vdev_ishole ||
5422 vml[c]->vdev_isspare ||
5423 vml[c]->vdev_isl2cache ||
5424 !vdev_writeable(vml[c]) ||
5425 vml[c]->vdev_children != 0 ||
5426 vml[c]->vdev_state != VDEV_STATE_HEALTHY ||
5427 c != spa->spa_root_vdev->vdev_child[c]->vdev_id) {
5428 error = SET_ERROR(EINVAL);
5429 break;
5430 }
5431
5432 if (vdev_dtl_required(vml[c])) {
5433 error = SET_ERROR(EBUSY);
5434 break;
5435 }
5436
5437 /* we need certain info from the top level */
5438 VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_METASLAB_ARRAY,
5439 vml[c]->vdev_top->vdev_ms_array) == 0);
5440 VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_METASLAB_SHIFT,
5441 vml[c]->vdev_top->vdev_ms_shift) == 0);
5442 VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_ASIZE,
5443 vml[c]->vdev_top->vdev_asize) == 0);
5444 VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_ASHIFT,
5445 vml[c]->vdev_top->vdev_ashift) == 0);
5446
5447 /* transfer per-vdev ZAPs */
5448 ASSERT3U(vml[c]->vdev_leaf_zap, !=, 0);
5449 VERIFY0(nvlist_add_uint64(child[c],
5450 ZPOOL_CONFIG_VDEV_LEAF_ZAP, vml[c]->vdev_leaf_zap));
5451
5452 ASSERT3U(vml[c]->vdev_top->vdev_top_zap, !=, 0);
5453 VERIFY0(nvlist_add_uint64(child[c],
5454 ZPOOL_CONFIG_VDEV_TOP_ZAP,
5455 vml[c]->vdev_parent->vdev_top_zap));
5456 }
5457
5458 if (error != 0) {
5459 kmem_free(vml, children * sizeof (vdev_t *));
5460 kmem_free(glist, children * sizeof (uint64_t));
5461 return (spa_vdev_exit(spa, NULL, txg, error));
5462 }
5463
5464 /* stop writers from using the disks */
5465 for (c = 0; c < children; c++) {
5466 if (vml[c] != NULL)
5467 vml[c]->vdev_offline = B_TRUE;
5468 }
5469 vdev_reopen(spa->spa_root_vdev);
5470
5471 /*
5472 * Temporarily record the splitting vdevs in the spa config. This
5473 * will disappear once the config is regenerated.
5474 */
5475 VERIFY(nvlist_alloc(&nvl, NV_UNIQUE_NAME, KM_SLEEP) == 0);
5476 VERIFY(nvlist_add_uint64_array(nvl, ZPOOL_CONFIG_SPLIT_LIST,
5477 glist, children) == 0);
5478 kmem_free(glist, children * sizeof (uint64_t));
5479
5480 mutex_enter(&spa->spa_props_lock);
5481 VERIFY(nvlist_add_nvlist(spa->spa_config, ZPOOL_CONFIG_SPLIT,
5482 nvl) == 0);
5483 mutex_exit(&spa->spa_props_lock);
5484 spa->spa_config_splitting = nvl;
5485 vdev_config_dirty(spa->spa_root_vdev);
5486
5487 /* configure and create the new pool */
5488 VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME, newname) == 0);
5489 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE,
5490 exp ? POOL_STATE_EXPORTED : POOL_STATE_ACTIVE) == 0);
5491 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_VERSION,
5492 spa_version(spa)) == 0);
5493 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_TXG,
5494 spa->spa_config_txg) == 0);
5495 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_GUID,
5496 spa_generate_guid(NULL)) == 0);
5497 VERIFY0(nvlist_add_boolean(config, ZPOOL_CONFIG_HAS_PER_VDEV_ZAPS));
5498 (void) nvlist_lookup_string(props,
5499 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
5500
5501 /* add the new pool to the namespace */
5502 newspa = spa_add(newname, config, altroot);
5503 newspa->spa_avz_action = AVZ_ACTION_REBUILD;
5504 newspa->spa_config_txg = spa->spa_config_txg;
5505 spa_set_log_state(newspa, SPA_LOG_CLEAR);
5506
5507 /* release the spa config lock, retaining the namespace lock */
5508 spa_vdev_config_exit(spa, NULL, txg, 0, FTAG);
5509
5510 if (zio_injection_enabled)
5511 zio_handle_panic_injection(spa, FTAG, 1);
5512
5513 spa_activate(newspa, spa_mode_global);
5514 spa_async_suspend(newspa);
5515
5516#ifndef illumos
5517 /* mark that we are creating new spa by splitting */
5518 newspa->spa_splitting_newspa = B_TRUE;
5519#endif
5520 /* create the new pool from the disks of the original pool */
5521 error = spa_load(newspa, SPA_LOAD_IMPORT, SPA_IMPORT_ASSEMBLE, B_TRUE);
5522#ifndef illumos
5523 newspa->spa_splitting_newspa = B_FALSE;
5524#endif
5525 if (error)
5526 goto out;
5527
5528 /* if that worked, generate a real config for the new pool */
5529 if (newspa->spa_root_vdev != NULL) {
5530 VERIFY(nvlist_alloc(&newspa->spa_config_splitting,
5531 NV_UNIQUE_NAME, KM_SLEEP) == 0);
5532 VERIFY(nvlist_add_uint64(newspa->spa_config_splitting,
5533 ZPOOL_CONFIG_SPLIT_GUID, spa_guid(spa)) == 0);
5534 spa_config_set(newspa, spa_config_generate(newspa, NULL, -1ULL,
5535 B_TRUE));
5536 }
5537
5538 /* set the props */
5539 if (props != NULL) {
5540 spa_configfile_set(newspa, props, B_FALSE);
5541 error = spa_prop_set(newspa, props);
5542 if (error)
5543 goto out;
5544 }
5545
5546 /* flush everything */
5547 txg = spa_vdev_config_enter(newspa);
5548 vdev_config_dirty(newspa->spa_root_vdev);
5549 (void) spa_vdev_config_exit(newspa, NULL, txg, 0, FTAG);
5550
5551 if (zio_injection_enabled)
5552 zio_handle_panic_injection(spa, FTAG, 2);
5553
5554 spa_async_resume(newspa);
5555
5556 /* finally, update the original pool's config */
5557 txg = spa_vdev_config_enter(spa);
5558 tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
5559 error = dmu_tx_assign(tx, TXG_WAIT);
5560 if (error != 0)
5561 dmu_tx_abort(tx);
5562 for (c = 0; c < children; c++) {
5563 if (vml[c] != NULL) {
5564 vdev_split(vml[c]);
5565 if (error == 0)
5566 spa_history_log_internal(spa, "detach", tx,
5567 "vdev=%s", vml[c]->vdev_path);
5568
5569 vdev_free(vml[c]);
5570 }
5571 }
5572 spa->spa_avz_action = AVZ_ACTION_REBUILD;
5573 vdev_config_dirty(spa->spa_root_vdev);
5574 spa->spa_config_splitting = NULL;
5575 nvlist_free(nvl);
5576 if (error == 0)
5577 dmu_tx_commit(tx);
5578 (void) spa_vdev_exit(spa, NULL, txg, 0);
5579
5580 if (zio_injection_enabled)
5581 zio_handle_panic_injection(spa, FTAG, 3);
5582
5583 /* split is complete; log a history record */
5584 spa_history_log_internal(newspa, "split", NULL,
5585 "from pool %s", spa_name(spa));
5586
5587 kmem_free(vml, children * sizeof (vdev_t *));
5588
5589 /* if we're not going to mount the filesystems in userland, export */
5590 if (exp)
5591 error = spa_export_common(newname, POOL_STATE_EXPORTED, NULL,
5592 B_FALSE, B_FALSE);
5593
5594 return (error);
5595
5596out:
5597 spa_unload(newspa);
5598 spa_deactivate(newspa);
5599 spa_remove(newspa);
5600
5601 txg = spa_vdev_config_enter(spa);
5602
5603 /* re-online all offlined disks */
5604 for (c = 0; c < children; c++) {
5605 if (vml[c] != NULL)
5606 vml[c]->vdev_offline = B_FALSE;
5607 }
5608 vdev_reopen(spa->spa_root_vdev);
5609
5610 nvlist_free(spa->spa_config_splitting);
5611 spa->spa_config_splitting = NULL;
5612 (void) spa_vdev_exit(spa, NULL, txg, error);
5613
5614 kmem_free(vml, children * sizeof (vdev_t *));
5615 return (error);
5616}
5617
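/*
 * Return the nvlist in 'nvpp' whose ZPOOL_CONFIG_GUID matches 'target_guid',
 * or NULL if no entry matches.
 */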
5618static nvlist_t *
5619spa_nvlist_lookup_by_guid(nvlist_t **nvpp, int count, uint64_t target_guid)
5620{
5621 for (int i = 0; i < count; i++) {
5622 uint64_t guid;
5623
5624 VERIFY(nvlist_lookup_uint64(nvpp[i], ZPOOL_CONFIG_GUID,
5625 &guid) == 0);
5626
5627 if (guid == target_guid)
5628 return (nvpp[i]);
5629 }
5630
5631 return (NULL);
5632}
5633
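/*
 * Rewrite the 'name' nvlist array in 'config' so that it contains every
 * entry of 'dev' except 'dev_to_remove', dropping one aux device from the
 * stored configuration.
 */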
5634static void
5635spa_vdev_remove_aux(nvlist_t *config, char *name, nvlist_t **dev, int count,
5636 nvlist_t *dev_to_remove)
5637{
5638 nvlist_t **newdev = NULL;
5639
5640 if (count > 1)
5641 newdev = kmem_alloc((count - 1) * sizeof (void *), KM_SLEEP);
5642
5643 for (int i = 0, j = 0; i < count; i++) {
5644 if (dev[i] == dev_to_remove)
5645 continue;
5646 VERIFY(nvlist_dup(dev[i], &newdev[j++], KM_SLEEP) == 0);
5647 }
5648
5649 VERIFY(nvlist_remove(config, name, DATA_TYPE_NVLIST_ARRAY) == 0);
5650 VERIFY(nvlist_add_nvlist_array(config, name, newdev, count - 1) == 0);
5651
5652 for (int i = 0; i < count - 1; i++)
5653 nvlist_free(newdev[i]);
5654
5655 if (count > 1)
5656 kmem_free(newdev, (count - 1) * sizeof (void *));
5657}
5658
5659/*
5660 * Evacuate the device.
5661 */
5662static int
5663spa_vdev_remove_evacuate(spa_t *spa, vdev_t *vd)
5664{
5665 uint64_t txg;
5666 int error = 0;
5667
5668 ASSERT(MUTEX_HELD(&spa_namespace_lock));
5669 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
5670 ASSERT(vd == vd->vdev_top);
5671
5672 /*
5673 * Evacuate the device. We don't hold the config lock as writer
5674 * since we need to do I/O, but we do keep the
5675 * spa_namespace_lock held. Once this completes the device
5676 * should no longer have any blocks allocated on it.
5677 */
5678 if (vd->vdev_islog) {
5679 if (vd->vdev_stat.vs_alloc != 0)
5680 error = spa_offline_log(spa);
5681 } else {
5682 error = SET_ERROR(ENOTSUP);
5683 }
5684
5685 if (error)
5686 return (error);
5687
5688 /*
5689 * The evacuation succeeded. Remove any remaining MOS metadata
5690 * associated with this vdev, and wait for these changes to sync.
5691 */
5692 ASSERT0(vd->vdev_stat.vs_alloc);
5693 txg = spa_vdev_config_enter(spa);
5694 vd->vdev_removing = B_TRUE;
5695 vdev_dirty_leaves(vd, VDD_DTL, txg);
5696 vdev_config_dirty(vd);
5697 spa_vdev_config_exit(spa, NULL, txg, 0, FTAG);
5698
5699 return (0);
5700}
5701
5702/*
5703 * Complete the removal by cleaning up the namespace.
5704 */
5705static void
5706spa_vdev_remove_from_namespace(spa_t *spa, vdev_t *vd)
5707{
5708 vdev_t *rvd = spa->spa_root_vdev;
5709 uint64_t id = vd->vdev_id;
5710 boolean_t last_vdev = (id == (rvd->vdev_children - 1));
5711
5712 ASSERT(MUTEX_HELD(&spa_namespace_lock));
5713 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
5714 ASSERT(vd == vd->vdev_top);
5715
5716 /*
5717 * Only remove the vdev if it is empty.
5718 */
5719 if (vd->vdev_stat.vs_alloc != 0)
5720 return;
5721
5722 (void) vdev_label_init(vd, 0, VDEV_LABEL_REMOVE);
5723
5724 if (list_link_active(&vd->vdev_state_dirty_node))
5725 vdev_state_clean(vd);
5726 if (list_link_active(&vd->vdev_config_dirty_node))
5727 vdev_config_clean(vd);
5728
5729 vdev_free(vd);
5730
5731 if (last_vdev) {
5732 vdev_compact_children(rvd);
5733 } else {
5734 vd = vdev_alloc_common(spa, id, 0, &vdev_hole_ops);
5735 vdev_add_child(rvd, vd);
5736 }
5737 vdev_config_dirty(rvd);
5738
5739 /*
5740 * Reassess the health of our root vdev.
5741 */
5742 vdev_reopen(rvd);
5743}
5744
5745/*
5746 * Remove a device from the pool -
5747 *
5748 * Removing a device from the vdev namespace requires several steps
5749 * and can take a significant amount of time. As a result we use
5750 * the spa_vdev_config_[enter/exit] functions which allow us to
5751 * grab and release the spa_config_lock while still holding the namespace
5752 * lock. During each step the configuration is synced out.
5753 *
5754 * Currently, this supports removing only hot spares, slogs, and level 2 ARC
5755 * devices.
5756 */
5757int
5758spa_vdev_remove(spa_t *spa, uint64_t guid, boolean_t unspare)
5759{
5760 vdev_t *vd;
5761 sysevent_t *ev = NULL;
5762 metaslab_group_t *mg;
5763 nvlist_t **spares, **l2cache, *nv;
5764 uint64_t txg = 0;
5765 uint_t nspares, nl2cache;
5766 int error = 0;
5767 boolean_t locked = MUTEX_HELD(&spa_namespace_lock);
5768
5769 ASSERT(spa_writeable(spa));
5770
5771 if (!locked)
5772 txg = spa_vdev_enter(spa);
5773
5774 vd = spa_lookup_by_guid(spa, guid, B_FALSE);
5775
5776 if (spa->spa_spares.sav_vdevs != NULL &&
5777 nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
5778 ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0 &&
5779 (nv = spa_nvlist_lookup_by_guid(spares, nspares, guid)) != NULL) {
5780 /*
5781 * Only remove the hot spare if it's not currently in use
5782 * in this pool.
5783 */
5784 if (vd == NULL || unspare) {
5785 if (vd == NULL)
5786 vd = spa_lookup_by_guid(spa, guid, B_TRUE);
5787 ev = spa_event_create(spa, vd, ESC_ZFS_VDEV_REMOVE_AUX);
5788 spa_vdev_remove_aux(spa->spa_spares.sav_config,
5789 ZPOOL_CONFIG_SPARES, spares, nspares, nv);
5790 spa_load_spares(spa);
5791 spa->spa_spares.sav_sync = B_TRUE;
5792 } else {
5793 error = SET_ERROR(EBUSY);
5794 }
5795 } else if (spa->spa_l2cache.sav_vdevs != NULL &&
5796 nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config,
5797 ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0 &&
5798 (nv = spa_nvlist_lookup_by_guid(l2cache, nl2cache, guid)) != NULL) {
5799 /*
5800 * Cache devices can always be removed.
5801 */
5802 vd = spa_lookup_by_guid(spa, guid, B_TRUE);
5803 ev = spa_event_create(spa, vd, ESC_ZFS_VDEV_REMOVE_AUX);
5804 spa_vdev_remove_aux(spa->spa_l2cache.sav_config,
5805 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache, nv);
5806 spa_load_l2cache(spa);
5807 spa->spa_l2cache.sav_sync = B_TRUE;
5808 } else if (vd != NULL && vd->vdev_islog) {
5809 ASSERT(!locked);
5810 ASSERT(vd == vd->vdev_top);
5811
5812 mg = vd->vdev_mg;
5813
5814 /*
5815 * Stop allocating from this vdev.
5816 */
5817 metaslab_group_passivate(mg);
5818
5819 /*
5820 * Wait for the youngest allocations and frees to sync,
5821 * and then wait for the deferral of those frees to finish.
5822 */
5823 spa_vdev_config_exit(spa, NULL,
5824 txg + TXG_CONCURRENT_STATES + TXG_DEFER_SIZE, 0, FTAG);
5825
5826 /*
5827 * Attempt to evacuate the vdev.
5828 */
5829 error = spa_vdev_remove_evacuate(spa, vd);
5830
5831 txg = spa_vdev_config_enter(spa);
5832
5833 /*
5834 * If we couldn't evacuate the vdev, unwind.
5835 */
5836 if (error) {
5837 metaslab_group_activate(mg);
5838 return (spa_vdev_exit(spa, NULL, txg, error));
5839 }
5840
5841 /*
5842 * Clean up the vdev namespace.
5843 */
5844 ev = spa_event_create(spa, vd, ESC_ZFS_VDEV_REMOVE_DEV);
5845 spa_vdev_remove_from_namespace(spa, vd);
5846
5847 } else if (vd != NULL) {
5848 /*
5849 * Normal vdevs cannot be removed (yet).
5850 */
5851 error = SET_ERROR(ENOTSUP);
5852 } else {
5853 /*
5854 * There is no vdev of any kind with the specified guid.
5855 */
5856 error = SET_ERROR(ENOENT);
5857 }
5858
5859 if (!locked)
5860 error = spa_vdev_exit(spa, NULL, txg, error);
5861
5862 if (ev)
5863 spa_event_post(ev);
5864
5865 return (error);
5866}
5867
5868/*
5869 * Find any device that's done replacing, or a vdev marked 'unspare' that's
5870 * currently spared, so we can detach it.
5871 */
5872static vdev_t *
5873spa_vdev_resilver_done_hunt(vdev_t *vd)
5874{
5875 vdev_t *newvd, *oldvd;
5876
5877 for (int c = 0; c < vd->vdev_children; c++) {
5878 oldvd = spa_vdev_resilver_done_hunt(vd->vdev_child[c]);
5879 if (oldvd != NULL)
5880 return (oldvd);
5881 }
5882
5883 /*
5884 * Check for a completed replacement. We always consider the first
5885 * vdev in the list to be the oldest vdev, and the last one to be
5886 * the newest (see spa_vdev_attach() for how that works). In
5887 * the case where the newest vdev is faulted, we will not automatically
5888 * remove it after a resilver completes. This is OK as it will require
5889 * user intervention to determine which disk the admin wishes to keep.
5890 */
5891 if (vd->vdev_ops == &vdev_replacing_ops) {
5892 ASSERT(vd->vdev_children > 1);
5893
5894 newvd = vd->vdev_child[vd->vdev_children - 1];
5895 oldvd = vd->vdev_child[0];
5896
5897 if (vdev_dtl_empty(newvd, DTL_MISSING) &&
5898 vdev_dtl_empty(newvd, DTL_OUTAGE) &&
5899 !vdev_dtl_required(oldvd))
5900 return (oldvd);
5901 }
5902
5903 /*
5904 * Check for a completed resilver with the 'unspare' flag set.
5905 */
5906 if (vd->vdev_ops == &vdev_spare_ops) {
5907 vdev_t *first = vd->vdev_child[0];
5908 vdev_t *last = vd->vdev_child[vd->vdev_children - 1];
5909
5910 if (last->vdev_unspare) {
5911 oldvd = first;
5912 newvd = last;
5913 } else if (first->vdev_unspare) {
5914 oldvd = last;
5915 newvd = first;
5916 } else {
5917 oldvd = NULL;
5918 }
5919
5920 if (oldvd != NULL &&
5921 vdev_dtl_empty(newvd, DTL_MISSING) &&
5922 vdev_dtl_empty(newvd, DTL_OUTAGE) &&
5923 !vdev_dtl_required(oldvd))
5924 return (oldvd);
5925
5926 /*
5927 * If there is more than one spare attached to a disk,
5928 * and those spares are not required, then we want to
5929 * attempt to free them up now so that they can be used
5930 * by other pools. Once we're back down to a single
5931 * disk+spare, we stop removing them.
5932 */
5933 if (vd->vdev_children > 2) {
5934 newvd = vd->vdev_child[1];
5935
5936 if (newvd->vdev_isspare && last->vdev_isspare &&
5937 vdev_dtl_empty(last, DTL_MISSING) &&
5938 vdev_dtl_empty(last, DTL_OUTAGE) &&
5939 !vdev_dtl_required(newvd))
5940 return (newvd);
5941 }
5942 }
5943
5944 return (NULL);
5945}
5946
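/*
 * Detach every vdev that spa_vdev_resilver_done_hunt() reports as finished.
 * When the finished replacement sits under a spare vdev, the associated hot
 * spare is detached as well so that it becomes available again.
 */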
5947static void
5948spa_vdev_resilver_done(spa_t *spa)
5949{
5950 vdev_t *vd, *pvd, *ppvd;
5951 uint64_t guid, sguid, pguid, ppguid;
5952
5953 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
5954
5955 while ((vd = spa_vdev_resilver_done_hunt(spa->spa_root_vdev)) != NULL) {
5956 pvd = vd->vdev_parent;
5957 ppvd = pvd->vdev_parent;
5958 guid = vd->vdev_guid;
5959 pguid = pvd->vdev_guid;
5960 ppguid = ppvd->vdev_guid;
5961 sguid = 0;
5962 /*
5963 * If we have just finished replacing a hot spared device, then
5964 * we need to detach the parent's first child (the original hot
5965 * spare) as well.
5966 */
5967 if (ppvd->vdev_ops == &vdev_spare_ops && pvd->vdev_id == 0 &&
5968 ppvd->vdev_children == 2) {
5969 ASSERT(pvd->vdev_ops == &vdev_replacing_ops);
5970 sguid = ppvd->vdev_child[1]->vdev_guid;
5971 }
5972 ASSERT(vd->vdev_resilver_txg == 0 || !vdev_dtl_required(vd));
5973
5974 spa_config_exit(spa, SCL_ALL, FTAG);
5975 if (spa_vdev_detach(spa, guid, pguid, B_TRUE) != 0)
5976 return;
5977 if (sguid && spa_vdev_detach(spa, sguid, ppguid, B_TRUE) != 0)
5978 return;
5979 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
5980 }
5981
5982 spa_config_exit(spa, SCL_ALL, FTAG);
5983}
5984
5985/*
5986 * Update the stored path or FRU for this vdev.
5987 */
5988int
5989spa_vdev_set_common(spa_t *spa, uint64_t guid, const char *value,
5990 boolean_t ispath)
5991{
5992 vdev_t *vd;
5993 boolean_t sync = B_FALSE;
5994
5995 ASSERT(spa_writeable(spa));
5996
5997 spa_vdev_state_enter(spa, SCL_ALL);
5998
5999 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
6000 return (spa_vdev_state_exit(spa, NULL, ENOENT));
6001
6002 if (!vd->vdev_ops->vdev_op_leaf)
6003 return (spa_vdev_state_exit(spa, NULL, ENOTSUP));
6004
6005 if (ispath) {
6006 if (strcmp(value, vd->vdev_path) != 0) {
6007 spa_strfree(vd->vdev_path);
6008 vd->vdev_path = spa_strdup(value);
6009 sync = B_TRUE;
6010 }
6011 } else {
6012 if (vd->vdev_fru == NULL) {
6013 vd->vdev_fru = spa_strdup(value);
6014 sync = B_TRUE;
6015 } else if (strcmp(value, vd->vdev_fru) != 0) {
6016 spa_strfree(vd->vdev_fru);
6017 vd->vdev_fru = spa_strdup(value);
6018 sync = B_TRUE;
6019 }
6020 }
6021
6022 return (spa_vdev_state_exit(spa, sync ? vd : NULL, 0));
6023}
6024
6025int
6026spa_vdev_setpath(spa_t *spa, uint64_t guid, const char *newpath)
6027{
6028 return (spa_vdev_set_common(spa, guid, newpath, B_TRUE));
6029}
6030
6031int
6032spa_vdev_setfru(spa_t *spa, uint64_t guid, const char *newfru)
6033{
6034 return (spa_vdev_set_common(spa, guid, newfru, B_FALSE));
6035}
6036
6037/*
6038 * ==========================================================================
6039 * SPA Scanning
6040 * ==========================================================================
6041 */
6042
6043int
6044spa_scan_stop(spa_t *spa)
6045{
6046 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
6047 if (dsl_scan_resilvering(spa->spa_dsl_pool))
6048 return (SET_ERROR(EBUSY));
6049 return (dsl_scan_cancel(spa->spa_dsl_pool));
6050}
6051
6052int
6053spa_scan(spa_t *spa, pool_scan_func_t func)
6054{
6055 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
6056
6057 if (func >= POOL_SCAN_FUNCS || func == POOL_SCAN_NONE)
6058 return (SET_ERROR(ENOTSUP));
6059
6060 /*
6061 * If a resilver was requested, but there is no DTL on a
6062 * writeable leaf device, we have nothing to do.
6063 */
6064 if (func == POOL_SCAN_RESILVER &&
6065 !vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL)) {
6066 spa_async_request(spa, SPA_ASYNC_RESILVER_DONE);
6067 return (0);
6068 }
6069
6070 return (dsl_scan(spa->spa_dsl_pool, func));
6071}
6072
6073/*
6074 * ==========================================================================
6075 * SPA async task processing
6076 * ==========================================================================
6077 */
6078
6079static void
6080spa_async_remove(spa_t *spa, vdev_t *vd)
6081{
6082 if (vd->vdev_remove_wanted) {
6083 vd->vdev_remove_wanted = B_FALSE;
6084 vd->vdev_delayed_close = B_FALSE;
6085 vdev_set_state(vd, B_FALSE, VDEV_STATE_REMOVED, VDEV_AUX_NONE);
6086
6087 /*
6088 * We want to clear the stats, but we don't want to do a full
6089 * vdev_clear() as that will cause us to throw away
6090 * degraded/faulted state as well as attempt to reopen the
6091 * device, all of which is a waste.
6092 */
6093 vd->vdev_stat.vs_read_errors = 0;
6094 vd->vdev_stat.vs_write_errors = 0;
6095 vd->vdev_stat.vs_checksum_errors = 0;
6096
6097 vdev_state_dirty(vd->vdev_top);
6098 /* Tell userspace that the vdev is gone. */
6099 zfs_post_remove(spa, vd);
6100 }
6101
6102 for (int c = 0; c < vd->vdev_children; c++)
6103 spa_async_remove(spa, vd->vdev_child[c]);
6104}
6105
6106static void
6107spa_async_probe(spa_t *spa, vdev_t *vd)
6108{
6109 if (vd->vdev_probe_wanted) {
6110 vd->vdev_probe_wanted = B_FALSE;
6111 vdev_reopen(vd); /* vdev_open() does the actual probe */
6112 }
6113
6114 for (int c = 0; c < vd->vdev_children; c++)
6115 spa_async_probe(spa, vd->vdev_child[c]);
6116}
6117
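/*
 * Walk the vdev tree and post an ESC_ZFS_VDEV_AUTOEXPAND sysevent for every
 * leaf vdev that has a known physical path, so that userland can react and
 * grow the underlying device.  This is a no-op unless the pool's
 * 'autoexpand' property is set.
 */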
6118static void
6119spa_async_autoexpand(spa_t *spa, vdev_t *vd)
6120{
6121 sysevent_id_t eid;
6122 nvlist_t *attr;
6123 char *physpath;
6124
6125 if (!spa->spa_autoexpand)
6126 return;
6127
6128 for (int c = 0; c < vd->vdev_children; c++) {
6129 vdev_t *cvd = vd->vdev_child[c];
6130 spa_async_autoexpand(spa, cvd);
6131 }
6132
6133 if (!vd->vdev_ops->vdev_op_leaf || vd->vdev_physpath == NULL)
6134 return;
6135
6136 physpath = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
6137 (void) snprintf(physpath, MAXPATHLEN, "/devices%s", vd->vdev_physpath);
6138
6139 VERIFY(nvlist_alloc(&attr, NV_UNIQUE_NAME, KM_SLEEP) == 0);
6140 VERIFY(nvlist_add_string(attr, DEV_PHYS_PATH, physpath) == 0);
6141
6142 (void) ddi_log_sysevent(zfs_dip, SUNW_VENDOR, EC_DEV_STATUS,
6143 ESC_ZFS_VDEV_AUTOEXPAND, attr, &eid, DDI_SLEEP);
6144
6145 nvlist_free(attr);
6146 kmem_free(physpath, MAXPATHLEN);
6147}
6148
6149static void
6150spa_async_thread(void *arg)
6151{
6152 spa_t *spa = arg;
6153 int tasks;
6154
6155 ASSERT(spa->spa_sync_on);
6156
6157 mutex_enter(&spa->spa_async_lock);
6158 tasks = spa->spa_async_tasks;
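	/*
	 * Keep only SPA_ASYNC_REMOVE pending; that task is handled by
	 * spa_async_thread_vd(), while everything else is handled here.
	 */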
6159 spa->spa_async_tasks &= SPA_ASYNC_REMOVE;
6160 mutex_exit(&spa->spa_async_lock);
6161
6162 /*
6163 * See if the config needs to be updated.
6164 */
6165 if (tasks & SPA_ASYNC_CONFIG_UPDATE) {
6166 uint64_t old_space, new_space;
6167
6168 mutex_enter(&spa_namespace_lock);
6169 old_space = metaslab_class_get_space(spa_normal_class(spa));
6170 spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
6171 new_space = metaslab_class_get_space(spa_normal_class(spa));
6172 mutex_exit(&spa_namespace_lock);
6173
6174 /*
6175 * If the pool grew as a result of the config update,
6176 * then log an internal history event.
6177 */
6178 if (new_space != old_space) {
6179 spa_history_log_internal(spa, "vdev online", NULL,
6180 "pool '%s' size: %llu(+%llu)",
6181 spa_name(spa), new_space, new_space - old_space);
6182 }
6183 }
6184
6185 if ((tasks & SPA_ASYNC_AUTOEXPAND) && !spa_suspended(spa)) {
6186 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
6187 spa_async_autoexpand(spa, spa->spa_root_vdev);
6188 spa_config_exit(spa, SCL_CONFIG, FTAG);
6189 }
6190
6191 /*
6192 * See if any devices need to be probed.
6193 */
6194 if (tasks & SPA_ASYNC_PROBE) {
6195 spa_vdev_state_enter(spa, SCL_NONE);
6196 spa_async_probe(spa, spa->spa_root_vdev);
6197 (void) spa_vdev_state_exit(spa, NULL, 0);
6198 }
6199
6200 /*
6201 * If any devices are done replacing, detach them.
6202 */
6203 if (tasks & SPA_ASYNC_RESILVER_DONE)
6204 spa_vdev_resilver_done(spa);
6205
6206 /*
6207 * Kick off a resilver.
6208 */
6209 if (tasks & SPA_ASYNC_RESILVER)
6210 dsl_resilver_restart(spa->spa_dsl_pool, 0);
6211
6212 /*
6213 * Let the world know that we're done.
6214 */
6215 mutex_enter(&spa->spa_async_lock);
6216 spa->spa_async_thread = NULL;
6217 cv_broadcast(&spa->spa_async_cv);
6218 mutex_exit(&spa->spa_async_lock);
6219 thread_exit();
6220}
6221
6222static void
6223spa_async_thread_vd(void *arg)
6224{
6225 spa_t *spa = arg;
6226 int tasks;
6227
6228 ASSERT(spa->spa_sync_on);
6229
6230 mutex_enter(&spa->spa_async_lock);
6231 tasks = spa->spa_async_tasks;
6232retry:
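	/* Claim the remove request; other task bits are left for spa_async_thread(). */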
6233 spa->spa_async_tasks &= ~SPA_ASYNC_REMOVE;
6234 mutex_exit(&spa->spa_async_lock);
6235
6236 /*
6237 * See if any devices need to be marked REMOVED.
6238 */
6239 if (tasks & SPA_ASYNC_REMOVE) {
6240 spa_vdev_state_enter(spa, SCL_NONE);
6241 spa_async_remove(spa, spa->spa_root_vdev);
6242 for (int i = 0; i < spa->spa_l2cache.sav_count; i++)
6243 spa_async_remove(spa, spa->spa_l2cache.sav_vdevs[i]);
6244 for (int i = 0; i < spa->spa_spares.sav_count; i++)
6245 spa_async_remove(spa, spa->spa_spares.sav_vdevs[i]);
6246 (void) spa_vdev_state_exit(spa, NULL, 0);
6247 }
6248
6249 /*
6250 * Let the world know that we're done.
6251 */
6252 mutex_enter(&spa->spa_async_lock);
6253 tasks = spa->spa_async_tasks;
6254 if ((tasks & SPA_ASYNC_REMOVE) != 0)
6255 goto retry;
6256 spa->spa_async_thread_vd = NULL;
6257 cv_broadcast(&spa->spa_async_cv);
6258 mutex_exit(&spa->spa_async_lock);
6259 thread_exit();
6260}
6261
6262void
6263spa_async_suspend(spa_t *spa)
6264{
6265 mutex_enter(&spa->spa_async_lock);
6266 spa->spa_async_suspended++;
6267 while (spa->spa_async_thread != NULL ||
6268 spa->spa_async_thread_vd != NULL)
6269 cv_wait(&spa->spa_async_cv, &spa->spa_async_lock);
6270 mutex_exit(&spa->spa_async_lock);
6271}
6272
6273void
6274spa_async_resume(spa_t *spa)
6275{
6276 mutex_enter(&spa->spa_async_lock);
6277 ASSERT(spa->spa_async_suspended != 0);
6278 spa->spa_async_suspended--;
6279 mutex_exit(&spa->spa_async_lock);
6280}
6281
6282static boolean_t
6283spa_async_tasks_pending(spa_t *spa)
6284{
6285 uint_t non_config_tasks;
6286 uint_t config_task;
6287 boolean_t config_task_suspended;
6288
6289 non_config_tasks = spa->spa_async_tasks & ~(SPA_ASYNC_CONFIG_UPDATE |
6290 SPA_ASYNC_REMOVE);
6291 config_task = spa->spa_async_tasks & SPA_ASYNC_CONFIG_UPDATE;
6292 if (spa->spa_ccw_fail_time == 0) {
6293 config_task_suspended = B_FALSE;
6294 } else {
6295 config_task_suspended =
6296 (gethrtime() - spa->spa_ccw_fail_time) <
6297 (zfs_ccw_retry_interval * NANOSEC);
6298 }
6299
6300 return (non_config_tasks || (config_task && !config_task_suspended));
6301}
6302
6303static void
6304spa_async_dispatch(spa_t *spa)
6305{
6306 mutex_enter(&spa->spa_async_lock);
6307 if (spa_async_tasks_pending(spa) &&
6308 !spa->spa_async_suspended &&
6309 spa->spa_async_thread == NULL &&
6310 rootdir != NULL)
6311 spa->spa_async_thread = thread_create(NULL, 0,
6312 spa_async_thread, spa, 0, &p0, TS_RUN, maxclsyspri);
6313 mutex_exit(&spa->spa_async_lock);
6314}
6315
6316static void
6317spa_async_dispatch_vd(spa_t *spa)
6318{
6319 mutex_enter(&spa->spa_async_lock);
6320 if ((spa->spa_async_tasks & SPA_ASYNC_REMOVE) != 0 &&
6321 !spa->spa_async_suspended &&
6322 spa->spa_async_thread_vd == NULL &&
6323 rootdir != NULL)
6324 spa->spa_async_thread_vd = thread_create(NULL, 0,
6325 spa_async_thread_vd, spa, 0, &p0, TS_RUN, maxclsyspri);
6326 mutex_exit(&spa->spa_async_lock);
6327}
6328
6329void
6330spa_async_request(spa_t *spa, int task)
6331{
6332 zfs_dbgmsg("spa=%s async request task=%u", spa->spa_name, task);
6333 mutex_enter(&spa->spa_async_lock);
6334 spa->spa_async_tasks |= task;
6335 mutex_exit(&spa->spa_async_lock);
6336 spa_async_dispatch_vd(spa);
6337}
6338
6339/*
6340 * ==========================================================================
6341 * SPA syncing routines
6342 * ==========================================================================
6343 */
6344
6345static int
6346bpobj_enqueue_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
6347{
6348 bpobj_t *bpo = arg;
6349 bpobj_enqueue(bpo, bp, tx);
6350 return (0);
6351}
6352
6353static int
6354spa_free_sync_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
6355{
6356 zio_t *zio = arg;
6357
6358 zio_nowait(zio_free_sync(zio, zio->io_spa, dmu_tx_get_txg(tx), bp,
6359 BP_GET_PSIZE(bp), zio->io_flags));
6360 return (0);
6361}
6362
6363/*
6364 * Note: this simple function is not inlined to make it easier to dtrace the
6365 * amount of time spent syncing frees.
6366 */
6367static void
6368spa_sync_frees(spa_t *spa, bplist_t *bpl, dmu_tx_t *tx)
6369{
6370 zio_t *zio = zio_root(spa, NULL, NULL, 0);
6371 bplist_iterate(bpl, spa_free_sync_cb, zio, tx);
6372 VERIFY(zio_wait(zio) == 0);
6373}
6374
6375/*
6376 * Note: this simple function is not inlined to make it easier to dtrace the
6377 * amount of time spent syncing deferred frees.
6378 */
6379static void
6380spa_sync_deferred_frees(spa_t *spa, dmu_tx_t *tx)
6381{
6382 zio_t *zio = zio_root(spa, NULL, NULL, 0);
6383 VERIFY3U(bpobj_iterate(&spa->spa_deferred_bpobj,
6384 spa_free_sync_cb, zio, tx), ==, 0);
6385 VERIFY0(zio_wait(zio));
6386}
6387
6388
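/*
 * Pack an nvlist into the packed-nvlist object 'obj': the XDR-encoded
 * nvlist is written to the object data, zero-padded to a multiple of
 * SPA_CONFIG_BLOCKSIZE, and the unpadded size is stored in the bonus buffer.
 */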
6389static void
6390spa_sync_nvlist(spa_t *spa, uint64_t obj, nvlist_t *nv, dmu_tx_t *tx)
6391{
6392 char *packed = NULL;
6393 size_t bufsize;
6394 size_t nvsize = 0;
6395 dmu_buf_t *db;
6396
6397 VERIFY(nvlist_size(nv, &nvsize, NV_ENCODE_XDR) == 0);
6398
6399 /*
6400 * Write full (SPA_CONFIG_BLOCKSIZE) blocks of configuration
6401 * information. This avoids the dmu_buf_will_dirty() path and
6402 * saves us a pre-read to get data we don't actually care about.
6403 */
6404 bufsize = P2ROUNDUP((uint64_t)nvsize, SPA_CONFIG_BLOCKSIZE);
6405 packed = kmem_alloc(bufsize, KM_SLEEP);
6406
6407 VERIFY(nvlist_pack(nv, &packed, &nvsize, NV_ENCODE_XDR,
6408 KM_SLEEP) == 0);
6409 bzero(packed + nvsize, bufsize - nvsize);
6410
6411 dmu_write(spa->spa_meta_objset, obj, 0, bufsize, packed, tx);
6412
6413 kmem_free(packed, bufsize);
6414
6415 VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db));
6416 dmu_buf_will_dirty(db, tx);
6417 *(uint64_t *)db->db_data = nvsize;
6418 dmu_buf_rele(db, FTAG);
6419}
6420
6421static void
6422spa_sync_aux_dev(spa_t *spa, spa_aux_vdev_t *sav, dmu_tx_t *tx,
6423 const char *config, const char *entry)
6424{
6425 nvlist_t *nvroot;
6426 nvlist_t **list;
6427 int i;
6428
6429 if (!sav->sav_sync)
6430 return;
6431
6432 /*
6433 * Update the MOS nvlist describing the list of available devices.
6434 * spa_validate_aux() will have already made sure this nvlist is
6435 * valid and the vdevs are labeled appropriately.
6436 */
6437 if (sav->sav_object == 0) {
6438 sav->sav_object = dmu_object_alloc(spa->spa_meta_objset,
6439 DMU_OT_PACKED_NVLIST, 1 << 14, DMU_OT_PACKED_NVLIST_SIZE,
6440 sizeof (uint64_t), tx);
6441 VERIFY(zap_update(spa->spa_meta_objset,
6442 DMU_POOL_DIRECTORY_OBJECT, entry, sizeof (uint64_t), 1,
6443 &sav->sav_object, tx) == 0);
6444 }
6445
6446 VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0);
6447 if (sav->sav_count == 0) {
6448 VERIFY(nvlist_add_nvlist_array(nvroot, config, NULL, 0) == 0);
6449 } else {
6450 list = kmem_alloc(sav->sav_count * sizeof (void *), KM_SLEEP);
6451 for (i = 0; i < sav->sav_count; i++)
6452 list[i] = vdev_config_generate(spa, sav->sav_vdevs[i],
6453 B_FALSE, VDEV_CONFIG_L2CACHE);
6454 VERIFY(nvlist_add_nvlist_array(nvroot, config, list,
6455 sav->sav_count) == 0);
6456 for (i = 0; i < sav->sav_count; i++)
6457 nvlist_free(list[i]);
6458 kmem_free(list, sav->sav_count * sizeof (void *));
6459 }
6460
6461 spa_sync_nvlist(spa, sav->sav_object, nvroot, tx);
6462 nvlist_free(nvroot);
6463
6464 sav->sav_sync = B_FALSE;
6465}
6466
6467/*
6468 * Rebuild spa's all-vdev ZAP from the vdev ZAPs indicated in each vdev_t.
6469 * The all-vdev ZAP must be empty.
6470 */
6471static void
6472spa_avz_build(vdev_t *vd, uint64_t avz, dmu_tx_t *tx)
6473{
6474 spa_t *spa = vd->vdev_spa;
6475 if (vd->vdev_top_zap != 0) {
6476 VERIFY0(zap_add_int(spa->spa_meta_objset, avz,
6477 vd->vdev_top_zap, tx));
6478 }
6479 if (vd->vdev_leaf_zap != 0) {
6480 VERIFY0(zap_add_int(spa->spa_meta_objset, avz,
6481 vd->vdev_leaf_zap, tx));
6482 }
6483 for (uint64_t i = 0; i < vd->vdev_children; i++) {
6484 spa_avz_build(vd->vdev_child[i], avz, tx);
6485 }
6486}
6487
6488static void
6489spa_sync_config_object(spa_t *spa, dmu_tx_t *tx)
6490{
6491 nvlist_t *config;
6492
6493 /*
6494 * If the pool is being imported from a pre-per-vdev-ZAP version of ZFS,
6495 * its config may not be dirty but we still need to build per-vdev ZAPs.
6496 * Similarly, if the pool is being assembled (e.g. after a split), we
6497 * need to rebuild the AVZ although the config may not be dirty.
6498 */
6499 if (list_is_empty(&spa->spa_config_dirty_list) &&
6500 spa->spa_avz_action == AVZ_ACTION_NONE)
6501 return;
6502
6503 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
6504
6505 ASSERT(spa->spa_avz_action == AVZ_ACTION_NONE ||
2747 /*
2748 * An older version of ZFS overwrote the sentinel value, so
2749 * we have orphaned per-vdev ZAPs in the MOS. Defer their
2750 * destruction to later; see spa_sync_config_object.
2751 */
2752 spa->spa_avz_action = AVZ_ACTION_DESTROY;
2753 /*
2754 * We're assuming that no vdevs have had their ZAPs created
2755 * before this. Better be sure of it.
2756 */
2757 ASSERT0(vdev_count_verify_zaps(spa->spa_root_vdev));
2758 }
2759 nvlist_free(mos_config);
2760
2761 /*
2762 * If we're assembling the pool from the split-off vdevs of
2763 * an existing pool, we don't want to attach the spares & cache
2764 * devices.
2765 */
2766
2767 /*
2768 * Load any hot spares for this pool.
2769 */
2770 error = spa_dir_prop(spa, DMU_POOL_SPARES, &spa->spa_spares.sav_object);
2771 if (error != 0 && error != ENOENT)
2772 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2773 if (error == 0 && type != SPA_IMPORT_ASSEMBLE) {
2774 ASSERT(spa_version(spa) >= SPA_VERSION_SPARES);
2775 if (load_nvlist(spa, spa->spa_spares.sav_object,
2776 &spa->spa_spares.sav_config) != 0)
2777 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2778
2779 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
2780 spa_load_spares(spa);
2781 spa_config_exit(spa, SCL_ALL, FTAG);
2782 } else if (error == 0) {
2783 spa->spa_spares.sav_sync = B_TRUE;
2784 }
2785
2786 /*
2787 * Load any level 2 ARC devices for this pool.
2788 */
2789 error = spa_dir_prop(spa, DMU_POOL_L2CACHE,
2790 &spa->spa_l2cache.sav_object);
2791 if (error != 0 && error != ENOENT)
2792 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2793 if (error == 0 && type != SPA_IMPORT_ASSEMBLE) {
2794 ASSERT(spa_version(spa) >= SPA_VERSION_L2CACHE);
2795 if (load_nvlist(spa, spa->spa_l2cache.sav_object,
2796 &spa->spa_l2cache.sav_config) != 0)
2797 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2798
2799 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
2800 spa_load_l2cache(spa);
2801 spa_config_exit(spa, SCL_ALL, FTAG);
2802 } else if (error == 0) {
2803 spa->spa_l2cache.sav_sync = B_TRUE;
2804 }
2805
2806 spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION);
2807
2808 error = spa_dir_prop(spa, DMU_POOL_PROPS, &spa->spa_pool_props_object);
2809 if (error && error != ENOENT)
2810 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2811
2812 if (error == 0) {
2813 uint64_t autoreplace;
2814
2815 spa_prop_find(spa, ZPOOL_PROP_BOOTFS, &spa->spa_bootfs);
2816 spa_prop_find(spa, ZPOOL_PROP_AUTOREPLACE, &autoreplace);
2817 spa_prop_find(spa, ZPOOL_PROP_DELEGATION, &spa->spa_delegation);
2818 spa_prop_find(spa, ZPOOL_PROP_FAILUREMODE, &spa->spa_failmode);
2819 spa_prop_find(spa, ZPOOL_PROP_AUTOEXPAND, &spa->spa_autoexpand);
2820 spa_prop_find(spa, ZPOOL_PROP_DEDUPDITTO,
2821 &spa->spa_dedup_ditto);
2822
2823 spa->spa_autoreplace = (autoreplace != 0);
2824 }
2825
2826 /*
2827 * If the 'autoreplace' property is set, then post a resource notifying
2828 * the ZFS DE that it should not issue any faults for unopenable
2829 * devices. We also iterate over the vdevs, and post a sysevent for any
2830 * unopenable vdevs so that the normal autoreplace handler can take
2831 * over.
2832 */
2833 if (spa->spa_autoreplace && state != SPA_LOAD_TRYIMPORT) {
2834 spa_check_removed(spa->spa_root_vdev);
2835 /*
2836 * For the import case, this is done in spa_import(), because
2837 * at this point we're using the spare definitions from
2838 * the MOS config, not necessarily from the userland config.
2839 */
2840 if (state != SPA_LOAD_IMPORT) {
2841 spa_aux_check_removed(&spa->spa_spares);
2842 spa_aux_check_removed(&spa->spa_l2cache);
2843 }
2844 }
2845
2846 /*
2847 * Load the vdev state for all toplevel vdevs.
2848 */
2849 vdev_load(rvd);
2850
2851 /*
2852 * Propagate the leaf DTLs we just loaded all the way up the tree.
2853 */
2854 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
2855 vdev_dtl_reassess(rvd, 0, 0, B_FALSE);
2856 spa_config_exit(spa, SCL_ALL, FTAG);
2857
2858 /*
2859 * Load the DDTs (dedup tables).
2860 */
2861 error = ddt_load(spa);
2862 if (error != 0)
2863 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2864
2865 spa_update_dspace(spa);
2866
2867 /*
2868 * Validate the config, using the MOS config to fill in any
2869 * information which might be missing. If we fail to validate
2870 * the config then declare the pool unfit for use. If we're
2871 * assembling a pool from a split, the log is not transferred
2872 * over.
2873 */
2874 if (type != SPA_IMPORT_ASSEMBLE) {
2875 nvlist_t *nvconfig;
2876
2877 if (load_nvlist(spa, spa->spa_config_object, &nvconfig) != 0)
2878 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2879
2880 if (!spa_config_valid(spa, nvconfig)) {
2881 nvlist_free(nvconfig);
2882 return (spa_vdev_err(rvd, VDEV_AUX_BAD_GUID_SUM,
2883 ENXIO));
2884 }
2885 nvlist_free(nvconfig);
2886
2887 /*
2888 * Now that we've validated the config, check the state of the
2889 * root vdev. If it can't be opened, it indicates one or
2890 * more toplevel vdevs are faulted.
2891 */
2892 if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN)
2893 return (SET_ERROR(ENXIO));
2894
2895 if (spa_writeable(spa) && spa_check_logs(spa)) {
2896 *ereport = FM_EREPORT_ZFS_LOG_REPLAY;
2897 return (spa_vdev_err(rvd, VDEV_AUX_BAD_LOG, ENXIO));
2898 }
2899 }
2900
2901 if (missing_feat_write) {
2902 ASSERT(state == SPA_LOAD_TRYIMPORT);
2903
2904 /*
2905 * At this point, we know that we can open the pool in
2906 * read-only mode but not read-write mode. We now have enough
2907 * information and can return to userland.
2908 */
2909 return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT, ENOTSUP));
2910 }
2911
2912 /*
2913 * We've successfully opened the pool, verify that we're ready
2914 * to start pushing transactions.
2915 */
2916 if (state != SPA_LOAD_TRYIMPORT) {
2917 if (error = spa_load_verify(spa))
2918 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA,
2919 error));
2920 }
2921
2922 if (spa_writeable(spa) && (state == SPA_LOAD_RECOVER ||
2923 spa->spa_load_max_txg == UINT64_MAX)) {
2924 dmu_tx_t *tx;
2925 int need_update = B_FALSE;
2926 dsl_pool_t *dp = spa_get_dsl(spa);
2927
2928 ASSERT(state != SPA_LOAD_TRYIMPORT);
2929
2930 /*
2931 * Claim log blocks that haven't been committed yet.
2932 * This must all happen in a single txg.
2933 * Note: spa_claim_max_txg is updated by spa_claim_notify(),
2934 * invoked from zil_claim_log_block()'s i/o done callback.
2935 * Price of rollback is that we abandon the log.
2936 */
2937 spa->spa_claiming = B_TRUE;
2938
2939 tx = dmu_tx_create_assigned(dp, spa_first_txg(spa));
2940 (void) dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
2941 zil_claim, tx, DS_FIND_CHILDREN);
2942 dmu_tx_commit(tx);
2943
2944 spa->spa_claiming = B_FALSE;
2945
2946 spa_set_log_state(spa, SPA_LOG_GOOD);
2947 spa->spa_sync_on = B_TRUE;
2948 txg_sync_start(spa->spa_dsl_pool);
2949
2950 /*
2951 * Wait for all claims to sync. We sync up to the highest
2952 * claimed log block birth time so that claimed log blocks
2953 * don't appear to be from the future. spa_claim_max_txg
2954 * will have been set for us by either zil_check_log_chain()
2955 * (invoked from spa_check_logs()) or zil_claim() above.
2956 */
2957 txg_wait_synced(spa->spa_dsl_pool, spa->spa_claim_max_txg);
2958
2959 /*
2960 * If the config cache is stale, or we have uninitialized
2961 * metaslabs (see spa_vdev_add()), then update the config.
2962 *
2963 * If this is a verbatim import, trust the current
2964 * in-core spa_config and update the disk labels.
2965 */
2966 if (config_cache_txg != spa->spa_config_txg ||
2967 state == SPA_LOAD_IMPORT ||
2968 state == SPA_LOAD_RECOVER ||
2969 (spa->spa_import_flags & ZFS_IMPORT_VERBATIM))
2970 need_update = B_TRUE;
2971
2972 for (int c = 0; c < rvd->vdev_children; c++)
2973 if (rvd->vdev_child[c]->vdev_ms_array == 0)
2974 need_update = B_TRUE;
2975
2976 /*
2977		 * Update the config cache asynchronously in case we're the
2978 * root pool, in which case the config cache isn't writable yet.
2979 */
2980 if (need_update)
2981 spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
2982
2983 /*
2984 * Check all DTLs to see if anything needs resilvering.
2985 */
2986 if (!dsl_scan_resilvering(spa->spa_dsl_pool) &&
2987 vdev_resilver_needed(rvd, NULL, NULL))
2988 spa_async_request(spa, SPA_ASYNC_RESILVER);
2989
2990 /*
2991 * Log the fact that we booted up (so that we can detect if
2992 * we rebooted in the middle of an operation).
2993 */
2994 spa_history_log_version(spa, "open");
2995
2996 /*
2997 * Delete any inconsistent datasets.
2998 */
2999 (void) dmu_objset_find(spa_name(spa),
3000 dsl_destroy_inconsistent, NULL, DS_FIND_CHILDREN);
3001
3002 /*
3003 * Clean up any stale temporary dataset userrefs.
3004 */
3005 dsl_pool_clean_tmp_userrefs(spa->spa_dsl_pool);
3006 }
3007
3008 return (0);
3009}
3010
3011static int
3012spa_load_retry(spa_t *spa, spa_load_state_t state, int mosconfig)
3013{
3014 int mode = spa->spa_mode;
3015
3016 spa_unload(spa);
3017 spa_deactivate(spa);
3018
3019 spa->spa_load_max_txg = spa->spa_uberblock.ub_txg - 1;
3020
3021 spa_activate(spa, mode);
3022 spa_async_suspend(spa);
3023
3024 return (spa_load(spa, state, SPA_IMPORT_EXISTING, mosconfig));
3025}
3026
3027/*
3028 * If spa_load() fails, this function will try loading prior txgs. If
3029 * 'state' is SPA_LOAD_RECOVER and one of these loads succeeds the pool
3030 * will be rewound to that txg. If 'state' is not SPA_LOAD_RECOVER this
3031 * function will not rewind the pool and will return the same error as
3032 * spa_load().
3033 */
3034static int
3035spa_load_best(spa_t *spa, spa_load_state_t state, int mosconfig,
3036 uint64_t max_request, int rewind_flags)
3037{
3038 nvlist_t *loadinfo = NULL;
3039 nvlist_t *config = NULL;
3040 int load_error, rewind_error;
3041 uint64_t safe_rewind_txg;
3042 uint64_t min_txg;
3043
3044 if (spa->spa_load_txg && state == SPA_LOAD_RECOVER) {
3045 spa->spa_load_max_txg = spa->spa_load_txg;
3046 spa_set_log_state(spa, SPA_LOG_CLEAR);
3047 } else {
3048 spa->spa_load_max_txg = max_request;
3049 if (max_request != UINT64_MAX)
3050 spa->spa_extreme_rewind = B_TRUE;
3051 }
3052
3053 load_error = rewind_error = spa_load(spa, state, SPA_IMPORT_EXISTING,
3054 mosconfig);
3055 if (load_error == 0)
3056 return (0);
3057
3058 if (spa->spa_root_vdev != NULL)
3059 config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
3060
3061 spa->spa_last_ubsync_txg = spa->spa_uberblock.ub_txg;
3062 spa->spa_last_ubsync_txg_ts = spa->spa_uberblock.ub_timestamp;
3063
3064 if (rewind_flags & ZPOOL_NEVER_REWIND) {
3065 nvlist_free(config);
3066 return (load_error);
3067 }
3068
3069 if (state == SPA_LOAD_RECOVER) {
3070 /* Price of rolling back is discarding txgs, including log */
3071 spa_set_log_state(spa, SPA_LOG_CLEAR);
3072 } else {
3073 /*
3074 * If we aren't rolling back save the load info from our first
3075 * import attempt so that we can restore it after attempting
3076 * to rewind.
3077 */
3078 loadinfo = spa->spa_load_info;
3079 spa->spa_load_info = fnvlist_alloc();
3080 }
3081
3082 spa->spa_load_max_txg = spa->spa_last_ubsync_txg;
3083 safe_rewind_txg = spa->spa_last_ubsync_txg - TXG_DEFER_SIZE;
3084 min_txg = (rewind_flags & ZPOOL_EXTREME_REWIND) ?
3085 TXG_INITIAL : safe_rewind_txg;
3086
3087 /*
3088 * Continue as long as we're finding errors, we're still within
3089 * the acceptable rewind range, and we're still finding uberblocks
3090 */
3091 while (rewind_error && spa->spa_uberblock.ub_txg >= min_txg &&
3092 spa->spa_uberblock.ub_txg <= spa->spa_load_max_txg) {
3093 if (spa->spa_load_max_txg < safe_rewind_txg)
3094 spa->spa_extreme_rewind = B_TRUE;
3095 rewind_error = spa_load_retry(spa, state, mosconfig);
3096 }
3097
3098 spa->spa_extreme_rewind = B_FALSE;
3099 spa->spa_load_max_txg = UINT64_MAX;
3100
3101 if (config && (rewind_error || state != SPA_LOAD_RECOVER))
3102 spa_config_set(spa, config);
3103
3104 if (state == SPA_LOAD_RECOVER) {
3105 ASSERT3P(loadinfo, ==, NULL);
3106 return (rewind_error);
3107 } else {
3108 /* Store the rewind info as part of the initial load info */
3109 fnvlist_add_nvlist(loadinfo, ZPOOL_CONFIG_REWIND_INFO,
3110 spa->spa_load_info);
3111
3112 /* Restore the initial load info */
3113 fnvlist_free(spa->spa_load_info);
3114 spa->spa_load_info = loadinfo;
3115
3116 return (load_error);
3117 }
3118}
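
Illustrative aside (not part of spa.c): a minimal sketch of how the rewind window used by the loop above is derived. The type rewind_window_t and compute_rewind_window() are hypothetical names; TXG_DEFER_SIZE, TXG_INITIAL and ZPOOL_EXTREME_REWIND are the constants spa_load_best() actually uses. Each failed retry then lowers spa_load_max_txg to one below the uberblock that was tried (see spa_load_retry() above), so the walk moves backwards through this window.

typedef struct rewind_window {
	uint64_t rw_max_txg;	/* newest txg we will try to load */
	uint64_t rw_min_txg;	/* oldest txg we are allowed to rewind to */
} rewind_window_t;

static void
compute_rewind_window(uint64_t last_ubsync_txg, int rewind_flags,
    rewind_window_t *rw)
{
	/* Start the search from the last successfully synced uberblock. */
	rw->rw_max_txg = last_ubsync_txg;

	/*
	 * A normal rewind stays within TXG_DEFER_SIZE txgs of the last
	 * sync; an extreme rewind may go all the way back to TXG_INITIAL.
	 */
	rw->rw_min_txg = (rewind_flags & ZPOOL_EXTREME_REWIND) ?
	    TXG_INITIAL : last_ubsync_txg - TXG_DEFER_SIZE;
}
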
3119
3120/*
3121 * Pool Open/Import
3122 *
3123 * The import case is identical to an open except that the configuration is sent
3124 * down from userland, instead of grabbed from the configuration cache. For the
3125 * case of an open, the pool configuration will exist in the
3126 * POOL_STATE_UNINITIALIZED state.
3127 *
3128 * The stats information (gen/count/ustats) is used to gather vdev statistics at
3129 * the same time we open the pool, without having to keep the spa_t around in
3130 * some ambiguous state.
3131 */
3132static int
3133spa_open_common(const char *pool, spa_t **spapp, void *tag, nvlist_t *nvpolicy,
3134 nvlist_t **config)
3135{
3136 spa_t *spa;
3137 spa_load_state_t state = SPA_LOAD_OPEN;
3138 int error;
3139 int locked = B_FALSE;
3140 int firstopen = B_FALSE;
3141
3142 *spapp = NULL;
3143
3144 /*
3145 * As disgusting as this is, we need to support recursive calls to this
3146 * function because dsl_dir_open() is called during spa_load(), and ends
3147 * up calling spa_open() again. The real fix is to figure out how to
3148 * avoid dsl_dir_open() calling this in the first place.
3149 */
3150 if (mutex_owner(&spa_namespace_lock) != curthread) {
3151 mutex_enter(&spa_namespace_lock);
3152 locked = B_TRUE;
3153 }
3154
3155 if ((spa = spa_lookup(pool)) == NULL) {
3156 if (locked)
3157 mutex_exit(&spa_namespace_lock);
3158 return (SET_ERROR(ENOENT));
3159 }
3160
3161 if (spa->spa_state == POOL_STATE_UNINITIALIZED) {
3162 zpool_rewind_policy_t policy;
3163
3164 firstopen = B_TRUE;
3165
3166 zpool_get_rewind_policy(nvpolicy ? nvpolicy : spa->spa_config,
3167 &policy);
3168 if (policy.zrp_request & ZPOOL_DO_REWIND)
3169 state = SPA_LOAD_RECOVER;
3170
3171 spa_activate(spa, spa_mode_global);
3172
3173 if (state != SPA_LOAD_RECOVER)
3174 spa->spa_last_ubsync_txg = spa->spa_load_txg = 0;
3175
3176 error = spa_load_best(spa, state, B_FALSE, policy.zrp_txg,
3177 policy.zrp_request);
3178
3179 if (error == EBADF) {
3180 /*
3181 * If vdev_validate() returns failure (indicated by
3182			 * EBADF), it means that one of the vdevs indicates
3183 * that the pool has been exported or destroyed. If
3184 * this is the case, the config cache is out of sync and
3185 * we should remove the pool from the namespace.
3186 */
3187 spa_unload(spa);
3188 spa_deactivate(spa);
3189 spa_config_sync(spa, B_TRUE, B_TRUE);
3190 spa_remove(spa);
3191 if (locked)
3192 mutex_exit(&spa_namespace_lock);
3193 return (SET_ERROR(ENOENT));
3194 }
3195
3196 if (error) {
3197 /*
3198 * We can't open the pool, but we still have useful
3199 * information: the state of each vdev after the
3200 * attempted vdev_open(). Return this to the user.
3201 */
3202 if (config != NULL && spa->spa_config) {
3203 VERIFY(nvlist_dup(spa->spa_config, config,
3204 KM_SLEEP) == 0);
3205 VERIFY(nvlist_add_nvlist(*config,
3206 ZPOOL_CONFIG_LOAD_INFO,
3207 spa->spa_load_info) == 0);
3208 }
3209 spa_unload(spa);
3210 spa_deactivate(spa);
3211 spa->spa_last_open_failed = error;
3212 if (locked)
3213 mutex_exit(&spa_namespace_lock);
3214 *spapp = NULL;
3215 return (error);
3216 }
3217 }
3218
3219 spa_open_ref(spa, tag);
3220
3221 if (config != NULL)
3222 *config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
3223
3224 /*
3225 * If we've recovered the pool, pass back any information we
3226 * gathered while doing the load.
3227 */
3228 if (state == SPA_LOAD_RECOVER) {
3229 VERIFY(nvlist_add_nvlist(*config, ZPOOL_CONFIG_LOAD_INFO,
3230 spa->spa_load_info) == 0);
3231 }
3232
3233 if (locked) {
3234 spa->spa_last_open_failed = 0;
3235 spa->spa_last_ubsync_txg = 0;
3236 spa->spa_load_txg = 0;
3237 mutex_exit(&spa_namespace_lock);
3238#ifdef __FreeBSD__
3239#ifdef _KERNEL
3240 if (firstopen)
3241 zvol_create_minors(spa->spa_name);
3242#endif
3243#endif
3244 }
3245
3246 *spapp = spa;
3247
3248 return (0);
3249}
3250
3251int
3252spa_open_rewind(const char *name, spa_t **spapp, void *tag, nvlist_t *policy,
3253 nvlist_t **config)
3254{
3255 return (spa_open_common(name, spapp, tag, policy, config));
3256}
3257
3258int
3259spa_open(const char *name, spa_t **spapp, void *tag)
3260{
3261 return (spa_open_common(name, spapp, tag, NULL, NULL));
3262}
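
Illustrative aside (not part of spa.c): the open/close discipline callers of spa_open() follow. The function name is hypothetical; FTAG is the usual per-caller tag, and spa_guid() is only used here to give the body something to do.

static int
example_lookup_pool_guid(const char *name, uint64_t *guidp)
{
	spa_t *spa;
	int error;

	if ((error = spa_open(name, &spa, FTAG)) != 0)
		return (error);

	/* The hold taken by spa_open() keeps the pool from being exported. */
	*guidp = spa_guid(spa);

	spa_close(spa, FTAG);
	return (0);
}
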
3263
3264/*
3265 * Look up the given spa_t, incrementing the inject count in the process,
3266 * preventing it from being exported or destroyed.
3267 */
3268spa_t *
3269spa_inject_addref(char *name)
3270{
3271 spa_t *spa;
3272
3273 mutex_enter(&spa_namespace_lock);
3274 if ((spa = spa_lookup(name)) == NULL) {
3275 mutex_exit(&spa_namespace_lock);
3276 return (NULL);
3277 }
3278 spa->spa_inject_ref++;
3279 mutex_exit(&spa_namespace_lock);
3280
3281 return (spa);
3282}
3283
3284void
3285spa_inject_delref(spa_t *spa)
3286{
3287 mutex_enter(&spa_namespace_lock);
3288 spa->spa_inject_ref--;
3289 mutex_exit(&spa_namespace_lock);
3290}
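
Illustrative aside (not part of spa.c): how a fault-injection consumer is expected to bracket its work with the two helpers above; the function name and the elided injection step are hypothetical.

static int
example_with_injection_hold(char *poolname)
{
	spa_t *spa;

	if ((spa = spa_inject_addref(poolname)) == NULL)
		return (SET_ERROR(ENOENT));

	/*
	 * While spa_inject_ref is non-zero, spa_export_common() will refuse
	 * to export or destroy the pool (a reset is still permitted).
	 */
	/* ... register or perform the injection here ... */

	spa_inject_delref(spa);
	return (0);
}
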
3291
3292/*
3293 * Add spares device information to the nvlist.
3294 */
3295static void
3296spa_add_spares(spa_t *spa, nvlist_t *config)
3297{
3298 nvlist_t **spares;
3299 uint_t i, nspares;
3300 nvlist_t *nvroot;
3301 uint64_t guid;
3302 vdev_stat_t *vs;
3303 uint_t vsc;
3304 uint64_t pool;
3305
3306 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
3307
3308 if (spa->spa_spares.sav_count == 0)
3309 return;
3310
3311 VERIFY(nvlist_lookup_nvlist(config,
3312 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
3313 VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
3314 ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);
3315 if (nspares != 0) {
3316 VERIFY(nvlist_add_nvlist_array(nvroot,
3317 ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
3318 VERIFY(nvlist_lookup_nvlist_array(nvroot,
3319 ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);
3320
3321 /*
3322 * Go through and find any spares which have since been
3323		 * repurposed as active spares. If this is the case, update
3324 * their status appropriately.
3325 */
3326 for (i = 0; i < nspares; i++) {
3327 VERIFY(nvlist_lookup_uint64(spares[i],
3328 ZPOOL_CONFIG_GUID, &guid) == 0);
3329 if (spa_spare_exists(guid, &pool, NULL) &&
3330 pool != 0ULL) {
3331 VERIFY(nvlist_lookup_uint64_array(
3332 spares[i], ZPOOL_CONFIG_VDEV_STATS,
3333 (uint64_t **)&vs, &vsc) == 0);
3334 vs->vs_state = VDEV_STATE_CANT_OPEN;
3335 vs->vs_aux = VDEV_AUX_SPARED;
3336 }
3337 }
3338 }
3339}
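
Illustrative aside (not part of spa.c): the consumer side of the layout spa_add_spares() produces. The function name is hypothetical; the nvlist keys are the ones used above.

static void
example_walk_spares(nvlist_t *config)
{
	nvlist_t *nvroot, **spares;
	uint_t i, nspares;
	uint64_t guid;

	VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);

	/* The spares array is absent when the pool has no hot spares. */
	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    &spares, &nspares) != 0)
		return;

	for (i = 0; i < nspares; i++) {
		VERIFY(nvlist_lookup_uint64(spares[i], ZPOOL_CONFIG_GUID,
		    &guid) == 0);
		/* ... report guid, its vdev stats, etc. ... */
	}
}
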
3340
3341/*
3342 * Add l2cache device information to the nvlist, including vdev stats.
3343 */
3344static void
3345spa_add_l2cache(spa_t *spa, nvlist_t *config)
3346{
3347 nvlist_t **l2cache;
3348 uint_t i, j, nl2cache;
3349 nvlist_t *nvroot;
3350 uint64_t guid;
3351 vdev_t *vd;
3352 vdev_stat_t *vs;
3353 uint_t vsc;
3354
3355 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
3356
3357 if (spa->spa_l2cache.sav_count == 0)
3358 return;
3359
3360 VERIFY(nvlist_lookup_nvlist(config,
3361 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
3362 VERIFY(nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config,
3363 ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
3364 if (nl2cache != 0) {
3365 VERIFY(nvlist_add_nvlist_array(nvroot,
3366 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
3367 VERIFY(nvlist_lookup_nvlist_array(nvroot,
3368 ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
3369
3370 /*
3371 * Update level 2 cache device stats.
3372 */
3373
3374 for (i = 0; i < nl2cache; i++) {
3375 VERIFY(nvlist_lookup_uint64(l2cache[i],
3376 ZPOOL_CONFIG_GUID, &guid) == 0);
3377
3378 vd = NULL;
3379 for (j = 0; j < spa->spa_l2cache.sav_count; j++) {
3380 if (guid ==
3381 spa->spa_l2cache.sav_vdevs[j]->vdev_guid) {
3382 vd = spa->spa_l2cache.sav_vdevs[j];
3383 break;
3384 }
3385 }
3386 ASSERT(vd != NULL);
3387
3388 VERIFY(nvlist_lookup_uint64_array(l2cache[i],
3389 ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc)
3390 == 0);
3391 vdev_get_stats(vd, vs);
3392 }
3393 }
3394}
3395
3396static void
3397spa_add_feature_stats(spa_t *spa, nvlist_t *config)
3398{
3399 nvlist_t *features;
3400 zap_cursor_t zc;
3401 zap_attribute_t za;
3402
3403 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
3404 VERIFY(nvlist_alloc(&features, NV_UNIQUE_NAME, KM_SLEEP) == 0);
3405
3406 /* We may be unable to read features if pool is suspended. */
3407 if (spa_suspended(spa))
3408 goto out;
3409
3410 if (spa->spa_feat_for_read_obj != 0) {
3411 for (zap_cursor_init(&zc, spa->spa_meta_objset,
3412 spa->spa_feat_for_read_obj);
3413 zap_cursor_retrieve(&zc, &za) == 0;
3414 zap_cursor_advance(&zc)) {
3415 ASSERT(za.za_integer_length == sizeof (uint64_t) &&
3416 za.za_num_integers == 1);
3417 VERIFY3U(0, ==, nvlist_add_uint64(features, za.za_name,
3418 za.za_first_integer));
3419 }
3420 zap_cursor_fini(&zc);
3421 }
3422
3423 if (spa->spa_feat_for_write_obj != 0) {
3424 for (zap_cursor_init(&zc, spa->spa_meta_objset,
3425 spa->spa_feat_for_write_obj);
3426 zap_cursor_retrieve(&zc, &za) == 0;
3427 zap_cursor_advance(&zc)) {
3428 ASSERT(za.za_integer_length == sizeof (uint64_t) &&
3429 za.za_num_integers == 1);
3430 VERIFY3U(0, ==, nvlist_add_uint64(features, za.za_name,
3431 za.za_first_integer));
3432 }
3433 zap_cursor_fini(&zc);
3434 }
3435
3436out:
3437 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_FEATURE_STATS,
3438 features) == 0);
3439 nvlist_free(features);
3440}
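
Illustrative aside (not part of spa.c): the ZPOOL_CONFIG_FEATURE_STATS nvlist built above maps feature GUID strings to reference counts; a consumer might walk it as sketched below (hypothetical function name).

static void
example_walk_feature_stats(nvlist_t *config)
{
	nvlist_t *features;
	nvpair_t *elem;
	uint64_t refcount;

	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_FEATURE_STATS,
	    &features) != 0)
		return;

	for (elem = nvlist_next_nvpair(features, NULL); elem != NULL;
	    elem = nvlist_next_nvpair(features, elem)) {
		VERIFY(nvpair_value_uint64(elem, &refcount) == 0);
		/* nvpair_name(elem) is the feature guid; refcount is its use count. */
	}
}
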
3441
3442int
3443spa_get_stats(const char *name, nvlist_t **config,
3444 char *altroot, size_t buflen)
3445{
3446 int error;
3447 spa_t *spa;
3448
3449 *config = NULL;
3450 error = spa_open_common(name, &spa, FTAG, NULL, config);
3451
3452 if (spa != NULL) {
3453 /*
3454 * This still leaves a window of inconsistency where the spares
3455 * or l2cache devices could change and the config would be
3456 * self-inconsistent.
3457 */
3458 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
3459
3460 if (*config != NULL) {
3461 uint64_t loadtimes[2];
3462
3463 loadtimes[0] = spa->spa_loaded_ts.tv_sec;
3464 loadtimes[1] = spa->spa_loaded_ts.tv_nsec;
3465 VERIFY(nvlist_add_uint64_array(*config,
3466 ZPOOL_CONFIG_LOADED_TIME, loadtimes, 2) == 0);
3467
3468 VERIFY(nvlist_add_uint64(*config,
3469 ZPOOL_CONFIG_ERRCOUNT,
3470 spa_get_errlog_size(spa)) == 0);
3471
3472 if (spa_suspended(spa))
3473 VERIFY(nvlist_add_uint64(*config,
3474 ZPOOL_CONFIG_SUSPENDED,
3475 spa->spa_failmode) == 0);
3476
3477 spa_add_spares(spa, *config);
3478 spa_add_l2cache(spa, *config);
3479 spa_add_feature_stats(spa, *config);
3480 }
3481 }
3482
3483 /*
3484 * We want to get the alternate root even for faulted pools, so we cheat
3485 * and call spa_lookup() directly.
3486 */
3487 if (altroot) {
3488 if (spa == NULL) {
3489 mutex_enter(&spa_namespace_lock);
3490 spa = spa_lookup(name);
3491 if (spa)
3492 spa_altroot(spa, altroot, buflen);
3493 else
3494 altroot[0] = '\0';
3495 spa = NULL;
3496 mutex_exit(&spa_namespace_lock);
3497 } else {
3498 spa_altroot(spa, altroot, buflen);
3499 }
3500 }
3501
3502 if (spa != NULL) {
3503 spa_config_exit(spa, SCL_CONFIG, FTAG);
3504 spa_close(spa, FTAG);
3505 }
3506
3507 return (error);
3508}
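
Illustrative aside (not part of spa.c): how a caller might consume the config returned by spa_get_stats(); the error-count key is the one added above and the function name is a placeholder. Note that a config can be returned even when the open itself failed.

static int
example_report_errcount(const char *pool)
{
	nvlist_t *config;
	uint64_t errcount = 0;
	int error;

	error = spa_get_stats(pool, &config, NULL, 0);
	if (config != NULL) {
		(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_ERRCOUNT,
		    &errcount);
		/* ... report errcount ... */
		nvlist_free(config);
	}
	return (error);
}
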
3509
3510/*
3511 * Validate that the auxiliary device array is well formed. We must have an
3512 * array of nvlists, each of which describes a valid leaf vdev. If this is an
3513 * import (mode is VDEV_ALLOC_SPARE), then we allow corrupted spares to be
3514 * specified, as long as they are well-formed.
3515 */
3516static int
3517spa_validate_aux_devs(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode,
3518 spa_aux_vdev_t *sav, const char *config, uint64_t version,
3519 vdev_labeltype_t label)
3520{
3521 nvlist_t **dev;
3522 uint_t i, ndev;
3523 vdev_t *vd;
3524 int error;
3525
3526 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
3527
3528 /*
3529 * It's acceptable to have no devs specified.
3530 */
3531 if (nvlist_lookup_nvlist_array(nvroot, config, &dev, &ndev) != 0)
3532 return (0);
3533
3534 if (ndev == 0)
3535 return (SET_ERROR(EINVAL));
3536
3537 /*
3538 * Make sure the pool is formatted with a version that supports this
3539 * device type.
3540 */
3541 if (spa_version(spa) < version)
3542 return (SET_ERROR(ENOTSUP));
3543
3544 /*
3545 * Set the pending device list so we correctly handle device in-use
3546 * checking.
3547 */
3548 sav->sav_pending = dev;
3549 sav->sav_npending = ndev;
3550
3551 for (i = 0; i < ndev; i++) {
3552 if ((error = spa_config_parse(spa, &vd, dev[i], NULL, 0,
3553 mode)) != 0)
3554 goto out;
3555
3556 if (!vd->vdev_ops->vdev_op_leaf) {
3557 vdev_free(vd);
3558 error = SET_ERROR(EINVAL);
3559 goto out;
3560 }
3561
3562 /*
3563 * The L2ARC currently only supports disk devices in
3564 * kernel context. For user-level testing, we allow it.
3565 */
3566#ifdef _KERNEL
3567 if ((strcmp(config, ZPOOL_CONFIG_L2CACHE) == 0) &&
3568 strcmp(vd->vdev_ops->vdev_op_type, VDEV_TYPE_DISK) != 0) {
3569 error = SET_ERROR(ENOTBLK);
3570 vdev_free(vd);
3571 goto out;
3572 }
3573#endif
3574 vd->vdev_top = vd;
3575
3576 if ((error = vdev_open(vd)) == 0 &&
3577 (error = vdev_label_init(vd, crtxg, label)) == 0) {
3578 VERIFY(nvlist_add_uint64(dev[i], ZPOOL_CONFIG_GUID,
3579 vd->vdev_guid) == 0);
3580 }
3581
3582 vdev_free(vd);
3583
3584 if (error &&
3585 (mode != VDEV_ALLOC_SPARE && mode != VDEV_ALLOC_L2CACHE))
3586 goto out;
3587 else
3588 error = 0;
3589 }
3590
3591out:
3592 sav->sav_pending = NULL;
3593 sav->sav_npending = 0;
3594 return (error);
3595}
3596
3597static int
3598spa_validate_aux(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode)
3599{
3600 int error;
3601
3602 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
3603
3604 if ((error = spa_validate_aux_devs(spa, nvroot, crtxg, mode,
3605 &spa->spa_spares, ZPOOL_CONFIG_SPARES, SPA_VERSION_SPARES,
3606 VDEV_LABEL_SPARE)) != 0) {
3607 return (error);
3608 }
3609
3610 return (spa_validate_aux_devs(spa, nvroot, crtxg, mode,
3611 &spa->spa_l2cache, ZPOOL_CONFIG_L2CACHE, SPA_VERSION_L2CACHE,
3612 VDEV_LABEL_L2CACHE));
3613}
3614
3615static void
3616spa_set_aux_vdevs(spa_aux_vdev_t *sav, nvlist_t **devs, int ndevs,
3617 const char *config)
3618{
3619 int i;
3620
3621 if (sav->sav_config != NULL) {
3622 nvlist_t **olddevs;
3623 uint_t oldndevs;
3624 nvlist_t **newdevs;
3625
3626 /*
3627		 * Generate a new dev list by concatenating the new devices
3628		 * with the current dev list.
3629 */
3630 VERIFY(nvlist_lookup_nvlist_array(sav->sav_config, config,
3631 &olddevs, &oldndevs) == 0);
3632
3633 newdevs = kmem_alloc(sizeof (void *) *
3634 (ndevs + oldndevs), KM_SLEEP);
3635 for (i = 0; i < oldndevs; i++)
3636 VERIFY(nvlist_dup(olddevs[i], &newdevs[i],
3637 KM_SLEEP) == 0);
3638 for (i = 0; i < ndevs; i++)
3639 VERIFY(nvlist_dup(devs[i], &newdevs[i + oldndevs],
3640 KM_SLEEP) == 0);
3641
3642 VERIFY(nvlist_remove(sav->sav_config, config,
3643 DATA_TYPE_NVLIST_ARRAY) == 0);
3644
3645 VERIFY(nvlist_add_nvlist_array(sav->sav_config,
3646 config, newdevs, ndevs + oldndevs) == 0);
3647 for (i = 0; i < oldndevs + ndevs; i++)
3648 nvlist_free(newdevs[i]);
3649 kmem_free(newdevs, (oldndevs + ndevs) * sizeof (void *));
3650 } else {
3651 /*
3652 * Generate a new dev list.
3653 */
3654 VERIFY(nvlist_alloc(&sav->sav_config, NV_UNIQUE_NAME,
3655 KM_SLEEP) == 0);
3656 VERIFY(nvlist_add_nvlist_array(sav->sav_config, config,
3657 devs, ndevs) == 0);
3658 }
3659}
3660
3661/*
3662 * Stop and drop level 2 ARC devices
3663 */
3664void
3665spa_l2cache_drop(spa_t *spa)
3666{
3667 vdev_t *vd;
3668 int i;
3669 spa_aux_vdev_t *sav = &spa->spa_l2cache;
3670
3671 for (i = 0; i < sav->sav_count; i++) {
3672 uint64_t pool;
3673
3674 vd = sav->sav_vdevs[i];
3675 ASSERT(vd != NULL);
3676
3677 if (spa_l2cache_exists(vd->vdev_guid, &pool) &&
3678 pool != 0ULL && l2arc_vdev_present(vd))
3679 l2arc_remove_vdev(vd);
3680 }
3681}
3682
3683/*
3684 * Pool Creation
3685 */
3686int
3687spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props,
3688 nvlist_t *zplprops)
3689{
3690 spa_t *spa;
3691 char *altroot = NULL;
3692 vdev_t *rvd;
3693 dsl_pool_t *dp;
3694 dmu_tx_t *tx;
3695 int error = 0;
3696 uint64_t txg = TXG_INITIAL;
3697 nvlist_t **spares, **l2cache;
3698 uint_t nspares, nl2cache;
3699 uint64_t version, obj;
3700 boolean_t has_features;
3701
3702 /*
3703 * If this pool already exists, return failure.
3704 */
3705 mutex_enter(&spa_namespace_lock);
3706 if (spa_lookup(pool) != NULL) {
3707 mutex_exit(&spa_namespace_lock);
3708 return (SET_ERROR(EEXIST));
3709 }
3710
3711 /*
3712 * Allocate a new spa_t structure.
3713 */
3714 (void) nvlist_lookup_string(props,
3715 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
3716 spa = spa_add(pool, NULL, altroot);
3717 spa_activate(spa, spa_mode_global);
3718
3719 if (props && (error = spa_prop_validate(spa, props))) {
3720 spa_deactivate(spa);
3721 spa_remove(spa);
3722 mutex_exit(&spa_namespace_lock);
3723 return (error);
3724 }
3725
3726 has_features = B_FALSE;
3727 for (nvpair_t *elem = nvlist_next_nvpair(props, NULL);
3728 elem != NULL; elem = nvlist_next_nvpair(props, elem)) {
3729 if (zpool_prop_feature(nvpair_name(elem)))
3730 has_features = B_TRUE;
3731 }
3732
3733 if (has_features || nvlist_lookup_uint64(props,
3734 zpool_prop_to_name(ZPOOL_PROP_VERSION), &version) != 0) {
3735 version = SPA_VERSION;
3736 }
3737 ASSERT(SPA_VERSION_IS_SUPPORTED(version));
3738
3739 spa->spa_first_txg = txg;
3740 spa->spa_uberblock.ub_txg = txg - 1;
3741 spa->spa_uberblock.ub_version = version;
3742 spa->spa_ubsync = spa->spa_uberblock;
3743 spa->spa_load_state = SPA_LOAD_CREATE;
3744
3745 /*
3746 * Create "The Godfather" zio to hold all async IOs
3747 */
3748 spa->spa_async_zio_root = kmem_alloc(max_ncpus * sizeof (void *),
3749 KM_SLEEP);
3750 for (int i = 0; i < max_ncpus; i++) {
3751 spa->spa_async_zio_root[i] = zio_root(spa, NULL, NULL,
3752 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
3753 ZIO_FLAG_GODFATHER);
3754 }
3755
3756 /*
3757 * Create the root vdev.
3758 */
3759 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
3760
3761 error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, VDEV_ALLOC_ADD);
3762
3763 ASSERT(error != 0 || rvd != NULL);
3764 ASSERT(error != 0 || spa->spa_root_vdev == rvd);
3765
3766 if (error == 0 && !zfs_allocatable_devs(nvroot))
3767 error = SET_ERROR(EINVAL);
3768
3769 if (error == 0 &&
3770 (error = vdev_create(rvd, txg, B_FALSE)) == 0 &&
3771 (error = spa_validate_aux(spa, nvroot, txg,
3772 VDEV_ALLOC_ADD)) == 0) {
3773 for (int c = 0; c < rvd->vdev_children; c++) {
3774 vdev_ashift_optimize(rvd->vdev_child[c]);
3775 vdev_metaslab_set_size(rvd->vdev_child[c]);
3776 vdev_expand(rvd->vdev_child[c], txg);
3777 }
3778 }
3779
3780 spa_config_exit(spa, SCL_ALL, FTAG);
3781
3782 if (error != 0) {
3783 spa_unload(spa);
3784 spa_deactivate(spa);
3785 spa_remove(spa);
3786 mutex_exit(&spa_namespace_lock);
3787 return (error);
3788 }
3789
3790 /*
3791 * Get the list of spares, if specified.
3792 */
3793 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
3794 &spares, &nspares) == 0) {
3795 VERIFY(nvlist_alloc(&spa->spa_spares.sav_config, NV_UNIQUE_NAME,
3796 KM_SLEEP) == 0);
3797 VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
3798 ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
3799 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
3800 spa_load_spares(spa);
3801 spa_config_exit(spa, SCL_ALL, FTAG);
3802 spa->spa_spares.sav_sync = B_TRUE;
3803 }
3804
3805 /*
3806 * Get the list of level 2 cache devices, if specified.
3807 */
3808 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
3809 &l2cache, &nl2cache) == 0) {
3810 VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config,
3811 NV_UNIQUE_NAME, KM_SLEEP) == 0);
3812 VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config,
3813 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
3814 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
3815 spa_load_l2cache(spa);
3816 spa_config_exit(spa, SCL_ALL, FTAG);
3817 spa->spa_l2cache.sav_sync = B_TRUE;
3818 }
3819
3820 spa->spa_is_initializing = B_TRUE;
3821 spa->spa_dsl_pool = dp = dsl_pool_create(spa, zplprops, txg);
3822 spa->spa_meta_objset = dp->dp_meta_objset;
3823 spa->spa_is_initializing = B_FALSE;
3824
3825 /*
3826 * Create DDTs (dedup tables).
3827 */
3828 ddt_create(spa);
3829
3830 spa_update_dspace(spa);
3831
3832 tx = dmu_tx_create_assigned(dp, txg);
3833
3834 /*
3835 * Create the pool config object.
3836 */
3837 spa->spa_config_object = dmu_object_alloc(spa->spa_meta_objset,
3838 DMU_OT_PACKED_NVLIST, SPA_CONFIG_BLOCKSIZE,
3839 DMU_OT_PACKED_NVLIST_SIZE, sizeof (uint64_t), tx);
3840
3841 if (zap_add(spa->spa_meta_objset,
3842 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CONFIG,
3843 sizeof (uint64_t), 1, &spa->spa_config_object, tx) != 0) {
3844 cmn_err(CE_PANIC, "failed to add pool config");
3845 }
3846
3847 if (spa_version(spa) >= SPA_VERSION_FEATURES)
3848 spa_feature_create_zap_objects(spa, tx);
3849
3850 if (zap_add(spa->spa_meta_objset,
3851 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CREATION_VERSION,
3852 sizeof (uint64_t), 1, &version, tx) != 0) {
3853 cmn_err(CE_PANIC, "failed to add pool version");
3854 }
3855
3856 /* Newly created pools with the right version are always deflated. */
3857 if (version >= SPA_VERSION_RAIDZ_DEFLATE) {
3858 spa->spa_deflate = TRUE;
3859 if (zap_add(spa->spa_meta_objset,
3860 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
3861 sizeof (uint64_t), 1, &spa->spa_deflate, tx) != 0) {
3862 cmn_err(CE_PANIC, "failed to add deflate");
3863 }
3864 }
3865
3866 /*
3867 * Create the deferred-free bpobj. Turn off compression
3868 * because sync-to-convergence takes longer if the blocksize
3869 * keeps changing.
3870 */
3871 obj = bpobj_alloc(spa->spa_meta_objset, 1 << 14, tx);
3872 dmu_object_set_compress(spa->spa_meta_objset, obj,
3873 ZIO_COMPRESS_OFF, tx);
3874 if (zap_add(spa->spa_meta_objset,
3875 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SYNC_BPOBJ,
3876 sizeof (uint64_t), 1, &obj, tx) != 0) {
3877 cmn_err(CE_PANIC, "failed to add bpobj");
3878 }
3879 VERIFY3U(0, ==, bpobj_open(&spa->spa_deferred_bpobj,
3880 spa->spa_meta_objset, obj));
3881
3882 /*
3883 * Create the pool's history object.
3884 */
3885 if (version >= SPA_VERSION_ZPOOL_HISTORY)
3886 spa_history_create_obj(spa, tx);
3887
3888 /*
3889 * Generate some random noise for salted checksums to operate on.
3890 */
3891 (void) random_get_pseudo_bytes(spa->spa_cksum_salt.zcs_bytes,
3892 sizeof (spa->spa_cksum_salt.zcs_bytes));
3893
3894 /*
3895 * Set pool properties.
3896 */
3897 spa->spa_bootfs = zpool_prop_default_numeric(ZPOOL_PROP_BOOTFS);
3898 spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION);
3899 spa->spa_failmode = zpool_prop_default_numeric(ZPOOL_PROP_FAILUREMODE);
3900 spa->spa_autoexpand = zpool_prop_default_numeric(ZPOOL_PROP_AUTOEXPAND);
3901
3902 if (props != NULL) {
3903 spa_configfile_set(spa, props, B_FALSE);
3904 spa_sync_props(props, tx);
3905 }
3906
3907 dmu_tx_commit(tx);
3908
3909 spa->spa_sync_on = B_TRUE;
3910 txg_sync_start(spa->spa_dsl_pool);
3911
3912 /*
3913 * We explicitly wait for the first transaction to complete so that our
3914 * bean counters are appropriately updated.
3915 */
3916 txg_wait_synced(spa->spa_dsl_pool, txg);
3917
3918 spa_config_sync(spa, B_FALSE, B_TRUE);
3919 spa_event_notify(spa, NULL, ESC_ZFS_POOL_CREATE);
3920
3921 spa_history_log_version(spa, "create");
3922
3923 /*
3924 * Don't count references from objsets that are already closed
3925 * and are making their way through the eviction process.
3926 */
3927 spa_evicting_os_wait(spa);
3928 spa->spa_minref = refcount_count(&spa->spa_refcount);
3929 spa->spa_load_state = SPA_LOAD_NONE;
3930
3931 mutex_exit(&spa_namespace_lock);
3932
3933 return (0);
3934}
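
Illustrative aside (not part of spa.c): a hedged sketch of the 'nvroot' shape spa_create() expects, here for a single-disk pool. The function name and device path are placeholders; the nvlist keys and vdev type strings are the ones this file already uses, plus ZPOOL_CONFIG_PATH from sys/fs/zfs.h. The resulting nvroot is what gets passed as the second argument to spa_create().

static nvlist_t *
example_make_single_disk_nvroot(const char *devpath)
{
	nvlist_t *nvroot, *child;

	VERIFY(nvlist_alloc(&child, NV_UNIQUE_NAME, KM_SLEEP) == 0);
	VERIFY(nvlist_add_string(child, ZPOOL_CONFIG_TYPE,
	    VDEV_TYPE_DISK) == 0);
	VERIFY(nvlist_add_string(child, ZPOOL_CONFIG_PATH, devpath) == 0);

	VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0);
	VERIFY(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
	    VDEV_TYPE_ROOT) == 0);
	/* nvlist_add_nvlist_array() copies the child, so free it afterwards. */
	VERIFY(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &child, 1) == 0);
	nvlist_free(child);

	return (nvroot);
}
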
3935
3936#ifdef _KERNEL
3937#ifdef illumos
3938/*
3939 * Get the root pool information from the root disk, then import the root pool
3940 * during the system boot up time.
3941 */
3942extern int vdev_disk_read_rootlabel(char *, char *, nvlist_t **);
3943
3944static nvlist_t *
3945spa_generate_rootconf(char *devpath, char *devid, uint64_t *guid)
3946{
3947 nvlist_t *config;
3948 nvlist_t *nvtop, *nvroot;
3949 uint64_t pgid;
3950
3951 if (vdev_disk_read_rootlabel(devpath, devid, &config) != 0)
3952 return (NULL);
3953
3954 /*
3955 * Add this top-level vdev to the child array.
3956 */
3957 VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
3958 &nvtop) == 0);
3959 VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
3960 &pgid) == 0);
3961 VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID, guid) == 0);
3962
3963 /*
3964 * Put this pool's top-level vdevs into a root vdev.
3965 */
3966 VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0);
3967 VERIFY(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
3968 VDEV_TYPE_ROOT) == 0);
3969 VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) == 0);
3970 VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, pgid) == 0);
3971 VERIFY(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
3972 &nvtop, 1) == 0);
3973
3974 /*
3975 * Replace the existing vdev_tree with the new root vdev in
3976 * this pool's configuration (remove the old, add the new).
3977 */
3978 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, nvroot) == 0);
3979 nvlist_free(nvroot);
3980 return (config);
3981}
3982
3983/*
3984 * Walk the vdev tree and see if we can find a device with "better"
3985 * configuration. A configuration is "better" if the label on that
3986 * device has a more recent txg.
3987 */
3988static void
3989spa_alt_rootvdev(vdev_t *vd, vdev_t **avd, uint64_t *txg)
3990{
3991 for (int c = 0; c < vd->vdev_children; c++)
3992 spa_alt_rootvdev(vd->vdev_child[c], avd, txg);
3993
3994 if (vd->vdev_ops->vdev_op_leaf) {
3995 nvlist_t *label;
3996 uint64_t label_txg;
3997
3998 if (vdev_disk_read_rootlabel(vd->vdev_physpath, vd->vdev_devid,
3999 &label) != 0)
4000 return;
4001
4002 VERIFY(nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_TXG,
4003 &label_txg) == 0);
4004
4005 /*
4006 * Do we have a better boot device?
4007 */
4008 if (label_txg > *txg) {
4009 *txg = label_txg;
4010 *avd = vd;
4011 }
4012 nvlist_free(label);
4013 }
4014}
4015
4016/*
4017 * Import a root pool.
4018 *
4019 * For x86, devpath_list will consist of the devid and/or physpath name of
4020 * the vdev (e.g. "id1,sd@SSEAGATE..." or "/pci@1f,0/ide@d/disk@0,0:a").
4021 * The GRUB "findroot" command will return the vdev we should boot.
4022 *
4023 * For SPARC, devpath_list consists of the physpath name of the booting device,
4024 * whether the root pool is a single-device pool or a mirrored pool.
4025 * e.g.
4026 * "/pci@1f,0/ide@d/disk@0,0:a"
4027 */
4028int
4029spa_import_rootpool(char *devpath, char *devid)
4030{
4031 spa_t *spa;
4032 vdev_t *rvd, *bvd, *avd = NULL;
4033 nvlist_t *config, *nvtop;
4034 uint64_t guid, txg;
4035 char *pname;
4036 int error;
4037
4038 /*
4039 * Read the label from the boot device and generate a configuration.
4040 */
4041 config = spa_generate_rootconf(devpath, devid, &guid);
4042#if defined(_OBP) && defined(_KERNEL)
4043 if (config == NULL) {
4044 if (strstr(devpath, "/iscsi/ssd") != NULL) {
4045 /* iscsi boot */
4046 get_iscsi_bootpath_phy(devpath);
4047 config = spa_generate_rootconf(devpath, devid, &guid);
4048 }
4049 }
4050#endif
4051 if (config == NULL) {
4052 cmn_err(CE_NOTE, "Cannot read the pool label from '%s'",
4053 devpath);
4054 return (SET_ERROR(EIO));
4055 }
4056
4057 VERIFY(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
4058 &pname) == 0);
4059 VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG, &txg) == 0);
4060
4061 mutex_enter(&spa_namespace_lock);
4062 if ((spa = spa_lookup(pname)) != NULL) {
4063 /*
4064 * Remove the existing root pool from the namespace so that we
4065 * can replace it with the correct config we just read in.
4066 */
4067 spa_remove(spa);
4068 }
4069
4070 spa = spa_add(pname, config, NULL);
4071 spa->spa_is_root = B_TRUE;
4072 spa->spa_import_flags = ZFS_IMPORT_VERBATIM;
4073
4074 /*
4075 * Build up a vdev tree based on the boot device's label config.
4076 */
4077 VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
4078 &nvtop) == 0);
4079 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
4080 error = spa_config_parse(spa, &rvd, nvtop, NULL, 0,
4081 VDEV_ALLOC_ROOTPOOL);
4082 spa_config_exit(spa, SCL_ALL, FTAG);
4083 if (error) {
4084 mutex_exit(&spa_namespace_lock);
4085 nvlist_free(config);
4086 cmn_err(CE_NOTE, "Can not parse the config for pool '%s'",
4087 pname);
4088 return (error);
4089 }
4090
4091 /*
4092 * Get the boot vdev.
4093 */
4094 if ((bvd = vdev_lookup_by_guid(rvd, guid)) == NULL) {
4095 cmn_err(CE_NOTE, "Can not find the boot vdev for guid %llu",
4096 (u_longlong_t)guid);
4097 error = SET_ERROR(ENOENT);
4098 goto out;
4099 }
4100
4101 /*
4102 * Determine if there is a better boot device.
4103 */
4104 avd = bvd;
4105 spa_alt_rootvdev(rvd, &avd, &txg);
4106 if (avd != bvd) {
4107 cmn_err(CE_NOTE, "The boot device is 'degraded'. Please "
4108 "try booting from '%s'", avd->vdev_path);
4109 error = SET_ERROR(EINVAL);
4110 goto out;
4111 }
4112
4113 /*
4114 * If the boot device is part of a spare vdev then ensure that
4115 * we're booting off the active spare.
4116 */
4117 if (bvd->vdev_parent->vdev_ops == &vdev_spare_ops &&
4118 !bvd->vdev_isspare) {
4119 cmn_err(CE_NOTE, "The boot device is currently spared. Please "
4120 "try booting from '%s'",
4121 bvd->vdev_parent->
4122 vdev_child[bvd->vdev_parent->vdev_children - 1]->vdev_path);
4123 error = SET_ERROR(EINVAL);
4124 goto out;
4125 }
4126
4127 error = 0;
4128out:
4129 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
4130 vdev_free(rvd);
4131 spa_config_exit(spa, SCL_ALL, FTAG);
4132 mutex_exit(&spa_namespace_lock);
4133
4134 nvlist_free(config);
4135 return (error);
4136}
4137
4138#else /* !illumos */
4139
4140extern int vdev_geom_read_pool_label(const char *name, nvlist_t ***configs,
4141 uint64_t *count);
4142
4143static nvlist_t *
4144spa_generate_rootconf(const char *name)
4145{
4146 nvlist_t **configs, **tops;
4147 nvlist_t *config;
4148 nvlist_t *best_cfg, *nvtop, *nvroot;
4149 uint64_t *holes;
4150 uint64_t best_txg;
4151 uint64_t nchildren;
4152 uint64_t pgid;
4153 uint64_t count;
4154 uint64_t i;
4155 uint_t nholes;
4156
4157 if (vdev_geom_read_pool_label(name, &configs, &count) != 0)
4158 return (NULL);
4159
4160 ASSERT3U(count, !=, 0);
4161 best_txg = 0;
4162 for (i = 0; i < count; i++) {
4163 uint64_t txg;
4164
4165 VERIFY(nvlist_lookup_uint64(configs[i], ZPOOL_CONFIG_POOL_TXG,
4166 &txg) == 0);
4167 if (txg > best_txg) {
4168 best_txg = txg;
4169 best_cfg = configs[i];
4170 }
4171 }
4172
4173 nchildren = 1;
4174 nvlist_lookup_uint64(best_cfg, ZPOOL_CONFIG_VDEV_CHILDREN, &nchildren);
4175 holes = NULL;
4176 nvlist_lookup_uint64_array(best_cfg, ZPOOL_CONFIG_HOLE_ARRAY,
4177 &holes, &nholes);
4178
4179 tops = kmem_zalloc(nchildren * sizeof(void *), KM_SLEEP);
4180 for (i = 0; i < nchildren; i++) {
4181 if (i >= count)
4182 break;
4183 if (configs[i] == NULL)
4184 continue;
4185 VERIFY(nvlist_lookup_nvlist(configs[i], ZPOOL_CONFIG_VDEV_TREE,
4186 &nvtop) == 0);
4187 nvlist_dup(nvtop, &tops[i], KM_SLEEP);
4188 }
4189 for (i = 0; holes != NULL && i < nholes; i++) {
4190 if (i >= nchildren)
4191 continue;
4192 if (tops[holes[i]] != NULL)
4193 continue;
4194 nvlist_alloc(&tops[holes[i]], NV_UNIQUE_NAME, KM_SLEEP);
4195 VERIFY(nvlist_add_string(tops[holes[i]], ZPOOL_CONFIG_TYPE,
4196 VDEV_TYPE_HOLE) == 0);
4197 VERIFY(nvlist_add_uint64(tops[holes[i]], ZPOOL_CONFIG_ID,
4198 holes[i]) == 0);
4199 VERIFY(nvlist_add_uint64(tops[holes[i]], ZPOOL_CONFIG_GUID,
4200 0) == 0);
4201 }
4202 for (i = 0; i < nchildren; i++) {
4203 if (tops[i] != NULL)
4204 continue;
4205 nvlist_alloc(&tops[i], NV_UNIQUE_NAME, KM_SLEEP);
4206 VERIFY(nvlist_add_string(tops[i], ZPOOL_CONFIG_TYPE,
4207 VDEV_TYPE_MISSING) == 0);
4208 VERIFY(nvlist_add_uint64(tops[i], ZPOOL_CONFIG_ID,
4209 i) == 0);
4210 VERIFY(nvlist_add_uint64(tops[i], ZPOOL_CONFIG_GUID,
4211 0) == 0);
4212 }
4213
4214 /*
4215 * Create pool config based on the best vdev config.
4216 */
4217 nvlist_dup(best_cfg, &config, KM_SLEEP);
4218
4219 /*
4220 * Put this pool's top-level vdevs into a root vdev.
4221 */
4222 VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
4223 &pgid) == 0);
4224 VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0);
4225 VERIFY(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
4226 VDEV_TYPE_ROOT) == 0);
4227 VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) == 0);
4228 VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, pgid) == 0);
4229 VERIFY(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
4230 tops, nchildren) == 0);
4231
4232 /*
4233 * Replace the existing vdev_tree with the new root vdev in
4234 * this pool's configuration (remove the old, add the new).
4235 */
4236 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, nvroot) == 0);
4237
4238 /*
4239 * Drop vdev config elements that should not be present at pool level.
4240 */
4241 nvlist_remove(config, ZPOOL_CONFIG_GUID, DATA_TYPE_UINT64);
4242 nvlist_remove(config, ZPOOL_CONFIG_TOP_GUID, DATA_TYPE_UINT64);
4243
4244 for (i = 0; i < count; i++)
4245 nvlist_free(configs[i]);
4246 kmem_free(configs, count * sizeof(void *));
4247 for (i = 0; i < nchildren; i++)
4248 nvlist_free(tops[i]);
4249 kmem_free(tops, nchildren * sizeof(void *));
4250 nvlist_free(nvroot);
4251 return (config);
4252}
4253
4254int
4255spa_import_rootpool(const char *name)
4256{
4257 spa_t *spa;
4258 vdev_t *rvd, *bvd, *avd = NULL;
4259 nvlist_t *config, *nvtop;
4260 uint64_t txg;
4261 char *pname;
4262 int error;
4263
4264 /*
4265 * Read the label from the boot device and generate a configuration.
4266 */
4267 config = spa_generate_rootconf(name);
4268
4269 mutex_enter(&spa_namespace_lock);
4270 if (config != NULL) {
4271 VERIFY(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
4272 &pname) == 0 && strcmp(name, pname) == 0);
4273 VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG, &txg)
4274 == 0);
4275
4276 if ((spa = spa_lookup(pname)) != NULL) {
4277 /*
4278 * Remove the existing root pool from the namespace so
4279 * that we can replace it with the correct config
4280 * we just read in.
4281 */
4282 spa_remove(spa);
4283 }
4284 spa = spa_add(pname, config, NULL);
4285
4286 /*
4287 * Set spa_ubsync.ub_version as it can be used in vdev_alloc()
4288 * via spa_version().
4289 */
4290 if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
4291 &spa->spa_ubsync.ub_version) != 0)
4292 spa->spa_ubsync.ub_version = SPA_VERSION_INITIAL;
4293 } else if ((spa = spa_lookup(name)) == NULL) {
4294 mutex_exit(&spa_namespace_lock);
4295 nvlist_free(config);
4296 cmn_err(CE_NOTE, "Cannot find the pool label for '%s'",
4297 name);
4298 return (EIO);
4299 } else {
4300 VERIFY(nvlist_dup(spa->spa_config, &config, KM_SLEEP) == 0);
4301 }
4302 spa->spa_is_root = B_TRUE;
4303 spa->spa_import_flags = ZFS_IMPORT_VERBATIM;
4304
4305 /*
4306 * Build up a vdev tree based on the boot device's label config.
4307 */
4308 VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
4309 &nvtop) == 0);
4310 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
4311 error = spa_config_parse(spa, &rvd, nvtop, NULL, 0,
4312 VDEV_ALLOC_ROOTPOOL);
4313 spa_config_exit(spa, SCL_ALL, FTAG);
4314 if (error) {
4315 mutex_exit(&spa_namespace_lock);
4316 nvlist_free(config);
4317 cmn_err(CE_NOTE, "Can not parse the config for pool '%s'",
4318 pname);
4319 return (error);
4320 }
4321
4322 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
4323 vdev_free(rvd);
4324 spa_config_exit(spa, SCL_ALL, FTAG);
4325 mutex_exit(&spa_namespace_lock);
4326
4327 nvlist_free(config);
4328 return (0);
4329}
4330
4331#endif /* illumos */
4332#endif /* _KERNEL */
4333
4334/*
4335 * Import a non-root pool into the system.
4336 */
4337int
4338spa_import(const char *pool, nvlist_t *config, nvlist_t *props, uint64_t flags)
4339{
4340 spa_t *spa;
4341 char *altroot = NULL;
4342 spa_load_state_t state = SPA_LOAD_IMPORT;
4343 zpool_rewind_policy_t policy;
4344 uint64_t mode = spa_mode_global;
4345 uint64_t readonly = B_FALSE;
4346 int error;
4347 nvlist_t *nvroot;
4348 nvlist_t **spares, **l2cache;
4349 uint_t nspares, nl2cache;
4350
4351 /*
4352 * If a pool with this name exists, return failure.
4353 */
4354 mutex_enter(&spa_namespace_lock);
4355 if (spa_lookup(pool) != NULL) {
4356 mutex_exit(&spa_namespace_lock);
4357 return (SET_ERROR(EEXIST));
4358 }
4359
4360 /*
4361 * Create and initialize the spa structure.
4362 */
4363 (void) nvlist_lookup_string(props,
4364 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
4365 (void) nvlist_lookup_uint64(props,
4366 zpool_prop_to_name(ZPOOL_PROP_READONLY), &readonly);
4367 if (readonly)
4368 mode = FREAD;
4369 spa = spa_add(pool, config, altroot);
4370 spa->spa_import_flags = flags;
4371
4372 /*
4373 * Verbatim import - Take a pool and insert it into the namespace
4374 * as if it had been loaded at boot.
4375 */
4376 if (spa->spa_import_flags & ZFS_IMPORT_VERBATIM) {
4377 if (props != NULL)
4378 spa_configfile_set(spa, props, B_FALSE);
4379
4380 spa_config_sync(spa, B_FALSE, B_TRUE);
4381 spa_event_notify(spa, NULL, ESC_ZFS_POOL_IMPORT);
4382
4383 mutex_exit(&spa_namespace_lock);
4384 return (0);
4385 }
4386
4387 spa_activate(spa, mode);
4388
4389 /*
4390 * Don't start async tasks until we know everything is healthy.
4391 */
4392 spa_async_suspend(spa);
4393
4394 zpool_get_rewind_policy(config, &policy);
4395 if (policy.zrp_request & ZPOOL_DO_REWIND)
4396 state = SPA_LOAD_RECOVER;
4397
4398 /*
4399 * Pass off the heavy lifting to spa_load(). Pass TRUE for mosconfig
4400 * because the user-supplied config is actually the one to trust when
4401 * doing an import.
4402 */
4403 if (state != SPA_LOAD_RECOVER)
4404 spa->spa_last_ubsync_txg = spa->spa_load_txg = 0;
4405
4406 error = spa_load_best(spa, state, B_TRUE, policy.zrp_txg,
4407 policy.zrp_request);
4408
4409 /*
4410 * Propagate anything learned while loading the pool and pass it
4411 * back to caller (i.e. rewind info, missing devices, etc).
4412 */
4413 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_LOAD_INFO,
4414 spa->spa_load_info) == 0);
4415
4416 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
4417 /*
4418 * Toss any existing sparelist, as it doesn't have any validity
4419 * anymore, and conflicts with spa_has_spare().
4420 */
4421 if (spa->spa_spares.sav_config) {
4422 nvlist_free(spa->spa_spares.sav_config);
4423 spa->spa_spares.sav_config = NULL;
4424 spa_load_spares(spa);
4425 }
4426 if (spa->spa_l2cache.sav_config) {
4427 nvlist_free(spa->spa_l2cache.sav_config);
4428 spa->spa_l2cache.sav_config = NULL;
4429 spa_load_l2cache(spa);
4430 }
4431
4432 VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
4433 &nvroot) == 0);
4434 if (error == 0)
4435 error = spa_validate_aux(spa, nvroot, -1ULL,
4436 VDEV_ALLOC_SPARE);
4437 if (error == 0)
4438 error = spa_validate_aux(spa, nvroot, -1ULL,
4439 VDEV_ALLOC_L2CACHE);
4440 spa_config_exit(spa, SCL_ALL, FTAG);
4441
4442 if (props != NULL)
4443 spa_configfile_set(spa, props, B_FALSE);
4444
4445 if (error != 0 || (props && spa_writeable(spa) &&
4446 (error = spa_prop_set(spa, props)))) {
4447 spa_unload(spa);
4448 spa_deactivate(spa);
4449 spa_remove(spa);
4450 mutex_exit(&spa_namespace_lock);
4451 return (error);
4452 }
4453
4454 spa_async_resume(spa);
4455
4456 /*
4457 * Override any spares and level 2 cache devices as specified by
4458 * the user, as these may have correct device names/devids, etc.
4459 */
4460 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
4461 &spares, &nspares) == 0) {
4462 if (spa->spa_spares.sav_config)
4463 VERIFY(nvlist_remove(spa->spa_spares.sav_config,
4464 ZPOOL_CONFIG_SPARES, DATA_TYPE_NVLIST_ARRAY) == 0);
4465 else
4466 VERIFY(nvlist_alloc(&spa->spa_spares.sav_config,
4467 NV_UNIQUE_NAME, KM_SLEEP) == 0);
4468 VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
4469 ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
4470 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
4471 spa_load_spares(spa);
4472 spa_config_exit(spa, SCL_ALL, FTAG);
4473 spa->spa_spares.sav_sync = B_TRUE;
4474 }
4475 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
4476 &l2cache, &nl2cache) == 0) {
4477 if (spa->spa_l2cache.sav_config)
4478 VERIFY(nvlist_remove(spa->spa_l2cache.sav_config,
4479 ZPOOL_CONFIG_L2CACHE, DATA_TYPE_NVLIST_ARRAY) == 0);
4480 else
4481 VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config,
4482 NV_UNIQUE_NAME, KM_SLEEP) == 0);
4483 VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config,
4484 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
4485 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
4486 spa_load_l2cache(spa);
4487 spa_config_exit(spa, SCL_ALL, FTAG);
4488 spa->spa_l2cache.sav_sync = B_TRUE;
4489 }
4490
4491 /*
4492 * Check for any removed devices.
4493 */
4494 if (spa->spa_autoreplace) {
4495 spa_aux_check_removed(&spa->spa_spares);
4496 spa_aux_check_removed(&spa->spa_l2cache);
4497 }
4498
4499 if (spa_writeable(spa)) {
4500 /*
4501 * Update the config cache to include the newly-imported pool.
4502 */
4503 spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
4504 }
4505
4506 /*
4507 * It's possible that the pool was expanded while it was exported.
4508 * We kick off an async task to handle this for us.
4509 */
4510 spa_async_request(spa, SPA_ASYNC_AUTOEXPAND);
4511
4512 spa_history_log_version(spa, "import");
4513
4514 spa_event_notify(spa, NULL, ESC_ZFS_POOL_IMPORT);
4515
4516 mutex_exit(&spa_namespace_lock);
4517
4518#ifdef __FreeBSD__
4519#ifdef _KERNEL
4520 zvol_create_minors(pool);
4521#endif
4522#endif
4523 return (0);
4524}
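
Illustrative aside (not part of spa.c): the two import flavours from the caller's point of view. The function name is hypothetical, and 'config' is assumed to have been obtained elsewhere (e.g. from the cache file or spa_tryimport()).

static int
example_import(const char *pool, nvlist_t *config, boolean_t verbatim)
{
	uint64_t flags = verbatim ? ZFS_IMPORT_VERBATIM : 0;

	/*
	 * With ZFS_IMPORT_VERBATIM the config is inserted into the
	 * namespace as-is and spa_load_best() is skipped (see the early
	 * return above); otherwise the supplied config is validated by
	 * spa_load().
	 */
	return (spa_import(pool, config, NULL, flags));
}
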
4525
4526nvlist_t *
4527spa_tryimport(nvlist_t *tryconfig)
4528{
4529 nvlist_t *config = NULL;
4530 char *poolname;
4531 spa_t *spa;
4532 uint64_t state;
4533 int error;
4534
4535 if (nvlist_lookup_string(tryconfig, ZPOOL_CONFIG_POOL_NAME, &poolname))
4536 return (NULL);
4537
4538 if (nvlist_lookup_uint64(tryconfig, ZPOOL_CONFIG_POOL_STATE, &state))
4539 return (NULL);
4540
4541 /*
4542 * Create and initialize the spa structure.
4543 */
4544 mutex_enter(&spa_namespace_lock);
4545 spa = spa_add(TRYIMPORT_NAME, tryconfig, NULL);
4546 spa_activate(spa, FREAD);
4547
4548 /*
4549 * Pass off the heavy lifting to spa_load().
4550 * Pass TRUE for mosconfig because the user-supplied config
4551 * is actually the one to trust when doing an import.
4552 */
4553 error = spa_load(spa, SPA_LOAD_TRYIMPORT, SPA_IMPORT_EXISTING, B_TRUE);
4554
4555 /*
4556 * If 'tryconfig' was at least parsable, return the current config.
4557 */
4558 if (spa->spa_root_vdev != NULL) {
4559 config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
4560 VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME,
4561 poolname) == 0);
4562 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE,
4563 state) == 0);
4564 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_TIMESTAMP,
4565 spa->spa_uberblock.ub_timestamp) == 0);
4566 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_LOAD_INFO,
4567 spa->spa_load_info) == 0);
4568
4569 /*
4570 * If the bootfs property exists on this pool then we
4571 * copy it out so that external consumers can tell which
4572 * pools are bootable.
4573 */
4574 if ((!error || error == EEXIST) && spa->spa_bootfs) {
4575 char *tmpname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
4576
4577 /*
4578 * We have to play games with the name since the
4579 * pool was opened as TRYIMPORT_NAME.
4580 */
4581 if (dsl_dsobj_to_dsname(spa_name(spa),
4582 spa->spa_bootfs, tmpname) == 0) {
4583 char *cp;
4584 char *dsname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
4585
4586 cp = strchr(tmpname, '/');
4587 if (cp == NULL) {
4588 (void) strlcpy(dsname, tmpname,
4589 MAXPATHLEN);
4590 } else {
4591 (void) snprintf(dsname, MAXPATHLEN,
4592 "%s/%s", poolname, ++cp);
4593 }
4594 VERIFY(nvlist_add_string(config,
4595 ZPOOL_CONFIG_BOOTFS, dsname) == 0);
4596 kmem_free(dsname, MAXPATHLEN);
4597 }
4598 kmem_free(tmpname, MAXPATHLEN);
4599 }
4600
4601 /*
4602 * Add the list of hot spares and level 2 cache devices.
4603 */
4604 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
4605 spa_add_spares(spa, config);
4606 spa_add_l2cache(spa, config);
4607 spa_config_exit(spa, SCL_CONFIG, FTAG);
4608 }
4609
4610 spa_unload(spa);
4611 spa_deactivate(spa);
4612 spa_remove(spa);
4613 mutex_exit(&spa_namespace_lock);
4614
4615 return (config);
4616}
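
Illustrative aside (not part of spa.c): a consumer-side sketch of spa_tryimport(). The caller owns the returned nvlist and typically inspects it before deciding whether to import for real; the function name is hypothetical.

static boolean_t
example_pool_is_importable(nvlist_t *tryconfig)
{
	nvlist_t *config;
	char *name;

	if ((config = spa_tryimport(tryconfig)) == NULL)
		return (B_FALSE);

	/* The generated config describes the pool as it would be imported. */
	VERIFY(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
	    &name) == 0);
	/* ... inspect name, ZPOOL_CONFIG_LOAD_INFO, ZPOOL_CONFIG_BOOTFS, ... */

	nvlist_free(config);
	return (B_TRUE);
}
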
4617
4618/*
4619 * Pool export/destroy
4620 *
4621 * The act of destroying or exporting a pool is very simple. We make sure there
4622 * is no more pending I/O and any references to the pool are gone. Then, we
4623 * update the pool state and sync all the labels to disk, removing the
4624 * configuration from the cache afterwards. If the 'hardforce' flag is set, then
4625 * we don't sync the labels or remove the configuration cache.
4626 */
4627static int
4628spa_export_common(char *pool, int new_state, nvlist_t **oldconfig,
4629 boolean_t force, boolean_t hardforce)
4630{
4631 spa_t *spa;
4632
4633 if (oldconfig)
4634 *oldconfig = NULL;
4635
4636 if (!(spa_mode_global & FWRITE))
4637 return (SET_ERROR(EROFS));
4638
4639 mutex_enter(&spa_namespace_lock);
4640 if ((spa = spa_lookup(pool)) == NULL) {
4641 mutex_exit(&spa_namespace_lock);
4642 return (SET_ERROR(ENOENT));
4643 }
4644
4645 /*
4646 * Put a hold on the pool, drop the namespace lock, stop async tasks,
4647 * reacquire the namespace lock, and see if we can export.
4648 */
4649 spa_open_ref(spa, FTAG);
4650 mutex_exit(&spa_namespace_lock);
4651 spa_async_suspend(spa);
4652 mutex_enter(&spa_namespace_lock);
4653 spa_close(spa, FTAG);
4654
4655 /*
4656 * The pool will be in core if it's openable,
4657 * in which case we can modify its state.
4658 */
4659 if (spa->spa_state != POOL_STATE_UNINITIALIZED && spa->spa_sync_on) {
4660 /*
4661 * Objsets may be open only because they're dirty, so we
4662 * have to force it to sync before checking spa_refcnt.
4663 */
4664 txg_wait_synced(spa->spa_dsl_pool, 0);
4665 spa_evicting_os_wait(spa);
4666
4667 /*
4668 * A pool cannot be exported or destroyed if there are active
4669 * references. If we are resetting a pool, allow references by
4670 * fault injection handlers.
4671 */
4672 if (!spa_refcount_zero(spa) ||
4673 (spa->spa_inject_ref != 0 &&
4674 new_state != POOL_STATE_UNINITIALIZED)) {
4675 spa_async_resume(spa);
4676 mutex_exit(&spa_namespace_lock);
4677 return (SET_ERROR(EBUSY));
4678 }
4679
4680 /*
4681 * A pool cannot be exported if it has an active shared spare.
4682		 * This is to prevent other pools from stealing the active spare
4683		 * from an exported pool. If the user really wants to, the
4684		 * pool can be forcibly exported.
4685 */
4686 if (!force && new_state == POOL_STATE_EXPORTED &&
4687 spa_has_active_shared_spare(spa)) {
4688 spa_async_resume(spa);
4689 mutex_exit(&spa_namespace_lock);
4690 return (SET_ERROR(EXDEV));
4691 }
4692
4693 /*
4694 * We want this to be reflected on every label,
4695 * so mark them all dirty. spa_unload() will do the
4696 * final sync that pushes these changes out.
4697 */
4698 if (new_state != POOL_STATE_UNINITIALIZED && !hardforce) {
4699 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
4700 spa->spa_state = new_state;
4701 spa->spa_final_txg = spa_last_synced_txg(spa) +
4702 TXG_DEFER_SIZE + 1;
4703 vdev_config_dirty(spa->spa_root_vdev);
4704 spa_config_exit(spa, SCL_ALL, FTAG);
4705 }
4706 }
4707
4708 spa_event_notify(spa, NULL, ESC_ZFS_POOL_DESTROY);
4709
4710 if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
4711 spa_unload(spa);
4712 spa_deactivate(spa);
4713 }
4714
4715 if (oldconfig && spa->spa_config)
4716 VERIFY(nvlist_dup(spa->spa_config, oldconfig, 0) == 0);
4717
4718 if (new_state != POOL_STATE_UNINITIALIZED) {
4719 if (!hardforce)
4720 spa_config_sync(spa, B_TRUE, B_TRUE);
4721 spa_remove(spa);
4722 }
4723 mutex_exit(&spa_namespace_lock);
4724
4725 return (0);
4726}
4727
4728/*
4729 * Destroy a storage pool.
4730 */
4731int
4732spa_destroy(char *pool)
4733{
4734 return (spa_export_common(pool, POOL_STATE_DESTROYED, NULL,
4735 B_FALSE, B_FALSE));
4736}
4737
4738/*
4739 * Export a storage pool.
4740 */
4741int
4742spa_export(char *pool, nvlist_t **oldconfig, boolean_t force,
4743 boolean_t hardforce)
4744{
4745 return (spa_export_common(pool, POOL_STATE_EXPORTED, oldconfig,
4746 force, hardforce));
4747}
4748
4749/*
4750 * Similar to spa_export(), this unloads the spa_t without actually removing it
4751 * from the namespace in any way.
4752 */
4753int
4754spa_reset(char *pool)
4755{
4756 return (spa_export_common(pool, POOL_STATE_UNINITIALIZED, NULL,
4757 B_FALSE, B_FALSE));
4758}
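
Illustrative aside (not part of spa.c): a usage sketch for the wrappers above, which differ only in the pool state they hand to spa_export_common(). The function name is hypothetical.

static int
example_export_and_inspect(char *pool)
{
	nvlist_t *oldconfig = NULL;
	int error;

	/* A non-forced export fails with EBUSY or EXDEV if the pool is busy. */
	error = spa_export(pool, &oldconfig, B_FALSE, B_FALSE);
	if (error == 0 && oldconfig != NULL) {
		/* oldconfig is a copy of the pool's final config; we free it. */
		nvlist_free(oldconfig);
	}
	return (error);
}
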
4759
4760/*
4761 * ==========================================================================
4762 * Device manipulation
4763 * ==========================================================================
4764 */
4765
4766/*
4767 * Add a device to a storage pool.
4768 */
4769int
4770spa_vdev_add(spa_t *spa, nvlist_t *nvroot)
4771{
4772 uint64_t txg, id;
4773 int error;
4774 vdev_t *rvd = spa->spa_root_vdev;
4775 vdev_t *vd, *tvd;
4776 nvlist_t **spares, **l2cache;
4777 uint_t nspares, nl2cache;
4778
4779 ASSERT(spa_writeable(spa));
4780
4781 txg = spa_vdev_enter(spa);
4782
4783 if ((error = spa_config_parse(spa, &vd, nvroot, NULL, 0,
4784 VDEV_ALLOC_ADD)) != 0)
4785 return (spa_vdev_exit(spa, NULL, txg, error));
4786
4787 spa->spa_pending_vdev = vd; /* spa_vdev_exit() will clear this */
4788
4789 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, &spares,
4790 &nspares) != 0)
4791 nspares = 0;
4792
4793 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, &l2cache,
4794 &nl2cache) != 0)
4795 nl2cache = 0;
4796
4797 if (vd->vdev_children == 0 && nspares == 0 && nl2cache == 0)
4798 return (spa_vdev_exit(spa, vd, txg, EINVAL));
4799
4800 if (vd->vdev_children != 0 &&
4801 (error = vdev_create(vd, txg, B_FALSE)) != 0)
4802 return (spa_vdev_exit(spa, vd, txg, error));
4803
4804 /*
4805 * We must validate the spares and l2cache devices after checking the
4806 * children. Otherwise, vdev_inuse() will blindly overwrite the spare.
4807 */
4808 if ((error = spa_validate_aux(spa, nvroot, txg, VDEV_ALLOC_ADD)) != 0)
4809 return (spa_vdev_exit(spa, vd, txg, error));
4810
4811 /*
4812 * Transfer each new top-level vdev from vd to rvd.
4813 */
4814 for (int c = 0; c < vd->vdev_children; c++) {
4815
4816 /*
4817 * Set the vdev id to the first hole, if one exists.
4818 */
4819 for (id = 0; id < rvd->vdev_children; id++) {
4820 if (rvd->vdev_child[id]->vdev_ishole) {
4821 vdev_free(rvd->vdev_child[id]);
4822 break;
4823 }
4824 }
4825 tvd = vd->vdev_child[c];
4826 vdev_remove_child(vd, tvd);
4827 tvd->vdev_id = id;
4828 vdev_add_child(rvd, tvd);
4829 vdev_config_dirty(tvd);
4830 }
4831
4832 if (nspares != 0) {
4833 spa_set_aux_vdevs(&spa->spa_spares, spares, nspares,
4834 ZPOOL_CONFIG_SPARES);
4835 spa_load_spares(spa);
4836 spa->spa_spares.sav_sync = B_TRUE;
4837 }
4838
4839 if (nl2cache != 0) {
4840 spa_set_aux_vdevs(&spa->spa_l2cache, l2cache, nl2cache,
4841 ZPOOL_CONFIG_L2CACHE);
4842 spa_load_l2cache(spa);
4843 spa->spa_l2cache.sav_sync = B_TRUE;
4844 }
4845
4846 /*
4847 * We have to be careful when adding new vdevs to an existing pool.
4848 * If other threads start allocating from these vdevs before we
4849 * sync the config cache, and we lose power, then upon reboot we may
4850 * fail to open the pool because there are DVAs that the config cache
4851 * can't translate. Therefore, we first add the vdevs without
4852 * initializing metaslabs; sync the config cache (via spa_vdev_exit());
4853 * and then let spa_config_update() initialize the new metaslabs.
4854 *
4855 * spa_load() checks for added-but-not-initialized vdevs, so that
4856 * if we lose power at any point in this sequence, the remaining
4857 * steps will be completed the next time we load the pool.
4858 */
4859 (void) spa_vdev_exit(spa, vd, txg, 0);
4860
4861 mutex_enter(&spa_namespace_lock);
4862 spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
4863 spa_event_notify(spa, NULL, ESC_ZFS_VDEV_ADD);
4864 mutex_exit(&spa_namespace_lock);
4865
4866 return (0);
4867}
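
/*
 * Illustrative sketch only (added for clarity, not part of the original
 * source): a caller such as the vdev-add ioctl path is assumed to look
 * roughly like
 *
 *	if ((error = spa_open(poolname, &spa, FTAG)) == 0) {
 *		error = spa_vdev_add(spa, nvroot);
 *		spa_close(spa, FTAG);
 *	}
 *
 * where 'poolname' and 'nvroot' stand in for the user-supplied pool name and
 * the parsed vdev tree.
 */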
4868
4869/*
4870 * Attach a device to a mirror. The arguments are the path to any device
4871 * in the mirror, and the nvroot for the new device. If the path specifies
4872 * a device that is not mirrored, we automatically insert the mirror vdev.
4873 *
4874 * If 'replacing' is specified, the new device is intended to replace the
4875 * existing device; in this case the two devices are made into their own
4876 * mirror using the 'replacing' vdev, which is functionally identical to
4877 * the mirror vdev (it actually reuses all the same ops) but has a few
4878 * extra rules: you can't attach to it after it's been created, and upon
4879 * completion of resilvering, the first disk (the one being replaced)
4880 * is automatically detached.
4881 */
4882int
4883spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot, int replacing)
4884{
4885 uint64_t txg, dtl_max_txg;
4886 vdev_t *rvd = spa->spa_root_vdev;
4887 vdev_t *oldvd, *newvd, *newrootvd, *pvd, *tvd;
4888 vdev_ops_t *pvops;
4889 char *oldvdpath, *newvdpath;
4890 int newvd_isspare;
4891 int error;
4892
4893 ASSERT(spa_writeable(spa));
4894
4895 txg = spa_vdev_enter(spa);
4896
4897 oldvd = spa_lookup_by_guid(spa, guid, B_FALSE);
4898
4899 if (oldvd == NULL)
4900 return (spa_vdev_exit(spa, NULL, txg, ENODEV));
4901
4902 if (!oldvd->vdev_ops->vdev_op_leaf)
4903 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
4904
4905 pvd = oldvd->vdev_parent;
4906
4907 if ((error = spa_config_parse(spa, &newrootvd, nvroot, NULL, 0,
4908 VDEV_ALLOC_ATTACH)) != 0)
4909 return (spa_vdev_exit(spa, NULL, txg, EINVAL));
4910
4911 if (newrootvd->vdev_children != 1)
4912 return (spa_vdev_exit(spa, newrootvd, txg, EINVAL));
4913
4914 newvd = newrootvd->vdev_child[0];
4915
4916 if (!newvd->vdev_ops->vdev_op_leaf)
4917 return (spa_vdev_exit(spa, newrootvd, txg, EINVAL));
4918
4919 if ((error = vdev_create(newrootvd, txg, replacing)) != 0)
4920 return (spa_vdev_exit(spa, newrootvd, txg, error));
4921
4922 /*
4923 * Spares can't replace logs
4924 */
4925 if (oldvd->vdev_top->vdev_islog && newvd->vdev_isspare)
4926 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
4927
4928 if (!replacing) {
4929 /*
4930 * For attach, the only allowable parent is a mirror or the root
4931 * vdev.
4932 */
4933 if (pvd->vdev_ops != &vdev_mirror_ops &&
4934 pvd->vdev_ops != &vdev_root_ops)
4935 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
4936
4937 pvops = &vdev_mirror_ops;
4938 } else {
4939 /*
4940 * Active hot spares can only be replaced by inactive hot
4941 * spares.
4942 */
4943 if (pvd->vdev_ops == &vdev_spare_ops &&
4944 oldvd->vdev_isspare &&
4945 !spa_has_spare(spa, newvd->vdev_guid))
4946 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
4947
4948 /*
4949 * If the source is a hot spare, and the parent isn't already a
4950 * spare, then we want to create a new hot spare. Otherwise, we
4951 * want to create a replacing vdev. The user is not allowed to
4952 * attach to a spared vdev child unless the 'isspare' state is
4953 * the same (spare replaces spare, non-spare replaces
4954 * non-spare).
4955 */
4956 if (pvd->vdev_ops == &vdev_replacing_ops &&
4957 spa_version(spa) < SPA_VERSION_MULTI_REPLACE) {
4958 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
4959 } else if (pvd->vdev_ops == &vdev_spare_ops &&
4960 newvd->vdev_isspare != oldvd->vdev_isspare) {
4961 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
4962 }
4963
4964 if (newvd->vdev_isspare)
4965 pvops = &vdev_spare_ops;
4966 else
4967 pvops = &vdev_replacing_ops;
4968 }
4969
4970 /*
4971 * Make sure the new device is big enough.
4972 */
4973 if (newvd->vdev_asize < vdev_get_min_asize(oldvd))
4974 return (spa_vdev_exit(spa, newrootvd, txg, EOVERFLOW));
4975
4976 /*
4977 * The new device cannot have a higher alignment requirement
4978 * than the top-level vdev.
4979 */
4980 if (newvd->vdev_ashift > oldvd->vdev_top->vdev_ashift)
4981 return (spa_vdev_exit(spa, newrootvd, txg, EDOM));
4982
4983 /*
4984 * If this is an in-place replacement, update oldvd's path and devid
4985 * to make it distinguishable from newvd, and unopenable from now on.
4986 */
4987 if (strcmp(oldvd->vdev_path, newvd->vdev_path) == 0) {
4988 spa_strfree(oldvd->vdev_path);
4989 oldvd->vdev_path = kmem_alloc(strlen(newvd->vdev_path) + 5,
4990 KM_SLEEP);
4991 (void) sprintf(oldvd->vdev_path, "%s/%s",
4992 newvd->vdev_path, "old");
4993 if (oldvd->vdev_devid != NULL) {
4994 spa_strfree(oldvd->vdev_devid);
4995 oldvd->vdev_devid = NULL;
4996 }
4997 }
4998
4999 /* mark the device being resilvered */
5000 newvd->vdev_resilver_txg = txg;
5001
5002 /*
5003 * If the parent is not a mirror, or if we're replacing, insert the new
5004 * mirror/replacing/spare vdev above oldvd.
5005 */
5006 if (pvd->vdev_ops != pvops)
5007 pvd = vdev_add_parent(oldvd, pvops);
5008
5009 ASSERT(pvd->vdev_top->vdev_parent == rvd);
5010 ASSERT(pvd->vdev_ops == pvops);
5011 ASSERT(oldvd->vdev_parent == pvd);
5012
5013 /*
5014 * Extract the new device from its root and add it to pvd.
5015 */
5016 vdev_remove_child(newrootvd, newvd);
5017 newvd->vdev_id = pvd->vdev_children;
5018 newvd->vdev_crtxg = oldvd->vdev_crtxg;
5019 vdev_add_child(pvd, newvd);
5020
5021 tvd = newvd->vdev_top;
5022 ASSERT(pvd->vdev_top == tvd);
5023 ASSERT(tvd->vdev_parent == rvd);
5024
5025 vdev_config_dirty(tvd);
5026
5027 /*
5028 * Set newvd's DTL to [TXG_INITIAL, dtl_max_txg) so that we account
5029 * for any dmu_sync-ed blocks. It will propagate upward when
5030 * spa_vdev_exit() calls vdev_dtl_reassess().
5031 */
5032 dtl_max_txg = txg + TXG_CONCURRENT_STATES;
5033
5034 vdev_dtl_dirty(newvd, DTL_MISSING, TXG_INITIAL,
5035 dtl_max_txg - TXG_INITIAL);
5036
5037 if (newvd->vdev_isspare) {
5038 spa_spare_activate(newvd);
5039 spa_event_notify(spa, newvd, ESC_ZFS_VDEV_SPARE);
5040 }
5041
5042 oldvdpath = spa_strdup(oldvd->vdev_path);
5043 newvdpath = spa_strdup(newvd->vdev_path);
5044 newvd_isspare = newvd->vdev_isspare;
5045
5046 /*
5047 * Mark newvd's DTL dirty in this txg.
5048 */
5049 vdev_dirty(tvd, VDD_DTL, newvd, txg);
5050
5051 /*
5052 * Schedule the resilver to restart in the future. We do this to
5053 * ensure that dmu_sync-ed blocks have been stitched into the
5054 * respective datasets.
5055 */
5056 dsl_resilver_restart(spa->spa_dsl_pool, dtl_max_txg);
5057
5058 if (spa->spa_bootfs)
5059 spa_event_notify(spa, newvd, ESC_ZFS_BOOTFS_VDEV_ATTACH);
5060
5061 spa_event_notify(spa, newvd, ESC_ZFS_VDEV_ATTACH);
5062
5063 /*
5064 * Commit the config
5065 */
5066 (void) spa_vdev_exit(spa, newrootvd, dtl_max_txg, 0);
5067
5068 spa_history_log_internal(spa, "vdev attach", NULL,
5069 "%s vdev=%s %s vdev=%s",
5070 replacing && newvd_isspare ? "spare in" :
5071 replacing ? "replace" : "attach", newvdpath,
5072 replacing ? "for" : "to", oldvdpath);
5073
5074 spa_strfree(oldvdpath);
5075 spa_strfree(newvdpath);
5076
5077 return (0);
5078}
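
/*
 * Worked example (added for illustration, not from the original source):
 * attaching device B to a plain disk A produces mirror(A, B); with
 * 'replacing' set it instead produces replacing(A, B), and once B finishes
 * resilvering, A is detached automatically, leaving B in A's place.
 */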
5079
5080/*
5081 * Detach a device from a mirror or replacing vdev.
5082 *
5083 * If 'replace_done' is specified, only detach if the parent
5084 * is a replacing vdev.
5085 */
5086int
5087spa_vdev_detach(spa_t *spa, uint64_t guid, uint64_t pguid, int replace_done)
5088{
5089 uint64_t txg;
5090 int error;
5091 vdev_t *rvd = spa->spa_root_vdev;
5092 vdev_t *vd, *pvd, *cvd, *tvd;
5093 boolean_t unspare = B_FALSE;
5094 uint64_t unspare_guid = 0;
5095 char *vdpath;
5096
5097 ASSERT(spa_writeable(spa));
5098
5099 txg = spa_vdev_enter(spa);
5100
5101 vd = spa_lookup_by_guid(spa, guid, B_FALSE);
5102
5103 if (vd == NULL)
5104 return (spa_vdev_exit(spa, NULL, txg, ENODEV));
5105
5106 if (!vd->vdev_ops->vdev_op_leaf)
5107 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
5108
5109 pvd = vd->vdev_parent;
5110
5111 /*
5112 * If the parent/child relationship is not as expected, don't do it.
5113 * Consider M(A,R(B,C)) -- that is, a mirror of A with a replacing
5114 * vdev that's replacing B with C. The user's intent in replacing
5115 * is to go from M(A,B) to M(A,C). If the user decides to cancel
5116 * the replace by detaching C, the expected behavior is to end up
5117 * M(A,B). But suppose that right after deciding to detach C,
5118 * the replacement of B completes. We would have M(A,C), and then
5119 * ask to detach C, which would leave us with just A -- not what
5120 * the user wanted. To prevent this, we make sure that the
5121 * parent/child relationship hasn't changed -- in this example,
5122 * that C's parent is still the replacing vdev R.
5123 */
5124 if (pvd->vdev_guid != pguid && pguid != 0)
5125 return (spa_vdev_exit(spa, NULL, txg, EBUSY));
5126
5127 /*
5128 * Only 'replacing' or 'spare' vdevs can be replaced.
5129 */
5130 if (replace_done && pvd->vdev_ops != &vdev_replacing_ops &&
5131 pvd->vdev_ops != &vdev_spare_ops)
5132 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
5133
5134 ASSERT(pvd->vdev_ops != &vdev_spare_ops ||
5135 spa_version(spa) >= SPA_VERSION_SPARES);
5136
5137 /*
5138 * Only mirror, replacing, and spare vdevs support detach.
5139 */
5140 if (pvd->vdev_ops != &vdev_replacing_ops &&
5141 pvd->vdev_ops != &vdev_mirror_ops &&
5142 pvd->vdev_ops != &vdev_spare_ops)
5143 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
5144
5145 /*
5146 * If this device has the only valid copy of some data,
5147 * we cannot safely detach it.
5148 */
5149 if (vdev_dtl_required(vd))
5150 return (spa_vdev_exit(spa, NULL, txg, EBUSY));
5151
5152 ASSERT(pvd->vdev_children >= 2);
5153
5154 /*
5155 * If we are detaching the second disk from a replacing vdev, then
5156 * check to see if we changed the original vdev's path to have "/old"
5157 * at the end in spa_vdev_attach(). If so, undo that change now.
5158 */
5159 if (pvd->vdev_ops == &vdev_replacing_ops && vd->vdev_id > 0 &&
5160 vd->vdev_path != NULL) {
5161 size_t len = strlen(vd->vdev_path);
5162
5163 for (int c = 0; c < pvd->vdev_children; c++) {
5164 cvd = pvd->vdev_child[c];
5165
5166 if (cvd == vd || cvd->vdev_path == NULL)
5167 continue;
5168
5169 if (strncmp(cvd->vdev_path, vd->vdev_path, len) == 0 &&
5170 strcmp(cvd->vdev_path + len, "/old") == 0) {
5171 spa_strfree(cvd->vdev_path);
5172 cvd->vdev_path = spa_strdup(vd->vdev_path);
5173 break;
5174 }
5175 }
5176 }
5177
5178 /*
5179 * If we are detaching the original disk from a spare, then it implies
5180 * that the spare should become a real disk, and be removed from the
5181 * active spare list for the pool.
5182 */
5183 if (pvd->vdev_ops == &vdev_spare_ops &&
5184 vd->vdev_id == 0 &&
5185 pvd->vdev_child[pvd->vdev_children - 1]->vdev_isspare)
5186 unspare = B_TRUE;
5187
5188 /*
5189 * Erase the disk labels so the disk can be used for other things.
5190 * This must be done after all other error cases are handled,
5191 * but before we disembowel vd (so we can still do I/O to it).
5192 * But if we can't do it, don't treat the error as fatal --
5193 * it may be that the unwritability of the disk is the reason
5194 * it's being detached!
5195 */
5196 error = vdev_label_init(vd, 0, VDEV_LABEL_REMOVE);
5197
5198 /*
5199 * Remove vd from its parent and compact the parent's children.
5200 */
5201 vdev_remove_child(pvd, vd);
5202 vdev_compact_children(pvd);
5203
5204 /*
5205 * Remember one of the remaining children so we can get tvd below.
5206 */
5207 cvd = pvd->vdev_child[pvd->vdev_children - 1];
5208
5209 /*
5210 * If we need to remove the remaining child from the list of hot spares,
5211 * do it now, marking the vdev as no longer a spare in the process.
5212 * We must do this before vdev_remove_parent(), because that can
5213 * change the GUID if it creates a new toplevel GUID. For a similar
5214 * reason, we must remove the spare now, in the same txg as the detach;
5215 * otherwise someone could attach a new sibling, change the GUID, and
5216 * the subsequent attempt to spa_vdev_remove(unspare_guid) would fail.
5217 */
5218 if (unspare) {
5219 ASSERT(cvd->vdev_isspare);
5220 spa_spare_remove(cvd);
5221 unspare_guid = cvd->vdev_guid;
5222 (void) spa_vdev_remove(spa, unspare_guid, B_TRUE);
5223 cvd->vdev_unspare = B_TRUE;
5224 }
5225
5226 /*
5227 * If the parent mirror/replacing vdev only has one child,
5228 * the parent is no longer needed. Remove it from the tree.
5229 */
5230 if (pvd->vdev_children == 1) {
5231 if (pvd->vdev_ops == &vdev_spare_ops)
5232 cvd->vdev_unspare = B_FALSE;
5233 vdev_remove_parent(cvd);
5234 }
5235
5236
5237 /*
5238 * We don't set tvd until now because the parent we just removed
5239 * may have been the previous top-level vdev.
5240 */
5241 tvd = cvd->vdev_top;
5242 ASSERT(tvd->vdev_parent == rvd);
5243
5244 /*
5245 * Reevaluate the parent vdev state.
5246 */
5247 vdev_propagate_state(cvd);
5248
5249 /*
5250 * If the 'autoexpand' property is set on the pool then automatically
5251 * try to expand the size of the pool. For example if the device we
5252 * just detached was smaller than the others, it may be possible to
5253 * add metaslabs (i.e. grow the pool). We need to reopen the vdev
5254 * first so that we can obtain the updated sizes of the leaf vdevs.
5255 */
5256 if (spa->spa_autoexpand) {
5257 vdev_reopen(tvd);
5258 vdev_expand(tvd, txg);
5259 }
5260
5261 vdev_config_dirty(tvd);
5262
5263 /*
5264 * Mark vd's DTL as dirty in this txg. vdev_dtl_sync() will see that
5265 * vd->vdev_detached is set and free vd's DTL object in syncing context.
5266 * But first make sure we're not on any *other* txg's DTL list, to
5267 * prevent vd from being accessed after it's freed.
5268 */
5269 vdpath = spa_strdup(vd->vdev_path);
5270 for (int t = 0; t < TXG_SIZE; t++)
5271 (void) txg_list_remove_this(&tvd->vdev_dtl_list, vd, t);
5272 vd->vdev_detached = B_TRUE;
5273 vdev_dirty(tvd, VDD_DTL, vd, txg);
5274
5275 spa_event_notify(spa, vd, ESC_ZFS_VDEV_REMOVE);
5276
5277 /* hang on to the spa before we release the lock */
5278 spa_open_ref(spa, FTAG);
5279
5280 error = spa_vdev_exit(spa, vd, txg, 0);
5281
5282 spa_history_log_internal(spa, "detach", NULL,
5283 "vdev=%s", vdpath);
5284 spa_strfree(vdpath);
5285
5286 /*
5287 * If this was the removal of the original device in a hot spare vdev,
5288 * then we want to go through and remove the device from the hot spare
5289 * list of every other pool.
5290 */
5291 if (unspare) {
5292 spa_t *altspa = NULL;
5293
5294 mutex_enter(&spa_namespace_lock);
5295 while ((altspa = spa_next(altspa)) != NULL) {
5296 if (altspa->spa_state != POOL_STATE_ACTIVE ||
5297 altspa == spa)
5298 continue;
5299
5300 spa_open_ref(altspa, FTAG);
5301 mutex_exit(&spa_namespace_lock);
5302 (void) spa_vdev_remove(altspa, unspare_guid, B_TRUE);
5303 mutex_enter(&spa_namespace_lock);
5304 spa_close(altspa, FTAG);
5305 }
5306 mutex_exit(&spa_namespace_lock);
5307
5308 /* search the rest of the vdevs for spares to remove */
5309 spa_vdev_resilver_done(spa);
5310 }
5311
5312 /* all done with the spa; OK to release */
5313 mutex_enter(&spa_namespace_lock);
5314 spa_close(spa, FTAG);
5315 mutex_exit(&spa_namespace_lock);
5316
5317 return (error);
5318}
5319
5320/*
5321 * Split a set of devices from their mirrors, and create a new pool from them.
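 *
 * This is the machinery behind "zpool split": each selected child is split
 * off from its mirror, and the split-off devices are assembled into a new,
 * independent pool named 'newname' (optionally exported if 'exp' is set).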
5322 */
5323int
5324spa_vdev_split_mirror(spa_t *spa, char *newname, nvlist_t *config,
5325 nvlist_t *props, boolean_t exp)
5326{
5327 int error = 0;
5328 uint64_t txg, *glist;
5329 spa_t *newspa;
5330 uint_t c, children, lastlog;
5331 nvlist_t **child, *nvl, *tmp;
5332 dmu_tx_t *tx;
5333 char *altroot = NULL;
5334 vdev_t *rvd, **vml = NULL; /* vdev modify list */
5335 boolean_t activate_slog;
5336
5337 ASSERT(spa_writeable(spa));
5338
5339 txg = spa_vdev_enter(spa);
5340
5341 /* clear the log and flush everything up to now */
5342 activate_slog = spa_passivate_log(spa);
5343 (void) spa_vdev_config_exit(spa, NULL, txg, 0, FTAG);
5344 error = spa_offline_log(spa);
5345 txg = spa_vdev_config_enter(spa);
5346
5347 if (activate_slog)
5348 spa_activate_log(spa);
5349
5350 if (error != 0)
5351 return (spa_vdev_exit(spa, NULL, txg, error));
5352
5353 /* check new spa name before going any further */
5354 if (spa_lookup(newname) != NULL)
5355 return (spa_vdev_exit(spa, NULL, txg, EEXIST));
5356
5357 /*
5358 * scan through all the children to ensure they're all mirrors
5359 */
5360 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvl) != 0 ||
5361 nvlist_lookup_nvlist_array(nvl, ZPOOL_CONFIG_CHILDREN, &child,
5362 &children) != 0)
5363 return (spa_vdev_exit(spa, NULL, txg, EINVAL));
5364
5365 /* first, check to ensure we've got the right child count */
5366 rvd = spa->spa_root_vdev;
5367 lastlog = 0;
5368 for (c = 0; c < rvd->vdev_children; c++) {
5369 vdev_t *vd = rvd->vdev_child[c];
5370
5371 /* don't count the holes & logs as children */
5372 if (vd->vdev_islog || vd->vdev_ishole) {
5373 if (lastlog == 0)
5374 lastlog = c;
5375 continue;
5376 }
5377
5378 lastlog = 0;
5379 }
5380 if (children != (lastlog != 0 ? lastlog : rvd->vdev_children))
5381 return (spa_vdev_exit(spa, NULL, txg, EINVAL));
5382
5383 /* next, ensure no spare or cache devices are part of the split */
5384 if (nvlist_lookup_nvlist(nvl, ZPOOL_CONFIG_SPARES, &tmp) == 0 ||
5385 nvlist_lookup_nvlist(nvl, ZPOOL_CONFIG_L2CACHE, &tmp) == 0)
5386 return (spa_vdev_exit(spa, NULL, txg, EINVAL));
5387
5388 vml = kmem_zalloc(children * sizeof (vdev_t *), KM_SLEEP);
5389 glist = kmem_zalloc(children * sizeof (uint64_t), KM_SLEEP);
5390
5391 /* then, loop over each vdev and validate it */
5392 for (c = 0; c < children; c++) {
5393 uint64_t is_hole = 0;
5394
5395 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
5396 &is_hole);
5397
5398 if (is_hole != 0) {
5399 if (spa->spa_root_vdev->vdev_child[c]->vdev_ishole ||
5400 spa->spa_root_vdev->vdev_child[c]->vdev_islog) {
5401 continue;
5402 } else {
5403 error = SET_ERROR(EINVAL);
5404 break;
5405 }
5406 }
5407
5408 /* which disk is going to be split? */
5409 if (nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_GUID,
5410 &glist[c]) != 0) {
5411 error = SET_ERROR(EINVAL);
5412 break;
5413 }
5414
5415 /* look it up in the spa */
5416 vml[c] = spa_lookup_by_guid(spa, glist[c], B_FALSE);
5417 if (vml[c] == NULL) {
5418 error = SET_ERROR(ENODEV);
5419 break;
5420 }
5421
5422 /* make sure there's nothing stopping the split */
5423 if (vml[c]->vdev_parent->vdev_ops != &vdev_mirror_ops ||
5424 vml[c]->vdev_islog ||
5425 vml[c]->vdev_ishole ||
5426 vml[c]->vdev_isspare ||
5427 vml[c]->vdev_isl2cache ||
5428 !vdev_writeable(vml[c]) ||
5429 vml[c]->vdev_children != 0 ||
5430 vml[c]->vdev_state != VDEV_STATE_HEALTHY ||
5431 c != spa->spa_root_vdev->vdev_child[c]->vdev_id) {
5432 error = SET_ERROR(EINVAL);
5433 break;
5434 }
5435
5436 if (vdev_dtl_required(vml[c])) {
5437 error = SET_ERROR(EBUSY);
5438 break;
5439 }
5440
5441 /* we need certain info from the top level */
5442 VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_METASLAB_ARRAY,
5443 vml[c]->vdev_top->vdev_ms_array) == 0);
5444 VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_METASLAB_SHIFT,
5445 vml[c]->vdev_top->vdev_ms_shift) == 0);
5446 VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_ASIZE,
5447 vml[c]->vdev_top->vdev_asize) == 0);
5448 VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_ASHIFT,
5449 vml[c]->vdev_top->vdev_ashift) == 0);
5450
5451 /* transfer per-vdev ZAPs */
5452 ASSERT3U(vml[c]->vdev_leaf_zap, !=, 0);
5453 VERIFY0(nvlist_add_uint64(child[c],
5454 ZPOOL_CONFIG_VDEV_LEAF_ZAP, vml[c]->vdev_leaf_zap));
5455
5456 ASSERT3U(vml[c]->vdev_top->vdev_top_zap, !=, 0);
5457 VERIFY0(nvlist_add_uint64(child[c],
5458 ZPOOL_CONFIG_VDEV_TOP_ZAP,
5459 vml[c]->vdev_parent->vdev_top_zap));
5460 }
5461
5462 if (error != 0) {
5463 kmem_free(vml, children * sizeof (vdev_t *));
5464 kmem_free(glist, children * sizeof (uint64_t));
5465 return (spa_vdev_exit(spa, NULL, txg, error));
5466 }
5467
5468 /* stop writers from using the disks */
5469 for (c = 0; c < children; c++) {
5470 if (vml[c] != NULL)
5471 vml[c]->vdev_offline = B_TRUE;
5472 }
5473 vdev_reopen(spa->spa_root_vdev);
5474
5475 /*
5476 * Temporarily record the splitting vdevs in the spa config. This
5477 * will disappear once the config is regenerated.
5478 */
5479 VERIFY(nvlist_alloc(&nvl, NV_UNIQUE_NAME, KM_SLEEP) == 0);
5480 VERIFY(nvlist_add_uint64_array(nvl, ZPOOL_CONFIG_SPLIT_LIST,
5481 glist, children) == 0);
5482 kmem_free(glist, children * sizeof (uint64_t));
5483
5484 mutex_enter(&spa->spa_props_lock);
5485 VERIFY(nvlist_add_nvlist(spa->spa_config, ZPOOL_CONFIG_SPLIT,
5486 nvl) == 0);
5487 mutex_exit(&spa->spa_props_lock);
5488 spa->spa_config_splitting = nvl;
5489 vdev_config_dirty(spa->spa_root_vdev);
5490
5491 /* configure and create the new pool */
5492 VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME, newname) == 0);
5493 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE,
5494 exp ? POOL_STATE_EXPORTED : POOL_STATE_ACTIVE) == 0);
5495 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_VERSION,
5496 spa_version(spa)) == 0);
5497 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_TXG,
5498 spa->spa_config_txg) == 0);
5499 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_GUID,
5500 spa_generate_guid(NULL)) == 0);
5501 VERIFY0(nvlist_add_boolean(config, ZPOOL_CONFIG_HAS_PER_VDEV_ZAPS));
5502 (void) nvlist_lookup_string(props,
5503 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
5504
5505 /* add the new pool to the namespace */
5506 newspa = spa_add(newname, config, altroot);
5507 newspa->spa_avz_action = AVZ_ACTION_REBUILD;
5508 newspa->spa_config_txg = spa->spa_config_txg;
5509 spa_set_log_state(newspa, SPA_LOG_CLEAR);
5510
5511 /* release the spa config lock, retaining the namespace lock */
5512 spa_vdev_config_exit(spa, NULL, txg, 0, FTAG);
5513
5514 if (zio_injection_enabled)
5515 zio_handle_panic_injection(spa, FTAG, 1);
5516
5517 spa_activate(newspa, spa_mode_global);
5518 spa_async_suspend(newspa);
5519
5520#ifndef illumos
5521 /* mark that we are creating new spa by splitting */
5522 newspa->spa_splitting_newspa = B_TRUE;
5523#endif
5524 /* create the new pool from the disks of the original pool */
5525 error = spa_load(newspa, SPA_LOAD_IMPORT, SPA_IMPORT_ASSEMBLE, B_TRUE);
5526#ifndef illumos
5527 newspa->spa_splitting_newspa = B_FALSE;
5528#endif
5529 if (error)
5530 goto out;
5531
5532 /* if that worked, generate a real config for the new pool */
5533 if (newspa->spa_root_vdev != NULL) {
5534 VERIFY(nvlist_alloc(&newspa->spa_config_splitting,
5535 NV_UNIQUE_NAME, KM_SLEEP) == 0);
5536 VERIFY(nvlist_add_uint64(newspa->spa_config_splitting,
5537 ZPOOL_CONFIG_SPLIT_GUID, spa_guid(spa)) == 0);
5538 spa_config_set(newspa, spa_config_generate(newspa, NULL, -1ULL,
5539 B_TRUE));
5540 }
5541
5542 /* set the props */
5543 if (props != NULL) {
5544 spa_configfile_set(newspa, props, B_FALSE);
5545 error = spa_prop_set(newspa, props);
5546 if (error)
5547 goto out;
5548 }
5549
5550 /* flush everything */
5551 txg = spa_vdev_config_enter(newspa);
5552 vdev_config_dirty(newspa->spa_root_vdev);
5553 (void) spa_vdev_config_exit(newspa, NULL, txg, 0, FTAG);
5554
5555 if (zio_injection_enabled)
5556 zio_handle_panic_injection(spa, FTAG, 2);
5557
5558 spa_async_resume(newspa);
5559
5560 /* finally, update the original pool's config */
5561 txg = spa_vdev_config_enter(spa);
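	/*
	 * The transaction below is only used to log per-vdev "detach"
	 * history records; if it cannot be assigned, the split still
	 * proceeds but those history entries are skipped.
	 */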
5562 tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
5563 error = dmu_tx_assign(tx, TXG_WAIT);
5564 if (error != 0)
5565 dmu_tx_abort(tx);
5566 for (c = 0; c < children; c++) {
5567 if (vml[c] != NULL) {
5568 vdev_split(vml[c]);
5569 if (error == 0)
5570 spa_history_log_internal(spa, "detach", tx,
5571 "vdev=%s", vml[c]->vdev_path);
5572
5573 vdev_free(vml[c]);
5574 }
5575 }
5576 spa->spa_avz_action = AVZ_ACTION_REBUILD;
5577 vdev_config_dirty(spa->spa_root_vdev);
5578 spa->spa_config_splitting = NULL;
5579 nvlist_free(nvl);
5580 if (error == 0)
5581 dmu_tx_commit(tx);
5582 (void) spa_vdev_exit(spa, NULL, txg, 0);
5583
5584 if (zio_injection_enabled)
5585 zio_handle_panic_injection(spa, FTAG, 3);
5586
5587 /* split is complete; log a history record */
5588 spa_history_log_internal(newspa, "split", NULL,
5589 "from pool %s", spa_name(spa));
5590
5591 kmem_free(vml, children * sizeof (vdev_t *));
5592
5593 /* if we're not going to mount the filesystems in userland, export */
5594 if (exp)
5595 error = spa_export_common(newname, POOL_STATE_EXPORTED, NULL,
5596 B_FALSE, B_FALSE);
5597
5598 return (error);
5599
5600out:
5601 spa_unload(newspa);
5602 spa_deactivate(newspa);
5603 spa_remove(newspa);
5604
5605 txg = spa_vdev_config_enter(spa);
5606
5607 /* re-online all offlined disks */
5608 for (c = 0; c < children; c++) {
5609 if (vml[c] != NULL)
5610 vml[c]->vdev_offline = B_FALSE;
5611 }
5612 vdev_reopen(spa->spa_root_vdev);
5613
5614 nvlist_free(spa->spa_config_splitting);
5615 spa->spa_config_splitting = NULL;
5616 (void) spa_vdev_exit(spa, NULL, txg, error);
5617
5618 kmem_free(vml, children * sizeof (vdev_t *));
5619 return (error);
5620}
5621
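/*
 * Find the device nvlist with the given guid in an nvlist array (such as the
 * configured spares or l2cache devices); returns NULL if it is not present.
 */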
5622static nvlist_t *
5623spa_nvlist_lookup_by_guid(nvlist_t **nvpp, int count, uint64_t target_guid)
5624{
5625 for (int i = 0; i < count; i++) {
5626 uint64_t guid;
5627
5628 VERIFY(nvlist_lookup_uint64(nvpp[i], ZPOOL_CONFIG_GUID,
5629 &guid) == 0);
5630
5631 if (guid == target_guid)
5632 return (nvpp[i]);
5633 }
5634
5635 return (NULL);
5636}
5637
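/*
 * Rewrite the 'name' nvlist array in 'config' (the spare or l2cache config)
 * with dev_to_remove omitted, freeing the temporary copies afterwards.
 */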
5638static void
5639spa_vdev_remove_aux(nvlist_t *config, char *name, nvlist_t **dev, int count,
5640 nvlist_t *dev_to_remove)
5641{
5642 nvlist_t **newdev = NULL;
5643
5644 if (count > 1)
5645 newdev = kmem_alloc((count - 1) * sizeof (void *), KM_SLEEP);
5646
5647 for (int i = 0, j = 0; i < count; i++) {
5648 if (dev[i] == dev_to_remove)
5649 continue;
5650 VERIFY(nvlist_dup(dev[i], &newdev[j++], KM_SLEEP) == 0);
5651 }
5652
5653 VERIFY(nvlist_remove(config, name, DATA_TYPE_NVLIST_ARRAY) == 0);
5654 VERIFY(nvlist_add_nvlist_array(config, name, newdev, count - 1) == 0);
5655
5656 for (int i = 0; i < count - 1; i++)
5657 nvlist_free(newdev[i]);
5658
5659 if (count > 1)
5660 kmem_free(newdev, (count - 1) * sizeof (void *));
5661}
5662
5663/*
5664 * Evacuate the device.
5665 */
5666static int
5667spa_vdev_remove_evacuate(spa_t *spa, vdev_t *vd)
5668{
5669 uint64_t txg;
5670 int error = 0;
5671
5672 ASSERT(MUTEX_HELD(&spa_namespace_lock));
5673 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
5674 ASSERT(vd == vd->vdev_top);
5675
5676 /*
5677 * Evacuate the device. We don't hold the config lock as writer
5678 	 * since we need to do I/O, but we do keep the
5679 * spa_namespace_lock held. Once this completes the device
5680 * should no longer have any blocks allocated on it.
5681 */
5682 if (vd->vdev_islog) {
5683 if (vd->vdev_stat.vs_alloc != 0)
5684 error = spa_offline_log(spa);
5685 } else {
5686 error = SET_ERROR(ENOTSUP);
5687 }
5688
5689 if (error)
5690 return (error);
5691
5692 /*
5693 * The evacuation succeeded. Remove any remaining MOS metadata
5694 * associated with this vdev, and wait for these changes to sync.
5695 */
5696 ASSERT0(vd->vdev_stat.vs_alloc);
5697 txg = spa_vdev_config_enter(spa);
5698 vd->vdev_removing = B_TRUE;
5699 vdev_dirty_leaves(vd, VDD_DTL, txg);
5700 vdev_config_dirty(vd);
5701 spa_vdev_config_exit(spa, NULL, txg, 0, FTAG);
5702
5703 return (0);
5704}
5705
5706/*
5707 * Complete the removal by cleaning up the namespace.
5708 */
5709static void
5710spa_vdev_remove_from_namespace(spa_t *spa, vdev_t *vd)
5711{
5712 vdev_t *rvd = spa->spa_root_vdev;
5713 uint64_t id = vd->vdev_id;
5714 boolean_t last_vdev = (id == (rvd->vdev_children - 1));
5715
5716 ASSERT(MUTEX_HELD(&spa_namespace_lock));
5717 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
5718 ASSERT(vd == vd->vdev_top);
5719
5720 /*
5721 * Only remove any devices which are empty.
5722 */
5723 if (vd->vdev_stat.vs_alloc != 0)
5724 return;
5725
5726 (void) vdev_label_init(vd, 0, VDEV_LABEL_REMOVE);
5727
5728 if (list_link_active(&vd->vdev_state_dirty_node))
5729 vdev_state_clean(vd);
5730 if (list_link_active(&vd->vdev_config_dirty_node))
5731 vdev_config_clean(vd);
5732
5733 vdev_free(vd);
5734
5735 if (last_vdev) {
5736 vdev_compact_children(rvd);
5737 } else {
5738 vd = vdev_alloc_common(spa, id, 0, &vdev_hole_ops);
5739 vdev_add_child(rvd, vd);
5740 }
5741 vdev_config_dirty(rvd);
5742
5743 /*
5744 * Reassess the health of our root vdev.
5745 */
5746 vdev_reopen(rvd);
5747}
5748
5749/*
5750 * Remove a device from the pool -
5751 *
5752 * Removing a device from the vdev namespace requires several steps
5753 * and can take a significant amount of time. As a result we use
5754 * the spa_vdev_config_[enter/exit] functions which allow us to
5755 * grab and release the spa_config_lock while still holding the namespace
5756 * lock. During each step the configuration is synced out.
5757 *
5758 * Currently, this supports removing only hot spares, slogs, and level 2 ARC
5759 * devices.
5760 */
5761int
5762spa_vdev_remove(spa_t *spa, uint64_t guid, boolean_t unspare)
5763{
5764 vdev_t *vd;
5765 sysevent_t *ev = NULL;
5766 metaslab_group_t *mg;
5767 nvlist_t **spares, **l2cache, *nv;
5768 uint64_t txg = 0;
5769 uint_t nspares, nl2cache;
5770 int error = 0;
5771 boolean_t locked = MUTEX_HELD(&spa_namespace_lock);
5772
5773 ASSERT(spa_writeable(spa));
5774
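	/*
	 * A caller such as spa_vdev_detach() may already hold the namespace
	 * lock and the vdev config transaction; in that case skip
	 * spa_vdev_enter()/spa_vdev_exit() and let the caller sync the config.
	 */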
5775 if (!locked)
5776 txg = spa_vdev_enter(spa);
5777
5778 vd = spa_lookup_by_guid(spa, guid, B_FALSE);
5779
5780 if (spa->spa_spares.sav_vdevs != NULL &&
5781 nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
5782 ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0 &&
5783 (nv = spa_nvlist_lookup_by_guid(spares, nspares, guid)) != NULL) {
5784 /*
5785 * Only remove the hot spare if it's not currently in use
5786 * in this pool.
5787 */
5788 if (vd == NULL || unspare) {
5789 if (vd == NULL)
5790 vd = spa_lookup_by_guid(spa, guid, B_TRUE);
5791 ev = spa_event_create(spa, vd, ESC_ZFS_VDEV_REMOVE_AUX);
5792 spa_vdev_remove_aux(spa->spa_spares.sav_config,
5793 ZPOOL_CONFIG_SPARES, spares, nspares, nv);
5794 spa_load_spares(spa);
5795 spa->spa_spares.sav_sync = B_TRUE;
5796 } else {
5797 error = SET_ERROR(EBUSY);
5798 }
5799 } else if (spa->spa_l2cache.sav_vdevs != NULL &&
5800 nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config,
5801 ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0 &&
5802 (nv = spa_nvlist_lookup_by_guid(l2cache, nl2cache, guid)) != NULL) {
5803 /*
5804 * Cache devices can always be removed.
5805 */
5806 vd = spa_lookup_by_guid(spa, guid, B_TRUE);
5807 ev = spa_event_create(spa, vd, ESC_ZFS_VDEV_REMOVE_AUX);
5808 spa_vdev_remove_aux(spa->spa_l2cache.sav_config,
5809 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache, nv);
5810 spa_load_l2cache(spa);
5811 spa->spa_l2cache.sav_sync = B_TRUE;
5812 } else if (vd != NULL && vd->vdev_islog) {
5813 ASSERT(!locked);
5814 ASSERT(vd == vd->vdev_top);
5815
5816 mg = vd->vdev_mg;
5817
5818 /*
5819 * Stop allocating from this vdev.
5820 */
5821 metaslab_group_passivate(mg);
5822
5823 /*
5824 * Wait for the youngest allocations and frees to sync,
5825 * and then wait for the deferral of those frees to finish.
5826 */
5827 spa_vdev_config_exit(spa, NULL,
5828 txg + TXG_CONCURRENT_STATES + TXG_DEFER_SIZE, 0, FTAG);
5829
5830 /*
5831 * Attempt to evacuate the vdev.
5832 */
5833 error = spa_vdev_remove_evacuate(spa, vd);
5834
5835 txg = spa_vdev_config_enter(spa);
5836
5837 /*
5838 * If we couldn't evacuate the vdev, unwind.
5839 */
5840 if (error) {
5841 metaslab_group_activate(mg);
5842 return (spa_vdev_exit(spa, NULL, txg, error));
5843 }
5844
5845 /*
5846 * Clean up the vdev namespace.
5847 */
5848 ev = spa_event_create(spa, vd, ESC_ZFS_VDEV_REMOVE_DEV);
5849 spa_vdev_remove_from_namespace(spa, vd);
5850
5851 } else if (vd != NULL) {
5852 /*
5853 * Normal vdevs cannot be removed (yet).
5854 */
5855 error = SET_ERROR(ENOTSUP);
5856 } else {
5857 /*
5858 * There is no vdev of any kind with the specified guid.
5859 */
5860 error = SET_ERROR(ENOENT);
5861 }
5862
5863 if (!locked)
5864 error = spa_vdev_exit(spa, NULL, txg, error);
5865
5866 if (ev)
5867 spa_event_post(ev);
5868
5869 return (error);
5870}
5871
5872/*
5873 * Find any device that's done replacing, or a vdev marked 'unspare' that's
5874 * currently spared, so we can detach it.
5875 */
5876static vdev_t *
5877spa_vdev_resilver_done_hunt(vdev_t *vd)
5878{
5879 vdev_t *newvd, *oldvd;
5880
5881 for (int c = 0; c < vd->vdev_children; c++) {
5882 oldvd = spa_vdev_resilver_done_hunt(vd->vdev_child[c]);
5883 if (oldvd != NULL)
5884 return (oldvd);
5885 }
5886
5887 /*
5888 * Check for a completed replacement. We always consider the first
5889 * vdev in the list to be the oldest vdev, and the last one to be
5890 * the newest (see spa_vdev_attach() for how that works). In
5891 * the case where the newest vdev is faulted, we will not automatically
5892 * remove it after a resilver completes. This is OK as it will require
5893 * user intervention to determine which disk the admin wishes to keep.
5894 */
5895 if (vd->vdev_ops == &vdev_replacing_ops) {
5896 ASSERT(vd->vdev_children > 1);
5897
5898 newvd = vd->vdev_child[vd->vdev_children - 1];
5899 oldvd = vd->vdev_child[0];
5900
5901 if (vdev_dtl_empty(newvd, DTL_MISSING) &&
5902 vdev_dtl_empty(newvd, DTL_OUTAGE) &&
5903 !vdev_dtl_required(oldvd))
5904 return (oldvd);
5905 }
5906
5907 /*
5908 * Check for a completed resilver with the 'unspare' flag set.
5909 */
5910 if (vd->vdev_ops == &vdev_spare_ops) {
5911 vdev_t *first = vd->vdev_child[0];
5912 vdev_t *last = vd->vdev_child[vd->vdev_children - 1];
5913
5914 if (last->vdev_unspare) {
5915 oldvd = first;
5916 newvd = last;
5917 } else if (first->vdev_unspare) {
5918 oldvd = last;
5919 newvd = first;
5920 } else {
5921 oldvd = NULL;
5922 }
5923
5924 if (oldvd != NULL &&
5925 vdev_dtl_empty(newvd, DTL_MISSING) &&
5926 vdev_dtl_empty(newvd, DTL_OUTAGE) &&
5927 !vdev_dtl_required(oldvd))
5928 return (oldvd);
5929
5930 /*
5931 * If there are more than two spares attached to a disk,
5932 * and those spares are not required, then we want to
5933 * attempt to free them up now so that they can be used
5934 * by other pools. Once we're back down to a single
5935 * disk+spare, we stop removing them.
5936 */
5937 if (vd->vdev_children > 2) {
5938 newvd = vd->vdev_child[1];
5939
5940 if (newvd->vdev_isspare && last->vdev_isspare &&
5941 vdev_dtl_empty(last, DTL_MISSING) &&
5942 vdev_dtl_empty(last, DTL_OUTAGE) &&
5943 !vdev_dtl_required(newvd))
5944 return (newvd);
5945 }
5946 }
5947
5948 return (NULL);
5949}
5950
5951static void
5952spa_vdev_resilver_done(spa_t *spa)
5953{
5954 vdev_t *vd, *pvd, *ppvd;
5955 uint64_t guid, sguid, pguid, ppguid;
5956
5957 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
5958
5959 while ((vd = spa_vdev_resilver_done_hunt(spa->spa_root_vdev)) != NULL) {
5960 pvd = vd->vdev_parent;
5961 ppvd = pvd->vdev_parent;
5962 guid = vd->vdev_guid;
5963 pguid = pvd->vdev_guid;
5964 ppguid = ppvd->vdev_guid;
5965 sguid = 0;
5966 /*
5967 * If we have just finished replacing a hot spared device, then
5968 * we need to detach the parent's first child (the original hot
5969 * spare) as well.
5970 */
5971 if (ppvd->vdev_ops == &vdev_spare_ops && pvd->vdev_id == 0 &&
5972 ppvd->vdev_children == 2) {
5973 ASSERT(pvd->vdev_ops == &vdev_replacing_ops);
5974 sguid = ppvd->vdev_child[1]->vdev_guid;
5975 }
5976 ASSERT(vd->vdev_resilver_txg == 0 || !vdev_dtl_required(vd));
5977
5978 spa_config_exit(spa, SCL_ALL, FTAG);
5979 if (spa_vdev_detach(spa, guid, pguid, B_TRUE) != 0)
5980 return;
5981 if (sguid && spa_vdev_detach(spa, sguid, ppguid, B_TRUE) != 0)
5982 return;
5983 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
5984 }
5985
5986 spa_config_exit(spa, SCL_ALL, FTAG);
5987}
5988
5989/*
5990 * Update the stored path or FRU for this vdev.
5991 */
5992int
5993spa_vdev_set_common(spa_t *spa, uint64_t guid, const char *value,
5994 boolean_t ispath)
5995{
5996 vdev_t *vd;
5997 boolean_t sync = B_FALSE;
5998
5999 ASSERT(spa_writeable(spa));
6000
6001 spa_vdev_state_enter(spa, SCL_ALL);
6002
6003 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
6004 return (spa_vdev_state_exit(spa, NULL, ENOENT));
6005
6006 if (!vd->vdev_ops->vdev_op_leaf)
6007 return (spa_vdev_state_exit(spa, NULL, ENOTSUP));
6008
6009 if (ispath) {
6010 if (strcmp(value, vd->vdev_path) != 0) {
6011 spa_strfree(vd->vdev_path);
6012 vd->vdev_path = spa_strdup(value);
6013 sync = B_TRUE;
6014 }
6015 } else {
6016 if (vd->vdev_fru == NULL) {
6017 vd->vdev_fru = spa_strdup(value);
6018 sync = B_TRUE;
6019 } else if (strcmp(value, vd->vdev_fru) != 0) {
6020 spa_strfree(vd->vdev_fru);
6021 vd->vdev_fru = spa_strdup(value);
6022 sync = B_TRUE;
6023 }
6024 }
6025
6026 return (spa_vdev_state_exit(spa, sync ? vd : NULL, 0));
6027}
6028
6029int
6030spa_vdev_setpath(spa_t *spa, uint64_t guid, const char *newpath)
6031{
6032 return (spa_vdev_set_common(spa, guid, newpath, B_TRUE));
6033}
6034
6035int
6036spa_vdev_setfru(spa_t *spa, uint64_t guid, const char *newfru)
6037{
6038 return (spa_vdev_set_common(spa, guid, newfru, B_FALSE));
6039}
6040
6041/*
6042 * ==========================================================================
6043 * SPA Scanning
6044 * ==========================================================================
6045 */
6046
6047int
6048spa_scan_stop(spa_t *spa)
6049{
6050 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
6051 if (dsl_scan_resilvering(spa->spa_dsl_pool))
6052 return (SET_ERROR(EBUSY));
6053 return (dsl_scan_cancel(spa->spa_dsl_pool));
6054}
6055
6056int
6057spa_scan(spa_t *spa, pool_scan_func_t func)
6058{
6059 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
6060
6061 if (func >= POOL_SCAN_FUNCS || func == POOL_SCAN_NONE)
6062 return (SET_ERROR(ENOTSUP));
6063
6064 /*
6065 * If a resilver was requested, but there is no DTL on a
6066 * writeable leaf device, we have nothing to do.
6067 */
6068 if (func == POOL_SCAN_RESILVER &&
6069 !vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL)) {
6070 spa_async_request(spa, SPA_ASYNC_RESILVER_DONE);
6071 return (0);
6072 }
6073
6074 return (dsl_scan(spa->spa_dsl_pool, func));
6075}
6076
6077/*
6078 * ==========================================================================
6079 * SPA async task processing
6080 * ==========================================================================
6081 */
6082
6083static void
6084spa_async_remove(spa_t *spa, vdev_t *vd)
6085{
6086 if (vd->vdev_remove_wanted) {
6087 vd->vdev_remove_wanted = B_FALSE;
6088 vd->vdev_delayed_close = B_FALSE;
6089 vdev_set_state(vd, B_FALSE, VDEV_STATE_REMOVED, VDEV_AUX_NONE);
6090
6091 /*
6092 * We want to clear the stats, but we don't want to do a full
6093 * vdev_clear() as that will cause us to throw away
6094 * degraded/faulted state as well as attempt to reopen the
6095 * device, all of which is a waste.
6096 */
6097 vd->vdev_stat.vs_read_errors = 0;
6098 vd->vdev_stat.vs_write_errors = 0;
6099 vd->vdev_stat.vs_checksum_errors = 0;
6100
6101 vdev_state_dirty(vd->vdev_top);
6102 /* Tell userspace that the vdev is gone. */
6103 zfs_post_remove(spa, vd);
6104 }
6105
6106 for (int c = 0; c < vd->vdev_children; c++)
6107 spa_async_remove(spa, vd->vdev_child[c]);
6108}
6109
6110static void
6111spa_async_probe(spa_t *spa, vdev_t *vd)
6112{
6113 if (vd->vdev_probe_wanted) {
6114 vd->vdev_probe_wanted = B_FALSE;
6115 vdev_reopen(vd); /* vdev_open() does the actual probe */
6116 }
6117
6118 for (int c = 0; c < vd->vdev_children; c++)
6119 spa_async_probe(spa, vd->vdev_child[c]);
6120}
6121
6122static void
6123spa_async_autoexpand(spa_t *spa, vdev_t *vd)
6124{
6125 sysevent_id_t eid;
6126 nvlist_t *attr;
6127 char *physpath;
6128
6129 if (!spa->spa_autoexpand)
6130 return;
6131
6132 for (int c = 0; c < vd->vdev_children; c++) {
6133 vdev_t *cvd = vd->vdev_child[c];
6134 spa_async_autoexpand(spa, cvd);
6135 }
6136
6137 if (!vd->vdev_ops->vdev_op_leaf || vd->vdev_physpath == NULL)
6138 return;
6139
6140 physpath = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
6141 (void) snprintf(physpath, MAXPATHLEN, "/devices%s", vd->vdev_physpath);
6142
6143 VERIFY(nvlist_alloc(&attr, NV_UNIQUE_NAME, KM_SLEEP) == 0);
6144 VERIFY(nvlist_add_string(attr, DEV_PHYS_PATH, physpath) == 0);
6145
6146 (void) ddi_log_sysevent(zfs_dip, SUNW_VENDOR, EC_DEV_STATUS,
6147 ESC_ZFS_VDEV_AUTOEXPAND, attr, &eid, DDI_SLEEP);
6148
6149 nvlist_free(attr);
6150 kmem_free(physpath, MAXPATHLEN);
6151}
6152
6153static void
6154spa_async_thread(void *arg)
6155{
6156 spa_t *spa = arg;
6157 int tasks;
6158
6159 ASSERT(spa->spa_sync_on);
6160
6161 mutex_enter(&spa->spa_async_lock);
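	/*
	 * Claim every pending task except SPA_ASYNC_REMOVE, which is
	 * serviced by spa_async_thread_vd().
	 */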
6162 tasks = spa->spa_async_tasks;
6163 spa->spa_async_tasks &= SPA_ASYNC_REMOVE;
6164 mutex_exit(&spa->spa_async_lock);
6165
6166 /*
6167 * See if the config needs to be updated.
6168 */
6169 if (tasks & SPA_ASYNC_CONFIG_UPDATE) {
6170 uint64_t old_space, new_space;
6171
6172 mutex_enter(&spa_namespace_lock);
6173 old_space = metaslab_class_get_space(spa_normal_class(spa));
6174 spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
6175 new_space = metaslab_class_get_space(spa_normal_class(spa));
6176 mutex_exit(&spa_namespace_lock);
6177
6178 /*
6179 * If the pool grew as a result of the config update,
6180 * then log an internal history event.
6181 */
6182 if (new_space != old_space) {
6183 spa_history_log_internal(spa, "vdev online", NULL,
6184 "pool '%s' size: %llu(+%llu)",
6185 spa_name(spa), new_space, new_space - old_space);
6186 }
6187 }
6188
6189 if ((tasks & SPA_ASYNC_AUTOEXPAND) && !spa_suspended(spa)) {
6190 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
6191 spa_async_autoexpand(spa, spa->spa_root_vdev);
6192 spa_config_exit(spa, SCL_CONFIG, FTAG);
6193 }
6194
6195 /*
6196 * See if any devices need to be probed.
6197 */
6198 if (tasks & SPA_ASYNC_PROBE) {
6199 spa_vdev_state_enter(spa, SCL_NONE);
6200 spa_async_probe(spa, spa->spa_root_vdev);
6201 (void) spa_vdev_state_exit(spa, NULL, 0);
6202 }
6203
6204 /*
6205 * If any devices are done replacing, detach them.
6206 */
6207 if (tasks & SPA_ASYNC_RESILVER_DONE)
6208 spa_vdev_resilver_done(spa);
6209
6210 /*
6211 * Kick off a resilver.
6212 */
6213 if (tasks & SPA_ASYNC_RESILVER)
6214 dsl_resilver_restart(spa->spa_dsl_pool, 0);
6215
6216 /*
6217 * Let the world know that we're done.
6218 */
6219 mutex_enter(&spa->spa_async_lock);
6220 spa->spa_async_thread = NULL;
6221 cv_broadcast(&spa->spa_async_cv);
6222 mutex_exit(&spa->spa_async_lock);
6223 thread_exit();
6224}
6225
6226static void
6227spa_async_thread_vd(void *arg)
6228{
6229 spa_t *spa = arg;
6230 int tasks;
6231
6232 ASSERT(spa->spa_sync_on);
6233
6234 mutex_enter(&spa->spa_async_lock);
6235 tasks = spa->spa_async_tasks;
6236retry:
6237 spa->spa_async_tasks &= ~SPA_ASYNC_REMOVE;
6238 mutex_exit(&spa->spa_async_lock);
6239
6240 /*
6241 * See if any devices need to be marked REMOVED.
6242 */
6243 if (tasks & SPA_ASYNC_REMOVE) {
6244 spa_vdev_state_enter(spa, SCL_NONE);
6245 spa_async_remove(spa, spa->spa_root_vdev);
6246 for (int i = 0; i < spa->spa_l2cache.sav_count; i++)
6247 spa_async_remove(spa, spa->spa_l2cache.sav_vdevs[i]);
6248 for (int i = 0; i < spa->spa_spares.sav_count; i++)
6249 spa_async_remove(spa, spa->spa_spares.sav_vdevs[i]);
6250 (void) spa_vdev_state_exit(spa, NULL, 0);
6251 }
6252
6253 /*
6254 * Let the world know that we're done.
6255 */
6256 mutex_enter(&spa->spa_async_lock);
6257 tasks = spa->spa_async_tasks;
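	/*
	 * If more REMOVE requests were queued while we were working, loop
	 * back and service them before this thread exits.
	 */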
6258 if ((tasks & SPA_ASYNC_REMOVE) != 0)
6259 goto retry;
6260 spa->spa_async_thread_vd = NULL;
6261 cv_broadcast(&spa->spa_async_cv);
6262 mutex_exit(&spa->spa_async_lock);
6263 thread_exit();
6264}
6265
6266void
6267spa_async_suspend(spa_t *spa)
6268{
6269 mutex_enter(&spa->spa_async_lock);
6270 spa->spa_async_suspended++;
6271 while (spa->spa_async_thread != NULL &&
6272 spa->spa_async_thread_vd != NULL)
6273 cv_wait(&spa->spa_async_cv, &spa->spa_async_lock);
6274 mutex_exit(&spa->spa_async_lock);
6275}
6276
6277void
6278spa_async_resume(spa_t *spa)
6279{
6280 mutex_enter(&spa->spa_async_lock);
6281 ASSERT(spa->spa_async_suspended != 0);
6282 spa->spa_async_suspended--;
6283 mutex_exit(&spa->spa_async_lock);
6284}
6285
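/*
 * Decide whether spa_async_thread() has work to do.  REMOVE requests are
 * ignored here (they are serviced by spa_async_thread_vd()), any other
 * non-config task always counts, and a pending config-cache update only
 * counts once zfs_ccw_retry_interval seconds have elapsed since the last
 * failed configuration cache file write.
 */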
6286static boolean_t
6287spa_async_tasks_pending(spa_t *spa)
6288{
6289 uint_t non_config_tasks;
6290 uint_t config_task;
6291 boolean_t config_task_suspended;
6292
6293 non_config_tasks = spa->spa_async_tasks & ~(SPA_ASYNC_CONFIG_UPDATE |
6294 SPA_ASYNC_REMOVE);
6295 config_task = spa->spa_async_tasks & SPA_ASYNC_CONFIG_UPDATE;
6296 if (spa->spa_ccw_fail_time == 0) {
6297 config_task_suspended = B_FALSE;
6298 } else {
6299 config_task_suspended =
6300 (gethrtime() - spa->spa_ccw_fail_time) <
6301 (zfs_ccw_retry_interval * NANOSEC);
6302 }
6303
6304 return (non_config_tasks || (config_task && !config_task_suspended));
6305}
6306
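/*
 * Start the general async thread if there is eligible work pending, async
 * processing is not suspended, no such thread is already running, and the
 * system is far enough along in boot for rootdir to exist.
 */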
6307static void
6308spa_async_dispatch(spa_t *spa)
6309{
6310 mutex_enter(&spa->spa_async_lock);
6311 if (spa_async_tasks_pending(spa) &&
6312 !spa->spa_async_suspended &&
6313 spa->spa_async_thread == NULL &&
6314 rootdir != NULL)
6315 spa->spa_async_thread = thread_create(NULL, 0,
6316 spa_async_thread, spa, 0, &p0, TS_RUN, maxclsyspri);
6317 mutex_exit(&spa->spa_async_lock);
6318}
6319
6320static void
6321spa_async_dispatch_vd(spa_t *spa)
6322{
6323 mutex_enter(&spa->spa_async_lock);
6324 if ((spa->spa_async_tasks & SPA_ASYNC_REMOVE) != 0 &&
6325 !spa->spa_async_suspended &&
6326 spa->spa_async_thread_vd == NULL &&
6327 rootdir != NULL)
6328 spa->spa_async_thread_vd = thread_create(NULL, 0,
6329 spa_async_thread_vd, spa, 0, &p0, TS_RUN, maxclsyspri);
6330 mutex_exit(&spa->spa_async_lock);
6331}
6332
6333void
6334spa_async_request(spa_t *spa, int task)
6335{
6336 zfs_dbgmsg("spa=%s async request task=%u", spa->spa_name, task);
6337 mutex_enter(&spa->spa_async_lock);
6338 spa->spa_async_tasks |= task;
6339 mutex_exit(&spa->spa_async_lock);
6340 spa_async_dispatch_vd(spa);
6341}
6342
6343/*
6344 * ==========================================================================
6345 * SPA syncing routines
6346 * ==========================================================================
6347 */
6348
6349static int
6350bpobj_enqueue_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
6351{
6352 bpobj_t *bpo = arg;
6353 bpobj_enqueue(bpo, bp, tx);
6354 return (0);
6355}
6356
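/*
 * bplist/bpobj iteration callback: issue an asynchronous free for each block
 * pointer, as a child of the zio passed in through 'arg'.
 */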
6357static int
6358spa_free_sync_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
6359{
6360 zio_t *zio = arg;
6361
6362 zio_nowait(zio_free_sync(zio, zio->io_spa, dmu_tx_get_txg(tx), bp,
6363 BP_GET_PSIZE(bp), zio->io_flags));
6364 return (0);
6365}
6366
6367/*
6368 * Note: this simple function is not inlined to make it easier to dtrace the
6369 * amount of time spent syncing frees.
6370 */
6371static void
6372spa_sync_frees(spa_t *spa, bplist_t *bpl, dmu_tx_t *tx)
6373{
6374 zio_t *zio = zio_root(spa, NULL, NULL, 0);
6375 bplist_iterate(bpl, spa_free_sync_cb, zio, tx);
6376 VERIFY(zio_wait(zio) == 0);
6377}
6378
6379/*
6380 * Note: this simple function is not inlined to make it easier to dtrace the
6381 * amount of time spent syncing deferred frees.
6382 */
6383static void
6384spa_sync_deferred_frees(spa_t *spa, dmu_tx_t *tx)
6385{
6386 zio_t *zio = zio_root(spa, NULL, NULL, 0);
6387 VERIFY3U(bpobj_iterate(&spa->spa_deferred_bpobj,
6388 spa_free_sync_cb, zio, tx), ==, 0);
6389 VERIFY0(zio_wait(zio));
6390}
6391
6392
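/*
 * Pack 'nv' as an XDR-encoded buffer, write it into the packed-nvlist object
 * 'obj' in the MOS, and record the packed size in the object's bonus buffer.
 */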
6393static void
6394spa_sync_nvlist(spa_t *spa, uint64_t obj, nvlist_t *nv, dmu_tx_t *tx)
6395{
6396 char *packed = NULL;
6397 size_t bufsize;
6398 size_t nvsize = 0;
6399 dmu_buf_t *db;
6400
6401 VERIFY(nvlist_size(nv, &nvsize, NV_ENCODE_XDR) == 0);
6402
6403 /*
6404 * Write full (SPA_CONFIG_BLOCKSIZE) blocks of configuration
6405 * information. This avoids the dmu_buf_will_dirty() path and
6406 * saves us a pre-read to get data we don't actually care about.
6407 */
6408 bufsize = P2ROUNDUP((uint64_t)nvsize, SPA_CONFIG_BLOCKSIZE);
6409 packed = kmem_alloc(bufsize, KM_SLEEP);
6410
6411 VERIFY(nvlist_pack(nv, &packed, &nvsize, NV_ENCODE_XDR,
6412 KM_SLEEP) == 0);
6413 bzero(packed + nvsize, bufsize - nvsize);
6414
6415 dmu_write(spa->spa_meta_objset, obj, 0, bufsize, packed, tx);
6416
6417 kmem_free(packed, bufsize);
6418
6419 VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db));
6420 dmu_buf_will_dirty(db, tx);
6421 *(uint64_t *)db->db_data = nvsize;
6422 dmu_buf_rele(db, FTAG);
6423}
6424
6425static void
6426spa_sync_aux_dev(spa_t *spa, spa_aux_vdev_t *sav, dmu_tx_t *tx,
6427 const char *config, const char *entry)
6428{
6429 nvlist_t *nvroot;
6430 nvlist_t **list;
6431 int i;
6432
6433 if (!sav->sav_sync)
6434 return;
6435
6436 /*
6437 * Update the MOS nvlist describing the list of available devices.
6438 * spa_validate_aux() will have already made sure this nvlist is
6439 * valid and the vdevs are labeled appropriately.
6440 */
6441 if (sav->sav_object == 0) {
6442 sav->sav_object = dmu_object_alloc(spa->spa_meta_objset,
6443 DMU_OT_PACKED_NVLIST, 1 << 14, DMU_OT_PACKED_NVLIST_SIZE,
6444 sizeof (uint64_t), tx);
6445 VERIFY(zap_update(spa->spa_meta_objset,
6446 DMU_POOL_DIRECTORY_OBJECT, entry, sizeof (uint64_t), 1,
6447 &sav->sav_object, tx) == 0);
6448 }
6449
6450 VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0);
6451 if (sav->sav_count == 0) {
6452 VERIFY(nvlist_add_nvlist_array(nvroot, config, NULL, 0) == 0);
6453 } else {
6454 list = kmem_alloc(sav->sav_count * sizeof (void *), KM_SLEEP);
6455 for (i = 0; i < sav->sav_count; i++)
6456 list[i] = vdev_config_generate(spa, sav->sav_vdevs[i],
6457 B_FALSE, VDEV_CONFIG_L2CACHE);
6458 VERIFY(nvlist_add_nvlist_array(nvroot, config, list,
6459 sav->sav_count) == 0);
6460 for (i = 0; i < sav->sav_count; i++)
6461 nvlist_free(list[i]);
6462 kmem_free(list, sav->sav_count * sizeof (void *));
6463 }
6464
6465 spa_sync_nvlist(spa, sav->sav_object, nvroot, tx);
6466 nvlist_free(nvroot);
6467
6468 sav->sav_sync = B_FALSE;
6469}
6470
6471/*
6472 * Rebuild spa's all-vdev ZAP from the vdev ZAPs indicated in each vdev_t.
6473 * The all-vdev ZAP must be empty.
6474 */
6475static void
6476spa_avz_build(vdev_t *vd, uint64_t avz, dmu_tx_t *tx)
6477{
6478 spa_t *spa = vd->vdev_spa;
6479 if (vd->vdev_top_zap != 0) {
6480 VERIFY0(zap_add_int(spa->spa_meta_objset, avz,
6481 vd->vdev_top_zap, tx));
6482 }
6483 if (vd->vdev_leaf_zap != 0) {
6484 VERIFY0(zap_add_int(spa->spa_meta_objset, avz,
6485 vd->vdev_leaf_zap, tx));
6486 }
6487 for (uint64_t i = 0; i < vd->vdev_children; i++) {
6488 spa_avz_build(vd->vdev_child[i], avz, tx);
6489 }
6490}
6491
6492static void
6493spa_sync_config_object(spa_t *spa, dmu_tx_t *tx)
6494{
6495 nvlist_t *config;
6496
6497 /*
6498 * If the pool is being imported from a pre-per-vdev-ZAP version of ZFS,
6499 * its config may not be dirty but we still need to build per-vdev ZAPs.
6500 * Similarly, if the pool is being assembled (e.g. after a split), we
6501 * need to rebuild the AVZ although the config may not be dirty.
6502 */
6503 if (list_is_empty(&spa->spa_config_dirty_list) &&
6504 spa->spa_avz_action == AVZ_ACTION_NONE)
6505 return;
6506
6507 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
6508
6509 ASSERT(spa->spa_avz_action == AVZ_ACTION_NONE ||
6510 spa->spa_avz_action == AVZ_ACTION_INITIALIZE ||
6506 spa->spa_all_vdev_zaps != 0);
6507
6508 if (spa->spa_avz_action == AVZ_ACTION_REBUILD) {
6509 /* Make and build the new AVZ */
6510 uint64_t new_avz = zap_create(spa->spa_meta_objset,
6511 DMU_OTN_ZAP_METADATA, DMU_OT_NONE, 0, tx);
6512 spa_avz_build(spa->spa_root_vdev, new_avz, tx);
6513
6514 /* Diff old AVZ with new one */
6515 zap_cursor_t zc;
6516 zap_attribute_t za;
6517
6518 for (zap_cursor_init(&zc, spa->spa_meta_objset,
6519 spa->spa_all_vdev_zaps);
6520 zap_cursor_retrieve(&zc, &za) == 0;
6521 zap_cursor_advance(&zc)) {
6522 uint64_t vdzap = za.za_first_integer;
6523 if (zap_lookup_int(spa->spa_meta_objset, new_avz,
6524 vdzap) == ENOENT) {
6525 /*
6526 * ZAP is listed in old AVZ but not in new one;
6527 * destroy it
6528 */
6529 VERIFY0(zap_destroy(spa->spa_meta_objset, vdzap,
6530 tx));
6531 }
6532 }
6533
6534 zap_cursor_fini(&zc);
6535
6536 /* Destroy the old AVZ */
6537 VERIFY0(zap_destroy(spa->spa_meta_objset,
6538 spa->spa_all_vdev_zaps, tx));
6539
6540 /* Replace the old AVZ in the dir obj with the new one */
6541 VERIFY0(zap_update(spa->spa_meta_objset,
6542 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_VDEV_ZAP_MAP,
6543 sizeof (new_avz), 1, &new_avz, tx));
6544
6545 spa->spa_all_vdev_zaps = new_avz;
6546 } else if (spa->spa_avz_action == AVZ_ACTION_DESTROY) {
6547 zap_cursor_t zc;
6548 zap_attribute_t za;
6549
6550 /* Walk through the AVZ and destroy all listed ZAPs */
6551 for (zap_cursor_init(&zc, spa->spa_meta_objset,
6552 spa->spa_all_vdev_zaps);
6553 zap_cursor_retrieve(&zc, &za) == 0;
6554 zap_cursor_advance(&zc)) {
6555 uint64_t zap = za.za_first_integer;
6556 VERIFY0(zap_destroy(spa->spa_meta_objset, zap, tx));
6557 }
6558
6559 zap_cursor_fini(&zc);
6560
6561 /* Destroy and unlink the AVZ itself */
6562 VERIFY0(zap_destroy(spa->spa_meta_objset,
6563 spa->spa_all_vdev_zaps, tx));
6564 VERIFY0(zap_remove(spa->spa_meta_objset,
6565 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_VDEV_ZAP_MAP, tx));
6566 spa->spa_all_vdev_zaps = 0;
6567 }
6568
6569 if (spa->spa_all_vdev_zaps == 0) {
6570 spa->spa_all_vdev_zaps = zap_create_link(spa->spa_meta_objset,
6571 DMU_OTN_ZAP_METADATA, DMU_POOL_DIRECTORY_OBJECT,
6572 DMU_POOL_VDEV_ZAP_MAP, tx);
6573 }
6574 spa->spa_avz_action = AVZ_ACTION_NONE;
6575
6576 /* Create ZAPs for vdevs that don't have them. */
6577 vdev_construct_zaps(spa->spa_root_vdev, tx);
6578
6579 config = spa_config_generate(spa, spa->spa_root_vdev,
6580 dmu_tx_get_txg(tx), B_FALSE);
6581
6582 /*
6583 * If we're upgrading the spa version then make sure that
6584 * the config object gets updated with the correct version.
6585 */
6586 if (spa->spa_ubsync.ub_version < spa->spa_uberblock.ub_version)
6587 fnvlist_add_uint64(config, ZPOOL_CONFIG_VERSION,
6588 spa->spa_uberblock.ub_version);
6589
6590 spa_config_exit(spa, SCL_STATE, FTAG);
6591
6592 nvlist_free(spa->spa_config_syncing);
6593 spa->spa_config_syncing = config;
6594
6595 spa_sync_nvlist(spa, spa->spa_config_object, config, tx);
6596}
6597
6598static void
6599spa_sync_version(void *arg, dmu_tx_t *tx)
6600{
6601 uint64_t *versionp = arg;
6602 uint64_t version = *versionp;
6603 spa_t *spa = dmu_tx_pool(tx)->dp_spa;
6604
6605 /*
6606 * Setting the version is special cased when first creating the pool.
6607 */
6608 ASSERT(tx->tx_txg != TXG_INITIAL);
6609
6610 ASSERT(SPA_VERSION_IS_SUPPORTED(version));
6611 ASSERT(version >= spa_version(spa));
6612
6613 spa->spa_uberblock.ub_version = version;
6614 vdev_config_dirty(spa->spa_root_vdev);
6615 spa_history_log_internal(spa, "set", tx, "version=%lld", version);
6616}
6617
6618/*
6619 * Set zpool properties.
6620 */
6621static void
6622spa_sync_props(void *arg, dmu_tx_t *tx)
6623{
6624 nvlist_t *nvp = arg;
6625 spa_t *spa = dmu_tx_pool(tx)->dp_spa;
6626 objset_t *mos = spa->spa_meta_objset;
6627 nvpair_t *elem = NULL;
6628
6629 mutex_enter(&spa->spa_props_lock);
6630
6631 while ((elem = nvlist_next_nvpair(nvp, elem))) {
6632 uint64_t intval;
6633 char *strval, *fname;
6634 zpool_prop_t prop;
6635 const char *propname;
6636 zprop_type_t proptype;
6637 spa_feature_t fid;
6638
6639 switch (prop = zpool_name_to_prop(nvpair_name(elem))) {
6640 case ZPROP_INVAL:
6641 /*
6642 * We checked this earlier in spa_prop_validate().
6643 */
6644 ASSERT(zpool_prop_feature(nvpair_name(elem)));
6645
6646 fname = strchr(nvpair_name(elem), '@') + 1;
6647 VERIFY0(zfeature_lookup_name(fname, &fid));
6648
6649 spa_feature_enable(spa, fid, tx);
6650 spa_history_log_internal(spa, "set", tx,
6651 "%s=enabled", nvpair_name(elem));
6652 break;
6653
6654 case ZPOOL_PROP_VERSION:
6655 intval = fnvpair_value_uint64(elem);
6656 /*
6657 			 * The version is synced separately before other
6658 * properties and should be correct by now.
6659 */
6660 ASSERT3U(spa_version(spa), >=, intval);
6661 break;
6662
6663 case ZPOOL_PROP_ALTROOT:
6664 /*
6665 * 'altroot' is a non-persistent property. It should
6666 * have been set temporarily at creation or import time.
6667 */
6668 ASSERT(spa->spa_root != NULL);
6669 break;
6670
6671 case ZPOOL_PROP_READONLY:
6672 case ZPOOL_PROP_CACHEFILE:
6673 /*
6674 			 * 'readonly' and 'cachefile' are also non-persistent
6675 * properties.
6676 */
6677 break;
6678 case ZPOOL_PROP_COMMENT:
6679 strval = fnvpair_value_string(elem);
6680 if (spa->spa_comment != NULL)
6681 spa_strfree(spa->spa_comment);
6682 spa->spa_comment = spa_strdup(strval);
6683 /*
6684 * We need to dirty the configuration on all the vdevs
6685 * so that their labels get updated. It's unnecessary
6686 * to do this for pool creation since the vdev's
6687 			 * configuration has already been dirtied.
6688 */
6689 if (tx->tx_txg != TXG_INITIAL)
6690 vdev_config_dirty(spa->spa_root_vdev);
6691 spa_history_log_internal(spa, "set", tx,
6692 "%s=%s", nvpair_name(elem), strval);
6693 break;
6694 default:
6695 /*
6696 * Set pool property values in the poolprops mos object.
6697 */
6698 if (spa->spa_pool_props_object == 0) {
6699 spa->spa_pool_props_object =
6700 zap_create_link(mos, DMU_OT_POOL_PROPS,
6701 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_PROPS,
6702 tx);
6703 }
6704
6705 /* normalize the property name */
6706 propname = zpool_prop_to_name(prop);
6707 proptype = zpool_prop_get_type(prop);
6708
6709 if (nvpair_type(elem) == DATA_TYPE_STRING) {
6710 ASSERT(proptype == PROP_TYPE_STRING);
6711 strval = fnvpair_value_string(elem);
6712 VERIFY0(zap_update(mos,
6713 spa->spa_pool_props_object, propname,
6714 1, strlen(strval) + 1, strval, tx));
6715 spa_history_log_internal(spa, "set", tx,
6716 "%s=%s", nvpair_name(elem), strval);
6717 } else if (nvpair_type(elem) == DATA_TYPE_UINT64) {
6718 intval = fnvpair_value_uint64(elem);
6719
6720 if (proptype == PROP_TYPE_INDEX) {
6721 const char *unused;
6722 VERIFY0(zpool_prop_index_to_string(
6723 prop, intval, &unused));
6724 }
6725 VERIFY0(zap_update(mos,
6726 spa->spa_pool_props_object, propname,
6727 8, 1, &intval, tx));
6728 spa_history_log_internal(spa, "set", tx,
6729 "%s=%lld", nvpair_name(elem), intval);
6730 } else {
6731 ASSERT(0); /* not allowed */
6732 }
6733
6734 switch (prop) {
6735 case ZPOOL_PROP_DELEGATION:
6736 spa->spa_delegation = intval;
6737 break;
6738 case ZPOOL_PROP_BOOTFS:
6739 spa->spa_bootfs = intval;
6740 break;
6741 case ZPOOL_PROP_FAILUREMODE:
6742 spa->spa_failmode = intval;
6743 break;
6744 case ZPOOL_PROP_AUTOEXPAND:
6745 spa->spa_autoexpand = intval;
6746 if (tx->tx_txg != TXG_INITIAL)
6747 spa_async_request(spa,
6748 SPA_ASYNC_AUTOEXPAND);
6749 break;
6750 case ZPOOL_PROP_DEDUPDITTO:
6751 spa->spa_dedup_ditto = intval;
6752 break;
6753 default:
6754 break;
6755 }
6756 }
6757
6758 }
6759
6760 mutex_exit(&spa->spa_props_lock);
6761}
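
/*
 * Illustrative sketch (not part of spa.c): roughly how a caller such as
 * spa_prop_set() could assemble the nvlist that spa_sync_props() consumes
 * above.  The specific properties and values are arbitrary examples; the
 * fnvlist_* wrappers and zpool_prop_to_name() are the same helpers already
 * used elsewhere in this file.
 */
static nvlist_t *
example_build_prop_nvlist(void)
{
	nvlist_t *props = fnvlist_alloc();

	/* numeric/index properties are stored as uint64 pairs */
	fnvlist_add_uint64(props, zpool_prop_to_name(ZPOOL_PROP_AUTOEXPAND), 1);

	/* string properties, e.g. the pool comment */
	fnvlist_add_string(props, zpool_prop_to_name(ZPOOL_PROP_COMMENT),
	    "example comment");

	/* the caller is responsible for fnvlist_free() once the task is done */
	return (props);
}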
6762
6763/*
6764 * Perform one-time upgrade on-disk changes. spa_version() does not
6765 * reflect the new version this txg, so there must be no changes this
6766 * txg to anything that the upgrade code depends on after it executes.
6767 * Therefore this must be called after dsl_pool_sync() does the sync
6768 * tasks.
6769 */
6770static void
6771spa_sync_upgrades(spa_t *spa, dmu_tx_t *tx)
6772{
6773 dsl_pool_t *dp = spa->spa_dsl_pool;
6774
6775 ASSERT(spa->spa_sync_pass == 1);
6776
6777 rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG);
6778
6779 if (spa->spa_ubsync.ub_version < SPA_VERSION_ORIGIN &&
6780 spa->spa_uberblock.ub_version >= SPA_VERSION_ORIGIN) {
6781 dsl_pool_create_origin(dp, tx);
6782
6783 /* Keeping the origin open increases spa_minref */
6784 spa->spa_minref += 3;
6785 }
6786
6787 if (spa->spa_ubsync.ub_version < SPA_VERSION_NEXT_CLONES &&
6788 spa->spa_uberblock.ub_version >= SPA_VERSION_NEXT_CLONES) {
6789 dsl_pool_upgrade_clones(dp, tx);
6790 }
6791
6792 if (spa->spa_ubsync.ub_version < SPA_VERSION_DIR_CLONES &&
6793 spa->spa_uberblock.ub_version >= SPA_VERSION_DIR_CLONES) {
6794 dsl_pool_upgrade_dir_clones(dp, tx);
6795
6796 /* Keeping the freedir open increases spa_minref */
6797 spa->spa_minref += 3;
6798 }
6799
6800 if (spa->spa_ubsync.ub_version < SPA_VERSION_FEATURES &&
6801 spa->spa_uberblock.ub_version >= SPA_VERSION_FEATURES) {
6802 spa_feature_create_zap_objects(spa, tx);
6803 }
6804
6805 /*
6806	 * The LZ4_COMPRESS feature's behaviour was changed to activate_on_enable
6807	 * when the possibility to use lz4 compression for metadata was added.
6808	 * Old pools that have this feature enabled must be upgraded to have
6809	 * this feature active.
6810 */
6811 if (spa->spa_uberblock.ub_version >= SPA_VERSION_FEATURES) {
6812 boolean_t lz4_en = spa_feature_is_enabled(spa,
6813 SPA_FEATURE_LZ4_COMPRESS);
6814 boolean_t lz4_ac = spa_feature_is_active(spa,
6815 SPA_FEATURE_LZ4_COMPRESS);
6816
6817 if (lz4_en && !lz4_ac)
6818 spa_feature_incr(spa, SPA_FEATURE_LZ4_COMPRESS, tx);
6819 }
6820
6821 /*
6822 * If we haven't written the salt, do so now. Note that the
6823 * feature may not be activated yet, but that's fine since
6824 * the presence of this ZAP entry is backwards compatible.
6825 */
6826 if (zap_contains(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
6827 DMU_POOL_CHECKSUM_SALT) == ENOENT) {
6828 VERIFY0(zap_add(spa->spa_meta_objset,
6829 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CHECKSUM_SALT, 1,
6830 sizeof (spa->spa_cksum_salt.zcs_bytes),
6831 spa->spa_cksum_salt.zcs_bytes, tx));
6832 }
6833
6834 rrw_exit(&dp->dp_config_rwlock, FTAG);
6835}
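
/*
 * Illustrative sketch (not part of spa.c): the guard pattern used by the
 * one-time upgrade steps above.  A step fires exactly once, in the txg whose
 * uberblock first carries the new version, because spa_ubsync still holds the
 * last *synced* version while spa_uberblock already holds the new one.
 * SPA_VERSION_EXAMPLE is a hypothetical version constant used only for
 * illustration.
 */
static void
example_upgrade_step(spa_t *spa, dmu_tx_t *tx)
{
	if (spa->spa_ubsync.ub_version < SPA_VERSION_EXAMPLE &&
	    spa->spa_uberblock.ub_version >= SPA_VERSION_EXAMPLE) {
		/* one-time on-disk upgrade work goes here, charged to 'tx' */
	}
}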
6836
6837/*
6838 * Sync the specified transaction group. New blocks may be dirtied as
6839 * part of the process, so we iterate until it converges.
6840 */
6841void
6842spa_sync(spa_t *spa, uint64_t txg)
6843{
6844 dsl_pool_t *dp = spa->spa_dsl_pool;
6845 objset_t *mos = spa->spa_meta_objset;
6846 bplist_t *free_bpl = &spa->spa_free_bplist[txg & TXG_MASK];
6847 vdev_t *rvd = spa->spa_root_vdev;
6848 vdev_t *vd;
6849 dmu_tx_t *tx;
6850 int error;
6851 uint32_t max_queue_depth = zfs_vdev_async_write_max_active *
6852 zfs_vdev_queue_depth_pct / 100;
6853
6854 VERIFY(spa_writeable(spa));
6855
6856 /*
6857 * Lock out configuration changes.
6858 */
6859 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
6860
6861 spa->spa_syncing_txg = txg;
6862 spa->spa_sync_pass = 0;
6863
6864 mutex_enter(&spa->spa_alloc_lock);
6865 VERIFY0(avl_numnodes(&spa->spa_alloc_tree));
6866 mutex_exit(&spa->spa_alloc_lock);
6867
6868 /*
6869 * If there are any pending vdev state changes, convert them
6870 * into config changes that go out with this transaction group.
6871 */
6872 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
6873 while (list_head(&spa->spa_state_dirty_list) != NULL) {
6874 /*
6875 * We need the write lock here because, for aux vdevs,
6876 * calling vdev_config_dirty() modifies sav_config.
6877 * This is ugly and will become unnecessary when we
6878 * eliminate the aux vdev wart by integrating all vdevs
6879 * into the root vdev tree.
6880 */
6881 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
6882 spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_WRITER);
6883 while ((vd = list_head(&spa->spa_state_dirty_list)) != NULL) {
6884 vdev_state_clean(vd);
6885 vdev_config_dirty(vd);
6886 }
6887 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
6888 spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
6889 }
6890 spa_config_exit(spa, SCL_STATE, FTAG);
6891
6892 tx = dmu_tx_create_assigned(dp, txg);
6893
6894 spa->spa_sync_starttime = gethrtime();
6895#ifdef illumos
6896 VERIFY(cyclic_reprogram(spa->spa_deadman_cycid,
6897 spa->spa_sync_starttime + spa->spa_deadman_synctime));
6898#else /* !illumos */
6899#ifdef _KERNEL
6900 callout_schedule(&spa->spa_deadman_cycid,
6901 hz * spa->spa_deadman_synctime / NANOSEC);
6902#endif
6903#endif /* illumos */
6904
6905 /*
6906 * If we are upgrading to SPA_VERSION_RAIDZ_DEFLATE this txg,
6907 * set spa_deflate if we have no raid-z vdevs.
6908 */
6909 if (spa->spa_ubsync.ub_version < SPA_VERSION_RAIDZ_DEFLATE &&
6910 spa->spa_uberblock.ub_version >= SPA_VERSION_RAIDZ_DEFLATE) {
6911 int i;
6912
6913 for (i = 0; i < rvd->vdev_children; i++) {
6914 vd = rvd->vdev_child[i];
6915 if (vd->vdev_deflate_ratio != SPA_MINBLOCKSIZE)
6916 break;
6917 }
6918 if (i == rvd->vdev_children) {
6919 spa->spa_deflate = TRUE;
6920 VERIFY(0 == zap_add(spa->spa_meta_objset,
6921 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
6922 sizeof (uint64_t), 1, &spa->spa_deflate, tx));
6923 }
6924 }
6925
6926 /*
6927 * Set the top-level vdev's max queue depth. Evaluate each
6928 * top-level's async write queue depth in case it changed.
6929 * The max queue depth will not change in the middle of syncing
6930 * out this txg.
6931 */
6932 uint64_t queue_depth_total = 0;
6933 for (int c = 0; c < rvd->vdev_children; c++) {
6934 vdev_t *tvd = rvd->vdev_child[c];
6935 metaslab_group_t *mg = tvd->vdev_mg;
6936
6937 if (mg == NULL || mg->mg_class != spa_normal_class(spa) ||
6938 !metaslab_group_initialized(mg))
6939 continue;
6940
6941 /*
6942 * It is safe to do a lock-free check here because only async
6943 * allocations look at mg_max_alloc_queue_depth, and async
6944 * allocations all happen from spa_sync().
6945 */
6946 ASSERT0(refcount_count(&mg->mg_alloc_queue_depth));
6947 mg->mg_max_alloc_queue_depth = max_queue_depth;
6948 queue_depth_total += mg->mg_max_alloc_queue_depth;
6949 }
6950 metaslab_class_t *mc = spa_normal_class(spa);
6951 ASSERT0(refcount_count(&mc->mc_alloc_slots));
6952 mc->mc_alloc_max_slots = queue_depth_total;
6953 mc->mc_alloc_throttle_enabled = zio_dva_throttle_enabled;
6954
6955 ASSERT3U(mc->mc_alloc_max_slots, <=,
6956 max_queue_depth * rvd->vdev_children);
6957
6958 /*
6959 * Iterate to convergence.
6960 */
6961 do {
6962 int pass = ++spa->spa_sync_pass;
6963
6964 spa_sync_config_object(spa, tx);
6965 spa_sync_aux_dev(spa, &spa->spa_spares, tx,
6966 ZPOOL_CONFIG_SPARES, DMU_POOL_SPARES);
6967 spa_sync_aux_dev(spa, &spa->spa_l2cache, tx,
6968 ZPOOL_CONFIG_L2CACHE, DMU_POOL_L2CACHE);
6969 spa_errlog_sync(spa, txg);
6970 dsl_pool_sync(dp, txg);
6971
6972 if (pass < zfs_sync_pass_deferred_free) {
6973 spa_sync_frees(spa, free_bpl, tx);
6974 } else {
6975 /*
6976 * We can not defer frees in pass 1, because
6977 * we sync the deferred frees later in pass 1.
6978 */
6979 ASSERT3U(pass, >, 1);
6980 bplist_iterate(free_bpl, bpobj_enqueue_cb,
6981 &spa->spa_deferred_bpobj, tx);
6982 }
6983
6984 ddt_sync(spa, txg);
6985 dsl_scan_sync(dp, tx);
6986
6987 while (vd = txg_list_remove(&spa->spa_vdev_txg_list, txg))
6988 vdev_sync(vd, txg);
6989
6990 if (pass == 1) {
6991 spa_sync_upgrades(spa, tx);
6992 ASSERT3U(txg, >=,
6993 spa->spa_uberblock.ub_rootbp.blk_birth);
6994 /*
6995 * Note: We need to check if the MOS is dirty
6996 * because we could have marked the MOS dirty
6997 * without updating the uberblock (e.g. if we
6998 * have sync tasks but no dirty user data). We
6999 * need to check the uberblock's rootbp because
7000 * it is updated if we have synced out dirty
7001 * data (though in this case the MOS will most
7002 * likely also be dirty due to second order
7003 * effects, we don't want to rely on that here).
7004 */
7005 if (spa->spa_uberblock.ub_rootbp.blk_birth < txg &&
7006 !dmu_objset_is_dirty(mos, txg)) {
7007 /*
7008 * Nothing changed on the first pass,
7009 * therefore this TXG is a no-op. Avoid
7010 * syncing deferred frees, so that we
7011 * can keep this TXG as a no-op.
7012 */
7013 ASSERT(txg_list_empty(&dp->dp_dirty_datasets,
7014 txg));
7015 ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg));
7016 ASSERT(txg_list_empty(&dp->dp_sync_tasks, txg));
7017 break;
7018 }
7019 spa_sync_deferred_frees(spa, tx);
7020 }
7021
7022 } while (dmu_objset_is_dirty(mos, txg));
7023
7024 if (!list_is_empty(&spa->spa_config_dirty_list)) {
7025 /*
7026 * Make sure that the number of ZAPs for all the vdevs matches
7027 * the number of ZAPs in the per-vdev ZAP list. This only gets
7028 * called if the config is dirty; otherwise there may be
7029 * outstanding AVZ operations that weren't completed in
7030 * spa_sync_config_object.
7031 */
7032 uint64_t all_vdev_zap_entry_count;
7033 ASSERT0(zap_count(spa->spa_meta_objset,
7034 spa->spa_all_vdev_zaps, &all_vdev_zap_entry_count));
7035 ASSERT3U(vdev_count_verify_zaps(spa->spa_root_vdev), ==,
7036 all_vdev_zap_entry_count);
7037 }
7038
7039 /*
7040 * Rewrite the vdev configuration (which includes the uberblock)
7041 * to commit the transaction group.
7042 *
7043 * If there are no dirty vdevs, we sync the uberblock to a few
7044 * random top-level vdevs that are known to be visible in the
7045 * config cache (see spa_vdev_add() for a complete description).
7046 * If there *are* dirty vdevs, sync the uberblock to all vdevs.
7047 */
7048 for (;;) {
7049 /*
7050 * We hold SCL_STATE to prevent vdev open/close/etc.
7051 * while we're attempting to write the vdev labels.
7052 */
7053 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
7054
7055 if (list_is_empty(&spa->spa_config_dirty_list)) {
7056 vdev_t *svd[SPA_DVAS_PER_BP];
7057 int svdcount = 0;
7058 int children = rvd->vdev_children;
7059 int c0 = spa_get_random(children);
7060
7061 for (int c = 0; c < children; c++) {
7062 vd = rvd->vdev_child[(c0 + c) % children];
7063 if (vd->vdev_ms_array == 0 || vd->vdev_islog)
7064 continue;
7065 svd[svdcount++] = vd;
7066 if (svdcount == SPA_DVAS_PER_BP)
7067 break;
7068 }
7069 error = vdev_config_sync(svd, svdcount, txg);
7070 } else {
7071 error = vdev_config_sync(rvd->vdev_child,
7072 rvd->vdev_children, txg);
7073 }
7074
7075 if (error == 0)
7076 spa->spa_last_synced_guid = rvd->vdev_guid;
7077
7078 spa_config_exit(spa, SCL_STATE, FTAG);
7079
7080 if (error == 0)
7081 break;
7082 zio_suspend(spa, NULL);
7083 zio_resume_wait(spa);
7084 }
7085 dmu_tx_commit(tx);
7086
7087#ifdef illumos
7088 VERIFY(cyclic_reprogram(spa->spa_deadman_cycid, CY_INFINITY));
7089#else /* !illumos */
7090#ifdef _KERNEL
7091 callout_drain(&spa->spa_deadman_cycid);
7092#endif
7093#endif /* illumos */
7094
7095 /*
7096 * Clear the dirty config list.
7097 */
7098 while ((vd = list_head(&spa->spa_config_dirty_list)) != NULL)
7099 vdev_config_clean(vd);
7100
7101 /*
7102 * Now that the new config has synced transactionally,
7103 * let it become visible to the config cache.
7104 */
7105 if (spa->spa_config_syncing != NULL) {
7106 spa_config_set(spa, spa->spa_config_syncing);
7107 spa->spa_config_txg = txg;
7108 spa->spa_config_syncing = NULL;
7109 }
7110
7111 dsl_pool_sync_done(dp, txg);
7112
7113 mutex_enter(&spa->spa_alloc_lock);
7114 VERIFY0(avl_numnodes(&spa->spa_alloc_tree));
7115 mutex_exit(&spa->spa_alloc_lock);
7116
7117 /*
7118 * Update usable space statistics.
7119 */
7120 while (vd = txg_list_remove(&spa->spa_vdev_txg_list, TXG_CLEAN(txg)))
7121 vdev_sync_done(vd, txg);
7122
7123 spa_update_dspace(spa);
7124
7125 /*
7126 * It had better be the case that we didn't dirty anything
7127 * since vdev_config_sync().
7128 */
7129 ASSERT(txg_list_empty(&dp->dp_dirty_datasets, txg));
7130 ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg));
7131 ASSERT(txg_list_empty(&spa->spa_vdev_txg_list, txg));
7132
7133 spa->spa_sync_pass = 0;
7134
7135 /*
7136 * Update the last synced uberblock here. We want to do this at
7137 * the end of spa_sync() so that consumers of spa_last_synced_txg()
7138 * will be guaranteed that all the processing associated with
7139 * that txg has been completed.
7140 */
7141 spa->spa_ubsync = spa->spa_uberblock;
7142 spa_config_exit(spa, SCL_CONFIG, FTAG);
7143
7144 spa_handle_ignored_writes(spa);
7145
7146 /*
7147 * If any async tasks have been requested, kick them off.
7148 */
7149 spa_async_dispatch(spa);
7150 spa_async_dispatch_vd(spa);
7151}
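
/*
 * Illustrative sketch (not part of spa.c): the consumer-side contract of
 * spa_sync().  txg_wait_synced() blocks until the sync thread has completed
 * spa_sync() for the requested txg, at which point spa_last_synced_txg()
 * reflects it, because spa_ubsync is only copied from spa_uberblock at the
 * very end of spa_sync() above.
 */
static void
example_wait_for_sync(spa_t *spa)
{
	uint64_t txg = spa_syncing_txg(spa);

	txg_wait_synced(spa_get_dsl(spa), txg);
	ASSERT3U(spa_last_synced_txg(spa), >=, txg);
}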
7152
7153/*
7154 * Sync all pools. We don't want to hold the namespace lock across these
7155 * operations, so we take a reference on the spa_t and drop the lock during the
7156 * sync.
7157 */
7158void
7159spa_sync_allpools(void)
7160{
7161 spa_t *spa = NULL;
7162 mutex_enter(&spa_namespace_lock);
7163 while ((spa = spa_next(spa)) != NULL) {
7164 if (spa_state(spa) != POOL_STATE_ACTIVE ||
7165 !spa_writeable(spa) || spa_suspended(spa))
7166 continue;
7167 spa_open_ref(spa, FTAG);
7168 mutex_exit(&spa_namespace_lock);
7169 txg_wait_synced(spa_get_dsl(spa), 0);
7170 mutex_enter(&spa_namespace_lock);
7171 spa_close(spa, FTAG);
7172 }
7173 mutex_exit(&spa_namespace_lock);
7174}
7175
7176/*
7177 * ==========================================================================
7178 * Miscellaneous routines
7179 * ==========================================================================
7180 */
7181
7182/*
7183 * Remove all pools in the system.
7184 */
7185void
7186spa_evict_all(void)
7187{
7188 spa_t *spa;
7189
7190 /*
7191 * Remove all cached state. All pools should be closed now,
7192 * so every spa in the AVL tree should be unreferenced.
7193 */
7194 mutex_enter(&spa_namespace_lock);
7195 while ((spa = spa_next(NULL)) != NULL) {
7196 /*
7197 * Stop async tasks. The async thread may need to detach
7198 * a device that's been replaced, which requires grabbing
7199 * spa_namespace_lock, so we must drop it here.
7200 */
7201 spa_open_ref(spa, FTAG);
7202 mutex_exit(&spa_namespace_lock);
7203 spa_async_suspend(spa);
7204 mutex_enter(&spa_namespace_lock);
7205 spa_close(spa, FTAG);
7206
7207 if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
7208 spa_unload(spa);
7209 spa_deactivate(spa);
7210 }
7211 spa_remove(spa);
7212 }
7213 mutex_exit(&spa_namespace_lock);
7214}
7215
7216vdev_t *
7217spa_lookup_by_guid(spa_t *spa, uint64_t guid, boolean_t aux)
7218{
7219 vdev_t *vd;
7220 int i;
7221
7222 if ((vd = vdev_lookup_by_guid(spa->spa_root_vdev, guid)) != NULL)
7223 return (vd);
7224
7225 if (aux) {
7226 for (i = 0; i < spa->spa_l2cache.sav_count; i++) {
7227 vd = spa->spa_l2cache.sav_vdevs[i];
7228 if (vd->vdev_guid == guid)
7229 return (vd);
7230 }
7231
7232 for (i = 0; i < spa->spa_spares.sav_count; i++) {
7233 vd = spa->spa_spares.sav_vdevs[i];
7234 if (vd->vdev_guid == guid)
7235 return (vd);
7236 }
7237 }
7238
7239 return (NULL);
7240}
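
/*
 * Illustrative sketch (not part of spa.c): a typical caller of
 * spa_lookup_by_guid() holds at least a reader on SCL_VDEV (or a wider set
 * of config locks) so that neither the vdev tree nor the aux lists can
 * change underneath the lookup.
 */
static boolean_t
example_pool_has_guid(spa_t *spa, uint64_t guid)
{
	boolean_t found;

	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
	found = (spa_lookup_by_guid(spa, guid, B_TRUE) != NULL);
	spa_config_exit(spa, SCL_VDEV, FTAG);

	return (found);
}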
7241
7242void
7243spa_upgrade(spa_t *spa, uint64_t version)
7244{
7245 ASSERT(spa_writeable(spa));
7246
7247 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
7248
7249 /*
7250 * This should only be called for a non-faulted pool, and since a
7251 * future version would result in an unopenable pool, this shouldn't be
7252 * possible.
7253 */
7254 ASSERT(SPA_VERSION_IS_SUPPORTED(spa->spa_uberblock.ub_version));
7255 ASSERT3U(version, >=, spa->spa_uberblock.ub_version);
7256
7257 spa->spa_uberblock.ub_version = version;
7258 vdev_config_dirty(spa->spa_root_vdev);
7259
7260 spa_config_exit(spa, SCL_ALL, FTAG);
7261
7262 txg_wait_synced(spa_get_dsl(spa), 0);
7263}
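
/*
 * Illustrative sketch (not part of spa.c): upgrading a pool to the newest
 * supported on-disk format.  spa_upgrade() only bumps the in-core uberblock
 * version and dirties the config; the txg_wait_synced() call inside it is
 * what makes the new version durable on disk.
 */
static void
example_upgrade_to_latest(spa_t *spa)
{
	if (spa_version(spa) < SPA_VERSION)
		spa_upgrade(spa, SPA_VERSION);
}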
7264
7265boolean_t
7266spa_has_spare(spa_t *spa, uint64_t guid)
7267{
7268 int i;
7269 uint64_t spareguid;
7270 spa_aux_vdev_t *sav = &spa->spa_spares;
7271
7272 for (i = 0; i < sav->sav_count; i++)
7273 if (sav->sav_vdevs[i]->vdev_guid == guid)
7274 return (B_TRUE);
7275
7276 for (i = 0; i < sav->sav_npending; i++) {
7277 if (nvlist_lookup_uint64(sav->sav_pending[i], ZPOOL_CONFIG_GUID,
7278 &spareguid) == 0 && spareguid == guid)
7279 return (B_TRUE);
7280 }
7281
7282 return (B_FALSE);
7283}
7284
7285/*
7286 * Check if a pool has an active shared spare device.
7287 * Note: reference count of an active spare is 2, as a spare and as a replacement
7288 */
7289static boolean_t
7290spa_has_active_shared_spare(spa_t *spa)
7291{
7292 int i, refcnt;
7293 uint64_t pool;
7294 spa_aux_vdev_t *sav = &spa->spa_spares;
7295
7296 for (i = 0; i < sav->sav_count; i++) {
7297 if (spa_spare_exists(sav->sav_vdevs[i]->vdev_guid, &pool,
7298 &refcnt) && pool != 0ULL && pool == spa_guid(spa) &&
7299 refcnt > 2)
7300 return (B_TRUE);
7301 }
7302
7303 return (B_FALSE);
7304}
7305
7306static sysevent_t *
7307spa_event_create(spa_t *spa, vdev_t *vd, const char *name)
7308{
7309 sysevent_t *ev = NULL;
7310#ifdef _KERNEL
7311 sysevent_attr_list_t *attr = NULL;
7312 sysevent_value_t value;
7313
7314 ev = sysevent_alloc(EC_ZFS, (char *)name, SUNW_KERN_PUB "zfs",
7315 SE_SLEEP);
7316 ASSERT(ev != NULL);
7317
7318 value.value_type = SE_DATA_TYPE_STRING;
7319 value.value.sv_string = spa_name(spa);
7320 if (sysevent_add_attr(&attr, ZFS_EV_POOL_NAME, &value, SE_SLEEP) != 0)
7321 goto done;
7322
7323 value.value_type = SE_DATA_TYPE_UINT64;
7324 value.value.sv_uint64 = spa_guid(spa);
7325 if (sysevent_add_attr(&attr, ZFS_EV_POOL_GUID, &value, SE_SLEEP) != 0)
7326 goto done;
7327
7328 if (vd) {
7329 value.value_type = SE_DATA_TYPE_UINT64;
7330 value.value.sv_uint64 = vd->vdev_guid;
7331 if (sysevent_add_attr(&attr, ZFS_EV_VDEV_GUID, &value,
7332 SE_SLEEP) != 0)
7333 goto done;
7334
7335 if (vd->vdev_path) {
7336 value.value_type = SE_DATA_TYPE_STRING;
7337 value.value.sv_string = vd->vdev_path;
7338 if (sysevent_add_attr(&attr, ZFS_EV_VDEV_PATH,
7339 &value, SE_SLEEP) != 0)
7340 goto done;
7341 }
7342 }
7343
7344 if (sysevent_attach_attributes(ev, attr) != 0)
7345 goto done;
7346 attr = NULL;
7347
7348done:
7349 if (attr)
7350 sysevent_free_attr(attr);
7351
7352#endif
7353 return (ev);
7354}
7355
7356static void
7357spa_event_post(sysevent_t *ev)
7358{
7359#ifdef _KERNEL
7360 sysevent_id_t eid;
7361
7362 (void) log_sysevent(ev, SE_SLEEP, &eid);
7363 sysevent_free(ev);
7364#endif
7365}
7366
7367/*
7368 * Post a sysevent corresponding to the given event. The 'name' must be one of
7369 * the event definitions in sys/sysevent/eventdefs.h. The payload will be
7370 * filled in from the spa and (optionally) the vdev. This doesn't do anything
7371 * in the userland libzpool, as we don't want consumers to misinterpret ztest
7372 * or zdb as real changes.
7373 */
7374void
7375spa_event_notify(spa_t *spa, vdev_t *vd, const char *name)
7376{
7377 spa_event_post(spa_event_create(spa, vd, name));
7378}
6511 spa->spa_all_vdev_zaps != 0);
6512
6513 if (spa->spa_avz_action == AVZ_ACTION_REBUILD) {
6514 /* Make and build the new AVZ */
6515 uint64_t new_avz = zap_create(spa->spa_meta_objset,
6516 DMU_OTN_ZAP_METADATA, DMU_OT_NONE, 0, tx);
6517 spa_avz_build(spa->spa_root_vdev, new_avz, tx);
6518
6519 /* Diff old AVZ with new one */
6520 zap_cursor_t zc;
6521 zap_attribute_t za;
6522
6523 for (zap_cursor_init(&zc, spa->spa_meta_objset,
6524 spa->spa_all_vdev_zaps);
6525 zap_cursor_retrieve(&zc, &za) == 0;
6526 zap_cursor_advance(&zc)) {
6527 uint64_t vdzap = za.za_first_integer;
6528 if (zap_lookup_int(spa->spa_meta_objset, new_avz,
6529 vdzap) == ENOENT) {
6530 /*
6531 * ZAP is listed in old AVZ but not in new one;
6532 * destroy it
6533 */
6534 VERIFY0(zap_destroy(spa->spa_meta_objset, vdzap,
6535 tx));
6536 }
6537 }
6538
6539 zap_cursor_fini(&zc);
6540
6541 /* Destroy the old AVZ */
6542 VERIFY0(zap_destroy(spa->spa_meta_objset,
6543 spa->spa_all_vdev_zaps, tx));
6544
6545 /* Replace the old AVZ in the dir obj with the new one */
6546 VERIFY0(zap_update(spa->spa_meta_objset,
6547 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_VDEV_ZAP_MAP,
6548 sizeof (new_avz), 1, &new_avz, tx));
6549
6550 spa->spa_all_vdev_zaps = new_avz;
6551 } else if (spa->spa_avz_action == AVZ_ACTION_DESTROY) {
6552 zap_cursor_t zc;
6553 zap_attribute_t za;
6554
6555 /* Walk through the AVZ and destroy all listed ZAPs */
6556 for (zap_cursor_init(&zc, spa->spa_meta_objset,
6557 spa->spa_all_vdev_zaps);
6558 zap_cursor_retrieve(&zc, &za) == 0;
6559 zap_cursor_advance(&zc)) {
6560 uint64_t zap = za.za_first_integer;
6561 VERIFY0(zap_destroy(spa->spa_meta_objset, zap, tx));
6562 }
6563
6564 zap_cursor_fini(&zc);
6565
6566 /* Destroy and unlink the AVZ itself */
6567 VERIFY0(zap_destroy(spa->spa_meta_objset,
6568 spa->spa_all_vdev_zaps, tx));
6569 VERIFY0(zap_remove(spa->spa_meta_objset,
6570 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_VDEV_ZAP_MAP, tx));
6571 spa->spa_all_vdev_zaps = 0;
6572 }
6573
6574 if (spa->spa_all_vdev_zaps == 0) {
6575 spa->spa_all_vdev_zaps = zap_create_link(spa->spa_meta_objset,
6576 DMU_OTN_ZAP_METADATA, DMU_POOL_DIRECTORY_OBJECT,
6577 DMU_POOL_VDEV_ZAP_MAP, tx);
6578 }
6579 spa->spa_avz_action = AVZ_ACTION_NONE;
6580
6581 /* Create ZAPs for vdevs that don't have them. */
6582 vdev_construct_zaps(spa->spa_root_vdev, tx);
6583
6584 config = spa_config_generate(spa, spa->spa_root_vdev,
6585 dmu_tx_get_txg(tx), B_FALSE);
6586
6587 /*
6588 * If we're upgrading the spa version then make sure that
6589 * the config object gets updated with the correct version.
6590 */
6591 if (spa->spa_ubsync.ub_version < spa->spa_uberblock.ub_version)
6592 fnvlist_add_uint64(config, ZPOOL_CONFIG_VERSION,
6593 spa->spa_uberblock.ub_version);
6594
6595 spa_config_exit(spa, SCL_STATE, FTAG);
6596
6597 nvlist_free(spa->spa_config_syncing);
6598 spa->spa_config_syncing = config;
6599
6600 spa_sync_nvlist(spa, spa->spa_config_object, config, tx);
6601}
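
/*
 * Illustrative sketch (not part of spa.c): the AVZ maintained above is an
 * int-keyed ZAP whose entries are the per-vdev ZAP object numbers, so
 * membership can be tested with zap_lookup_int(), just as the rebuild diff
 * loop above does.
 */
static boolean_t
example_avz_contains(spa_t *spa, uint64_t vdev_zap_obj)
{
	return (zap_lookup_int(spa->spa_meta_objset, spa->spa_all_vdev_zaps,
	    vdev_zap_obj) == 0);
}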
6602
6603static void
6604spa_sync_version(void *arg, dmu_tx_t *tx)
6605{
6606 uint64_t *versionp = arg;
6607 uint64_t version = *versionp;
6608 spa_t *spa = dmu_tx_pool(tx)->dp_spa;
6609
6610 /*
6611 * Setting the version is special cased when first creating the pool.
6612 */
6613 ASSERT(tx->tx_txg != TXG_INITIAL);
6614
6615 ASSERT(SPA_VERSION_IS_SUPPORTED(version));
6616 ASSERT(version >= spa_version(spa));
6617
6618 spa->spa_uberblock.ub_version = version;
6619 vdev_config_dirty(spa->spa_root_vdev);
6620 spa_history_log_internal(spa, "set", tx, "version=%lld", version);
6621}
6622
6623/*
6624 * Set zpool properties.
6625 */
6626static void
6627spa_sync_props(void *arg, dmu_tx_t *tx)
6628{
6629 nvlist_t *nvp = arg;
6630 spa_t *spa = dmu_tx_pool(tx)->dp_spa;
6631 objset_t *mos = spa->spa_meta_objset;
6632 nvpair_t *elem = NULL;
6633
6634 mutex_enter(&spa->spa_props_lock);
6635
6636 while ((elem = nvlist_next_nvpair(nvp, elem))) {
6637 uint64_t intval;
6638 char *strval, *fname;
6639 zpool_prop_t prop;
6640 const char *propname;
6641 zprop_type_t proptype;
6642 spa_feature_t fid;
6643
6644 switch (prop = zpool_name_to_prop(nvpair_name(elem))) {
6645 case ZPROP_INVAL:
6646 /*
6647 * We checked this earlier in spa_prop_validate().
6648 */
6649 ASSERT(zpool_prop_feature(nvpair_name(elem)));
6650
6651 fname = strchr(nvpair_name(elem), '@') + 1;
6652 VERIFY0(zfeature_lookup_name(fname, &fid));
6653
6654 spa_feature_enable(spa, fid, tx);
6655 spa_history_log_internal(spa, "set", tx,
6656 "%s=enabled", nvpair_name(elem));
6657 break;
6658
6659 case ZPOOL_PROP_VERSION:
6660 intval = fnvpair_value_uint64(elem);
6661 /*
6662			 * The version is synced separately before other
6663 * properties and should be correct by now.
6664 */
6665 ASSERT3U(spa_version(spa), >=, intval);
6666 break;
6667
6668 case ZPOOL_PROP_ALTROOT:
6669 /*
6670 * 'altroot' is a non-persistent property. It should
6671 * have been set temporarily at creation or import time.
6672 */
6673 ASSERT(spa->spa_root != NULL);
6674 break;
6675
6676 case ZPOOL_PROP_READONLY:
6677 case ZPOOL_PROP_CACHEFILE:
6678 /*
6679			 * 'readonly' and 'cachefile' are also non-persistent
6680 * properties.
6681 */
6682 break;
6683 case ZPOOL_PROP_COMMENT:
6684 strval = fnvpair_value_string(elem);
6685 if (spa->spa_comment != NULL)
6686 spa_strfree(spa->spa_comment);
6687 spa->spa_comment = spa_strdup(strval);
6688 /*
6689 * We need to dirty the configuration on all the vdevs
6690 * so that their labels get updated. It's unnecessary
6691 * to do this for pool creation since the vdev's
6692			 * configuration has already been dirtied.
6693 */
6694 if (tx->tx_txg != TXG_INITIAL)
6695 vdev_config_dirty(spa->spa_root_vdev);
6696 spa_history_log_internal(spa, "set", tx,
6697 "%s=%s", nvpair_name(elem), strval);
6698 break;
6699 default:
6700 /*
6701 * Set pool property values in the poolprops mos object.
6702 */
6703 if (spa->spa_pool_props_object == 0) {
6704 spa->spa_pool_props_object =
6705 zap_create_link(mos, DMU_OT_POOL_PROPS,
6706 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_PROPS,
6707 tx);
6708 }
6709
6710 /* normalize the property name */
6711 propname = zpool_prop_to_name(prop);
6712 proptype = zpool_prop_get_type(prop);
6713
6714 if (nvpair_type(elem) == DATA_TYPE_STRING) {
6715 ASSERT(proptype == PROP_TYPE_STRING);
6716 strval = fnvpair_value_string(elem);
6717 VERIFY0(zap_update(mos,
6718 spa->spa_pool_props_object, propname,
6719 1, strlen(strval) + 1, strval, tx));
6720 spa_history_log_internal(spa, "set", tx,
6721 "%s=%s", nvpair_name(elem), strval);
6722 } else if (nvpair_type(elem) == DATA_TYPE_UINT64) {
6723 intval = fnvpair_value_uint64(elem);
6724
6725 if (proptype == PROP_TYPE_INDEX) {
6726 const char *unused;
6727 VERIFY0(zpool_prop_index_to_string(
6728 prop, intval, &unused));
6729 }
6730 VERIFY0(zap_update(mos,
6731 spa->spa_pool_props_object, propname,
6732 8, 1, &intval, tx));
6733 spa_history_log_internal(spa, "set", tx,
6734 "%s=%lld", nvpair_name(elem), intval);
6735 } else {
6736 ASSERT(0); /* not allowed */
6737 }
6738
6739 switch (prop) {
6740 case ZPOOL_PROP_DELEGATION:
6741 spa->spa_delegation = intval;
6742 break;
6743 case ZPOOL_PROP_BOOTFS:
6744 spa->spa_bootfs = intval;
6745 break;
6746 case ZPOOL_PROP_FAILUREMODE:
6747 spa->spa_failmode = intval;
6748 break;
6749 case ZPOOL_PROP_AUTOEXPAND:
6750 spa->spa_autoexpand = intval;
6751 if (tx->tx_txg != TXG_INITIAL)
6752 spa_async_request(spa,
6753 SPA_ASYNC_AUTOEXPAND);
6754 break;
6755 case ZPOOL_PROP_DEDUPDITTO:
6756 spa->spa_dedup_ditto = intval;
6757 break;
6758 default:
6759 break;
6760 }
6761 }
6762
6763 }
6764
6765 mutex_exit(&spa->spa_props_lock);
6766}
6767
6768/*
6769 * Perform one-time upgrade on-disk changes. spa_version() does not
6770 * reflect the new version this txg, so there must be no changes this
6771 * txg to anything that the upgrade code depends on after it executes.
6772 * Therefore this must be called after dsl_pool_sync() does the sync
6773 * tasks.
6774 */
6775static void
6776spa_sync_upgrades(spa_t *spa, dmu_tx_t *tx)
6777{
6778 dsl_pool_t *dp = spa->spa_dsl_pool;
6779
6780 ASSERT(spa->spa_sync_pass == 1);
6781
6782 rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG);
6783
6784 if (spa->spa_ubsync.ub_version < SPA_VERSION_ORIGIN &&
6785 spa->spa_uberblock.ub_version >= SPA_VERSION_ORIGIN) {
6786 dsl_pool_create_origin(dp, tx);
6787
6788 /* Keeping the origin open increases spa_minref */
6789 spa->spa_minref += 3;
6790 }
6791
6792 if (spa->spa_ubsync.ub_version < SPA_VERSION_NEXT_CLONES &&
6793 spa->spa_uberblock.ub_version >= SPA_VERSION_NEXT_CLONES) {
6794 dsl_pool_upgrade_clones(dp, tx);
6795 }
6796
6797 if (spa->spa_ubsync.ub_version < SPA_VERSION_DIR_CLONES &&
6798 spa->spa_uberblock.ub_version >= SPA_VERSION_DIR_CLONES) {
6799 dsl_pool_upgrade_dir_clones(dp, tx);
6800
6801 /* Keeping the freedir open increases spa_minref */
6802 spa->spa_minref += 3;
6803 }
6804
6805 if (spa->spa_ubsync.ub_version < SPA_VERSION_FEATURES &&
6806 spa->spa_uberblock.ub_version >= SPA_VERSION_FEATURES) {
6807 spa_feature_create_zap_objects(spa, tx);
6808 }
6809
6810 /*
6811	 * The LZ4_COMPRESS feature's behaviour was changed to activate_on_enable
6812	 * when the possibility to use lz4 compression for metadata was added.
6813	 * Old pools that have this feature enabled must be upgraded to have
6814	 * this feature active.
6815 */
6816 if (spa->spa_uberblock.ub_version >= SPA_VERSION_FEATURES) {
6817 boolean_t lz4_en = spa_feature_is_enabled(spa,
6818 SPA_FEATURE_LZ4_COMPRESS);
6819 boolean_t lz4_ac = spa_feature_is_active(spa,
6820 SPA_FEATURE_LZ4_COMPRESS);
6821
6822 if (lz4_en && !lz4_ac)
6823 spa_feature_incr(spa, SPA_FEATURE_LZ4_COMPRESS, tx);
6824 }
6825
6826 /*
6827 * If we haven't written the salt, do so now. Note that the
6828 * feature may not be activated yet, but that's fine since
6829 * the presence of this ZAP entry is backwards compatible.
6830 */
6831 if (zap_contains(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
6832 DMU_POOL_CHECKSUM_SALT) == ENOENT) {
6833 VERIFY0(zap_add(spa->spa_meta_objset,
6834 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CHECKSUM_SALT, 1,
6835 sizeof (spa->spa_cksum_salt.zcs_bytes),
6836 spa->spa_cksum_salt.zcs_bytes, tx));
6837 }
6838
6839 rrw_exit(&dp->dp_config_rwlock, FTAG);
6840}
6841
6842/*
6843 * Sync the specified transaction group. New blocks may be dirtied as
6844 * part of the process, so we iterate until it converges.
6845 */
6846void
6847spa_sync(spa_t *spa, uint64_t txg)
6848{
6849 dsl_pool_t *dp = spa->spa_dsl_pool;
6850 objset_t *mos = spa->spa_meta_objset;
6851 bplist_t *free_bpl = &spa->spa_free_bplist[txg & TXG_MASK];
6852 vdev_t *rvd = spa->spa_root_vdev;
6853 vdev_t *vd;
6854 dmu_tx_t *tx;
6855 int error;
6856 uint32_t max_queue_depth = zfs_vdev_async_write_max_active *
6857 zfs_vdev_queue_depth_pct / 100;
6858
6859 VERIFY(spa_writeable(spa));
6860
6861 /*
6862 * Lock out configuration changes.
6863 */
6864 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
6865
6866 spa->spa_syncing_txg = txg;
6867 spa->spa_sync_pass = 0;
6868
6869 mutex_enter(&spa->spa_alloc_lock);
6870 VERIFY0(avl_numnodes(&spa->spa_alloc_tree));
6871 mutex_exit(&spa->spa_alloc_lock);
6872
6873 /*
6874 * If there are any pending vdev state changes, convert them
6875 * into config changes that go out with this transaction group.
6876 */
6877 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
6878 while (list_head(&spa->spa_state_dirty_list) != NULL) {
6879 /*
6880 * We need the write lock here because, for aux vdevs,
6881 * calling vdev_config_dirty() modifies sav_config.
6882 * This is ugly and will become unnecessary when we
6883 * eliminate the aux vdev wart by integrating all vdevs
6884 * into the root vdev tree.
6885 */
6886 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
6887 spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_WRITER);
6888 while ((vd = list_head(&spa->spa_state_dirty_list)) != NULL) {
6889 vdev_state_clean(vd);
6890 vdev_config_dirty(vd);
6891 }
6892 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
6893 spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
6894 }
6895 spa_config_exit(spa, SCL_STATE, FTAG);
6896
6897 tx = dmu_tx_create_assigned(dp, txg);
6898
6899 spa->spa_sync_starttime = gethrtime();
6900#ifdef illumos
6901 VERIFY(cyclic_reprogram(spa->spa_deadman_cycid,
6902 spa->spa_sync_starttime + spa->spa_deadman_synctime));
6903#else /* !illumos */
6904#ifdef _KERNEL
6905 callout_schedule(&spa->spa_deadman_cycid,
6906 hz * spa->spa_deadman_synctime / NANOSEC);
6907#endif
6908#endif /* illumos */
6909
6910 /*
6911 * If we are upgrading to SPA_VERSION_RAIDZ_DEFLATE this txg,
6912 * set spa_deflate if we have no raid-z vdevs.
6913 */
6914 if (spa->spa_ubsync.ub_version < SPA_VERSION_RAIDZ_DEFLATE &&
6915 spa->spa_uberblock.ub_version >= SPA_VERSION_RAIDZ_DEFLATE) {
6916 int i;
6917
6918 for (i = 0; i < rvd->vdev_children; i++) {
6919 vd = rvd->vdev_child[i];
6920 if (vd->vdev_deflate_ratio != SPA_MINBLOCKSIZE)
6921 break;
6922 }
6923 if (i == rvd->vdev_children) {
6924 spa->spa_deflate = TRUE;
6925 VERIFY(0 == zap_add(spa->spa_meta_objset,
6926 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
6927 sizeof (uint64_t), 1, &spa->spa_deflate, tx));
6928 }
6929 }
6930
6931 /*
6932 * Set the top-level vdev's max queue depth. Evaluate each
6933 * top-level's async write queue depth in case it changed.
6934 * The max queue depth will not change in the middle of syncing
6935 * out this txg.
6936 */
6937 uint64_t queue_depth_total = 0;
6938 for (int c = 0; c < rvd->vdev_children; c++) {
6939 vdev_t *tvd = rvd->vdev_child[c];
6940 metaslab_group_t *mg = tvd->vdev_mg;
6941
6942 if (mg == NULL || mg->mg_class != spa_normal_class(spa) ||
6943 !metaslab_group_initialized(mg))
6944 continue;
6945
6946 /*
6947 * It is safe to do a lock-free check here because only async
6948 * allocations look at mg_max_alloc_queue_depth, and async
6949 * allocations all happen from spa_sync().
6950 */
6951 ASSERT0(refcount_count(&mg->mg_alloc_queue_depth));
6952 mg->mg_max_alloc_queue_depth = max_queue_depth;
6953 queue_depth_total += mg->mg_max_alloc_queue_depth;
6954 }
6955 metaslab_class_t *mc = spa_normal_class(spa);
6956 ASSERT0(refcount_count(&mc->mc_alloc_slots));
6957 mc->mc_alloc_max_slots = queue_depth_total;
6958 mc->mc_alloc_throttle_enabled = zio_dva_throttle_enabled;
6959
6960 ASSERT3U(mc->mc_alloc_max_slots, <=,
6961 max_queue_depth * rvd->vdev_children);
6962
6963 /*
6964 * Iterate to convergence.
6965 */
6966 do {
6967 int pass = ++spa->spa_sync_pass;
6968
6969 spa_sync_config_object(spa, tx);
6970 spa_sync_aux_dev(spa, &spa->spa_spares, tx,
6971 ZPOOL_CONFIG_SPARES, DMU_POOL_SPARES);
6972 spa_sync_aux_dev(spa, &spa->spa_l2cache, tx,
6973 ZPOOL_CONFIG_L2CACHE, DMU_POOL_L2CACHE);
6974 spa_errlog_sync(spa, txg);
6975 dsl_pool_sync(dp, txg);
6976
6977 if (pass < zfs_sync_pass_deferred_free) {
6978 spa_sync_frees(spa, free_bpl, tx);
6979 } else {
6980 /*
6981 * We can not defer frees in pass 1, because
6982 * we sync the deferred frees later in pass 1.
6983 */
6984 ASSERT3U(pass, >, 1);
6985 bplist_iterate(free_bpl, bpobj_enqueue_cb,
6986 &spa->spa_deferred_bpobj, tx);
6987 }
6988
6989 ddt_sync(spa, txg);
6990 dsl_scan_sync(dp, tx);
6991
6992 while (vd = txg_list_remove(&spa->spa_vdev_txg_list, txg))
6993 vdev_sync(vd, txg);
6994
6995 if (pass == 1) {
6996 spa_sync_upgrades(spa, tx);
6997 ASSERT3U(txg, >=,
6998 spa->spa_uberblock.ub_rootbp.blk_birth);
6999 /*
7000 * Note: We need to check if the MOS is dirty
7001 * because we could have marked the MOS dirty
7002 * without updating the uberblock (e.g. if we
7003 * have sync tasks but no dirty user data). We
7004 * need to check the uberblock's rootbp because
7005 * it is updated if we have synced out dirty
7006 * data (though in this case the MOS will most
7007 * likely also be dirty due to second order
7008 * effects, we don't want to rely on that here).
7009 */
7010 if (spa->spa_uberblock.ub_rootbp.blk_birth < txg &&
7011 !dmu_objset_is_dirty(mos, txg)) {
7012 /*
7013 * Nothing changed on the first pass,
7014 * therefore this TXG is a no-op. Avoid
7015 * syncing deferred frees, so that we
7016 * can keep this TXG as a no-op.
7017 */
7018 ASSERT(txg_list_empty(&dp->dp_dirty_datasets,
7019 txg));
7020 ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg));
7021 ASSERT(txg_list_empty(&dp->dp_sync_tasks, txg));
7022 break;
7023 }
7024 spa_sync_deferred_frees(spa, tx);
7025 }
7026
7027 } while (dmu_objset_is_dirty(mos, txg));
7028
7029 if (!list_is_empty(&spa->spa_config_dirty_list)) {
7030 /*
7031 * Make sure that the number of ZAPs for all the vdevs matches
7032 * the number of ZAPs in the per-vdev ZAP list. This only gets
7033 * called if the config is dirty; otherwise there may be
7034 * outstanding AVZ operations that weren't completed in
7035 * spa_sync_config_object.
7036 */
7037 uint64_t all_vdev_zap_entry_count;
7038 ASSERT0(zap_count(spa->spa_meta_objset,
7039 spa->spa_all_vdev_zaps, &all_vdev_zap_entry_count));
7040 ASSERT3U(vdev_count_verify_zaps(spa->spa_root_vdev), ==,
7041 all_vdev_zap_entry_count);
7042 }
7043
7044 /*
7045 * Rewrite the vdev configuration (which includes the uberblock)
7046 * to commit the transaction group.
7047 *
7048 * If there are no dirty vdevs, we sync the uberblock to a few
7049 * random top-level vdevs that are known to be visible in the
7050 * config cache (see spa_vdev_add() for a complete description).
7051 * If there *are* dirty vdevs, sync the uberblock to all vdevs.
7052 */
7053 for (;;) {
7054 /*
7055 * We hold SCL_STATE to prevent vdev open/close/etc.
7056 * while we're attempting to write the vdev labels.
7057 */
7058 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
7059
7060 if (list_is_empty(&spa->spa_config_dirty_list)) {
7061 vdev_t *svd[SPA_DVAS_PER_BP];
7062 int svdcount = 0;
7063 int children = rvd->vdev_children;
7064 int c0 = spa_get_random(children);
7065
7066 for (int c = 0; c < children; c++) {
7067 vd = rvd->vdev_child[(c0 + c) % children];
7068 if (vd->vdev_ms_array == 0 || vd->vdev_islog)
7069 continue;
7070 svd[svdcount++] = vd;
7071 if (svdcount == SPA_DVAS_PER_BP)
7072 break;
7073 }
7074 error = vdev_config_sync(svd, svdcount, txg);
7075 } else {
7076 error = vdev_config_sync(rvd->vdev_child,
7077 rvd->vdev_children, txg);
7078 }
7079
7080 if (error == 0)
7081 spa->spa_last_synced_guid = rvd->vdev_guid;
7082
7083 spa_config_exit(spa, SCL_STATE, FTAG);
7084
7085 if (error == 0)
7086 break;
7087 zio_suspend(spa, NULL);
7088 zio_resume_wait(spa);
7089 }
7090 dmu_tx_commit(tx);
7091
7092#ifdef illumos
7093 VERIFY(cyclic_reprogram(spa->spa_deadman_cycid, CY_INFINITY));
7094#else /* !illumos */
7095#ifdef _KERNEL
7096 callout_drain(&spa->spa_deadman_cycid);
7097#endif
7098#endif /* illumos */
7099
7100 /*
7101 * Clear the dirty config list.
7102 */
7103 while ((vd = list_head(&spa->spa_config_dirty_list)) != NULL)
7104 vdev_config_clean(vd);
7105
7106 /*
7107 * Now that the new config has synced transactionally,
7108 * let it become visible to the config cache.
7109 */
7110 if (spa->spa_config_syncing != NULL) {
7111 spa_config_set(spa, spa->spa_config_syncing);
7112 spa->spa_config_txg = txg;
7113 spa->spa_config_syncing = NULL;
7114 }
7115
7116 dsl_pool_sync_done(dp, txg);
7117
7118 mutex_enter(&spa->spa_alloc_lock);
7119 VERIFY0(avl_numnodes(&spa->spa_alloc_tree));
7120 mutex_exit(&spa->spa_alloc_lock);
7121
7122 /*
7123 * Update usable space statistics.
7124 */
7125 while (vd = txg_list_remove(&spa->spa_vdev_txg_list, TXG_CLEAN(txg)))
7126 vdev_sync_done(vd, txg);
7127
7128 spa_update_dspace(spa);
7129
7130 /*
7131 * It had better be the case that we didn't dirty anything
7132 * since vdev_config_sync().
7133 */
7134 ASSERT(txg_list_empty(&dp->dp_dirty_datasets, txg));
7135 ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg));
7136 ASSERT(txg_list_empty(&spa->spa_vdev_txg_list, txg));
7137
7138 spa->spa_sync_pass = 0;
7139
7140 /*
7141 * Update the last synced uberblock here. We want to do this at
7142 * the end of spa_sync() so that consumers of spa_last_synced_txg()
7143 * will be guaranteed that all the processing associated with
7144 * that txg has been completed.
7145 */
7146 spa->spa_ubsync = spa->spa_uberblock;
7147 spa_config_exit(spa, SCL_CONFIG, FTAG);
7148
7149 spa_handle_ignored_writes(spa);
7150
7151 /*
7152 * If any async tasks have been requested, kick them off.
7153 */
7154 spa_async_dispatch(spa);
7155 spa_async_dispatch_vd(spa);
7156}
7157
7158/*
7159 * Sync all pools. We don't want to hold the namespace lock across these
7160 * operations, so we take a reference on the spa_t and drop the lock during the
7161 * sync.
7162 */
7163void
7164spa_sync_allpools(void)
7165{
7166 spa_t *spa = NULL;
7167 mutex_enter(&spa_namespace_lock);
7168 while ((spa = spa_next(spa)) != NULL) {
7169 if (spa_state(spa) != POOL_STATE_ACTIVE ||
7170 !spa_writeable(spa) || spa_suspended(spa))
7171 continue;
7172 spa_open_ref(spa, FTAG);
7173 mutex_exit(&spa_namespace_lock);
7174 txg_wait_synced(spa_get_dsl(spa), 0);
7175 mutex_enter(&spa_namespace_lock);
7176 spa_close(spa, FTAG);
7177 }
7178 mutex_exit(&spa_namespace_lock);
7179}
7180
7181/*
7182 * ==========================================================================
7183 * Miscellaneous routines
7184 * ==========================================================================
7185 */
7186
7187/*
7188 * Remove all pools in the system.
7189 */
7190void
7191spa_evict_all(void)
7192{
7193 spa_t *spa;
7194
7195 /*
7196 * Remove all cached state. All pools should be closed now,
7197 * so every spa in the AVL tree should be unreferenced.
7198 */
7199 mutex_enter(&spa_namespace_lock);
7200 while ((spa = spa_next(NULL)) != NULL) {
7201 /*
7202 * Stop async tasks. The async thread may need to detach
7203 * a device that's been replaced, which requires grabbing
7204 * spa_namespace_lock, so we must drop it here.
7205 */
7206 spa_open_ref(spa, FTAG);
7207 mutex_exit(&spa_namespace_lock);
7208 spa_async_suspend(spa);
7209 mutex_enter(&spa_namespace_lock);
7210 spa_close(spa, FTAG);
7211
7212 if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
7213 spa_unload(spa);
7214 spa_deactivate(spa);
7215 }
7216 spa_remove(spa);
7217 }
7218 mutex_exit(&spa_namespace_lock);
7219}
7220
7221vdev_t *
7222spa_lookup_by_guid(spa_t *spa, uint64_t guid, boolean_t aux)
7223{
7224 vdev_t *vd;
7225 int i;
7226
7227 if ((vd = vdev_lookup_by_guid(spa->spa_root_vdev, guid)) != NULL)
7228 return (vd);
7229
7230 if (aux) {
7231 for (i = 0; i < spa->spa_l2cache.sav_count; i++) {
7232 vd = spa->spa_l2cache.sav_vdevs[i];
7233 if (vd->vdev_guid == guid)
7234 return (vd);
7235 }
7236
7237 for (i = 0; i < spa->spa_spares.sav_count; i++) {
7238 vd = spa->spa_spares.sav_vdevs[i];
7239 if (vd->vdev_guid == guid)
7240 return (vd);
7241 }
7242 }
7243
7244 return (NULL);
7245}
7246
7247void
7248spa_upgrade(spa_t *spa, uint64_t version)
7249{
7250 ASSERT(spa_writeable(spa));
7251
7252 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
7253
7254 /*
7255 * This should only be called for a non-faulted pool, and since a
7256 * future version would result in an unopenable pool, this shouldn't be
7257 * possible.
7258 */
7259 ASSERT(SPA_VERSION_IS_SUPPORTED(spa->spa_uberblock.ub_version));
7260 ASSERT3U(version, >=, spa->spa_uberblock.ub_version);
7261
7262 spa->spa_uberblock.ub_version = version;
7263 vdev_config_dirty(spa->spa_root_vdev);
7264
7265 spa_config_exit(spa, SCL_ALL, FTAG);
7266
7267 txg_wait_synced(spa_get_dsl(spa), 0);
7268}
7269
7270boolean_t
7271spa_has_spare(spa_t *spa, uint64_t guid)
7272{
7273 int i;
7274 uint64_t spareguid;
7275 spa_aux_vdev_t *sav = &spa->spa_spares;
7276
7277 for (i = 0; i < sav->sav_count; i++)
7278 if (sav->sav_vdevs[i]->vdev_guid == guid)
7279 return (B_TRUE);
7280
7281 for (i = 0; i < sav->sav_npending; i++) {
7282 if (nvlist_lookup_uint64(sav->sav_pending[i], ZPOOL_CONFIG_GUID,
7283 &spareguid) == 0 && spareguid == guid)
7284 return (B_TRUE);
7285 }
7286
7287 return (B_FALSE);
7288}
7289
7290/*
7291 * Check if a pool has an active shared spare device.
7292 * Note: reference count of an active spare is 2, as a spare and as a replacement
7293 */
7294static boolean_t
7295spa_has_active_shared_spare(spa_t *spa)
7296{
7297 int i, refcnt;
7298 uint64_t pool;
7299 spa_aux_vdev_t *sav = &spa->spa_spares;
7300
7301 for (i = 0; i < sav->sav_count; i++) {
7302 if (spa_spare_exists(sav->sav_vdevs[i]->vdev_guid, &pool,
7303 &refcnt) && pool != 0ULL && pool == spa_guid(spa) &&
7304 refcnt > 2)
7305 return (B_TRUE);
7306 }
7307
7308 return (B_FALSE);
7309}
7310
7311static sysevent_t *
7312spa_event_create(spa_t *spa, vdev_t *vd, const char *name)
7313{
7314 sysevent_t *ev = NULL;
7315#ifdef _KERNEL
7316 sysevent_attr_list_t *attr = NULL;
7317 sysevent_value_t value;
7318
7319 ev = sysevent_alloc(EC_ZFS, (char *)name, SUNW_KERN_PUB "zfs",
7320 SE_SLEEP);
7321 ASSERT(ev != NULL);
7322
7323 value.value_type = SE_DATA_TYPE_STRING;
7324 value.value.sv_string = spa_name(spa);
7325 if (sysevent_add_attr(&attr, ZFS_EV_POOL_NAME, &value, SE_SLEEP) != 0)
7326 goto done;
7327
7328 value.value_type = SE_DATA_TYPE_UINT64;
7329 value.value.sv_uint64 = spa_guid(spa);
7330 if (sysevent_add_attr(&attr, ZFS_EV_POOL_GUID, &value, SE_SLEEP) != 0)
7331 goto done;
7332
7333 if (vd) {
7334 value.value_type = SE_DATA_TYPE_UINT64;
7335 value.value.sv_uint64 = vd->vdev_guid;
7336 if (sysevent_add_attr(&attr, ZFS_EV_VDEV_GUID, &value,
7337 SE_SLEEP) != 0)
7338 goto done;
7339
7340 if (vd->vdev_path) {
7341 value.value_type = SE_DATA_TYPE_STRING;
7342 value.value.sv_string = vd->vdev_path;
7343 if (sysevent_add_attr(&attr, ZFS_EV_VDEV_PATH,
7344 &value, SE_SLEEP) != 0)
7345 goto done;
7346 }
7347 }
7348
7349 if (sysevent_attach_attributes(ev, attr) != 0)
7350 goto done;
7351 attr = NULL;
7352
7353done:
7354 if (attr)
7355 sysevent_free_attr(attr);
7356
7357#endif
7358 return (ev);
7359}
7360
7361static void
7362spa_event_post(sysevent_t *ev)
7363{
7364#ifdef _KERNEL
7365 sysevent_id_t eid;
7366
7367 (void) log_sysevent(ev, SE_SLEEP, &eid);
7368 sysevent_free(ev);
7369#endif
7370}
7371
7372/*
7373 * Post a sysevent corresponding to the given event. The 'name' must be one of
7374 * the event definitions in sys/sysevent/eventdefs.h. The payload will be
7375 * filled in from the spa and (optionally) the vdev. This doesn't do anything
7376 * in the userland libzpool, as we don't want consumers to misinterpret ztest
7377 * or zdb as real changes.
7378 */
7379void
7380spa_event_notify(spa_t *spa, vdev_t *vd, const char *name)
7381{
7382 spa_event_post(spa_event_create(spa, vd, name));
7383}
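
/*
 * Illustrative sketch (not part of spa.c): posting a sysevent for a vdev
 * removal.  ESC_ZFS_VDEV_REMOVE is one of the event names defined in
 * sys/sysevent/eventdefs.h; in the userland libzpool build the notification
 * is a no-op, as described above.
 */
static void
example_notify_vdev_remove(spa_t *spa, vdev_t *vd)
{
	spa_event_notify(spa, vd, ESC_ZFS_VDEV_REMOVE);
}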