spa.c: revision 243480 (deleted lines) vs. revision 243674 (added lines)
1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE

--- 46 unchanged lines hidden (view full) ---

55#include <sys/dsl_prop.h>
56#include <sys/dsl_synctask.h>
57#include <sys/fs/zfs.h>
58#include <sys/arc.h>
59#include <sys/callb.h>
60#include <sys/spa_boot.h>
61#include <sys/zfs_ioctl.h>
62#include <sys/dsl_scan.h>
1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE

--- 46 unchanged lines hidden (view full) ---

55#include <sys/dsl_prop.h>
56#include <sys/dsl_synctask.h>
57#include <sys/fs/zfs.h>
58#include <sys/arc.h>
59#include <sys/callb.h>
60#include <sys/spa_boot.h>
61#include <sys/zfs_ioctl.h>
62#include <sys/dsl_scan.h>
63#include <sys/zfeature.h>
63#include <sys/zvol.h>
64
65#ifdef _KERNEL
66#include <sys/callb.h>
67#include <sys/cpupart.h>
68#include <sys/zone.h>
69#endif /* _KERNEL */
70

--- 41 unchanged lines hidden (view full) ---

112 { ZTI_ONE, ZTI_NULL, ZTI_ONE, ZTI_NULL },
113 { ZTI_FIX(8), ZTI_NULL, ZTI_BATCH, ZTI_NULL },
114 { ZTI_BATCH, ZTI_FIX(5), ZTI_FIX(8), ZTI_FIX(5) },
115 { ZTI_FIX(100), ZTI_NULL, ZTI_ONE, ZTI_NULL },
116 { ZTI_ONE, ZTI_NULL, ZTI_ONE, ZTI_NULL },
117 { ZTI_ONE, ZTI_NULL, ZTI_ONE, ZTI_NULL },
118};
119
64#include <sys/zvol.h>
65
66#ifdef _KERNEL
67#include <sys/callb.h>
68#include <sys/cpupart.h>
69#include <sys/zone.h>
70#endif /* _KERNEL */
71

--- 41 unchanged lines hidden (view full) ---

113 { ZTI_ONE, ZTI_NULL, ZTI_ONE, ZTI_NULL },
114 { ZTI_FIX(8), ZTI_NULL, ZTI_BATCH, ZTI_NULL },
115 { ZTI_BATCH, ZTI_FIX(5), ZTI_FIX(8), ZTI_FIX(5) },
116 { ZTI_FIX(100), ZTI_NULL, ZTI_ONE, ZTI_NULL },
117 { ZTI_ONE, ZTI_NULL, ZTI_ONE, ZTI_NULL },
118 { ZTI_ONE, ZTI_NULL, ZTI_ONE, ZTI_NULL },
119};
120
121static dsl_syncfunc_t spa_sync_version;
120static dsl_syncfunc_t spa_sync_props;
122static dsl_syncfunc_t spa_sync_props;
123static dsl_checkfunc_t spa_change_guid_check;
124static dsl_syncfunc_t spa_change_guid_sync;
121static boolean_t spa_has_active_shared_spare(spa_t *spa);
122static int spa_load_impl(spa_t *spa, uint64_t, nvlist_t *config,
123 spa_load_state_t state, spa_import_type_t type, boolean_t mosconfig,
124 char **ereport);
125static void spa_vdev_resilver_done(spa_t *spa);
126
127uint_t zio_taskq_batch_pct = 100; /* 1 thread per cpu in pset */
128#ifdef PSRSET_BIND

--- 42 unchanged lines hidden (view full) ---

171
172/*
173 * Get property values from the spa configuration.
174 */
175static void
176spa_prop_get_config(spa_t *spa, nvlist_t **nvp)
177{
178 vdev_t *rvd = spa->spa_root_vdev;
125static boolean_t spa_has_active_shared_spare(spa_t *spa);
126static int spa_load_impl(spa_t *spa, uint64_t, nvlist_t *config,
127 spa_load_state_t state, spa_import_type_t type, boolean_t mosconfig,
128 char **ereport);
129static void spa_vdev_resilver_done(spa_t *spa);
130
131uint_t zio_taskq_batch_pct = 100; /* 1 thread per cpu in pset */
132#ifdef PSRSET_BIND

--- 42 unchanged lines hidden (view full) ---

175
176/*
177 * Get property values from the spa configuration.
178 */
179static void
180spa_prop_get_config(spa_t *spa, nvlist_t **nvp)
181{
182 vdev_t *rvd = spa->spa_root_vdev;
183 dsl_pool_t *pool = spa->spa_dsl_pool;
179 uint64_t size;
180 uint64_t alloc;
181 uint64_t space;
182 uint64_t cap, version;
183 zprop_source_t src = ZPROP_SRC_NONE;
184 spa_config_dirent_t *dp;
185
186 ASSERT(MUTEX_HELD(&spa->spa_props_lock));

--- 30 unchanged lines hidden (view full) ---

217 version = spa_version(spa);
218 if (version == zpool_prop_default_numeric(ZPOOL_PROP_VERSION))
219 src = ZPROP_SRC_DEFAULT;
220 else
221 src = ZPROP_SRC_LOCAL;
222 spa_prop_add_list(*nvp, ZPOOL_PROP_VERSION, NULL, version, src);
223 }
224
184 uint64_t size;
185 uint64_t alloc;
186 uint64_t space;
187 uint64_t cap, version;
188 zprop_source_t src = ZPROP_SRC_NONE;
189 spa_config_dirent_t *dp;
190
191 ASSERT(MUTEX_HELD(&spa->spa_props_lock));

--- 30 unchanged lines hidden (view full) ---

222 version = spa_version(spa);
223 if (version == zpool_prop_default_numeric(ZPOOL_PROP_VERSION))
224 src = ZPROP_SRC_DEFAULT;
225 else
226 src = ZPROP_SRC_LOCAL;
227 spa_prop_add_list(*nvp, ZPOOL_PROP_VERSION, NULL, version, src);
228 }
229
230 if (pool != NULL) {
231 dsl_dir_t *freedir = pool->dp_free_dir;
232
233 /*
 234 * The $FREE directory was introduced in SPA_VERSION_DEADLISTS;
 235 * when opening pools created before this version, freedir will be NULL.
236 */
237 if (freedir != NULL) {
238 spa_prop_add_list(*nvp, ZPOOL_PROP_FREEING, NULL,
239 freedir->dd_phys->dd_used_bytes, src);
240 } else {
241 spa_prop_add_list(*nvp, ZPOOL_PROP_FREEING,
242 NULL, 0, src);
243 }
244 }
245
225 spa_prop_add_list(*nvp, ZPOOL_PROP_GUID, NULL, spa_guid(spa), src);
226
227 if (spa->spa_comment != NULL) {
228 spa_prop_add_list(*nvp, ZPOOL_PROP_COMMENT, spa->spa_comment,
229 0, ZPROP_SRC_LOCAL);
230 }
231
232 if (spa->spa_root != NULL)

--- 123 unchanged lines hidden (view full) ---

356 * for the property values to be set.
357 */
358static int
359spa_prop_validate(spa_t *spa, nvlist_t *props)
360{
361 nvpair_t *elem;
362 int error = 0, reset_bootfs = 0;
363 uint64_t objnum;
246 spa_prop_add_list(*nvp, ZPOOL_PROP_GUID, NULL, spa_guid(spa), src);
247
248 if (spa->spa_comment != NULL) {
249 spa_prop_add_list(*nvp, ZPOOL_PROP_COMMENT, spa->spa_comment,
250 0, ZPROP_SRC_LOCAL);
251 }
252
253 if (spa->spa_root != NULL)

--- 123 unchanged lines hidden (view full) ---

377 * for the property values to be set.
378 */
379static int
380spa_prop_validate(spa_t *spa, nvlist_t *props)
381{
382 nvpair_t *elem;
383 int error = 0, reset_bootfs = 0;
384 uint64_t objnum;
385 boolean_t has_feature = B_FALSE;
364
365 elem = NULL;
366 while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
386
387 elem = NULL;
388 while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
367 zpool_prop_t prop;
368 char *propname, *strval;
369 uint64_t intval;
389 uint64_t intval;
370 objset_t *os;
371 char *slash, *check;
390 char *strval, *slash, *check, *fname;
391 const char *propname = nvpair_name(elem);
392 zpool_prop_t prop = zpool_name_to_prop(propname);
372
393
373 propname = nvpair_name(elem);
394 switch (prop) {
395 case ZPROP_INVAL:
396 if (!zpool_prop_feature(propname)) {
397 error = EINVAL;
398 break;
399 }
374
400
375 if ((prop = zpool_name_to_prop(propname)) == ZPROP_INVAL)
376 return (EINVAL);
401 /*
402 * Sanitize the input.
403 */
404 if (nvpair_type(elem) != DATA_TYPE_UINT64) {
405 error = EINVAL;
406 break;
407 }
377
408
378 switch (prop) {
409 if (nvpair_value_uint64(elem, &intval) != 0) {
410 error = EINVAL;
411 break;
412 }
413
414 if (intval != 0) {
415 error = EINVAL;
416 break;
417 }
418
419 fname = strchr(propname, '@') + 1;
420 if (zfeature_lookup_name(fname, NULL) != 0) {
421 error = EINVAL;
422 break;
423 }
424
425 has_feature = B_TRUE;
426 break;
427
379 case ZPOOL_PROP_VERSION:
380 error = nvpair_value_uint64(elem, &intval);
381 if (!error &&
428 case ZPOOL_PROP_VERSION:
429 error = nvpair_value_uint64(elem, &intval);
430 if (!error &&
382 (intval < spa_version(spa) || intval > SPA_VERSION))
431 (intval < spa_version(spa) ||
432 intval > SPA_VERSION_BEFORE_FEATURES ||
433 has_feature))
383 error = EINVAL;
384 break;
385
386 case ZPOOL_PROP_DELEGATION:
387 case ZPOOL_PROP_AUTOREPLACE:
388 case ZPOOL_PROP_LISTSNAPS:
389 case ZPOOL_PROP_AUTOEXPAND:
390 error = nvpair_value_uint64(elem, &intval);

--- 20 unchanged lines hidden (view full) ---

411 break;
412 }
413
414 reset_bootfs = 1;
415
416 error = nvpair_value_string(elem, &strval);
417
418 if (!error) {
434 error = EINVAL;
435 break;
436
437 case ZPOOL_PROP_DELEGATION:
438 case ZPOOL_PROP_AUTOREPLACE:
439 case ZPOOL_PROP_LISTSNAPS:
440 case ZPOOL_PROP_AUTOEXPAND:
441 error = nvpair_value_uint64(elem, &intval);

--- 20 unchanged lines hidden (view full) ---

462 break;
463 }
464
465 reset_bootfs = 1;
466
467 error = nvpair_value_string(elem, &strval);
468
469 if (!error) {
470 objset_t *os;
419 uint64_t compress;
420
421 if (strval == NULL || strval[0] == '\0') {
422 objnum = zpool_prop_default_numeric(
423 ZPOOL_PROP_BOOTFS);
424 break;
425 }
426

--- 133 unchanged lines hidden (view full) ---

560 if (need_sync)
561 spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
562}
563
564int
565spa_prop_set(spa_t *spa, nvlist_t *nvp)
566{
567 int error;
471 uint64_t compress;
472
473 if (strval == NULL || strval[0] == '\0') {
474 objnum = zpool_prop_default_numeric(
475 ZPOOL_PROP_BOOTFS);
476 break;
477 }
478
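
As an aside to the spa_prop_validate() hunk above: the new ZPROP_INVAL case accepts pool properties named "feature@<name>" whose value is a uint64 equal to 0 and whose <name> resolves through zfeature_lookup_name(); spa_sync_props() later responds by enabling that feature. Below is a minimal sketch of a props nvlist that would pass this check. It is not part of spa.c, the spa variable is assumed to be an open pool, and the feature name async_destroy is only an illustrative assumption.

	nvlist_t *props;
	int error;

	/* Build a property list naming the feature to enable. */
	VERIFY(nvlist_alloc(&props, NV_UNIQUE_NAME, KM_SLEEP) == 0);
	/* Per the validation above, the only accepted value is 0. */
	VERIFY(nvlist_add_uint64(props, "feature@async_destroy", 0) == 0);

	/* spa_prop_set() runs spa_prop_validate() before syncing anything. */
	error = spa_prop_set(spa, props);
	nvlist_free(props);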

--- 133 unchanged lines hidden (view full) ---

612 if (need_sync)
613 spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
614}
615
616int
617spa_prop_set(spa_t *spa, nvlist_t *nvp)
618{
619 int error;
568 nvpair_t *elem;
620 nvpair_t *elem = NULL;
569 boolean_t need_sync = B_FALSE;
621 boolean_t need_sync = B_FALSE;
570 zpool_prop_t prop;
571
572 if ((error = spa_prop_validate(spa, nvp)) != 0)
573 return (error);
574
622
623 if ((error = spa_prop_validate(spa, nvp)) != 0)
624 return (error);
625
575 elem = NULL;
576 while ((elem = nvlist_next_nvpair(nvp, elem)) != NULL) {
626 while ((elem = nvlist_next_nvpair(nvp, elem)) != NULL) {
577 if ((prop = zpool_name_to_prop(
578 nvpair_name(elem))) == ZPROP_INVAL)
579 return (EINVAL);
627 zpool_prop_t prop = zpool_name_to_prop(nvpair_name(elem));
580
581 if (prop == ZPOOL_PROP_CACHEFILE ||
582 prop == ZPOOL_PROP_ALTROOT ||
583 prop == ZPOOL_PROP_READONLY)
584 continue;
585
628
629 if (prop == ZPOOL_PROP_CACHEFILE ||
630 prop == ZPOOL_PROP_ALTROOT ||
631 prop == ZPOOL_PROP_READONLY)
632 continue;
633
634 if (prop == ZPOOL_PROP_VERSION || prop == ZPROP_INVAL) {
635 uint64_t ver;
636
637 if (prop == ZPOOL_PROP_VERSION) {
638 VERIFY(nvpair_value_uint64(elem, &ver) == 0);
639 } else {
640 ASSERT(zpool_prop_feature(nvpair_name(elem)));
641 ver = SPA_VERSION_FEATURES;
642 need_sync = B_TRUE;
643 }
644
645 /* Save time if the version is already set. */
646 if (ver == spa_version(spa))
647 continue;
648
649 /*
650 * In addition to the pool directory object, we might
651 * create the pool properties object, the features for
652 * read object, the features for write object, or the
653 * feature descriptions object.
654 */
655 error = dsl_sync_task_do(spa_get_dsl(spa), NULL,
656 spa_sync_version, spa, &ver, 6);
657 if (error)
658 return (error);
659 continue;
660 }
661
586 need_sync = B_TRUE;
587 break;
588 }
589
662 need_sync = B_TRUE;
663 break;
664 }
665
590 if (need_sync)
666 if (need_sync) {
591 return (dsl_sync_task_do(spa_get_dsl(spa), NULL, spa_sync_props,
667 return (dsl_sync_task_do(spa_get_dsl(spa), NULL, spa_sync_props,
592 spa, nvp, 3));
593 else
594 return (0);
668 spa, nvp, 6));
669 }
670
671 return (0);
595}
596
597/*
598 * If the bootfs property value is dsobj, clear it.
599 */
600void
601spa_prop_clear_bootfs(spa_t *spa, uint64_t dsobj, dmu_tx_t *tx)
602{
603 if (spa->spa_bootfs == dsobj && spa->spa_pool_props_object != 0) {
604 VERIFY(zap_remove(spa->spa_meta_objset,
605 spa->spa_pool_props_object,
606 zpool_prop_to_name(ZPOOL_PROP_BOOTFS), tx) == 0);
607 spa->spa_bootfs = 0;
608 }
609}
610
672}
673
674/*
675 * If the bootfs property value is dsobj, clear it.
676 */
677void
678spa_prop_clear_bootfs(spa_t *spa, uint64_t dsobj, dmu_tx_t *tx)
679{
680 if (spa->spa_bootfs == dsobj && spa->spa_pool_props_object != 0) {
681 VERIFY(zap_remove(spa->spa_meta_objset,
682 spa->spa_pool_props_object,
683 zpool_prop_to_name(ZPOOL_PROP_BOOTFS), tx) == 0);
684 spa->spa_bootfs = 0;
685 }
686}
687
688/*ARGSUSED*/
689static int
690spa_change_guid_check(void *arg1, void *arg2, dmu_tx_t *tx)
691{
692 spa_t *spa = arg1;
693 uint64_t *newguid = arg2;
694 vdev_t *rvd = spa->spa_root_vdev;
695 uint64_t vdev_state;
696
697 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
698 vdev_state = rvd->vdev_state;
699 spa_config_exit(spa, SCL_STATE, FTAG);
700
701 if (vdev_state != VDEV_STATE_HEALTHY)
702 return (ENXIO);
703
704 ASSERT3U(spa_guid(spa), !=, *newguid);
705
706 return (0);
707}
708
709static void
710spa_change_guid_sync(void *arg1, void *arg2, dmu_tx_t *tx)
711{
712 spa_t *spa = arg1;
713 uint64_t *newguid = arg2;
714 uint64_t oldguid;
715 vdev_t *rvd = spa->spa_root_vdev;
716
717 oldguid = spa_guid(spa);
718
719 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
720 rvd->vdev_guid = *newguid;
721 rvd->vdev_guid_sum += (*newguid - oldguid);
722 vdev_config_dirty(rvd);
723 spa_config_exit(spa, SCL_STATE, FTAG);
724
725#ifdef __FreeBSD__
726 /*
727 * TODO: until recent illumos logging changes are merged
728 * log reguid as pool property change
729 */
730 spa_history_log_internal(LOG_POOL_PROPSET, spa, tx,
731 "guid change old=%llu new=%llu", oldguid, *newguid);
732#else
733 spa_history_log_internal(spa, "guid change", tx, "old=%lld new=%lld",
734 oldguid, *newguid);
735#endif
736}
737
611/*
612 * Change the GUID for the pool. This is done so that we can later
613 * re-import a pool built from a clone of our own vdevs. We will modify
614 * the root vdev's guid, our own pool guid, and then mark all of our
615 * vdevs dirty. Note that we must make sure that all our vdevs are
616 * online when we do this, or else any vdevs that weren't present
617 * would be orphaned from our pool. We are also going to issue a
618 * sysevent to update any watchers.
619 */
620int
621spa_change_guid(spa_t *spa)
622{
738/*
739 * Change the GUID for the pool. This is done so that we can later
740 * re-import a pool built from a clone of our own vdevs. We will modify
741 * the root vdev's guid, our own pool guid, and then mark all of our
742 * vdevs dirty. Note that we must make sure that all our vdevs are
743 * online when we do this, or else any vdevs that weren't present
744 * would be orphaned from our pool. We are also going to issue a
745 * sysevent to update any watchers.
746 */
747int
748spa_change_guid(spa_t *spa)
749{
623 uint64_t oldguid, newguid;
624 uint64_t txg;
750 int error;
751 uint64_t guid;
625
752
626 if (!(spa_mode_global & FWRITE))
627 return (EROFS);
753 mutex_enter(&spa_namespace_lock);
754 guid = spa_generate_guid(NULL);
628
755
629 txg = spa_vdev_enter(spa);
756 error = dsl_sync_task_do(spa_get_dsl(spa), spa_change_guid_check,
757 spa_change_guid_sync, spa, &guid, 5);
630
758
631 if (spa->spa_root_vdev->vdev_state != VDEV_STATE_HEALTHY)
632 return (spa_vdev_exit(spa, NULL, txg, ENXIO));
759 if (error == 0) {
760 spa_config_sync(spa, B_FALSE, B_TRUE);
761 spa_event_notify(spa, NULL, ESC_ZFS_POOL_REGUID);
762 }
633
763
634 oldguid = spa_guid(spa);
635 newguid = spa_generate_guid(NULL);
636 ASSERT3U(oldguid, !=, newguid);
764 mutex_exit(&spa_namespace_lock);
637
765
638 spa->spa_root_vdev->vdev_guid = newguid;
639 spa->spa_root_vdev->vdev_guid_sum += (newguid - oldguid);
640
641 vdev_config_dirty(spa->spa_root_vdev);
642
643 spa_event_notify(spa, NULL, ESC_ZFS_POOL_REGUID);
644
645 return (spa_vdev_exit(spa, NULL, txg, 0));
766 return (error);
646}
647
648/*
649 * ==========================================================================
650 * SPA state manipulation (open/create/destroy/import/export)
651 * ==========================================================================
652 */
653

--- 971 unchanged lines hidden (view full) ---

1625spa_load_verify_done(zio_t *zio)
1626{
1627 blkptr_t *bp = zio->io_bp;
1628 spa_load_error_t *sle = zio->io_private;
1629 dmu_object_type_t type = BP_GET_TYPE(bp);
1630 int error = zio->io_error;
1631
1632 if (error) {
767}
768
769/*
770 * ==========================================================================
771 * SPA state manipulation (open/create/destroy/import/export)
772 * ==========================================================================
773 */
774
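
As an aside to the hunk above: spa_change_guid() now generates the new guid up front and delegates the actual change to dsl_sync_task_do(), so spa_change_guid_check() can veto from open context and spa_change_guid_sync() applies the guid update in syncing context within a transaction group. A condensed sketch of that check/sync contract follows; my_check, my_sync, arg1, and arg2 are hypothetical placeholders standing in for the real pair and their arguments.

	/* Open context: validate preconditions; a non-zero return aborts. */
	static int
	my_check(void *arg1, void *arg2, dmu_tx_t *tx)
	{
		return (0);
	}

	/* Syncing context: apply the change as part of tx's txg. */
	static void
	my_sync(void *arg1, void *arg2, dmu_tx_t *tx)
	{
	}

	error = dsl_sync_task_do(spa_get_dsl(spa), my_check, my_sync,
	    arg1, arg2, 5 /* estimated blocks modified */);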

--- 971 unchanged lines hidden (view full) ---

1746spa_load_verify_done(zio_t *zio)
1747{
1748 blkptr_t *bp = zio->io_bp;
1749 spa_load_error_t *sle = zio->io_private;
1750 dmu_object_type_t type = BP_GET_TYPE(bp);
1751 int error = zio->io_error;
1752
1753 if (error) {
1633 if ((BP_GET_LEVEL(bp) != 0 || dmu_ot[type].ot_metadata) &&
1754 if ((BP_GET_LEVEL(bp) != 0 || DMU_OT_IS_METADATA(type)) &&
1634 type != DMU_OT_INTENT_LOG)
1635 atomic_add_64(&sle->sle_meta_count, 1);
1636 else
1637 atomic_add_64(&sle->sle_data_count, 1);
1638 }
1639 zio_data_buf_free(zio->io_data, zio->io_size);
1640}
1641

--- 213 unchanged lines hidden (view full) ---

1855 spa->spa_config_guid = pool_guid;
1856
1857 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_SPLIT,
1858 &nvl) == 0) {
1859 VERIFY(nvlist_dup(nvl, &spa->spa_config_splitting,
1860 KM_SLEEP) == 0);
1861 }
1862
1755 type != DMU_OT_INTENT_LOG)
1756 atomic_add_64(&sle->sle_meta_count, 1);
1757 else
1758 atomic_add_64(&sle->sle_data_count, 1);
1759 }
1760 zio_data_buf_free(zio->io_data, zio->io_size);
1761}
1762

--- 213 unchanged lines hidden (view full) ---

1976 spa->spa_config_guid = pool_guid;
1977
1978 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_SPLIT,
1979 &nvl) == 0) {
1980 VERIFY(nvlist_dup(nvl, &spa->spa_config_splitting,
1981 KM_SLEEP) == 0);
1982 }
1983
1984 nvlist_free(spa->spa_load_info);
1985 spa->spa_load_info = fnvlist_alloc();
1986
1863 gethrestime(&spa->spa_loaded_ts);
1864 error = spa_load_impl(spa, pool_guid, config, state, type,
1865 mosconfig, &ereport);
1866 }
1867
1868 spa->spa_minref = refcount_count(&spa->spa_refcount);
1869 if (error) {
1870 if (error != EEXIST) {

--- 16 unchanged lines hidden (view full) ---

1887 */
1888static int
1889spa_load_impl(spa_t *spa, uint64_t pool_guid, nvlist_t *config,
1890 spa_load_state_t state, spa_import_type_t type, boolean_t mosconfig,
1891 char **ereport)
1892{
1893 int error = 0;
1894 nvlist_t *nvroot = NULL;
1987 gethrestime(&spa->spa_loaded_ts);
1988 error = spa_load_impl(spa, pool_guid, config, state, type,
1989 mosconfig, &ereport);
1990 }
1991
1992 spa->spa_minref = refcount_count(&spa->spa_refcount);
1993 if (error) {
1994 if (error != EEXIST) {

--- 16 unchanged lines hidden (view full) ---

2011 */
2012static int
2013spa_load_impl(spa_t *spa, uint64_t pool_guid, nvlist_t *config,
2014 spa_load_state_t state, spa_import_type_t type, boolean_t mosconfig,
2015 char **ereport)
2016{
2017 int error = 0;
2018 nvlist_t *nvroot = NULL;
2019 nvlist_t *label;
1895 vdev_t *rvd;
1896 uberblock_t *ub = &spa->spa_uberblock;
1897 uint64_t children, config_cache_txg = spa->spa_config_txg;
1898 int orig_mode = spa->spa_mode;
1899 int parse;
1900 uint64_t obj;
2020 vdev_t *rvd;
2021 uberblock_t *ub = &spa->spa_uberblock;
2022 uint64_t children, config_cache_txg = spa->spa_config_txg;
2023 int orig_mode = spa->spa_mode;
2024 int parse;
2025 uint64_t obj;
2026 boolean_t missing_feat_write = B_FALSE;
1901
1902 /*
1903 * If this is an untrusted config, access the pool in read-only mode.
1904 * This prevents things like resilvering recently removed devices.
1905 */
1906 if (!mosconfig)
1907 spa->spa_mode = FREAD;
1908

--- 63 unchanged lines hidden (view full) ---

1972
1973 if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN)
1974 return (ENXIO);
1975 }
1976
1977 /*
1978 * Find the best uberblock.
1979 */
2027
2028 /*
2029 * If this is an untrusted config, access the pool in read-only mode.
2030 * This prevents things like resilvering recently removed devices.
2031 */
2032 if (!mosconfig)
2033 spa->spa_mode = FREAD;
2034

--- 63 unchanged lines hidden (view full) ---

2098
2099 if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN)
2100 return (ENXIO);
2101 }
2102
2103 /*
2104 * Find the best uberblock.
2105 */
1980 vdev_uberblock_load(NULL, rvd, ub);
2106 vdev_uberblock_load(rvd, ub, &label);
1981
1982 /*
1983 * If we weren't able to find a single valid uberblock, return failure.
1984 */
2107
2108 /*
2109 * If we weren't able to find a single valid uberblock, return failure.
2110 */
1985 if (ub->ub_txg == 0)
2111 if (ub->ub_txg == 0) {
2112 nvlist_free(label);
1986 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, ENXIO));
2113 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, ENXIO));
2114 }
1987
1988 /*
2115
2116 /*
1989 * If the pool is newer than the code, we can't open it.
2117 * If the pool has an unsupported version we can't open it.
1990 */
2118 */
1991 if (ub->ub_version > SPA_VERSION)
2119 if (!SPA_VERSION_IS_SUPPORTED(ub->ub_version)) {
2120 nvlist_free(label);
1992 return (spa_vdev_err(rvd, VDEV_AUX_VERSION_NEWER, ENOTSUP));
2121 return (spa_vdev_err(rvd, VDEV_AUX_VERSION_NEWER, ENOTSUP));
2122 }
1993
2123
2124 if (ub->ub_version >= SPA_VERSION_FEATURES) {
2125 nvlist_t *features;
2126
2127 /*
2128 * If we weren't able to find what's necessary for reading the
2129 * MOS in the label, return failure.
2130 */
2131 if (label == NULL || nvlist_lookup_nvlist(label,
2132 ZPOOL_CONFIG_FEATURES_FOR_READ, &features) != 0) {
2133 nvlist_free(label);
2134 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA,
2135 ENXIO));
2136 }
2137
2138 /*
2139 * Update our in-core representation with the definitive values
2140 * from the label.
2141 */
2142 nvlist_free(spa->spa_label_features);
2143 VERIFY(nvlist_dup(features, &spa->spa_label_features, 0) == 0);
2144 }
2145
2146 nvlist_free(label);
2147
1994 /*
2148 /*
2149 * Look through entries in the label nvlist's features_for_read. If
2150 * there is a feature listed there which we don't understand then we
2151 * cannot open a pool.
2152 */
2153 if (ub->ub_version >= SPA_VERSION_FEATURES) {
2154 nvlist_t *unsup_feat;
2155
2156 VERIFY(nvlist_alloc(&unsup_feat, NV_UNIQUE_NAME, KM_SLEEP) ==
2157 0);
2158
2159 for (nvpair_t *nvp = nvlist_next_nvpair(spa->spa_label_features,
2160 NULL); nvp != NULL;
2161 nvp = nvlist_next_nvpair(spa->spa_label_features, nvp)) {
2162 if (!zfeature_is_supported(nvpair_name(nvp))) {
2163 VERIFY(nvlist_add_string(unsup_feat,
2164 nvpair_name(nvp), "") == 0);
2165 }
2166 }
2167
2168 if (!nvlist_empty(unsup_feat)) {
2169 VERIFY(nvlist_add_nvlist(spa->spa_load_info,
2170 ZPOOL_CONFIG_UNSUP_FEAT, unsup_feat) == 0);
2171 nvlist_free(unsup_feat);
2172 return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT,
2173 ENOTSUP));
2174 }
2175
2176 nvlist_free(unsup_feat);
2177 }
2178
2179 /*
1995 * If the vdev guid sum doesn't match the uberblock, we have an
1996 * incomplete configuration. We first check to see if the pool
1997 * is aware of the complete config (i.e ZPOOL_CONFIG_VDEV_CHILDREN).
1998 * If it is, defer the vdev_guid_sum check till later so we
1999 * can handle missing vdevs.
2000 */
2001 if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VDEV_CHILDREN,
2002 &children) != 0 && mosconfig && type != SPA_IMPORT_ASSEMBLE &&

--- 15 unchanged lines hidden (view full) ---

2018 spa->spa_ubsync = spa->spa_uberblock;
2019 spa->spa_verify_min_txg = spa->spa_extreme_rewind ?
2020 TXG_INITIAL - 1 : spa_last_synced_txg(spa) - TXG_DEFER_SIZE - 1;
2021 spa->spa_first_txg = spa->spa_last_ubsync_txg ?
2022 spa->spa_last_ubsync_txg : spa_last_synced_txg(spa) + 1;
2023 spa->spa_claim_max_txg = spa->spa_first_txg;
2024 spa->spa_prev_software_version = ub->ub_software_version;
2025
2180 * If the vdev guid sum doesn't match the uberblock, we have an
2181 * incomplete configuration. We first check to see if the pool
2182 * is aware of the complete config (i.e ZPOOL_CONFIG_VDEV_CHILDREN).
2183 * If it is, defer the vdev_guid_sum check till later so we
2184 * can handle missing vdevs.
2185 */
2186 if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VDEV_CHILDREN,
2187 &children) != 0 && mosconfig && type != SPA_IMPORT_ASSEMBLE &&

--- 15 unchanged lines hidden (view full) ---

2203 spa->spa_ubsync = spa->spa_uberblock;
2204 spa->spa_verify_min_txg = spa->spa_extreme_rewind ?
2205 TXG_INITIAL - 1 : spa_last_synced_txg(spa) - TXG_DEFER_SIZE - 1;
2206 spa->spa_first_txg = spa->spa_last_ubsync_txg ?
2207 spa->spa_last_ubsync_txg : spa_last_synced_txg(spa) + 1;
2208 spa->spa_claim_max_txg = spa->spa_first_txg;
2209 spa->spa_prev_software_version = ub->ub_software_version;
2210
2026 error = dsl_pool_open(spa, spa->spa_first_txg, &spa->spa_dsl_pool);
2211 error = dsl_pool_init(spa, spa->spa_first_txg, &spa->spa_dsl_pool);
2027 if (error)
2028 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2029 spa->spa_meta_objset = spa->spa_dsl_pool->dp_meta_objset;
2030
2031 if (spa_dir_prop(spa, DMU_POOL_CONFIG, &spa->spa_config_object) != 0)
2032 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2033
2212 if (error)
2213 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2214 spa->spa_meta_objset = spa->spa_dsl_pool->dp_meta_objset;
2215
2216 if (spa_dir_prop(spa, DMU_POOL_CONFIG, &spa->spa_config_object) != 0)
2217 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2218
2219 if (spa_version(spa) >= SPA_VERSION_FEATURES) {
2220 boolean_t missing_feat_read = B_FALSE;
2221 nvlist_t *unsup_feat, *enabled_feat;
2222
2223 if (spa_dir_prop(spa, DMU_POOL_FEATURES_FOR_READ,
2224 &spa->spa_feat_for_read_obj) != 0) {
2225 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2226 }
2227
2228 if (spa_dir_prop(spa, DMU_POOL_FEATURES_FOR_WRITE,
2229 &spa->spa_feat_for_write_obj) != 0) {
2230 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2231 }
2232
2233 if (spa_dir_prop(spa, DMU_POOL_FEATURE_DESCRIPTIONS,
2234 &spa->spa_feat_desc_obj) != 0) {
2235 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2236 }
2237
2238 enabled_feat = fnvlist_alloc();
2239 unsup_feat = fnvlist_alloc();
2240
2241 if (!feature_is_supported(spa->spa_meta_objset,
2242 spa->spa_feat_for_read_obj, spa->spa_feat_desc_obj,
2243 unsup_feat, enabled_feat))
2244 missing_feat_read = B_TRUE;
2245
2246 if (spa_writeable(spa) || state == SPA_LOAD_TRYIMPORT) {
2247 if (!feature_is_supported(spa->spa_meta_objset,
2248 spa->spa_feat_for_write_obj, spa->spa_feat_desc_obj,
2249 unsup_feat, enabled_feat)) {
2250 missing_feat_write = B_TRUE;
2251 }
2252 }
2253
2254 fnvlist_add_nvlist(spa->spa_load_info,
2255 ZPOOL_CONFIG_ENABLED_FEAT, enabled_feat);
2256
2257 if (!nvlist_empty(unsup_feat)) {
2258 fnvlist_add_nvlist(spa->spa_load_info,
2259 ZPOOL_CONFIG_UNSUP_FEAT, unsup_feat);
2260 }
2261
2262 fnvlist_free(enabled_feat);
2263 fnvlist_free(unsup_feat);
2264
2265 if (!missing_feat_read) {
2266 fnvlist_add_boolean(spa->spa_load_info,
2267 ZPOOL_CONFIG_CAN_RDONLY);
2268 }
2269
2270 /*
2271 * If the state is SPA_LOAD_TRYIMPORT, our objective is
2272 * twofold: to determine whether the pool is available for
2273 * import in read-write mode and (if it is not) whether the
2274 * pool is available for import in read-only mode. If the pool
2275 * is available for import in read-write mode, it is displayed
2276 * as available in userland; if it is not available for import
2277 * in read-only mode, it is displayed as unavailable in
2278 * userland. If the pool is available for import in read-only
2279 * mode but not read-write mode, it is displayed as unavailable
2280 * in userland with a special note that the pool is actually
2281 * available for open in read-only mode.
2282 *
2283 * As a result, if the state is SPA_LOAD_TRYIMPORT and we are
2284 * missing a feature for write, we must first determine whether
2285 * the pool can be opened read-only before returning to
2286 * userland in order to know whether to display the
2287 * abovementioned note.
2288 */
2289 if (missing_feat_read || (missing_feat_write &&
2290 spa_writeable(spa))) {
2291 return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT,
2292 ENOTSUP));
2293 }
2294 }
2295
2296 spa->spa_is_initializing = B_TRUE;
2297 error = dsl_pool_open(spa->spa_dsl_pool);
2298 spa->spa_is_initializing = B_FALSE;
2299 if (error != 0)
2300 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2301
2034 if (!mosconfig) {
2035 uint64_t hostid;
2036 nvlist_t *policy = NULL, *nvconfig;
2037
2038 if (load_nvlist(spa, spa->spa_config_object, &nvconfig) != 0)
2039 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2040
2041 if (!spa_is_root(spa) && nvlist_lookup_uint64(nvconfig,

--- 201 unchanged lines hidden (view full) ---

2243 if (!spa_config_valid(spa, nvconfig)) {
2244 nvlist_free(nvconfig);
2245 return (spa_vdev_err(rvd, VDEV_AUX_BAD_GUID_SUM,
2246 ENXIO));
2247 }
2248 nvlist_free(nvconfig);
2249
2250 /*
2302 if (!mosconfig) {
2303 uint64_t hostid;
2304 nvlist_t *policy = NULL, *nvconfig;
2305
2306 if (load_nvlist(spa, spa->spa_config_object, &nvconfig) != 0)
2307 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2308
2309 if (!spa_is_root(spa) && nvlist_lookup_uint64(nvconfig,

--- 201 unchanged lines hidden (view full) ---

2511 if (!spa_config_valid(spa, nvconfig)) {
2512 nvlist_free(nvconfig);
2513 return (spa_vdev_err(rvd, VDEV_AUX_BAD_GUID_SUM,
2514 ENXIO));
2515 }
2516 nvlist_free(nvconfig);
2517
2518 /*
2251 * Now that we've validate the config, check the state of the
2519 * Now that we've validated the config, check the state of the
2252 * root vdev. If it can't be opened, it indicates one or
2253 * more toplevel vdevs are faulted.
2254 */
2255 if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN)
2256 return (ENXIO);
2257
2258 if (spa_check_logs(spa)) {
2259 *ereport = FM_EREPORT_ZFS_LOG_REPLAY;
2260 return (spa_vdev_err(rvd, VDEV_AUX_BAD_LOG, ENXIO));
2261 }
2262 }
2263
2520 * root vdev. If it can't be opened, it indicates one or
2521 * more toplevel vdevs are faulted.
2522 */
2523 if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN)
2524 return (ENXIO);
2525
2526 if (spa_check_logs(spa)) {
2527 *ereport = FM_EREPORT_ZFS_LOG_REPLAY;
2528 return (spa_vdev_err(rvd, VDEV_AUX_BAD_LOG, ENXIO));
2529 }
2530 }
2531
2532 if (missing_feat_write) {
2533 ASSERT(state == SPA_LOAD_TRYIMPORT);
2534
2535 /*
2536 * At this point, we know that we can open the pool in
2537 * read-only mode but not read-write mode. We now have enough
2538 * information and can return to userland.
2539 */
2540 return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT, ENOTSUP));
2541 }
2542
2264 /*
2265 * We've successfully opened the pool, verify that we're ready
2266 * to start pushing transactions.
2267 */
2268 if (state != SPA_LOAD_TRYIMPORT) {
2269 if (error = spa_load_verify(spa))
2270 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA,
2271 error));

--- 93 unchanged lines hidden (view full) ---

2365 spa->spa_load_max_txg--;
2366
2367 spa_activate(spa, mode);
2368 spa_async_suspend(spa);
2369
2370 return (spa_load(spa, state, SPA_IMPORT_EXISTING, mosconfig));
2371}
2372
2543 /*
2544 * We've successfully opened the pool, verify that we're ready
2545 * to start pushing transactions.
2546 */
2547 if (state != SPA_LOAD_TRYIMPORT) {
2548 if (error = spa_load_verify(spa))
2549 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA,
2550 error));

--- 93 unchanged lines hidden (view full) ---

2644 spa->spa_load_max_txg--;
2645
2646 spa_activate(spa, mode);
2647 spa_async_suspend(spa);
2648
2649 return (spa_load(spa, state, SPA_IMPORT_EXISTING, mosconfig));
2650}
2651
2652/*
2653 * If spa_load() fails this function will try loading prior txg's. If
2654 * 'state' is SPA_LOAD_RECOVER and one of these loads succeeds the pool
2655 * will be rewound to that txg. If 'state' is not SPA_LOAD_RECOVER this
2656 * function will not rewind the pool and will return the same error as
2657 * spa_load().
2658 */
2373static int
2374spa_load_best(spa_t *spa, spa_load_state_t state, int mosconfig,
2375 uint64_t max_request, int rewind_flags)
2376{
2659static int
2660spa_load_best(spa_t *spa, spa_load_state_t state, int mosconfig,
2661 uint64_t max_request, int rewind_flags)
2662{
2663 nvlist_t *loadinfo = NULL;
2377 nvlist_t *config = NULL;
2378 int load_error, rewind_error;
2379 uint64_t safe_rewind_txg;
2380 uint64_t min_txg;
2381
2382 if (spa->spa_load_txg && state == SPA_LOAD_RECOVER) {
2383 spa->spa_load_max_txg = spa->spa_load_txg;
2384 spa_set_log_state(spa, SPA_LOG_CLEAR);

--- 12 unchanged lines hidden (view full) ---

2397 spa->spa_last_ubsync_txg = spa->spa_uberblock.ub_txg;
2398 spa->spa_last_ubsync_txg_ts = spa->spa_uberblock.ub_timestamp;
2399
2400 if (rewind_flags & ZPOOL_NEVER_REWIND) {
2401 nvlist_free(config);
2402 return (load_error);
2403 }
2404
2664 nvlist_t *config = NULL;
2665 int load_error, rewind_error;
2666 uint64_t safe_rewind_txg;
2667 uint64_t min_txg;
2668
2669 if (spa->spa_load_txg && state == SPA_LOAD_RECOVER) {
2670 spa->spa_load_max_txg = spa->spa_load_txg;
2671 spa_set_log_state(spa, SPA_LOG_CLEAR);

--- 12 unchanged lines hidden (view full) ---

2684 spa->spa_last_ubsync_txg = spa->spa_uberblock.ub_txg;
2685 spa->spa_last_ubsync_txg_ts = spa->spa_uberblock.ub_timestamp;
2686
2687 if (rewind_flags & ZPOOL_NEVER_REWIND) {
2688 nvlist_free(config);
2689 return (load_error);
2690 }
2691
2405 /* Price of rolling back is discarding txgs, including log */
2406 if (state == SPA_LOAD_RECOVER)
2692 if (state == SPA_LOAD_RECOVER) {
2693 /* Price of rolling back is discarding txgs, including log */
2407 spa_set_log_state(spa, SPA_LOG_CLEAR);
2694 spa_set_log_state(spa, SPA_LOG_CLEAR);
2695 } else {
2696 /*
2697 * If we aren't rolling back save the load info from our first
2698 * import attempt so that we can restore it after attempting
2699 * to rewind.
2700 */
2701 loadinfo = spa->spa_load_info;
2702 spa->spa_load_info = fnvlist_alloc();
2703 }
2408
2409 spa->spa_load_max_txg = spa->spa_last_ubsync_txg;
2410 safe_rewind_txg = spa->spa_last_ubsync_txg - TXG_DEFER_SIZE;
2411 min_txg = (rewind_flags & ZPOOL_EXTREME_REWIND) ?
2412 TXG_INITIAL : safe_rewind_txg;
2413
2414 /*
2415 * Continue as long as we're finding errors, we're still within

--- 7 unchanged lines hidden (view full) ---

2423 }
2424
2425 spa->spa_extreme_rewind = B_FALSE;
2426 spa->spa_load_max_txg = UINT64_MAX;
2427
2428 if (config && (rewind_error || state != SPA_LOAD_RECOVER))
2429 spa_config_set(spa, config);
2430
2704
2705 spa->spa_load_max_txg = spa->spa_last_ubsync_txg;
2706 safe_rewind_txg = spa->spa_last_ubsync_txg - TXG_DEFER_SIZE;
2707 min_txg = (rewind_flags & ZPOOL_EXTREME_REWIND) ?
2708 TXG_INITIAL : safe_rewind_txg;
2709
2710 /*
2711 * Continue as long as we're finding errors, we're still within

--- 7 unchanged lines hidden (view full) ---

2719 }
2720
2721 spa->spa_extreme_rewind = B_FALSE;
2722 spa->spa_load_max_txg = UINT64_MAX;
2723
2724 if (config && (rewind_error || state != SPA_LOAD_RECOVER))
2725 spa_config_set(spa, config);
2726
2431 return (state == SPA_LOAD_RECOVER ? rewind_error : load_error);
2727 if (state == SPA_LOAD_RECOVER) {
2728 ASSERT3P(loadinfo, ==, NULL);
2729 return (rewind_error);
2730 } else {
2731 /* Store the rewind info as part of the initial load info */
2732 fnvlist_add_nvlist(loadinfo, ZPOOL_CONFIG_REWIND_INFO,
2733 spa->spa_load_info);
2734
2735 /* Restore the initial load info */
2736 fnvlist_free(spa->spa_load_info);
2737 spa->spa_load_info = loadinfo;
2738
2739 return (load_error);
2740 }
2432}
2433
2434/*
2435 * Pool Open/Import
2436 *
2437 * The import case is identical to an open except that the configuration is sent
2438 * down from userland, instead of grabbed from the configuration cache. For the
2439 * case of an open, the pool configuration will exist in the

--- 262 unchanged lines hidden (view full) ---

2702 VERIFY(nvlist_lookup_uint64_array(l2cache[i],
2703 ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc)
2704 == 0);
2705 vdev_get_stats(vd, vs);
2706 }
2707 }
2708}
2709
2741}
2742
2743/*
2744 * Pool Open/Import
2745 *
2746 * The import case is identical to an open except that the configuration is sent
2747 * down from userland, instead of grabbed from the configuration cache. For the
2748 * case of an open, the pool configuration will exist in the

--- 262 unchanged lines hidden (view full) ---

3011 VERIFY(nvlist_lookup_uint64_array(l2cache[i],
3012 ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc)
3013 == 0);
3014 vdev_get_stats(vd, vs);
3015 }
3016 }
3017}
3018
3019static void
3020spa_add_feature_stats(spa_t *spa, nvlist_t *config)
3021{
3022 nvlist_t *features;
3023 zap_cursor_t zc;
3024 zap_attribute_t za;
3025
3026 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
3027 VERIFY(nvlist_alloc(&features, NV_UNIQUE_NAME, KM_SLEEP) == 0);
3028
3029 if (spa->spa_feat_for_read_obj != 0) {
3030 for (zap_cursor_init(&zc, spa->spa_meta_objset,
3031 spa->spa_feat_for_read_obj);
3032 zap_cursor_retrieve(&zc, &za) == 0;
3033 zap_cursor_advance(&zc)) {
3034 ASSERT(za.za_integer_length == sizeof (uint64_t) &&
3035 za.za_num_integers == 1);
3036 VERIFY3U(0, ==, nvlist_add_uint64(features, za.za_name,
3037 za.za_first_integer));
3038 }
3039 zap_cursor_fini(&zc);
3040 }
3041
3042 if (spa->spa_feat_for_write_obj != 0) {
3043 for (zap_cursor_init(&zc, spa->spa_meta_objset,
3044 spa->spa_feat_for_write_obj);
3045 zap_cursor_retrieve(&zc, &za) == 0;
3046 zap_cursor_advance(&zc)) {
3047 ASSERT(za.za_integer_length == sizeof (uint64_t) &&
3048 za.za_num_integers == 1);
3049 VERIFY3U(0, ==, nvlist_add_uint64(features, za.za_name,
3050 za.za_first_integer));
3051 }
3052 zap_cursor_fini(&zc);
3053 }
3054
3055 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_FEATURE_STATS,
3056 features) == 0);
3057 nvlist_free(features);
3058}
3059
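
As an aside to the new spa_add_feature_stats() above: it attaches a ZPOOL_CONFIG_FEATURE_STATS nvlist to the config handed back through spa_get_stats(), with one uint64 entry per feature found in the features-for-read and features-for-write ZAP objects. A minimal consumer-side sketch of walking that nvlist follows; it is not part of spa.c, and config is assumed to be the nvlist returned by spa_get_stats().

	nvlist_t *features;
	nvpair_t *nvp = NULL;
	uint64_t val;

	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_FEATURE_STATS,
	    &features) == 0) {
		while ((nvp = nvlist_next_nvpair(features, nvp)) != NULL) {
			/* Name is the feature guid; value is the ZAP count. */
			VERIFY(nvpair_value_uint64(nvp, &val) == 0);
		}
	}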
2710int
3060int
2711spa_get_stats(const char *name, nvlist_t **config, char *altroot, size_t buflen)
3061spa_get_stats(const char *name, nvlist_t **config,
3062 char *altroot, size_t buflen)
2712{
2713 int error;
2714 spa_t *spa;
2715
2716 *config = NULL;
2717 error = spa_open_common(name, &spa, FTAG, NULL, config);
2718
2719 if (spa != NULL) {

--- 18 unchanged lines hidden (view full) ---

2738
2739 if (spa_suspended(spa))
2740 VERIFY(nvlist_add_uint64(*config,
2741 ZPOOL_CONFIG_SUSPENDED,
2742 spa->spa_failmode) == 0);
2743
2744 spa_add_spares(spa, *config);
2745 spa_add_l2cache(spa, *config);
3063{
3064 int error;
3065 spa_t *spa;
3066
3067 *config = NULL;
3068 error = spa_open_common(name, &spa, FTAG, NULL, config);
3069
3070 if (spa != NULL) {

--- 18 unchanged lines hidden (view full) ---

3089
3090 if (spa_suspended(spa))
3091 VERIFY(nvlist_add_uint64(*config,
3092 ZPOOL_CONFIG_SUSPENDED,
3093 spa->spa_failmode) == 0);
3094
3095 spa_add_spares(spa, *config);
3096 spa_add_l2cache(spa, *config);
3097 spa_add_feature_stats(spa, *config);
2746 }
2747 }
2748
2749 /*
2750 * We want to get the alternate root even for faulted pools, so we cheat
2751 * and call spa_lookup() directly.
2752 */
2753 if (altroot) {

--- 204 unchanged lines hidden (view full) ---

2958 vdev_t *rvd;
2959 dsl_pool_t *dp;
2960 dmu_tx_t *tx;
2961 int error = 0;
2962 uint64_t txg = TXG_INITIAL;
2963 nvlist_t **spares, **l2cache;
2964 uint_t nspares, nl2cache;
2965 uint64_t version, obj;
3098 }
3099 }
3100
3101 /*
3102 * We want to get the alternate root even for faulted pools, so we cheat
3103 * and call spa_lookup() directly.
3104 */
3105 if (altroot) {

--- 204 unchanged lines hidden (view full) ---

3310 vdev_t *rvd;
3311 dsl_pool_t *dp;
3312 dmu_tx_t *tx;
3313 int error = 0;
3314 uint64_t txg = TXG_INITIAL;
3315 nvlist_t **spares, **l2cache;
3316 uint_t nspares, nl2cache;
3317 uint64_t version, obj;
3318 boolean_t has_features;
2966
2967 /*
2968 * If this pool already exists, return failure.
2969 */
2970 mutex_enter(&spa_namespace_lock);
2971 if (spa_lookup(pool) != NULL) {
2972 mutex_exit(&spa_namespace_lock);
2973 return (EEXIST);

--- 9 unchanged lines hidden (view full) ---

2983
2984 if (props && (error = spa_prop_validate(spa, props))) {
2985 spa_deactivate(spa);
2986 spa_remove(spa);
2987 mutex_exit(&spa_namespace_lock);
2988 return (error);
2989 }
2990
3319
3320 /*
3321 * If this pool already exists, return failure.
3322 */
3323 mutex_enter(&spa_namespace_lock);
3324 if (spa_lookup(pool) != NULL) {
3325 mutex_exit(&spa_namespace_lock);
3326 return (EEXIST);

--- 9 unchanged lines hidden (view full) ---

3336
3337 if (props && (error = spa_prop_validate(spa, props))) {
3338 spa_deactivate(spa);
3339 spa_remove(spa);
3340 mutex_exit(&spa_namespace_lock);
3341 return (error);
3342 }
3343
2991 if (nvlist_lookup_uint64(props, zpool_prop_to_name(ZPOOL_PROP_VERSION),
2992 &version) != 0)
3344 has_features = B_FALSE;
3345 for (nvpair_t *elem = nvlist_next_nvpair(props, NULL);
3346 elem != NULL; elem = nvlist_next_nvpair(props, elem)) {
3347 if (zpool_prop_feature(nvpair_name(elem)))
3348 has_features = B_TRUE;
3349 }
3350
3351 if (has_features || nvlist_lookup_uint64(props,
3352 zpool_prop_to_name(ZPOOL_PROP_VERSION), &version) != 0) {
2993 version = SPA_VERSION;
3353 version = SPA_VERSION;
2994 ASSERT(version <= SPA_VERSION);
3354 }
3355 ASSERT(SPA_VERSION_IS_SUPPORTED(version));
2995
2996 spa->spa_first_txg = txg;
2997 spa->spa_uberblock.ub_txg = txg - 1;
2998 spa->spa_uberblock.ub_version = version;
2999 spa->spa_ubsync = spa->spa_uberblock;
3000
3001 /*
3002 * Create "The Godfather" zio to hold all async IOs

--- 59 unchanged lines hidden (view full) ---

3062 VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config,
3063 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
3064 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
3065 spa_load_l2cache(spa);
3066 spa_config_exit(spa, SCL_ALL, FTAG);
3067 spa->spa_l2cache.sav_sync = B_TRUE;
3068 }
3069
3356
3357 spa->spa_first_txg = txg;
3358 spa->spa_uberblock.ub_txg = txg - 1;
3359 spa->spa_uberblock.ub_version = version;
3360 spa->spa_ubsync = spa->spa_uberblock;
3361
3362 /*
3363 * Create "The Godfather" zio to hold all async IOs

--- 59 unchanged lines hidden (view full) ---

3423 VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config,
3424 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
3425 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
3426 spa_load_l2cache(spa);
3427 spa_config_exit(spa, SCL_ALL, FTAG);
3428 spa->spa_l2cache.sav_sync = B_TRUE;
3429 }
3430
3431 spa->spa_is_initializing = B_TRUE;
3070 spa->spa_dsl_pool = dp = dsl_pool_create(spa, zplprops, txg);
3071 spa->spa_meta_objset = dp->dp_meta_objset;
3432 spa->spa_dsl_pool = dp = dsl_pool_create(spa, zplprops, txg);
3433 spa->spa_meta_objset = dp->dp_meta_objset;
3434 spa->spa_is_initializing = B_FALSE;
3072
3073 /*
3074 * Create DDTs (dedup tables).
3075 */
3076 ddt_create(spa);
3077
3078 spa_update_dspace(spa);
3079

--- 7 unchanged lines hidden (view full) ---

3087 DMU_OT_PACKED_NVLIST_SIZE, sizeof (uint64_t), tx);
3088
3089 if (zap_add(spa->spa_meta_objset,
3090 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CONFIG,
3091 sizeof (uint64_t), 1, &spa->spa_config_object, tx) != 0) {
3092 cmn_err(CE_PANIC, "failed to add pool config");
3093 }
3094
3435
3436 /*
3437 * Create DDTs (dedup tables).
3438 */
3439 ddt_create(spa);
3440
3441 spa_update_dspace(spa);
3442

--- 7 unchanged lines hidden (view full) ---

3450 DMU_OT_PACKED_NVLIST_SIZE, sizeof (uint64_t), tx);
3451
3452 if (zap_add(spa->spa_meta_objset,
3453 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CONFIG,
3454 sizeof (uint64_t), 1, &spa->spa_config_object, tx) != 0) {
3455 cmn_err(CE_PANIC, "failed to add pool config");
3456 }
3457
3458 if (spa_version(spa) >= SPA_VERSION_FEATURES)
3459 spa_feature_create_zap_objects(spa, tx);
3460
3095 if (zap_add(spa->spa_meta_objset,
3096 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CREATION_VERSION,
3097 sizeof (uint64_t), 1, &version, tx) != 0) {
3098 cmn_err(CE_PANIC, "failed to add pool version");
3099 }
3100
3101 /* Newly created pools with the right version are always deflated. */
3102 if (version >= SPA_VERSION_RAIDZ_DEFLATE) {

--- 175 unchanged lines hidden (view full) ---

3278 if (strstr(devpath, "/iscsi/ssd") != NULL) {
3279 /* iscsi boot */
3280 get_iscsi_bootpath_phy(devpath);
3281 config = spa_generate_rootconf(devpath, devid, &guid);
3282 }
3283 }
3284#endif
3285 if (config == NULL) {
3461 if (zap_add(spa->spa_meta_objset,
3462 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CREATION_VERSION,
3463 sizeof (uint64_t), 1, &version, tx) != 0) {
3464 cmn_err(CE_PANIC, "failed to add pool version");
3465 }
3466
3467 /* Newly created pools with the right version are always deflated. */
3468 if (version >= SPA_VERSION_RAIDZ_DEFLATE) {

--- 175 unchanged lines hidden (view full) ---

3644 if (strstr(devpath, "/iscsi/ssd") != NULL) {
3645 /* iscsi boot */
3646 get_iscsi_bootpath_phy(devpath);
3647 config = spa_generate_rootconf(devpath, devid, &guid);
3648 }
3649 }
3650#endif
3651 if (config == NULL) {
3286 cmn_err(CE_NOTE, "Can not read the pool label from '%s'",
3652 cmn_err(CE_NOTE, "Cannot read the pool label from '%s'",
3287 devpath);
3288 return (EIO);
3289 }
3290
3291 VERIFY(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
3292 &pname) == 0);
3293 VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG, &txg) == 0);
3294

--- 426 unchanged lines hidden (view full) ---

3721 if (spa->spa_root_vdev != NULL) {
3722 config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
3723 VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME,
3724 poolname) == 0);
3725 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE,
3726 state) == 0);
3727 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_TIMESTAMP,
3728 spa->spa_uberblock.ub_timestamp) == 0);
3653 devpath);
3654 return (EIO);
3655 }
3656
3657 VERIFY(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
3658 &pname) == 0);
3659 VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG, &txg) == 0);
3660

--- 426 unchanged lines hidden (view full) ---

4087 if (spa->spa_root_vdev != NULL) {
4088 config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
4089 VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME,
4090 poolname) == 0);
4091 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE,
4092 state) == 0);
4093 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_TIMESTAMP,
4094 spa->spa_uberblock.ub_timestamp) == 0);
4095 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_LOAD_INFO,
4096 spa->spa_load_info) == 0);
3729
3730 /*
3731 * If the bootfs property exists on this pool then we
3732 * copy it out so that external consumers can tell which
3733 * pools are bootable.
3734 */
3735 if ((!error || error == EEXIST) && spa->spa_bootfs) {
3736 char *tmpname = kmem_alloc(MAXPATHLEN, KM_SLEEP);

--- 1093 unchanged lines hidden (view full) ---

4830
4831 if (error)
4832 return (error);
4833
4834 /*
4835 * The evacuation succeeded. Remove any remaining MOS metadata
4836 * associated with this vdev, and wait for these changes to sync.
4837 */
4097
4098 /*
4099 * If the bootfs property exists on this pool then we
4100 * copy it out so that external consumers can tell which
4101 * pools are bootable.
4102 */
4103 if ((!error || error == EEXIST) && spa->spa_bootfs) {
4104 char *tmpname = kmem_alloc(MAXPATHLEN, KM_SLEEP);

--- 1093 unchanged lines hidden (view full) ---

5198
5199 if (error)
5200 return (error);
5201
5202 /*
5203 * The evacuation succeeded. Remove any remaining MOS metadata
5204 * associated with this vdev, and wait for these changes to sync.
5205 */
4838 ASSERT3U(vd->vdev_stat.vs_alloc, ==, 0);
5206 ASSERT0(vd->vdev_stat.vs_alloc);
4839 txg = spa_vdev_config_enter(spa);
4840 vd->vdev_removing = B_TRUE;
4841 vdev_dirty(vd, 0, NULL, txg);
4842 vdev_config_dirty(vd);
4843 spa_vdev_config_exit(spa, NULL, txg, 0, FTAG);
4844
4845 return (0);
4846}

--- 599 unchanged lines hidden (view full) ---

5446
5447 VERIFY(nvlist_size(nv, &nvsize, NV_ENCODE_XDR) == 0);
5448
5449 /*
5450 * Write full (SPA_CONFIG_BLOCKSIZE) blocks of configuration
5451 * information. This avoids the dbuf_will_dirty() path and
5452 * saves us a pre-read to get data we don't actually care about.
5453 */
5207 txg = spa_vdev_config_enter(spa);
5208 vd->vdev_removing = B_TRUE;
5209 vdev_dirty(vd, 0, NULL, txg);
5210 vdev_config_dirty(vd);
5211 spa_vdev_config_exit(spa, NULL, txg, 0, FTAG);
5212
5213 return (0);
5214}

--- 599 unchanged lines hidden (view full) ---

5814
5815 VERIFY(nvlist_size(nv, &nvsize, NV_ENCODE_XDR) == 0);
5816
5817 /*
5818 * Write full (SPA_CONFIG_BLOCKSIZE) blocks of configuration
5819 * information. This avoids the dbuf_will_dirty() path and
5820 * saves us a pre-read to get data we don't actually care about.
5821 */
5454 bufsize = P2ROUNDUP(nvsize, SPA_CONFIG_BLOCKSIZE);
5822 bufsize = P2ROUNDUP((uint64_t)nvsize, SPA_CONFIG_BLOCKSIZE);
5455 packed = kmem_alloc(bufsize, KM_SLEEP);
5456
5457 VERIFY(nvlist_pack(nv, &packed, &nvsize, NV_ENCODE_XDR,
5458 KM_SLEEP) == 0);
5459 bzero(packed + nvsize, bufsize - nvsize);
5460
5461 dmu_write(spa->spa_meta_objset, obj, 0, bufsize, packed, tx);
5462

--- 59 unchanged lines hidden (view full) ---

5522 if (list_is_empty(&spa->spa_config_dirty_list))
5523 return;
5524
5525 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
5526
5527 config = spa_config_generate(spa, spa->spa_root_vdev,
5528 dmu_tx_get_txg(tx), B_FALSE);
5529
5823 packed = kmem_alloc(bufsize, KM_SLEEP);
5824
5825 VERIFY(nvlist_pack(nv, &packed, &nvsize, NV_ENCODE_XDR,
5826 KM_SLEEP) == 0);
5827 bzero(packed + nvsize, bufsize - nvsize);
5828
5829 dmu_write(spa->spa_meta_objset, obj, 0, bufsize, packed, tx);
5830

--- 59 unchanged lines hidden (view full) ---

5890 if (list_is_empty(&spa->spa_config_dirty_list))
5891 return;
5892
5893 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
5894
5895 config = spa_config_generate(spa, spa->spa_root_vdev,
5896 dmu_tx_get_txg(tx), B_FALSE);
5897
5898 /*
5899 * If we're upgrading the spa version then make sure that
5900 * the config object gets updated with the correct version.
5901 */
5902 if (spa->spa_ubsync.ub_version < spa->spa_uberblock.ub_version)
5903 fnvlist_add_uint64(config, ZPOOL_CONFIG_VERSION,
5904 spa->spa_uberblock.ub_version);
5905
5530 spa_config_exit(spa, SCL_STATE, FTAG);
5531
5532 if (spa->spa_config_syncing)
5533 nvlist_free(spa->spa_config_syncing);
5534 spa->spa_config_syncing = config;
5535
5536 spa_sync_nvlist(spa, spa->spa_config_object, config, tx);
5537}
5538
5906 spa_config_exit(spa, SCL_STATE, FTAG);
5907
5908 if (spa->spa_config_syncing)
5909 nvlist_free(spa->spa_config_syncing);
5910 spa->spa_config_syncing = config;
5911
5912 spa_sync_nvlist(spa, spa->spa_config_object, config, tx);
5913}
5914
5915static void
5916spa_sync_version(void *arg1, void *arg2, dmu_tx_t *tx)
5917{
5918 spa_t *spa = arg1;
5919 uint64_t version = *(uint64_t *)arg2;
5920
5921 /*
5922 * Setting the version is special cased when first creating the pool.
5923 */
5924 ASSERT(tx->tx_txg != TXG_INITIAL);
5925
5926 ASSERT(version <= SPA_VERSION);
5927 ASSERT(version >= spa_version(spa));
5928
5929 spa->spa_uberblock.ub_version = version;
5930 vdev_config_dirty(spa->spa_root_vdev);
5931}
5932
5539/*
5540 * Set zpool properties.
5541 */
5542static void
5543spa_sync_props(void *arg1, void *arg2, dmu_tx_t *tx)
5544{
5545 spa_t *spa = arg1;
5546 objset_t *mos = spa->spa_meta_objset;
5547 nvlist_t *nvp = arg2;
5933/*
5934 * Set zpool properties.
5935 */
5936static void
5937spa_sync_props(void *arg1, void *arg2, dmu_tx_t *tx)
5938{
5939 spa_t *spa = arg1;
5940 objset_t *mos = spa->spa_meta_objset;
5941 nvlist_t *nvp = arg2;
5548 nvpair_t *elem;
5549 uint64_t intval;
5550 char *strval;
5551 zpool_prop_t prop;
5552 const char *propname;
5553 zprop_type_t proptype;
5942 nvpair_t *elem = NULL;
5554
5555 mutex_enter(&spa->spa_props_lock);
5556
5943
5944 mutex_enter(&spa->spa_props_lock);
5945
5557 elem = NULL;
5558 while ((elem = nvlist_next_nvpair(nvp, elem))) {
5946 while ((elem = nvlist_next_nvpair(nvp, elem))) {
5947 uint64_t intval;
5948 char *strval, *fname;
5949 zpool_prop_t prop;
5950 const char *propname;
5951 zprop_type_t proptype;
5952 zfeature_info_t *feature;
5953
5559 switch (prop = zpool_name_to_prop(nvpair_name(elem))) {
5954 switch (prop = zpool_name_to_prop(nvpair_name(elem))) {
5955 case ZPROP_INVAL:
5956 /*
5957 * We checked this earlier in spa_prop_validate().
5958 */
5959 ASSERT(zpool_prop_feature(nvpair_name(elem)));
5960
5961 fname = strchr(nvpair_name(elem), '@') + 1;
5962 VERIFY3U(0, ==, zfeature_lookup_name(fname, &feature));
5963
5964 spa_feature_enable(spa, feature, tx);
5965 break;
5966
5560 case ZPOOL_PROP_VERSION:
5967 case ZPOOL_PROP_VERSION:
5968 VERIFY(nvpair_value_uint64(elem, &intval) == 0);
5561 /*
5969 /*
5562 * Only set version for non-zpool-creation cases
5563 * (set/import). spa_create() needs special care
5564 * for version setting.
 5970 * The version is synced separately before other
 5971 * properties and should be correct by now.
5565 */
5972 */
5566 if (tx->tx_txg != TXG_INITIAL) {
5567 VERIFY(nvpair_value_uint64(elem,
5568 &intval) == 0);
5569 ASSERT(intval <= SPA_VERSION);
5570 ASSERT(intval >= spa_version(spa));
5571 spa->spa_uberblock.ub_version = intval;
5572 vdev_config_dirty(spa->spa_root_vdev);
5573 }
5973 ASSERT3U(spa_version(spa), >=, intval);
5574 break;
5575
5576 case ZPOOL_PROP_ALTROOT:
5577 /*
5578 * 'altroot' is a non-persistent property. It should
5579 * have been set temporarily at creation or import time.
5580 */
5581 ASSERT(spa->spa_root != NULL);

--- 20 unchanged lines hidden (view full) ---

5602 if (tx->tx_txg != TXG_INITIAL)
5603 vdev_config_dirty(spa->spa_root_vdev);
5604 break;
5605 default:
5606 /*
5607 * Set pool property values in the poolprops mos object.
5608 */
5609 if (spa->spa_pool_props_object == 0) {
5974 break;
5975
5976 case ZPOOL_PROP_ALTROOT:
5977 /*
5978 * 'altroot' is a non-persistent property. It should
5979 * have been set temporarily at creation or import time.
5980 */
5981 ASSERT(spa->spa_root != NULL);

--- 20 unchanged lines hidden (view full) ---

6002 if (tx->tx_txg != TXG_INITIAL)
6003 vdev_config_dirty(spa->spa_root_vdev);
6004 break;
6005 default:
6006 /*
6007 * Set pool property values in the poolprops mos object.
6008 */
6009 if (spa->spa_pool_props_object == 0) {
5610 VERIFY((spa->spa_pool_props_object =
5611 zap_create(mos, DMU_OT_POOL_PROPS,
5612 DMU_OT_NONE, 0, tx)) > 0);
5613
5614 VERIFY(zap_update(mos,
6010 spa->spa_pool_props_object =
6011 zap_create_link(mos, DMU_OT_POOL_PROPS,
5615 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_PROPS,
6012 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_PROPS,
5616 8, 1, &spa->spa_pool_props_object, tx)
5617 == 0);
6013 tx);
5618 }
5619
5620 /* normalize the property name */
5621 propname = zpool_prop_to_name(prop);
5622 proptype = zpool_prop_get_type(prop);
5623
5624 if (nvpair_type(elem) == DATA_TYPE_STRING) {
5625 ASSERT(proptype == PROP_TYPE_STRING);

--- 82 unchanged lines hidden (view full) ---

5708
5709 if (spa->spa_ubsync.ub_version < SPA_VERSION_DIR_CLONES &&
5710 spa->spa_uberblock.ub_version >= SPA_VERSION_DIR_CLONES) {
5711 dsl_pool_upgrade_dir_clones(dp, tx);
5712
5713 /* Keeping the freedir open increases spa_minref */
5714 spa->spa_minref += 3;
5715 }
6014 }
6015
6016 /* normalize the property name */
6017 propname = zpool_prop_to_name(prop);
6018 proptype = zpool_prop_get_type(prop);
6019
6020 if (nvpair_type(elem) == DATA_TYPE_STRING) {
6021 ASSERT(proptype == PROP_TYPE_STRING);

--- 82 unchanged lines hidden (view full) ---

6104
6105 if (spa->spa_ubsync.ub_version < SPA_VERSION_DIR_CLONES &&
6106 spa->spa_uberblock.ub_version >= SPA_VERSION_DIR_CLONES) {
6107 dsl_pool_upgrade_dir_clones(dp, tx);
6108
6109 /* Keeping the freedir open increases spa_minref */
6110 spa->spa_minref += 3;
6111 }
6112
6113 if (spa->spa_ubsync.ub_version < SPA_VERSION_FEATURES &&
6114 spa->spa_uberblock.ub_version >= SPA_VERSION_FEATURES) {
6115 spa_feature_create_zap_objects(spa, tx);
6116 }
5716}
5717
5718/*
5719 * Sync the specified transaction group. New blocks may be dirtied as
5720 * part of the process, so we iterate until it converges.
5721 */
5722void
5723spa_sync(spa_t *spa, uint64_t txg)

--- 74 unchanged lines hidden (view full) ---

5798 if (!txg_list_empty(&dp->dp_dirty_datasets, txg) ||
5799 !txg_list_empty(&dp->dp_dirty_dirs, txg) ||
5800 !txg_list_empty(&dp->dp_sync_tasks, txg) ||
5801 ((dsl_scan_active(dp->dp_scan) ||
5802 txg_sync_waiting(dp)) && !spa_shutting_down(spa))) {
5803 zio_t *zio = zio_root(spa, NULL, NULL, 0);
5804 VERIFY3U(bpobj_iterate(defer_bpo,
5805 spa_free_sync_cb, zio, tx), ==, 0);
6117}
6118
6119/*
6120 * Sync the specified transaction group. New blocks may be dirtied as
6121 * part of the process, so we iterate until it converges.
6122 */
6123void
6124spa_sync(spa_t *spa, uint64_t txg)

--- 74 unchanged lines hidden (view full) ---

6199 if (!txg_list_empty(&dp->dp_dirty_datasets, txg) ||
6200 !txg_list_empty(&dp->dp_dirty_dirs, txg) ||
6201 !txg_list_empty(&dp->dp_sync_tasks, txg) ||
6202 ((dsl_scan_active(dp->dp_scan) ||
6203 txg_sync_waiting(dp)) && !spa_shutting_down(spa))) {
6204 zio_t *zio = zio_root(spa, NULL, NULL, 0);
6205 VERIFY3U(bpobj_iterate(defer_bpo,
6206 spa_free_sync_cb, zio, tx), ==, 0);
5806 VERIFY3U(zio_wait(zio), ==, 0);
6207 VERIFY0(zio_wait(zio));
5807 }
5808
5809 /*
5810 * Iterate to convergence.
5811 */
5812 do {
5813 int pass = ++spa->spa_sync_pass;
5814

--- 63 unchanged lines hidden (view full) ---

5878 } else {
5879 error = vdev_config_sync(rvd->vdev_child,
5880 rvd->vdev_children, txg, B_FALSE);
5881 if (error != 0)
5882 error = vdev_config_sync(rvd->vdev_child,
5883 rvd->vdev_children, txg, B_TRUE);
5884 }
5885
6208 }
6209
6210 /*
6211 * Iterate to convergence.
6212 */
6213 do {
6214 int pass = ++spa->spa_sync_pass;
6215

--- 63 unchanged lines hidden (view full) ---

6279 } else {
6280 error = vdev_config_sync(rvd->vdev_child,
6281 rvd->vdev_children, txg, B_FALSE);
6282 if (error != 0)
6283 error = vdev_config_sync(rvd->vdev_child,
6284 rvd->vdev_children, txg, B_TRUE);
6285 }
6286
6287 if (error == 0)
6288 spa->spa_last_synced_guid = rvd->vdev_guid;
6289
5886 spa_config_exit(spa, SCL_STATE, FTAG);
5887
5888 if (error == 0)
5889 break;
5890 zio_suspend(spa, NULL);
5891 zio_resume_wait(spa);
5892 }
5893 dmu_tx_commit(tx);

--- 259 unchanged lines hidden ---
6290 spa_config_exit(spa, SCL_STATE, FTAG);
6291
6292 if (error == 0)
6293 break;
6294 zio_suspend(spa, NULL);
6295 zio_resume_wait(spa);
6296 }
6297 dmu_tx_commit(tx);

--- 259 unchanged lines hidden ---