--- dsl_pool.c	(old)
+++ dsl_pool.c	(new)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
- * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/dsl_pool.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_synctask.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/arc.h>
#include <sys/zap.h>
#include <sys/zio.h>
#include <sys/zfs_context.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_znode.h>
#include <sys/spa_impl.h>

int zfs_no_write_throttle = 0;
int zfs_write_limit_shift = 3;			/* 1/8th of physical memory */
int zfs_txg_synctime = 5;			/* target secs to sync a txg */

uint64_t zfs_write_limit_min = 32 << 20;	/* min write limit is 32MB */
uint64_t zfs_write_limit_max = 0;		/* max data payload per txg */
uint64_t zfs_write_limit_inflated = 0;
uint64_t zfs_write_limit_override = 0;
extern uint64_t zfs_write_limit_min;

kmutex_t zfs_write_limit_lock;

static pgcnt_t old_physmem = 0;

static int
dsl_pool_open_special_dir(dsl_pool_t *dp, const char *name, dsl_dir_t **ddp)
{
	uint64_t obj;
	int err;

	err = zap_lookup(dp->dp_meta_objset,
	    dp->dp_root_dir->dd_phys->dd_child_dir_zapobj,
	    name, sizeof (obj), 1, &obj);
	if (err)
		return (err);

	return (dsl_dir_open_obj(dp, obj, name, dp, ddp));
}

static dsl_pool_t *
dsl_pool_open_impl(spa_t *spa, uint64_t txg)
{
	dsl_pool_t *dp;
	blkptr_t *bp = spa_get_rootblkptr(spa);

	dp = kmem_zalloc(sizeof (dsl_pool_t), KM_SLEEP);
	dp->dp_spa = spa;
	dp->dp_meta_rootbp = *bp;
	rw_init(&dp->dp_config_rwlock, NULL, RW_DEFAULT, NULL);
	dp->dp_write_limit = zfs_write_limit_min;
	txg_init(dp, txg);

	txg_list_create(&dp->dp_dirty_datasets,
	    offsetof(dsl_dataset_t, ds_dirty_link));
	txg_list_create(&dp->dp_dirty_dirs,
	    offsetof(dsl_dir_t, dd_dirty_link));
	txg_list_create(&dp->dp_sync_tasks,
	    offsetof(dsl_sync_task_group_t, dstg_node));
	list_create(&dp->dp_synced_datasets, sizeof (dsl_dataset_t),
	    offsetof(dsl_dataset_t, ds_synced_link));

	mutex_init(&dp->dp_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&dp->dp_scrub_cancel_lock, NULL, MUTEX_DEFAULT, NULL);

	dp->dp_vnrele_taskq = taskq_create("zfs_vn_rele_taskq", 1, minclsyspri,
	    1, 4, 0);

	return (dp);
}

int
dsl_pool_open(spa_t *spa, uint64_t txg, dsl_pool_t **dpp)
{
	int err;
	dsl_pool_t *dp = dsl_pool_open_impl(spa, txg);
	dsl_dir_t *dd;
	dsl_dataset_t *ds;
	objset_impl_t *osi;

	rw_enter(&dp->dp_config_rwlock, RW_WRITER);
	err = dmu_objset_open_impl(spa, NULL, &dp->dp_meta_rootbp, &osi);
	if (err)
		goto out;
	dp->dp_meta_objset = &osi->os;

	err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_ROOT_DATASET, sizeof (uint64_t), 1,
	    &dp->dp_root_dir_obj);
	if (err)
		goto out;

	err = dsl_dir_open_obj(dp, dp->dp_root_dir_obj,
	    NULL, dp, &dp->dp_root_dir);
	if (err)
		goto out;

	err = dsl_pool_open_special_dir(dp, MOS_DIR_NAME, &dp->dp_mos_dir);
	if (err)
		goto out;

	if (spa_version(spa) >= SPA_VERSION_ORIGIN) {
		err = dsl_pool_open_special_dir(dp, ORIGIN_DIR_NAME, &dd);
		if (err)
			goto out;
		err = dsl_dataset_hold_obj(dp, dd->dd_phys->dd_head_dataset_obj,
		    FTAG, &ds);
-		if (err)
-			goto out;
-		err = dsl_dataset_hold_obj(dp, ds->ds_phys->ds_prev_snap_obj,
-		    dp, &dp->dp_origin_snap);
-		if (err)
-			goto out;
-		dsl_dataset_rele(ds, FTAG);
-		dsl_dir_close(dd, dp);
+		if (err == 0) {
+			err = dsl_dataset_hold_obj(dp,
+			    ds->ds_phys->ds_prev_snap_obj, dp,
+			    &dp->dp_origin_snap);
+			dsl_dataset_rele(ds, FTAG);
+		}
+		dsl_dir_close(dd, dp);
+		if (err)
+			goto out;
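		/*
		 * Editor's note, not part of the original change: the
		 * restructure above also makes the cleanup unconditional.
		 * In the old code, a failure in either
		 * dsl_dataset_hold_obj() call jumped to "out" with dd
		 * (and possibly ds) still held, leaking those holds; now
		 * dsl_dataset_rele() and dsl_dir_close() run before the
		 * error is checked.
		 */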
	}

	/* get scrub status */
	err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_SCRUB_FUNC, sizeof (uint32_t), 1,
	    &dp->dp_scrub_func);
	if (err == 0) {
		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_SCRUB_QUEUE, sizeof (uint64_t), 1,
		    &dp->dp_scrub_queue_obj);
		if (err)
			goto out;
		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_SCRUB_MIN_TXG, sizeof (uint64_t), 1,
		    &dp->dp_scrub_min_txg);
		if (err)
			goto out;
		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_SCRUB_MAX_TXG, sizeof (uint64_t), 1,
		    &dp->dp_scrub_max_txg);
		if (err)
			goto out;
		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_SCRUB_BOOKMARK, sizeof (uint64_t), 4,
		    &dp->dp_scrub_bookmark);
		if (err)
			goto out;
		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_SCRUB_ERRORS, sizeof (uint64_t), 1,
		    &spa->spa_scrub_errors);
		if (err)
			goto out;
		if (spa_version(spa) < SPA_VERSION_DSL_SCRUB) {
			/*
			 * A new-type scrub was in progress on an old
			 * pool.  Restart from the beginning, since the
			 * old software may have changed the pool in the
			 * meantime.
			 */
			dsl_pool_scrub_restart(dp);
		}
	} else {
		/*
		 * It's OK if there is no scrub in progress (and if
		 * there was an I/O error, ignore it).
		 */
		err = 0;
	}

out:
	rw_exit(&dp->dp_config_rwlock);
	if (err)
		dsl_pool_close(dp);
	else
		*dpp = dp;

	return (err);
}

void
dsl_pool_close(dsl_pool_t *dp)
{
	/* drop our references from dsl_pool_open() */

	/*
	 * Since we held the origin_snap from "syncing" context (which
	 * includes pool-opening context), it actually only got a "ref"
	 * and not a hold, so just drop that here.
	 */
	if (dp->dp_origin_snap)
		dsl_dataset_drop_ref(dp->dp_origin_snap, dp);
	if (dp->dp_mos_dir)
		dsl_dir_close(dp->dp_mos_dir, dp);
	if (dp->dp_root_dir)
		dsl_dir_close(dp->dp_root_dir, dp);

	/* undo the dmu_objset_open_impl(mos) from dsl_pool_open() */
	if (dp->dp_meta_objset)
		dmu_objset_evict(NULL, dp->dp_meta_objset->os);

	txg_list_destroy(&dp->dp_dirty_datasets);
	txg_list_destroy(&dp->dp_dirty_dirs);
	txg_list_destroy(&dp->dp_sync_tasks);
	list_destroy(&dp->dp_synced_datasets);

	arc_flush(dp->dp_spa);
	txg_fini(dp);
	rw_destroy(&dp->dp_config_rwlock);
	mutex_destroy(&dp->dp_lock);
	mutex_destroy(&dp->dp_scrub_cancel_lock);
	taskq_destroy(dp->dp_vnrele_taskq);
	if (dp->dp_blkstats)
		kmem_free(dp->dp_blkstats, sizeof (zfs_all_blkstats_t));
	kmem_free(dp, sizeof (dsl_pool_t));
}

dsl_pool_t *
dsl_pool_create(spa_t *spa, nvlist_t *zplprops, uint64_t txg)
{
	int err;
	dsl_pool_t *dp = dsl_pool_open_impl(spa, txg);
	dmu_tx_t *tx = dmu_tx_create_assigned(dp, txg);
	objset_impl_t *osip;
	dsl_dataset_t *ds;
	uint64_t dsobj;

	/* create and open the MOS (meta-objset) */
	dp->dp_meta_objset = &dmu_objset_create_impl(spa,
	    NULL, &dp->dp_meta_rootbp, DMU_OST_META, tx)->os;

	/* create the pool directory */
	err = zap_create_claim(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_OT_OBJECT_DIRECTORY, DMU_OT_NONE, 0, tx);
	ASSERT3U(err, ==, 0);

	/* create and open the root dir */
	dp->dp_root_dir_obj = dsl_dir_create_sync(dp, NULL, NULL, tx);
	VERIFY(0 == dsl_dir_open_obj(dp, dp->dp_root_dir_obj,
	    NULL, dp, &dp->dp_root_dir));

	/* create and open the meta-objset dir */
	(void) dsl_dir_create_sync(dp, dp->dp_root_dir, MOS_DIR_NAME, tx);
	VERIFY(0 == dsl_pool_open_special_dir(dp,
	    MOS_DIR_NAME, &dp->dp_mos_dir));

	if (spa_version(spa) >= SPA_VERSION_DSL_SCRUB)
		dsl_pool_create_origin(dp, tx);

	/* create the root dataset */
	dsobj = dsl_dataset_create_sync_dd(dp->dp_root_dir, NULL, 0, tx);

	/* create the root objset */
	VERIFY(0 == dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));
	osip = dmu_objset_create_impl(dp->dp_spa, ds,
	    dsl_dataset_get_blkptr(ds), DMU_OST_ZFS, tx);
#ifdef _KERNEL
	zfs_create_fs(&osip->os, kcred, zplprops, tx);
#endif
	dsl_dataset_rele(ds, FTAG);

	dmu_tx_commit(tx);

	return (dp);
}

void
dsl_pool_sync(dsl_pool_t *dp, uint64_t txg)
{
	zio_t *zio;
	dmu_tx_t *tx;
	dsl_dir_t *dd;
	dsl_dataset_t *ds;
	dsl_sync_task_group_t *dstg;
	objset_impl_t *mosi = dp->dp_meta_objset->os;
	hrtime_t start, write_time;
	uint64_t data_written;
	int err;

	tx = dmu_tx_create_assigned(dp, txg);

	dp->dp_read_overhead = 0;
	start = gethrtime();
+
	zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
	while (ds = txg_list_remove(&dp->dp_dirty_datasets, txg)) {
-		if (!list_link_active(&ds->ds_synced_link))
-			list_insert_tail(&dp->dp_synced_datasets, ds);
-		else
-			dmu_buf_rele(ds->ds_dbuf, ds);
+		/*
+		 * We must not sync any non-MOS datasets twice, because
+		 * we may have taken a snapshot of them.  However, we
+		 * may sync newly-created datasets on pass 2.
+		 */
+		ASSERT(!list_link_active(&ds->ds_synced_link));
+		list_insert_tail(&dp->dp_synced_datasets, ds);
		dsl_dataset_sync(ds, zio, tx);
	}
	DTRACE_PROBE(pool_sync__1setup);
-
	err = zio_wait(zio);
+
	write_time = gethrtime() - start;
	ASSERT(err == 0);
	DTRACE_PROBE(pool_sync__2rootzio);

-	while (dstg = txg_list_remove(&dp->dp_sync_tasks, txg))
+	for (ds = list_head(&dp->dp_synced_datasets); ds;
+	    ds = list_next(&dp->dp_synced_datasets, ds))
+		dmu_objset_do_userquota_callbacks(ds->ds_user_ptr, tx);
+
+	/*
+	 * Sync the datasets again to push out the changes due to
+	 * userquota updates.  This must be done before we process the
+	 * sync tasks, because that could cause a snapshot of a dataset
+	 * whose ds_bp will be rewritten when we do this 2nd sync.
+	 */
+	zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
+	while (ds = txg_list_remove(&dp->dp_dirty_datasets, txg)) {
+		ASSERT(list_link_active(&ds->ds_synced_link));
+		dmu_buf_rele(ds->ds_dbuf, ds);
+		dsl_dataset_sync(ds, zio, tx);
+	}
+	err = zio_wait(zio);
+
+	while (dstg = txg_list_remove(&dp->dp_sync_tasks, txg)) {
+		/*
+		 * No more sync tasks should have been added while we
+		 * were syncing.
+		 */
+		ASSERT(spa_sync_pass(dp->dp_spa) == 1);
		dsl_sync_task_group_sync(dstg, tx);
+	}
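	/*
	 * Editor's note, not part of the original change: the userquota
	 * callbacks above fold the per-txg user/group space deltas into
	 * each objset's accounting objects, which re-dirties those
	 * datasets in this txg; that is why the second
	 * zio_root()/dsl_dataset_sync() pass is needed before any sync
	 * task (e.g. a snapshot) captures a dataset's block pointer.
	 */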
	DTRACE_PROBE(pool_sync__3task);

	start = gethrtime();
	while (dd = txg_list_remove(&dp->dp_dirty_dirs, txg))
		dsl_dir_sync(dd, tx);
	write_time += gethrtime() - start;

	if (spa_sync_pass(dp->dp_spa) == 1)
		dsl_pool_scrub_sync(dp, tx);

	start = gethrtime();
	if (list_head(&mosi->os_dirty_dnodes[txg & TXG_MASK]) != NULL ||
	    list_head(&mosi->os_free_dnodes[txg & TXG_MASK]) != NULL) {
		zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
		dmu_objset_sync(mosi, zio, tx);
		err = zio_wait(zio);
		ASSERT(err == 0);
		dprintf_bp(&dp->dp_meta_rootbp, "meta objset rootbp is %s", "");
		spa_set_rootblkptr(dp->dp_spa, &dp->dp_meta_rootbp);
	}
	write_time += gethrtime() - start;
	DTRACE_PROBE2(pool_sync__4io, hrtime_t, write_time,
	    hrtime_t, dp->dp_read_overhead);
	write_time -= dp->dp_read_overhead;

	dmu_tx_commit(tx);

	data_written = dp->dp_space_towrite[txg & TXG_MASK];
	dp->dp_space_towrite[txg & TXG_MASK] = 0;
	ASSERT(dp->dp_tempreserved[txg & TXG_MASK] == 0);

	/*
	 * If the write limit max has not been explicitly set, set it
	 * to a fraction of available physical memory (default 1/8th).
	 * Note that we must inflate the limit because the spa
	 * inflates write sizes to account for data replication.
	 * Check this each sync phase to catch changing memory size.
	 */
	if (physmem != old_physmem && zfs_write_limit_shift) {
		mutex_enter(&zfs_write_limit_lock);
		old_physmem = physmem;
		zfs_write_limit_max = ptob(physmem) >> zfs_write_limit_shift;
		zfs_write_limit_inflated = MAX(zfs_write_limit_min,
		    spa_get_asize(dp->dp_spa, zfs_write_limit_max));
		mutex_exit(&zfs_write_limit_lock);
	}

	/*
	 * Attempt to keep the sync time consistent by adjusting the
	 * amount of write traffic allowed into each transaction group.
	 * Weight the throughput calculation towards the current value:
	 *	thru = 3/4 old_thru + 1/4 new_thru
	 */
	ASSERT(zfs_write_limit_min > 0);
	if (data_written > zfs_write_limit_min / 8 && write_time > 0) {
		uint64_t throughput = (data_written * NANOSEC) / write_time;
		if (dp->dp_throughput)
			dp->dp_throughput = throughput / 4 +
			    3 * dp->dp_throughput / 4;
		else
			dp->dp_throughput = throughput;
		dp->dp_write_limit = MIN(zfs_write_limit_inflated,
		    MAX(zfs_write_limit_min,
		    dp->dp_throughput * zfs_txg_synctime));
	}
}

void
dsl_pool_zil_clean(dsl_pool_t *dp)
{
	dsl_dataset_t *ds;

	while (ds = list_head(&dp->dp_synced_datasets)) {
		list_remove(&dp->dp_synced_datasets, ds);
		ASSERT(ds->ds_user_ptr != NULL);
		zil_clean(((objset_impl_t *)ds->ds_user_ptr)->os_zil);
		dmu_buf_rele(ds->ds_dbuf, ds);
	}
}

/*
 * TRUE if the current thread is the tx_sync_thread or if we
 * are being called from SPA context during pool initialization.
 */
int
dsl_pool_sync_context(dsl_pool_t *dp)
{
	return (curthread == dp->dp_tx.tx_sync_thread ||
	    spa_get_dsl(dp->dp_spa) == NULL);
}

uint64_t
dsl_pool_adjustedsize(dsl_pool_t *dp, boolean_t netfree)
{
	uint64_t space, resv;

	/*
	 * Reserve about 1.6% (1/64), or at least 32MB, for allocation
	 * efficiency.
	 * XXX The intent log is not accounted for, so it must fit
	 * within this slop.
	 *
	 * If we're trying to assess whether it's OK to do a free,
	 * cut the reservation in half to allow forward progress
	 * (e.g. make it possible to rm(1) files from a full pool).
	 */
	space = spa_get_dspace(dp->dp_spa);
	resv = MAX(space >> 6, SPA_MINDEVSIZE >> 1);
	if (netfree)
		resv >>= 1;

	return (space - resv);
}

int
dsl_pool_tempreserve_space(dsl_pool_t *dp, uint64_t space, dmu_tx_t *tx)
{
	uint64_t reserved = 0;
	uint64_t write_limit = (zfs_write_limit_override ?
	    zfs_write_limit_override : dp->dp_write_limit);

	if (zfs_no_write_throttle) {
		atomic_add_64(&dp->dp_tempreserved[tx->tx_txg & TXG_MASK],
		    space);
		return (0);
	}

	/*
	 * Check to see if we have exceeded the maximum allowed IO for
	 * this transaction group.  We can do this without locks since
	 * a little slop here is ok.  Note that we do the reserved check
	 * with only half the requested reserve: this is because the
	 * reserve requests are worst-case, and we really don't want to
	 * throttle based off of worst-case estimates.
	 */
	if (write_limit > 0) {
		reserved = dp->dp_space_towrite[tx->tx_txg & TXG_MASK]
		    + dp->dp_tempreserved[tx->tx_txg & TXG_MASK] / 2;

		if (reserved && reserved > write_limit)
			return (ERESTART);
	}

	atomic_add_64(&dp->dp_tempreserved[tx->tx_txg & TXG_MASK], space);

	/*
	 * If this transaction group is over 7/8ths capacity, delay
	 * the caller 1 clock tick.  This will slow down the "fill"
	 * rate until the sync process can catch up with us.
	 */
	if (reserved && reserved > (write_limit - (write_limit >> 3)))
		txg_delay(dp, tx->tx_txg, 1);

	return (0);
}

void
dsl_pool_tempreserve_clear(dsl_pool_t *dp, int64_t space, dmu_tx_t *tx)
{
	ASSERT(dp->dp_tempreserved[tx->tx_txg & TXG_MASK] >= space);
	atomic_add_64(&dp->dp_tempreserved[tx->tx_txg & TXG_MASK], -space);
}

void
dsl_pool_memory_pressure(dsl_pool_t *dp)
{
	uint64_t space_inuse = 0;
	int i;

	if (dp->dp_write_limit == zfs_write_limit_min)
		return;

	for (i = 0; i < TXG_SIZE; i++) {
		space_inuse += dp->dp_space_towrite[i];
		space_inuse += dp->dp_tempreserved[i];
	}
	dp->dp_write_limit = MAX(zfs_write_limit_min,
	    MIN(dp->dp_write_limit, space_inuse / 4));
}

void
dsl_pool_willuse_space(dsl_pool_t *dp, int64_t space, dmu_tx_t *tx)
{
	if (space > 0) {
		mutex_enter(&dp->dp_lock);
		dp->dp_space_towrite[tx->tx_txg & TXG_MASK] += space;
		mutex_exit(&dp->dp_lock);
	}
}

/* ARGSUSED */
static int
upgrade_clones_cb(spa_t *spa, uint64_t dsobj, const char *dsname, void *arg)
{
	dmu_tx_t *tx = arg;
	dsl_dataset_t *ds, *prev = NULL;
	int err;
	dsl_pool_t *dp = spa_get_dsl(spa);

	err = dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds);
	if (err)
		return (err);

	while (ds->ds_phys->ds_prev_snap_obj != 0) {
		err = dsl_dataset_hold_obj(dp, ds->ds_phys->ds_prev_snap_obj,
		    FTAG, &prev);
		if (err) {
			dsl_dataset_rele(ds, FTAG);
			return (err);
		}

		if (prev->ds_phys->ds_next_snap_obj != ds->ds_object)
			break;
		dsl_dataset_rele(ds, FTAG);
		ds = prev;
		prev = NULL;
	}

	if (prev == NULL) {
		prev = dp->dp_origin_snap;

		/*
		 * The $ORIGIN can't have any data, or the accounting
		 * will be wrong.
		 */
		ASSERT(prev->ds_phys->ds_bp.blk_birth == 0);

		/* The origin doesn't get attached to itself */
		if (ds->ds_object == prev->ds_object) {
			dsl_dataset_rele(ds, FTAG);
			return (0);
		}

		dmu_buf_will_dirty(ds->ds_dbuf, tx);
		ds->ds_phys->ds_prev_snap_obj = prev->ds_object;
		ds->ds_phys->ds_prev_snap_txg = prev->ds_phys->ds_creation_txg;

		dmu_buf_will_dirty(ds->ds_dir->dd_dbuf, tx);
		ds->ds_dir->dd_phys->dd_origin_obj = prev->ds_object;

		dmu_buf_will_dirty(prev->ds_dbuf, tx);
		prev->ds_phys->ds_num_children++;

		if (ds->ds_phys->ds_next_snap_obj == 0) {
			ASSERT(ds->ds_prev == NULL);
			VERIFY(0 == dsl_dataset_hold_obj(dp,
			    ds->ds_phys->ds_prev_snap_obj, ds, &ds->ds_prev));
		}
	}

	ASSERT(ds->ds_dir->dd_phys->dd_origin_obj == prev->ds_object);
	ASSERT(ds->ds_phys->ds_prev_snap_obj == prev->ds_object);

	if (prev->ds_phys->ds_next_clones_obj == 0) {
+		dmu_buf_will_dirty(prev->ds_dbuf, tx);
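		/*
		 * Editor's note, not part of the original change: a dbuf
		 * must be marked dirty in the current txg (via
		 * dmu_buf_will_dirty()) before its contents are modified;
		 * the added call above announces the write to
		 * prev->ds_phys->ds_next_clones_obj that follows.
		 */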
		prev->ds_phys->ds_next_clones_obj =
		    zap_create(dp->dp_meta_objset,
		    DMU_OT_NEXT_CLONES, DMU_OT_NONE, 0, tx);
	}
	VERIFY(0 == zap_add_int(dp->dp_meta_objset,
	    prev->ds_phys->ds_next_clones_obj, ds->ds_object, tx));

	dsl_dataset_rele(ds, FTAG);
	if (prev != dp->dp_origin_snap)
		dsl_dataset_rele(prev, FTAG);
	return (0);
}

void
dsl_pool_upgrade_clones(dsl_pool_t *dp, dmu_tx_t *tx)
{
	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(dp->dp_origin_snap != NULL);

-	(void) dmu_objset_find_spa(dp->dp_spa, NULL, upgrade_clones_cb,
-	    tx, DS_FIND_CHILDREN);
+	VERIFY3U(0, ==, dmu_objset_find_spa(dp->dp_spa, NULL, upgrade_clones_cb,
+	    tx, DS_FIND_CHILDREN));
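	/*
	 * Editor's note, not part of the original change: VERIFY3U() is
	 * active in both DEBUG and non-DEBUG builds, so a failure from
	 * dmu_objset_find_spa() now panics the system instead of being
	 * silently discarded by the old "(void)" cast.
	 */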
}

void
dsl_pool_create_origin(dsl_pool_t *dp, dmu_tx_t *tx)
{
	uint64_t dsobj;
	dsl_dataset_t *ds;

	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(dp->dp_origin_snap == NULL);

	/* create the origin dir, ds, & snap-ds */
	rw_enter(&dp->dp_config_rwlock, RW_WRITER);
	dsobj = dsl_dataset_create_sync(dp->dp_root_dir, ORIGIN_DIR_NAME,
	    NULL, 0, kcred, tx);
	VERIFY(0 == dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));
	dsl_dataset_snapshot_sync(ds, ORIGIN_DIR_NAME, kcred, tx);
	VERIFY(0 == dsl_dataset_hold_obj(dp, ds->ds_phys->ds_prev_snap_obj,
	    dp, &dp->dp_origin_snap));
	dsl_dataset_rele(ds, FTAG);
	rw_exit(&dp->dp_config_rwlock);
}

taskq_t *
dsl_pool_vnrele_taskq(dsl_pool_t *dp)
{
	return (dp->dp_vnrele_taskq);
}
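
Editor's appendix, illustrative only and not part of dsl_pool.c or of the
change above: the write-throttle arithmetic in dsl_pool_sync(),
dsl_pool_tempreserve_space(), and dsl_pool_adjustedsize() is easier to
follow with concrete numbers. The stand-alone user-space sketch below
re-derives it under stated assumptions (8 GB of physical memory, a 2x
spa_get_asize() replication factor, and a 1000 GB pool); those values do
not come from the source, and the real ones depend on the machine and the
pool layout.

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

#define	MIN(a, b)	((a) < (b) ? (a) : (b))
#define	MAX(a, b)	((a) > (b) ? (a) : (b))

int
main(void)
{
	/* Tunables, matching the defaults at the top of dsl_pool.c. */
	uint64_t zfs_write_limit_min = 32ULL << 20;	/* 32MB floor */
	int zfs_write_limit_shift = 3;		/* 1/8th of physmem */
	int zfs_txg_synctime = 5;		/* target secs per txg */

	/* Assumption: ptob(physmem) is 8 GB on this machine. */
	uint64_t ptob_physmem = 8ULL << 30;

	/* dsl_pool_sync(): recompute the limits when physmem changes. */
	uint64_t write_limit_max = ptob_physmem >> zfs_write_limit_shift;
	/* Assumption: spa_get_asize() inflates by 2x for this pool. */
	uint64_t write_limit_inflated = MAX(zfs_write_limit_min,
	    2 * write_limit_max);
	printf("max %" PRIu64 " MB, inflated %" PRIu64 " MB\n",
	    write_limit_max >> 20, write_limit_inflated >> 20);

	/* Throughput is smoothed: thru = 3/4 old_thru + 1/4 new_thru. */
	uint64_t old_thru = 200ULL << 20;	/* 200 MB/s so far */
	uint64_t new_thru = 120ULL << 20;	/* this txg came in slower */
	uint64_t thru = new_thru / 4 + 3 * old_thru / 4;	/* 180 MB/s */

	/* Next txg may carry ~zfs_txg_synctime seconds of traffic. */
	uint64_t write_limit = MIN(write_limit_inflated,
	    MAX(zfs_write_limit_min, thru * zfs_txg_synctime));
	printf("thru %" PRIu64 " MB/s -> limit %" PRIu64 " MB\n",
	    thru >> 20, write_limit >> 20);

	/*
	 * dsl_pool_tempreserve_space(): above 7/8ths of the limit,
	 * each reserving thread is delayed one clock tick.
	 */
	printf("delay above %" PRIu64 " MB\n",
	    (write_limit - (write_limit >> 3)) >> 20);

	/*
	 * dsl_pool_adjustedsize(): hold back 1/64th of the pool, but
	 * at least 32 MB (SPA_MINDEVSIZE >> 1), for allocation slop.
	 */
	uint64_t space = 1000ULL << 30;		/* assumed 1000 GB pool */
	uint64_t resv = MAX(space >> 6, 32ULL << 20);
	printf("reserve %" PRIu64 " MB of %" PRIu64 " MB\n",
	    resv >> 20, space >> 20);

	return (0);
}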