dsl_dataset.c revision 219317
1/* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21/* 22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. 23 */ 24 25#include <sys/dmu_objset.h> 26#include <sys/dsl_dataset.h> 27#include <sys/dsl_dir.h> 28#include <sys/dsl_prop.h> 29#include <sys/dsl_synctask.h> 30#include <sys/dmu_traverse.h> 31#include <sys/dmu_tx.h> 32#include <sys/arc.h> 33#include <sys/zio.h> 34#include <sys/zap.h> 35#include <sys/unique.h> 36#include <sys/zfs_context.h> 37#include <sys/zfs_ioctl.h> 38#include <sys/spa.h> 39#include <sys/zfs_znode.h> 40#include <sys/zfs_onexit.h> 41#include <sys/zvol.h> 42#include <sys/dsl_scan.h> 43#include <sys/dsl_deadlist.h> 44 45static char *dsl_reaper = "the grim reaper"; 46 47static dsl_checkfunc_t dsl_dataset_destroy_begin_check; 48static dsl_syncfunc_t dsl_dataset_destroy_begin_sync; 49static dsl_syncfunc_t dsl_dataset_set_reservation_sync; 50 51#define SWITCH64(x, y) \ 52 { \ 53 uint64_t __tmp = (x); \ 54 (x) = (y); \ 55 (y) = __tmp; \ 56 } 57 58#define DS_REF_MAX (1ULL << 62) 59 60#define DSL_DEADLIST_BLOCKSIZE SPA_MAXBLOCKSIZE 61 62#define DSL_DATASET_IS_DESTROYED(ds) ((ds)->ds_owner == dsl_reaper) 63 64 65/* 66 * Figure out how much of this delta should be propagated to the dsl_dir 67 * layer. If there's a refreservation, that space has already been 68 * partially accounted for in our ancestors. 69 */ 70static int64_t 71parent_delta(dsl_dataset_t *ds, int64_t delta) 72{ 73 uint64_t old_bytes, new_bytes; 74 75 if (ds->ds_reserved == 0) 76 return (delta); 77 78 old_bytes = MAX(ds->ds_phys->ds_unique_bytes, ds->ds_reserved); 79 new_bytes = MAX(ds->ds_phys->ds_unique_bytes + delta, ds->ds_reserved); 80 81 ASSERT3U(ABS((int64_t)(new_bytes - old_bytes)), <=, ABS(delta)); 82 return (new_bytes - old_bytes); 83} 84 85void 86dsl_dataset_block_born(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx) 87{ 88 int used = bp_get_dsize_sync(tx->tx_pool->dp_spa, bp); 89 int compressed = BP_GET_PSIZE(bp); 90 int uncompressed = BP_GET_UCSIZE(bp); 91 int64_t delta; 92 93 dprintf_bp(bp, "ds=%p", ds); 94 95 ASSERT(dmu_tx_is_syncing(tx)); 96 /* It could have been compressed away to nothing */ 97 if (BP_IS_HOLE(bp)) 98 return; 99 ASSERT(BP_GET_TYPE(bp) != DMU_OT_NONE); 100 ASSERT3U(BP_GET_TYPE(bp), <, DMU_OT_NUMTYPES); 101 if (ds == NULL) { 102 /* 103 * Account for the meta-objset space in its placeholder 104 * dsl_dir.
105 */ 106 ASSERT3U(compressed, ==, uncompressed); /* it's all metadata */ 107 dsl_dir_diduse_space(tx->tx_pool->dp_mos_dir, DD_USED_HEAD, 108 used, compressed, uncompressed, tx); 109 dsl_dir_dirty(tx->tx_pool->dp_mos_dir, tx); 110 return; 111 } 112 dmu_buf_will_dirty(ds->ds_dbuf, tx); 113 114 mutex_enter(&ds->ds_dir->dd_lock); 115 mutex_enter(&ds->ds_lock); 116 delta = parent_delta(ds, used); 117 ds->ds_phys->ds_used_bytes += used; 118 ds->ds_phys->ds_compressed_bytes += compressed; 119 ds->ds_phys->ds_uncompressed_bytes += uncompressed; 120 ds->ds_phys->ds_unique_bytes += used; 121 mutex_exit(&ds->ds_lock); 122 dsl_dir_diduse_space(ds->ds_dir, DD_USED_HEAD, delta, 123 compressed, uncompressed, tx); 124 dsl_dir_transfer_space(ds->ds_dir, used - delta, 125 DD_USED_REFRSRV, DD_USED_HEAD, tx); 126 mutex_exit(&ds->ds_dir->dd_lock); 127} 128 129int 130dsl_dataset_block_kill(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx, 131 boolean_t async) 132{ 133 if (BP_IS_HOLE(bp)) 134 return (0); 135 136 ASSERT(dmu_tx_is_syncing(tx)); 137 ASSERT(bp->blk_birth <= tx->tx_txg); 138 139 int used = bp_get_dsize_sync(tx->tx_pool->dp_spa, bp); 140 int compressed = BP_GET_PSIZE(bp); 141 int uncompressed = BP_GET_UCSIZE(bp); 142 143 ASSERT(used > 0); 144 if (ds == NULL) { 145 /* 146 * Account for the meta-objset space in its placeholder 147 * dataset. 148 */ 149 dsl_free(tx->tx_pool, tx->tx_txg, bp); 150 151 dsl_dir_diduse_space(tx->tx_pool->dp_mos_dir, DD_USED_HEAD, 152 -used, -compressed, -uncompressed, tx); 153 dsl_dir_dirty(tx->tx_pool->dp_mos_dir, tx); 154 return (used); 155 } 156 ASSERT3P(tx->tx_pool, ==, ds->ds_dir->dd_pool); 157 158 ASSERT(!dsl_dataset_is_snapshot(ds)); 159 dmu_buf_will_dirty(ds->ds_dbuf, tx); 160 161 if (bp->blk_birth > ds->ds_phys->ds_prev_snap_txg) { 162 int64_t delta; 163 164 dprintf_bp(bp, "freeing ds=%llu", ds->ds_object); 165 dsl_free(tx->tx_pool, tx->tx_txg, bp); 166 167 mutex_enter(&ds->ds_dir->dd_lock); 168 mutex_enter(&ds->ds_lock); 169 ASSERT(ds->ds_phys->ds_unique_bytes >= used || 170 !DS_UNIQUE_IS_ACCURATE(ds)); 171 delta = parent_delta(ds, -used); 172 ds->ds_phys->ds_unique_bytes -= used; 173 mutex_exit(&ds->ds_lock); 174 dsl_dir_diduse_space(ds->ds_dir, DD_USED_HEAD, 175 delta, -compressed, -uncompressed, tx); 176 dsl_dir_transfer_space(ds->ds_dir, -used - delta, 177 DD_USED_REFRSRV, DD_USED_HEAD, tx); 178 mutex_exit(&ds->ds_dir->dd_lock); 179 } else { 180 dprintf_bp(bp, "putting on dead list: %s", ""); 181 if (async) { 182 /* 183 * We are here as part of zio's write done callback, 184 * which means we're a zio interrupt thread. We can't 185 * call dsl_deadlist_insert() now because it may block 186 * waiting for I/O. Instead, put bp on the deferred 187 * queue and let dsl_pool_sync() finish the job. 
188 */ 189 bplist_append(&ds->ds_pending_deadlist, bp); 190 } else { 191 dsl_deadlist_insert(&ds->ds_deadlist, bp, tx); 192 } 193 ASSERT3U(ds->ds_prev->ds_object, ==, 194 ds->ds_phys->ds_prev_snap_obj); 195 ASSERT(ds->ds_prev->ds_phys->ds_num_children > 0); 196 /* if (bp->blk_birth > prev prev snap txg) prev unique += bs */ 197 if (ds->ds_prev->ds_phys->ds_next_snap_obj == 198 ds->ds_object && bp->blk_birth > 199 ds->ds_prev->ds_phys->ds_prev_snap_txg) { 200 dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx); 201 mutex_enter(&ds->ds_prev->ds_lock); 202 ds->ds_prev->ds_phys->ds_unique_bytes += used; 203 mutex_exit(&ds->ds_prev->ds_lock); 204 } 205 if (bp->blk_birth > ds->ds_dir->dd_origin_txg) { 206 dsl_dir_transfer_space(ds->ds_dir, used, 207 DD_USED_HEAD, DD_USED_SNAP, tx); 208 } 209 } 210 mutex_enter(&ds->ds_lock); 211 ASSERT3U(ds->ds_phys->ds_used_bytes, >=, used); 212 ds->ds_phys->ds_used_bytes -= used; 213 ASSERT3U(ds->ds_phys->ds_compressed_bytes, >=, compressed); 214 ds->ds_phys->ds_compressed_bytes -= compressed; 215 ASSERT3U(ds->ds_phys->ds_uncompressed_bytes, >=, uncompressed); 216 ds->ds_phys->ds_uncompressed_bytes -= uncompressed; 217 mutex_exit(&ds->ds_lock); 218 219 return (used); 220} 221 222uint64_t 223dsl_dataset_prev_snap_txg(dsl_dataset_t *ds) 224{ 225 uint64_t trysnap = 0; 226 227 if (ds == NULL) 228 return (0); 229 /* 230 * The snapshot creation could fail, but that would cause an 231 * incorrect FALSE return, which would only result in an 232 * overestimation of the amount of space that an operation would 233 * consume, which is OK. 234 * 235 * There's also a small window where we could miss a pending 236 * snapshot, because we could set the sync task in the quiescing 237 * phase. So this should only be used as a guess. 238 */ 239 if (ds->ds_trysnap_txg > 240 spa_last_synced_txg(ds->ds_dir->dd_pool->dp_spa)) 241 trysnap = ds->ds_trysnap_txg; 242 return (MAX(ds->ds_phys->ds_prev_snap_txg, trysnap)); 243} 244 245boolean_t 246dsl_dataset_block_freeable(dsl_dataset_t *ds, const blkptr_t *bp, 247 uint64_t blk_birth) 248{ 249 if (blk_birth <= dsl_dataset_prev_snap_txg(ds)) 250 return (B_FALSE); 251 252 ddt_prefetch(dsl_dataset_get_spa(ds), bp); 253 254 return (B_TRUE); 255} 256 257/* ARGSUSED */ 258static void 259dsl_dataset_evict(dmu_buf_t *db, void *dsv) 260{ 261 dsl_dataset_t *ds = dsv; 262 263 ASSERT(ds->ds_owner == NULL || DSL_DATASET_IS_DESTROYED(ds)); 264 265 unique_remove(ds->ds_fsid_guid); 266 267 if (ds->ds_objset != NULL) 268 dmu_objset_evict(ds->ds_objset); 269 270 if (ds->ds_prev) { 271 dsl_dataset_drop_ref(ds->ds_prev, ds); 272 ds->ds_prev = NULL; 273 } 274 275 bplist_destroy(&ds->ds_pending_deadlist); 276 if (db != NULL) { 277 dsl_deadlist_close(&ds->ds_deadlist); 278 } else { 279 ASSERT(ds->ds_deadlist.dl_dbuf == NULL); 280 ASSERT(!ds->ds_deadlist.dl_oldfmt); 281 } 282 if (ds->ds_dir) 283 dsl_dir_close(ds->ds_dir, ds); 284 285 ASSERT(!list_link_active(&ds->ds_synced_link)); 286 287 if (mutex_owned(&ds->ds_lock)) 288 mutex_exit(&ds->ds_lock); 289 mutex_destroy(&ds->ds_lock); 290 mutex_destroy(&ds->ds_recvlock); 291 if (mutex_owned(&ds->ds_opening_lock)) 292 mutex_exit(&ds->ds_opening_lock); 293 mutex_destroy(&ds->ds_opening_lock); 294 rw_destroy(&ds->ds_rwlock); 295 cv_destroy(&ds->ds_exclusive_cv); 296 297 kmem_free(ds, sizeof (dsl_dataset_t)); 298} 299 300static int 301dsl_dataset_get_snapname(dsl_dataset_t *ds) 302{ 303 dsl_dataset_phys_t *headphys; 304 int err; 305 dmu_buf_t *headdbuf; 306 dsl_pool_t *dp = ds->ds_dir->dd_pool; 307 objset_t *mos = 
dp->dp_meta_objset; 308 309 if (ds->ds_snapname[0]) 310 return (0); 311 if (ds->ds_phys->ds_next_snap_obj == 0) 312 return (0); 313 314 err = dmu_bonus_hold(mos, ds->ds_dir->dd_phys->dd_head_dataset_obj, 315 FTAG, &headdbuf); 316 if (err) 317 return (err); 318 headphys = headdbuf->db_data; 319 err = zap_value_search(dp->dp_meta_objset, 320 headphys->ds_snapnames_zapobj, ds->ds_object, 0, ds->ds_snapname); 321 dmu_buf_rele(headdbuf, FTAG); 322 return (err); 323} 324 325static int 326dsl_dataset_snap_lookup(dsl_dataset_t *ds, const char *name, uint64_t *value) 327{ 328 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset; 329 uint64_t snapobj = ds->ds_phys->ds_snapnames_zapobj; 330 matchtype_t mt; 331 int err; 332 333 if (ds->ds_phys->ds_flags & DS_FLAG_CI_DATASET) 334 mt = MT_FIRST; 335 else 336 mt = MT_EXACT; 337 338 err = zap_lookup_norm(mos, snapobj, name, 8, 1, 339 value, mt, NULL, 0, NULL); 340 if (err == ENOTSUP && mt == MT_FIRST) 341 err = zap_lookup(mos, snapobj, name, 8, 1, value); 342 return (err); 343} 344 345static int 346dsl_dataset_snap_remove(dsl_dataset_t *ds, char *name, dmu_tx_t *tx) 347{ 348 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset; 349 uint64_t snapobj = ds->ds_phys->ds_snapnames_zapobj; 350 matchtype_t mt; 351 int err; 352 353 dsl_dir_snap_cmtime_update(ds->ds_dir); 354 355 if (ds->ds_phys->ds_flags & DS_FLAG_CI_DATASET) 356 mt = MT_FIRST; 357 else 358 mt = MT_EXACT; 359 360 err = zap_remove_norm(mos, snapobj, name, mt, tx); 361 if (err == ENOTSUP && mt == MT_FIRST) 362 err = zap_remove(mos, snapobj, name, tx); 363 return (err); 364} 365 366static int 367dsl_dataset_get_ref(dsl_pool_t *dp, uint64_t dsobj, void *tag, 368 dsl_dataset_t **dsp) 369{ 370 objset_t *mos = dp->dp_meta_objset; 371 dmu_buf_t *dbuf; 372 dsl_dataset_t *ds; 373 int err; 374 dmu_object_info_t doi; 375 376 ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock) || 377 dsl_pool_sync_context(dp)); 378 379 err = dmu_bonus_hold(mos, dsobj, tag, &dbuf); 380 if (err) 381 return (err); 382 383 /* Make sure dsobj has the correct object type. 
*/ 384 dmu_object_info_from_db(dbuf, &doi); 385 if (doi.doi_type != DMU_OT_DSL_DATASET) 386 return (EINVAL); 387 388 ds = dmu_buf_get_user(dbuf); 389 if (ds == NULL) { 390 dsl_dataset_t *winner; 391 392 ds = kmem_zalloc(sizeof (dsl_dataset_t), KM_SLEEP); 393 ds->ds_dbuf = dbuf; 394 ds->ds_object = dsobj; 395 ds->ds_phys = dbuf->db_data; 396 397 mutex_init(&ds->ds_lock, NULL, MUTEX_DEFAULT, NULL); 398 mutex_init(&ds->ds_recvlock, NULL, MUTEX_DEFAULT, NULL); 399 mutex_init(&ds->ds_opening_lock, NULL, MUTEX_DEFAULT, NULL); 400 rw_init(&ds->ds_rwlock, 0, 0, 0); 401 cv_init(&ds->ds_exclusive_cv, NULL, CV_DEFAULT, NULL); 402 403 bplist_create(&ds->ds_pending_deadlist); 404 dsl_deadlist_open(&ds->ds_deadlist, 405 mos, ds->ds_phys->ds_deadlist_obj); 406 407 if (err == 0) { 408 err = dsl_dir_open_obj(dp, 409 ds->ds_phys->ds_dir_obj, NULL, ds, &ds->ds_dir); 410 } 411 if (err) { 412 mutex_destroy(&ds->ds_lock); 413 mutex_destroy(&ds->ds_recvlock); 414 mutex_destroy(&ds->ds_opening_lock); 415 rw_destroy(&ds->ds_rwlock); 416 cv_destroy(&ds->ds_exclusive_cv); 417 bplist_destroy(&ds->ds_pending_deadlist); 418 dsl_deadlist_close(&ds->ds_deadlist); 419 kmem_free(ds, sizeof (dsl_dataset_t)); 420 dmu_buf_rele(dbuf, tag); 421 return (err); 422 } 423 424 if (!dsl_dataset_is_snapshot(ds)) { 425 ds->ds_snapname[0] = '\0'; 426 if (ds->ds_phys->ds_prev_snap_obj) { 427 err = dsl_dataset_get_ref(dp, 428 ds->ds_phys->ds_prev_snap_obj, 429 ds, &ds->ds_prev); 430 } 431 } else { 432 if (zfs_flags & ZFS_DEBUG_SNAPNAMES) 433 err = dsl_dataset_get_snapname(ds); 434 if (err == 0 && ds->ds_phys->ds_userrefs_obj != 0) { 435 err = zap_count( 436 ds->ds_dir->dd_pool->dp_meta_objset, 437 ds->ds_phys->ds_userrefs_obj, 438 &ds->ds_userrefs); 439 } 440 } 441 442 if (err == 0 && !dsl_dataset_is_snapshot(ds)) { 443 /* 444 * In sync context, we're called with either no lock 445 * or with the write lock. If we're not syncing, 446 * we're always called with the read lock held. 
447 */ 448 boolean_t need_lock = 449 !RW_WRITE_HELD(&dp->dp_config_rwlock) && 450 dsl_pool_sync_context(dp); 451 452 if (need_lock) 453 rw_enter(&dp->dp_config_rwlock, RW_READER); 454 455 err = dsl_prop_get_ds(ds, 456 "refreservation", sizeof (uint64_t), 1, 457 &ds->ds_reserved, NULL); 458 if (err == 0) { 459 err = dsl_prop_get_ds(ds, 460 "refquota", sizeof (uint64_t), 1, 461 &ds->ds_quota, NULL); 462 } 463 464 if (need_lock) 465 rw_exit(&dp->dp_config_rwlock); 466 } else { 467 ds->ds_reserved = ds->ds_quota = 0; 468 } 469 470 if (err == 0) { 471 winner = dmu_buf_set_user_ie(dbuf, ds, &ds->ds_phys, 472 dsl_dataset_evict); 473 } 474 if (err || winner) { 475 bplist_destroy(&ds->ds_pending_deadlist); 476 dsl_deadlist_close(&ds->ds_deadlist); 477 if (ds->ds_prev) 478 dsl_dataset_drop_ref(ds->ds_prev, ds); 479 dsl_dir_close(ds->ds_dir, ds); 480 mutex_destroy(&ds->ds_lock); 481 mutex_destroy(&ds->ds_recvlock); 482 mutex_destroy(&ds->ds_opening_lock); 483 rw_destroy(&ds->ds_rwlock); 484 cv_destroy(&ds->ds_exclusive_cv); 485 kmem_free(ds, sizeof (dsl_dataset_t)); 486 if (err) { 487 dmu_buf_rele(dbuf, tag); 488 return (err); 489 } 490 ds = winner; 491 } else { 492 ds->ds_fsid_guid = 493 unique_insert(ds->ds_phys->ds_fsid_guid); 494 } 495 } 496 ASSERT3P(ds->ds_dbuf, ==, dbuf); 497 ASSERT3P(ds->ds_phys, ==, dbuf->db_data); 498 ASSERT(ds->ds_phys->ds_prev_snap_obj != 0 || 499 spa_version(dp->dp_spa) < SPA_VERSION_ORIGIN || 500 dp->dp_origin_snap == NULL || ds == dp->dp_origin_snap); 501 mutex_enter(&ds->ds_lock); 502 if (!dsl_pool_sync_context(dp) && DSL_DATASET_IS_DESTROYED(ds)) { 503 mutex_exit(&ds->ds_lock); 504 dmu_buf_rele(ds->ds_dbuf, tag); 505 return (ENOENT); 506 } 507 mutex_exit(&ds->ds_lock); 508 *dsp = ds; 509 return (0); 510} 511 512static int 513dsl_dataset_hold_ref(dsl_dataset_t *ds, void *tag) 514{ 515 dsl_pool_t *dp = ds->ds_dir->dd_pool; 516 517 /* 518 * In syncing context we don't want the rwlock lock: there 519 * may be an existing writer waiting for sync phase to 520 * finish. We don't need to worry about such writers, since 521 * sync phase is single-threaded, so the writer can't be 522 * doing anything while we are active. 523 */ 524 if (dsl_pool_sync_context(dp)) { 525 ASSERT(!DSL_DATASET_IS_DESTROYED(ds)); 526 return (0); 527 } 528 529 /* 530 * Normal users will hold the ds_rwlock as a READER until they 531 * are finished (i.e., call dsl_dataset_rele()). "Owners" will 532 * drop their READER lock after they set the ds_owner field. 533 * 534 * If the dataset is being destroyed, the destroy thread will 535 * obtain a WRITER lock for exclusive access after it's done its 536 * open-context work and then change the ds_owner to 537 * dsl_reaper once destruction is assured. So threads 538 * may block here temporarily, until the "destructability" of 539 * the dataset is determined. 540 */ 541 ASSERT(!RW_WRITE_HELD(&dp->dp_config_rwlock)); 542 mutex_enter(&ds->ds_lock); 543 while (!rw_tryenter(&ds->ds_rwlock, RW_READER)) { 544 rw_exit(&dp->dp_config_rwlock); 545 cv_wait(&ds->ds_exclusive_cv, &ds->ds_lock); 546 if (DSL_DATASET_IS_DESTROYED(ds)) { 547 mutex_exit(&ds->ds_lock); 548 dsl_dataset_drop_ref(ds, tag); 549 rw_enter(&dp->dp_config_rwlock, RW_READER); 550 return (ENOENT); 551 } 552 /* 553 * The dp_config_rwlock lives above the ds_lock. And 554 * we need to check DSL_DATASET_IS_DESTROYED() while 555 * holding the ds_lock, so we have to drop and reacquire 556 * the ds_lock here. 
557 */ 558 mutex_exit(&ds->ds_lock); 559 rw_enter(&dp->dp_config_rwlock, RW_READER); 560 mutex_enter(&ds->ds_lock); 561 } 562 mutex_exit(&ds->ds_lock); 563 return (0); 564} 565 566int 567dsl_dataset_hold_obj(dsl_pool_t *dp, uint64_t dsobj, void *tag, 568 dsl_dataset_t **dsp) 569{ 570 int err = dsl_dataset_get_ref(dp, dsobj, tag, dsp); 571 572 if (err) 573 return (err); 574 return (dsl_dataset_hold_ref(*dsp, tag)); 575} 576 577int 578dsl_dataset_own_obj(dsl_pool_t *dp, uint64_t dsobj, boolean_t inconsistentok, 579 void *tag, dsl_dataset_t **dsp) 580{ 581 int err = dsl_dataset_hold_obj(dp, dsobj, tag, dsp); 582 if (err) 583 return (err); 584 if (!dsl_dataset_tryown(*dsp, inconsistentok, tag)) { 585 dsl_dataset_rele(*dsp, tag); 586 *dsp = NULL; 587 return (EBUSY); 588 } 589 return (0); 590} 591 592int 593dsl_dataset_hold(const char *name, void *tag, dsl_dataset_t **dsp) 594{ 595 dsl_dir_t *dd; 596 dsl_pool_t *dp; 597 const char *snapname; 598 uint64_t obj; 599 int err = 0; 600 601 err = dsl_dir_open_spa(NULL, name, FTAG, &dd, &snapname); 602 if (err) 603 return (err); 604 605 dp = dd->dd_pool; 606 obj = dd->dd_phys->dd_head_dataset_obj; 607 rw_enter(&dp->dp_config_rwlock, RW_READER); 608 if (obj) 609 err = dsl_dataset_get_ref(dp, obj, tag, dsp); 610 else 611 err = ENOENT; 612 if (err) 613 goto out; 614 615 err = dsl_dataset_hold_ref(*dsp, tag); 616 617 /* we may be looking for a snapshot */ 618 if (err == 0 && snapname != NULL) { 619 dsl_dataset_t *ds = NULL; 620 621 if (*snapname++ != '@') { 622 dsl_dataset_rele(*dsp, tag); 623 err = ENOENT; 624 goto out; 625 } 626 627 dprintf("looking for snapshot '%s'\n", snapname); 628 err = dsl_dataset_snap_lookup(*dsp, snapname, &obj); 629 if (err == 0) 630 err = dsl_dataset_get_ref(dp, obj, tag, &ds); 631 dsl_dataset_rele(*dsp, tag); 632 633 ASSERT3U((err == 0), ==, (ds != NULL)); 634 635 if (ds) { 636 mutex_enter(&ds->ds_lock); 637 if (ds->ds_snapname[0] == 0) 638 (void) strlcpy(ds->ds_snapname, snapname, 639 sizeof (ds->ds_snapname)); 640 mutex_exit(&ds->ds_lock); 641 err = dsl_dataset_hold_ref(ds, tag); 642 *dsp = err ? NULL : ds; 643 } 644 } 645out: 646 rw_exit(&dp->dp_config_rwlock); 647 dsl_dir_close(dd, FTAG); 648 return (err); 649} 650 651int 652dsl_dataset_own(const char *name, boolean_t inconsistentok, 653 void *tag, dsl_dataset_t **dsp) 654{ 655 int err = dsl_dataset_hold(name, tag, dsp); 656 if (err) 657 return (err); 658 if (!dsl_dataset_tryown(*dsp, inconsistentok, tag)) { 659 dsl_dataset_rele(*dsp, tag); 660 return (EBUSY); 661 } 662 return (0); 663} 664 665void 666dsl_dataset_name(dsl_dataset_t *ds, char *name) 667{ 668 if (ds == NULL) { 669 (void) strcpy(name, "mos"); 670 } else { 671 dsl_dir_name(ds->ds_dir, name); 672 VERIFY(0 == dsl_dataset_get_snapname(ds)); 673 if (ds->ds_snapname[0]) { 674 (void) strcat(name, "@"); 675 /* 676 * We use a "recursive" mutex so that we 677 * can call dprintf_ds() with ds_lock held. 
678 */ 679 if (!MUTEX_HELD(&ds->ds_lock)) { 680 mutex_enter(&ds->ds_lock); 681 (void) strcat(name, ds->ds_snapname); 682 mutex_exit(&ds->ds_lock); 683 } else { 684 (void) strcat(name, ds->ds_snapname); 685 } 686 } 687 } 688} 689 690static int 691dsl_dataset_namelen(dsl_dataset_t *ds) 692{ 693 int result; 694 695 if (ds == NULL) { 696 result = 3; /* "mos" */ 697 } else { 698 result = dsl_dir_namelen(ds->ds_dir); 699 VERIFY(0 == dsl_dataset_get_snapname(ds)); 700 if (ds->ds_snapname[0]) { 701 ++result; /* adding one for the @-sign */ 702 if (!MUTEX_HELD(&ds->ds_lock)) { 703 mutex_enter(&ds->ds_lock); 704 result += strlen(ds->ds_snapname); 705 mutex_exit(&ds->ds_lock); 706 } else { 707 result += strlen(ds->ds_snapname); 708 } 709 } 710 } 711 712 return (result); 713} 714 715void 716dsl_dataset_drop_ref(dsl_dataset_t *ds, void *tag) 717{ 718 dmu_buf_rele(ds->ds_dbuf, tag); 719} 720 721void 722dsl_dataset_rele(dsl_dataset_t *ds, void *tag) 723{ 724 if (!dsl_pool_sync_context(ds->ds_dir->dd_pool)) { 725 rw_exit(&ds->ds_rwlock); 726 } 727 dsl_dataset_drop_ref(ds, tag); 728} 729 730void 731dsl_dataset_disown(dsl_dataset_t *ds, void *tag) 732{ 733 ASSERT((ds->ds_owner == tag && ds->ds_dbuf) || 734 (DSL_DATASET_IS_DESTROYED(ds) && ds->ds_dbuf == NULL)); 735 736 mutex_enter(&ds->ds_lock); 737 ds->ds_owner = NULL; 738 if (RW_WRITE_HELD(&ds->ds_rwlock)) { 739 rw_exit(&ds->ds_rwlock); 740 cv_broadcast(&ds->ds_exclusive_cv); 741 } 742 mutex_exit(&ds->ds_lock); 743 if (ds->ds_dbuf) 744 dsl_dataset_drop_ref(ds, tag); 745 else 746 dsl_dataset_evict(NULL, ds); 747} 748 749boolean_t 750dsl_dataset_tryown(dsl_dataset_t *ds, boolean_t inconsistentok, void *tag) 751{ 752 boolean_t gotit = FALSE; 753 754 mutex_enter(&ds->ds_lock); 755 if (ds->ds_owner == NULL && 756 (!DS_IS_INCONSISTENT(ds) || inconsistentok)) { 757 ds->ds_owner = tag; 758 if (!dsl_pool_sync_context(ds->ds_dir->dd_pool)) 759 rw_exit(&ds->ds_rwlock); 760 gotit = TRUE; 761 } 762 mutex_exit(&ds->ds_lock); 763 return (gotit); 764} 765 766void 767dsl_dataset_make_exclusive(dsl_dataset_t *ds, void *owner) 768{ 769 ASSERT3P(owner, ==, ds->ds_owner); 770 if (!RW_WRITE_HELD(&ds->ds_rwlock)) 771 rw_enter(&ds->ds_rwlock, RW_WRITER); 772} 773 774uint64_t 775dsl_dataset_create_sync_dd(dsl_dir_t *dd, dsl_dataset_t *origin, 776 uint64_t flags, dmu_tx_t *tx) 777{ 778 dsl_pool_t *dp = dd->dd_pool; 779 dmu_buf_t *dbuf; 780 dsl_dataset_phys_t *dsphys; 781 uint64_t dsobj; 782 objset_t *mos = dp->dp_meta_objset; 783 784 if (origin == NULL) 785 origin = dp->dp_origin_snap; 786 787 ASSERT(origin == NULL || origin->ds_dir->dd_pool == dp); 788 ASSERT(origin == NULL || origin->ds_phys->ds_num_children > 0); 789 ASSERT(dmu_tx_is_syncing(tx)); 790 ASSERT(dd->dd_phys->dd_head_dataset_obj == 0); 791 792 dsobj = dmu_object_alloc(mos, DMU_OT_DSL_DATASET, 0, 793 DMU_OT_DSL_DATASET, sizeof (dsl_dataset_phys_t), tx); 794 VERIFY(0 == dmu_bonus_hold(mos, dsobj, FTAG, &dbuf)); 795 dmu_buf_will_dirty(dbuf, tx); 796 dsphys = dbuf->db_data; 797 bzero(dsphys, sizeof (dsl_dataset_phys_t)); 798 dsphys->ds_dir_obj = dd->dd_object; 799 dsphys->ds_flags = flags; 800 dsphys->ds_fsid_guid = unique_create(); 801 (void) random_get_pseudo_bytes((void*)&dsphys->ds_guid, 802 sizeof (dsphys->ds_guid)); 803 dsphys->ds_snapnames_zapobj = 804 zap_create_norm(mos, U8_TEXTPREP_TOUPPER, DMU_OT_DSL_DS_SNAP_MAP, 805 DMU_OT_NONE, 0, tx); 806 dsphys->ds_creation_time = gethrestime_sec(); 807 dsphys->ds_creation_txg = tx->tx_txg == TXG_INITIAL ? 
1 : tx->tx_txg; 808 809 if (origin == NULL) { 810 dsphys->ds_deadlist_obj = dsl_deadlist_alloc(mos, tx); 811 } else { 812 dsl_dataset_t *ohds; 813 814 dsphys->ds_prev_snap_obj = origin->ds_object; 815 dsphys->ds_prev_snap_txg = 816 origin->ds_phys->ds_creation_txg; 817 dsphys->ds_used_bytes = 818 origin->ds_phys->ds_used_bytes; 819 dsphys->ds_compressed_bytes = 820 origin->ds_phys->ds_compressed_bytes; 821 dsphys->ds_uncompressed_bytes = 822 origin->ds_phys->ds_uncompressed_bytes; 823 dsphys->ds_bp = origin->ds_phys->ds_bp; 824 dsphys->ds_flags |= origin->ds_phys->ds_flags; 825 826 dmu_buf_will_dirty(origin->ds_dbuf, tx); 827 origin->ds_phys->ds_num_children++; 828 829 VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, 830 origin->ds_dir->dd_phys->dd_head_dataset_obj, FTAG, &ohds)); 831 dsphys->ds_deadlist_obj = dsl_deadlist_clone(&ohds->ds_deadlist, 832 dsphys->ds_prev_snap_txg, dsphys->ds_prev_snap_obj, tx); 833 dsl_dataset_rele(ohds, FTAG); 834 835 if (spa_version(dp->dp_spa) >= SPA_VERSION_NEXT_CLONES) { 836 if (origin->ds_phys->ds_next_clones_obj == 0) { 837 origin->ds_phys->ds_next_clones_obj = 838 zap_create(mos, 839 DMU_OT_NEXT_CLONES, DMU_OT_NONE, 0, tx); 840 } 841 VERIFY(0 == zap_add_int(mos, 842 origin->ds_phys->ds_next_clones_obj, 843 dsobj, tx)); 844 } 845 846 dmu_buf_will_dirty(dd->dd_dbuf, tx); 847 dd->dd_phys->dd_origin_obj = origin->ds_object; 848 if (spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) { 849 if (origin->ds_dir->dd_phys->dd_clones == 0) { 850 dmu_buf_will_dirty(origin->ds_dir->dd_dbuf, tx); 851 origin->ds_dir->dd_phys->dd_clones = 852 zap_create(mos, 853 DMU_OT_DSL_CLONES, DMU_OT_NONE, 0, tx); 854 } 855 VERIFY3U(0, ==, zap_add_int(mos, 856 origin->ds_dir->dd_phys->dd_clones, dsobj, tx)); 857 } 858 } 859 860 if (spa_version(dp->dp_spa) >= SPA_VERSION_UNIQUE_ACCURATE) 861 dsphys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE; 862 863 dmu_buf_rele(dbuf, FTAG); 864 865 dmu_buf_will_dirty(dd->dd_dbuf, tx); 866 dd->dd_phys->dd_head_dataset_obj = dsobj; 867 868 return (dsobj); 869} 870 871uint64_t 872dsl_dataset_create_sync(dsl_dir_t *pdd, const char *lastname, 873 dsl_dataset_t *origin, uint64_t flags, cred_t *cr, dmu_tx_t *tx) 874{ 875 dsl_pool_t *dp = pdd->dd_pool; 876 uint64_t dsobj, ddobj; 877 dsl_dir_t *dd; 878 879 ASSERT(lastname[0] != '@'); 880 881 ddobj = dsl_dir_create_sync(dp, pdd, lastname, tx); 882 VERIFY(0 == dsl_dir_open_obj(dp, ddobj, lastname, FTAG, &dd)); 883 884 dsobj = dsl_dataset_create_sync_dd(dd, origin, flags, tx); 885 886 dsl_deleg_set_create_perms(dd, tx, cr); 887 888 dsl_dir_close(dd, FTAG); 889 890 /* 891 * If we are creating a clone, make sure we zero out any stale 892 * data from the origin snapshots zil header. 
893 */ 894 if (origin != NULL) { 895 dsl_dataset_t *ds; 896 objset_t *os; 897 898 VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds)); 899 VERIFY3U(0, ==, dmu_objset_from_ds(ds, &os)); 900 bzero(&os->os_zil_header, sizeof (os->os_zil_header)); 901 dsl_dataset_dirty(ds, tx); 902 dsl_dataset_rele(ds, FTAG); 903 } 904 905 return (dsobj); 906} 907 908struct destroyarg { 909 dsl_sync_task_group_t *dstg; 910 char *snapname; 911 char *failed; 912 boolean_t defer; 913}; 914 915static int 916dsl_snapshot_destroy_one(const char *name, void *arg) 917{ 918 struct destroyarg *da = arg; 919 dsl_dataset_t *ds; 920 int err; 921 char *dsname; 922 923 dsname = kmem_asprintf("%s@%s", name, da->snapname); 924 err = dsl_dataset_own(dsname, B_TRUE, da->dstg, &ds); 925 strfree(dsname); 926 if (err == 0) { 927 struct dsl_ds_destroyarg *dsda; 928 929 dsl_dataset_make_exclusive(ds, da->dstg); 930 dsda = kmem_zalloc(sizeof (struct dsl_ds_destroyarg), KM_SLEEP); 931 dsda->ds = ds; 932 dsda->defer = da->defer; 933 dsl_sync_task_create(da->dstg, dsl_dataset_destroy_check, 934 dsl_dataset_destroy_sync, dsda, da->dstg, 0); 935 } else if (err == ENOENT) { 936 err = 0; 937 } else { 938 (void) strcpy(da->failed, name); 939 } 940 return (err); 941} 942 943/* 944 * Destroy 'snapname' in all descendants of 'fsname'. 945 */ 946#pragma weak dmu_snapshots_destroy = dsl_snapshots_destroy 947int 948dsl_snapshots_destroy(char *fsname, char *snapname, boolean_t defer) 949{ 950 int err; 951 struct destroyarg da; 952 dsl_sync_task_t *dst; 953 spa_t *spa; 954 955 err = spa_open(fsname, &spa, FTAG); 956 if (err) 957 return (err); 958 da.dstg = dsl_sync_task_group_create(spa_get_dsl(spa)); 959 da.snapname = snapname; 960 da.failed = fsname; 961 da.defer = defer; 962 963 err = dmu_objset_find(fsname, 964 dsl_snapshot_destroy_one, &da, DS_FIND_CHILDREN); 965 966 if (err == 0) 967 err = dsl_sync_task_group_wait(da.dstg); 968 969 for (dst = list_head(&da.dstg->dstg_tasks); dst; 970 dst = list_next(&da.dstg->dstg_tasks, dst)) { 971 struct dsl_ds_destroyarg *dsda = dst->dst_arg1; 972 dsl_dataset_t *ds = dsda->ds; 973 974 /* 975 * Return the file system name that triggered the error 976 */ 977 if (dst->dst_err) { 978 dsl_dataset_name(ds, fsname); 979 *strchr(fsname, '@') = '\0'; 980 } 981 ASSERT3P(dsda->rm_origin, ==, NULL); 982 dsl_dataset_disown(ds, da.dstg); 983 kmem_free(dsda, sizeof (struct dsl_ds_destroyarg)); 984 } 985 986 dsl_sync_task_group_destroy(da.dstg); 987 spa_close(spa, FTAG); 988 return (err); 989} 990 991static boolean_t 992dsl_dataset_might_destroy_origin(dsl_dataset_t *ds) 993{ 994 boolean_t might_destroy = B_FALSE; 995 996 mutex_enter(&ds->ds_lock); 997 if (ds->ds_phys->ds_num_children == 2 && ds->ds_userrefs == 0 && 998 DS_IS_DEFER_DESTROY(ds)) 999 might_destroy = B_TRUE; 1000 mutex_exit(&ds->ds_lock); 1001 1002 return (might_destroy); 1003} 1004 1005/* 1006 * If we're removing a clone, and these three conditions are true: 1007 * 1) the clone's origin has no other children 1008 * 2) the clone's origin has no user references 1009 * 3) the clone's origin has been marked for deferred destruction 1010 * Then, prepare to remove the origin as part of this sync task group. 
1011 */ 1012static int 1013dsl_dataset_origin_rm_prep(struct dsl_ds_destroyarg *dsda, void *tag) 1014{ 1015 dsl_dataset_t *ds = dsda->ds; 1016 dsl_dataset_t *origin = ds->ds_prev; 1017 1018 if (dsl_dataset_might_destroy_origin(origin)) { 1019 char *name; 1020 int namelen; 1021 int error; 1022 1023 namelen = dsl_dataset_namelen(origin) + 1; 1024 name = kmem_alloc(namelen, KM_SLEEP); 1025 dsl_dataset_name(origin, name); 1026#ifdef _KERNEL 1027 error = zfs_unmount_snap(name, NULL); 1028 if (error) { 1029 kmem_free(name, namelen); 1030 return (error); 1031 } 1032#endif 1033 error = dsl_dataset_own(name, B_TRUE, tag, &origin); 1034 kmem_free(name, namelen); 1035 if (error) 1036 return (error); 1037 dsda->rm_origin = origin; 1038 dsl_dataset_make_exclusive(origin, tag); 1039 } 1040 1041 return (0); 1042} 1043 1044/* 1045 * ds must be opened as OWNER. On return (whether successful or not), 1046 * ds will be closed and caller can no longer dereference it. 1047 */ 1048int 1049dsl_dataset_destroy(dsl_dataset_t *ds, void *tag, boolean_t defer) 1050{ 1051 int err; 1052 dsl_sync_task_group_t *dstg; 1053 objset_t *os; 1054 dsl_dir_t *dd; 1055 uint64_t obj; 1056 struct dsl_ds_destroyarg dsda = { 0 }; 1057 dsl_dataset_t dummy_ds = { 0 }; 1058 1059 dsda.ds = ds; 1060 1061 if (dsl_dataset_is_snapshot(ds)) { 1062 /* Destroying a snapshot is simpler */ 1063 dsl_dataset_make_exclusive(ds, tag); 1064 1065 dsda.defer = defer; 1066 err = dsl_sync_task_do(ds->ds_dir->dd_pool, 1067 dsl_dataset_destroy_check, dsl_dataset_destroy_sync, 1068 &dsda, tag, 0); 1069 ASSERT3P(dsda.rm_origin, ==, NULL); 1070 goto out; 1071 } else if (defer) { 1072 err = EINVAL; 1073 goto out; 1074 } 1075 1076 dd = ds->ds_dir; 1077 dummy_ds.ds_dir = dd; 1078 dummy_ds.ds_object = ds->ds_object; 1079 1080 /* 1081 * Check for errors and mark this ds as inconsistent, in 1082 * case we crash while freeing the objects. 1083 */ 1084 err = dsl_sync_task_do(dd->dd_pool, dsl_dataset_destroy_begin_check, 1085 dsl_dataset_destroy_begin_sync, ds, NULL, 0); 1086 if (err) 1087 goto out; 1088 1089 err = dmu_objset_from_ds(ds, &os); 1090 if (err) 1091 goto out; 1092 1093 /* 1094 * remove the objects in open context, so that we won't 1095 * have too much to do in syncing context. 1096 */ 1097 for (obj = 0; err == 0; err = dmu_object_next(os, &obj, FALSE, 1098 ds->ds_phys->ds_prev_snap_txg)) { 1099 /* 1100 * Ignore errors, if there is not enough disk space 1101 * we will deal with it in dsl_dataset_destroy_sync(). 1102 */ 1103 (void) dmu_free_object(os, obj); 1104 } 1105 if (err != ESRCH) 1106 goto out; 1107 1108 /* 1109 * Only the ZIL knows how to free log blocks. 1110 */ 1111 zil_destroy(dmu_objset_zil(os), B_FALSE); 1112 1113 /* 1114 * Sync out all in-flight IO. 1115 */ 1116 txg_wait_synced(dd->dd_pool, 0); 1117 1118 /* 1119 * If we managed to free all the objects in open 1120 * context, the user space accounting should be zero. 1121 */ 1122 if (ds->ds_phys->ds_bp.blk_fill == 0 && 1123 dmu_objset_userused_enabled(os)) { 1124 uint64_t count; 1125 1126 ASSERT(zap_count(os, DMU_USERUSED_OBJECT, &count) != 0 || 1127 count == 0); 1128 ASSERT(zap_count(os, DMU_GROUPUSED_OBJECT, &count) != 0 || 1129 count == 0); 1130 } 1131 1132 rw_enter(&dd->dd_pool->dp_config_rwlock, RW_READER); 1133 err = dsl_dir_open_obj(dd->dd_pool, dd->dd_object, NULL, FTAG, &dd); 1134 rw_exit(&dd->dd_pool->dp_config_rwlock); 1135 1136 if (err) 1137 goto out; 1138 1139 /* 1140 * Blow away the dsl_dir + head dataset. 
1141 */ 1142 dsl_dataset_make_exclusive(ds, tag); 1143 /* 1144 * If we're removing a clone, we might also need to remove its 1145 * origin. 1146 */ 1147 do { 1148 dsda.need_prep = B_FALSE; 1149 if (dsl_dir_is_clone(dd)) { 1150 err = dsl_dataset_origin_rm_prep(&dsda, tag); 1151 if (err) { 1152 dsl_dir_close(dd, FTAG); 1153 goto out; 1154 } 1155 } 1156 1157 dstg = dsl_sync_task_group_create(ds->ds_dir->dd_pool); 1158 dsl_sync_task_create(dstg, dsl_dataset_destroy_check, 1159 dsl_dataset_destroy_sync, &dsda, tag, 0); 1160 dsl_sync_task_create(dstg, dsl_dir_destroy_check, 1161 dsl_dir_destroy_sync, &dummy_ds, FTAG, 0); 1162 err = dsl_sync_task_group_wait(dstg); 1163 dsl_sync_task_group_destroy(dstg); 1164 1165 /* 1166 * We could be racing against 'zfs release' or 'zfs destroy -d' 1167 * on the origin snap, in which case we can get EBUSY if we 1168 * needed to destroy the origin snap but were not ready to 1169 * do so. 1170 */ 1171 if (dsda.need_prep) { 1172 ASSERT(err == EBUSY); 1173 ASSERT(dsl_dir_is_clone(dd)); 1174 ASSERT(dsda.rm_origin == NULL); 1175 } 1176 } while (dsda.need_prep); 1177 1178 if (dsda.rm_origin != NULL) 1179 dsl_dataset_disown(dsda.rm_origin, tag); 1180 1181 /* if it is successful, dsl_dir_destroy_sync will close the dd */ 1182 if (err) 1183 dsl_dir_close(dd, FTAG); 1184out: 1185 dsl_dataset_disown(ds, tag); 1186 return (err); 1187} 1188 1189blkptr_t * 1190dsl_dataset_get_blkptr(dsl_dataset_t *ds) 1191{ 1192 return (&ds->ds_phys->ds_bp); 1193} 1194 1195void 1196dsl_dataset_set_blkptr(dsl_dataset_t *ds, blkptr_t *bp, dmu_tx_t *tx) 1197{ 1198 ASSERT(dmu_tx_is_syncing(tx)); 1199 /* If it's the meta-objset, set dp_meta_rootbp */ 1200 if (ds == NULL) { 1201 tx->tx_pool->dp_meta_rootbp = *bp; 1202 } else { 1203 dmu_buf_will_dirty(ds->ds_dbuf, tx); 1204 ds->ds_phys->ds_bp = *bp; 1205 } 1206} 1207 1208spa_t * 1209dsl_dataset_get_spa(dsl_dataset_t *ds) 1210{ 1211 return (ds->ds_dir->dd_pool->dp_spa); 1212} 1213 1214void 1215dsl_dataset_dirty(dsl_dataset_t *ds, dmu_tx_t *tx) 1216{ 1217 dsl_pool_t *dp; 1218 1219 if (ds == NULL) /* this is the meta-objset */ 1220 return; 1221 1222 ASSERT(ds->ds_objset != NULL); 1223 1224 if (ds->ds_phys->ds_next_snap_obj != 0) 1225 panic("dirtying snapshot!"); 1226 1227 dp = ds->ds_dir->dd_pool; 1228 1229 if (txg_list_add(&dp->dp_dirty_datasets, ds, tx->tx_txg) == 0) { 1230 /* up the hold count until we can be written out */ 1231 dmu_buf_add_ref(ds->ds_dbuf, ds); 1232 } 1233} 1234 1235/* 1236 * The unique space in the head dataset can be calculated by subtracting 1237 * the space used in the most recent snapshot, that is still being used 1238 * in this file system, from the space currently in use. To figure out 1239 * the space in the most recent snapshot still in use, we need to take 1240 * the total space used in the snapshot and subtract out the space that 1241 * has been freed up since the snapshot was taken. 
1242 */ 1243static void 1244dsl_dataset_recalc_head_uniq(dsl_dataset_t *ds) 1245{ 1246 uint64_t mrs_used; 1247 uint64_t dlused, dlcomp, dluncomp; 1248 1249 ASSERT(!dsl_dataset_is_snapshot(ds)); 1250 1251 if (ds->ds_phys->ds_prev_snap_obj != 0) 1252 mrs_used = ds->ds_prev->ds_phys->ds_used_bytes; 1253 else 1254 mrs_used = 0; 1255 1256 dsl_deadlist_space(&ds->ds_deadlist, &dlused, &dlcomp, &dluncomp); 1257 1258 ASSERT3U(dlused, <=, mrs_used); 1259 ds->ds_phys->ds_unique_bytes = 1260 ds->ds_phys->ds_used_bytes - (mrs_used - dlused); 1261 1262 if (spa_version(ds->ds_dir->dd_pool->dp_spa) >= 1263 SPA_VERSION_UNIQUE_ACCURATE) 1264 ds->ds_phys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE; 1265} 1266 1267struct killarg { 1268 dsl_dataset_t *ds; 1269 dmu_tx_t *tx; 1270}; 1271 1272/* ARGSUSED */ 1273static int 1274kill_blkptr(spa_t *spa, zilog_t *zilog, const blkptr_t *bp, arc_buf_t *pbuf, 1275 const zbookmark_t *zb, const dnode_phys_t *dnp, void *arg) 1276{ 1277 struct killarg *ka = arg; 1278 dmu_tx_t *tx = ka->tx; 1279 1280 if (bp == NULL) 1281 return (0); 1282 1283 if (zb->zb_level == ZB_ZIL_LEVEL) { 1284 ASSERT(zilog != NULL); 1285 /* 1286 * It's a block in the intent log. It has no 1287 * accounting, so just free it. 1288 */ 1289 dsl_free(ka->tx->tx_pool, ka->tx->tx_txg, bp); 1290 } else { 1291 ASSERT(zilog == NULL); 1292 ASSERT3U(bp->blk_birth, >, ka->ds->ds_phys->ds_prev_snap_txg); 1293 (void) dsl_dataset_block_kill(ka->ds, bp, tx, B_FALSE); 1294 } 1295 1296 return (0); 1297} 1298 1299/* ARGSUSED */ 1300static int 1301dsl_dataset_destroy_begin_check(void *arg1, void *arg2, dmu_tx_t *tx) 1302{ 1303 dsl_dataset_t *ds = arg1; 1304 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset; 1305 uint64_t count; 1306 int err; 1307 1308 /* 1309 * Can't delete a head dataset if there are snapshots of it. 1310 * (Except if the only snapshots are from the branch we cloned 1311 * from.) 1312 */ 1313 if (ds->ds_prev != NULL && 1314 ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object) 1315 return (EBUSY); 1316 1317 /* 1318 * This is really a dsl_dir thing, but check it here so that 1319 * we'll be less likely to leave this dataset inconsistent & 1320 * nearly destroyed. 1321 */ 1322 err = zap_count(mos, ds->ds_dir->dd_phys->dd_child_dir_zapobj, &count); 1323 if (err) 1324 return (err); 1325 if (count != 0) 1326 return (EEXIST); 1327 1328 return (0); 1329} 1330 1331/* ARGSUSED */ 1332static void 1333dsl_dataset_destroy_begin_sync(void *arg1, void *arg2, dmu_tx_t *tx) 1334{ 1335 dsl_dataset_t *ds = arg1; 1336 dsl_pool_t *dp = ds->ds_dir->dd_pool; 1337 1338 /* Mark it as inconsistent on-disk, in case we crash */ 1339 dmu_buf_will_dirty(ds->ds_dbuf, tx); 1340 ds->ds_phys->ds_flags |= DS_FLAG_INCONSISTENT; 1341 1342 spa_history_log_internal(LOG_DS_DESTROY_BEGIN, dp->dp_spa, tx, 1343 "dataset = %llu", ds->ds_object); 1344} 1345 1346static int 1347dsl_dataset_origin_check(struct dsl_ds_destroyarg *dsda, void *tag, 1348 dmu_tx_t *tx) 1349{ 1350 dsl_dataset_t *ds = dsda->ds; 1351 dsl_dataset_t *ds_prev = ds->ds_prev; 1352 1353 if (dsl_dataset_might_destroy_origin(ds_prev)) { 1354 struct dsl_ds_destroyarg ndsda = {0}; 1355 1356 /* 1357 * If we're not prepared to remove the origin, don't remove 1358 * the clone either. 
1359 */ 1360 if (dsda->rm_origin == NULL) { 1361 dsda->need_prep = B_TRUE; 1362 return (EBUSY); 1363 } 1364 1365 ndsda.ds = ds_prev; 1366 ndsda.is_origin_rm = B_TRUE; 1367 return (dsl_dataset_destroy_check(&ndsda, tag, tx)); 1368 } 1369 1370 /* 1371 * If we're not going to remove the origin after all, 1372 * undo the open context setup. 1373 */ 1374 if (dsda->rm_origin != NULL) { 1375 dsl_dataset_disown(dsda->rm_origin, tag); 1376 dsda->rm_origin = NULL; 1377 } 1378 1379 return (0); 1380} 1381 1382/* 1383 * If you add new checks here, you may need to add 1384 * additional checks to the "temporary" case in 1385 * snapshot_check() in dmu_objset.c. 1386 */ 1387/* ARGSUSED */ 1388int 1389dsl_dataset_destroy_check(void *arg1, void *arg2, dmu_tx_t *tx) 1390{ 1391 struct dsl_ds_destroyarg *dsda = arg1; 1392 dsl_dataset_t *ds = dsda->ds; 1393 1394 /* we have an owner hold, so noone else can destroy us */ 1395 ASSERT(!DSL_DATASET_IS_DESTROYED(ds)); 1396 1397 /* 1398 * Only allow deferred destroy on pools that support it. 1399 * NOTE: deferred destroy is only supported on snapshots. 1400 */ 1401 if (dsda->defer) { 1402 if (spa_version(ds->ds_dir->dd_pool->dp_spa) < 1403 SPA_VERSION_USERREFS) 1404 return (ENOTSUP); 1405 ASSERT(dsl_dataset_is_snapshot(ds)); 1406 return (0); 1407 } 1408 1409 /* 1410 * Can't delete a head dataset if there are snapshots of it. 1411 * (Except if the only snapshots are from the branch we cloned 1412 * from.) 1413 */ 1414 if (ds->ds_prev != NULL && 1415 ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object) 1416 return (EBUSY); 1417 1418 /* 1419 * If we made changes this txg, traverse_dsl_dataset won't find 1420 * them. Try again. 1421 */ 1422 if (ds->ds_phys->ds_bp.blk_birth >= tx->tx_txg) 1423 return (EAGAIN); 1424 1425 if (dsl_dataset_is_snapshot(ds)) { 1426 /* 1427 * If this snapshot has an elevated user reference count, 1428 * we can't destroy it yet. 1429 */ 1430 if (ds->ds_userrefs > 0 && !dsda->releasing) 1431 return (EBUSY); 1432 1433 mutex_enter(&ds->ds_lock); 1434 /* 1435 * Can't delete a branch point. However, if we're destroying 1436 * a clone and removing its origin due to it having a user 1437 * hold count of 0 and having been marked for deferred destroy, 1438 * it's OK for the origin to have a single clone. 1439 */ 1440 if (ds->ds_phys->ds_num_children > 1441 (dsda->is_origin_rm ? 2 : 1)) { 1442 mutex_exit(&ds->ds_lock); 1443 return (EEXIST); 1444 } 1445 mutex_exit(&ds->ds_lock); 1446 } else if (dsl_dir_is_clone(ds->ds_dir)) { 1447 return (dsl_dataset_origin_check(dsda, arg2, tx)); 1448 } 1449 1450 /* XXX we should do some i/o error checking... 
*/ 1451 return (0); 1452} 1453 1454struct refsarg { 1455 kmutex_t lock; 1456 boolean_t gone; 1457 kcondvar_t cv; 1458}; 1459 1460/* ARGSUSED */ 1461static void 1462dsl_dataset_refs_gone(dmu_buf_t *db, void *argv) 1463{ 1464 struct refsarg *arg = argv; 1465 1466 mutex_enter(&arg->lock); 1467 arg->gone = TRUE; 1468 cv_signal(&arg->cv); 1469 mutex_exit(&arg->lock); 1470} 1471 1472static void 1473dsl_dataset_drain_refs(dsl_dataset_t *ds, void *tag) 1474{ 1475 struct refsarg arg; 1476 1477 bzero(&arg, sizeof(arg)); 1478 mutex_init(&arg.lock, NULL, MUTEX_DEFAULT, NULL); 1479 cv_init(&arg.cv, NULL, CV_DEFAULT, NULL); 1480 arg.gone = FALSE; 1481 (void) dmu_buf_update_user(ds->ds_dbuf, ds, &arg, &ds->ds_phys, 1482 dsl_dataset_refs_gone); 1483 dmu_buf_rele(ds->ds_dbuf, tag); 1484 mutex_enter(&arg.lock); 1485 while (!arg.gone) 1486 cv_wait(&arg.cv, &arg.lock); 1487 ASSERT(arg.gone); 1488 mutex_exit(&arg.lock); 1489 ds->ds_dbuf = NULL; 1490 ds->ds_phys = NULL; 1491 mutex_destroy(&arg.lock); 1492 cv_destroy(&arg.cv); 1493} 1494 1495static void 1496remove_from_next_clones(dsl_dataset_t *ds, uint64_t obj, dmu_tx_t *tx) 1497{ 1498 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset; 1499 uint64_t count; 1500 int err; 1501 1502 ASSERT(ds->ds_phys->ds_num_children >= 2); 1503 err = zap_remove_int(mos, ds->ds_phys->ds_next_clones_obj, obj, tx); 1504 /* 1505 * The err should not be ENOENT, but a bug in a previous version 1506 * of the code could cause upgrade_clones_cb() to not set 1507 * ds_next_snap_obj when it should, leading to a missing entry. 1508 * If we knew that the pool was created after 1509 * SPA_VERSION_NEXT_CLONES, we could assert that it isn't 1510 * ENOENT. However, at least we can check that we don't have 1511 * too many entries in the next_clones_obj even after failing to 1512 * remove this one. 1513 */ 1514 if (err != ENOENT) { 1515 VERIFY3U(err, ==, 0); 1516 } 1517 ASSERT3U(0, ==, zap_count(mos, ds->ds_phys->ds_next_clones_obj, 1518 &count)); 1519 ASSERT3U(count, <=, ds->ds_phys->ds_num_children - 2); 1520} 1521 1522static void 1523dsl_dataset_remove_clones_key(dsl_dataset_t *ds, uint64_t mintxg, dmu_tx_t *tx) 1524{ 1525 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset; 1526 zap_cursor_t zc; 1527 zap_attribute_t za; 1528 1529 /* 1530 * If it is the old version, dd_clones doesn't exist so we can't 1531 * find the clones, but deadlist_remove_key() is a no-op so it 1532 * doesn't matter. 
1533 */ 1534 if (ds->ds_dir->dd_phys->dd_clones == 0) 1535 return; 1536 1537 for (zap_cursor_init(&zc, mos, ds->ds_dir->dd_phys->dd_clones); 1538 zap_cursor_retrieve(&zc, &za) == 0; 1539 zap_cursor_advance(&zc)) { 1540 dsl_dataset_t *clone; 1541 1542 VERIFY3U(0, ==, dsl_dataset_hold_obj(ds->ds_dir->dd_pool, 1543 za.za_first_integer, FTAG, &clone)); 1544 if (clone->ds_dir->dd_origin_txg > mintxg) { 1545 dsl_deadlist_remove_key(&clone->ds_deadlist, 1546 mintxg, tx); 1547 dsl_dataset_remove_clones_key(clone, mintxg, tx); 1548 } 1549 dsl_dataset_rele(clone, FTAG); 1550 } 1551 zap_cursor_fini(&zc); 1552} 1553 1554struct process_old_arg { 1555 dsl_dataset_t *ds; 1556 dsl_dataset_t *ds_prev; 1557 boolean_t after_branch_point; 1558 zio_t *pio; 1559 uint64_t used, comp, uncomp; 1560}; 1561 1562static int 1563process_old_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx) 1564{ 1565 struct process_old_arg *poa = arg; 1566 dsl_pool_t *dp = poa->ds->ds_dir->dd_pool; 1567 1568 if (bp->blk_birth <= poa->ds->ds_phys->ds_prev_snap_txg) { 1569 dsl_deadlist_insert(&poa->ds->ds_deadlist, bp, tx); 1570 if (poa->ds_prev && !poa->after_branch_point && 1571 bp->blk_birth > 1572 poa->ds_prev->ds_phys->ds_prev_snap_txg) { 1573 poa->ds_prev->ds_phys->ds_unique_bytes += 1574 bp_get_dsize_sync(dp->dp_spa, bp); 1575 } 1576 } else { 1577 poa->used += bp_get_dsize_sync(dp->dp_spa, bp); 1578 poa->comp += BP_GET_PSIZE(bp); 1579 poa->uncomp += BP_GET_UCSIZE(bp); 1580 dsl_free_sync(poa->pio, dp, tx->tx_txg, bp); 1581 } 1582 return (0); 1583} 1584 1585static void 1586process_old_deadlist(dsl_dataset_t *ds, dsl_dataset_t *ds_prev, 1587 dsl_dataset_t *ds_next, boolean_t after_branch_point, dmu_tx_t *tx) 1588{ 1589 struct process_old_arg poa = { 0 }; 1590 dsl_pool_t *dp = ds->ds_dir->dd_pool; 1591 objset_t *mos = dp->dp_meta_objset; 1592 1593 ASSERT(ds->ds_deadlist.dl_oldfmt); 1594 ASSERT(ds_next->ds_deadlist.dl_oldfmt); 1595 1596 poa.ds = ds; 1597 poa.ds_prev = ds_prev; 1598 poa.after_branch_point = after_branch_point; 1599 poa.pio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED); 1600 VERIFY3U(0, ==, bpobj_iterate(&ds_next->ds_deadlist.dl_bpobj, 1601 process_old_cb, &poa, tx)); 1602 VERIFY3U(zio_wait(poa.pio), ==, 0); 1603 ASSERT3U(poa.used, ==, ds->ds_phys->ds_unique_bytes); 1604 1605 /* change snapused */ 1606 dsl_dir_diduse_space(ds->ds_dir, DD_USED_SNAP, 1607 -poa.used, -poa.comp, -poa.uncomp, tx); 1608 1609 /* swap next's deadlist to our deadlist */ 1610 dsl_deadlist_close(&ds->ds_deadlist); 1611 dsl_deadlist_close(&ds_next->ds_deadlist); 1612 SWITCH64(ds_next->ds_phys->ds_deadlist_obj, 1613 ds->ds_phys->ds_deadlist_obj); 1614 dsl_deadlist_open(&ds->ds_deadlist, mos, ds->ds_phys->ds_deadlist_obj); 1615 dsl_deadlist_open(&ds_next->ds_deadlist, mos, 1616 ds_next->ds_phys->ds_deadlist_obj); 1617} 1618 1619void 1620dsl_dataset_destroy_sync(void *arg1, void *tag, dmu_tx_t *tx) 1621{ 1622 struct dsl_ds_destroyarg *dsda = arg1; 1623 dsl_dataset_t *ds = dsda->ds; 1624 int err; 1625 int after_branch_point = FALSE; 1626 dsl_pool_t *dp = ds->ds_dir->dd_pool; 1627 objset_t *mos = dp->dp_meta_objset; 1628 dsl_dataset_t *ds_prev = NULL; 1629 boolean_t wont_destroy; 1630 uint64_t obj; 1631 1632 wont_destroy = (dsda->defer && 1633 (ds->ds_userrefs > 0 || ds->ds_phys->ds_num_children > 1)); 1634 1635 ASSERT(ds->ds_owner || wont_destroy); 1636 ASSERT(dsda->defer || ds->ds_phys->ds_num_children <= 1); 1637 ASSERT(ds->ds_prev == NULL || 1638 ds->ds_prev->ds_phys->ds_next_snap_obj != ds->ds_object); 1639 
ASSERT3U(ds->ds_phys->ds_bp.blk_birth, <=, tx->tx_txg); 1640 1641 if (wont_destroy) { 1642 ASSERT(spa_version(dp->dp_spa) >= SPA_VERSION_USERREFS); 1643 dmu_buf_will_dirty(ds->ds_dbuf, tx); 1644 ds->ds_phys->ds_flags |= DS_FLAG_DEFER_DESTROY; 1645 return; 1646 } 1647 1648 /* signal any waiters that this dataset is going away */ 1649 mutex_enter(&ds->ds_lock); 1650 ds->ds_owner = dsl_reaper; 1651 cv_broadcast(&ds->ds_exclusive_cv); 1652 mutex_exit(&ds->ds_lock); 1653 1654 /* Remove our reservation */ 1655 if (ds->ds_reserved != 0) { 1656 dsl_prop_setarg_t psa; 1657 uint64_t value = 0; 1658 1659 dsl_prop_setarg_init_uint64(&psa, "refreservation", 1660 (ZPROP_SRC_NONE | ZPROP_SRC_LOCAL | ZPROP_SRC_RECEIVED), 1661 &value); 1662 psa.psa_effective_value = 0; /* predict default value */ 1663 1664 dsl_dataset_set_reservation_sync(ds, &psa, tx); 1665 ASSERT3U(ds->ds_reserved, ==, 0); 1666 } 1667 1668 ASSERT(RW_WRITE_HELD(&dp->dp_config_rwlock)); 1669 1670 dsl_scan_ds_destroyed(ds, tx); 1671 1672 obj = ds->ds_object; 1673 1674 if (ds->ds_phys->ds_prev_snap_obj != 0) { 1675 if (ds->ds_prev) { 1676 ds_prev = ds->ds_prev; 1677 } else { 1678 VERIFY(0 == dsl_dataset_hold_obj(dp, 1679 ds->ds_phys->ds_prev_snap_obj, FTAG, &ds_prev)); 1680 } 1681 after_branch_point = 1682 (ds_prev->ds_phys->ds_next_snap_obj != obj); 1683 1684 dmu_buf_will_dirty(ds_prev->ds_dbuf, tx); 1685 if (after_branch_point && 1686 ds_prev->ds_phys->ds_next_clones_obj != 0) { 1687 remove_from_next_clones(ds_prev, obj, tx); 1688 if (ds->ds_phys->ds_next_snap_obj != 0) { 1689 VERIFY(0 == zap_add_int(mos, 1690 ds_prev->ds_phys->ds_next_clones_obj, 1691 ds->ds_phys->ds_next_snap_obj, tx)); 1692 } 1693 } 1694 if (after_branch_point && 1695 ds->ds_phys->ds_next_snap_obj == 0) { 1696 /* This clone is toast. */ 1697 ASSERT(ds_prev->ds_phys->ds_num_children > 1); 1698 ds_prev->ds_phys->ds_num_children--; 1699 1700 /* 1701 * If the clone's origin has no other clones, no 1702 * user holds, and has been marked for deferred 1703 * deletion, then we should have done the necessary 1704 * destroy setup for it. 1705 */ 1706 if (ds_prev->ds_phys->ds_num_children == 1 && 1707 ds_prev->ds_userrefs == 0 && 1708 DS_IS_DEFER_DESTROY(ds_prev)) { 1709 ASSERT3P(dsda->rm_origin, !=, NULL); 1710 } else { 1711 ASSERT3P(dsda->rm_origin, ==, NULL); 1712 } 1713 } else if (!after_branch_point) { 1714 ds_prev->ds_phys->ds_next_snap_obj = 1715 ds->ds_phys->ds_next_snap_obj; 1716 } 1717 } 1718 1719 if (dsl_dataset_is_snapshot(ds)) { 1720 dsl_dataset_t *ds_next; 1721 uint64_t old_unique; 1722 uint64_t used = 0, comp = 0, uncomp = 0; 1723 1724 VERIFY(0 == dsl_dataset_hold_obj(dp, 1725 ds->ds_phys->ds_next_snap_obj, FTAG, &ds_next)); 1726 ASSERT3U(ds_next->ds_phys->ds_prev_snap_obj, ==, obj); 1727 1728 old_unique = ds_next->ds_phys->ds_unique_bytes; 1729 1730 dmu_buf_will_dirty(ds_next->ds_dbuf, tx); 1731 ds_next->ds_phys->ds_prev_snap_obj = 1732 ds->ds_phys->ds_prev_snap_obj; 1733 ds_next->ds_phys->ds_prev_snap_txg = 1734 ds->ds_phys->ds_prev_snap_txg; 1735 ASSERT3U(ds->ds_phys->ds_prev_snap_txg, ==, 1736 ds_prev ? ds_prev->ds_phys->ds_creation_txg : 0); 1737 1738 1739 if (ds_next->ds_deadlist.dl_oldfmt) { 1740 process_old_deadlist(ds, ds_prev, ds_next, 1741 after_branch_point, tx); 1742 } else { 1743 /* Adjust prev's unique space. 
*/ 1744 if (ds_prev && !after_branch_point) { 1745 dsl_deadlist_space_range(&ds_next->ds_deadlist, 1746 ds_prev->ds_phys->ds_prev_snap_txg, 1747 ds->ds_phys->ds_prev_snap_txg, 1748 &used, &comp, &uncomp); 1749 ds_prev->ds_phys->ds_unique_bytes += used; 1750 } 1751 1752 /* Adjust snapused. */ 1753 dsl_deadlist_space_range(&ds_next->ds_deadlist, 1754 ds->ds_phys->ds_prev_snap_txg, UINT64_MAX, 1755 &used, &comp, &uncomp); 1756 dsl_dir_diduse_space(ds->ds_dir, DD_USED_SNAP, 1757 -used, -comp, -uncomp, tx); 1758 1759 /* Move blocks to be freed to pool's free list. */ 1760 dsl_deadlist_move_bpobj(&ds_next->ds_deadlist, 1761 &dp->dp_free_bpobj, ds->ds_phys->ds_prev_snap_txg, 1762 tx); 1763 dsl_dir_diduse_space(tx->tx_pool->dp_free_dir, 1764 DD_USED_HEAD, used, comp, uncomp, tx); 1765 dsl_dir_dirty(tx->tx_pool->dp_free_dir, tx); 1766 1767 /* Merge our deadlist into next's and free it. */ 1768 dsl_deadlist_merge(&ds_next->ds_deadlist, 1769 ds->ds_phys->ds_deadlist_obj, tx); 1770 } 1771 dsl_deadlist_close(&ds->ds_deadlist); 1772 dsl_deadlist_free(mos, ds->ds_phys->ds_deadlist_obj, tx); 1773 1774 /* Collapse range in clone heads */ 1775 dsl_dataset_remove_clones_key(ds, 1776 ds->ds_phys->ds_creation_txg, tx); 1777 1778 if (dsl_dataset_is_snapshot(ds_next)) { 1779 dsl_dataset_t *ds_nextnext; 1780 1781 /* 1782 * Update next's unique to include blocks which 1783 * were previously shared by only this snapshot 1784 * and it. Those blocks will be born after the 1785 * prev snap and before this snap, and will have 1786 * died after the next snap and before the one 1787 * after that (ie. be on the snap after next's 1788 * deadlist). 1789 */ 1790 VERIFY(0 == dsl_dataset_hold_obj(dp, 1791 ds_next->ds_phys->ds_next_snap_obj, 1792 FTAG, &ds_nextnext)); 1793 dsl_deadlist_space_range(&ds_nextnext->ds_deadlist, 1794 ds->ds_phys->ds_prev_snap_txg, 1795 ds->ds_phys->ds_creation_txg, 1796 &used, &comp, &uncomp); 1797 ds_next->ds_phys->ds_unique_bytes += used; 1798 dsl_dataset_rele(ds_nextnext, FTAG); 1799 ASSERT3P(ds_next->ds_prev, ==, NULL); 1800 1801 /* Collapse range in this head. */ 1802 dsl_dataset_t *hds; 1803 VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, 1804 ds->ds_dir->dd_phys->dd_head_dataset_obj, 1805 FTAG, &hds)); 1806 dsl_deadlist_remove_key(&hds->ds_deadlist, 1807 ds->ds_phys->ds_creation_txg, tx); 1808 dsl_dataset_rele(hds, FTAG); 1809 1810 } else { 1811 ASSERT3P(ds_next->ds_prev, ==, ds); 1812 dsl_dataset_drop_ref(ds_next->ds_prev, ds_next); 1813 ds_next->ds_prev = NULL; 1814 if (ds_prev) { 1815 VERIFY(0 == dsl_dataset_get_ref(dp, 1816 ds->ds_phys->ds_prev_snap_obj, 1817 ds_next, &ds_next->ds_prev)); 1818 } 1819 1820 dsl_dataset_recalc_head_uniq(ds_next); 1821 1822 /* 1823 * Reduce the amount of our unconsumed refreservation 1824 * being charged to our parent by the amount of 1825 * new unique data we have gained. 1826 */ 1827 if (old_unique < ds_next->ds_reserved) { 1828 int64_t mrsdelta; 1829 uint64_t new_unique = 1830 ds_next->ds_phys->ds_unique_bytes; 1831 1832 ASSERT(old_unique <= new_unique); 1833 mrsdelta = MIN(new_unique - old_unique, 1834 ds_next->ds_reserved - old_unique); 1835 dsl_dir_diduse_space(ds->ds_dir, 1836 DD_USED_REFRSRV, -mrsdelta, 0, 0, tx); 1837 } 1838 } 1839 dsl_dataset_rele(ds_next, FTAG); 1840 } else { 1841 /* 1842 * There's no next snapshot, so this is a head dataset. 1843 * Destroy the deadlist. Unless it's a clone, the 1844 * deadlist should be empty. (If it's a clone, it's 1845 * safe to ignore the deadlist contents.)
1846 */ 1847 struct killarg ka; 1848 1849 dsl_deadlist_close(&ds->ds_deadlist); 1850 dsl_deadlist_free(mos, ds->ds_phys->ds_deadlist_obj, tx); 1851 ds->ds_phys->ds_deadlist_obj = 0; 1852 1853 /* 1854 * Free everything that we point to (that's born after 1855 * the previous snapshot, if we are a clone) 1856 * 1857 * NB: this should be very quick, because we already 1858 * freed all the objects in open context. 1859 */ 1860 ka.ds = ds; 1861 ka.tx = tx; 1862 err = traverse_dataset(ds, ds->ds_phys->ds_prev_snap_txg, 1863 TRAVERSE_POST, kill_blkptr, &ka); 1864 ASSERT3U(err, ==, 0); 1865 ASSERT(!DS_UNIQUE_IS_ACCURATE(ds) || 1866 ds->ds_phys->ds_unique_bytes == 0); 1867 1868 if (ds->ds_prev != NULL) { 1869 if (spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) { 1870 VERIFY3U(0, ==, zap_remove_int(mos, 1871 ds->ds_prev->ds_dir->dd_phys->dd_clones, 1872 ds->ds_object, tx)); 1873 } 1874 dsl_dataset_rele(ds->ds_prev, ds); 1875 ds->ds_prev = ds_prev = NULL; 1876 } 1877 } 1878 1879 /* 1880 * This must be done after the dsl_traverse(), because it will 1881 * re-open the objset. 1882 */ 1883 if (ds->ds_objset) { 1884 dmu_objset_evict(ds->ds_objset); 1885 ds->ds_objset = NULL; 1886 } 1887 1888 if (ds->ds_dir->dd_phys->dd_head_dataset_obj == ds->ds_object) { 1889 /* Erase the link in the dir */ 1890 dmu_buf_will_dirty(ds->ds_dir->dd_dbuf, tx); 1891 ds->ds_dir->dd_phys->dd_head_dataset_obj = 0; 1892 ASSERT(ds->ds_phys->ds_snapnames_zapobj != 0); 1893 err = zap_destroy(mos, ds->ds_phys->ds_snapnames_zapobj, tx); 1894 ASSERT(err == 0); 1895 } else { 1896 /* remove from snapshot namespace */ 1897 dsl_dataset_t *ds_head; 1898 ASSERT(ds->ds_phys->ds_snapnames_zapobj == 0); 1899 VERIFY(0 == dsl_dataset_hold_obj(dp, 1900 ds->ds_dir->dd_phys->dd_head_dataset_obj, FTAG, &ds_head)); 1901 VERIFY(0 == dsl_dataset_get_snapname(ds)); 1902#ifdef ZFS_DEBUG 1903 { 1904 uint64_t val; 1905 1906 err = dsl_dataset_snap_lookup(ds_head, 1907 ds->ds_snapname, &val); 1908 ASSERT3U(err, ==, 0); 1909 ASSERT3U(val, ==, obj); 1910 } 1911#endif 1912 err = dsl_dataset_snap_remove(ds_head, ds->ds_snapname, tx); 1913 ASSERT(err == 0); 1914 dsl_dataset_rele(ds_head, FTAG); 1915 } 1916 1917 if (ds_prev && ds->ds_prev != ds_prev) 1918 dsl_dataset_rele(ds_prev, FTAG); 1919 1920 spa_prop_clear_bootfs(dp->dp_spa, ds->ds_object, tx); 1921 spa_history_log_internal(LOG_DS_DESTROY, dp->dp_spa, tx, 1922 "dataset = %llu", ds->ds_object); 1923 1924 if (ds->ds_phys->ds_next_clones_obj != 0) { 1925 uint64_t count; 1926 ASSERT(0 == zap_count(mos, 1927 ds->ds_phys->ds_next_clones_obj, &count) && count == 0); 1928 VERIFY(0 == dmu_object_free(mos, 1929 ds->ds_phys->ds_next_clones_obj, tx)); 1930 } 1931 if (ds->ds_phys->ds_props_obj != 0) 1932 VERIFY(0 == zap_destroy(mos, ds->ds_phys->ds_props_obj, tx)); 1933 if (ds->ds_phys->ds_userrefs_obj != 0) 1934 VERIFY(0 == zap_destroy(mos, ds->ds_phys->ds_userrefs_obj, tx)); 1935 dsl_dir_close(ds->ds_dir, ds); 1936 ds->ds_dir = NULL; 1937 dsl_dataset_drain_refs(ds, tag); 1938 VERIFY(0 == dmu_object_free(mos, obj, tx)); 1939 1940 if (dsda->rm_origin) { 1941 /* 1942 * Remove the origin of the clone we just destroyed. 
1943 */ 1944 struct dsl_ds_destroyarg ndsda = {0}; 1945 1946 ndsda.ds = dsda->rm_origin; 1947 dsl_dataset_destroy_sync(&ndsda, tag, tx); 1948 } 1949} 1950 1951static int 1952dsl_dataset_snapshot_reserve_space(dsl_dataset_t *ds, dmu_tx_t *tx) 1953{ 1954 uint64_t asize; 1955 1956 if (!dmu_tx_is_syncing(tx)) 1957 return (0); 1958 1959 /* 1960 * If there's an fs-only reservation, any blocks that might become 1961 * owned by the snapshot dataset must be accommodated by space 1962 * outside of the reservation. 1963 */ 1964 ASSERT(ds->ds_reserved == 0 || DS_UNIQUE_IS_ACCURATE(ds)); 1965 asize = MIN(ds->ds_phys->ds_unique_bytes, ds->ds_reserved); 1966 if (asize > dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE)) 1967 return (ENOSPC); 1968 1969 /* 1970 * Propogate any reserved space for this snapshot to other 1971 * snapshot checks in this sync group. 1972 */ 1973 if (asize > 0) 1974 dsl_dir_willuse_space(ds->ds_dir, asize, tx); 1975 1976 return (0); 1977} 1978 1979int 1980dsl_dataset_snapshot_check(void *arg1, void *arg2, dmu_tx_t *tx) 1981{ 1982 dsl_dataset_t *ds = arg1; 1983 const char *snapname = arg2; 1984 int err; 1985 uint64_t value; 1986 1987 /* 1988 * We don't allow multiple snapshots of the same txg. If there 1989 * is already one, try again. 1990 */ 1991 if (ds->ds_phys->ds_prev_snap_txg >= tx->tx_txg) 1992 return (EAGAIN); 1993 1994 /* 1995 * Check for conflicting name snapshot name. 1996 */ 1997 err = dsl_dataset_snap_lookup(ds, snapname, &value); 1998 if (err == 0) 1999 return (EEXIST); 2000 if (err != ENOENT) 2001 return (err); 2002 2003 /* 2004 * Check that the dataset's name is not too long. Name consists 2005 * of the dataset's length + 1 for the @-sign + snapshot name's length 2006 */ 2007 if (dsl_dataset_namelen(ds) + 1 + strlen(snapname) >= MAXNAMELEN) 2008 return (ENAMETOOLONG); 2009 2010 err = dsl_dataset_snapshot_reserve_space(ds, tx); 2011 if (err) 2012 return (err); 2013 2014 ds->ds_trysnap_txg = tx->tx_txg; 2015 return (0); 2016} 2017 2018void 2019dsl_dataset_snapshot_sync(void *arg1, void *arg2, dmu_tx_t *tx) 2020{ 2021 dsl_dataset_t *ds = arg1; 2022 const char *snapname = arg2; 2023 dsl_pool_t *dp = ds->ds_dir->dd_pool; 2024 dmu_buf_t *dbuf; 2025 dsl_dataset_phys_t *dsphys; 2026 uint64_t dsobj, crtxg; 2027 objset_t *mos = dp->dp_meta_objset; 2028 int err; 2029 2030 ASSERT(RW_WRITE_HELD(&dp->dp_config_rwlock)); 2031 2032 /* 2033 * The origin's ds_creation_txg has to be < TXG_INITIAL 2034 */ 2035 if (strcmp(snapname, ORIGIN_DIR_NAME) == 0) 2036 crtxg = 1; 2037 else 2038 crtxg = tx->tx_txg; 2039 2040 dsobj = dmu_object_alloc(mos, DMU_OT_DSL_DATASET, 0, 2041 DMU_OT_DSL_DATASET, sizeof (dsl_dataset_phys_t), tx); 2042 VERIFY(0 == dmu_bonus_hold(mos, dsobj, FTAG, &dbuf)); 2043 dmu_buf_will_dirty(dbuf, tx); 2044 dsphys = dbuf->db_data; 2045 bzero(dsphys, sizeof (dsl_dataset_phys_t)); 2046 dsphys->ds_dir_obj = ds->ds_dir->dd_object; 2047 dsphys->ds_fsid_guid = unique_create(); 2048 (void) random_get_pseudo_bytes((void*)&dsphys->ds_guid, 2049 sizeof (dsphys->ds_guid)); 2050 dsphys->ds_prev_snap_obj = ds->ds_phys->ds_prev_snap_obj; 2051 dsphys->ds_prev_snap_txg = ds->ds_phys->ds_prev_snap_txg; 2052 dsphys->ds_next_snap_obj = ds->ds_object; 2053 dsphys->ds_num_children = 1; 2054 dsphys->ds_creation_time = gethrestime_sec(); 2055 dsphys->ds_creation_txg = crtxg; 2056 dsphys->ds_deadlist_obj = ds->ds_phys->ds_deadlist_obj; 2057 dsphys->ds_used_bytes = ds->ds_phys->ds_used_bytes; 2058 dsphys->ds_compressed_bytes = ds->ds_phys->ds_compressed_bytes; 2059 dsphys->ds_uncompressed_bytes 
= ds->ds_phys->ds_uncompressed_bytes; 2060 dsphys->ds_flags = ds->ds_phys->ds_flags; 2061 dsphys->ds_bp = ds->ds_phys->ds_bp; 2062 dmu_buf_rele(dbuf, FTAG); 2063 2064 ASSERT3U(ds->ds_prev != 0, ==, ds->ds_phys->ds_prev_snap_obj != 0); 2065 if (ds->ds_prev) { 2066 uint64_t next_clones_obj = 2067 ds->ds_prev->ds_phys->ds_next_clones_obj; 2068 ASSERT(ds->ds_prev->ds_phys->ds_next_snap_obj == 2069 ds->ds_object || 2070 ds->ds_prev->ds_phys->ds_num_children > 1); 2071 if (ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object) { 2072 dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx); 2073 ASSERT3U(ds->ds_phys->ds_prev_snap_txg, ==, 2074 ds->ds_prev->ds_phys->ds_creation_txg); 2075 ds->ds_prev->ds_phys->ds_next_snap_obj = dsobj; 2076 } else if (next_clones_obj != 0) { 2077 remove_from_next_clones(ds->ds_prev, 2078 dsphys->ds_next_snap_obj, tx); 2079 VERIFY3U(0, ==, zap_add_int(mos, 2080 next_clones_obj, dsobj, tx)); 2081 } 2082 } 2083 2084 /* 2085 * If we have a reference-reservation on this dataset, we will 2086 * need to increase the amount of refreservation being charged 2087 * since our unique space is going to zero. 2088 */ 2089 if (ds->ds_reserved) { 2090 int64_t delta; 2091 ASSERT(DS_UNIQUE_IS_ACCURATE(ds)); 2092 delta = MIN(ds->ds_phys->ds_unique_bytes, ds->ds_reserved); 2093 dsl_dir_diduse_space(ds->ds_dir, DD_USED_REFRSRV, 2094 delta, 0, 0, tx); 2095 } 2096 2097 dmu_buf_will_dirty(ds->ds_dbuf, tx); 2098 zfs_dbgmsg("taking snapshot %s@%s/%llu; newkey=%llu", 2099 ds->ds_dir->dd_myname, snapname, dsobj, 2100 ds->ds_phys->ds_prev_snap_txg); 2101 ds->ds_phys->ds_deadlist_obj = dsl_deadlist_clone(&ds->ds_deadlist, 2102 UINT64_MAX, ds->ds_phys->ds_prev_snap_obj, tx); 2103 dsl_deadlist_close(&ds->ds_deadlist); 2104 dsl_deadlist_open(&ds->ds_deadlist, mos, ds->ds_phys->ds_deadlist_obj); 2105 dsl_deadlist_add_key(&ds->ds_deadlist, 2106 ds->ds_phys->ds_prev_snap_txg, tx); 2107 2108 ASSERT3U(ds->ds_phys->ds_prev_snap_txg, <, tx->tx_txg); 2109 ds->ds_phys->ds_prev_snap_obj = dsobj; 2110 ds->ds_phys->ds_prev_snap_txg = crtxg; 2111 ds->ds_phys->ds_unique_bytes = 0; 2112 if (spa_version(dp->dp_spa) >= SPA_VERSION_UNIQUE_ACCURATE) 2113 ds->ds_phys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE; 2114 2115 err = zap_add(mos, ds->ds_phys->ds_snapnames_zapobj, 2116 snapname, 8, 1, &dsobj, tx); 2117 ASSERT(err == 0); 2118 2119 if (ds->ds_prev) 2120 dsl_dataset_drop_ref(ds->ds_prev, ds); 2121 VERIFY(0 == dsl_dataset_get_ref(dp, 2122 ds->ds_phys->ds_prev_snap_obj, ds, &ds->ds_prev)); 2123 2124 dsl_scan_ds_snapshotted(ds, tx); 2125 2126 dsl_dir_snap_cmtime_update(ds->ds_dir); 2127 2128 spa_history_log_internal(LOG_DS_SNAPSHOT, dp->dp_spa, tx, 2129 "dataset = %llu", dsobj); 2130} 2131 2132void 2133dsl_dataset_sync(dsl_dataset_t *ds, zio_t *zio, dmu_tx_t *tx) 2134{ 2135 ASSERT(dmu_tx_is_syncing(tx)); 2136 ASSERT(ds->ds_objset != NULL); 2137 ASSERT(ds->ds_phys->ds_next_snap_obj == 0); 2138 2139 /* 2140 * in case we had to change ds_fsid_guid when we opened it, 2141 * sync it out now. 
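 * (The in-core fsid is re-generated by unique_insert() at open time
 * if the on-disk value collided with another dataset's fsid.)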
2142 */ 2143 dmu_buf_will_dirty(ds->ds_dbuf, tx); 2144 ds->ds_phys->ds_fsid_guid = ds->ds_fsid_guid; 2145 2146 dsl_dir_dirty(ds->ds_dir, tx); 2147 dmu_objset_sync(ds->ds_objset, zio, tx); 2148} 2149 2150void 2151dsl_dataset_stats(dsl_dataset_t *ds, nvlist_t *nv) 2152{ 2153 uint64_t refd, avail, uobjs, aobjs; 2154 2155 dsl_dir_stats(ds->ds_dir, nv); 2156 2157 dsl_dataset_space(ds, &refd, &avail, &uobjs, &aobjs); 2158 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_AVAILABLE, avail); 2159 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFERENCED, refd); 2160 2161 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_CREATION, 2162 ds->ds_phys->ds_creation_time); 2163 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_CREATETXG, 2164 ds->ds_phys->ds_creation_txg); 2165 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFQUOTA, 2166 ds->ds_quota); 2167 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFRESERVATION, 2168 ds->ds_reserved); 2169 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_GUID, 2170 ds->ds_phys->ds_guid); 2171 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_UNIQUE, 2172 ds->ds_phys->ds_unique_bytes); 2173 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_OBJSETID, 2174 ds->ds_object); 2175 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USERREFS, 2176 ds->ds_userrefs); 2177 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_DEFER_DESTROY, 2178 DS_IS_DEFER_DESTROY(ds) ? 1 : 0); 2179 2180 if (ds->ds_phys->ds_next_snap_obj) { 2181 /* 2182 * This is a snapshot; override the dd's space used with 2183 * our unique space and compression ratio. 2184 */ 2185 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USED, 2186 ds->ds_phys->ds_unique_bytes); 2187 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_COMPRESSRATIO, 2188 ds->ds_phys->ds_compressed_bytes == 0 ? 100 : 2189 (ds->ds_phys->ds_uncompressed_bytes * 100 / 2190 ds->ds_phys->ds_compressed_bytes)); 2191 } 2192} 2193 2194void 2195dsl_dataset_fast_stat(dsl_dataset_t *ds, dmu_objset_stats_t *stat) 2196{ 2197 stat->dds_creation_txg = ds->ds_phys->ds_creation_txg; 2198 stat->dds_inconsistent = ds->ds_phys->ds_flags & DS_FLAG_INCONSISTENT; 2199 stat->dds_guid = ds->ds_phys->ds_guid; 2200 if (ds->ds_phys->ds_next_snap_obj) { 2201 stat->dds_is_snapshot = B_TRUE; 2202 stat->dds_num_clones = ds->ds_phys->ds_num_children - 1; 2203 } else { 2204 stat->dds_is_snapshot = B_FALSE; 2205 stat->dds_num_clones = 0; 2206 } 2207 2208 /* clone origin is really a dsl_dir thing... 
*/ 2209 rw_enter(&ds->ds_dir->dd_pool->dp_config_rwlock, RW_READER); 2210 if (dsl_dir_is_clone(ds->ds_dir)) { 2211 dsl_dataset_t *ods; 2212 2213 VERIFY(0 == dsl_dataset_get_ref(ds->ds_dir->dd_pool, 2214 ds->ds_dir->dd_phys->dd_origin_obj, FTAG, &ods)); 2215 dsl_dataset_name(ods, stat->dds_origin); 2216 dsl_dataset_drop_ref(ods, FTAG); 2217 } else { 2218 stat->dds_origin[0] = '\0'; 2219 } 2220 rw_exit(&ds->ds_dir->dd_pool->dp_config_rwlock); 2221} 2222 2223uint64_t 2224dsl_dataset_fsid_guid(dsl_dataset_t *ds) 2225{ 2226 return (ds->ds_fsid_guid); 2227} 2228 2229void 2230dsl_dataset_space(dsl_dataset_t *ds, 2231 uint64_t *refdbytesp, uint64_t *availbytesp, 2232 uint64_t *usedobjsp, uint64_t *availobjsp) 2233{ 2234 *refdbytesp = ds->ds_phys->ds_used_bytes; 2235 *availbytesp = dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE); 2236 if (ds->ds_reserved > ds->ds_phys->ds_unique_bytes) 2237 *availbytesp += ds->ds_reserved - ds->ds_phys->ds_unique_bytes; 2238 if (ds->ds_quota != 0) { 2239 /* 2240 * Adjust available bytes according to refquota 2241 */ 2242 if (*refdbytesp < ds->ds_quota) 2243 *availbytesp = MIN(*availbytesp, 2244 ds->ds_quota - *refdbytesp); 2245 else 2246 *availbytesp = 0; 2247 } 2248 *usedobjsp = ds->ds_phys->ds_bp.blk_fill; 2249 *availobjsp = DN_MAX_OBJECT - *usedobjsp; 2250} 2251 2252boolean_t 2253dsl_dataset_modified_since_lastsnap(dsl_dataset_t *ds) 2254{ 2255 dsl_pool_t *dp = ds->ds_dir->dd_pool; 2256 2257 ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock) || 2258 dsl_pool_sync_context(dp)); 2259 if (ds->ds_prev == NULL) 2260 return (B_FALSE); 2261 if (ds->ds_phys->ds_bp.blk_birth > 2262 ds->ds_prev->ds_phys->ds_creation_txg) { 2263 objset_t *os, *os_prev; 2264 /* 2265 * It may be that only the ZIL differs, because it was 2266 * reset in the head. Don't count that as being 2267 * modified. 
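 * Comparing the two objsets' meta-dnodes catches any real change
 * to the object set while ignoring a ZIL-only difference.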
2268 */ 2269 if (dmu_objset_from_ds(ds, &os) != 0) 2270 return (B_TRUE); 2271 if (dmu_objset_from_ds(ds->ds_prev, &os_prev) != 0) 2272 return (B_TRUE); 2273 return (bcmp(&os->os_phys->os_meta_dnode, 2274 &os_prev->os_phys->os_meta_dnode, 2275 sizeof (os->os_phys->os_meta_dnode)) != 0); 2276 } 2277 return (B_FALSE); 2278} 2279 2280/* ARGSUSED */ 2281static int 2282dsl_dataset_snapshot_rename_check(void *arg1, void *arg2, dmu_tx_t *tx) 2283{ 2284 dsl_dataset_t *ds = arg1; 2285 char *newsnapname = arg2; 2286 dsl_dir_t *dd = ds->ds_dir; 2287 dsl_dataset_t *hds; 2288 uint64_t val; 2289 int err; 2290 2291 err = dsl_dataset_hold_obj(dd->dd_pool, 2292 dd->dd_phys->dd_head_dataset_obj, FTAG, &hds); 2293 if (err) 2294 return (err); 2295 2296 /* new name better not be in use */ 2297 err = dsl_dataset_snap_lookup(hds, newsnapname, &val); 2298 dsl_dataset_rele(hds, FTAG); 2299 2300 if (err == 0) 2301 err = EEXIST; 2302 else if (err == ENOENT) 2303 err = 0; 2304 2305 /* dataset name + 1 for the "@" + the new snapshot name must fit */ 2306 if (dsl_dir_namelen(ds->ds_dir) + 1 + strlen(newsnapname) >= MAXNAMELEN) 2307 err = ENAMETOOLONG; 2308 2309 return (err); 2310} 2311 2312static void 2313dsl_dataset_snapshot_rename_sync(void *arg1, void *arg2, dmu_tx_t *tx) 2314{ 2315 char oldname[MAXPATHLEN], newname[MAXPATHLEN]; 2316 dsl_dataset_t *ds = arg1; 2317 const char *newsnapname = arg2; 2318 dsl_dir_t *dd = ds->ds_dir; 2319 objset_t *mos = dd->dd_pool->dp_meta_objset; 2320 dsl_dataset_t *hds; 2321 int err; 2322 2323 ASSERT(ds->ds_phys->ds_next_snap_obj != 0); 2324 2325 VERIFY(0 == dsl_dataset_hold_obj(dd->dd_pool, 2326 dd->dd_phys->dd_head_dataset_obj, FTAG, &hds)); 2327 2328 VERIFY(0 == dsl_dataset_get_snapname(ds)); 2329 err = dsl_dataset_snap_remove(hds, ds->ds_snapname, tx); 2330 ASSERT3U(err, ==, 0); 2331 dsl_dataset_name(ds, oldname); 2332 mutex_enter(&ds->ds_lock); 2333 (void) strcpy(ds->ds_snapname, newsnapname); 2334 mutex_exit(&ds->ds_lock); 2335 err = zap_add(mos, hds->ds_phys->ds_snapnames_zapobj, 2336 ds->ds_snapname, 8, 1, &ds->ds_object, tx); 2337 ASSERT3U(err, ==, 0); 2338 dsl_dataset_name(ds, newname); 2339 zvol_rename_minors(oldname, newname); 2340 2341 spa_history_log_internal(LOG_DS_RENAME, dd->dd_pool->dp_spa, tx, 2342 "dataset = %llu", ds->ds_object); 2343 dsl_dataset_rele(hds, FTAG); 2344} 2345 2346struct renamesnaparg { 2347 dsl_sync_task_group_t *dstg; 2348 char failed[MAXPATHLEN]; 2349 char *oldsnap; 2350 char *newsnap; 2351}; 2352 2353static int 2354dsl_snapshot_rename_one(const char *name, void *arg) 2355{ 2356 struct renamesnaparg *ra = arg; 2357 dsl_dataset_t *ds = NULL; 2358 char *snapname; 2359 int err; 2360 2361 snapname = kmem_asprintf("%s@%s", name, ra->oldsnap); 2362 (void) strlcpy(ra->failed, snapname, sizeof (ra->failed)); 2363 2364 /* 2365 * For recursive snapshot renames the parent won't be changing 2366 * so we just pass name for both the to/from argument. 2367 */ 2368 err = zfs_secpolicy_rename_perms(snapname, snapname, CRED()); 2369 if (err != 0) { 2370 strfree(snapname); 2371 return (err == ENOENT ? 0 : err); 2372 } 2373 2374#ifdef _KERNEL 2375 /* 2376 * For all filesystems undergoing rename, we'll need to unmount it. 2377 */ 2378 (void) zfs_unmount_snap(snapname, NULL); 2379#endif 2380 err = dsl_dataset_hold(snapname, ra->dstg, &ds); 2381 strfree(snapname); 2382 if (err != 0) 2383 return (err == ENOENT ? 
0 : err); 2384 2385 dsl_sync_task_create(ra->dstg, dsl_dataset_snapshot_rename_check, 2386 dsl_dataset_snapshot_rename_sync, ds, ra->newsnap, 0); 2387 2388 return (0); 2389} 2390 2391static int 2392dsl_recursive_rename(char *oldname, const char *newname) 2393{ 2394 int err; 2395 struct renamesnaparg *ra; 2396 dsl_sync_task_t *dst; 2397 spa_t *spa; 2398 char *cp, *fsname = spa_strdup(oldname); 2399 int len = strlen(oldname) + 1; 2400 2401 /* truncate the snapshot name to get the fsname */ 2402 cp = strchr(fsname, '@'); 2403 *cp = '\0'; 2404 2405 err = spa_open(fsname, &spa, FTAG); 2406 if (err) { 2407 kmem_free(fsname, len); 2408 return (err); 2409 } 2410 ra = kmem_alloc(sizeof (struct renamesnaparg), KM_SLEEP); 2411 ra->dstg = dsl_sync_task_group_create(spa_get_dsl(spa)); 2412 2413 ra->oldsnap = strchr(oldname, '@') + 1; 2414 ra->newsnap = strchr(newname, '@') + 1; 2415 *ra->failed = '\0'; 2416 2417 err = dmu_objset_find(fsname, dsl_snapshot_rename_one, ra, 2418 DS_FIND_CHILDREN); 2419 kmem_free(fsname, len); 2420 2421 if (err == 0) { 2422 err = dsl_sync_task_group_wait(ra->dstg); 2423 } 2424 2425 for (dst = list_head(&ra->dstg->dstg_tasks); dst; 2426 dst = list_next(&ra->dstg->dstg_tasks, dst)) { 2427 dsl_dataset_t *ds = dst->dst_arg1; 2428 if (dst->dst_err) { 2429 dsl_dir_name(ds->ds_dir, ra->failed); 2430 (void) strlcat(ra->failed, "@", sizeof (ra->failed)); 2431 (void) strlcat(ra->failed, ra->newsnap, 2432 sizeof (ra->failed)); 2433 } 2434 dsl_dataset_rele(ds, ra->dstg); 2435 } 2436 2437 if (err) 2438 (void) strlcpy(oldname, ra->failed, sizeof (ra->failed)); 2439 2440 dsl_sync_task_group_destroy(ra->dstg); 2441 kmem_free(ra, sizeof (struct renamesnaparg)); 2442 spa_close(spa, FTAG); 2443 return (err); 2444} 2445 2446static int 2447dsl_valid_rename(const char *oldname, void *arg) 2448{ 2449 int delta = *(int *)arg; 2450 2451 if (strlen(oldname) + delta >= MAXNAMELEN) 2452 return (ENAMETOOLONG); 2453 2454 return (0); 2455} 2456 2457#pragma weak dmu_objset_rename = dsl_dataset_rename 2458int 2459dsl_dataset_rename(char *oldname, const char *newname, boolean_t recursive) 2460{ 2461 dsl_dir_t *dd; 2462 dsl_dataset_t *ds; 2463 const char *tail; 2464 int err; 2465 2466 err = dsl_dir_open(oldname, FTAG, &dd, &tail); 2467 if (err) 2468 return (err); 2469 2470 if (tail == NULL) { 2471 int delta = strlen(newname) - strlen(oldname); 2472 2473 /* if we're growing, validate child name lengths */ 2474 if (delta > 0) 2475 err = dmu_objset_find(oldname, dsl_valid_rename, 2476 &delta, DS_FIND_CHILDREN | DS_FIND_SNAPSHOTS); 2477 2478 if (err == 0) 2479 err = dsl_dir_rename(dd, newname); 2480 dsl_dir_close(dd, FTAG); 2481 return (err); 2482 } 2483 2484 if (tail[0] != '@') { 2485 /* the name ended in a nonexistent component */ 2486 dsl_dir_close(dd, FTAG); 2487 return (ENOENT); 2488 } 2489 2490 dsl_dir_close(dd, FTAG); 2491 2492 /* new name must be snapshot in same filesystem */ 2493 tail = strchr(newname, '@'); 2494 if (tail == NULL) 2495 return (EINVAL); 2496 tail++; 2497 if (strncmp(oldname, newname, tail - newname) != 0) 2498 return (EXDEV); 2499 2500 if (recursive) { 2501 err = dsl_recursive_rename(oldname, newname); 2502 } else { 2503 err = dsl_dataset_hold(oldname, FTAG, &ds); 2504 if (err) 2505 return (err); 2506 2507 err = dsl_sync_task_do(ds->ds_dir->dd_pool, 2508 dsl_dataset_snapshot_rename_check, 2509 dsl_dataset_snapshot_rename_sync, ds, (char *)tail, 1); 2510 2511 dsl_dataset_rele(ds, FTAG); 2512 } 2513 2514 return (err); 2515} 2516 2517struct promotenode { 2518 list_node_t link; 2519 
dsl_dataset_t *ds; 2520}; 2521 2522struct promotearg { 2523 list_t shared_snaps, origin_snaps, clone_snaps; 2524 dsl_dataset_t *origin_origin; 2525 uint64_t used, comp, uncomp, unique, cloneusedsnap, originusedsnap; 2526 char *err_ds; 2527}; 2528 2529static int snaplist_space(list_t *l, uint64_t mintxg, uint64_t *spacep); 2530static boolean_t snaplist_unstable(list_t *l); 2531 2532static int 2533dsl_dataset_promote_check(void *arg1, void *arg2, dmu_tx_t *tx) 2534{ 2535 dsl_dataset_t *hds = arg1; 2536 struct promotearg *pa = arg2; 2537 struct promotenode *snap = list_head(&pa->shared_snaps); 2538 dsl_dataset_t *origin_ds = snap->ds; 2539 int err; 2540 uint64_t unused; 2541 2542 /* Check that it is a real clone */ 2543 if (!dsl_dir_is_clone(hds->ds_dir)) 2544 return (EINVAL); 2545 2546 /* Since this is so expensive, don't do the preliminary check */ 2547 if (!dmu_tx_is_syncing(tx)) 2548 return (0); 2549 2550 if (hds->ds_phys->ds_flags & DS_FLAG_NOPROMOTE) 2551 return (EXDEV); 2552 2553 /* compute origin's new unique space */ 2554 snap = list_tail(&pa->clone_snaps); 2555 ASSERT3U(snap->ds->ds_phys->ds_prev_snap_obj, ==, origin_ds->ds_object); 2556 dsl_deadlist_space_range(&snap->ds->ds_deadlist, 2557 origin_ds->ds_phys->ds_prev_snap_txg, UINT64_MAX, 2558 &pa->unique, &unused, &unused); 2559 2560 /* 2561 * Walk the snapshots that we are moving 2562 * 2563 * Compute space to transfer. Consider the incremental changes 2564 * to used for each snapshot: 2565 * (my used) = (prev's used) + (blocks born) - (blocks killed) 2566 * So each snapshot gave birth to: 2567 * (blocks born) = (my used) - (prev's used) + (blocks killed) 2568 * So a sequence would look like: 2569 * (uN - u(N-1) + kN) + ... + (u1 - u0 + k1) + (u0 - 0 + k0) 2570 * Which simplifies to: 2571 * uN + kN + kN-1 + ... + k1 + k0 2572 * Note however, if we stop before we reach the ORIGIN we get: 2573 * uN + kN + kN-1 + ... + kM - uM-1 2574 */ 2575 pa->used = origin_ds->ds_phys->ds_used_bytes; 2576 pa->comp = origin_ds->ds_phys->ds_compressed_bytes; 2577 pa->uncomp = origin_ds->ds_phys->ds_uncompressed_bytes; 2578 for (snap = list_head(&pa->shared_snaps); snap; 2579 snap = list_next(&pa->shared_snaps, snap)) { 2580 uint64_t val, dlused, dlcomp, dluncomp; 2581 dsl_dataset_t *ds = snap->ds; 2582 2583 /* Check that the snapshot name does not conflict */ 2584 VERIFY(0 == dsl_dataset_get_snapname(ds)); 2585 err = dsl_dataset_snap_lookup(hds, ds->ds_snapname, &val); 2586 if (err == 0) { 2587 err = EEXIST; 2588 goto out; 2589 } 2590 if (err != ENOENT) 2591 goto out; 2592 2593 /* The very first snapshot does not have a deadlist */ 2594 if (ds->ds_phys->ds_prev_snap_obj == 0) 2595 continue; 2596 2597 dsl_deadlist_space(&ds->ds_deadlist, 2598 &dlused, &dlcomp, &dluncomp); 2599 pa->used += dlused; 2600 pa->comp += dlcomp; 2601 pa->uncomp += dluncomp; 2602 } 2603 2604 /* 2605 * If we are a clone of a clone then we never reached ORIGIN, 2606 * so we need to subtract out the clone origin's used space. 2607 */ 2608 if (pa->origin_origin) { 2609 pa->used -= pa->origin_origin->ds_phys->ds_used_bytes; 2610 pa->comp -= pa->origin_origin->ds_phys->ds_compressed_bytes; 2611 pa->uncomp -= pa->origin_origin->ds_phys->ds_uncompressed_bytes; 2612 } 2613 2614 /* Check that there is enough space here */ 2615 err = dsl_dir_transfer_possible(origin_ds->ds_dir, hds->ds_dir, 2616 pa->used); 2617 if (err) 2618 return (err); 2619 2620 /* 2621 * Compute the amounts of space that will be used by snapshots 2622 * after the promotion (for both origin and clone). 
For each, 2623 * it is the amount of space that will be on all of their 2624 * deadlists (that was not born before their new origin). 2625 */ 2626 if (hds->ds_dir->dd_phys->dd_flags & DD_FLAG_USED_BREAKDOWN) { 2627 uint64_t space; 2628 2629 /* 2630 * Note, typically this will not be a clone of a clone, 2631 * so dd_origin_txg will be < TXG_INITIAL, so 2632 * these snaplist_space() -> dsl_deadlist_space_range() 2633 * calls will be fast because they do not have to 2634 * iterate over all bps. 2635 */ 2636 snap = list_head(&pa->origin_snaps); 2637 err = snaplist_space(&pa->shared_snaps, 2638 snap->ds->ds_dir->dd_origin_txg, &pa->cloneusedsnap); 2639 if (err) 2640 return (err); 2641 2642 err = snaplist_space(&pa->clone_snaps, 2643 snap->ds->ds_dir->dd_origin_txg, &space); 2644 if (err) 2645 return (err); 2646 pa->cloneusedsnap += space; 2647 } 2648 if (origin_ds->ds_dir->dd_phys->dd_flags & DD_FLAG_USED_BREAKDOWN) { 2649 err = snaplist_space(&pa->origin_snaps, 2650 origin_ds->ds_phys->ds_creation_txg, &pa->originusedsnap); 2651 if (err) 2652 return (err); 2653 } 2654 2655 return (0); 2656out: 2657 pa->err_ds = snap->ds->ds_snapname; 2658 return (err); 2659} 2660 2661static void 2662dsl_dataset_promote_sync(void *arg1, void *arg2, dmu_tx_t *tx) 2663{ 2664 dsl_dataset_t *hds = arg1; 2665 struct promotearg *pa = arg2; 2666 struct promotenode *snap = list_head(&pa->shared_snaps); 2667 dsl_dataset_t *origin_ds = snap->ds; 2668 dsl_dataset_t *origin_head; 2669 dsl_dir_t *dd = hds->ds_dir; 2670 dsl_pool_t *dp = hds->ds_dir->dd_pool; 2671 dsl_dir_t *odd = NULL; 2672 uint64_t oldnext_obj; 2673 int64_t delta; 2674 2675 ASSERT(0 == (hds->ds_phys->ds_flags & DS_FLAG_NOPROMOTE)); 2676 2677 snap = list_head(&pa->origin_snaps); 2678 origin_head = snap->ds; 2679 2680 /* 2681 * We need to explicitly open odd, since origin_ds's dd will be 2682 * changing. 
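 * (Each shared snapshot, origin_ds included, is re-pointed at the
 * clone's dsl_dir below, so we hold our own reference to the old
 * dir for the rest of this function.)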
2683 */ 2684 VERIFY(0 == dsl_dir_open_obj(dp, origin_ds->ds_dir->dd_object, 2685 NULL, FTAG, &odd)); 2686 2687 /* change origin's next snap */ 2688 dmu_buf_will_dirty(origin_ds->ds_dbuf, tx); 2689 oldnext_obj = origin_ds->ds_phys->ds_next_snap_obj; 2690 snap = list_tail(&pa->clone_snaps); 2691 ASSERT3U(snap->ds->ds_phys->ds_prev_snap_obj, ==, origin_ds->ds_object); 2692 origin_ds->ds_phys->ds_next_snap_obj = snap->ds->ds_object; 2693 2694 /* change the origin's next clone */ 2695 if (origin_ds->ds_phys->ds_next_clones_obj) { 2696 remove_from_next_clones(origin_ds, snap->ds->ds_object, tx); 2697 VERIFY3U(0, ==, zap_add_int(dp->dp_meta_objset, 2698 origin_ds->ds_phys->ds_next_clones_obj, 2699 oldnext_obj, tx)); 2700 } 2701 2702 /* change origin */ 2703 dmu_buf_will_dirty(dd->dd_dbuf, tx); 2704 ASSERT3U(dd->dd_phys->dd_origin_obj, ==, origin_ds->ds_object); 2705 dd->dd_phys->dd_origin_obj = odd->dd_phys->dd_origin_obj; 2706 dd->dd_origin_txg = origin_head->ds_dir->dd_origin_txg; 2707 dmu_buf_will_dirty(odd->dd_dbuf, tx); 2708 odd->dd_phys->dd_origin_obj = origin_ds->ds_object; 2709 origin_head->ds_dir->dd_origin_txg = 2710 origin_ds->ds_phys->ds_creation_txg; 2711 2712 /* change dd_clone entries */ 2713 if (spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) { 2714 VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset, 2715 odd->dd_phys->dd_clones, hds->ds_object, tx)); 2716 VERIFY3U(0, ==, zap_add_int(dp->dp_meta_objset, 2717 pa->origin_origin->ds_dir->dd_phys->dd_clones, 2718 hds->ds_object, tx)); 2719 2720 VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset, 2721 pa->origin_origin->ds_dir->dd_phys->dd_clones, 2722 origin_head->ds_object, tx)); 2723 if (dd->dd_phys->dd_clones == 0) { 2724 dd->dd_phys->dd_clones = zap_create(dp->dp_meta_objset, 2725 DMU_OT_DSL_CLONES, DMU_OT_NONE, 0, tx); 2726 } 2727 VERIFY3U(0, ==, zap_add_int(dp->dp_meta_objset, 2728 dd->dd_phys->dd_clones, origin_head->ds_object, tx)); 2729 2730 } 2731 2732 /* move snapshots to this dir */ 2733 for (snap = list_head(&pa->shared_snaps); snap; 2734 snap = list_next(&pa->shared_snaps, snap)) { 2735 dsl_dataset_t *ds = snap->ds; 2736 2737 /* unregister props as dsl_dir is changing */ 2738 if (ds->ds_objset) { 2739 dmu_objset_evict(ds->ds_objset); 2740 ds->ds_objset = NULL; 2741 } 2742 /* move snap name entry */ 2743 VERIFY(0 == dsl_dataset_get_snapname(ds)); 2744 VERIFY(0 == dsl_dataset_snap_remove(origin_head, 2745 ds->ds_snapname, tx)); 2746 VERIFY(0 == zap_add(dp->dp_meta_objset, 2747 hds->ds_phys->ds_snapnames_zapobj, ds->ds_snapname, 2748 8, 1, &ds->ds_object, tx)); 2749 2750 /* change containing dsl_dir */ 2751 dmu_buf_will_dirty(ds->ds_dbuf, tx); 2752 ASSERT3U(ds->ds_phys->ds_dir_obj, ==, odd->dd_object); 2753 ds->ds_phys->ds_dir_obj = dd->dd_object; 2754 ASSERT3P(ds->ds_dir, ==, odd); 2755 dsl_dir_close(ds->ds_dir, ds); 2756 VERIFY(0 == dsl_dir_open_obj(dp, dd->dd_object, 2757 NULL, ds, &ds->ds_dir)); 2758 2759 /* move any clone references */ 2760 if (ds->ds_phys->ds_next_clones_obj && 2761 spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) { 2762 zap_cursor_t zc; 2763 zap_attribute_t za; 2764 2765 for (zap_cursor_init(&zc, dp->dp_meta_objset, 2766 ds->ds_phys->ds_next_clones_obj); 2767 zap_cursor_retrieve(&zc, &za) == 0; 2768 zap_cursor_advance(&zc)) { 2769 dsl_dataset_t *cnds; 2770 uint64_t o; 2771 2772 if (za.za_first_integer == oldnext_obj) { 2773 /* 2774 * We've already moved the 2775 * origin's reference. 
2776 */ 2777 continue; 2778 } 2779 2780 VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, 2781 za.za_first_integer, FTAG, &cnds)); 2782 o = cnds->ds_dir->dd_phys->dd_head_dataset_obj; 2783 2784 VERIFY3U(zap_remove_int(dp->dp_meta_objset, 2785 odd->dd_phys->dd_clones, o, tx), ==, 0); 2786 VERIFY3U(zap_add_int(dp->dp_meta_objset, 2787 dd->dd_phys->dd_clones, o, tx), ==, 0); 2788 dsl_dataset_rele(cnds, FTAG); 2789 } 2790 zap_cursor_fini(&zc); 2791 } 2792 2793 ASSERT3U(dsl_prop_numcb(ds), ==, 0); 2794 } 2795 2796 /* 2797 * Change space accounting. 2798 * Note, pa->*usedsnap and dd_used_breakdown[SNAP] will either 2799 * both be valid, or both be 0 (resulting in delta == 0). This 2800 * is true for each of {clone,origin} independently. 2801 */ 2802 2803 delta = pa->cloneusedsnap - 2804 dd->dd_phys->dd_used_breakdown[DD_USED_SNAP]; 2805 ASSERT3S(delta, >=, 0); 2806 ASSERT3U(pa->used, >=, delta); 2807 dsl_dir_diduse_space(dd, DD_USED_SNAP, delta, 0, 0, tx); 2808 dsl_dir_diduse_space(dd, DD_USED_HEAD, 2809 pa->used - delta, pa->comp, pa->uncomp, tx); 2810 2811 delta = pa->originusedsnap - 2812 odd->dd_phys->dd_used_breakdown[DD_USED_SNAP]; 2813 ASSERT3S(delta, <=, 0); 2814 ASSERT3U(pa->used, >=, -delta); 2815 dsl_dir_diduse_space(odd, DD_USED_SNAP, delta, 0, 0, tx); 2816 dsl_dir_diduse_space(odd, DD_USED_HEAD, 2817 -pa->used - delta, -pa->comp, -pa->uncomp, tx); 2818 2819 origin_ds->ds_phys->ds_unique_bytes = pa->unique; 2820 2821 /* log history record */ 2822 spa_history_log_internal(LOG_DS_PROMOTE, dd->dd_pool->dp_spa, tx, 2823 "dataset = %llu", hds->ds_object); 2824 2825 dsl_dir_close(odd, FTAG); 2826} 2827 2828static char *snaplist_tag = "snaplist"; 2829/* 2830 * Make a list of dsl_dataset_t's for the snapshots between first_obj 2831 * (exclusive) and last_obj (inclusive). The list will be in reverse 2832 * order (last_obj will be the list_head()). If first_obj == 0, do all 2833 * snapshots back to this dataset's origin. 
2834 */ 2835static int 2836snaplist_make(dsl_pool_t *dp, boolean_t own, 2837 uint64_t first_obj, uint64_t last_obj, list_t *l) 2838{ 2839 uint64_t obj = last_obj; 2840 2841 ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock)); 2842 2843 list_create(l, sizeof (struct promotenode), 2844 offsetof(struct promotenode, link)); 2845 2846 while (obj != first_obj) { 2847 dsl_dataset_t *ds; 2848 struct promotenode *snap; 2849 int err; 2850 2851 if (own) { 2852 err = dsl_dataset_own_obj(dp, obj, 2853 0, snaplist_tag, &ds); 2854 if (err == 0) 2855 dsl_dataset_make_exclusive(ds, snaplist_tag); 2856 } else { 2857 err = dsl_dataset_hold_obj(dp, obj, snaplist_tag, &ds); 2858 } 2859 if (err == ENOENT) { 2860 /* lost race with snapshot destroy */ 2861 struct promotenode *last = list_tail(l); 2862 ASSERT(obj != last->ds->ds_phys->ds_prev_snap_obj); 2863 obj = last->ds->ds_phys->ds_prev_snap_obj; 2864 continue; 2865 } else if (err) { 2866 return (err); 2867 } 2868 2869 if (first_obj == 0) 2870 first_obj = ds->ds_dir->dd_phys->dd_origin_obj; 2871 2872 snap = kmem_alloc(sizeof (struct promotenode), KM_SLEEP); 2873 snap->ds = ds; 2874 list_insert_tail(l, snap); 2875 obj = ds->ds_phys->ds_prev_snap_obj; 2876 } 2877 2878 return (0); 2879} 2880 2881static int 2882snaplist_space(list_t *l, uint64_t mintxg, uint64_t *spacep) 2883{ 2884 struct promotenode *snap; 2885 2886 *spacep = 0; 2887 for (snap = list_head(l); snap; snap = list_next(l, snap)) { 2888 uint64_t used, comp, uncomp; 2889 dsl_deadlist_space_range(&snap->ds->ds_deadlist, 2890 mintxg, UINT64_MAX, &used, &comp, &uncomp); 2891 *spacep += used; 2892 } 2893 return (0); 2894} 2895 2896static void 2897snaplist_destroy(list_t *l, boolean_t own) 2898{ 2899 struct promotenode *snap; 2900 2901 if (!l || !list_link_active(&l->list_head)) 2902 return; 2903 2904 while ((snap = list_tail(l)) != NULL) { 2905 list_remove(l, snap); 2906 if (own) 2907 dsl_dataset_disown(snap->ds, snaplist_tag); 2908 else 2909 dsl_dataset_rele(snap->ds, snaplist_tag); 2910 kmem_free(snap, sizeof (struct promotenode)); 2911 } 2912 list_destroy(l); 2913} 2914 2915/* 2916 * Promote a clone. Nomenclature note: 2917 * "clone" or "cds": the original clone which is being promoted 2918 * "origin" or "ods": the snapshot which is originally clone's origin 2919 * "origin head" or "ohds": the dataset which is the head 2920 * (filesystem/volume) for the origin 2921 * "origin origin": the origin of the origin's filesystem (typically 2922 * NULL, indicating that the clone is not a clone of a clone). 2923 */ 2924int 2925dsl_dataset_promote(const char *name, char *conflsnap) 2926{ 2927 dsl_dataset_t *ds; 2928 dsl_dir_t *dd; 2929 dsl_pool_t *dp; 2930 dmu_object_info_t doi; 2931 struct promotearg pa = { 0 }; 2932 struct promotenode *snap; 2933 int err; 2934 2935 err = dsl_dataset_hold(name, FTAG, &ds); 2936 if (err) 2937 return (err); 2938 dd = ds->ds_dir; 2939 dp = dd->dd_pool; 2940 2941 err = dmu_object_info(dp->dp_meta_objset, 2942 ds->ds_phys->ds_snapnames_zapobj, &doi); 2943 if (err) { 2944 dsl_dataset_rele(ds, FTAG); 2945 return (err); 2946 } 2947 2948 if (dsl_dataset_is_snapshot(ds) || dd->dd_phys->dd_origin_obj == 0) { 2949 dsl_dataset_rele(ds, FTAG); 2950 return (EINVAL); 2951 } 2952 2953 /* 2954 * We are going to inherit all the snapshots taken before our 2955 * origin (i.e., our new origin will be our parent's origin). 2956 * Take ownership of them so that we can rename them into our 2957 * namespace. 
2958 */ 2959 rw_enter(&dp->dp_config_rwlock, RW_READER); 2960 2961 err = snaplist_make(dp, B_TRUE, 0, dd->dd_phys->dd_origin_obj, 2962 &pa.shared_snaps); 2963 if (err != 0) 2964 goto out; 2965 2966 err = snaplist_make(dp, B_FALSE, 0, ds->ds_object, &pa.clone_snaps); 2967 if (err != 0) 2968 goto out; 2969 2970 snap = list_head(&pa.shared_snaps); 2971 ASSERT3U(snap->ds->ds_object, ==, dd->dd_phys->dd_origin_obj); 2972 err = snaplist_make(dp, B_FALSE, dd->dd_phys->dd_origin_obj, 2973 snap->ds->ds_dir->dd_phys->dd_head_dataset_obj, &pa.origin_snaps); 2974 if (err != 0) 2975 goto out; 2976 2977 if (snap->ds->ds_dir->dd_phys->dd_origin_obj != 0) { 2978 err = dsl_dataset_hold_obj(dp, 2979 snap->ds->ds_dir->dd_phys->dd_origin_obj, 2980 FTAG, &pa.origin_origin); 2981 if (err != 0) 2982 goto out; 2983 } 2984 2985out: 2986 rw_exit(&dp->dp_config_rwlock); 2987 2988 /* 2989 * Add in 128x the snapnames zapobj size, since we will be moving 2990 * a bunch of snapnames to the promoted ds, and dirtying their 2991 * bonus buffers. 2992 */ 2993 if (err == 0) { 2994 err = dsl_sync_task_do(dp, dsl_dataset_promote_check, 2995 dsl_dataset_promote_sync, ds, &pa, 2996 2 + 2 * doi.doi_physical_blocks_512); 2997 if (err && pa.err_ds && conflsnap) 2998 (void) strncpy(conflsnap, pa.err_ds, MAXNAMELEN); 2999 } 3000 3001 snaplist_destroy(&pa.shared_snaps, B_TRUE); 3002 snaplist_destroy(&pa.clone_snaps, B_FALSE); 3003 snaplist_destroy(&pa.origin_snaps, B_FALSE); 3004 if (pa.origin_origin) 3005 dsl_dataset_rele(pa.origin_origin, FTAG); 3006 dsl_dataset_rele(ds, FTAG); 3007 return (err); 3008} 3009 3010struct cloneswaparg { 3011 dsl_dataset_t *cds; /* clone dataset */ 3012 dsl_dataset_t *ohds; /* origin's head dataset */ 3013 boolean_t force; 3014 int64_t unused_refres_delta; /* change in unconsumed refreservation */ 3015}; 3016 3017/* ARGSUSED */ 3018static int 3019dsl_dataset_clone_swap_check(void *arg1, void *arg2, dmu_tx_t *tx) 3020{ 3021 struct cloneswaparg *csa = arg1; 3022 3023 /* they should both be heads */ 3024 if (dsl_dataset_is_snapshot(csa->cds) || 3025 dsl_dataset_is_snapshot(csa->ohds)) 3026 return (EINVAL); 3027 3028 /* the branch point should be just before them */ 3029 if (csa->cds->ds_prev != csa->ohds->ds_prev) 3030 return (EINVAL); 3031 3032 /* cds should be the clone (unless they are unrelated) */ 3033 if (csa->cds->ds_prev != NULL && 3034 csa->cds->ds_prev != csa->cds->ds_dir->dd_pool->dp_origin_snap && 3035 csa->ohds->ds_object != 3036 csa->cds->ds_prev->ds_phys->ds_next_snap_obj) 3037 return (EINVAL); 3038 3039 /* the clone should be a child of the origin */ 3040 if (csa->cds->ds_dir->dd_parent != csa->ohds->ds_dir) 3041 return (EINVAL); 3042 3043 /* ohds shouldn't be modified unless 'force' */ 3044 if (!csa->force && dsl_dataset_modified_since_lastsnap(csa->ohds)) 3045 return (ETXTBSY); 3046 3047 /* adjust amount of any unconsumed refreservation */ 3048 csa->unused_refres_delta = 3049 (int64_t)MIN(csa->ohds->ds_reserved, 3050 csa->ohds->ds_phys->ds_unique_bytes) - 3051 (int64_t)MIN(csa->ohds->ds_reserved, 3052 csa->cds->ds_phys->ds_unique_bytes); 3053 3054 if (csa->unused_refres_delta > 0 && 3055 csa->unused_refres_delta > 3056 dsl_dir_space_available(csa->ohds->ds_dir, NULL, 0, TRUE)) 3057 return (ENOSPC); 3058 3059 if (csa->ohds->ds_quota != 0 && 3060 csa->cds->ds_phys->ds_unique_bytes > csa->ohds->ds_quota) 3061 return (EDQUOT); 3062 3063 return (0); 3064} 3065 3066/* ARGSUSED */ 3067static void 3068dsl_dataset_clone_swap_sync(void *arg1, void *arg2, dmu_tx_t *tx) 3069{ 3070 struct cloneswaparg 
*csa = arg1; 3071 dsl_pool_t *dp = csa->cds->ds_dir->dd_pool; 3072 3073 ASSERT(csa->cds->ds_reserved == 0); 3074 ASSERT(csa->ohds->ds_quota == 0 || 3075 csa->cds->ds_phys->ds_unique_bytes <= csa->ohds->ds_quota); 3076 3077 dmu_buf_will_dirty(csa->cds->ds_dbuf, tx); 3078 dmu_buf_will_dirty(csa->ohds->ds_dbuf, tx); 3079 3080 if (csa->cds->ds_objset != NULL) { 3081 dmu_objset_evict(csa->cds->ds_objset); 3082 csa->cds->ds_objset = NULL; 3083 } 3084 3085 if (csa->ohds->ds_objset != NULL) { 3086 dmu_objset_evict(csa->ohds->ds_objset); 3087 csa->ohds->ds_objset = NULL; 3088 } 3089 3090 /* 3091 * Reset origin's unique bytes, if it exists. 3092 */ 3093 if (csa->cds->ds_prev) { 3094 dsl_dataset_t *origin = csa->cds->ds_prev; 3095 uint64_t comp, uncomp; 3096 3097 dmu_buf_will_dirty(origin->ds_dbuf, tx); 3098 dsl_deadlist_space_range(&csa->cds->ds_deadlist, 3099 origin->ds_phys->ds_prev_snap_txg, UINT64_MAX, 3100 &origin->ds_phys->ds_unique_bytes, &comp, &uncomp); 3101 } 3102 3103 /* swap blkptrs */ 3104 { 3105 blkptr_t tmp; 3106 tmp = csa->ohds->ds_phys->ds_bp; 3107 csa->ohds->ds_phys->ds_bp = csa->cds->ds_phys->ds_bp; 3108 csa->cds->ds_phys->ds_bp = tmp; 3109 } 3110 3111 /* set dd_*_bytes */ 3112 { 3113 int64_t dused, dcomp, duncomp; 3114 uint64_t cdl_used, cdl_comp, cdl_uncomp; 3115 uint64_t odl_used, odl_comp, odl_uncomp; 3116 3117 ASSERT3U(csa->cds->ds_dir->dd_phys-> 3118 dd_used_breakdown[DD_USED_SNAP], ==, 0); 3119 3120 dsl_deadlist_space(&csa->cds->ds_deadlist, 3121 &cdl_used, &cdl_comp, &cdl_uncomp); 3122 dsl_deadlist_space(&csa->ohds->ds_deadlist, 3123 &odl_used, &odl_comp, &odl_uncomp); 3124 3125 dused = csa->cds->ds_phys->ds_used_bytes + cdl_used - 3126 (csa->ohds->ds_phys->ds_used_bytes + odl_used); 3127 dcomp = csa->cds->ds_phys->ds_compressed_bytes + cdl_comp - 3128 (csa->ohds->ds_phys->ds_compressed_bytes + odl_comp); 3129 duncomp = csa->cds->ds_phys->ds_uncompressed_bytes + 3130 cdl_uncomp - 3131 (csa->ohds->ds_phys->ds_uncompressed_bytes + odl_uncomp); 3132 3133 dsl_dir_diduse_space(csa->ohds->ds_dir, DD_USED_HEAD, 3134 dused, dcomp, duncomp, tx); 3135 dsl_dir_diduse_space(csa->cds->ds_dir, DD_USED_HEAD, 3136 -dused, -dcomp, -duncomp, tx); 3137 3138 /* 3139 * The difference in the space used by snapshots is the 3140 * difference in snapshot space due to the head's 3141 * deadlist (since that's the only thing that's 3142 * changing that affects the snapused). 3143 */ 3144 dsl_deadlist_space_range(&csa->cds->ds_deadlist, 3145 csa->ohds->ds_dir->dd_origin_txg, UINT64_MAX, 3146 &cdl_used, &cdl_comp, &cdl_uncomp); 3147 dsl_deadlist_space_range(&csa->ohds->ds_deadlist, 3148 csa->ohds->ds_dir->dd_origin_txg, UINT64_MAX, 3149 &odl_used, &odl_comp, &odl_uncomp); 3150 dsl_dir_transfer_space(csa->ohds->ds_dir, cdl_used - odl_used, 3151 DD_USED_HEAD, DD_USED_SNAP, tx); 3152 } 3153 3154 /* swap ds_*_bytes */ 3155 SWITCH64(csa->ohds->ds_phys->ds_used_bytes, 3156 csa->cds->ds_phys->ds_used_bytes); 3157 SWITCH64(csa->ohds->ds_phys->ds_compressed_bytes, 3158 csa->cds->ds_phys->ds_compressed_bytes); 3159 SWITCH64(csa->ohds->ds_phys->ds_uncompressed_bytes, 3160 csa->cds->ds_phys->ds_uncompressed_bytes); 3161 SWITCH64(csa->ohds->ds_phys->ds_unique_bytes, 3162 csa->cds->ds_phys->ds_unique_bytes); 3163 3164 /* apply any parent delta for change in unconsumed refreservation */ 3165 dsl_dir_diduse_space(csa->ohds->ds_dir, DD_USED_REFRSRV, 3166 csa->unused_refres_delta, 0, 0, tx); 3167 3168 /* 3169 * Swap deadlists. 
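 * (Both in-core deadlists are closed, the on-disk deadlist objects
 * are swapped, and each dataset is then reopened on its newly
 * assigned deadlist object.)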
3170 */ 3171 dsl_deadlist_close(&csa->cds->ds_deadlist); 3172 dsl_deadlist_close(&csa->ohds->ds_deadlist); 3173 SWITCH64(csa->ohds->ds_phys->ds_deadlist_obj, 3174 csa->cds->ds_phys->ds_deadlist_obj); 3175 dsl_deadlist_open(&csa->cds->ds_deadlist, dp->dp_meta_objset, 3176 csa->cds->ds_phys->ds_deadlist_obj); 3177 dsl_deadlist_open(&csa->ohds->ds_deadlist, dp->dp_meta_objset, 3178 csa->ohds->ds_phys->ds_deadlist_obj); 3179 3180 dsl_scan_ds_clone_swapped(csa->ohds, csa->cds, tx); 3181} 3182 3183/* 3184 * Swap 'clone' with its origin head datasets. Used at the end of "zfs 3185 * recv" into an existing fs to swizzle the file system to the new 3186 * version, and by "zfs rollback". Can also be used to swap two 3187 * independent head datasets if neither has any snapshots. 3188 */ 3189int 3190dsl_dataset_clone_swap(dsl_dataset_t *clone, dsl_dataset_t *origin_head, 3191 boolean_t force) 3192{ 3193 struct cloneswaparg csa; 3194 int error; 3195 3196 ASSERT(clone->ds_owner); 3197 ASSERT(origin_head->ds_owner); 3198retry: 3199 /* 3200 * Need exclusive access for the swap. If we're swapping these 3201 * datasets back after an error, we already hold the locks. 3202 */ 3203 if (!RW_WRITE_HELD(&clone->ds_rwlock)) 3204 rw_enter(&clone->ds_rwlock, RW_WRITER); 3205 if (!RW_WRITE_HELD(&origin_head->ds_rwlock) && 3206 !rw_tryenter(&origin_head->ds_rwlock, RW_WRITER)) { 3207 rw_exit(&clone->ds_rwlock); 3208 rw_enter(&origin_head->ds_rwlock, RW_WRITER); 3209 if (!rw_tryenter(&clone->ds_rwlock, RW_WRITER)) { 3210 rw_exit(&origin_head->ds_rwlock); 3211 goto retry; 3212 } 3213 } 3214 csa.cds = clone; 3215 csa.ohds = origin_head; 3216 csa.force = force; 3217 error = dsl_sync_task_do(clone->ds_dir->dd_pool, 3218 dsl_dataset_clone_swap_check, 3219 dsl_dataset_clone_swap_sync, &csa, NULL, 9); 3220 return (error); 3221} 3222 3223/* 3224 * Given a pool name and a dataset object number in that pool, 3225 * return the name of that dataset. 3226 */ 3227int 3228dsl_dsobj_to_dsname(char *pname, uint64_t obj, char *buf) 3229{ 3230 spa_t *spa; 3231 dsl_pool_t *dp; 3232 dsl_dataset_t *ds; 3233 int error; 3234 3235 if ((error = spa_open(pname, &spa, FTAG)) != 0) 3236 return (error); 3237 dp = spa_get_dsl(spa); 3238 rw_enter(&dp->dp_config_rwlock, RW_READER); 3239 if ((error = dsl_dataset_hold_obj(dp, obj, FTAG, &ds)) == 0) { 3240 dsl_dataset_name(ds, buf); 3241 dsl_dataset_rele(ds, FTAG); 3242 } 3243 rw_exit(&dp->dp_config_rwlock); 3244 spa_close(spa, FTAG); 3245 3246 return (error); 3247} 3248 3249int 3250dsl_dataset_check_quota(dsl_dataset_t *ds, boolean_t check_quota, 3251 uint64_t asize, uint64_t inflight, uint64_t *used, uint64_t *ref_rsrv) 3252{ 3253 int error = 0; 3254 3255 ASSERT3S(asize, >, 0); 3256 3257 /* 3258 * *ref_rsrv is the portion of asize that will come from any 3259 * unconsumed refreservation space. 3260 */ 3261 *ref_rsrv = 0; 3262 3263 mutex_enter(&ds->ds_lock); 3264 /* 3265 * Make a space adjustment for reserved bytes. 
3266 */ 3267 if (ds->ds_reserved > ds->ds_phys->ds_unique_bytes) { 3268 ASSERT3U(*used, >=, 3269 ds->ds_reserved - ds->ds_phys->ds_unique_bytes); 3270 *used -= (ds->ds_reserved - ds->ds_phys->ds_unique_bytes); 3271 *ref_rsrv = 3272 asize - MIN(asize, parent_delta(ds, asize + inflight)); 3273 } 3274 3275 if (!check_quota || ds->ds_quota == 0) { 3276 mutex_exit(&ds->ds_lock); 3277 return (0); 3278 } 3279 /* 3280 * If they are requesting more space, and our current estimate 3281 * is over quota, they get to try again unless the actual 3282 * on-disk is over quota and there are no pending changes (which 3283 * may free up space for us). 3284 */ 3285 if (ds->ds_phys->ds_used_bytes + inflight >= ds->ds_quota) { 3286 if (inflight > 0 || ds->ds_phys->ds_used_bytes < ds->ds_quota) 3287 error = ERESTART; 3288 else 3289 error = EDQUOT; 3290 } 3291 mutex_exit(&ds->ds_lock); 3292 3293 return (error); 3294} 3295 3296/* ARGSUSED */ 3297static int 3298dsl_dataset_set_quota_check(void *arg1, void *arg2, dmu_tx_t *tx) 3299{ 3300 dsl_dataset_t *ds = arg1; 3301 dsl_prop_setarg_t *psa = arg2; 3302 int err; 3303 3304 if (spa_version(ds->ds_dir->dd_pool->dp_spa) < SPA_VERSION_REFQUOTA) 3305 return (ENOTSUP); 3306 3307 if ((err = dsl_prop_predict_sync(ds->ds_dir, psa)) != 0) 3308 return (err); 3309 3310 if (psa->psa_effective_value == 0) 3311 return (0); 3312 3313 if (psa->psa_effective_value < ds->ds_phys->ds_used_bytes || 3314 psa->psa_effective_value < ds->ds_reserved) 3315 return (ENOSPC); 3316 3317 return (0); 3318} 3319 3320extern void dsl_prop_set_sync(void *, void *, dmu_tx_t *); 3321 3322void 3323dsl_dataset_set_quota_sync(void *arg1, void *arg2, dmu_tx_t *tx) 3324{ 3325 dsl_dataset_t *ds = arg1; 3326 dsl_prop_setarg_t *psa = arg2; 3327 uint64_t effective_value = psa->psa_effective_value; 3328 3329 dsl_prop_set_sync(ds, psa, tx); 3330 DSL_PROP_CHECK_PREDICTION(ds->ds_dir, psa); 3331 3332 if (ds->ds_quota != effective_value) { 3333 dmu_buf_will_dirty(ds->ds_dbuf, tx); 3334 ds->ds_quota = effective_value; 3335 3336 spa_history_log_internal(LOG_DS_REFQUOTA, 3337 ds->ds_dir->dd_pool->dp_spa, tx, "%lld dataset = %llu ", 3338 (longlong_t)ds->ds_quota, ds->ds_object); 3339 } 3340} 3341 3342int 3343dsl_dataset_set_quota(const char *dsname, zprop_source_t source, uint64_t quota) 3344{ 3345 dsl_dataset_t *ds; 3346 dsl_prop_setarg_t psa; 3347 int err; 3348 3349 dsl_prop_setarg_init_uint64(&psa, "refquota", source, "a); 3350 3351 err = dsl_dataset_hold(dsname, FTAG, &ds); 3352 if (err) 3353 return (err); 3354 3355 /* 3356 * If someone removes a file, then tries to set the quota, we 3357 * want to make sure the file freeing takes effect. 
3358 */ 3359 txg_wait_open(ds->ds_dir->dd_pool, 0); 3360 3361 err = dsl_sync_task_do(ds->ds_dir->dd_pool, 3362 dsl_dataset_set_quota_check, dsl_dataset_set_quota_sync, 3363 ds, &psa, 0); 3364 3365 dsl_dataset_rele(ds, FTAG); 3366 return (err); 3367} 3368 3369static int 3370dsl_dataset_set_reservation_check(void *arg1, void *arg2, dmu_tx_t *tx) 3371{ 3372 dsl_dataset_t *ds = arg1; 3373 dsl_prop_setarg_t *psa = arg2; 3374 uint64_t effective_value; 3375 uint64_t unique; 3376 int err; 3377 3378 if (spa_version(ds->ds_dir->dd_pool->dp_spa) < 3379 SPA_VERSION_REFRESERVATION) 3380 return (ENOTSUP); 3381 3382 if (dsl_dataset_is_snapshot(ds)) 3383 return (EINVAL); 3384 3385 if ((err = dsl_prop_predict_sync(ds->ds_dir, psa)) != 0) 3386 return (err); 3387 3388 effective_value = psa->psa_effective_value; 3389 3390 /* 3391 * If we are doing the preliminary check in open context, the 3392 * space estimates may be inaccurate. 3393 */ 3394 if (!dmu_tx_is_syncing(tx)) 3395 return (0); 3396 3397 mutex_enter(&ds->ds_lock); 3398 if (!DS_UNIQUE_IS_ACCURATE(ds)) 3399 dsl_dataset_recalc_head_uniq(ds); 3400 unique = ds->ds_phys->ds_unique_bytes; 3401 mutex_exit(&ds->ds_lock); 3402 3403 if (MAX(unique, effective_value) > MAX(unique, ds->ds_reserved)) { 3404 uint64_t delta = MAX(unique, effective_value) - 3405 MAX(unique, ds->ds_reserved); 3406 3407 if (delta > dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE)) 3408 return (ENOSPC); 3409 if (ds->ds_quota > 0 && 3410 effective_value > ds->ds_quota) 3411 return (ENOSPC); 3412 } 3413 3414 return (0); 3415} 3416 3417static void 3418dsl_dataset_set_reservation_sync(void *arg1, void *arg2, dmu_tx_t *tx) 3419{ 3420 dsl_dataset_t *ds = arg1; 3421 dsl_prop_setarg_t *psa = arg2; 3422 uint64_t effective_value = psa->psa_effective_value; 3423 uint64_t unique; 3424 int64_t delta; 3425 3426 dsl_prop_set_sync(ds, psa, tx); 3427 DSL_PROP_CHECK_PREDICTION(ds->ds_dir, psa); 3428 3429 dmu_buf_will_dirty(ds->ds_dbuf, tx); 3430 3431 mutex_enter(&ds->ds_dir->dd_lock); 3432 mutex_enter(&ds->ds_lock); 3433 ASSERT(DS_UNIQUE_IS_ACCURATE(ds)); 3434 unique = ds->ds_phys->ds_unique_bytes; 3435 delta = MAX(0, (int64_t)(effective_value - unique)) - 3436 MAX(0, (int64_t)(ds->ds_reserved - unique)); 3437 ds->ds_reserved = effective_value; 3438 mutex_exit(&ds->ds_lock); 3439 3440 dsl_dir_diduse_space(ds->ds_dir, DD_USED_REFRSRV, delta, 0, 0, tx); 3441 mutex_exit(&ds->ds_dir->dd_lock); 3442 3443 spa_history_log_internal(LOG_DS_REFRESERV, 3444 ds->ds_dir->dd_pool->dp_spa, tx, "%lld dataset = %llu", 3445 (longlong_t)effective_value, ds->ds_object); 3446} 3447 3448int 3449dsl_dataset_set_reservation(const char *dsname, zprop_source_t source, 3450 uint64_t reservation) 3451{ 3452 dsl_dataset_t *ds; 3453 dsl_prop_setarg_t psa; 3454 int err; 3455 3456 dsl_prop_setarg_init_uint64(&psa, "refreservation", source, 3457 &reservation); 3458 3459 err = dsl_dataset_hold(dsname, FTAG, &ds); 3460 if (err) 3461 return (err); 3462 3463 err = dsl_sync_task_do(ds->ds_dir->dd_pool, 3464 dsl_dataset_set_reservation_check, 3465 dsl_dataset_set_reservation_sync, ds, &psa, 0); 3466 3467 dsl_dataset_rele(ds, FTAG); 3468 return (err); 3469} 3470 3471typedef struct zfs_hold_cleanup_arg { 3472 dsl_pool_t *dp; 3473 uint64_t dsobj; 3474 char htag[MAXNAMELEN]; 3475} zfs_hold_cleanup_arg_t; 3476 3477static void 3478dsl_dataset_user_release_onexit(void *arg) 3479{ 3480 zfs_hold_cleanup_arg_t *ca = arg; 3481 3482 (void) dsl_dataset_user_release_tmp(ca->dp, ca->dsobj, ca->htag, 3483 B_TRUE); 3484 kmem_free(ca, sizeof 
(zfs_hold_cleanup_arg_t)); 3485} 3486 3487void 3488dsl_register_onexit_hold_cleanup(dsl_dataset_t *ds, const char *htag, 3489 minor_t minor) 3490{ 3491 zfs_hold_cleanup_arg_t *ca; 3492 3493 ca = kmem_alloc(sizeof (zfs_hold_cleanup_arg_t), KM_SLEEP); 3494 ca->dp = ds->ds_dir->dd_pool; 3495 ca->dsobj = ds->ds_object; 3496 (void) strlcpy(ca->htag, htag, sizeof (ca->htag)); 3497 VERIFY3U(0, ==, zfs_onexit_add_cb(minor, 3498 dsl_dataset_user_release_onexit, ca, NULL)); 3499} 3500 3501/* 3502 * If you add new checks here, you may need to add 3503 * additional checks to the "temporary" case in 3504 * snapshot_check() in dmu_objset.c. 3505 */ 3506static int 3507dsl_dataset_user_hold_check(void *arg1, void *arg2, dmu_tx_t *tx) 3508{ 3509 dsl_dataset_t *ds = arg1; 3510 struct dsl_ds_holdarg *ha = arg2; 3511 char *htag = ha->htag; 3512 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset; 3513 int error = 0; 3514 3515 if (spa_version(ds->ds_dir->dd_pool->dp_spa) < SPA_VERSION_USERREFS) 3516 return (ENOTSUP); 3517 3518 if (!dsl_dataset_is_snapshot(ds)) 3519 return (EINVAL); 3520 3521 /* tags must be unique */ 3522 mutex_enter(&ds->ds_lock); 3523 if (ds->ds_phys->ds_userrefs_obj) { 3524 error = zap_lookup(mos, ds->ds_phys->ds_userrefs_obj, htag, 3525 8, 1, tx); 3526 if (error == 0) 3527 error = EEXIST; 3528 else if (error == ENOENT) 3529 error = 0; 3530 } 3531 mutex_exit(&ds->ds_lock); 3532 3533 if (error == 0 && ha->temphold && 3534 strlen(htag) + MAX_TAG_PREFIX_LEN >= MAXNAMELEN) 3535 error = E2BIG; 3536 3537 return (error); 3538} 3539 3540void 3541dsl_dataset_user_hold_sync(void *arg1, void *arg2, dmu_tx_t *tx) 3542{ 3543 dsl_dataset_t *ds = arg1; 3544 struct dsl_ds_holdarg *ha = arg2; 3545 char *htag = ha->htag; 3546 dsl_pool_t *dp = ds->ds_dir->dd_pool; 3547 objset_t *mos = dp->dp_meta_objset; 3548 uint64_t now = gethrestime_sec(); 3549 uint64_t zapobj; 3550 3551 mutex_enter(&ds->ds_lock); 3552 if (ds->ds_phys->ds_userrefs_obj == 0) { 3553 /* 3554 * This is the first user hold for this dataset. Create 3555 * the userrefs zap object. 
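 * (The zap maps each hold tag to the time the hold was taken.)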
3556 */ 3557 dmu_buf_will_dirty(ds->ds_dbuf, tx); 3558 zapobj = ds->ds_phys->ds_userrefs_obj = 3559 zap_create(mos, DMU_OT_USERREFS, DMU_OT_NONE, 0, tx); 3560 } else { 3561 zapobj = ds->ds_phys->ds_userrefs_obj; 3562 } 3563 ds->ds_userrefs++; 3564 mutex_exit(&ds->ds_lock); 3565 3566 VERIFY(0 == zap_add(mos, zapobj, htag, 8, 1, &now, tx)); 3567 3568 if (ha->temphold) { 3569 VERIFY(0 == dsl_pool_user_hold(dp, ds->ds_object, 3570 htag, &now, tx)); 3571 } 3572 3573 spa_history_log_internal(LOG_DS_USER_HOLD, 3574 dp->dp_spa, tx, "<%s> temp = %d dataset = %llu", htag, 3575 (int)ha->temphold, ds->ds_object); 3576} 3577 3578static int 3579dsl_dataset_user_hold_one(const char *dsname, void *arg) 3580{ 3581 struct dsl_ds_holdarg *ha = arg; 3582 dsl_dataset_t *ds; 3583 int error; 3584 char *name; 3585 3586 /* alloc a buffer to hold dsname@snapname plus terminating NULL */ 3587 name = kmem_asprintf("%s@%s", dsname, ha->snapname); 3588 error = dsl_dataset_hold(name, ha->dstg, &ds); 3589 strfree(name); 3590 if (error == 0) { 3591 ha->gotone = B_TRUE; 3592 dsl_sync_task_create(ha->dstg, dsl_dataset_user_hold_check, 3593 dsl_dataset_user_hold_sync, ds, ha, 0); 3594 } else if (error == ENOENT && ha->recursive) { 3595 error = 0; 3596 } else { 3597 (void) strlcpy(ha->failed, dsname, sizeof (ha->failed)); 3598 } 3599 return (error); 3600} 3601 3602int 3603dsl_dataset_user_hold_for_send(dsl_dataset_t *ds, char *htag, 3604 boolean_t temphold) 3605{ 3606 struct dsl_ds_holdarg *ha; 3607 int error; 3608 3609 ha = kmem_zalloc(sizeof (struct dsl_ds_holdarg), KM_SLEEP); 3610 ha->htag = htag; 3611 ha->temphold = temphold; 3612 error = dsl_sync_task_do(ds->ds_dir->dd_pool, 3613 dsl_dataset_user_hold_check, dsl_dataset_user_hold_sync, 3614 ds, ha, 0); 3615 kmem_free(ha, sizeof (struct dsl_ds_holdarg)); 3616 3617 return (error); 3618} 3619 3620int 3621dsl_dataset_user_hold(char *dsname, char *snapname, char *htag, 3622 boolean_t recursive, boolean_t temphold, int cleanup_fd) 3623{ 3624 struct dsl_ds_holdarg *ha; 3625 dsl_sync_task_t *dst; 3626 spa_t *spa; 3627 int error; 3628 minor_t minor = 0; 3629 3630 if (cleanup_fd != -1) { 3631 /* Currently we only support cleanup-on-exit of tempholds. */ 3632 if (!temphold) 3633 return (EINVAL); 3634 error = zfs_onexit_fd_hold(cleanup_fd, &minor); 3635 if (error) 3636 return (error); 3637 } 3638 3639 ha = kmem_zalloc(sizeof (struct dsl_ds_holdarg), KM_SLEEP); 3640 3641 (void) strlcpy(ha->failed, dsname, sizeof (ha->failed)); 3642 3643 error = spa_open(dsname, &spa, FTAG); 3644 if (error) { 3645 kmem_free(ha, sizeof (struct dsl_ds_holdarg)); 3646 if (cleanup_fd != -1) 3647 zfs_onexit_fd_rele(cleanup_fd); 3648 return (error); 3649 } 3650 3651 ha->dstg = dsl_sync_task_group_create(spa_get_dsl(spa)); 3652 ha->htag = htag; 3653 ha->snapname = snapname; 3654 ha->recursive = recursive; 3655 ha->temphold = temphold; 3656 3657 if (recursive) { 3658 error = dmu_objset_find(dsname, dsl_dataset_user_hold_one, 3659 ha, DS_FIND_CHILDREN); 3660 } else { 3661 error = dsl_dataset_user_hold_one(dsname, ha); 3662 } 3663 if (error == 0) 3664 error = dsl_sync_task_group_wait(ha->dstg); 3665 3666 for (dst = list_head(&ha->dstg->dstg_tasks); dst; 3667 dst = list_next(&ha->dstg->dstg_tasks, dst)) { 3668 dsl_dataset_t *ds = dst->dst_arg1; 3669 3670 if (dst->dst_err) { 3671 dsl_dataset_name(ds, ha->failed); 3672 *strchr(ha->failed, '@') = '\0'; 3673 } else if (error == 0 && minor != 0 && temphold) { 3674 /* 3675 * If this hold is to be released upon process exit, 3676 * register that action now. 
3677 */ 3678 dsl_register_onexit_hold_cleanup(ds, htag, minor); 3679 } 3680 dsl_dataset_rele(ds, ha->dstg); 3681 } 3682 3683 if (error == 0 && recursive && !ha->gotone) 3684 error = ENOENT; 3685 3686 if (error) 3687 (void) strlcpy(dsname, ha->failed, sizeof (ha->failed)); 3688 3689 dsl_sync_task_group_destroy(ha->dstg); 3690 3691 kmem_free(ha, sizeof (struct dsl_ds_holdarg)); 3692 spa_close(spa, FTAG); 3693 if (cleanup_fd != -1) 3694 zfs_onexit_fd_rele(cleanup_fd); 3695 return (error); 3696} 3697 3698struct dsl_ds_releasearg { 3699 dsl_dataset_t *ds; 3700 const char *htag; 3701 boolean_t own; /* do we own or just hold ds? */ 3702}; 3703 3704static int 3705dsl_dataset_release_might_destroy(dsl_dataset_t *ds, const char *htag, 3706 boolean_t *might_destroy) 3707{ 3708 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset; 3709 uint64_t zapobj; 3710 uint64_t tmp; 3711 int error; 3712 3713 *might_destroy = B_FALSE; 3714 3715 mutex_enter(&ds->ds_lock); 3716 zapobj = ds->ds_phys->ds_userrefs_obj; 3717 if (zapobj == 0) { 3718 /* The tag can't possibly exist */ 3719 mutex_exit(&ds->ds_lock); 3720 return (ESRCH); 3721 } 3722 3723 /* Make sure the tag exists */ 3724 error = zap_lookup(mos, zapobj, htag, 8, 1, &tmp); 3725 if (error) { 3726 mutex_exit(&ds->ds_lock); 3727 if (error == ENOENT) 3728 error = ESRCH; 3729 return (error); 3730 } 3731 3732 if (ds->ds_userrefs == 1 && ds->ds_phys->ds_num_children == 1 && 3733 DS_IS_DEFER_DESTROY(ds)) 3734 *might_destroy = B_TRUE; 3735 3736 mutex_exit(&ds->ds_lock); 3737 return (0); 3738} 3739 3740static int 3741dsl_dataset_user_release_check(void *arg1, void *tag, dmu_tx_t *tx) 3742{ 3743 struct dsl_ds_releasearg *ra = arg1; 3744 dsl_dataset_t *ds = ra->ds; 3745 boolean_t might_destroy; 3746 int error; 3747 3748 if (spa_version(ds->ds_dir->dd_pool->dp_spa) < SPA_VERSION_USERREFS) 3749 return (ENOTSUP); 3750 3751 error = dsl_dataset_release_might_destroy(ds, ra->htag, &might_destroy); 3752 if (error) 3753 return (error); 3754 3755 if (might_destroy) { 3756 struct dsl_ds_destroyarg dsda = {0}; 3757 3758 if (dmu_tx_is_syncing(tx)) { 3759 /* 3760 * If we're not prepared to remove the snapshot, 3761 * we can't allow the release to happen right now. 
3762 */ 3763 if (!ra->own) 3764 return (EBUSY); 3765 } 3766 dsda.ds = ds; 3767 dsda.releasing = B_TRUE; 3768 return (dsl_dataset_destroy_check(&dsda, tag, tx)); 3769 } 3770 3771 return (0); 3772} 3773 3774static void 3775dsl_dataset_user_release_sync(void *arg1, void *tag, dmu_tx_t *tx) 3776{ 3777 struct dsl_ds_releasearg *ra = arg1; 3778 dsl_dataset_t *ds = ra->ds; 3779 dsl_pool_t *dp = ds->ds_dir->dd_pool; 3780 objset_t *mos = dp->dp_meta_objset; 3781 uint64_t zapobj; 3782 uint64_t dsobj = ds->ds_object; 3783 uint64_t refs; 3784 int error; 3785 3786 mutex_enter(&ds->ds_lock); 3787 ds->ds_userrefs--; 3788 refs = ds->ds_userrefs; 3789 mutex_exit(&ds->ds_lock); 3790 error = dsl_pool_user_release(dp, ds->ds_object, ra->htag, tx); 3791 VERIFY(error == 0 || error == ENOENT); 3792 zapobj = ds->ds_phys->ds_userrefs_obj; 3793 VERIFY(0 == zap_remove(mos, zapobj, ra->htag, tx)); 3794 if (ds->ds_userrefs == 0 && ds->ds_phys->ds_num_children == 1 && 3795 DS_IS_DEFER_DESTROY(ds)) { 3796 struct dsl_ds_destroyarg dsda = {0}; 3797 3798 ASSERT(ra->own); 3799 dsda.ds = ds; 3800 dsda.releasing = B_TRUE; 3801 /* We already did the destroy_check */ 3802 dsl_dataset_destroy_sync(&dsda, tag, tx); 3803 } 3804 3805 spa_history_log_internal(LOG_DS_USER_RELEASE, 3806 dp->dp_spa, tx, "<%s> %lld dataset = %llu", 3807 ra->htag, (longlong_t)refs, dsobj); 3808} 3809 3810static int 3811dsl_dataset_user_release_one(const char *dsname, void *arg) 3812{ 3813 struct dsl_ds_holdarg *ha = arg; 3814 struct dsl_ds_releasearg *ra; 3815 dsl_dataset_t *ds; 3816 int error; 3817 void *dtag = ha->dstg; 3818 char *name; 3819 boolean_t own = B_FALSE; 3820 boolean_t might_destroy; 3821 3822 /* alloc a buffer to hold dsname@snapname, plus the terminating NULL */ 3823 name = kmem_asprintf("%s@%s", dsname, ha->snapname); 3824 error = dsl_dataset_hold(name, dtag, &ds); 3825 strfree(name); 3826 if (error == ENOENT && ha->recursive) 3827 return (0); 3828 (void) strlcpy(ha->failed, dsname, sizeof (ha->failed)); 3829 if (error) 3830 return (error); 3831 3832 ha->gotone = B_TRUE; 3833 3834 ASSERT(dsl_dataset_is_snapshot(ds)); 3835 3836 error = dsl_dataset_release_might_destroy(ds, ha->htag, &might_destroy); 3837 if (error) { 3838 dsl_dataset_rele(ds, dtag); 3839 return (error); 3840 } 3841 3842 if (might_destroy) { 3843#ifdef _KERNEL 3844 name = kmem_asprintf("%s@%s", dsname, ha->snapname); 3845 error = zfs_unmount_snap(name, NULL); 3846 strfree(name); 3847 if (error) { 3848 dsl_dataset_rele(ds, dtag); 3849 return (error); 3850 } 3851#endif 3852 if (!dsl_dataset_tryown(ds, B_TRUE, dtag)) { 3853 dsl_dataset_rele(ds, dtag); 3854 return (EBUSY); 3855 } else { 3856 own = B_TRUE; 3857 dsl_dataset_make_exclusive(ds, dtag); 3858 } 3859 } 3860 3861 ra = kmem_alloc(sizeof (struct dsl_ds_releasearg), KM_SLEEP); 3862 ra->ds = ds; 3863 ra->htag = ha->htag; 3864 ra->own = own; 3865 dsl_sync_task_create(ha->dstg, dsl_dataset_user_release_check, 3866 dsl_dataset_user_release_sync, ra, dtag, 0); 3867 3868 return (0); 3869} 3870 3871int 3872dsl_dataset_user_release(char *dsname, char *snapname, char *htag, 3873 boolean_t recursive) 3874{ 3875 struct dsl_ds_holdarg *ha; 3876 dsl_sync_task_t *dst; 3877 spa_t *spa; 3878 int error; 3879 3880top: 3881 ha = kmem_zalloc(sizeof (struct dsl_ds_holdarg), KM_SLEEP); 3882 3883 (void) strlcpy(ha->failed, dsname, sizeof (ha->failed)); 3884 3885 error = spa_open(dsname, &spa, FTAG); 3886 if (error) { 3887 kmem_free(ha, sizeof (struct dsl_ds_holdarg)); 3888 return (error); 3889 } 3890 3891 ha->dstg = 
int
dsl_dataset_user_release(char *dsname, char *snapname, char *htag,
    boolean_t recursive)
{
	struct dsl_ds_holdarg *ha;
	dsl_sync_task_t *dst;
	spa_t *spa;
	int error;

top:
	ha = kmem_zalloc(sizeof (struct dsl_ds_holdarg), KM_SLEEP);

	(void) strlcpy(ha->failed, dsname, sizeof (ha->failed));

	error = spa_open(dsname, &spa, FTAG);
	if (error) {
		kmem_free(ha, sizeof (struct dsl_ds_holdarg));
		return (error);
	}

	ha->dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
	ha->htag = htag;
	ha->snapname = snapname;
	ha->recursive = recursive;
	if (recursive) {
		error = dmu_objset_find(dsname, dsl_dataset_user_release_one,
		    ha, DS_FIND_CHILDREN);
	} else {
		error = dsl_dataset_user_release_one(dsname, ha);
	}
	if (error == 0)
		error = dsl_sync_task_group_wait(ha->dstg);

	for (dst = list_head(&ha->dstg->dstg_tasks); dst;
	    dst = list_next(&ha->dstg->dstg_tasks, dst)) {
		struct dsl_ds_releasearg *ra = dst->dst_arg1;
		dsl_dataset_t *ds = ra->ds;

		if (dst->dst_err)
			dsl_dataset_name(ds, ha->failed);

		if (ra->own)
			dsl_dataset_disown(ds, ha->dstg);
		else
			dsl_dataset_rele(ds, ha->dstg);

		kmem_free(ra, sizeof (struct dsl_ds_releasearg));
	}

	if (error == 0 && recursive && !ha->gotone)
		error = ENOENT;

	if (error && error != EBUSY)
		(void) strlcpy(dsname, ha->failed, sizeof (ha->failed));

	dsl_sync_task_group_destroy(ha->dstg);
	kmem_free(ha, sizeof (struct dsl_ds_holdarg));
	spa_close(spa, FTAG);

	/*
	 * We can get EBUSY if we were racing with deferred destroy and
	 * dsl_dataset_user_release_check() hadn't done the necessary
	 * open context setup. We can also get EBUSY if we're racing
	 * with destroy and that thread is the ds_owner. Either way
	 * the busy condition should be transient, and we should retry
	 * the release operation.
	 */
	if (error == EBUSY)
		goto top;

	return (error);
}

/*
 * Called at spa_load time (with retry == B_FALSE) to release a stale
 * temporary user hold. Also called by the onexit code (with retry == B_TRUE).
 */
int
dsl_dataset_user_release_tmp(dsl_pool_t *dp, uint64_t dsobj, char *htag,
    boolean_t retry)
{
	dsl_dataset_t *ds;
	char *snap;
	char *name;
	int namelen;
	int error;

	do {
		rw_enter(&dp->dp_config_rwlock, RW_READER);
		error = dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds);
		rw_exit(&dp->dp_config_rwlock);
		if (error)
			return (error);
		namelen = dsl_dataset_namelen(ds)+1;
		name = kmem_alloc(namelen, KM_SLEEP);
		dsl_dataset_name(ds, name);
		dsl_dataset_rele(ds, FTAG);

		snap = strchr(name, '@');
		*snap = '\0';
		++snap;
		error = dsl_dataset_user_release(name, snap, htag, B_FALSE);
		kmem_free(name, namelen);

		/*
		 * The object can't have been destroyed because we have a hold,
		 * but it might have been renamed, resulting in ENOENT. Retry
		 * if we've been requested to do so.
		 *
		 * It would be nice if we could use the dsobj all the way
		 * through and avoid ENOENT entirely. But we might need to
		 * unmount the snapshot, and there's currently no way to lookup
		 * a vfsp using a ZFS object id.
		 */
	} while ((error == ENOENT) && retry);

	return (error);
}
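
/*
 * Fill in an nvlist describing the user holds on a snapshot (descriptive
 * comment, added for clarity): one uint64 entry per hold, keyed by the hold
 * tag, carrying the value recorded in the snapshot's userrefs ZAP when the
 * hold was taken.  Callers outside this file use this to answer hold
 * queries such as "zfs holds"; the nvlist is allocated here and must be
 * freed by the caller.
 */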
int
dsl_dataset_get_holds(const char *dsname, nvlist_t **nvp)
{
	dsl_dataset_t *ds;
	int err;

	err = dsl_dataset_hold(dsname, FTAG, &ds);
	if (err)
		return (err);

	VERIFY(0 == nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP));
	if (ds->ds_phys->ds_userrefs_obj != 0) {
		zap_attribute_t *za;
		zap_cursor_t zc;

		za = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);
		for (zap_cursor_init(&zc, ds->ds_dir->dd_pool->dp_meta_objset,
		    ds->ds_phys->ds_userrefs_obj);
		    zap_cursor_retrieve(&zc, za) == 0;
		    zap_cursor_advance(&zc)) {
			VERIFY(0 == nvlist_add_uint64(*nvp, za->za_name,
			    za->za_first_integer));
		}
		zap_cursor_fini(&zc);
		kmem_free(za, sizeof (zap_attribute_t));
	}
	dsl_dataset_rele(ds, FTAG);
	return (0);
}

/*
 * Note, this function is used as the callback for dmu_objset_find(). We
 * always return 0 so that we will continue to find and process
 * inconsistent datasets, even if we encounter an error trying to
 * process one of them.
 */
/* ARGSUSED */
int
dsl_destroy_inconsistent(const char *dsname, void *arg)
{
	dsl_dataset_t *ds;

	if (dsl_dataset_own(dsname, B_TRUE, FTAG, &ds) == 0) {
		if (DS_IS_INCONSISTENT(ds))
			(void) dsl_dataset_destroy(ds, FTAG, B_FALSE);
		else
			dsl_dataset_disown(ds, FTAG);
	}
	return (0);
}
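
/*
 * Illustrative sketch (hypothetical; the real caller lives outside this
 * file, in the pool-import path) of how dsl_destroy_inconsistent() above is
 * driven through dmu_objset_find() to clean up datasets left
 * DS_IS_INCONSISTENT, e.g. by an interrupted receive:
 *
 *	(void) dmu_objset_find(spa_name(spa), dsl_destroy_inconsistent,
 *	    NULL, DS_FIND_CHILDREN);
 */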