dsl_dataset.c (223623)
1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21/*
22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 * Copyright (c) 2011 by Delphix. All rights reserved.
23 */
24
25#include <sys/dmu_objset.h>
26#include <sys/dsl_dataset.h>
27#include <sys/dsl_dir.h>
28#include <sys/dsl_prop.h>
29#include <sys/dsl_synctask.h>
30#include <sys/dmu_traverse.h>
31#include <sys/dmu_tx.h>
32#include <sys/arc.h>
33#include <sys/zio.h>
34#include <sys/zap.h>
35#include <sys/unique.h>
36#include <sys/zfs_context.h>
37#include <sys/zfs_ioctl.h>
38#include <sys/spa.h>
39#include <sys/zfs_znode.h>
40#include <sys/zfs_onexit.h>
41#include <sys/zvol.h>
42#include <sys/dsl_scan.h>
43#include <sys/dsl_deadlist.h>
44
45static char *dsl_reaper = "the grim reaper";
46
47static dsl_checkfunc_t dsl_dataset_destroy_begin_check;
48static dsl_syncfunc_t dsl_dataset_destroy_begin_sync;
49static dsl_syncfunc_t dsl_dataset_set_reservation_sync;
50
51#define SWITCH64(x, y) \
52 { \
53 uint64_t __tmp = (x); \
54 (x) = (y); \
55 (y) = __tmp; \
56 }
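/*
 * Usage sketch (illustrative): SWITCH64 swaps two uint64_t l-values in
 * place; process_old_deadlist() below uses it to exchange the on-disk
 * deadlist objects of a snapshot and its next snapshot:
 *
 *	SWITCH64(ds_next->ds_phys->ds_deadlist_obj,
 *	    ds->ds_phys->ds_deadlist_obj);
 */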
57
58#define DS_REF_MAX (1ULL << 62)
59
60#define DSL_DEADLIST_BLOCKSIZE SPA_MAXBLOCKSIZE
61
62#define DSL_DATASET_IS_DESTROYED(ds) ((ds)->ds_owner == dsl_reaper)
63
64
65/*
 66 * Figure out how much of this delta should be propagated to the dsl_dir
67 * layer. If there's a refreservation, that space has already been
68 * partially accounted for in our ancestors.
69 */
70static int64_t
71parent_delta(dsl_dataset_t *ds, int64_t delta)
72{
73 uint64_t old_bytes, new_bytes;
74
75 if (ds->ds_reserved == 0)
76 return (delta);
77
78 old_bytes = MAX(ds->ds_phys->ds_unique_bytes, ds->ds_reserved);
79 new_bytes = MAX(ds->ds_phys->ds_unique_bytes + delta, ds->ds_reserved);
80
81 ASSERT3U(ABS((int64_t)(new_bytes - old_bytes)), <=, ABS(delta));
82 return (new_bytes - old_bytes);
83}
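/*
 * Worked example (illustrative numbers): with ds_reserved = 10M and
 * ds_unique_bytes = 8M, a delta of +1M stays under the refreservation,
 * so old_bytes = new_bytes = 10M and none of the delta is propagated
 * to the dsl_dir.  A delta of +3M gives new_bytes = 11M, so only the
 * 1M above the reservation is charged to our ancestors.
 */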
84
85void
86dsl_dataset_block_born(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx)
87{
88 int used = bp_get_dsize_sync(tx->tx_pool->dp_spa, bp);
89 int compressed = BP_GET_PSIZE(bp);
90 int uncompressed = BP_GET_UCSIZE(bp);
91 int64_t delta;
92
93 dprintf_bp(bp, "ds=%p", ds);
94
95 ASSERT(dmu_tx_is_syncing(tx));
96 /* It could have been compressed away to nothing */
97 if (BP_IS_HOLE(bp))
98 return;
99 ASSERT(BP_GET_TYPE(bp) != DMU_OT_NONE);
100 ASSERT3U(BP_GET_TYPE(bp), <, DMU_OT_NUMTYPES);
101 if (ds == NULL) {
102 /*
103 * Account for the meta-objset space in its placeholder
104 * dsl_dir.
105 */
106 ASSERT3U(compressed, ==, uncompressed); /* it's all metadata */
107 dsl_dir_diduse_space(tx->tx_pool->dp_mos_dir, DD_USED_HEAD,
108 used, compressed, uncompressed, tx);
109 dsl_dir_dirty(tx->tx_pool->dp_mos_dir, tx);
110 return;
111 }
112 dmu_buf_will_dirty(ds->ds_dbuf, tx);
113
114 mutex_enter(&ds->ds_dir->dd_lock);
115 mutex_enter(&ds->ds_lock);
116 delta = parent_delta(ds, used);
117 ds->ds_phys->ds_used_bytes += used;
118 ds->ds_phys->ds_compressed_bytes += compressed;
119 ds->ds_phys->ds_uncompressed_bytes += uncompressed;
120 ds->ds_phys->ds_unique_bytes += used;
121 mutex_exit(&ds->ds_lock);
122 dsl_dir_diduse_space(ds->ds_dir, DD_USED_HEAD, delta,
123 compressed, uncompressed, tx);
124 dsl_dir_transfer_space(ds->ds_dir, used - delta,
125 DD_USED_REFRSRV, DD_USED_HEAD, tx);
126 mutex_exit(&ds->ds_dir->dd_lock);
127}
128
129int
130dsl_dataset_block_kill(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx,
131 boolean_t async)
132{
133 if (BP_IS_HOLE(bp))
134 return (0);
135
136 ASSERT(dmu_tx_is_syncing(tx));
137 ASSERT(bp->blk_birth <= tx->tx_txg);
138
139 int used = bp_get_dsize_sync(tx->tx_pool->dp_spa, bp);
140 int compressed = BP_GET_PSIZE(bp);
141 int uncompressed = BP_GET_UCSIZE(bp);
142
143 ASSERT(used > 0);
144 if (ds == NULL) {
145 /*
146 * Account for the meta-objset space in its placeholder
147 * dataset.
148 */
149 dsl_free(tx->tx_pool, tx->tx_txg, bp);
150
151 dsl_dir_diduse_space(tx->tx_pool->dp_mos_dir, DD_USED_HEAD,
152 -used, -compressed, -uncompressed, tx);
153 dsl_dir_dirty(tx->tx_pool->dp_mos_dir, tx);
154 return (used);
155 }
156 ASSERT3P(tx->tx_pool, ==, ds->ds_dir->dd_pool);
157
158 ASSERT(!dsl_dataset_is_snapshot(ds));
159 dmu_buf_will_dirty(ds->ds_dbuf, tx);
160
161 if (bp->blk_birth > ds->ds_phys->ds_prev_snap_txg) {
162 int64_t delta;
163
164 dprintf_bp(bp, "freeing ds=%llu", ds->ds_object);
165 dsl_free(tx->tx_pool, tx->tx_txg, bp);
166
167 mutex_enter(&ds->ds_dir->dd_lock);
168 mutex_enter(&ds->ds_lock);
169 ASSERT(ds->ds_phys->ds_unique_bytes >= used ||
170 !DS_UNIQUE_IS_ACCURATE(ds));
171 delta = parent_delta(ds, -used);
172 ds->ds_phys->ds_unique_bytes -= used;
173 mutex_exit(&ds->ds_lock);
174 dsl_dir_diduse_space(ds->ds_dir, DD_USED_HEAD,
175 delta, -compressed, -uncompressed, tx);
176 dsl_dir_transfer_space(ds->ds_dir, -used - delta,
177 DD_USED_REFRSRV, DD_USED_HEAD, tx);
178 mutex_exit(&ds->ds_dir->dd_lock);
179 } else {
180 dprintf_bp(bp, "putting on dead list: %s", "");
181 if (async) {
182 /*
183 * We are here as part of zio's write done callback,
184 * which means we're a zio interrupt thread. We can't
185 * call dsl_deadlist_insert() now because it may block
186 * waiting for I/O. Instead, put bp on the deferred
187 * queue and let dsl_pool_sync() finish the job.
188 */
189 bplist_append(&ds->ds_pending_deadlist, bp);
190 } else {
191 dsl_deadlist_insert(&ds->ds_deadlist, bp, tx);
192 }
193 ASSERT3U(ds->ds_prev->ds_object, ==,
194 ds->ds_phys->ds_prev_snap_obj);
195 ASSERT(ds->ds_prev->ds_phys->ds_num_children > 0);
196 /* if (bp->blk_birth > prev prev snap txg) prev unique += bs */
197 if (ds->ds_prev->ds_phys->ds_next_snap_obj ==
198 ds->ds_object && bp->blk_birth >
199 ds->ds_prev->ds_phys->ds_prev_snap_txg) {
200 dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
201 mutex_enter(&ds->ds_prev->ds_lock);
202 ds->ds_prev->ds_phys->ds_unique_bytes += used;
203 mutex_exit(&ds->ds_prev->ds_lock);
204 }
205 if (bp->blk_birth > ds->ds_dir->dd_origin_txg) {
206 dsl_dir_transfer_space(ds->ds_dir, used,
207 DD_USED_HEAD, DD_USED_SNAP, tx);
208 }
209 }
210 mutex_enter(&ds->ds_lock);
211 ASSERT3U(ds->ds_phys->ds_used_bytes, >=, used);
212 ds->ds_phys->ds_used_bytes -= used;
213 ASSERT3U(ds->ds_phys->ds_compressed_bytes, >=, compressed);
214 ds->ds_phys->ds_compressed_bytes -= compressed;
215 ASSERT3U(ds->ds_phys->ds_uncompressed_bytes, >=, uncompressed);
216 ds->ds_phys->ds_uncompressed_bytes -= uncompressed;
217 mutex_exit(&ds->ds_lock);
218
219 return (used);
220}
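/*
 * Summary of the rule implemented above (illustrative): a block born
 * after the previous snapshot is unique to this dataset and is freed
 * immediately, while a block born at or before that snapshot is still
 * referenced by it, so the block is recorded on the deadlist instead
 * and is not reclaimed until the referencing snapshot is destroyed.
 */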
221
222uint64_t
223dsl_dataset_prev_snap_txg(dsl_dataset_t *ds)
224{
225 uint64_t trysnap = 0;
226
227 if (ds == NULL)
228 return (0);
229 /*
230 * The snapshot creation could fail, but that would cause an
231 * incorrect FALSE return, which would only result in an
232 * overestimation of the amount of space that an operation would
233 * consume, which is OK.
234 *
235 * There's also a small window where we could miss a pending
236 * snapshot, because we could set the sync task in the quiescing
237 * phase. So this should only be used as a guess.
238 */
239 if (ds->ds_trysnap_txg >
240 spa_last_synced_txg(ds->ds_dir->dd_pool->dp_spa))
241 trysnap = ds->ds_trysnap_txg;
242 return (MAX(ds->ds_phys->ds_prev_snap_txg, trysnap));
243}
244
245boolean_t
246dsl_dataset_block_freeable(dsl_dataset_t *ds, const blkptr_t *bp,
247 uint64_t blk_birth)
248{
249 if (blk_birth <= dsl_dataset_prev_snap_txg(ds))
250 return (B_FALSE);
251
252 ddt_prefetch(dsl_dataset_get_spa(ds), bp);
253
254 return (B_TRUE);
255}
256
257/* ARGSUSED */
258static void
259dsl_dataset_evict(dmu_buf_t *db, void *dsv)
260{
261 dsl_dataset_t *ds = dsv;
262
263 ASSERT(ds->ds_owner == NULL || DSL_DATASET_IS_DESTROYED(ds));
264
265 unique_remove(ds->ds_fsid_guid);
266
267 if (ds->ds_objset != NULL)
268 dmu_objset_evict(ds->ds_objset);
269
270 if (ds->ds_prev) {
271 dsl_dataset_drop_ref(ds->ds_prev, ds);
272 ds->ds_prev = NULL;
273 }
274
275 bplist_destroy(&ds->ds_pending_deadlist);
276 if (db != NULL) {
277 dsl_deadlist_close(&ds->ds_deadlist);
278 } else {
279 ASSERT(ds->ds_deadlist.dl_dbuf == NULL);
280 ASSERT(!ds->ds_deadlist.dl_oldfmt);
281 }
282 if (ds->ds_dir)
283 dsl_dir_close(ds->ds_dir, ds);
284
285 ASSERT(!list_link_active(&ds->ds_synced_link));
286
287 if (mutex_owned(&ds->ds_lock))
288 mutex_exit(&ds->ds_lock);
289 mutex_destroy(&ds->ds_lock);
290 mutex_destroy(&ds->ds_recvlock);
291 if (mutex_owned(&ds->ds_opening_lock))
292 mutex_exit(&ds->ds_opening_lock);
293 mutex_destroy(&ds->ds_opening_lock);
294 rw_destroy(&ds->ds_rwlock);
295 cv_destroy(&ds->ds_exclusive_cv);
296
297 kmem_free(ds, sizeof (dsl_dataset_t));
298}
299
300static int
301dsl_dataset_get_snapname(dsl_dataset_t *ds)
302{
303 dsl_dataset_phys_t *headphys;
304 int err;
305 dmu_buf_t *headdbuf;
306 dsl_pool_t *dp = ds->ds_dir->dd_pool;
307 objset_t *mos = dp->dp_meta_objset;
308
309 if (ds->ds_snapname[0])
310 return (0);
311 if (ds->ds_phys->ds_next_snap_obj == 0)
312 return (0);
313
314 err = dmu_bonus_hold(mos, ds->ds_dir->dd_phys->dd_head_dataset_obj,
315 FTAG, &headdbuf);
316 if (err)
317 return (err);
318 headphys = headdbuf->db_data;
319 err = zap_value_search(dp->dp_meta_objset,
320 headphys->ds_snapnames_zapobj, ds->ds_object, 0, ds->ds_snapname);
321 dmu_buf_rele(headdbuf, FTAG);
322 return (err);
323}
324
325static int
326dsl_dataset_snap_lookup(dsl_dataset_t *ds, const char *name, uint64_t *value)
327{
328 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
329 uint64_t snapobj = ds->ds_phys->ds_snapnames_zapobj;
330 matchtype_t mt;
331 int err;
332
333 if (ds->ds_phys->ds_flags & DS_FLAG_CI_DATASET)
334 mt = MT_FIRST;
335 else
336 mt = MT_EXACT;
337
338 err = zap_lookup_norm(mos, snapobj, name, 8, 1,
339 value, mt, NULL, 0, NULL);
340 if (err == ENOTSUP && mt == MT_FIRST)
341 err = zap_lookup(mos, snapobj, name, 8, 1, value);
342 return (err);
343}
344
345static int
346dsl_dataset_snap_remove(dsl_dataset_t *ds, char *name, dmu_tx_t *tx)
347{
348 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
349 uint64_t snapobj = ds->ds_phys->ds_snapnames_zapobj;
350 matchtype_t mt;
351 int err;
352
353 dsl_dir_snap_cmtime_update(ds->ds_dir);
354
355 if (ds->ds_phys->ds_flags & DS_FLAG_CI_DATASET)
356 mt = MT_FIRST;
357 else
358 mt = MT_EXACT;
359
360 err = zap_remove_norm(mos, snapobj, name, mt, tx);
361 if (err == ENOTSUP && mt == MT_FIRST)
362 err = zap_remove(mos, snapobj, name, tx);
363 return (err);
364}
365
366static int
367dsl_dataset_get_ref(dsl_pool_t *dp, uint64_t dsobj, void *tag,
368 dsl_dataset_t **dsp)
369{
370 objset_t *mos = dp->dp_meta_objset;
371 dmu_buf_t *dbuf;
372 dsl_dataset_t *ds;
373 int err;
374 dmu_object_info_t doi;
375
376 ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock) ||
377 dsl_pool_sync_context(dp));
378
379 err = dmu_bonus_hold(mos, dsobj, tag, &dbuf);
380 if (err)
381 return (err);
382
 383	/* Make sure dsobj has the correct object type. */
 384	dmu_object_info_from_db(dbuf, &doi);
 385	if (doi.doi_type != DMU_OT_DSL_DATASET) {
 386		dmu_buf_rele(dbuf, tag);	/* don't leak the bonus hold */
 387		return (EINVAL);
 388	}
387
388 ds = dmu_buf_get_user(dbuf);
389 if (ds == NULL) {
390 dsl_dataset_t *winner;
391
392 ds = kmem_zalloc(sizeof (dsl_dataset_t), KM_SLEEP);
393 ds->ds_dbuf = dbuf;
394 ds->ds_object = dsobj;
395 ds->ds_phys = dbuf->db_data;
396
397 mutex_init(&ds->ds_lock, NULL, MUTEX_DEFAULT, NULL);
398 mutex_init(&ds->ds_recvlock, NULL, MUTEX_DEFAULT, NULL);
399 mutex_init(&ds->ds_opening_lock, NULL, MUTEX_DEFAULT, NULL);
400 rw_init(&ds->ds_rwlock, 0, 0, 0);
401 cv_init(&ds->ds_exclusive_cv, NULL, CV_DEFAULT, NULL);
402
403 bplist_create(&ds->ds_pending_deadlist);
404 dsl_deadlist_open(&ds->ds_deadlist,
405 mos, ds->ds_phys->ds_deadlist_obj);
406
407 if (err == 0) {
408 err = dsl_dir_open_obj(dp,
409 ds->ds_phys->ds_dir_obj, NULL, ds, &ds->ds_dir);
410 }
411 if (err) {
412 mutex_destroy(&ds->ds_lock);
413 mutex_destroy(&ds->ds_recvlock);
414 mutex_destroy(&ds->ds_opening_lock);
415 rw_destroy(&ds->ds_rwlock);
416 cv_destroy(&ds->ds_exclusive_cv);
417 bplist_destroy(&ds->ds_pending_deadlist);
418 dsl_deadlist_close(&ds->ds_deadlist);
419 kmem_free(ds, sizeof (dsl_dataset_t));
420 dmu_buf_rele(dbuf, tag);
421 return (err);
422 }
423
424 if (!dsl_dataset_is_snapshot(ds)) {
425 ds->ds_snapname[0] = '\0';
426 if (ds->ds_phys->ds_prev_snap_obj) {
427 err = dsl_dataset_get_ref(dp,
428 ds->ds_phys->ds_prev_snap_obj,
429 ds, &ds->ds_prev);
430 }
431 } else {
432 if (zfs_flags & ZFS_DEBUG_SNAPNAMES)
433 err = dsl_dataset_get_snapname(ds);
434 if (err == 0 && ds->ds_phys->ds_userrefs_obj != 0) {
435 err = zap_count(
436 ds->ds_dir->dd_pool->dp_meta_objset,
437 ds->ds_phys->ds_userrefs_obj,
438 &ds->ds_userrefs);
439 }
440 }
441
442 if (err == 0 && !dsl_dataset_is_snapshot(ds)) {
443 /*
444 * In sync context, we're called with either no lock
445 * or with the write lock. If we're not syncing,
446 * we're always called with the read lock held.
447 */
448 boolean_t need_lock =
449 !RW_WRITE_HELD(&dp->dp_config_rwlock) &&
450 dsl_pool_sync_context(dp);
451
452 if (need_lock)
453 rw_enter(&dp->dp_config_rwlock, RW_READER);
454
455 err = dsl_prop_get_ds(ds,
456 "refreservation", sizeof (uint64_t), 1,
457 &ds->ds_reserved, NULL);
458 if (err == 0) {
459 err = dsl_prop_get_ds(ds,
460 "refquota", sizeof (uint64_t), 1,
461 &ds->ds_quota, NULL);
462 }
463
464 if (need_lock)
465 rw_exit(&dp->dp_config_rwlock);
466 } else {
467 ds->ds_reserved = ds->ds_quota = 0;
468 }
469
470 if (err == 0) {
471 winner = dmu_buf_set_user_ie(dbuf, ds, &ds->ds_phys,
472 dsl_dataset_evict);
473 }
474 if (err || winner) {
475 bplist_destroy(&ds->ds_pending_deadlist);
476 dsl_deadlist_close(&ds->ds_deadlist);
477 if (ds->ds_prev)
478 dsl_dataset_drop_ref(ds->ds_prev, ds);
479 dsl_dir_close(ds->ds_dir, ds);
480 mutex_destroy(&ds->ds_lock);
481 mutex_destroy(&ds->ds_recvlock);
482 mutex_destroy(&ds->ds_opening_lock);
483 rw_destroy(&ds->ds_rwlock);
484 cv_destroy(&ds->ds_exclusive_cv);
485 kmem_free(ds, sizeof (dsl_dataset_t));
486 if (err) {
487 dmu_buf_rele(dbuf, tag);
488 return (err);
489 }
490 ds = winner;
491 } else {
492 ds->ds_fsid_guid =
493 unique_insert(ds->ds_phys->ds_fsid_guid);
494 }
495 }
496 ASSERT3P(ds->ds_dbuf, ==, dbuf);
497 ASSERT3P(ds->ds_phys, ==, dbuf->db_data);
498 ASSERT(ds->ds_phys->ds_prev_snap_obj != 0 ||
499 spa_version(dp->dp_spa) < SPA_VERSION_ORIGIN ||
500 dp->dp_origin_snap == NULL || ds == dp->dp_origin_snap);
501 mutex_enter(&ds->ds_lock);
502 if (!dsl_pool_sync_context(dp) && DSL_DATASET_IS_DESTROYED(ds)) {
503 mutex_exit(&ds->ds_lock);
504 dmu_buf_rele(ds->ds_dbuf, tag);
505 return (ENOENT);
506 }
507 mutex_exit(&ds->ds_lock);
508 *dsp = ds;
509 return (0);
510}
511
512static int
513dsl_dataset_hold_ref(dsl_dataset_t *ds, void *tag)
514{
515 dsl_pool_t *dp = ds->ds_dir->dd_pool;
516
517 /*
 518	 * In syncing context we don't take the rwlock: there
519 * may be an existing writer waiting for sync phase to
520 * finish. We don't need to worry about such writers, since
521 * sync phase is single-threaded, so the writer can't be
522 * doing anything while we are active.
523 */
524 if (dsl_pool_sync_context(dp)) {
525 ASSERT(!DSL_DATASET_IS_DESTROYED(ds));
526 return (0);
527 }
528
529 /*
530 * Normal users will hold the ds_rwlock as a READER until they
531 * are finished (i.e., call dsl_dataset_rele()). "Owners" will
532 * drop their READER lock after they set the ds_owner field.
533 *
534 * If the dataset is being destroyed, the destroy thread will
535 * obtain a WRITER lock for exclusive access after it's done its
536 * open-context work and then change the ds_owner to
537 * dsl_reaper once destruction is assured. So threads
538 * may block here temporarily, until the "destructability" of
539 * the dataset is determined.
540 */
541 ASSERT(!RW_WRITE_HELD(&dp->dp_config_rwlock));
542 mutex_enter(&ds->ds_lock);
543 while (!rw_tryenter(&ds->ds_rwlock, RW_READER)) {
544 rw_exit(&dp->dp_config_rwlock);
545 cv_wait(&ds->ds_exclusive_cv, &ds->ds_lock);
546 if (DSL_DATASET_IS_DESTROYED(ds)) {
547 mutex_exit(&ds->ds_lock);
548 dsl_dataset_drop_ref(ds, tag);
549 rw_enter(&dp->dp_config_rwlock, RW_READER);
550 return (ENOENT);
551 }
552 /*
553 * The dp_config_rwlock lives above the ds_lock. And
554 * we need to check DSL_DATASET_IS_DESTROYED() while
555 * holding the ds_lock, so we have to drop and reacquire
556 * the ds_lock here.
557 */
558 mutex_exit(&ds->ds_lock);
559 rw_enter(&dp->dp_config_rwlock, RW_READER);
560 mutex_enter(&ds->ds_lock);
561 }
562 mutex_exit(&ds->ds_lock);
563 return (0);
564}
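/*
 * Typical hold/release pattern (a sketch; the caller and dataset name
 * are hypothetical):
 *
 *	dsl_dataset_t *ds;
 *	if (dsl_dataset_hold("pool/fs", FTAG, &ds) == 0) {
 *		... examine ds while holding ds_rwlock as READER ...
 *		dsl_dataset_rele(ds, FTAG);
 *	}
 */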
565
566int
567dsl_dataset_hold_obj(dsl_pool_t *dp, uint64_t dsobj, void *tag,
568 dsl_dataset_t **dsp)
569{
570 int err = dsl_dataset_get_ref(dp, dsobj, tag, dsp);
571
572 if (err)
573 return (err);
574 return (dsl_dataset_hold_ref(*dsp, tag));
575}
576
577int
578dsl_dataset_own_obj(dsl_pool_t *dp, uint64_t dsobj, boolean_t inconsistentok,
579 void *tag, dsl_dataset_t **dsp)
580{
581 int err = dsl_dataset_hold_obj(dp, dsobj, tag, dsp);
582 if (err)
583 return (err);
584 if (!dsl_dataset_tryown(*dsp, inconsistentok, tag)) {
585 dsl_dataset_rele(*dsp, tag);
586 *dsp = NULL;
587 return (EBUSY);
588 }
589 return (0);
590}
591
592int
593dsl_dataset_hold(const char *name, void *tag, dsl_dataset_t **dsp)
594{
595 dsl_dir_t *dd;
596 dsl_pool_t *dp;
597 const char *snapname;
598 uint64_t obj;
599 int err = 0;
600
601 err = dsl_dir_open_spa(NULL, name, FTAG, &dd, &snapname);
602 if (err)
603 return (err);
604
605 dp = dd->dd_pool;
606 obj = dd->dd_phys->dd_head_dataset_obj;
607 rw_enter(&dp->dp_config_rwlock, RW_READER);
608 if (obj)
609 err = dsl_dataset_get_ref(dp, obj, tag, dsp);
610 else
611 err = ENOENT;
612 if (err)
613 goto out;
614
615 err = dsl_dataset_hold_ref(*dsp, tag);
616
617 /* we may be looking for a snapshot */
618 if (err == 0 && snapname != NULL) {
619 dsl_dataset_t *ds = NULL;
620
621 if (*snapname++ != '@') {
622 dsl_dataset_rele(*dsp, tag);
623 err = ENOENT;
624 goto out;
625 }
626
627 dprintf("looking for snapshot '%s'\n", snapname);
628 err = dsl_dataset_snap_lookup(*dsp, snapname, &obj);
629 if (err == 0)
630 err = dsl_dataset_get_ref(dp, obj, tag, &ds);
631 dsl_dataset_rele(*dsp, tag);
632
633 ASSERT3U((err == 0), ==, (ds != NULL));
634
635 if (ds) {
636 mutex_enter(&ds->ds_lock);
637 if (ds->ds_snapname[0] == 0)
638 (void) strlcpy(ds->ds_snapname, snapname,
639 sizeof (ds->ds_snapname));
640 mutex_exit(&ds->ds_lock);
641 err = dsl_dataset_hold_ref(ds, tag);
642 *dsp = err ? NULL : ds;
643 }
644 }
645out:
646 rw_exit(&dp->dp_config_rwlock);
647 dsl_dir_close(dd, FTAG);
648 return (err);
649}
650
651int
652dsl_dataset_own(const char *name, boolean_t inconsistentok,
653 void *tag, dsl_dataset_t **dsp)
654{
655 int err = dsl_dataset_hold(name, tag, dsp);
656 if (err)
657 return (err);
658 if (!dsl_dataset_tryown(*dsp, inconsistentok, tag)) {
659 dsl_dataset_rele(*dsp, tag);
660 return (EBUSY);
661 }
662 return (0);
663}
664
665void
666dsl_dataset_name(dsl_dataset_t *ds, char *name)
667{
668 if (ds == NULL) {
669 (void) strcpy(name, "mos");
670 } else {
671 dsl_dir_name(ds->ds_dir, name);
672 VERIFY(0 == dsl_dataset_get_snapname(ds));
673 if (ds->ds_snapname[0]) {
674 (void) strcat(name, "@");
675 /*
676 * We use a "recursive" mutex so that we
677 * can call dprintf_ds() with ds_lock held.
678 */
679 if (!MUTEX_HELD(&ds->ds_lock)) {
680 mutex_enter(&ds->ds_lock);
681 (void) strcat(name, ds->ds_snapname);
682 mutex_exit(&ds->ds_lock);
683 } else {
684 (void) strcat(name, ds->ds_snapname);
685 }
686 }
687 }
688}
689
690static int
691dsl_dataset_namelen(dsl_dataset_t *ds)
692{
693 int result;
694
695 if (ds == NULL) {
696 result = 3; /* "mos" */
697 } else {
698 result = dsl_dir_namelen(ds->ds_dir);
699 VERIFY(0 == dsl_dataset_get_snapname(ds));
700 if (ds->ds_snapname[0]) {
701 ++result; /* adding one for the @-sign */
702 if (!MUTEX_HELD(&ds->ds_lock)) {
703 mutex_enter(&ds->ds_lock);
704 result += strlen(ds->ds_snapname);
705 mutex_exit(&ds->ds_lock);
706 } else {
707 result += strlen(ds->ds_snapname);
708 }
709 }
710 }
711
712 return (result);
713}
714
715void
716dsl_dataset_drop_ref(dsl_dataset_t *ds, void *tag)
717{
718 dmu_buf_rele(ds->ds_dbuf, tag);
719}
720
721void
722dsl_dataset_rele(dsl_dataset_t *ds, void *tag)
723{
724 if (!dsl_pool_sync_context(ds->ds_dir->dd_pool)) {
725 rw_exit(&ds->ds_rwlock);
726 }
727 dsl_dataset_drop_ref(ds, tag);
728}
729
730void
731dsl_dataset_disown(dsl_dataset_t *ds, void *tag)
732{
733 ASSERT((ds->ds_owner == tag && ds->ds_dbuf) ||
734 (DSL_DATASET_IS_DESTROYED(ds) && ds->ds_dbuf == NULL));
735
736 mutex_enter(&ds->ds_lock);
737 ds->ds_owner = NULL;
738 if (RW_WRITE_HELD(&ds->ds_rwlock)) {
739 rw_exit(&ds->ds_rwlock);
740 cv_broadcast(&ds->ds_exclusive_cv);
741 }
742 mutex_exit(&ds->ds_lock);
743 if (ds->ds_dbuf)
744 dsl_dataset_drop_ref(ds, tag);
745 else
746 dsl_dataset_evict(NULL, ds);
747}
748
749boolean_t
750dsl_dataset_tryown(dsl_dataset_t *ds, boolean_t inconsistentok, void *tag)
751{
752 boolean_t gotit = FALSE;
753
754 mutex_enter(&ds->ds_lock);
755 if (ds->ds_owner == NULL &&
756 (!DS_IS_INCONSISTENT(ds) || inconsistentok)) {
757 ds->ds_owner = tag;
758 if (!dsl_pool_sync_context(ds->ds_dir->dd_pool))
759 rw_exit(&ds->ds_rwlock);
760 gotit = TRUE;
761 }
762 mutex_exit(&ds->ds_lock);
763 return (gotit);
764}
765
766void
767dsl_dataset_make_exclusive(dsl_dataset_t *ds, void *owner)
768{
769 ASSERT3P(owner, ==, ds->ds_owner);
770 if (!RW_WRITE_HELD(&ds->ds_rwlock))
771 rw_enter(&ds->ds_rwlock, RW_WRITER);
772}
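/*
 * Owner lifecycle sketch (the caller and dataset name are
 * hypothetical): owning grants exclusive use and must be paired with
 * dsl_dataset_disown():
 *
 *	dsl_dataset_t *ds;
 *	if (dsl_dataset_own("pool/fs@snap", B_FALSE, FTAG, &ds) == 0) {
 *		... act as the exclusive owner ...
 *		dsl_dataset_disown(ds, FTAG);
 *	}
 */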
773
774uint64_t
775dsl_dataset_create_sync_dd(dsl_dir_t *dd, dsl_dataset_t *origin,
776 uint64_t flags, dmu_tx_t *tx)
777{
778 dsl_pool_t *dp = dd->dd_pool;
779 dmu_buf_t *dbuf;
780 dsl_dataset_phys_t *dsphys;
781 uint64_t dsobj;
782 objset_t *mos = dp->dp_meta_objset;
783
784 if (origin == NULL)
785 origin = dp->dp_origin_snap;
786
787 ASSERT(origin == NULL || origin->ds_dir->dd_pool == dp);
788 ASSERT(origin == NULL || origin->ds_phys->ds_num_children > 0);
789 ASSERT(dmu_tx_is_syncing(tx));
790 ASSERT(dd->dd_phys->dd_head_dataset_obj == 0);
791
792 dsobj = dmu_object_alloc(mos, DMU_OT_DSL_DATASET, 0,
793 DMU_OT_DSL_DATASET, sizeof (dsl_dataset_phys_t), tx);
794 VERIFY(0 == dmu_bonus_hold(mos, dsobj, FTAG, &dbuf));
795 dmu_buf_will_dirty(dbuf, tx);
796 dsphys = dbuf->db_data;
797 bzero(dsphys, sizeof (dsl_dataset_phys_t));
798 dsphys->ds_dir_obj = dd->dd_object;
799 dsphys->ds_flags = flags;
800 dsphys->ds_fsid_guid = unique_create();
801 (void) random_get_pseudo_bytes((void*)&dsphys->ds_guid,
802 sizeof (dsphys->ds_guid));
803 dsphys->ds_snapnames_zapobj =
804 zap_create_norm(mos, U8_TEXTPREP_TOUPPER, DMU_OT_DSL_DS_SNAP_MAP,
805 DMU_OT_NONE, 0, tx);
806 dsphys->ds_creation_time = gethrestime_sec();
807 dsphys->ds_creation_txg = tx->tx_txg == TXG_INITIAL ? 1 : tx->tx_txg;
808
809 if (origin == NULL) {
810 dsphys->ds_deadlist_obj = dsl_deadlist_alloc(mos, tx);
811 } else {
812 dsl_dataset_t *ohds;
813
814 dsphys->ds_prev_snap_obj = origin->ds_object;
815 dsphys->ds_prev_snap_txg =
816 origin->ds_phys->ds_creation_txg;
817 dsphys->ds_used_bytes =
818 origin->ds_phys->ds_used_bytes;
819 dsphys->ds_compressed_bytes =
820 origin->ds_phys->ds_compressed_bytes;
821 dsphys->ds_uncompressed_bytes =
822 origin->ds_phys->ds_uncompressed_bytes;
823 dsphys->ds_bp = origin->ds_phys->ds_bp;
824 dsphys->ds_flags |= origin->ds_phys->ds_flags;
825
826 dmu_buf_will_dirty(origin->ds_dbuf, tx);
827 origin->ds_phys->ds_num_children++;
828
829 VERIFY3U(0, ==, dsl_dataset_hold_obj(dp,
830 origin->ds_dir->dd_phys->dd_head_dataset_obj, FTAG, &ohds));
831 dsphys->ds_deadlist_obj = dsl_deadlist_clone(&ohds->ds_deadlist,
832 dsphys->ds_prev_snap_txg, dsphys->ds_prev_snap_obj, tx);
833 dsl_dataset_rele(ohds, FTAG);
834
835 if (spa_version(dp->dp_spa) >= SPA_VERSION_NEXT_CLONES) {
836 if (origin->ds_phys->ds_next_clones_obj == 0) {
837 origin->ds_phys->ds_next_clones_obj =
838 zap_create(mos,
839 DMU_OT_NEXT_CLONES, DMU_OT_NONE, 0, tx);
840 }
841 VERIFY(0 == zap_add_int(mos,
842 origin->ds_phys->ds_next_clones_obj,
843 dsobj, tx));
844 }
845
846 dmu_buf_will_dirty(dd->dd_dbuf, tx);
847 dd->dd_phys->dd_origin_obj = origin->ds_object;
848 if (spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
849 if (origin->ds_dir->dd_phys->dd_clones == 0) {
850 dmu_buf_will_dirty(origin->ds_dir->dd_dbuf, tx);
851 origin->ds_dir->dd_phys->dd_clones =
852 zap_create(mos,
853 DMU_OT_DSL_CLONES, DMU_OT_NONE, 0, tx);
854 }
855 VERIFY3U(0, ==, zap_add_int(mos,
856 origin->ds_dir->dd_phys->dd_clones, dsobj, tx));
857 }
858 }
859
860 if (spa_version(dp->dp_spa) >= SPA_VERSION_UNIQUE_ACCURATE)
861 dsphys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;
862
863 dmu_buf_rele(dbuf, FTAG);
864
865 dmu_buf_will_dirty(dd->dd_dbuf, tx);
866 dd->dd_phys->dd_head_dataset_obj = dsobj;
867
868 return (dsobj);
869}
870
871uint64_t
872dsl_dataset_create_sync(dsl_dir_t *pdd, const char *lastname,
873 dsl_dataset_t *origin, uint64_t flags, cred_t *cr, dmu_tx_t *tx)
874{
875 dsl_pool_t *dp = pdd->dd_pool;
876 uint64_t dsobj, ddobj;
877 dsl_dir_t *dd;
878
879 ASSERT(lastname[0] != '@');
880
881 ddobj = dsl_dir_create_sync(dp, pdd, lastname, tx);
882 VERIFY(0 == dsl_dir_open_obj(dp, ddobj, lastname, FTAG, &dd));
883
884 dsobj = dsl_dataset_create_sync_dd(dd, origin, flags, tx);
885
886 dsl_deleg_set_create_perms(dd, tx, cr);
887
888 dsl_dir_close(dd, FTAG);
889
890 /*
891 * If we are creating a clone, make sure we zero out any stale
 892	 * data from the origin snapshot's ZIL header.
893 */
894 if (origin != NULL) {
895 dsl_dataset_t *ds;
896 objset_t *os;
897
898 VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));
899 VERIFY3U(0, ==, dmu_objset_from_ds(ds, &os));
900 bzero(&os->os_zil_header, sizeof (os->os_zil_header));
901 dsl_dataset_dirty(ds, tx);
902 dsl_dataset_rele(ds, FTAG);
903 }
904
905 return (dsobj);
906}
907
908struct destroyarg {
909 dsl_sync_task_group_t *dstg;
910 char *snapname;
911 char *failed;
912 boolean_t defer;
913};
914
915static int
916dsl_snapshot_destroy_one(const char *name, void *arg)
917{
918 struct destroyarg *da = arg;
919 dsl_dataset_t *ds;
920 int err;
921 char *dsname;
922
923 dsname = kmem_asprintf("%s@%s", name, da->snapname);
924 err = dsl_dataset_own(dsname, B_TRUE, da->dstg, &ds);
925 strfree(dsname);
926 if (err == 0) {
927 struct dsl_ds_destroyarg *dsda;
928
929 dsl_dataset_make_exclusive(ds, da->dstg);
930 dsda = kmem_zalloc(sizeof (struct dsl_ds_destroyarg), KM_SLEEP);
931 dsda->ds = ds;
932 dsda->defer = da->defer;
933 dsl_sync_task_create(da->dstg, dsl_dataset_destroy_check,
934 dsl_dataset_destroy_sync, dsda, da->dstg, 0);
935 } else if (err == ENOENT) {
936 err = 0;
937 } else {
938 (void) strcpy(da->failed, name);
939 }
940 return (err);
941}
942
943/*
944 * Destroy 'snapname' in all descendants of 'fsname'.
945 */
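/*
 * For example (illustrative names): dsl_snapshots_destroy("tank",
 * "mysnap", B_FALSE) destroys tank@mysnap and <descendant>@mysnap for
 * every descendant file system, batched into one sync task group.
 */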
946#pragma weak dmu_snapshots_destroy = dsl_snapshots_destroy
947int
948dsl_snapshots_destroy(char *fsname, char *snapname, boolean_t defer)
949{
950 int err;
951 struct destroyarg da;
952 dsl_sync_task_t *dst;
953 spa_t *spa;
954
955 err = spa_open(fsname, &spa, FTAG);
956 if (err)
957 return (err);
958 da.dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
959 da.snapname = snapname;
960 da.failed = fsname;
961 da.defer = defer;
962
963 err = dmu_objset_find(fsname,
964 dsl_snapshot_destroy_one, &da, DS_FIND_CHILDREN);
965
966 if (err == 0)
967 err = dsl_sync_task_group_wait(da.dstg);
968
969 for (dst = list_head(&da.dstg->dstg_tasks); dst;
970 dst = list_next(&da.dstg->dstg_tasks, dst)) {
971 struct dsl_ds_destroyarg *dsda = dst->dst_arg1;
972 dsl_dataset_t *ds = dsda->ds;
973
974 /*
975 * Return the file system name that triggered the error
976 */
977 if (dst->dst_err) {
978 dsl_dataset_name(ds, fsname);
979 *strchr(fsname, '@') = '\0';
980 }
981 ASSERT3P(dsda->rm_origin, ==, NULL);
982 dsl_dataset_disown(ds, da.dstg);
983 kmem_free(dsda, sizeof (struct dsl_ds_destroyarg));
984 }
985
986 dsl_sync_task_group_destroy(da.dstg);
987 spa_close(spa, FTAG);
988 return (err);
989}
990
991static boolean_t
992dsl_dataset_might_destroy_origin(dsl_dataset_t *ds)
993{
994 boolean_t might_destroy = B_FALSE;
995
996 mutex_enter(&ds->ds_lock);
997 if (ds->ds_phys->ds_num_children == 2 && ds->ds_userrefs == 0 &&
998 DS_IS_DEFER_DESTROY(ds))
999 might_destroy = B_TRUE;
1000 mutex_exit(&ds->ds_lock);
1001
1002 return (might_destroy);
1003}
1004
1005/*
1006 * If we're removing a clone, and these three conditions are true:
1007 * 1) the clone's origin has no other children
1008 * 2) the clone's origin has no user references
1009 * 3) the clone's origin has been marked for deferred destruction
1010 * Then, prepare to remove the origin as part of this sync task group.
1011 */
1012static int
1013dsl_dataset_origin_rm_prep(struct dsl_ds_destroyarg *dsda, void *tag)
1014{
1015 dsl_dataset_t *ds = dsda->ds;
1016 dsl_dataset_t *origin = ds->ds_prev;
1017
1018 if (dsl_dataset_might_destroy_origin(origin)) {
1019 char *name;
1020 int namelen;
1021 int error;
1022
1023 namelen = dsl_dataset_namelen(origin) + 1;
1024 name = kmem_alloc(namelen, KM_SLEEP);
1025 dsl_dataset_name(origin, name);
1026#ifdef _KERNEL
1027 error = zfs_unmount_snap(name, NULL);
1028 if (error) {
1029 kmem_free(name, namelen);
1030 return (error);
1031 }
1032#endif
1033 error = dsl_dataset_own(name, B_TRUE, tag, &origin);
1034 kmem_free(name, namelen);
1035 if (error)
1036 return (error);
1037 dsda->rm_origin = origin;
1038 dsl_dataset_make_exclusive(origin, tag);
1039 }
1040
1041 return (0);
1042}
1043
1044/*
1045 * ds must be opened as OWNER. On return (whether successful or not),
1046 * ds will be closed and caller can no longer dereference it.
1047 */
1048int
1049dsl_dataset_destroy(dsl_dataset_t *ds, void *tag, boolean_t defer)
1050{
1051 int err;
1052 dsl_sync_task_group_t *dstg;
1053 objset_t *os;
1054 dsl_dir_t *dd;
1055 uint64_t obj;
1056 struct dsl_ds_destroyarg dsda = { 0 };
1057 dsl_dataset_t dummy_ds = { 0 };
1058
1059 dsda.ds = ds;
1060
1061 if (dsl_dataset_is_snapshot(ds)) {
1062 /* Destroying a snapshot is simpler */
1063 dsl_dataset_make_exclusive(ds, tag);
1064
1065 dsda.defer = defer;
1066 err = dsl_sync_task_do(ds->ds_dir->dd_pool,
1067 dsl_dataset_destroy_check, dsl_dataset_destroy_sync,
1068 &dsda, tag, 0);
1069 ASSERT3P(dsda.rm_origin, ==, NULL);
1070 goto out;
1071 } else if (defer) {
1072 err = EINVAL;
1073 goto out;
1074 }
1075
1076 dd = ds->ds_dir;
1077 dummy_ds.ds_dir = dd;
1078 dummy_ds.ds_object = ds->ds_object;
1079
1080 /*
1081 * Check for errors and mark this ds as inconsistent, in
1082 * case we crash while freeing the objects.
1083 */
1084 err = dsl_sync_task_do(dd->dd_pool, dsl_dataset_destroy_begin_check,
1085 dsl_dataset_destroy_begin_sync, ds, NULL, 0);
1086 if (err)
1087 goto out;
1088
1089 err = dmu_objset_from_ds(ds, &os);
1090 if (err)
1091 goto out;
1092
1093 /*
1094 * remove the objects in open context, so that we won't
1095 * have too much to do in syncing context.
1096 */
1097 for (obj = 0; err == 0; err = dmu_object_next(os, &obj, FALSE,
1098 ds->ds_phys->ds_prev_snap_txg)) {
1099 /*
1100 * Ignore errors, if there is not enough disk space
1101 * we will deal with it in dsl_dataset_destroy_sync().
1102 */
1103 (void) dmu_free_object(os, obj);
1104 }
1105 if (err != ESRCH)
1106 goto out;
1107
1108 /*
1109 * Only the ZIL knows how to free log blocks.
1110 */
1111 zil_destroy(dmu_objset_zil(os), B_FALSE);
1112
1113 /*
1114 * Sync out all in-flight IO.
1115 */
1116 txg_wait_synced(dd->dd_pool, 0);
1117
1118 /*
1119 * If we managed to free all the objects in open
1120 * context, the user space accounting should be zero.
1121 */
1122 if (ds->ds_phys->ds_bp.blk_fill == 0 &&
1123 dmu_objset_userused_enabled(os)) {
1124 uint64_t count;
1125
1126 ASSERT(zap_count(os, DMU_USERUSED_OBJECT, &count) != 0 ||
1127 count == 0);
1128 ASSERT(zap_count(os, DMU_GROUPUSED_OBJECT, &count) != 0 ||
1129 count == 0);
1130 }
1131
1132 rw_enter(&dd->dd_pool->dp_config_rwlock, RW_READER);
1133 err = dsl_dir_open_obj(dd->dd_pool, dd->dd_object, NULL, FTAG, &dd);
1134 rw_exit(&dd->dd_pool->dp_config_rwlock);
1135
1136 if (err)
1137 goto out;
1138
1139 /*
1140 * Blow away the dsl_dir + head dataset.
1141 */
1142 dsl_dataset_make_exclusive(ds, tag);
1143 /*
1144 * If we're removing a clone, we might also need to remove its
1145 * origin.
1146 */
1147 do {
1148 dsda.need_prep = B_FALSE;
1149 if (dsl_dir_is_clone(dd)) {
1150 err = dsl_dataset_origin_rm_prep(&dsda, tag);
1151 if (err) {
1152 dsl_dir_close(dd, FTAG);
1153 goto out;
1154 }
1155 }
1156
1157 dstg = dsl_sync_task_group_create(ds->ds_dir->dd_pool);
1158 dsl_sync_task_create(dstg, dsl_dataset_destroy_check,
1159 dsl_dataset_destroy_sync, &dsda, tag, 0);
1160 dsl_sync_task_create(dstg, dsl_dir_destroy_check,
1161 dsl_dir_destroy_sync, &dummy_ds, FTAG, 0);
1162 err = dsl_sync_task_group_wait(dstg);
1163 dsl_sync_task_group_destroy(dstg);
1164
1165 /*
1166 * We could be racing against 'zfs release' or 'zfs destroy -d'
1167 * on the origin snap, in which case we can get EBUSY if we
1168 * needed to destroy the origin snap but were not ready to
1169 * do so.
1170 */
1171 if (dsda.need_prep) {
1172 ASSERT(err == EBUSY);
1173 ASSERT(dsl_dir_is_clone(dd));
1174 ASSERT(dsda.rm_origin == NULL);
1175 }
1176 } while (dsda.need_prep);
1177
1178 if (dsda.rm_origin != NULL)
1179 dsl_dataset_disown(dsda.rm_origin, tag);
1180
1181 /* if it is successful, dsl_dir_destroy_sync will close the dd */
1182 if (err)
1183 dsl_dir_close(dd, FTAG);
1184out:
1185 dsl_dataset_disown(ds, tag);
1186 return (err);
1187}
1188
1189blkptr_t *
1190dsl_dataset_get_blkptr(dsl_dataset_t *ds)
1191{
1192 return (&ds->ds_phys->ds_bp);
1193}
1194
1195void
1196dsl_dataset_set_blkptr(dsl_dataset_t *ds, blkptr_t *bp, dmu_tx_t *tx)
1197{
1198 ASSERT(dmu_tx_is_syncing(tx));
1199 /* If it's the meta-objset, set dp_meta_rootbp */
1200 if (ds == NULL) {
1201 tx->tx_pool->dp_meta_rootbp = *bp;
1202 } else {
1203 dmu_buf_will_dirty(ds->ds_dbuf, tx);
1204 ds->ds_phys->ds_bp = *bp;
1205 }
1206}
1207
1208spa_t *
1209dsl_dataset_get_spa(dsl_dataset_t *ds)
1210{
1211 return (ds->ds_dir->dd_pool->dp_spa);
1212}
1213
1214void
1215dsl_dataset_dirty(dsl_dataset_t *ds, dmu_tx_t *tx)
1216{
1217 dsl_pool_t *dp;
1218
1219 if (ds == NULL) /* this is the meta-objset */
1220 return;
1221
1222 ASSERT(ds->ds_objset != NULL);
1223
1224 if (ds->ds_phys->ds_next_snap_obj != 0)
1225 panic("dirtying snapshot!");
1226
1227 dp = ds->ds_dir->dd_pool;
1228
1229 if (txg_list_add(&dp->dp_dirty_datasets, ds, tx->tx_txg) == 0) {
1230 /* up the hold count until we can be written out */
1231 dmu_buf_add_ref(ds->ds_dbuf, ds);
1232 }
1233}
1234
1235/*
1236 * The unique space in the head dataset can be calculated by subtracting
1237 * the space used in the most recent snapshot, that is still being used
1238 * in this file system, from the space currently in use. To figure out
1239 * the space in the most recent snapshot still in use, we need to take
1240 * the total space used in the snapshot and subtract out the space that
1241 * has been freed up since the snapshot was taken.
1242 */
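/*
 * Worked example (illustrative numbers): if the head uses 100G, the
 * most recent snapshot used 60G when it was taken, and 20G of the
 * snapshot's blocks have since been freed (i.e. are on the deadlist),
 * then 40G of the snapshot is still in use and the head's unique
 * space is 100G - 40G = 60G.
 */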
1243static void
1244dsl_dataset_recalc_head_uniq(dsl_dataset_t *ds)
1245{
1246 uint64_t mrs_used;
1247 uint64_t dlused, dlcomp, dluncomp;
1248
1249 ASSERT(!dsl_dataset_is_snapshot(ds));
1250
1251 if (ds->ds_phys->ds_prev_snap_obj != 0)
1252 mrs_used = ds->ds_prev->ds_phys->ds_used_bytes;
1253 else
1254 mrs_used = 0;
1255
1256 dsl_deadlist_space(&ds->ds_deadlist, &dlused, &dlcomp, &dluncomp);
1257
1258 ASSERT3U(dlused, <=, mrs_used);
1259 ds->ds_phys->ds_unique_bytes =
1260 ds->ds_phys->ds_used_bytes - (mrs_used - dlused);
1261
1262 if (spa_version(ds->ds_dir->dd_pool->dp_spa) >=
1263 SPA_VERSION_UNIQUE_ACCURATE)
1264 ds->ds_phys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;
1265}
1266
1267struct killarg {
1268 dsl_dataset_t *ds;
1269 dmu_tx_t *tx;
1270};
1271
1272/* ARGSUSED */
1273static int
1274kill_blkptr(spa_t *spa, zilog_t *zilog, const blkptr_t *bp, arc_buf_t *pbuf,
1275 const zbookmark_t *zb, const dnode_phys_t *dnp, void *arg)
1276{
1277 struct killarg *ka = arg;
1278 dmu_tx_t *tx = ka->tx;
1279
1280 if (bp == NULL)
1281 return (0);
1282
1283 if (zb->zb_level == ZB_ZIL_LEVEL) {
1284 ASSERT(zilog != NULL);
1285 /*
1286 * It's a block in the intent log. It has no
1287 * accounting, so just free it.
1288 */
1289 dsl_free(ka->tx->tx_pool, ka->tx->tx_txg, bp);
1290 } else {
1291 ASSERT(zilog == NULL);
1292 ASSERT3U(bp->blk_birth, >, ka->ds->ds_phys->ds_prev_snap_txg);
1293 (void) dsl_dataset_block_kill(ka->ds, bp, tx, B_FALSE);
1294 }
1295
1296 return (0);
1297}
1298
1299/* ARGSUSED */
1300static int
1301dsl_dataset_destroy_begin_check(void *arg1, void *arg2, dmu_tx_t *tx)
1302{
1303 dsl_dataset_t *ds = arg1;
1304 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
1305 uint64_t count;
1306 int err;
1307
1308 /*
1309 * Can't delete a head dataset if there are snapshots of it.
1310 * (Except if the only snapshots are from the branch we cloned
1311 * from.)
1312 */
1313 if (ds->ds_prev != NULL &&
1314 ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object)
1315 return (EBUSY);
1316
1317 /*
1318 * This is really a dsl_dir thing, but check it here so that
1319 * we'll be less likely to leave this dataset inconsistent &
1320 * nearly destroyed.
1321 */
1322 err = zap_count(mos, ds->ds_dir->dd_phys->dd_child_dir_zapobj, &count);
1323 if (err)
1324 return (err);
1325 if (count != 0)
1326 return (EEXIST);
1327
1328 return (0);
1329}
1330
1331/* ARGSUSED */
1332static void
1333dsl_dataset_destroy_begin_sync(void *arg1, void *arg2, dmu_tx_t *tx)
1334{
1335 dsl_dataset_t *ds = arg1;
1336 dsl_pool_t *dp = ds->ds_dir->dd_pool;
1337
1338 /* Mark it as inconsistent on-disk, in case we crash */
1339 dmu_buf_will_dirty(ds->ds_dbuf, tx);
1340 ds->ds_phys->ds_flags |= DS_FLAG_INCONSISTENT;
1341
1342 spa_history_log_internal(LOG_DS_DESTROY_BEGIN, dp->dp_spa, tx,
1343 "dataset = %llu", ds->ds_object);
1344}
1345
1346static int
1347dsl_dataset_origin_check(struct dsl_ds_destroyarg *dsda, void *tag,
1348 dmu_tx_t *tx)
1349{
1350 dsl_dataset_t *ds = dsda->ds;
1351 dsl_dataset_t *ds_prev = ds->ds_prev;
1352
1353 if (dsl_dataset_might_destroy_origin(ds_prev)) {
1354 struct dsl_ds_destroyarg ndsda = {0};
1355
1356 /*
1357 * If we're not prepared to remove the origin, don't remove
1358 * the clone either.
1359 */
1360 if (dsda->rm_origin == NULL) {
1361 dsda->need_prep = B_TRUE;
1362 return (EBUSY);
1363 }
1364
1365 ndsda.ds = ds_prev;
1366 ndsda.is_origin_rm = B_TRUE;
1367 return (dsl_dataset_destroy_check(&ndsda, tag, tx));
1368 }
1369
1370 /*
1371 * If we're not going to remove the origin after all,
1372 * undo the open context setup.
1373 */
1374 if (dsda->rm_origin != NULL) {
1375 dsl_dataset_disown(dsda->rm_origin, tag);
1376 dsda->rm_origin = NULL;
1377 }
1378
1379 return (0);
1380}
1381
1382/*
1383 * If you add new checks here, you may need to add
1384 * additional checks to the "temporary" case in
1385 * snapshot_check() in dmu_objset.c.
1386 */
1387/* ARGSUSED */
1388int
1389dsl_dataset_destroy_check(void *arg1, void *arg2, dmu_tx_t *tx)
1390{
1391 struct dsl_ds_destroyarg *dsda = arg1;
1392 dsl_dataset_t *ds = dsda->ds;
1393
 1394	/* we have an owner hold, so no one else can destroy us */
1395 ASSERT(!DSL_DATASET_IS_DESTROYED(ds));
1396
1397 /*
1398 * Only allow deferred destroy on pools that support it.
1399 * NOTE: deferred destroy is only supported on snapshots.
1400 */
1401 if (dsda->defer) {
1402 if (spa_version(ds->ds_dir->dd_pool->dp_spa) <
1403 SPA_VERSION_USERREFS)
1404 return (ENOTSUP);
1405 ASSERT(dsl_dataset_is_snapshot(ds));
1406 return (0);
1407 }
1408
1409 /*
1410 * Can't delete a head dataset if there are snapshots of it.
1411 * (Except if the only snapshots are from the branch we cloned
1412 * from.)
1413 */
1414 if (ds->ds_prev != NULL &&
1415 ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object)
1416 return (EBUSY);
1417
1418 /*
1419 * If we made changes this txg, traverse_dsl_dataset won't find
1420 * them. Try again.
1421 */
1422 if (ds->ds_phys->ds_bp.blk_birth >= tx->tx_txg)
1423 return (EAGAIN);
1424
1425 if (dsl_dataset_is_snapshot(ds)) {
1426 /*
1427 * If this snapshot has an elevated user reference count,
1428 * we can't destroy it yet.
1429 */
1430 if (ds->ds_userrefs > 0 && !dsda->releasing)
1431 return (EBUSY);
1432
1433 mutex_enter(&ds->ds_lock);
1434 /*
1435 * Can't delete a branch point. However, if we're destroying
1436 * a clone and removing its origin due to it having a user
1437 * hold count of 0 and having been marked for deferred destroy,
1438 * it's OK for the origin to have a single clone.
1439 */
1440 if (ds->ds_phys->ds_num_children >
1441 (dsda->is_origin_rm ? 2 : 1)) {
1442 mutex_exit(&ds->ds_lock);
1443 return (EEXIST);
1444 }
1445 mutex_exit(&ds->ds_lock);
1446 } else if (dsl_dir_is_clone(ds->ds_dir)) {
1447 return (dsl_dataset_origin_check(dsda, arg2, tx));
1448 }
1449
1450 /* XXX we should do some i/o error checking... */
1451 return (0);
1452}
1453
1454struct refsarg {
1455 kmutex_t lock;
1456 boolean_t gone;
1457 kcondvar_t cv;
1458};
1459
1460/* ARGSUSED */
1461static void
1462dsl_dataset_refs_gone(dmu_buf_t *db, void *argv)
1463{
1464 struct refsarg *arg = argv;
1465
1466 mutex_enter(&arg->lock);
1467 arg->gone = TRUE;
1468 cv_signal(&arg->cv);
1469 mutex_exit(&arg->lock);
1470}
1471
1472static void
1473dsl_dataset_drain_refs(dsl_dataset_t *ds, void *tag)
1474{
1475 struct refsarg arg;
1476
 1477	bzero(&arg, sizeof (arg));
1478 mutex_init(&arg.lock, NULL, MUTEX_DEFAULT, NULL);
1479 cv_init(&arg.cv, NULL, CV_DEFAULT, NULL);
1480 arg.gone = FALSE;
1481 (void) dmu_buf_update_user(ds->ds_dbuf, ds, &arg, &ds->ds_phys,
1482 dsl_dataset_refs_gone);
1483 dmu_buf_rele(ds->ds_dbuf, tag);
1484 mutex_enter(&arg.lock);
1485 while (!arg.gone)
1486 cv_wait(&arg.cv, &arg.lock);
1487 ASSERT(arg.gone);
1488 mutex_exit(&arg.lock);
1489 ds->ds_dbuf = NULL;
1490 ds->ds_phys = NULL;
1491 mutex_destroy(&arg.lock);
1492 cv_destroy(&arg.cv);
1493}
1494
1495static void
1496remove_from_next_clones(dsl_dataset_t *ds, uint64_t obj, dmu_tx_t *tx)
1497{
1498 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
1499 uint64_t count;
1500 int err;
1501
1502 ASSERT(ds->ds_phys->ds_num_children >= 2);
1503 err = zap_remove_int(mos, ds->ds_phys->ds_next_clones_obj, obj, tx);
1504 /*
1505 * The err should not be ENOENT, but a bug in a previous version
1506 * of the code could cause upgrade_clones_cb() to not set
1507 * ds_next_snap_obj when it should, leading to a missing entry.
1508 * If we knew that the pool was created after
1509 * SPA_VERSION_NEXT_CLONES, we could assert that it isn't
1510 * ENOENT. However, at least we can check that we don't have
1511 * too many entries in the next_clones_obj even after failing to
1512 * remove this one.
1513 */
1514 if (err != ENOENT) {
1515 VERIFY3U(err, ==, 0);
1516 }
1517 ASSERT3U(0, ==, zap_count(mos, ds->ds_phys->ds_next_clones_obj,
1518 &count));
1519 ASSERT3U(count, <=, ds->ds_phys->ds_num_children - 2);
1520}
1521
1522static void
1523dsl_dataset_remove_clones_key(dsl_dataset_t *ds, uint64_t mintxg, dmu_tx_t *tx)
1524{
1525 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
1526 zap_cursor_t zc;
1527 zap_attribute_t za;
1528
1529 /*
1530 * If it is the old version, dd_clones doesn't exist so we can't
1531 * find the clones, but deadlist_remove_key() is a no-op so it
1532 * doesn't matter.
1533 */
1534 if (ds->ds_dir->dd_phys->dd_clones == 0)
1535 return;
1536
1537 for (zap_cursor_init(&zc, mos, ds->ds_dir->dd_phys->dd_clones);
1538 zap_cursor_retrieve(&zc, &za) == 0;
1539 zap_cursor_advance(&zc)) {
1540 dsl_dataset_t *clone;
1541
1542 VERIFY3U(0, ==, dsl_dataset_hold_obj(ds->ds_dir->dd_pool,
1543 za.za_first_integer, FTAG, &clone));
1544 if (clone->ds_dir->dd_origin_txg > mintxg) {
1545 dsl_deadlist_remove_key(&clone->ds_deadlist,
1546 mintxg, tx);
1547 dsl_dataset_remove_clones_key(clone, mintxg, tx);
1548 }
1549 dsl_dataset_rele(clone, FTAG);
1550 }
1551 zap_cursor_fini(&zc);
1552}
1553
1554struct process_old_arg {
1555 dsl_dataset_t *ds;
1556 dsl_dataset_t *ds_prev;
1557 boolean_t after_branch_point;
1558 zio_t *pio;
1559 uint64_t used, comp, uncomp;
1560};
1561
1562static int
1563process_old_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
1564{
1565 struct process_old_arg *poa = arg;
1566 dsl_pool_t *dp = poa->ds->ds_dir->dd_pool;
1567
1568 if (bp->blk_birth <= poa->ds->ds_phys->ds_prev_snap_txg) {
1569 dsl_deadlist_insert(&poa->ds->ds_deadlist, bp, tx);
1570 if (poa->ds_prev && !poa->after_branch_point &&
1571 bp->blk_birth >
1572 poa->ds_prev->ds_phys->ds_prev_snap_txg) {
1573 poa->ds_prev->ds_phys->ds_unique_bytes +=
1574 bp_get_dsize_sync(dp->dp_spa, bp);
1575 }
1576 } else {
1577 poa->used += bp_get_dsize_sync(dp->dp_spa, bp);
1578 poa->comp += BP_GET_PSIZE(bp);
1579 poa->uncomp += BP_GET_UCSIZE(bp);
1580 dsl_free_sync(poa->pio, dp, tx->tx_txg, bp);
1581 }
1582 return (0);
1583}
1584
1585static void
1586process_old_deadlist(dsl_dataset_t *ds, dsl_dataset_t *ds_prev,
1587 dsl_dataset_t *ds_next, boolean_t after_branch_point, dmu_tx_t *tx)
1588{
1589 struct process_old_arg poa = { 0 };
1590 dsl_pool_t *dp = ds->ds_dir->dd_pool;
1591 objset_t *mos = dp->dp_meta_objset;
1592
1593 ASSERT(ds->ds_deadlist.dl_oldfmt);
1594 ASSERT(ds_next->ds_deadlist.dl_oldfmt);
1595
1596 poa.ds = ds;
1597 poa.ds_prev = ds_prev;
1598 poa.after_branch_point = after_branch_point;
1599 poa.pio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
1600 VERIFY3U(0, ==, bpobj_iterate(&ds_next->ds_deadlist.dl_bpobj,
1601 process_old_cb, &poa, tx));
1602 VERIFY3U(zio_wait(poa.pio), ==, 0);
1603 ASSERT3U(poa.used, ==, ds->ds_phys->ds_unique_bytes);
1604
1605 /* change snapused */
1606 dsl_dir_diduse_space(ds->ds_dir, DD_USED_SNAP,
1607 -poa.used, -poa.comp, -poa.uncomp, tx);
1608
1609 /* swap next's deadlist to our deadlist */
1610 dsl_deadlist_close(&ds->ds_deadlist);
1611 dsl_deadlist_close(&ds_next->ds_deadlist);
1612 SWITCH64(ds_next->ds_phys->ds_deadlist_obj,
1613 ds->ds_phys->ds_deadlist_obj);
1614 dsl_deadlist_open(&ds->ds_deadlist, mos, ds->ds_phys->ds_deadlist_obj);
1615 dsl_deadlist_open(&ds_next->ds_deadlist, mos,
1616 ds_next->ds_phys->ds_deadlist_obj);
1617}
1618
1619void
1620dsl_dataset_destroy_sync(void *arg1, void *tag, dmu_tx_t *tx)
1621{
1622 struct dsl_ds_destroyarg *dsda = arg1;
1623 dsl_dataset_t *ds = dsda->ds;
1624 int err;
1625 int after_branch_point = FALSE;
1626 dsl_pool_t *dp = ds->ds_dir->dd_pool;
1627 objset_t *mos = dp->dp_meta_objset;
1628 dsl_dataset_t *ds_prev = NULL;
1629 boolean_t wont_destroy;
1630 uint64_t obj;
1631
1632 wont_destroy = (dsda->defer &&
1633 (ds->ds_userrefs > 0 || ds->ds_phys->ds_num_children > 1));
1634
1635 ASSERT(ds->ds_owner || wont_destroy);
1636 ASSERT(dsda->defer || ds->ds_phys->ds_num_children <= 1);
1637 ASSERT(ds->ds_prev == NULL ||
1638 ds->ds_prev->ds_phys->ds_next_snap_obj != ds->ds_object);
1639 ASSERT3U(ds->ds_phys->ds_bp.blk_birth, <=, tx->tx_txg);
1640
1641 if (wont_destroy) {
1642 ASSERT(spa_version(dp->dp_spa) >= SPA_VERSION_USERREFS);
1643 dmu_buf_will_dirty(ds->ds_dbuf, tx);
1644 ds->ds_phys->ds_flags |= DS_FLAG_DEFER_DESTROY;
1645 return;
1646 }
1647
1648 /* signal any waiters that this dataset is going away */
1649 mutex_enter(&ds->ds_lock);
1650 ds->ds_owner = dsl_reaper;
1651 cv_broadcast(&ds->ds_exclusive_cv);
1652 mutex_exit(&ds->ds_lock);
1653
1654 /* Remove our reservation */
1655 if (ds->ds_reserved != 0) {
1656 dsl_prop_setarg_t psa;
1657 uint64_t value = 0;
1658
1659 dsl_prop_setarg_init_uint64(&psa, "refreservation",
1660 (ZPROP_SRC_NONE | ZPROP_SRC_LOCAL | ZPROP_SRC_RECEIVED),
1661 &value);
1662 psa.psa_effective_value = 0; /* predict default value */
1663
1664 dsl_dataset_set_reservation_sync(ds, &psa, tx);
1665 ASSERT3U(ds->ds_reserved, ==, 0);
1666 }
1667
1668 ASSERT(RW_WRITE_HELD(&dp->dp_config_rwlock));
1669
1670 dsl_scan_ds_destroyed(ds, tx);
1671
1672 obj = ds->ds_object;
1673
1674 if (ds->ds_phys->ds_prev_snap_obj != 0) {
1675 if (ds->ds_prev) {
1676 ds_prev = ds->ds_prev;
1677 } else {
1678 VERIFY(0 == dsl_dataset_hold_obj(dp,
1679 ds->ds_phys->ds_prev_snap_obj, FTAG, &ds_prev));
1680 }
1681 after_branch_point =
1682 (ds_prev->ds_phys->ds_next_snap_obj != obj);
1683
1684 dmu_buf_will_dirty(ds_prev->ds_dbuf, tx);
1685 if (after_branch_point &&
1686 ds_prev->ds_phys->ds_next_clones_obj != 0) {
1687 remove_from_next_clones(ds_prev, obj, tx);
1688 if (ds->ds_phys->ds_next_snap_obj != 0) {
1689 VERIFY(0 == zap_add_int(mos,
1690 ds_prev->ds_phys->ds_next_clones_obj,
1691 ds->ds_phys->ds_next_snap_obj, tx));
1692 }
1693 }
1694 if (after_branch_point &&
1695 ds->ds_phys->ds_next_snap_obj == 0) {
1696 /* This clone is toast. */
1697 ASSERT(ds_prev->ds_phys->ds_num_children > 1);
1698 ds_prev->ds_phys->ds_num_children--;
1699
1700 /*
1701 * If the clone's origin has no other clones, no
1702 * user holds, and has been marked for deferred
1703 * deletion, then we should have done the necessary
1704 * destroy setup for it.
1705 */
1706 if (ds_prev->ds_phys->ds_num_children == 1 &&
1707 ds_prev->ds_userrefs == 0 &&
1708 DS_IS_DEFER_DESTROY(ds_prev)) {
1709 ASSERT3P(dsda->rm_origin, !=, NULL);
1710 } else {
1711 ASSERT3P(dsda->rm_origin, ==, NULL);
1712 }
1713 } else if (!after_branch_point) {
1714 ds_prev->ds_phys->ds_next_snap_obj =
1715 ds->ds_phys->ds_next_snap_obj;
1716 }
1717 }
1718
1719 if (dsl_dataset_is_snapshot(ds)) {
1720 dsl_dataset_t *ds_next;
1721 uint64_t old_unique;
1722 uint64_t used = 0, comp = 0, uncomp = 0;
1723
1724 VERIFY(0 == dsl_dataset_hold_obj(dp,
1725 ds->ds_phys->ds_next_snap_obj, FTAG, &ds_next));
1726 ASSERT3U(ds_next->ds_phys->ds_prev_snap_obj, ==, obj);
1727
1728 old_unique = ds_next->ds_phys->ds_unique_bytes;
1729
1730 dmu_buf_will_dirty(ds_next->ds_dbuf, tx);
1731 ds_next->ds_phys->ds_prev_snap_obj =
1732 ds->ds_phys->ds_prev_snap_obj;
1733 ds_next->ds_phys->ds_prev_snap_txg =
1734 ds->ds_phys->ds_prev_snap_txg;
1735 ASSERT3U(ds->ds_phys->ds_prev_snap_txg, ==,
1736 ds_prev ? ds_prev->ds_phys->ds_creation_txg : 0);
1737
1738
1739 if (ds_next->ds_deadlist.dl_oldfmt) {
1740 process_old_deadlist(ds, ds_prev, ds_next,
1741 after_branch_point, tx);
1742 } else {
1743 /* Adjust prev's unique space. */
1744 if (ds_prev && !after_branch_point) {
1745 dsl_deadlist_space_range(&ds_next->ds_deadlist,
1746 ds_prev->ds_phys->ds_prev_snap_txg,
1747 ds->ds_phys->ds_prev_snap_txg,
1748 &used, &comp, &uncomp);
1749 ds_prev->ds_phys->ds_unique_bytes += used;
1750 }
1751
1752 /* Adjust snapused. */
1753 dsl_deadlist_space_range(&ds_next->ds_deadlist,
1754 ds->ds_phys->ds_prev_snap_txg, UINT64_MAX,
1755 &used, &comp, &uncomp);
1756 dsl_dir_diduse_space(ds->ds_dir, DD_USED_SNAP,
1757 -used, -comp, -uncomp, tx);
1758
1759 /* Move blocks to be freed to pool's free list. */
1760 dsl_deadlist_move_bpobj(&ds_next->ds_deadlist,
1761 &dp->dp_free_bpobj, ds->ds_phys->ds_prev_snap_txg,
1762 tx);
1763 dsl_dir_diduse_space(tx->tx_pool->dp_free_dir,
1764 DD_USED_HEAD, used, comp, uncomp, tx);
1765 dsl_dir_dirty(tx->tx_pool->dp_free_dir, tx);
1766
1767 /* Merge our deadlist into next's and free it. */
1768 dsl_deadlist_merge(&ds_next->ds_deadlist,
1769 ds->ds_phys->ds_deadlist_obj, tx);
1770 }
1771 dsl_deadlist_close(&ds->ds_deadlist);
1772 dsl_deadlist_free(mos, ds->ds_phys->ds_deadlist_obj, tx);
1773
1774 /* Collapse range in clone heads */
1775 dsl_dataset_remove_clones_key(ds,
1776 ds->ds_phys->ds_creation_txg, tx);
1777
1778 if (dsl_dataset_is_snapshot(ds_next)) {
1779 dsl_dataset_t *ds_nextnext;
1780
1781 /*
1782 * Update next's unique to include blocks which
1783 * were previously shared by only this snapshot
1784 * and it. Those blocks will be born after the
1785 * prev snap and before this snap, and will have
1786 * died after the next snap and before the one
1787 * after that (ie. be on the snap after next's
1788 * deadlist).
1789 */
1790 VERIFY(0 == dsl_dataset_hold_obj(dp,
1791 ds_next->ds_phys->ds_next_snap_obj,
1792 FTAG, &ds_nextnext));
1793 dsl_deadlist_space_range(&ds_nextnext->ds_deadlist,
1794 ds->ds_phys->ds_prev_snap_txg,
1795 ds->ds_phys->ds_creation_txg,
1796 &used, &comp, &uncomp);
1797 ds_next->ds_phys->ds_unique_bytes += used;
1798 dsl_dataset_rele(ds_nextnext, FTAG);
1799 ASSERT3P(ds_next->ds_prev, ==, NULL);
1800
1801 /* Collapse range in this head. */
1802 dsl_dataset_t *hds;
1803 VERIFY3U(0, ==, dsl_dataset_hold_obj(dp,
1804 ds->ds_dir->dd_phys->dd_head_dataset_obj,
1805 FTAG, &hds));
1806 dsl_deadlist_remove_key(&hds->ds_deadlist,
1807 ds->ds_phys->ds_creation_txg, tx);
1808 dsl_dataset_rele(hds, FTAG);
1809
1810 } else {
1811 ASSERT3P(ds_next->ds_prev, ==, ds);
1812 dsl_dataset_drop_ref(ds_next->ds_prev, ds_next);
1813 ds_next->ds_prev = NULL;
1814 if (ds_prev) {
1815 VERIFY(0 == dsl_dataset_get_ref(dp,
1816 ds->ds_phys->ds_prev_snap_obj,
1817 ds_next, &ds_next->ds_prev));
1818 }
1819
1820 dsl_dataset_recalc_head_uniq(ds_next);
1821
1822 /*
 1823	 * Reduce the amount of our unconsumed refreservation
1824 * being charged to our parent by the amount of
1825 * new unique data we have gained.
1826 */
1827 if (old_unique < ds_next->ds_reserved) {
1828 int64_t mrsdelta;
1829 uint64_t new_unique =
1830 ds_next->ds_phys->ds_unique_bytes;
1831
1832 ASSERT(old_unique <= new_unique);
1833 mrsdelta = MIN(new_unique - old_unique,
1834 ds_next->ds_reserved - old_unique);
1835 dsl_dir_diduse_space(ds->ds_dir,
1836 DD_USED_REFRSRV, -mrsdelta, 0, 0, tx);
1837 }
1838 }
1839 dsl_dataset_rele(ds_next, FTAG);
1840 } else {
1841 /*
1842 * There's no next snapshot, so this is a head dataset.
1843 * Destroy the deadlist. Unless it's a clone, the
1844 * deadlist should be empty. (If it's a clone, it's
1845 * safe to ignore the deadlist contents.)
1846 */
1847 struct killarg ka;
1848
1849 dsl_deadlist_close(&ds->ds_deadlist);
1850 dsl_deadlist_free(mos, ds->ds_phys->ds_deadlist_obj, tx);
1851 ds->ds_phys->ds_deadlist_obj = 0;
1852
1853 /*
1854 * Free everything that we point to (that's born after
1855 * the previous snapshot, if we are a clone)
1856 *
1857 * NB: this should be very quick, because we already
1858 * freed all the objects in open context.
1859 */
1860 ka.ds = ds;
1861 ka.tx = tx;
1862 err = traverse_dataset(ds, ds->ds_phys->ds_prev_snap_txg,
1863 TRAVERSE_POST, kill_blkptr, &ka);
1864 ASSERT3U(err, ==, 0);
1865 ASSERT(!DS_UNIQUE_IS_ACCURATE(ds) ||
1866 ds->ds_phys->ds_unique_bytes == 0);
1867
1868 if (ds->ds_prev != NULL) {
1869 if (spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
1870 VERIFY3U(0, ==, zap_remove_int(mos,
1871 ds->ds_prev->ds_dir->dd_phys->dd_clones,
1872 ds->ds_object, tx));
1873 }
1874 dsl_dataset_rele(ds->ds_prev, ds);
1875 ds->ds_prev = ds_prev = NULL;
1876 }
1877 }
1878
1879 /*
 1880	 * This must be done after the traverse_dataset(), because it will
1881 * re-open the objset.
1882 */
1883 if (ds->ds_objset) {
1884 dmu_objset_evict(ds->ds_objset);
1885 ds->ds_objset = NULL;
1886 }
1887
1888 if (ds->ds_dir->dd_phys->dd_head_dataset_obj == ds->ds_object) {
1889 /* Erase the link in the dir */
1890 dmu_buf_will_dirty(ds->ds_dir->dd_dbuf, tx);
1891 ds->ds_dir->dd_phys->dd_head_dataset_obj = 0;
1892 ASSERT(ds->ds_phys->ds_snapnames_zapobj != 0);
1893 err = zap_destroy(mos, ds->ds_phys->ds_snapnames_zapobj, tx);
1894 ASSERT(err == 0);
1895 } else {
1896 /* remove from snapshot namespace */
1897 dsl_dataset_t *ds_head;
1898 ASSERT(ds->ds_phys->ds_snapnames_zapobj == 0);
1899 VERIFY(0 == dsl_dataset_hold_obj(dp,
1900 ds->ds_dir->dd_phys->dd_head_dataset_obj, FTAG, &ds_head));
1901 VERIFY(0 == dsl_dataset_get_snapname(ds));
1902#ifdef ZFS_DEBUG
1903 {
1904 uint64_t val;
1905
1906 err = dsl_dataset_snap_lookup(ds_head,
1907 ds->ds_snapname, &val);
1908 ASSERT3U(err, ==, 0);
1909 ASSERT3U(val, ==, obj);
1910 }
1911#endif
1912 err = dsl_dataset_snap_remove(ds_head, ds->ds_snapname, tx);
1913 ASSERT(err == 0);
1914 dsl_dataset_rele(ds_head, FTAG);
1915 }
1916
1917 if (ds_prev && ds->ds_prev != ds_prev)
1918 dsl_dataset_rele(ds_prev, FTAG);
1919
1920 spa_prop_clear_bootfs(dp->dp_spa, ds->ds_object, tx);
1921 spa_history_log_internal(LOG_DS_DESTROY, dp->dp_spa, tx,
1922 "dataset = %llu", ds->ds_object);
1923
1924 if (ds->ds_phys->ds_next_clones_obj != 0) {
1925 uint64_t count;
1926 ASSERT(0 == zap_count(mos,
1927 ds->ds_phys->ds_next_clones_obj, &count) && count == 0);
1928 VERIFY(0 == dmu_object_free(mos,
1929 ds->ds_phys->ds_next_clones_obj, tx));
1930 }
1931 if (ds->ds_phys->ds_props_obj != 0)
1932 VERIFY(0 == zap_destroy(mos, ds->ds_phys->ds_props_obj, tx));
1933 if (ds->ds_phys->ds_userrefs_obj != 0)
1934 VERIFY(0 == zap_destroy(mos, ds->ds_phys->ds_userrefs_obj, tx));
1935 dsl_dir_close(ds->ds_dir, ds);
1936 ds->ds_dir = NULL;
1937 dsl_dataset_drain_refs(ds, tag);
1938 VERIFY(0 == dmu_object_free(mos, obj, tx));
1939
1940 if (dsda->rm_origin) {
1941 /*
1942 * Remove the origin of the clone we just destroyed.
1943 */
1944 struct dsl_ds_destroyarg ndsda = {0};
1945
1946 ndsda.ds = dsda->rm_origin;
1947 dsl_dataset_destroy_sync(&ndsda, tag, tx);
1948 }
1949}
1950
1951static int
1952dsl_dataset_snapshot_reserve_space(dsl_dataset_t *ds, dmu_tx_t *tx)
1953{
1954 uint64_t asize;
1955
1956 if (!dmu_tx_is_syncing(tx))
1957 return (0);
1958
1959 /*
1960 * If there's an fs-only reservation, any blocks that might become
1961 * owned by the snapshot dataset must be accommodated by space
1962 * outside of the reservation.
1963 */
1964 ASSERT(ds->ds_reserved == 0 || DS_UNIQUE_IS_ACCURATE(ds));
1965 asize = MIN(ds->ds_phys->ds_unique_bytes, ds->ds_reserved);
1966 if (asize > dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE))
1967 return (ENOSPC);
1968
1969 /*
1970	 * Propagate any reserved space for this snapshot to other
1971 * snapshot checks in this sync group.
1972 */
1973 if (asize > 0)
1974 dsl_dir_willuse_space(ds->ds_dir, asize, tx);
1975
1976 return (0);
1977}
1978
1979int
1980dsl_dataset_snapshot_check(void *arg1, void *arg2, dmu_tx_t *tx)
1981{
1982 dsl_dataset_t *ds = arg1;
1983 const char *snapname = arg2;
1984 int err;
1985 uint64_t value;
1986
1987 /*
1988 * We don't allow multiple snapshots of the same txg. If there
1989 * is already one, try again.
1990 */
1991 if (ds->ds_phys->ds_prev_snap_txg >= tx->tx_txg)
1992 return (EAGAIN);
1993
1994 /*
1995	 * Check for a conflicting snapshot name.
1996 */
1997 err = dsl_dataset_snap_lookup(ds, snapname, &value);
1998 if (err == 0)
1999 return (EEXIST);
2000 if (err != ENOENT)
2001 return (err);
2002
2003 /*
2004	 * Check that the full snapshot name is not too long: it consists of
2005	 * the dataset name's length + 1 for the @-sign + the snapshot name's length.
2006 */
2007 if (dsl_dataset_namelen(ds) + 1 + strlen(snapname) >= MAXNAMELEN)
2008 return (ENAMETOOLONG);
2009
2010 err = dsl_dataset_snapshot_reserve_space(ds, tx);
2011 if (err)
2012 return (err);
2013
2014 ds->ds_trysnap_txg = tx->tx_txg;
2015 return (0);
2016}
2017
2018void
2019dsl_dataset_snapshot_sync(void *arg1, void *arg2, dmu_tx_t *tx)
2020{
2021 dsl_dataset_t *ds = arg1;
2022 const char *snapname = arg2;
2023 dsl_pool_t *dp = ds->ds_dir->dd_pool;
2024 dmu_buf_t *dbuf;
2025 dsl_dataset_phys_t *dsphys;
2026 uint64_t dsobj, crtxg;
2027 objset_t *mos = dp->dp_meta_objset;
2028 int err;
2029
2030 ASSERT(RW_WRITE_HELD(&dp->dp_config_rwlock));
2031
2032 /*
2033 * The origin's ds_creation_txg has to be < TXG_INITIAL
2034 */
2035 if (strcmp(snapname, ORIGIN_DIR_NAME) == 0)
2036 crtxg = 1;
2037 else
2038 crtxg = tx->tx_txg;
2039
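	/*
	 * Allocate the snapshot's own dataset object and seed it with the
	 * head dataset's current state: block pointer, space accounting,
	 * flags, and (for now) the head's deadlist object.
	 */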
2040 dsobj = dmu_object_alloc(mos, DMU_OT_DSL_DATASET, 0,
2041 DMU_OT_DSL_DATASET, sizeof (dsl_dataset_phys_t), tx);
2042 VERIFY(0 == dmu_bonus_hold(mos, dsobj, FTAG, &dbuf));
2043 dmu_buf_will_dirty(dbuf, tx);
2044 dsphys = dbuf->db_data;
2045 bzero(dsphys, sizeof (dsl_dataset_phys_t));
2046 dsphys->ds_dir_obj = ds->ds_dir->dd_object;
2047 dsphys->ds_fsid_guid = unique_create();
2048 (void) random_get_pseudo_bytes((void*)&dsphys->ds_guid,
2049 sizeof (dsphys->ds_guid));
2050 dsphys->ds_prev_snap_obj = ds->ds_phys->ds_prev_snap_obj;
2051 dsphys->ds_prev_snap_txg = ds->ds_phys->ds_prev_snap_txg;
2052 dsphys->ds_next_snap_obj = ds->ds_object;
2053 dsphys->ds_num_children = 1;
2054 dsphys->ds_creation_time = gethrestime_sec();
2055 dsphys->ds_creation_txg = crtxg;
2056 dsphys->ds_deadlist_obj = ds->ds_phys->ds_deadlist_obj;
2057 dsphys->ds_used_bytes = ds->ds_phys->ds_used_bytes;
2058 dsphys->ds_compressed_bytes = ds->ds_phys->ds_compressed_bytes;
2059 dsphys->ds_uncompressed_bytes = ds->ds_phys->ds_uncompressed_bytes;
2060 dsphys->ds_flags = ds->ds_phys->ds_flags;
2061 dsphys->ds_bp = ds->ds_phys->ds_bp;
2062 dmu_buf_rele(dbuf, FTAG);
2063
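	/*
	 * Hook the new snapshot into the snapshot chain.  If the previous
	 * snapshot pointed directly at the head, repoint it at the new
	 * snapshot; if the head is a clone, move its entry in the origin's
	 * next_clones list over to the new snapshot.
	 */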
2064 ASSERT3U(ds->ds_prev != 0, ==, ds->ds_phys->ds_prev_snap_obj != 0);
2065 if (ds->ds_prev) {
2066 uint64_t next_clones_obj =
2067 ds->ds_prev->ds_phys->ds_next_clones_obj;
2068 ASSERT(ds->ds_prev->ds_phys->ds_next_snap_obj ==
2069 ds->ds_object ||
2070 ds->ds_prev->ds_phys->ds_num_children > 1);
2071 if (ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object) {
2072 dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
2073 ASSERT3U(ds->ds_phys->ds_prev_snap_txg, ==,
2074 ds->ds_prev->ds_phys->ds_creation_txg);
2075 ds->ds_prev->ds_phys->ds_next_snap_obj = dsobj;
2076 } else if (next_clones_obj != 0) {
2077 remove_from_next_clones(ds->ds_prev,
2078 dsphys->ds_next_snap_obj, tx);
2079 VERIFY3U(0, ==, zap_add_int(mos,
2080 next_clones_obj, dsobj, tx));
2081 }
2082 }
2083
2084 /*
2085 * If we have a reference-reservation on this dataset, we will
2086 * need to increase the amount of refreservation being charged
2087 * since our unique space is going to zero.
2088 */
2089 if (ds->ds_reserved) {
2090 int64_t delta;
2091 ASSERT(DS_UNIQUE_IS_ACCURATE(ds));
2092 delta = MIN(ds->ds_phys->ds_unique_bytes, ds->ds_reserved);
2093 dsl_dir_diduse_space(ds->ds_dir, DD_USED_REFRSRV,
2094 delta, 0, 0, tx);
2095 }
2096
2097 dmu_buf_will_dirty(ds->ds_dbuf, tx);
2098 zfs_dbgmsg("taking snapshot %s@%s/%llu; newkey=%llu",
2099 ds->ds_dir->dd_myname, snapname, dsobj,
2100 ds->ds_phys->ds_prev_snap_txg);
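	/*
	 * The snapshot inherited the head's old deadlist object (set above);
	 * give the head a clone of that deadlist and record the previous
	 * snapshot's txg as a new key in it.
	 */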
2101 ds->ds_phys->ds_deadlist_obj = dsl_deadlist_clone(&ds->ds_deadlist,
2102 UINT64_MAX, ds->ds_phys->ds_prev_snap_obj, tx);
2103 dsl_deadlist_close(&ds->ds_deadlist);
2104 dsl_deadlist_open(&ds->ds_deadlist, mos, ds->ds_phys->ds_deadlist_obj);
2105 dsl_deadlist_add_key(&ds->ds_deadlist,
2106 ds->ds_phys->ds_prev_snap_txg, tx);
2107
2108 ASSERT3U(ds->ds_phys->ds_prev_snap_txg, <, tx->tx_txg);
2109 ds->ds_phys->ds_prev_snap_obj = dsobj;
2110 ds->ds_phys->ds_prev_snap_txg = crtxg;
2111 ds->ds_phys->ds_unique_bytes = 0;
2112 if (spa_version(dp->dp_spa) >= SPA_VERSION_UNIQUE_ACCURATE)
2113 ds->ds_phys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;
2114
2115 err = zap_add(mos, ds->ds_phys->ds_snapnames_zapobj,
2116 snapname, 8, 1, &dsobj, tx);
2117 ASSERT(err == 0);
2118
2119 if (ds->ds_prev)
2120 dsl_dataset_drop_ref(ds->ds_prev, ds);
2121 VERIFY(0 == dsl_dataset_get_ref(dp,
2122 ds->ds_phys->ds_prev_snap_obj, ds, &ds->ds_prev));
2123
2124 dsl_scan_ds_snapshotted(ds, tx);
2125
2126 dsl_dir_snap_cmtime_update(ds->ds_dir);
2127
2128 spa_history_log_internal(LOG_DS_SNAPSHOT, dp->dp_spa, tx,
2129 "dataset = %llu", dsobj);
2130}
2131
2132void
2133dsl_dataset_sync(dsl_dataset_t *ds, zio_t *zio, dmu_tx_t *tx)
2134{
2135 ASSERT(dmu_tx_is_syncing(tx));
2136 ASSERT(ds->ds_objset != NULL);
2137 ASSERT(ds->ds_phys->ds_next_snap_obj == 0);
2138
2139 /*
2140 * in case we had to change ds_fsid_guid when we opened it,
2141 * sync it out now.
2142 */
2143 dmu_buf_will_dirty(ds->ds_dbuf, tx);
2144 ds->ds_phys->ds_fsid_guid = ds->ds_fsid_guid;
2145
2146 dsl_dir_dirty(ds->ds_dir, tx);
2147 dmu_objset_sync(ds->ds_objset, zio, tx);
2148}
2149
2150void
2151dsl_dataset_stats(dsl_dataset_t *ds, nvlist_t *nv)
2152{
24 */
25
26#include <sys/dmu_objset.h>
27#include <sys/dsl_dataset.h>
28#include <sys/dsl_dir.h>
29#include <sys/dsl_prop.h>
30#include <sys/dsl_synctask.h>
31#include <sys/dmu_traverse.h>
32#include <sys/dmu_tx.h>
33#include <sys/arc.h>
34#include <sys/zio.h>
35#include <sys/zap.h>
36#include <sys/unique.h>
37#include <sys/zfs_context.h>
38#include <sys/zfs_ioctl.h>
39#include <sys/spa.h>
40#include <sys/zfs_znode.h>
41#include <sys/zfs_onexit.h>
42#include <sys/zvol.h>
43#include <sys/dsl_scan.h>
44#include <sys/dsl_deadlist.h>
45
46static char *dsl_reaper = "the grim reaper";
47
48static dsl_checkfunc_t dsl_dataset_destroy_begin_check;
49static dsl_syncfunc_t dsl_dataset_destroy_begin_sync;
50static dsl_syncfunc_t dsl_dataset_set_reservation_sync;
51
52#define SWITCH64(x, y) \
53 { \
54 uint64_t __tmp = (x); \
55 (x) = (y); \
56 (y) = __tmp; \
57 }
58
59#define DS_REF_MAX (1ULL << 62)
60
61#define DSL_DEADLIST_BLOCKSIZE SPA_MAXBLOCKSIZE
62
63#define DSL_DATASET_IS_DESTROYED(ds) ((ds)->ds_owner == dsl_reaper)
64
65
66/*
67 * Figure out how much of this delta should be propagated to the dsl_dir
68 * layer. If there's a refreservation, that space has already been
69 * partially accounted for in our ancestors.
70 */
71static int64_t
72parent_delta(dsl_dataset_t *ds, int64_t delta)
73{
74 uint64_t old_bytes, new_bytes;
75
76 if (ds->ds_reserved == 0)
77 return (delta);
78
79 old_bytes = MAX(ds->ds_phys->ds_unique_bytes, ds->ds_reserved);
80 new_bytes = MAX(ds->ds_phys->ds_unique_bytes + delta, ds->ds_reserved);
81
82 ASSERT3U(ABS((int64_t)(new_bytes - old_bytes)), <=, ABS(delta));
83 return (new_bytes - old_bytes);
84}
85
86void
87dsl_dataset_block_born(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx)
88{
89 int used = bp_get_dsize_sync(tx->tx_pool->dp_spa, bp);
90 int compressed = BP_GET_PSIZE(bp);
91 int uncompressed = BP_GET_UCSIZE(bp);
92 int64_t delta;
93
94 dprintf_bp(bp, "ds=%p", ds);
95
96 ASSERT(dmu_tx_is_syncing(tx));
97 /* It could have been compressed away to nothing */
98 if (BP_IS_HOLE(bp))
99 return;
100 ASSERT(BP_GET_TYPE(bp) != DMU_OT_NONE);
101 ASSERT3U(BP_GET_TYPE(bp), <, DMU_OT_NUMTYPES);
102 if (ds == NULL) {
103 /*
104 * Account for the meta-objset space in its placeholder
105 * dsl_dir.
106 */
107 ASSERT3U(compressed, ==, uncompressed); /* it's all metadata */
108 dsl_dir_diduse_space(tx->tx_pool->dp_mos_dir, DD_USED_HEAD,
109 used, compressed, uncompressed, tx);
110 dsl_dir_dirty(tx->tx_pool->dp_mos_dir, tx);
111 return;
112 }
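	/*
	 * Charge the new block to this dataset: bump its space accounting
	 * and unique bytes, then push the delta up to the dsl_dir, moving
	 * any portion already covered by the refreservation out of
	 * DD_USED_REFRSRV.
	 */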
113 dmu_buf_will_dirty(ds->ds_dbuf, tx);
114
115 mutex_enter(&ds->ds_dir->dd_lock);
116 mutex_enter(&ds->ds_lock);
117 delta = parent_delta(ds, used);
118 ds->ds_phys->ds_used_bytes += used;
119 ds->ds_phys->ds_compressed_bytes += compressed;
120 ds->ds_phys->ds_uncompressed_bytes += uncompressed;
121 ds->ds_phys->ds_unique_bytes += used;
122 mutex_exit(&ds->ds_lock);
123 dsl_dir_diduse_space(ds->ds_dir, DD_USED_HEAD, delta,
124 compressed, uncompressed, tx);
125 dsl_dir_transfer_space(ds->ds_dir, used - delta,
126 DD_USED_REFRSRV, DD_USED_HEAD, tx);
127 mutex_exit(&ds->ds_dir->dd_lock);
128}
129
130int
131dsl_dataset_block_kill(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx,
132 boolean_t async)
133{
134 if (BP_IS_HOLE(bp))
135 return (0);
136
137 ASSERT(dmu_tx_is_syncing(tx));
138 ASSERT(bp->blk_birth <= tx->tx_txg);
139
140 int used = bp_get_dsize_sync(tx->tx_pool->dp_spa, bp);
141 int compressed = BP_GET_PSIZE(bp);
142 int uncompressed = BP_GET_UCSIZE(bp);
143
144 ASSERT(used > 0);
145 if (ds == NULL) {
146 /*
147 * Account for the meta-objset space in its placeholder
148 * dataset.
149 */
150 dsl_free(tx->tx_pool, tx->tx_txg, bp);
151
152 dsl_dir_diduse_space(tx->tx_pool->dp_mos_dir, DD_USED_HEAD,
153 -used, -compressed, -uncompressed, tx);
154 dsl_dir_dirty(tx->tx_pool->dp_mos_dir, tx);
155 return (used);
156 }
157 ASSERT3P(tx->tx_pool, ==, ds->ds_dir->dd_pool);
158
159 ASSERT(!dsl_dataset_is_snapshot(ds));
160 dmu_buf_will_dirty(ds->ds_dbuf, tx);
161
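	/*
	 * A block born after the most recent snapshot is unique to this
	 * dataset and can be freed immediately; an older block is still
	 * referenced by a snapshot and goes on the deadlist instead.
	 */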
162 if (bp->blk_birth > ds->ds_phys->ds_prev_snap_txg) {
163 int64_t delta;
164
165 dprintf_bp(bp, "freeing ds=%llu", ds->ds_object);
166 dsl_free(tx->tx_pool, tx->tx_txg, bp);
167
168 mutex_enter(&ds->ds_dir->dd_lock);
169 mutex_enter(&ds->ds_lock);
170 ASSERT(ds->ds_phys->ds_unique_bytes >= used ||
171 !DS_UNIQUE_IS_ACCURATE(ds));
172 delta = parent_delta(ds, -used);
173 ds->ds_phys->ds_unique_bytes -= used;
174 mutex_exit(&ds->ds_lock);
175 dsl_dir_diduse_space(ds->ds_dir, DD_USED_HEAD,
176 delta, -compressed, -uncompressed, tx);
177 dsl_dir_transfer_space(ds->ds_dir, -used - delta,
178 DD_USED_REFRSRV, DD_USED_HEAD, tx);
179 mutex_exit(&ds->ds_dir->dd_lock);
180 } else {
181 dprintf_bp(bp, "putting on dead list: %s", "");
182 if (async) {
183 /*
184 * We are here as part of zio's write done callback,
185 * which means we're a zio interrupt thread. We can't
186 * call dsl_deadlist_insert() now because it may block
187 * waiting for I/O. Instead, put bp on the deferred
188 * queue and let dsl_pool_sync() finish the job.
189 */
190 bplist_append(&ds->ds_pending_deadlist, bp);
191 } else {
192 dsl_deadlist_insert(&ds->ds_deadlist, bp, tx);
193 }
194 ASSERT3U(ds->ds_prev->ds_object, ==,
195 ds->ds_phys->ds_prev_snap_obj);
196 ASSERT(ds->ds_prev->ds_phys->ds_num_children > 0);
197 /* if (bp->blk_birth > prev prev snap txg) prev unique += bs */
198 if (ds->ds_prev->ds_phys->ds_next_snap_obj ==
199 ds->ds_object && bp->blk_birth >
200 ds->ds_prev->ds_phys->ds_prev_snap_txg) {
201 dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
202 mutex_enter(&ds->ds_prev->ds_lock);
203 ds->ds_prev->ds_phys->ds_unique_bytes += used;
204 mutex_exit(&ds->ds_prev->ds_lock);
205 }
206 if (bp->blk_birth > ds->ds_dir->dd_origin_txg) {
207 dsl_dir_transfer_space(ds->ds_dir, used,
208 DD_USED_HEAD, DD_USED_SNAP, tx);
209 }
210 }
211 mutex_enter(&ds->ds_lock);
212 ASSERT3U(ds->ds_phys->ds_used_bytes, >=, used);
213 ds->ds_phys->ds_used_bytes -= used;
214 ASSERT3U(ds->ds_phys->ds_compressed_bytes, >=, compressed);
215 ds->ds_phys->ds_compressed_bytes -= compressed;
216 ASSERT3U(ds->ds_phys->ds_uncompressed_bytes, >=, uncompressed);
217 ds->ds_phys->ds_uncompressed_bytes -= uncompressed;
218 mutex_exit(&ds->ds_lock);
219
220 return (used);
221}
222
223uint64_t
224dsl_dataset_prev_snap_txg(dsl_dataset_t *ds)
225{
226 uint64_t trysnap = 0;
227
228 if (ds == NULL)
229 return (0);
230 /*
231 * The snapshot creation could fail, but that would cause an
232 * incorrect FALSE return, which would only result in an
233 * overestimation of the amount of space that an operation would
234 * consume, which is OK.
235 *
236 * There's also a small window where we could miss a pending
237 * snapshot, because we could set the sync task in the quiescing
238 * phase. So this should only be used as a guess.
239 */
240 if (ds->ds_trysnap_txg >
241 spa_last_synced_txg(ds->ds_dir->dd_pool->dp_spa))
242 trysnap = ds->ds_trysnap_txg;
243 return (MAX(ds->ds_phys->ds_prev_snap_txg, trysnap));
244}
245
246boolean_t
247dsl_dataset_block_freeable(dsl_dataset_t *ds, const blkptr_t *bp,
248 uint64_t blk_birth)
249{
250 if (blk_birth <= dsl_dataset_prev_snap_txg(ds))
251 return (B_FALSE);
252
253 ddt_prefetch(dsl_dataset_get_spa(ds), bp);
254
255 return (B_TRUE);
256}
257
258/* ARGSUSED */
259static void
260dsl_dataset_evict(dmu_buf_t *db, void *dsv)
261{
262 dsl_dataset_t *ds = dsv;
263
264 ASSERT(ds->ds_owner == NULL || DSL_DATASET_IS_DESTROYED(ds));
265
266 unique_remove(ds->ds_fsid_guid);
267
268 if (ds->ds_objset != NULL)
269 dmu_objset_evict(ds->ds_objset);
270
271 if (ds->ds_prev) {
272 dsl_dataset_drop_ref(ds->ds_prev, ds);
273 ds->ds_prev = NULL;
274 }
275
276 bplist_destroy(&ds->ds_pending_deadlist);
277 if (db != NULL) {
278 dsl_deadlist_close(&ds->ds_deadlist);
279 } else {
280 ASSERT(ds->ds_deadlist.dl_dbuf == NULL);
281 ASSERT(!ds->ds_deadlist.dl_oldfmt);
282 }
283 if (ds->ds_dir)
284 dsl_dir_close(ds->ds_dir, ds);
285
286 ASSERT(!list_link_active(&ds->ds_synced_link));
287
288 if (mutex_owned(&ds->ds_lock))
289 mutex_exit(&ds->ds_lock);
290 mutex_destroy(&ds->ds_lock);
291 mutex_destroy(&ds->ds_recvlock);
292 if (mutex_owned(&ds->ds_opening_lock))
293 mutex_exit(&ds->ds_opening_lock);
294 mutex_destroy(&ds->ds_opening_lock);
295 rw_destroy(&ds->ds_rwlock);
296 cv_destroy(&ds->ds_exclusive_cv);
297
298 kmem_free(ds, sizeof (dsl_dataset_t));
299}
300
301static int
302dsl_dataset_get_snapname(dsl_dataset_t *ds)
303{
304 dsl_dataset_phys_t *headphys;
305 int err;
306 dmu_buf_t *headdbuf;
307 dsl_pool_t *dp = ds->ds_dir->dd_pool;
308 objset_t *mos = dp->dp_meta_objset;
309
310 if (ds->ds_snapname[0])
311 return (0);
312 if (ds->ds_phys->ds_next_snap_obj == 0)
313 return (0);
314
315 err = dmu_bonus_hold(mos, ds->ds_dir->dd_phys->dd_head_dataset_obj,
316 FTAG, &headdbuf);
317 if (err)
318 return (err);
319 headphys = headdbuf->db_data;
320 err = zap_value_search(dp->dp_meta_objset,
321 headphys->ds_snapnames_zapobj, ds->ds_object, 0, ds->ds_snapname);
322 dmu_buf_rele(headdbuf, FTAG);
323 return (err);
324}
325
326static int
327dsl_dataset_snap_lookup(dsl_dataset_t *ds, const char *name, uint64_t *value)
328{
329 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
330 uint64_t snapobj = ds->ds_phys->ds_snapnames_zapobj;
331 matchtype_t mt;
332 int err;
333
334 if (ds->ds_phys->ds_flags & DS_FLAG_CI_DATASET)
335 mt = MT_FIRST;
336 else
337 mt = MT_EXACT;
338
339 err = zap_lookup_norm(mos, snapobj, name, 8, 1,
340 value, mt, NULL, 0, NULL);
341 if (err == ENOTSUP && mt == MT_FIRST)
342 err = zap_lookup(mos, snapobj, name, 8, 1, value);
343 return (err);
344}
345
346static int
347dsl_dataset_snap_remove(dsl_dataset_t *ds, char *name, dmu_tx_t *tx)
348{
349 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
350 uint64_t snapobj = ds->ds_phys->ds_snapnames_zapobj;
351 matchtype_t mt;
352 int err;
353
354 dsl_dir_snap_cmtime_update(ds->ds_dir);
355
356 if (ds->ds_phys->ds_flags & DS_FLAG_CI_DATASET)
357 mt = MT_FIRST;
358 else
359 mt = MT_EXACT;
360
361 err = zap_remove_norm(mos, snapobj, name, mt, tx);
362 if (err == ENOTSUP && mt == MT_FIRST)
363 err = zap_remove(mos, snapobj, name, tx);
364 return (err);
365}
366
367static int
368dsl_dataset_get_ref(dsl_pool_t *dp, uint64_t dsobj, void *tag,
369 dsl_dataset_t **dsp)
370{
371 objset_t *mos = dp->dp_meta_objset;
372 dmu_buf_t *dbuf;
373 dsl_dataset_t *ds;
374 int err;
375 dmu_object_info_t doi;
376
377 ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock) ||
378 dsl_pool_sync_context(dp));
379
380 err = dmu_bonus_hold(mos, dsobj, tag, &dbuf);
381 if (err)
382 return (err);
383
384 /* Make sure dsobj has the correct object type. */
385 dmu_object_info_from_db(dbuf, &doi);
386 if (doi.doi_type != DMU_OT_DSL_DATASET)
387 return (EINVAL);
388
389 ds = dmu_buf_get_user(dbuf);
390 if (ds == NULL) {
391 dsl_dataset_t *winner;
392
393 ds = kmem_zalloc(sizeof (dsl_dataset_t), KM_SLEEP);
394 ds->ds_dbuf = dbuf;
395 ds->ds_object = dsobj;
396 ds->ds_phys = dbuf->db_data;
397
398 mutex_init(&ds->ds_lock, NULL, MUTEX_DEFAULT, NULL);
399 mutex_init(&ds->ds_recvlock, NULL, MUTEX_DEFAULT, NULL);
400 mutex_init(&ds->ds_opening_lock, NULL, MUTEX_DEFAULT, NULL);
401 rw_init(&ds->ds_rwlock, 0, 0, 0);
402 cv_init(&ds->ds_exclusive_cv, NULL, CV_DEFAULT, NULL);
403
404 bplist_create(&ds->ds_pending_deadlist);
405 dsl_deadlist_open(&ds->ds_deadlist,
406 mos, ds->ds_phys->ds_deadlist_obj);
407
408 if (err == 0) {
409 err = dsl_dir_open_obj(dp,
410 ds->ds_phys->ds_dir_obj, NULL, ds, &ds->ds_dir);
411 }
412 if (err) {
413 mutex_destroy(&ds->ds_lock);
414 mutex_destroy(&ds->ds_recvlock);
415 mutex_destroy(&ds->ds_opening_lock);
416 rw_destroy(&ds->ds_rwlock);
417 cv_destroy(&ds->ds_exclusive_cv);
418 bplist_destroy(&ds->ds_pending_deadlist);
419 dsl_deadlist_close(&ds->ds_deadlist);
420 kmem_free(ds, sizeof (dsl_dataset_t));
421 dmu_buf_rele(dbuf, tag);
422 return (err);
423 }
424
425 if (!dsl_dataset_is_snapshot(ds)) {
426 ds->ds_snapname[0] = '\0';
427 if (ds->ds_phys->ds_prev_snap_obj) {
428 err = dsl_dataset_get_ref(dp,
429 ds->ds_phys->ds_prev_snap_obj,
430 ds, &ds->ds_prev);
431 }
432 } else {
433 if (zfs_flags & ZFS_DEBUG_SNAPNAMES)
434 err = dsl_dataset_get_snapname(ds);
435 if (err == 0 && ds->ds_phys->ds_userrefs_obj != 0) {
436 err = zap_count(
437 ds->ds_dir->dd_pool->dp_meta_objset,
438 ds->ds_phys->ds_userrefs_obj,
439 &ds->ds_userrefs);
440 }
441 }
442
443 if (err == 0 && !dsl_dataset_is_snapshot(ds)) {
444 /*
445 * In sync context, we're called with either no lock
446 * or with the write lock. If we're not syncing,
447 * we're always called with the read lock held.
448 */
449 boolean_t need_lock =
450 !RW_WRITE_HELD(&dp->dp_config_rwlock) &&
451 dsl_pool_sync_context(dp);
452
453 if (need_lock)
454 rw_enter(&dp->dp_config_rwlock, RW_READER);
455
456 err = dsl_prop_get_ds(ds,
457 "refreservation", sizeof (uint64_t), 1,
458 &ds->ds_reserved, NULL);
459 if (err == 0) {
460 err = dsl_prop_get_ds(ds,
461 "refquota", sizeof (uint64_t), 1,
462 &ds->ds_quota, NULL);
463 }
464
465 if (need_lock)
466 rw_exit(&dp->dp_config_rwlock);
467 } else {
468 ds->ds_reserved = ds->ds_quota = 0;
469 }
470
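		/*
		 * Publish the dataset as the dbuf's user record.  If another
		 * thread attached one first, "winner" is their dsl_dataset_t;
		 * tear ours down and use theirs.
		 */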
471 if (err == 0) {
472 winner = dmu_buf_set_user_ie(dbuf, ds, &ds->ds_phys,
473 dsl_dataset_evict);
474 }
475 if (err || winner) {
476 bplist_destroy(&ds->ds_pending_deadlist);
477 dsl_deadlist_close(&ds->ds_deadlist);
478 if (ds->ds_prev)
479 dsl_dataset_drop_ref(ds->ds_prev, ds);
480 dsl_dir_close(ds->ds_dir, ds);
481 mutex_destroy(&ds->ds_lock);
482 mutex_destroy(&ds->ds_recvlock);
483 mutex_destroy(&ds->ds_opening_lock);
484 rw_destroy(&ds->ds_rwlock);
485 cv_destroy(&ds->ds_exclusive_cv);
486 kmem_free(ds, sizeof (dsl_dataset_t));
487 if (err) {
488 dmu_buf_rele(dbuf, tag);
489 return (err);
490 }
491 ds = winner;
492 } else {
493 ds->ds_fsid_guid =
494 unique_insert(ds->ds_phys->ds_fsid_guid);
495 }
496 }
497 ASSERT3P(ds->ds_dbuf, ==, dbuf);
498 ASSERT3P(ds->ds_phys, ==, dbuf->db_data);
499 ASSERT(ds->ds_phys->ds_prev_snap_obj != 0 ||
500 spa_version(dp->dp_spa) < SPA_VERSION_ORIGIN ||
501 dp->dp_origin_snap == NULL || ds == dp->dp_origin_snap);
502 mutex_enter(&ds->ds_lock);
503 if (!dsl_pool_sync_context(dp) && DSL_DATASET_IS_DESTROYED(ds)) {
504 mutex_exit(&ds->ds_lock);
505 dmu_buf_rele(ds->ds_dbuf, tag);
506 return (ENOENT);
507 }
508 mutex_exit(&ds->ds_lock);
509 *dsp = ds;
510 return (0);
511}
512
513static int
514dsl_dataset_hold_ref(dsl_dataset_t *ds, void *tag)
515{
516 dsl_pool_t *dp = ds->ds_dir->dd_pool;
517
518 /*
519	 * In syncing context we don't take the rwlock: there
520 * may be an existing writer waiting for sync phase to
521 * finish. We don't need to worry about such writers, since
522 * sync phase is single-threaded, so the writer can't be
523 * doing anything while we are active.
524 */
525 if (dsl_pool_sync_context(dp)) {
526 ASSERT(!DSL_DATASET_IS_DESTROYED(ds));
527 return (0);
528 }
529
530 /*
531 * Normal users will hold the ds_rwlock as a READER until they
532 * are finished (i.e., call dsl_dataset_rele()). "Owners" will
533 * drop their READER lock after they set the ds_owner field.
534 *
535 * If the dataset is being destroyed, the destroy thread will
536 * obtain a WRITER lock for exclusive access after it's done its
537 * open-context work and then change the ds_owner to
538 * dsl_reaper once destruction is assured. So threads
539 * may block here temporarily, until the "destructability" of
540 * the dataset is determined.
541 */
542 ASSERT(!RW_WRITE_HELD(&dp->dp_config_rwlock));
543 mutex_enter(&ds->ds_lock);
544 while (!rw_tryenter(&ds->ds_rwlock, RW_READER)) {
545 rw_exit(&dp->dp_config_rwlock);
546 cv_wait(&ds->ds_exclusive_cv, &ds->ds_lock);
547 if (DSL_DATASET_IS_DESTROYED(ds)) {
548 mutex_exit(&ds->ds_lock);
549 dsl_dataset_drop_ref(ds, tag);
550 rw_enter(&dp->dp_config_rwlock, RW_READER);
551 return (ENOENT);
552 }
553 /*
554 * The dp_config_rwlock lives above the ds_lock. And
555 * we need to check DSL_DATASET_IS_DESTROYED() while
556 * holding the ds_lock, so we have to drop and reacquire
557 * the ds_lock here.
558 */
559 mutex_exit(&ds->ds_lock);
560 rw_enter(&dp->dp_config_rwlock, RW_READER);
561 mutex_enter(&ds->ds_lock);
562 }
563 mutex_exit(&ds->ds_lock);
564 return (0);
565}
566
567int
568dsl_dataset_hold_obj(dsl_pool_t *dp, uint64_t dsobj, void *tag,
569 dsl_dataset_t **dsp)
570{
571 int err = dsl_dataset_get_ref(dp, dsobj, tag, dsp);
572
573 if (err)
574 return (err);
575 return (dsl_dataset_hold_ref(*dsp, tag));
576}
577
578int
579dsl_dataset_own_obj(dsl_pool_t *dp, uint64_t dsobj, boolean_t inconsistentok,
580 void *tag, dsl_dataset_t **dsp)
581{
582 int err = dsl_dataset_hold_obj(dp, dsobj, tag, dsp);
583 if (err)
584 return (err);
585 if (!dsl_dataset_tryown(*dsp, inconsistentok, tag)) {
586 dsl_dataset_rele(*dsp, tag);
587 *dsp = NULL;
588 return (EBUSY);
589 }
590 return (0);
591}
592
593int
594dsl_dataset_hold(const char *name, void *tag, dsl_dataset_t **dsp)
595{
596 dsl_dir_t *dd;
597 dsl_pool_t *dp;
598 const char *snapname;
599 uint64_t obj;
600 int err = 0;
601
602 err = dsl_dir_open_spa(NULL, name, FTAG, &dd, &snapname);
603 if (err)
604 return (err);
605
606 dp = dd->dd_pool;
607 obj = dd->dd_phys->dd_head_dataset_obj;
608 rw_enter(&dp->dp_config_rwlock, RW_READER);
609 if (obj)
610 err = dsl_dataset_get_ref(dp, obj, tag, dsp);
611 else
612 err = ENOENT;
613 if (err)
614 goto out;
615
616 err = dsl_dataset_hold_ref(*dsp, tag);
617
618 /* we may be looking for a snapshot */
619 if (err == 0 && snapname != NULL) {
620 dsl_dataset_t *ds = NULL;
621
622 if (*snapname++ != '@') {
623 dsl_dataset_rele(*dsp, tag);
624 err = ENOENT;
625 goto out;
626 }
627
628 dprintf("looking for snapshot '%s'\n", snapname);
629 err = dsl_dataset_snap_lookup(*dsp, snapname, &obj);
630 if (err == 0)
631 err = dsl_dataset_get_ref(dp, obj, tag, &ds);
632 dsl_dataset_rele(*dsp, tag);
633
634 ASSERT3U((err == 0), ==, (ds != NULL));
635
636 if (ds) {
637 mutex_enter(&ds->ds_lock);
638 if (ds->ds_snapname[0] == 0)
639 (void) strlcpy(ds->ds_snapname, snapname,
640 sizeof (ds->ds_snapname));
641 mutex_exit(&ds->ds_lock);
642 err = dsl_dataset_hold_ref(ds, tag);
643 *dsp = err ? NULL : ds;
644 }
645 }
646out:
647 rw_exit(&dp->dp_config_rwlock);
648 dsl_dir_close(dd, FTAG);
649 return (err);
650}
651
652int
653dsl_dataset_own(const char *name, boolean_t inconsistentok,
654 void *tag, dsl_dataset_t **dsp)
655{
656 int err = dsl_dataset_hold(name, tag, dsp);
657 if (err)
658 return (err);
659 if (!dsl_dataset_tryown(*dsp, inconsistentok, tag)) {
660 dsl_dataset_rele(*dsp, tag);
661 return (EBUSY);
662 }
663 return (0);
664}
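
/*
 * A sketch of typical usage (error handling elided; callers generally pass
 * FTAG as the tag):
 *
 *	dsl_dataset_t *ds;
 *
 *	if (dsl_dataset_hold("pool/fs@snap", FTAG, &ds) == 0) {
 *		... examine ds ...
 *		dsl_dataset_rele(ds, FTAG);
 *	}
 *
 * Long-lived, exclusive users call dsl_dataset_own()/dsl_dataset_disown()
 * instead.
 */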
665
666void
667dsl_dataset_name(dsl_dataset_t *ds, char *name)
668{
669 if (ds == NULL) {
670 (void) strcpy(name, "mos");
671 } else {
672 dsl_dir_name(ds->ds_dir, name);
673 VERIFY(0 == dsl_dataset_get_snapname(ds));
674 if (ds->ds_snapname[0]) {
675 (void) strcat(name, "@");
676 /*
677 * We use a "recursive" mutex so that we
678 * can call dprintf_ds() with ds_lock held.
679 */
680 if (!MUTEX_HELD(&ds->ds_lock)) {
681 mutex_enter(&ds->ds_lock);
682 (void) strcat(name, ds->ds_snapname);
683 mutex_exit(&ds->ds_lock);
684 } else {
685 (void) strcat(name, ds->ds_snapname);
686 }
687 }
688 }
689}
690
691static int
692dsl_dataset_namelen(dsl_dataset_t *ds)
693{
694 int result;
695
696 if (ds == NULL) {
697 result = 3; /* "mos" */
698 } else {
699 result = dsl_dir_namelen(ds->ds_dir);
700 VERIFY(0 == dsl_dataset_get_snapname(ds));
701 if (ds->ds_snapname[0]) {
702 ++result; /* adding one for the @-sign */
703 if (!MUTEX_HELD(&ds->ds_lock)) {
704 mutex_enter(&ds->ds_lock);
705 result += strlen(ds->ds_snapname);
706 mutex_exit(&ds->ds_lock);
707 } else {
708 result += strlen(ds->ds_snapname);
709 }
710 }
711 }
712
713 return (result);
714}
715
716void
717dsl_dataset_drop_ref(dsl_dataset_t *ds, void *tag)
718{
719 dmu_buf_rele(ds->ds_dbuf, tag);
720}
721
722void
723dsl_dataset_rele(dsl_dataset_t *ds, void *tag)
724{
725 if (!dsl_pool_sync_context(ds->ds_dir->dd_pool)) {
726 rw_exit(&ds->ds_rwlock);
727 }
728 dsl_dataset_drop_ref(ds, tag);
729}
730
731void
732dsl_dataset_disown(dsl_dataset_t *ds, void *tag)
733{
734 ASSERT((ds->ds_owner == tag && ds->ds_dbuf) ||
735 (DSL_DATASET_IS_DESTROYED(ds) && ds->ds_dbuf == NULL));
736
737 mutex_enter(&ds->ds_lock);
738 ds->ds_owner = NULL;
739 if (RW_WRITE_HELD(&ds->ds_rwlock)) {
740 rw_exit(&ds->ds_rwlock);
741 cv_broadcast(&ds->ds_exclusive_cv);
742 }
743 mutex_exit(&ds->ds_lock);
744 if (ds->ds_dbuf)
745 dsl_dataset_drop_ref(ds, tag);
746 else
747 dsl_dataset_evict(NULL, ds);
748}
749
750boolean_t
751dsl_dataset_tryown(dsl_dataset_t *ds, boolean_t inconsistentok, void *tag)
752{
753 boolean_t gotit = FALSE;
754
755 mutex_enter(&ds->ds_lock);
756 if (ds->ds_owner == NULL &&
757 (!DS_IS_INCONSISTENT(ds) || inconsistentok)) {
758 ds->ds_owner = tag;
759 if (!dsl_pool_sync_context(ds->ds_dir->dd_pool))
760 rw_exit(&ds->ds_rwlock);
761 gotit = TRUE;
762 }
763 mutex_exit(&ds->ds_lock);
764 return (gotit);
765}
766
767void
768dsl_dataset_make_exclusive(dsl_dataset_t *ds, void *owner)
769{
770 ASSERT3P(owner, ==, ds->ds_owner);
771 if (!RW_WRITE_HELD(&ds->ds_rwlock))
772 rw_enter(&ds->ds_rwlock, RW_WRITER);
773}
774
775uint64_t
776dsl_dataset_create_sync_dd(dsl_dir_t *dd, dsl_dataset_t *origin,
777 uint64_t flags, dmu_tx_t *tx)
778{
779 dsl_pool_t *dp = dd->dd_pool;
780 dmu_buf_t *dbuf;
781 dsl_dataset_phys_t *dsphys;
782 uint64_t dsobj;
783 objset_t *mos = dp->dp_meta_objset;
784
785 if (origin == NULL)
786 origin = dp->dp_origin_snap;
787
788 ASSERT(origin == NULL || origin->ds_dir->dd_pool == dp);
789 ASSERT(origin == NULL || origin->ds_phys->ds_num_children > 0);
790 ASSERT(dmu_tx_is_syncing(tx));
791 ASSERT(dd->dd_phys->dd_head_dataset_obj == 0);
792
793 dsobj = dmu_object_alloc(mos, DMU_OT_DSL_DATASET, 0,
794 DMU_OT_DSL_DATASET, sizeof (dsl_dataset_phys_t), tx);
795 VERIFY(0 == dmu_bonus_hold(mos, dsobj, FTAG, &dbuf));
796 dmu_buf_will_dirty(dbuf, tx);
797 dsphys = dbuf->db_data;
798 bzero(dsphys, sizeof (dsl_dataset_phys_t));
799 dsphys->ds_dir_obj = dd->dd_object;
800 dsphys->ds_flags = flags;
801 dsphys->ds_fsid_guid = unique_create();
802 (void) random_get_pseudo_bytes((void*)&dsphys->ds_guid,
803 sizeof (dsphys->ds_guid));
804 dsphys->ds_snapnames_zapobj =
805 zap_create_norm(mos, U8_TEXTPREP_TOUPPER, DMU_OT_DSL_DS_SNAP_MAP,
806 DMU_OT_NONE, 0, tx);
807 dsphys->ds_creation_time = gethrestime_sec();
808 dsphys->ds_creation_txg = tx->tx_txg == TXG_INITIAL ? 1 : tx->tx_txg;
809
810 if (origin == NULL) {
811 dsphys->ds_deadlist_obj = dsl_deadlist_alloc(mos, tx);
812 } else {
813 dsl_dataset_t *ohds;
814
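		/*
		 * We are creating a clone: start from the origin snapshot's
		 * block pointer and space accounting, bump the origin's
		 * child count, and clone the origin head's deadlist up to
		 * the branch point.
		 */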
815 dsphys->ds_prev_snap_obj = origin->ds_object;
816 dsphys->ds_prev_snap_txg =
817 origin->ds_phys->ds_creation_txg;
818 dsphys->ds_used_bytes =
819 origin->ds_phys->ds_used_bytes;
820 dsphys->ds_compressed_bytes =
821 origin->ds_phys->ds_compressed_bytes;
822 dsphys->ds_uncompressed_bytes =
823 origin->ds_phys->ds_uncompressed_bytes;
824 dsphys->ds_bp = origin->ds_phys->ds_bp;
825 dsphys->ds_flags |= origin->ds_phys->ds_flags;
826
827 dmu_buf_will_dirty(origin->ds_dbuf, tx);
828 origin->ds_phys->ds_num_children++;
829
830 VERIFY3U(0, ==, dsl_dataset_hold_obj(dp,
831 origin->ds_dir->dd_phys->dd_head_dataset_obj, FTAG, &ohds));
832 dsphys->ds_deadlist_obj = dsl_deadlist_clone(&ohds->ds_deadlist,
833 dsphys->ds_prev_snap_txg, dsphys->ds_prev_snap_obj, tx);
834 dsl_dataset_rele(ohds, FTAG);
835
836 if (spa_version(dp->dp_spa) >= SPA_VERSION_NEXT_CLONES) {
837 if (origin->ds_phys->ds_next_clones_obj == 0) {
838 origin->ds_phys->ds_next_clones_obj =
839 zap_create(mos,
840 DMU_OT_NEXT_CLONES, DMU_OT_NONE, 0, tx);
841 }
842 VERIFY(0 == zap_add_int(mos,
843 origin->ds_phys->ds_next_clones_obj,
844 dsobj, tx));
845 }
846
847 dmu_buf_will_dirty(dd->dd_dbuf, tx);
848 dd->dd_phys->dd_origin_obj = origin->ds_object;
849 if (spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
850 if (origin->ds_dir->dd_phys->dd_clones == 0) {
851 dmu_buf_will_dirty(origin->ds_dir->dd_dbuf, tx);
852 origin->ds_dir->dd_phys->dd_clones =
853 zap_create(mos,
854 DMU_OT_DSL_CLONES, DMU_OT_NONE, 0, tx);
855 }
856 VERIFY3U(0, ==, zap_add_int(mos,
857 origin->ds_dir->dd_phys->dd_clones, dsobj, tx));
858 }
859 }
860
861 if (spa_version(dp->dp_spa) >= SPA_VERSION_UNIQUE_ACCURATE)
862 dsphys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;
863
864 dmu_buf_rele(dbuf, FTAG);
865
866 dmu_buf_will_dirty(dd->dd_dbuf, tx);
867 dd->dd_phys->dd_head_dataset_obj = dsobj;
868
869 return (dsobj);
870}
871
872uint64_t
873dsl_dataset_create_sync(dsl_dir_t *pdd, const char *lastname,
874 dsl_dataset_t *origin, uint64_t flags, cred_t *cr, dmu_tx_t *tx)
875{
876 dsl_pool_t *dp = pdd->dd_pool;
877 uint64_t dsobj, ddobj;
878 dsl_dir_t *dd;
879
880 ASSERT(lastname[0] != '@');
881
882 ddobj = dsl_dir_create_sync(dp, pdd, lastname, tx);
883 VERIFY(0 == dsl_dir_open_obj(dp, ddobj, lastname, FTAG, &dd));
884
885 dsobj = dsl_dataset_create_sync_dd(dd, origin, flags, tx);
886
887 dsl_deleg_set_create_perms(dd, tx, cr);
888
889 dsl_dir_close(dd, FTAG);
890
891 /*
892 * If we are creating a clone, make sure we zero out any stale
893	 * data from the origin snapshot's zil header.
894 */
895 if (origin != NULL) {
896 dsl_dataset_t *ds;
897 objset_t *os;
898
899 VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));
900 VERIFY3U(0, ==, dmu_objset_from_ds(ds, &os));
901 bzero(&os->os_zil_header, sizeof (os->os_zil_header));
902 dsl_dataset_dirty(ds, tx);
903 dsl_dataset_rele(ds, FTAG);
904 }
905
906 return (dsobj);
907}
908
909struct destroyarg {
910 dsl_sync_task_group_t *dstg;
911 char *snapname;
912 char *failed;
913 boolean_t defer;
914};
915
916static int
917dsl_snapshot_destroy_one(const char *name, void *arg)
918{
919 struct destroyarg *da = arg;
920 dsl_dataset_t *ds;
921 int err;
922 char *dsname;
923
924 dsname = kmem_asprintf("%s@%s", name, da->snapname);
925 err = dsl_dataset_own(dsname, B_TRUE, da->dstg, &ds);
926 strfree(dsname);
927 if (err == 0) {
928 struct dsl_ds_destroyarg *dsda;
929
930 dsl_dataset_make_exclusive(ds, da->dstg);
931 dsda = kmem_zalloc(sizeof (struct dsl_ds_destroyarg), KM_SLEEP);
932 dsda->ds = ds;
933 dsda->defer = da->defer;
934 dsl_sync_task_create(da->dstg, dsl_dataset_destroy_check,
935 dsl_dataset_destroy_sync, dsda, da->dstg, 0);
936 } else if (err == ENOENT) {
937 err = 0;
938 } else {
939 (void) strcpy(da->failed, name);
940 }
941 return (err);
942}
943
944/*
945 * Destroy 'snapname' in all descendants of 'fsname'.
946 */
947#pragma weak dmu_snapshots_destroy = dsl_snapshots_destroy
948int
949dsl_snapshots_destroy(char *fsname, char *snapname, boolean_t defer)
950{
951 int err;
952 struct destroyarg da;
953 dsl_sync_task_t *dst;
954 spa_t *spa;
955
956 err = spa_open(fsname, &spa, FTAG);
957 if (err)
958 return (err);
959 da.dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
960 da.snapname = snapname;
961 da.failed = fsname;
962 da.defer = defer;
963
964 err = dmu_objset_find(fsname,
965 dsl_snapshot_destroy_one, &da, DS_FIND_CHILDREN);
966
967 if (err == 0)
968 err = dsl_sync_task_group_wait(da.dstg);
969
970 for (dst = list_head(&da.dstg->dstg_tasks); dst;
971 dst = list_next(&da.dstg->dstg_tasks, dst)) {
972 struct dsl_ds_destroyarg *dsda = dst->dst_arg1;
973 dsl_dataset_t *ds = dsda->ds;
974
975 /*
976 * Return the file system name that triggered the error
977 */
978 if (dst->dst_err) {
979 dsl_dataset_name(ds, fsname);
980 *strchr(fsname, '@') = '\0';
981 }
982 ASSERT3P(dsda->rm_origin, ==, NULL);
983 dsl_dataset_disown(ds, da.dstg);
984 kmem_free(dsda, sizeof (struct dsl_ds_destroyarg));
985 }
986
987 dsl_sync_task_group_destroy(da.dstg);
988 spa_close(spa, FTAG);
989 return (err);
990}
991
992static boolean_t
993dsl_dataset_might_destroy_origin(dsl_dataset_t *ds)
994{
995 boolean_t might_destroy = B_FALSE;
996
997 mutex_enter(&ds->ds_lock);
998 if (ds->ds_phys->ds_num_children == 2 && ds->ds_userrefs == 0 &&
999 DS_IS_DEFER_DESTROY(ds))
1000 might_destroy = B_TRUE;
1001 mutex_exit(&ds->ds_lock);
1002
1003 return (might_destroy);
1004}
1005
1006/*
1007 * If we're removing a clone, and these three conditions are true:
1008 * 1) the clone's origin has no other children
1009 * 2) the clone's origin has no user references
1010 * 3) the clone's origin has been marked for deferred destruction
1011 * Then, prepare to remove the origin as part of this sync task group.
1012 */
1013static int
1014dsl_dataset_origin_rm_prep(struct dsl_ds_destroyarg *dsda, void *tag)
1015{
1016 dsl_dataset_t *ds = dsda->ds;
1017 dsl_dataset_t *origin = ds->ds_prev;
1018
1019 if (dsl_dataset_might_destroy_origin(origin)) {
1020 char *name;
1021 int namelen;
1022 int error;
1023
1024 namelen = dsl_dataset_namelen(origin) + 1;
1025 name = kmem_alloc(namelen, KM_SLEEP);
1026 dsl_dataset_name(origin, name);
1027#ifdef _KERNEL
1028 error = zfs_unmount_snap(name, NULL);
1029 if (error) {
1030 kmem_free(name, namelen);
1031 return (error);
1032 }
1033#endif
1034 error = dsl_dataset_own(name, B_TRUE, tag, &origin);
1035 kmem_free(name, namelen);
1036 if (error)
1037 return (error);
1038 dsda->rm_origin = origin;
1039 dsl_dataset_make_exclusive(origin, tag);
1040 }
1041
1042 return (0);
1043}
1044
1045/*
1046 * ds must be opened as OWNER. On return (whether successful or not),
1047 * ds will be closed and caller can no longer dereference it.
1048 */
1049int
1050dsl_dataset_destroy(dsl_dataset_t *ds, void *tag, boolean_t defer)
1051{
1052 int err;
1053 dsl_sync_task_group_t *dstg;
1054 objset_t *os;
1055 dsl_dir_t *dd;
1056 uint64_t obj;
1057 struct dsl_ds_destroyarg dsda = { 0 };
1058 dsl_dataset_t dummy_ds = { 0 };
1059
1060 dsda.ds = ds;
1061
1062 if (dsl_dataset_is_snapshot(ds)) {
1063 /* Destroying a snapshot is simpler */
1064 dsl_dataset_make_exclusive(ds, tag);
1065
1066 dsda.defer = defer;
1067 err = dsl_sync_task_do(ds->ds_dir->dd_pool,
1068 dsl_dataset_destroy_check, dsl_dataset_destroy_sync,
1069 &dsda, tag, 0);
1070 ASSERT3P(dsda.rm_origin, ==, NULL);
1071 goto out;
1072 } else if (defer) {
1073 err = EINVAL;
1074 goto out;
1075 }
1076
1077 dd = ds->ds_dir;
1078 dummy_ds.ds_dir = dd;
1079 dummy_ds.ds_object = ds->ds_object;
1080
1081 /*
1082 * Check for errors and mark this ds as inconsistent, in
1083 * case we crash while freeing the objects.
1084 */
1085 err = dsl_sync_task_do(dd->dd_pool, dsl_dataset_destroy_begin_check,
1086 dsl_dataset_destroy_begin_sync, ds, NULL, 0);
1087 if (err)
1088 goto out;
1089
1090 err = dmu_objset_from_ds(ds, &os);
1091 if (err)
1092 goto out;
1093
1094 /*
1095 * remove the objects in open context, so that we won't
1096 * have too much to do in syncing context.
1097 */
1098 for (obj = 0; err == 0; err = dmu_object_next(os, &obj, FALSE,
1099 ds->ds_phys->ds_prev_snap_txg)) {
1100 /*
1101 * Ignore errors, if there is not enough disk space
1102 * we will deal with it in dsl_dataset_destroy_sync().
1103 */
1104 (void) dmu_free_object(os, obj);
1105 }
1106 if (err != ESRCH)
1107 goto out;
1108
1109 /*
1110 * Only the ZIL knows how to free log blocks.
1111 */
1112 zil_destroy(dmu_objset_zil(os), B_FALSE);
1113
1114 /*
1115 * Sync out all in-flight IO.
1116 */
1117 txg_wait_synced(dd->dd_pool, 0);
1118
1119 /*
1120 * If we managed to free all the objects in open
1121 * context, the user space accounting should be zero.
1122 */
1123 if (ds->ds_phys->ds_bp.blk_fill == 0 &&
1124 dmu_objset_userused_enabled(os)) {
1125 uint64_t count;
1126
1127 ASSERT(zap_count(os, DMU_USERUSED_OBJECT, &count) != 0 ||
1128 count == 0);
1129 ASSERT(zap_count(os, DMU_GROUPUSED_OBJECT, &count) != 0 ||
1130 count == 0);
1131 }
1132
1133 rw_enter(&dd->dd_pool->dp_config_rwlock, RW_READER);
1134 err = dsl_dir_open_obj(dd->dd_pool, dd->dd_object, NULL, FTAG, &dd);
1135 rw_exit(&dd->dd_pool->dp_config_rwlock);
1136
1137 if (err)
1138 goto out;
1139
1140 /*
1141 * Blow away the dsl_dir + head dataset.
1142 */
1143 dsl_dataset_make_exclusive(ds, tag);
1144 /*
1145 * If we're removing a clone, we might also need to remove its
1146 * origin.
1147 */
1148 do {
1149 dsda.need_prep = B_FALSE;
1150 if (dsl_dir_is_clone(dd)) {
1151 err = dsl_dataset_origin_rm_prep(&dsda, tag);
1152 if (err) {
1153 dsl_dir_close(dd, FTAG);
1154 goto out;
1155 }
1156 }
1157
1158 dstg = dsl_sync_task_group_create(ds->ds_dir->dd_pool);
1159 dsl_sync_task_create(dstg, dsl_dataset_destroy_check,
1160 dsl_dataset_destroy_sync, &dsda, tag, 0);
1161 dsl_sync_task_create(dstg, dsl_dir_destroy_check,
1162 dsl_dir_destroy_sync, &dummy_ds, FTAG, 0);
1163 err = dsl_sync_task_group_wait(dstg);
1164 dsl_sync_task_group_destroy(dstg);
1165
1166 /*
1167 * We could be racing against 'zfs release' or 'zfs destroy -d'
1168 * on the origin snap, in which case we can get EBUSY if we
1169 * needed to destroy the origin snap but were not ready to
1170 * do so.
1171 */
1172 if (dsda.need_prep) {
1173 ASSERT(err == EBUSY);
1174 ASSERT(dsl_dir_is_clone(dd));
1175 ASSERT(dsda.rm_origin == NULL);
1176 }
1177 } while (dsda.need_prep);
1178
1179 if (dsda.rm_origin != NULL)
1180 dsl_dataset_disown(dsda.rm_origin, tag);
1181
1182 /* if it is successful, dsl_dir_destroy_sync will close the dd */
1183 if (err)
1184 dsl_dir_close(dd, FTAG);
1185out:
1186 dsl_dataset_disown(ds, tag);
1187 return (err);
1188}
1189
1190blkptr_t *
1191dsl_dataset_get_blkptr(dsl_dataset_t *ds)
1192{
1193 return (&ds->ds_phys->ds_bp);
1194}
1195
1196void
1197dsl_dataset_set_blkptr(dsl_dataset_t *ds, blkptr_t *bp, dmu_tx_t *tx)
1198{
1199 ASSERT(dmu_tx_is_syncing(tx));
1200 /* If it's the meta-objset, set dp_meta_rootbp */
1201 if (ds == NULL) {
1202 tx->tx_pool->dp_meta_rootbp = *bp;
1203 } else {
1204 dmu_buf_will_dirty(ds->ds_dbuf, tx);
1205 ds->ds_phys->ds_bp = *bp;
1206 }
1207}
1208
1209spa_t *
1210dsl_dataset_get_spa(dsl_dataset_t *ds)
1211{
1212 return (ds->ds_dir->dd_pool->dp_spa);
1213}
1214
1215void
1216dsl_dataset_dirty(dsl_dataset_t *ds, dmu_tx_t *tx)
1217{
1218 dsl_pool_t *dp;
1219
1220 if (ds == NULL) /* this is the meta-objset */
1221 return;
1222
1223 ASSERT(ds->ds_objset != NULL);
1224
1225 if (ds->ds_phys->ds_next_snap_obj != 0)
1226 panic("dirtying snapshot!");
1227
1228 dp = ds->ds_dir->dd_pool;
1229
1230 if (txg_list_add(&dp->dp_dirty_datasets, ds, tx->tx_txg) == 0) {
1231 /* up the hold count until we can be written out */
1232 dmu_buf_add_ref(ds->ds_dbuf, ds);
1233 }
1234}
1235
1236/*
1237 * The unique space in the head dataset can be calculated by subtracting
1238 * the space used in the most recent snapshot, that is still being used
1239 * in this file system, from the space currently in use. To figure out
1240 * the space in the most recent snapshot still in use, we need to take
1241 * the total space used in the snapshot and subtract out the space that
1242 * has been freed up since the snapshot was taken.
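 *
 * Expressed as a formula (matching the assignment below):
 *
 *	unique_bytes = used_bytes - (mrs_used - deadlist_used)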
1243 */
1244static void
1245dsl_dataset_recalc_head_uniq(dsl_dataset_t *ds)
1246{
1247 uint64_t mrs_used;
1248 uint64_t dlused, dlcomp, dluncomp;
1249
1250 ASSERT(!dsl_dataset_is_snapshot(ds));
1251
1252 if (ds->ds_phys->ds_prev_snap_obj != 0)
1253 mrs_used = ds->ds_prev->ds_phys->ds_used_bytes;
1254 else
1255 mrs_used = 0;
1256
1257 dsl_deadlist_space(&ds->ds_deadlist, &dlused, &dlcomp, &dluncomp);
1258
1259 ASSERT3U(dlused, <=, mrs_used);
1260 ds->ds_phys->ds_unique_bytes =
1261 ds->ds_phys->ds_used_bytes - (mrs_used - dlused);
1262
1263 if (spa_version(ds->ds_dir->dd_pool->dp_spa) >=
1264 SPA_VERSION_UNIQUE_ACCURATE)
1265 ds->ds_phys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;
1266}
1267
1268struct killarg {
1269 dsl_dataset_t *ds;
1270 dmu_tx_t *tx;
1271};
1272
1273/* ARGSUSED */
1274static int
1275kill_blkptr(spa_t *spa, zilog_t *zilog, const blkptr_t *bp, arc_buf_t *pbuf,
1276 const zbookmark_t *zb, const dnode_phys_t *dnp, void *arg)
1277{
1278 struct killarg *ka = arg;
1279 dmu_tx_t *tx = ka->tx;
1280
1281 if (bp == NULL)
1282 return (0);
1283
1284 if (zb->zb_level == ZB_ZIL_LEVEL) {
1285 ASSERT(zilog != NULL);
1286 /*
1287 * It's a block in the intent log. It has no
1288 * accounting, so just free it.
1289 */
1290 dsl_free(ka->tx->tx_pool, ka->tx->tx_txg, bp);
1291 } else {
1292 ASSERT(zilog == NULL);
1293 ASSERT3U(bp->blk_birth, >, ka->ds->ds_phys->ds_prev_snap_txg);
1294 (void) dsl_dataset_block_kill(ka->ds, bp, tx, B_FALSE);
1295 }
1296
1297 return (0);
1298}
1299
1300/* ARGSUSED */
1301static int
1302dsl_dataset_destroy_begin_check(void *arg1, void *arg2, dmu_tx_t *tx)
1303{
1304 dsl_dataset_t *ds = arg1;
1305 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
1306 uint64_t count;
1307 int err;
1308
1309 /*
1310 * Can't delete a head dataset if there are snapshots of it.
1311 * (Except if the only snapshots are from the branch we cloned
1312 * from.)
1313 */
1314 if (ds->ds_prev != NULL &&
1315 ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object)
1316 return (EBUSY);
1317
1318 /*
1319 * This is really a dsl_dir thing, but check it here so that
1320 * we'll be less likely to leave this dataset inconsistent &
1321 * nearly destroyed.
1322 */
1323 err = zap_count(mos, ds->ds_dir->dd_phys->dd_child_dir_zapobj, &count);
1324 if (err)
1325 return (err);
1326 if (count != 0)
1327 return (EEXIST);
1328
1329 return (0);
1330}
1331
1332/* ARGSUSED */
1333static void
1334dsl_dataset_destroy_begin_sync(void *arg1, void *arg2, dmu_tx_t *tx)
1335{
1336 dsl_dataset_t *ds = arg1;
1337 dsl_pool_t *dp = ds->ds_dir->dd_pool;
1338
1339 /* Mark it as inconsistent on-disk, in case we crash */
1340 dmu_buf_will_dirty(ds->ds_dbuf, tx);
1341 ds->ds_phys->ds_flags |= DS_FLAG_INCONSISTENT;
1342
1343 spa_history_log_internal(LOG_DS_DESTROY_BEGIN, dp->dp_spa, tx,
1344 "dataset = %llu", ds->ds_object);
1345}
1346
1347static int
1348dsl_dataset_origin_check(struct dsl_ds_destroyarg *dsda, void *tag,
1349 dmu_tx_t *tx)
1350{
1351 dsl_dataset_t *ds = dsda->ds;
1352 dsl_dataset_t *ds_prev = ds->ds_prev;
1353
1354 if (dsl_dataset_might_destroy_origin(ds_prev)) {
1355 struct dsl_ds_destroyarg ndsda = {0};
1356
1357 /*
1358 * If we're not prepared to remove the origin, don't remove
1359 * the clone either.
1360 */
1361 if (dsda->rm_origin == NULL) {
1362 dsda->need_prep = B_TRUE;
1363 return (EBUSY);
1364 }
1365
1366 ndsda.ds = ds_prev;
1367 ndsda.is_origin_rm = B_TRUE;
1368 return (dsl_dataset_destroy_check(&ndsda, tag, tx));
1369 }
1370
1371 /*
1372 * If we're not going to remove the origin after all,
1373 * undo the open context setup.
1374 */
1375 if (dsda->rm_origin != NULL) {
1376 dsl_dataset_disown(dsda->rm_origin, tag);
1377 dsda->rm_origin = NULL;
1378 }
1379
1380 return (0);
1381}
1382
1383/*
1384 * If you add new checks here, you may need to add
1385 * additional checks to the "temporary" case in
1386 * snapshot_check() in dmu_objset.c.
1387 */
1388/* ARGSUSED */
1389int
1390dsl_dataset_destroy_check(void *arg1, void *arg2, dmu_tx_t *tx)
1391{
1392 struct dsl_ds_destroyarg *dsda = arg1;
1393 dsl_dataset_t *ds = dsda->ds;
1394
1395	/* we have an owner hold, so no one else can destroy us */
1396 ASSERT(!DSL_DATASET_IS_DESTROYED(ds));
1397
1398 /*
1399 * Only allow deferred destroy on pools that support it.
1400 * NOTE: deferred destroy is only supported on snapshots.
1401 */
1402 if (dsda->defer) {
1403 if (spa_version(ds->ds_dir->dd_pool->dp_spa) <
1404 SPA_VERSION_USERREFS)
1405 return (ENOTSUP);
1406 ASSERT(dsl_dataset_is_snapshot(ds));
1407 return (0);
1408 }
1409
1410 /*
1411 * Can't delete a head dataset if there are snapshots of it.
1412 * (Except if the only snapshots are from the branch we cloned
1413 * from.)
1414 */
1415 if (ds->ds_prev != NULL &&
1416 ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object)
1417 return (EBUSY);
1418
1419 /*
1420	 * If we made changes this txg, traverse_dataset() won't find
1421 * them. Try again.
1422 */
1423 if (ds->ds_phys->ds_bp.blk_birth >= tx->tx_txg)
1424 return (EAGAIN);
1425
1426 if (dsl_dataset_is_snapshot(ds)) {
1427 /*
1428 * If this snapshot has an elevated user reference count,
1429 * we can't destroy it yet.
1430 */
1431 if (ds->ds_userrefs > 0 && !dsda->releasing)
1432 return (EBUSY);
1433
1434 mutex_enter(&ds->ds_lock);
1435 /*
1436 * Can't delete a branch point. However, if we're destroying
1437 * a clone and removing its origin due to it having a user
1438 * hold count of 0 and having been marked for deferred destroy,
1439 * it's OK for the origin to have a single clone.
1440 */
1441 if (ds->ds_phys->ds_num_children >
1442 (dsda->is_origin_rm ? 2 : 1)) {
1443 mutex_exit(&ds->ds_lock);
1444 return (EEXIST);
1445 }
1446 mutex_exit(&ds->ds_lock);
1447 } else if (dsl_dir_is_clone(ds->ds_dir)) {
1448 return (dsl_dataset_origin_check(dsda, arg2, tx));
1449 }
1450
1451 /* XXX we should do some i/o error checking... */
1452 return (0);
1453}
1454
1455struct refsarg {
1456 kmutex_t lock;
1457 boolean_t gone;
1458 kcondvar_t cv;
1459};
1460
1461/* ARGSUSED */
1462static void
1463dsl_dataset_refs_gone(dmu_buf_t *db, void *argv)
1464{
1465 struct refsarg *arg = argv;
1466
1467 mutex_enter(&arg->lock);
1468 arg->gone = TRUE;
1469 cv_signal(&arg->cv);
1470 mutex_exit(&arg->lock);
1471}
1472
1473static void
1474dsl_dataset_drain_refs(dsl_dataset_t *ds, void *tag)
1475{
1476 struct refsarg arg;
1477
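	/*
	 * Install a temporary eviction callback, drop our own hold, and
	 * wait until every remaining hold on the dbuf is released (the
	 * callback signals the cv once the user record is evicted).
	 */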
1478	bzero(&arg, sizeof (arg));
1479 mutex_init(&arg.lock, NULL, MUTEX_DEFAULT, NULL);
1480 cv_init(&arg.cv, NULL, CV_DEFAULT, NULL);
1481 arg.gone = FALSE;
1482 (void) dmu_buf_update_user(ds->ds_dbuf, ds, &arg, &ds->ds_phys,
1483 dsl_dataset_refs_gone);
1484 dmu_buf_rele(ds->ds_dbuf, tag);
1485 mutex_enter(&arg.lock);
1486 while (!arg.gone)
1487 cv_wait(&arg.cv, &arg.lock);
1488 ASSERT(arg.gone);
1489 mutex_exit(&arg.lock);
1490 ds->ds_dbuf = NULL;
1491 ds->ds_phys = NULL;
1492 mutex_destroy(&arg.lock);
1493 cv_destroy(&arg.cv);
1494}
1495
1496static void
1497remove_from_next_clones(dsl_dataset_t *ds, uint64_t obj, dmu_tx_t *tx)
1498{
1499 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
1500 uint64_t count;
1501 int err;
1502
1503 ASSERT(ds->ds_phys->ds_num_children >= 2);
1504 err = zap_remove_int(mos, ds->ds_phys->ds_next_clones_obj, obj, tx);
1505 /*
1506 * The err should not be ENOENT, but a bug in a previous version
1507 * of the code could cause upgrade_clones_cb() to not set
1508 * ds_next_snap_obj when it should, leading to a missing entry.
1509 * If we knew that the pool was created after
1510 * SPA_VERSION_NEXT_CLONES, we could assert that it isn't
1511 * ENOENT. However, at least we can check that we don't have
1512 * too many entries in the next_clones_obj even after failing to
1513 * remove this one.
1514 */
1515 if (err != ENOENT) {
1516 VERIFY3U(err, ==, 0);
1517 }
1518 ASSERT3U(0, ==, zap_count(mos, ds->ds_phys->ds_next_clones_obj,
1519 &count));
1520 ASSERT3U(count, <=, ds->ds_phys->ds_num_children - 2);
1521}
1522
1523static void
1524dsl_dataset_remove_clones_key(dsl_dataset_t *ds, uint64_t mintxg, dmu_tx_t *tx)
1525{
1526 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
1527 zap_cursor_t zc;
1528 zap_attribute_t za;
1529
1530 /*
1531 * If it is the old version, dd_clones doesn't exist so we can't
1532 * find the clones, but deadlist_remove_key() is a no-op so it
1533 * doesn't matter.
1534 */
1535 if (ds->ds_dir->dd_phys->dd_clones == 0)
1536 return;
1537
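	/*
	 * Walk this dataset's clones; for any clone branched off after
	 * mintxg, drop the key from its deadlist and recurse into the
	 * clone's own clones.
	 */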
1538 for (zap_cursor_init(&zc, mos, ds->ds_dir->dd_phys->dd_clones);
1539 zap_cursor_retrieve(&zc, &za) == 0;
1540 zap_cursor_advance(&zc)) {
1541 dsl_dataset_t *clone;
1542
1543 VERIFY3U(0, ==, dsl_dataset_hold_obj(ds->ds_dir->dd_pool,
1544 za.za_first_integer, FTAG, &clone));
1545 if (clone->ds_dir->dd_origin_txg > mintxg) {
1546 dsl_deadlist_remove_key(&clone->ds_deadlist,
1547 mintxg, tx);
1548 dsl_dataset_remove_clones_key(clone, mintxg, tx);
1549 }
1550 dsl_dataset_rele(clone, FTAG);
1551 }
1552 zap_cursor_fini(&zc);
1553}
1554
1555struct process_old_arg {
1556 dsl_dataset_t *ds;
1557 dsl_dataset_t *ds_prev;
1558 boolean_t after_branch_point;
1559 zio_t *pio;
1560 uint64_t used, comp, uncomp;
1561};
1562
1563static int
1564process_old_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
1565{
1566 struct process_old_arg *poa = arg;
1567 dsl_pool_t *dp = poa->ds->ds_dir->dd_pool;
1568
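	/*
	 * A block born before this snapshot is still held down by an older
	 * snapshot, so it moves onto our deadlist (and may become unique to
	 * the previous snapshot).  A block born in this snapshot is now dead
	 * for good: free it and tally the reclaimed space.
	 */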
1569 if (bp->blk_birth <= poa->ds->ds_phys->ds_prev_snap_txg) {
1570 dsl_deadlist_insert(&poa->ds->ds_deadlist, bp, tx);
1571 if (poa->ds_prev && !poa->after_branch_point &&
1572 bp->blk_birth >
1573 poa->ds_prev->ds_phys->ds_prev_snap_txg) {
1574 poa->ds_prev->ds_phys->ds_unique_bytes +=
1575 bp_get_dsize_sync(dp->dp_spa, bp);
1576 }
1577 } else {
1578 poa->used += bp_get_dsize_sync(dp->dp_spa, bp);
1579 poa->comp += BP_GET_PSIZE(bp);
1580 poa->uncomp += BP_GET_UCSIZE(bp);
1581 dsl_free_sync(poa->pio, dp, tx->tx_txg, bp);
1582 }
1583 return (0);
1584}
1585
1586static void
1587process_old_deadlist(dsl_dataset_t *ds, dsl_dataset_t *ds_prev,
1588 dsl_dataset_t *ds_next, boolean_t after_branch_point, dmu_tx_t *tx)
1589{
1590 struct process_old_arg poa = { 0 };
1591 dsl_pool_t *dp = ds->ds_dir->dd_pool;
1592 objset_t *mos = dp->dp_meta_objset;
1593
1594 ASSERT(ds->ds_deadlist.dl_oldfmt);
1595 ASSERT(ds_next->ds_deadlist.dl_oldfmt);
1596
1597 poa.ds = ds;
1598 poa.ds_prev = ds_prev;
1599 poa.after_branch_point = after_branch_point;
1600 poa.pio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
1601 VERIFY3U(0, ==, bpobj_iterate(&ds_next->ds_deadlist.dl_bpobj,
1602 process_old_cb, &poa, tx));
1603 VERIFY3U(zio_wait(poa.pio), ==, 0);
1604 ASSERT3U(poa.used, ==, ds->ds_phys->ds_unique_bytes);
1605
1606 /* change snapused */
1607 dsl_dir_diduse_space(ds->ds_dir, DD_USED_SNAP,
1608 -poa.used, -poa.comp, -poa.uncomp, tx);
1609
1610 /* swap next's deadlist to our deadlist */
1611 dsl_deadlist_close(&ds->ds_deadlist);
1612 dsl_deadlist_close(&ds_next->ds_deadlist);
1613 SWITCH64(ds_next->ds_phys->ds_deadlist_obj,
1614 ds->ds_phys->ds_deadlist_obj);
1615 dsl_deadlist_open(&ds->ds_deadlist, mos, ds->ds_phys->ds_deadlist_obj);
1616 dsl_deadlist_open(&ds_next->ds_deadlist, mos,
1617 ds_next->ds_phys->ds_deadlist_obj);
1618}
1619
1620void
1621dsl_dataset_destroy_sync(void *arg1, void *tag, dmu_tx_t *tx)
1622{
1623 struct dsl_ds_destroyarg *dsda = arg1;
1624 dsl_dataset_t *ds = dsda->ds;
1625 int err;
1626 int after_branch_point = FALSE;
1627 dsl_pool_t *dp = ds->ds_dir->dd_pool;
1628 objset_t *mos = dp->dp_meta_objset;
1629 dsl_dataset_t *ds_prev = NULL;
1630 boolean_t wont_destroy;
1631 uint64_t obj;
1632
1633 wont_destroy = (dsda->defer &&
1634 (ds->ds_userrefs > 0 || ds->ds_phys->ds_num_children > 1));
1635
1636 ASSERT(ds->ds_owner || wont_destroy);
1637 ASSERT(dsda->defer || ds->ds_phys->ds_num_children <= 1);
1638 ASSERT(ds->ds_prev == NULL ||
1639 ds->ds_prev->ds_phys->ds_next_snap_obj != ds->ds_object);
1640 ASSERT3U(ds->ds_phys->ds_bp.blk_birth, <=, tx->tx_txg);
1641
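	/*
	 * A deferred destroy of a snapshot that still has user holds or
	 * clones only marks it DS_FLAG_DEFER_DESTROY; the actual destroy
	 * happens later, once the last hold or clone goes away.
	 */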
1642 if (wont_destroy) {
1643 ASSERT(spa_version(dp->dp_spa) >= SPA_VERSION_USERREFS);
1644 dmu_buf_will_dirty(ds->ds_dbuf, tx);
1645 ds->ds_phys->ds_flags |= DS_FLAG_DEFER_DESTROY;
1646 return;
1647 }
1648
1649 /* signal any waiters that this dataset is going away */
1650 mutex_enter(&ds->ds_lock);
1651 ds->ds_owner = dsl_reaper;
1652 cv_broadcast(&ds->ds_exclusive_cv);
1653 mutex_exit(&ds->ds_lock);
1654
1655 /* Remove our reservation */
1656 if (ds->ds_reserved != 0) {
1657 dsl_prop_setarg_t psa;
1658 uint64_t value = 0;
1659
1660 dsl_prop_setarg_init_uint64(&psa, "refreservation",
1661 (ZPROP_SRC_NONE | ZPROP_SRC_LOCAL | ZPROP_SRC_RECEIVED),
1662 &value);
1663 psa.psa_effective_value = 0; /* predict default value */
1664
1665 dsl_dataset_set_reservation_sync(ds, &psa, tx);
1666 ASSERT3U(ds->ds_reserved, ==, 0);
1667 }
1668
1669 ASSERT(RW_WRITE_HELD(&dp->dp_config_rwlock));
1670
1671 dsl_scan_ds_destroyed(ds, tx);
1672
1673 obj = ds->ds_object;
1674
1675 if (ds->ds_phys->ds_prev_snap_obj != 0) {
1676 if (ds->ds_prev) {
1677 ds_prev = ds->ds_prev;
1678 } else {
1679 VERIFY(0 == dsl_dataset_hold_obj(dp,
1680 ds->ds_phys->ds_prev_snap_obj, FTAG, &ds_prev));
1681 }
1682 after_branch_point =
1683 (ds_prev->ds_phys->ds_next_snap_obj != obj);
1684
1685 dmu_buf_will_dirty(ds_prev->ds_dbuf, tx);
1686 if (after_branch_point &&
1687 ds_prev->ds_phys->ds_next_clones_obj != 0) {
1688 remove_from_next_clones(ds_prev, obj, tx);
1689 if (ds->ds_phys->ds_next_snap_obj != 0) {
1690 VERIFY(0 == zap_add_int(mos,
1691 ds_prev->ds_phys->ds_next_clones_obj,
1692 ds->ds_phys->ds_next_snap_obj, tx));
1693 }
1694 }
1695 if (after_branch_point &&
1696 ds->ds_phys->ds_next_snap_obj == 0) {
1697 /* This clone is toast. */
1698 ASSERT(ds_prev->ds_phys->ds_num_children > 1);
1699 ds_prev->ds_phys->ds_num_children--;
1700
1701 /*
1702 * If the clone's origin has no other clones, no
1703 * user holds, and has been marked for deferred
1704 * deletion, then we should have done the necessary
1705 * destroy setup for it.
1706 */
1707 if (ds_prev->ds_phys->ds_num_children == 1 &&
1708 ds_prev->ds_userrefs == 0 &&
1709 DS_IS_DEFER_DESTROY(ds_prev)) {
1710 ASSERT3P(dsda->rm_origin, !=, NULL);
1711 } else {
1712 ASSERT3P(dsda->rm_origin, ==, NULL);
1713 }
1714 } else if (!after_branch_point) {
1715 ds_prev->ds_phys->ds_next_snap_obj =
1716 ds->ds_phys->ds_next_snap_obj;
1717 }
1718 }
1719
1720 if (dsl_dataset_is_snapshot(ds)) {
1721 dsl_dataset_t *ds_next;
1722 uint64_t old_unique;
1723 uint64_t used = 0, comp = 0, uncomp = 0;
1724
1725 VERIFY(0 == dsl_dataset_hold_obj(dp,
1726 ds->ds_phys->ds_next_snap_obj, FTAG, &ds_next));
1727 ASSERT3U(ds_next->ds_phys->ds_prev_snap_obj, ==, obj);
1728
1729 old_unique = ds_next->ds_phys->ds_unique_bytes;
1730
1731 dmu_buf_will_dirty(ds_next->ds_dbuf, tx);
1732 ds_next->ds_phys->ds_prev_snap_obj =
1733 ds->ds_phys->ds_prev_snap_obj;
1734 ds_next->ds_phys->ds_prev_snap_txg =
1735 ds->ds_phys->ds_prev_snap_txg;
1736 ASSERT3U(ds->ds_phys->ds_prev_snap_txg, ==,
1737 ds_prev ? ds_prev->ds_phys->ds_creation_txg : 0);
1738
1739
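		/*
		 * If next's deadlist is still in the old on-disk format
		 * (a single bpobj with no per-txg sub-lists), we cannot
		 * manipulate it by txg range and must walk it one block
		 * pointer at a time.
		 */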
1740 if (ds_next->ds_deadlist.dl_oldfmt) {
1741 process_old_deadlist(ds, ds_prev, ds_next,
1742 after_branch_point, tx);
1743 } else {
1744 /* Adjust prev's unique space. */
1745 if (ds_prev && !after_branch_point) {
1746 dsl_deadlist_space_range(&ds_next->ds_deadlist,
1747 ds_prev->ds_phys->ds_prev_snap_txg,
1748 ds->ds_phys->ds_prev_snap_txg,
1749 &used, &comp, &uncomp);
1750 ds_prev->ds_phys->ds_unique_bytes += used;
1751 }
1752
1753 /* Adjust snapused. */
1754 dsl_deadlist_space_range(&ds_next->ds_deadlist,
1755 ds->ds_phys->ds_prev_snap_txg, UINT64_MAX,
1756 &used, &comp, &uncomp);
1757 dsl_dir_diduse_space(ds->ds_dir, DD_USED_SNAP,
1758 -used, -comp, -uncomp, tx);
1759
1760 /* Move blocks to be freed to pool's free list. */
1761 dsl_deadlist_move_bpobj(&ds_next->ds_deadlist,
1762 &dp->dp_free_bpobj, ds->ds_phys->ds_prev_snap_txg,
1763 tx);
1764 dsl_dir_diduse_space(tx->tx_pool->dp_free_dir,
1765 DD_USED_HEAD, used, comp, uncomp, tx);
1766 dsl_dir_dirty(tx->tx_pool->dp_free_dir, tx);
1767
1768 /* Merge our deadlist into next's and free it. */
1769 dsl_deadlist_merge(&ds_next->ds_deadlist,
1770 ds->ds_phys->ds_deadlist_obj, tx);
1771 }
1772 dsl_deadlist_close(&ds->ds_deadlist);
1773 dsl_deadlist_free(mos, ds->ds_phys->ds_deadlist_obj, tx);
1774
1775 /* Collapse range in clone heads */
1776 dsl_dataset_remove_clones_key(ds,
1777 ds->ds_phys->ds_creation_txg, tx);
1778
1779 if (dsl_dataset_is_snapshot(ds_next)) {
1780 dsl_dataset_t *ds_nextnext;
1781
1782 /*
1783 * Update next's unique to include blocks which
1784 * were previously shared by only this snapshot
1785 * and it. Those blocks will be born after the
1786 * prev snap and before this snap, and will have
1787 * died after the next snap and before the one
1788 			 * after that (i.e. be on the snap-after-next's
1789 * deadlist).
1790 */
1791 VERIFY(0 == dsl_dataset_hold_obj(dp,
1792 ds_next->ds_phys->ds_next_snap_obj,
1793 FTAG, &ds_nextnext));
1794 dsl_deadlist_space_range(&ds_nextnext->ds_deadlist,
1795 ds->ds_phys->ds_prev_snap_txg,
1796 ds->ds_phys->ds_creation_txg,
1797 &used, &comp, &uncomp);
1798 ds_next->ds_phys->ds_unique_bytes += used;
1799 dsl_dataset_rele(ds_nextnext, FTAG);
1800 ASSERT3P(ds_next->ds_prev, ==, NULL);
1801
1802 /* Collapse range in this head. */
1803 dsl_dataset_t *hds;
1804 VERIFY3U(0, ==, dsl_dataset_hold_obj(dp,
1805 ds->ds_dir->dd_phys->dd_head_dataset_obj,
1806 FTAG, &hds));
1807 dsl_deadlist_remove_key(&hds->ds_deadlist,
1808 ds->ds_phys->ds_creation_txg, tx);
1809 dsl_dataset_rele(hds, FTAG);
1810
1811 } else {
1812 ASSERT3P(ds_next->ds_prev, ==, ds);
1813 dsl_dataset_drop_ref(ds_next->ds_prev, ds_next);
1814 ds_next->ds_prev = NULL;
1815 if (ds_prev) {
1816 VERIFY(0 == dsl_dataset_get_ref(dp,
1817 ds->ds_phys->ds_prev_snap_obj,
1818 ds_next, &ds_next->ds_prev));
1819 }
1820
1821 dsl_dataset_recalc_head_uniq(ds_next);
1822
1823 /*
1824 			 * Reduce the amount of our unconsumed refreservation
1825 * being charged to our parent by the amount of
1826 * new unique data we have gained.
1827 */
1828 if (old_unique < ds_next->ds_reserved) {
1829 int64_t mrsdelta;
1830 uint64_t new_unique =
1831 ds_next->ds_phys->ds_unique_bytes;
1832
1833 ASSERT(old_unique <= new_unique);
1834 mrsdelta = MIN(new_unique - old_unique,
1835 ds_next->ds_reserved - old_unique);
1836 dsl_dir_diduse_space(ds->ds_dir,
1837 DD_USED_REFRSRV, -mrsdelta, 0, 0, tx);
1838 }
1839 }
1840 dsl_dataset_rele(ds_next, FTAG);
1841 } else {
1842 /*
1843 * There's no next snapshot, so this is a head dataset.
1844 * Destroy the deadlist. Unless it's a clone, the
1845 * deadlist should be empty. (If it's a clone, it's
1846 * safe to ignore the deadlist contents.)
1847 */
1848 struct killarg ka;
1849
1850 dsl_deadlist_close(&ds->ds_deadlist);
1851 dsl_deadlist_free(mos, ds->ds_phys->ds_deadlist_obj, tx);
1852 ds->ds_phys->ds_deadlist_obj = 0;
1853
1854 /*
1855 * Free everything that we point to (that's born after
1856 * the previous snapshot, if we are a clone)
1857 *
1858 * NB: this should be very quick, because we already
1859 * freed all the objects in open context.
1860 */
1861 ka.ds = ds;
1862 ka.tx = tx;
1863 err = traverse_dataset(ds, ds->ds_phys->ds_prev_snap_txg,
1864 TRAVERSE_POST, kill_blkptr, &ka);
1865 ASSERT3U(err, ==, 0);
1866 ASSERT(!DS_UNIQUE_IS_ACCURATE(ds) ||
1867 ds->ds_phys->ds_unique_bytes == 0);
1868
1869 if (ds->ds_prev != NULL) {
1870 if (spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
1871 VERIFY3U(0, ==, zap_remove_int(mos,
1872 ds->ds_prev->ds_dir->dd_phys->dd_clones,
1873 ds->ds_object, tx));
1874 }
1875 dsl_dataset_rele(ds->ds_prev, ds);
1876 ds->ds_prev = ds_prev = NULL;
1877 }
1878 }
1879
1880 /*
1881 	 * This must be done after the traverse_dataset() call above,
1882 	 * because that traversal will re-open the objset.
1883 */
1884 if (ds->ds_objset) {
1885 dmu_objset_evict(ds->ds_objset);
1886 ds->ds_objset = NULL;
1887 }
1888
1889 if (ds->ds_dir->dd_phys->dd_head_dataset_obj == ds->ds_object) {
1890 /* Erase the link in the dir */
1891 dmu_buf_will_dirty(ds->ds_dir->dd_dbuf, tx);
1892 ds->ds_dir->dd_phys->dd_head_dataset_obj = 0;
1893 ASSERT(ds->ds_phys->ds_snapnames_zapobj != 0);
1894 err = zap_destroy(mos, ds->ds_phys->ds_snapnames_zapobj, tx);
1895 ASSERT(err == 0);
1896 } else {
1897 /* remove from snapshot namespace */
1898 dsl_dataset_t *ds_head;
1899 ASSERT(ds->ds_phys->ds_snapnames_zapobj == 0);
1900 VERIFY(0 == dsl_dataset_hold_obj(dp,
1901 ds->ds_dir->dd_phys->dd_head_dataset_obj, FTAG, &ds_head));
1902 VERIFY(0 == dsl_dataset_get_snapname(ds));
1903#ifdef ZFS_DEBUG
1904 {
1905 uint64_t val;
1906
1907 err = dsl_dataset_snap_lookup(ds_head,
1908 ds->ds_snapname, &val);
1909 ASSERT3U(err, ==, 0);
1910 ASSERT3U(val, ==, obj);
1911 }
1912#endif
1913 err = dsl_dataset_snap_remove(ds_head, ds->ds_snapname, tx);
1914 ASSERT(err == 0);
1915 dsl_dataset_rele(ds_head, FTAG);
1916 }
1917
1918 if (ds_prev && ds->ds_prev != ds_prev)
1919 dsl_dataset_rele(ds_prev, FTAG);
1920
1921 spa_prop_clear_bootfs(dp->dp_spa, ds->ds_object, tx);
1922 spa_history_log_internal(LOG_DS_DESTROY, dp->dp_spa, tx,
1923 "dataset = %llu", ds->ds_object);
1924
1925 if (ds->ds_phys->ds_next_clones_obj != 0) {
1926 uint64_t count;
1927 ASSERT(0 == zap_count(mos,
1928 ds->ds_phys->ds_next_clones_obj, &count) && count == 0);
1929 VERIFY(0 == dmu_object_free(mos,
1930 ds->ds_phys->ds_next_clones_obj, tx));
1931 }
1932 if (ds->ds_phys->ds_props_obj != 0)
1933 VERIFY(0 == zap_destroy(mos, ds->ds_phys->ds_props_obj, tx));
1934 if (ds->ds_phys->ds_userrefs_obj != 0)
1935 VERIFY(0 == zap_destroy(mos, ds->ds_phys->ds_userrefs_obj, tx));
1936 dsl_dir_close(ds->ds_dir, ds);
1937 ds->ds_dir = NULL;
1938 dsl_dataset_drain_refs(ds, tag);
1939 VERIFY(0 == dmu_object_free(mos, obj, tx));
1940
1941 if (dsda->rm_origin) {
1942 /*
1943 * Remove the origin of the clone we just destroyed.
1944 */
1945 struct dsl_ds_destroyarg ndsda = {0};
1946
1947 ndsda.ds = dsda->rm_origin;
1948 dsl_dataset_destroy_sync(&ndsda, tag, tx);
1949 }
1950}
1951
1952static int
1953dsl_dataset_snapshot_reserve_space(dsl_dataset_t *ds, dmu_tx_t *tx)
1954{
1955 uint64_t asize;
1956
1957 if (!dmu_tx_is_syncing(tx))
1958 return (0);
1959
1960 /*
1961 * If there's an fs-only reservation, any blocks that might become
1962 * owned by the snapshot dataset must be accommodated by space
1963 * outside of the reservation.
1964 */
1965 ASSERT(ds->ds_reserved == 0 || DS_UNIQUE_IS_ACCURATE(ds));
1966 asize = MIN(ds->ds_phys->ds_unique_bytes, ds->ds_reserved);
1967 if (asize > dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE))
1968 return (ENOSPC);
1969
1970 /*
1971 	 * Propagate any reserved space for this snapshot to other
1972 * snapshot checks in this sync group.
1973 */
1974 if (asize > 0)
1975 dsl_dir_willuse_space(ds->ds_dir, asize, tx);
1976
1977 return (0);
1978}
1979
1980int
1981dsl_dataset_snapshot_check(void *arg1, void *arg2, dmu_tx_t *tx)
1982{
1983 dsl_dataset_t *ds = arg1;
1984 const char *snapname = arg2;
1985 int err;
1986 uint64_t value;
1987
1988 /*
1989 * We don't allow multiple snapshots of the same txg. If there
1990 * is already one, try again.
1991 */
1992 if (ds->ds_phys->ds_prev_snap_txg >= tx->tx_txg)
1993 return (EAGAIN);
1994
1995 /*
1996 	 * Check for a conflicting snapshot name.
1997 */
1998 err = dsl_dataset_snap_lookup(ds, snapname, &value);
1999 if (err == 0)
2000 return (EEXIST);
2001 if (err != ENOENT)
2002 return (err);
2003
2004 /*
2005 	 * Check that the resulting snapshot name is not too long: it is the
2006 	 * dataset name's length + 1 for the @-sign + the snapshot name's length.
2007 */
2008 if (dsl_dataset_namelen(ds) + 1 + strlen(snapname) >= MAXNAMELEN)
2009 return (ENAMETOOLONG);
2010
2011 err = dsl_dataset_snapshot_reserve_space(ds, tx);
2012 if (err)
2013 return (err);
2014
2015 ds->ds_trysnap_txg = tx->tx_txg;
2016 return (0);
2017}
2018
2019void
2020dsl_dataset_snapshot_sync(void *arg1, void *arg2, dmu_tx_t *tx)
2021{
2022 dsl_dataset_t *ds = arg1;
2023 const char *snapname = arg2;
2024 dsl_pool_t *dp = ds->ds_dir->dd_pool;
2025 dmu_buf_t *dbuf;
2026 dsl_dataset_phys_t *dsphys;
2027 uint64_t dsobj, crtxg;
2028 objset_t *mos = dp->dp_meta_objset;
2029 int err;
2030
2031 ASSERT(RW_WRITE_HELD(&dp->dp_config_rwlock));
2032
2033 /*
2034 * The origin's ds_creation_txg has to be < TXG_INITIAL
2035 */
2036 if (strcmp(snapname, ORIGIN_DIR_NAME) == 0)
2037 crtxg = 1;
2038 else
2039 crtxg = tx->tx_txg;
2040
2041 dsobj = dmu_object_alloc(mos, DMU_OT_DSL_DATASET, 0,
2042 DMU_OT_DSL_DATASET, sizeof (dsl_dataset_phys_t), tx);
2043 VERIFY(0 == dmu_bonus_hold(mos, dsobj, FTAG, &dbuf));
2044 dmu_buf_will_dirty(dbuf, tx);
2045 dsphys = dbuf->db_data;
2046 bzero(dsphys, sizeof (dsl_dataset_phys_t));
2047 dsphys->ds_dir_obj = ds->ds_dir->dd_object;
2048 dsphys->ds_fsid_guid = unique_create();
2049 (void) random_get_pseudo_bytes((void*)&dsphys->ds_guid,
2050 sizeof (dsphys->ds_guid));
2051 dsphys->ds_prev_snap_obj = ds->ds_phys->ds_prev_snap_obj;
2052 dsphys->ds_prev_snap_txg = ds->ds_phys->ds_prev_snap_txg;
2053 dsphys->ds_next_snap_obj = ds->ds_object;
2054 dsphys->ds_num_children = 1;
2055 dsphys->ds_creation_time = gethrestime_sec();
2056 dsphys->ds_creation_txg = crtxg;
2057 dsphys->ds_deadlist_obj = ds->ds_phys->ds_deadlist_obj;
2058 dsphys->ds_used_bytes = ds->ds_phys->ds_used_bytes;
2059 dsphys->ds_compressed_bytes = ds->ds_phys->ds_compressed_bytes;
2060 dsphys->ds_uncompressed_bytes = ds->ds_phys->ds_uncompressed_bytes;
2061 dsphys->ds_flags = ds->ds_phys->ds_flags;
2062 dsphys->ds_bp = ds->ds_phys->ds_bp;
2063 dmu_buf_rele(dbuf, FTAG);
2064
2065 ASSERT3U(ds->ds_prev != 0, ==, ds->ds_phys->ds_prev_snap_obj != 0);
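	/*
	 * Link the new snapshot into the chain of snapshots: normally the
	 * previous snapshot's next pointer moves from the head to the new
	 * snapshot; if the head branched off ds_prev (it is a clone), the
	 * origin's next-clones entry for this head is replaced by the new
	 * snapshot instead.
	 */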
2066 if (ds->ds_prev) {
2067 uint64_t next_clones_obj =
2068 ds->ds_prev->ds_phys->ds_next_clones_obj;
2069 ASSERT(ds->ds_prev->ds_phys->ds_next_snap_obj ==
2070 ds->ds_object ||
2071 ds->ds_prev->ds_phys->ds_num_children > 1);
2072 if (ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object) {
2073 dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
2074 ASSERT3U(ds->ds_phys->ds_prev_snap_txg, ==,
2075 ds->ds_prev->ds_phys->ds_creation_txg);
2076 ds->ds_prev->ds_phys->ds_next_snap_obj = dsobj;
2077 } else if (next_clones_obj != 0) {
2078 remove_from_next_clones(ds->ds_prev,
2079 dsphys->ds_next_snap_obj, tx);
2080 VERIFY3U(0, ==, zap_add_int(mos,
2081 next_clones_obj, dsobj, tx));
2082 }
2083 }
2084
2085 /*
2086 * If we have a reference-reservation on this dataset, we will
2087 * need to increase the amount of refreservation being charged
2088 * since our unique space is going to zero.
2089 */
2090 if (ds->ds_reserved) {
2091 int64_t delta;
2092 ASSERT(DS_UNIQUE_IS_ACCURATE(ds));
2093 delta = MIN(ds->ds_phys->ds_unique_bytes, ds->ds_reserved);
2094 dsl_dir_diduse_space(ds->ds_dir, DD_USED_REFRSRV,
2095 delta, 0, 0, tx);
2096 }
2097
2098 dmu_buf_will_dirty(ds->ds_dbuf, tx);
2099 zfs_dbgmsg("taking snapshot %s@%s/%llu; newkey=%llu",
2100 ds->ds_dir->dd_myname, snapname, dsobj,
2101 ds->ds_phys->ds_prev_snap_txg);
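	/*
	 * The new snapshot inherited our current deadlist object above;
	 * give the head a freshly cloned deadlist to accumulate the
	 * blocks that die from here on.
	 */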
2102 ds->ds_phys->ds_deadlist_obj = dsl_deadlist_clone(&ds->ds_deadlist,
2103 UINT64_MAX, ds->ds_phys->ds_prev_snap_obj, tx);
2104 dsl_deadlist_close(&ds->ds_deadlist);
2105 dsl_deadlist_open(&ds->ds_deadlist, mos, ds->ds_phys->ds_deadlist_obj);
2106 dsl_deadlist_add_key(&ds->ds_deadlist,
2107 ds->ds_phys->ds_prev_snap_txg, tx);
2108
2109 ASSERT3U(ds->ds_phys->ds_prev_snap_txg, <, tx->tx_txg);
2110 ds->ds_phys->ds_prev_snap_obj = dsobj;
2111 ds->ds_phys->ds_prev_snap_txg = crtxg;
2112 ds->ds_phys->ds_unique_bytes = 0;
2113 if (spa_version(dp->dp_spa) >= SPA_VERSION_UNIQUE_ACCURATE)
2114 ds->ds_phys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;
2115
2116 err = zap_add(mos, ds->ds_phys->ds_snapnames_zapobj,
2117 snapname, 8, 1, &dsobj, tx);
2118 ASSERT(err == 0);
2119
2120 if (ds->ds_prev)
2121 dsl_dataset_drop_ref(ds->ds_prev, ds);
2122 VERIFY(0 == dsl_dataset_get_ref(dp,
2123 ds->ds_phys->ds_prev_snap_obj, ds, &ds->ds_prev));
2124
2125 dsl_scan_ds_snapshotted(ds, tx);
2126
2127 dsl_dir_snap_cmtime_update(ds->ds_dir);
2128
2129 spa_history_log_internal(LOG_DS_SNAPSHOT, dp->dp_spa, tx,
2130 "dataset = %llu", dsobj);
2131}
2132
2133void
2134dsl_dataset_sync(dsl_dataset_t *ds, zio_t *zio, dmu_tx_t *tx)
2135{
2136 ASSERT(dmu_tx_is_syncing(tx));
2137 ASSERT(ds->ds_objset != NULL);
2138 ASSERT(ds->ds_phys->ds_next_snap_obj == 0);
2139
2140 /*
2141 * in case we had to change ds_fsid_guid when we opened it,
2142 * sync it out now.
2143 */
2144 dmu_buf_will_dirty(ds->ds_dbuf, tx);
2145 ds->ds_phys->ds_fsid_guid = ds->ds_fsid_guid;
2146
2147 dsl_dir_dirty(ds->ds_dir, tx);
2148 dmu_objset_sync(ds->ds_objset, zio, tx);
2149}
2150
2151void
2152dsl_dataset_stats(dsl_dataset_t *ds, nvlist_t *nv)
2153{
2154 	uint64_t refd, avail, uobjs, aobjs, ratio;
2155 
2156 	dsl_dir_stats(ds->ds_dir, nv);
2157 
2158 	dsl_dataset_space(ds, &refd, &avail, &uobjs, &aobjs);
2159 	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_AVAILABLE, avail);
2160 	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFERENCED, refd);
2161 
2162 	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_CREATION,
2163 	    ds->ds_phys->ds_creation_time);
2164 	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_CREATETXG,
2165 	    ds->ds_phys->ds_creation_txg);
2166 	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFQUOTA,
2167 	    ds->ds_quota);
2168 	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFRESERVATION,
2169 	    ds->ds_reserved);
2170 	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_GUID,
2171 	    ds->ds_phys->ds_guid);
2172 	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_UNIQUE,
2173 	    ds->ds_phys->ds_unique_bytes);
2174 	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_OBJSETID,
2175 	    ds->ds_object);
2176 	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USERREFS,
2177 	    ds->ds_userrefs);
2178 	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_DEFER_DESTROY,
2179 	    DS_IS_DEFER_DESTROY(ds) ? 1 : 0);
2180 
2181 	ratio = ds->ds_phys->ds_compressed_bytes == 0 ? 100 :
2182 	    (ds->ds_phys->ds_uncompressed_bytes * 100 /
2183 	    ds->ds_phys->ds_compressed_bytes);
2184 	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFRATIO, ratio);
2185 
2186 	if (ds->ds_phys->ds_next_snap_obj) {
2187 		/*
2188 		 * This is a snapshot; override the dd's space used with
2189 		 * our unique space and compression ratio.
2190 		 */
2191 		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USED,
2192 		    ds->ds_phys->ds_unique_bytes);
2193 		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_COMPRESSRATIO, ratio);
2191 }
2192}
2193
2194void
2195dsl_dataset_fast_stat(dsl_dataset_t *ds, dmu_objset_stats_t *stat)
2196{
2197 stat->dds_creation_txg = ds->ds_phys->ds_creation_txg;
2198 stat->dds_inconsistent = ds->ds_phys->ds_flags & DS_FLAG_INCONSISTENT;
2199 stat->dds_guid = ds->ds_phys->ds_guid;
2200 if (ds->ds_phys->ds_next_snap_obj) {
2201 stat->dds_is_snapshot = B_TRUE;
2202 stat->dds_num_clones = ds->ds_phys->ds_num_children - 1;
2203 } else {
2204 stat->dds_is_snapshot = B_FALSE;
2205 stat->dds_num_clones = 0;
2206 }
2207
2208 /* clone origin is really a dsl_dir thing... */
2209 rw_enter(&ds->ds_dir->dd_pool->dp_config_rwlock, RW_READER);
2210 if (dsl_dir_is_clone(ds->ds_dir)) {
2211 dsl_dataset_t *ods;
2212
2213 VERIFY(0 == dsl_dataset_get_ref(ds->ds_dir->dd_pool,
2214 ds->ds_dir->dd_phys->dd_origin_obj, FTAG, &ods));
2215 dsl_dataset_name(ods, stat->dds_origin);
2216 dsl_dataset_drop_ref(ods, FTAG);
2217 } else {
2218 stat->dds_origin[0] = '\0';
2219 }
2220 rw_exit(&ds->ds_dir->dd_pool->dp_config_rwlock);
2221}
2222
2223uint64_t
2224dsl_dataset_fsid_guid(dsl_dataset_t *ds)
2225{
2226 return (ds->ds_fsid_guid);
2227}
2228
2229void
2230dsl_dataset_space(dsl_dataset_t *ds,
2231 uint64_t *refdbytesp, uint64_t *availbytesp,
2232 uint64_t *usedobjsp, uint64_t *availobjsp)
2233{
2234 *refdbytesp = ds->ds_phys->ds_used_bytes;
2235 *availbytesp = dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE);
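	/*
	 * Any refreservation not yet consumed by unique data is space this
	 * dataset can still write into, so count it as available here.
	 */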
2236 if (ds->ds_reserved > ds->ds_phys->ds_unique_bytes)
2237 *availbytesp += ds->ds_reserved - ds->ds_phys->ds_unique_bytes;
2238 if (ds->ds_quota != 0) {
2239 /*
2240 * Adjust available bytes according to refquota
2241 */
2242 if (*refdbytesp < ds->ds_quota)
2243 *availbytesp = MIN(*availbytesp,
2244 ds->ds_quota - *refdbytesp);
2245 else
2246 *availbytesp = 0;
2247 }
2248 *usedobjsp = ds->ds_phys->ds_bp.blk_fill;
2249 *availobjsp = DN_MAX_OBJECT - *usedobjsp;
2250}
2251
2252boolean_t
2253dsl_dataset_modified_since_lastsnap(dsl_dataset_t *ds)
2254{
2255 dsl_pool_t *dp = ds->ds_dir->dd_pool;
2256
2257 ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock) ||
2258 dsl_pool_sync_context(dp));
2259 if (ds->ds_prev == NULL)
2260 return (B_FALSE);
2261 if (ds->ds_phys->ds_bp.blk_birth >
2262 ds->ds_prev->ds_phys->ds_creation_txg) {
2263 objset_t *os, *os_prev;
2264 /*
2265 * It may be that only the ZIL differs, because it was
2266 * reset in the head. Don't count that as being
2267 * modified.
2268 */
2269 if (dmu_objset_from_ds(ds, &os) != 0)
2270 return (B_TRUE);
2271 if (dmu_objset_from_ds(ds->ds_prev, &os_prev) != 0)
2272 return (B_TRUE);
2273 return (bcmp(&os->os_phys->os_meta_dnode,
2274 &os_prev->os_phys->os_meta_dnode,
2275 sizeof (os->os_phys->os_meta_dnode)) != 0);
2276 }
2277 return (B_FALSE);
2278}
2279
2280/* ARGSUSED */
2281static int
2282dsl_dataset_snapshot_rename_check(void *arg1, void *arg2, dmu_tx_t *tx)
2283{
2284 dsl_dataset_t *ds = arg1;
2285 char *newsnapname = arg2;
2286 dsl_dir_t *dd = ds->ds_dir;
2287 dsl_dataset_t *hds;
2288 uint64_t val;
2289 int err;
2290
2291 err = dsl_dataset_hold_obj(dd->dd_pool,
2292 dd->dd_phys->dd_head_dataset_obj, FTAG, &hds);
2293 if (err)
2294 return (err);
2295
2296 /* new name better not be in use */
2297 err = dsl_dataset_snap_lookup(hds, newsnapname, &val);
2298 dsl_dataset_rele(hds, FTAG);
2299
2300 if (err == 0)
2301 err = EEXIST;
2302 else if (err == ENOENT)
2303 err = 0;
2304
2305 /* dataset name + 1 for the "@" + the new snapshot name must fit */
2306 if (dsl_dir_namelen(ds->ds_dir) + 1 + strlen(newsnapname) >= MAXNAMELEN)
2307 err = ENAMETOOLONG;
2308
2309 return (err);
2310}
2311
2312static void
2313dsl_dataset_snapshot_rename_sync(void *arg1, void *arg2, dmu_tx_t *tx)
2314{
2315 char oldname[MAXPATHLEN], newname[MAXPATHLEN];
2316 dsl_dataset_t *ds = arg1;
2317 const char *newsnapname = arg2;
2318 dsl_dir_t *dd = ds->ds_dir;
2319 objset_t *mos = dd->dd_pool->dp_meta_objset;
2320 dsl_dataset_t *hds;
2321 int err;
2322
2323 ASSERT(ds->ds_phys->ds_next_snap_obj != 0);
2324
2325 VERIFY(0 == dsl_dataset_hold_obj(dd->dd_pool,
2326 dd->dd_phys->dd_head_dataset_obj, FTAG, &hds));
2327
2328 VERIFY(0 == dsl_dataset_get_snapname(ds));
2329 err = dsl_dataset_snap_remove(hds, ds->ds_snapname, tx);
2330 ASSERT3U(err, ==, 0);
2331 dsl_dataset_name(ds, oldname);
2332 mutex_enter(&ds->ds_lock);
2333 (void) strcpy(ds->ds_snapname, newsnapname);
2334 mutex_exit(&ds->ds_lock);
2335 err = zap_add(mos, hds->ds_phys->ds_snapnames_zapobj,
2336 ds->ds_snapname, 8, 1, &ds->ds_object, tx);
2337 ASSERT3U(err, ==, 0);
2338 dsl_dataset_name(ds, newname);
2339#ifdef _KERNEL
2340 zvol_rename_minors(oldname, newname);
2341#endif
2342
2343 spa_history_log_internal(LOG_DS_RENAME, dd->dd_pool->dp_spa, tx,
2344 "dataset = %llu", ds->ds_object);
2345 dsl_dataset_rele(hds, FTAG);
2346}
2347
2348struct renamesnaparg {
2349 dsl_sync_task_group_t *dstg;
2350 char failed[MAXPATHLEN];
2351 char *oldsnap;
2352 char *newsnap;
2353};
2354
2355static int
2356dsl_snapshot_rename_one(const char *name, void *arg)
2357{
2358 struct renamesnaparg *ra = arg;
2359 dsl_dataset_t *ds = NULL;
2360 char *snapname;
2361 int err;
2362
2363 snapname = kmem_asprintf("%s@%s", name, ra->oldsnap);
2364 (void) strlcpy(ra->failed, snapname, sizeof (ra->failed));
2365
2366 /*
2367 * For recursive snapshot renames the parent won't be changing
2368 * so we just pass name for both the to/from argument.
2369 */
2370 err = zfs_secpolicy_rename_perms(snapname, snapname, CRED());
2371 if (err != 0) {
2372 strfree(snapname);
2373 return (err == ENOENT ? 0 : err);
2374 }
2375
2376#ifdef _KERNEL
2377 /*
2378 	 * For each filesystem undergoing rename, we need to unmount its snapshot.
2379 */
2380 (void) zfs_unmount_snap(snapname, NULL);
2381#endif
2382 err = dsl_dataset_hold(snapname, ra->dstg, &ds);
2383 strfree(snapname);
2384 if (err != 0)
2385 return (err == ENOENT ? 0 : err);
2386
2387 dsl_sync_task_create(ra->dstg, dsl_dataset_snapshot_rename_check,
2388 dsl_dataset_snapshot_rename_sync, ds, ra->newsnap, 0);
2389
2390 return (0);
2391}
2392
2393static int
2394dsl_recursive_rename(char *oldname, const char *newname)
2395{
2396 int err;
2397 struct renamesnaparg *ra;
2398 dsl_sync_task_t *dst;
2399 spa_t *spa;
2400 char *cp, *fsname = spa_strdup(oldname);
2401 int len = strlen(oldname) + 1;
2402
2403 /* truncate the snapshot name to get the fsname */
2404 cp = strchr(fsname, '@');
2405 *cp = '\0';
2406
2407 err = spa_open(fsname, &spa, FTAG);
2408 if (err) {
2409 kmem_free(fsname, len);
2410 return (err);
2411 }
2412 ra = kmem_alloc(sizeof (struct renamesnaparg), KM_SLEEP);
2413 ra->dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
2414
2415 ra->oldsnap = strchr(oldname, '@') + 1;
2416 ra->newsnap = strchr(newname, '@') + 1;
2417 *ra->failed = '\0';
2418
2419 err = dmu_objset_find(fsname, dsl_snapshot_rename_one, ra,
2420 DS_FIND_CHILDREN);
2421 kmem_free(fsname, len);
2422
2423 if (err == 0) {
2424 err = dsl_sync_task_group_wait(ra->dstg);
2425 }
2426
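	/*
	 * Record the name of any snapshot whose rename failed so the
	 * caller can report it.
	 */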
2427 for (dst = list_head(&ra->dstg->dstg_tasks); dst;
2428 dst = list_next(&ra->dstg->dstg_tasks, dst)) {
2429 dsl_dataset_t *ds = dst->dst_arg1;
2430 if (dst->dst_err) {
2431 dsl_dir_name(ds->ds_dir, ra->failed);
2432 (void) strlcat(ra->failed, "@", sizeof (ra->failed));
2433 (void) strlcat(ra->failed, ra->newsnap,
2434 sizeof (ra->failed));
2435 }
2436 dsl_dataset_rele(ds, ra->dstg);
2437 }
2438
2439 if (err)
2440 (void) strlcpy(oldname, ra->failed, sizeof (ra->failed));
2441
2442 dsl_sync_task_group_destroy(ra->dstg);
2443 kmem_free(ra, sizeof (struct renamesnaparg));
2444 spa_close(spa, FTAG);
2445 return (err);
2446}
2447
2448static int
2449dsl_valid_rename(const char *oldname, void *arg)
2450{
2451 int delta = *(int *)arg;
2452
2453 if (strlen(oldname) + delta >= MAXNAMELEN)
2454 return (ENAMETOOLONG);
2455
2456 return (0);
2457}
2458
2459#pragma weak dmu_objset_rename = dsl_dataset_rename
2460int
2461dsl_dataset_rename(char *oldname, const char *newname, boolean_t recursive)
2462{
2463 dsl_dir_t *dd;
2464 dsl_dataset_t *ds;
2465 const char *tail;
2466 int err;
2467
2468 err = dsl_dir_open(oldname, FTAG, &dd, &tail);
2469 if (err)
2470 return (err);
2471
2472 if (tail == NULL) {
2473 int delta = strlen(newname) - strlen(oldname);
2474
2475 /* if we're growing, validate child name lengths */
2476 if (delta > 0)
2477 err = dmu_objset_find(oldname, dsl_valid_rename,
2478 &delta, DS_FIND_CHILDREN | DS_FIND_SNAPSHOTS);
2479
2480 if (err == 0)
2481 err = dsl_dir_rename(dd, newname);
2482 dsl_dir_close(dd, FTAG);
2483 return (err);
2484 }
2485
2486 if (tail[0] != '@') {
2487 /* the name ended in a nonexistent component */
2488 dsl_dir_close(dd, FTAG);
2489 return (ENOENT);
2490 }
2491
2492 dsl_dir_close(dd, FTAG);
2493
2494 /* new name must be snapshot in same filesystem */
2495 tail = strchr(newname, '@');
2496 if (tail == NULL)
2497 return (EINVAL);
2498 tail++;
2499 if (strncmp(oldname, newname, tail - newname) != 0)
2500 return (EXDEV);
2501
2502 if (recursive) {
2503 err = dsl_recursive_rename(oldname, newname);
2504 } else {
2505 err = dsl_dataset_hold(oldname, FTAG, &ds);
2506 if (err)
2507 return (err);
2508
2509 err = dsl_sync_task_do(ds->ds_dir->dd_pool,
2510 dsl_dataset_snapshot_rename_check,
2511 dsl_dataset_snapshot_rename_sync, ds, (char *)tail, 1);
2512
2513 dsl_dataset_rele(ds, FTAG);
2514 }
2515
2516 return (err);
2517}
2518
2519struct promotenode {
2520 list_node_t link;
2521 dsl_dataset_t *ds;
2522};
2523
2524struct promotearg {
2525 list_t shared_snaps, origin_snaps, clone_snaps;
2526 dsl_dataset_t *origin_origin;
2527 uint64_t used, comp, uncomp, unique, cloneusedsnap, originusedsnap;
2528 char *err_ds;
2529};
2530
2531static int snaplist_space(list_t *l, uint64_t mintxg, uint64_t *spacep);
2532static boolean_t snaplist_unstable(list_t *l);
2533
2534static int
2535dsl_dataset_promote_check(void *arg1, void *arg2, dmu_tx_t *tx)
2536{
2537 dsl_dataset_t *hds = arg1;
2538 struct promotearg *pa = arg2;
2539 struct promotenode *snap = list_head(&pa->shared_snaps);
2540 dsl_dataset_t *origin_ds = snap->ds;
2541 int err;
2542 uint64_t unused;
2543
2544 /* Check that it is a real clone */
2545 if (!dsl_dir_is_clone(hds->ds_dir))
2546 return (EINVAL);
2547
2548 /* Since this is so expensive, don't do the preliminary check */
2549 if (!dmu_tx_is_syncing(tx))
2550 return (0);
2551
2552 if (hds->ds_phys->ds_flags & DS_FLAG_NOPROMOTE)
2553 return (EXDEV);
2554
2555 /* compute origin's new unique space */
2556 snap = list_tail(&pa->clone_snaps);
2557 ASSERT3U(snap->ds->ds_phys->ds_prev_snap_obj, ==, origin_ds->ds_object);
2558 dsl_deadlist_space_range(&snap->ds->ds_deadlist,
2559 origin_ds->ds_phys->ds_prev_snap_txg, UINT64_MAX,
2560 &pa->unique, &unused, &unused);
2561
2562 /*
2563 * Walk the snapshots that we are moving
2564 *
2565 * Compute space to transfer. Consider the incremental changes
2566 * to used for each snapshot:
2567 * (my used) = (prev's used) + (blocks born) - (blocks killed)
2568 * So each snapshot gave birth to:
2569 * (blocks born) = (my used) - (prev's used) + (blocks killed)
2570 * So a sequence would look like:
2571 * (uN - u(N-1) + kN) + ... + (u1 - u0 + k1) + (u0 - 0 + k0)
2572 * Which simplifies to:
2573 	 *	uN + kN + k(N-1) + ... + k1 + k0
2574 	 * Note however, if we stop before we reach the ORIGIN we get:
2575 	 *	uN + kN + k(N-1) + ... + kM - u(M-1)
2576 */
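	/*
	 * A small illustration with hypothetical numbers (N = 2): if the
	 * newest shared snapshot references u2 = 10 units and the shared
	 * snapshots' deadlists hold k2 = 3, k1 = 2, k0 = 1 units of killed
	 * blocks, walking all the way back to the ORIGIN transfers
	 * 10 + 3 + 2 + 1 = 16 units to the promoted dataset.
	 */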
2577 pa->used = origin_ds->ds_phys->ds_used_bytes;
2578 pa->comp = origin_ds->ds_phys->ds_compressed_bytes;
2579 pa->uncomp = origin_ds->ds_phys->ds_uncompressed_bytes;
2580 for (snap = list_head(&pa->shared_snaps); snap;
2581 snap = list_next(&pa->shared_snaps, snap)) {
2582 uint64_t val, dlused, dlcomp, dluncomp;
2583 dsl_dataset_t *ds = snap->ds;
2584
2585 /* Check that the snapshot name does not conflict */
2586 VERIFY(0 == dsl_dataset_get_snapname(ds));
2587 err = dsl_dataset_snap_lookup(hds, ds->ds_snapname, &val);
2588 if (err == 0) {
2589 err = EEXIST;
2590 goto out;
2591 }
2592 if (err != ENOENT)
2593 goto out;
2594
2595 /* The very first snapshot does not have a deadlist */
2596 if (ds->ds_phys->ds_prev_snap_obj == 0)
2597 continue;
2598
2599 dsl_deadlist_space(&ds->ds_deadlist,
2600 &dlused, &dlcomp, &dluncomp);
2601 pa->used += dlused;
2602 pa->comp += dlcomp;
2603 pa->uncomp += dluncomp;
2604 }
2605
2606 /*
2607 * If we are a clone of a clone then we never reached ORIGIN,
2608 * so we need to subtract out the clone origin's used space.
2609 */
2610 if (pa->origin_origin) {
2611 pa->used -= pa->origin_origin->ds_phys->ds_used_bytes;
2612 pa->comp -= pa->origin_origin->ds_phys->ds_compressed_bytes;
2613 pa->uncomp -= pa->origin_origin->ds_phys->ds_uncompressed_bytes;
2614 }
2615
2616 /* Check that there is enough space here */
2617 err = dsl_dir_transfer_possible(origin_ds->ds_dir, hds->ds_dir,
2618 pa->used);
2619 if (err)
2620 return (err);
2621
2622 /*
2623 * Compute the amounts of space that will be used by snapshots
2624 * after the promotion (for both origin and clone). For each,
2625 * it is the amount of space that will be on all of their
2626 * deadlists (that was not born before their new origin).
2627 */
2628 if (hds->ds_dir->dd_phys->dd_flags & DD_FLAG_USED_BREAKDOWN) {
2629 uint64_t space;
2630
2631 /*
2632 * Note, typically this will not be a clone of a clone,
2633 * so dd_origin_txg will be < TXG_INITIAL, so
2634 * these snaplist_space() -> dsl_deadlist_space_range()
2635 * calls will be fast because they do not have to
2636 * iterate over all bps.
2637 */
2638 snap = list_head(&pa->origin_snaps);
2639 err = snaplist_space(&pa->shared_snaps,
2640 snap->ds->ds_dir->dd_origin_txg, &pa->cloneusedsnap);
2641 if (err)
2642 return (err);
2643
2644 err = snaplist_space(&pa->clone_snaps,
2645 snap->ds->ds_dir->dd_origin_txg, &space);
2646 if (err)
2647 return (err);
2648 pa->cloneusedsnap += space;
2649 }
2650 if (origin_ds->ds_dir->dd_phys->dd_flags & DD_FLAG_USED_BREAKDOWN) {
2651 err = snaplist_space(&pa->origin_snaps,
2652 origin_ds->ds_phys->ds_creation_txg, &pa->originusedsnap);
2653 if (err)
2654 return (err);
2655 }
2656
2657 return (0);
2658out:
2659 pa->err_ds = snap->ds->ds_snapname;
2660 return (err);
2661}
2662
2663static void
2664dsl_dataset_promote_sync(void *arg1, void *arg2, dmu_tx_t *tx)
2665{
2666 dsl_dataset_t *hds = arg1;
2667 struct promotearg *pa = arg2;
2668 struct promotenode *snap = list_head(&pa->shared_snaps);
2669 dsl_dataset_t *origin_ds = snap->ds;
2670 dsl_dataset_t *origin_head;
2671 dsl_dir_t *dd = hds->ds_dir;
2672 dsl_pool_t *dp = hds->ds_dir->dd_pool;
2673 dsl_dir_t *odd = NULL;
2674 uint64_t oldnext_obj;
2675 int64_t delta;
2676
2677 ASSERT(0 == (hds->ds_phys->ds_flags & DS_FLAG_NOPROMOTE));
2678
2679 snap = list_head(&pa->origin_snaps);
2680 origin_head = snap->ds;
2681
2682 /*
2683 * We need to explicitly open odd, since origin_ds's dd will be
2684 * changing.
2685 */
2686 VERIFY(0 == dsl_dir_open_obj(dp, origin_ds->ds_dir->dd_object,
2687 NULL, FTAG, &odd));
2688
2689 /* change origin's next snap */
2690 dmu_buf_will_dirty(origin_ds->ds_dbuf, tx);
2691 oldnext_obj = origin_ds->ds_phys->ds_next_snap_obj;
2692 snap = list_tail(&pa->clone_snaps);
2693 ASSERT3U(snap->ds->ds_phys->ds_prev_snap_obj, ==, origin_ds->ds_object);
2694 origin_ds->ds_phys->ds_next_snap_obj = snap->ds->ds_object;
2695
2696 /* change the origin's next clone */
2697 if (origin_ds->ds_phys->ds_next_clones_obj) {
2698 remove_from_next_clones(origin_ds, snap->ds->ds_object, tx);
2699 VERIFY3U(0, ==, zap_add_int(dp->dp_meta_objset,
2700 origin_ds->ds_phys->ds_next_clones_obj,
2701 oldnext_obj, tx));
2702 }
2703
2704 /* change origin */
2705 dmu_buf_will_dirty(dd->dd_dbuf, tx);
2706 ASSERT3U(dd->dd_phys->dd_origin_obj, ==, origin_ds->ds_object);
2707 dd->dd_phys->dd_origin_obj = odd->dd_phys->dd_origin_obj;
2708 dd->dd_origin_txg = origin_head->ds_dir->dd_origin_txg;
2709 dmu_buf_will_dirty(odd->dd_dbuf, tx);
2710 odd->dd_phys->dd_origin_obj = origin_ds->ds_object;
2711 origin_head->ds_dir->dd_origin_txg =
2712 origin_ds->ds_phys->ds_creation_txg;
2713
2714 /* change dd_clone entries */
2715 if (spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
2716 VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
2717 odd->dd_phys->dd_clones, hds->ds_object, tx));
2718 VERIFY3U(0, ==, zap_add_int(dp->dp_meta_objset,
2719 pa->origin_origin->ds_dir->dd_phys->dd_clones,
2720 hds->ds_object, tx));
2721
2722 VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
2723 pa->origin_origin->ds_dir->dd_phys->dd_clones,
2724 origin_head->ds_object, tx));
2725 if (dd->dd_phys->dd_clones == 0) {
2726 dd->dd_phys->dd_clones = zap_create(dp->dp_meta_objset,
2727 DMU_OT_DSL_CLONES, DMU_OT_NONE, 0, tx);
2728 }
2729 VERIFY3U(0, ==, zap_add_int(dp->dp_meta_objset,
2730 dd->dd_phys->dd_clones, origin_head->ds_object, tx));
2731
2732 }
2733
2734 /* move snapshots to this dir */
2735 for (snap = list_head(&pa->shared_snaps); snap;
2736 snap = list_next(&pa->shared_snaps, snap)) {
2737 dsl_dataset_t *ds = snap->ds;
2738
2739 /* unregister props as dsl_dir is changing */
2740 if (ds->ds_objset) {
2741 dmu_objset_evict(ds->ds_objset);
2742 ds->ds_objset = NULL;
2743 }
2744 /* move snap name entry */
2745 VERIFY(0 == dsl_dataset_get_snapname(ds));
2746 VERIFY(0 == dsl_dataset_snap_remove(origin_head,
2747 ds->ds_snapname, tx));
2748 VERIFY(0 == zap_add(dp->dp_meta_objset,
2749 hds->ds_phys->ds_snapnames_zapobj, ds->ds_snapname,
2750 8, 1, &ds->ds_object, tx));
2751
2752 /* change containing dsl_dir */
2753 dmu_buf_will_dirty(ds->ds_dbuf, tx);
2754 ASSERT3U(ds->ds_phys->ds_dir_obj, ==, odd->dd_object);
2755 ds->ds_phys->ds_dir_obj = dd->dd_object;
2756 ASSERT3P(ds->ds_dir, ==, odd);
2757 dsl_dir_close(ds->ds_dir, ds);
2758 VERIFY(0 == dsl_dir_open_obj(dp, dd->dd_object,
2759 NULL, ds, &ds->ds_dir));
2760
2761 /* move any clone references */
2762 if (ds->ds_phys->ds_next_clones_obj &&
2763 spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
2764 zap_cursor_t zc;
2765 zap_attribute_t za;
2766
2767 for (zap_cursor_init(&zc, dp->dp_meta_objset,
2768 ds->ds_phys->ds_next_clones_obj);
2769 zap_cursor_retrieve(&zc, &za) == 0;
2770 zap_cursor_advance(&zc)) {
2771 dsl_dataset_t *cnds;
2772 uint64_t o;
2773
2774 if (za.za_first_integer == oldnext_obj) {
2775 /*
2776 * We've already moved the
2777 * origin's reference.
2778 */
2779 continue;
2780 }
2781
2782 VERIFY3U(0, ==, dsl_dataset_hold_obj(dp,
2783 za.za_first_integer, FTAG, &cnds));
2784 o = cnds->ds_dir->dd_phys->dd_head_dataset_obj;
2785
2786 VERIFY3U(zap_remove_int(dp->dp_meta_objset,
2787 odd->dd_phys->dd_clones, o, tx), ==, 0);
2788 VERIFY3U(zap_add_int(dp->dp_meta_objset,
2789 dd->dd_phys->dd_clones, o, tx), ==, 0);
2790 dsl_dataset_rele(cnds, FTAG);
2791 }
2792 zap_cursor_fini(&zc);
2793 }
2794
2795 ASSERT3U(dsl_prop_numcb(ds), ==, 0);
2796 }
2797
2798 /*
2799 * Change space accounting.
2800 * Note, pa->*usedsnap and dd_used_breakdown[SNAP] will either
2801 * both be valid, or both be 0 (resulting in delta == 0). This
2802 * is true for each of {clone,origin} independently.
2803 */
2804
2805 delta = pa->cloneusedsnap -
2806 dd->dd_phys->dd_used_breakdown[DD_USED_SNAP];
2807 ASSERT3S(delta, >=, 0);
2808 ASSERT3U(pa->used, >=, delta);
2809 dsl_dir_diduse_space(dd, DD_USED_SNAP, delta, 0, 0, tx);
2810 dsl_dir_diduse_space(dd, DD_USED_HEAD,
2811 pa->used - delta, pa->comp, pa->uncomp, tx);
2812
2813 delta = pa->originusedsnap -
2814 odd->dd_phys->dd_used_breakdown[DD_USED_SNAP];
2815 ASSERT3S(delta, <=, 0);
2816 ASSERT3U(pa->used, >=, -delta);
2817 dsl_dir_diduse_space(odd, DD_USED_SNAP, delta, 0, 0, tx);
2818 dsl_dir_diduse_space(odd, DD_USED_HEAD,
2819 -pa->used - delta, -pa->comp, -pa->uncomp, tx);
2820
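	/* The origin snapshot's new unique space was computed in the check func. */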
2821 origin_ds->ds_phys->ds_unique_bytes = pa->unique;
2822
2823 /* log history record */
2824 spa_history_log_internal(LOG_DS_PROMOTE, dd->dd_pool->dp_spa, tx,
2825 "dataset = %llu", hds->ds_object);
2826
2827 dsl_dir_close(odd, FTAG);
2828}
2829
2830static char *snaplist_tag = "snaplist";
2831/*
2832 * Make a list of dsl_dataset_t's for the snapshots between first_obj
2833 * (exclusive) and last_obj (inclusive). The list will be in reverse
2834 * order (last_obj will be the list_head()). If first_obj == 0, do all
2835 * snapshots back to this dataset's origin.
2836 */
2837static int
2838snaplist_make(dsl_pool_t *dp, boolean_t own,
2839 uint64_t first_obj, uint64_t last_obj, list_t *l)
2840{
2841 uint64_t obj = last_obj;
2842
2843 ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock));
2844
2845 list_create(l, sizeof (struct promotenode),
2846 offsetof(struct promotenode, link));
2847
2848 while (obj != first_obj) {
2849 dsl_dataset_t *ds;
2850 struct promotenode *snap;
2851 int err;
2852
2853 if (own) {
2854 err = dsl_dataset_own_obj(dp, obj,
2855 0, snaplist_tag, &ds);
2856 if (err == 0)
2857 dsl_dataset_make_exclusive(ds, snaplist_tag);
2858 } else {
2859 err = dsl_dataset_hold_obj(dp, obj, snaplist_tag, &ds);
2860 }
2861 if (err == ENOENT) {
2862 /* lost race with snapshot destroy */
2863 struct promotenode *last = list_tail(l);
2864 ASSERT(obj != last->ds->ds_phys->ds_prev_snap_obj);
2865 obj = last->ds->ds_phys->ds_prev_snap_obj;
2866 continue;
2867 } else if (err) {
2868 return (err);
2869 }
2870
2871 if (first_obj == 0)
2872 first_obj = ds->ds_dir->dd_phys->dd_origin_obj;
2873
2874 snap = kmem_alloc(sizeof (struct promotenode), KM_SLEEP);
2875 snap->ds = ds;
2876 list_insert_tail(l, snap);
2877 obj = ds->ds_phys->ds_prev_snap_obj;
2878 }
2879
2880 return (0);
2881}
2882
2883static int
2884snaplist_space(list_t *l, uint64_t mintxg, uint64_t *spacep)
2885{
2886 struct promotenode *snap;
2887
2888 *spacep = 0;
2889 for (snap = list_head(l); snap; snap = list_next(l, snap)) {
2890 uint64_t used, comp, uncomp;
2891 dsl_deadlist_space_range(&snap->ds->ds_deadlist,
2892 mintxg, UINT64_MAX, &used, &comp, &uncomp);
2893 *spacep += used;
2894 }
2895 return (0);
2896}
2897
2898static void
2899snaplist_destroy(list_t *l, boolean_t own)
2900{
2901 struct promotenode *snap;
2902
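	/* Nothing to do if the list was never created (e.g. an early error). */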
2903 if (!l || !list_link_active(&l->list_head))
2904 return;
2905
2906 while ((snap = list_tail(l)) != NULL) {
2907 list_remove(l, snap);
2908 if (own)
2909 dsl_dataset_disown(snap->ds, snaplist_tag);
2910 else
2911 dsl_dataset_rele(snap->ds, snaplist_tag);
2912 kmem_free(snap, sizeof (struct promotenode));
2913 }
2914 list_destroy(l);
2915}
2916
2917/*
2918 * Promote a clone. Nomenclature note:
2919 * "clone" or "cds": the original clone which is being promoted
2920 * "origin" or "ods": the snapshot which is originally clone's origin
2921 * "origin head" or "ohds": the dataset which is the head
2922 * (filesystem/volume) for the origin
2923 * "origin origin": the origin of the origin's filesystem (typically
2924 * NULL, indicating that the clone is not a clone of a clone).
2925 */
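/*
 * For example (hypothetical names): if "pool/clone" was created from
 * "pool/fs@snap", promoting "pool/clone" moves pool/fs's snapshots up to
 * and including "snap" underneath pool/clone, and "pool/fs" itself
 * becomes a clone of "pool/clone@snap".
 */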
2926int
2927dsl_dataset_promote(const char *name, char *conflsnap)
2928{
2929 dsl_dataset_t *ds;
2930 dsl_dir_t *dd;
2931 dsl_pool_t *dp;
2932 dmu_object_info_t doi;
2933 struct promotearg pa = { 0 };
2934 struct promotenode *snap;
2935 int err;
2936
2937 err = dsl_dataset_hold(name, FTAG, &ds);
2938 if (err)
2939 return (err);
2940 dd = ds->ds_dir;
2941 dp = dd->dd_pool;
2942
2943 err = dmu_object_info(dp->dp_meta_objset,
2944 ds->ds_phys->ds_snapnames_zapobj, &doi);
2945 if (err) {
2946 dsl_dataset_rele(ds, FTAG);
2947 return (err);
2948 }
2949
2950 if (dsl_dataset_is_snapshot(ds) || dd->dd_phys->dd_origin_obj == 0) {
2951 dsl_dataset_rele(ds, FTAG);
2952 return (EINVAL);
2953 }
2954
2955 /*
2956 * We are going to inherit all the snapshots taken before our
2957 * origin (i.e., our new origin will be our parent's origin).
2958 * Take ownership of them so that we can rename them into our
2959 * namespace.
2960 */
2961 rw_enter(&dp->dp_config_rwlock, RW_READER);
2962
2963 err = snaplist_make(dp, B_TRUE, 0, dd->dd_phys->dd_origin_obj,
2964 &pa.shared_snaps);
2965 if (err != 0)
2966 goto out;
2967
2968 err = snaplist_make(dp, B_FALSE, 0, ds->ds_object, &pa.clone_snaps);
2969 if (err != 0)
2970 goto out;
2971
2972 snap = list_head(&pa.shared_snaps);
2973 ASSERT3U(snap->ds->ds_object, ==, dd->dd_phys->dd_origin_obj);
2974 err = snaplist_make(dp, B_FALSE, dd->dd_phys->dd_origin_obj,
2975 snap->ds->ds_dir->dd_phys->dd_head_dataset_obj, &pa.origin_snaps);
2976 if (err != 0)
2977 goto out;
2978
2979 if (snap->ds->ds_dir->dd_phys->dd_origin_obj != 0) {
2980 err = dsl_dataset_hold_obj(dp,
2981 snap->ds->ds_dir->dd_phys->dd_origin_obj,
2982 FTAG, &pa.origin_origin);
2983 if (err != 0)
2984 goto out;
2985 }
2986
2987out:
2988 rw_exit(&dp->dp_config_rwlock);
2989
2990 /*
2991 * Add in 128x the snapnames zapobj size, since we will be moving
2992 * a bunch of snapnames to the promoted ds, and dirtying their
2993 * bonus buffers.
2994 */
2995 if (err == 0) {
2996 err = dsl_sync_task_do(dp, dsl_dataset_promote_check,
2997 dsl_dataset_promote_sync, ds, &pa,
2998 2 + 2 * doi.doi_physical_blocks_512);
2999 if (err && pa.err_ds && conflsnap)
3000 (void) strncpy(conflsnap, pa.err_ds, MAXNAMELEN);
3001 }
3002
3003 snaplist_destroy(&pa.shared_snaps, B_TRUE);
3004 snaplist_destroy(&pa.clone_snaps, B_FALSE);
3005 snaplist_destroy(&pa.origin_snaps, B_FALSE);
3006 if (pa.origin_origin)
3007 dsl_dataset_rele(pa.origin_origin, FTAG);
3008 dsl_dataset_rele(ds, FTAG);
3009 return (err);
3010}
3011
3012struct cloneswaparg {
3013 dsl_dataset_t *cds; /* clone dataset */
3014 dsl_dataset_t *ohds; /* origin's head dataset */
3015 boolean_t force;
3016 int64_t unused_refres_delta; /* change in unconsumed refreservation */
3017};
3018
3019/* ARGSUSED */
3020static int
3021dsl_dataset_clone_swap_check(void *arg1, void *arg2, dmu_tx_t *tx)
3022{
3023 struct cloneswaparg *csa = arg1;
3024
3025 /* they should both be heads */
3026 if (dsl_dataset_is_snapshot(csa->cds) ||
3027 dsl_dataset_is_snapshot(csa->ohds))
3028 return (EINVAL);
3029
3030 /* the branch point should be just before them */
3031 if (csa->cds->ds_prev != csa->ohds->ds_prev)
3032 return (EINVAL);
3033
3034 /* cds should be the clone (unless they are unrelated) */
3035 if (csa->cds->ds_prev != NULL &&
3036 csa->cds->ds_prev != csa->cds->ds_dir->dd_pool->dp_origin_snap &&
3037 csa->ohds->ds_object !=
3038 csa->cds->ds_prev->ds_phys->ds_next_snap_obj)
3039 return (EINVAL);
3040
3041 /* the clone should be a child of the origin */
3042 if (csa->cds->ds_dir->dd_parent != csa->ohds->ds_dir)
3043 return (EINVAL);
3044
3045 /* ohds shouldn't be modified unless 'force' */
3046 if (!csa->force && dsl_dataset_modified_since_lastsnap(csa->ohds))
3047 return (ETXTBSY);
3048
3049 /* adjust amount of any unconsumed refreservation */
3050 csa->unused_refres_delta =
3051 (int64_t)MIN(csa->ohds->ds_reserved,
3052 csa->ohds->ds_phys->ds_unique_bytes) -
3053 (int64_t)MIN(csa->ohds->ds_reserved,
3054 csa->cds->ds_phys->ds_unique_bytes);
3055
3056 if (csa->unused_refres_delta > 0 &&
3057 csa->unused_refres_delta >
3058 dsl_dir_space_available(csa->ohds->ds_dir, NULL, 0, TRUE))
3059 return (ENOSPC);
3060
3061 if (csa->ohds->ds_quota != 0 &&
3062 csa->cds->ds_phys->ds_unique_bytes > csa->ohds->ds_quota)
3063 return (EDQUOT);
3064
3065 return (0);
3066}
3067
3068/* ARGSUSED */
3069static void
3070dsl_dataset_clone_swap_sync(void *arg1, void *arg2, dmu_tx_t *tx)
3071{
3072 struct cloneswaparg *csa = arg1;
3073 dsl_pool_t *dp = csa->cds->ds_dir->dd_pool;
3074
3075 ASSERT(csa->cds->ds_reserved == 0);
3076 ASSERT(csa->ohds->ds_quota == 0 ||
3077 csa->cds->ds_phys->ds_unique_bytes <= csa->ohds->ds_quota);
3078
3079 dmu_buf_will_dirty(csa->cds->ds_dbuf, tx);
3080 dmu_buf_will_dirty(csa->ohds->ds_dbuf, tx);
3081
3082 if (csa->cds->ds_objset != NULL) {
3083 dmu_objset_evict(csa->cds->ds_objset);
3084 csa->cds->ds_objset = NULL;
3085 }
3086
3087 if (csa->ohds->ds_objset != NULL) {
3088 dmu_objset_evict(csa->ohds->ds_objset);
3089 csa->ohds->ds_objset = NULL;
3090 }
3091
3092 /*
3093 * Reset origin's unique bytes, if it exists.
3094 */
3095 if (csa->cds->ds_prev) {
3096 dsl_dataset_t *origin = csa->cds->ds_prev;
3097 uint64_t comp, uncomp;
3098
3099 dmu_buf_will_dirty(origin->ds_dbuf, tx);
3100 dsl_deadlist_space_range(&csa->cds->ds_deadlist,
3101 origin->ds_phys->ds_prev_snap_txg, UINT64_MAX,
3102 &origin->ds_phys->ds_unique_bytes, &comp, &uncomp);
3103 }
3104
3105 /* swap blkptrs */
3106 {
3107 blkptr_t tmp;
3108 tmp = csa->ohds->ds_phys->ds_bp;
3109 csa->ohds->ds_phys->ds_bp = csa->cds->ds_phys->ds_bp;
3110 csa->cds->ds_phys->ds_bp = tmp;
3111 }
3112
3113 /* set dd_*_bytes */
3114 {
3115 int64_t dused, dcomp, duncomp;
3116 uint64_t cdl_used, cdl_comp, cdl_uncomp;
3117 uint64_t odl_used, odl_comp, odl_uncomp;
3118
3119 ASSERT3U(csa->cds->ds_dir->dd_phys->
3120 dd_used_breakdown[DD_USED_SNAP], ==, 0);
3121
3122 dsl_deadlist_space(&csa->cds->ds_deadlist,
3123 &cdl_used, &cdl_comp, &cdl_uncomp);
3124 dsl_deadlist_space(&csa->ohds->ds_deadlist,
3125 &odl_used, &odl_comp, &odl_uncomp);
3126
3127 dused = csa->cds->ds_phys->ds_used_bytes + cdl_used -
3128 (csa->ohds->ds_phys->ds_used_bytes + odl_used);
3129 dcomp = csa->cds->ds_phys->ds_compressed_bytes + cdl_comp -
3130 (csa->ohds->ds_phys->ds_compressed_bytes + odl_comp);
3131 duncomp = csa->cds->ds_phys->ds_uncompressed_bytes +
3132 cdl_uncomp -
3133 (csa->ohds->ds_phys->ds_uncompressed_bytes + odl_uncomp);
3134
3135 dsl_dir_diduse_space(csa->ohds->ds_dir, DD_USED_HEAD,
3136 dused, dcomp, duncomp, tx);
3137 dsl_dir_diduse_space(csa->cds->ds_dir, DD_USED_HEAD,
3138 -dused, -dcomp, -duncomp, tx);
3139
3140 /*
3141 * The difference in the space used by snapshots is the
3142 * difference in snapshot space due to the head's
3143 * deadlist (since that's the only thing that's
3144 * changing that affects the snapused).
3145 */
3146 dsl_deadlist_space_range(&csa->cds->ds_deadlist,
3147 csa->ohds->ds_dir->dd_origin_txg, UINT64_MAX,
3148 &cdl_used, &cdl_comp, &cdl_uncomp);
3149 dsl_deadlist_space_range(&csa->ohds->ds_deadlist,
3150 csa->ohds->ds_dir->dd_origin_txg, UINT64_MAX,
3151 &odl_used, &odl_comp, &odl_uncomp);
3152 dsl_dir_transfer_space(csa->ohds->ds_dir, cdl_used - odl_used,
3153 DD_USED_HEAD, DD_USED_SNAP, tx);
3154 }
3155
3156 /* swap ds_*_bytes */
3157 SWITCH64(csa->ohds->ds_phys->ds_used_bytes,
3158 csa->cds->ds_phys->ds_used_bytes);
3159 SWITCH64(csa->ohds->ds_phys->ds_compressed_bytes,
3160 csa->cds->ds_phys->ds_compressed_bytes);
3161 SWITCH64(csa->ohds->ds_phys->ds_uncompressed_bytes,
3162 csa->cds->ds_phys->ds_uncompressed_bytes);
3163 SWITCH64(csa->ohds->ds_phys->ds_unique_bytes,
3164 csa->cds->ds_phys->ds_unique_bytes);
3165
3166 /* apply any parent delta for change in unconsumed refreservation */
3167 dsl_dir_diduse_space(csa->ohds->ds_dir, DD_USED_REFRSRV,
3168 csa->unused_refres_delta, 0, 0, tx);
3169
3170 /*
3171 * Swap deadlists.
3172 */
3173 dsl_deadlist_close(&csa->cds->ds_deadlist);
3174 dsl_deadlist_close(&csa->ohds->ds_deadlist);
3175 SWITCH64(csa->ohds->ds_phys->ds_deadlist_obj,
3176 csa->cds->ds_phys->ds_deadlist_obj);
3177 dsl_deadlist_open(&csa->cds->ds_deadlist, dp->dp_meta_objset,
3178 csa->cds->ds_phys->ds_deadlist_obj);
3179 dsl_deadlist_open(&csa->ohds->ds_deadlist, dp->dp_meta_objset,
3180 csa->ohds->ds_phys->ds_deadlist_obj);
3181
3182 dsl_scan_ds_clone_swapped(csa->ohds, csa->cds, tx);
3183}
3184
3185/*
3186  * Swap 'clone' with its origin head dataset. Used at the end of "zfs
3187 * recv" into an existing fs to swizzle the file system to the new
3188 * version, and by "zfs rollback". Can also be used to swap two
3189 * independent head datasets if neither has any snapshots.
3190 */
3191int
3192dsl_dataset_clone_swap(dsl_dataset_t *clone, dsl_dataset_t *origin_head,
3193 boolean_t force)
3194{
3195 struct cloneswaparg csa;
3196 int error;
3197
3198 ASSERT(clone->ds_owner);
3199 ASSERT(origin_head->ds_owner);
3200retry:
3201 /*
3202 * Need exclusive access for the swap. If we're swapping these
3203 * datasets back after an error, we already hold the locks.
3204 */
3205 if (!RW_WRITE_HELD(&clone->ds_rwlock))
3206 rw_enter(&clone->ds_rwlock, RW_WRITER);
3207 if (!RW_WRITE_HELD(&origin_head->ds_rwlock) &&
3208 !rw_tryenter(&origin_head->ds_rwlock, RW_WRITER)) {
3209 rw_exit(&clone->ds_rwlock);
3210 rw_enter(&origin_head->ds_rwlock, RW_WRITER);
3211 if (!rw_tryenter(&clone->ds_rwlock, RW_WRITER)) {
3212 rw_exit(&origin_head->ds_rwlock);
3213 goto retry;
3214 }
3215 }
3216 csa.cds = clone;
3217 csa.ohds = origin_head;
3218 csa.force = force;
3219 error = dsl_sync_task_do(clone->ds_dir->dd_pool,
3220 dsl_dataset_clone_swap_check,
3221 dsl_dataset_clone_swap_sync, &csa, NULL, 9);
3222 return (error);
3223}
3224
3225/*
3226 * Given a pool name and a dataset object number in that pool,
3227 * return the name of that dataset.
3228 */
3229int
3230dsl_dsobj_to_dsname(char *pname, uint64_t obj, char *buf)
3231{
3232 spa_t *spa;
3233 dsl_pool_t *dp;
3234 dsl_dataset_t *ds;
3235 int error;
3236
3237 if ((error = spa_open(pname, &spa, FTAG)) != 0)
3238 return (error);
3239 dp = spa_get_dsl(spa);
3240 rw_enter(&dp->dp_config_rwlock, RW_READER);
3241 if ((error = dsl_dataset_hold_obj(dp, obj, FTAG, &ds)) == 0) {
3242 dsl_dataset_name(ds, buf);
3243 dsl_dataset_rele(ds, FTAG);
3244 }
3245 rw_exit(&dp->dp_config_rwlock);
3246 spa_close(spa, FTAG);
3247
3248 return (error);
3249}
3250
3251int
3252dsl_dataset_check_quota(dsl_dataset_t *ds, boolean_t check_quota,
3253 uint64_t asize, uint64_t inflight, uint64_t *used, uint64_t *ref_rsrv)
3254{
3255 int error = 0;
3256
3257 ASSERT3S(asize, >, 0);
3258
3259 /*
3260 * *ref_rsrv is the portion of asize that will come from any
3261 * unconsumed refreservation space.
3262 */
3263 *ref_rsrv = 0;
3264
3265 mutex_enter(&ds->ds_lock);
3266 /*
3267 * Make a space adjustment for reserved bytes.
3268 */
3269 if (ds->ds_reserved > ds->ds_phys->ds_unique_bytes) {
3270 ASSERT3U(*used, >=,
3271 ds->ds_reserved - ds->ds_phys->ds_unique_bytes);
3272 *used -= (ds->ds_reserved - ds->ds_phys->ds_unique_bytes);
3273 *ref_rsrv =
3274 asize - MIN(asize, parent_delta(ds, asize + inflight));
3275 }
3276
3277 if (!check_quota || ds->ds_quota == 0) {
3278 mutex_exit(&ds->ds_lock);
3279 return (0);
3280 }
3281 /*
3282 * If they are requesting more space, and our current estimate
3283 * is over quota, they get to try again unless the actual
3284 * on-disk is over quota and there are no pending changes (which
3285 * may free up space for us).
3286 */
3287 if (ds->ds_phys->ds_used_bytes + inflight >= ds->ds_quota) {
3288 if (inflight > 0 || ds->ds_phys->ds_used_bytes < ds->ds_quota)
3289 error = ERESTART;
3290 else
3291 error = EDQUOT;
3292 }
3293 mutex_exit(&ds->ds_lock);
3294
3295 return (error);
3296}
3297
3298/* ARGSUSED */
3299static int
3300dsl_dataset_set_quota_check(void *arg1, void *arg2, dmu_tx_t *tx)
3301{
3302 dsl_dataset_t *ds = arg1;
3303 dsl_prop_setarg_t *psa = arg2;
3304 int err;
3305
3306 if (spa_version(ds->ds_dir->dd_pool->dp_spa) < SPA_VERSION_REFQUOTA)
3307 return (ENOTSUP);
3308
3309 if ((err = dsl_prop_predict_sync(ds->ds_dir, psa)) != 0)
3310 return (err);
3311
3312 if (psa->psa_effective_value == 0)
3313 return (0);
3314
3315 if (psa->psa_effective_value < ds->ds_phys->ds_used_bytes ||
3316 psa->psa_effective_value < ds->ds_reserved)
3317 return (ENOSPC);
3318
3319 return (0);
3320}
3321
3322extern void dsl_prop_set_sync(void *, void *, dmu_tx_t *);
3323
3324void
3325dsl_dataset_set_quota_sync(void *arg1, void *arg2, dmu_tx_t *tx)
3326{
3327 dsl_dataset_t *ds = arg1;
3328 dsl_prop_setarg_t *psa = arg2;
3329 uint64_t effective_value = psa->psa_effective_value;
3330
3331 dsl_prop_set_sync(ds, psa, tx);
3332 DSL_PROP_CHECK_PREDICTION(ds->ds_dir, psa);
3333
3334 if (ds->ds_quota != effective_value) {
3335 dmu_buf_will_dirty(ds->ds_dbuf, tx);
3336 ds->ds_quota = effective_value;
3337
3338 spa_history_log_internal(LOG_DS_REFQUOTA,
3339 ds->ds_dir->dd_pool->dp_spa, tx, "%lld dataset = %llu ",
3340 (longlong_t)ds->ds_quota, ds->ds_object);
3341 }
3342}
3343
3344int
3345dsl_dataset_set_quota(const char *dsname, zprop_source_t source, uint64_t quota)
3346{
3347 dsl_dataset_t *ds;
3348 dsl_prop_setarg_t psa;
3349 int err;
3350
3351 dsl_prop_setarg_init_uint64(&psa, "refquota", source, &quota);
3352
3353 err = dsl_dataset_hold(dsname, FTAG, &ds);
3354 if (err)
3355 return (err);
3356
3357 /*
3358 * If someone removes a file, then tries to set the quota, we
3359 * want to make sure the file freeing takes effect.
3360 */
3361 txg_wait_open(ds->ds_dir->dd_pool, 0);
3362
3363 err = dsl_sync_task_do(ds->ds_dir->dd_pool,
3364 dsl_dataset_set_quota_check, dsl_dataset_set_quota_sync,
3365 ds, &psa, 0);
3366
3367 dsl_dataset_rele(ds, FTAG);
3368 return (err);
3369}
3370
3371static int
3372dsl_dataset_set_reservation_check(void *arg1, void *arg2, dmu_tx_t *tx)
3373{
3374 dsl_dataset_t *ds = arg1;
3375 dsl_prop_setarg_t *psa = arg2;
3376 uint64_t effective_value;
3377 uint64_t unique;
3378 int err;
3379
3380 if (spa_version(ds->ds_dir->dd_pool->dp_spa) <
3381 SPA_VERSION_REFRESERVATION)
3382 return (ENOTSUP);
3383
3384 if (dsl_dataset_is_snapshot(ds))
3385 return (EINVAL);
3386
3387 if ((err = dsl_prop_predict_sync(ds->ds_dir, psa)) != 0)
3388 return (err);
3389
3390 effective_value = psa->psa_effective_value;
3391
3392 /*
3393 * If we are doing the preliminary check in open context, the
3394 * space estimates may be inaccurate.
3395 */
3396 if (!dmu_tx_is_syncing(tx))
3397 return (0);
3398
3399 mutex_enter(&ds->ds_lock);
3400 if (!DS_UNIQUE_IS_ACCURATE(ds))
3401 dsl_dataset_recalc_head_uniq(ds);
3402 unique = ds->ds_phys->ds_unique_bytes;
3403 mutex_exit(&ds->ds_lock);
3404
3405 if (MAX(unique, effective_value) > MAX(unique, ds->ds_reserved)) {
3406 uint64_t delta = MAX(unique, effective_value) -
3407 MAX(unique, ds->ds_reserved);
3408
3409 if (delta > dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE))
3410 return (ENOSPC);
3411 if (ds->ds_quota > 0 &&
3412 effective_value > ds->ds_quota)
3413 return (ENOSPC);
3414 }
3415
3416 return (0);
3417}
3418
3419static void
3420dsl_dataset_set_reservation_sync(void *arg1, void *arg2, dmu_tx_t *tx)
3421{
3422 dsl_dataset_t *ds = arg1;
3423 dsl_prop_setarg_t *psa = arg2;
3424 uint64_t effective_value = psa->psa_effective_value;
3425 uint64_t unique;
3426 int64_t delta;
3427
3428 dsl_prop_set_sync(ds, psa, tx);
3429 DSL_PROP_CHECK_PREDICTION(ds->ds_dir, psa);
3430
3431 dmu_buf_will_dirty(ds->ds_dbuf, tx);
3432
3433 mutex_enter(&ds->ds_dir->dd_lock);
3434 mutex_enter(&ds->ds_lock);
3435 ASSERT(DS_UNIQUE_IS_ACCURATE(ds));
3436 unique = ds->ds_phys->ds_unique_bytes;
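	/*
	 * delta is the change in unconsumed refreservation (the part of
	 * the reservation not yet backed by unique data), which is what
	 * the dsl_dir is charged for under DD_USED_REFRSRV.
	 */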
3437 delta = MAX(0, (int64_t)(effective_value - unique)) -
3438 MAX(0, (int64_t)(ds->ds_reserved - unique));
3439 ds->ds_reserved = effective_value;
3440 mutex_exit(&ds->ds_lock);
3441
3442 dsl_dir_diduse_space(ds->ds_dir, DD_USED_REFRSRV, delta, 0, 0, tx);
3443 mutex_exit(&ds->ds_dir->dd_lock);
3444
3445 spa_history_log_internal(LOG_DS_REFRESERV,
3446 ds->ds_dir->dd_pool->dp_spa, tx, "%lld dataset = %llu",
3447 (longlong_t)effective_value, ds->ds_object);
3448}
3449
3450int
3451dsl_dataset_set_reservation(const char *dsname, zprop_source_t source,
3452 uint64_t reservation)
3453{
3454 dsl_dataset_t *ds;
3455 dsl_prop_setarg_t psa;
3456 int err;
3457
3458 dsl_prop_setarg_init_uint64(&psa, "refreservation", source,
3459 &reservation);
3460
3461 err = dsl_dataset_hold(dsname, FTAG, &ds);
3462 if (err)
3463 return (err);
3464
3465 err = dsl_sync_task_do(ds->ds_dir->dd_pool,
3466 dsl_dataset_set_reservation_check,
3467 dsl_dataset_set_reservation_sync, ds, &psa, 0);
3468
3469 dsl_dataset_rele(ds, FTAG);
3470 return (err);
3471}
3472
3473typedef struct zfs_hold_cleanup_arg {
3474 dsl_pool_t *dp;
3475 uint64_t dsobj;
3476 char htag[MAXNAMELEN];
3477} zfs_hold_cleanup_arg_t;
3478
3479static void
3480dsl_dataset_user_release_onexit(void *arg)
3481{
3482 zfs_hold_cleanup_arg_t *ca = arg;
3483
3484 (void) dsl_dataset_user_release_tmp(ca->dp, ca->dsobj, ca->htag,
3485 B_TRUE);
3486 kmem_free(ca, sizeof (zfs_hold_cleanup_arg_t));
3487}
3488
3489void
3490dsl_register_onexit_hold_cleanup(dsl_dataset_t *ds, const char *htag,
3491 minor_t minor)
3492{
3493 zfs_hold_cleanup_arg_t *ca;
3494
3495 ca = kmem_alloc(sizeof (zfs_hold_cleanup_arg_t), KM_SLEEP);
3496 ca->dp = ds->ds_dir->dd_pool;
3497 ca->dsobj = ds->ds_object;
3498 (void) strlcpy(ca->htag, htag, sizeof (ca->htag));
3499 VERIFY3U(0, ==, zfs_onexit_add_cb(minor,
3500 dsl_dataset_user_release_onexit, ca, NULL));
3501}
3502
3503/*
3504 * If you add new checks here, you may need to add
3505 * additional checks to the "temporary" case in
3506 * snapshot_check() in dmu_objset.c.
3507 */
3508static int
3509dsl_dataset_user_hold_check(void *arg1, void *arg2, dmu_tx_t *tx)
3510{
3511 dsl_dataset_t *ds = arg1;
3512 struct dsl_ds_holdarg *ha = arg2;
3513 char *htag = ha->htag;
3514 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
3515 int error = 0;
3516
3517 if (spa_version(ds->ds_dir->dd_pool->dp_spa) < SPA_VERSION_USERREFS)
3518 return (ENOTSUP);
3519
3520 if (!dsl_dataset_is_snapshot(ds))
3521 return (EINVAL);
3522
3523 /* tags must be unique */
3524 mutex_enter(&ds->ds_lock);
3525 if (ds->ds_phys->ds_userrefs_obj) {
3526 error = zap_lookup(mos, ds->ds_phys->ds_userrefs_obj, htag,
3527 8, 1, tx);
3528 if (error == 0)
3529 error = EEXIST;
3530 else if (error == ENOENT)
3531 error = 0;
3532 }
3533 mutex_exit(&ds->ds_lock);
3534
3535 if (error == 0 && ha->temphold &&
3536 strlen(htag) + MAX_TAG_PREFIX_LEN >= MAXNAMELEN)
3537 error = E2BIG;
3538
3539 return (error);
3540}
3541
3542void
3543dsl_dataset_user_hold_sync(void *arg1, void *arg2, dmu_tx_t *tx)
3544{
3545 dsl_dataset_t *ds = arg1;
3546 struct dsl_ds_holdarg *ha = arg2;
3547 char *htag = ha->htag;
3548 dsl_pool_t *dp = ds->ds_dir->dd_pool;
3549 objset_t *mos = dp->dp_meta_objset;
3550 uint64_t now = gethrestime_sec();
3551 uint64_t zapobj;
3552
3553 mutex_enter(&ds->ds_lock);
3554 if (ds->ds_phys->ds_userrefs_obj == 0) {
3555 /*
3556 * This is the first user hold for this dataset. Create
3557 * the userrefs zap object.
3558 */
3559 dmu_buf_will_dirty(ds->ds_dbuf, tx);
3560 zapobj = ds->ds_phys->ds_userrefs_obj =
3561 zap_create(mos, DMU_OT_USERREFS, DMU_OT_NONE, 0, tx);
3562 } else {
3563 zapobj = ds->ds_phys->ds_userrefs_obj;
3564 }
3565 ds->ds_userrefs++;
3566 mutex_exit(&ds->ds_lock);
3567
3568 VERIFY(0 == zap_add(mos, zapobj, htag, 8, 1, &now, tx));
3569
3570 if (ha->temphold) {
3571 VERIFY(0 == dsl_pool_user_hold(dp, ds->ds_object,
3572 htag, &now, tx));
3573 }
3574
3575 spa_history_log_internal(LOG_DS_USER_HOLD,
3576 dp->dp_spa, tx, "<%s> temp = %d dataset = %llu", htag,
3577 (int)ha->temphold, ds->ds_object);
3578}
3579
3580static int
3581dsl_dataset_user_hold_one(const char *dsname, void *arg)
3582{
3583 struct dsl_ds_holdarg *ha = arg;
3584 dsl_dataset_t *ds;
3585 int error;
3586 char *name;
3587
3588 /* alloc a buffer to hold dsname@snapname plus terminating NULL */
3589 name = kmem_asprintf("%s@%s", dsname, ha->snapname);
3590 error = dsl_dataset_hold(name, ha->dstg, &ds);
3591 strfree(name);
3592 if (error == 0) {
3593 ha->gotone = B_TRUE;
3594 dsl_sync_task_create(ha->dstg, dsl_dataset_user_hold_check,
3595 dsl_dataset_user_hold_sync, ds, ha, 0);
3596 } else if (error == ENOENT && ha->recursive) {
3597 error = 0;
3598 } else {
3599 (void) strlcpy(ha->failed, dsname, sizeof (ha->failed));
3600 }
3601 return (error);
3602}
3603
3604int
3605dsl_dataset_user_hold_for_send(dsl_dataset_t *ds, char *htag,
3606 boolean_t temphold)
3607{
3608 struct dsl_ds_holdarg *ha;
3609 int error;
3610
3611 ha = kmem_zalloc(sizeof (struct dsl_ds_holdarg), KM_SLEEP);
3612 ha->htag = htag;
3613 ha->temphold = temphold;
3614 error = dsl_sync_task_do(ds->ds_dir->dd_pool,
3615 dsl_dataset_user_hold_check, dsl_dataset_user_hold_sync,
3616 ds, ha, 0);
3617 kmem_free(ha, sizeof (struct dsl_ds_holdarg));
3618
3619 return (error);
3620}
3621
3622int
3623dsl_dataset_user_hold(char *dsname, char *snapname, char *htag,
3624 boolean_t recursive, boolean_t temphold, int cleanup_fd)
3625{
3626 struct dsl_ds_holdarg *ha;
3627 dsl_sync_task_t *dst;
3628 spa_t *spa;
3629 int error;
3630 minor_t minor = 0;
3631
3632 if (cleanup_fd != -1) {
3633 /* Currently we only support cleanup-on-exit of tempholds. */
3634 if (!temphold)
3635 return (EINVAL);
3636 error = zfs_onexit_fd_hold(cleanup_fd, &minor);
3637 if (error)
3638 return (error);
3639 }
3640
3641 ha = kmem_zalloc(sizeof (struct dsl_ds_holdarg), KM_SLEEP);
3642
3643 (void) strlcpy(ha->failed, dsname, sizeof (ha->failed));
3644
3645 error = spa_open(dsname, &spa, FTAG);
3646 if (error) {
3647 kmem_free(ha, sizeof (struct dsl_ds_holdarg));
3648 if (cleanup_fd != -1)
3649 zfs_onexit_fd_rele(cleanup_fd);
3650 return (error);
3651 }
3652
3653 ha->dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
3654 ha->htag = htag;
3655 ha->snapname = snapname;
3656 ha->recursive = recursive;
3657 ha->temphold = temphold;
3658
3659 if (recursive) {
3660 error = dmu_objset_find(dsname, dsl_dataset_user_hold_one,
3661 ha, DS_FIND_CHILDREN);
3662 } else {
3663 error = dsl_dataset_user_hold_one(dsname, ha);
3664 }
3665 if (error == 0)
3666 error = dsl_sync_task_group_wait(ha->dstg);
3667
3668 for (dst = list_head(&ha->dstg->dstg_tasks); dst;
3669 dst = list_next(&ha->dstg->dstg_tasks, dst)) {
3670 dsl_dataset_t *ds = dst->dst_arg1;
3671
3672 if (dst->dst_err) {
3673 dsl_dataset_name(ds, ha->failed);
3674 *strchr(ha->failed, '@') = '\0';
3675 } else if (error == 0 && minor != 0 && temphold) {
3676 /*
3677 * If this hold is to be released upon process exit,
3678 * register that action now.
3679 */
3680 dsl_register_onexit_hold_cleanup(ds, htag, minor);
3681 }
3682 dsl_dataset_rele(ds, ha->dstg);
3683 }
3684
3685 if (error == 0 && recursive && !ha->gotone)
3686 error = ENOENT;
3687
3688 if (error)
3689 (void) strlcpy(dsname, ha->failed, sizeof (ha->failed));
3690
3691 dsl_sync_task_group_destroy(ha->dstg);
3692
3693 kmem_free(ha, sizeof (struct dsl_ds_holdarg));
3694 spa_close(spa, FTAG);
3695 if (cleanup_fd != -1)
3696 zfs_onexit_fd_rele(cleanup_fd);
3697 return (error);
3698}
3699
3700struct dsl_ds_releasearg {
3701 dsl_dataset_t *ds;
3702 const char *htag;
3703 boolean_t own; /* do we own or just hold ds? */
3704};
3705
3706static int
3707dsl_dataset_release_might_destroy(dsl_dataset_t *ds, const char *htag,
3708 boolean_t *might_destroy)
3709{
3710 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
3711 uint64_t zapobj;
3712 uint64_t tmp;
3713 int error;
3714
3715 *might_destroy = B_FALSE;
3716
3717 mutex_enter(&ds->ds_lock);
3718 zapobj = ds->ds_phys->ds_userrefs_obj;
3719 if (zapobj == 0) {
3720 /* The tag can't possibly exist */
3721 mutex_exit(&ds->ds_lock);
3722 return (ESRCH);
3723 }
3724
3725 /* Make sure the tag exists */
3726 error = zap_lookup(mos, zapobj, htag, 8, 1, &tmp);
3727 if (error) {
3728 mutex_exit(&ds->ds_lock);
3729 if (error == ENOENT)
3730 error = ESRCH;
3731 return (error);
3732 }
3733
3734 if (ds->ds_userrefs == 1 && ds->ds_phys->ds_num_children == 1 &&
3735 DS_IS_DEFER_DESTROY(ds))
3736 *might_destroy = B_TRUE;
3737
3738 mutex_exit(&ds->ds_lock);
3739 return (0);
3740}
3741
3742static int
3743dsl_dataset_user_release_check(void *arg1, void *tag, dmu_tx_t *tx)
3744{
3745 struct dsl_ds_releasearg *ra = arg1;
3746 dsl_dataset_t *ds = ra->ds;
3747 boolean_t might_destroy;
3748 int error;
3749
3750 if (spa_version(ds->ds_dir->dd_pool->dp_spa) < SPA_VERSION_USERREFS)
3751 return (ENOTSUP);
3752
3753 error = dsl_dataset_release_might_destroy(ds, ra->htag, &might_destroy);
3754 if (error)
3755 return (error);
3756
3757 if (might_destroy) {
3758 struct dsl_ds_destroyarg dsda = {0};
3759
3760 if (dmu_tx_is_syncing(tx)) {
3761 /*
3762 * If we're not prepared to remove the snapshot,
3763 * we can't allow the release to happen right now.
3764 */
3765 if (!ra->own)
3766 return (EBUSY);
3767 }
3768 dsda.ds = ds;
3769 dsda.releasing = B_TRUE;
3770 return (dsl_dataset_destroy_check(&dsda, tag, tx));
3771 }
3772
3773 return (0);
3774}
3775
3776static void
3777dsl_dataset_user_release_sync(void *arg1, void *tag, dmu_tx_t *tx)
3778{
3779 struct dsl_ds_releasearg *ra = arg1;
3780 dsl_dataset_t *ds = ra->ds;
3781 dsl_pool_t *dp = ds->ds_dir->dd_pool;
3782 objset_t *mos = dp->dp_meta_objset;
3783 uint64_t zapobj;
3784 uint64_t dsobj = ds->ds_object;
3785 uint64_t refs;
3786 int error;
3787
3788 mutex_enter(&ds->ds_lock);
3789 ds->ds_userrefs--;
3790 refs = ds->ds_userrefs;
3791 mutex_exit(&ds->ds_lock);
3792 error = dsl_pool_user_release(dp, ds->ds_object, ra->htag, tx);
3793 VERIFY(error == 0 || error == ENOENT);
3794 zapobj = ds->ds_phys->ds_userrefs_obj;
3795 VERIFY(0 == zap_remove(mos, zapobj, ra->htag, tx));
3796 if (ds->ds_userrefs == 0 && ds->ds_phys->ds_num_children == 1 &&
3797 DS_IS_DEFER_DESTROY(ds)) {
3798 struct dsl_ds_destroyarg dsda = {0};
3799
3800 ASSERT(ra->own);
3801 dsda.ds = ds;
3802 dsda.releasing = B_TRUE;
3803 /* We already did the destroy_check */
3804 dsl_dataset_destroy_sync(&dsda, tag, tx);
3805 }
3806
3807 spa_history_log_internal(LOG_DS_USER_RELEASE,
3808 dp->dp_spa, tx, "<%s> %lld dataset = %llu",
3809 ra->htag, (longlong_t)refs, dsobj);
3810}
3811
3812static int
3813dsl_dataset_user_release_one(const char *dsname, void *arg)
3814{
3815 struct dsl_ds_holdarg *ha = arg;
3816 struct dsl_ds_releasearg *ra;
3817 dsl_dataset_t *ds;
3818 int error;
3819 void *dtag = ha->dstg;
3820 char *name;
3821 boolean_t own = B_FALSE;
3822 boolean_t might_destroy;
3823
3824 /* alloc a buffer to hold dsname@snapname, plus the terminating NULL */
3825 name = kmem_asprintf("%s@%s", dsname, ha->snapname);
3826 error = dsl_dataset_hold(name, dtag, &ds);
3827 strfree(name);
3828 if (error == ENOENT && ha->recursive)
3829 return (0);
3830 (void) strlcpy(ha->failed, dsname, sizeof (ha->failed));
3831 if (error)
3832 return (error);
3833
3834 ha->gotone = B_TRUE;
3835
3836 ASSERT(dsl_dataset_is_snapshot(ds));
3837
3838 error = dsl_dataset_release_might_destroy(ds, ha->htag, &might_destroy);
3839 if (error) {
3840 dsl_dataset_rele(ds, dtag);
3841 return (error);
3842 }
3843
3844 if (might_destroy) {
3845#ifdef _KERNEL
3846 name = kmem_asprintf("%s@%s", dsname, ha->snapname);
3847 error = zfs_unmount_snap(name, NULL);
3848 strfree(name);
3849 if (error) {
3850 dsl_dataset_rele(ds, dtag);
3851 return (error);
3852 }
3853#endif
3854 if (!dsl_dataset_tryown(ds, B_TRUE, dtag)) {
3855 dsl_dataset_rele(ds, dtag);
3856 return (EBUSY);
3857 } else {
3858 own = B_TRUE;
3859 dsl_dataset_make_exclusive(ds, dtag);
3860 }
3861 }
3862
3863 ra = kmem_alloc(sizeof (struct dsl_ds_releasearg), KM_SLEEP);
3864 ra->ds = ds;
3865 ra->htag = ha->htag;
3866 ra->own = own;
3867 dsl_sync_task_create(ha->dstg, dsl_dataset_user_release_check,
3868 dsl_dataset_user_release_sync, ra, dtag, 0);
3869
3870 return (0);
3871}
3872
3873int
3874dsl_dataset_user_release(char *dsname, char *snapname, char *htag,
3875 boolean_t recursive)
3876{
3877 struct dsl_ds_holdarg *ha;
3878 dsl_sync_task_t *dst;
3879 spa_t *spa;
3880 int error;
3881
3882top:
3883 ha = kmem_zalloc(sizeof (struct dsl_ds_holdarg), KM_SLEEP);
3884
3885 (void) strlcpy(ha->failed, dsname, sizeof (ha->failed));
3886
3887 error = spa_open(dsname, &spa, FTAG);
3888 if (error) {
3889 kmem_free(ha, sizeof (struct dsl_ds_holdarg));
3890 return (error);
3891 }
3892
3893 ha->dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
3894 ha->htag = htag;
3895 ha->snapname = snapname;
3896 ha->recursive = recursive;
3897 if (recursive) {
3898 error = dmu_objset_find(dsname, dsl_dataset_user_release_one,
3899 ha, DS_FIND_CHILDREN);
3900 } else {
3901 error = dsl_dataset_user_release_one(dsname, ha);
3902 }
3903 if (error == 0)
3904 error = dsl_sync_task_group_wait(ha->dstg);
3905
3906 for (dst = list_head(&ha->dstg->dstg_tasks); dst;
3907 dst = list_next(&ha->dstg->dstg_tasks, dst)) {
3908 struct dsl_ds_releasearg *ra = dst->dst_arg1;
3909 dsl_dataset_t *ds = ra->ds;
3910
3911 if (dst->dst_err)
3912 dsl_dataset_name(ds, ha->failed);
3913
3914 if (ra->own)
3915 dsl_dataset_disown(ds, ha->dstg);
3916 else
3917 dsl_dataset_rele(ds, ha->dstg);
3918
3919 kmem_free(ra, sizeof (struct dsl_ds_releasearg));
3920 }
3921
3922 if (error == 0 && recursive && !ha->gotone)
3923 error = ENOENT;
3924
3925 if (error && error != EBUSY)
3926 (void) strlcpy(dsname, ha->failed, sizeof (ha->failed));
3927
3928 dsl_sync_task_group_destroy(ha->dstg);
3929 kmem_free(ha, sizeof (struct dsl_ds_holdarg));
3930 spa_close(spa, FTAG);
3931
3932 /*
3933 * We can get EBUSY if we were racing with deferred destroy and
3934 * dsl_dataset_user_release_check() hadn't done the necessary
3935 * open context setup. We can also get EBUSY if we're racing
3936 * with destroy and that thread is the ds_owner. Either way
3937 * the busy condition should be transient, and we should retry
3938 * the release operation.
3939 */
3940 if (error == EBUSY)
3941 goto top;
3942
3943 return (error);
3944}
3945
3946/*
3947 * Called at spa_load time (with retry == B_FALSE) to release a stale
3948 * temporary user hold. Also called by the onexit code (with retry == B_TRUE).
3949 */
3950int
3951dsl_dataset_user_release_tmp(dsl_pool_t *dp, uint64_t dsobj, char *htag,
3952 boolean_t retry)
3953{
3954 dsl_dataset_t *ds;
3955 char *snap;
3956 char *name;
3957 int namelen;
3958 int error;
3959
3960 do {
3961 rw_enter(&dp->dp_config_rwlock, RW_READER);
3962 error = dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds);
3963 rw_exit(&dp->dp_config_rwlock);
3964 if (error)
3965 return (error);
3966 namelen = dsl_dataset_namelen(ds)+1;
3967 name = kmem_alloc(namelen, KM_SLEEP);
3968 dsl_dataset_name(ds, name);
3969 dsl_dataset_rele(ds, FTAG);
3970
3971 snap = strchr(name, '@');
3972 *snap = '\0';
3973 ++snap;
3974 error = dsl_dataset_user_release(name, snap, htag, B_FALSE);
3975 kmem_free(name, namelen);
3976
3977 /*
3978 * The object can't have been destroyed because we have a hold,
3979 * but it might have been renamed, resulting in ENOENT. Retry
3980 * if we've been requested to do so.
3981 *
3982 * It would be nice if we could use the dsobj all the way
3983 * through and avoid ENOENT entirely. But we might need to
3984	 * unmount the snapshot, and there's currently no way to look up
3985 * a vfsp using a ZFS object id.
3986 */
3987 } while ((error == ENOENT) && retry);
3988
3989 return (error);
3990}
3991
3992int
3993dsl_dataset_get_holds(const char *dsname, nvlist_t **nvp)
3994{
3995 dsl_dataset_t *ds;
3996 int err;
3997
3998 err = dsl_dataset_hold(dsname, FTAG, &ds);
3999 if (err)
4000 return (err);
4001
4002 VERIFY(0 == nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP));
4003 if (ds->ds_phys->ds_userrefs_obj != 0) {
4004 zap_attribute_t *za;
4005 zap_cursor_t zc;
4006
4007 za = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);
4008 for (zap_cursor_init(&zc, ds->ds_dir->dd_pool->dp_meta_objset,
4009 ds->ds_phys->ds_userrefs_obj);
4010 zap_cursor_retrieve(&zc, za) == 0;
4011 zap_cursor_advance(&zc)) {
4012 VERIFY(0 == nvlist_add_uint64(*nvp, za->za_name,
4013 za->za_first_integer));
4014 }
4015 zap_cursor_fini(&zc);
4016 kmem_free(za, sizeof (zap_attribute_t));
4017 }
4018 dsl_dataset_rele(ds, FTAG);
4019 return (0);
4020}
4021
4022/*
4023 * Note, this function is used as the callback for dmu_objset_find(). We
4024 * always return 0 so that we will continue to find and process
4025 * inconsistent datasets, even if we encounter an error trying to
4026 * process one of them.
4027 */
4028/* ARGSUSED */
4029int
4030dsl_destroy_inconsistent(const char *dsname, void *arg)
4031{
4032 dsl_dataset_t *ds;
4033
4034 if (dsl_dataset_own(dsname, B_TRUE, FTAG, &ds) == 0) {
4035 if (DS_IS_INCONSISTENT(ds))
4036 (void) dsl_dataset_destroy(ds, FTAG, B_FALSE);
4037 else
4038 dsl_dataset_disown(ds, FTAG);
4039 }
4040 return (0);
4041}
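/*
 * Illustrative sketch of how this callback can be wired up: a pool-wide
 * sweep may be done with dmu_objset_find(), e.g.
 *
 *	(void) dmu_objset_find(spa_name(spa), dsl_destroy_inconsistent,
 *	    NULL, DS_FIND_CHILDREN);
 *
 * The spa_name(spa) starting point and the DS_FIND_CHILDREN flag above are
 * assumptions for illustration; the key point is that the unconditional
 * return of 0 keeps the traversal going even when one dataset cannot be
 * destroyed.
 */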
2194 }
2195}
2196
2197void
2198dsl_dataset_fast_stat(dsl_dataset_t *ds, dmu_objset_stats_t *stat)
2199{
2200 stat->dds_creation_txg = ds->ds_phys->ds_creation_txg;
2201 stat->dds_inconsistent = ds->ds_phys->ds_flags & DS_FLAG_INCONSISTENT;
2202 stat->dds_guid = ds->ds_phys->ds_guid;
2203 if (ds->ds_phys->ds_next_snap_obj) {
2204 stat->dds_is_snapshot = B_TRUE;
2205 stat->dds_num_clones = ds->ds_phys->ds_num_children - 1;
2206 } else {
2207 stat->dds_is_snapshot = B_FALSE;
2208 stat->dds_num_clones = 0;
2209 }
2210
2211 /* clone origin is really a dsl_dir thing... */
2212 rw_enter(&ds->ds_dir->dd_pool->dp_config_rwlock, RW_READER);
2213 if (dsl_dir_is_clone(ds->ds_dir)) {
2214 dsl_dataset_t *ods;
2215
2216 VERIFY(0 == dsl_dataset_get_ref(ds->ds_dir->dd_pool,
2217 ds->ds_dir->dd_phys->dd_origin_obj, FTAG, &ods));
2218 dsl_dataset_name(ods, stat->dds_origin);
2219 dsl_dataset_drop_ref(ods, FTAG);
2220 } else {
2221 stat->dds_origin[0] = '\0';
2222 }
2223 rw_exit(&ds->ds_dir->dd_pool->dp_config_rwlock);
2224}
2225
2226uint64_t
2227dsl_dataset_fsid_guid(dsl_dataset_t *ds)
2228{
2229 return (ds->ds_fsid_guid);
2230}
2231
2232void
2233dsl_dataset_space(dsl_dataset_t *ds,
2234 uint64_t *refdbytesp, uint64_t *availbytesp,
2235 uint64_t *usedobjsp, uint64_t *availobjsp)
2236{
2237 *refdbytesp = ds->ds_phys->ds_used_bytes;
2238 *availbytesp = dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE);
2239 if (ds->ds_reserved > ds->ds_phys->ds_unique_bytes)
2240 *availbytesp += ds->ds_reserved - ds->ds_phys->ds_unique_bytes;
2241 if (ds->ds_quota != 0) {
2242 /*
2243 * Adjust available bytes according to refquota
2244 */
2245 if (*refdbytesp < ds->ds_quota)
2246 *availbytesp = MIN(*availbytesp,
2247 ds->ds_quota - *refdbytesp);
2248 else
2249 *availbytesp = 0;
2250 }
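	/*
	 * Worked example of the refquota clamping above (hypothetical
	 * numbers): with a 1G refquota and 600M referenced, *availbytesp
	 * is clamped to at most 400M no matter how much free space the
	 * pool has; once the referenced space reaches the refquota,
	 * *availbytesp reports 0.
	 */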
2251 *usedobjsp = ds->ds_phys->ds_bp.blk_fill;
2252 *availobjsp = DN_MAX_OBJECT - *usedobjsp;
2253}
2254
2255boolean_t
2256dsl_dataset_modified_since_lastsnap(dsl_dataset_t *ds)
2257{
2258 dsl_pool_t *dp = ds->ds_dir->dd_pool;
2259
2260 ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock) ||
2261 dsl_pool_sync_context(dp));
2262 if (ds->ds_prev == NULL)
2263 return (B_FALSE);
2264 if (ds->ds_phys->ds_bp.blk_birth >
2265 ds->ds_prev->ds_phys->ds_creation_txg) {
2266 objset_t *os, *os_prev;
2267 /*
2268 * It may be that only the ZIL differs, because it was
2269 * reset in the head. Don't count that as being
2270 * modified.
2271 */
2272 if (dmu_objset_from_ds(ds, &os) != 0)
2273 return (B_TRUE);
2274 if (dmu_objset_from_ds(ds->ds_prev, &os_prev) != 0)
2275 return (B_TRUE);
2276 return (bcmp(&os->os_phys->os_meta_dnode,
2277 &os_prev->os_phys->os_meta_dnode,
2278 sizeof (os->os_phys->os_meta_dnode)) != 0);
2279 }
2280 return (B_FALSE);
2281}
2282
2283/* ARGSUSED */
2284static int
2285dsl_dataset_snapshot_rename_check(void *arg1, void *arg2, dmu_tx_t *tx)
2286{
2287 dsl_dataset_t *ds = arg1;
2288 char *newsnapname = arg2;
2289 dsl_dir_t *dd = ds->ds_dir;
2290 dsl_dataset_t *hds;
2291 uint64_t val;
2292 int err;
2293
2294 err = dsl_dataset_hold_obj(dd->dd_pool,
2295 dd->dd_phys->dd_head_dataset_obj, FTAG, &hds);
2296 if (err)
2297 return (err);
2298
2299 /* new name better not be in use */
2300 err = dsl_dataset_snap_lookup(hds, newsnapname, &val);
2301 dsl_dataset_rele(hds, FTAG);
2302
2303 if (err == 0)
2304 err = EEXIST;
2305 else if (err == ENOENT)
2306 err = 0;
2307
2308 /* dataset name + 1 for the "@" + the new snapshot name must fit */
2309 if (dsl_dir_namelen(ds->ds_dir) + 1 + strlen(newsnapname) >= MAXNAMELEN)
2310 err = ENAMETOOLONG;
2311
2312 return (err);
2313}
2314
2315static void
2316dsl_dataset_snapshot_rename_sync(void *arg1, void *arg2, dmu_tx_t *tx)
2317{
2318 char oldname[MAXPATHLEN], newname[MAXPATHLEN];
2319 dsl_dataset_t *ds = arg1;
2320 const char *newsnapname = arg2;
2321 dsl_dir_t *dd = ds->ds_dir;
2322 objset_t *mos = dd->dd_pool->dp_meta_objset;
2323 dsl_dataset_t *hds;
2324 int err;
2325
2326 ASSERT(ds->ds_phys->ds_next_snap_obj != 0);
2327
2328 VERIFY(0 == dsl_dataset_hold_obj(dd->dd_pool,
2329 dd->dd_phys->dd_head_dataset_obj, FTAG, &hds));
2330
2331 VERIFY(0 == dsl_dataset_get_snapname(ds));
2332 err = dsl_dataset_snap_remove(hds, ds->ds_snapname, tx);
2333 ASSERT3U(err, ==, 0);
2334 dsl_dataset_name(ds, oldname);
2335 mutex_enter(&ds->ds_lock);
2336 (void) strcpy(ds->ds_snapname, newsnapname);
2337 mutex_exit(&ds->ds_lock);
2338 err = zap_add(mos, hds->ds_phys->ds_snapnames_zapobj,
2339 ds->ds_snapname, 8, 1, &ds->ds_object, tx);
2340 ASSERT3U(err, ==, 0);
2341 dsl_dataset_name(ds, newname);
2342#ifdef _KERNEL
2343 zvol_rename_minors(oldname, newname);
2344#endif
2345
2346 spa_history_log_internal(LOG_DS_RENAME, dd->dd_pool->dp_spa, tx,
2347 "dataset = %llu", ds->ds_object);
2348 dsl_dataset_rele(hds, FTAG);
2349}
2350
2351struct renamesnaparg {
2352 dsl_sync_task_group_t *dstg;
2353 char failed[MAXPATHLEN];
2354 char *oldsnap;
2355 char *newsnap;
2356};
2357
2358static int
2359dsl_snapshot_rename_one(const char *name, void *arg)
2360{
2361 struct renamesnaparg *ra = arg;
2362 dsl_dataset_t *ds = NULL;
2363 char *snapname;
2364 int err;
2365
2366 snapname = kmem_asprintf("%s@%s", name, ra->oldsnap);
2367 (void) strlcpy(ra->failed, snapname, sizeof (ra->failed));
2368
2369 /*
2370	 * For recursive snapshot renames, the parent won't be changing,
2371	 * so we just pass name for both the to/from arguments.
2372 */
2373 err = zfs_secpolicy_rename_perms(snapname, snapname, CRED());
2374 if (err != 0) {
2375 strfree(snapname);
2376 return (err == ENOENT ? 0 : err);
2377 }
2378
2379#ifdef _KERNEL
2380 /*
2381	 * For all filesystems undergoing rename, we'll need to unmount the snapshot.
2382 */
2383 (void) zfs_unmount_snap(snapname, NULL);
2384#endif
2385 err = dsl_dataset_hold(snapname, ra->dstg, &ds);
2386 strfree(snapname);
2387 if (err != 0)
2388 return (err == ENOENT ? 0 : err);
2389
2390 dsl_sync_task_create(ra->dstg, dsl_dataset_snapshot_rename_check,
2391 dsl_dataset_snapshot_rename_sync, ds, ra->newsnap, 0);
2392
2393 return (0);
2394}
2395
2396static int
2397dsl_recursive_rename(char *oldname, const char *newname)
2398{
2399 int err;
2400 struct renamesnaparg *ra;
2401 dsl_sync_task_t *dst;
2402 spa_t *spa;
2403 char *cp, *fsname = spa_strdup(oldname);
2404 int len = strlen(oldname) + 1;
2405
2406 /* truncate the snapshot name to get the fsname */
2407 cp = strchr(fsname, '@');
2408 *cp = '\0';
2409
2410 err = spa_open(fsname, &spa, FTAG);
2411 if (err) {
2412 kmem_free(fsname, len);
2413 return (err);
2414 }
2415 ra = kmem_alloc(sizeof (struct renamesnaparg), KM_SLEEP);
2416 ra->dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
2417
2418 ra->oldsnap = strchr(oldname, '@') + 1;
2419 ra->newsnap = strchr(newname, '@') + 1;
2420 *ra->failed = '\0';
2421
2422 err = dmu_objset_find(fsname, dsl_snapshot_rename_one, ra,
2423 DS_FIND_CHILDREN);
2424 kmem_free(fsname, len);
2425
2426 if (err == 0) {
2427 err = dsl_sync_task_group_wait(ra->dstg);
2428 }
2429
2430 for (dst = list_head(&ra->dstg->dstg_tasks); dst;
2431 dst = list_next(&ra->dstg->dstg_tasks, dst)) {
2432 dsl_dataset_t *ds = dst->dst_arg1;
2433 if (dst->dst_err) {
2434 dsl_dir_name(ds->ds_dir, ra->failed);
2435 (void) strlcat(ra->failed, "@", sizeof (ra->failed));
2436 (void) strlcat(ra->failed, ra->newsnap,
2437 sizeof (ra->failed));
2438 }
2439 dsl_dataset_rele(ds, ra->dstg);
2440 }
2441
2442 if (err)
2443 (void) strlcpy(oldname, ra->failed, sizeof (ra->failed));
2444
2445 dsl_sync_task_group_destroy(ra->dstg);
2446 kmem_free(ra, sizeof (struct renamesnaparg));
2447 spa_close(spa, FTAG);
2448 return (err);
2449}
2450
2451static int
2452dsl_valid_rename(const char *oldname, void *arg)
2453{
2454 int delta = *(int *)arg;
2455
2456 if (strlen(oldname) + delta >= MAXNAMELEN)
2457 return (ENAMETOOLONG);
2458
2459 return (0);
2460}
2461
2462#pragma weak dmu_objset_rename = dsl_dataset_rename
2463int
2464dsl_dataset_rename(char *oldname, const char *newname, boolean_t recursive)
2465{
2466 dsl_dir_t *dd;
2467 dsl_dataset_t *ds;
2468 const char *tail;
2469 int err;
2470
2471 err = dsl_dir_open(oldname, FTAG, &dd, &tail);
2472 if (err)
2473 return (err);
2474
2475 if (tail == NULL) {
2476 int delta = strlen(newname) - strlen(oldname);
2477
2478 /* if we're growing, validate child name lengths */
2479 if (delta > 0)
2480 err = dmu_objset_find(oldname, dsl_valid_rename,
2481 &delta, DS_FIND_CHILDREN | DS_FIND_SNAPSHOTS);
2482
2483 if (err == 0)
2484 err = dsl_dir_rename(dd, newname);
2485 dsl_dir_close(dd, FTAG);
2486 return (err);
2487 }
2488
2489 if (tail[0] != '@') {
2490 /* the name ended in a nonexistent component */
2491 dsl_dir_close(dd, FTAG);
2492 return (ENOENT);
2493 }
2494
2495 dsl_dir_close(dd, FTAG);
2496
2497 /* new name must be snapshot in same filesystem */
2498 tail = strchr(newname, '@');
2499 if (tail == NULL)
2500 return (EINVAL);
2501 tail++;
2502 if (strncmp(oldname, newname, tail - newname) != 0)
2503 return (EXDEV);
2504
2505 if (recursive) {
2506 err = dsl_recursive_rename(oldname, newname);
2507 } else {
2508 err = dsl_dataset_hold(oldname, FTAG, &ds);
2509 if (err)
2510 return (err);
2511
2512 err = dsl_sync_task_do(ds->ds_dir->dd_pool,
2513 dsl_dataset_snapshot_rename_check,
2514 dsl_dataset_snapshot_rename_sync, ds, (char *)tail, 1);
2515
2516 dsl_dataset_rele(ds, FTAG);
2517 }
2518
2519 return (err);
2520}
2521
2522struct promotenode {
2523 list_node_t link;
2524 dsl_dataset_t *ds;
2525};
2526
2527struct promotearg {
2528 list_t shared_snaps, origin_snaps, clone_snaps;
2529 dsl_dataset_t *origin_origin;
2530 uint64_t used, comp, uncomp, unique, cloneusedsnap, originusedsnap;
2531 char *err_ds;
2532};
2533
2534static int snaplist_space(list_t *l, uint64_t mintxg, uint64_t *spacep);
2535static boolean_t snaplist_unstable(list_t *l);
2536
2537static int
2538dsl_dataset_promote_check(void *arg1, void *arg2, dmu_tx_t *tx)
2539{
2540 dsl_dataset_t *hds = arg1;
2541 struct promotearg *pa = arg2;
2542 struct promotenode *snap = list_head(&pa->shared_snaps);
2543 dsl_dataset_t *origin_ds = snap->ds;
2544 int err;
2545 uint64_t unused;
2546
2547 /* Check that it is a real clone */
2548 if (!dsl_dir_is_clone(hds->ds_dir))
2549 return (EINVAL);
2550
2551 /* Since this is so expensive, don't do the preliminary check */
2552 if (!dmu_tx_is_syncing(tx))
2553 return (0);
2554
2555 if (hds->ds_phys->ds_flags & DS_FLAG_NOPROMOTE)
2556 return (EXDEV);
2557
2558 /* compute origin's new unique space */
2559 snap = list_tail(&pa->clone_snaps);
2560 ASSERT3U(snap->ds->ds_phys->ds_prev_snap_obj, ==, origin_ds->ds_object);
2561 dsl_deadlist_space_range(&snap->ds->ds_deadlist,
2562 origin_ds->ds_phys->ds_prev_snap_txg, UINT64_MAX,
2563 &pa->unique, &unused, &unused);
2564
2565 /*
2566 * Walk the snapshots that we are moving
2567 *
2568 * Compute space to transfer. Consider the incremental changes
2569 * to used for each snapshot:
2570 * (my used) = (prev's used) + (blocks born) - (blocks killed)
2571 * So each snapshot gave birth to:
2572 * (blocks born) = (my used) - (prev's used) + (blocks killed)
2573 * So a sequence would look like:
2574 * (uN - u(N-1) + kN) + ... + (u1 - u0 + k1) + (u0 - 0 + k0)
2575 * Which simplifies to:
2576 * uN + kN + kN-1 + ... + k1 + k0
2577 * Note however, if we stop before we reach the ORIGIN we get:
2578 * uN + kN + kN-1 + ... + kM - uM-1
2579 */
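	/*
	 * Worked example (hypothetical numbers): with the origin's used
	 * space uN = 100M and deadlist (killed) sizes kN = 10M and
	 * k1 = 5M (the very first snapshot has no deadlist), the space
	 * to transfer is 100M + 10M + 5M = 115M. For a clone of a clone
	 * we stop before ORIGIN, so uM-1 (the origin origin's used
	 * bytes) is subtracted back out further below.
	 */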
2580 pa->used = origin_ds->ds_phys->ds_used_bytes;
2581 pa->comp = origin_ds->ds_phys->ds_compressed_bytes;
2582 pa->uncomp = origin_ds->ds_phys->ds_uncompressed_bytes;
2583 for (snap = list_head(&pa->shared_snaps); snap;
2584 snap = list_next(&pa->shared_snaps, snap)) {
2585 uint64_t val, dlused, dlcomp, dluncomp;
2586 dsl_dataset_t *ds = snap->ds;
2587
2588 /* Check that the snapshot name does not conflict */
2589 VERIFY(0 == dsl_dataset_get_snapname(ds));
2590 err = dsl_dataset_snap_lookup(hds, ds->ds_snapname, &val);
2591 if (err == 0) {
2592 err = EEXIST;
2593 goto out;
2594 }
2595 if (err != ENOENT)
2596 goto out;
2597
2598 /* The very first snapshot does not have a deadlist */
2599 if (ds->ds_phys->ds_prev_snap_obj == 0)
2600 continue;
2601
2602 dsl_deadlist_space(&ds->ds_deadlist,
2603 &dlused, &dlcomp, &dluncomp);
2604 pa->used += dlused;
2605 pa->comp += dlcomp;
2606 pa->uncomp += dluncomp;
2607 }
2608
2609 /*
2610 * If we are a clone of a clone then we never reached ORIGIN,
2611 * so we need to subtract out the clone origin's used space.
2612 */
2613 if (pa->origin_origin) {
2614 pa->used -= pa->origin_origin->ds_phys->ds_used_bytes;
2615 pa->comp -= pa->origin_origin->ds_phys->ds_compressed_bytes;
2616 pa->uncomp -= pa->origin_origin->ds_phys->ds_uncompressed_bytes;
2617 }
2618
2619 /* Check that there is enough space here */
2620 err = dsl_dir_transfer_possible(origin_ds->ds_dir, hds->ds_dir,
2621 pa->used);
2622 if (err)
2623 return (err);
2624
2625 /*
2626 * Compute the amounts of space that will be used by snapshots
2627 * after the promotion (for both origin and clone). For each,
2628 * it is the amount of space that will be on all of their
2629 * deadlists (that was not born before their new origin).
2630 */
2631 if (hds->ds_dir->dd_phys->dd_flags & DD_FLAG_USED_BREAKDOWN) {
2632 uint64_t space;
2633
2634 /*
2635 * Note, typically this will not be a clone of a clone,
2636 * so dd_origin_txg will be < TXG_INITIAL, so
2637 * these snaplist_space() -> dsl_deadlist_space_range()
2638 * calls will be fast because they do not have to
2639 * iterate over all bps.
2640 */
2641 snap = list_head(&pa->origin_snaps);
2642 err = snaplist_space(&pa->shared_snaps,
2643 snap->ds->ds_dir->dd_origin_txg, &pa->cloneusedsnap);
2644 if (err)
2645 return (err);
2646
2647 err = snaplist_space(&pa->clone_snaps,
2648 snap->ds->ds_dir->dd_origin_txg, &space);
2649 if (err)
2650 return (err);
2651 pa->cloneusedsnap += space;
2652 }
2653 if (origin_ds->ds_dir->dd_phys->dd_flags & DD_FLAG_USED_BREAKDOWN) {
2654 err = snaplist_space(&pa->origin_snaps,
2655 origin_ds->ds_phys->ds_creation_txg, &pa->originusedsnap);
2656 if (err)
2657 return (err);
2658 }
2659
2660 return (0);
2661out:
2662 pa->err_ds = snap->ds->ds_snapname;
2663 return (err);
2664}
2665
2666static void
2667dsl_dataset_promote_sync(void *arg1, void *arg2, dmu_tx_t *tx)
2668{
2669 dsl_dataset_t *hds = arg1;
2670 struct promotearg *pa = arg2;
2671 struct promotenode *snap = list_head(&pa->shared_snaps);
2672 dsl_dataset_t *origin_ds = snap->ds;
2673 dsl_dataset_t *origin_head;
2674 dsl_dir_t *dd = hds->ds_dir;
2675 dsl_pool_t *dp = hds->ds_dir->dd_pool;
2676 dsl_dir_t *odd = NULL;
2677 uint64_t oldnext_obj;
2678 int64_t delta;
2679
2680 ASSERT(0 == (hds->ds_phys->ds_flags & DS_FLAG_NOPROMOTE));
2681
2682 snap = list_head(&pa->origin_snaps);
2683 origin_head = snap->ds;
2684
2685 /*
2686 * We need to explicitly open odd, since origin_ds's dd will be
2687 * changing.
2688 */
2689 VERIFY(0 == dsl_dir_open_obj(dp, origin_ds->ds_dir->dd_object,
2690 NULL, FTAG, &odd));
2691
2692 /* change origin's next snap */
2693 dmu_buf_will_dirty(origin_ds->ds_dbuf, tx);
2694 oldnext_obj = origin_ds->ds_phys->ds_next_snap_obj;
2695 snap = list_tail(&pa->clone_snaps);
2696 ASSERT3U(snap->ds->ds_phys->ds_prev_snap_obj, ==, origin_ds->ds_object);
2697 origin_ds->ds_phys->ds_next_snap_obj = snap->ds->ds_object;
2698
2699 /* change the origin's next clone */
2700 if (origin_ds->ds_phys->ds_next_clones_obj) {
2701 remove_from_next_clones(origin_ds, snap->ds->ds_object, tx);
2702 VERIFY3U(0, ==, zap_add_int(dp->dp_meta_objset,
2703 origin_ds->ds_phys->ds_next_clones_obj,
2704 oldnext_obj, tx));
2705 }
2706
2707 /* change origin */
2708 dmu_buf_will_dirty(dd->dd_dbuf, tx);
2709 ASSERT3U(dd->dd_phys->dd_origin_obj, ==, origin_ds->ds_object);
2710 dd->dd_phys->dd_origin_obj = odd->dd_phys->dd_origin_obj;
2711 dd->dd_origin_txg = origin_head->ds_dir->dd_origin_txg;
2712 dmu_buf_will_dirty(odd->dd_dbuf, tx);
2713 odd->dd_phys->dd_origin_obj = origin_ds->ds_object;
2714 origin_head->ds_dir->dd_origin_txg =
2715 origin_ds->ds_phys->ds_creation_txg;
2716
2717 /* change dd_clone entries */
2718 if (spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
2719 VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
2720 odd->dd_phys->dd_clones, hds->ds_object, tx));
2721 VERIFY3U(0, ==, zap_add_int(dp->dp_meta_objset,
2722 pa->origin_origin->ds_dir->dd_phys->dd_clones,
2723 hds->ds_object, tx));
2724
2725 VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
2726 pa->origin_origin->ds_dir->dd_phys->dd_clones,
2727 origin_head->ds_object, tx));
2728 if (dd->dd_phys->dd_clones == 0) {
2729 dd->dd_phys->dd_clones = zap_create(dp->dp_meta_objset,
2730 DMU_OT_DSL_CLONES, DMU_OT_NONE, 0, tx);
2731 }
2732 VERIFY3U(0, ==, zap_add_int(dp->dp_meta_objset,
2733 dd->dd_phys->dd_clones, origin_head->ds_object, tx));
2734
2735 }
2736
2737 /* move snapshots to this dir */
2738 for (snap = list_head(&pa->shared_snaps); snap;
2739 snap = list_next(&pa->shared_snaps, snap)) {
2740 dsl_dataset_t *ds = snap->ds;
2741
2742 /* unregister props as dsl_dir is changing */
2743 if (ds->ds_objset) {
2744 dmu_objset_evict(ds->ds_objset);
2745 ds->ds_objset = NULL;
2746 }
2747 /* move snap name entry */
2748 VERIFY(0 == dsl_dataset_get_snapname(ds));
2749 VERIFY(0 == dsl_dataset_snap_remove(origin_head,
2750 ds->ds_snapname, tx));
2751 VERIFY(0 == zap_add(dp->dp_meta_objset,
2752 hds->ds_phys->ds_snapnames_zapobj, ds->ds_snapname,
2753 8, 1, &ds->ds_object, tx));
2754
2755 /* change containing dsl_dir */
2756 dmu_buf_will_dirty(ds->ds_dbuf, tx);
2757 ASSERT3U(ds->ds_phys->ds_dir_obj, ==, odd->dd_object);
2758 ds->ds_phys->ds_dir_obj = dd->dd_object;
2759 ASSERT3P(ds->ds_dir, ==, odd);
2760 dsl_dir_close(ds->ds_dir, ds);
2761 VERIFY(0 == dsl_dir_open_obj(dp, dd->dd_object,
2762 NULL, ds, &ds->ds_dir));
2763
2764 /* move any clone references */
2765 if (ds->ds_phys->ds_next_clones_obj &&
2766 spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
2767 zap_cursor_t zc;
2768 zap_attribute_t za;
2769
2770 for (zap_cursor_init(&zc, dp->dp_meta_objset,
2771 ds->ds_phys->ds_next_clones_obj);
2772 zap_cursor_retrieve(&zc, &za) == 0;
2773 zap_cursor_advance(&zc)) {
2774 dsl_dataset_t *cnds;
2775 uint64_t o;
2776
2777 if (za.za_first_integer == oldnext_obj) {
2778 /*
2779 * We've already moved the
2780 * origin's reference.
2781 */
2782 continue;
2783 }
2784
2785 VERIFY3U(0, ==, dsl_dataset_hold_obj(dp,
2786 za.za_first_integer, FTAG, &cnds));
2787 o = cnds->ds_dir->dd_phys->dd_head_dataset_obj;
2788
2789 VERIFY3U(zap_remove_int(dp->dp_meta_objset,
2790 odd->dd_phys->dd_clones, o, tx), ==, 0);
2791 VERIFY3U(zap_add_int(dp->dp_meta_objset,
2792 dd->dd_phys->dd_clones, o, tx), ==, 0);
2793 dsl_dataset_rele(cnds, FTAG);
2794 }
2795 zap_cursor_fini(&zc);
2796 }
2797
2798 ASSERT3U(dsl_prop_numcb(ds), ==, 0);
2799 }
2800
2801 /*
2802 * Change space accounting.
2803 * Note, pa->*usedsnap and dd_used_breakdown[SNAP] will either
2804 * both be valid, or both be 0 (resulting in delta == 0). This
2805 * is true for each of {clone,origin} independently.
2806 */
2807
2808 delta = pa->cloneusedsnap -
2809 dd->dd_phys->dd_used_breakdown[DD_USED_SNAP];
2810 ASSERT3S(delta, >=, 0);
2811 ASSERT3U(pa->used, >=, delta);
2812 dsl_dir_diduse_space(dd, DD_USED_SNAP, delta, 0, 0, tx);
2813 dsl_dir_diduse_space(dd, DD_USED_HEAD,
2814 pa->used - delta, pa->comp, pa->uncomp, tx);
2815
2816 delta = pa->originusedsnap -
2817 odd->dd_phys->dd_used_breakdown[DD_USED_SNAP];
2818 ASSERT3S(delta, <=, 0);
2819 ASSERT3U(pa->used, >=, -delta);
2820 dsl_dir_diduse_space(odd, DD_USED_SNAP, delta, 0, 0, tx);
2821 dsl_dir_diduse_space(odd, DD_USED_HEAD,
2822 -pa->used - delta, -pa->comp, -pa->uncomp, tx);
2823
2824 origin_ds->ds_phys->ds_unique_bytes = pa->unique;
2825
2826 /* log history record */
2827 spa_history_log_internal(LOG_DS_PROMOTE, dd->dd_pool->dp_spa, tx,
2828 "dataset = %llu", hds->ds_object);
2829
2830 dsl_dir_close(odd, FTAG);
2831}
2832
2833static char *snaplist_tag = "snaplist";
2834/*
2835 * Make a list of dsl_dataset_t's for the snapshots between first_obj
2836 * (exclusive) and last_obj (inclusive). The list will be in reverse
2837 * order (last_obj will be the list_head()). If first_obj == 0, do all
2838 * snapshots back to this dataset's origin.
2839 */
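/*
 * For example (hypothetical chain): if the snapshots are s1 -> s2 -> s3 -> s4
 * (oldest to newest), then snaplist_make(dp, own, s1, s4, l) produces the
 * list s4, s3, s2 -- s1 (first_obj) is excluded and s4 (last_obj) is the
 * list_head().
 */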
2840static int
2841snaplist_make(dsl_pool_t *dp, boolean_t own,
2842 uint64_t first_obj, uint64_t last_obj, list_t *l)
2843{
2844 uint64_t obj = last_obj;
2845
2846 ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock));
2847
2848 list_create(l, sizeof (struct promotenode),
2849 offsetof(struct promotenode, link));
2850
2851 while (obj != first_obj) {
2852 dsl_dataset_t *ds;
2853 struct promotenode *snap;
2854 int err;
2855
2856 if (own) {
2857 err = dsl_dataset_own_obj(dp, obj,
2858 0, snaplist_tag, &ds);
2859 if (err == 0)
2860 dsl_dataset_make_exclusive(ds, snaplist_tag);
2861 } else {
2862 err = dsl_dataset_hold_obj(dp, obj, snaplist_tag, &ds);
2863 }
2864 if (err == ENOENT) {
2865 /* lost race with snapshot destroy */
2866 struct promotenode *last = list_tail(l);
2867 ASSERT(obj != last->ds->ds_phys->ds_prev_snap_obj);
2868 obj = last->ds->ds_phys->ds_prev_snap_obj;
2869 continue;
2870 } else if (err) {
2871 return (err);
2872 }
2873
2874 if (first_obj == 0)
2875 first_obj = ds->ds_dir->dd_phys->dd_origin_obj;
2876
2877 snap = kmem_alloc(sizeof (struct promotenode), KM_SLEEP);
2878 snap->ds = ds;
2879 list_insert_tail(l, snap);
2880 obj = ds->ds_phys->ds_prev_snap_obj;
2881 }
2882
2883 return (0);
2884}
2885
2886static int
2887snaplist_space(list_t *l, uint64_t mintxg, uint64_t *spacep)
2888{
2889 struct promotenode *snap;
2890
2891 *spacep = 0;
2892 for (snap = list_head(l); snap; snap = list_next(l, snap)) {
2893 uint64_t used, comp, uncomp;
2894 dsl_deadlist_space_range(&snap->ds->ds_deadlist,
2895 mintxg, UINT64_MAX, &used, &comp, &uncomp);
2896 *spacep += used;
2897 }
2898 return (0);
2899}
2900
2901static void
2902snaplist_destroy(list_t *l, boolean_t own)
2903{
2904 struct promotenode *snap;
2905
2906 if (!l || !list_link_active(&l->list_head))
2907 return;
2908
2909 while ((snap = list_tail(l)) != NULL) {
2910 list_remove(l, snap);
2911 if (own)
2912 dsl_dataset_disown(snap->ds, snaplist_tag);
2913 else
2914 dsl_dataset_rele(snap->ds, snaplist_tag);
2915 kmem_free(snap, sizeof (struct promotenode));
2916 }
2917 list_destroy(l);
2918}
2919
2920/*
2921 * Promote a clone. Nomenclature note:
2922 * "clone" or "cds": the original clone which is being promoted
2923 * "origin" or "ods": the snapshot which was originally the clone's origin
2924 * "origin head" or "ohds": the dataset which is the head
2925 * (filesystem/volume) for the origin
2926 * "origin origin": the origin of the origin's filesystem (typically
2927 * NULL, indicating that the clone is not a clone of a clone).
2928 */
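/*
 * For example (hypothetical names): if "tank/fs@snap" was cloned to create
 * "tank/clone", then promoting "tank/clone" gives cds = tank/clone,
 * ods = tank/fs@snap, and ohds = tank/fs. After the promotion the snapshot
 * lives under tank/clone, and tank/fs becomes a clone of tank/clone@snap.
 */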
2929int
2930dsl_dataset_promote(const char *name, char *conflsnap)
2931{
2932 dsl_dataset_t *ds;
2933 dsl_dir_t *dd;
2934 dsl_pool_t *dp;
2935 dmu_object_info_t doi;
2936 struct promotearg pa = { 0 };
2937 struct promotenode *snap;
2938 int err;
2939
2940 err = dsl_dataset_hold(name, FTAG, &ds);
2941 if (err)
2942 return (err);
2943 dd = ds->ds_dir;
2944 dp = dd->dd_pool;
2945
2946 err = dmu_object_info(dp->dp_meta_objset,
2947 ds->ds_phys->ds_snapnames_zapobj, &doi);
2948 if (err) {
2949 dsl_dataset_rele(ds, FTAG);
2950 return (err);
2951 }
2952
2953 if (dsl_dataset_is_snapshot(ds) || dd->dd_phys->dd_origin_obj == 0) {
2954 dsl_dataset_rele(ds, FTAG);
2955 return (EINVAL);
2956 }
2957
2958 /*
2959 * We are going to inherit all the snapshots taken before our
2960 * origin (i.e., our new origin will be our parent's origin).
2961 * Take ownership of them so that we can rename them into our
2962 * namespace.
2963 */
2964 rw_enter(&dp->dp_config_rwlock, RW_READER);
2965
2966 err = snaplist_make(dp, B_TRUE, 0, dd->dd_phys->dd_origin_obj,
2967 &pa.shared_snaps);
2968 if (err != 0)
2969 goto out;
2970
2971 err = snaplist_make(dp, B_FALSE, 0, ds->ds_object, &pa.clone_snaps);
2972 if (err != 0)
2973 goto out;
2974
2975 snap = list_head(&pa.shared_snaps);
2976 ASSERT3U(snap->ds->ds_object, ==, dd->dd_phys->dd_origin_obj);
2977 err = snaplist_make(dp, B_FALSE, dd->dd_phys->dd_origin_obj,
2978 snap->ds->ds_dir->dd_phys->dd_head_dataset_obj, &pa.origin_snaps);
2979 if (err != 0)
2980 goto out;
2981
2982 if (snap->ds->ds_dir->dd_phys->dd_origin_obj != 0) {
2983 err = dsl_dataset_hold_obj(dp,
2984 snap->ds->ds_dir->dd_phys->dd_origin_obj,
2985 FTAG, &pa.origin_origin);
2986 if (err != 0)
2987 goto out;
2988 }
2989
2990out:
2991 rw_exit(&dp->dp_config_rwlock);
2992
2993 /*
2994 * Add in 128x the snapnames zapobj size, since we will be moving
2995 * a bunch of snapnames to the promoted ds, and dirtying their
2996 * bonus buffers.
2997 */
2998 if (err == 0) {
2999 err = dsl_sync_task_do(dp, dsl_dataset_promote_check,
3000 dsl_dataset_promote_sync, ds, &pa,
3001 2 + 2 * doi.doi_physical_blocks_512);
3002 if (err && pa.err_ds && conflsnap)
3003 (void) strncpy(conflsnap, pa.err_ds, MAXNAMELEN);
3004 }
3005
3006 snaplist_destroy(&pa.shared_snaps, B_TRUE);
3007 snaplist_destroy(&pa.clone_snaps, B_FALSE);
3008 snaplist_destroy(&pa.origin_snaps, B_FALSE);
3009 if (pa.origin_origin)
3010 dsl_dataset_rele(pa.origin_origin, FTAG);
3011 dsl_dataset_rele(ds, FTAG);
3012 return (err);
3013}
3014
3015struct cloneswaparg {
3016 dsl_dataset_t *cds; /* clone dataset */
3017 dsl_dataset_t *ohds; /* origin's head dataset */
3018 boolean_t force;
3019 int64_t unused_refres_delta; /* change in unconsumed refreservation */
3020};
3021
3022/* ARGSUSED */
3023static int
3024dsl_dataset_clone_swap_check(void *arg1, void *arg2, dmu_tx_t *tx)
3025{
3026 struct cloneswaparg *csa = arg1;
3027
3028 /* they should both be heads */
3029 if (dsl_dataset_is_snapshot(csa->cds) ||
3030 dsl_dataset_is_snapshot(csa->ohds))
3031 return (EINVAL);
3032
3033 /* the branch point should be just before them */
3034 if (csa->cds->ds_prev != csa->ohds->ds_prev)
3035 return (EINVAL);
3036
3037 /* cds should be the clone (unless they are unrelated) */
3038 if (csa->cds->ds_prev != NULL &&
3039 csa->cds->ds_prev != csa->cds->ds_dir->dd_pool->dp_origin_snap &&
3040 csa->ohds->ds_object !=
3041 csa->cds->ds_prev->ds_phys->ds_next_snap_obj)
3042 return (EINVAL);
3043
3044 /* the clone should be a child of the origin */
3045 if (csa->cds->ds_dir->dd_parent != csa->ohds->ds_dir)
3046 return (EINVAL);
3047
3048 /* ohds shouldn't be modified unless 'force' */
3049 if (!csa->force && dsl_dataset_modified_since_lastsnap(csa->ohds))
3050 return (ETXTBSY);
3051
3052 /* adjust amount of any unconsumed refreservation */
3053 csa->unused_refres_delta =
3054 (int64_t)MIN(csa->ohds->ds_reserved,
3055 csa->ohds->ds_phys->ds_unique_bytes) -
3056 (int64_t)MIN(csa->ohds->ds_reserved,
3057 csa->cds->ds_phys->ds_unique_bytes);
3058
3059 if (csa->unused_refres_delta > 0 &&
3060 csa->unused_refres_delta >
3061 dsl_dir_space_available(csa->ohds->ds_dir, NULL, 0, TRUE))
3062 return (ENOSPC);
3063
3064 if (csa->ohds->ds_quota != 0 &&
3065 csa->cds->ds_phys->ds_unique_bytes > csa->ohds->ds_quota)
3066 return (EDQUOT);
3067
3068 return (0);
3069}
3070
3071/* ARGSUSED */
3072static void
3073dsl_dataset_clone_swap_sync(void *arg1, void *arg2, dmu_tx_t *tx)
3074{
3075 struct cloneswaparg *csa = arg1;
3076 dsl_pool_t *dp = csa->cds->ds_dir->dd_pool;
3077
3078 ASSERT(csa->cds->ds_reserved == 0);
3079 ASSERT(csa->ohds->ds_quota == 0 ||
3080 csa->cds->ds_phys->ds_unique_bytes <= csa->ohds->ds_quota);
3081
3082 dmu_buf_will_dirty(csa->cds->ds_dbuf, tx);
3083 dmu_buf_will_dirty(csa->ohds->ds_dbuf, tx);
3084
3085 if (csa->cds->ds_objset != NULL) {
3086 dmu_objset_evict(csa->cds->ds_objset);
3087 csa->cds->ds_objset = NULL;
3088 }
3089
3090 if (csa->ohds->ds_objset != NULL) {
3091 dmu_objset_evict(csa->ohds->ds_objset);
3092 csa->ohds->ds_objset = NULL;
3093 }
3094
3095 /*
3096 * Reset origin's unique bytes, if it exists.
3097 */
3098 if (csa->cds->ds_prev) {
3099 dsl_dataset_t *origin = csa->cds->ds_prev;
3100 uint64_t comp, uncomp;
3101
3102 dmu_buf_will_dirty(origin->ds_dbuf, tx);
3103 dsl_deadlist_space_range(&csa->cds->ds_deadlist,
3104 origin->ds_phys->ds_prev_snap_txg, UINT64_MAX,
3105 &origin->ds_phys->ds_unique_bytes, &comp, &uncomp);
3106 }
3107
3108 /* swap blkptrs */
3109 {
3110 blkptr_t tmp;
3111 tmp = csa->ohds->ds_phys->ds_bp;
3112 csa->ohds->ds_phys->ds_bp = csa->cds->ds_phys->ds_bp;
3113 csa->cds->ds_phys->ds_bp = tmp;
3114 }
3115
3116 /* set dd_*_bytes */
3117 {
3118 int64_t dused, dcomp, duncomp;
3119 uint64_t cdl_used, cdl_comp, cdl_uncomp;
3120 uint64_t odl_used, odl_comp, odl_uncomp;
3121
3122 ASSERT3U(csa->cds->ds_dir->dd_phys->
3123 dd_used_breakdown[DD_USED_SNAP], ==, 0);
3124
3125 dsl_deadlist_space(&csa->cds->ds_deadlist,
3126 &cdl_used, &cdl_comp, &cdl_uncomp);
3127 dsl_deadlist_space(&csa->ohds->ds_deadlist,
3128 &odl_used, &odl_comp, &odl_uncomp);
3129
3130 dused = csa->cds->ds_phys->ds_used_bytes + cdl_used -
3131 (csa->ohds->ds_phys->ds_used_bytes + odl_used);
3132 dcomp = csa->cds->ds_phys->ds_compressed_bytes + cdl_comp -
3133 (csa->ohds->ds_phys->ds_compressed_bytes + odl_comp);
3134 duncomp = csa->cds->ds_phys->ds_uncompressed_bytes +
3135 cdl_uncomp -
3136 (csa->ohds->ds_phys->ds_uncompressed_bytes + odl_uncomp);
3137
3138 dsl_dir_diduse_space(csa->ohds->ds_dir, DD_USED_HEAD,
3139 dused, dcomp, duncomp, tx);
3140 dsl_dir_diduse_space(csa->cds->ds_dir, DD_USED_HEAD,
3141 -dused, -dcomp, -duncomp, tx);
3142
3143 /*
3144 * The difference in the space used by snapshots is the
3145 * difference in snapshot space due to the head's
3146 * deadlist (since that's the only thing that's
3147 * changing that affects the snapused).
3148 */
3149 dsl_deadlist_space_range(&csa->cds->ds_deadlist,
3150 csa->ohds->ds_dir->dd_origin_txg, UINT64_MAX,
3151 &cdl_used, &cdl_comp, &cdl_uncomp);
3152 dsl_deadlist_space_range(&csa->ohds->ds_deadlist,
3153 csa->ohds->ds_dir->dd_origin_txg, UINT64_MAX,
3154 &odl_used, &odl_comp, &odl_uncomp);
3155 dsl_dir_transfer_space(csa->ohds->ds_dir, cdl_used - odl_used,
3156 DD_USED_HEAD, DD_USED_SNAP, tx);
3157 }
3158
3159 /* swap ds_*_bytes */
3160 SWITCH64(csa->ohds->ds_phys->ds_used_bytes,
3161 csa->cds->ds_phys->ds_used_bytes);
3162 SWITCH64(csa->ohds->ds_phys->ds_compressed_bytes,
3163 csa->cds->ds_phys->ds_compressed_bytes);
3164 SWITCH64(csa->ohds->ds_phys->ds_uncompressed_bytes,
3165 csa->cds->ds_phys->ds_uncompressed_bytes);
3166 SWITCH64(csa->ohds->ds_phys->ds_unique_bytes,
3167 csa->cds->ds_phys->ds_unique_bytes);
3168
3169 /* apply any parent delta for change in unconsumed refreservation */
3170 dsl_dir_diduse_space(csa->ohds->ds_dir, DD_USED_REFRSRV,
3171 csa->unused_refres_delta, 0, 0, tx);
3172
3173 /*
3174 * Swap deadlists.
3175 */
3176 dsl_deadlist_close(&csa->cds->ds_deadlist);
3177 dsl_deadlist_close(&csa->ohds->ds_deadlist);
3178 SWITCH64(csa->ohds->ds_phys->ds_deadlist_obj,
3179 csa->cds->ds_phys->ds_deadlist_obj);
3180 dsl_deadlist_open(&csa->cds->ds_deadlist, dp->dp_meta_objset,
3181 csa->cds->ds_phys->ds_deadlist_obj);
3182 dsl_deadlist_open(&csa->ohds->ds_deadlist, dp->dp_meta_objset,
3183 csa->ohds->ds_phys->ds_deadlist_obj);
3184
3185 dsl_scan_ds_clone_swapped(csa->ohds, csa->cds, tx);
3186}
3187
3188/*
3189 * Swap 'clone' with its origin head dataset. Used at the end of "zfs
3190 * recv" into an existing fs to swizzle the file system to the new
3191 * version, and by "zfs rollback". Can also be used to swap two
3192 * independent head datasets if neither has any snapshots.
3193 */
3194int
3195dsl_dataset_clone_swap(dsl_dataset_t *clone, dsl_dataset_t *origin_head,
3196 boolean_t force)
3197{
3198 struct cloneswaparg csa;
3199 int error;
3200
3201 ASSERT(clone->ds_owner);
3202 ASSERT(origin_head->ds_owner);
3203retry:
3204 /*
3205 * Need exclusive access for the swap. If we're swapping these
3206 * datasets back after an error, we already hold the locks.
3207 */
3208 if (!RW_WRITE_HELD(&clone->ds_rwlock))
3209 rw_enter(&clone->ds_rwlock, RW_WRITER);
3210 if (!RW_WRITE_HELD(&origin_head->ds_rwlock) &&
3211 !rw_tryenter(&origin_head->ds_rwlock, RW_WRITER)) {
3212 rw_exit(&clone->ds_rwlock);
3213 rw_enter(&origin_head->ds_rwlock, RW_WRITER);
3214 if (!rw_tryenter(&clone->ds_rwlock, RW_WRITER)) {
3215 rw_exit(&origin_head->ds_rwlock);
3216 goto retry;
3217 }
3218 }
3219 csa.cds = clone;
3220 csa.ohds = origin_head;
3221 csa.force = force;
3222 error = dsl_sync_task_do(clone->ds_dir->dd_pool,
3223 dsl_dataset_clone_swap_check,
3224 dsl_dataset_clone_swap_sync, &csa, NULL, 9);
3225 return (error);
3226}
3227
3228/*
3229 * Given a pool name and a dataset object number in that pool,
3230 * return the name of that dataset.
3231 */
3232int
3233dsl_dsobj_to_dsname(char *pname, uint64_t obj, char *buf)
3234{
3235 spa_t *spa;
3236 dsl_pool_t *dp;
3237 dsl_dataset_t *ds;
3238 int error;
3239
3240 if ((error = spa_open(pname, &spa, FTAG)) != 0)
3241 return (error);
3242 dp = spa_get_dsl(spa);
3243 rw_enter(&dp->dp_config_rwlock, RW_READER);
3244 if ((error = dsl_dataset_hold_obj(dp, obj, FTAG, &ds)) == 0) {
3245 dsl_dataset_name(ds, buf);
3246 dsl_dataset_rele(ds, FTAG);
3247 }
3248 rw_exit(&dp->dp_config_rwlock);
3249 spa_close(spa, FTAG);
3250
3251 return (error);
3252}
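/*
 * Usage sketch (hypothetical caller); the buffer is assumed to be at least
 * MAXNAMELEN bytes, since dsl_dataset_name() is used to fill it:
 *
 *	char name[MAXNAMELEN];
 *	if (dsl_dsobj_to_dsname("tank", dsobj, name) == 0) {
 *		... "name" now holds something like "tank/fs@snap" ...
 *	}
 */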
3253
3254int
3255dsl_dataset_check_quota(dsl_dataset_t *ds, boolean_t check_quota,
3256 uint64_t asize, uint64_t inflight, uint64_t *used, uint64_t *ref_rsrv)
3257{
3258 int error = 0;
3259
3260 ASSERT3S(asize, >, 0);
3261
3262 /*
3263 * *ref_rsrv is the portion of asize that will come from any
3264 * unconsumed refreservation space.
3265 */
3266 *ref_rsrv = 0;
3267
3268 mutex_enter(&ds->ds_lock);
3269 /*
3270 * Make a space adjustment for reserved bytes.
3271 */
3272 if (ds->ds_reserved > ds->ds_phys->ds_unique_bytes) {
3273 ASSERT3U(*used, >=,
3274 ds->ds_reserved - ds->ds_phys->ds_unique_bytes);
3275 *used -= (ds->ds_reserved - ds->ds_phys->ds_unique_bytes);
3276 *ref_rsrv =
3277 asize - MIN(asize, parent_delta(ds, asize + inflight));
3278 }
3279
3280 if (!check_quota || ds->ds_quota == 0) {
3281 mutex_exit(&ds->ds_lock);
3282 return (0);
3283 }
3284 /*
3285 * If they are requesting more space, and our current estimate
3286 * is over quota, they get to try again unless the actual
3287	 * on-disk usage is over quota and there are no pending changes (which
3288 * may free up space for us).
3289 */
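	/*
	 * For example (hypothetical numbers), with a 100M refquota:
	 * 95M on disk plus 10M in flight exceeds the quota, but because
	 * inflight > 0 the caller gets ERESTART and can retry after the
	 * pending changes sync; 105M on disk with nothing in flight is
	 * a hard EDQUOT.
	 */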
3290 if (ds->ds_phys->ds_used_bytes + inflight >= ds->ds_quota) {
3291 if (inflight > 0 || ds->ds_phys->ds_used_bytes < ds->ds_quota)
3292 error = ERESTART;
3293 else
3294 error = EDQUOT;
3295 }
3296 mutex_exit(&ds->ds_lock);
3297
3298 return (error);
3299}
3300
3301/* ARGSUSED */
3302static int
3303dsl_dataset_set_quota_check(void *arg1, void *arg2, dmu_tx_t *tx)
3304{
3305 dsl_dataset_t *ds = arg1;
3306 dsl_prop_setarg_t *psa = arg2;
3307 int err;
3308
3309 if (spa_version(ds->ds_dir->dd_pool->dp_spa) < SPA_VERSION_REFQUOTA)
3310 return (ENOTSUP);
3311
3312 if ((err = dsl_prop_predict_sync(ds->ds_dir, psa)) != 0)
3313 return (err);
3314
3315 if (psa->psa_effective_value == 0)
3316 return (0);
3317
3318 if (psa->psa_effective_value < ds->ds_phys->ds_used_bytes ||
3319 psa->psa_effective_value < ds->ds_reserved)
3320 return (ENOSPC);
3321
3322 return (0);
3323}
3324
3325extern void dsl_prop_set_sync(void *, void *, dmu_tx_t *);
3326
3327void
3328dsl_dataset_set_quota_sync(void *arg1, void *arg2, dmu_tx_t *tx)
3329{
3330 dsl_dataset_t *ds = arg1;
3331 dsl_prop_setarg_t *psa = arg2;
3332 uint64_t effective_value = psa->psa_effective_value;
3333
3334 dsl_prop_set_sync(ds, psa, tx);
3335 DSL_PROP_CHECK_PREDICTION(ds->ds_dir, psa);
3336
3337 if (ds->ds_quota != effective_value) {
3338 dmu_buf_will_dirty(ds->ds_dbuf, tx);
3339 ds->ds_quota = effective_value;
3340
3341 spa_history_log_internal(LOG_DS_REFQUOTA,
3342 ds->ds_dir->dd_pool->dp_spa, tx, "%lld dataset = %llu ",
3343 (longlong_t)ds->ds_quota, ds->ds_object);
3344 }
3345}
3346
3347int
3348dsl_dataset_set_quota(const char *dsname, zprop_source_t source, uint64_t quota)
3349{
3350 dsl_dataset_t *ds;
3351 dsl_prop_setarg_t psa;
3352 int err;
3353
3354 dsl_prop_setarg_init_uint64(&psa, "refquota", source, &quota);
3355
3356 err = dsl_dataset_hold(dsname, FTAG, &ds);
3357 if (err)
3358 return (err);
3359
3360 /*
3361 * If someone removes a file, then tries to set the quota, we
3362 * want to make sure the file freeing takes effect.
3363 */
3364 txg_wait_open(ds->ds_dir->dd_pool, 0);
3365
3366 err = dsl_sync_task_do(ds->ds_dir->dd_pool,
3367 dsl_dataset_set_quota_check, dsl_dataset_set_quota_sync,
3368 ds, &psa, 0);
3369
3370 dsl_dataset_rele(ds, FTAG);
3371 return (err);
3372}
3373
3374static int
3375dsl_dataset_set_reservation_check(void *arg1, void *arg2, dmu_tx_t *tx)
3376{
3377 dsl_dataset_t *ds = arg1;
3378 dsl_prop_setarg_t *psa = arg2;
3379 uint64_t effective_value;
3380 uint64_t unique;
3381 int err;
3382
3383 if (spa_version(ds->ds_dir->dd_pool->dp_spa) <
3384 SPA_VERSION_REFRESERVATION)
3385 return (ENOTSUP);
3386
3387 if (dsl_dataset_is_snapshot(ds))
3388 return (EINVAL);
3389
3390 if ((err = dsl_prop_predict_sync(ds->ds_dir, psa)) != 0)
3391 return (err);
3392
3393 effective_value = psa->psa_effective_value;
3394
3395 /*
3396 * If we are doing the preliminary check in open context, the
3397 * space estimates may be inaccurate.
3398 */
3399 if (!dmu_tx_is_syncing(tx))
3400 return (0);
3401
3402 mutex_enter(&ds->ds_lock);
3403 if (!DS_UNIQUE_IS_ACCURATE(ds))
3404 dsl_dataset_recalc_head_uniq(ds);
3405 unique = ds->ds_phys->ds_unique_bytes;
3406 mutex_exit(&ds->ds_lock);
3407
3408 if (MAX(unique, effective_value) > MAX(unique, ds->ds_reserved)) {
3409 uint64_t delta = MAX(unique, effective_value) -
3410 MAX(unique, ds->ds_reserved);
3411
3412 if (delta > dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE))
3413 return (ENOSPC);
3414 if (ds->ds_quota > 0 &&
3415 effective_value > ds->ds_quota)
3416 return (ENOSPC);
3417 }
3418
3419 return (0);
3420}
3421
3422static void
3423dsl_dataset_set_reservation_sync(void *arg1, void *arg2, dmu_tx_t *tx)
3424{
3425 dsl_dataset_t *ds = arg1;
3426 dsl_prop_setarg_t *psa = arg2;
3427 uint64_t effective_value = psa->psa_effective_value;
3428 uint64_t unique;
3429 int64_t delta;
3430
3431 dsl_prop_set_sync(ds, psa, tx);
3432 DSL_PROP_CHECK_PREDICTION(ds->ds_dir, psa);
3433
3434 dmu_buf_will_dirty(ds->ds_dbuf, tx);
3435
3436 mutex_enter(&ds->ds_dir->dd_lock);
3437 mutex_enter(&ds->ds_lock);
3438 ASSERT(DS_UNIQUE_IS_ACCURATE(ds));
3439 unique = ds->ds_phys->ds_unique_bytes;
3440 delta = MAX(0, (int64_t)(effective_value - unique)) -
3441 MAX(0, (int64_t)(ds->ds_reserved - unique));
3442 ds->ds_reserved = effective_value;
3443 mutex_exit(&ds->ds_lock);
3444
3445 dsl_dir_diduse_space(ds->ds_dir, DD_USED_REFRSRV, delta, 0, 0, tx);
3446 mutex_exit(&ds->ds_dir->dd_lock);
3447
3448 spa_history_log_internal(LOG_DS_REFRESERV,
3449 ds->ds_dir->dd_pool->dp_spa, tx, "%lld dataset = %llu",
3450 (longlong_t)effective_value, ds->ds_object);
3451}
3452
3453int
3454dsl_dataset_set_reservation(const char *dsname, zprop_source_t source,
3455 uint64_t reservation)
3456{
3457 dsl_dataset_t *ds;
3458 dsl_prop_setarg_t psa;
3459 int err;
3460
3461 dsl_prop_setarg_init_uint64(&psa, "refreservation", source,
3462 &reservation);
3463
3464 err = dsl_dataset_hold(dsname, FTAG, &ds);
3465 if (err)
3466 return (err);
3467
3468 err = dsl_sync_task_do(ds->ds_dir->dd_pool,
3469 dsl_dataset_set_reservation_check,
3470 dsl_dataset_set_reservation_sync, ds, &psa, 0);
3471
3472 dsl_dataset_rele(ds, FTAG);
3473 return (err);
3474}
3475
3476typedef struct zfs_hold_cleanup_arg {
3477 dsl_pool_t *dp;
3478 uint64_t dsobj;
3479 char htag[MAXNAMELEN];
3480} zfs_hold_cleanup_arg_t;
3481
3482static void
3483dsl_dataset_user_release_onexit(void *arg)
3484{
3485 zfs_hold_cleanup_arg_t *ca = arg;
3486
3487 (void) dsl_dataset_user_release_tmp(ca->dp, ca->dsobj, ca->htag,
3488 B_TRUE);
3489 kmem_free(ca, sizeof (zfs_hold_cleanup_arg_t));
3490}
3491
3492void
3493dsl_register_onexit_hold_cleanup(dsl_dataset_t *ds, const char *htag,
3494 minor_t minor)
3495{
3496 zfs_hold_cleanup_arg_t *ca;
3497
3498 ca = kmem_alloc(sizeof (zfs_hold_cleanup_arg_t), KM_SLEEP);
3499 ca->dp = ds->ds_dir->dd_pool;
3500 ca->dsobj = ds->ds_object;
3501 (void) strlcpy(ca->htag, htag, sizeof (ca->htag));
3502 VERIFY3U(0, ==, zfs_onexit_add_cb(minor,
3503 dsl_dataset_user_release_onexit, ca, NULL));
3504}
3505
3506/*
3507 * If you add new checks here, you may need to add
3508 * additional checks to the "temporary" case in
3509 * snapshot_check() in dmu_objset.c.
3510 */
3511static int
3512dsl_dataset_user_hold_check(void *arg1, void *arg2, dmu_tx_t *tx)
3513{
3514 dsl_dataset_t *ds = arg1;
3515 struct dsl_ds_holdarg *ha = arg2;
3516 char *htag = ha->htag;
3517 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
3518 int error = 0;
3519
3520 if (spa_version(ds->ds_dir->dd_pool->dp_spa) < SPA_VERSION_USERREFS)
3521 return (ENOTSUP);
3522
3523 if (!dsl_dataset_is_snapshot(ds))
3524 return (EINVAL);
3525
3526 /* tags must be unique */
3527 mutex_enter(&ds->ds_lock);
3528 if (ds->ds_phys->ds_userrefs_obj) {
3529 error = zap_lookup(mos, ds->ds_phys->ds_userrefs_obj, htag,
3530 8, 1, tx);
3531 if (error == 0)
3532 error = EEXIST;
3533 else if (error == ENOENT)
3534 error = 0;
3535 }
3536 mutex_exit(&ds->ds_lock);
3537
3538 if (error == 0 && ha->temphold &&
3539 strlen(htag) + MAX_TAG_PREFIX_LEN >= MAXNAMELEN)
3540 error = E2BIG;
3541
3542 return (error);
3543}
3544
3545void
3546dsl_dataset_user_hold_sync(void *arg1, void *arg2, dmu_tx_t *tx)
3547{
3548 dsl_dataset_t *ds = arg1;
3549 struct dsl_ds_holdarg *ha = arg2;
3550 char *htag = ha->htag;
3551 dsl_pool_t *dp = ds->ds_dir->dd_pool;
3552 objset_t *mos = dp->dp_meta_objset;
3553 uint64_t now = gethrestime_sec();
3554 uint64_t zapobj;
3555
3556 mutex_enter(&ds->ds_lock);
3557 if (ds->ds_phys->ds_userrefs_obj == 0) {
3558 /*
3559 * This is the first user hold for this dataset. Create
3560 * the userrefs zap object.
3561 */
3562 dmu_buf_will_dirty(ds->ds_dbuf, tx);
3563 zapobj = ds->ds_phys->ds_userrefs_obj =
3564 zap_create(mos, DMU_OT_USERREFS, DMU_OT_NONE, 0, tx);
3565 } else {
3566 zapobj = ds->ds_phys->ds_userrefs_obj;
3567 }
3568 ds->ds_userrefs++;
3569 mutex_exit(&ds->ds_lock);
3570
3571 VERIFY(0 == zap_add(mos, zapobj, htag, 8, 1, &now, tx));
3572
3573 if (ha->temphold) {
3574 VERIFY(0 == dsl_pool_user_hold(dp, ds->ds_object,
3575 htag, &now, tx));
3576 }
3577
3578 spa_history_log_internal(LOG_DS_USER_HOLD,
3579 dp->dp_spa, tx, "<%s> temp = %d dataset = %llu", htag,
3580 (int)ha->temphold, ds->ds_object);
3581}
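
/*
 * Editor's illustration (made-up values): after the sync above, the
 * snapshot's ds_userrefs_obj ZAP maps each tag to the time the hold was
 * taken, e.g. { "mytag" -> 1309000000 }.  For temporary holds,
 * dsl_pool_user_hold() records a matching pool-wide entry so a stale
 * hold can be found and released at spa_load time.
 */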
3582
3583static int
3584dsl_dataset_user_hold_one(const char *dsname, void *arg)
3585{
3586 struct dsl_ds_holdarg *ha = arg;
3587 dsl_dataset_t *ds;
3588 int error;
3589 char *name;
3590
3591 /* alloc a buffer to hold dsname@snapname plus terminating NULL */
3592 name = kmem_asprintf("%s@%s", dsname, ha->snapname);
3593 error = dsl_dataset_hold(name, ha->dstg, &ds);
3594 strfree(name);
3595 if (error == 0) {
3596 ha->gotone = B_TRUE;
3597 dsl_sync_task_create(ha->dstg, dsl_dataset_user_hold_check,
3598 dsl_dataset_user_hold_sync, ds, ha, 0);
3599 } else if (error == ENOENT && ha->recursive) {
3600 error = 0;
3601 } else {
3602 (void) strlcpy(ha->failed, dsname, sizeof (ha->failed));
3603 }
3604 return (error);
3605}
3606
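/*
 * Editor's note (added comment): place a hold on a snapshot the caller
 * already has open, without name lookup or a sync task group.  Judging
 * by the name, this is intended for the send path, which needs to pin
 * the snapshot it is streaming; it runs the same check/sync functions
 * as dsl_dataset_user_hold() below, as a single sync task.
 */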
3607int
3608dsl_dataset_user_hold_for_send(dsl_dataset_t *ds, char *htag,
3609 boolean_t temphold)
3610{
3611 struct dsl_ds_holdarg *ha;
3612 int error;
3613
3614 ha = kmem_zalloc(sizeof (struct dsl_ds_holdarg), KM_SLEEP);
3615 ha->htag = htag;
3616 ha->temphold = temphold;
3617 error = dsl_sync_task_do(ds->ds_dir->dd_pool,
3618 dsl_dataset_user_hold_check, dsl_dataset_user_hold_sync,
3619 ds, ha, 0);
3620 kmem_free(ha, sizeof (struct dsl_ds_holdarg));
3621
3622 return (error);
3623}
3624
3625int
3626dsl_dataset_user_hold(char *dsname, char *snapname, char *htag,
3627 boolean_t recursive, boolean_t temphold, int cleanup_fd)
3628{
3629 struct dsl_ds_holdarg *ha;
3630 dsl_sync_task_t *dst;
3631 spa_t *spa;
3632 int error;
3633 minor_t minor = 0;
3634
3635 if (cleanup_fd != -1) {
3636 /* Currently we only support cleanup-on-exit of tempholds. */
3637 if (!temphold)
3638 return (EINVAL);
3639 error = zfs_onexit_fd_hold(cleanup_fd, &minor);
3640 if (error)
3641 return (error);
3642 }
3643
3644 ha = kmem_zalloc(sizeof (struct dsl_ds_holdarg), KM_SLEEP);
3645
3646 (void) strlcpy(ha->failed, dsname, sizeof (ha->failed));
3647
3648 error = spa_open(dsname, &spa, FTAG);
3649 if (error) {
3650 kmem_free(ha, sizeof (struct dsl_ds_holdarg));
3651 if (cleanup_fd != -1)
3652 zfs_onexit_fd_rele(cleanup_fd);
3653 return (error);
3654 }
3655
3656 ha->dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
3657 ha->htag = htag;
3658 ha->snapname = snapname;
3659 ha->recursive = recursive;
3660 ha->temphold = temphold;
3661
3662 if (recursive) {
3663 error = dmu_objset_find(dsname, dsl_dataset_user_hold_one,
3664 ha, DS_FIND_CHILDREN);
3665 } else {
3666 error = dsl_dataset_user_hold_one(dsname, ha);
3667 }
3668 if (error == 0)
3669 error = dsl_sync_task_group_wait(ha->dstg);
3670
3671 for (dst = list_head(&ha->dstg->dstg_tasks); dst;
3672 dst = list_next(&ha->dstg->dstg_tasks, dst)) {
3673 dsl_dataset_t *ds = dst->dst_arg1;
3674
3675 if (dst->dst_err) {
3676 dsl_dataset_name(ds, ha->failed);
3677 *strchr(ha->failed, '@') = '\0';
3678 } else if (error == 0 && minor != 0 && temphold) {
3679 /*
3680 * If this hold is to be released upon process exit,
3681 * register that action now.
3682 */
3683 dsl_register_onexit_hold_cleanup(ds, htag, minor);
3684 }
3685 dsl_dataset_rele(ds, ha->dstg);
3686 }
3687
3688 if (error == 0 && recursive && !ha->gotone)
3689 error = ENOENT;
3690
3691 if (error)
3692 (void) strlcpy(dsname, ha->failed, sizeof (ha->failed));
3693
3694 dsl_sync_task_group_destroy(ha->dstg);
3695
3696 kmem_free(ha, sizeof (struct dsl_ds_holdarg));
3697 spa_close(spa, FTAG);
3698 if (cleanup_fd != -1)
3699 zfs_onexit_fd_rele(cleanup_fd);
3700 return (error);
3701}
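
/*
 * Editor's illustration (hypothetical call, not in the original source):
 * roughly what the ioctl path does for "zfs hold -r mytag tank/fs@snap1",
 * with no cleanup-on-exit requested (temphold B_FALSE, cleanup_fd -1):
 *
 *	char ds[MAXNAMELEN] = "tank/fs";
 *	char snap[] = "snap1", tag[] = "mytag";
 *
 *	error = dsl_dataset_user_hold(ds, snap, tag, B_TRUE, B_FALSE, -1);
 *
 * On failure, ds is overwritten with the name of the dataset that could
 * not be held.
 */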
3702
3703struct dsl_ds_releasearg {
3704 dsl_dataset_t *ds;
3705 const char *htag;
3706 boolean_t own; /* do we own or just hold ds? */
3707};
3708
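/*
 * Editor's note (added comment): decide whether dropping the hold "htag"
 * could leave the snapshot eligible for deferred destruction.  Returns
 * ESRCH if the tag is not held on this snapshot; otherwise sets
 * *might_destroy when this is the last user hold, the snapshot has no
 * clones, and it is already marked for deferred destroy.
 */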
3709static int
3710dsl_dataset_release_might_destroy(dsl_dataset_t *ds, const char *htag,
3711 boolean_t *might_destroy)
3712{
3713 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
3714 uint64_t zapobj;
3715 uint64_t tmp;
3716 int error;
3717
3718 *might_destroy = B_FALSE;
3719
3720 mutex_enter(&ds->ds_lock);
3721 zapobj = ds->ds_phys->ds_userrefs_obj;
3722 if (zapobj == 0) {
3723 /* The tag can't possibly exist */
3724 mutex_exit(&ds->ds_lock);
3725 return (ESRCH);
3726 }
3727
3728 /* Make sure the tag exists */
3729 error = zap_lookup(mos, zapobj, htag, 8, 1, &tmp);
3730 if (error) {
3731 mutex_exit(&ds->ds_lock);
3732 if (error == ENOENT)
3733 error = ESRCH;
3734 return (error);
3735 }
3736
3737 if (ds->ds_userrefs == 1 && ds->ds_phys->ds_num_children == 1 &&
3738 DS_IS_DEFER_DESTROY(ds))
3739 *might_destroy = B_TRUE;
3740
3741 mutex_exit(&ds->ds_lock);
3742 return (0);
3743}
3744
3745static int
3746dsl_dataset_user_release_check(void *arg1, void *tag, dmu_tx_t *tx)
3747{
3748 struct dsl_ds_releasearg *ra = arg1;
3749 dsl_dataset_t *ds = ra->ds;
3750 boolean_t might_destroy;
3751 int error;
3752
3753 if (spa_version(ds->ds_dir->dd_pool->dp_spa) < SPA_VERSION_USERREFS)
3754 return (ENOTSUP);
3755
3756 error = dsl_dataset_release_might_destroy(ds, ra->htag, &might_destroy);
3757 if (error)
3758 return (error);
3759
3760 if (might_destroy) {
3761 struct dsl_ds_destroyarg dsda = {0};
3762
3763 if (dmu_tx_is_syncing(tx)) {
3764 /*
3765 * If we're not prepared to remove the snapshot,
3766 * we can't allow the release to happen right now.
3767 */
3768 if (!ra->own)
3769 return (EBUSY);
3770 }
3771 dsda.ds = ds;
3772 dsda.releasing = B_TRUE;
3773 return (dsl_dataset_destroy_check(&dsda, tag, tx));
3774 }
3775
3776 return (0);
3777}
3778
3779static void
3780dsl_dataset_user_release_sync(void *arg1, void *tag, dmu_tx_t *tx)
3781{
3782 struct dsl_ds_releasearg *ra = arg1;
3783 dsl_dataset_t *ds = ra->ds;
3784 dsl_pool_t *dp = ds->ds_dir->dd_pool;
3785 objset_t *mos = dp->dp_meta_objset;
3786 uint64_t zapobj;
3787 uint64_t dsobj = ds->ds_object;
3788 uint64_t refs;
3789 int error;
3790
3791 mutex_enter(&ds->ds_lock);
3792 ds->ds_userrefs--;
3793 refs = ds->ds_userrefs;
3794 mutex_exit(&ds->ds_lock);
3795 error = dsl_pool_user_release(dp, ds->ds_object, ra->htag, tx);
3796 VERIFY(error == 0 || error == ENOENT);
3797 zapobj = ds->ds_phys->ds_userrefs_obj;
3798 VERIFY(0 == zap_remove(mos, zapobj, ra->htag, tx));
3799 if (ds->ds_userrefs == 0 && ds->ds_phys->ds_num_children == 1 &&
3800 DS_IS_DEFER_DESTROY(ds)) {
3801 struct dsl_ds_destroyarg dsda = {0};
3802
3803 ASSERT(ra->own);
3804 dsda.ds = ds;
3805 dsda.releasing = B_TRUE;
3806 /* We already did the destroy_check */
3807 dsl_dataset_destroy_sync(&dsda, tag, tx);
3808 }
3809
3810 spa_history_log_internal(LOG_DS_USER_RELEASE,
3811 dp->dp_spa, tx, "<%s> %lld dataset = %llu",
3812 ra->htag, (longlong_t)refs, dsobj);
3813}
3814
3815static int
3816dsl_dataset_user_release_one(const char *dsname, void *arg)
3817{
3818 struct dsl_ds_holdarg *ha = arg;
3819 struct dsl_ds_releasearg *ra;
3820 dsl_dataset_t *ds;
3821 int error;
3822 void *dtag = ha->dstg;
3823 char *name;
3824 boolean_t own = B_FALSE;
3825 boolean_t might_destroy;
3826
3827 /* alloc a buffer to hold dsname@snapname, plus the terminating NULL */
3828 name = kmem_asprintf("%s@%s", dsname, ha->snapname);
3829 error = dsl_dataset_hold(name, dtag, &ds);
3830 strfree(name);
3831 if (error == ENOENT && ha->recursive)
3832 return (0);
3833 (void) strlcpy(ha->failed, dsname, sizeof (ha->failed));
3834 if (error)
3835 return (error);
3836
3837 ha->gotone = B_TRUE;
3838
3839 ASSERT(dsl_dataset_is_snapshot(ds));
3840
3841 error = dsl_dataset_release_might_destroy(ds, ha->htag, &might_destroy);
3842 if (error) {
3843 dsl_dataset_rele(ds, dtag);
3844 return (error);
3845 }
3846
3847 if (might_destroy) {
3848#ifdef _KERNEL
3849 name = kmem_asprintf("%s@%s", dsname, ha->snapname);
3850 error = zfs_unmount_snap(name, NULL);
3851 strfree(name);
3852 if (error) {
3853 dsl_dataset_rele(ds, dtag);
3854 return (error);
3855 }
3856#endif
3857 if (!dsl_dataset_tryown(ds, B_TRUE, dtag)) {
3858 dsl_dataset_rele(ds, dtag);
3859 return (EBUSY);
3860 } else {
3861 own = B_TRUE;
3862 dsl_dataset_make_exclusive(ds, dtag);
3863 }
3864 }
3865
3866 ra = kmem_alloc(sizeof (struct dsl_ds_releasearg), KM_SLEEP);
3867 ra->ds = ds;
3868 ra->htag = ha->htag;
3869 ra->own = own;
3870 dsl_sync_task_create(ha->dstg, dsl_dataset_user_release_check,
3871 dsl_dataset_user_release_sync, ra, dtag, 0);
3872
3873 return (0);
3874}
3875
3876int
3877dsl_dataset_user_release(char *dsname, char *snapname, char *htag,
3878 boolean_t recursive)
3879{
3880 struct dsl_ds_holdarg *ha;
3881 dsl_sync_task_t *dst;
3882 spa_t *spa;
3883 int error;
3884
3885top:
3886 ha = kmem_zalloc(sizeof (struct dsl_ds_holdarg), KM_SLEEP);
3887
3888 (void) strlcpy(ha->failed, dsname, sizeof (ha->failed));
3889
3890 error = spa_open(dsname, &spa, FTAG);
3891 if (error) {
3892 kmem_free(ha, sizeof (struct dsl_ds_holdarg));
3893 return (error);
3894 }
3895
3896 ha->dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
3897 ha->htag = htag;
3898 ha->snapname = snapname;
3899 ha->recursive = recursive;
3900 if (recursive) {
3901 error = dmu_objset_find(dsname, dsl_dataset_user_release_one,
3902 ha, DS_FIND_CHILDREN);
3903 } else {
3904 error = dsl_dataset_user_release_one(dsname, ha);
3905 }
3906 if (error == 0)
3907 error = dsl_sync_task_group_wait(ha->dstg);
3908
3909 for (dst = list_head(&ha->dstg->dstg_tasks); dst;
3910 dst = list_next(&ha->dstg->dstg_tasks, dst)) {
3911 struct dsl_ds_releasearg *ra = dst->dst_arg1;
3912 dsl_dataset_t *ds = ra->ds;
3913
3914 if (dst->dst_err)
3915 dsl_dataset_name(ds, ha->failed);
3916
3917 if (ra->own)
3918 dsl_dataset_disown(ds, ha->dstg);
3919 else
3920 dsl_dataset_rele(ds, ha->dstg);
3921
3922 kmem_free(ra, sizeof (struct dsl_ds_releasearg));
3923 }
3924
3925 if (error == 0 && recursive && !ha->gotone)
3926 error = ENOENT;
3927
3928 if (error && error != EBUSY)
3929 (void) strlcpy(dsname, ha->failed, sizeof (ha->failed));
3930
3931 dsl_sync_task_group_destroy(ha->dstg);
3932 kmem_free(ha, sizeof (struct dsl_ds_holdarg));
3933 spa_close(spa, FTAG);
3934
3935 /*
3936 * We can get EBUSY if we were racing with deferred destroy and
3937 * dsl_dataset_user_release_check() hadn't done the necessary
3938 * open context setup. We can also get EBUSY if we're racing
3939 * with destroy and that thread is the ds_owner. Either way
3940 * the busy condition should be transient, and we should retry
3941 * the release operation.
3942 */
3943 if (error == EBUSY)
3944 goto top;
3945
3946 return (error);
3947}
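
/*
 * Editor's illustration (hypothetical call, not in the original source):
 * the non-recursive equivalent of "zfs release mytag tank/fs@snap1":
 *
 *	char ds[MAXNAMELEN] = "tank/fs";
 *	char snap[] = "snap1", tag[] = "mytag";
 *
 *	error = dsl_dataset_user_release(ds, snap, tag, B_FALSE);
 *
 * If this drops the last hold on a snapshot that was destroyed with
 * "zfs destroy -d" and has no clones, the snapshot is destroyed in the
 * same sync task.
 */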
3948
3949/*
3950 * Called at spa_load time (with retry == B_FALSE) to release a stale
3951 * temporary user hold. Also called by the onexit code (with retry == B_TRUE).
3952 */
3953int
3954dsl_dataset_user_release_tmp(dsl_pool_t *dp, uint64_t dsobj, char *htag,
3955 boolean_t retry)
3956{
3957 dsl_dataset_t *ds;
3958 char *snap;
3959 char *name;
3960 int namelen;
3961 int error;
3962
3963 do {
3964 rw_enter(&dp->dp_config_rwlock, RW_READER);
3965 error = dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds);
3966 rw_exit(&dp->dp_config_rwlock);
3967 if (error)
3968 return (error);
3969 namelen = dsl_dataset_namelen(ds)+1;
3970 name = kmem_alloc(namelen, KM_SLEEP);
3971 dsl_dataset_name(ds, name);
3972 dsl_dataset_rele(ds, FTAG);
3973
3974 snap = strchr(name, '@');
3975 *snap = '\0';
3976 ++snap;
3977 error = dsl_dataset_user_release(name, snap, htag, B_FALSE);
3978 kmem_free(name, namelen);
3979
3980 /*
3981 * The object can't have been destroyed because we have a hold,
3982 * but it might have been renamed, resulting in ENOENT. Retry
3983 * if we've been requested to do so.
3984 *
3985 * It would be nice if we could use the dsobj all the way
3986 * through and avoid ENOENT entirely. But we might need to
3987 * unmount the snapshot, and there's currently no way to look up
3988 * a vfsp using a ZFS object id.
3989 */
3990 } while ((error == ENOENT) && retry);
3991
3992 return (error);
3993}
3994
3995int
3996dsl_dataset_get_holds(const char *dsname, nvlist_t **nvp)
3997{
3998 dsl_dataset_t *ds;
3999 int err;
4000
4001 err = dsl_dataset_hold(dsname, FTAG, &ds);
4002 if (err)
4003 return (err);
4004
4005 VERIFY(0 == nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP));
4006 if (ds->ds_phys->ds_userrefs_obj != 0) {
4007 zap_attribute_t *za;
4008 zap_cursor_t zc;
4009
4010 za = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);
4011 for (zap_cursor_init(&zc, ds->ds_dir->dd_pool->dp_meta_objset,
4012 ds->ds_phys->ds_userrefs_obj);
4013 zap_cursor_retrieve(&zc, za) == 0;
4014 zap_cursor_advance(&zc)) {
4015 VERIFY(0 == nvlist_add_uint64(*nvp, za->za_name,
4016 za->za_first_integer));
4017 }
4018 zap_cursor_fini(&zc);
4019 kmem_free(za, sizeof (zap_attribute_t));
4020 }
4021 dsl_dataset_rele(ds, FTAG);
4022 return (0);
4023}
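
/*
 * Editor's illustration (hypothetical consumer, not in the original
 * source): the returned nvlist maps each hold tag to the time it was
 * taken (seconds since the epoch), so a caller could walk it with the
 * generic nvpair interfaces:
 *
 *	nvlist_t *holds;
 *	nvpair_t *pair;
 *	uint64_t when;
 *
 *	if (dsl_dataset_get_holds("tank/fs@snap1", &holds) == 0) {
 *		for (pair = nvlist_next_nvpair(holds, NULL); pair != NULL;
 *		    pair = nvlist_next_nvpair(holds, pair)) {
 *			(void) nvpair_value_uint64(pair, &when);
 *			... nvpair_name(pair) is the tag, when is the time ...
 *		}
 *		nvlist_free(holds);
 *	}
 */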
4024
4025/*
4026 * Note: this function is used as the callback for dmu_objset_find(). We
4027 * always return 0 so that we will continue to find and process
4028 * inconsistent datasets, even if we encounter an error trying to
4029 * process one of them.
4030 */
4031/* ARGSUSED */
4032int
4033dsl_destroy_inconsistent(const char *dsname, void *arg)
4034{
4035 dsl_dataset_t *ds;
4036
4037 if (dsl_dataset_own(dsname, B_TRUE, FTAG, &ds) == 0) {
4038 if (DS_IS_INCONSISTENT(ds))
4039 (void) dsl_dataset_destroy(ds, FTAG, B_FALSE);
4040 else
4041 dsl_dataset_disown(ds, FTAG);
4042 }
4043 return (0);
4044}