dmu_objset.c revision 359722
1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22/*
23 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Copyright (c) 2012, 2017 by Delphix. All rights reserved.
25 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
26 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
27 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
28 * Copyright (c) 2015, STRATO AG, Inc. All rights reserved.
29 * Copyright (c) 2014 Integros [integros.com]
30 * Copyright 2017 Nexenta Systems, Inc.
31 * Copyright (c) 2018, loli10K <ezomori.nozomu@gmail.com>. All rights reserved.
32 */
33
34/* Portions Copyright 2010 Robert Milkowski */
35
36#include <sys/cred.h>
37#include <sys/zfs_context.h>
38#include <sys/dmu_objset.h>
39#include <sys/dsl_dir.h>
40#include <sys/dsl_dataset.h>
41#include <sys/dsl_prop.h>
42#include <sys/dsl_pool.h>
43#include <sys/dsl_synctask.h>
44#include <sys/dsl_deleg.h>
45#include <sys/dnode.h>
46#include <sys/dbuf.h>
47#include <sys/zvol.h>
48#include <sys/dmu_tx.h>
49#include <sys/zap.h>
50#include <sys/zil.h>
51#include <sys/dmu_impl.h>
52#include <sys/zfs_ioctl.h>
53#include <sys/sa.h>
54#include <sys/zfs_onexit.h>
55#include <sys/dsl_destroy.h>
56#include <sys/vdev.h>
57#include <sys/zfeature.h>
58#include "zfs_namecheck.h"
59
60/*
61 * Needed to close a window in dnode_move() that allows the objset to be freed
62 * before it can be safely accessed.
63 */
64krwlock_t os_lock;
65
66/*
67 * Tunable to overwrite the maximum number of threads for the parallization
68 * of dmu_objset_find_dp, needed to speed up the import of pools with many
69 * datasets.
70 * Default is 4 times the number of leaf vdevs.
71 */
72int dmu_find_threads = 0;
73
74/*
75 * Backfill lower metadnode objects after this many have been freed.
76 * Backfilling negatively impacts object creation rates, so only do it
77 * if there are enough holes to fill.
78 */
79int dmu_rescan_dnode_threshold = 131072;
80
81static void dmu_objset_find_dp_cb(void *arg);
82
83void
84dmu_objset_init(void)
85{
86	rw_init(&os_lock, NULL, RW_DEFAULT, NULL);
87}
88
89void
90dmu_objset_fini(void)
91{
92	rw_destroy(&os_lock);
93}
94
95spa_t *
96dmu_objset_spa(objset_t *os)
97{
98	return (os->os_spa);
99}
100
101zilog_t *
102dmu_objset_zil(objset_t *os)
103{
104	return (os->os_zil);
105}
106
107dsl_pool_t *
108dmu_objset_pool(objset_t *os)
109{
110	dsl_dataset_t *ds;
111
112	if ((ds = os->os_dsl_dataset) != NULL && ds->ds_dir)
113		return (ds->ds_dir->dd_pool);
114	else
115		return (spa_get_dsl(os->os_spa));
116}
117
118dsl_dataset_t *
119dmu_objset_ds(objset_t *os)
120{
121	return (os->os_dsl_dataset);
122}
123
124dmu_objset_type_t
125dmu_objset_type(objset_t *os)
126{
127	return (os->os_phys->os_type);
128}
129
130void
131dmu_objset_name(objset_t *os, char *buf)
132{
133	dsl_dataset_name(os->os_dsl_dataset, buf);
134}
135
136uint64_t
137dmu_objset_id(objset_t *os)
138{
139	dsl_dataset_t *ds = os->os_dsl_dataset;
140
141	return (ds ? ds->ds_object : 0);
142}
143
144zfs_sync_type_t
145dmu_objset_syncprop(objset_t *os)
146{
147	return (os->os_sync);
148}
149
150zfs_logbias_op_t
151dmu_objset_logbias(objset_t *os)
152{
153	return (os->os_logbias);
154}
155
156static void
157checksum_changed_cb(void *arg, uint64_t newval)
158{
159	objset_t *os = arg;
160
161	/*
162	 * Inheritance should have been done by now.
163	 */
164	ASSERT(newval != ZIO_CHECKSUM_INHERIT);
165
166	os->os_checksum = zio_checksum_select(newval, ZIO_CHECKSUM_ON_VALUE);
167}
168
169static void
170compression_changed_cb(void *arg, uint64_t newval)
171{
172	objset_t *os = arg;
173
174	/*
175	 * Inheritance and range checking should have been done by now.
176	 */
177	ASSERT(newval != ZIO_COMPRESS_INHERIT);
178
179	os->os_compress = zio_compress_select(os->os_spa, newval,
180	    ZIO_COMPRESS_ON);
181}
182
183static void
184copies_changed_cb(void *arg, uint64_t newval)
185{
186	objset_t *os = arg;
187
188	/*
189	 * Inheritance and range checking should have been done by now.
190	 */
191	ASSERT(newval > 0);
192	ASSERT(newval <= spa_max_replication(os->os_spa));
193
194	os->os_copies = newval;
195}
196
197static void
198dedup_changed_cb(void *arg, uint64_t newval)
199{
200	objset_t *os = arg;
201	spa_t *spa = os->os_spa;
202	enum zio_checksum checksum;
203
204	/*
205	 * Inheritance should have been done by now.
206	 */
207	ASSERT(newval != ZIO_CHECKSUM_INHERIT);
208
209	checksum = zio_checksum_dedup_select(spa, newval, ZIO_CHECKSUM_OFF);
210
211	os->os_dedup_checksum = checksum & ZIO_CHECKSUM_MASK;
212	os->os_dedup_verify = !!(checksum & ZIO_CHECKSUM_VERIFY);
213}
214
215static void
216primary_cache_changed_cb(void *arg, uint64_t newval)
217{
218	objset_t *os = arg;
219
220	/*
221	 * Inheritance and range checking should have been done by now.
222	 */
223	ASSERT(newval == ZFS_CACHE_ALL || newval == ZFS_CACHE_NONE ||
224	    newval == ZFS_CACHE_METADATA);
225
226	os->os_primary_cache = newval;
227}
228
229static void
230secondary_cache_changed_cb(void *arg, uint64_t newval)
231{
232	objset_t *os = arg;
233
234	/*
235	 * Inheritance and range checking should have been done by now.
236	 */
237	ASSERT(newval == ZFS_CACHE_ALL || newval == ZFS_CACHE_NONE ||
238	    newval == ZFS_CACHE_METADATA);
239
240	os->os_secondary_cache = newval;
241}
242
243static void
244sync_changed_cb(void *arg, uint64_t newval)
245{
246	objset_t *os = arg;
247
248	/*
249	 * Inheritance and range checking should have been done by now.
250	 */
251	ASSERT(newval == ZFS_SYNC_STANDARD || newval == ZFS_SYNC_ALWAYS ||
252	    newval == ZFS_SYNC_DISABLED);
253
254	os->os_sync = newval;
255	if (os->os_zil)
256		zil_set_sync(os->os_zil, newval);
257}
258
259static void
260redundant_metadata_changed_cb(void *arg, uint64_t newval)
261{
262	objset_t *os = arg;
263
264	/*
265	 * Inheritance and range checking should have been done by now.
266	 */
267	ASSERT(newval == ZFS_REDUNDANT_METADATA_ALL ||
268	    newval == ZFS_REDUNDANT_METADATA_MOST);
269
270	os->os_redundant_metadata = newval;
271}
272
273static void
274logbias_changed_cb(void *arg, uint64_t newval)
275{
276	objset_t *os = arg;
277
278	ASSERT(newval == ZFS_LOGBIAS_LATENCY ||
279	    newval == ZFS_LOGBIAS_THROUGHPUT);
280	os->os_logbias = newval;
281	if (os->os_zil)
282		zil_set_logbias(os->os_zil, newval);
283}
284
285static void
286recordsize_changed_cb(void *arg, uint64_t newval)
287{
288	objset_t *os = arg;
289
290	os->os_recordsize = newval;
291}
292
293void
294dmu_objset_byteswap(void *buf, size_t size)
295{
296	objset_phys_t *osp = buf;
297
298	ASSERT(size == OBJSET_OLD_PHYS_SIZE || size == sizeof (objset_phys_t));
299	dnode_byteswap(&osp->os_meta_dnode);
300	byteswap_uint64_array(&osp->os_zil_header, sizeof (zil_header_t));
301	osp->os_type = BSWAP_64(osp->os_type);
302	osp->os_flags = BSWAP_64(osp->os_flags);
303	if (size == sizeof (objset_phys_t)) {
304		dnode_byteswap(&osp->os_userused_dnode);
305		dnode_byteswap(&osp->os_groupused_dnode);
306	}
307}
308
309/*
310 * The hash is a CRC-based hash of the objset_t pointer and the object number.
311 */
312static uint64_t
313dnode_hash(const objset_t *os, uint64_t obj)
314{
315	uintptr_t osv = (uintptr_t)os;
316	uint64_t crc = -1ULL;
317
318	ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);
319	/*
320	 * The low 6 bits of the pointer don't have much entropy, because
321	 * the objset_t is larger than 2^6 bytes long.
322	 */
323	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (osv >> 6)) & 0xFF];
324	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (obj >> 0)) & 0xFF];
325	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (obj >> 8)) & 0xFF];
326	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (obj >> 16)) & 0xFF];
327
328	crc ^= (osv>>14) ^ (obj>>24);
329
330	return (crc);
331}
332
333unsigned int
334dnode_multilist_index_func(multilist_t *ml, void *obj)
335{
336	dnode_t *dn = obj;
337	return (dnode_hash(dn->dn_objset, dn->dn_object) %
338	    multilist_get_num_sublists(ml));
339}
340
341/*
342 * Instantiates the objset_t in-memory structure corresponding to the
343 * objset_phys_t that's pointed to by the specified blkptr_t.
344 */
345int
346dmu_objset_open_impl(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp,
347    objset_t **osp)
348{
349	objset_t *os;
350	int i, err;
351
352	ASSERT(ds == NULL || MUTEX_HELD(&ds->ds_opening_lock));
353
354#if 0
355	/*
356	 * The $ORIGIN dataset (if it exists) doesn't have an associated
357	 * objset, so there's no reason to open it. The $ORIGIN dataset
358	 * will not exist on pools older than SPA_VERSION_ORIGIN.
359	 */
360	if (ds != NULL && spa_get_dsl(spa) != NULL &&
361	    spa_get_dsl(spa)->dp_origin_snap != NULL) {
362		ASSERT3P(ds->ds_dir, !=,
363		    spa_get_dsl(spa)->dp_origin_snap->ds_dir);
364	}
365#endif
366
367	os = kmem_zalloc(sizeof (objset_t), KM_SLEEP);
368	os->os_dsl_dataset = ds;
369	os->os_spa = spa;
370	os->os_rootbp = bp;
371	if (!BP_IS_HOLE(os->os_rootbp)) {
372		arc_flags_t aflags = ARC_FLAG_WAIT;
373		zbookmark_phys_t zb;
374		SET_BOOKMARK(&zb, ds ? ds->ds_object : DMU_META_OBJSET,
375		    ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);
376
377		if (DMU_OS_IS_L2CACHEABLE(os))
378			aflags |= ARC_FLAG_L2CACHE;
379
380		dprintf_bp(os->os_rootbp, "reading %s", "");
381		err = arc_read(NULL, spa, os->os_rootbp,
382		    arc_getbuf_func, &os->os_phys_buf,
383		    ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, &aflags, &zb);
384		if (err != 0) {
385			kmem_free(os, sizeof (objset_t));
386			/* convert checksum errors into IO errors */
387			if (err == ECKSUM)
388				err = SET_ERROR(EIO);
389			return (err);
390		}
391
392		/* Increase the blocksize if we are permitted. */
393		if (spa_version(spa) >= SPA_VERSION_USERSPACE &&
394		    arc_buf_size(os->os_phys_buf) < sizeof (objset_phys_t)) {
395			arc_buf_t *buf = arc_alloc_buf(spa, &os->os_phys_buf,
396			    ARC_BUFC_METADATA, sizeof (objset_phys_t));
397			bzero(buf->b_data, sizeof (objset_phys_t));
398			bcopy(os->os_phys_buf->b_data, buf->b_data,
399			    arc_buf_size(os->os_phys_buf));
400			arc_buf_destroy(os->os_phys_buf, &os->os_phys_buf);
401			os->os_phys_buf = buf;
402		}
403
404		os->os_phys = os->os_phys_buf->b_data;
405		os->os_flags = os->os_phys->os_flags;
406	} else {
407		int size = spa_version(spa) >= SPA_VERSION_USERSPACE ?
408		    sizeof (objset_phys_t) : OBJSET_OLD_PHYS_SIZE;
409		os->os_phys_buf = arc_alloc_buf(spa, &os->os_phys_buf,
410		    ARC_BUFC_METADATA, size);
411		os->os_phys = os->os_phys_buf->b_data;
412		bzero(os->os_phys, size);
413	}
414
415	/*
416	 * Note: the changed_cb will be called once before the register
417	 * func returns, thus changing the checksum/compression from the
418	 * default (fletcher2/off).  Snapshots don't need to know about
419	 * checksum/compression/copies.
420	 */
421	if (ds != NULL) {
422		boolean_t needlock = B_FALSE;
423
424		/*
425		 * Note: it's valid to open the objset if the dataset is
426		 * long-held, in which case the pool_config lock will not
427		 * be held.
428		 */
429		if (!dsl_pool_config_held(dmu_objset_pool(os))) {
430			needlock = B_TRUE;
431			dsl_pool_config_enter(dmu_objset_pool(os), FTAG);
432		}
433		err = dsl_prop_register(ds,
434		    zfs_prop_to_name(ZFS_PROP_PRIMARYCACHE),
435		    primary_cache_changed_cb, os);
436		if (err == 0) {
437			err = dsl_prop_register(ds,
438			    zfs_prop_to_name(ZFS_PROP_SECONDARYCACHE),
439			    secondary_cache_changed_cb, os);
440		}
441		if (!ds->ds_is_snapshot) {
442			if (err == 0) {
443				err = dsl_prop_register(ds,
444				    zfs_prop_to_name(ZFS_PROP_CHECKSUM),
445				    checksum_changed_cb, os);
446			}
447			if (err == 0) {
448				err = dsl_prop_register(ds,
449				    zfs_prop_to_name(ZFS_PROP_COMPRESSION),
450				    compression_changed_cb, os);
451			}
452			if (err == 0) {
453				err = dsl_prop_register(ds,
454				    zfs_prop_to_name(ZFS_PROP_COPIES),
455				    copies_changed_cb, os);
456			}
457			if (err == 0) {
458				err = dsl_prop_register(ds,
459				    zfs_prop_to_name(ZFS_PROP_DEDUP),
460				    dedup_changed_cb, os);
461			}
462			if (err == 0) {
463				err = dsl_prop_register(ds,
464				    zfs_prop_to_name(ZFS_PROP_LOGBIAS),
465				    logbias_changed_cb, os);
466			}
467			if (err == 0) {
468				err = dsl_prop_register(ds,
469				    zfs_prop_to_name(ZFS_PROP_SYNC),
470				    sync_changed_cb, os);
471			}
472			if (err == 0) {
473				err = dsl_prop_register(ds,
474				    zfs_prop_to_name(
475				    ZFS_PROP_REDUNDANT_METADATA),
476				    redundant_metadata_changed_cb, os);
477			}
478			if (err == 0) {
479				err = dsl_prop_register(ds,
480				    zfs_prop_to_name(ZFS_PROP_RECORDSIZE),
481				    recordsize_changed_cb, os);
482			}
483		}
484		if (needlock)
485			dsl_pool_config_exit(dmu_objset_pool(os), FTAG);
486		if (err != 0) {
487			arc_buf_destroy(os->os_phys_buf, &os->os_phys_buf);
488			kmem_free(os, sizeof (objset_t));
489			return (err);
490		}
491	} else {
492		/* It's the meta-objset. */
493		os->os_checksum = ZIO_CHECKSUM_FLETCHER_4;
494		os->os_compress = ZIO_COMPRESS_ON;
495		os->os_copies = spa_max_replication(spa);
496		os->os_dedup_checksum = ZIO_CHECKSUM_OFF;
497		os->os_dedup_verify = B_FALSE;
498		os->os_logbias = ZFS_LOGBIAS_LATENCY;
499		os->os_sync = ZFS_SYNC_STANDARD;
500		os->os_primary_cache = ZFS_CACHE_ALL;
501		os->os_secondary_cache = ZFS_CACHE_ALL;
502	}
503	/*
504	 * These properties will be filled in by the logic in zfs_get_zplprop()
505	 * when they are queried for the first time.
506	 */
507	os->os_version = OBJSET_PROP_UNINITIALIZED;
508	os->os_normalization = OBJSET_PROP_UNINITIALIZED;
509	os->os_utf8only = OBJSET_PROP_UNINITIALIZED;
510	os->os_casesensitivity = OBJSET_PROP_UNINITIALIZED;
511
512	if (ds == NULL || !ds->ds_is_snapshot)
513		os->os_zil_header = os->os_phys->os_zil_header;
514	os->os_zil = zil_alloc(os, &os->os_zil_header);
515
516	for (i = 0; i < TXG_SIZE; i++) {
517		os->os_dirty_dnodes[i] = multilist_create(sizeof (dnode_t),
518		    offsetof(dnode_t, dn_dirty_link[i]),
519		    dnode_multilist_index_func);
520	}
521	list_create(&os->os_dnodes, sizeof (dnode_t),
522	    offsetof(dnode_t, dn_link));
523	list_create(&os->os_downgraded_dbufs, sizeof (dmu_buf_impl_t),
524	    offsetof(dmu_buf_impl_t, db_link));
525
526	mutex_init(&os->os_lock, NULL, MUTEX_DEFAULT, NULL);
527	mutex_init(&os->os_userused_lock, NULL, MUTEX_DEFAULT, NULL);
528	mutex_init(&os->os_obj_lock, NULL, MUTEX_DEFAULT, NULL);
529	mutex_init(&os->os_user_ptr_lock, NULL, MUTEX_DEFAULT, NULL);
530
531	dnode_special_open(os, &os->os_phys->os_meta_dnode,
532	    DMU_META_DNODE_OBJECT, &os->os_meta_dnode);
533	if (arc_buf_size(os->os_phys_buf) >= sizeof (objset_phys_t)) {
534		dnode_special_open(os, &os->os_phys->os_userused_dnode,
535		    DMU_USERUSED_OBJECT, &os->os_userused_dnode);
536		dnode_special_open(os, &os->os_phys->os_groupused_dnode,
537		    DMU_GROUPUSED_OBJECT, &os->os_groupused_dnode);
538	}
539
540	*osp = os;
541	return (0);
542}
543
544int
545dmu_objset_from_ds(dsl_dataset_t *ds, objset_t **osp)
546{
547	int err = 0;
548
549	/*
550	 * We shouldn't be doing anything with dsl_dataset_t's unless the
551	 * pool_config lock is held, or the dataset is long-held.
552	 */
553	ASSERT(dsl_pool_config_held(ds->ds_dir->dd_pool) ||
554	    dsl_dataset_long_held(ds));
555
556	mutex_enter(&ds->ds_opening_lock);
557	if (ds->ds_objset == NULL) {
558		objset_t *os;
559		rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
560		err = dmu_objset_open_impl(dsl_dataset_get_spa(ds),
561		    ds, dsl_dataset_get_blkptr(ds), &os);
562		rrw_exit(&ds->ds_bp_rwlock, FTAG);
563
564		if (err == 0) {
565			mutex_enter(&ds->ds_lock);
566			ASSERT(ds->ds_objset == NULL);
567			ds->ds_objset = os;
568			mutex_exit(&ds->ds_lock);
569		}
570	}
571	*osp = ds->ds_objset;
572	mutex_exit(&ds->ds_opening_lock);
573	return (err);
574}
575
576/*
577 * Holds the pool while the objset is held.  Therefore only one objset
578 * can be held at a time.
579 */
580int
581dmu_objset_hold(const char *name, void *tag, objset_t **osp)
582{
583	dsl_pool_t *dp;
584	dsl_dataset_t *ds;
585	int err;
586
587	err = dsl_pool_hold(name, tag, &dp);
588	if (err != 0)
589		return (err);
590	err = dsl_dataset_hold(dp, name, tag, &ds);
591	if (err != 0) {
592		dsl_pool_rele(dp, tag);
593		return (err);
594	}
595
596	err = dmu_objset_from_ds(ds, osp);
597	if (err != 0) {
598		dsl_dataset_rele(ds, tag);
599		dsl_pool_rele(dp, tag);
600	}
601
602	return (err);
603}
604
605static int
606dmu_objset_own_impl(dsl_dataset_t *ds, dmu_objset_type_t type,
607    boolean_t readonly, void *tag, objset_t **osp)
608{
609	int err;
610
611	err = dmu_objset_from_ds(ds, osp);
612	if (err != 0) {
613		dsl_dataset_disown(ds, tag);
614	} else if (type != DMU_OST_ANY && type != (*osp)->os_phys->os_type) {
615		dsl_dataset_disown(ds, tag);
616		return (SET_ERROR(EINVAL));
617	} else if (!readonly && dsl_dataset_is_snapshot(ds)) {
618		dsl_dataset_disown(ds, tag);
619		return (SET_ERROR(EROFS));
620	}
621	return (err);
622}
623
624/*
625 * dsl_pool must not be held when this is called.
626 * Upon successful return, there will be a longhold on the dataset,
627 * and the dsl_pool will not be held.
628 */
629int
630dmu_objset_own(const char *name, dmu_objset_type_t type,
631    boolean_t readonly, void *tag, objset_t **osp)
632{
633	dsl_pool_t *dp;
634	dsl_dataset_t *ds;
635	int err;
636
637	err = dsl_pool_hold(name, FTAG, &dp);
638	if (err != 0)
639		return (err);
640	err = dsl_dataset_own(dp, name, tag, &ds);
641	if (err != 0) {
642		dsl_pool_rele(dp, FTAG);
643		return (err);
644	}
645	err = dmu_objset_own_impl(ds, type, readonly, tag, osp);
646	dsl_pool_rele(dp, FTAG);
647
648	return (err);
649}
650
651int
652dmu_objset_own_obj(dsl_pool_t *dp, uint64_t obj, dmu_objset_type_t type,
653    boolean_t readonly, void *tag, objset_t **osp)
654{
655	dsl_dataset_t *ds;
656	int err;
657
658	err = dsl_dataset_own_obj(dp, obj, tag, &ds);
659	if (err != 0)
660		return (err);
661
662	return (dmu_objset_own_impl(ds, type, readonly, tag, osp));
663}
664
665void
666dmu_objset_rele(objset_t *os, void *tag)
667{
668	dsl_pool_t *dp = dmu_objset_pool(os);
669	dsl_dataset_rele(os->os_dsl_dataset, tag);
670	dsl_pool_rele(dp, tag);
671}
672
673/*
674 * When we are called, os MUST refer to an objset associated with a dataset
675 * that is owned by 'tag'; that is, is held and long held by 'tag' and ds_owner
676 * == tag.  We will then release and reacquire ownership of the dataset while
677 * holding the pool config_rwlock to avoid intervening namespace or ownership
678 * changes may occur.
679 *
680 * This exists solely to accommodate zfs_ioc_userspace_upgrade()'s desire to
681 * release the hold on its dataset and acquire a new one on the dataset of the
682 * same name so that it can be partially torn down and reconstructed.
683 */
684void
685dmu_objset_refresh_ownership(dsl_dataset_t *ds, dsl_dataset_t **newds,
686    void *tag)
687{
688	dsl_pool_t *dp;
689	char name[ZFS_MAX_DATASET_NAME_LEN];
690
691	VERIFY3P(ds, !=, NULL);
692	VERIFY3P(ds->ds_owner, ==, tag);
693	VERIFY(dsl_dataset_long_held(ds));
694
695	dsl_dataset_name(ds, name);
696	dp = ds->ds_dir->dd_pool;
697	dsl_pool_config_enter(dp, FTAG);
698	dsl_dataset_disown(ds, tag);
699	VERIFY0(dsl_dataset_own(dp, name, tag, newds));
700	dsl_pool_config_exit(dp, FTAG);
701}
702
703void
704dmu_objset_disown(objset_t *os, void *tag)
705{
706	dsl_dataset_disown(os->os_dsl_dataset, tag);
707}
708
709void
710dmu_objset_evict_dbufs(objset_t *os)
711{
712	dnode_t dn_marker;
713	dnode_t *dn;
714
715	mutex_enter(&os->os_lock);
716	dn = list_head(&os->os_dnodes);
717	while (dn != NULL) {
718		/*
719		 * Skip dnodes without holds.  We have to do this dance
720		 * because dnode_add_ref() only works if there is already a
721		 * hold.  If the dnode has no holds, then it has no dbufs.
722		 */
723		if (dnode_add_ref(dn, FTAG)) {
724			list_insert_after(&os->os_dnodes, dn, &dn_marker);
725			mutex_exit(&os->os_lock);
726
727			dnode_evict_dbufs(dn);
728			dnode_rele(dn, FTAG);
729
730			mutex_enter(&os->os_lock);
731			dn = list_next(&os->os_dnodes, &dn_marker);
732			list_remove(&os->os_dnodes, &dn_marker);
733		} else {
734			dn = list_next(&os->os_dnodes, dn);
735		}
736	}
737	mutex_exit(&os->os_lock);
738
739	if (DMU_USERUSED_DNODE(os) != NULL) {
740		dnode_evict_dbufs(DMU_GROUPUSED_DNODE(os));
741		dnode_evict_dbufs(DMU_USERUSED_DNODE(os));
742	}
743	dnode_evict_dbufs(DMU_META_DNODE(os));
744}
745
746/*
747 * Objset eviction processing is split into into two pieces.
748 * The first marks the objset as evicting, evicts any dbufs that
749 * have a refcount of zero, and then queues up the objset for the
750 * second phase of eviction.  Once os->os_dnodes has been cleared by
751 * dnode_buf_pageout()->dnode_destroy(), the second phase is executed.
752 * The second phase closes the special dnodes, dequeues the objset from
753 * the list of those undergoing eviction, and finally frees the objset.
754 *
755 * NOTE: Due to asynchronous eviction processing (invocation of
756 *       dnode_buf_pageout()), it is possible for the meta dnode for the
757 *       objset to have no holds even though os->os_dnodes is not empty.
758 */
759void
760dmu_objset_evict(objset_t *os)
761{
762	dsl_dataset_t *ds = os->os_dsl_dataset;
763
764	for (int t = 0; t < TXG_SIZE; t++)
765		ASSERT(!dmu_objset_is_dirty(os, t));
766
767	if (ds)
768		dsl_prop_unregister_all(ds, os);
769
770	if (os->os_sa)
771		sa_tear_down(os);
772
773	dmu_objset_evict_dbufs(os);
774
775	mutex_enter(&os->os_lock);
776	spa_evicting_os_register(os->os_spa, os);
777	if (list_is_empty(&os->os_dnodes)) {
778		mutex_exit(&os->os_lock);
779		dmu_objset_evict_done(os);
780	} else {
781		mutex_exit(&os->os_lock);
782	}
783}
784
785void
786dmu_objset_evict_done(objset_t *os)
787{
788	ASSERT3P(list_head(&os->os_dnodes), ==, NULL);
789
790	dnode_special_close(&os->os_meta_dnode);
791	if (DMU_USERUSED_DNODE(os)) {
792		dnode_special_close(&os->os_userused_dnode);
793		dnode_special_close(&os->os_groupused_dnode);
794	}
795	zil_free(os->os_zil);
796
797	arc_buf_destroy(os->os_phys_buf, &os->os_phys_buf);
798
799	/*
800	 * This is a barrier to prevent the objset from going away in
801	 * dnode_move() until we can safely ensure that the objset is still in
802	 * use. We consider the objset valid before the barrier and invalid
803	 * after the barrier.
804	 */
805	rw_enter(&os_lock, RW_READER);
806	rw_exit(&os_lock);
807
808	mutex_destroy(&os->os_lock);
809	mutex_destroy(&os->os_userused_lock);
810	mutex_destroy(&os->os_obj_lock);
811	mutex_destroy(&os->os_user_ptr_lock);
812	for (int i = 0; i < TXG_SIZE; i++) {
813		multilist_destroy(os->os_dirty_dnodes[i]);
814	}
815	spa_evicting_os_deregister(os->os_spa, os);
816	kmem_free(os, sizeof (objset_t));
817}
818
819timestruc_t
820dmu_objset_snap_cmtime(objset_t *os)
821{
822	return (dsl_dir_snap_cmtime(os->os_dsl_dataset->ds_dir));
823}
824
825/* called from dsl for meta-objset */
826objset_t *
827dmu_objset_create_impl(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp,
828    dmu_objset_type_t type, dmu_tx_t *tx)
829{
830	objset_t *os;
831	dnode_t *mdn;
832
833	ASSERT(dmu_tx_is_syncing(tx));
834
835	if (ds != NULL)
836		VERIFY0(dmu_objset_from_ds(ds, &os));
837	else
838		VERIFY0(dmu_objset_open_impl(spa, NULL, bp, &os));
839
840	mdn = DMU_META_DNODE(os);
841
842	dnode_allocate(mdn, DMU_OT_DNODE, 1 << DNODE_BLOCK_SHIFT,
843	    DN_MAX_INDBLKSHIFT, DMU_OT_NONE, 0, tx);
844
845	/*
846	 * We don't want to have to increase the meta-dnode's nlevels
847	 * later, because then we could do it in quescing context while
848	 * we are also accessing it in open context.
849	 *
850	 * This precaution is not necessary for the MOS (ds == NULL),
851	 * because the MOS is only updated in syncing context.
852	 * This is most fortunate: the MOS is the only objset that
853	 * needs to be synced multiple times as spa_sync() iterates
854	 * to convergence, so minimizing its dn_nlevels matters.
855	 */
856	if (ds != NULL) {
857		int levels = 1;
858
859		/*
860		 * Determine the number of levels necessary for the meta-dnode
861		 * to contain DN_MAX_OBJECT dnodes.  Note that in order to
862		 * ensure that we do not overflow 64 bits, there has to be
863		 * a nlevels that gives us a number of blocks > DN_MAX_OBJECT
864		 * but < 2^64.  Therefore,
865		 * (mdn->dn_indblkshift - SPA_BLKPTRSHIFT) (10) must be
866		 * less than (64 - log2(DN_MAX_OBJECT)) (16).
867		 */
868		while ((uint64_t)mdn->dn_nblkptr <<
869		    (mdn->dn_datablkshift - DNODE_SHIFT +
870		    (levels - 1) * (mdn->dn_indblkshift - SPA_BLKPTRSHIFT)) <
871		    DN_MAX_OBJECT)
872			levels++;
873
874		mdn->dn_next_nlevels[tx->tx_txg & TXG_MASK] =
875		    mdn->dn_nlevels = levels;
876	}
877
878	ASSERT(type != DMU_OST_NONE);
879	ASSERT(type != DMU_OST_ANY);
880	ASSERT(type < DMU_OST_NUMTYPES);
881	os->os_phys->os_type = type;
882	if (dmu_objset_userused_enabled(os)) {
883		os->os_phys->os_flags |= OBJSET_FLAG_USERACCOUNTING_COMPLETE;
884		os->os_flags = os->os_phys->os_flags;
885	}
886
887	dsl_dataset_dirty(ds, tx);
888
889	return (os);
890}
891
892typedef struct dmu_objset_create_arg {
893	const char *doca_name;
894	cred_t *doca_cred;
895	void (*doca_userfunc)(objset_t *os, void *arg,
896	    cred_t *cr, dmu_tx_t *tx);
897	void *doca_userarg;
898	dmu_objset_type_t doca_type;
899	uint64_t doca_flags;
900} dmu_objset_create_arg_t;
901
902/*ARGSUSED*/
903static int
904dmu_objset_create_check(void *arg, dmu_tx_t *tx)
905{
906	dmu_objset_create_arg_t *doca = arg;
907	dsl_pool_t *dp = dmu_tx_pool(tx);
908	dsl_dir_t *pdd;
909	dsl_dataset_t *parentds;
910	objset_t *parentos;
911	const char *tail;
912	int error;
913
914	if (strchr(doca->doca_name, '@') != NULL)
915		return (SET_ERROR(EINVAL));
916
917	if (strlen(doca->doca_name) >= ZFS_MAX_DATASET_NAME_LEN)
918		return (SET_ERROR(ENAMETOOLONG));
919
920	if (dataset_nestcheck(doca->doca_name) != 0)
921		return (SET_ERROR(ENAMETOOLONG));
922
923	error = dsl_dir_hold(dp, doca->doca_name, FTAG, &pdd, &tail);
924	if (error != 0)
925		return (error);
926	if (tail == NULL) {
927		dsl_dir_rele(pdd, FTAG);
928		return (SET_ERROR(EEXIST));
929	}
930	error = dsl_fs_ss_limit_check(pdd, 1, ZFS_PROP_FILESYSTEM_LIMIT, NULL,
931	    doca->doca_cred);
932	if (error != 0) {
933		dsl_dir_rele(pdd, FTAG);
934		return (error);
935	}
936
937	/* can't create below anything but filesystems (eg. no ZVOLs) */
938	error = dsl_dataset_hold_obj(pdd->dd_pool,
939	    dsl_dir_phys(pdd)->dd_head_dataset_obj, FTAG, &parentds);
940	if (error != 0) {
941		dsl_dir_rele(pdd, FTAG);
942		return (error);
943	}
944	error = dmu_objset_from_ds(parentds, &parentos);
945	if (error != 0) {
946		dsl_dataset_rele(parentds, FTAG);
947		dsl_dir_rele(pdd, FTAG);
948		return (error);
949	}
950	if (dmu_objset_type(parentos) != DMU_OST_ZFS) {
951		dsl_dataset_rele(parentds, FTAG);
952		dsl_dir_rele(pdd, FTAG);
953		return (SET_ERROR(ZFS_ERR_WRONG_PARENT));
954	}
955	dsl_dataset_rele(parentds, FTAG);
956	dsl_dir_rele(pdd, FTAG);
957
958	return (error);
959}
960
961static void
962dmu_objset_create_sync(void *arg, dmu_tx_t *tx)
963{
964	dmu_objset_create_arg_t *doca = arg;
965	dsl_pool_t *dp = dmu_tx_pool(tx);
966	dsl_dir_t *pdd;
967	const char *tail;
968	dsl_dataset_t *ds;
969	uint64_t obj;
970	blkptr_t *bp;
971	objset_t *os;
972
973	VERIFY0(dsl_dir_hold(dp, doca->doca_name, FTAG, &pdd, &tail));
974
975	obj = dsl_dataset_create_sync(pdd, tail, NULL, doca->doca_flags,
976	    doca->doca_cred, tx);
977
978	VERIFY0(dsl_dataset_hold_obj(pdd->dd_pool, obj, FTAG, &ds));
979	rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
980	bp = dsl_dataset_get_blkptr(ds);
981	os = dmu_objset_create_impl(pdd->dd_pool->dp_spa,
982	    ds, bp, doca->doca_type, tx);
983	rrw_exit(&ds->ds_bp_rwlock, FTAG);
984
985	if (doca->doca_userfunc != NULL) {
986		doca->doca_userfunc(os, doca->doca_userarg,
987		    doca->doca_cred, tx);
988	}
989
990	spa_history_log_internal_ds(ds, "create", tx, "");
991	dsl_dataset_rele(ds, FTAG);
992	dsl_dir_rele(pdd, FTAG);
993}
994
995int
996dmu_objset_create(const char *name, dmu_objset_type_t type, uint64_t flags,
997    void (*func)(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx), void *arg)
998{
999	dmu_objset_create_arg_t doca;
1000
1001	doca.doca_name = name;
1002	doca.doca_cred = CRED();
1003	doca.doca_flags = flags;
1004	doca.doca_userfunc = func;
1005	doca.doca_userarg = arg;
1006	doca.doca_type = type;
1007
1008	return (dsl_sync_task(name,
1009	    dmu_objset_create_check, dmu_objset_create_sync, &doca,
1010	    5, ZFS_SPACE_CHECK_NORMAL));
1011}
1012
1013typedef struct dmu_objset_clone_arg {
1014	const char *doca_clone;
1015	const char *doca_origin;
1016	cred_t *doca_cred;
1017} dmu_objset_clone_arg_t;
1018
1019/*ARGSUSED*/
1020static int
1021dmu_objset_clone_check(void *arg, dmu_tx_t *tx)
1022{
1023	dmu_objset_clone_arg_t *doca = arg;
1024	dsl_dir_t *pdd;
1025	const char *tail;
1026	int error;
1027	dsl_dataset_t *origin;
1028	dsl_pool_t *dp = dmu_tx_pool(tx);
1029
1030	if (strchr(doca->doca_clone, '@') != NULL)
1031		return (SET_ERROR(EINVAL));
1032
1033	if (strlen(doca->doca_clone) >= ZFS_MAX_DATASET_NAME_LEN)
1034		return (SET_ERROR(ENAMETOOLONG));
1035
1036	error = dsl_dir_hold(dp, doca->doca_clone, FTAG, &pdd, &tail);
1037	if (error != 0)
1038		return (error);
1039	if (tail == NULL) {
1040		dsl_dir_rele(pdd, FTAG);
1041		return (SET_ERROR(EEXIST));
1042	}
1043
1044	error = dsl_fs_ss_limit_check(pdd, 1, ZFS_PROP_FILESYSTEM_LIMIT, NULL,
1045	    doca->doca_cred);
1046	if (error != 0) {
1047		dsl_dir_rele(pdd, FTAG);
1048		return (SET_ERROR(EDQUOT));
1049	}
1050	dsl_dir_rele(pdd, FTAG);
1051
1052	error = dsl_dataset_hold(dp, doca->doca_origin, FTAG, &origin);
1053	if (error != 0)
1054		return (error);
1055
1056	/* You can only clone snapshots, not the head datasets. */
1057	if (!origin->ds_is_snapshot) {
1058		dsl_dataset_rele(origin, FTAG);
1059		return (SET_ERROR(EINVAL));
1060	}
1061	dsl_dataset_rele(origin, FTAG);
1062
1063	return (0);
1064}
1065
1066static void
1067dmu_objset_clone_sync(void *arg, dmu_tx_t *tx)
1068{
1069	dmu_objset_clone_arg_t *doca = arg;
1070	dsl_pool_t *dp = dmu_tx_pool(tx);
1071	dsl_dir_t *pdd;
1072	const char *tail;
1073	dsl_dataset_t *origin, *ds;
1074	uint64_t obj;
1075	char namebuf[ZFS_MAX_DATASET_NAME_LEN];
1076
1077	VERIFY0(dsl_dir_hold(dp, doca->doca_clone, FTAG, &pdd, &tail));
1078	VERIFY0(dsl_dataset_hold(dp, doca->doca_origin, FTAG, &origin));
1079
1080	obj = dsl_dataset_create_sync(pdd, tail, origin, 0,
1081	    doca->doca_cred, tx);
1082
1083	VERIFY0(dsl_dataset_hold_obj(pdd->dd_pool, obj, FTAG, &ds));
1084	dsl_dataset_name(origin, namebuf);
1085	spa_history_log_internal_ds(ds, "clone", tx,
1086	    "origin=%s (%llu)", namebuf, origin->ds_object);
1087	dsl_dataset_rele(ds, FTAG);
1088	dsl_dataset_rele(origin, FTAG);
1089	dsl_dir_rele(pdd, FTAG);
1090}
1091
1092int
1093dmu_objset_clone(const char *clone, const char *origin)
1094{
1095	dmu_objset_clone_arg_t doca;
1096
1097	doca.doca_clone = clone;
1098	doca.doca_origin = origin;
1099	doca.doca_cred = CRED();
1100
1101	return (dsl_sync_task(clone,
1102	    dmu_objset_clone_check, dmu_objset_clone_sync, &doca,
1103	    5, ZFS_SPACE_CHECK_NORMAL));
1104}
1105
1106static int
1107dmu_objset_remap_indirects_impl(objset_t *os, uint64_t last_removed_txg)
1108{
1109	int error = 0;
1110	uint64_t object = 0;
1111	while ((error = dmu_object_next(os, &object, B_FALSE, 0)) == 0) {
1112		error = dmu_object_remap_indirects(os, object,
1113		    last_removed_txg);
1114		/*
1115		 * If the ZPL removed the object before we managed to dnode_hold
1116		 * it, we would get an ENOENT. If the ZPL declares its intent
1117		 * to remove the object (dnode_free) before we manage to
1118		 * dnode_hold it, we would get an EEXIST. In either case, we
1119		 * want to continue remapping the other objects in the objset;
1120		 * in all other cases, we want to break early.
1121		 */
1122		if (error != 0 && error != ENOENT && error != EEXIST) {
1123			break;
1124		}
1125	}
1126	if (error == ESRCH) {
1127		error = 0;
1128	}
1129	return (error);
1130}
1131
1132int
1133dmu_objset_remap_indirects(const char *fsname)
1134{
1135	int error = 0;
1136	objset_t *os = NULL;
1137	uint64_t last_removed_txg;
1138	uint64_t remap_start_txg;
1139	dsl_dir_t *dd;
1140
1141	error = dmu_objset_hold(fsname, FTAG, &os);
1142	if (error != 0) {
1143		return (error);
1144	}
1145	dd = dmu_objset_ds(os)->ds_dir;
1146
1147	if (!spa_feature_is_enabled(dmu_objset_spa(os),
1148	    SPA_FEATURE_OBSOLETE_COUNTS)) {
1149		dmu_objset_rele(os, FTAG);
1150		return (SET_ERROR(ENOTSUP));
1151	}
1152
1153	if (dsl_dataset_is_snapshot(dmu_objset_ds(os))) {
1154		dmu_objset_rele(os, FTAG);
1155		return (SET_ERROR(EINVAL));
1156	}
1157
1158	/*
1159	 * If there has not been a removal, we're done.
1160	 */
1161	last_removed_txg = spa_get_last_removal_txg(dmu_objset_spa(os));
1162	if (last_removed_txg == -1ULL) {
1163		dmu_objset_rele(os, FTAG);
1164		return (0);
1165	}
1166
1167	/*
1168	 * If we have remapped since the last removal, we're done.
1169	 */
1170	if (dsl_dir_is_zapified(dd)) {
1171		uint64_t last_remap_txg;
1172		if (zap_lookup(spa_meta_objset(dmu_objset_spa(os)),
1173		    dd->dd_object, DD_FIELD_LAST_REMAP_TXG,
1174		    sizeof (last_remap_txg), 1, &last_remap_txg) == 0 &&
1175		    last_remap_txg > last_removed_txg) {
1176			dmu_objset_rele(os, FTAG);
1177			return (0);
1178		}
1179	}
1180
1181	dsl_dataset_long_hold(dmu_objset_ds(os), FTAG);
1182	dsl_pool_rele(dmu_objset_pool(os), FTAG);
1183
1184	remap_start_txg = spa_last_synced_txg(dmu_objset_spa(os));
1185	error = dmu_objset_remap_indirects_impl(os, last_removed_txg);
1186	if (error == 0) {
1187		/*
1188		 * We update the last_remap_txg to be the start txg so that
1189		 * we can guarantee that every block older than last_remap_txg
1190		 * that can be remapped has been remapped.
1191		 */
1192		error = dsl_dir_update_last_remap_txg(dd, remap_start_txg);
1193	}
1194
1195	dsl_dataset_long_rele(dmu_objset_ds(os), FTAG);
1196	dsl_dataset_rele(dmu_objset_ds(os), FTAG);
1197
1198	return (error);
1199}
1200
1201int
1202dmu_objset_snapshot_one(const char *fsname, const char *snapname)
1203{
1204	int err;
1205	char *longsnap = kmem_asprintf("%s@%s", fsname, snapname);
1206	nvlist_t *snaps = fnvlist_alloc();
1207
1208	fnvlist_add_boolean(snaps, longsnap);
1209	strfree(longsnap);
1210	err = dsl_dataset_snapshot(snaps, NULL, NULL);
1211	fnvlist_free(snaps);
1212	return (err);
1213}
1214
1215static void
1216dmu_objset_sync_dnodes(multilist_sublist_t *list, dmu_tx_t *tx)
1217{
1218	dnode_t *dn;
1219
1220	while ((dn = multilist_sublist_head(list)) != NULL) {
1221		ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);
1222		ASSERT(dn->dn_dbuf->db_data_pending);
1223		/*
1224		 * Initialize dn_zio outside dnode_sync() because the
1225		 * meta-dnode needs to set it ouside dnode_sync().
1226		 */
1227		dn->dn_zio = dn->dn_dbuf->db_data_pending->dr_zio;
1228		ASSERT(dn->dn_zio);
1229
1230		ASSERT3U(dn->dn_nlevels, <=, DN_MAX_LEVELS);
1231		multilist_sublist_remove(list, dn);
1232
1233		multilist_t *newlist = dn->dn_objset->os_synced_dnodes;
1234		if (newlist != NULL) {
1235			(void) dnode_add_ref(dn, newlist);
1236			multilist_insert(newlist, dn);
1237		}
1238
1239		dnode_sync(dn, tx);
1240	}
1241}
1242
1243/* ARGSUSED */
1244static void
1245dmu_objset_write_ready(zio_t *zio, arc_buf_t *abuf, void *arg)
1246{
1247	blkptr_t *bp = zio->io_bp;
1248	objset_t *os = arg;
1249	dnode_phys_t *dnp = &os->os_phys->os_meta_dnode;
1250
1251	ASSERT(!BP_IS_EMBEDDED(bp));
1252	ASSERT3U(BP_GET_TYPE(bp), ==, DMU_OT_OBJSET);
1253	ASSERT0(BP_GET_LEVEL(bp));
1254
1255	/*
1256	 * Update rootbp fill count: it should be the number of objects
1257	 * allocated in the object set (not counting the "special"
1258	 * objects that are stored in the objset_phys_t -- the meta
1259	 * dnode and user/group accounting objects).
1260	 */
1261	bp->blk_fill = 0;
1262	for (int i = 0; i < dnp->dn_nblkptr; i++)
1263		bp->blk_fill += BP_GET_FILL(&dnp->dn_blkptr[i]);
1264	if (os->os_dsl_dataset != NULL)
1265		rrw_enter(&os->os_dsl_dataset->ds_bp_rwlock, RW_WRITER, FTAG);
1266	*os->os_rootbp = *bp;
1267	if (os->os_dsl_dataset != NULL)
1268		rrw_exit(&os->os_dsl_dataset->ds_bp_rwlock, FTAG);
1269}
1270
1271/* ARGSUSED */
1272static void
1273dmu_objset_write_done(zio_t *zio, arc_buf_t *abuf, void *arg)
1274{
1275	blkptr_t *bp = zio->io_bp;
1276	blkptr_t *bp_orig = &zio->io_bp_orig;
1277	objset_t *os = arg;
1278
1279	if (zio->io_flags & ZIO_FLAG_IO_REWRITE) {
1280		ASSERT(BP_EQUAL(bp, bp_orig));
1281	} else {
1282		dsl_dataset_t *ds = os->os_dsl_dataset;
1283		dmu_tx_t *tx = os->os_synctx;
1284
1285		(void) dsl_dataset_block_kill(ds, bp_orig, tx, B_TRUE);
1286		dsl_dataset_block_born(ds, bp, tx);
1287	}
1288	kmem_free(bp, sizeof (*bp));
1289}
1290
1291typedef struct sync_dnodes_arg {
1292	multilist_t *sda_list;
1293	int sda_sublist_idx;
1294	multilist_t *sda_newlist;
1295	dmu_tx_t *sda_tx;
1296} sync_dnodes_arg_t;
1297
1298static void
1299sync_dnodes_task(void *arg)
1300{
1301	sync_dnodes_arg_t *sda = arg;
1302
1303	multilist_sublist_t *ms =
1304	    multilist_sublist_lock(sda->sda_list, sda->sda_sublist_idx);
1305
1306	dmu_objset_sync_dnodes(ms, sda->sda_tx);
1307
1308	multilist_sublist_unlock(ms);
1309
1310	kmem_free(sda, sizeof (*sda));
1311}
1312
1313
1314/* called from dsl */
1315void
1316dmu_objset_sync(objset_t *os, zio_t *pio, dmu_tx_t *tx)
1317{
1318	int txgoff;
1319	zbookmark_phys_t zb;
1320	zio_prop_t zp;
1321	zio_t *zio;
1322	list_t *list;
1323	dbuf_dirty_record_t *dr;
1324	int num_sublists;
1325	multilist_t *ml;
1326	blkptr_t *blkptr_copy = kmem_alloc(sizeof (*os->os_rootbp), KM_SLEEP);
1327	*blkptr_copy = *os->os_rootbp;
1328
1329	dprintf_ds(os->os_dsl_dataset, "txg=%llu\n", tx->tx_txg);
1330
1331	ASSERT(dmu_tx_is_syncing(tx));
1332	/* XXX the write_done callback should really give us the tx... */
1333	os->os_synctx = tx;
1334
1335	if (os->os_dsl_dataset == NULL) {
1336		/*
1337		 * This is the MOS.  If we have upgraded,
1338		 * spa_max_replication() could change, so reset
1339		 * os_copies here.
1340		 */
1341		os->os_copies = spa_max_replication(os->os_spa);
1342	}
1343
1344	/*
1345	 * Create the root block IO
1346	 */
1347	SET_BOOKMARK(&zb, os->os_dsl_dataset ?
1348	    os->os_dsl_dataset->ds_object : DMU_META_OBJSET,
1349	    ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);
1350	arc_release(os->os_phys_buf, &os->os_phys_buf);
1351
1352	dmu_write_policy(os, NULL, 0, 0, &zp);
1353
1354	zio = arc_write(pio, os->os_spa, tx->tx_txg,
1355	    blkptr_copy, os->os_phys_buf, DMU_OS_IS_L2CACHEABLE(os),
1356	    &zp, dmu_objset_write_ready, NULL, NULL, dmu_objset_write_done,
1357	    os, ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);
1358
1359	/*
1360	 * Sync special dnodes - the parent IO for the sync is the root block
1361	 */
1362	DMU_META_DNODE(os)->dn_zio = zio;
1363	dnode_sync(DMU_META_DNODE(os), tx);
1364
1365	os->os_phys->os_flags = os->os_flags;
1366
1367	if (DMU_USERUSED_DNODE(os) &&
1368	    DMU_USERUSED_DNODE(os)->dn_type != DMU_OT_NONE) {
1369		DMU_USERUSED_DNODE(os)->dn_zio = zio;
1370		dnode_sync(DMU_USERUSED_DNODE(os), tx);
1371		DMU_GROUPUSED_DNODE(os)->dn_zio = zio;
1372		dnode_sync(DMU_GROUPUSED_DNODE(os), tx);
1373	}
1374
1375	txgoff = tx->tx_txg & TXG_MASK;
1376
1377	if (dmu_objset_userused_enabled(os)) {
1378		/*
1379		 * We must create the list here because it uses the
1380		 * dn_dirty_link[] of this txg.  But it may already
1381		 * exist because we call dsl_dataset_sync() twice per txg.
1382		 */
1383		if (os->os_synced_dnodes == NULL) {
1384			os->os_synced_dnodes =
1385			    multilist_create(sizeof (dnode_t),
1386			    offsetof(dnode_t, dn_dirty_link[txgoff]),
1387			    dnode_multilist_index_func);
1388		} else {
1389			ASSERT3U(os->os_synced_dnodes->ml_offset, ==,
1390			    offsetof(dnode_t, dn_dirty_link[txgoff]));
1391		}
1392	}
1393
1394	ml = os->os_dirty_dnodes[txgoff];
1395	num_sublists = multilist_get_num_sublists(ml);
1396	for (int i = 0; i < num_sublists; i++) {
1397		if (multilist_sublist_is_empty_idx(ml, i))
1398			continue;
1399		sync_dnodes_arg_t *sda = kmem_alloc(sizeof (*sda), KM_SLEEP);
1400		sda->sda_list = ml;
1401		sda->sda_sublist_idx = i;
1402		sda->sda_tx = tx;
1403		(void) taskq_dispatch(dmu_objset_pool(os)->dp_sync_taskq,
1404		    sync_dnodes_task, sda, 0);
1405		/* callback frees sda */
1406	}
1407	taskq_wait(dmu_objset_pool(os)->dp_sync_taskq);
1408
1409	list = &DMU_META_DNODE(os)->dn_dirty_records[txgoff];
1410	while ((dr = list_head(list)) != NULL) {
1411		ASSERT0(dr->dr_dbuf->db_level);
1412		list_remove(list, dr);
1413		if (dr->dr_zio)
1414			zio_nowait(dr->dr_zio);
1415	}
1416
1417	/* Enable dnode backfill if enough objects have been freed. */
1418	if (os->os_freed_dnodes >= dmu_rescan_dnode_threshold) {
1419		os->os_rescan_dnodes = B_TRUE;
1420		os->os_freed_dnodes = 0;
1421	}
1422
1423	/*
1424	 * Free intent log blocks up to this tx.
1425	 */
1426	zil_sync(os->os_zil, tx);
1427	os->os_phys->os_zil_header = os->os_zil_header;
1428	zio_nowait(zio);
1429}
1430
1431boolean_t
1432dmu_objset_is_dirty(objset_t *os, uint64_t txg)
1433{
1434	return (!multilist_is_empty(os->os_dirty_dnodes[txg & TXG_MASK]));
1435}
1436
1437static objset_used_cb_t *used_cbs[DMU_OST_NUMTYPES];
1438
1439void
1440dmu_objset_register_type(dmu_objset_type_t ost, objset_used_cb_t *cb)
1441{
1442	used_cbs[ost] = cb;
1443}
1444
1445boolean_t
1446dmu_objset_userused_enabled(objset_t *os)
1447{
1448	return (spa_version(os->os_spa) >= SPA_VERSION_USERSPACE &&
1449	    used_cbs[os->os_phys->os_type] != NULL &&
1450	    DMU_USERUSED_DNODE(os) != NULL);
1451}
1452
1453typedef struct userquota_node {
1454	uint64_t uqn_id;
1455	int64_t uqn_delta;
1456	avl_node_t uqn_node;
1457} userquota_node_t;
1458
1459typedef struct userquota_cache {
1460	avl_tree_t uqc_user_deltas;
1461	avl_tree_t uqc_group_deltas;
1462} userquota_cache_t;
1463
1464static int
1465userquota_compare(const void *l, const void *r)
1466{
1467	const userquota_node_t *luqn = l;
1468	const userquota_node_t *ruqn = r;
1469
1470	if (luqn->uqn_id < ruqn->uqn_id)
1471		return (-1);
1472	if (luqn->uqn_id > ruqn->uqn_id)
1473		return (1);
1474	return (0);
1475}
1476
1477static void
1478do_userquota_cacheflush(objset_t *os, userquota_cache_t *cache, dmu_tx_t *tx)
1479{
1480	void *cookie;
1481	userquota_node_t *uqn;
1482
1483	ASSERT(dmu_tx_is_syncing(tx));
1484
1485	cookie = NULL;
1486	while ((uqn = avl_destroy_nodes(&cache->uqc_user_deltas,
1487	    &cookie)) != NULL) {
1488		/*
1489		 * os_userused_lock protects against concurrent calls to
1490		 * zap_increment_int().  It's needed because zap_increment_int()
1491		 * is not thread-safe (i.e. not atomic).
1492		 */
1493		mutex_enter(&os->os_userused_lock);
1494		VERIFY0(zap_increment_int(os, DMU_USERUSED_OBJECT,
1495		    uqn->uqn_id, uqn->uqn_delta, tx));
1496		mutex_exit(&os->os_userused_lock);
1497		kmem_free(uqn, sizeof (*uqn));
1498	}
1499	avl_destroy(&cache->uqc_user_deltas);
1500
1501	cookie = NULL;
1502	while ((uqn = avl_destroy_nodes(&cache->uqc_group_deltas,
1503	    &cookie)) != NULL) {
1504		mutex_enter(&os->os_userused_lock);
1505		VERIFY0(zap_increment_int(os, DMU_GROUPUSED_OBJECT,
1506		    uqn->uqn_id, uqn->uqn_delta, tx));
1507		mutex_exit(&os->os_userused_lock);
1508		kmem_free(uqn, sizeof (*uqn));
1509	}
1510	avl_destroy(&cache->uqc_group_deltas);
1511}
1512
1513static void
1514userquota_update_cache(avl_tree_t *avl, uint64_t id, int64_t delta)
1515{
1516	userquota_node_t search = { .uqn_id = id };
1517	avl_index_t idx;
1518
1519	userquota_node_t *uqn = avl_find(avl, &search, &idx);
1520	if (uqn == NULL) {
1521		uqn = kmem_zalloc(sizeof (*uqn), KM_SLEEP);
1522		uqn->uqn_id = id;
1523		avl_insert(avl, uqn, idx);
1524	}
1525	uqn->uqn_delta += delta;
1526}
1527
1528static void
1529do_userquota_update(userquota_cache_t *cache, uint64_t used, uint64_t flags,
1530    uint64_t user, uint64_t group, boolean_t subtract)
1531{
1532	if ((flags & DNODE_FLAG_USERUSED_ACCOUNTED)) {
1533		int64_t delta = DNODE_SIZE + used;
1534		if (subtract)
1535			delta = -delta;
1536
1537		userquota_update_cache(&cache->uqc_user_deltas, user, delta);
1538		userquota_update_cache(&cache->uqc_group_deltas, group, delta);
1539	}
1540}
1541
1542typedef struct userquota_updates_arg {
1543	objset_t *uua_os;
1544	int uua_sublist_idx;
1545	dmu_tx_t *uua_tx;
1546} userquota_updates_arg_t;
1547
1548static void
1549userquota_updates_task(void *arg)
1550{
1551	userquota_updates_arg_t *uua = arg;
1552	objset_t *os = uua->uua_os;
1553	dmu_tx_t *tx = uua->uua_tx;
1554	dnode_t *dn;
1555	userquota_cache_t cache = { 0 };
1556
1557	multilist_sublist_t *list =
1558	    multilist_sublist_lock(os->os_synced_dnodes, uua->uua_sublist_idx);
1559
1560	ASSERT(multilist_sublist_head(list) == NULL ||
1561	    dmu_objset_userused_enabled(os));
1562	avl_create(&cache.uqc_user_deltas, userquota_compare,
1563	    sizeof (userquota_node_t), offsetof(userquota_node_t, uqn_node));
1564	avl_create(&cache.uqc_group_deltas, userquota_compare,
1565	    sizeof (userquota_node_t), offsetof(userquota_node_t, uqn_node));
1566
1567	while ((dn = multilist_sublist_head(list)) != NULL) {
1568		int flags;
1569		ASSERT(!DMU_OBJECT_IS_SPECIAL(dn->dn_object));
1570		ASSERT(dn->dn_phys->dn_type == DMU_OT_NONE ||
1571		    dn->dn_phys->dn_flags &
1572		    DNODE_FLAG_USERUSED_ACCOUNTED);
1573
1574		flags = dn->dn_id_flags;
1575		ASSERT(flags);
1576		if (flags & DN_ID_OLD_EXIST)  {
1577			do_userquota_update(&cache,
1578			    dn->dn_oldused, dn->dn_oldflags,
1579			    dn->dn_olduid, dn->dn_oldgid, B_TRUE);
1580		}
1581		if (flags & DN_ID_NEW_EXIST) {
1582			do_userquota_update(&cache,
1583			    DN_USED_BYTES(dn->dn_phys),
1584			    dn->dn_phys->dn_flags,  dn->dn_newuid,
1585			    dn->dn_newgid, B_FALSE);
1586		}
1587
1588		mutex_enter(&dn->dn_mtx);
1589		dn->dn_oldused = 0;
1590		dn->dn_oldflags = 0;
1591		if (dn->dn_id_flags & DN_ID_NEW_EXIST) {
1592			dn->dn_olduid = dn->dn_newuid;
1593			dn->dn_oldgid = dn->dn_newgid;
1594			dn->dn_id_flags |= DN_ID_OLD_EXIST;
1595			if (dn->dn_bonuslen == 0)
1596				dn->dn_id_flags |= DN_ID_CHKED_SPILL;
1597			else
1598				dn->dn_id_flags |= DN_ID_CHKED_BONUS;
1599		}
1600		dn->dn_id_flags &= ~(DN_ID_NEW_EXIST);
1601		mutex_exit(&dn->dn_mtx);
1602
1603		multilist_sublist_remove(list, dn);
1604		dnode_rele(dn, os->os_synced_dnodes);
1605	}
1606	do_userquota_cacheflush(os, &cache, tx);
1607	multilist_sublist_unlock(list);
1608	kmem_free(uua, sizeof (*uua));
1609}
1610
1611void
1612dmu_objset_do_userquota_updates(objset_t *os, dmu_tx_t *tx)
1613{
1614	int num_sublists;
1615
1616	if (!dmu_objset_userused_enabled(os))
1617		return;
1618
1619	/* Allocate the user/groupused objects if necessary. */
1620	if (DMU_USERUSED_DNODE(os)->dn_type == DMU_OT_NONE) {
1621		VERIFY0(zap_create_claim(os,
1622		    DMU_USERUSED_OBJECT,
1623		    DMU_OT_USERGROUP_USED, DMU_OT_NONE, 0, tx));
1624		VERIFY0(zap_create_claim(os,
1625		    DMU_GROUPUSED_OBJECT,
1626		    DMU_OT_USERGROUP_USED, DMU_OT_NONE, 0, tx));
1627	}
1628
1629	num_sublists = multilist_get_num_sublists(os->os_synced_dnodes);
1630	for (int i = 0; i < num_sublists; i++) {
1631		if (multilist_sublist_is_empty_idx(os->os_synced_dnodes, i))
1632			continue;
1633		userquota_updates_arg_t *uua =
1634		    kmem_alloc(sizeof (*uua), KM_SLEEP);
1635		uua->uua_os = os;
1636		uua->uua_sublist_idx = i;
1637		uua->uua_tx = tx;
1638		/* note: caller does taskq_wait() */
1639		(void) taskq_dispatch(dmu_objset_pool(os)->dp_sync_taskq,
1640		    userquota_updates_task, uua, 0);
1641		/* callback frees uua */
1642	}
1643}
1644
1645/*
1646 * Returns a pointer to data to find uid/gid from
1647 *
1648 * If a dirty record for transaction group that is syncing can't
1649 * be found then NULL is returned.  In the NULL case it is assumed
1650 * the uid/gid aren't changing.
1651 */
1652static void *
1653dmu_objset_userquota_find_data(dmu_buf_impl_t *db, dmu_tx_t *tx)
1654{
1655	dbuf_dirty_record_t *dr, **drp;
1656	void *data;
1657
1658	if (db->db_dirtycnt == 0)
1659		return (db->db.db_data);  /* Nothing is changing */
1660
1661	for (drp = &db->db_last_dirty; (dr = *drp) != NULL; drp = &dr->dr_next)
1662		if (dr->dr_txg == tx->tx_txg)
1663			break;
1664
1665	if (dr == NULL) {
1666		data = NULL;
1667	} else {
1668		dnode_t *dn;
1669
1670		DB_DNODE_ENTER(dr->dr_dbuf);
1671		dn = DB_DNODE(dr->dr_dbuf);
1672
1673		if (dn->dn_bonuslen == 0 &&
1674		    dr->dr_dbuf->db_blkid == DMU_SPILL_BLKID)
1675			data = dr->dt.dl.dr_data->b_data;
1676		else
1677			data = dr->dt.dl.dr_data;
1678
1679		DB_DNODE_EXIT(dr->dr_dbuf);
1680	}
1681
1682	return (data);
1683}
1684
1685void
1686dmu_objset_userquota_get_ids(dnode_t *dn, boolean_t before, dmu_tx_t *tx)
1687{
1688	objset_t *os = dn->dn_objset;
1689	void *data = NULL;
1690	dmu_buf_impl_t *db = NULL;
1691	uint64_t *user = NULL;
1692	uint64_t *group = NULL;
1693	int flags = dn->dn_id_flags;
1694	int error;
1695	boolean_t have_spill = B_FALSE;
1696
1697	if (!dmu_objset_userused_enabled(dn->dn_objset))
1698		return;
1699
1700	if (before && (flags & (DN_ID_CHKED_BONUS|DN_ID_OLD_EXIST|
1701	    DN_ID_CHKED_SPILL)))
1702		return;
1703
1704	if (before && dn->dn_bonuslen != 0)
1705		data = DN_BONUS(dn->dn_phys);
1706	else if (!before && dn->dn_bonuslen != 0) {
1707		if (dn->dn_bonus) {
1708			db = dn->dn_bonus;
1709			mutex_enter(&db->db_mtx);
1710			data = dmu_objset_userquota_find_data(db, tx);
1711		} else {
1712			data = DN_BONUS(dn->dn_phys);
1713		}
1714	} else if (dn->dn_bonuslen == 0 && dn->dn_bonustype == DMU_OT_SA) {
1715			int rf = 0;
1716
1717			if (RW_WRITE_HELD(&dn->dn_struct_rwlock))
1718				rf |= DB_RF_HAVESTRUCT;
1719			error = dmu_spill_hold_by_dnode(dn,
1720			    rf | DB_RF_MUST_SUCCEED,
1721			    FTAG, (dmu_buf_t **)&db);
1722			ASSERT(error == 0);
1723			mutex_enter(&db->db_mtx);
1724			data = (before) ? db->db.db_data :
1725			    dmu_objset_userquota_find_data(db, tx);
1726			have_spill = B_TRUE;
1727	} else {
1728		mutex_enter(&dn->dn_mtx);
1729		dn->dn_id_flags |= DN_ID_CHKED_BONUS;
1730		mutex_exit(&dn->dn_mtx);
1731		return;
1732	}
1733
1734	if (before) {
1735		ASSERT(data);
1736		user = &dn->dn_olduid;
1737		group = &dn->dn_oldgid;
1738	} else if (data) {
1739		user = &dn->dn_newuid;
1740		group = &dn->dn_newgid;
1741	}
1742
1743	/*
1744	 * Must always call the callback in case the object
1745	 * type has changed and that type isn't an object type to track
1746	 */
1747	error = used_cbs[os->os_phys->os_type](dn->dn_bonustype, data,
1748	    user, group);
1749
1750	/*
1751	 * Preserve existing uid/gid when the callback can't determine
1752	 * what the new uid/gid are and the callback returned EEXIST.
1753	 * The EEXIST error tells us to just use the existing uid/gid.
1754	 * If we don't know what the old values are then just assign
1755	 * them to 0, since that is a new file  being created.
1756	 */
1757	if (!before && data == NULL && error == EEXIST) {
1758		if (flags & DN_ID_OLD_EXIST) {
1759			dn->dn_newuid = dn->dn_olduid;
1760			dn->dn_newgid = dn->dn_oldgid;
1761		} else {
1762			dn->dn_newuid = 0;
1763			dn->dn_newgid = 0;
1764		}
1765		error = 0;
1766	}
1767
1768	if (db)
1769		mutex_exit(&db->db_mtx);
1770
1771	mutex_enter(&dn->dn_mtx);
1772	if (error == 0 && before)
1773		dn->dn_id_flags |= DN_ID_OLD_EXIST;
1774	if (error == 0 && !before)
1775		dn->dn_id_flags |= DN_ID_NEW_EXIST;
1776
1777	if (have_spill) {
1778		dn->dn_id_flags |= DN_ID_CHKED_SPILL;
1779	} else {
1780		dn->dn_id_flags |= DN_ID_CHKED_BONUS;
1781	}
1782	mutex_exit(&dn->dn_mtx);
1783	if (have_spill)
1784		dmu_buf_rele((dmu_buf_t *)db, FTAG);
1785}
1786
1787boolean_t
1788dmu_objset_userspace_present(objset_t *os)
1789{
1790	return (os->os_phys->os_flags &
1791	    OBJSET_FLAG_USERACCOUNTING_COMPLETE);
1792}
1793
1794int
1795dmu_objset_userspace_upgrade(objset_t *os)
1796{
1797	uint64_t obj;
1798	int err = 0;
1799
1800	if (dmu_objset_userspace_present(os))
1801		return (0);
1802	if (!dmu_objset_userused_enabled(os))
1803		return (SET_ERROR(ENOTSUP));
1804	if (dmu_objset_is_snapshot(os))
1805		return (SET_ERROR(EINVAL));
1806
1807	/*
1808	 * We simply need to mark every object dirty, so that it will be
1809	 * synced out and now accounted.  If this is called
1810	 * concurrently, or if we already did some work before crashing,
1811	 * that's fine, since we track each object's accounted state
1812	 * independently.
1813	 */
1814
1815	for (obj = 0; err == 0; err = dmu_object_next(os, &obj, FALSE, 0)) {
1816		dmu_tx_t *tx;
1817		dmu_buf_t *db;
1818		int objerr;
1819
1820		if (issig(JUSTLOOKING) && issig(FORREAL))
1821			return (SET_ERROR(EINTR));
1822
1823		objerr = dmu_bonus_hold(os, obj, FTAG, &db);
1824		if (objerr != 0)
1825			continue;
1826		tx = dmu_tx_create(os);
1827		dmu_tx_hold_bonus(tx, obj);
1828		objerr = dmu_tx_assign(tx, TXG_WAIT);
1829		if (objerr != 0) {
1830			dmu_tx_abort(tx);
1831			continue;
1832		}
1833		dmu_buf_will_dirty(db, tx);
1834		dmu_buf_rele(db, FTAG);
1835		dmu_tx_commit(tx);
1836	}
1837
1838	os->os_flags |= OBJSET_FLAG_USERACCOUNTING_COMPLETE;
1839	txg_wait_synced(dmu_objset_pool(os), 0);
1840	return (0);
1841}
1842
1843void
1844dmu_objset_space(objset_t *os, uint64_t *refdbytesp, uint64_t *availbytesp,
1845    uint64_t *usedobjsp, uint64_t *availobjsp)
1846{
1847	dsl_dataset_space(os->os_dsl_dataset, refdbytesp, availbytesp,
1848	    usedobjsp, availobjsp);
1849}
1850
1851uint64_t
1852dmu_objset_fsid_guid(objset_t *os)
1853{
1854	return (dsl_dataset_fsid_guid(os->os_dsl_dataset));
1855}
1856
1857void
1858dmu_objset_fast_stat(objset_t *os, dmu_objset_stats_t *stat)
1859{
1860	stat->dds_type = os->os_phys->os_type;
1861	if (os->os_dsl_dataset)
1862		dsl_dataset_fast_stat(os->os_dsl_dataset, stat);
1863}
1864
1865void
1866dmu_objset_stats(objset_t *os, nvlist_t *nv)
1867{
1868	ASSERT(os->os_dsl_dataset ||
1869	    os->os_phys->os_type == DMU_OST_META);
1870
1871	if (os->os_dsl_dataset != NULL)
1872		dsl_dataset_stats(os->os_dsl_dataset, nv);
1873
1874	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_TYPE,
1875	    os->os_phys->os_type);
1876	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USERACCOUNTING,
1877	    dmu_objset_userspace_present(os));
1878}
1879
1880int
1881dmu_objset_is_snapshot(objset_t *os)
1882{
1883	if (os->os_dsl_dataset != NULL)
1884		return (os->os_dsl_dataset->ds_is_snapshot);
1885	else
1886		return (B_FALSE);
1887}
1888
1889int
1890dmu_snapshot_realname(objset_t *os, char *name, char *real, int maxlen,
1891    boolean_t *conflict)
1892{
1893	dsl_dataset_t *ds = os->os_dsl_dataset;
1894	uint64_t ignored;
1895
1896	if (dsl_dataset_phys(ds)->ds_snapnames_zapobj == 0)
1897		return (SET_ERROR(ENOENT));
1898
1899	return (zap_lookup_norm(ds->ds_dir->dd_pool->dp_meta_objset,
1900	    dsl_dataset_phys(ds)->ds_snapnames_zapobj, name, 8, 1, &ignored,
1901	    MT_NORMALIZE, real, maxlen, conflict));
1902}
1903
1904int
1905dmu_snapshot_list_next(objset_t *os, int namelen, char *name,
1906    uint64_t *idp, uint64_t *offp, boolean_t *case_conflict)
1907{
1908	dsl_dataset_t *ds = os->os_dsl_dataset;
1909	zap_cursor_t cursor;
1910	zap_attribute_t attr;
1911
1912	ASSERT(dsl_pool_config_held(dmu_objset_pool(os)));
1913
1914	if (dsl_dataset_phys(ds)->ds_snapnames_zapobj == 0)
1915		return (SET_ERROR(ENOENT));
1916
1917	zap_cursor_init_serialized(&cursor,
1918	    ds->ds_dir->dd_pool->dp_meta_objset,
1919	    dsl_dataset_phys(ds)->ds_snapnames_zapobj, *offp);
1920
1921	if (zap_cursor_retrieve(&cursor, &attr) != 0) {
1922		zap_cursor_fini(&cursor);
1923		return (SET_ERROR(ENOENT));
1924	}
1925
1926	if (strlen(attr.za_name) + 1 > namelen) {
1927		zap_cursor_fini(&cursor);
1928		return (SET_ERROR(ENAMETOOLONG));
1929	}
1930
1931	(void) strcpy(name, attr.za_name);
1932	if (idp)
1933		*idp = attr.za_first_integer;
1934	if (case_conflict)
1935		*case_conflict = attr.za_normalization_conflict;
1936	zap_cursor_advance(&cursor);
1937	*offp = zap_cursor_serialize(&cursor);
1938	zap_cursor_fini(&cursor);
1939
1940	return (0);
1941}
1942
1943int
1944dmu_dir_list_next(objset_t *os, int namelen, char *name,
1945    uint64_t *idp, uint64_t *offp)
1946{
1947	dsl_dir_t *dd = os->os_dsl_dataset->ds_dir;
1948	zap_cursor_t cursor;
1949	zap_attribute_t attr;
1950
1951	/* there is no next dir on a snapshot! */
1952	if (os->os_dsl_dataset->ds_object !=
1953	    dsl_dir_phys(dd)->dd_head_dataset_obj)
1954		return (SET_ERROR(ENOENT));
1955
1956	zap_cursor_init_serialized(&cursor,
1957	    dd->dd_pool->dp_meta_objset,
1958	    dsl_dir_phys(dd)->dd_child_dir_zapobj, *offp);
1959
1960	if (zap_cursor_retrieve(&cursor, &attr) != 0) {
1961		zap_cursor_fini(&cursor);
1962		return (SET_ERROR(ENOENT));
1963	}
1964
1965	if (strlen(attr.za_name) + 1 > namelen) {
1966		zap_cursor_fini(&cursor);
1967		return (SET_ERROR(ENAMETOOLONG));
1968	}
1969
1970	(void) strcpy(name, attr.za_name);
1971	if (idp)
1972		*idp = attr.za_first_integer;
1973	zap_cursor_advance(&cursor);
1974	*offp = zap_cursor_serialize(&cursor);
1975	zap_cursor_fini(&cursor);
1976
1977	return (0);
1978}
1979
1980typedef struct dmu_objset_find_ctx {
1981	taskq_t		*dc_tq;
1982	dsl_pool_t	*dc_dp;
1983	uint64_t	dc_ddobj;
1984	char		*dc_ddname; /* last component of ddobj's name */
1985	int		(*dc_func)(dsl_pool_t *, dsl_dataset_t *, void *);
1986	void		*dc_arg;
1987	int		dc_flags;
1988	kmutex_t	*dc_error_lock;
1989	int		*dc_error;
1990} dmu_objset_find_ctx_t;
1991
1992static void
1993dmu_objset_find_dp_impl(dmu_objset_find_ctx_t *dcp)
1994{
1995	dsl_pool_t *dp = dcp->dc_dp;
1996	dsl_dir_t *dd;
1997	dsl_dataset_t *ds;
1998	zap_cursor_t zc;
1999	zap_attribute_t *attr;
2000	uint64_t thisobj;
2001	int err = 0;
2002
2003	/* don't process if there already was an error */
2004	if (*dcp->dc_error != 0)
2005		goto out;
2006
2007	/*
2008	 * Note: passing the name (dc_ddname) here is optional, but it
2009	 * improves performance because we don't need to call
2010	 * zap_value_search() to determine the name.
2011	 */
2012	err = dsl_dir_hold_obj(dp, dcp->dc_ddobj, dcp->dc_ddname, FTAG, &dd);
2013	if (err != 0)
2014		goto out;
2015
2016	/* Don't visit hidden ($MOS & $ORIGIN) objsets. */
2017	if (dd->dd_myname[0] == '$') {
2018		dsl_dir_rele(dd, FTAG);
2019		goto out;
2020	}
2021
2022	thisobj = dsl_dir_phys(dd)->dd_head_dataset_obj;
2023	attr = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);
2024
2025	/*
2026	 * Iterate over all children.
2027	 */
2028	if (dcp->dc_flags & DS_FIND_CHILDREN) {
2029		for (zap_cursor_init(&zc, dp->dp_meta_objset,
2030		    dsl_dir_phys(dd)->dd_child_dir_zapobj);
2031		    zap_cursor_retrieve(&zc, attr) == 0;
2032		    (void) zap_cursor_advance(&zc)) {
2033			ASSERT3U(attr->za_integer_length, ==,
2034			    sizeof (uint64_t));
2035			ASSERT3U(attr->za_num_integers, ==, 1);
2036
2037			dmu_objset_find_ctx_t *child_dcp =
2038			    kmem_alloc(sizeof (*child_dcp), KM_SLEEP);
2039			*child_dcp = *dcp;
2040			child_dcp->dc_ddobj = attr->za_first_integer;
2041			child_dcp->dc_ddname = spa_strdup(attr->za_name);
2042			if (dcp->dc_tq != NULL)
2043				(void) taskq_dispatch(dcp->dc_tq,
2044				    dmu_objset_find_dp_cb, child_dcp, TQ_SLEEP);
2045			else
2046				dmu_objset_find_dp_impl(child_dcp);
2047		}
2048		zap_cursor_fini(&zc);
2049	}
2050
2051	/*
2052	 * Iterate over all snapshots.
2053	 */
2054	if (dcp->dc_flags & DS_FIND_SNAPSHOTS) {
2055		dsl_dataset_t *ds;
2056		err = dsl_dataset_hold_obj(dp, thisobj, FTAG, &ds);
2057
2058		if (err == 0) {
2059			uint64_t snapobj;
2060
2061			snapobj = dsl_dataset_phys(ds)->ds_snapnames_zapobj;
2062			dsl_dataset_rele(ds, FTAG);
2063
2064			for (zap_cursor_init(&zc, dp->dp_meta_objset, snapobj);
2065			    zap_cursor_retrieve(&zc, attr) == 0;
2066			    (void) zap_cursor_advance(&zc)) {
2067				ASSERT3U(attr->za_integer_length, ==,
2068				    sizeof (uint64_t));
2069				ASSERT3U(attr->za_num_integers, ==, 1);
2070
2071				err = dsl_dataset_hold_obj(dp,
2072				    attr->za_first_integer, FTAG, &ds);
2073				if (err != 0)
2074					break;
2075				err = dcp->dc_func(dp, ds, dcp->dc_arg);
2076				dsl_dataset_rele(ds, FTAG);
2077				if (err != 0)
2078					break;
2079			}
2080			zap_cursor_fini(&zc);
2081		}
2082	}
2083
2084	kmem_free(attr, sizeof (zap_attribute_t));
2085
2086	if (err != 0) {
2087		dsl_dir_rele(dd, FTAG);
2088		goto out;
2089	}
2090
2091	/*
2092	 * Apply to self.
2093	 */
2094	err = dsl_dataset_hold_obj(dp, thisobj, FTAG, &ds);
2095
2096	/*
2097	 * Note: we hold the dir while calling dsl_dataset_hold_obj() so
2098	 * that the dir will remain cached, and we won't have to re-instantiate
2099	 * it (which could be expensive due to finding its name via
2100	 * zap_value_search()).
2101	 */
2102	dsl_dir_rele(dd, FTAG);
2103	if (err != 0)
2104		goto out;
2105	err = dcp->dc_func(dp, ds, dcp->dc_arg);
2106	dsl_dataset_rele(ds, FTAG);
2107
2108out:
2109	if (err != 0) {
2110		mutex_enter(dcp->dc_error_lock);
2111		/* only keep first error */
2112		if (*dcp->dc_error == 0)
2113			*dcp->dc_error = err;
2114		mutex_exit(dcp->dc_error_lock);
2115	}
2116
2117	if (dcp->dc_ddname != NULL)
2118		spa_strfree(dcp->dc_ddname);
2119	kmem_free(dcp, sizeof (*dcp));
2120}
2121
2122static void
2123dmu_objset_find_dp_cb(void *arg)
2124{
2125	dmu_objset_find_ctx_t *dcp = arg;
2126	dsl_pool_t *dp = dcp->dc_dp;
2127
2128	/*
2129	 * We need to get a pool_config_lock here, as there are several
2130	 * asssert(pool_config_held) down the stack. Getting a lock via
2131	 * dsl_pool_config_enter is risky, as it might be stalled by a
2132	 * pending writer. This would deadlock, as the write lock can
2133	 * only be granted when our parent thread gives up the lock.
2134	 * The _prio interface gives us priority over a pending writer.
2135	 */
2136	dsl_pool_config_enter_prio(dp, FTAG);
2137
2138	dmu_objset_find_dp_impl(dcp);
2139
2140	dsl_pool_config_exit(dp, FTAG);
2141}
2142
2143/*
2144 * Find objsets under and including ddobj, call func(ds) on each.
2145 * The order for the enumeration is completely undefined.
2146 * func is called with dsl_pool_config held.
2147 */
2148int
2149dmu_objset_find_dp(dsl_pool_t *dp, uint64_t ddobj,
2150    int func(dsl_pool_t *, dsl_dataset_t *, void *), void *arg, int flags)
2151{
2152	int error = 0;
2153	taskq_t *tq = NULL;
2154	int ntasks;
2155	dmu_objset_find_ctx_t *dcp;
2156	kmutex_t err_lock;
2157
2158	mutex_init(&err_lock, NULL, MUTEX_DEFAULT, NULL);
2159	dcp = kmem_alloc(sizeof (*dcp), KM_SLEEP);
2160	dcp->dc_tq = NULL;
2161	dcp->dc_dp = dp;
2162	dcp->dc_ddobj = ddobj;
2163	dcp->dc_ddname = NULL;
2164	dcp->dc_func = func;
2165	dcp->dc_arg = arg;
2166	dcp->dc_flags = flags;
2167	dcp->dc_error_lock = &err_lock;
2168	dcp->dc_error = &error;
2169
2170	if ((flags & DS_FIND_SERIALIZE) || dsl_pool_config_held_writer(dp)) {
2171		/*
2172		 * In case a write lock is held we can't make use of
2173		 * parallelism, as down the stack of the worker threads
2174		 * the lock is asserted via dsl_pool_config_held.
2175		 * In case of a read lock this is solved by getting a read
2176		 * lock in each worker thread, which isn't possible in case
2177		 * of a writer lock. So we fall back to the synchronous path
2178		 * here.
2179		 * In the future it might be possible to get some magic into
2180		 * dsl_pool_config_held in a way that it returns true for
2181		 * the worker threads so that a single lock held from this
2182		 * thread suffices. For now, stay single threaded.
2183		 */
2184		dmu_objset_find_dp_impl(dcp);
2185		mutex_destroy(&err_lock);
2186
2187		return (error);
2188	}
2189
2190	ntasks = dmu_find_threads;
2191	if (ntasks == 0)
2192		ntasks = vdev_count_leaves(dp->dp_spa) * 4;
2193	tq = taskq_create("dmu_objset_find", ntasks, minclsyspri, ntasks,
2194	    INT_MAX, 0);
2195	if (tq == NULL) {
2196		kmem_free(dcp, sizeof (*dcp));
2197		mutex_destroy(&err_lock);
2198
2199		return (SET_ERROR(ENOMEM));
2200	}
2201	dcp->dc_tq = tq;
2202
2203	/* dcp will be freed by task */
2204	(void) taskq_dispatch(tq, dmu_objset_find_dp_cb, dcp, TQ_SLEEP);
2205
2206	/*
2207	 * PORTING: this code relies on the property of taskq_wait to wait
2208	 * until no more tasks are queued and no more tasks are active. As
2209	 * we always queue new tasks from within other tasks, task_wait
2210	 * reliably waits for the full recursion to finish, even though we
2211	 * enqueue new tasks after taskq_wait has been called.
2212	 * On platforms other than illumos, taskq_wait may not have this
2213	 * property.
2214	 */
2215	taskq_wait(tq);
2216	taskq_destroy(tq);
2217	mutex_destroy(&err_lock);
2218
2219	return (error);
2220}
2221
2222/*
2223 * Find all objsets under name, and for each, call 'func(child_name, arg)'.
2224 * The dp_config_rwlock must not be held when this is called, and it
2225 * will not be held when the callback is called.
2226 * Therefore this function should only be used when the pool is not changing
2227 * (e.g. in syncing context), or the callback can deal with the possible races.
2228 */
2229static int
2230dmu_objset_find_impl(spa_t *spa, const char *name,
2231    int func(const char *, void *), void *arg, int flags)
2232{
2233	dsl_dir_t *dd;
2234	dsl_pool_t *dp = spa_get_dsl(spa);
2235	dsl_dataset_t *ds;
2236	zap_cursor_t zc;
2237	zap_attribute_t *attr;
2238	char *child;
2239	uint64_t thisobj;
2240	int err;
2241
2242	dsl_pool_config_enter(dp, FTAG);
2243
2244	err = dsl_dir_hold(dp, name, FTAG, &dd, NULL);
2245	if (err != 0) {
2246		dsl_pool_config_exit(dp, FTAG);
2247		return (err);
2248	}
2249
2250	/* Don't visit hidden ($MOS & $ORIGIN) objsets. */
2251	if (dd->dd_myname[0] == '$') {
2252		dsl_dir_rele(dd, FTAG);
2253		dsl_pool_config_exit(dp, FTAG);
2254		return (0);
2255	}
2256
2257	thisobj = dsl_dir_phys(dd)->dd_head_dataset_obj;
2258	attr = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);
2259
2260	/*
2261	 * Iterate over all children.
2262	 */
2263	if (flags & DS_FIND_CHILDREN) {
2264		for (zap_cursor_init(&zc, dp->dp_meta_objset,
2265		    dsl_dir_phys(dd)->dd_child_dir_zapobj);
2266		    zap_cursor_retrieve(&zc, attr) == 0;
2267		    (void) zap_cursor_advance(&zc)) {
2268			ASSERT3U(attr->za_integer_length, ==,
2269			    sizeof (uint64_t));
2270			ASSERT3U(attr->za_num_integers, ==, 1);
2271
2272			child = kmem_asprintf("%s/%s", name, attr->za_name);
2273			dsl_pool_config_exit(dp, FTAG);
2274			err = dmu_objset_find_impl(spa, child,
2275			    func, arg, flags);
2276			dsl_pool_config_enter(dp, FTAG);
2277			strfree(child);
2278			if (err != 0)
2279				break;
2280		}
2281		zap_cursor_fini(&zc);
2282
2283		if (err != 0) {
2284			dsl_dir_rele(dd, FTAG);
2285			dsl_pool_config_exit(dp, FTAG);
2286			kmem_free(attr, sizeof (zap_attribute_t));
2287			return (err);
2288		}
2289	}
2290
2291	/*
2292	 * Iterate over all snapshots.
2293	 */
2294	if (flags & DS_FIND_SNAPSHOTS) {
2295		err = dsl_dataset_hold_obj(dp, thisobj, FTAG, &ds);
2296
2297		if (err == 0) {
2298			uint64_t snapobj;
2299
2300			snapobj = dsl_dataset_phys(ds)->ds_snapnames_zapobj;
2301			dsl_dataset_rele(ds, FTAG);
2302
2303			for (zap_cursor_init(&zc, dp->dp_meta_objset, snapobj);
2304			    zap_cursor_retrieve(&zc, attr) == 0;
2305			    (void) zap_cursor_advance(&zc)) {
2306				ASSERT3U(attr->za_integer_length, ==,
2307				    sizeof (uint64_t));
2308				ASSERT3U(attr->za_num_integers, ==, 1);
2309
2310				child = kmem_asprintf("%s@%s",
2311				    name, attr->za_name);
2312				dsl_pool_config_exit(dp, FTAG);
2313				err = func(child, arg);
2314				dsl_pool_config_enter(dp, FTAG);
2315				strfree(child);
2316				if (err != 0)
2317					break;
2318			}
2319			zap_cursor_fini(&zc);
2320		}
2321	}
2322
2323	dsl_dir_rele(dd, FTAG);
2324	kmem_free(attr, sizeof (zap_attribute_t));
2325	dsl_pool_config_exit(dp, FTAG);
2326
2327	if (err != 0)
2328		return (err);
2329
2330	/* Apply to self. */
2331	return (func(name, arg));
2332}
2333
2334/*
2335 * See comment above dmu_objset_find_impl().
2336 */
2337int
2338dmu_objset_find(char *name, int func(const char *, void *), void *arg,
2339    int flags)
2340{
2341	spa_t *spa;
2342	int error;
2343
2344	error = spa_open(name, &spa, FTAG);
2345	if (error != 0)
2346		return (error);
2347	error = dmu_objset_find_impl(spa, name, func, arg, flags);
2348	spa_close(spa, FTAG);
2349	return (error);
2350}
2351
2352void
2353dmu_objset_set_user(objset_t *os, void *user_ptr)
2354{
2355	ASSERT(MUTEX_HELD(&os->os_user_ptr_lock));
2356	os->os_user_ptr = user_ptr;
2357}
2358
2359void *
2360dmu_objset_get_user(objset_t *os)
2361{
2362	ASSERT(MUTEX_HELD(&os->os_user_ptr_lock));
2363	return (os->os_user_ptr);
2364}
2365
2366/*
2367 * Determine name of filesystem, given name of snapshot.
2368 * buf must be at least ZFS_MAX_DATASET_NAME_LEN bytes
2369 */
2370int
2371dmu_fsname(const char *snapname, char *buf)
2372{
2373	char *atp = strchr(snapname, '@');
2374	if (atp == NULL)
2375		return (SET_ERROR(EINVAL));
2376	if (atp - snapname >= ZFS_MAX_DATASET_NAME_LEN)
2377		return (SET_ERROR(ENAMETOOLONG));
2378	(void) strlcpy(buf, snapname, atp - snapname + 1);
2379	return (0);
2380}
2381
2382/*
2383 * Call when we think we're going to write/free space in open context to track
2384 * the amount of dirty data in the open txg, which is also the amount
2385 * of memory that can not be evicted until this txg syncs.
2386 */
2387void
2388dmu_objset_willuse_space(objset_t *os, int64_t space, dmu_tx_t *tx)
2389{
2390	dsl_dataset_t *ds = os->os_dsl_dataset;
2391	int64_t aspace = spa_get_worst_case_asize(os->os_spa, space);
2392
2393	if (ds != NULL) {
2394		dsl_dir_willuse_space(ds->ds_dir, aspace, tx);
2395		dsl_pool_dirty_space(dmu_tx_pool(tx), space, tx);
2396	}
2397}
2398