1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21/*
22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 * Copyright (c) 2012, 2016 by Delphix. All rights reserved.
24 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
25 * Copyright (c) 2014 Integros [integros.com]
26 */
27
28#include <sys/zfs_context.h>
29#include <sys/dbuf.h>
30#include <sys/dnode.h>
31#include <sys/dmu.h>
32#include <sys/dmu_impl.h>
33#include <sys/dmu_tx.h>
34#include <sys/dmu_objset.h>
35#include <sys/dsl_dir.h>
36#include <sys/dsl_dataset.h>
37#include <sys/spa.h>
38#include <sys/zio.h>
39#include <sys/dmu_zfetch.h>
40#include <sys/range_tree.h>
41
42static kmem_cache_t *dnode_cache;
/*
 * Define DNODE_STATS to turn on statistics gathering.  By default, it is
 * only turned on when DEBUG is also defined.
 */
47#ifdef	DEBUG
48#define	DNODE_STATS
49#endif	/* DEBUG */
50
51#ifdef	DNODE_STATS
52#define	DNODE_STAT_ADD(stat)			((stat)++)
53#else
54#define	DNODE_STAT_ADD(stat)			/* nothing */
55#endif	/* DNODE_STATS */
56
57static dnode_phys_t dnode_phys_zero;
58
59int zfs_default_bs = SPA_MINBLOCKSHIFT;
60int zfs_default_ibs = DN_MAX_INDBLKSHIFT;
61
62#ifdef illumos
63static kmem_cbrc_t dnode_move(void *, void *, size_t, void *);
64#endif
65
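/*
 * Comparator for the dnode's dn_dbufs AVL tree: order dbufs by level, then
 * by block id; a DB_SEARCH sentinel sorts before any real dbuf with the same
 * level and block id, and the dbuf's address is the final tie-breaker.
 */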
66static int
67dbuf_compare(const void *x1, const void *x2)
68{
69	const dmu_buf_impl_t *d1 = x1;
70	const dmu_buf_impl_t *d2 = x2;
71
72	if (d1->db_level < d2->db_level) {
73		return (-1);
74	}
75	if (d1->db_level > d2->db_level) {
76		return (1);
77	}
78
79	if (d1->db_blkid < d2->db_blkid) {
80		return (-1);
81	}
82	if (d1->db_blkid > d2->db_blkid) {
83		return (1);
84	}
85
86	if (d1->db_state == DB_SEARCH) {
87		ASSERT3S(d2->db_state, !=, DB_SEARCH);
88		return (-1);
89	} else if (d2->db_state == DB_SEARCH) {
90		ASSERT3S(d1->db_state, !=, DB_SEARCH);
91		return (1);
92	}
93
94	if ((uintptr_t)d1 < (uintptr_t)d2) {
95		return (-1);
96	}
97	if ((uintptr_t)d1 > (uintptr_t)d2) {
98		return (1);
99	}
100	return (0);
101}
102
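/*
 * kmem cache constructor: initialize the locks, reference counts, lists, and
 * per-txg state of a dnode_t so that a newly allocated dnode starts out in a
 * known-clean state.
 */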
103/* ARGSUSED */
104static int
105dnode_cons(void *arg, void *unused, int kmflag)
106{
107	dnode_t *dn = arg;
108	int i;
109
110	rw_init(&dn->dn_struct_rwlock, NULL, RW_DEFAULT, NULL);
111	mutex_init(&dn->dn_mtx, NULL, MUTEX_DEFAULT, NULL);
112	mutex_init(&dn->dn_dbufs_mtx, NULL, MUTEX_DEFAULT, NULL);
113	cv_init(&dn->dn_notxholds, NULL, CV_DEFAULT, NULL);
114
115	/*
116	 * Every dbuf has a reference, and dropping a tracked reference is
117	 * O(number of references), so don't track dn_holds.
118	 */
119	refcount_create_untracked(&dn->dn_holds);
120	refcount_create(&dn->dn_tx_holds);
121	list_link_init(&dn->dn_link);
122
123	bzero(&dn->dn_next_nblkptr[0], sizeof (dn->dn_next_nblkptr));
124	bzero(&dn->dn_next_nlevels[0], sizeof (dn->dn_next_nlevels));
125	bzero(&dn->dn_next_indblkshift[0], sizeof (dn->dn_next_indblkshift));
126	bzero(&dn->dn_next_bonustype[0], sizeof (dn->dn_next_bonustype));
127	bzero(&dn->dn_rm_spillblk[0], sizeof (dn->dn_rm_spillblk));
128	bzero(&dn->dn_next_bonuslen[0], sizeof (dn->dn_next_bonuslen));
129	bzero(&dn->dn_next_blksz[0], sizeof (dn->dn_next_blksz));
130
131	for (i = 0; i < TXG_SIZE; i++) {
132		list_link_init(&dn->dn_dirty_link[i]);
133		dn->dn_free_ranges[i] = NULL;
134		list_create(&dn->dn_dirty_records[i],
135		    sizeof (dbuf_dirty_record_t),
136		    offsetof(dbuf_dirty_record_t, dr_dirty_node));
137	}
138
139	dn->dn_allocated_txg = 0;
140	dn->dn_free_txg = 0;
141	dn->dn_assigned_txg = 0;
142	dn->dn_dirtyctx = 0;
143	dn->dn_dirtyctx_firstset = NULL;
144	dn->dn_bonus = NULL;
145	dn->dn_have_spill = B_FALSE;
146	dn->dn_zio = NULL;
147	dn->dn_oldused = 0;
148	dn->dn_oldflags = 0;
149	dn->dn_olduid = 0;
150	dn->dn_oldgid = 0;
151	dn->dn_newuid = 0;
152	dn->dn_newgid = 0;
153	dn->dn_id_flags = 0;
154
155	dn->dn_dbufs_count = 0;
156	avl_create(&dn->dn_dbufs, dbuf_compare, sizeof (dmu_buf_impl_t),
157	    offsetof(dmu_buf_impl_t, db_link));
158
159	dn->dn_moved = 0;
160	POINTER_INVALIDATE(&dn->dn_objset);
161	return (0);
162}
163
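/*
 * kmem cache destructor: assert that the dnode is being returned to the
 * cache in a clean state and tear down what dnode_cons() set up.
 */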
164/* ARGSUSED */
165static void
166dnode_dest(void *arg, void *unused)
167{
168	int i;
169	dnode_t *dn = arg;
170
171	rw_destroy(&dn->dn_struct_rwlock);
172	mutex_destroy(&dn->dn_mtx);
173	mutex_destroy(&dn->dn_dbufs_mtx);
174	cv_destroy(&dn->dn_notxholds);
175	refcount_destroy(&dn->dn_holds);
176	refcount_destroy(&dn->dn_tx_holds);
177	ASSERT(!list_link_active(&dn->dn_link));
178
179	for (i = 0; i < TXG_SIZE; i++) {
180		ASSERT(!list_link_active(&dn->dn_dirty_link[i]));
181		ASSERT3P(dn->dn_free_ranges[i], ==, NULL);
182		list_destroy(&dn->dn_dirty_records[i]);
183		ASSERT0(dn->dn_next_nblkptr[i]);
184		ASSERT0(dn->dn_next_nlevels[i]);
185		ASSERT0(dn->dn_next_indblkshift[i]);
186		ASSERT0(dn->dn_next_bonustype[i]);
187		ASSERT0(dn->dn_rm_spillblk[i]);
188		ASSERT0(dn->dn_next_bonuslen[i]);
189		ASSERT0(dn->dn_next_blksz[i]);
190	}
191
192	ASSERT0(dn->dn_allocated_txg);
193	ASSERT0(dn->dn_free_txg);
194	ASSERT0(dn->dn_assigned_txg);
195	ASSERT0(dn->dn_dirtyctx);
196	ASSERT3P(dn->dn_dirtyctx_firstset, ==, NULL);
197	ASSERT3P(dn->dn_bonus, ==, NULL);
198	ASSERT(!dn->dn_have_spill);
199	ASSERT3P(dn->dn_zio, ==, NULL);
200	ASSERT0(dn->dn_oldused);
201	ASSERT0(dn->dn_oldflags);
202	ASSERT0(dn->dn_olduid);
203	ASSERT0(dn->dn_oldgid);
204	ASSERT0(dn->dn_newuid);
205	ASSERT0(dn->dn_newgid);
206	ASSERT0(dn->dn_id_flags);
207
208	ASSERT0(dn->dn_dbufs_count);
209	avl_destroy(&dn->dn_dbufs);
210}
211
212void
213dnode_init(void)
214{
215	ASSERT(dnode_cache == NULL);
216	dnode_cache = kmem_cache_create("dnode_t",
217	    sizeof (dnode_t),
218	    0, dnode_cons, dnode_dest, NULL, NULL, NULL, 0);
#ifdef illumos
	kmem_cache_set_move(dnode_cache, dnode_move);
#endif	/* illumos */
220}
221
222void
223dnode_fini(void)
224{
225	kmem_cache_destroy(dnode_cache);
226	dnode_cache = NULL;
227}
228
230#ifdef ZFS_DEBUG
231void
232dnode_verify(dnode_t *dn)
233{
234	int drop_struct_lock = FALSE;
235
236	ASSERT(dn->dn_phys);
237	ASSERT(dn->dn_objset);
238	ASSERT(dn->dn_handle->dnh_dnode == dn);
239
240	ASSERT(DMU_OT_IS_VALID(dn->dn_phys->dn_type));
241
242	if (!(zfs_flags & ZFS_DEBUG_DNODE_VERIFY))
243		return;
244
245	if (!RW_WRITE_HELD(&dn->dn_struct_rwlock)) {
246		rw_enter(&dn->dn_struct_rwlock, RW_READER);
247		drop_struct_lock = TRUE;
248	}
249	if (dn->dn_phys->dn_type != DMU_OT_NONE || dn->dn_allocated_txg != 0) {
250		int i;
251		ASSERT3U(dn->dn_indblkshift, >=, 0);
252		ASSERT3U(dn->dn_indblkshift, <=, SPA_MAXBLOCKSHIFT);
253		if (dn->dn_datablkshift) {
254			ASSERT3U(dn->dn_datablkshift, >=, SPA_MINBLOCKSHIFT);
255			ASSERT3U(dn->dn_datablkshift, <=, SPA_MAXBLOCKSHIFT);
256			ASSERT3U(1<<dn->dn_datablkshift, ==, dn->dn_datablksz);
257		}
258		ASSERT3U(dn->dn_nlevels, <=, 30);
259		ASSERT(DMU_OT_IS_VALID(dn->dn_type));
260		ASSERT3U(dn->dn_nblkptr, >=, 1);
261		ASSERT3U(dn->dn_nblkptr, <=, DN_MAX_NBLKPTR);
262		ASSERT3U(dn->dn_bonuslen, <=, DN_MAX_BONUSLEN);
263		ASSERT3U(dn->dn_datablksz, ==,
264		    dn->dn_datablkszsec << SPA_MINBLOCKSHIFT);
265		ASSERT3U(ISP2(dn->dn_datablksz), ==, dn->dn_datablkshift != 0);
266		ASSERT3U((dn->dn_nblkptr - 1) * sizeof (blkptr_t) +
267		    dn->dn_bonuslen, <=, DN_MAX_BONUSLEN);
268		for (i = 0; i < TXG_SIZE; i++) {
269			ASSERT3U(dn->dn_next_nlevels[i], <=, dn->dn_nlevels);
270		}
271	}
272	if (dn->dn_phys->dn_type != DMU_OT_NONE)
273		ASSERT3U(dn->dn_phys->dn_nlevels, <=, dn->dn_nlevels);
274	ASSERT(DMU_OBJECT_IS_SPECIAL(dn->dn_object) || dn->dn_dbuf != NULL);
275	if (dn->dn_dbuf != NULL) {
276		ASSERT3P(dn->dn_phys, ==,
277		    (dnode_phys_t *)dn->dn_dbuf->db.db_data +
278		    (dn->dn_object % (dn->dn_dbuf->db.db_size >> DNODE_SHIFT)));
279	}
280	if (drop_struct_lock)
281		rw_exit(&dn->dn_struct_rwlock);
282}
283#endif
284
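/*
 * Byteswap a single on-disk dnode in place: its scalar fields, block
 * pointers, bonus buffer, and spill block pointer (if present).  A free
 * dnode (DMU_OT_NONE) is simply zeroed.
 */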
285void
286dnode_byteswap(dnode_phys_t *dnp)
287{
	uint64_t *buf64 = (void *)&dnp->dn_blkptr;
289	int i;
290
291	if (dnp->dn_type == DMU_OT_NONE) {
292		bzero(dnp, sizeof (dnode_phys_t));
293		return;
294	}
295
296	dnp->dn_datablkszsec = BSWAP_16(dnp->dn_datablkszsec);
297	dnp->dn_bonuslen = BSWAP_16(dnp->dn_bonuslen);
298	dnp->dn_maxblkid = BSWAP_64(dnp->dn_maxblkid);
299	dnp->dn_used = BSWAP_64(dnp->dn_used);
300
	/*
	 * dn_nblkptr is only one byte, so it's OK to read it in either
	 * byte order.  We can't read dn_bonuslen.
	 */
305	ASSERT(dnp->dn_indblkshift <= SPA_MAXBLOCKSHIFT);
306	ASSERT(dnp->dn_nblkptr <= DN_MAX_NBLKPTR);
307	for (i = 0; i < dnp->dn_nblkptr * sizeof (blkptr_t)/8; i++)
308		buf64[i] = BSWAP_64(buf64[i]);
309
310	/*
311	 * OK to check dn_bonuslen for zero, because it won't matter if
312	 * we have the wrong byte order.  This is necessary because the
313	 * dnode dnode is smaller than a regular dnode.
314	 */
315	if (dnp->dn_bonuslen != 0) {
316		/*
317		 * Note that the bonus length calculated here may be
318		 * longer than the actual bonus buffer.  This is because
319		 * we always put the bonus buffer after the last block
320		 * pointer (instead of packing it against the end of the
321		 * dnode buffer).
322		 */
323		int off = (dnp->dn_nblkptr-1) * sizeof (blkptr_t);
324		size_t len = DN_MAX_BONUSLEN - off;
325		ASSERT(DMU_OT_IS_VALID(dnp->dn_bonustype));
326		dmu_object_byteswap_t byteswap =
327		    DMU_OT_BYTESWAP(dnp->dn_bonustype);
328		dmu_ot_byteswap[byteswap].ob_func(dnp->dn_bonus + off, len);
329	}
330
331	/* Swap SPILL block if we have one */
332	if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR)
333		byteswap_uint64_array(&dnp->dn_spill, sizeof (blkptr_t));
334
335}
336
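/*
 * Byteswap a buffer containing an array of on-disk dnodes, one dnode_phys_t
 * at a time.
 */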
337void
338dnode_buf_byteswap(void *vbuf, size_t size)
339{
340	dnode_phys_t *buf = vbuf;
341	int i;
342
343	ASSERT3U(sizeof (dnode_phys_t), ==, (1<<DNODE_SHIFT));
344	ASSERT((size & (sizeof (dnode_phys_t)-1)) == 0);
345
346	size >>= DNODE_SHIFT;
347	for (i = 0; i < size; i++) {
348		dnode_byteswap(buf);
349		buf++;
350	}
351}
352
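/*
 * Change the length of the dnode's bonus buffer, recording the new length
 * for the current txg (DN_ZERO_BONUSLEN denotes an explicit zero-length
 * bonus buffer).
 */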
353void
354dnode_setbonuslen(dnode_t *dn, int newsize, dmu_tx_t *tx)
355{
356	ASSERT3U(refcount_count(&dn->dn_holds), >=, 1);
357
358	dnode_setdirty(dn, tx);
359	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
360	ASSERT3U(newsize, <=, DN_MAX_BONUSLEN -
361	    (dn->dn_nblkptr-1) * sizeof (blkptr_t));
362	dn->dn_bonuslen = newsize;
363	if (newsize == 0)
364		dn->dn_next_bonuslen[tx->tx_txg & TXG_MASK] = DN_ZERO_BONUSLEN;
365	else
366		dn->dn_next_bonuslen[tx->tx_txg & TXG_MASK] = dn->dn_bonuslen;
367	rw_exit(&dn->dn_struct_rwlock);
368}
369
370void
371dnode_setbonus_type(dnode_t *dn, dmu_object_type_t newtype, dmu_tx_t *tx)
372{
373	ASSERT3U(refcount_count(&dn->dn_holds), >=, 1);
374	dnode_setdirty(dn, tx);
375	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
376	dn->dn_bonustype = newtype;
377	dn->dn_next_bonustype[tx->tx_txg & TXG_MASK] = dn->dn_bonustype;
378	rw_exit(&dn->dn_struct_rwlock);
379}
380
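/* Schedule removal of the dnode's spill block in the current transaction. */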
381void
382dnode_rm_spill(dnode_t *dn, dmu_tx_t *tx)
383{
384	ASSERT3U(refcount_count(&dn->dn_holds), >=, 1);
385	ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));
386	dnode_setdirty(dn, tx);
387	dn->dn_rm_spillblk[tx->tx_txg&TXG_MASK] = DN_KILL_SPILLBLK;
388	dn->dn_have_spill = B_FALSE;
389}
390
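/*
 * Set the dnode's data block size, keeping dn_datablkszsec (in 512-byte
 * sectors) and dn_datablkshift (0 for non-power-of-two sizes) consistent
 * with it.
 */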
391static void
392dnode_setdblksz(dnode_t *dn, int size)
393{
394	ASSERT0(P2PHASE(size, SPA_MINBLOCKSIZE));
395	ASSERT3U(size, <=, SPA_MAXBLOCKSIZE);
396	ASSERT3U(size, >=, SPA_MINBLOCKSIZE);
397	ASSERT3U(size >> SPA_MINBLOCKSHIFT, <,
398	    1<<(sizeof (dn->dn_phys->dn_datablkszsec) * 8));
399	dn->dn_datablksz = size;
400	dn->dn_datablkszsec = size >> SPA_MINBLOCKSHIFT;
401	dn->dn_datablkshift = ISP2(size) ? highbit64(size - 1) : 0;
402}
403
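/*
 * Construct an in-core dnode_t for the given on-disk dnode and attach it to
 * the handle.  If another thread won the race to instantiate this dnode,
 * free our copy and return the winner instead.
 */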
404static dnode_t *
405dnode_create(objset_t *os, dnode_phys_t *dnp, dmu_buf_impl_t *db,
406    uint64_t object, dnode_handle_t *dnh)
407{
408	dnode_t *dn;
409
410	dn = kmem_cache_alloc(dnode_cache, KM_SLEEP);
411	ASSERT(!POINTER_IS_VALID(dn->dn_objset));
412	dn->dn_moved = 0;
413
414	/*
415	 * Defer setting dn_objset until the dnode is ready to be a candidate
416	 * for the dnode_move() callback.
417	 */
418	dn->dn_object = object;
419	dn->dn_dbuf = db;
420	dn->dn_handle = dnh;
421	dn->dn_phys = dnp;
422
423	if (dnp->dn_datablkszsec) {
424		dnode_setdblksz(dn, dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT);
425	} else {
426		dn->dn_datablksz = 0;
427		dn->dn_datablkszsec = 0;
428		dn->dn_datablkshift = 0;
429	}
430	dn->dn_indblkshift = dnp->dn_indblkshift;
431	dn->dn_nlevels = dnp->dn_nlevels;
432	dn->dn_type = dnp->dn_type;
433	dn->dn_nblkptr = dnp->dn_nblkptr;
434	dn->dn_checksum = dnp->dn_checksum;
435	dn->dn_compress = dnp->dn_compress;
436	dn->dn_bonustype = dnp->dn_bonustype;
437	dn->dn_bonuslen = dnp->dn_bonuslen;
438	dn->dn_maxblkid = dnp->dn_maxblkid;
439	dn->dn_have_spill = ((dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) != 0);
440	dn->dn_id_flags = 0;
441
442	dmu_zfetch_init(&dn->dn_zfetch, dn);
443
444	ASSERT(DMU_OT_IS_VALID(dn->dn_phys->dn_type));
445
446	mutex_enter(&os->os_lock);
447	if (dnh->dnh_dnode != NULL) {
448		/* Lost the allocation race. */
449		mutex_exit(&os->os_lock);
450#ifdef __NetBSD__
451		dmu_zfetch_fini(&dn->dn_zfetch);
452#endif
453		kmem_cache_free(dnode_cache, dn);
454		return (dnh->dnh_dnode);
455	}
456
457	/*
458	 * Exclude special dnodes from os_dnodes so an empty os_dnodes
459	 * signifies that the special dnodes have no references from
460	 * their children (the entries in os_dnodes).  This allows
461	 * dnode_destroy() to easily determine if the last child has
462	 * been removed and then complete eviction of the objset.
463	 */
464	if (!DMU_OBJECT_IS_SPECIAL(object))
465		list_insert_head(&os->os_dnodes, dn);
466	membar_producer();
467
468	/*
469	 * Everything else must be valid before assigning dn_objset
470	 * makes the dnode eligible for dnode_move().
471	 */
472	dn->dn_objset = os;
473
474	dnh->dnh_dnode = dn;
475	mutex_exit(&os->os_lock);
476
477	arc_space_consume(sizeof (dnode_t), ARC_SPACE_OTHER);
478	return (dn);
479}
480
481/*
482 * Caller must be holding the dnode handle, which is released upon return.
483 */
484static void
485dnode_destroy(dnode_t *dn)
486{
487	objset_t *os = dn->dn_objset;
488	boolean_t complete_os_eviction = B_FALSE;
489
490	ASSERT((dn->dn_id_flags & DN_ID_NEW_EXIST) == 0);
491
492	mutex_enter(&os->os_lock);
493	POINTER_INVALIDATE(&dn->dn_objset);
494	if (!DMU_OBJECT_IS_SPECIAL(dn->dn_object)) {
495		list_remove(&os->os_dnodes, dn);
496		complete_os_eviction =
497		    list_is_empty(&os->os_dnodes) &&
498		    list_link_active(&os->os_evicting_node);
499	}
500	mutex_exit(&os->os_lock);
501
502	/* the dnode can no longer move, so we can release the handle */
503	zrl_remove(&dn->dn_handle->dnh_zrlock);
504
505	dn->dn_allocated_txg = 0;
506	dn->dn_free_txg = 0;
507	dn->dn_assigned_txg = 0;
508
509	dn->dn_dirtyctx = 0;
510	if (dn->dn_dirtyctx_firstset != NULL) {
511		kmem_free(dn->dn_dirtyctx_firstset, 1);
512		dn->dn_dirtyctx_firstset = NULL;
513	}
514	if (dn->dn_bonus != NULL) {
515		mutex_enter(&dn->dn_bonus->db_mtx);
516		dbuf_destroy(dn->dn_bonus);
517		dn->dn_bonus = NULL;
518	}
519	dn->dn_zio = NULL;
520
521	dn->dn_have_spill = B_FALSE;
522	dn->dn_oldused = 0;
523	dn->dn_oldflags = 0;
524	dn->dn_olduid = 0;
525	dn->dn_oldgid = 0;
526	dn->dn_newuid = 0;
527	dn->dn_newgid = 0;
528	dn->dn_id_flags = 0;
529
530	dmu_zfetch_fini(&dn->dn_zfetch);
531	kmem_cache_free(dnode_cache, dn);
532	arc_space_return(sizeof (dnode_t), ARC_SPACE_OTHER);
533
534	if (complete_os_eviction)
535		dmu_objset_evict_done(os);
536}
537
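/*
 * Initialize a freshly allocated (previously free) dnode with the given
 * type, block size, indirect block shift, and bonus buffer, and dirty the
 * new state in the current transaction.
 */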
538void
539dnode_allocate(dnode_t *dn, dmu_object_type_t ot, int blocksize, int ibs,
540    dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx)
541{
542	int i;
543
544	ASSERT3U(blocksize, <=,
545	    spa_maxblocksize(dmu_objset_spa(dn->dn_objset)));
546	if (blocksize == 0)
547		blocksize = 1 << zfs_default_bs;
548	else
549		blocksize = P2ROUNDUP(blocksize, SPA_MINBLOCKSIZE);
550
551	if (ibs == 0)
552		ibs = zfs_default_ibs;
553
554	ibs = MIN(MAX(ibs, DN_MIN_INDBLKSHIFT), DN_MAX_INDBLKSHIFT);
555
556	dprintf("os=%p obj=%llu txg=%llu blocksize=%d ibs=%d\n", dn->dn_objset,
557	    dn->dn_object, tx->tx_txg, blocksize, ibs);
558
559	ASSERT(dn->dn_type == DMU_OT_NONE);
560	ASSERT(bcmp(dn->dn_phys, &dnode_phys_zero, sizeof (dnode_phys_t)) == 0);
561	ASSERT(dn->dn_phys->dn_type == DMU_OT_NONE);
562	ASSERT(ot != DMU_OT_NONE);
563	ASSERT(DMU_OT_IS_VALID(ot));
564	ASSERT((bonustype == DMU_OT_NONE && bonuslen == 0) ||
565	    (bonustype == DMU_OT_SA && bonuslen == 0) ||
566	    (bonustype != DMU_OT_NONE && bonuslen != 0));
567	ASSERT(DMU_OT_IS_VALID(bonustype));
568	ASSERT3U(bonuslen, <=, DN_MAX_BONUSLEN);
569	ASSERT(dn->dn_type == DMU_OT_NONE);
570	ASSERT0(dn->dn_maxblkid);
571	ASSERT0(dn->dn_allocated_txg);
572	ASSERT0(dn->dn_assigned_txg);
573	ASSERT(refcount_is_zero(&dn->dn_tx_holds));
574	ASSERT3U(refcount_count(&dn->dn_holds), <=, 1);
575	ASSERT(avl_is_empty(&dn->dn_dbufs));
576
577	for (i = 0; i < TXG_SIZE; i++) {
578		ASSERT0(dn->dn_next_nblkptr[i]);
579		ASSERT0(dn->dn_next_nlevels[i]);
580		ASSERT0(dn->dn_next_indblkshift[i]);
581		ASSERT0(dn->dn_next_bonuslen[i]);
582		ASSERT0(dn->dn_next_bonustype[i]);
583		ASSERT0(dn->dn_rm_spillblk[i]);
584		ASSERT0(dn->dn_next_blksz[i]);
585		ASSERT(!list_link_active(&dn->dn_dirty_link[i]));
586		ASSERT3P(list_head(&dn->dn_dirty_records[i]), ==, NULL);
587		ASSERT3P(dn->dn_free_ranges[i], ==, NULL);
588	}
589
590	dn->dn_type = ot;
591	dnode_setdblksz(dn, blocksize);
592	dn->dn_indblkshift = ibs;
593	dn->dn_nlevels = 1;
594	if (bonustype == DMU_OT_SA) /* Maximize bonus space for SA */
595		dn->dn_nblkptr = 1;
596	else
597		dn->dn_nblkptr = 1 +
598		    ((DN_MAX_BONUSLEN - bonuslen) >> SPA_BLKPTRSHIFT);
599	dn->dn_bonustype = bonustype;
600	dn->dn_bonuslen = bonuslen;
601	dn->dn_checksum = ZIO_CHECKSUM_INHERIT;
602	dn->dn_compress = ZIO_COMPRESS_INHERIT;
603	dn->dn_dirtyctx = 0;
604
605	dn->dn_free_txg = 0;
606	if (dn->dn_dirtyctx_firstset) {
607		kmem_free(dn->dn_dirtyctx_firstset, 1);
608		dn->dn_dirtyctx_firstset = NULL;
609	}
610
611	dn->dn_allocated_txg = tx->tx_txg;
612	dn->dn_id_flags = 0;
613
614	dnode_setdirty(dn, tx);
615	dn->dn_next_indblkshift[tx->tx_txg & TXG_MASK] = ibs;
616	dn->dn_next_bonuslen[tx->tx_txg & TXG_MASK] = dn->dn_bonuslen;
617	dn->dn_next_bonustype[tx->tx_txg & TXG_MASK] = dn->dn_bonustype;
618	dn->dn_next_blksz[tx->tx_txg & TXG_MASK] = dn->dn_datablksz;
619}
620
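/*
 * Reuse an existing object with a new type, block size, and bonus buffer.
 * Any unreferenced dbufs are evicted and any existing spill block is
 * removed.
 */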
621void
622dnode_reallocate(dnode_t *dn, dmu_object_type_t ot, int blocksize,
623    dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx)
624{
625	int nblkptr;
626
627	ASSERT3U(blocksize, >=, SPA_MINBLOCKSIZE);
628	ASSERT3U(blocksize, <=,
629	    spa_maxblocksize(dmu_objset_spa(dn->dn_objset)));
630	ASSERT0(blocksize % SPA_MINBLOCKSIZE);
631	ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT || dmu_tx_private_ok(tx));
632	ASSERT(tx->tx_txg != 0);
633	ASSERT((bonustype == DMU_OT_NONE && bonuslen == 0) ||
634	    (bonustype != DMU_OT_NONE && bonuslen != 0) ||
635	    (bonustype == DMU_OT_SA && bonuslen == 0));
636	ASSERT(DMU_OT_IS_VALID(bonustype));
637	ASSERT3U(bonuslen, <=, DN_MAX_BONUSLEN);
638
639	/* clean up any unreferenced dbufs */
640	dnode_evict_dbufs(dn);
641
642	dn->dn_id_flags = 0;
643
644	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
645	dnode_setdirty(dn, tx);
646	if (dn->dn_datablksz != blocksize) {
647		/* change blocksize */
648		ASSERT(dn->dn_maxblkid == 0 &&
649		    (BP_IS_HOLE(&dn->dn_phys->dn_blkptr[0]) ||
650		    dnode_block_freed(dn, 0)));
651		dnode_setdblksz(dn, blocksize);
652		dn->dn_next_blksz[tx->tx_txg&TXG_MASK] = blocksize;
653	}
654	if (dn->dn_bonuslen != bonuslen)
655		dn->dn_next_bonuslen[tx->tx_txg&TXG_MASK] = bonuslen;
656
657	if (bonustype == DMU_OT_SA) /* Maximize bonus space for SA */
658		nblkptr = 1;
659	else
660		nblkptr = 1 + ((DN_MAX_BONUSLEN - bonuslen) >> SPA_BLKPTRSHIFT);
661	if (dn->dn_bonustype != bonustype)
662		dn->dn_next_bonustype[tx->tx_txg&TXG_MASK] = bonustype;
663	if (dn->dn_nblkptr != nblkptr)
664		dn->dn_next_nblkptr[tx->tx_txg&TXG_MASK] = nblkptr;
665	if (dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
666		dbuf_rm_spill(dn, tx);
667		dnode_rm_spill(dn, tx);
668	}
669	rw_exit(&dn->dn_struct_rwlock);
670
671	/* change type */
672	dn->dn_type = ot;
673
674	/* change bonus size and type */
675	mutex_enter(&dn->dn_mtx);
676	dn->dn_bonustype = bonustype;
677	dn->dn_bonuslen = bonuslen;
678	dn->dn_nblkptr = nblkptr;
679	dn->dn_checksum = ZIO_CHECKSUM_INHERIT;
680	dn->dn_compress = ZIO_COMPRESS_INHERIT;
681	ASSERT3U(dn->dn_nblkptr, <=, DN_MAX_NBLKPTR);
682
683	/* fix up the bonus db_size */
684	if (dn->dn_bonus) {
685		dn->dn_bonus->db.db_size =
686		    DN_MAX_BONUSLEN - (dn->dn_nblkptr-1) * sizeof (blkptr_t);
687		ASSERT(dn->dn_bonuslen <= dn->dn_bonus->db.db_size);
688	}
689
690	dn->dn_allocated_txg = tx->tx_txg;
691	mutex_exit(&dn->dn_mtx);
692}
693
694#ifdef	DNODE_STATS
695static struct {
696	uint64_t dms_dnode_invalid;
697	uint64_t dms_dnode_recheck1;
698	uint64_t dms_dnode_recheck2;
699	uint64_t dms_dnode_special;
700	uint64_t dms_dnode_handle;
701	uint64_t dms_dnode_rwlock;
702	uint64_t dms_dnode_active;
703} dnode_move_stats;
704#endif	/* DNODE_STATS */
705
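/*
 * Copy all state from the old dnode into the new one, point the handle and
 * zfetch back pointers at the new dnode, and reset the old dnode so that it
 * satisfies the cache destructor.
 */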
706static void
707dnode_move_impl(dnode_t *odn, dnode_t *ndn)
708{
709	int i;
710
711	ASSERT(!RW_LOCK_HELD(&odn->dn_struct_rwlock));
712	ASSERT(MUTEX_NOT_HELD(&odn->dn_mtx));
713	ASSERT(MUTEX_NOT_HELD(&odn->dn_dbufs_mtx));
714	ASSERT(!RW_LOCK_HELD(&odn->dn_zfetch.zf_rwlock));
715
716	/* Copy fields. */
717	ndn->dn_objset = odn->dn_objset;
718	ndn->dn_object = odn->dn_object;
719	ndn->dn_dbuf = odn->dn_dbuf;
720	ndn->dn_handle = odn->dn_handle;
721	ndn->dn_phys = odn->dn_phys;
722	ndn->dn_type = odn->dn_type;
723	ndn->dn_bonuslen = odn->dn_bonuslen;
724	ndn->dn_bonustype = odn->dn_bonustype;
725	ndn->dn_nblkptr = odn->dn_nblkptr;
726	ndn->dn_checksum = odn->dn_checksum;
727	ndn->dn_compress = odn->dn_compress;
728	ndn->dn_nlevels = odn->dn_nlevels;
729	ndn->dn_indblkshift = odn->dn_indblkshift;
730	ndn->dn_datablkshift = odn->dn_datablkshift;
731	ndn->dn_datablkszsec = odn->dn_datablkszsec;
732	ndn->dn_datablksz = odn->dn_datablksz;
733	ndn->dn_maxblkid = odn->dn_maxblkid;
734	bcopy(&odn->dn_next_nblkptr[0], &ndn->dn_next_nblkptr[0],
735	    sizeof (odn->dn_next_nblkptr));
736	bcopy(&odn->dn_next_nlevels[0], &ndn->dn_next_nlevels[0],
737	    sizeof (odn->dn_next_nlevels));
738	bcopy(&odn->dn_next_indblkshift[0], &ndn->dn_next_indblkshift[0],
739	    sizeof (odn->dn_next_indblkshift));
740	bcopy(&odn->dn_next_bonustype[0], &ndn->dn_next_bonustype[0],
741	    sizeof (odn->dn_next_bonustype));
742	bcopy(&odn->dn_rm_spillblk[0], &ndn->dn_rm_spillblk[0],
743	    sizeof (odn->dn_rm_spillblk));
744	bcopy(&odn->dn_next_bonuslen[0], &ndn->dn_next_bonuslen[0],
745	    sizeof (odn->dn_next_bonuslen));
746	bcopy(&odn->dn_next_blksz[0], &ndn->dn_next_blksz[0],
747	    sizeof (odn->dn_next_blksz));
748	for (i = 0; i < TXG_SIZE; i++) {
749		list_move_tail(&ndn->dn_dirty_records[i],
750		    &odn->dn_dirty_records[i]);
751	}
752	bcopy(&odn->dn_free_ranges[0], &ndn->dn_free_ranges[0],
753	    sizeof (odn->dn_free_ranges));
754	ndn->dn_allocated_txg = odn->dn_allocated_txg;
755	ndn->dn_free_txg = odn->dn_free_txg;
756	ndn->dn_assigned_txg = odn->dn_assigned_txg;
757	ndn->dn_dirtyctx = odn->dn_dirtyctx;
758	ndn->dn_dirtyctx_firstset = odn->dn_dirtyctx_firstset;
759	ASSERT(refcount_count(&odn->dn_tx_holds) == 0);
760	refcount_transfer(&ndn->dn_holds, &odn->dn_holds);
761	ASSERT(avl_is_empty(&ndn->dn_dbufs));
762	avl_swap(&ndn->dn_dbufs, &odn->dn_dbufs);
763	ndn->dn_dbufs_count = odn->dn_dbufs_count;
764	ndn->dn_bonus = odn->dn_bonus;
765	ndn->dn_have_spill = odn->dn_have_spill;
766	ndn->dn_zio = odn->dn_zio;
767	ndn->dn_oldused = odn->dn_oldused;
768	ndn->dn_oldflags = odn->dn_oldflags;
769	ndn->dn_olduid = odn->dn_olduid;
770	ndn->dn_oldgid = odn->dn_oldgid;
771	ndn->dn_newuid = odn->dn_newuid;
772	ndn->dn_newgid = odn->dn_newgid;
773	ndn->dn_id_flags = odn->dn_id_flags;
774	dmu_zfetch_init(&ndn->dn_zfetch, NULL);
775	list_move_tail(&ndn->dn_zfetch.zf_stream, &odn->dn_zfetch.zf_stream);
776	ndn->dn_zfetch.zf_dnode = odn->dn_zfetch.zf_dnode;
777
778	/*
779	 * Update back pointers. Updating the handle fixes the back pointer of
780	 * every descendant dbuf as well as the bonus dbuf.
781	 */
782	ASSERT(ndn->dn_handle->dnh_dnode == odn);
783	ndn->dn_handle->dnh_dnode = ndn;
784	if (ndn->dn_zfetch.zf_dnode == odn) {
785		ndn->dn_zfetch.zf_dnode = ndn;
786	}
787
788	/*
789	 * Invalidate the original dnode by clearing all of its back pointers.
790	 */
791	odn->dn_dbuf = NULL;
792	odn->dn_handle = NULL;
793	avl_create(&odn->dn_dbufs, dbuf_compare, sizeof (dmu_buf_impl_t),
794	    offsetof(dmu_buf_impl_t, db_link));
795	odn->dn_dbufs_count = 0;
796	odn->dn_bonus = NULL;
797	odn->dn_zfetch.zf_dnode = NULL;
798
799	/*
800	 * Set the low bit of the objset pointer to ensure that dnode_move()
801	 * recognizes the dnode as invalid in any subsequent callback.
802	 */
803	POINTER_INVALIDATE(&odn->dn_objset);
804
805	/*
806	 * Satisfy the destructor.
807	 */
808	for (i = 0; i < TXG_SIZE; i++) {
809		list_create(&odn->dn_dirty_records[i],
810		    sizeof (dbuf_dirty_record_t),
811		    offsetof(dbuf_dirty_record_t, dr_dirty_node));
812		odn->dn_free_ranges[i] = NULL;
813		odn->dn_next_nlevels[i] = 0;
814		odn->dn_next_indblkshift[i] = 0;
815		odn->dn_next_bonustype[i] = 0;
816		odn->dn_rm_spillblk[i] = 0;
817		odn->dn_next_bonuslen[i] = 0;
818		odn->dn_next_blksz[i] = 0;
819	}
820	odn->dn_allocated_txg = 0;
821	odn->dn_free_txg = 0;
822	odn->dn_assigned_txg = 0;
823	odn->dn_dirtyctx = 0;
824	odn->dn_dirtyctx_firstset = NULL;
825	odn->dn_have_spill = B_FALSE;
826	odn->dn_zio = NULL;
827	odn->dn_oldused = 0;
828	odn->dn_oldflags = 0;
829	odn->dn_olduid = 0;
830	odn->dn_oldgid = 0;
831	odn->dn_newuid = 0;
832	odn->dn_newgid = 0;
833	odn->dn_id_flags = 0;
834
835	/*
836	 * Mark the dnode.
837	 */
838	ndn->dn_moved = 1;
839	odn->dn_moved = (uint8_t)-1;
840}
841
842#ifdef illumos
843#ifdef	_KERNEL
844/*ARGSUSED*/
845static kmem_cbrc_t
846dnode_move(void *buf, void *newbuf, size_t size, void *arg)
847{
848	dnode_t *odn = buf, *ndn = newbuf;
849	objset_t *os;
850	int64_t refcount;
851	uint32_t dbufs;
852
853	/*
854	 * The dnode is on the objset's list of known dnodes if the objset
855	 * pointer is valid. We set the low bit of the objset pointer when
856	 * freeing the dnode to invalidate it, and the memory patterns written
857	 * by kmem (baddcafe and deadbeef) set at least one of the two low bits.
858	 * A newly created dnode sets the objset pointer last of all to indicate
859	 * that the dnode is known and in a valid state to be moved by this
860	 * function.
861	 */
862	os = odn->dn_objset;
863	if (!POINTER_IS_VALID(os)) {
864		DNODE_STAT_ADD(dnode_move_stats.dms_dnode_invalid);
865		return (KMEM_CBRC_DONT_KNOW);
866	}
867
868	/*
869	 * Ensure that the objset does not go away during the move.
870	 */
871	rw_enter(&os_lock, RW_WRITER);
872	if (os != odn->dn_objset) {
873		rw_exit(&os_lock);
874		DNODE_STAT_ADD(dnode_move_stats.dms_dnode_recheck1);
875		return (KMEM_CBRC_DONT_KNOW);
876	}
877
878	/*
879	 * If the dnode is still valid, then so is the objset. We know that no
880	 * valid objset can be freed while we hold os_lock, so we can safely
881	 * ensure that the objset remains in use.
882	 */
883	mutex_enter(&os->os_lock);
884
885	/*
886	 * Recheck the objset pointer in case the dnode was removed just before
887	 * acquiring the lock.
888	 */
889	if (os != odn->dn_objset) {
890		mutex_exit(&os->os_lock);
891		rw_exit(&os_lock);
892		DNODE_STAT_ADD(dnode_move_stats.dms_dnode_recheck2);
893		return (KMEM_CBRC_DONT_KNOW);
894	}
895
896	/*
897	 * At this point we know that as long as we hold os->os_lock, the dnode
898	 * cannot be freed and fields within the dnode can be safely accessed.
899	 * The objset listing this dnode cannot go away as long as this dnode is
900	 * on its list.
901	 */
902	rw_exit(&os_lock);
903	if (DMU_OBJECT_IS_SPECIAL(odn->dn_object)) {
904		mutex_exit(&os->os_lock);
905		DNODE_STAT_ADD(dnode_move_stats.dms_dnode_special);
906		return (KMEM_CBRC_NO);
907	}
908	ASSERT(odn->dn_dbuf != NULL); /* only "special" dnodes have no parent */
909
910	/*
911	 * Lock the dnode handle to prevent the dnode from obtaining any new
912	 * holds. This also prevents the descendant dbufs and the bonus dbuf
913	 * from accessing the dnode, so that we can discount their holds. The
914	 * handle is safe to access because we know that while the dnode cannot
915	 * go away, neither can its handle. Once we hold dnh_zrlock, we can
916	 * safely move any dnode referenced only by dbufs.
917	 */
918	if (!zrl_tryenter(&odn->dn_handle->dnh_zrlock)) {
919		mutex_exit(&os->os_lock);
920		DNODE_STAT_ADD(dnode_move_stats.dms_dnode_handle);
921		return (KMEM_CBRC_LATER);
922	}
923
924	/*
925	 * Ensure a consistent view of the dnode's holds and the dnode's dbufs.
926	 * We need to guarantee that there is a hold for every dbuf in order to
927	 * determine whether the dnode is actively referenced. Falsely matching
928	 * a dbuf to an active hold would lead to an unsafe move. It's possible
929	 * that a thread already having an active dnode hold is about to add a
930	 * dbuf, and we can't compare hold and dbuf counts while the add is in
931	 * progress.
932	 */
933	if (!rw_tryenter(&odn->dn_struct_rwlock, RW_WRITER)) {
934		zrl_exit(&odn->dn_handle->dnh_zrlock);
935		mutex_exit(&os->os_lock);
936		DNODE_STAT_ADD(dnode_move_stats.dms_dnode_rwlock);
937		return (KMEM_CBRC_LATER);
938	}
939
940	/*
941	 * A dbuf may be removed (evicted) without an active dnode hold. In that
942	 * case, the dbuf count is decremented under the handle lock before the
943	 * dbuf's hold is released. This order ensures that if we count the hold
944	 * after the dbuf is removed but before its hold is released, we will
945	 * treat the unmatched hold as active and exit safely. If we count the
946	 * hold before the dbuf is removed, the hold is discounted, and the
947	 * removal is blocked until the move completes.
948	 */
949	refcount = refcount_count(&odn->dn_holds);
950	ASSERT(refcount >= 0);
951	dbufs = odn->dn_dbufs_count;
952
953	/* We can't have more dbufs than dnode holds. */
954	ASSERT3U(dbufs, <=, refcount);
955	DTRACE_PROBE3(dnode__move, dnode_t *, odn, int64_t, refcount,
956	    uint32_t, dbufs);
957
958	if (refcount > dbufs) {
959		rw_exit(&odn->dn_struct_rwlock);
960		zrl_exit(&odn->dn_handle->dnh_zrlock);
961		mutex_exit(&os->os_lock);
962		DNODE_STAT_ADD(dnode_move_stats.dms_dnode_active);
963		return (KMEM_CBRC_LATER);
964	}
965
966	rw_exit(&odn->dn_struct_rwlock);
967
968	/*
969	 * At this point we know that anyone with a hold on the dnode is not
970	 * actively referencing it. The dnode is known and in a valid state to
971	 * move. We're holding the locks needed to execute the critical section.
972	 */
973	dnode_move_impl(odn, ndn);
974
975	list_link_replace(&odn->dn_link, &ndn->dn_link);
976	/* If the dnode was safe to move, the refcount cannot have changed. */
977	ASSERT(refcount == refcount_count(&ndn->dn_holds));
978	ASSERT(dbufs == ndn->dn_dbufs_count);
979	zrl_exit(&ndn->dn_handle->dnh_zrlock); /* handle has moved */
980	mutex_exit(&os->os_lock);
981
982	return (KMEM_CBRC_YES);
983}
984#endif	/* _KERNEL */
985#endif	/* illumos */
986
987void
988dnode_special_close(dnode_handle_t *dnh)
989{
990	dnode_t *dn = dnh->dnh_dnode;
991
	/*
	 * Wait for final references to the dnode to clear.  This can
	 * only happen if the ARC is asynchronously evicting state that
	 * has a hold on this dnode while we are trying to evict this
	 * dnode.
	 */
998	while (refcount_count(&dn->dn_holds) > 0)
999		delay(1);
1000	ASSERT(dn->dn_dbuf == NULL ||
1001	    dmu_buf_get_user(&dn->dn_dbuf->db) == NULL);
1002	zrl_add(&dnh->dnh_zrlock);
1003	dnode_destroy(dn); /* implicit zrl_remove() */
1004	zrl_destroy(&dnh->dnh_zrlock);
1005	dnh->dnh_dnode = NULL;
1006}
1007
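/*
 * Open one of the objset's embedded ("special") dnodes, e.g. the meta dnode
 * or the user/group used dnodes, and wrap it in the given handle.  Special
 * dnodes have no parent dbuf.
 */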
1008void
1009dnode_special_open(objset_t *os, dnode_phys_t *dnp, uint64_t object,
1010    dnode_handle_t *dnh)
1011{
1012	dnode_t *dn;
1013
1014	dn = dnode_create(os, dnp, NULL, object, dnh);
1015	zrl_init(&dnh->dnh_zrlock);
1016	DNODE_VERIFY(dn);
1017}
1018
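/*
 * Async eviction callback for a dbuf containing an array of dnodes: destroy
 * every remaining child dnode (none of which may still be held) and free
 * the handle array.
 */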
1019static void
1020dnode_buf_evict_async(void *dbu)
1021{
1022	dnode_children_t *children_dnodes = dbu;
1023	int i;
1024
1025	for (i = 0; i < children_dnodes->dnc_count; i++) {
1026		dnode_handle_t *dnh = &children_dnodes->dnc_children[i];
1027		dnode_t *dn;
1028
1029		/*
1030		 * The dnode handle lock guards against the dnode moving to
1031		 * another valid address, so there is no need here to guard
1032		 * against changes to or from NULL.
1033		 */
1034		if (dnh->dnh_dnode == NULL) {
1035			zrl_destroy(&dnh->dnh_zrlock);
1036			continue;
1037		}
1038
1039		zrl_add(&dnh->dnh_zrlock);
1040		dn = dnh->dnh_dnode;
1041		/*
1042		 * If there are holds on this dnode, then there should
1043		 * be holds on the dnode's containing dbuf as well; thus
1044		 * it wouldn't be eligible for eviction and this function
1045		 * would not have been called.
1046		 */
1047		ASSERT(refcount_is_zero(&dn->dn_holds));
1048		ASSERT(refcount_is_zero(&dn->dn_tx_holds));
1049
1050		dnode_destroy(dn); /* implicit zrl_remove() */
1051		zrl_destroy(&dnh->dnh_zrlock);
1052		dnh->dnh_dnode = NULL;
1053	}
1054	kmem_free(children_dnodes, sizeof (dnode_children_t) +
1055	    children_dnodes->dnc_count * sizeof (dnode_handle_t));
1056}
1057
1058/*
1059 * errors:
1060 * EINVAL - invalid object number.
1061 * EIO - i/o error.
1062 * succeeds even for free dnodes.
1063 */
1064int
1065dnode_hold_impl(objset_t *os, uint64_t object, int flag,
1066    void *tag, dnode_t **dnp)
1067{
1068	int epb, idx, err;
1069	int drop_struct_lock = FALSE;
1070	int type;
1071	uint64_t blk;
1072	dnode_t *mdn, *dn;
1073	dmu_buf_impl_t *db;
1074	dnode_children_t *children_dnodes;
1075	dnode_handle_t *dnh;
1076
1077	/*
1078	 * If you are holding the spa config lock as writer, you shouldn't
1079	 * be asking the DMU to do *anything* unless it's the root pool
1080	 * which may require us to read from the root filesystem while
1081	 * holding some (not all) of the locks as writer.
1082	 */
1083	ASSERT(spa_config_held(os->os_spa, SCL_ALL, RW_WRITER) == 0 ||
1084	    (spa_is_root(os->os_spa) &&
1085	    spa_config_held(os->os_spa, SCL_STATE, RW_WRITER)));
1086
1087	if (object == DMU_USERUSED_OBJECT || object == DMU_GROUPUSED_OBJECT) {
1088		dn = (object == DMU_USERUSED_OBJECT) ?
1089		    DMU_USERUSED_DNODE(os) : DMU_GROUPUSED_DNODE(os);
1090		if (dn == NULL)
1091			return (SET_ERROR(ENOENT));
1092		type = dn->dn_type;
1093		if ((flag & DNODE_MUST_BE_ALLOCATED) && type == DMU_OT_NONE)
1094			return (SET_ERROR(ENOENT));
1095		if ((flag & DNODE_MUST_BE_FREE) && type != DMU_OT_NONE)
1096			return (SET_ERROR(EEXIST));
1097		DNODE_VERIFY(dn);
1098		(void) refcount_add(&dn->dn_holds, tag);
1099		*dnp = dn;
1100		return (0);
1101	}
1102
1103	if (object == 0 || object >= DN_MAX_OBJECT)
1104		return (SET_ERROR(EINVAL));
1105
1106	mdn = DMU_META_DNODE(os);
1107	ASSERT(mdn->dn_object == DMU_META_DNODE_OBJECT);
1108
1109	DNODE_VERIFY(mdn);
1110
1111	if (!RW_WRITE_HELD(&mdn->dn_struct_rwlock)) {
1112		rw_enter(&mdn->dn_struct_rwlock, RW_READER);
1113		drop_struct_lock = TRUE;
1114	}
1115
1116	blk = dbuf_whichblock(mdn, 0, object * sizeof (dnode_phys_t));
1117
1118	db = dbuf_hold(mdn, blk, FTAG);
1119	if (drop_struct_lock)
1120		rw_exit(&mdn->dn_struct_rwlock);
1121	if (db == NULL)
1122		return (SET_ERROR(EIO));
1123	err = dbuf_read(db, NULL, DB_RF_CANFAIL);
1124	if (err) {
1125		dbuf_rele(db, FTAG);
1126		return (err);
1127	}
1128
1129	ASSERT3U(db->db.db_size, >=, 1<<DNODE_SHIFT);
1130	epb = db->db.db_size >> DNODE_SHIFT;
1131
1132	idx = object & (epb-1);
1133
1134	ASSERT(DB_DNODE(db)->dn_type == DMU_OT_DNODE);
1135	children_dnodes = dmu_buf_get_user(&db->db);
1136	if (children_dnodes == NULL) {
1137		int i;
1138		dnode_children_t *winner;
1139		children_dnodes = kmem_zalloc(sizeof (dnode_children_t) +
1140		    epb * sizeof (dnode_handle_t), KM_SLEEP);
1141		children_dnodes->dnc_count = epb;
1142		dnh = &children_dnodes->dnc_children[0];
1143		for (i = 0; i < epb; i++) {
1144			zrl_init(&dnh[i].dnh_zrlock);
1145		}
1146		dmu_buf_init_user(&children_dnodes->dnc_dbu, NULL,
1147		    dnode_buf_evict_async, NULL);
1148		winner = dmu_buf_set_user(&db->db, &children_dnodes->dnc_dbu);
		if (winner != NULL) {
			for (i = 0; i < epb; i++) {
				zrl_destroy(&dnh[i].dnh_zrlock);
			}
			kmem_free(children_dnodes, sizeof (dnode_children_t) +
			    epb * sizeof (dnode_handle_t));
			children_dnodes = winner;
		}
1159	}
1160	ASSERT(children_dnodes->dnc_count == epb);
1161
1162	dnh = &children_dnodes->dnc_children[idx];
1163	zrl_add(&dnh->dnh_zrlock);
1164	dn = dnh->dnh_dnode;
1165	if (dn == NULL) {
1166		dnode_phys_t *phys = (dnode_phys_t *)db->db.db_data+idx;
1167
1168		dn = dnode_create(os, phys, db, object, dnh);
1169	}
1170
1171	mutex_enter(&dn->dn_mtx);
1172	type = dn->dn_type;
1173	if (dn->dn_free_txg ||
1174	    ((flag & DNODE_MUST_BE_ALLOCATED) && type == DMU_OT_NONE) ||
1175	    ((flag & DNODE_MUST_BE_FREE) &&
1176	    (type != DMU_OT_NONE || !refcount_is_zero(&dn->dn_holds)))) {
1177		mutex_exit(&dn->dn_mtx);
1178		zrl_remove(&dnh->dnh_zrlock);
1179		dbuf_rele(db, FTAG);
		return (SET_ERROR(type == DMU_OT_NONE ? ENOENT : EEXIST));
1181	}
1182	if (refcount_add(&dn->dn_holds, tag) == 1)
1183		dbuf_add_ref(db, dnh);
1184	mutex_exit(&dn->dn_mtx);
1185
1186	/* Now we can rely on the hold to prevent the dnode from moving. */
1187	zrl_remove(&dnh->dnh_zrlock);
1188
1189	DNODE_VERIFY(dn);
1190	ASSERT3P(dn->dn_dbuf, ==, db);
1191	ASSERT3U(dn->dn_object, ==, object);
1192	dbuf_rele(db, FTAG);
1193
1194	*dnp = dn;
1195	return (0);
1196}
1197
/*
 * Return a held dnode in *dnp if the object is allocated, or an error if it
 * is not (ENOENT for a free object).
 */
1201int
1202dnode_hold(objset_t *os, uint64_t object, void *tag, dnode_t **dnp)
1203{
1204	return (dnode_hold_impl(os, object, DNODE_MUST_BE_ALLOCATED, tag, dnp));
1205}
1206
1207/*
1208 * Can only add a reference if there is already at least one
1209 * reference on the dnode.  Returns FALSE if unable to add a
1210 * new reference.
1211 */
1212boolean_t
1213dnode_add_ref(dnode_t *dn, void *tag)
1214{
1215	mutex_enter(&dn->dn_mtx);
1216	if (refcount_is_zero(&dn->dn_holds)) {
1217		mutex_exit(&dn->dn_mtx);
1218		return (FALSE);
1219	}
1220	VERIFY(1 < refcount_add(&dn->dn_holds, tag));
1221	mutex_exit(&dn->dn_mtx);
1222	return (TRUE);
1223}
1224
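/*
 * Release a hold on the dnode; if this was the last hold and the dnode has
 * a parent dbuf, the dnode's hold on that dbuf is dropped as well.
 * dnode_rele_and_unlock() is the same but is called with dn_mtx held.
 */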
1225void
1226dnode_rele(dnode_t *dn, void *tag)
1227{
1228	mutex_enter(&dn->dn_mtx);
1229	dnode_rele_and_unlock(dn, tag);
1230}
1231
1232void
1233dnode_rele_and_unlock(dnode_t *dn, void *tag)
1234{
1235	uint64_t refs;
1236	/* Get while the hold prevents the dnode from moving. */
1237	dmu_buf_impl_t *db = dn->dn_dbuf;
1238	dnode_handle_t *dnh = dn->dn_handle;
1239
1240	refs = refcount_remove(&dn->dn_holds, tag);
1241	mutex_exit(&dn->dn_mtx);
1242
1243	/*
1244	 * It's unsafe to release the last hold on a dnode by dnode_rele() or
1245	 * indirectly by dbuf_rele() while relying on the dnode handle to
1246	 * prevent the dnode from moving, since releasing the last hold could
1247	 * result in the dnode's parent dbuf evicting its dnode handles. For
1248	 * that reason anyone calling dnode_rele() or dbuf_rele() without some
1249	 * other direct or indirect hold on the dnode must first drop the dnode
1250	 * handle.
1251	 */
1252	ASSERT(refs > 0 || dnh->dnh_zrlock.zr_owner != curthread);
1253
	/* NOTE: special dnodes (e.g. the meta dnode) do not have a dn_dbuf */
1255	if (refs == 0 && db != NULL) {
1256		/*
1257		 * Another thread could add a hold to the dnode handle in
1258		 * dnode_hold_impl() while holding the parent dbuf. Since the
1259		 * hold on the parent dbuf prevents the handle from being
1260		 * destroyed, the hold on the handle is OK. We can't yet assert
1261		 * that the handle has zero references, but that will be
1262		 * asserted anyway when the handle gets destroyed.
1263		 */
1264		dbuf_rele(db, dnh);
1265	}
1266}
1267
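/*
 * Mark the dnode dirty in the given transaction: place it on the objset's
 * dirty (or, if it is being freed, free) list for the txg, take a "dirty
 * hold" so it stays around until syncing context processes it, and dirty
 * its containing dbuf and dataset.  Special dnodes just dirty the dataset.
 */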
1268void
1269dnode_setdirty(dnode_t *dn, dmu_tx_t *tx)
1270{
1271	objset_t *os = dn->dn_objset;
1272	uint64_t txg = tx->tx_txg;
1273
1274	if (DMU_OBJECT_IS_SPECIAL(dn->dn_object)) {
1275		dsl_dataset_dirty(os->os_dsl_dataset, tx);
1276		return;
1277	}
1278
1279	DNODE_VERIFY(dn);
1280
1281#ifdef ZFS_DEBUG
1282	mutex_enter(&dn->dn_mtx);
1283	ASSERT(dn->dn_phys->dn_type || dn->dn_allocated_txg);
1284	ASSERT(dn->dn_free_txg == 0 || dn->dn_free_txg >= txg);
1285	mutex_exit(&dn->dn_mtx);
1286#endif
1287
1288	/*
1289	 * Determine old uid/gid when necessary
1290	 */
1291	dmu_objset_userquota_get_ids(dn, B_TRUE, tx);
1292
1293	mutex_enter(&os->os_lock);
1294
1295	/*
1296	 * If we are already marked dirty, we're done.
1297	 */
1298	if (list_link_active(&dn->dn_dirty_link[txg & TXG_MASK])) {
1299		mutex_exit(&os->os_lock);
1300		return;
1301	}
1302
1303	ASSERT(!refcount_is_zero(&dn->dn_holds) ||
1304	    !avl_is_empty(&dn->dn_dbufs));
1305	ASSERT(dn->dn_datablksz != 0);
1306	ASSERT0(dn->dn_next_bonuslen[txg&TXG_MASK]);
1307	ASSERT0(dn->dn_next_blksz[txg&TXG_MASK]);
1308	ASSERT0(dn->dn_next_bonustype[txg&TXG_MASK]);
1309
1310	dprintf_ds(os->os_dsl_dataset, "obj=%llu txg=%llu\n",
1311	    dn->dn_object, txg);
1312
1313	if (dn->dn_free_txg > 0 && dn->dn_free_txg <= txg) {
1314		list_insert_tail(&os->os_free_dnodes[txg&TXG_MASK], dn);
1315	} else {
1316		list_insert_tail(&os->os_dirty_dnodes[txg&TXG_MASK], dn);
1317	}
1318
1319	mutex_exit(&os->os_lock);
1320
1321	/*
1322	 * The dnode maintains a hold on its containing dbuf as
1323	 * long as there are holds on it.  Each instantiated child
1324	 * dbuf maintains a hold on the dnode.  When the last child
1325	 * drops its hold, the dnode will drop its hold on the
1326	 * containing dbuf. We add a "dirty hold" here so that the
1327	 * dnode will hang around after we finish processing its
1328	 * children.
1329	 */
1330	VERIFY(dnode_add_ref(dn, (void *)(uintptr_t)tx->tx_txg));
1331
1332	(void) dbuf_dirty(dn->dn_dbuf, tx);
1333
1334	dsl_dataset_dirty(os->os_dsl_dataset, tx);
1335}
1336
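/*
 * Mark the dnode as freed as of the given transaction.  If it is already
 * dirty in this txg it is moved from the dirty list to the free list;
 * otherwise dnode_setdirty() places it on the free list directly.
 */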
1337void
1338dnode_free(dnode_t *dn, dmu_tx_t *tx)
1339{
1340	int txgoff = tx->tx_txg & TXG_MASK;
1341
1342	dprintf("dn=%p txg=%llu\n", dn, tx->tx_txg);
1343
1344	/* we should be the only holder... hopefully */
1345	/* ASSERT3U(refcount_count(&dn->dn_holds), ==, 1); */
1346
1347	mutex_enter(&dn->dn_mtx);
1348	if (dn->dn_type == DMU_OT_NONE || dn->dn_free_txg) {
1349		mutex_exit(&dn->dn_mtx);
1350		return;
1351	}
1352	dn->dn_free_txg = tx->tx_txg;
1353	mutex_exit(&dn->dn_mtx);
1354
1355	/*
1356	 * If the dnode is already dirty, it needs to be moved from
1357	 * the dirty list to the free list.
1358	 */
1359	mutex_enter(&dn->dn_objset->os_lock);
1360	if (list_link_active(&dn->dn_dirty_link[txgoff])) {
1361		list_remove(&dn->dn_objset->os_dirty_dnodes[txgoff], dn);
1362		list_insert_tail(&dn->dn_objset->os_free_dnodes[txgoff], dn);
1363		mutex_exit(&dn->dn_objset->os_lock);
1364	} else {
1365		mutex_exit(&dn->dn_objset->os_lock);
1366		dnode_setdirty(dn, tx);
1367	}
1368}
1369
/*
 * Try to change the block size for the indicated dnode.  This can only
 * succeed if there are no blocks allocated or dirty beyond the first block.
 */
1374int
1375dnode_set_blksz(dnode_t *dn, uint64_t size, int ibs, dmu_tx_t *tx)
1376{
1377	dmu_buf_impl_t *db;
1378	int err;
1379
1380	ASSERT3U(size, <=, spa_maxblocksize(dmu_objset_spa(dn->dn_objset)));
1381	if (size == 0)
1382		size = SPA_MINBLOCKSIZE;
1383	else
1384		size = P2ROUNDUP(size, SPA_MINBLOCKSIZE);
1385
1386	if (ibs == dn->dn_indblkshift)
1387		ibs = 0;
1388
1389	if (size >> SPA_MINBLOCKSHIFT == dn->dn_datablkszsec && ibs == 0)
1390		return (0);
1391
1392	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
1393
1394	/* Check for any allocated blocks beyond the first */
1395	if (dn->dn_maxblkid != 0)
1396		goto fail;
1397
1398	mutex_enter(&dn->dn_dbufs_mtx);
1399	for (db = avl_first(&dn->dn_dbufs); db != NULL;
1400	    db = AVL_NEXT(&dn->dn_dbufs, db)) {
1401		if (db->db_blkid != 0 && db->db_blkid != DMU_BONUS_BLKID &&
1402		    db->db_blkid != DMU_SPILL_BLKID) {
1403			mutex_exit(&dn->dn_dbufs_mtx);
1404			goto fail;
1405		}
1406	}
1407	mutex_exit(&dn->dn_dbufs_mtx);
1408
1409	if (ibs && dn->dn_nlevels != 1)
1410		goto fail;
1411
1412	/* resize the old block */
1413	err = dbuf_hold_impl(dn, 0, 0, TRUE, FALSE, FTAG, &db);
1414	if (err == 0)
1415		dbuf_new_size(db, size, tx);
1416	else if (err != ENOENT)
1417		goto fail;
1418
1419	dnode_setdblksz(dn, size);
1420	dnode_setdirty(dn, tx);
1421	dn->dn_next_blksz[tx->tx_txg&TXG_MASK] = size;
1422	if (ibs) {
1423		dn->dn_indblkshift = ibs;
1424		dn->dn_next_indblkshift[tx->tx_txg&TXG_MASK] = ibs;
1425	}
1426	/* rele after we have fixed the blocksize in the dnode */
1427	if (db)
1428		dbuf_rele(db, FTAG);
1429
1430	rw_exit(&dn->dn_struct_rwlock);
1431	return (0);
1432
1433fail:
1434	rw_exit(&dn->dn_struct_rwlock);
1435	return (SET_ERROR(ENOTSUP));
1436}
1437
/*
 * Grow dn_maxblkid (and, if necessary, dn_nlevels) to cover the given block
 * id.  Read-holding callers must not rely on the lock being continuously
 * held.
 */
1439void
1440dnode_new_blkid(dnode_t *dn, uint64_t blkid, dmu_tx_t *tx, boolean_t have_read)
1441{
1442	uint64_t txgoff = tx->tx_txg & TXG_MASK;
1443	int epbs, new_nlevels;
1444	uint64_t sz;
1445
1446	ASSERT(blkid != DMU_BONUS_BLKID);
1447
1448	ASSERT(have_read ?
1449	    RW_READ_HELD(&dn->dn_struct_rwlock) :
1450	    RW_WRITE_HELD(&dn->dn_struct_rwlock));
1451
1452	/*
1453	 * if we have a read-lock, check to see if we need to do any work
1454	 * before upgrading to a write-lock.
1455	 */
1456	if (have_read) {
1457		if (blkid <= dn->dn_maxblkid)
1458			return;
1459
1460		if (!rw_tryupgrade(&dn->dn_struct_rwlock)) {
1461			rw_exit(&dn->dn_struct_rwlock);
1462			rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
1463		}
1464	}
1465
1466	if (blkid <= dn->dn_maxblkid)
1467		goto out;
1468
1469	dn->dn_maxblkid = blkid;
1470
1471	/*
1472	 * Compute the number of levels necessary to support the new maxblkid.
1473	 */
1474	new_nlevels = 1;
1475	epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
1476	for (sz = dn->dn_nblkptr;
1477	    sz <= blkid && sz >= dn->dn_nblkptr; sz <<= epbs)
1478		new_nlevels++;
1479
1480	if (new_nlevels > dn->dn_nlevels) {
1481		int old_nlevels = dn->dn_nlevels;
1482		dmu_buf_impl_t *db;
1483		list_t *list;
1484		dbuf_dirty_record_t *new, *dr, *dr_next;
1485
1486		dn->dn_nlevels = new_nlevels;
1487
1488		ASSERT3U(new_nlevels, >, dn->dn_next_nlevels[txgoff]);
1489		dn->dn_next_nlevels[txgoff] = new_nlevels;
1490
1491		/* dirty the left indirects */
1492		db = dbuf_hold_level(dn, old_nlevels, 0, FTAG);
1493		ASSERT(db != NULL);
1494		new = dbuf_dirty(db, tx);
1495		dbuf_rele(db, FTAG);
1496
1497		/* transfer the dirty records to the new indirect */
1498		mutex_enter(&dn->dn_mtx);
1499		mutex_enter(&new->dt.di.dr_mtx);
1500		list = &dn->dn_dirty_records[txgoff];
1501		for (dr = list_head(list); dr; dr = dr_next) {
1502			dr_next = list_next(&dn->dn_dirty_records[txgoff], dr);
1503			if (dr->dr_dbuf->db_level != new_nlevels-1 &&
1504			    dr->dr_dbuf->db_blkid != DMU_BONUS_BLKID &&
1505			    dr->dr_dbuf->db_blkid != DMU_SPILL_BLKID) {
1506				ASSERT(dr->dr_dbuf->db_level == old_nlevels-1);
1507				list_remove(&dn->dn_dirty_records[txgoff], dr);
1508				list_insert_tail(&new->dt.di.dr_children, dr);
1509				dr->dr_parent = new;
1510			}
1511		}
1512		mutex_exit(&new->dt.di.dr_mtx);
1513		mutex_exit(&dn->dn_mtx);
1514	}
1515
1516out:
1517	if (have_read)
1518		rw_downgrade(&dn->dn_struct_rwlock);
1519}
1520
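/*
 * Dirty the level-1 indirect block covering the given L1 block id, if it
 * exists.
 */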
1521static void
1522dnode_dirty_l1(dnode_t *dn, uint64_t l1blkid, dmu_tx_t *tx)
1523{
1524	dmu_buf_impl_t *db = dbuf_hold_level(dn, 1, l1blkid, FTAG);
1525	if (db != NULL) {
1526		dmu_buf_will_dirty(&db->db, tx);
1527		dbuf_rele(db, FTAG);
1528	}
1529}
1530
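/*
 * Free the given byte range of the object: zero the freed portion of any
 * partial first and last blocks in place, record wholly freed blocks in the
 * per-txg free range tree, and dirty the covering level-1 indirect blocks
 * so the frees are completed in syncing context.
 */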
1531void
1532dnode_free_range(dnode_t *dn, uint64_t off, uint64_t len, dmu_tx_t *tx)
1533{
1534	dmu_buf_impl_t *db;
1535	uint64_t blkoff, blkid, nblks;
1536	int blksz, blkshift, head, tail;
1537	int trunc = FALSE;
1538	int epbs;
1539
1540	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
1541	blksz = dn->dn_datablksz;
1542	blkshift = dn->dn_datablkshift;
1543	epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
1544
1545	if (len == DMU_OBJECT_END) {
1546		len = UINT64_MAX - off;
1547		trunc = TRUE;
1548	}
1549
1550	/*
1551	 * First, block align the region to free:
1552	 */
1553	if (ISP2(blksz)) {
1554		head = P2NPHASE(off, blksz);
1555		blkoff = P2PHASE(off, blksz);
1556		if ((off >> blkshift) > dn->dn_maxblkid)
1557			goto out;
1558	} else {
1559		ASSERT(dn->dn_maxblkid == 0);
1560		if (off == 0 && len >= blksz) {
1561			/*
1562			 * Freeing the whole block; fast-track this request.
1563			 * Note that we won't dirty any indirect blocks,
1564			 * which is fine because we will be freeing the entire
1565			 * file and thus all indirect blocks will be freed
1566			 * by free_children().
1567			 */
1568			blkid = 0;
1569			nblks = 1;
1570			goto done;
1571		} else if (off >= blksz) {
1572			/* Freeing past end-of-data */
1573			goto out;
1574		} else {
1575			/* Freeing part of the block. */
1576			head = blksz - off;
1577			ASSERT3U(head, >, 0);
1578		}
1579		blkoff = off;
1580	}
1581	/* zero out any partial block data at the start of the range */
1582	if (head) {
1583		ASSERT3U(blkoff + head, ==, blksz);
1584		if (len < head)
1585			head = len;
1586		if (dbuf_hold_impl(dn, 0, dbuf_whichblock(dn, 0, off),
1587		    TRUE, FALSE, FTAG, &db) == 0) {
1588			caddr_t data;
1589
1590			/* don't dirty if it isn't on disk and isn't dirty */
1591			if (db->db_last_dirty ||
1592			    (db->db_blkptr && !BP_IS_HOLE(db->db_blkptr))) {
1593				rw_exit(&dn->dn_struct_rwlock);
1594				dmu_buf_will_dirty(&db->db, tx);
1595				rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
1596				data = db->db.db_data;
1597				bzero(data + blkoff, head);
1598			}
1599			dbuf_rele(db, FTAG);
1600		}
1601		off += head;
1602		len -= head;
1603	}
1604
1605	/* If the range was less than one block, we're done */
1606	if (len == 0)
1607		goto out;
1608
1609	/* If the remaining range is past end of file, we're done */
1610	if ((off >> blkshift) > dn->dn_maxblkid)
1611		goto out;
1612
1613	ASSERT(ISP2(blksz));
1614	if (trunc)
1615		tail = 0;
1616	else
1617		tail = P2PHASE(len, blksz);
1618
1619	ASSERT0(P2PHASE(off, blksz));
1620	/* zero out any partial block data at the end of the range */
1621	if (tail) {
1622		if (len < tail)
1623			tail = len;
1624		if (dbuf_hold_impl(dn, 0, dbuf_whichblock(dn, 0, off+len),
1625		    TRUE, FALSE, FTAG, &db) == 0) {
1626			/* don't dirty if not on disk and not dirty */
1627			if (db->db_last_dirty ||
1628			    (db->db_blkptr && !BP_IS_HOLE(db->db_blkptr))) {
1629				rw_exit(&dn->dn_struct_rwlock);
1630				dmu_buf_will_dirty(&db->db, tx);
1631				rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
1632				bzero(db->db.db_data, tail);
1633			}
1634			dbuf_rele(db, FTAG);
1635		}
1636		len -= tail;
1637	}
1638
1639	/* If the range did not include a full block, we are done */
1640	if (len == 0)
1641		goto out;
1642
1643	ASSERT(IS_P2ALIGNED(off, blksz));
1644	ASSERT(trunc || IS_P2ALIGNED(len, blksz));
1645	blkid = off >> blkshift;
1646	nblks = len >> blkshift;
1647	if (trunc)
1648		nblks += 1;
1649
1650	/*
1651	 * Dirty all the indirect blocks in this range.  Note that only
1652	 * the first and last indirect blocks can actually be written
1653	 * (if they were partially freed) -- they must be dirtied, even if
1654	 * they do not exist on disk yet.  The interior blocks will
1655	 * be freed by free_children(), so they will not actually be written.
1656	 * Even though these interior blocks will not be written, we
1657	 * dirty them for two reasons:
1658	 *
1659	 *  - It ensures that the indirect blocks remain in memory until
1660	 *    syncing context.  (They have already been prefetched by
1661	 *    dmu_tx_hold_free(), so we don't have to worry about reading
1662	 *    them serially here.)
1663	 *
1664	 *  - The dirty space accounting will put pressure on the txg sync
1665	 *    mechanism to begin syncing, and to delay transactions if there
1666	 *    is a large amount of freeing.  Even though these indirect
1667	 *    blocks will not be written, we could need to write the same
1668	 *    amount of space if we copy the freed BPs into deadlists.
1669	 */
1670	if (dn->dn_nlevels > 1) {
1671		uint64_t first, last;
1672
1673		first = blkid >> epbs;
1674		dnode_dirty_l1(dn, first, tx);
1675		if (trunc)
1676			last = dn->dn_maxblkid >> epbs;
1677		else
1678			last = (blkid + nblks - 1) >> epbs;
1679		if (last != first)
1680			dnode_dirty_l1(dn, last, tx);
1681
1682		int shift = dn->dn_datablkshift + dn->dn_indblkshift -
1683		    SPA_BLKPTRSHIFT;
1684		for (uint64_t i = first + 1; i < last; i++) {
1685			/*
1686			 * Set i to the blockid of the next non-hole
1687			 * level-1 indirect block at or after i.  Note
1688			 * that dnode_next_offset() operates in terms of
1689			 * level-0-equivalent bytes.
1690			 */
1691			uint64_t ibyte = i << shift;
1692			int err = dnode_next_offset(dn, DNODE_FIND_HAVELOCK,
1693			    &ibyte, 2, 1, 0);
1694			i = ibyte >> shift;
1695			if (i >= last)
1696				break;
1697
1698			/*
1699			 * Normally we should not see an error, either
1700			 * from dnode_next_offset() or dbuf_hold_level()
1701			 * (except for ESRCH from dnode_next_offset).
1702			 * If there is an i/o error, then when we read
1703			 * this block in syncing context, it will use
1704			 * ZIO_FLAG_MUSTSUCCEED, and thus hang/panic according
1705			 * to the "failmode" property.  dnode_next_offset()
1706			 * doesn't have a flag to indicate MUSTSUCCEED.
1707			 */
1708			if (err != 0)
1709				break;
1710
1711			dnode_dirty_l1(dn, i, tx);
1712		}
1713	}
1714
1715done:
1716	/*
1717	 * Add this range to the dnode range list.
1718	 * We will finish up this free operation in the syncing phase.
1719	 */
1720	mutex_enter(&dn->dn_mtx);
1721	int txgoff = tx->tx_txg & TXG_MASK;
1722	if (dn->dn_free_ranges[txgoff] == NULL) {
1723		dn->dn_free_ranges[txgoff] =
1724		    range_tree_create(NULL, NULL, &dn->dn_mtx);
1725	}
1726	range_tree_clear(dn->dn_free_ranges[txgoff], blkid, nblks);
1727	range_tree_add(dn->dn_free_ranges[txgoff], blkid, nblks);
1728	dprintf_dnode(dn, "blkid=%llu nblks=%llu txg=%llu\n",
1729	    blkid, nblks, tx->tx_txg);
1730	mutex_exit(&dn->dn_mtx);
1731
1732	dbuf_free_range(dn, blkid, blkid + nblks - 1, tx);
1733	dnode_setdirty(dn, tx);
1734out:
1735
1736	rw_exit(&dn->dn_struct_rwlock);
1737}
1738
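/*
 * Return B_TRUE if removal of the spill block is pending in any in-flight
 * txg.
 */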
1739static boolean_t
1740dnode_spill_freed(dnode_t *dn)
1741{
1742	int i;
1743
1744	mutex_enter(&dn->dn_mtx);
1745	for (i = 0; i < TXG_SIZE; i++) {
1746		if (dn->dn_rm_spillblk[i] == DN_KILL_SPILLBLK)
1747			break;
1748	}
1749	mutex_exit(&dn->dn_mtx);
1750	return (i < TXG_SIZE);
1751}
1752
1753/* return TRUE if this blkid was freed in a recent txg, or FALSE if it wasn't */
1754uint64_t
1755dnode_block_freed(dnode_t *dn, uint64_t blkid)
1756{
1757	void *dp = spa_get_dsl(dn->dn_objset->os_spa);
1758	int i;
1759
1760	if (blkid == DMU_BONUS_BLKID)
1761		return (FALSE);
1762
1763	/*
1764	 * If we're in the process of opening the pool, dp will not be
1765	 * set yet, but there shouldn't be anything dirty.
1766	 */
1767	if (dp == NULL)
1768		return (FALSE);
1769
1770	if (dn->dn_free_txg)
1771		return (TRUE);
1772
1773	if (blkid == DMU_SPILL_BLKID)
1774		return (dnode_spill_freed(dn));
1775
1776	mutex_enter(&dn->dn_mtx);
1777	for (i = 0; i < TXG_SIZE; i++) {
1778		if (dn->dn_free_ranges[i] != NULL &&
1779		    range_tree_contains(dn->dn_free_ranges[i], blkid, 1))
1780			break;
1781	}
1782	mutex_exit(&dn->dn_mtx);
1783	return (i < TXG_SIZE);
1784}
1785
1786/* call from syncing context when we actually write/free space for this dnode */
1787void
1788dnode_diduse_space(dnode_t *dn, int64_t delta)
1789{
1790	uint64_t space;
1791	dprintf_dnode(dn, "dn=%p dnp=%p used=%llu delta=%lld\n",
1792	    dn, dn->dn_phys,
1793	    (u_longlong_t)dn->dn_phys->dn_used,
1794	    (longlong_t)delta);
1795
1796	mutex_enter(&dn->dn_mtx);
1797	space = DN_USED_BYTES(dn->dn_phys);
1798	if (delta > 0) {
1799		ASSERT3U(space + delta, >=, space); /* no overflow */
1800	} else {
1801		ASSERT3U(space, >=, -delta); /* no underflow */
1802	}
1803	space += delta;
	if (spa_version(dn->dn_objset->os_spa) < SPA_VERSION_DNODE_BYTES) {
		ASSERT((dn->dn_phys->dn_flags & DNODE_FLAG_USED_BYTES) == 0);
		ASSERT0(P2PHASE(space, 1<<DEV_BSHIFT));
		dn->dn_phys->dn_used = space >> DEV_BSHIFT;
	} else {
		dn->dn_phys->dn_used = space;
		dn->dn_phys->dn_flags |= DNODE_FLAG_USED_BYTES;
	}
	mutex_exit(&dn->dn_mtx);
}

/*
 * Call when we think we're going to write/free space in open context to track
 * the amount of memory in use by the currently open txg.
 */
void
dnode_willuse_space(dnode_t *dn, int64_t space, dmu_tx_t *tx)
{
	objset_t *os = dn->dn_objset;
	dsl_dataset_t *ds = os->os_dsl_dataset;
	int64_t aspace = spa_get_asize(os->os_spa, space);

	if (ds != NULL) {
		dsl_dir_willuse_space(ds->ds_dir, aspace, tx);
		dsl_pool_dirty_space(dmu_tx_pool(tx), space, tx);
	}

	dmu_tx_willuse_space(tx, aspace);
}

/*
 * Scans a block at the indicated "level" looking for a hole or data,
 * depending on 'flags'.
 *
 * If level > 0, then we are scanning an indirect block looking at its
 * pointers.  If level == 0, then we are looking at a block of dnodes.
 *
 * If we don't find what we are looking for in the block, we return ESRCH.
 * Otherwise, return with *offset pointing to the beginning (if searching
 * forwards) or end (if searching backwards) of the range covered by the
 * block pointer we matched on (or dnode).
 *
 * The basic search algorithm used below by dnode_next_offset() is to
 * use this function to search up the block tree (widen the search) until
 * we find something (i.e., we don't return ESRCH) and then search back
 * down the tree (narrow the search) until we reach our original search
 * level.
 */
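/*
 * For example (a hypothetical walk, not traced from real data): a hole
 * search that starts at level 1 and finds no hole in the current L1
 * block returns ESRCH, so dnode_next_offset() retries at level 2; once
 * it finds an L2 pointer that is not completely full, *offset is moved
 * under that pointer and the search drops back to level 1 to pin down
 * the actual hole.
 */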
static int
dnode_next_offset_level(dnode_t *dn, int flags, uint64_t *offset,
    int lvl, uint64_t blkfill, uint64_t txg)
{
	dmu_buf_impl_t *db = NULL;
	void *data = NULL;
	uint64_t epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
	uint64_t epb = 1ULL << epbs;
	uint64_t minfill, maxfill;
	boolean_t hole;
	int i, inc, error, span;

	dprintf("probing object %llu offset %llx level %d of %u\n",
	    dn->dn_object, *offset, lvl, dn->dn_phys->dn_nlevels);

	hole = ((flags & DNODE_FIND_HOLE) != 0);
	inc = (flags & DNODE_FIND_BACKWARDS) ? -1 : 1;
	ASSERT(txg == 0 || !hole);

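	/*
	 * At the top level there is no indirect dbuf to hold; scan the
	 * dn_nblkptr block pointers embedded in the dnode itself.
	 */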
	if (lvl == dn->dn_phys->dn_nlevels) {
		error = 0;
		epb = dn->dn_phys->dn_nblkptr;
		data = dn->dn_phys->dn_blkptr;
	} else {
		uint64_t blkid = dbuf_whichblock(dn, lvl, *offset);
		error = dbuf_hold_impl(dn, lvl, blkid, TRUE, FALSE, FTAG, &db);
		if (error) {
			if (error != ENOENT)
				return (error);
			if (hole)
				return (0);
			/*
			 * This can only happen when we are searching up
			 * the block tree for data.  We don't really need to
			 * adjust the offset, as we will just end up looking
			 * at the pointer to this block in its parent, and it's
			 * going to be unallocated, so we will skip over it.
			 */
			return (SET_ERROR(ESRCH));
		}
		error = dbuf_read(db, NULL, DB_RF_CANFAIL | DB_RF_HAVESTRUCT);
		if (error) {
			dbuf_rele(db, FTAG);
			return (error);
		}
		data = db->db.db_data;
	}

	if (db != NULL && txg != 0 && (db->db_blkptr == NULL ||
	    db->db_blkptr->blk_birth <= txg ||
	    BP_IS_HOLE(db->db_blkptr))) {
		/*
		 * This can only happen when we are searching up the tree
		 * and these conditions mean that we need to keep climbing.
		 */
		error = SET_ERROR(ESRCH);
	} else if (lvl == 0) {
		dnode_phys_t *dnp = data;
		span = DNODE_SHIFT;
		ASSERT(dn->dn_type == DMU_OT_DNODE);

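		/*
		 * Each dnode occupies 2^DNODE_SHIFT (512) bytes of the
		 * block, so step through it one dnode at a time, looking
		 * for an allocated or free slot as requested.
		 */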
		for (i = (*offset >> span) & (blkfill - 1);
		    i >= 0 && i < blkfill; i += inc) {
			if ((dnp[i].dn_type == DMU_OT_NONE) == hole)
				break;
			*offset += (1ULL << span) * inc;
		}
		if (i < 0 || i == blkfill)
			error = SET_ERROR(ESRCH);
	} else {
		blkptr_t *bp = data;
		uint64_t start = *offset;
		span = (lvl - 1) * epbs + dn->dn_datablkshift;
		minfill = 0;
		maxfill = blkfill << ((lvl - 1) * epbs);

		if (hole)
			maxfill--;
		else
			minfill++;

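		/*
		 * Illustrative numbers (assumed): for the
		 * dmu_object_alloc() case described below, lvl == 2 and
		 * blkfill == DNODES_PER_BLOCK >> 2, so with epbs = 10
		 * maxfill is blkfill << 10; after the maxfill-- above, a
		 * hole search accepts only L2 pointers whose subtree is
		 * at most about 1/4 full.
		 */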
		*offset = *offset >> span;
		for (i = BF64_GET(*offset, 0, epbs);
		    i >= 0 && i < epb; i += inc) {
			if (BP_GET_FILL(&bp[i]) >= minfill &&
			    BP_GET_FILL(&bp[i]) <= maxfill &&
			    (hole || bp[i].blk_birth > txg))
				break;
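			/*
			 * When stepping backwards, don't let the unsigned
			 * *offset wrap below zero.
			 */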
			if (inc > 0 || *offset > 0)
				*offset += inc;
		}
		*offset = *offset << span;
		if (inc < 0) {
			/* traversing backwards; position offset at the end */
			ASSERT3U(*offset, <=, start);
			*offset = MIN(*offset + (1ULL << span) - 1, start);
		} else if (*offset < start) {
			*offset = start;
		}
		if (i < 0 || i >= epb)
			error = SET_ERROR(ESRCH);
	}

	if (db)
		dbuf_rele(db, FTAG);

	return (error);
}

/*
 * Find the next hole, data, or sparse region at or after *offset.
 * The value 'blkfill' tells us how many items we expect to find
 * in an L0 data block; this value is 1 for normal objects,
 * DNODES_PER_BLOCK for the meta dnode, and some fraction of
 * DNODES_PER_BLOCK when searching for sparse regions thereof.
 *
 * Examples:
 *
 * dnode_next_offset(dn, flags, offset, 1, 1, 0);
 *	Finds the next/previous hole/data in a file.
 *	Used in dmu_offset_next().
 *
 * dnode_next_offset(mdn, flags, offset, 0, DNODES_PER_BLOCK, txg);
 *	Finds the next free/allocated dnode in an objset's meta-dnode.
 *	Only finds objects that have new contents since txg (i.e.
 *	bonus buffer changes and content removal are ignored).
 *	Used in dmu_object_next().
 *
 * dnode_next_offset(mdn, DNODE_FIND_HOLE, offset, 2, DNODES_PER_BLOCK >> 2, 0);
 *	Finds the next L2 meta-dnode bp that's at most 1/4 full.
 *	Used in dmu_object_alloc().
 */
int
dnode_next_offset(dnode_t *dn, int flags, uint64_t *offset,
    int minlvl, uint64_t blkfill, uint64_t txg)
{
	uint64_t initial_offset = *offset;
	int lvl, maxlvl;
	int error = 0;

	if (!(flags & DNODE_FIND_HAVELOCK))
		rw_enter(&dn->dn_struct_rwlock, RW_READER);

	if (dn->dn_phys->dn_nlevels == 0) {
		error = SET_ERROR(ESRCH);
		goto out;
	}

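	/*
	 * dn_datablkshift == 0 means the object has at most a single
	 * (possibly odd-sized) data block, so the only hole to find is
	 * the virtual one that starts at dn_datablksz.
	 */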
	if (dn->dn_datablkshift == 0) {
		if (*offset < dn->dn_datablksz) {
			if (flags & DNODE_FIND_HOLE)
				*offset = dn->dn_datablksz;
		} else {
			error = SET_ERROR(ESRCH);
		}
		goto out;
	}

	maxlvl = dn->dn_phys->dn_nlevels;

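	/*
	 * Widen the search: move up from minlvl until some level yields
	 * anything other than ESRCH.
	 */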
	for (lvl = minlvl; lvl <= maxlvl; lvl++) {
		error = dnode_next_offset_level(dn,
		    flags, offset, lvl, blkfill, txg);
		if (error != ESRCH)
			break;
	}

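	/*
	 * Narrow the search: walk back down toward minlvl, refining
	 * *offset at each level as long as the lower levels keep
	 * matching.
	 */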
	while (error == 0 && --lvl >= minlvl) {
		error = dnode_next_offset_level(dn,
		    flags, offset, lvl, blkfill, txg);
	}

	/*
	 * There's always a "virtual hole" at the end of the object, even
	 * if all BP's which physically exist are non-holes.
	 */
	if ((flags & DNODE_FIND_HOLE) && error == ESRCH && txg == 0 &&
	    minlvl == 1 && blkfill == 1 && !(flags & DNODE_FIND_BACKWARDS)) {
		error = 0;
	}

	if (error == 0 && (flags & DNODE_FIND_BACKWARDS ?
	    initial_offset < *offset : initial_offset > *offset))
		error = SET_ERROR(ESRCH);
out:
	if (!(flags & DNODE_FIND_HAVELOCK))
		rw_exit(&dn->dn_struct_rwlock);

	return (error);
}
