/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/dbuf.h>
#include <sys/dnode.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>
#include <sys/spa.h>

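/*
 * Grow this dnode's indirection to the level recorded in
 * dn_next_nlevels[] for this txg: copy any existing block pointers
 * from the dnode into the new top-level indirect block, reparent the
 * old top-level dbufs to that block, and zero the dnode's own
 * block pointers.
 */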
static void
dnode_increase_indirection(dnode_t *dn, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db;
	int txgoff = tx->tx_txg & TXG_MASK;
	int nblkptr = dn->dn_phys->dn_nblkptr;
	int old_toplvl = dn->dn_phys->dn_nlevels - 1;
	int new_level = dn->dn_next_nlevels[txgoff];
	int i;

	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);

	/* this dnode can't be paged out because it's dirty */
	ASSERT(dn->dn_phys->dn_type != DMU_OT_NONE);
	ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));
	ASSERT(new_level > 1 && dn->dn_phys->dn_nlevels > 0);

	db = dbuf_hold_level(dn, dn->dn_phys->dn_nlevels, 0, FTAG);
	ASSERT(db != NULL);

	dn->dn_phys->dn_nlevels = new_level;
	dprintf("os=%p obj=%llu, increase to %d\n", dn->dn_objset,
	    dn->dn_object, dn->dn_phys->dn_nlevels);

	/* check for existing blkptrs in the dnode */
	for (i = 0; i < nblkptr; i++)
		if (!BP_IS_HOLE(&dn->dn_phys->dn_blkptr[i]))
			break;
	if (i != nblkptr) {
		/* transfer dnode's block pointers to new indirect block */
		(void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED|DB_RF_HAVESTRUCT);
		ASSERT(db->db.db_data);
		ASSERT(arc_released(db->db_buf));
		ASSERT3U(sizeof (blkptr_t) * nblkptr, <=, db->db.db_size);
		bcopy(dn->dn_phys->dn_blkptr, db->db.db_data,
		    sizeof (blkptr_t) * nblkptr);
		arc_buf_freeze(db->db_buf);
	}

	/* set dbuf's parent pointers to new indirect buf */
	for (i = 0; i < nblkptr; i++) {
		dmu_buf_impl_t *child = dbuf_find(dn, old_toplvl, i);

		if (child == NULL)
			continue;
#ifdef	DEBUG
		DB_DNODE_ENTER(child);
		ASSERT3P(DB_DNODE(child), ==, dn);
		DB_DNODE_EXIT(child);
#endif	/* DEBUG */
		if (child->db_parent && child->db_parent != dn->dn_dbuf) {
			ASSERT(child->db_parent->db_level == db->db_level);
			ASSERT(child->db_blkptr !=
			    &dn->dn_phys->dn_blkptr[child->db_blkid]);
			mutex_exit(&child->db_mtx);
			continue;
		}
		ASSERT(child->db_parent == NULL ||
		    child->db_parent == dn->dn_dbuf);

		child->db_parent = db;
		dbuf_add_ref(db, child);
		if (db->db.db_data)
			child->db_blkptr = (blkptr_t *)db->db.db_data + i;
		else
			child->db_blkptr = NULL;
		dprintf_dbuf_bp(child, child->db_blkptr,
		    "changed db_blkptr to new indirect %s", "");

		mutex_exit(&child->db_mtx);
	}

	bzero(dn->dn_phys->dn_blkptr, sizeof (blkptr_t) * nblkptr);

	dbuf_rele(db, FTAG);

	rw_exit(&dn->dn_struct_rwlock);
}

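/*
 * Kill the "num" block pointers starting at "bp", charging the freed
 * space back to the dnode, and return the number of non-hole blocks
 * actually freed.
 */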
static int
free_blocks(dnode_t *dn, blkptr_t *bp, int num, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
	uint64_t bytesfreed = 0;
	int i, blocks_freed = 0;

	dprintf("ds=%p obj=%llx num=%d\n", ds, dn->dn_object, num);

	for (i = 0; i < num; i++, bp++) {
		if (BP_IS_HOLE(bp))
			continue;

		bytesfreed += dsl_dataset_block_kill(ds, bp, tx, B_FALSE);
		ASSERT3U(bytesfreed, <=, DN_USED_BYTES(dn->dn_phys));
		bzero(bp, sizeof (blkptr_t));
		blocks_freed += 1;
	}
	dnode_diduse_space(dn, -bytesfreed);
	return (blocks_freed);
}

#ifdef ZFS_DEBUG
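/*
 * Debug-only verification that the level-0 children of "db" covering
 * blocks [start, end] contain all-zero data, both in any dirty record
 * for this txg and in the current buffer (unless the buffer is being
 * filled or is dirty in a later txg).
 */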
static void
free_verify(dmu_buf_impl_t *db, uint64_t start, uint64_t end, dmu_tx_t *tx)
{
	int off, num;
	int i, err, epbs;
	uint64_t txg = tx->tx_txg;
	dnode_t *dn;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
	off = start - (db->db_blkid * 1<<epbs);
	num = end - start + 1;

	ASSERT3U(off, >=, 0);
	ASSERT3U(num, >=, 0);
	ASSERT3U(db->db_level, >, 0);
	ASSERT3U(db->db.db_size, ==, 1 << dn->dn_phys->dn_indblkshift);
	ASSERT3U(off+num, <=, db->db.db_size >> SPA_BLKPTRSHIFT);
	ASSERT(db->db_blkptr != NULL);

	for (i = off; i < off+num; i++) {
		uint64_t *buf;
		dmu_buf_impl_t *child;
		dbuf_dirty_record_t *dr;
		int j;

		ASSERT(db->db_level == 1);

		rw_enter(&dn->dn_struct_rwlock, RW_READER);
		err = dbuf_hold_impl(dn, db->db_level-1,
		    (db->db_blkid << epbs) + i, TRUE, FTAG, &child);
		rw_exit(&dn->dn_struct_rwlock);
		if (err == ENOENT)
			continue;
		ASSERT(err == 0);
		ASSERT(child->db_level == 0);
		dr = child->db_last_dirty;
		while (dr && dr->dr_txg > txg)
			dr = dr->dr_next;
		ASSERT(dr == NULL || dr->dr_txg == txg);

		/* data_old better be zeroed */
		if (dr) {
			buf = dr->dt.dl.dr_data->b_data;
			for (j = 0; j < child->db.db_size >> 3; j++) {
				if (buf[j] != 0) {
					panic("freed data not zero: "
					    "child=%p i=%d off=%d num=%d\n",
					    (void *)child, i, off, num);
				}
			}
		}

		/*
		 * db_data better be zeroed unless it's dirty in a
		 * future txg.
		 */
		mutex_enter(&child->db_mtx);
		buf = child->db.db_data;
		if (buf != NULL && child->db_state != DB_FILL &&
		    child->db_last_dirty == NULL) {
			for (j = 0; j < child->db.db_size >> 3; j++) {
				if (buf[j] != 0) {
					panic("freed data not zero: "
					    "child=%p i=%d off=%d num=%d\n",
					    (void *)child, i, off, num);
				}
			}
		}
		mutex_exit(&child->db_mtx);

		dbuf_rele(child, FTAG);
	}
	DB_DNODE_EXIT(db);
}
#endif

#define	ALL -1

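/*
 * Free all blocks in the range [blkid, blkid + nblks) that lie below
 * the indirect block "db" ("trunc" means the range runs to the end of
 * the object).  Returns ALL when everything covered by "db" was freed,
 * so the caller may free db's own block pointer; otherwise returns the
 * number of blocks freed.
 */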
static int
free_children(dmu_buf_impl_t *db, uint64_t blkid, uint64_t nblks, int trunc,
    dmu_tx_t *tx)
{
	dnode_t *dn;
	blkptr_t *bp;
	dmu_buf_impl_t *subdb;
	uint64_t start, end, dbstart, dbend, i;
	int epbs, shift, err;
	int all = TRUE;
	int blocks_freed = 0;

	/*
	 * There is a small possibility that this block will not be cached:
	 *   1 - if level > 1 and there are no children with level <= 1
	 *   2 - if we didn't get a dirty hold (because this block had just
	 *	 finished being written -- and so had no holds), and then this
	 *	 block got evicted before we got here.
	 */
	if (db->db_state != DB_CACHED)
		(void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED);

	dbuf_release_bp(db);
	bp = (blkptr_t *)db->db.db_data;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
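	/*
	 * Convert [blkid, blkid + nblks - 1] into child block ids at this
	 * level and clamp them to the range this indirect block covers;
	 * clear "all" if part of this block's coverage lies outside the
	 * range being freed (unless the free truncates the object).
	 */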
	epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
	shift = (db->db_level - 1) * epbs;
	dbstart = db->db_blkid << epbs;
	start = blkid >> shift;
	if (dbstart < start) {
		bp += start - dbstart;
		all = FALSE;
	} else {
		start = dbstart;
	}
	dbend = ((db->db_blkid + 1) << epbs) - 1;
	end = (blkid + nblks - 1) >> shift;
	if (dbend <= end)
		end = dbend;
	else if (all)
		all = trunc;
	ASSERT3U(start, <=, end);

	if (db->db_level == 1) {
		FREE_VERIFY(db, start, end, tx);
		blocks_freed = free_blocks(dn, bp, end-start+1, tx);
		arc_buf_freeze(db->db_buf);
		ASSERT(all || blocks_freed == 0 || db->db_last_dirty);
		DB_DNODE_EXIT(db);
		return (all ? ALL : blocks_freed);
	}

	for (i = start; i <= end; i++, bp++) {
		if (BP_IS_HOLE(bp))
			continue;
		rw_enter(&dn->dn_struct_rwlock, RW_READER);
		err = dbuf_hold_impl(dn, db->db_level-1, i, TRUE, FTAG, &subdb);
		ASSERT3U(err, ==, 0);
		rw_exit(&dn->dn_struct_rwlock);

		if (free_children(subdb, blkid, nblks, trunc, tx) == ALL) {
			ASSERT3P(subdb->db_blkptr, ==, bp);
			blocks_freed += free_blocks(dn, bp, 1, tx);
		} else {
			all = FALSE;
		}
		dbuf_rele(subdb, FTAG);
	}
	DB_DNODE_EXIT(db);
	arc_buf_freeze(db->db_buf);
#ifdef ZFS_DEBUG
	bp -= (end-start)+1;
	for (i = start; i <= end; i++, bp++) {
		if (i == start && blkid != 0)
			continue;
		else if (i == end && !trunc)
			continue;
		ASSERT3U(bp->blk_birth, ==, 0);
	}
#endif
	ASSERT(all || blocks_freed == 0 || db->db_last_dirty);
	return (all ? ALL : blocks_freed);
}

/*
 * free_range: Traverse the indicated range of the provided file
 * and "free" all the blocks contained there.
 */
static void
dnode_sync_free_range(dnode_t *dn, uint64_t blkid, uint64_t nblks, dmu_tx_t *tx)
{
	blkptr_t *bp = dn->dn_phys->dn_blkptr;
	dmu_buf_impl_t *db;
	int trunc, start, end, shift, i, err;
	int dnlevel = dn->dn_phys->dn_nlevels;

	if (blkid > dn->dn_phys->dn_maxblkid)
		return;

	ASSERT(dn->dn_phys->dn_maxblkid < UINT64_MAX);
	trunc = blkid + nblks > dn->dn_phys->dn_maxblkid;
	if (trunc)
		nblks = dn->dn_phys->dn_maxblkid - blkid + 1;

	/* There are no indirect blocks in the object */
	if (dnlevel == 1) {
		if (blkid >= dn->dn_phys->dn_nblkptr) {
			/* this range was never made persistent */
			return;
		}
		ASSERT3U(blkid + nblks, <=, dn->dn_phys->dn_nblkptr);
		(void) free_blocks(dn, bp + blkid, nblks, tx);
		if (trunc) {
			uint64_t off = (dn->dn_phys->dn_maxblkid + 1) *
			    (dn->dn_phys->dn_datablkszsec << SPA_MINBLOCKSHIFT);
			dn->dn_phys->dn_maxblkid = (blkid ? blkid - 1 : 0);
			ASSERT(off < dn->dn_phys->dn_maxblkid ||
			    dn->dn_phys->dn_maxblkid == 0 ||
			    dnode_next_offset(dn, 0, &off, 1, 1, 0) != 0);
		}
		return;
	}

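	/*
	 * The object has indirect blocks: walk the top-level block
	 * pointers that cover [blkid, blkid + nblks - 1] and recurse
	 * into each non-hole child with free_children().
	 */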
	shift = (dnlevel - 1) * (dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT);
	start = blkid >> shift;
	ASSERT(start < dn->dn_phys->dn_nblkptr);
	end = (blkid + nblks - 1) >> shift;
	bp += start;
	for (i = start; i <= end; i++, bp++) {
		if (BP_IS_HOLE(bp))
			continue;
		rw_enter(&dn->dn_struct_rwlock, RW_READER);
		err = dbuf_hold_impl(dn, dnlevel-1, i, TRUE, FTAG, &db);
		ASSERT3U(err, ==, 0);
		rw_exit(&dn->dn_struct_rwlock);

		if (free_children(db, blkid, nblks, trunc, tx) == ALL) {
			ASSERT3P(db->db_blkptr, ==, bp);
			(void) free_blocks(dn, bp, 1, tx);
		}
		dbuf_rele(db, FTAG);
	}
	if (trunc) {
		uint64_t off = (dn->dn_phys->dn_maxblkid + 1) *
		    (dn->dn_phys->dn_datablkszsec << SPA_MINBLOCKSHIFT);
		dn->dn_phys->dn_maxblkid = (blkid ? blkid - 1 : 0);
		ASSERT(off < dn->dn_phys->dn_maxblkid ||
		    dn->dn_phys->dn_maxblkid == 0 ||
		    dnode_next_offset(dn, 0, &off, 1, 1, 0) != 0);
	}
}

/*
 * Try to kick all the dnode's dbufs out of the cache...
 */
void
dnode_evict_dbufs(dnode_t *dn)
{
	int progress;
	int pass = 0;

	do {
		dmu_buf_impl_t *db, marker;
		int evicting = FALSE;

		progress = FALSE;
		mutex_enter(&dn->dn_dbufs_mtx);
		list_insert_tail(&dn->dn_dbufs, &marker);
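		/*
		 * Walk the list up to the marker we just appended, rotating
		 * each dbuf to the tail as we visit it, so every dbuf that
		 * was present at the start of this pass is seen exactly once.
		 */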
		db = list_head(&dn->dn_dbufs);
		for (; db != &marker; db = list_head(&dn->dn_dbufs)) {
			list_remove(&dn->dn_dbufs, db);
			list_insert_tail(&dn->dn_dbufs, db);
#ifdef	DEBUG
			DB_DNODE_ENTER(db);
			ASSERT3P(DB_DNODE(db), ==, dn);
			DB_DNODE_EXIT(db);
#endif	/* DEBUG */

			mutex_enter(&db->db_mtx);
			if (db->db_state == DB_EVICTING) {
				progress = TRUE;
				evicting = TRUE;
				mutex_exit(&db->db_mtx);
			} else if (refcount_is_zero(&db->db_holds)) {
				progress = TRUE;
				dbuf_clear(db); /* exits db_mtx for us */
			} else {
				mutex_exit(&db->db_mtx);
			}

		}
		list_remove(&dn->dn_dbufs, &marker);
		/*
		 * NB: we need to drop dn_dbufs_mtx between passes so
		 * that any DB_EVICTING dbufs can make progress.
		 * Ideally, we would have some cv we could wait on, but
		 * since we don't, just wait a bit to give the other
		 * thread a chance to run.
		 */
		mutex_exit(&dn->dn_dbufs_mtx);
		if (evicting)
			delay(1);
		pass++;
		ASSERT(pass < 100); /* sanity check */
	} while (progress);

	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
	if (dn->dn_bonus && refcount_is_zero(&dn->dn_bonus->db_holds)) {
		mutex_enter(&dn->dn_bonus->db_mtx);
		dbuf_evict(dn->dn_bonus);
		dn->dn_bonus = NULL;
	}
	rw_exit(&dn->dn_struct_rwlock);
}

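/*
 * Throw away the dirty records on "list" for a dnode that is being
 * freed: recurse into each indirect record's children, detach every
 * record from its dbuf, and drop the hold that the dirty record held.
 */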
static void
dnode_undirty_dbufs(list_t *list)
{
	dbuf_dirty_record_t *dr;

	while (dr = list_head(list)) {
		dmu_buf_impl_t *db = dr->dr_dbuf;
		uint64_t txg = dr->dr_txg;

		if (db->db_level != 0)
			dnode_undirty_dbufs(&dr->dt.di.dr_children);

		mutex_enter(&db->db_mtx);
		/* XXX - use dbuf_undirty()? */
		list_remove(list, dr);
		ASSERT(db->db_last_dirty == dr);
		db->db_last_dirty = NULL;
		db->db_dirtycnt -= 1;
		if (db->db_level == 0) {
			ASSERT(db->db_blkid == DMU_BONUS_BLKID ||
			    dr->dt.dl.dr_data == db->db_buf);
			dbuf_unoverride(dr);
		}
		kmem_free(dr, sizeof (dbuf_dirty_record_t));
		dbuf_rele_and_unlock(db, (void *)(uintptr_t)txg);
	}
}

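/*
 * Final syncing-context teardown of a freed dnode: discard any
 * remaining dirty state, evict its dbufs, zero the on-disk dnode,
 * and release the hold that kept it around for this txg.
 */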
static void
dnode_sync_free(dnode_t *dn, dmu_tx_t *tx)
{
	int txgoff = tx->tx_txg & TXG_MASK;

	ASSERT(dmu_tx_is_syncing(tx));

	/*
	 * Our contents should have been freed in dnode_sync() by the
	 * free range record inserted by the caller of dnode_free().
	 */
	ASSERT3U(DN_USED_BYTES(dn->dn_phys), ==, 0);
	ASSERT(BP_IS_HOLE(dn->dn_phys->dn_blkptr));

	dnode_undirty_dbufs(&dn->dn_dirty_records[txgoff]);
	dnode_evict_dbufs(dn);
	ASSERT3P(list_head(&dn->dn_dbufs), ==, NULL);

	/*
	 * XXX - It would be nice to assert this, but we may still
	 * have residual holds from async evictions from the arc...
	 *
	 * zfs_obj_to_path() also depends on this being
	 * commented out.
	 *
	 * ASSERT3U(refcount_count(&dn->dn_holds), ==, 1);
	 */

	/* Undirty next bits */
	dn->dn_next_nlevels[txgoff] = 0;
	dn->dn_next_indblkshift[txgoff] = 0;
	dn->dn_next_blksz[txgoff] = 0;

	/* ASSERT(blkptrs are zero); */
	ASSERT(dn->dn_phys->dn_type != DMU_OT_NONE);
	ASSERT(dn->dn_type != DMU_OT_NONE);

	ASSERT(dn->dn_free_txg > 0);
	if (dn->dn_allocated_txg != dn->dn_free_txg)
		dbuf_will_dirty(dn->dn_dbuf, tx);
	bzero(dn->dn_phys, sizeof (dnode_phys_t));

	mutex_enter(&dn->dn_mtx);
	dn->dn_type = DMU_OT_NONE;
	dn->dn_maxblkid = 0;
	dn->dn_allocated_txg = 0;
	dn->dn_free_txg = 0;
	dn->dn_have_spill = B_FALSE;
	mutex_exit(&dn->dn_mtx);

	ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);

	dnode_rele(dn, (void *)(uintptr_t)tx->tx_txg);
	/*
	 * Now that we've released our hold, the dnode may
	 * be evicted, so we mustn't access it.
	 */
}

/*
 * Write out the dnode's dirty buffers.
 */
void
dnode_sync(dnode_t *dn, dmu_tx_t *tx)
{
	free_range_t *rp;
	dnode_phys_t *dnp = dn->dn_phys;
	int txgoff = tx->tx_txg & TXG_MASK;
	list_t *list = &dn->dn_dirty_records[txgoff];
	static const dnode_phys_t zerodn = { 0 };
	boolean_t kill_spill = B_FALSE;

	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(dnp->dn_type != DMU_OT_NONE || dn->dn_allocated_txg);
	ASSERT(dnp->dn_type != DMU_OT_NONE ||
	    bcmp(dnp, &zerodn, DNODE_SIZE) == 0);
	DNODE_VERIFY(dn);

	ASSERT(dn->dn_dbuf == NULL || arc_released(dn->dn_dbuf->db_buf));

	if (dmu_objset_userused_enabled(dn->dn_objset) &&
	    !DMU_OBJECT_IS_SPECIAL(dn->dn_object)) {
		mutex_enter(&dn->dn_mtx);
		dn->dn_oldused = DN_USED_BYTES(dn->dn_phys);
		dn->dn_oldflags = dn->dn_phys->dn_flags;
		dn->dn_phys->dn_flags |= DNODE_FLAG_USERUSED_ACCOUNTED;
		mutex_exit(&dn->dn_mtx);
		dmu_objset_userquota_get_ids(dn, B_FALSE, tx);
	} else {
		/* Once we account for it, we should always account for it. */
		ASSERT(!(dn->dn_phys->dn_flags &
		    DNODE_FLAG_USERUSED_ACCOUNTED));
	}

	mutex_enter(&dn->dn_mtx);
	if (dn->dn_allocated_txg == tx->tx_txg) {
		/* The dnode is newly allocated or reallocated */
		if (dnp->dn_type == DMU_OT_NONE) {
			/* this is a first alloc, not a realloc */
			dnp->dn_nlevels = 1;
			dnp->dn_nblkptr = dn->dn_nblkptr;
		}

		dnp->dn_type = dn->dn_type;
		dnp->dn_bonustype = dn->dn_bonustype;
		dnp->dn_bonuslen = dn->dn_bonuslen;
	}

	ASSERT(dnp->dn_nlevels > 1 ||
	    BP_IS_HOLE(&dnp->dn_blkptr[0]) ||
	    BP_GET_LSIZE(&dnp->dn_blkptr[0]) ==
	    dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT);

	if (dn->dn_next_blksz[txgoff]) {
		ASSERT(P2PHASE(dn->dn_next_blksz[txgoff],
		    SPA_MINBLOCKSIZE) == 0);
		ASSERT(BP_IS_HOLE(&dnp->dn_blkptr[0]) ||
		    dn->dn_maxblkid == 0 || list_head(list) != NULL ||
		    avl_last(&dn->dn_ranges[txgoff]) ||
		    dn->dn_next_blksz[txgoff] >> SPA_MINBLOCKSHIFT ==
		    dnp->dn_datablkszsec);
		dnp->dn_datablkszsec =
		    dn->dn_next_blksz[txgoff] >> SPA_MINBLOCKSHIFT;
		dn->dn_next_blksz[txgoff] = 0;
	}

	if (dn->dn_next_bonuslen[txgoff]) {
		if (dn->dn_next_bonuslen[txgoff] == DN_ZERO_BONUSLEN)
			dnp->dn_bonuslen = 0;
		else
			dnp->dn_bonuslen = dn->dn_next_bonuslen[txgoff];
		ASSERT(dnp->dn_bonuslen <= DN_MAX_BONUSLEN);
		dn->dn_next_bonuslen[txgoff] = 0;
	}

	if (dn->dn_next_bonustype[txgoff]) {
		ASSERT(dn->dn_next_bonustype[txgoff] < DMU_OT_NUMTYPES);
		dnp->dn_bonustype = dn->dn_next_bonustype[txgoff];
		dn->dn_next_bonustype[txgoff] = 0;
	}

	/*
	 * We will either remove a spill block when a file is being removed
	 * or we have been asked to remove it.
	 */
	if (dn->dn_rm_spillblk[txgoff] ||
	    ((dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) &&
	    dn->dn_free_txg > 0 && dn->dn_free_txg <= tx->tx_txg)) {
		if ((dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR))
			kill_spill = B_TRUE;
		dn->dn_rm_spillblk[txgoff] = 0;
	}

	if (dn->dn_next_indblkshift[txgoff]) {
		ASSERT(dnp->dn_nlevels == 1);
		dnp->dn_indblkshift = dn->dn_next_indblkshift[txgoff];
		dn->dn_next_indblkshift[txgoff] = 0;
	}

	/*
	 * Just take the live (open-context) values for checksum and compress.
	 * Strictly speaking it's a future leak, but nothing bad happens if we
	 * start using the new checksum or compress algorithm a little early.
	 */
	dnp->dn_checksum = dn->dn_checksum;
	dnp->dn_compress = dn->dn_compress;

	mutex_exit(&dn->dn_mtx);

	if (kill_spill) {
		(void) free_blocks(dn, &dn->dn_phys->dn_spill, 1, tx);
		mutex_enter(&dn->dn_mtx);
		dnp->dn_flags &= ~DNODE_FLAG_SPILL_BLKPTR;
		mutex_exit(&dn->dn_mtx);
	}

	/* process all the "freed" ranges in the file */
	while (rp = avl_last(&dn->dn_ranges[txgoff])) {
		dnode_sync_free_range(dn, rp->fr_blkid, rp->fr_nblks, tx);
		/* grab the mutex so we don't race with dnode_block_freed() */
		mutex_enter(&dn->dn_mtx);
		avl_remove(&dn->dn_ranges[txgoff], rp);
		mutex_exit(&dn->dn_mtx);
		kmem_free(rp, sizeof (free_range_t));
	}

	if (dn->dn_free_txg > 0 && dn->dn_free_txg <= tx->tx_txg) {
		dnode_sync_free(dn, tx);
		return;
	}

	if (dn->dn_next_nblkptr[txgoff]) {
		/* this should only happen on a realloc */
		ASSERT(dn->dn_allocated_txg == tx->tx_txg);
		if (dn->dn_next_nblkptr[txgoff] > dnp->dn_nblkptr) {
			/* zero the new blkptrs we are gaining */
			bzero(dnp->dn_blkptr + dnp->dn_nblkptr,
			    sizeof (blkptr_t) *
			    (dn->dn_next_nblkptr[txgoff] - dnp->dn_nblkptr));
#ifdef ZFS_DEBUG
		} else {
			int i;
			ASSERT(dn->dn_next_nblkptr[txgoff] < dnp->dn_nblkptr);
			/* the blkptrs we are losing better be unallocated */
			for (i = dn->dn_next_nblkptr[txgoff];
			    i < dnp->dn_nblkptr; i++)
				ASSERT(BP_IS_HOLE(&dnp->dn_blkptr[i]));
#endif
		}
		mutex_enter(&dn->dn_mtx);
		dnp->dn_nblkptr = dn->dn_next_nblkptr[txgoff];
		dn->dn_next_nblkptr[txgoff] = 0;
		mutex_exit(&dn->dn_mtx);
	}

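	/*
	 * If the indirection level grew this txg, splice in the new
	 * top-level indirect block before syncing the dirty dbufs below.
	 */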
	if (dn->dn_next_nlevels[txgoff]) {
		dnode_increase_indirection(dn, tx);
		dn->dn_next_nlevels[txgoff] = 0;
	}

	dbuf_sync_list(list, tx);

	if (!DMU_OBJECT_IS_SPECIAL(dn->dn_object)) {
		ASSERT3P(list_head(list), ==, NULL);
		dnode_rele(dn, (void *)(uintptr_t)tx->tx_txg);
	}

	/*
	 * Although we have dropped our reference to the dnode, it
	 * can't be evicted until it's written, and we haven't yet
	 * initiated the IO for the dnode's dbuf.
	 */
}