/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2020 by Delphix. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 * Copyright 2020 Oxide Computer Company
 */

#include <sys/zfs_context.h>
#include <sys/dbuf.h>
#include <sys/dnode.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_recv.h>
#include <sys/dsl_dataset.h>
#include <sys/spa.h>
#include <sys/range_tree.h>
#include <sys/zfeature.h>

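/*
 * Grow the dnode's on-disk indirection to the level recorded in
 * dn_next_nlevels for this txg: allocate a new top-level indirect
 * block, copy the dnode's block pointers into it, and re-parent any
 * cached child dbufs onto the new block.
 */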
static void
dnode_increase_indirection(dnode_t *dn, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db;
	int txgoff = tx->tx_txg & TXG_MASK;
	int nblkptr = dn->dn_phys->dn_nblkptr;
	int old_toplvl = dn->dn_phys->dn_nlevels - 1;
	int new_level = dn->dn_next_nlevels[txgoff];
	int i;

	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);

	/* this dnode can't be paged out because it's dirty */
	ASSERT(dn->dn_phys->dn_type != DMU_OT_NONE);
	ASSERT(new_level > 1 && dn->dn_phys->dn_nlevels > 0);

	db = dbuf_hold_level(dn, dn->dn_phys->dn_nlevels, 0, FTAG);
	ASSERT(db != NULL);

	dn->dn_phys->dn_nlevels = new_level;
	dprintf("os=%p obj=%llu, increase to %d\n", dn->dn_objset,
	    dn->dn_object, dn->dn_phys->dn_nlevels);

	/*
	 * Lock ordering requires that we hold the children's db_mutexes (by
	 * calling dbuf_find()) before holding the parent's db_rwlock.  The lock
	 * order is imposed by dbuf_read's steps of "grab the lock to protect
	 * db_parent, get db_parent, hold db_parent's db_rwlock".
	 */
	dmu_buf_impl_t *children[DN_MAX_NBLKPTR];
	ASSERT3U(nblkptr, <=, DN_MAX_NBLKPTR);
	for (i = 0; i < nblkptr; i++) {
		children[i] =
		    dbuf_find(dn->dn_objset, dn->dn_object, old_toplvl, i);
	}

	/* transfer dnode's block pointers to new indirect block */
	(void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED|DB_RF_HAVESTRUCT);
	if (dn->dn_dbuf != NULL)
		rw_enter(&dn->dn_dbuf->db_rwlock, RW_WRITER);
	rw_enter(&db->db_rwlock, RW_WRITER);
	ASSERT(db->db.db_data);
	ASSERT(arc_released(db->db_buf));
	ASSERT3U(sizeof (blkptr_t) * nblkptr, <=, db->db.db_size);
	bcopy(dn->dn_phys->dn_blkptr, db->db.db_data,
	    sizeof (blkptr_t) * nblkptr);
	arc_buf_freeze(db->db_buf);

	/* set dbuf's parent pointers to new indirect buf */
	for (i = 0; i < nblkptr; i++) {
		dmu_buf_impl_t *child = children[i];

		if (child == NULL)
			continue;
#ifdef	ZFS_DEBUG
		DB_DNODE_ENTER(child);
		ASSERT3P(DB_DNODE(child), ==, dn);
		DB_DNODE_EXIT(child);
#endif	/* ZFS_DEBUG */
		if (child->db_parent && child->db_parent != dn->dn_dbuf) {
			ASSERT(child->db_parent->db_level == db->db_level);
			ASSERT(child->db_blkptr !=
			    &dn->dn_phys->dn_blkptr[child->db_blkid]);
			mutex_exit(&child->db_mtx);
			continue;
		}
		ASSERT(child->db_parent == NULL ||
		    child->db_parent == dn->dn_dbuf);

		child->db_parent = db;
		dbuf_add_ref(db, child);
		if (db->db.db_data)
			child->db_blkptr = (blkptr_t *)db->db.db_data + i;
		else
			child->db_blkptr = NULL;
		dprintf_dbuf_bp(child, child->db_blkptr,
		    "changed db_blkptr to new indirect %s", "");

		mutex_exit(&child->db_mtx);
	}

	bzero(dn->dn_phys->dn_blkptr, sizeof (blkptr_t) * nblkptr);

	rw_exit(&db->db_rwlock);
	if (dn->dn_dbuf != NULL)
		rw_exit(&dn->dn_dbuf->db_rwlock);

	dbuf_rele(db, FTAG);

	rw_exit(&dn->dn_struct_rwlock);
}

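/*
 * Free the blocks referenced by the given range of block pointers,
 * charging the freed bytes back against the dnode's space accounting.
 * When the hole_birth feature is active, the logical size, type,
 * level, and birth txg of each freed BP are retained in the resulting
 * hole so that zfs send can tell when the hole was punched.
 */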
static void
free_blocks(dnode_t *dn, blkptr_t *bp, int num, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
	uint64_t bytesfreed = 0;

	dprintf("ds=%p obj=%llx num=%d\n", ds, dn->dn_object, num);

	for (int i = 0; i < num; i++, bp++) {
		if (BP_IS_HOLE(bp))
			continue;

		bytesfreed += dsl_dataset_block_kill(ds, bp, tx, B_FALSE);
		ASSERT3U(bytesfreed, <=, DN_USED_BYTES(dn->dn_phys));

		/*
		 * Save some useful information on the holes being
		 * punched, including logical size, type, and indirection
		 * level. Retaining birth time enables detection of when
		 * holes are punched for reducing the number of free
		 * records transmitted during a zfs send.
		 */

		uint64_t lsize = BP_GET_LSIZE(bp);
		dmu_object_type_t type = BP_GET_TYPE(bp);
		uint64_t lvl = BP_GET_LEVEL(bp);

		bzero(bp, sizeof (blkptr_t));

		if (spa_feature_is_active(dn->dn_objset->os_spa,
		    SPA_FEATURE_HOLE_BIRTH)) {
			BP_SET_LSIZE(bp, lsize);
			BP_SET_TYPE(bp, type);
			BP_SET_LEVEL(bp, lvl);
			BP_SET_BIRTH(bp, dmu_tx_get_txg(tx), 0);
		}
	}
	dnode_diduse_space(dn, -bytesfreed);
}

#ifdef ZFS_DEBUG
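/*
 * Debug-only sanity check: every level-0 child of the level-1 indirect
 * block 'db' within [start, end] that has just been freed must contain
 * only zeros, both in its pending dirty data for this txg and in its
 * cached contents (unless it has been re-dirtied in a future txg).
 */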
static void
free_verify(dmu_buf_impl_t *db, uint64_t start, uint64_t end, dmu_tx_t *tx)
{
	int off, num;
	int i, err, epbs;
	uint64_t txg = tx->tx_txg;
	dnode_t *dn;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
	off = start - (db->db_blkid << epbs);
	num = end - start + 1;

	ASSERT3U(off, >=, 0);
	ASSERT3U(num, >=, 0);
	ASSERT3U(db->db_level, >, 0);
	ASSERT3U(db->db.db_size, ==, 1 << dn->dn_phys->dn_indblkshift);
	ASSERT3U(off+num, <=, db->db.db_size >> SPA_BLKPTRSHIFT);
	ASSERT(db->db_blkptr != NULL);

	for (i = off; i < off+num; i++) {
		uint64_t *buf;
		dmu_buf_impl_t *child;
		dbuf_dirty_record_t *dr;
		int j;

		ASSERT(db->db_level == 1);

		rw_enter(&dn->dn_struct_rwlock, RW_READER);
		err = dbuf_hold_impl(dn, db->db_level - 1,
		    (db->db_blkid << epbs) + i, TRUE, FALSE, FTAG, &child);
		rw_exit(&dn->dn_struct_rwlock);
		if (err == ENOENT)
			continue;
		ASSERT(err == 0);
		ASSERT(child->db_level == 0);
		dr = dbuf_find_dirty_eq(child, txg);

		/* data_old better be zeroed */
		if (dr) {
			buf = dr->dt.dl.dr_data->b_data;
			for (j = 0; j < child->db.db_size >> 3; j++) {
				if (buf[j] != 0) {
					panic("freed data not zero: "
					    "child=%p i=%d off=%d num=%d\n",
					    (void *)child, i, off, num);
				}
			}
		}

		/*
		 * db_data better be zeroed unless it's dirty in a
		 * future txg.
		 */
		mutex_enter(&child->db_mtx);
		buf = child->db.db_data;
		if (buf != NULL && child->db_state != DB_FILL &&
		    list_is_empty(&child->db_dirty_records)) {
			for (j = 0; j < child->db.db_size >> 3; j++) {
				if (buf[j] != 0) {
					panic("freed data not zero: "
					    "child=%p i=%d off=%d num=%d\n",
					    (void *)child, i, off, num);
				}
			}
		}
		mutex_exit(&child->db_mtx);

		dbuf_rele(child, FTAG);
	}
	DB_DNODE_EXIT(db);
}
#endif

/*
 * We don't usually free the indirect blocks here.  If in one txg we have a
 * free_range and a write to the same indirect block, it's important that we
 * preserve the hole's birth times. Therefore, we don't free any indirect
 * blocks in free_children().  If an indirect block happens to turn into all
 * holes, it will be freed by dbuf_write_children_ready, which happens at a
 * point in the syncing process where we know for certain the contents of the
 * indirect block.
 *
 * However, if we're freeing a dnode, its space accounting must go to zero
 * before we actually try to free the dnode, or we will trip an assertion. In
 * addition, we know the case described above cannot occur, because the dnode is
 * being freed.  Therefore, we free the indirect blocks immediately in that
 * case.
 */
static void
free_children(dmu_buf_impl_t *db, uint64_t blkid, uint64_t nblks,
    boolean_t free_indirects, dmu_tx_t *tx)
{
	dnode_t *dn;
	blkptr_t *bp;
	dmu_buf_impl_t *subdb;
	uint64_t start, end, dbstart, dbend;
	unsigned int epbs, shift, i;

	/*
	 * There is a small possibility that this block will not be cached:
	 *   1 - if level > 1 and there are no children with level <= 1
	 *   2 - if this block was evicted since we read it from
	 *	 dmu_tx_hold_free().
	 */
	if (db->db_state != DB_CACHED)
		(void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED);

	/*
	 * If we modify this indirect block, and we are not freeing the
	 * dnode (!free_indirects), then this indirect block needs to get
	 * written to disk by dbuf_write().  If it is dirty, we know it will
	 * be written (otherwise, we would have incorrect on-disk state
	 * because the space would be freed but still referenced by the BP
	 * in this indirect block).  Therefore we VERIFY that it is
	 * dirty.
	 *
	 * Our VERIFY covers some cases that do not actually have to be
	 * dirty, but the open-context code happens to dirty.  E.g. if the
	 * blocks we are freeing are all holes, because in that case, we
	 * are only freeing part of this indirect block, so it is an
	 * ancestor of the first or last block to be freed.  The first and
	 * last L1 indirect blocks are always dirtied by dnode_free_range().
	 */
	db_lock_type_t dblt = dmu_buf_lock_parent(db, RW_READER, FTAG);
	VERIFY(BP_GET_FILL(db->db_blkptr) == 0 || db->db_dirtycnt > 0);
	dmu_buf_unlock_parent(db, dblt, FTAG);

	dbuf_release_bp(db);
	bp = db->db.db_data;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
	ASSERT3U(epbs, <, 31);
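	/*
	 * Clamp the range of level-0 block ids [blkid, blkid + nblks) to
	 * the portion covered by this indirect block: shift converts a
	 * level-0 blkid into an index at this block's level, and
	 * [dbstart, dbend] are the first and last such indices that this
	 * block covers.
	 */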
	shift = (db->db_level - 1) * epbs;
	dbstart = db->db_blkid << epbs;
	start = blkid >> shift;
	if (dbstart < start) {
		bp += start - dbstart;
	} else {
		start = dbstart;
	}
	dbend = ((db->db_blkid + 1) << epbs) - 1;
	end = (blkid + nblks - 1) >> shift;
	if (dbend <= end)
		end = dbend;

	ASSERT3U(start, <=, end);

	if (db->db_level == 1) {
		FREE_VERIFY(db, start, end, tx);
		rw_enter(&db->db_rwlock, RW_WRITER);
		free_blocks(dn, bp, end - start + 1, tx);
		rw_exit(&db->db_rwlock);
	} else {
		for (uint64_t id = start; id <= end; id++, bp++) {
			if (BP_IS_HOLE(bp))
				continue;
			rw_enter(&dn->dn_struct_rwlock, RW_READER);
			VERIFY0(dbuf_hold_impl(dn, db->db_level - 1,
			    id, TRUE, FALSE, FTAG, &subdb));
			rw_exit(&dn->dn_struct_rwlock);
			ASSERT3P(bp, ==, subdb->db_blkptr);

			free_children(subdb, blkid, nblks, free_indirects, tx);
			dbuf_rele(subdb, FTAG);
		}
	}

	if (free_indirects) {
		rw_enter(&db->db_rwlock, RW_WRITER);
		for (i = 0, bp = db->db.db_data; i < 1 << epbs; i++, bp++)
			ASSERT(BP_IS_HOLE(bp));
		bzero(db->db.db_data, db->db.db_size);
		free_blocks(dn, db->db_blkptr, 1, tx);
		rw_exit(&db->db_rwlock);
	}

	DB_DNODE_EXIT(db);
	arc_buf_freeze(db->db_buf);
}

/*
 * Traverse the indicated range of the provided file
 * and "free" all the blocks contained there.
 */
static void
dnode_sync_free_range_impl(dnode_t *dn, uint64_t blkid, uint64_t nblks,
    boolean_t free_indirects, dmu_tx_t *tx)
{
	blkptr_t *bp = dn->dn_phys->dn_blkptr;
	int dnlevel = dn->dn_phys->dn_nlevels;
	boolean_t trunc = B_FALSE;

	if (blkid > dn->dn_phys->dn_maxblkid)
		return;

	ASSERT(dn->dn_phys->dn_maxblkid < UINT64_MAX);
	if (blkid + nblks > dn->dn_phys->dn_maxblkid) {
		nblks = dn->dn_phys->dn_maxblkid - blkid + 1;
		trunc = B_TRUE;
	}

	/* There are no indirect blocks in the object */
	if (dnlevel == 1) {
		if (blkid >= dn->dn_phys->dn_nblkptr) {
			/* this range was never made persistent */
			return;
		}
		ASSERT3U(blkid + nblks, <=, dn->dn_phys->dn_nblkptr);
		free_blocks(dn, bp + blkid, nblks, tx);
	} else {
		int shift = (dnlevel - 1) *
		    (dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT);
		int start = blkid >> shift;
		int end = (blkid + nblks - 1) >> shift;
		dmu_buf_impl_t *db;

		ASSERT(start < dn->dn_phys->dn_nblkptr);
		bp += start;
		for (int i = start; i <= end; i++, bp++) {
			if (BP_IS_HOLE(bp))
				continue;
			rw_enter(&dn->dn_struct_rwlock, RW_READER);
			VERIFY0(dbuf_hold_impl(dn, dnlevel - 1, i,
			    TRUE, FALSE, FTAG, &db));
			rw_exit(&dn->dn_struct_rwlock);
			free_children(db, blkid, nblks, free_indirects, tx);
			dbuf_rele(db, FTAG);
		}
	}

	/*
	 * Do not truncate the maxblkid if we are performing a raw
	 * receive. The raw receive sets the maxblkid manually and
	 * must not be overridden. Usually, the last DRR_FREE record
	 * will be at the maxblkid, because the source system sets
	 * the maxblkid when truncating. However, if the last block
	 * was freed by overwriting with zeros and being compressed
	 * away to a hole, the source system will generate a DRR_FREE
	 * record while leaving the maxblkid after the end of that
	 * record. In this case we need to leave the maxblkid as
	 * indicated in the DRR_OBJECT record, so that it matches the
	 * source system, ensuring that the cryptographic hashes will
	 * match.
	 */
	if (trunc && !dn->dn_objset->os_raw_receive) {
		uint64_t off __maybe_unused;
		dn->dn_phys->dn_maxblkid = blkid == 0 ? 0 : blkid - 1;

		off = (dn->dn_phys->dn_maxblkid + 1) *
		    (dn->dn_phys->dn_datablkszsec << SPA_MINBLOCKSHIFT);
		ASSERT(off < dn->dn_phys->dn_maxblkid ||
		    dn->dn_phys->dn_maxblkid == 0 ||
		    dnode_next_offset(dn, 0, &off, 1, 1, 0) != 0);
	}
}

typedef struct dnode_sync_free_range_arg {
	dnode_t *dsfra_dnode;
	dmu_tx_t *dsfra_tx;
	boolean_t dsfra_free_indirects;
} dnode_sync_free_range_arg_t;

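/*
 * Callback for range_tree_walk(), invoked with dn_mtx held.  The real
 * work in dnode_sync_free_range_impl() takes other locks and can block
 * on dbuf reads, so drop dn_mtx around it; see the comment in
 * dnode_sync() for why a walk (rather than a vacate) is used here.
 */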
static void
dnode_sync_free_range(void *arg, uint64_t blkid, uint64_t nblks)
{
	dnode_sync_free_range_arg_t *dsfra = arg;
	dnode_t *dn = dsfra->dsfra_dnode;

	mutex_exit(&dn->dn_mtx);
	dnode_sync_free_range_impl(dn, blkid, nblks,
	    dsfra->dsfra_free_indirects, dsfra->dsfra_tx);
	mutex_enter(&dn->dn_mtx);
}

/*
 * Try to kick all the dnode's dbufs out of the cache...
 */
void
dnode_evict_dbufs(dnode_t *dn)
{
	dmu_buf_impl_t *db_marker;
	dmu_buf_impl_t *db, *db_next;

	db_marker = kmem_alloc(sizeof (dmu_buf_impl_t), KM_SLEEP);

	mutex_enter(&dn->dn_dbufs_mtx);
	for (db = avl_first(&dn->dn_dbufs); db != NULL; db = db_next) {

#ifdef	ZFS_DEBUG
		DB_DNODE_ENTER(db);
		ASSERT3P(DB_DNODE(db), ==, dn);
		DB_DNODE_EXIT(db);
#endif	/* ZFS_DEBUG */

		mutex_enter(&db->db_mtx);
		if (db->db_state != DB_EVICTING &&
		    zfs_refcount_is_zero(&db->db_holds)) {
			db_marker->db_level = db->db_level;
			db_marker->db_blkid = db->db_blkid;
			db_marker->db_state = DB_SEARCH;
			avl_insert_here(&dn->dn_dbufs, db_marker, db,
			    AVL_BEFORE);

			/*
			 * We need to use the "marker" dbuf rather than
			 * simply getting the next dbuf, because
			 * dbuf_destroy() may actually remove multiple dbufs.
			 * It can call itself recursively on the parent dbuf,
			 * which may also be removed from dn_dbufs.  The code
			 * flow would look like:
			 *
			 * dbuf_destroy():
			 *   dnode_rele_and_unlock(parent_dbuf, evicting=TRUE):
			 *	if (!cacheable || pending_evict)
			 *	  dbuf_destroy()
			 */
			dbuf_destroy(db);

			db_next = AVL_NEXT(&dn->dn_dbufs, db_marker);
			avl_remove(&dn->dn_dbufs, db_marker);
		} else {
			db->db_pending_evict = TRUE;
			mutex_exit(&db->db_mtx);
			db_next = AVL_NEXT(&dn->dn_dbufs, db);
		}
	}
	mutex_exit(&dn->dn_dbufs_mtx);

	kmem_free(db_marker, sizeof (dmu_buf_impl_t));

	dnode_evict_bonus(dn);
}

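/*
 * Evict the dnode's bonus buffer if nothing holds it, otherwise mark
 * it for eviction once the last hold is released.
 */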
void
dnode_evict_bonus(dnode_t *dn)
{
	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
	if (dn->dn_bonus != NULL) {
		if (zfs_refcount_is_zero(&dn->dn_bonus->db_holds)) {
			mutex_enter(&dn->dn_bonus->db_mtx);
			dbuf_destroy(dn->dn_bonus);
			dn->dn_bonus = NULL;
		} else {
			dn->dn_bonus->db_pending_evict = TRUE;
		}
	}
	rw_exit(&dn->dn_struct_rwlock);
}

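/*
 * Undo this txg's dirty state on every dbuf in the list, recursing
 * into the children of indirect dbufs, and drop the hold that each
 * dirty record had on its dbuf.  Used when the dnode itself is being
 * freed, so none of this state will ever be written out.
 */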
static void
dnode_undirty_dbufs(list_t *list)
{
	dbuf_dirty_record_t *dr;

	while ((dr = list_head(list))) {
		dmu_buf_impl_t *db = dr->dr_dbuf;
		uint64_t txg = dr->dr_txg;

		if (db->db_level != 0)
			dnode_undirty_dbufs(&dr->dt.di.dr_children);

		mutex_enter(&db->db_mtx);
		/* XXX - use dbuf_undirty()? */
		list_remove(list, dr);
		ASSERT(list_head(&db->db_dirty_records) == dr);
		list_remove_head(&db->db_dirty_records);
		ASSERT(list_is_empty(&db->db_dirty_records));
		db->db_dirtycnt -= 1;
		if (db->db_level == 0) {
			ASSERT(db->db_blkid == DMU_BONUS_BLKID ||
			    dr->dt.dl.dr_data == db->db_buf);
			dbuf_unoverride(dr);
		} else {
			mutex_destroy(&dr->dt.di.dr_mtx);
			list_destroy(&dr->dt.di.dr_children);
		}
		kmem_free(dr, sizeof (dbuf_dirty_record_t));
		dbuf_rele_and_unlock(db, (void *)(uintptr_t)txg, B_FALSE);
	}
}

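/*
 * Syncing-context teardown of a dnode that was freed in this txg:
 * discard its remaining dirty state, evict its dbufs, zero its
 * on-disk dnode_phys_t, and reset the in-core dnode so the object
 * number can be reused.
 */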
static void
dnode_sync_free(dnode_t *dn, dmu_tx_t *tx)
{
	int txgoff = tx->tx_txg & TXG_MASK;

	ASSERT(dmu_tx_is_syncing(tx));

	/*
	 * Our contents should have been freed in dnode_sync() by the
	 * free range record inserted by the caller of dnode_free().
	 */
	ASSERT0(DN_USED_BYTES(dn->dn_phys));
	ASSERT(BP_IS_HOLE(dn->dn_phys->dn_blkptr));

	dnode_undirty_dbufs(&dn->dn_dirty_records[txgoff]);
	dnode_evict_dbufs(dn);

	/*
	 * XXX - It would be nice to assert this, but we may still
	 * have residual holds from async evictions from the arc...
	 *
	 * zfs_obj_to_path() also depends on this being
	 * commented out.
	 *
	 * ASSERT3U(zfs_refcount_count(&dn->dn_holds), ==, 1);
	 */

	/* Undirty next bits */
	dn->dn_next_nlevels[txgoff] = 0;
	dn->dn_next_indblkshift[txgoff] = 0;
	dn->dn_next_blksz[txgoff] = 0;
	dn->dn_next_maxblkid[txgoff] = 0;

	/* ASSERT(blkptrs are zero); */
	ASSERT(dn->dn_phys->dn_type != DMU_OT_NONE);
	ASSERT(dn->dn_type != DMU_OT_NONE);

	ASSERT(dn->dn_free_txg > 0);
	if (dn->dn_allocated_txg != dn->dn_free_txg)
		dmu_buf_will_dirty(&dn->dn_dbuf->db, tx);
	bzero(dn->dn_phys, sizeof (dnode_phys_t) * dn->dn_num_slots);
	dnode_free_interior_slots(dn);

	mutex_enter(&dn->dn_mtx);
	dn->dn_type = DMU_OT_NONE;
	dn->dn_maxblkid = 0;
	dn->dn_allocated_txg = 0;
	dn->dn_free_txg = 0;
	dn->dn_have_spill = B_FALSE;
	dn->dn_num_slots = 1;
	mutex_exit(&dn->dn_mtx);

	ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);

	dnode_rele(dn, (void *)(uintptr_t)tx->tx_txg);
	/*
	 * Now that we've released our hold, the dnode may
	 * be evicted, so we mustn't access it.
	 */
}

/*
 * Write out the dnode's dirty buffers.
 */
void
dnode_sync(dnode_t *dn, dmu_tx_t *tx)
{
	objset_t *os = dn->dn_objset;
	dnode_phys_t *dnp = dn->dn_phys;
	int txgoff = tx->tx_txg & TXG_MASK;
	list_t *list = &dn->dn_dirty_records[txgoff];
	static const dnode_phys_t zerodn __maybe_unused = { 0 };
	boolean_t kill_spill = B_FALSE;

	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(dnp->dn_type != DMU_OT_NONE || dn->dn_allocated_txg);
	ASSERT(dnp->dn_type != DMU_OT_NONE ||
	    bcmp(dnp, &zerodn, DNODE_MIN_SIZE) == 0);
	DNODE_VERIFY(dn);

	ASSERT(dn->dn_dbuf == NULL || arc_released(dn->dn_dbuf->db_buf));

	/*
	 * Do user accounting if it is enabled and this is not
	 * an encrypted receive.
	 */
	if (dmu_objset_userused_enabled(os) &&
	    !DMU_OBJECT_IS_SPECIAL(dn->dn_object) &&
	    (!os->os_encrypted || !dmu_objset_is_receiving(os))) {
		mutex_enter(&dn->dn_mtx);
		dn->dn_oldused = DN_USED_BYTES(dn->dn_phys);
		dn->dn_oldflags = dn->dn_phys->dn_flags;
		dn->dn_phys->dn_flags |= DNODE_FLAG_USERUSED_ACCOUNTED;
		if (dmu_objset_userobjused_enabled(dn->dn_objset))
			dn->dn_phys->dn_flags |=
			    DNODE_FLAG_USEROBJUSED_ACCOUNTED;
		mutex_exit(&dn->dn_mtx);
		dmu_objset_userquota_get_ids(dn, B_FALSE, tx);
	} else {
		/* Once we account for it, we should always account for it */
		ASSERT(!(dn->dn_phys->dn_flags &
		    DNODE_FLAG_USERUSED_ACCOUNTED));
		ASSERT(!(dn->dn_phys->dn_flags &
		    DNODE_FLAG_USEROBJUSED_ACCOUNTED));
	}

	mutex_enter(&dn->dn_mtx);
	if (dn->dn_allocated_txg == tx->tx_txg) {
		/* The dnode is newly allocated or reallocated */
		if (dnp->dn_type == DMU_OT_NONE) {
			/* this is a first alloc, not a realloc */
			dnp->dn_nlevels = 1;
			dnp->dn_nblkptr = dn->dn_nblkptr;
		}

		dnp->dn_type = dn->dn_type;
		dnp->dn_bonustype = dn->dn_bonustype;
		dnp->dn_bonuslen = dn->dn_bonuslen;
	}

	dnp->dn_extra_slots = dn->dn_num_slots - 1;

	ASSERT(dnp->dn_nlevels > 1 ||
	    BP_IS_HOLE(&dnp->dn_blkptr[0]) ||
	    BP_IS_EMBEDDED(&dnp->dn_blkptr[0]) ||
	    BP_GET_LSIZE(&dnp->dn_blkptr[0]) ==
	    dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT);
	ASSERT(dnp->dn_nlevels < 2 ||
	    BP_IS_HOLE(&dnp->dn_blkptr[0]) ||
	    BP_GET_LSIZE(&dnp->dn_blkptr[0]) == 1 << dnp->dn_indblkshift);

	if (dn->dn_next_type[txgoff] != 0) {
		dnp->dn_type = dn->dn_type;
		dn->dn_next_type[txgoff] = 0;
	}

	if (dn->dn_next_blksz[txgoff] != 0) {
		ASSERT(P2PHASE(dn->dn_next_blksz[txgoff],
		    SPA_MINBLOCKSIZE) == 0);
		ASSERT(BP_IS_HOLE(&dnp->dn_blkptr[0]) ||
		    dn->dn_maxblkid == 0 || list_head(list) != NULL ||
		    dn->dn_next_blksz[txgoff] >> SPA_MINBLOCKSHIFT ==
		    dnp->dn_datablkszsec ||
		    !range_tree_is_empty(dn->dn_free_ranges[txgoff]));
		dnp->dn_datablkszsec =
		    dn->dn_next_blksz[txgoff] >> SPA_MINBLOCKSHIFT;
		dn->dn_next_blksz[txgoff] = 0;
	}

	if (dn->dn_next_bonuslen[txgoff] != 0) {
		if (dn->dn_next_bonuslen[txgoff] == DN_ZERO_BONUSLEN)
			dnp->dn_bonuslen = 0;
		else
			dnp->dn_bonuslen = dn->dn_next_bonuslen[txgoff];
		ASSERT(dnp->dn_bonuslen <=
		    DN_SLOTS_TO_BONUSLEN(dnp->dn_extra_slots + 1));
		dn->dn_next_bonuslen[txgoff] = 0;
	}

	if (dn->dn_next_bonustype[txgoff] != 0) {
		ASSERT(DMU_OT_IS_VALID(dn->dn_next_bonustype[txgoff]));
		dnp->dn_bonustype = dn->dn_next_bonustype[txgoff];
		dn->dn_next_bonustype[txgoff] = 0;
	}

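	/*
	 * The object is being deleted: its dn_free_txg has been set to a
	 * txg at or before the one we are syncing.
	 */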
	boolean_t freeing_dnode = dn->dn_free_txg > 0 &&
	    dn->dn_free_txg <= tx->tx_txg;

	/*
	 * Remove the spill block if we have been explicitly asked to
	 * remove it, or if the object is being removed.
	 */
	if (dn->dn_rm_spillblk[txgoff] || freeing_dnode) {
		if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR)
			kill_spill = B_TRUE;
		dn->dn_rm_spillblk[txgoff] = 0;
	}

	if (dn->dn_next_indblkshift[txgoff] != 0) {
		ASSERT(dnp->dn_nlevels == 1);
		dnp->dn_indblkshift = dn->dn_next_indblkshift[txgoff];
		dn->dn_next_indblkshift[txgoff] = 0;
	}

	/*
	 * Just take the live (open-context) values for checksum and compress.
	 * Strictly speaking it's a future leak, but nothing bad happens if we
	 * start using the new checksum or compress algorithm a little early.
	 */
	dnp->dn_checksum = dn->dn_checksum;
	dnp->dn_compress = dn->dn_compress;

	mutex_exit(&dn->dn_mtx);

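	/*
	 * Free the spill block and clear the SPILL_BLKPTR flag now that
	 * the dnode no longer references it.
	 */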
	if (kill_spill) {
		free_blocks(dn, DN_SPILL_BLKPTR(dn->dn_phys), 1, tx);
		mutex_enter(&dn->dn_mtx);
		dnp->dn_flags &= ~DNODE_FLAG_SPILL_BLKPTR;
		mutex_exit(&dn->dn_mtx);
	}

	/* process all the "freed" ranges in the file */
	if (dn->dn_free_ranges[txgoff] != NULL) {
		dnode_sync_free_range_arg_t dsfra;
		dsfra.dsfra_dnode = dn;
		dsfra.dsfra_tx = tx;
		dsfra.dsfra_free_indirects = freeing_dnode;
		mutex_enter(&dn->dn_mtx);
		if (freeing_dnode) {
			ASSERT(range_tree_contains(dn->dn_free_ranges[txgoff],
			    0, dn->dn_maxblkid + 1));
		}
		/*
		 * Because dnode_sync_free_range() must drop dn_mtx during its
		 * processing, using it as a callback to range_tree_vacate() is
		 * not safe.  No other operations (besides destroy) are allowed
		 * once range_tree_vacate() has begun, and dropping dn_mtx
		 * would leave a window open for another thread to observe that
		 * invalid (and unsafe) state.
		 */
		range_tree_walk(dn->dn_free_ranges[txgoff],
		    dnode_sync_free_range, &dsfra);
		range_tree_vacate(dn->dn_free_ranges[txgoff], NULL, NULL);
		range_tree_destroy(dn->dn_free_ranges[txgoff]);
		dn->dn_free_ranges[txgoff] = NULL;
		mutex_exit(&dn->dn_mtx);
	}

	if (freeing_dnode) {
		dn->dn_objset->os_freed_dnodes++;
		dnode_sync_free(dn, tx);
		return;
	}

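	/*
	 * A dnode occupying more than one slot implies the large_dnode
	 * feature; record the activation on the dataset.
	 */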
	if (dn->dn_num_slots > DNODE_MIN_SLOTS) {
		dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
		mutex_enter(&ds->ds_lock);
		ds->ds_feature_activation[SPA_FEATURE_LARGE_DNODE] =
		    (void *)B_TRUE;
		mutex_exit(&ds->ds_lock);
	}

	if (dn->dn_next_nlevels[txgoff]) {
		dnode_increase_indirection(dn, tx);
		dn->dn_next_nlevels[txgoff] = 0;
	}

	/*
	 * This must be done after dnode_sync_free_range()
	 * and dnode_increase_indirection(). See dnode_new_blkid()
	 * for an explanation of the high bit being set.
	 */
	if (dn->dn_next_maxblkid[txgoff]) {
		mutex_enter(&dn->dn_mtx);
		dnp->dn_maxblkid =
		    dn->dn_next_maxblkid[txgoff] & ~DMU_NEXT_MAXBLKID_SET;
		dn->dn_next_maxblkid[txgoff] = 0;
		mutex_exit(&dn->dn_mtx);
	}

	if (dn->dn_next_nblkptr[txgoff]) {
		/* this should only happen on a realloc */
		ASSERT(dn->dn_allocated_txg == tx->tx_txg);
		if (dn->dn_next_nblkptr[txgoff] > dnp->dn_nblkptr) {
			/* zero the new blkptrs we are gaining */
			bzero(dnp->dn_blkptr + dnp->dn_nblkptr,
			    sizeof (blkptr_t) *
			    (dn->dn_next_nblkptr[txgoff] - dnp->dn_nblkptr));
#ifdef ZFS_DEBUG
		} else {
			int i;
			ASSERT(dn->dn_next_nblkptr[txgoff] < dnp->dn_nblkptr);
			/* the blkptrs we are losing better be unallocated */
			for (i = 0; i < dnp->dn_nblkptr; i++) {
				if (i >= dn->dn_next_nblkptr[txgoff])
					ASSERT(BP_IS_HOLE(&dnp->dn_blkptr[i]));
			}
#endif
		}
		mutex_enter(&dn->dn_mtx);
		dnp->dn_nblkptr = dn->dn_next_nblkptr[txgoff];
		dn->dn_next_nblkptr[txgoff] = 0;
		mutex_exit(&dn->dn_mtx);
	}

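	/* Initiate the writes for this txg's dirty records. */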
	dbuf_sync_list(list, dn->dn_phys->dn_nlevels - 1, tx);

	if (!DMU_OBJECT_IS_SPECIAL(dn->dn_object)) {
		ASSERT3P(list_head(list), ==, NULL);
		dnode_rele(dn, (void *)(uintptr_t)tx->tx_txg);
	}

	/*
	 * Although we have dropped our reference to the dnode, it
	 * can't be evicted until it's written, and we haven't yet
	 * initiated the IO for the dnode's dbuf.  Additionally, the caller
	 * has already added a reference to the dnode because it's on the
	 * os_synced_dnodes list.
	 */
}