dbuf.c revision 285202
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
 * Copyright (c) 2012, 2015 by Delphix. All rights reserved.
 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/dmu.h>
#include <sys/dmu_send.h>
#include <sys/dmu_impl.h>
#include <sys/dbuf.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dmu_tx.h>
#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/dmu_zfetch.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/zfeature.h>
#include <sys/blkptr.h>
#include <sys/range_tree.h>

/*
 * Number of times that zfs_free_range() took the slow path while doing
 * a zfs receive.  A nonzero value indicates a potential performance problem.
 */
uint64_t zfs_free_range_recv_miss;

static void dbuf_destroy(dmu_buf_impl_t *db);
static boolean_t dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx);
static void dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx);

/*
 * Global data structures and functions for the dbuf cache.
 */
static kmem_cache_t *dbuf_cache;

/* ARGSUSED */
static int
dbuf_cons(void *vdb, void *unused, int kmflag)
{
	dmu_buf_impl_t *db = vdb;
	bzero(db, sizeof (dmu_buf_impl_t));

	mutex_init(&db->db_mtx, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&db->db_changed, NULL, CV_DEFAULT, NULL);
	refcount_create(&db->db_holds);

	return (0);
}

/* ARGSUSED */
static void
dbuf_dest(void *vdb, void *unused)
{
	dmu_buf_impl_t *db = vdb;
	mutex_destroy(&db->db_mtx);
	cv_destroy(&db->db_changed);
	refcount_destroy(&db->db_holds);
}

/*
 * dbuf hash table routines
 */
static dbuf_hash_table_t dbuf_hash_table;

static uint64_t dbuf_hash_count;

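/*
 * Hash a dbuf's identity -- the (objset, object, level, blkid) tuple --
 * into a 64-bit value, mixing each component through the ZFS CRC64
 * table and then folding in the high-order bits that the byte-wide
 * CRC steps do not reach.
 */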
static uint64_t
dbuf_hash(void *os, uint64_t obj, uint8_t lvl, uint64_t blkid)
{
	uintptr_t osv = (uintptr_t)os;
	uint64_t crc = -1ULL;

	ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);
	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (lvl)) & 0xFF];
	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (osv >> 6)) & 0xFF];
	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (obj >> 0)) & 0xFF];
	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (obj >> 8)) & 0xFF];
	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (blkid >> 0)) & 0xFF];
	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (blkid >> 8)) & 0xFF];

	crc ^= (osv >> 14) ^ (obj >> 16) ^ (blkid >> 16);

	return (crc);
}

#define	DBUF_HASH(os, obj, level, blkid) dbuf_hash(os, obj, level, blkid)

#define	DBUF_EQUAL(dbuf, os, obj, level, blkid)		\
	((dbuf)->db.db_object == (obj) &&		\
	(dbuf)->db_objset == (os) &&			\
	(dbuf)->db_level == (level) &&			\
	(dbuf)->db_blkid == (blkid))

dmu_buf_impl_t *
dbuf_find(dnode_t *dn, uint8_t level, uint64_t blkid)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	objset_t *os = dn->dn_objset;
	uint64_t obj = dn->dn_object;
	uint64_t hv = DBUF_HASH(os, obj, level, blkid);
	uint64_t idx = hv & h->hash_table_mask;
	dmu_buf_impl_t *db;

	mutex_enter(DBUF_HASH_MUTEX(h, idx));
	for (db = h->hash_table[idx]; db != NULL; db = db->db_hash_next) {
		if (DBUF_EQUAL(db, os, obj, level, blkid)) {
			mutex_enter(&db->db_mtx);
			if (db->db_state != DB_EVICTING) {
				mutex_exit(DBUF_HASH_MUTEX(h, idx));
				return (db);
			}
			mutex_exit(&db->db_mtx);
		}
	}
	mutex_exit(DBUF_HASH_MUTEX(h, idx));
	return (NULL);
}

/*
 * Insert an entry into the hash table.  If there is already an element
 * equal to elem in the hash table, then the already existing element
 * will be returned and the new element will not be inserted.
 * Otherwise returns NULL.
 */
static dmu_buf_impl_t *
dbuf_hash_insert(dmu_buf_impl_t *db)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	objset_t *os = db->db_objset;
	uint64_t obj = db->db.db_object;
	int level = db->db_level;
	uint64_t blkid = db->db_blkid;
	uint64_t hv = DBUF_HASH(os, obj, level, blkid);
	uint64_t idx = hv & h->hash_table_mask;
	dmu_buf_impl_t *dbf;

	mutex_enter(DBUF_HASH_MUTEX(h, idx));
	for (dbf = h->hash_table[idx]; dbf != NULL; dbf = dbf->db_hash_next) {
		if (DBUF_EQUAL(dbf, os, obj, level, blkid)) {
			mutex_enter(&dbf->db_mtx);
			if (dbf->db_state != DB_EVICTING) {
				mutex_exit(DBUF_HASH_MUTEX(h, idx));
				return (dbf);
			}
			mutex_exit(&dbf->db_mtx);
		}
	}

	mutex_enter(&db->db_mtx);
	db->db_hash_next = h->hash_table[idx];
	h->hash_table[idx] = db;
	mutex_exit(DBUF_HASH_MUTEX(h, idx));
	atomic_inc_64(&dbuf_hash_count);

	return (NULL);
}

/*
 * Remove an entry from the hash table.  It must be in the EVICTING state.
 */
static void
dbuf_hash_remove(dmu_buf_impl_t *db)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	uint64_t hv = DBUF_HASH(db->db_objset, db->db.db_object,
	    db->db_level, db->db_blkid);
	uint64_t idx = hv & h->hash_table_mask;
	dmu_buf_impl_t *dbf, **dbp;

	/*
	 * We mustn't hold db_mtx to maintain lock ordering:
	 * DBUF_HASH_MUTEX > db_mtx.
	 */
	ASSERT(refcount_is_zero(&db->db_holds));
	ASSERT(db->db_state == DB_EVICTING);
	ASSERT(!MUTEX_HELD(&db->db_mtx));

	mutex_enter(DBUF_HASH_MUTEX(h, idx));
	dbp = &h->hash_table[idx];
	while ((dbf = *dbp) != db) {
		dbp = &dbf->db_hash_next;
		ASSERT(dbf != NULL);
	}
	*dbp = db->db_hash_next;
	db->db_hash_next = NULL;
	mutex_exit(DBUF_HASH_MUTEX(h, idx));
	atomic_dec_64(&dbuf_hash_count);
}

static arc_evict_func_t dbuf_do_evict;

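/*
 * Invoke the user-supplied eviction callback registered on a level-0
 * dbuf, if any, and clear it so that it fires at most once.
 */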
static void
dbuf_evict_user(dmu_buf_impl_t *db)
{
	ASSERT(MUTEX_HELD(&db->db_mtx));

	if (db->db_level != 0 || db->db_evict_func == NULL)
		return;

	db->db_evict_func(&db->db, db->db_user_ptr);
	db->db_user_ptr = NULL;
	db->db_evict_func = NULL;
}

boolean_t
dbuf_is_metadata(dmu_buf_impl_t *db)
{
	if (db->db_level > 0) {
		return (B_TRUE);
	} else {
		boolean_t is_metadata;

		DB_DNODE_ENTER(db);
		is_metadata = DMU_OT_IS_METADATA(DB_DNODE(db)->dn_type);
		DB_DNODE_EXIT(db);

		return (is_metadata);
	}
}

void
dbuf_evict(dmu_buf_impl_t *db)
{
	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(db->db_buf == NULL);
	ASSERT(db->db_data_pending == NULL);

	dbuf_clear(db);
	dbuf_destroy(db);
}

void
dbuf_init(void)
{
	uint64_t hsize = 1ULL << 16;
	dbuf_hash_table_t *h = &dbuf_hash_table;
	int i;

	/*
	 * The hash table is big enough to fill all of physical memory
	 * with an average 4K block size.  The table will take up
	 * totalmem*sizeof(void*)/4K (i.e. 2MB/GB with 8-byte pointers).
	 */
	while (hsize * 4096 < (uint64_t)physmem * PAGESIZE)
		hsize <<= 1;

retry:
	h->hash_table_mask = hsize - 1;
	h->hash_table = kmem_zalloc(hsize * sizeof (void *), KM_NOSLEEP);
	if (h->hash_table == NULL) {
		/* XXX - we should really return an error instead of assert */
		ASSERT(hsize > (1ULL << 10));
		hsize >>= 1;
		goto retry;
	}

	dbuf_cache = kmem_cache_create("dmu_buf_impl_t",
	    sizeof (dmu_buf_impl_t),
	    0, dbuf_cons, dbuf_dest, NULL, NULL, NULL, 0);

	for (i = 0; i < DBUF_MUTEXES; i++)
		mutex_init(&h->hash_mutexes[i], NULL, MUTEX_DEFAULT, NULL);
}

void
dbuf_fini(void)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	int i;

	for (i = 0; i < DBUF_MUTEXES; i++)
		mutex_destroy(&h->hash_mutexes[i]);
	kmem_free(h->hash_table, (h->hash_table_mask + 1) * sizeof (void *));
	kmem_cache_destroy(dbuf_cache);
}

/*
 * Other stuff.
 */

#ifdef ZFS_DEBUG
static void
dbuf_verify(dmu_buf_impl_t *db)
{
	dnode_t *dn;
	dbuf_dirty_record_t *dr;

	ASSERT(MUTEX_HELD(&db->db_mtx));

	if (!(zfs_flags & ZFS_DEBUG_DBUF_VERIFY))
		return;

	ASSERT(db->db_objset != NULL);
	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	if (dn == NULL) {
		ASSERT(db->db_parent == NULL);
		ASSERT(db->db_blkptr == NULL);
	} else {
		ASSERT3U(db->db.db_object, ==, dn->dn_object);
		ASSERT3P(db->db_objset, ==, dn->dn_objset);
		ASSERT3U(db->db_level, <, dn->dn_nlevels);
		ASSERT(db->db_blkid == DMU_BONUS_BLKID ||
		    db->db_blkid == DMU_SPILL_BLKID ||
		    !avl_is_empty(&dn->dn_dbufs));
	}
	if (db->db_blkid == DMU_BONUS_BLKID) {
		ASSERT(dn != NULL);
		ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
		ASSERT3U(db->db.db_offset, ==, DMU_BONUS_BLKID);
	} else if (db->db_blkid == DMU_SPILL_BLKID) {
		ASSERT(dn != NULL);
		ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
		ASSERT0(db->db.db_offset);
	} else {
		ASSERT3U(db->db.db_offset, ==, db->db_blkid * db->db.db_size);
	}

	for (dr = db->db_data_pending; dr != NULL; dr = dr->dr_next)
		ASSERT(dr->dr_dbuf == db);

	for (dr = db->db_last_dirty; dr != NULL; dr = dr->dr_next)
		ASSERT(dr->dr_dbuf == db);

	/*
	 * We can't assert that db_size matches dn_datablksz because it
	 * can be momentarily different when another thread is doing
	 * dnode_set_blksz().
	 */
	if (db->db_level == 0 && db->db.db_object == DMU_META_DNODE_OBJECT) {
		dr = db->db_data_pending;
		/*
		 * It should only be modified in syncing context, so
		 * make sure we only have one copy of the data.
		 */
		ASSERT(dr == NULL || dr->dt.dl.dr_data == db->db_buf);
	}

	/* verify db->db_blkptr */
	if (db->db_blkptr) {
		if (db->db_parent == dn->dn_dbuf) {
			/* db is pointed to by the dnode */
			/* ASSERT3U(db->db_blkid, <, dn->dn_nblkptr); */
			if (DMU_OBJECT_IS_SPECIAL(db->db.db_object))
				ASSERT(db->db_parent == NULL);
			else
				ASSERT(db->db_parent != NULL);
			if (db->db_blkid != DMU_SPILL_BLKID)
				ASSERT3P(db->db_blkptr, ==,
				    &dn->dn_phys->dn_blkptr[db->db_blkid]);
		} else {
			/* db is pointed to by an indirect block */
			int epb = db->db_parent->db.db_size >> SPA_BLKPTRSHIFT;
			ASSERT3U(db->db_parent->db_level, ==, db->db_level+1);
			ASSERT3U(db->db_parent->db.db_object, ==,
			    db->db.db_object);
			/*
			 * dnode_grow_indblksz() can make this fail if we don't
			 * have the struct_rwlock.  XXX indblksz no longer
			 * grows.  safe to do this now?
			 */
			if (RW_WRITE_HELD(&dn->dn_struct_rwlock)) {
				ASSERT3P(db->db_blkptr, ==,
				    ((blkptr_t *)db->db_parent->db.db_data +
				    db->db_blkid % epb));
			}
		}
	}
	if ((db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr)) &&
	    (db->db_buf == NULL || db->db_buf->b_data) &&
	    db->db.db_data && db->db_blkid != DMU_BONUS_BLKID &&
	    db->db_state != DB_FILL && !dn->dn_free_txg) {
		/*
		 * If the blkptr isn't set but the dbuf has nonzero data,
		 * it had better be dirty, otherwise we'll lose that
		 * data when we evict this buffer.
		 */
		if (db->db_dirtycnt == 0) {
			uint64_t *buf = db->db.db_data;
			int i;

			for (i = 0; i < db->db.db_size >> 3; i++) {
				ASSERT(buf[i] == 0);
			}
		}
	}
	DB_DNODE_EXIT(db);
}
#endif

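/*
 * Attach an ARC buffer to the dbuf, or detach the current one if buf is
 * NULL.  On detach, notify the user eviction callback and move the dbuf
 * to DB_UNCACHED (unless it is in the DB_NOFILL state).
 */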
static void
dbuf_set_data(dmu_buf_impl_t *db, arc_buf_t *buf)
{
	ASSERT(MUTEX_HELD(&db->db_mtx));
	db->db_buf = buf;
	if (buf != NULL) {
		ASSERT(buf->b_data != NULL);
		db->db.db_data = buf->b_data;
		if (!arc_released(buf))
			arc_set_callback(buf, dbuf_do_evict, db);
	} else {
		dbuf_evict_user(db);
		db->db.db_data = NULL;
		if (db->db_state != DB_NOFILL)
			db->db_state = DB_UNCACHED;
	}
}

/*
 * Loan out an arc_buf for read.  Return the loaned arc_buf.
 */
arc_buf_t *
dbuf_loan_arcbuf(dmu_buf_impl_t *db)
{
	arc_buf_t *abuf;

	mutex_enter(&db->db_mtx);
	if (arc_released(db->db_buf) || refcount_count(&db->db_holds) > 1) {
		int blksz = db->db.db_size;
		spa_t *spa = db->db_objset->os_spa;

		mutex_exit(&db->db_mtx);
		abuf = arc_loan_buf(spa, blksz);
		bcopy(db->db.db_data, abuf->b_data, blksz);
	} else {
		abuf = db->db_buf;
		arc_loan_inuse_buf(abuf, db);
		dbuf_set_data(db, NULL);
		mutex_exit(&db->db_mtx);
	}
	return (abuf);
}

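/*
 * Return the number of the block that contains the given byte offset
 * within the dnode.  If dn_datablkshift is unset, the object consists
 * of a single (possibly odd-sized) block, so the answer is block 0.
 */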
uint64_t
dbuf_whichblock(dnode_t *dn, uint64_t offset)
{
	if (dn->dn_datablkshift) {
		return (offset >> dn->dn_datablkshift);
	} else {
		ASSERT3U(offset, <, dn->dn_datablksz);
		return (0);
	}
}

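/*
 * Completion callback for the arc_read() issued by dbuf_read_impl():
 * install the buffer and move the dbuf to DB_CACHED on success (or, if
 * the block was freed in flight, install a zeroed buffer instead), drop
 * back to DB_UNCACHED on error, and wake any waiters in dbuf_read().
 */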
static void
dbuf_read_done(zio_t *zio, arc_buf_t *buf, void *vdb)
{
	dmu_buf_impl_t *db = vdb;

	mutex_enter(&db->db_mtx);
	ASSERT3U(db->db_state, ==, DB_READ);
	/*
	 * All reads are synchronous, so we must have a hold on the dbuf
	 */
	ASSERT(refcount_count(&db->db_holds) > 0);
	ASSERT(db->db_buf == NULL);
	ASSERT(db->db.db_data == NULL);
	if (db->db_level == 0 && db->db_freed_in_flight) {
		/* we were freed in flight; disregard any error */
		arc_release(buf, db);
		bzero(buf->b_data, db->db.db_size);
		arc_buf_freeze(buf);
		db->db_freed_in_flight = FALSE;
		dbuf_set_data(db, buf);
		db->db_state = DB_CACHED;
	} else if (zio == NULL || zio->io_error == 0) {
		dbuf_set_data(db, buf);
		db->db_state = DB_CACHED;
	} else {
		ASSERT(db->db_blkid != DMU_BONUS_BLKID);
		ASSERT3P(db->db_buf, ==, NULL);
		VERIFY(arc_buf_remove_ref(buf, db));
		db->db_state = DB_UNCACHED;
	}
	cv_broadcast(&db->db_changed);
	dbuf_rele_and_unlock(db, NULL);
}

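/*
 * Start a read of this dbuf's backing block.  Bonus buffers and holes
 * are satisfied immediately from the dnode or with zeroed memory;
 * otherwise the dbuf transitions to DB_READ and an asynchronous
 * arc_read() is issued, which completes in dbuf_read_done().  Called
 * with db_mtx held; the mutex is dropped before returning.
 */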
static void
dbuf_read_impl(dmu_buf_impl_t *db, zio_t *zio, uint32_t *flags)
{
	dnode_t *dn;
	zbookmark_phys_t zb;
	arc_flags_t aflags = ARC_FLAG_NOWAIT;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	ASSERT(!refcount_is_zero(&db->db_holds));
	/* We need the struct_rwlock to prevent db_blkptr from changing. */
	ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(db->db_state == DB_UNCACHED);
	ASSERT(db->db_buf == NULL);

	if (db->db_blkid == DMU_BONUS_BLKID) {
		int bonuslen = MIN(dn->dn_bonuslen, dn->dn_phys->dn_bonuslen);

		ASSERT3U(bonuslen, <=, db->db.db_size);
		db->db.db_data = zio_buf_alloc(DN_MAX_BONUSLEN);
		arc_space_consume(DN_MAX_BONUSLEN, ARC_SPACE_OTHER);
		if (bonuslen < DN_MAX_BONUSLEN)
			bzero(db->db.db_data, DN_MAX_BONUSLEN);
		if (bonuslen)
			bcopy(DN_BONUS(dn->dn_phys), db->db.db_data, bonuslen);
		DB_DNODE_EXIT(db);
		db->db_state = DB_CACHED;
		mutex_exit(&db->db_mtx);
		return;
	}

	/*
	 * Recheck BP_IS_HOLE() after dnode_block_freed() in case dnode_sync()
	 * processes the delete record and clears the bp while we are waiting
	 * for the dn_mtx (resulting in a "no" from block_freed).
	 */
	if (db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr) ||
	    (db->db_level == 0 && (dnode_block_freed(dn, db->db_blkid) ||
	    BP_IS_HOLE(db->db_blkptr)))) {
		arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);

		DB_DNODE_EXIT(db);
		dbuf_set_data(db, arc_buf_alloc(db->db_objset->os_spa,
		    db->db.db_size, db, type));
		bzero(db->db.db_data, db->db.db_size);
		db->db_state = DB_CACHED;
		*flags |= DB_RF_CACHED;
		mutex_exit(&db->db_mtx);
		return;
	}

	DB_DNODE_EXIT(db);

	db->db_state = DB_READ;
	mutex_exit(&db->db_mtx);

	if (DBUF_IS_L2CACHEABLE(db))
		aflags |= ARC_FLAG_L2CACHE;
	if (DBUF_IS_L2COMPRESSIBLE(db))
		aflags |= ARC_FLAG_L2COMPRESS;

	SET_BOOKMARK(&zb, db->db_objset->os_dsl_dataset ?
	    db->db_objset->os_dsl_dataset->ds_object : DMU_META_OBJSET,
	    db->db.db_object, db->db_level, db->db_blkid);

	dbuf_add_ref(db, NULL);

	(void) arc_read(zio, db->db_objset->os_spa, db->db_blkptr,
	    dbuf_read_done, db, ZIO_PRIORITY_SYNC_READ,
	    (*flags & DB_RF_CANFAIL) ? ZIO_FLAG_CANFAIL : ZIO_FLAG_MUSTSUCCEED,
	    &aflags, &zb);
	if (aflags & ARC_FLAG_CACHED)
		*flags |= DB_RF_CACHED;
}

int
dbuf_read(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags)
{
	int err = 0;
	boolean_t havepzio = (zio != NULL);
	boolean_t prefetch;
	dnode_t *dn;

	/*
	 * We don't have to hold the mutex to check db_state because it
	 * can't be freed while we have a hold on the buffer.
	 */
	ASSERT(!refcount_is_zero(&db->db_holds));

	if (db->db_state == DB_NOFILL)
		return (SET_ERROR(EIO));

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	if ((flags & DB_RF_HAVESTRUCT) == 0)
		rw_enter(&dn->dn_struct_rwlock, RW_READER);

	prefetch = db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
	    (flags & DB_RF_NOPREFETCH) == 0 && dn != NULL &&
	    DBUF_IS_CACHEABLE(db);

	mutex_enter(&db->db_mtx);
	if (db->db_state == DB_CACHED) {
		mutex_exit(&db->db_mtx);
		if (prefetch)
			dmu_zfetch(&dn->dn_zfetch, db->db.db_offset,
			    db->db.db_size, TRUE);
		if ((flags & DB_RF_HAVESTRUCT) == 0)
			rw_exit(&dn->dn_struct_rwlock);
		DB_DNODE_EXIT(db);
	} else if (db->db_state == DB_UNCACHED) {
		spa_t *spa = dn->dn_objset->os_spa;

		if (zio == NULL)
			zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
		dbuf_read_impl(db, zio, &flags);

		/* dbuf_read_impl has dropped db_mtx for us */

		if (prefetch)
			dmu_zfetch(&dn->dn_zfetch, db->db.db_offset,
			    db->db.db_size, flags & DB_RF_CACHED);

		if ((flags & DB_RF_HAVESTRUCT) == 0)
			rw_exit(&dn->dn_struct_rwlock);
		DB_DNODE_EXIT(db);

		if (!havepzio)
			err = zio_wait(zio);
	} else {
		/*
		 * Another reader came in while the dbuf was in flight
		 * between UNCACHED and CACHED.  Either a writer will finish
		 * writing the buffer (sending the dbuf to CACHED) or the
		 * first reader's request will reach the read_done callback
		 * and send the dbuf to CACHED.  Otherwise, a failure
		 * occurred and the dbuf went to UNCACHED.
		 */
		mutex_exit(&db->db_mtx);
		if (prefetch)
			dmu_zfetch(&dn->dn_zfetch, db->db.db_offset,
			    db->db.db_size, TRUE);
		if ((flags & DB_RF_HAVESTRUCT) == 0)
			rw_exit(&dn->dn_struct_rwlock);
		DB_DNODE_EXIT(db);

		/* Skip the wait per the caller's request. */
		mutex_enter(&db->db_mtx);
		if ((flags & DB_RF_NEVERWAIT) == 0) {
			while (db->db_state == DB_READ ||
			    db->db_state == DB_FILL) {
				ASSERT(db->db_state == DB_READ ||
				    (flags & DB_RF_HAVESTRUCT) == 0);
				DTRACE_PROBE2(blocked__read, dmu_buf_impl_t *,
				    db, zio_t *, zio);
				cv_wait(&db->db_changed, &db->db_mtx);
			}
			if (db->db_state == DB_UNCACHED)
				err = SET_ERROR(EIO);
		}
		mutex_exit(&db->db_mtx);
	}

	ASSERT(err || havepzio || db->db_state == DB_CACHED);
	return (err);
}

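/*
 * Prepare the dbuf to be entirely overwritten by the caller without
 * reading the block from disk: allocate a fresh ARC buffer if needed
 * and move the dbuf to DB_FILL.
 */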
static void
dbuf_noread(dmu_buf_impl_t *db)
{
	ASSERT(!refcount_is_zero(&db->db_holds));
	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
	mutex_enter(&db->db_mtx);
	while (db->db_state == DB_READ || db->db_state == DB_FILL)
		cv_wait(&db->db_changed, &db->db_mtx);
	if (db->db_state == DB_UNCACHED) {
		arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
		spa_t *spa = db->db_objset->os_spa;

		ASSERT(db->db_buf == NULL);
		ASSERT(db->db.db_data == NULL);
		dbuf_set_data(db, arc_buf_alloc(spa, db->db.db_size, db, type));
		db->db_state = DB_FILL;
	} else if (db->db_state == DB_NOFILL) {
		dbuf_set_data(db, NULL);
	} else {
		ASSERT3U(db->db_state, ==, DB_CACHED);
	}
	mutex_exit(&db->db_mtx);
}

/*
 * This is our just-in-time copy function.  It makes a copy of
 * buffers that have been modified in a previous transaction
 * group before we modify them in the current active group.
 *
 * This function is used in two places: when we are dirtying a
 * buffer for the first time in a txg, and when we are freeing
 * a range in a dnode that includes this buffer.
 *
 * Note that when we are called from dbuf_free_range() we do
 * not put a hold on the buffer; we just traverse the active
 * dbuf list for the dnode.
 */
static void
dbuf_fix_old_data(dmu_buf_impl_t *db, uint64_t txg)
{
	dbuf_dirty_record_t *dr = db->db_last_dirty;

	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(db->db.db_data != NULL);
	ASSERT(db->db_level == 0);
	ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT);

	if (dr == NULL ||
	    (dr->dt.dl.dr_data !=
	    ((db->db_blkid == DMU_BONUS_BLKID) ? db->db.db_data : db->db_buf)))
		return;

	/*
	 * If the last dirty record for this dbuf has not yet synced
	 * and it's referencing the dbuf data, either:
	 *	reset the reference to point to a new copy,
	 * or (if there are no active holders)
	 *	just null out the current db_data pointer.
	 */
	ASSERT(dr->dr_txg >= txg - 2);
	if (db->db_blkid == DMU_BONUS_BLKID) {
		/* Note that the data bufs here are zio_bufs */
		dr->dt.dl.dr_data = zio_buf_alloc(DN_MAX_BONUSLEN);
		arc_space_consume(DN_MAX_BONUSLEN, ARC_SPACE_OTHER);
		bcopy(db->db.db_data, dr->dt.dl.dr_data, DN_MAX_BONUSLEN);
	} else if (refcount_count(&db->db_holds) > db->db_dirtycnt) {
		int size = db->db.db_size;
		arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
		spa_t *spa = db->db_objset->os_spa;

		dr->dt.dl.dr_data = arc_buf_alloc(spa, size, db, type);
		bcopy(db->db.db_data, dr->dt.dl.dr_data->b_data, size);
	} else {
		dbuf_set_data(db, NULL);
	}
}

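/*
 * Undo a dmu_sync()-style override on this dirty record: free the
 * block that was already written (unless it is a hole or a nopwrite),
 * return the record to the NOT_OVERRIDDEN state, and release the
 * buffer so it is left in a consistent dirty state.
 */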
void
dbuf_unoverride(dbuf_dirty_record_t *dr)
{
	dmu_buf_impl_t *db = dr->dr_dbuf;
	blkptr_t *bp = &dr->dt.dl.dr_overridden_by;
	uint64_t txg = dr->dr_txg;

	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(dr->dt.dl.dr_override_state != DR_IN_DMU_SYNC);
	ASSERT(db->db_level == 0);

	if (db->db_blkid == DMU_BONUS_BLKID ||
	    dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN)
		return;

	ASSERT(db->db_data_pending != dr);

	/* free this block */
	if (!BP_IS_HOLE(bp) && !dr->dt.dl.dr_nopwrite)
		zio_free(db->db_objset->os_spa, txg, bp);

	dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
	dr->dt.dl.dr_nopwrite = B_FALSE;

	/*
	 * Release the already-written buffer, so we leave it in
	 * a consistent dirty state.  Note that all callers are
	 * modifying the buffer, so they will immediately do
	 * another (redundant) arc_release().  Therefore, leave
	 * the buf thawed to save the effort of freezing &
	 * immediately re-thawing it.
	 */
	arc_release(dr->dt.dl.dr_data, db);
}

/*
 * Evict (if it's unreferenced) or clear (if it's referenced) any level-0
 * data blocks in the free range, so that any future readers will find
 * empty blocks.
 *
 * This is a no-op if the dataset is in the middle of an incremental
 * receive; see comment below for details.
 */
void
dbuf_free_range(dnode_t *dn, uint64_t start_blkid, uint64_t end_blkid,
    dmu_tx_t *tx)
{
	dmu_buf_impl_t *db, *db_next, db_search;
	uint64_t txg = tx->tx_txg;
	avl_index_t where;

	if (end_blkid > dn->dn_maxblkid && (end_blkid != DMU_SPILL_BLKID))
		end_blkid = dn->dn_maxblkid;
	dprintf_dnode(dn, "start=%llu end=%llu\n", start_blkid, end_blkid);

	db_search.db_level = 0;
	db_search.db_blkid = start_blkid;
	db_search.db_state = DB_SEARCH;

	mutex_enter(&dn->dn_dbufs_mtx);
	if (start_blkid >= dn->dn_unlisted_l0_blkid) {
		/* There can't be any dbufs in this range; no need to search. */
#ifdef DEBUG
		db = avl_find(&dn->dn_dbufs, &db_search, &where);
		ASSERT3P(db, ==, NULL);
		db = avl_nearest(&dn->dn_dbufs, where, AVL_AFTER);
		ASSERT(db == NULL || db->db_level > 0);
#endif
		mutex_exit(&dn->dn_dbufs_mtx);
		return;
	} else if (dmu_objset_is_receiving(dn->dn_objset)) {
		/*
		 * If we are receiving, we expect there to be no dbufs in
		 * the range to be freed, because receive modifies each
		 * block at most once, and in offset order.  If this is
		 * not the case, it can lead to performance problems,
		 * so note that we unexpectedly took the slow path.
		 */
		atomic_inc_64(&zfs_free_range_recv_miss);
	}

	db = avl_find(&dn->dn_dbufs, &db_search, &where);
	ASSERT3P(db, ==, NULL);
	db = avl_nearest(&dn->dn_dbufs, where, AVL_AFTER);

	for (; db != NULL; db = db_next) {
		db_next = AVL_NEXT(&dn->dn_dbufs, db);
		ASSERT(db->db_blkid != DMU_BONUS_BLKID);

		if (db->db_level != 0 || db->db_blkid > end_blkid) {
			break;
		}
		ASSERT3U(db->db_blkid, >=, start_blkid);

		/* found a level 0 buffer in the range */
		mutex_enter(&db->db_mtx);
		if (dbuf_undirty(db, tx)) {
			/* mutex has been dropped and dbuf destroyed */
			continue;
		}

		if (db->db_state == DB_UNCACHED ||
		    db->db_state == DB_NOFILL ||
		    db->db_state == DB_EVICTING) {
			ASSERT(db->db.db_data == NULL);
			mutex_exit(&db->db_mtx);
			continue;
		}
		if (db->db_state == DB_READ || db->db_state == DB_FILL) {
			/* will be handled in dbuf_read_done or dbuf_rele */
			db->db_freed_in_flight = TRUE;
			mutex_exit(&db->db_mtx);
			continue;
		}
		if (refcount_count(&db->db_holds) == 0) {
			ASSERT(db->db_buf);
			dbuf_clear(db);
			continue;
		}
		/* The dbuf is referenced */

		if (db->db_last_dirty != NULL) {
			dbuf_dirty_record_t *dr = db->db_last_dirty;

			if (dr->dr_txg == txg) {
				/*
				 * This buffer is "in-use", re-adjust the file
				 * size to reflect that this buffer may
				 * contain new data when we sync.
				 */
				if (db->db_blkid != DMU_SPILL_BLKID &&
				    db->db_blkid > dn->dn_maxblkid)
					dn->dn_maxblkid = db->db_blkid;
				dbuf_unoverride(dr);
			} else {
				/*
				 * This dbuf is not dirty in the open context.
				 * Either uncache it (if it's not referenced in
				 * the open context) or reset its contents to
				 * empty.
				 */
				dbuf_fix_old_data(db, txg);
			}
		}
		/* clear the contents if it's cached */
		if (db->db_state == DB_CACHED) {
			ASSERT(db->db.db_data != NULL);
			arc_release(db->db_buf, db);
			bzero(db->db.db_data, db->db.db_size);
			arc_buf_freeze(db->db_buf);
		}

		mutex_exit(&db->db_mtx);
	}
	mutex_exit(&dn->dn_dbufs_mtx);
}

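/*
 * Determine whether the block backing this dbuf could actually be
 * freed, i.e. whether it exists and (as dsl_dataset_block_freeable()
 * decides from its birth txg) is not still referenced by a snapshot.
 */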
static int
dbuf_block_freeable(dmu_buf_impl_t *db)
{
	dsl_dataset_t *ds = db->db_objset->os_dsl_dataset;
	uint64_t birth_txg = 0;

	/*
	 * We don't need any locking to protect db_blkptr:
	 * If it's syncing, then db_last_dirty will be set
	 * so we'll ignore db_blkptr.
	 *
	 * This logic ensures that only block births for
	 * filled blocks are considered.
	 */
	ASSERT(MUTEX_HELD(&db->db_mtx));
	if (db->db_last_dirty && (db->db_blkptr == NULL ||
	    !BP_IS_HOLE(db->db_blkptr))) {
		birth_txg = db->db_last_dirty->dr_txg;
	} else if (db->db_blkptr != NULL && !BP_IS_HOLE(db->db_blkptr)) {
		birth_txg = db->db_blkptr->blk_birth;
	}

	/*
	 * If this block doesn't exist or is in a snapshot, it can't be freed.
	 * Don't pass the bp to dsl_dataset_block_freeable() since we
	 * are holding the db_mtx lock and might deadlock if we are
	 * prefetching a dedup-ed block.
	 */
	if (birth_txg != 0)
		return (ds == NULL ||
		    dsl_dataset_block_freeable(ds, NULL, birth_txg));
	else
		return (B_FALSE);
}

void
dbuf_new_size(dmu_buf_impl_t *db, int size, dmu_tx_t *tx)
{
	arc_buf_t *buf, *obuf;
	int osize = db->db.db_size;
	arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
	dnode_t *dn;

	ASSERT(db->db_blkid != DMU_BONUS_BLKID);

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);

	/* XXX does *this* func really need the lock? */
	ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));

	/*
	 * This call to dmu_buf_will_dirty() with the dn_struct_rwlock held
	 * is OK, because there can be no other references to the db
	 * when we are changing its size, so no concurrent DB_FILL can
	 * be happening.
	 */
	/*
	 * XXX we should be doing a dbuf_read, checking the return
	 * value and returning that up to our callers
	 */
	dmu_buf_will_dirty(&db->db, tx);

	/* create the data buffer for the new block */
	buf = arc_buf_alloc(dn->dn_objset->os_spa, size, db, type);

	/* copy old block data to the new block */
	obuf = db->db_buf;
	bcopy(obuf->b_data, buf->b_data, MIN(osize, size));
	/* zero the remainder */
	if (size > osize)
		bzero((uint8_t *)buf->b_data + osize, size - osize);

	mutex_enter(&db->db_mtx);
	dbuf_set_data(db, buf);
	VERIFY(arc_buf_remove_ref(obuf, db));
	db->db.db_size = size;

	if (db->db_level == 0) {
		ASSERT3U(db->db_last_dirty->dr_txg, ==, tx->tx_txg);
		db->db_last_dirty->dt.dl.dr_data = buf;
	}
	mutex_exit(&db->db_mtx);

	dnode_willuse_space(dn, size-osize, tx);
	DB_DNODE_EXIT(db);
}

void
dbuf_release_bp(dmu_buf_impl_t *db)
{
	objset_t *os = db->db_objset;

	ASSERT(dsl_pool_sync_context(dmu_objset_pool(os)));
	ASSERT(arc_released(os->os_phys_buf) ||
	    list_link_active(&os->os_dsl_dataset->ds_synced_link));
	ASSERT(db->db_parent == NULL || arc_released(db->db_parent->db_buf));

	(void) arc_release(db->db_buf, db);
}

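/*
 * Mark this dbuf dirty in the given transaction: create (or find) the
 * dirty record for this txg, preserve older in-flight data via
 * dbuf_fix_old_data(), recursively dirty the parent indirect blocks,
 * and notify the dnode.  Returns the dirty record for the txg.
 */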
dbuf_dirty_record_t *
dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
{
	dnode_t *dn;
	objset_t *os;
	dbuf_dirty_record_t **drp, *dr;
	int drop_struct_lock = FALSE;
	boolean_t do_free_accounting = B_FALSE;
	int txgoff = tx->tx_txg & TXG_MASK;

	ASSERT(tx->tx_txg != 0);
	ASSERT(!refcount_is_zero(&db->db_holds));
	DMU_TX_DIRTY_BUF(tx, db);

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	/*
	 * Shouldn't dirty a regular buffer in syncing context.  Private
	 * objects may be dirtied in syncing context, but only if they
	 * were already pre-dirtied in open context.
	 */
	ASSERT(!dmu_tx_is_syncing(tx) ||
	    BP_IS_HOLE(dn->dn_objset->os_rootbp) ||
	    DMU_OBJECT_IS_SPECIAL(dn->dn_object) ||
	    dn->dn_objset->os_dsl_dataset == NULL);
	/*
	 * We make this assert for private objects as well, but after we
	 * check if we're already dirty.  They are allowed to re-dirty
	 * in syncing context.
	 */
	ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT ||
	    dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx ==
	    (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN));

	mutex_enter(&db->db_mtx);
	/*
	 * XXX make this true for indirects too?  The problem is that
	 * transactions created with dmu_tx_create_assigned() from
	 * syncing context don't bother holding ahead.
	 */
	ASSERT(db->db_level != 0 ||
	    db->db_state == DB_CACHED || db->db_state == DB_FILL ||
	    db->db_state == DB_NOFILL);

	mutex_enter(&dn->dn_mtx);
	/*
	 * Don't set dirtyctx to SYNC if we're just modifying this as we
	 * initialize the objset.
	 */
	if (dn->dn_dirtyctx == DN_UNDIRTIED &&
	    !BP_IS_HOLE(dn->dn_objset->os_rootbp)) {
		dn->dn_dirtyctx =
		    (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN);
		ASSERT(dn->dn_dirtyctx_firstset == NULL);
		dn->dn_dirtyctx_firstset = kmem_alloc(1, KM_SLEEP);
	}
	mutex_exit(&dn->dn_mtx);

	if (db->db_blkid == DMU_SPILL_BLKID)
		dn->dn_have_spill = B_TRUE;

	/*
	 * If this buffer is already dirty, we're done.
	 */
	drp = &db->db_last_dirty;
	ASSERT(*drp == NULL || (*drp)->dr_txg <= tx->tx_txg ||
	    db->db.db_object == DMU_META_DNODE_OBJECT);
	while ((dr = *drp) != NULL && dr->dr_txg > tx->tx_txg)
		drp = &dr->dr_next;
	if (dr && dr->dr_txg == tx->tx_txg) {
		DB_DNODE_EXIT(db);

		if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID) {
			/*
			 * If this buffer has already been written out,
			 * we now need to reset its state.
			 */
			dbuf_unoverride(dr);
			if (db->db.db_object != DMU_META_DNODE_OBJECT &&
			    db->db_state != DB_NOFILL)
				arc_buf_thaw(db->db_buf);
		}
		mutex_exit(&db->db_mtx);
		return (dr);
	}

	/*
	 * Only valid if not already dirty.
	 */
	ASSERT(dn->dn_object == 0 ||
	    dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx ==
	    (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN));

	ASSERT3U(dn->dn_nlevels, >, db->db_level);
	ASSERT((dn->dn_phys->dn_nlevels == 0 && db->db_level == 0) ||
	    dn->dn_phys->dn_nlevels > db->db_level ||
	    dn->dn_next_nlevels[txgoff] > db->db_level ||
	    dn->dn_next_nlevels[(tx->tx_txg-1) & TXG_MASK] > db->db_level ||
	    dn->dn_next_nlevels[(tx->tx_txg-2) & TXG_MASK] > db->db_level);

	/*
	 * We should only be dirtying in syncing context if it's the
	 * mos or we're initializing the os or it's a special object.
	 * However, we are allowed to dirty in syncing context provided
	 * we already dirtied it in open context.  Hence we must make
	 * this assertion only if we're not already dirty.
	 */
	os = dn->dn_objset;
	ASSERT(!dmu_tx_is_syncing(tx) || DMU_OBJECT_IS_SPECIAL(dn->dn_object) ||
	    os->os_dsl_dataset == NULL || BP_IS_HOLE(os->os_rootbp));
	ASSERT(db->db.db_size != 0);

	dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size);

	if (db->db_blkid != DMU_BONUS_BLKID) {
		/*
		 * Update the accounting.
		 * Note: we delay "free accounting" until after we drop
		 * the db_mtx.  This keeps us from grabbing other locks
		 * (and possibly deadlocking) in bp_get_dsize() while
		 * also holding the db_mtx.
		 */
		dnode_willuse_space(dn, db->db.db_size, tx);
		do_free_accounting = dbuf_block_freeable(db);
	}

	/*
	 * If this buffer is dirty in an old transaction group we need
	 * to make a copy of it so that the changes we make in this
	 * transaction group won't leak out when we sync the older txg.
	 */
	dr = kmem_zalloc(sizeof (dbuf_dirty_record_t), KM_SLEEP);
	if (db->db_level == 0) {
		void *data_old = db->db_buf;

		if (db->db_state != DB_NOFILL) {
			if (db->db_blkid == DMU_BONUS_BLKID) {
				dbuf_fix_old_data(db, tx->tx_txg);
				data_old = db->db.db_data;
			} else if (db->db.db_object != DMU_META_DNODE_OBJECT) {
				/*
				 * Release the data buffer from the cache so
				 * that we can modify it without impacting
				 * possible other users of this cached data
				 * block.  Note that indirect blocks and
				 * private objects are not released until the
				 * syncing state (since they are only modified
				 * then).
				 */
				arc_release(db->db_buf, db);
				dbuf_fix_old_data(db, tx->tx_txg);
				data_old = db->db_buf;
			}
			ASSERT(data_old != NULL);
		}
		dr->dt.dl.dr_data = data_old;
	} else {
		mutex_init(&dr->dt.di.dr_mtx, NULL, MUTEX_DEFAULT, NULL);
		list_create(&dr->dt.di.dr_children,
		    sizeof (dbuf_dirty_record_t),
		    offsetof(dbuf_dirty_record_t, dr_dirty_node));
	}
	if (db->db_blkid != DMU_BONUS_BLKID && os->os_dsl_dataset != NULL)
		dr->dr_accounted = db->db.db_size;
	dr->dr_dbuf = db;
	dr->dr_txg = tx->tx_txg;
	dr->dr_next = *drp;
	*drp = dr;

	/*
	 * We could have been freed_in_flight between the dbuf_noread
	 * and dbuf_dirty.  We win, as though the dbuf_noread() had
	 * happened after the free.
	 */
	if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
	    db->db_blkid != DMU_SPILL_BLKID) {
		mutex_enter(&dn->dn_mtx);
		if (dn->dn_free_ranges[txgoff] != NULL) {
			range_tree_clear(dn->dn_free_ranges[txgoff],
			    db->db_blkid, 1);
		}
		mutex_exit(&dn->dn_mtx);
		db->db_freed_in_flight = FALSE;
	}

	/*
	 * This buffer is now part of this txg
	 */
	dbuf_add_ref(db, (void *)(uintptr_t)tx->tx_txg);
	db->db_dirtycnt += 1;
	ASSERT3U(db->db_dirtycnt, <=, 3);

	mutex_exit(&db->db_mtx);

	if (db->db_blkid == DMU_BONUS_BLKID ||
	    db->db_blkid == DMU_SPILL_BLKID) {
		mutex_enter(&dn->dn_mtx);
		ASSERT(!list_link_active(&dr->dr_dirty_node));
		list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
		mutex_exit(&dn->dn_mtx);
		dnode_setdirty(dn, tx);
		DB_DNODE_EXIT(db);
		return (dr);
	} else if (do_free_accounting) {
		blkptr_t *bp = db->db_blkptr;
		int64_t willfree = (bp && !BP_IS_HOLE(bp)) ?
		    bp_get_dsize(os->os_spa, bp) : db->db.db_size;
		/*
		 * This is only a guess -- if the dbuf is dirty
		 * in a previous txg, we don't know how much
		 * space it will use on disk yet.  We should
		 * really have the struct_rwlock to access
		 * db_blkptr, but since this is just a guess,
		 * it's OK if we get an odd answer.
		 */
		ddt_prefetch(os->os_spa, bp);
		dnode_willuse_space(dn, -willfree, tx);
	}

	if (!RW_WRITE_HELD(&dn->dn_struct_rwlock)) {
		rw_enter(&dn->dn_struct_rwlock, RW_READER);
		drop_struct_lock = TRUE;
	}

	if (db->db_level == 0) {
		dnode_new_blkid(dn, db->db_blkid, tx, drop_struct_lock);
		ASSERT(dn->dn_maxblkid >= db->db_blkid);
	}

	if (db->db_level+1 < dn->dn_nlevels) {
		dmu_buf_impl_t *parent = db->db_parent;
		dbuf_dirty_record_t *di;
		int parent_held = FALSE;

		if (db->db_parent == NULL || db->db_parent == dn->dn_dbuf) {
			int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;

			parent = dbuf_hold_level(dn, db->db_level+1,
			    db->db_blkid >> epbs, FTAG);
			ASSERT(parent != NULL);
			parent_held = TRUE;
		}
		if (drop_struct_lock)
			rw_exit(&dn->dn_struct_rwlock);
		ASSERT3U(db->db_level+1, ==, parent->db_level);
		di = dbuf_dirty(parent, tx);
		if (parent_held)
			dbuf_rele(parent, FTAG);

		mutex_enter(&db->db_mtx);
		/*
		 * Since we've dropped the mutex, it's possible that
		 * dbuf_undirty() might have changed this out from under us.
		 */
		if (db->db_last_dirty == dr ||
		    dn->dn_object == DMU_META_DNODE_OBJECT) {
			mutex_enter(&di->dt.di.dr_mtx);
			ASSERT3U(di->dr_txg, ==, tx->tx_txg);
			ASSERT(!list_link_active(&dr->dr_dirty_node));
			list_insert_tail(&di->dt.di.dr_children, dr);
			mutex_exit(&di->dt.di.dr_mtx);
			dr->dr_parent = di;
		}
		mutex_exit(&db->db_mtx);
	} else {
		ASSERT(db->db_level+1 == dn->dn_nlevels);
		ASSERT(db->db_blkid < dn->dn_nblkptr);
		ASSERT(db->db_parent == NULL || db->db_parent == dn->dn_dbuf);
		mutex_enter(&dn->dn_mtx);
		ASSERT(!list_link_active(&dr->dr_dirty_node));
		list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
		mutex_exit(&dn->dn_mtx);
		if (drop_struct_lock)
			rw_exit(&dn->dn_struct_rwlock);
	}

	dnode_setdirty(dn, tx);
	DB_DNODE_EXIT(db);
	return (dr);
}

/*
 * Undirty a buffer in the transaction group referenced by the given
 * transaction.  Return whether this evicted the dbuf.
 */
static boolean_t
dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
{
	dnode_t *dn;
	uint64_t txg = tx->tx_txg;
	dbuf_dirty_record_t *dr, **drp;

	ASSERT(txg != 0);

	/*
	 * Due to our use of dn_nlevels below, this can only be called
	 * in open context, unless we are operating on the MOS.
	 * From syncing context, dn_nlevels may be different from the
	 * dn_nlevels used when dbuf was dirtied.
	 */
	ASSERT(db->db_objset ==
	    dmu_objset_pool(db->db_objset)->dp_meta_objset ||
	    txg != spa_syncing_txg(dmu_objset_spa(db->db_objset)));
	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
	ASSERT0(db->db_level);
	ASSERT(MUTEX_HELD(&db->db_mtx));

	/*
	 * If this buffer is not dirty, we're done.
	 */
	for (drp = &db->db_last_dirty; (dr = *drp) != NULL; drp = &dr->dr_next)
		if (dr->dr_txg <= txg)
			break;
	if (dr == NULL || dr->dr_txg < txg)
		return (B_FALSE);
	ASSERT(dr->dr_txg == txg);
	ASSERT(dr->dr_dbuf == db);

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);

	dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size);

	ASSERT(db->db.db_size != 0);

	dsl_pool_undirty_space(dmu_objset_pool(dn->dn_objset),
	    dr->dr_accounted, txg);

	*drp = dr->dr_next;

	/*
	 * Note that there are three places in dbuf_dirty()
	 * where this dirty record may be put on a list.
	 * Make sure to do a list_remove corresponding to
	 * every one of those list_insert calls.
	 */
	if (dr->dr_parent) {
		mutex_enter(&dr->dr_parent->dt.di.dr_mtx);
		list_remove(&dr->dr_parent->dt.di.dr_children, dr);
		mutex_exit(&dr->dr_parent->dt.di.dr_mtx);
	} else if (db->db_blkid == DMU_SPILL_BLKID ||
	    db->db_level + 1 == dn->dn_nlevels) {
		ASSERT(db->db_blkptr == NULL || db->db_parent == dn->dn_dbuf);
		mutex_enter(&dn->dn_mtx);
		list_remove(&dn->dn_dirty_records[txg & TXG_MASK], dr);
		mutex_exit(&dn->dn_mtx);
	}
	DB_DNODE_EXIT(db);

	if (db->db_state != DB_NOFILL) {
		dbuf_unoverride(dr);

		ASSERT(db->db_buf != NULL);
		ASSERT(dr->dt.dl.dr_data != NULL);
		if (dr->dt.dl.dr_data != db->db_buf)
			VERIFY(arc_buf_remove_ref(dr->dt.dl.dr_data, db));
	}

	kmem_free(dr, sizeof (dbuf_dirty_record_t));

	ASSERT(db->db_dirtycnt > 0);
	db->db_dirtycnt -= 1;

	if (refcount_remove(&db->db_holds, (void *)(uintptr_t)txg) == 0) {
		arc_buf_t *buf = db->db_buf;

		ASSERT(db->db_state == DB_NOFILL || arc_released(buf));
		dbuf_set_data(db, NULL);
		VERIFY(arc_buf_remove_ref(buf, db));
		dbuf_evict(db);
		return (B_TRUE);
	}

	return (B_FALSE);
}

void
dmu_buf_will_dirty(dmu_buf_t *db_fake, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
	int rf = DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH;

	ASSERT(tx->tx_txg != 0);
	ASSERT(!refcount_is_zero(&db->db_holds));

	DB_DNODE_ENTER(db);
	if (RW_WRITE_HELD(&DB_DNODE(db)->dn_struct_rwlock))
		rf |= DB_RF_HAVESTRUCT;
	DB_DNODE_EXIT(db);
	(void) dbuf_read(db, NULL, rf);
	(void) dbuf_dirty(db, tx);
}

void
dmu_buf_will_not_fill(dmu_buf_t *db_fake, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;

	db->db_state = DB_NOFILL;

	dmu_buf_will_fill(db_fake, tx);
}

void
dmu_buf_will_fill(dmu_buf_t *db_fake, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;

	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
	ASSERT(tx->tx_txg != 0);
	ASSERT(db->db_level == 0);
	ASSERT(!refcount_is_zero(&db->db_holds));

	ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT ||
	    dmu_tx_private_ok(tx));

	dbuf_noread(db);
	(void) dbuf_dirty(db, tx);
}

#pragma weak dmu_buf_fill_done = dbuf_fill_done
/* ARGSUSED */
void
dbuf_fill_done(dmu_buf_impl_t *db, dmu_tx_t *tx)
{
	mutex_enter(&db->db_mtx);
	DBUF_VERIFY(db);

	if (db->db_state == DB_FILL) {
		if (db->db_level == 0 && db->db_freed_in_flight) {
			ASSERT(db->db_blkid != DMU_BONUS_BLKID);
			/* we were freed while filling */
			/* XXX dbuf_undirty? */
			bzero(db->db.db_data, db->db.db_size);
			db->db_freed_in_flight = FALSE;
		}
		db->db_state = DB_CACHED;
		cv_broadcast(&db->db_changed);
	}
	mutex_exit(&db->db_mtx);
}

void
dmu_buf_write_embedded(dmu_buf_t *dbuf, void *data,
    bp_embedded_type_t etype, enum zio_compress comp,
    int uncompressed_size, int compressed_size, int byteorder,
    dmu_tx_t *tx)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf;
	struct dirty_leaf *dl;
	dmu_object_type_t type;

	DB_DNODE_ENTER(db);
	type = DB_DNODE(db)->dn_type;
	DB_DNODE_EXIT(db);

	ASSERT0(db->db_level);
	ASSERT(db->db_blkid != DMU_BONUS_BLKID);

	dmu_buf_will_not_fill(dbuf, tx);

	ASSERT3U(db->db_last_dirty->dr_txg, ==, tx->tx_txg);
	dl = &db->db_last_dirty->dt.dl;
	encode_embedded_bp_compressed(&dl->dr_overridden_by,
	    data, comp, uncompressed_size, compressed_size);
	BPE_SET_ETYPE(&dl->dr_overridden_by, etype);
	BP_SET_TYPE(&dl->dr_overridden_by, type);
	BP_SET_LEVEL(&dl->dr_overridden_by, 0);
	BP_SET_BYTEORDER(&dl->dr_overridden_by, byteorder);

	dl->dr_override_state = DR_OVERRIDDEN;
	dl->dr_overridden_by.blk_birth = db->db_last_dirty->dr_txg;
}

/*
 * Directly assign a provided arc buf to a given dbuf if it's not referenced
 * by anybody except our caller. Otherwise copy arcbuf's contents to dbuf.
 */
void
dbuf_assign_arcbuf(dmu_buf_impl_t *db, arc_buf_t *buf, dmu_tx_t *tx)
{
	ASSERT(!refcount_is_zero(&db->db_holds));
	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
	ASSERT(db->db_level == 0);
	ASSERT(DBUF_GET_BUFC_TYPE(db) == ARC_BUFC_DATA);
	ASSERT(buf != NULL);
	ASSERT(arc_buf_size(buf) == db->db.db_size);
	ASSERT(tx->tx_txg != 0);

	arc_return_buf(buf, db);
	ASSERT(arc_released(buf));

	mutex_enter(&db->db_mtx);

	while (db->db_state == DB_READ || db->db_state == DB_FILL)
		cv_wait(&db->db_changed, &db->db_mtx);

	ASSERT(db->db_state == DB_CACHED || db->db_state == DB_UNCACHED);

	if (db->db_state == DB_CACHED &&
	    refcount_count(&db->db_holds) - 1 > db->db_dirtycnt) {
		mutex_exit(&db->db_mtx);
		(void) dbuf_dirty(db, tx);
		bcopy(buf->b_data, db->db.db_data, db->db.db_size);
		VERIFY(arc_buf_remove_ref(buf, db));
		xuio_stat_wbuf_copied();
		return;
	}

	xuio_stat_wbuf_nocopy();
	if (db->db_state == DB_CACHED) {
		dbuf_dirty_record_t *dr = db->db_last_dirty;

		ASSERT(db->db_buf != NULL);
		if (dr != NULL && dr->dr_txg == tx->tx_txg) {
			ASSERT(dr->dt.dl.dr_data == db->db_buf);
			if (!arc_released(db->db_buf)) {
				ASSERT(dr->dt.dl.dr_override_state ==
				    DR_OVERRIDDEN);
				arc_release(db->db_buf, db);
			}
			dr->dt.dl.dr_data = buf;
			VERIFY(arc_buf_remove_ref(db->db_buf, db));
		} else if (dr == NULL || dr->dt.dl.dr_data != db->db_buf) {
			arc_release(db->db_buf, db);
			VERIFY(arc_buf_remove_ref(db->db_buf, db));
		}
		db->db_buf = NULL;
	}
	ASSERT(db->db_buf == NULL);
	dbuf_set_data(db, buf);
	db->db_state = DB_FILL;
	mutex_exit(&db->db_mtx);
	(void) dbuf_dirty(db, tx);
	dmu_buf_fill_done(&db->db, tx);
}

/*
 * "Clear" the contents of this dbuf.  This will mark the dbuf
 * EVICTING and clear *most* of its references.  Unfortunately,
 * when we are not holding the dn_dbufs_mtx, we can't clear the
 * entry in the dn_dbufs list.  We have to wait until dbuf_destroy()
 * in this case.  For callers from the DMU we will usually see:
 *	dbuf_clear()->arc_clear_callback()->dbuf_do_evict()->dbuf_destroy()
 * For the arc callback, we will usually see:
 *	dbuf_do_evict()->dbuf_clear();dbuf_destroy()
 * Sometimes, though, we will get a mix of these two:
 *	DMU: dbuf_clear()->arc_clear_callback()
 *	ARC: dbuf_do_evict()->dbuf_destroy()
 *
 * This routine will dissociate the dbuf from the arc, by calling
 * arc_clear_callback(), but will not evict the data from the ARC.
 */
void
dbuf_clear(dmu_buf_impl_t *db)
{
	dnode_t *dn;
	dmu_buf_impl_t *parent = db->db_parent;
	dmu_buf_impl_t *dndb;
	boolean_t dbuf_gone = B_FALSE;

	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(refcount_is_zero(&db->db_holds));

	dbuf_evict_user(db);

	if (db->db_state == DB_CACHED) {
		ASSERT(db->db.db_data != NULL);
		if (db->db_blkid == DMU_BONUS_BLKID) {
			zio_buf_free(db->db.db_data, DN_MAX_BONUSLEN);
			arc_space_return(DN_MAX_BONUSLEN, ARC_SPACE_OTHER);
		}
		db->db.db_data = NULL;
		db->db_state = DB_UNCACHED;
	}

	ASSERT(db->db_state == DB_UNCACHED || db->db_state == DB_NOFILL);
	ASSERT(db->db_data_pending == NULL);

	db->db_state = DB_EVICTING;
	db->db_blkptr = NULL;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	dndb = dn->dn_dbuf;
	if (db->db_blkid != DMU_BONUS_BLKID && MUTEX_HELD(&dn->dn_dbufs_mtx)) {
		avl_remove(&dn->dn_dbufs, db);
		atomic_dec_32(&dn->dn_dbufs_count);
		membar_producer();
		DB_DNODE_EXIT(db);
		/*
		 * Decrementing the dbuf count means that the hold corresponding
		 * to the removed dbuf is no longer discounted in dnode_move(),
		 * so the dnode cannot be moved until after we release the hold.
		 * The membar_producer() ensures visibility of the decremented
		 * value in dnode_move(), since DB_DNODE_EXIT doesn't actually
		 * release any lock.
		 */
		dnode_rele(dn, db);
		db->db_dnode_handle = NULL;
	} else {
		DB_DNODE_EXIT(db);
	}

	if (db->db_buf)
		dbuf_gone = arc_clear_callback(db->db_buf);

	if (!dbuf_gone)
		mutex_exit(&db->db_mtx);

	/*
	 * If this dbuf is referenced from an indirect dbuf,
	 * decrement the ref count on the indirect dbuf.
	 */
	if (parent && parent != dndb)
		dbuf_rele(parent, db);
}

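/*
 * Locate the parent dbuf and block pointer that reference the given
 * (level, blkid) in this dnode.  On success the parent (if any) is
 * returned held in *parentp; ENOENT means the buffer has no parent at
 * this level yet.
 */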
static int
dbuf_findbp(dnode_t *dn, int level, uint64_t blkid, int fail_sparse,
    dmu_buf_impl_t **parentp, blkptr_t **bpp)
{
	int nlevels, epbs;

	*parentp = NULL;
	*bpp = NULL;

	ASSERT(blkid != DMU_BONUS_BLKID);

	if (blkid == DMU_SPILL_BLKID) {
		mutex_enter(&dn->dn_mtx);
		if (dn->dn_have_spill &&
		    (dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR))
			*bpp = &dn->dn_phys->dn_spill;
		else
			*bpp = NULL;
		dbuf_add_ref(dn->dn_dbuf, NULL);
		*parentp = dn->dn_dbuf;
		mutex_exit(&dn->dn_mtx);
		return (0);
	}

	if (dn->dn_phys->dn_nlevels == 0)
		nlevels = 1;
	else
		nlevels = dn->dn_phys->dn_nlevels;

	epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;

	ASSERT3U(level * epbs, <, 64);
	ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
	if (level >= nlevels ||
	    (blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs)))) {
		/* the buffer has no parent yet */
		return (SET_ERROR(ENOENT));
	} else if (level < nlevels-1) {
		/* this block is referenced from an indirect block */
		int err = dbuf_hold_impl(dn, level+1,
		    blkid >> epbs, fail_sparse, NULL, parentp);
		if (err)
			return (err);
		err = dbuf_read(*parentp, NULL,
		    (DB_RF_HAVESTRUCT | DB_RF_NOPREFETCH | DB_RF_CANFAIL));
		if (err) {
			dbuf_rele(*parentp, NULL);
			*parentp = NULL;
			return (err);
		}
		*bpp = ((blkptr_t *)(*parentp)->db.db_data) +
		    (blkid & ((1ULL << epbs) - 1));
		return (0);
	} else {
		/* the block is referenced from the dnode */
		ASSERT3U(level, ==, nlevels-1);
		ASSERT(dn->dn_phys->dn_nblkptr == 0 ||
		    blkid < dn->dn_phys->dn_nblkptr);
		if (dn->dn_dbuf) {
			dbuf_add_ref(dn->dn_dbuf, NULL);
			*parentp = dn->dn_dbuf;
		}
		*bpp = &dn->dn_phys->dn_blkptr[blkid];
		return (0);
	}
}

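/*
 * Allocate and initialize a new dbuf for (level, blkid), insert it into
 * the hash table and the dnode's dn_dbufs tree, and take a hold on the
 * dnode for it.  If another thread raced us and inserted an equal dbuf
 * first, the existing dbuf is returned instead.
 */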
static dmu_buf_impl_t *
dbuf_create(dnode_t *dn, uint8_t level, uint64_t blkid,
    dmu_buf_impl_t *parent, blkptr_t *blkptr)
{
	objset_t *os = dn->dn_objset;
	dmu_buf_impl_t *db, *odb;

	ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
	ASSERT(dn->dn_type != DMU_OT_NONE);

	db = kmem_cache_alloc(dbuf_cache, KM_SLEEP);

	db->db_objset = os;
	db->db.db_object = dn->dn_object;
	db->db_level = level;
	db->db_blkid = blkid;
	db->db_last_dirty = NULL;
	db->db_dirtycnt = 0;
	db->db_dnode_handle = dn->dn_handle;
	db->db_parent = parent;
	db->db_blkptr = blkptr;

	db->db_user_ptr = NULL;
	db->db_evict_func = NULL;
	db->db_immediate_evict = 0;
	db->db_freed_in_flight = 0;

	if (blkid == DMU_BONUS_BLKID) {
		ASSERT3P(parent, ==, dn->dn_dbuf);
		db->db.db_size = DN_MAX_BONUSLEN -
		    (dn->dn_nblkptr-1) * sizeof (blkptr_t);
		ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
		db->db.db_offset = DMU_BONUS_BLKID;
		db->db_state = DB_UNCACHED;
		/* the bonus dbuf is not placed in the hash table */
		arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER);
		return (db);
	} else if (blkid == DMU_SPILL_BLKID) {
		db->db.db_size = (blkptr != NULL) ?
		    BP_GET_LSIZE(blkptr) : SPA_MINBLOCKSIZE;
		db->db.db_offset = 0;
	} else {
		int blocksize =
		    db->db_level ? 1 << dn->dn_indblkshift : dn->dn_datablksz;
		db->db.db_size = blocksize;
		db->db.db_offset = db->db_blkid * blocksize;
	}

	/*
	 * Hold the dn_dbufs_mtx while we get the new dbuf
	 * in the hash table *and* added to the dbufs list.
	 * This prevents a possible deadlock with someone
	 * trying to look up this dbuf before it's added to the
	 * dn_dbufs list.
	 */
	mutex_enter(&dn->dn_dbufs_mtx);
	db->db_state = DB_EVICTING;
	if ((odb = dbuf_hash_insert(db)) != NULL) {
		/* someone else inserted it first */
		kmem_cache_free(dbuf_cache, db);
		mutex_exit(&dn->dn_dbufs_mtx);
		return (odb);
	}
	avl_add(&dn->dn_dbufs, db);
	if (db->db_level == 0 && db->db_blkid >=
	    dn->dn_unlisted_l0_blkid)
		dn->dn_unlisted_l0_blkid = db->db_blkid + 1;
	db->db_state = DB_UNCACHED;
	mutex_exit(&dn->dn_dbufs_mtx);
	arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER);

	if (parent && parent != dn->dn_dbuf)
		dbuf_add_ref(parent, db);

	ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT ||
	    refcount_count(&dn->dn_holds) > 0);
	(void) refcount_add(&dn->dn_holds, db);
	atomic_inc_32(&dn->dn_dbufs_count);

	dprintf_dbuf(db, "db=%p\n", db);

	return (db);
}

static int
dbuf_do_evict(void *private)
{
	dmu_buf_impl_t *db = private;

	if (!MUTEX_HELD(&db->db_mtx))
		mutex_enter(&db->db_mtx);

	ASSERT(refcount_is_zero(&db->db_holds));

	if (db->db_state != DB_EVICTING) {
		ASSERT(db->db_state == DB_CACHED);
		DBUF_VERIFY(db);
		db->db_buf = NULL;
		dbuf_evict(db);
	} else {
		mutex_exit(&db->db_mtx);
		dbuf_destroy(db);
	}
	return (0);
}

static void
dbuf_destroy(dmu_buf_impl_t *db)
{
	ASSERT(refcount_is_zero(&db->db_holds));

	if (db->db_blkid != DMU_BONUS_BLKID) {
		/*
		 * If this dbuf is still on the dn_dbufs list,
		 * remove it from that list.
		 */
		if (db->db_dnode_handle != NULL) {
			dnode_t *dn;

			DB_DNODE_ENTER(db);
			dn = DB_DNODE(db);
			mutex_enter(&dn->dn_dbufs_mtx);
			avl_remove(&dn->dn_dbufs, db);
			atomic_dec_32(&dn->dn_dbufs_count);
			mutex_exit(&dn->dn_dbufs_mtx);
			DB_DNODE_EXIT(db);
			/*
			 * Decrementing the dbuf count means that the hold
			 * corresponding to the removed dbuf is no longer
			 * discounted in dnode_move(), so the dnode cannot be
			 * moved until after we release the hold.
			 */
			dnode_rele(dn, db);
			db->db_dnode_handle = NULL;
		}
		dbuf_hash_remove(db);
	}
	db->db_parent = NULL;
	db->db_buf = NULL;

	ASSERT(db->db.db_data == NULL);
	ASSERT(db->db_hash_next == NULL);
	ASSERT(db->db_blkptr == NULL);
	ASSERT(db->db_data_pending == NULL);

	kmem_cache_free(dbuf_cache, db);
	arc_space_return(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER);
}

1842void
1843dbuf_prefetch(dnode_t *dn, uint64_t blkid, zio_priority_t prio)
1844{
1845	dmu_buf_impl_t *db = NULL;
1846	blkptr_t *bp = NULL;
1847
1848	ASSERT(blkid != DMU_BONUS_BLKID);
1849	ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
1850
1851	if (dnode_block_freed(dn, blkid))
1852		return;
1853
1854	/* dbuf_find() returns with db_mtx held */
1855	if ((db = dbuf_find(dn, 0, blkid)) != NULL) {
1856		/*
1857		 * This dbuf is already in the cache.  We assume that
1858		 * it is already CACHED, or else about to be either
1859		 * read or filled.
1860		 */
1861		mutex_exit(&db->db_mtx);
1862		return;
1863	}
1864
1865	if (dbuf_findbp(dn, 0, blkid, TRUE, &db, &bp) == 0) {
1866		if (bp && !BP_IS_HOLE(bp) && !BP_IS_EMBEDDED(bp)) {
1867			dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
1868			arc_flags_t aflags =
1869			    ARC_FLAG_NOWAIT | ARC_FLAG_PREFETCH;
1870			zbookmark_phys_t zb;
1871
1872			SET_BOOKMARK(&zb, ds ? ds->ds_object : DMU_META_OBJSET,
1873			    dn->dn_object, 0, blkid);
1874
1875			(void) arc_read(NULL, dn->dn_objset->os_spa,
1876			    bp, NULL, NULL, prio,
1877			    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
1878			    &aflags, &zb);
1879		}
1880		if (db)
1881			dbuf_rele(db, NULL);
1882	}
1883}
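
/*
 * A minimal usage sketch (hypothetical caller, not taken from this
 * file): prefetch the first few level-0 blocks of a dnode while
 * holding dn_struct_rwlock, as dbuf_prefetch() requires:
 *
 *	rw_enter(&dn->dn_struct_rwlock, RW_READER);
 *	for (uint64_t blkid = 0; blkid < 4; blkid++)
 *		dbuf_prefetch(dn, blkid, ZIO_PRIORITY_ASYNC_READ);
 *	rw_exit(&dn->dn_struct_rwlock);
 *
 * The reads are issued with ZIO_FLAG_SPECULATIVE and may fail
 * silently; no dbuf hold is returned to the caller.
 */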
1884
1885/*
1886 * Returns with db_holds incremented, and db_mtx not held.
1887 * Note: dn_struct_rwlock must be held.
1888 */
1889int
1890dbuf_hold_impl(dnode_t *dn, uint8_t level, uint64_t blkid, int fail_sparse,
1891    void *tag, dmu_buf_impl_t **dbp)
1892{
1893	dmu_buf_impl_t *db, *parent = NULL;
1894
1895	ASSERT(blkid != DMU_BONUS_BLKID);
1896	ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
1897	ASSERT3U(dn->dn_nlevels, >, level);
1898
1899	*dbp = NULL;
1900top:
1901	/* dbuf_find() returns with db_mtx held */
1902	db = dbuf_find(dn, level, blkid);
1903
1904	if (db == NULL) {
1905		blkptr_t *bp = NULL;
1906		int err;
1907
1908		ASSERT3P(parent, ==, NULL);
1909		err = dbuf_findbp(dn, level, blkid, fail_sparse, &parent, &bp);
1910		if (fail_sparse) {
1911			if (err == 0 && bp && BP_IS_HOLE(bp))
1912				err = SET_ERROR(ENOENT);
1913			if (err) {
1914				if (parent)
1915					dbuf_rele(parent, NULL);
1916				return (err);
1917			}
1918		}
1919		if (err && err != ENOENT)
1920			return (err);
1921		db = dbuf_create(dn, level, blkid, parent, bp);
1922	}
1923
1924	if (db->db_buf && refcount_is_zero(&db->db_holds)) {
1925		arc_buf_add_ref(db->db_buf, db);
1926		if (db->db_buf->b_data == NULL) {
1927			dbuf_clear(db);
1928			if (parent) {
1929				dbuf_rele(parent, NULL);
1930				parent = NULL;
1931			}
1932			goto top;
1933		}
1934		ASSERT3P(db->db.db_data, ==, db->db_buf->b_data);
1935	}
1936
1937	ASSERT(db->db_buf == NULL || arc_referenced(db->db_buf));
1938
1939	/*
1940	 * If this buffer is currently syncing out, and we are
1941	 * still referencing it from db_data, we need to make a copy
1942	 * of it in case we decide we want to dirty it again in this txg.
1943	 */
1944	if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
1945	    dn->dn_object != DMU_META_DNODE_OBJECT &&
1946	    db->db_state == DB_CACHED && db->db_data_pending) {
1947		dbuf_dirty_record_t *dr = db->db_data_pending;
1948
1949		if (dr->dt.dl.dr_data == db->db_buf) {
1950			arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
1951
1952			dbuf_set_data(db,
1953			    arc_buf_alloc(dn->dn_objset->os_spa,
1954			    db->db.db_size, db, type));
1955			bcopy(dr->dt.dl.dr_data->b_data, db->db.db_data,
1956			    db->db.db_size);
1957		}
1958	}
1959
1960	(void) refcount_add(&db->db_holds, tag);
1961	DBUF_VERIFY(db);
1962	mutex_exit(&db->db_mtx);
1963
1964	/* NOTE: we can't rele the parent until after we drop the db_mtx */
1965	if (parent)
1966		dbuf_rele(parent, NULL);
1967
1968	ASSERT3P(DB_DNODE(db), ==, dn);
1969	ASSERT3U(db->db_blkid, ==, blkid);
1970	ASSERT3U(db->db_level, ==, level);
1971	*dbp = db;
1972
1973	return (0);
1974}
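
/*
 * A minimal usage sketch (hypothetical caller): take a hold on a
 * level-1 indirect dbuf and release it.  FTAG is the usual
 * function-name hold tag:
 *
 *	dmu_buf_impl_t *db;
 *	int err;
 *
 *	rw_enter(&dn->dn_struct_rwlock, RW_READER);
 *	err = dbuf_hold_impl(dn, 1, blkid, FALSE, FTAG, &db);
 *	rw_exit(&dn->dn_struct_rwlock);
 *	if (err == 0) {
 *		...
 *		dbuf_rele(db, FTAG);
 *	}
 *
 * On success db is returned with db_holds incremented and db_mtx
 * dropped, so dbuf_rele() is the only cleanup required.
 */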
1975
1976dmu_buf_impl_t *
1977dbuf_hold(dnode_t *dn, uint64_t blkid, void *tag)
1978{
1979	dmu_buf_impl_t *db;
1980	int err = dbuf_hold_impl(dn, 0, blkid, FALSE, tag, &db);
1981	return (err ? NULL : db);
1982}
1983
1984dmu_buf_impl_t *
1985dbuf_hold_level(dnode_t *dn, int level, uint64_t blkid, void *tag)
1986{
1987	dmu_buf_impl_t *db;
1988	int err = dbuf_hold_impl(dn, level, blkid, FALSE, tag, &db);
1989	return (err ? NULL : db);
1990}
1991
1992void
1993dbuf_create_bonus(dnode_t *dn)
1994{
1995	ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));
1996
1997	ASSERT(dn->dn_bonus == NULL);
1998	dn->dn_bonus = dbuf_create(dn, 0, DMU_BONUS_BLKID, dn->dn_dbuf, NULL);
1999}
2000
2001int
2002dbuf_spill_set_blksz(dmu_buf_t *db_fake, uint64_t blksz, dmu_tx_t *tx)
2003{
2004	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2005	dnode_t *dn;
2006
2007	if (db->db_blkid != DMU_SPILL_BLKID)
2008		return (SET_ERROR(ENOTSUP));
2009	if (blksz == 0)
2010		blksz = SPA_MINBLOCKSIZE;
2011	ASSERT3U(blksz, <=, spa_maxblocksize(dmu_objset_spa(db->db_objset)));
2012	blksz = P2ROUNDUP(blksz, SPA_MINBLOCKSIZE);
2013
2014	DB_DNODE_ENTER(db);
2015	dn = DB_DNODE(db);
2016	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
2017	dbuf_new_size(db, blksz, tx);
2018	rw_exit(&dn->dn_struct_rwlock);
2019	DB_DNODE_EXIT(db);
2020
2021	return (0);
2022}
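
/*
 * Example (illustrative arithmetic): P2ROUNDUP() rounds the requested
 * size up to the next multiple of SPA_MINBLOCKSIZE (512 bytes), so a
 * request for a 1000-byte spill block becomes:
 *
 *	blksz = P2ROUNDUP(1000, SPA_MINBLOCKSIZE);	-> 1024
 *
 * and a request of 0 is first bumped to SPA_MINBLOCKSIZE itself.
 */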
2023
2024void
2025dbuf_rm_spill(dnode_t *dn, dmu_tx_t *tx)
2026{
2027	dbuf_free_range(dn, DMU_SPILL_BLKID, DMU_SPILL_BLKID, tx);
2028}
2029
2030#pragma weak dmu_buf_add_ref = dbuf_add_ref
2031void
2032dbuf_add_ref(dmu_buf_impl_t *db, void *tag)
2033{
2034	int64_t holds = refcount_add(&db->db_holds, tag);
2035	ASSERT(holds > 1);
2036}
2037
2038/*
2039 * If you call dbuf_rele() you had better not be referencing the dnode handle
2040 * unless you have some other direct or indirect hold on the dnode. (An indirect
2041 * hold is a hold on one of the dnode's dbufs, including the bonus buffer.)
2042 * Without that, the dbuf_rele() could lead to a dnode_rele() followed by the
2043 * dnode's parent dbuf evicting its dnode handles.
2044 */
2045void
2046dbuf_rele(dmu_buf_impl_t *db, void *tag)
2047{
2048	mutex_enter(&db->db_mtx);
2049	dbuf_rele_and_unlock(db, tag);
2050}
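
/*
 * Sketch of the rule above (hypothetical caller, illustrative only):
 *
 *	DB_DNODE_ENTER(db);
 *	dn = DB_DNODE(db);
 *	...
 *	DB_DNODE_EXIT(db);
 *	dbuf_rele(db, FTAG);	(handle must not be touched again)
 *
 * Once the dbuf hold is dropped, the dnode's parent dbuf may evict the
 * dnode handle, so any further DB_DNODE_ENTER() requires an
 * independent hold taken beforehand, e.g. via dnode_hold() or a hold
 * on another of the dnode's dbufs.
 */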
2051
2052void
2053dmu_buf_rele(dmu_buf_t *db, void *tag)
2054{
2055	dbuf_rele((dmu_buf_impl_t *)db, tag);
2056}
2057
2058/*
2059 * dbuf_rele() for an already-locked dbuf.  This is necessary to allow
2060 * db_dirtycnt and db_holds to be updated atomically.
2061 */
2062void
2063dbuf_rele_and_unlock(dmu_buf_impl_t *db, void *tag)
2064{
2065	int64_t holds;
2066
2067	ASSERT(MUTEX_HELD(&db->db_mtx));
2068	DBUF_VERIFY(db);
2069
2070	/*
2071	 * Remove the reference to the dbuf before removing its hold on the
2072	 * dnode so we can guarantee in dnode_move() that a referenced bonus
2073	 * buffer has a corresponding dnode hold.
2074	 */
2075	holds = refcount_remove(&db->db_holds, tag);
2076	ASSERT(holds >= 0);
2077
2078	/*
2079	 * We can't freeze indirects if there is a possibility that they
2080	 * may be modified in the current syncing context.
2081	 */
2082	if (db->db_buf && holds == (db->db_level == 0 ? db->db_dirtycnt : 0))
2083		arc_buf_freeze(db->db_buf);
2084
2085	if (holds == db->db_dirtycnt &&
2086	    db->db_level == 0 && db->db_immediate_evict)
2087		dbuf_evict_user(db);
2088
2089	if (holds == 0) {
2090		if (db->db_blkid == DMU_BONUS_BLKID) {
2091			mutex_exit(&db->db_mtx);
2092
2093			/*
2094			 * If the dnode moves here, we cannot cross this barrier
2095			 * until the move completes.
2096			 */
2097			DB_DNODE_ENTER(db);
2098			atomic_dec_32(&DB_DNODE(db)->dn_dbufs_count);
2099			DB_DNODE_EXIT(db);
2100			/*
2101			 * The bonus buffer's dnode hold is no longer discounted
2102			 * in dnode_move(). The dnode cannot move until after
2103			 * the dnode_rele().
2104			 */
2105			dnode_rele(DB_DNODE(db), db);
2106		} else if (db->db_buf == NULL) {
2107			/*
2108			 * This is a special case: we never associated this
2109			 * dbuf with any data allocated from the ARC.
2110			 */
2111			ASSERT(db->db_state == DB_UNCACHED ||
2112			    db->db_state == DB_NOFILL);
2113			dbuf_evict(db);
2114		} else if (arc_released(db->db_buf)) {
2115			arc_buf_t *buf = db->db_buf;
2116			/*
2117			 * This dbuf has anonymous data associated with it.
2118			 */
2119			dbuf_set_data(db, NULL);
2120			VERIFY(arc_buf_remove_ref(buf, db));
2121			dbuf_evict(db);
2122		} else {
2123			VERIFY(!arc_buf_remove_ref(db->db_buf, db));
2124
2125			/*
2126			 * A dbuf will be eligible for eviction if either the
2127			 * 'primarycache' property is set or a duplicate
2128			 * copy of this buffer is already cached in the arc.
2129			 *
2130			 * In the case of the 'primarycache' property, a
2131			 * buffer is considered for eviction if it matches
2132			 * the criteria set in the property.
2133			 *
2134			 * To decide if our buffer is considered a
2135			 * duplicate, we must call into the arc to determine
2136			 * if multiple buffers are referencing the same
2137			 * block on-disk. If so, then we simply evict
2138			 * ourselves.
2139			 */
2140			if (!DBUF_IS_CACHEABLE(db)) {
2141				if (db->db_blkptr != NULL &&
2142				    !BP_IS_HOLE(db->db_blkptr) &&
2143				    !BP_IS_EMBEDDED(db->db_blkptr)) {
2144					spa_t *spa =
2145					    dmu_objset_spa(db->db_objset);
2146					blkptr_t bp = *db->db_blkptr;
2147					dbuf_clear(db);
2148					arc_freed(spa, &bp);
2149				} else {
2150					dbuf_clear(db);
2151				}
2152			} else if (arc_buf_eviction_needed(db->db_buf)) {
2153				dbuf_clear(db);
2154			} else {
2155				mutex_exit(&db->db_mtx);
2156			}
2157		}
2158	} else {
2159		mutex_exit(&db->db_mtx);
2160	}
2161}
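
/*
 * Example (illustrative reading of the freeze test above): a level-0
 * dbuf dirtied in two open txgs has db_dirtycnt == 2, and each dirty
 * record owns one hold.  Once a caller's release leaves holds == 2,
 * the only remaining references belong to the dirty records, so no
 * one can legitimately modify the data and the ARC buffer may be
 * frozen.  Indirect buffers may still be rewritten while syncing, so
 * they are only frozen at holds == 0.
 */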
2162
2163#pragma weak dmu_buf_refcount = dbuf_refcount
2164uint64_t
2165dbuf_refcount(dmu_buf_impl_t *db)
2166{
2167	return (refcount_count(&db->db_holds));
2168}
2169
2170void *
2171dmu_buf_set_user(dmu_buf_t *db_fake, void *user_ptr,
2172    dmu_buf_evict_func_t *evict_func)
2173{
2174	return (dmu_buf_update_user(db_fake, NULL, user_ptr, evict_func));
2175}
2176
2177void *
2178dmu_buf_set_user_ie(dmu_buf_t *db_fake, void *user_ptr,
2179    dmu_buf_evict_func_t *evict_func)
2180{
2181	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2182
2183	db->db_immediate_evict = TRUE;
2184	return (dmu_buf_update_user(db_fake, NULL, user_ptr, evict_func));
2185}
2186
2187void *
2188dmu_buf_update_user(dmu_buf_t *db_fake, void *old_user_ptr, void *user_ptr,
2189    dmu_buf_evict_func_t *evict_func)
2190{
2191	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2192	ASSERT(db->db_level == 0);
2193
2194	ASSERT((user_ptr == NULL) == (evict_func == NULL));
2195
2196	mutex_enter(&db->db_mtx);
2197
2198	if (db->db_user_ptr == old_user_ptr) {
2199		db->db_user_ptr = user_ptr;
2200		db->db_evict_func = evict_func;
2201	} else {
2202		old_user_ptr = db->db_user_ptr;
2203	}
2204
2205	mutex_exit(&db->db_mtx);
2206	return (old_user_ptr);
2207}
2208
2209void *
2210dmu_buf_get_user(dmu_buf_t *db_fake)
2211{
2212	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2213	ASSERT(!refcount_is_zero(&db->db_holds));
2214
2215	return (db->db_user_ptr);
2216}
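
/*
 * A minimal usage sketch of the user API (hypothetical consumer;
 * my_state_t and my_evict_cb are illustrative names, and the callback
 * shape assumes the dmu_buf_evict_func_t form used by this file):
 *
 *	static void
 *	my_evict_cb(dmu_buf_t *db, void *user_ptr)
 *	{
 *		kmem_free(user_ptr, sizeof (my_state_t));
 *	}
 *
 *	my_state_t *st = kmem_zalloc(sizeof (*st), KM_SLEEP);
 *	if (dmu_buf_set_user(db, st, my_evict_cb) != NULL) {
 *		(lost the race; another user is already attached)
 *		kmem_free(st, sizeof (*st));
 *	}
 *	st = dmu_buf_get_user(db);
 *
 * The evict callback fires when the dbuf's user data is evicted, at
 * which point the consumer must drop any references into that data.
 */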
2217
2218boolean_t
2219dmu_buf_freeable(dmu_buf_t *dbuf)
2220{
2221	boolean_t res = B_FALSE;
2222	dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf;
2223
2224	if (db->db_blkptr)
2225		res = dsl_dataset_block_freeable(db->db_objset->os_dsl_dataset,
2226		    db->db_blkptr, db->db_blkptr->blk_birth);
2227
2228	return (res);
2229}
2230
2231blkptr_t *
2232dmu_buf_get_blkptr(dmu_buf_t *db)
2233{
2234	dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
2235	return (dbi->db_blkptr);
2236}
2237
2238static void
2239dbuf_check_blkptr(dnode_t *dn, dmu_buf_impl_t *db)
2240{
2241	/* ASSERT(dmu_tx_is_syncing(tx)); */
2242	ASSERT(MUTEX_HELD(&db->db_mtx));
2243
2244	if (db->db_blkptr != NULL)
2245		return;
2246
2247	if (db->db_blkid == DMU_SPILL_BLKID) {
2248		db->db_blkptr = &dn->dn_phys->dn_spill;
2249		BP_ZERO(db->db_blkptr);
2250		return;
2251	}
2252	if (db->db_level == dn->dn_phys->dn_nlevels-1) {
2253		/*
2254		 * This buffer was allocated at a time when there were
2255		 * no blkptrs available in the dnode, or it was
2256		 * inappropriate to hook it in (i.e., nlevels mismatch).
2257		 */
2258		ASSERT(db->db_blkid < dn->dn_phys->dn_nblkptr);
2259		ASSERT(db->db_parent == NULL);
2260		db->db_parent = dn->dn_dbuf;
2261		db->db_blkptr = &dn->dn_phys->dn_blkptr[db->db_blkid];
2262		DBUF_VERIFY(db);
2263	} else {
2264		dmu_buf_impl_t *parent = db->db_parent;
2265		int epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
2266
2267		ASSERT(dn->dn_phys->dn_nlevels > 1);
2268		if (parent == NULL) {
2269			mutex_exit(&db->db_mtx);
2270			rw_enter(&dn->dn_struct_rwlock, RW_READER);
2271			(void) dbuf_hold_impl(dn, db->db_level+1,
2272			    db->db_blkid >> epbs, FALSE, db, &parent);
2273			rw_exit(&dn->dn_struct_rwlock);
2274			mutex_enter(&db->db_mtx);
2275			db->db_parent = parent;
2276		}
2277		db->db_blkptr = (blkptr_t *)parent->db.db_data +
2278		    (db->db_blkid & ((1ULL << epbs) - 1));
2279		DBUF_VERIFY(db);
2280	}
2281}
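
/*
 * Example (illustrative arithmetic): with 128K indirect blocks
 * (dn_indblkshift == 17) and SPA_BLKPTRSHIFT == 7 (a blkptr_t is
 * 128 bytes):
 *
 *	epbs = 17 - 7 = 10	(1024 blkptrs per indirect block)
 *
 * so a level-0 dbuf with db_blkid == 1500 hangs off the level-1
 * parent with blkid 1500 >> 10 == 1, at slot 1500 & 1023 == 476 of
 * that parent's db_data array.
 */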
2282
2283static void
2284dbuf_sync_indirect(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
2285{
2286	dmu_buf_impl_t *db = dr->dr_dbuf;
2287	dnode_t *dn;
2288	zio_t *zio;
2289
2290	ASSERT(dmu_tx_is_syncing(tx));
2291
2292	dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr);
2293
2294	mutex_enter(&db->db_mtx);
2295
2296	ASSERT(db->db_level > 0);
2297	DBUF_VERIFY(db);
2298
2299	/* Read the block if it hasn't been read yet. */
2300	if (db->db_buf == NULL) {
2301		mutex_exit(&db->db_mtx);
2302		(void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED);
2303		mutex_enter(&db->db_mtx);
2304	}
2305	ASSERT3U(db->db_state, ==, DB_CACHED);
2306	ASSERT(db->db_buf != NULL);
2307
2308	DB_DNODE_ENTER(db);
2309	dn = DB_DNODE(db);
2310	/* Indirect block size must match what the dnode thinks it is. */
2311	ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
2312	dbuf_check_blkptr(dn, db);
2313	DB_DNODE_EXIT(db);
2314
2315	/* Provide the pending dirty record to child dbufs */
2316	db->db_data_pending = dr;
2317
2318	mutex_exit(&db->db_mtx);
2319	dbuf_write(dr, db->db_buf, tx);
2320
2321	zio = dr->dr_zio;
2322	mutex_enter(&dr->dt.di.dr_mtx);
2323	dbuf_sync_list(&dr->dt.di.dr_children, db->db_level - 1, tx);
2324	ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
2325	mutex_exit(&dr->dt.di.dr_mtx);
2326	zio_nowait(zio);
2327}
2328
2329static void
2330dbuf_sync_leaf(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
2331{
2332	arc_buf_t **datap = &dr->dt.dl.dr_data;
2333	dmu_buf_impl_t *db = dr->dr_dbuf;
2334	dnode_t *dn;
2335	objset_t *os;
2336	uint64_t txg = tx->tx_txg;
2337
2338	ASSERT(dmu_tx_is_syncing(tx));
2339
2340	dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr);
2341
2342	mutex_enter(&db->db_mtx);
2343	/*
2344	 * To be synced, we must be dirtied.  But we
2345	 * might have been freed after being dirtied.
2346	 */
2347	if (db->db_state == DB_UNCACHED) {
2348		/* This buffer has been freed since it was dirtied */
2349		ASSERT(db->db.db_data == NULL);
2350	} else if (db->db_state == DB_FILL) {
2351		/* This buffer was freed and is now being re-filled */
2352		ASSERT(db->db.db_data != dr->dt.dl.dr_data);
2353	} else {
2354		ASSERT(db->db_state == DB_CACHED || db->db_state == DB_NOFILL);
2355	}
2356	DBUF_VERIFY(db);
2357
2358	DB_DNODE_ENTER(db);
2359	dn = DB_DNODE(db);
2360
2361	if (db->db_blkid == DMU_SPILL_BLKID) {
2362		mutex_enter(&dn->dn_mtx);
2363		dn->dn_phys->dn_flags |= DNODE_FLAG_SPILL_BLKPTR;
2364		mutex_exit(&dn->dn_mtx);
2365	}
2366
2367	/*
2368	 * If this is a bonus buffer, simply copy the bonus data into the
2369	 * dnode.  It will be written out when the dnode is synced (and it
2370	 * will be synced, since it must have been dirty for dbuf_sync to
2371	 * be called).
2372	 */
2373	if (db->db_blkid == DMU_BONUS_BLKID) {
2374		dbuf_dirty_record_t **drp;
2375
2376		ASSERT(*datap != NULL);
2377		ASSERT0(db->db_level);
2378		ASSERT3U(dn->dn_phys->dn_bonuslen, <=, DN_MAX_BONUSLEN);
2379		bcopy(*datap, DN_BONUS(dn->dn_phys), dn->dn_phys->dn_bonuslen);
2380		DB_DNODE_EXIT(db);
2381
2382		if (*datap != db->db.db_data) {
2383			zio_buf_free(*datap, DN_MAX_BONUSLEN);
2384			arc_space_return(DN_MAX_BONUSLEN, ARC_SPACE_OTHER);
2385		}
2386		db->db_data_pending = NULL;
2387		drp = &db->db_last_dirty;
2388		while (*drp != dr)
2389			drp = &(*drp)->dr_next;
2390		ASSERT(dr->dr_next == NULL);
2391		ASSERT(dr->dr_dbuf == db);
2392		*drp = dr->dr_next;
2393		if (dr->dr_dbuf->db_level != 0) {
2394			list_destroy(&dr->dt.di.dr_children);
2395			mutex_destroy(&dr->dt.di.dr_mtx);
2396		}
2397		kmem_free(dr, sizeof (dbuf_dirty_record_t));
2398		ASSERT(db->db_dirtycnt > 0);
2399		db->db_dirtycnt -= 1;
2400		dbuf_rele_and_unlock(db, (void *)(uintptr_t)txg);
2401		return;
2402	}
2403
2404	os = dn->dn_objset;
2405
2406	/*
2407	 * This function may have dropped the db_mtx lock allowing a dmu_sync
2408	 * operation to sneak in. As a result, we need to ensure that we
2409	 * don't check the dr_override_state until we have returned from
2410	 * dbuf_check_blkptr.
2411	 */
2412	dbuf_check_blkptr(dn, db);
2413
2414	/*
2415	 * If this buffer is in the middle of an immediate write,
2416	 * wait for the synchronous IO to complete.
2417	 */
2418	while (dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC) {
2419		ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);
2420		cv_wait(&db->db_changed, &db->db_mtx);
2421		ASSERT(dr->dt.dl.dr_override_state != DR_NOT_OVERRIDDEN);
2422	}
2423
2424	if (db->db_state != DB_NOFILL &&
2425	    dn->dn_object != DMU_META_DNODE_OBJECT &&
2426	    refcount_count(&db->db_holds) > 1 &&
2427	    dr->dt.dl.dr_override_state != DR_OVERRIDDEN &&
2428	    *datap == db->db_buf) {
2429		/*
2430		 * If this buffer is currently "in use" (i.e., there
2431		 * are active holds and db_data still references it),
2432		 * then make a copy before we start the write so that
2433		 * any modifications from the open txg will not leak
2434		 * into this write.
2435		 *
2436		 * NOTE: this copy does not need to be made for
2437		 * objects only modified in the syncing context (e.g.
2438		 * DMU_OT_DNODE blocks).
2439		 */
2440		int blksz = arc_buf_size(*datap);
2441		arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
2442		*datap = arc_buf_alloc(os->os_spa, blksz, db, type);
2443		bcopy(db->db.db_data, (*datap)->b_data, blksz);
2444	}
2445	db->db_data_pending = dr;
2446
2447	mutex_exit(&db->db_mtx);
2448
2449	dbuf_write(dr, *datap, tx);
2450
2451	ASSERT(!list_link_active(&dr->dr_dirty_node));
2452	if (dn->dn_object == DMU_META_DNODE_OBJECT) {
2453		list_insert_tail(&dn->dn_dirty_records[txg&TXG_MASK], dr);
2454		DB_DNODE_EXIT(db);
2455	} else {
2456		/*
2457		 * Although zio_nowait() does not "wait for an IO", it does
2458		 * initiate the IO. If this is an empty write the IO may
2459		 * complete before zio_nowait() returns. We need to
2460		 * DB_DNODE_EXIT() first in case
2461		 * zio_nowait() invalidates the dbuf.
2462		 */
2463		DB_DNODE_EXIT(db);
2464		zio_nowait(dr->dr_zio);
2465	}
2466}
2467
2468void
2469dbuf_sync_list(list_t *list, int level, dmu_tx_t *tx)
2470{
2471	dbuf_dirty_record_t *dr;
2472
2473	while ((dr = list_head(list)) != NULL) {
2474		if (dr->dr_zio != NULL) {
2475			/*
2476			 * If we find an already initialized zio then we
2477			 * are processing the meta-dnode, and we have finished.
2478			 * The dbufs for all dnodes are put back on the list
2479			 * during processing, so that we can zio_wait()
2480			 * these IOs after initiating all child IOs.
2481			 */
2482			ASSERT3U(dr->dr_dbuf->db.db_object, ==,
2483			    DMU_META_DNODE_OBJECT);
2484			break;
2485		}
2486		if (dr->dr_dbuf->db_blkid != DMU_BONUS_BLKID &&
2487		    dr->dr_dbuf->db_blkid != DMU_SPILL_BLKID) {
2488			VERIFY3U(dr->dr_dbuf->db_level, ==, level);
2489		}
2490		list_remove(list, dr);
2491		if (dr->dr_dbuf->db_level > 0)
2492			dbuf_sync_indirect(dr, tx);
2493		else
2494			dbuf_sync_leaf(dr, tx);
2495	}
2496}
2497
2498/* ARGSUSED */
2499static void
2500dbuf_write_ready(zio_t *zio, arc_buf_t *buf, void *vdb)
2501{
2502	dmu_buf_impl_t *db = vdb;
2503	dnode_t *dn;
2504	blkptr_t *bp = zio->io_bp;
2505	blkptr_t *bp_orig = &zio->io_bp_orig;
2506	spa_t *spa = zio->io_spa;
2507	int64_t delta;
2508	uint64_t fill = 0;
2509	int i;
2510
2511	ASSERT3P(db->db_blkptr, ==, bp);
2512
2513	DB_DNODE_ENTER(db);
2514	dn = DB_DNODE(db);
2515	delta = bp_get_dsize_sync(spa, bp) - bp_get_dsize_sync(spa, bp_orig);
2516	dnode_diduse_space(dn, delta - zio->io_prev_space_delta);
2517	zio->io_prev_space_delta = delta;
2518
2519	if (bp->blk_birth != 0) {
2520		ASSERT((db->db_blkid != DMU_SPILL_BLKID &&
2521		    BP_GET_TYPE(bp) == dn->dn_type) ||
2522		    (db->db_blkid == DMU_SPILL_BLKID &&
2523		    BP_GET_TYPE(bp) == dn->dn_bonustype) ||
2524		    BP_IS_EMBEDDED(bp));
2525		ASSERT(BP_GET_LEVEL(bp) == db->db_level);
2526	}
2527
2528	mutex_enter(&db->db_mtx);
2529
2530#ifdef ZFS_DEBUG
2531	if (db->db_blkid == DMU_SPILL_BLKID) {
2532		ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR);
2533		ASSERT(!(BP_IS_HOLE(db->db_blkptr)) &&
2534		    db->db_blkptr == &dn->dn_phys->dn_spill);
2535	}
2536#endif
2537
2538	if (db->db_level == 0) {
2539		mutex_enter(&dn->dn_mtx);
2540		if (db->db_blkid > dn->dn_phys->dn_maxblkid &&
2541		    db->db_blkid != DMU_SPILL_BLKID)
2542			dn->dn_phys->dn_maxblkid = db->db_blkid;
2543		mutex_exit(&dn->dn_mtx);
2544
2545		if (dn->dn_type == DMU_OT_DNODE) {
2546			dnode_phys_t *dnp = db->db.db_data;
2547			for (i = db->db.db_size >> DNODE_SHIFT; i > 0;
2548			    i--, dnp++) {
2549				if (dnp->dn_type != DMU_OT_NONE)
2550					fill++;
2551			}
2552		} else {
2553			if (BP_IS_HOLE(bp)) {
2554				fill = 0;
2555			} else {
2556				fill = 1;
2557			}
2558		}
2559	} else {
2560		blkptr_t *ibp = db->db.db_data;
2561		ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
2562		for (i = db->db.db_size >> SPA_BLKPTRSHIFT; i > 0; i--, ibp++) {
2563			if (BP_IS_HOLE(ibp))
2564				continue;
2565			fill += BP_GET_FILL(ibp);
2566		}
2567	}
2568	DB_DNODE_EXIT(db);
2569
2570	if (!BP_IS_EMBEDDED(bp))
2571		bp->blk_fill = fill;
2572
2573	mutex_exit(&db->db_mtx);
2574}
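
/*
 * Example (illustrative): blk_fill summarizes how many non-hole
 * blocks live beneath a bp.  For a 128K level-1 indirect block, the
 * loop above walks its 1024 child blkptrs; if 3 of them are holes
 * and every other child is a level-0 block (fill == 1), the sum is
 * fill = 1021, which is then published in this block's own bp.
 */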
2575
2576/*
2577 * The SPA will call this callback several times for each zio - once
2578 * for every physical child i/o (zio->io_phys_children times).  This
2579 * allows the DMU to monitor the progress of each logical i/o.  For example,
2580 * there may be 2 copies of an indirect block, or many fragments of a RAID-Z
2581 * block.  There may be a long delay before all copies/fragments are completed,
2582 * so this callback allows us to retire dirty space gradually, as the physical
2583 * i/os complete.
2584 */
2585/* ARGSUSED */
2586static void
2587dbuf_write_physdone(zio_t *zio, arc_buf_t *buf, void *arg)
2588{
2589	dmu_buf_impl_t *db = arg;
2590	objset_t *os = db->db_objset;
2591	dsl_pool_t *dp = dmu_objset_pool(os);
2592	dbuf_dirty_record_t *dr;
2593	int delta = 0;
2594
2595	dr = db->db_data_pending;
2596	ASSERT3U(dr->dr_txg, ==, zio->io_txg);
2597
2598	/*
2599	 * The callback will be called io_phys_children times.  Retire one
2600	 * portion of our dirty space each time we are called.  Any rounding
2601	 * error will be cleaned up by dsl_pool_sync()'s call to
2602	 * dsl_pool_undirty_space().
2603	 */
2604	delta = dr->dr_accounted / zio->io_phys_children;
2605	dsl_pool_undirty_space(dp, delta, zio->io_txg);
2606}
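
/*
 * Example (illustrative arithmetic): if dr_accounted == 12K and the
 * zio has io_phys_children == 3 (e.g. three mirror copies), each
 * invocation retires 12K / 3 == 4K of dirty space.  With 10K instead,
 * the integer division leaves 10K - 3 * 3413 == 1 byte unaccounted,
 * which dsl_pool_sync()'s dsl_pool_undirty_space() call cleans up as
 * noted above.
 */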
2607
2608/* ARGSUSED */
2609static void
2610dbuf_write_done(zio_t *zio, arc_buf_t *buf, void *vdb)
2611{
2612	dmu_buf_impl_t *db = vdb;
2613	blkptr_t *bp_orig = &zio->io_bp_orig;
2614	blkptr_t *bp = db->db_blkptr;
2615	objset_t *os = db->db_objset;
2616	dmu_tx_t *tx = os->os_synctx;
2617	dbuf_dirty_record_t **drp, *dr;
2618
2619	ASSERT0(zio->io_error);
2620	ASSERT(db->db_blkptr == bp);
2621
2622	/*
2623	 * For nopwrites and rewrites we ensure that the bp matches our
2624	 * original and bypass all the accounting.
2625	 */
2626	if (zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE)) {
2627		ASSERT(BP_EQUAL(bp, bp_orig));
2628	} else {
2629		dsl_dataset_t *ds = os->os_dsl_dataset;
2630		(void) dsl_dataset_block_kill(ds, bp_orig, tx, B_TRUE);
2631		dsl_dataset_block_born(ds, bp, tx);
2632	}
2633
2634	mutex_enter(&db->db_mtx);
2635
2636	DBUF_VERIFY(db);
2637
2638	drp = &db->db_last_dirty;
2639	while ((dr = *drp) != db->db_data_pending)
2640		drp = &dr->dr_next;
2641	ASSERT(!list_link_active(&dr->dr_dirty_node));
2642	ASSERT(dr->dr_dbuf == db);
2643	ASSERT(dr->dr_next == NULL);
2644	*drp = dr->dr_next;
2645
2646#ifdef ZFS_DEBUG
2647	if (db->db_blkid == DMU_SPILL_BLKID) {
2648		dnode_t *dn;
2649
2650		DB_DNODE_ENTER(db);
2651		dn = DB_DNODE(db);
2652		ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR);
2653		ASSERT(!(BP_IS_HOLE(db->db_blkptr)) &&
2654		    db->db_blkptr == &dn->dn_phys->dn_spill);
2655		DB_DNODE_EXIT(db);
2656	}
2657#endif
2658
2659	if (db->db_level == 0) {
2660		ASSERT(db->db_blkid != DMU_BONUS_BLKID);
2661		ASSERT(dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN);
2662		if (db->db_state != DB_NOFILL) {
2663			if (dr->dt.dl.dr_data != db->db_buf)
2664				VERIFY(arc_buf_remove_ref(dr->dt.dl.dr_data,
2665				    db));
2666			else if (!arc_released(db->db_buf))
2667				arc_set_callback(db->db_buf, dbuf_do_evict, db);
2668		}
2669	} else {
2670		dnode_t *dn;
2671
2672		DB_DNODE_ENTER(db);
2673		dn = DB_DNODE(db);
2674		ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
2675		ASSERT3U(db->db.db_size, ==, 1 << dn->dn_phys->dn_indblkshift);
2676		if (!BP_IS_HOLE(db->db_blkptr)) {
2677			int epbs =
2678			    dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
2679			ASSERT3U(db->db_blkid, <=,
2680			    dn->dn_phys->dn_maxblkid >> (db->db_level * epbs));
2681			ASSERT3U(BP_GET_LSIZE(db->db_blkptr), ==,
2682			    db->db.db_size);
2683			if (!arc_released(db->db_buf))
2684				arc_set_callback(db->db_buf, dbuf_do_evict, db);
2685		}
2686		DB_DNODE_EXIT(db);
2687		mutex_destroy(&dr->dt.di.dr_mtx);
2688		list_destroy(&dr->dt.di.dr_children);
2689	}
2690	kmem_free(dr, sizeof (dbuf_dirty_record_t));
2691
2692	cv_broadcast(&db->db_changed);
2693	ASSERT(db->db_dirtycnt > 0);
2694	db->db_dirtycnt -= 1;
2695	db->db_data_pending = NULL;
2696	dbuf_rele_and_unlock(db, (void *)(uintptr_t)tx->tx_txg);
2697}
2698
2699static void
2700dbuf_write_nofill_ready(zio_t *zio)
2701{
2702	dbuf_write_ready(zio, NULL, zio->io_private);
2703}
2704
2705static void
2706dbuf_write_nofill_done(zio_t *zio)
2707{
2708	dbuf_write_done(zio, NULL, zio->io_private);
2709}
2710
2711static void
2712dbuf_write_override_ready(zio_t *zio)
2713{
2714	dbuf_dirty_record_t *dr = zio->io_private;
2715	dmu_buf_impl_t *db = dr->dr_dbuf;
2716
2717	dbuf_write_ready(zio, NULL, db);
2718}
2719
2720static void
2721dbuf_write_override_done(zio_t *zio)
2722{
2723	dbuf_dirty_record_t *dr = zio->io_private;
2724	dmu_buf_impl_t *db = dr->dr_dbuf;
2725	blkptr_t *obp = &dr->dt.dl.dr_overridden_by;
2726
2727	mutex_enter(&db->db_mtx);
2728	if (!BP_EQUAL(zio->io_bp, obp)) {
2729		if (!BP_IS_HOLE(obp))
2730			dsl_free(spa_get_dsl(zio->io_spa), zio->io_txg, obp);
2731		arc_release(dr->dt.dl.dr_data, db);
2732	}
2733	mutex_exit(&db->db_mtx);
2734
2735	dbuf_write_done(zio, NULL, db);
2736}
2737
2738/* Issue I/O to commit a dirty buffer to disk. */
2739static void
2740dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx)
2741{
2742	dmu_buf_impl_t *db = dr->dr_dbuf;
2743	dnode_t *dn;
2744	objset_t *os;
2745	dmu_buf_impl_t *parent = db->db_parent;
2746	uint64_t txg = tx->tx_txg;
2747	zbookmark_phys_t zb;
2748	zio_prop_t zp;
2749	zio_t *zio;
2750	int wp_flag = 0;
2751
2752	DB_DNODE_ENTER(db);
2753	dn = DB_DNODE(db);
2754	os = dn->dn_objset;
2755
2756	if (db->db_state != DB_NOFILL) {
2757		if (db->db_level > 0 || dn->dn_type == DMU_OT_DNODE) {
2758			/*
2759			 * Private object buffers are released here rather
2760			 * than in dbuf_dirty() since they are only modified
2761			 * in the syncing context and we don't want the
2762			 * overhead of making multiple copies of the data.
2763			 */
2764			if (BP_IS_HOLE(db->db_blkptr)) {
2765				arc_buf_thaw(data);
2766			} else {
2767				dbuf_release_bp(db);
2768			}
2769		}
2770	}
2771
2772	if (parent != dn->dn_dbuf) {
2773		/* Our parent is an indirect block. */
2774		/* We have a dirty parent that has been scheduled for write. */
2775		ASSERT(parent && parent->db_data_pending);
2776		/* Our parent's buffer is one level closer to the dnode. */
2777		ASSERT(db->db_level == parent->db_level-1);
2778		/*
2779		 * We're about to modify our parent's db_data by modifying
2780		 * our block pointer, so the parent must be released.
2781		 */
2782		ASSERT(arc_released(parent->db_buf));
2783		zio = parent->db_data_pending->dr_zio;
2784	} else {
2785		/* Our parent is the dnode itself. */
2786		ASSERT((db->db_level == dn->dn_phys->dn_nlevels-1 &&
2787		    db->db_blkid != DMU_SPILL_BLKID) ||
2788		    (db->db_blkid == DMU_SPILL_BLKID && db->db_level == 0));
2789		if (db->db_blkid != DMU_SPILL_BLKID)
2790			ASSERT3P(db->db_blkptr, ==,
2791			    &dn->dn_phys->dn_blkptr[db->db_blkid]);
2792		zio = dn->dn_zio;
2793	}
2794
2795	ASSERT(db->db_level == 0 || data == db->db_buf);
2796	ASSERT3U(db->db_blkptr->blk_birth, <=, txg);
2797	ASSERT(zio);
2798
2799	SET_BOOKMARK(&zb, os->os_dsl_dataset ?
2800	    os->os_dsl_dataset->ds_object : DMU_META_OBJSET,
2801	    db->db.db_object, db->db_level, db->db_blkid);
2802
2803	if (db->db_blkid == DMU_SPILL_BLKID)
2804		wp_flag = WP_SPILL;
2805	wp_flag |= (db->db_state == DB_NOFILL) ? WP_NOFILL : 0;
2806
2807	dmu_write_policy(os, dn, db->db_level, wp_flag, &zp);
2808	DB_DNODE_EXIT(db);
2809
2810	if (db->db_level == 0 &&
2811	    dr->dt.dl.dr_override_state == DR_OVERRIDDEN) {
2812		/*
2813		 * The BP for this block has been provided by open context
2814		 * (by dmu_sync() or dmu_buf_write_embedded()).
2815		 */
2816		void *contents = (data != NULL) ? data->b_data : NULL;
2817
2818		dr->dr_zio = zio_write(zio, os->os_spa, txg,
2819		    db->db_blkptr, contents, db->db.db_size, &zp,
2820		    dbuf_write_override_ready, NULL, dbuf_write_override_done,
2821		    dr, ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);
2822		mutex_enter(&db->db_mtx);
2823		dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
2824		zio_write_override(dr->dr_zio, &dr->dt.dl.dr_overridden_by,
2825		    dr->dt.dl.dr_copies, dr->dt.dl.dr_nopwrite);
2826		mutex_exit(&db->db_mtx);
2827	} else if (db->db_state == DB_NOFILL) {
2828		ASSERT(zp.zp_checksum == ZIO_CHECKSUM_OFF ||
2829		    zp.zp_checksum == ZIO_CHECKSUM_NOPARITY);
2830		dr->dr_zio = zio_write(zio, os->os_spa, txg,
2831		    db->db_blkptr, NULL, db->db.db_size, &zp,
2832		    dbuf_write_nofill_ready, NULL, dbuf_write_nofill_done, db,
2833		    ZIO_PRIORITY_ASYNC_WRITE,
2834		    ZIO_FLAG_MUSTSUCCEED | ZIO_FLAG_NODATA, &zb);
2835	} else {
2836		ASSERT(arc_released(data));
2837		dr->dr_zio = arc_write(zio, os->os_spa, txg,
2838		    db->db_blkptr, data, DBUF_IS_L2CACHEABLE(db),
2839		    DBUF_IS_L2COMPRESSIBLE(db), &zp, dbuf_write_ready,
2840		    dbuf_write_physdone, dbuf_write_done, db,
2841		    ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);
2842	}
2843}
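
/*
 * Summary sketch (illustrative): dbuf_write() picks one of three
 * issue paths for dr->dr_zio:
 *
 *	level-0, DR_OVERRIDDEN	-> zio_write() + zio_write_override()
 *	DB_NOFILL		-> zio_write() with ZIO_FLAG_NODATA
 *	everything else		-> arc_write() of the ARC buffer
 *
 * In every case the new zio is created as a child of either the
 * parent indirect block's pending dr_zio or the dnode's dn_zio, so
 * the containing block is not written until this child completes.
 */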
2844