dbuf.c revision 271001
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
 * Copyright (c) 2012, 2014 by Delphix. All rights reserved.
 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/dmu.h>
#include <sys/dmu_send.h>
#include <sys/dmu_impl.h>
#include <sys/dbuf.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dmu_tx.h>
#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/dmu_zfetch.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/zfeature.h>
#include <sys/blkptr.h>
#include <sys/range_tree.h>

/*
 * Number of times that zfs_free_range() took the slow path while doing
 * a zfs receive.  A nonzero value indicates a potential performance problem.
 */
uint64_t zfs_free_range_recv_miss;

static void dbuf_destroy(dmu_buf_impl_t *db);
static boolean_t dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx);
static void dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx);

/*
 * Global data structures and functions for the dbuf cache.
 */
static kmem_cache_t *dbuf_cache;

/* ARGSUSED */
static int
dbuf_cons(void *vdb, void *unused, int kmflag)
{
	dmu_buf_impl_t *db = vdb;
	bzero(db, sizeof (dmu_buf_impl_t));

	mutex_init(&db->db_mtx, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&db->db_changed, NULL, CV_DEFAULT, NULL);
	refcount_create(&db->db_holds);

	return (0);
}

/* ARGSUSED */
static void
dbuf_dest(void *vdb, void *unused)
{
	dmu_buf_impl_t *db = vdb;
	mutex_destroy(&db->db_mtx);
	cv_destroy(&db->db_changed);
	refcount_destroy(&db->db_holds);
}

/*
 * dbuf hash table routines
 */
static dbuf_hash_table_t dbuf_hash_table;

static uint64_t dbuf_hash_count;

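/*
 * Compute a 64-bit hash of the (objset, object, level, blkid) tuple
 * using the ZFS CRC64 table, folding in the high-order bits that the
 * byte-wise CRC steps do not cover.
 */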
static uint64_t
dbuf_hash(void *os, uint64_t obj, uint8_t lvl, uint64_t blkid)
{
	uintptr_t osv = (uintptr_t)os;
	uint64_t crc = -1ULL;

	ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);
	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (lvl)) & 0xFF];
	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (osv >> 6)) & 0xFF];
	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (obj >> 0)) & 0xFF];
	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (obj >> 8)) & 0xFF];
	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (blkid >> 0)) & 0xFF];
	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (blkid >> 8)) & 0xFF];

	crc ^= (osv >> 14) ^ (obj >> 16) ^ (blkid >> 16);

	return (crc);
}

#define	DBUF_HASH(os, obj, level, blkid) dbuf_hash(os, obj, level, blkid)

#define	DBUF_EQUAL(dbuf, os, obj, level, blkid)		\
	((dbuf)->db.db_object == (obj) &&		\
	(dbuf)->db_objset == (os) &&			\
	(dbuf)->db_level == (level) &&			\
	(dbuf)->db_blkid == (blkid))

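/*
 * Look up a dbuf in the hash table.  If found (and not in the EVICTING
 * state), return it with db_mtx held; otherwise return NULL.
 */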
dmu_buf_impl_t *
dbuf_find(dnode_t *dn, uint8_t level, uint64_t blkid)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	objset_t *os = dn->dn_objset;
	uint64_t obj = dn->dn_object;
	uint64_t hv = DBUF_HASH(os, obj, level, blkid);
	uint64_t idx = hv & h->hash_table_mask;
	dmu_buf_impl_t *db;

	mutex_enter(DBUF_HASH_MUTEX(h, idx));
	for (db = h->hash_table[idx]; db != NULL; db = db->db_hash_next) {
		if (DBUF_EQUAL(db, os, obj, level, blkid)) {
			mutex_enter(&db->db_mtx);
			if (db->db_state != DB_EVICTING) {
				mutex_exit(DBUF_HASH_MUTEX(h, idx));
				return (db);
			}
			mutex_exit(&db->db_mtx);
		}
	}
	mutex_exit(DBUF_HASH_MUTEX(h, idx));
	return (NULL);
}

/*
 * Insert an entry into the hash table.  If there is already an element
 * equal to elem in the hash table, then the already existing element
 * will be returned and the new element will not be inserted.
 * Otherwise returns NULL.
 */
static dmu_buf_impl_t *
dbuf_hash_insert(dmu_buf_impl_t *db)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	objset_t *os = db->db_objset;
	uint64_t obj = db->db.db_object;
	int level = db->db_level;
	uint64_t blkid = db->db_blkid;
	uint64_t hv = DBUF_HASH(os, obj, level, blkid);
	uint64_t idx = hv & h->hash_table_mask;
	dmu_buf_impl_t *dbf;

	mutex_enter(DBUF_HASH_MUTEX(h, idx));
	for (dbf = h->hash_table[idx]; dbf != NULL; dbf = dbf->db_hash_next) {
		if (DBUF_EQUAL(dbf, os, obj, level, blkid)) {
			mutex_enter(&dbf->db_mtx);
			if (dbf->db_state != DB_EVICTING) {
				mutex_exit(DBUF_HASH_MUTEX(h, idx));
				return (dbf);
			}
			mutex_exit(&dbf->db_mtx);
		}
	}

	mutex_enter(&db->db_mtx);
	db->db_hash_next = h->hash_table[idx];
	h->hash_table[idx] = db;
	mutex_exit(DBUF_HASH_MUTEX(h, idx));
	atomic_inc_64(&dbuf_hash_count);

	return (NULL);
}

/*
 * Remove an entry from the hash table.  It must be in the EVICTING state.
 */
static void
dbuf_hash_remove(dmu_buf_impl_t *db)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	uint64_t hv = DBUF_HASH(db->db_objset, db->db.db_object,
	    db->db_level, db->db_blkid);
	uint64_t idx = hv & h->hash_table_mask;
	dmu_buf_impl_t *dbf, **dbp;

	/*
	 * We mustn't hold db_mtx to maintain lock ordering:
	 * DBUF_HASH_MUTEX > db_mtx.
	 */
	ASSERT(refcount_is_zero(&db->db_holds));
	ASSERT(db->db_state == DB_EVICTING);
	ASSERT(!MUTEX_HELD(&db->db_mtx));

	mutex_enter(DBUF_HASH_MUTEX(h, idx));
	dbp = &h->hash_table[idx];
	while ((dbf = *dbp) != db) {
		dbp = &dbf->db_hash_next;
		ASSERT(dbf != NULL);
	}
	*dbp = db->db_hash_next;
	db->db_hash_next = NULL;
	mutex_exit(DBUF_HASH_MUTEX(h, idx));
	atomic_dec_64(&dbuf_hash_count);
}

static arc_evict_func_t dbuf_do_evict;

static void
dbuf_evict_user(dmu_buf_impl_t *db)
{
	ASSERT(MUTEX_HELD(&db->db_mtx));

	if (db->db_level != 0 || db->db_evict_func == NULL)
		return;

	if (db->db_user_data_ptr_ptr)
		*db->db_user_data_ptr_ptr = db->db.db_data;
	db->db_evict_func(&db->db, db->db_user_ptr);
	db->db_user_ptr = NULL;
	db->db_user_data_ptr_ptr = NULL;
	db->db_evict_func = NULL;
}

boolean_t
dbuf_is_metadata(dmu_buf_impl_t *db)
{
	if (db->db_level > 0) {
		return (B_TRUE);
	} else {
		boolean_t is_metadata;

		DB_DNODE_ENTER(db);
		is_metadata = DMU_OT_IS_METADATA(DB_DNODE(db)->dn_type);
		DB_DNODE_EXIT(db);

		return (is_metadata);
	}
}

void
dbuf_evict(dmu_buf_impl_t *db)
{
	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(db->db_buf == NULL);
	ASSERT(db->db_data_pending == NULL);

	dbuf_clear(db);
	dbuf_destroy(db);
}

void
dbuf_init(void)
{
	uint64_t hsize = 1ULL << 16;
	dbuf_hash_table_t *h = &dbuf_hash_table;
	int i;

	/*
	 * The hash table is big enough to fill all of physical memory
	 * with an average 4K block size.  The table will take up
	 * totalmem*sizeof(void*)/4K (i.e. 2MB/GB with 8-byte pointers).
	 */
	while (hsize * 4096 < (uint64_t)physmem * PAGESIZE)
		hsize <<= 1;

retry:
	h->hash_table_mask = hsize - 1;
	h->hash_table = kmem_zalloc(hsize * sizeof (void *), KM_NOSLEEP);
	if (h->hash_table == NULL) {
		/* XXX - we should really return an error instead of assert */
		ASSERT(hsize > (1ULL << 10));
		hsize >>= 1;
		goto retry;
	}

	dbuf_cache = kmem_cache_create("dmu_buf_impl_t",
	    sizeof (dmu_buf_impl_t),
	    0, dbuf_cons, dbuf_dest, NULL, NULL, NULL, 0);

	for (i = 0; i < DBUF_MUTEXES; i++)
		mutex_init(&h->hash_mutexes[i], NULL, MUTEX_DEFAULT, NULL);
}

void
dbuf_fini(void)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	int i;

	for (i = 0; i < DBUF_MUTEXES; i++)
		mutex_destroy(&h->hash_mutexes[i]);
	kmem_free(h->hash_table, (h->hash_table_mask + 1) * sizeof (void *));
	kmem_cache_destroy(dbuf_cache);
}

/*
 * Other stuff.
 */

#ifdef ZFS_DEBUG
static void
dbuf_verify(dmu_buf_impl_t *db)
{
	dnode_t *dn;
	dbuf_dirty_record_t *dr;

	ASSERT(MUTEX_HELD(&db->db_mtx));

	if (!(zfs_flags & ZFS_DEBUG_DBUF_VERIFY))
		return;

	ASSERT(db->db_objset != NULL);
	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	if (dn == NULL) {
		ASSERT(db->db_parent == NULL);
		ASSERT(db->db_blkptr == NULL);
	} else {
		ASSERT3U(db->db.db_object, ==, dn->dn_object);
		ASSERT3P(db->db_objset, ==, dn->dn_objset);
		ASSERT3U(db->db_level, <, dn->dn_nlevels);
		ASSERT(db->db_blkid == DMU_BONUS_BLKID ||
		    db->db_blkid == DMU_SPILL_BLKID ||
		    !avl_is_empty(&dn->dn_dbufs));
	}
	if (db->db_blkid == DMU_BONUS_BLKID) {
		ASSERT(dn != NULL);
		ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
		ASSERT3U(db->db.db_offset, ==, DMU_BONUS_BLKID);
	} else if (db->db_blkid == DMU_SPILL_BLKID) {
		ASSERT(dn != NULL);
		ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
		ASSERT0(db->db.db_offset);
	} else {
		ASSERT3U(db->db.db_offset, ==, db->db_blkid * db->db.db_size);
	}

	for (dr = db->db_data_pending; dr != NULL; dr = dr->dr_next)
		ASSERT(dr->dr_dbuf == db);

	for (dr = db->db_last_dirty; dr != NULL; dr = dr->dr_next)
		ASSERT(dr->dr_dbuf == db);

	/*
	 * We can't assert that db_size matches dn_datablksz because it
	 * can be momentarily different when another thread is doing
	 * dnode_set_blksz().
	 */
	if (db->db_level == 0 && db->db.db_object == DMU_META_DNODE_OBJECT) {
		dr = db->db_data_pending;
		/*
		 * It should only be modified in syncing context, so
		 * make sure we only have one copy of the data.
		 */
		ASSERT(dr == NULL || dr->dt.dl.dr_data == db->db_buf);
	}

	/* verify db->db_blkptr */
	if (db->db_blkptr) {
		if (db->db_parent == dn->dn_dbuf) {
			/* db is pointed to by the dnode */
			/* ASSERT3U(db->db_blkid, <, dn->dn_nblkptr); */
			if (DMU_OBJECT_IS_SPECIAL(db->db.db_object))
				ASSERT(db->db_parent == NULL);
			else
				ASSERT(db->db_parent != NULL);
			if (db->db_blkid != DMU_SPILL_BLKID)
				ASSERT3P(db->db_blkptr, ==,
				    &dn->dn_phys->dn_blkptr[db->db_blkid]);
		} else {
			/* db is pointed to by an indirect block */
			int epb = db->db_parent->db.db_size >> SPA_BLKPTRSHIFT;
			ASSERT3U(db->db_parent->db_level, ==, db->db_level+1);
			ASSERT3U(db->db_parent->db.db_object, ==,
			    db->db.db_object);
			/*
			 * dnode_grow_indblksz() can make this fail if we don't
			 * have the struct_rwlock.  XXX indblksz no longer
			 * grows.  safe to do this now?
			 */
			if (RW_WRITE_HELD(&dn->dn_struct_rwlock)) {
				ASSERT3P(db->db_blkptr, ==,
				    ((blkptr_t *)db->db_parent->db.db_data +
				    db->db_blkid % epb));
			}
		}
	}
	if ((db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr)) &&
	    (db->db_buf == NULL || db->db_buf->b_data) &&
	    db->db.db_data && db->db_blkid != DMU_BONUS_BLKID &&
	    db->db_state != DB_FILL && !dn->dn_free_txg) {
		/*
		 * If the blkptr isn't set but they have nonzero data,
		 * it had better be dirty, otherwise we'll lose that
		 * data when we evict this buffer.
		 */
		if (db->db_dirtycnt == 0) {
			uint64_t *buf = db->db.db_data;
			int i;

			for (i = 0; i < db->db.db_size >> 3; i++) {
				ASSERT(buf[i] == 0);
			}
		}
	}
	DB_DNODE_EXIT(db);
}
#endif

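/*
 * Refresh the user's cached data pointer, if one was registered, so
 * that it tracks db_data for this level-0 dbuf.
 */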
static void
dbuf_update_data(dmu_buf_impl_t *db)
{
	ASSERT(MUTEX_HELD(&db->db_mtx));
	if (db->db_level == 0 && db->db_user_data_ptr_ptr) {
		ASSERT(!refcount_is_zero(&db->db_holds));
		*db->db_user_data_ptr_ptr = db->db.db_data;
	}
}

static void
dbuf_set_data(dmu_buf_impl_t *db, arc_buf_t *buf)
{
	ASSERT(MUTEX_HELD(&db->db_mtx));
	db->db_buf = buf;
	if (buf != NULL) {
		ASSERT(buf->b_data != NULL);
		db->db.db_data = buf->b_data;
		if (!arc_released(buf))
			arc_set_callback(buf, dbuf_do_evict, db);
		dbuf_update_data(db);
	} else {
		dbuf_evict_user(db);
		db->db.db_data = NULL;
		if (db->db_state != DB_NOFILL)
			db->db_state = DB_UNCACHED;
	}
}

/*
 * Loan out an arc_buf for read.  Return the loaned arc_buf.
 */
arc_buf_t *
dbuf_loan_arcbuf(dmu_buf_impl_t *db)
{
	arc_buf_t *abuf;

	mutex_enter(&db->db_mtx);
	if (arc_released(db->db_buf) || refcount_count(&db->db_holds) > 1) {
		int blksz = db->db.db_size;
		spa_t *spa = db->db_objset->os_spa;

		mutex_exit(&db->db_mtx);
		abuf = arc_loan_buf(spa, blksz);
		bcopy(db->db.db_data, abuf->b_data, blksz);
	} else {
		abuf = db->db_buf;
		arc_loan_inuse_buf(abuf, db);
		dbuf_set_data(db, NULL);
		mutex_exit(&db->db_mtx);
	}
	return (abuf);
}

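/*
 * Return the number of the level-0 block that contains the given byte
 * offset in the dnode's data.
 */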
uint64_t
dbuf_whichblock(dnode_t *dn, uint64_t offset)
{
	if (dn->dn_datablkshift) {
		return (offset >> dn->dn_datablkshift);
	} else {
		ASSERT3U(offset, <, dn->dn_datablksz);
		return (0);
	}
}

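/*
 * ARC read-completion callback: on success (or a free that raced the
 * read) transition the dbuf to DB_CACHED, on error back to DB_UNCACHED,
 * then wake any waiters and drop the hold taken for the read.
 */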
static void
dbuf_read_done(zio_t *zio, arc_buf_t *buf, void *vdb)
{
	dmu_buf_impl_t *db = vdb;

	mutex_enter(&db->db_mtx);
	ASSERT3U(db->db_state, ==, DB_READ);
	/*
	 * All reads are synchronous, so we must have a hold on the dbuf
	 */
	ASSERT(refcount_count(&db->db_holds) > 0);
	ASSERT(db->db_buf == NULL);
	ASSERT(db->db.db_data == NULL);
	if (db->db_level == 0 && db->db_freed_in_flight) {
		/* we were freed in flight; disregard any error */
		arc_release(buf, db);
		bzero(buf->b_data, db->db.db_size);
		arc_buf_freeze(buf);
		db->db_freed_in_flight = FALSE;
		dbuf_set_data(db, buf);
		db->db_state = DB_CACHED;
	} else if (zio == NULL || zio->io_error == 0) {
		dbuf_set_data(db, buf);
		db->db_state = DB_CACHED;
	} else {
		ASSERT(db->db_blkid != DMU_BONUS_BLKID);
		ASSERT3P(db->db_buf, ==, NULL);
		VERIFY(arc_buf_remove_ref(buf, db));
		db->db_state = DB_UNCACHED;
	}
	cv_broadcast(&db->db_changed);
	dbuf_rele_and_unlock(db, NULL);
}

static void
dbuf_read_impl(dmu_buf_impl_t *db, zio_t *zio, uint32_t *flags)
{
	dnode_t *dn;
	zbookmark_phys_t zb;
	uint32_t aflags = ARC_NOWAIT;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	ASSERT(!refcount_is_zero(&db->db_holds));
	/* We need the struct_rwlock to prevent db_blkptr from changing. */
	ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(db->db_state == DB_UNCACHED);
	ASSERT(db->db_buf == NULL);

	if (db->db_blkid == DMU_BONUS_BLKID) {
		int bonuslen = MIN(dn->dn_bonuslen, dn->dn_phys->dn_bonuslen);

		ASSERT3U(bonuslen, <=, db->db.db_size);
		db->db.db_data = zio_buf_alloc(DN_MAX_BONUSLEN);
		arc_space_consume(DN_MAX_BONUSLEN, ARC_SPACE_OTHER);
		if (bonuslen < DN_MAX_BONUSLEN)
			bzero(db->db.db_data, DN_MAX_BONUSLEN);
		if (bonuslen)
			bcopy(DN_BONUS(dn->dn_phys), db->db.db_data, bonuslen);
		DB_DNODE_EXIT(db);
		dbuf_update_data(db);
		db->db_state = DB_CACHED;
		mutex_exit(&db->db_mtx);
		return;
	}

	/*
	 * Recheck BP_IS_HOLE() after dnode_block_freed() in case dnode_sync()
	 * processes the delete record and clears the bp while we are waiting
	 * for the dn_mtx (resulting in a "no" from block_freed).
	 */
	if (db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr) ||
	    (db->db_level == 0 && (dnode_block_freed(dn, db->db_blkid) ||
	    BP_IS_HOLE(db->db_blkptr)))) {
		arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);

		DB_DNODE_EXIT(db);
		dbuf_set_data(db, arc_buf_alloc(db->db_objset->os_spa,
		    db->db.db_size, db, type));
		bzero(db->db.db_data, db->db.db_size);
		db->db_state = DB_CACHED;
		*flags |= DB_RF_CACHED;
		mutex_exit(&db->db_mtx);
		return;
	}

	DB_DNODE_EXIT(db);

	db->db_state = DB_READ;
	mutex_exit(&db->db_mtx);

	if (DBUF_IS_L2CACHEABLE(db))
		aflags |= ARC_L2CACHE;
	if (DBUF_IS_L2COMPRESSIBLE(db))
		aflags |= ARC_L2COMPRESS;

	SET_BOOKMARK(&zb, db->db_objset->os_dsl_dataset ?
	    db->db_objset->os_dsl_dataset->ds_object : DMU_META_OBJSET,
	    db->db.db_object, db->db_level, db->db_blkid);

	dbuf_add_ref(db, NULL);

	(void) arc_read(zio, db->db_objset->os_spa, db->db_blkptr,
	    dbuf_read_done, db, ZIO_PRIORITY_SYNC_READ,
	    (*flags & DB_RF_CANFAIL) ? ZIO_FLAG_CANFAIL : ZIO_FLAG_MUSTSUCCEED,
	    &aflags, &zb);
	if (aflags & ARC_CACHED)
		*flags |= DB_RF_CACHED;
}

int
dbuf_read(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags)
{
	int err = 0;
	boolean_t havepzio = (zio != NULL);
	boolean_t prefetch;
	dnode_t *dn;

	/*
	 * We don't have to hold the mutex to check db_state because it
	 * can't be freed while we have a hold on the buffer.
	 */
	ASSERT(!refcount_is_zero(&db->db_holds));

	if (db->db_state == DB_NOFILL)
		return (SET_ERROR(EIO));

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	if ((flags & DB_RF_HAVESTRUCT) == 0)
		rw_enter(&dn->dn_struct_rwlock, RW_READER);

	prefetch = db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
	    (flags & DB_RF_NOPREFETCH) == 0 && dn != NULL &&
	    DBUF_IS_CACHEABLE(db);

	mutex_enter(&db->db_mtx);
	if (db->db_state == DB_CACHED) {
		mutex_exit(&db->db_mtx);
		if (prefetch)
			dmu_zfetch(&dn->dn_zfetch, db->db.db_offset,
			    db->db.db_size, TRUE);
		if ((flags & DB_RF_HAVESTRUCT) == 0)
			rw_exit(&dn->dn_struct_rwlock);
		DB_DNODE_EXIT(db);
	} else if (db->db_state == DB_UNCACHED) {
		spa_t *spa = dn->dn_objset->os_spa;

		if (zio == NULL)
			zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
		dbuf_read_impl(db, zio, &flags);

		/* dbuf_read_impl has dropped db_mtx for us */

		if (prefetch)
			dmu_zfetch(&dn->dn_zfetch, db->db.db_offset,
			    db->db.db_size, flags & DB_RF_CACHED);

		if ((flags & DB_RF_HAVESTRUCT) == 0)
			rw_exit(&dn->dn_struct_rwlock);
		DB_DNODE_EXIT(db);

		if (!havepzio)
			err = zio_wait(zio);
	} else {
		/*
		 * Another reader came in while the dbuf was in flight
		 * between UNCACHED and CACHED.  Either a writer will finish
		 * writing the buffer (sending the dbuf to CACHED) or the
		 * first reader's request will reach the read_done callback
		 * and send the dbuf to CACHED.  Otherwise, a failure
		 * occurred and the dbuf went to UNCACHED.
		 */
		mutex_exit(&db->db_mtx);
		if (prefetch)
			dmu_zfetch(&dn->dn_zfetch, db->db.db_offset,
			    db->db.db_size, TRUE);
		if ((flags & DB_RF_HAVESTRUCT) == 0)
			rw_exit(&dn->dn_struct_rwlock);
		DB_DNODE_EXIT(db);

		/* Skip the wait per the caller's request. */
		mutex_enter(&db->db_mtx);
		if ((flags & DB_RF_NEVERWAIT) == 0) {
			while (db->db_state == DB_READ ||
			    db->db_state == DB_FILL) {
				ASSERT(db->db_state == DB_READ ||
				    (flags & DB_RF_HAVESTRUCT) == 0);
				cv_wait(&db->db_changed, &db->db_mtx);
			}
			if (db->db_state == DB_UNCACHED)
				err = SET_ERROR(EIO);
		}
		mutex_exit(&db->db_mtx);
	}

	ASSERT(err || havepzio || db->db_state == DB_CACHED);
	return (err);
}

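/*
 * Prepare the dbuf to be entirely overwritten without reading the old
 * contents from disk: if it is uncached, attach an empty buffer and
 * move it to the DB_FILL state.
 */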
static void
dbuf_noread(dmu_buf_impl_t *db)
{
	ASSERT(!refcount_is_zero(&db->db_holds));
	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
	mutex_enter(&db->db_mtx);
	while (db->db_state == DB_READ || db->db_state == DB_FILL)
		cv_wait(&db->db_changed, &db->db_mtx);
	if (db->db_state == DB_UNCACHED) {
		arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
		spa_t *spa = db->db_objset->os_spa;

		ASSERT(db->db_buf == NULL);
		ASSERT(db->db.db_data == NULL);
		dbuf_set_data(db, arc_buf_alloc(spa, db->db.db_size, db, type));
		db->db_state = DB_FILL;
	} else if (db->db_state == DB_NOFILL) {
		dbuf_set_data(db, NULL);
	} else {
		ASSERT3U(db->db_state, ==, DB_CACHED);
	}
	mutex_exit(&db->db_mtx);
}

/*
 * This is our just-in-time copy function.  It makes a copy of buffers
 * that have been modified in a previous transaction group before we
 * modify them in the current active group.
 *
 * This function is used in two places: when we are dirtying a buffer
 * for the first time in a txg, and when we are freeing a range in a
 * dnode that includes this buffer.
 *
 * Note that when we are called from dbuf_free_range() we do not put a
 * hold on the buffer, we just traverse the active dbuf list for the
 * dnode.
 */
static void
dbuf_fix_old_data(dmu_buf_impl_t *db, uint64_t txg)
{
	dbuf_dirty_record_t *dr = db->db_last_dirty;

	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(db->db.db_data != NULL);
	ASSERT(db->db_level == 0);
	ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT);

	if (dr == NULL ||
	    (dr->dt.dl.dr_data !=
	    ((db->db_blkid == DMU_BONUS_BLKID) ? db->db.db_data : db->db_buf)))
		return;

	/*
	 * If the last dirty record for this dbuf has not yet synced
	 * and it's referencing the dbuf data, either:
	 *	reset the reference to point to a new copy,
	 * or (if there are no active holders)
	 *	just null out the current db_data pointer.
	 */
	ASSERT(dr->dr_txg >= txg - 2);
	if (db->db_blkid == DMU_BONUS_BLKID) {
		/* Note that the data bufs here are zio_bufs */
		dr->dt.dl.dr_data = zio_buf_alloc(DN_MAX_BONUSLEN);
		arc_space_consume(DN_MAX_BONUSLEN, ARC_SPACE_OTHER);
		bcopy(db->db.db_data, dr->dt.dl.dr_data, DN_MAX_BONUSLEN);
	} else if (refcount_count(&db->db_holds) > db->db_dirtycnt) {
		int size = db->db.db_size;
		arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
		spa_t *spa = db->db_objset->os_spa;

		dr->dt.dl.dr_data = arc_buf_alloc(spa, size, db, type);
		bcopy(db->db.db_data, dr->dt.dl.dr_data->b_data, size);
	} else {
		dbuf_set_data(db, NULL);
	}
}

void
dbuf_unoverride(dbuf_dirty_record_t *dr)
{
	dmu_buf_impl_t *db = dr->dr_dbuf;
	blkptr_t *bp = &dr->dt.dl.dr_overridden_by;
	uint64_t txg = dr->dr_txg;

	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(dr->dt.dl.dr_override_state != DR_IN_DMU_SYNC);
	ASSERT(db->db_level == 0);

	if (db->db_blkid == DMU_BONUS_BLKID ||
	    dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN)
		return;

	ASSERT(db->db_data_pending != dr);

	/* free this block */
	if (!BP_IS_HOLE(bp) && !dr->dt.dl.dr_nopwrite)
		zio_free(db->db_objset->os_spa, txg, bp);

	dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
	dr->dt.dl.dr_nopwrite = B_FALSE;

	/*
	 * Release the already-written buffer, so we leave it in
	 * a consistent dirty state.  Note that all callers are
	 * modifying the buffer, so they will immediately do
	 * another (redundant) arc_release().  Therefore, leave
	 * the buf thawed to save the effort of freezing &
	 * immediately re-thawing it.
	 */
	arc_release(dr->dt.dl.dr_data, db);
}

/*
 * Evict (if it's unreferenced) or clear (if it's referenced) any level-0
 * data blocks in the free range, so that any future readers will find
 * empty blocks.
 *
 * This is a no-op if the dataset is in the middle of an incremental
 * receive; see comment below for details.
 */
void
dbuf_free_range(dnode_t *dn, uint64_t start_blkid, uint64_t end_blkid,
    dmu_tx_t *tx)
{
	dmu_buf_impl_t *db, *db_next, db_search;
	uint64_t txg = tx->tx_txg;
	avl_index_t where;

	if (end_blkid > dn->dn_maxblkid && (end_blkid != DMU_SPILL_BLKID))
		end_blkid = dn->dn_maxblkid;
	dprintf_dnode(dn, "start=%llu end=%llu\n", start_blkid, end_blkid);

	db_search.db_level = 0;
	db_search.db_blkid = start_blkid;
	db_search.db_state = DB_SEARCH;

	mutex_enter(&dn->dn_dbufs_mtx);
	if (start_blkid >= dn->dn_unlisted_l0_blkid) {
		/* There can't be any dbufs in this range; no need to search. */
#ifdef DEBUG
		db = avl_find(&dn->dn_dbufs, &db_search, &where);
		ASSERT3P(db, ==, NULL);
		db = avl_nearest(&dn->dn_dbufs, where, AVL_AFTER);
		ASSERT(db == NULL || db->db_level > 0);
#endif
		mutex_exit(&dn->dn_dbufs_mtx);
		return;
	} else if (dmu_objset_is_receiving(dn->dn_objset)) {
		/*
		 * If we are receiving, we expect there to be no dbufs in
		 * the range to be freed, because receive modifies each
		 * block at most once, and in offset order.  If this is
		 * not the case, it can lead to performance problems,
		 * so note that we unexpectedly took the slow path.
		 */
		atomic_inc_64(&zfs_free_range_recv_miss);
	}

	db = avl_find(&dn->dn_dbufs, &db_search, &where);
	ASSERT3P(db, ==, NULL);
	db = avl_nearest(&dn->dn_dbufs, where, AVL_AFTER);

	for (; db != NULL; db = db_next) {
		db_next = AVL_NEXT(&dn->dn_dbufs, db);
		ASSERT(db->db_blkid != DMU_BONUS_BLKID);

		if (db->db_level != 0 || db->db_blkid > end_blkid) {
			break;
		}
		ASSERT3U(db->db_blkid, >=, start_blkid);

		/* found a level 0 buffer in the range */
		mutex_enter(&db->db_mtx);
		if (dbuf_undirty(db, tx)) {
			/* mutex has been dropped and dbuf destroyed */
			continue;
		}

		if (db->db_state == DB_UNCACHED ||
		    db->db_state == DB_NOFILL ||
		    db->db_state == DB_EVICTING) {
			ASSERT(db->db.db_data == NULL);
			mutex_exit(&db->db_mtx);
			continue;
		}
		if (db->db_state == DB_READ || db->db_state == DB_FILL) {
			/* will be handled in dbuf_read_done or dbuf_rele */
			db->db_freed_in_flight = TRUE;
			mutex_exit(&db->db_mtx);
			continue;
		}
		if (refcount_count(&db->db_holds) == 0) {
			ASSERT(db->db_buf);
			dbuf_clear(db);
			continue;
		}
		/* The dbuf is referenced */

		if (db->db_last_dirty != NULL) {
			dbuf_dirty_record_t *dr = db->db_last_dirty;

			if (dr->dr_txg == txg) {
				/*
				 * This buffer is "in-use", re-adjust the file
				 * size to reflect that this buffer may
				 * contain new data when we sync.
				 */
				if (db->db_blkid != DMU_SPILL_BLKID &&
				    db->db_blkid > dn->dn_maxblkid)
					dn->dn_maxblkid = db->db_blkid;
				dbuf_unoverride(dr);
			} else {
				/*
				 * This dbuf is not dirty in the open context.
				 * Either uncache it (if it's not referenced in
				 * the open context) or reset its contents to
				 * empty.
				 */
				dbuf_fix_old_data(db, txg);
			}
		}
		/* clear the contents if it's cached */
		if (db->db_state == DB_CACHED) {
			ASSERT(db->db.db_data != NULL);
			arc_release(db->db_buf, db);
			bzero(db->db.db_data, db->db.db_size);
			arc_buf_freeze(db->db_buf);
		}

		mutex_exit(&db->db_mtx);
	}
	mutex_exit(&dn->dn_dbufs_mtx);
}

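/*
 * Determine whether the block backing this dbuf could be freed, i.e.
 * whether it exists and is not referenced by a snapshot.
 */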
static int
dbuf_block_freeable(dmu_buf_impl_t *db)
{
	dsl_dataset_t *ds = db->db_objset->os_dsl_dataset;
	uint64_t birth_txg = 0;

	/*
	 * We don't need any locking to protect db_blkptr:
	 * If it's syncing, then db_last_dirty will be set
	 * so we'll ignore db_blkptr.
	 *
	 * This logic ensures that only block births for
	 * filled blocks are considered.
	 */
	ASSERT(MUTEX_HELD(&db->db_mtx));
	if (db->db_last_dirty && (db->db_blkptr == NULL ||
	    !BP_IS_HOLE(db->db_blkptr))) {
		birth_txg = db->db_last_dirty->dr_txg;
	} else if (db->db_blkptr != NULL && !BP_IS_HOLE(db->db_blkptr)) {
		birth_txg = db->db_blkptr->blk_birth;
	}

	/*
	 * If this block doesn't exist or is in a snapshot, it can't be freed.
	 * Don't pass the bp to dsl_dataset_block_freeable() since we
	 * are holding the db_mtx lock and might deadlock if we are
	 * prefetching a dedup-ed block.
	 */
	if (birth_txg != 0)
		return (ds == NULL ||
		    dsl_dataset_block_freeable(ds, NULL, birth_txg));
	else
		return (B_FALSE);
}

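/*
 * Grow or shrink the dbuf to the given size, copying the old contents
 * and zeroing any newly exposed tail.  The dbuf is dirtied as part of
 * the resize.
 */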
void
dbuf_new_size(dmu_buf_impl_t *db, int size, dmu_tx_t *tx)
{
	arc_buf_t *buf, *obuf;
	int osize = db->db.db_size;
	arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
	dnode_t *dn;

	ASSERT(db->db_blkid != DMU_BONUS_BLKID);

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);

	/* XXX does *this* func really need the lock? */
	ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));

	/*
	 * This call to dmu_buf_will_dirty() with the dn_struct_rwlock held
	 * is OK, because there can be no other references to the db
	 * when we are changing its size, so no concurrent DB_FILL can
	 * be happening.
	 */
	/*
	 * XXX we should be doing a dbuf_read, checking the return
	 * value and returning that up to our callers
	 */
	dmu_buf_will_dirty(&db->db, tx);

	/* create the data buffer for the new block */
	buf = arc_buf_alloc(dn->dn_objset->os_spa, size, db, type);

	/* copy old block data to the new block */
	obuf = db->db_buf;
	bcopy(obuf->b_data, buf->b_data, MIN(osize, size));
	/* zero the remainder */
	if (size > osize)
		bzero((uint8_t *)buf->b_data + osize, size - osize);

	mutex_enter(&db->db_mtx);
	dbuf_set_data(db, buf);
	VERIFY(arc_buf_remove_ref(obuf, db));
	db->db.db_size = size;

	if (db->db_level == 0) {
		ASSERT3U(db->db_last_dirty->dr_txg, ==, tx->tx_txg);
		db->db_last_dirty->dt.dl.dr_data = buf;
	}
	mutex_exit(&db->db_mtx);

	dnode_willuse_space(dn, size-osize, tx);
	DB_DNODE_EXIT(db);
}

void
dbuf_release_bp(dmu_buf_impl_t *db)
{
	objset_t *os = db->db_objset;

	ASSERT(dsl_pool_sync_context(dmu_objset_pool(os)));
	ASSERT(arc_released(os->os_phys_buf) ||
	    list_link_active(&os->os_dsl_dataset->ds_synced_link));
	ASSERT(db->db_parent == NULL || arc_released(db->db_parent->db_buf));

	(void) arc_release(db->db_buf, db);
}

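/*
 * Mark the dbuf dirty in the given transaction: create (or reuse) the
 * dirty record for this txg and recursively dirty the parent chain up
 * to the dnode.  Returns the dirty record.
 */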
dbuf_dirty_record_t *
dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
{
	dnode_t *dn;
	objset_t *os;
	dbuf_dirty_record_t **drp, *dr;
	int drop_struct_lock = FALSE;
	boolean_t do_free_accounting = B_FALSE;
	int txgoff = tx->tx_txg & TXG_MASK;

	ASSERT(tx->tx_txg != 0);
	ASSERT(!refcount_is_zero(&db->db_holds));
	DMU_TX_DIRTY_BUF(tx, db);

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	/*
	 * Shouldn't dirty a regular buffer in syncing context.  Private
	 * objects may be dirtied in syncing context, but only if they
	 * were already pre-dirtied in open context.
	 */
	ASSERT(!dmu_tx_is_syncing(tx) ||
	    BP_IS_HOLE(dn->dn_objset->os_rootbp) ||
	    DMU_OBJECT_IS_SPECIAL(dn->dn_object) ||
	    dn->dn_objset->os_dsl_dataset == NULL);
	/*
	 * We make this assert for private objects as well, but after we
	 * check if we're already dirty.  They are allowed to re-dirty
	 * in syncing context.
	 */
	ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT ||
	    dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx ==
	    (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN));

	mutex_enter(&db->db_mtx);
	/*
	 * XXX make this true for indirects too?  The problem is that
	 * transactions created with dmu_tx_create_assigned() from
	 * syncing context don't bother holding ahead.
	 */
	ASSERT(db->db_level != 0 ||
	    db->db_state == DB_CACHED || db->db_state == DB_FILL ||
	    db->db_state == DB_NOFILL);

	mutex_enter(&dn->dn_mtx);
	/*
	 * Don't set dirtyctx to SYNC if we're just modifying this as we
	 * initialize the objset.
	 */
	if (dn->dn_dirtyctx == DN_UNDIRTIED &&
	    !BP_IS_HOLE(dn->dn_objset->os_rootbp)) {
		dn->dn_dirtyctx =
		    (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN);
		ASSERT(dn->dn_dirtyctx_firstset == NULL);
		dn->dn_dirtyctx_firstset = kmem_alloc(1, KM_SLEEP);
	}
	mutex_exit(&dn->dn_mtx);

	if (db->db_blkid == DMU_SPILL_BLKID)
		dn->dn_have_spill = B_TRUE;

	/*
	 * If this buffer is already dirty, we're done.
	 */
	drp = &db->db_last_dirty;
	ASSERT(*drp == NULL || (*drp)->dr_txg <= tx->tx_txg ||
	    db->db.db_object == DMU_META_DNODE_OBJECT);
	while ((dr = *drp) != NULL && dr->dr_txg > tx->tx_txg)
		drp = &dr->dr_next;
	if (dr && dr->dr_txg == tx->tx_txg) {
		DB_DNODE_EXIT(db);

		if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID) {
			/*
			 * If this buffer has already been written out,
			 * we now need to reset its state.
			 */
			dbuf_unoverride(dr);
			if (db->db.db_object != DMU_META_DNODE_OBJECT &&
			    db->db_state != DB_NOFILL)
				arc_buf_thaw(db->db_buf);
		}
		mutex_exit(&db->db_mtx);
		return (dr);
	}

	/*
	 * Only valid if not already dirty.
	 */
	ASSERT(dn->dn_object == 0 ||
	    dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx ==
	    (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN));

	ASSERT3U(dn->dn_nlevels, >, db->db_level);
	ASSERT((dn->dn_phys->dn_nlevels == 0 && db->db_level == 0) ||
	    dn->dn_phys->dn_nlevels > db->db_level ||
	    dn->dn_next_nlevels[txgoff] > db->db_level ||
	    dn->dn_next_nlevels[(tx->tx_txg-1) & TXG_MASK] > db->db_level ||
	    dn->dn_next_nlevels[(tx->tx_txg-2) & TXG_MASK] > db->db_level);

	/*
	 * We should only be dirtying in syncing context if it's the
	 * mos or we're initializing the os or it's a special object.
	 * However, we are allowed to dirty in syncing context provided
	 * we already dirtied it in open context.  Hence we must make
	 * this assertion only if we're not already dirty.
	 */
	os = dn->dn_objset;
	ASSERT(!dmu_tx_is_syncing(tx) || DMU_OBJECT_IS_SPECIAL(dn->dn_object) ||
	    os->os_dsl_dataset == NULL || BP_IS_HOLE(os->os_rootbp));
	ASSERT(db->db.db_size != 0);

	dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size);

	if (db->db_blkid != DMU_BONUS_BLKID) {
		/*
		 * Update the accounting.
		 * Note: we delay "free accounting" until after we drop
		 * the db_mtx.  This keeps us from grabbing other locks
		 * (and possibly deadlocking) in bp_get_dsize() while
		 * also holding the db_mtx.
		 */
		dnode_willuse_space(dn, db->db.db_size, tx);
		do_free_accounting = dbuf_block_freeable(db);
	}

	/*
	 * If this buffer is dirty in an old transaction group we need
	 * to make a copy of it so that the changes we make in this
	 * transaction group won't leak out when we sync the older txg.
	 */
	dr = kmem_zalloc(sizeof (dbuf_dirty_record_t), KM_SLEEP);
	if (db->db_level == 0) {
		void *data_old = db->db_buf;

		if (db->db_state != DB_NOFILL) {
			if (db->db_blkid == DMU_BONUS_BLKID) {
				dbuf_fix_old_data(db, tx->tx_txg);
				data_old = db->db.db_data;
			} else if (db->db.db_object != DMU_META_DNODE_OBJECT) {
				/*
				 * Release the data buffer from the cache so
				 * that we can modify it without impacting
				 * possible other users of this cached data
				 * block.  Note that indirect blocks and
				 * private objects are not released until the
				 * syncing state (since they are only modified
				 * then).
				 */
				arc_release(db->db_buf, db);
				dbuf_fix_old_data(db, tx->tx_txg);
				data_old = db->db_buf;
			}
			ASSERT(data_old != NULL);
		}
		dr->dt.dl.dr_data = data_old;
	} else {
		mutex_init(&dr->dt.di.dr_mtx, NULL, MUTEX_DEFAULT, NULL);
		list_create(&dr->dt.di.dr_children,
		    sizeof (dbuf_dirty_record_t),
		    offsetof(dbuf_dirty_record_t, dr_dirty_node));
	}
	if (db->db_blkid != DMU_BONUS_BLKID && os->os_dsl_dataset != NULL)
		dr->dr_accounted = db->db.db_size;
	dr->dr_dbuf = db;
	dr->dr_txg = tx->tx_txg;
	dr->dr_next = *drp;
	*drp = dr;

	/*
	 * We could have been freed_in_flight between the dbuf_noread
	 * and dbuf_dirty.  We win, as though the dbuf_noread() had
	 * happened after the free.
	 */
	if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
	    db->db_blkid != DMU_SPILL_BLKID) {
		mutex_enter(&dn->dn_mtx);
		if (dn->dn_free_ranges[txgoff] != NULL) {
			range_tree_clear(dn->dn_free_ranges[txgoff],
			    db->db_blkid, 1);
		}
		mutex_exit(&dn->dn_mtx);
		db->db_freed_in_flight = FALSE;
	}

	/*
	 * This buffer is now part of this txg
	 */
	dbuf_add_ref(db, (void *)(uintptr_t)tx->tx_txg);
	db->db_dirtycnt += 1;
	ASSERT3U(db->db_dirtycnt, <=, 3);

	mutex_exit(&db->db_mtx);

	if (db->db_blkid == DMU_BONUS_BLKID ||
	    db->db_blkid == DMU_SPILL_BLKID) {
		mutex_enter(&dn->dn_mtx);
		ASSERT(!list_link_active(&dr->dr_dirty_node));
		list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
		mutex_exit(&dn->dn_mtx);
		dnode_setdirty(dn, tx);
		DB_DNODE_EXIT(db);
		return (dr);
	} else if (do_free_accounting) {
		blkptr_t *bp = db->db_blkptr;
		int64_t willfree = (bp && !BP_IS_HOLE(bp)) ?
		    bp_get_dsize(os->os_spa, bp) : db->db.db_size;
		/*
		 * This is only a guess -- if the dbuf is dirty
		 * in a previous txg, we don't know how much
		 * space it will use on disk yet.  We should
		 * really have the struct_rwlock to access
		 * db_blkptr, but since this is just a guess,
		 * it's OK if we get an odd answer.
		 */
		ddt_prefetch(os->os_spa, bp);
		dnode_willuse_space(dn, -willfree, tx);
	}

	if (!RW_WRITE_HELD(&dn->dn_struct_rwlock)) {
		rw_enter(&dn->dn_struct_rwlock, RW_READER);
		drop_struct_lock = TRUE;
	}

	if (db->db_level == 0) {
		dnode_new_blkid(dn, db->db_blkid, tx, drop_struct_lock);
		ASSERT(dn->dn_maxblkid >= db->db_blkid);
	}

	if (db->db_level+1 < dn->dn_nlevels) {
		dmu_buf_impl_t *parent = db->db_parent;
		dbuf_dirty_record_t *di;
		int parent_held = FALSE;

		if (db->db_parent == NULL || db->db_parent == dn->dn_dbuf) {
			int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;

			parent = dbuf_hold_level(dn, db->db_level+1,
			    db->db_blkid >> epbs, FTAG);
			ASSERT(parent != NULL);
			parent_held = TRUE;
		}
		if (drop_struct_lock)
			rw_exit(&dn->dn_struct_rwlock);
		ASSERT3U(db->db_level+1, ==, parent->db_level);
		di = dbuf_dirty(parent, tx);
		if (parent_held)
			dbuf_rele(parent, FTAG);

		mutex_enter(&db->db_mtx);
		/*
		 * Since we've dropped the mutex, it's possible that
		 * dbuf_undirty() might have changed this out from under us.
		 */
		if (db->db_last_dirty == dr ||
		    dn->dn_object == DMU_META_DNODE_OBJECT) {
			mutex_enter(&di->dt.di.dr_mtx);
			ASSERT3U(di->dr_txg, ==, tx->tx_txg);
			ASSERT(!list_link_active(&dr->dr_dirty_node));
			list_insert_tail(&di->dt.di.dr_children, dr);
			mutex_exit(&di->dt.di.dr_mtx);
			dr->dr_parent = di;
		}
		mutex_exit(&db->db_mtx);
	} else {
		ASSERT(db->db_level+1 == dn->dn_nlevels);
		ASSERT(db->db_blkid < dn->dn_nblkptr);
		ASSERT(db->db_parent == NULL || db->db_parent == dn->dn_dbuf);
		mutex_enter(&dn->dn_mtx);
		ASSERT(!list_link_active(&dr->dr_dirty_node));
		list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
		mutex_exit(&dn->dn_mtx);
		if (drop_struct_lock)
			rw_exit(&dn->dn_struct_rwlock);
	}

	dnode_setdirty(dn, tx);
	DB_DNODE_EXIT(db);
	return (dr);
}

/*
 * Undirty a buffer in the transaction group referenced by the given
 * transaction.  Return whether this evicted the dbuf.
 */
static boolean_t
dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
{
	dnode_t *dn;
	uint64_t txg = tx->tx_txg;
	dbuf_dirty_record_t *dr, **drp;

	ASSERT(txg != 0);
	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
	ASSERT0(db->db_level);
	ASSERT(MUTEX_HELD(&db->db_mtx));

	/*
	 * If this buffer is not dirty, we're done.
	 */
	for (drp = &db->db_last_dirty; (dr = *drp) != NULL; drp = &dr->dr_next)
		if (dr->dr_txg <= txg)
			break;
	if (dr == NULL || dr->dr_txg < txg)
		return (B_FALSE);
	ASSERT(dr->dr_txg == txg);
	ASSERT(dr->dr_dbuf == db);

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);

	dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size);

	ASSERT(db->db.db_size != 0);

	/*
	 * Any space we accounted for in dp_dirty_* will be cleaned up by
	 * dsl_pool_sync().  This is relatively rare so the discrepancy
	 * is not a big deal.
	 */

	*drp = dr->dr_next;

	/*
	 * Note that there are three places in dbuf_dirty()
	 * where this dirty record may be put on a list.
	 * Make sure to do a list_remove corresponding to
	 * every one of those list_insert calls.
	 */
	if (dr->dr_parent) {
		mutex_enter(&dr->dr_parent->dt.di.dr_mtx);
		list_remove(&dr->dr_parent->dt.di.dr_children, dr);
		mutex_exit(&dr->dr_parent->dt.di.dr_mtx);
	} else if (db->db_blkid == DMU_SPILL_BLKID ||
	    db->db_level+1 == dn->dn_nlevels) {
		ASSERT(db->db_blkptr == NULL || db->db_parent == dn->dn_dbuf);
		mutex_enter(&dn->dn_mtx);
		list_remove(&dn->dn_dirty_records[txg & TXG_MASK], dr);
		mutex_exit(&dn->dn_mtx);
	}
	DB_DNODE_EXIT(db);

	if (db->db_state != DB_NOFILL) {
		dbuf_unoverride(dr);

		ASSERT(db->db_buf != NULL);
		ASSERT(dr->dt.dl.dr_data != NULL);
		if (dr->dt.dl.dr_data != db->db_buf)
			VERIFY(arc_buf_remove_ref(dr->dt.dl.dr_data, db));
	}

	if (db->db_level != 0) {
		mutex_destroy(&dr->dt.di.dr_mtx);
		list_destroy(&dr->dt.di.dr_children);
	}

	kmem_free(dr, sizeof (dbuf_dirty_record_t));

	ASSERT(db->db_dirtycnt > 0);
	db->db_dirtycnt -= 1;

	if (refcount_remove(&db->db_holds, (void *)(uintptr_t)txg) == 0) {
		arc_buf_t *buf = db->db_buf;

		ASSERT(db->db_state == DB_NOFILL || arc_released(buf));
		dbuf_set_data(db, NULL);
		VERIFY(arc_buf_remove_ref(buf, db));
		dbuf_evict(db);
		return (B_TRUE);
	}

	return (B_FALSE);
}

void
dmu_buf_will_dirty(dmu_buf_t *db_fake, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
	int rf = DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH;

	ASSERT(tx->tx_txg != 0);
	ASSERT(!refcount_is_zero(&db->db_holds));

	DB_DNODE_ENTER(db);
	if (RW_WRITE_HELD(&DB_DNODE(db)->dn_struct_rwlock))
		rf |= DB_RF_HAVESTRUCT;
	DB_DNODE_EXIT(db);
	(void) dbuf_read(db, NULL, rf);
	(void) dbuf_dirty(db, tx);
}

void
dmu_buf_will_not_fill(dmu_buf_t *db_fake, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;

	db->db_state = DB_NOFILL;

	dmu_buf_will_fill(db_fake, tx);
}

void
dmu_buf_will_fill(dmu_buf_t *db_fake, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;

	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
	ASSERT(tx->tx_txg != 0);
	ASSERT(db->db_level == 0);
	ASSERT(!refcount_is_zero(&db->db_holds));

	ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT ||
	    dmu_tx_private_ok(tx));

	dbuf_noread(db);
	(void) dbuf_dirty(db, tx);
}

#pragma weak dmu_buf_fill_done = dbuf_fill_done
/* ARGSUSED */
void
dbuf_fill_done(dmu_buf_impl_t *db, dmu_tx_t *tx)
{
	mutex_enter(&db->db_mtx);
	DBUF_VERIFY(db);

	if (db->db_state == DB_FILL) {
		if (db->db_level == 0 && db->db_freed_in_flight) {
			ASSERT(db->db_blkid != DMU_BONUS_BLKID);
			/* we were freed while filling */
			/* XXX dbuf_undirty? */
			bzero(db->db.db_data, db->db.db_size);
			db->db_freed_in_flight = FALSE;
		}
		db->db_state = DB_CACHED;
		cv_broadcast(&db->db_changed);
	}
	mutex_exit(&db->db_mtx);
}

void
dmu_buf_write_embedded(dmu_buf_t *dbuf, void *data,
    bp_embedded_type_t etype, enum zio_compress comp,
    int uncompressed_size, int compressed_size, int byteorder,
    dmu_tx_t *tx)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf;
	struct dirty_leaf *dl;
	dmu_object_type_t type;

	DB_DNODE_ENTER(db);
	type = DB_DNODE(db)->dn_type;
	DB_DNODE_EXIT(db);

	ASSERT0(db->db_level);
	ASSERT(db->db_blkid != DMU_BONUS_BLKID);

	dmu_buf_will_not_fill(dbuf, tx);

	ASSERT3U(db->db_last_dirty->dr_txg, ==, tx->tx_txg);
	dl = &db->db_last_dirty->dt.dl;
	encode_embedded_bp_compressed(&dl->dr_overridden_by,
	    data, comp, uncompressed_size, compressed_size);
	BPE_SET_ETYPE(&dl->dr_overridden_by, etype);
	BP_SET_TYPE(&dl->dr_overridden_by, type);
	BP_SET_LEVEL(&dl->dr_overridden_by, 0);
	BP_SET_BYTEORDER(&dl->dr_overridden_by, byteorder);

	dl->dr_override_state = DR_OVERRIDDEN;
	dl->dr_overridden_by.blk_birth = db->db_last_dirty->dr_txg;
}

/*
 * Directly assign a provided arc buf to a given dbuf if it's not referenced
 * by anybody except our caller. Otherwise copy arcbuf's contents to dbuf.
 */
void
dbuf_assign_arcbuf(dmu_buf_impl_t *db, arc_buf_t *buf, dmu_tx_t *tx)
{
	ASSERT(!refcount_is_zero(&db->db_holds));
	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
	ASSERT(db->db_level == 0);
	ASSERT(DBUF_GET_BUFC_TYPE(db) == ARC_BUFC_DATA);
	ASSERT(buf != NULL);
	ASSERT(arc_buf_size(buf) == db->db.db_size);
	ASSERT(tx->tx_txg != 0);

	arc_return_buf(buf, db);
	ASSERT(arc_released(buf));

	mutex_enter(&db->db_mtx);

	while (db->db_state == DB_READ || db->db_state == DB_FILL)
		cv_wait(&db->db_changed, &db->db_mtx);

	ASSERT(db->db_state == DB_CACHED || db->db_state == DB_UNCACHED);

	if (db->db_state == DB_CACHED &&
	    refcount_count(&db->db_holds) - 1 > db->db_dirtycnt) {
		mutex_exit(&db->db_mtx);
		(void) dbuf_dirty(db, tx);
		bcopy(buf->b_data, db->db.db_data, db->db.db_size);
		VERIFY(arc_buf_remove_ref(buf, db));
		xuio_stat_wbuf_copied();
		return;
	}

	xuio_stat_wbuf_nocopy();
	if (db->db_state == DB_CACHED) {
		dbuf_dirty_record_t *dr = db->db_last_dirty;

		ASSERT(db->db_buf != NULL);
		if (dr != NULL && dr->dr_txg == tx->tx_txg) {
			ASSERT(dr->dt.dl.dr_data == db->db_buf);
			if (!arc_released(db->db_buf)) {
				ASSERT(dr->dt.dl.dr_override_state ==
				    DR_OVERRIDDEN);
				arc_release(db->db_buf, db);
			}
			dr->dt.dl.dr_data = buf;
			VERIFY(arc_buf_remove_ref(db->db_buf, db));
		} else if (dr == NULL || dr->dt.dl.dr_data != db->db_buf) {
			arc_release(db->db_buf, db);
			VERIFY(arc_buf_remove_ref(db->db_buf, db));
		}
		db->db_buf = NULL;
	}
	ASSERT(db->db_buf == NULL);
	dbuf_set_data(db, buf);
	db->db_state = DB_FILL;
	mutex_exit(&db->db_mtx);
	(void) dbuf_dirty(db, tx);
	dmu_buf_fill_done(&db->db, tx);
}

/*
 * "Clear" the contents of this dbuf.  This will mark the dbuf
 * EVICTING and clear *most* of its references.  Unfortunately,
 * when we are not holding the dn_dbufs_mtx, we can't clear the
 * entry in the dn_dbufs list.  We have to wait until dbuf_destroy()
 * in this case.  For callers from the DMU we will usually see:
 *	dbuf_clear()->arc_clear_callback()->dbuf_do_evict()->dbuf_destroy()
 * For the arc callback, we will usually see:
 *	dbuf_do_evict()->dbuf_clear();dbuf_destroy()
 * Sometimes, though, we will get a mix of these two:
 *	DMU: dbuf_clear()->arc_clear_callback()
 *	ARC: dbuf_do_evict()->dbuf_destroy()
 *
 * This routine will dissociate the dbuf from the arc, by calling
 * arc_clear_callback(), but will not evict the data from the ARC.
 */
void
dbuf_clear(dmu_buf_impl_t *db)
{
	dnode_t *dn;
	dmu_buf_impl_t *parent = db->db_parent;
	dmu_buf_impl_t *dndb;
	boolean_t dbuf_gone = B_FALSE;

	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(refcount_is_zero(&db->db_holds));

	dbuf_evict_user(db);

	if (db->db_state == DB_CACHED) {
		ASSERT(db->db.db_data != NULL);
		if (db->db_blkid == DMU_BONUS_BLKID) {
			zio_buf_free(db->db.db_data, DN_MAX_BONUSLEN);
			arc_space_return(DN_MAX_BONUSLEN, ARC_SPACE_OTHER);
		}
		db->db.db_data = NULL;
		db->db_state = DB_UNCACHED;
	}

	ASSERT(db->db_state == DB_UNCACHED || db->db_state == DB_NOFILL);
	ASSERT(db->db_data_pending == NULL);

	db->db_state = DB_EVICTING;
	db->db_blkptr = NULL;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	dndb = dn->dn_dbuf;
	if (db->db_blkid != DMU_BONUS_BLKID && MUTEX_HELD(&dn->dn_dbufs_mtx)) {
		avl_remove(&dn->dn_dbufs, db);
		(void) atomic_dec_32_nv(&dn->dn_dbufs_count);
		membar_producer();
		DB_DNODE_EXIT(db);
		/*
		 * Decrementing the dbuf count means that the hold corresponding
		 * to the removed dbuf is no longer discounted in dnode_move(),
		 * so the dnode cannot be moved until after we release the hold.
		 * The membar_producer() ensures visibility of the decremented
		 * value in dnode_move(), since DB_DNODE_EXIT doesn't actually
		 * release any lock.
		 */
		dnode_rele(dn, db);
		db->db_dnode_handle = NULL;
	} else {
		DB_DNODE_EXIT(db);
	}

	if (db->db_buf)
		dbuf_gone = arc_clear_callback(db->db_buf);

	if (!dbuf_gone)
		mutex_exit(&db->db_mtx);

	/*
	 * If this dbuf is referenced from an indirect dbuf,
	 * decrement the ref count on the indirect dbuf.
	 */
	if (parent && parent != dndb)
		dbuf_rele(parent, db);
}

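/*
 * Locate the parent dbuf and block pointer that reference the given
 * block.  Returns ENOENT if the block has no parent yet, i.e. it lies
 * beyond the allocated levels or past dn_maxblkid.
 */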
static int
dbuf_findbp(dnode_t *dn, int level, uint64_t blkid, int fail_sparse,
    dmu_buf_impl_t **parentp, blkptr_t **bpp)
{
	int nlevels, epbs;

	*parentp = NULL;
	*bpp = NULL;

	ASSERT(blkid != DMU_BONUS_BLKID);

	if (blkid == DMU_SPILL_BLKID) {
		mutex_enter(&dn->dn_mtx);
		if (dn->dn_have_spill &&
		    (dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR))
			*bpp = &dn->dn_phys->dn_spill;
		else
			*bpp = NULL;
		dbuf_add_ref(dn->dn_dbuf, NULL);
		*parentp = dn->dn_dbuf;
		mutex_exit(&dn->dn_mtx);
		return (0);
	}

	if (dn->dn_phys->dn_nlevels == 0)
		nlevels = 1;
	else
		nlevels = dn->dn_phys->dn_nlevels;

	epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;

	ASSERT3U(level * epbs, <, 64);
	ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
	if (level >= nlevels ||
	    (blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs)))) {
		/* the buffer has no parent yet */
		return (SET_ERROR(ENOENT));
	} else if (level < nlevels-1) {
		/* this block is referenced from an indirect block */
		int err = dbuf_hold_impl(dn, level+1,
		    blkid >> epbs, fail_sparse, NULL, parentp);
		if (err)
			return (err);
		err = dbuf_read(*parentp, NULL,
		    (DB_RF_HAVESTRUCT | DB_RF_NOPREFETCH | DB_RF_CANFAIL));
		if (err) {
			dbuf_rele(*parentp, NULL);
			*parentp = NULL;
			return (err);
		}
		*bpp = ((blkptr_t *)(*parentp)->db.db_data) +
		    (blkid & ((1ULL << epbs) - 1));
		return (0);
	} else {
		/* the block is referenced from the dnode */
		ASSERT3U(level, ==, nlevels-1);
		ASSERT(dn->dn_phys->dn_nblkptr == 0 ||
		    blkid < dn->dn_phys->dn_nblkptr);
		if (dn->dn_dbuf) {
			dbuf_add_ref(dn->dn_dbuf, NULL);
			*parentp = dn->dn_dbuf;
		}
		*bpp = &dn->dn_phys->dn_blkptr[blkid];
		return (0);
	}
}

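/*
 * Allocate and initialize a new dbuf and insert it into the hash table
 * and the dnode's dbuf list.  If another thread won the race to insert
 * an equal dbuf, return that one instead.
 */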
static dmu_buf_impl_t *
dbuf_create(dnode_t *dn, uint8_t level, uint64_t blkid,
    dmu_buf_impl_t *parent, blkptr_t *blkptr)
{
	objset_t *os = dn->dn_objset;
	dmu_buf_impl_t *db, *odb;

	ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
	ASSERT(dn->dn_type != DMU_OT_NONE);

	db = kmem_cache_alloc(dbuf_cache, KM_SLEEP);

	db->db_objset = os;
	db->db.db_object = dn->dn_object;
	db->db_level = level;
	db->db_blkid = blkid;
	db->db_last_dirty = NULL;
	db->db_dirtycnt = 0;
	db->db_dnode_handle = dn->dn_handle;
	db->db_parent = parent;
	db->db_blkptr = blkptr;

	db->db_user_ptr = NULL;
	db->db_user_data_ptr_ptr = NULL;
	db->db_evict_func = NULL;
	db->db_immediate_evict = 0;
	db->db_freed_in_flight = 0;

	if (blkid == DMU_BONUS_BLKID) {
		ASSERT3P(parent, ==, dn->dn_dbuf);
		db->db.db_size = DN_MAX_BONUSLEN -
		    (dn->dn_nblkptr-1) * sizeof (blkptr_t);
		ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
		db->db.db_offset = DMU_BONUS_BLKID;
		db->db_state = DB_UNCACHED;
		/* the bonus dbuf is not placed in the hash table */
		arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER);
		return (db);
	} else if (blkid == DMU_SPILL_BLKID) {
		db->db.db_size = (blkptr != NULL) ?
		    BP_GET_LSIZE(blkptr) : SPA_MINBLOCKSIZE;
		db->db.db_offset = 0;
	} else {
		int blocksize =
		    db->db_level ? 1 << dn->dn_indblkshift : dn->dn_datablksz;
		db->db.db_size = blocksize;
		db->db.db_offset = db->db_blkid * blocksize;
	}

	/*
	 * Hold the dn_dbufs_mtx while we get the new dbuf
	 * in the hash table *and* added to the dbufs list.
	 * This prevents a possible deadlock with someone
	 * trying to look up this dbuf before it's added to the
	 * dn_dbufs list.
	 */
	mutex_enter(&dn->dn_dbufs_mtx);
	db->db_state = DB_EVICTING;
	if ((odb = dbuf_hash_insert(db)) != NULL) {
		/* someone else inserted it first */
		kmem_cache_free(dbuf_cache, db);
		mutex_exit(&dn->dn_dbufs_mtx);
		return (odb);
	}
	avl_add(&dn->dn_dbufs, db);
	if (db->db_level == 0 && db->db_blkid >=
	    dn->dn_unlisted_l0_blkid)
		dn->dn_unlisted_l0_blkid = db->db_blkid + 1;
	db->db_state = DB_UNCACHED;
	mutex_exit(&dn->dn_dbufs_mtx);
	arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER);

	if (parent && parent != dn->dn_dbuf)
		dbuf_add_ref(parent, db);

	ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT ||
	    refcount_count(&dn->dn_holds) > 0);
	(void) refcount_add(&dn->dn_holds, db);
	(void) atomic_inc_32_nv(&dn->dn_dbufs_count);

	dprintf_dbuf(db, "db=%p\n", db);

	return (db);
}

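/*
 * ARC eviction callback (registered in dbuf_set_data()): tear down the
 * dbuf when its backing arc_buf goes away.
 */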
static int
dbuf_do_evict(void *private)
{
	dmu_buf_impl_t *db = private;

	if (!MUTEX_HELD(&db->db_mtx))
		mutex_enter(&db->db_mtx);

	ASSERT(refcount_is_zero(&db->db_holds));

	if (db->db_state != DB_EVICTING) {
		ASSERT(db->db_state == DB_CACHED);
		DBUF_VERIFY(db);
		db->db_buf = NULL;
		dbuf_evict(db);
	} else {
		mutex_exit(&db->db_mtx);
		dbuf_destroy(db);
	}
	return (0);
}

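/*
 * Free a dbuf that has no more holds: unlink it from its dnode and the
 * hash table and return it to the kmem cache.
 */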
1811static void
1812dbuf_destroy(dmu_buf_impl_t *db)
1813{
1814	ASSERT(refcount_is_zero(&db->db_holds));
1815
1816	if (db->db_blkid != DMU_BONUS_BLKID) {
1817		/*
1818		 * If this dbuf is still on the dn_dbufs list,
1819		 * remove it from that list.
1820		 */
1821		if (db->db_dnode_handle != NULL) {
1822			dnode_t *dn;
1823
1824			DB_DNODE_ENTER(db);
1825			dn = DB_DNODE(db);
1826			mutex_enter(&dn->dn_dbufs_mtx);
1827			avl_remove(&dn->dn_dbufs, db);
1828			(void) atomic_dec_32_nv(&dn->dn_dbufs_count);
1829			mutex_exit(&dn->dn_dbufs_mtx);
1830			DB_DNODE_EXIT(db);
1831			/*
1832			 * Decrementing the dbuf count means that the hold
1833			 * corresponding to the removed dbuf is no longer
1834			 * discounted in dnode_move(), so the dnode cannot be
1835			 * moved until after we release the hold.
1836			 */
1837			dnode_rele(dn, db);
1838			db->db_dnode_handle = NULL;
1839		}
1840		dbuf_hash_remove(db);
1841	}
1842	db->db_parent = NULL;
1843	db->db_buf = NULL;
1844
1845	ASSERT(db->db.db_data == NULL);
1846	ASSERT(db->db_hash_next == NULL);
1847	ASSERT(db->db_blkptr == NULL);
1848	ASSERT(db->db_data_pending == NULL);
1849
1850	kmem_cache_free(dbuf_cache, db);
1851	arc_space_return(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER);
1852}
1853
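/*
 * Issue a speculative, asynchronous read of the given level-0 block so
 * that a later access is likely to hit the ARC.  No dbuf hold is
 * returned, and failures are ignored (ZIO_FLAG_CANFAIL |
 * ZIO_FLAG_SPECULATIVE).
 */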
1854void
1855dbuf_prefetch(dnode_t *dn, uint64_t blkid, zio_priority_t prio)
1856{
1857	dmu_buf_impl_t *db = NULL;
1858	blkptr_t *bp = NULL;
1859
1860	ASSERT(blkid != DMU_BONUS_BLKID);
1861	ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
1862
1863	if (dnode_block_freed(dn, blkid))
1864		return;
1865
1866	/* dbuf_find() returns with db_mtx held */
1867	if ((db = dbuf_find(dn, 0, blkid)) != NULL) {
1868		/*
1869		 * This dbuf is already in the cache.  We assume that
1870		 * it is already CACHED, or else about to be either
1871		 * read or filled.
1872		 */
1873		mutex_exit(&db->db_mtx);
1874		return;
1875	}
1876
1877	if (dbuf_findbp(dn, 0, blkid, TRUE, &db, &bp) == 0) {
1878		if (bp && !BP_IS_HOLE(bp) && !BP_IS_EMBEDDED(bp)) {
1879			dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
1880			uint32_t aflags = ARC_NOWAIT | ARC_PREFETCH;
1881			zbookmark_phys_t zb;
1882
1883			SET_BOOKMARK(&zb, ds ? ds->ds_object : DMU_META_OBJSET,
1884			    dn->dn_object, 0, blkid);
1885
1886			(void) arc_read(NULL, dn->dn_objset->os_spa,
1887			    bp, NULL, NULL, prio,
1888			    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
1889			    &aflags, &zb);
1890		}
1891		if (db)
1892			dbuf_rele(db, NULL);
1893	}
1894}
1895
1896/*
1897 * Returns with db_holds incremented, and db_mtx not held.
1898 * Note: dn_struct_rwlock must be held.
1899 */
1900int
1901dbuf_hold_impl(dnode_t *dn, uint8_t level, uint64_t blkid, int fail_sparse,
1902    void *tag, dmu_buf_impl_t **dbp)
1903{
1904	dmu_buf_impl_t *db, *parent = NULL;
1905
1906	ASSERT(blkid != DMU_BONUS_BLKID);
1907	ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
1908	ASSERT3U(dn->dn_nlevels, >, level);
1909
1910	*dbp = NULL;
1911top:
1912	/* dbuf_find() returns with db_mtx held */
1913	db = dbuf_find(dn, level, blkid);
1914
1915	if (db == NULL) {
1916		blkptr_t *bp = NULL;
1917		int err;
1918
1919		ASSERT3P(parent, ==, NULL);
1920		err = dbuf_findbp(dn, level, blkid, fail_sparse, &parent, &bp);
1921		if (fail_sparse) {
1922			if (err == 0 && bp && BP_IS_HOLE(bp))
1923				err = SET_ERROR(ENOENT);
1924			if (err) {
1925				if (parent)
1926					dbuf_rele(parent, NULL);
1927				return (err);
1928			}
1929		}
1930		if (err && err != ENOENT)
1931			return (err);
1932		db = dbuf_create(dn, level, blkid, parent, bp);
1933	}
1934
1935	if (db->db_buf && refcount_is_zero(&db->db_holds)) {
1936		arc_buf_add_ref(db->db_buf, db);
1937		if (db->db_buf->b_data == NULL) {
1938			dbuf_clear(db);
1939			if (parent) {
1940				dbuf_rele(parent, NULL);
1941				parent = NULL;
1942			}
1943			goto top;
1944		}
1945		ASSERT3P(db->db.db_data, ==, db->db_buf->b_data);
1946	}
1947
1948	ASSERT(db->db_buf == NULL || arc_referenced(db->db_buf));
1949
1950	/*
1951	 * If this buffer is currently syncing out, and we are
1952	 * still referencing it from db_data, we need to make a copy
1953	 * of it in case we decide we want to dirty it again in this txg.
1954	 */
1955	if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
1956	    dn->dn_object != DMU_META_DNODE_OBJECT &&
1957	    db->db_state == DB_CACHED && db->db_data_pending) {
1958		dbuf_dirty_record_t *dr = db->db_data_pending;
1959
1960		if (dr->dt.dl.dr_data == db->db_buf) {
1961			arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
1962
1963			dbuf_set_data(db,
1964			    arc_buf_alloc(dn->dn_objset->os_spa,
1965			    db->db.db_size, db, type));
1966			bcopy(dr->dt.dl.dr_data->b_data, db->db.db_data,
1967			    db->db.db_size);
1968		}
1969	}
1970
1971	(void) refcount_add(&db->db_holds, tag);
1972	dbuf_update_data(db);
1973	DBUF_VERIFY(db);
1974	mutex_exit(&db->db_mtx);
1975
1976	/* NOTE: we can't rele the parent until after we drop the db_mtx */
1977	if (parent)
1978		dbuf_rele(parent, NULL);
1979
1980	ASSERT3P(DB_DNODE(db), ==, dn);
1981	ASSERT3U(db->db_blkid, ==, blkid);
1982	ASSERT3U(db->db_level, ==, level);
1983	*dbp = db;
1984
1985	return (0);
1986}
1987
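/*
 * Convenience wrappers around dbuf_hold_impl() that return NULL instead
 * of an error code.  A typical caller pattern, sketched here for
 * illustration only (with dn_struct_rwlock held, per dbuf_hold_impl()):
 *
 *	dmu_buf_impl_t *db = dbuf_hold(dn, blkid, FTAG);
 *	if (db != NULL) {
 *		(void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED);
 *		... use db->db.db_data ...
 *		dbuf_rele(db, FTAG);
 *	}
 */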
1988dmu_buf_impl_t *
1989dbuf_hold(dnode_t *dn, uint64_t blkid, void *tag)
1990{
1991	dmu_buf_impl_t *db;
1992	int err = dbuf_hold_impl(dn, 0, blkid, FALSE, tag, &db);
1993	return (err ? NULL : db);
1994}
1995
1996dmu_buf_impl_t *
1997dbuf_hold_level(dnode_t *dn, int level, uint64_t blkid, void *tag)
1998{
1999	dmu_buf_impl_t *db;
2000	int err = dbuf_hold_impl(dn, level, blkid, FALSE, tag, &db);
2001	return (err ? NULL : db);
2002}
2003
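/* Create the in-core dbuf that backs this dnode's bonus buffer. */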
2004void
2005dbuf_create_bonus(dnode_t *dn)
2006{
2007	ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));
2008
2009	ASSERT(dn->dn_bonus == NULL);
2010	dn->dn_bonus = dbuf_create(dn, 0, DMU_BONUS_BLKID, dn->dn_dbuf, NULL);
2011}
2012
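/*
 * Set the block size of a dnode's spill block.  A size of 0 means
 * SPA_MINBLOCKSIZE; anything else is clamped to SPA_MAXBLOCKSIZE and
 * rounded up to a multiple of SPA_MINBLOCKSIZE (512), so, for example,
 * a request of 3000 bytes becomes P2ROUNDUP(3000, 512) = 3072.
 * Returns ENOTSUP if the dbuf is not a spill block.
 */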
2013int
2014dbuf_spill_set_blksz(dmu_buf_t *db_fake, uint64_t blksz, dmu_tx_t *tx)
2015{
2016	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2017	dnode_t *dn;
2018
2019	if (db->db_blkid != DMU_SPILL_BLKID)
2020		return (SET_ERROR(ENOTSUP));
2021	if (blksz == 0)
2022		blksz = SPA_MINBLOCKSIZE;
2023	if (blksz > SPA_MAXBLOCKSIZE)
2024		blksz = SPA_MAXBLOCKSIZE;
2025	else
2026		blksz = P2ROUNDUP(blksz, SPA_MINBLOCKSIZE);
2027
2028	DB_DNODE_ENTER(db);
2029	dn = DB_DNODE(db);
2030	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
2031	dbuf_new_size(db, blksz, tx);
2032	rw_exit(&dn->dn_struct_rwlock);
2033	DB_DNODE_EXIT(db);
2034
2035	return (0);
2036}
2037
2038void
2039dbuf_rm_spill(dnode_t *dn, dmu_tx_t *tx)
2040{
2041	dbuf_free_range(dn, DMU_SPILL_BLKID, DMU_SPILL_BLKID, tx);
2042}
2043
2044#pragma weak dmu_buf_add_ref = dbuf_add_ref
2045void
2046dbuf_add_ref(dmu_buf_impl_t *db, void *tag)
2047{
2048	int64_t holds = refcount_add(&db->db_holds, tag);
2049	ASSERT(holds > 1);
2050}
2051
2052/*
2053 * If you call dbuf_rele() you had better not be referencing the dnode handle
2054 * unless you have some other direct or indirect hold on the dnode. (An indirect
2055 * hold is a hold on one of the dnode's dbufs, including the bonus buffer.)
2056 * Without that, the dbuf_rele() could lead to a dnode_rele() followed by the
2057 * dnode's parent dbuf evicting its dnode handles.
2058 */
2059void
2060dbuf_rele(dmu_buf_impl_t *db, void *tag)
2061{
2062	mutex_enter(&db->db_mtx);
2063	dbuf_rele_and_unlock(db, tag);
2064}
2065
2066void
2067dmu_buf_rele(dmu_buf_t *db, void *tag)
2068{
2069	dbuf_rele((dmu_buf_impl_t *)db, tag);
2070}
2071
2072/*
2073 * dbuf_rele() for an already-locked dbuf.  This is necessary to allow
2074 * db_dirtycnt and db_holds to be updated atomically.
2075 */
2076void
2077dbuf_rele_and_unlock(dmu_buf_impl_t *db, void *tag)
2078{
2079	int64_t holds;
2080
2081	ASSERT(MUTEX_HELD(&db->db_mtx));
2082	DBUF_VERIFY(db);
2083
2084	/*
2085	 * Remove the reference to the dbuf before removing its hold on the
2086	 * dnode so we can guarantee in dnode_move() that a referenced bonus
2087	 * buffer has a corresponding dnode hold.
2088	 */
2089	holds = refcount_remove(&db->db_holds, tag);
2090	ASSERT(holds >= 0);
2091
2092	/*
2093	 * We can't freeze indirects if there is a possibility that they
2094	 * may be modified in the current syncing context.
2095	 */
2096	if (db->db_buf && holds == (db->db_level == 0 ? db->db_dirtycnt : 0))
2097		arc_buf_freeze(db->db_buf);
2098
2099	if (holds == db->db_dirtycnt &&
2100	    db->db_level == 0 && db->db_immediate_evict)
2101		dbuf_evict_user(db);
2102
2103	if (holds == 0) {
2104		if (db->db_blkid == DMU_BONUS_BLKID) {
2105			mutex_exit(&db->db_mtx);
2106
2107			/*
2108			 * If the dnode moves here, we cannot cross this barrier
2109			 * until the move completes.
2110			 */
2111			DB_DNODE_ENTER(db);
2112			(void) atomic_dec_32_nv(&DB_DNODE(db)->dn_dbufs_count);
2113			DB_DNODE_EXIT(db);
2114			/*
2115			 * The bonus buffer's dnode hold is no longer discounted
2116			 * in dnode_move(). The dnode cannot move until after
2117			 * the dnode_rele().
2118			 */
2119			dnode_rele(DB_DNODE(db), db);
2120		} else if (db->db_buf == NULL) {
2121			/*
2122			 * This is a special case: we never associated this
2123			 * dbuf with any data allocated from the ARC.
2124			 */
2125			ASSERT(db->db_state == DB_UNCACHED ||
2126			    db->db_state == DB_NOFILL);
2127			dbuf_evict(db);
2128		} else if (arc_released(db->db_buf)) {
2129			arc_buf_t *buf = db->db_buf;
2130			/*
2131			 * This dbuf has anonymous data associated with it.
2132			 */
2133			dbuf_set_data(db, NULL);
2134			VERIFY(arc_buf_remove_ref(buf, db));
2135			dbuf_evict(db);
2136		} else {
2137			VERIFY(!arc_buf_remove_ref(db->db_buf, db));
2138
2139			/*
2140			 * A dbuf will be eligible for eviction if either the
2141			 * 'primarycache' property excludes it or a duplicate
2142			 * copy of this buffer is already cached in the arc.
2143			 *
2144			 * In the case of the 'primarycache' property, a
2145			 * buffer is considered for eviction if it does not
2146			 * match the caching criteria set in the property.
2147			 *
2148			 * To decide if our buffer is considered a
2149			 * duplicate, we must call into the arc to determine
2150			 * if multiple buffers are referencing the same
2151			 * block on-disk. If so, then we simply evict
2152			 * ourselves.
2153			 */
2154			if (!DBUF_IS_CACHEABLE(db)) {
2155				if (db->db_blkptr != NULL &&
2156				    !BP_IS_HOLE(db->db_blkptr) &&
2157				    !BP_IS_EMBEDDED(db->db_blkptr)) {
2158					spa_t *spa =
2159					    dmu_objset_spa(db->db_objset);
2160					blkptr_t bp = *db->db_blkptr;
2161					dbuf_clear(db);
2162					arc_freed(spa, &bp);
2163				} else {
2164					dbuf_clear(db);
2165				}
2166			} else if (arc_buf_eviction_needed(db->db_buf)) {
2167				dbuf_clear(db);
2168			} else {
2169				mutex_exit(&db->db_mtx);
2170			}
2171		}
2172	} else {
2173		mutex_exit(&db->db_mtx);
2174	}
2175}
2176
2177#pragma weak dmu_buf_refcount = dbuf_refcount
2178uint64_t
2179dbuf_refcount(dmu_buf_impl_t *db)
2180{
2181	return (refcount_count(&db->db_holds));
2182}
2183
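/*
 * Attach user data and an eviction callback to a dbuf.  Both are thin
 * wrappers around dmu_buf_update_user(); the "_ie" variant additionally
 * marks the dbuf for immediate eviction of its user data when the last
 * hold is released.  user_ptr and evict_func must be NULL or non-NULL
 * together.
 */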
2184void *
2185dmu_buf_set_user(dmu_buf_t *db_fake, void *user_ptr, void *user_data_ptr_ptr,
2186    dmu_buf_evict_func_t *evict_func)
2187{
2188	return (dmu_buf_update_user(db_fake, NULL, user_ptr,
2189	    user_data_ptr_ptr, evict_func));
2190}
2191
2192void *
2193dmu_buf_set_user_ie(dmu_buf_t *db_fake, void *user_ptr, void *user_data_ptr_ptr,
2194    dmu_buf_evict_func_t *evict_func)
2195{
2196	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2197
2198	db->db_immediate_evict = TRUE;
2199	return (dmu_buf_update_user(db_fake, NULL, user_ptr,
2200	    user_data_ptr_ptr, evict_func));
2201}
2202
2203void *
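/*
 * Replace the dbuf's user data, but only if the current user pointer
 * equals old_user_ptr (a compare-and-swap under db_mtx).  Returns the
 * user pointer that was in place when we looked: old_user_ptr itself on
 * success, or the competing value if someone else got there first.
 */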
2204dmu_buf_update_user(dmu_buf_t *db_fake, void *old_user_ptr, void *user_ptr,
2205    void *user_data_ptr_ptr, dmu_buf_evict_func_t *evict_func)
2206{
2207	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2208	ASSERT(db->db_level == 0);
2209
2210	ASSERT((user_ptr == NULL) == (evict_func == NULL));
2211
2212	mutex_enter(&db->db_mtx);
2213
2214	if (db->db_user_ptr == old_user_ptr) {
2215		db->db_user_ptr = user_ptr;
2216		db->db_user_data_ptr_ptr = user_data_ptr_ptr;
2217		db->db_evict_func = evict_func;
2218
2219		dbuf_update_data(db);
2220	} else {
2221		old_user_ptr = db->db_user_ptr;
2222	}
2223
2224	mutex_exit(&db->db_mtx);
2225	return (old_user_ptr);
2226}
2227
2228void *
2229dmu_buf_get_user(dmu_buf_t *db_fake)
2230{
2231	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2232	ASSERT(!refcount_is_zero(&db->db_holds));
2233
2234	return (db->db_user_ptr);
2235}
2236
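/*
 * Report whether freeing this dbuf's on-disk block would actually return
 * space to the pool, i.e. whether the block is freeable rather than
 * pinned by an earlier snapshot (see dsl_dataset_block_freeable()).
 */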
2237boolean_t
2238dmu_buf_freeable(dmu_buf_t *dbuf)
2239{
2240	boolean_t res = B_FALSE;
2241	dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf;
2242
2243	if (db->db_blkptr)
2244		res = dsl_dataset_block_freeable(db->db_objset->os_dsl_dataset,
2245		    db->db_blkptr, db->db_blkptr->blk_birth);
2246
2247	return (res);
2248}
2249
2250blkptr_t *
2251dmu_buf_get_blkptr(dmu_buf_t *db)
2252{
2253	dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
2254	return (dbi->db_blkptr);
2255}
2256
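/*
 * Make sure db_blkptr points at the correct location: either a blkptr
 * embedded in the dnode itself, or a slot in the parent indirect block,
 * holding (or re-holding) the parent dbuf as needed.  Runs in syncing
 * context.
 */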
2257static void
2258dbuf_check_blkptr(dnode_t *dn, dmu_buf_impl_t *db)
2259{
2260	/* ASSERT(dmu_tx_is_syncing(tx)) */
2261	ASSERT(MUTEX_HELD(&db->db_mtx));
2262
2263	if (db->db_blkptr != NULL)
2264		return;
2265
2266	if (db->db_blkid == DMU_SPILL_BLKID) {
2267		db->db_blkptr = &dn->dn_phys->dn_spill;
2268		BP_ZERO(db->db_blkptr);
2269		return;
2270	}
2271	if (db->db_level == dn->dn_phys->dn_nlevels-1) {
2272		/*
2273		 * This buffer was allocated at a time when there were
2274		 * no blkptrs available in the dnode, or it was
2275		 * inappropriate to hook it in (i.e., nlevels mismatch).
2276		 */
2277		ASSERT(db->db_blkid < dn->dn_phys->dn_nblkptr);
2278		ASSERT(db->db_parent == NULL);
2279		db->db_parent = dn->dn_dbuf;
2280		db->db_blkptr = &dn->dn_phys->dn_blkptr[db->db_blkid];
2281		DBUF_VERIFY(db);
2282	} else {
2283		dmu_buf_impl_t *parent = db->db_parent;
2284		int epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
2285
2286		ASSERT(dn->dn_phys->dn_nlevels > 1);
2287		if (parent == NULL) {
2288			mutex_exit(&db->db_mtx);
2289			rw_enter(&dn->dn_struct_rwlock, RW_READER);
2290			(void) dbuf_hold_impl(dn, db->db_level+1,
2291			    db->db_blkid >> epbs, FALSE, db, &parent);
2292			rw_exit(&dn->dn_struct_rwlock);
2293			mutex_enter(&db->db_mtx);
2294			db->db_parent = parent;
2295		}
2296		db->db_blkptr = (blkptr_t *)parent->db.db_data +
2297		    (db->db_blkid & ((1ULL << epbs) - 1));
2298		DBUF_VERIFY(db);
2299	}
2300}
2301
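/*
 * Sync a dirty indirect block: issue its write (which creates dr_zio)
 * and then sync all of its dirty children, so their zios become children
 * of ours before we zio_nowait() it.
 */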
2302static void
2303dbuf_sync_indirect(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
2304{
2305	dmu_buf_impl_t *db = dr->dr_dbuf;
2306	dnode_t *dn;
2307	zio_t *zio;
2308
2309	ASSERT(dmu_tx_is_syncing(tx));
2310
2311	dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr);
2312
2313	mutex_enter(&db->db_mtx);
2314
2315	ASSERT(db->db_level > 0);
2316	DBUF_VERIFY(db);
2317
2318	/* Read the block if it hasn't been read yet. */
2319	if (db->db_buf == NULL) {
2320		mutex_exit(&db->db_mtx);
2321		(void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED);
2322		mutex_enter(&db->db_mtx);
2323	}
2324	ASSERT3U(db->db_state, ==, DB_CACHED);
2325	ASSERT(db->db_buf != NULL);
2326
2327	DB_DNODE_ENTER(db);
2328	dn = DB_DNODE(db);
2329	/* Indirect block size must match what the dnode thinks it is. */
2330	ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
2331	dbuf_check_blkptr(dn, db);
2332	DB_DNODE_EXIT(db);
2333
2334	/* Provide the pending dirty record to child dbufs */
2335	db->db_data_pending = dr;
2336
2337	mutex_exit(&db->db_mtx);
2338	dbuf_write(dr, db->db_buf, tx);
2339
2340	zio = dr->dr_zio;
2341	mutex_enter(&dr->dt.di.dr_mtx);
2342	dbuf_sync_list(&dr->dt.di.dr_children, tx);
2343	ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
2344	mutex_exit(&dr->dt.di.dr_mtx);
2345	zio_nowait(zio);
2346}
2347
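/*
 * Sync a dirty level-0 block.  Bonus buffers are copied straight into
 * the dnode; all other blocks are handed to dbuf_write(), with a private
 * copy of the data made first if the open-context buffer is still in
 * use.
 */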
2348static void
2349dbuf_sync_leaf(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
2350{
2351	arc_buf_t **datap = &dr->dt.dl.dr_data;
2352	dmu_buf_impl_t *db = dr->dr_dbuf;
2353	dnode_t *dn;
2354	objset_t *os;
2355	uint64_t txg = tx->tx_txg;
2356
2357	ASSERT(dmu_tx_is_syncing(tx));
2358
2359	dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr);
2360
2361	mutex_enter(&db->db_mtx);
2362	/*
2363	 * To be synced, we must be dirtied.  But we
2364	 * might have been freed after we were dirtied.
2365	 */
2366	if (db->db_state == DB_UNCACHED) {
2367		/* This buffer has been freed since it was dirtied */
2368		ASSERT(db->db.db_data == NULL);
2369	} else if (db->db_state == DB_FILL) {
2370		/* This buffer was freed and is now being re-filled */
2371		ASSERT(db->db.db_data != dr->dt.dl.dr_data);
2372	} else {
2373		ASSERT(db->db_state == DB_CACHED || db->db_state == DB_NOFILL);
2374	}
2375	DBUF_VERIFY(db);
2376
2377	DB_DNODE_ENTER(db);
2378	dn = DB_DNODE(db);
2379
2380	if (db->db_blkid == DMU_SPILL_BLKID) {
2381		mutex_enter(&dn->dn_mtx);
2382		dn->dn_phys->dn_flags |= DNODE_FLAG_SPILL_BLKPTR;
2383		mutex_exit(&dn->dn_mtx);
2384	}
2385
2386	/*
2387	 * If this is a bonus buffer, simply copy the bonus data into the
2388	 * dnode.  It will be written out when the dnode is synced (and it
2389	 * will be synced, since it must have been dirty for dbuf_sync to
2390	 * be called).
2391	 */
2392	if (db->db_blkid == DMU_BONUS_BLKID) {
2393		dbuf_dirty_record_t **drp;
2394
2395		ASSERT(*datap != NULL);
2396		ASSERT0(db->db_level);
2397		ASSERT3U(dn->dn_phys->dn_bonuslen, <=, DN_MAX_BONUSLEN);
2398		bcopy(*datap, DN_BONUS(dn->dn_phys), dn->dn_phys->dn_bonuslen);
2399		DB_DNODE_EXIT(db);
2400
2401		if (*datap != db->db.db_data) {
2402			zio_buf_free(*datap, DN_MAX_BONUSLEN);
2403			arc_space_return(DN_MAX_BONUSLEN, ARC_SPACE_OTHER);
2404		}
2405		db->db_data_pending = NULL;
2406		drp = &db->db_last_dirty;
2407		while (*drp != dr)
2408			drp = &(*drp)->dr_next;
2409		ASSERT(dr->dr_next == NULL);
2410		ASSERT(dr->dr_dbuf == db);
2411		*drp = dr->dr_next;
2412		if (dr->dr_dbuf->db_level != 0) {
2413			list_destroy(&dr->dt.di.dr_children);
2414			mutex_destroy(&dr->dt.di.dr_mtx);
2415		}
2416		kmem_free(dr, sizeof (dbuf_dirty_record_t));
2417		ASSERT(db->db_dirtycnt > 0);
2418		db->db_dirtycnt -= 1;
2419		dbuf_rele_and_unlock(db, (void *)(uintptr_t)txg);
2420		return;
2421	}
2422
2423	os = dn->dn_objset;
2424
2425	/*
2426	 * This function may have dropped the db_mtx lock allowing a dmu_sync
2427	 * operation to sneak in. As a result, we need to ensure that we
2428	 * don't check the dr_override_state until we have returned from
2429	 * dbuf_check_blkptr.
2430	 */
2431	dbuf_check_blkptr(dn, db);
2432
2433	/*
2434	 * If this buffer is in the middle of an immediate write,
2435	 * wait for the synchronous IO to complete.
2436	 */
2437	while (dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC) {
2438		ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);
2439		cv_wait(&db->db_changed, &db->db_mtx);
2440		ASSERT(dr->dt.dl.dr_override_state != DR_NOT_OVERRIDDEN);
2441	}
2442
2443	if (db->db_state != DB_NOFILL &&
2444	    dn->dn_object != DMU_META_DNODE_OBJECT &&
2445	    refcount_count(&db->db_holds) > 1 &&
2446	    dr->dt.dl.dr_override_state != DR_OVERRIDDEN &&
2447	    *datap == db->db_buf) {
2448		/*
2449		 * If this buffer is currently "in use" (i.e., there
2450		 * are active holds and db_data still references it),
2451		 * then make a copy before we start the write so that
2452		 * any modifications from the open txg will not leak
2453		 * into this write.
2454		 *
2455		 * NOTE: this copy does not need to be made for
2456		 * objects that are only modified in the syncing
2457		 * context (e.g. DMU_META_DNODE_OBJECT blocks).
2458		 */
2459		int blksz = arc_buf_size(*datap);
2460		arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
2461		*datap = arc_buf_alloc(os->os_spa, blksz, db, type);
2462		bcopy(db->db.db_data, (*datap)->b_data, blksz);
2463	}
2464	db->db_data_pending = dr;
2465
2466	mutex_exit(&db->db_mtx);
2467
2468	dbuf_write(dr, *datap, tx);
2469
2470	ASSERT(!list_link_active(&dr->dr_dirty_node));
2471	if (dn->dn_object == DMU_META_DNODE_OBJECT) {
2472		list_insert_tail(&dn->dn_dirty_records[txg&TXG_MASK], dr);
2473		DB_DNODE_EXIT(db);
2474	} else {
2475		/*
2476		 * Although zio_nowait() does not "wait for an IO", it does
2477		 * initiate the IO. If this is an empty write it seems plausible
2478		 * that the IO could actually be completed before the nowait
2479		 * returns. We need to DB_DNODE_EXIT() first in case
2480		 * zio_nowait() invalidates the dbuf.
2481		 */
2482		DB_DNODE_EXIT(db);
2483		zio_nowait(dr->dr_zio);
2484	}
2485}
2486
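/*
 * Sync every dirty record on the list, stopping if we reach records that
 * already have zios (the meta-dnode's), which the caller will zio_wait()
 * on itself.
 */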
2487void
2488dbuf_sync_list(list_t *list, dmu_tx_t *tx)
2489{
2490	dbuf_dirty_record_t *dr;
2491
2492	while ((dr = list_head(list)) != NULL) {
2493		if (dr->dr_zio != NULL) {
2494			/*
2495			 * If we find an already initialized zio then we
2496			 * are processing the meta-dnode, and we have finished.
2497			 * The dbufs for all dnodes are put back on the list
2498			 * during processing, so that we can zio_wait()
2499			 * these IOs after initiating all child IOs.
2500			 */
2501			ASSERT3U(dr->dr_dbuf->db.db_object, ==,
2502			    DMU_META_DNODE_OBJECT);
2503			break;
2504		}
2505		list_remove(list, dr);
2506		if (dr->dr_dbuf->db_level > 0)
2507			dbuf_sync_indirect(dr, tx);
2508		else
2509			dbuf_sync_leaf(dr, tx);
2510	}
2511}
2512
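/*
 * The "ready" callback for a dbuf write: the final block pointer is now
 * known, so charge the space delta to the dnode and recompute the bp's
 * fill count.
 */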
2513/* ARGSUSED */
2514static void
2515dbuf_write_ready(zio_t *zio, arc_buf_t *buf, void *vdb)
2516{
2517	dmu_buf_impl_t *db = vdb;
2518	dnode_t *dn;
2519	blkptr_t *bp = zio->io_bp;
2520	blkptr_t *bp_orig = &zio->io_bp_orig;
2521	spa_t *spa = zio->io_spa;
2522	int64_t delta;
2523	uint64_t fill = 0;
2524	int i;
2525
2526	ASSERT3P(db->db_blkptr, ==, bp);
2527
2528	DB_DNODE_ENTER(db);
2529	dn = DB_DNODE(db);
2530	delta = bp_get_dsize_sync(spa, bp) - bp_get_dsize_sync(spa, bp_orig);
2531	dnode_diduse_space(dn, delta - zio->io_prev_space_delta);
2532	zio->io_prev_space_delta = delta;
2533
2534	if (bp->blk_birth != 0) {
2535		ASSERT((db->db_blkid != DMU_SPILL_BLKID &&
2536		    BP_GET_TYPE(bp) == dn->dn_type) ||
2537		    (db->db_blkid == DMU_SPILL_BLKID &&
2538		    BP_GET_TYPE(bp) == dn->dn_bonustype) ||
2539		    BP_IS_EMBEDDED(bp));
2540		ASSERT(BP_GET_LEVEL(bp) == db->db_level);
2541	}
2542
2543	mutex_enter(&db->db_mtx);
2544
2545#ifdef ZFS_DEBUG
2546	if (db->db_blkid == DMU_SPILL_BLKID) {
2547		ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR);
2548		ASSERT(!(BP_IS_HOLE(db->db_blkptr)) &&
2549		    db->db_blkptr == &dn->dn_phys->dn_spill);
2550	}
2551#endif
2552
2553	if (db->db_level == 0) {
2554		mutex_enter(&dn->dn_mtx);
2555		if (db->db_blkid > dn->dn_phys->dn_maxblkid &&
2556		    db->db_blkid != DMU_SPILL_BLKID)
2557			dn->dn_phys->dn_maxblkid = db->db_blkid;
2558		mutex_exit(&dn->dn_mtx);
2559
2560		if (dn->dn_type == DMU_OT_DNODE) {
2561			dnode_phys_t *dnp = db->db.db_data;
2562			for (i = db->db.db_size >> DNODE_SHIFT; i > 0;
2563			    i--, dnp++) {
2564				if (dnp->dn_type != DMU_OT_NONE)
2565					fill++;
2566			}
2567		} else {
2568			if (BP_IS_HOLE(bp)) {
2569				fill = 0;
2570			} else {
2571				fill = 1;
2572			}
2573		}
2574	} else {
2575		blkptr_t *ibp = db->db.db_data;
2576		ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
2577		for (i = db->db.db_size >> SPA_BLKPTRSHIFT; i > 0; i--, ibp++) {
2578			if (BP_IS_HOLE(ibp))
2579				continue;
2580			fill += BP_GET_FILL(ibp);
2581		}
2582	}
2583	DB_DNODE_EXIT(db);
2584
2585	if (!BP_IS_EMBEDDED(bp))
2586		bp->blk_fill = fill;
2587
2588	mutex_exit(&db->db_mtx);
2589}
2590
2591/*
2592 * The SPA will call this callback several times for each zio - once
2593 * for every physical child i/o (zio->io_phys_children times).  This
2594 * allows the DMU to monitor the progress of each logical i/o.  For example,
2595 * there may be 2 copies of an indirect block, or many fragments of a RAID-Z
2596 * block.  There may be a long delay before all copies/fragments are completed,
2597 * so this callback allows us to retire dirty space gradually, as the physical
2598 * i/os complete.
2599 */
2600/* ARGSUSED */
2601static void
2602dbuf_write_physdone(zio_t *zio, arc_buf_t *buf, void *arg)
2603{
2604	dmu_buf_impl_t *db = arg;
2605	objset_t *os = db->db_objset;
2606	dsl_pool_t *dp = dmu_objset_pool(os);
2607	dbuf_dirty_record_t *dr;
2608	int delta = 0;
2609
2610	dr = db->db_data_pending;
2611	ASSERT3U(dr->dr_txg, ==, zio->io_txg);
2612
2613	/*
2614	 * The callback will be called io_phys_children times.  Retire one
2615	 * portion of our dirty space each time we are called.  Any rounding
2616	 * error will be cleaned up by dsl_pool_sync()'s call to
2617	 * dsl_pool_undirty_space().
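	 *
	 * For example (illustrative numbers only): a dirty record that
	 * accounted 131072 bytes and is written as 3 physical child i/os
	 * retires 131072 / 3 = 43690 bytes per call; the 2-byte remainder
	 * is swept up at sync time.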
2618	 */
2619	delta = dr->dr_accounted / zio->io_phys_children;
2620	dsl_pool_undirty_space(dp, delta, zio->io_txg);
2621}
2622
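/*
 * The "done" callback for a dbuf write: account for the block's birth
 * (and the death of the overwritten bp), detach and free the dirty
 * record, and release the hold that dirtying took on the dbuf.
 */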
2623/* ARGSUSED */
2624static void
2625dbuf_write_done(zio_t *zio, arc_buf_t *buf, void *vdb)
2626{
2627	dmu_buf_impl_t *db = vdb;
2628	blkptr_t *bp_orig = &zio->io_bp_orig;
2629	blkptr_t *bp = db->db_blkptr;
2630	objset_t *os = db->db_objset;
2631	dmu_tx_t *tx = os->os_synctx;
2632	dbuf_dirty_record_t **drp, *dr;
2633
2634	ASSERT0(zio->io_error);
2635	ASSERT(db->db_blkptr == bp);
2636
2637	/*
2638	 * For nopwrites and rewrites we ensure that the bp matches our
2639	 * original and bypass all the accounting.
2640	 */
2641	if (zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE)) {
2642		ASSERT(BP_EQUAL(bp, bp_orig));
2643	} else {
2644		dsl_dataset_t *ds = os->os_dsl_dataset;
2645		(void) dsl_dataset_block_kill(ds, bp_orig, tx, B_TRUE);
2646		dsl_dataset_block_born(ds, bp, tx);
2647	}
2648
2649	mutex_enter(&db->db_mtx);
2650
2651	DBUF_VERIFY(db);
2652
2653	drp = &db->db_last_dirty;
2654	while ((dr = *drp) != db->db_data_pending)
2655		drp = &dr->dr_next;
2656	ASSERT(!list_link_active(&dr->dr_dirty_node));
2657	ASSERT(dr->dr_dbuf == db);
2658	ASSERT(dr->dr_next == NULL);
2659	*drp = dr->dr_next;
2660
2661#ifdef ZFS_DEBUG
2662	if (db->db_blkid == DMU_SPILL_BLKID) {
2663		dnode_t *dn;
2664
2665		DB_DNODE_ENTER(db);
2666		dn = DB_DNODE(db);
2667		ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR);
2668		ASSERT(!(BP_IS_HOLE(db->db_blkptr)) &&
2669		    db->db_blkptr == &dn->dn_phys->dn_spill);
2670		DB_DNODE_EXIT(db);
2671	}
2672#endif
2673
2674	if (db->db_level == 0) {
2675		ASSERT(db->db_blkid != DMU_BONUS_BLKID);
2676		ASSERT(dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN);
2677		if (db->db_state != DB_NOFILL) {
2678			if (dr->dt.dl.dr_data != db->db_buf)
2679				VERIFY(arc_buf_remove_ref(dr->dt.dl.dr_data,
2680				    db));
2681			else if (!arc_released(db->db_buf))
2682				arc_set_callback(db->db_buf, dbuf_do_evict, db);
2683		}
2684	} else {
2685		dnode_t *dn;
2686
2687		DB_DNODE_ENTER(db);
2688		dn = DB_DNODE(db);
2689		ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
2690		ASSERT3U(db->db.db_size, ==, 1 << dn->dn_phys->dn_indblkshift);
2691		if (!BP_IS_HOLE(db->db_blkptr)) {
2692			int epbs =
2693			    dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
2694			ASSERT3U(db->db_blkid, <=,
2695			    dn->dn_phys->dn_maxblkid >> (db->db_level * epbs));
2696			ASSERT3U(BP_GET_LSIZE(db->db_blkptr), ==,
2697			    db->db.db_size);
2698			if (!arc_released(db->db_buf))
2699				arc_set_callback(db->db_buf, dbuf_do_evict, db);
2700		}
2701		DB_DNODE_EXIT(db);
2702		mutex_destroy(&dr->dt.di.dr_mtx);
2703		list_destroy(&dr->dt.di.dr_children);
2704	}
2705	kmem_free(dr, sizeof (dbuf_dirty_record_t));
2706
2707	cv_broadcast(&db->db_changed);
2708	ASSERT(db->db_dirtycnt > 0);
2709	db->db_dirtycnt -= 1;
2710	db->db_data_pending = NULL;
2711	dbuf_rele_and_unlock(db, (void *)(uintptr_t)tx->tx_txg);
2712}
2713
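/*
 * zio callback shims for writes that have no ARC buffer: NOFILL writes,
 * and writes whose bp was provided by open context (see the
 * DR_OVERRIDDEN case in dbuf_write()).  All funnel into
 * dbuf_write_ready()/dbuf_write_done().
 */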
2714static void
2715dbuf_write_nofill_ready(zio_t *zio)
2716{
2717	dbuf_write_ready(zio, NULL, zio->io_private);
2718}
2719
2720static void
2721dbuf_write_nofill_done(zio_t *zio)
2722{
2723	dbuf_write_done(zio, NULL, zio->io_private);
2724}
2725
2726static void
2727dbuf_write_override_ready(zio_t *zio)
2728{
2729	dbuf_dirty_record_t *dr = zio->io_private;
2730	dmu_buf_impl_t *db = dr->dr_dbuf;
2731
2732	dbuf_write_ready(zio, NULL, db);
2733}
2734
2735static void
2736dbuf_write_override_done(zio_t *zio)
2737{
2738	dbuf_dirty_record_t *dr = zio->io_private;
2739	dmu_buf_impl_t *db = dr->dr_dbuf;
2740	blkptr_t *obp = &dr->dt.dl.dr_overridden_by;
2741
2742	mutex_enter(&db->db_mtx);
2743	if (!BP_EQUAL(zio->io_bp, obp)) {
2744		if (!BP_IS_HOLE(obp))
2745			dsl_free(spa_get_dsl(zio->io_spa), zio->io_txg, obp);
2746		arc_release(dr->dt.dl.dr_data, db);
2747	}
2748	mutex_exit(&db->db_mtx);
2749
2750	dbuf_write_done(zio, NULL, db);
2751}
2752
2753/* Issue I/O to commit a dirty buffer to disk. */
2754static void
2755dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx)
2756{
2757	dmu_buf_impl_t *db = dr->dr_dbuf;
2758	dnode_t *dn;
2759	objset_t *os;
2760	dmu_buf_impl_t *parent = db->db_parent;
2761	uint64_t txg = tx->tx_txg;
2762	zbookmark_phys_t zb;
2763	zio_prop_t zp;
2764	zio_t *zio;
2765	int wp_flag = 0;
2766
2767	DB_DNODE_ENTER(db);
2768	dn = DB_DNODE(db);
2769	os = dn->dn_objset;
2770
2771	if (db->db_state != DB_NOFILL) {
2772		if (db->db_level > 0 || dn->dn_type == DMU_OT_DNODE) {
2773			/*
2774			 * Private object buffers are released here rather
2775			 * than in dbuf_dirty() since they are only modified
2776			 * in the syncing context and we don't want the
2777			 * overhead of making multiple copies of the data.
2778			 */
2779			if (BP_IS_HOLE(db->db_blkptr)) {
2780				arc_buf_thaw(data);
2781			} else {
2782				dbuf_release_bp(db);
2783			}
2784		}
2785	}
2786
2787	if (parent != dn->dn_dbuf) {
2788		/* Our parent is an indirect block. */
2789		/* We have a dirty parent that has been scheduled for write. */
2790		ASSERT(parent && parent->db_data_pending);
2791		/* Our parent's buffer is one level closer to the dnode. */
2792		ASSERT(db->db_level == parent->db_level-1);
2793		/*
2794		 * We're about to modify our parent's db_data by modifying
2795		 * our block pointer, so the parent must be released.
2796		 */
2797		ASSERT(arc_released(parent->db_buf));
2798		zio = parent->db_data_pending->dr_zio;
2799	} else {
2800		/* Our parent is the dnode itself. */
2801		ASSERT((db->db_level == dn->dn_phys->dn_nlevels-1 &&
2802		    db->db_blkid != DMU_SPILL_BLKID) ||
2803		    (db->db_blkid == DMU_SPILL_BLKID && db->db_level == 0));
2804		if (db->db_blkid != DMU_SPILL_BLKID)
2805			ASSERT3P(db->db_blkptr, ==,
2806			    &dn->dn_phys->dn_blkptr[db->db_blkid]);
2807		zio = dn->dn_zio;
2808	}
2809
2810	ASSERT(db->db_level == 0 || data == db->db_buf);
2811	ASSERT3U(db->db_blkptr->blk_birth, <=, txg);
2812	ASSERT(zio);
2813
2814	SET_BOOKMARK(&zb, os->os_dsl_dataset ?
2815	    os->os_dsl_dataset->ds_object : DMU_META_OBJSET,
2816	    db->db.db_object, db->db_level, db->db_blkid);
2817
2818	if (db->db_blkid == DMU_SPILL_BLKID)
2819		wp_flag = WP_SPILL;
2820	wp_flag |= (db->db_state == DB_NOFILL) ? WP_NOFILL : 0;
2821
2822	dmu_write_policy(os, dn, db->db_level, wp_flag, &zp);
2823	DB_DNODE_EXIT(db);
2824
2825	if (db->db_level == 0 &&
2826	    dr->dt.dl.dr_override_state == DR_OVERRIDDEN) {
2827		/*
2828		 * The BP for this block has been provided by open context
2829		 * (by dmu_sync() or dmu_buf_write_embedded()).
2830		 */
2831		void *contents = (data != NULL) ? data->b_data : NULL;
2832
2833		dr->dr_zio = zio_write(zio, os->os_spa, txg,
2834		    db->db_blkptr, contents, db->db.db_size, &zp,
2835		    dbuf_write_override_ready, NULL, dbuf_write_override_done,
2836		    dr, ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);
2837		mutex_enter(&db->db_mtx);
2838		dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
2839		zio_write_override(dr->dr_zio, &dr->dt.dl.dr_overridden_by,
2840		    dr->dt.dl.dr_copies, dr->dt.dl.dr_nopwrite);
2841		mutex_exit(&db->db_mtx);
2842	} else if (db->db_state == DB_NOFILL) {
2843		ASSERT(zp.zp_checksum == ZIO_CHECKSUM_OFF ||
2844		    zp.zp_checksum == ZIO_CHECKSUM_NOPARITY);
2845		dr->dr_zio = zio_write(zio, os->os_spa, txg,
2846		    db->db_blkptr, NULL, db->db.db_size, &zp,
2847		    dbuf_write_nofill_ready, NULL, dbuf_write_nofill_done, db,
2848		    ZIO_PRIORITY_ASYNC_WRITE,
2849		    ZIO_FLAG_MUSTSUCCEED | ZIO_FLAG_NODATA, &zb);
2850	} else {
2851		ASSERT(arc_released(data));
2852		dr->dr_zio = arc_write(zio, os->os_spa, txg,
2853		    db->db_blkptr, data, DBUF_IS_L2CACHEABLE(db),
2854		    DBUF_IS_L2COMPRESSIBLE(db), &zp, dbuf_write_ready,
2855		    dbuf_write_physdone, dbuf_write_done, db,
2856		    ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);
2857	}
2858}
2859