dmu_tx.c revision 251629
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
 * Copyright (c) 2013 by Delphix. All rights reserved.
 */

#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dbuf.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h> /* for dsl_dataset_block_freeable() */
#include <sys/dsl_dir.h> /* for dsl_dir_tempreserve_*() */
#include <sys/dsl_pool.h>
#include <sys/zap_impl.h> /* for fzap_default_block_shift */
#include <sys/spa.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/zfs_context.h>
#include <sys/varargs.h>

typedef void (*dmu_tx_hold_func_t)(dmu_tx_t *tx, struct dnode *dn,
    uint64_t arg1, uint64_t arg2);

dmu_tx_t *
dmu_tx_create_dd(dsl_dir_t *dd)
{
	dmu_tx_t *tx = kmem_zalloc(sizeof (dmu_tx_t), KM_SLEEP);
	tx->tx_dir = dd;
	if (dd != NULL)
		tx->tx_pool = dd->dd_pool;
	list_create(&tx->tx_holds, sizeof (dmu_tx_hold_t),
	    offsetof(dmu_tx_hold_t, txh_node));
	list_create(&tx->tx_callbacks, sizeof (dmu_tx_callback_t),
	    offsetof(dmu_tx_callback_t, dcb_node));
#ifdef ZFS_DEBUG
	refcount_create(&tx->tx_space_written);
	refcount_create(&tx->tx_space_freed);
#endif
	return (tx);
}

dmu_tx_t *
dmu_tx_create(objset_t *os)
{
	dmu_tx_t *tx = dmu_tx_create_dd(os->os_dsl_dataset->ds_dir);
	tx->tx_objset = os;
	tx->tx_lastsnap_txg = dsl_dataset_prev_snap_txg(os->os_dsl_dataset);
	return (tx);
}

dmu_tx_t *
dmu_tx_create_assigned(struct dsl_pool *dp, uint64_t txg)
{
	dmu_tx_t *tx = dmu_tx_create_dd(NULL);

	ASSERT3U(txg, <=, dp->dp_tx.tx_open_txg);
	tx->tx_pool = dp;
	tx->tx_txg = txg;
	tx->tx_anyobj = TRUE;

	return (tx);
}

int
dmu_tx_is_syncing(dmu_tx_t *tx)
{
	return (tx->tx_anyobj);
}

int
dmu_tx_private_ok(dmu_tx_t *tx)
{
	return (tx->tx_anyobj);
}

static dmu_tx_hold_t *
dmu_tx_hold_object_impl(dmu_tx_t *tx, objset_t *os, uint64_t object,
    enum dmu_tx_hold_type type, uint64_t arg1, uint64_t arg2)
{
	dmu_tx_hold_t *txh;
	dnode_t *dn = NULL;
	int err;

	if (object != DMU_NEW_OBJECT) {
		err = dnode_hold(os, object, tx, &dn);
		if (err) {
			tx->tx_err = err;
			return (NULL);
		}

		if (err == 0 && tx->tx_txg != 0) {
			mutex_enter(&dn->dn_mtx);
			/*
			 * dn->dn_assigned_txg == tx->tx_txg doesn't pose a
			 * problem, but there's no way for it to happen (for
			 * now, at least).
			 */
			ASSERT(dn->dn_assigned_txg == 0);
			dn->dn_assigned_txg = tx->tx_txg;
			(void) refcount_add(&dn->dn_tx_holds, tx);
			mutex_exit(&dn->dn_mtx);
		}
	}

	txh = kmem_zalloc(sizeof (dmu_tx_hold_t), KM_SLEEP);
	txh->txh_tx = tx;
	txh->txh_dnode = dn;
#ifdef ZFS_DEBUG
	txh->txh_type = type;
	txh->txh_arg1 = arg1;
	txh->txh_arg2 = arg2;
#endif
	list_insert_tail(&tx->tx_holds, txh);

	return (txh);
}

void
dmu_tx_add_new_object(dmu_tx_t *tx, objset_t *os, uint64_t object)
{
	/*
	 * If we're syncing, they can manipulate any object anyhow, and
	 * the hold on the dnode_t can cause problems.
	 */
	if (!dmu_tx_is_syncing(tx)) {
		(void) dmu_tx_hold_object_impl(tx, os,
		    object, THT_NEWOBJECT, 0, 0);
	}
}

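/*
 * Read the given block of the dnode at the given level, allowing the
 * read to fail, so that any i/o error is discovered now rather than
 * after this tx has been assigned to a txg.
 */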
static int
dmu_tx_check_ioerr(zio_t *zio, dnode_t *dn, int level, uint64_t blkid)
{
	int err;
	dmu_buf_impl_t *db;

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	db = dbuf_hold_level(dn, level, blkid, FTAG);
	rw_exit(&dn->dn_struct_rwlock);
	if (db == NULL)
		return (SET_ERROR(EIO));
	err = dbuf_read(db, zio, DB_RF_CANFAIL | DB_RF_NOPREFETCH);
	dbuf_rele(db, FTAG);
	return (err);
}

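/*
 * Charge the space for one block at the given level to the hold, then
 * walk up the chain of indirect blocks above it, charging each twig
 * once.  The history[] array remembers the last blkid charged at each
 * level, so adjacent level-0 blocks that share an indirect chain are
 * not double-counted.
 */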
static void
dmu_tx_count_twig(dmu_tx_hold_t *txh, dnode_t *dn, dmu_buf_impl_t *db,
    int level, uint64_t blkid, boolean_t freeable, uint64_t *history)
{
	objset_t *os = dn->dn_objset;
	dsl_dataset_t *ds = os->os_dsl_dataset;
	int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
	dmu_buf_impl_t *parent = NULL;
	blkptr_t *bp = NULL;
	uint64_t space;

	if (level >= dn->dn_nlevels || history[level] == blkid)
		return;

	history[level] = blkid;

	space = (level == 0) ? dn->dn_datablksz : (1ULL << dn->dn_indblkshift);

	if (db == NULL || db == dn->dn_dbuf) {
		ASSERT(level != 0);
		db = NULL;
	} else {
		ASSERT(DB_DNODE(db) == dn);
		ASSERT(db->db_level == level);
		ASSERT(db->db.db_size == space);
		ASSERT(db->db_blkid == blkid);
		bp = db->db_blkptr;
		parent = db->db_parent;
	}

	freeable = (bp && (freeable ||
	    dsl_dataset_block_freeable(ds, bp, bp->blk_birth)));

	if (freeable)
		txh->txh_space_tooverwrite += space;
	else
		txh->txh_space_towrite += space;
	if (bp)
		txh->txh_space_tounref += bp_get_dsize(os->os_spa, bp);

	dmu_tx_count_twig(txh, dn, parent, level + 1,
	    blkid >> epbs, freeable, history);
}

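/*
 * Estimate the space that a write of 'len' bytes at offset 'off' will
 * consume, charging blocks whose existing copies are freeable as
 * overwrites and everything else (including new indirect blocks) as
 * new writes.
 */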
/* ARGSUSED */
static void
dmu_tx_count_write(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
{
	dnode_t *dn = txh->txh_dnode;
	uint64_t start, end, i;
	int min_bs, max_bs, min_ibs, max_ibs, epbs, bits;
	int err = 0;

	if (len == 0)
		return;

	min_bs = SPA_MINBLOCKSHIFT;
	max_bs = SPA_MAXBLOCKSHIFT;
	min_ibs = DN_MIN_INDBLKSHIFT;
	max_ibs = DN_MAX_INDBLKSHIFT;

	if (dn) {
		uint64_t history[DN_MAX_LEVELS];
		int nlvls = dn->dn_nlevels;
		int delta;

		/*
		 * For i/o error checking, read the first and last level-0
		 * blocks (if they are not aligned), and all the level-1 blocks.
		 */
		if (dn->dn_maxblkid == 0) {
			delta = dn->dn_datablksz;
			start = (off < dn->dn_datablksz) ? 0 : 1;
			end = (off+len <= dn->dn_datablksz) ? 0 : 1;
			if (start == 0 && (off > 0 || len < dn->dn_datablksz)) {
				err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
				if (err)
					goto out;
				delta -= off;
			}
		} else {
			zio_t *zio = zio_root(dn->dn_objset->os_spa,
			    NULL, NULL, ZIO_FLAG_CANFAIL);

			/* first level-0 block */
			start = off >> dn->dn_datablkshift;
			if (P2PHASE(off, dn->dn_datablksz) ||
			    len < dn->dn_datablksz) {
				err = dmu_tx_check_ioerr(zio, dn, 0, start);
				if (err)
					goto out;
			}

			/* last level-0 block */
			end = (off+len-1) >> dn->dn_datablkshift;
			if (end != start && end <= dn->dn_maxblkid &&
			    P2PHASE(off+len, dn->dn_datablksz)) {
				err = dmu_tx_check_ioerr(zio, dn, 0, end);
				if (err)
					goto out;
			}

			/* level-1 blocks */
			if (nlvls > 1) {
				int shft = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
				for (i = (start>>shft)+1; i < end>>shft; i++) {
					err = dmu_tx_check_ioerr(zio, dn, 1, i);
					if (err)
						goto out;
				}
			}

			err = zio_wait(zio);
			if (err)
				goto out;
			delta = P2NPHASE(off, dn->dn_datablksz);
		}

		min_ibs = max_ibs = dn->dn_indblkshift;
		if (dn->dn_maxblkid > 0) {
			/*
			 * The blocksize can't change,
			 * so we can make a more precise estimate.
			 */
			ASSERT(dn->dn_datablkshift != 0);
			min_bs = max_bs = dn->dn_datablkshift;
		}

		/*
		 * If this write is not off the end of the file
		 * we need to account for overwrites/unref.
		 */
		if (start <= dn->dn_maxblkid) {
			for (int l = 0; l < DN_MAX_LEVELS; l++)
				history[l] = -1ULL;
		}
		while (start <= dn->dn_maxblkid) {
			dmu_buf_impl_t *db;

			rw_enter(&dn->dn_struct_rwlock, RW_READER);
			err = dbuf_hold_impl(dn, 0, start, FALSE, FTAG, &db);
			rw_exit(&dn->dn_struct_rwlock);

			if (err) {
				txh->txh_tx->tx_err = err;
				return;
			}

			dmu_tx_count_twig(txh, dn, db, 0, start, B_FALSE,
			    history);
			dbuf_rele(db, FTAG);
			if (++start > end) {
				/*
				 * Account for new indirects appearing
				 * before this IO gets assigned into a txg.
				 */
				bits = 64 - min_bs;
				epbs = min_ibs - SPA_BLKPTRSHIFT;
				for (bits -= epbs * (nlvls - 1);
				    bits >= 0; bits -= epbs)
					txh->txh_fudge += 1ULL << max_ibs;
				goto out;
			}
			off += delta;
			if (len >= delta)
				len -= delta;
			delta = dn->dn_datablksz;
		}
	}

	/*
	 * 'end' is the last thing we will access, not one past.
	 * This way we won't overflow when accessing the last byte.
	 */
	start = P2ALIGN(off, 1ULL << max_bs);
	end = P2ROUNDUP(off + len, 1ULL << max_bs) - 1;
	txh->txh_space_towrite += end - start + 1;

	start >>= min_bs;
	end >>= min_bs;

	epbs = min_ibs - SPA_BLKPTRSHIFT;

	/*
	 * The object contains at most 2^(64 - min_bs) blocks,
	 * and each indirect level maps 2^epbs.
	 */
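	/*
	 * For example, with 128K data blocks (min_bs = 17) and 16K
	 * indirect blocks (min_ibs = 14), epbs = 14 - SPA_BLKPTRSHIFT = 7:
	 * each indirect block maps 128 children, so each pass through
	 * the loop below accounts for one more level of indirection.
	 */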
	for (bits = 64 - min_bs; bits >= 0; bits -= epbs) {
		start >>= epbs;
		end >>= epbs;
		ASSERT3U(end, >=, start);
		txh->txh_space_towrite += (end - start + 1) << max_ibs;
		if (start != 0) {
			/*
			 * We also need a new blkid=0 indirect block
			 * to reference any existing file data.
			 */
			txh->txh_space_towrite += 1ULL << max_ibs;
		}
	}

out:
	if (txh->txh_space_towrite + txh->txh_space_tooverwrite >
	    2 * DMU_MAX_ACCESS)
		err = SET_ERROR(EFBIG);

	if (err)
		txh->txh_tx->tx_err = err;
}

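/*
 * Charge the space needed to dirty this object's dnode: one data block
 * of the meta-dnode object plus one indirect block for each meta-dnode
 * level above level 0.  If the dnode's current block is freeable, the
 * charge counts as an overwrite rather than a new write.
 */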
static void
dmu_tx_count_dnode(dmu_tx_hold_t *txh)
{
	dnode_t *dn = txh->txh_dnode;
	dnode_t *mdn = DMU_META_DNODE(txh->txh_tx->tx_objset);
	uint64_t space = mdn->dn_datablksz +
	    ((mdn->dn_nlevels-1) << mdn->dn_indblkshift);

	if (dn && dn->dn_dbuf->db_blkptr &&
	    dsl_dataset_block_freeable(dn->dn_objset->os_dsl_dataset,
	    dn->dn_dbuf->db_blkptr, dn->dn_dbuf->db_blkptr->blk_birth)) {
		txh->txh_space_tooverwrite += space;
		txh->txh_space_tounref += space;
	} else {
		txh->txh_space_towrite += space;
		if (dn && dn->dn_dbuf->db_blkptr)
			txh->txh_space_tounref += space;
	}
}

void
dmu_tx_hold_write(dmu_tx_t *tx, uint64_t object, uint64_t off, int len)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);
	ASSERT(len < DMU_MAX_ACCESS);
	ASSERT(len == 0 || UINT64_MAX - off >= len - 1);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_WRITE, off, len);
	if (txh == NULL)
		return;

	dmu_tx_count_write(txh, off, len);
	dmu_tx_count_dnode(txh);
}

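/*
 * Estimate the space that will be freed by freeing 'len' bytes at
 * offset 'off', along with the memory that must be held for level-1
 * dbufs while the free is in progress.  Holes in the range are skipped
 * via dnode_next_offset(), and only blocks up to dn_maxblkid are
 * scanned.
 */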
static void
dmu_tx_count_free(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
{
	uint64_t blkid, nblks, lastblk;
	uint64_t space = 0, unref = 0, skipped = 0;
	dnode_t *dn = txh->txh_dnode;
	dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
	spa_t *spa = txh->txh_tx->tx_pool->dp_spa;
	int epbs;
	uint64_t l0span = 0, nl1blks = 0;

	if (dn->dn_nlevels == 0)
		return;

	/*
	 * The struct_rwlock protects us against dn_nlevels
	 * changing, in case (against all odds) we manage to dirty &
	 * sync out the changes after we check for being dirty.
	 * Also, dbuf_hold_impl() wants us to have the struct_rwlock.
	 */
	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
	if (dn->dn_maxblkid == 0) {
		if (off == 0 && len >= dn->dn_datablksz) {
			blkid = 0;
			nblks = 1;
		} else {
			rw_exit(&dn->dn_struct_rwlock);
			return;
		}
	} else {
		blkid = off >> dn->dn_datablkshift;
		nblks = (len + dn->dn_datablksz - 1) >> dn->dn_datablkshift;

		if (blkid >= dn->dn_maxblkid) {
			rw_exit(&dn->dn_struct_rwlock);
			return;
		}
		if (blkid + nblks > dn->dn_maxblkid)
			nblks = dn->dn_maxblkid - blkid;
	}
	l0span = nblks;    /* save for later use to calc level > 1 overhead */
	if (dn->dn_nlevels == 1) {
		int i;
		for (i = 0; i < nblks; i++) {
			blkptr_t *bp = dn->dn_phys->dn_blkptr;
			ASSERT3U(blkid + i, <, dn->dn_nblkptr);
			bp += blkid + i;
			if (dsl_dataset_block_freeable(ds, bp, bp->blk_birth)) {
				dprintf_bp(bp, "can free old%s", "");
				space += bp_get_dsize(spa, bp);
			}
			unref += BP_GET_ASIZE(bp);
		}
		nl1blks = 1;
		nblks = 0;
	}

	lastblk = blkid + nblks - 1;
	while (nblks) {
		dmu_buf_impl_t *dbuf;
		uint64_t ibyte, new_blkid;
		int epb = 1 << epbs;
		int err, i, blkoff, tochk;
		blkptr_t *bp;

		ibyte = blkid << dn->dn_datablkshift;
		err = dnode_next_offset(dn,
		    DNODE_FIND_HAVELOCK, &ibyte, 2, 1, 0);
		new_blkid = ibyte >> dn->dn_datablkshift;
		if (err == ESRCH) {
			skipped += (lastblk >> epbs) - (blkid >> epbs) + 1;
			break;
		}
		if (err) {
			txh->txh_tx->tx_err = err;
			break;
		}
		if (new_blkid > lastblk) {
			skipped += (lastblk >> epbs) - (blkid >> epbs) + 1;
			break;
		}

		if (new_blkid > blkid) {
			ASSERT((new_blkid >> epbs) > (blkid >> epbs));
			skipped += (new_blkid >> epbs) - (blkid >> epbs) - 1;
			nblks -= new_blkid - blkid;
			blkid = new_blkid;
		}
		blkoff = P2PHASE(blkid, epb);
		tochk = MIN(epb - blkoff, nblks);

		err = dbuf_hold_impl(dn, 1, blkid >> epbs, FALSE, FTAG, &dbuf);
		if (err) {
			txh->txh_tx->tx_err = err;
			break;
		}

		txh->txh_memory_tohold += dbuf->db.db_size;

		/*
		 * We don't check memory_tohold against DMU_MAX_ACCESS because
		 * memory_tohold is an over-estimation (especially the >L1
		 * indirect blocks), so it could fail.  Callers should have
		 * already verified that they will not be holding too much
		 * memory.
		 */

		err = dbuf_read(dbuf, NULL, DB_RF_HAVESTRUCT | DB_RF_CANFAIL);
		if (err != 0) {
			txh->txh_tx->tx_err = err;
			dbuf_rele(dbuf, FTAG);
			break;
		}

		bp = dbuf->db.db_data;
		bp += blkoff;

		for (i = 0; i < tochk; i++) {
			if (dsl_dataset_block_freeable(ds, &bp[i],
			    bp[i].blk_birth)) {
				dprintf_bp(&bp[i], "can free old%s", "");
				space += bp_get_dsize(spa, &bp[i]);
			}
			unref += BP_GET_ASIZE(&bp[i]);
		}
		dbuf_rele(dbuf, FTAG);

		++nl1blks;
		blkid += tochk;
		nblks -= tochk;
	}
	rw_exit(&dn->dn_struct_rwlock);

	/*
	 * Add in memory requirements of higher-level indirects.
	 * This assumes a worst-possible scenario for dn_nlevels and a
	 * worst-possible distribution of l1-blocks over the region to free.
	 */
	{
		uint64_t blkcnt = 1 + ((l0span >> epbs) >> epbs);
		int level = 2;
		/*
		 * Here we don't use DN_MAX_LEVEL, but calculate it with the
		 * given datablkshift and indblkshift.  This makes the
		 * difference between 19 and 8 on large files.
		 */
		int maxlevel = 2 + (DN_MAX_OFFSET_SHIFT - dn->dn_datablkshift) /
		    (dn->dn_indblkshift - SPA_BLKPTRSHIFT);
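		/*
		 * E.g. for a large file with 128K data blocks
		 * (datablkshift = 17) and 16K indirect blocks
		 * (indblkshift = 14), maxlevel = 2 + (64 - 17) / 7 = 8.
		 */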

		while (level++ < maxlevel) {
			txh->txh_memory_tohold += MAX(MIN(blkcnt, nl1blks), 1)
			    << dn->dn_indblkshift;
			blkcnt = 1 + (blkcnt >> epbs);
		}
	}

	/* account for new level 1 indirect blocks that might show up */
	if (skipped > 0) {
		txh->txh_fudge += skipped << dn->dn_indblkshift;
		skipped = MIN(skipped, DMU_MAX_DELETEBLKCNT >> epbs);
		txh->txh_memory_tohold += skipped << dn->dn_indblkshift;
	}
	txh->txh_space_tofree += space;
	txh->txh_space_tounref += unref;
}

void
dmu_tx_hold_free(dmu_tx_t *tx, uint64_t object, uint64_t off, uint64_t len)
{
	dmu_tx_hold_t *txh;
	dnode_t *dn;
	uint64_t start, end, i;
	int err, shift;
	zio_t *zio;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_FREE, off, len);
	if (txh == NULL)
		return;
	dn = txh->txh_dnode;

	/* first block */
	if (off != 0)
		dmu_tx_count_write(txh, off, 1);
	/* last block */
	if (len != DMU_OBJECT_END)
		dmu_tx_count_write(txh, off+len, 1);

	dmu_tx_count_dnode(txh);

	if (off >= (dn->dn_maxblkid+1) * dn->dn_datablksz)
		return;
	if (len == DMU_OBJECT_END)
		len = (dn->dn_maxblkid+1) * dn->dn_datablksz - off;

	/*
	 * For i/o error checking, read the first and last level-0
	 * blocks, and all the level-1 blocks.  The above count_write's
	 * have already taken care of the level-0 blocks.
	 */
	if (dn->dn_nlevels > 1) {
		shift = dn->dn_datablkshift + dn->dn_indblkshift -
		    SPA_BLKPTRSHIFT;
		start = off >> shift;
		end = dn->dn_datablkshift ? ((off+len) >> shift) : 0;

		zio = zio_root(tx->tx_pool->dp_spa,
		    NULL, NULL, ZIO_FLAG_CANFAIL);
		for (i = start; i <= end; i++) {
			uint64_t ibyte = i << shift;
			err = dnode_next_offset(dn, 0, &ibyte, 2, 1, 0);
			i = ibyte >> shift;
			if (err == ESRCH)
				break;
			if (err) {
				tx->tx_err = err;
				return;
			}

			err = dmu_tx_check_ioerr(zio, dn, 1, i);
			if (err) {
				tx->tx_err = err;
				return;
			}
		}
		err = zio_wait(zio);
		if (err) {
			tx->tx_err = err;
			return;
		}
	}

	dmu_tx_count_free(txh, off, len);
}

void
dmu_tx_hold_zap(dmu_tx_t *tx, uint64_t object, int add, const char *name)
{
	dmu_tx_hold_t *txh;
	dnode_t *dn;
	uint64_t nblocks;
	int epbs, err;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_ZAP, add, (uintptr_t)name);
	if (txh == NULL)
		return;
	dn = txh->txh_dnode;

	dmu_tx_count_dnode(txh);

	if (dn == NULL) {
		/*
		 * We will be able to fit a new object's entries into one leaf
		 * block.  So there will be at most 2 blocks total,
		 * including the header block.
		 */
		dmu_tx_count_write(txh, 0, 2 << fzap_default_block_shift);
		return;
	}

	ASSERT3P(DMU_OT_BYTESWAP(dn->dn_type), ==, DMU_BSWAP_ZAP);

	if (dn->dn_maxblkid == 0 && !add) {
		blkptr_t *bp;

		/*
		 * If there is only one block (i.e. this is a micro-zap)
		 * and we are not adding anything, the accounting is simple.
		 */
		err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
		if (err) {
			tx->tx_err = err;
			return;
		}

		/*
		 * Use max block size here, since we don't know how much
		 * the size will change between now and the dbuf dirty call.
		 */
		bp = &dn->dn_phys->dn_blkptr[0];
		if (dsl_dataset_block_freeable(dn->dn_objset->os_dsl_dataset,
		    bp, bp->blk_birth))
			txh->txh_space_tooverwrite += SPA_MAXBLOCKSIZE;
		else
			txh->txh_space_towrite += SPA_MAXBLOCKSIZE;
		if (!BP_IS_HOLE(bp))
			txh->txh_space_tounref += SPA_MAXBLOCKSIZE;
		return;
	}

	if (dn->dn_maxblkid > 0 && name) {
		/*
		 * access the name in this fat-zap so that we'll check
		 * for i/o errors to the leaf blocks, etc.
		 */
		err = zap_lookup(dn->dn_objset, dn->dn_object, name,
		    8, 0, NULL);
		if (err == EIO) {
			tx->tx_err = err;
			return;
		}
	}

	err = zap_count_write(dn->dn_objset, dn->dn_object, name, add,
	    &txh->txh_space_towrite, &txh->txh_space_tooverwrite);

	/*
	 * If the modified blocks are scattered to the four winds,
	 * we'll have to modify an indirect twig for each.
	 */
	epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
	for (nblocks = dn->dn_maxblkid >> epbs; nblocks != 0; nblocks >>= epbs)
		if (dn->dn_objset->os_dsl_dataset->ds_phys->ds_prev_snap_obj)
			txh->txh_space_towrite += 3 << dn->dn_indblkshift;
		else
			txh->txh_space_tooverwrite += 3 << dn->dn_indblkshift;
}
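
/*
 * For example (an illustrative sketch; "dzp" stands in for a caller's
 * directory znode), adding a directory entry holds the ZAP object with
 * add == TRUE and the new entry's name:
 *
 *	dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
 *
 * Removing an entry passes add == FALSE instead.
 */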

void
dmu_tx_hold_bonus(dmu_tx_t *tx, uint64_t object)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_BONUS, 0, 0);
	if (txh)
		dmu_tx_count_dnode(txh);
}

void
dmu_tx_hold_space(dmu_tx_t *tx, uint64_t space)
{
	dmu_tx_hold_t *txh;
	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    DMU_NEW_OBJECT, THT_SPACE, space, 0);

	txh->txh_space_towrite += space;
}

int
dmu_tx_holds(dmu_tx_t *tx, uint64_t object)
{
	dmu_tx_hold_t *txh;
	int holds = 0;

	/*
	 * By asserting that the tx is assigned, we're counting the
	 * number of dn_tx_holds, which is the same as the number of
	 * dn_holds.  Otherwise, we'd be counting dn_holds, but
	 * dn_tx_holds could be 0.
	 */
	ASSERT(tx->tx_txg != 0);

	/* if (tx->tx_anyobj == TRUE) */
		/* return (0); */

	for (txh = list_head(&tx->tx_holds); txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		if (txh->txh_dnode && txh->txh_dnode->dn_object == object)
			holds++;
	}

	return (holds);
}

#ifdef ZFS_DEBUG
void
dmu_tx_dirty_buf(dmu_tx_t *tx, dmu_buf_impl_t *db)
{
	dmu_tx_hold_t *txh;
	int match_object = FALSE, match_offset = FALSE;
	dnode_t *dn;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	ASSERT(tx->tx_txg != 0);
	ASSERT(tx->tx_objset == NULL || dn->dn_objset == tx->tx_objset);
	ASSERT3U(dn->dn_object, ==, db->db.db_object);

	if (tx->tx_anyobj) {
		DB_DNODE_EXIT(db);
		return;
	}

	/* XXX No checking on the meta dnode for now */
	if (db->db.db_object == DMU_META_DNODE_OBJECT) {
		DB_DNODE_EXIT(db);
		return;
	}

	for (txh = list_head(&tx->tx_holds); txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		ASSERT(dn == NULL || dn->dn_assigned_txg == tx->tx_txg);
		if (txh->txh_dnode == dn && txh->txh_type != THT_NEWOBJECT)
			match_object = TRUE;
		if (txh->txh_dnode == NULL || txh->txh_dnode == dn) {
			int datablkshift = dn->dn_datablkshift ?
			    dn->dn_datablkshift : SPA_MAXBLOCKSHIFT;
			int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
			int shift = datablkshift + epbs * db->db_level;
			uint64_t beginblk = shift >= 64 ? 0 :
			    (txh->txh_arg1 >> shift);
			uint64_t endblk = shift >= 64 ? 0 :
			    ((txh->txh_arg1 + txh->txh_arg2 - 1) >> shift);
			uint64_t blkid = db->db_blkid;

			/* XXX txh_arg2 better not be zero... */

			dprintf("found txh type %x beginblk=%llx endblk=%llx\n",
			    txh->txh_type, beginblk, endblk);

			switch (txh->txh_type) {
			case THT_WRITE:
				if (blkid >= beginblk && blkid <= endblk)
					match_offset = TRUE;
				/*
				 * We will let this hold work for the bonus
				 * or spill buffer so that we don't need to
				 * hold it when creating a new object.
				 */
				if (blkid == DMU_BONUS_BLKID ||
				    blkid == DMU_SPILL_BLKID)
					match_offset = TRUE;
				/*
				 * They might have to increase nlevels,
				 * thus dirtying the new TLIBs.  Or they
				 * might have to change the block size,
				 * thus dirtying the new lvl=0 blk=0.
				 */
				if (blkid == 0)
					match_offset = TRUE;
				break;
			case THT_FREE:
				/*
				 * We will dirty all the level 1 blocks in
				 * the free range and perhaps the first and
				 * last level 0 block.
				 */
				if (blkid >= beginblk && (blkid <= endblk ||
				    txh->txh_arg2 == DMU_OBJECT_END))
					match_offset = TRUE;
				break;
			case THT_SPILL:
				if (blkid == DMU_SPILL_BLKID)
					match_offset = TRUE;
				break;
			case THT_BONUS:
				if (blkid == DMU_BONUS_BLKID)
					match_offset = TRUE;
				break;
			case THT_ZAP:
				match_offset = TRUE;
				break;
			case THT_NEWOBJECT:
				match_object = TRUE;
				break;
			default:
				ASSERT(!"bad txh_type");
			}
		}
		if (match_object && match_offset) {
			DB_DNODE_EXIT(db);
			return;
		}
	}
	DB_DNODE_EXIT(db);
	panic("dirtying dbuf obj=%llx lvl=%u blkid=%llx but not tx_held\n",
	    (u_longlong_t)db->db.db_object, db->db_level,
	    (u_longlong_t)db->db_blkid);
}
#endif

static int
dmu_tx_try_assign(dmu_tx_t *tx, txg_how_t txg_how)
{
	dmu_tx_hold_t *txh;
	spa_t *spa = tx->tx_pool->dp_spa;
	uint64_t memory, asize, fsize, usize;
	uint64_t towrite, tofree, tooverwrite, tounref, tohold, fudge;

	ASSERT0(tx->tx_txg);

	if (tx->tx_err)
		return (tx->tx_err);

	if (spa_suspended(spa)) {
		/*
		 * If the user has indicated a blocking failure mode
		 * then return ERESTART which will block in dmu_tx_wait().
		 * Otherwise, return EIO so that an error can get
		 * propagated back to the VOP calls.
		 *
		 * Note that we always honor the txg_how flag regardless
		 * of the failuremode setting.
		 */
		if (spa_get_failmode(spa) == ZIO_FAILURE_MODE_CONTINUE &&
		    txg_how != TXG_WAIT)
			return (SET_ERROR(EIO));

		return (SET_ERROR(ERESTART));
	}

	tx->tx_txg = txg_hold_open(tx->tx_pool, &tx->tx_txgh);
	tx->tx_needassign_txh = NULL;

	/*
	 * NB: No error returns are allowed after txg_hold_open, but
	 * before processing the dnode holds, due to the
	 * dmu_tx_unassign() logic.
	 */

	towrite = tofree = tooverwrite = tounref = tohold = fudge = 0;
	for (txh = list_head(&tx->tx_holds); txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		dnode_t *dn = txh->txh_dnode;
		if (dn != NULL) {
			mutex_enter(&dn->dn_mtx);
			if (dn->dn_assigned_txg == tx->tx_txg - 1) {
				mutex_exit(&dn->dn_mtx);
				tx->tx_needassign_txh = txh;
				return (SET_ERROR(ERESTART));
			}
			if (dn->dn_assigned_txg == 0)
				dn->dn_assigned_txg = tx->tx_txg;
			ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);
			(void) refcount_add(&dn->dn_tx_holds, tx);
			mutex_exit(&dn->dn_mtx);
		}
		towrite += txh->txh_space_towrite;
		tofree += txh->txh_space_tofree;
		tooverwrite += txh->txh_space_tooverwrite;
		tounref += txh->txh_space_tounref;
		tohold += txh->txh_memory_tohold;
		fudge += txh->txh_fudge;
	}

	/*
	 * If a snapshot has been taken since we made our estimates,
	 * assume that we won't be able to free or overwrite anything.
	 */
	if (tx->tx_objset &&
	    dsl_dataset_prev_snap_txg(tx->tx_objset->os_dsl_dataset) >
	    tx->tx_lastsnap_txg) {
		towrite += tooverwrite;
		tooverwrite = tofree = 0;
	}

	/* needed allocation: worst-case estimate of write space */
	asize = spa_get_asize(tx->tx_pool->dp_spa, towrite + tooverwrite);
	/* freed space estimate: worst-case overwrite + free estimate */
	fsize = spa_get_asize(tx->tx_pool->dp_spa, tooverwrite) + tofree;
	/* convert unrefd space to worst-case estimate */
	usize = spa_get_asize(tx->tx_pool->dp_spa, tounref);
	/* calculate memory footprint estimate */
	memory = towrite + tooverwrite + tohold;

#ifdef ZFS_DEBUG
	/*
	 * Add in 'tohold' to account for our dirty holds on this memory
	 * XXX - the "fudge" factor is to account for skipped blocks that
	 * we missed because dnode_next_offset() misses in-core-only blocks.
	 */
	tx->tx_space_towrite = asize +
	    spa_get_asize(tx->tx_pool->dp_spa, tohold + fudge);
	tx->tx_space_tofree = tofree;
	tx->tx_space_tooverwrite = tooverwrite;
	tx->tx_space_tounref = tounref;
#endif

	if (tx->tx_dir && asize != 0) {
		int err = dsl_dir_tempreserve_space(tx->tx_dir, memory,
		    asize, fsize, usize, &tx->tx_tempreserve_cookie, tx);
		if (err)
			return (err);
	}

	return (0);
}

static void
dmu_tx_unassign(dmu_tx_t *tx)
{
	dmu_tx_hold_t *txh;

	if (tx->tx_txg == 0)
		return;

	txg_rele_to_quiesce(&tx->tx_txgh);

	/*
	 * Walk the transaction's hold list, removing the hold on the
	 * associated dnode, and notifying waiters if the refcount drops to 0.
	 */
	for (txh = list_head(&tx->tx_holds); txh != tx->tx_needassign_txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		dnode_t *dn = txh->txh_dnode;

		if (dn == NULL)
			continue;
		mutex_enter(&dn->dn_mtx);
		ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);

		if (refcount_remove(&dn->dn_tx_holds, tx) == 0) {
			dn->dn_assigned_txg = 0;
			cv_broadcast(&dn->dn_notxholds);
		}
		mutex_exit(&dn->dn_mtx);
	}

	txg_rele_to_sync(&tx->tx_txgh);

	tx->tx_lasttried_txg = tx->tx_txg;
	tx->tx_txg = 0;
}

/*
 * Assign tx to a transaction group.  txg_how can be one of:
 *
 * (1)	TXG_WAIT.  If the current open txg is full, waits until there's
 *	a new one.  This should be used when you're not holding locks.
 *	It will only fail if we're truly out of space (or over quota).
 *
 * (2)	TXG_NOWAIT.  If we can't assign into the current open txg without
 *	blocking, returns immediately with ERESTART.  This should be used
 *	whenever you're holding locks.  On an ERESTART error, the caller
 *	should drop locks, do a dmu_tx_wait(tx), and try again.
 */
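/*
 * A sketch of the typical TXG_NOWAIT caller pattern (illustrative;
 * the holds and error handling vary by caller):
 *
 *	tx = dmu_tx_create(os);
 *	dmu_tx_hold_write(tx, object, off, len);
 *	err = dmu_tx_assign(tx, TXG_NOWAIT);
 *	if (err == ERESTART) {
 *		dmu_tx_wait(tx);
 *		dmu_tx_abort(tx);
 *		goto top;	(redo the holds and retry)
 *	}
 *	if (err != 0) {
 *		dmu_tx_abort(tx);
 *		return (err);
 *	}
 *	... make the changes ...
 *	dmu_tx_commit(tx);
 */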
int
dmu_tx_assign(dmu_tx_t *tx, txg_how_t txg_how)
{
	int err;

	ASSERT(tx->tx_txg == 0);
	ASSERT(txg_how == TXG_WAIT || txg_how == TXG_NOWAIT);
	ASSERT(!dsl_pool_sync_context(tx->tx_pool));

	/* If we might wait, we must not hold the config lock. */
	ASSERT(txg_how != TXG_WAIT || !dsl_pool_config_held(tx->tx_pool));

	while ((err = dmu_tx_try_assign(tx, txg_how)) != 0) {
		dmu_tx_unassign(tx);

		if (err != ERESTART || txg_how != TXG_WAIT)
			return (err);

		dmu_tx_wait(tx);
	}

	txg_rele_to_quiesce(&tx->tx_txgh);

	return (0);
}

void
dmu_tx_wait(dmu_tx_t *tx)
{
	spa_t *spa = tx->tx_pool->dp_spa;

	ASSERT(tx->tx_txg == 0);
	ASSERT(!dsl_pool_config_held(tx->tx_pool));

	/*
	 * It's possible that the pool has become active after this thread
	 * has tried to obtain a tx.  If that's the case then its
	 * tx_lasttried_txg would not have been assigned.
	 */
	if (spa_suspended(spa) || tx->tx_lasttried_txg == 0) {
		txg_wait_synced(tx->tx_pool, spa_last_synced_txg(spa) + 1);
	} else if (tx->tx_needassign_txh) {
		dnode_t *dn = tx->tx_needassign_txh->txh_dnode;

		mutex_enter(&dn->dn_mtx);
		while (dn->dn_assigned_txg == tx->tx_lasttried_txg - 1)
			cv_wait(&dn->dn_notxholds, &dn->dn_mtx);
		mutex_exit(&dn->dn_mtx);
		tx->tx_needassign_txh = NULL;
	} else {
		txg_wait_open(tx->tx_pool, tx->tx_lasttried_txg + 1);
	}
}

void
dmu_tx_willuse_space(dmu_tx_t *tx, int64_t delta)
{
#ifdef ZFS_DEBUG
	if (tx->tx_dir == NULL || delta == 0)
		return;

	if (delta > 0) {
		ASSERT3U(refcount_count(&tx->tx_space_written) + delta, <=,
		    tx->tx_space_towrite);
		(void) refcount_add_many(&tx->tx_space_written, delta, NULL);
	} else {
		(void) refcount_add_many(&tx->tx_space_freed, -delta, NULL);
	}
#endif
}

void
dmu_tx_commit(dmu_tx_t *tx)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg != 0);

	/*
	 * Go through the transaction's hold list and remove holds on
	 * associated dnodes, notifying waiters if no holds remain.
	 */
	while (txh = list_head(&tx->tx_holds)) {
		dnode_t *dn = txh->txh_dnode;

		list_remove(&tx->tx_holds, txh);
		kmem_free(txh, sizeof (dmu_tx_hold_t));
		if (dn == NULL)
			continue;
		mutex_enter(&dn->dn_mtx);
		ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);

		if (refcount_remove(&dn->dn_tx_holds, tx) == 0) {
			dn->dn_assigned_txg = 0;
			cv_broadcast(&dn->dn_notxholds);
		}
		mutex_exit(&dn->dn_mtx);
		dnode_rele(dn, tx);
	}

	if (tx->tx_tempreserve_cookie)
		dsl_dir_tempreserve_clear(tx->tx_tempreserve_cookie, tx);

	if (!list_is_empty(&tx->tx_callbacks))
		txg_register_callbacks(&tx->tx_txgh, &tx->tx_callbacks);

	if (tx->tx_anyobj == FALSE)
		txg_rele_to_sync(&tx->tx_txgh);

	list_destroy(&tx->tx_callbacks);
	list_destroy(&tx->tx_holds);
#ifdef ZFS_DEBUG
	dprintf("towrite=%llu written=%llu tofree=%llu freed=%llu\n",
	    tx->tx_space_towrite, refcount_count(&tx->tx_space_written),
	    tx->tx_space_tofree, refcount_count(&tx->tx_space_freed));
	refcount_destroy_many(&tx->tx_space_written,
	    refcount_count(&tx->tx_space_written));
	refcount_destroy_many(&tx->tx_space_freed,
	    refcount_count(&tx->tx_space_freed));
#endif
	kmem_free(tx, sizeof (dmu_tx_t));
}

void
dmu_tx_abort(dmu_tx_t *tx)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);

	while (txh = list_head(&tx->tx_holds)) {
		dnode_t *dn = txh->txh_dnode;

		list_remove(&tx->tx_holds, txh);
		kmem_free(txh, sizeof (dmu_tx_hold_t));
		if (dn != NULL)
			dnode_rele(dn, tx);
	}

	/*
	 * Call any registered callbacks with an error code.
	 */
	if (!list_is_empty(&tx->tx_callbacks))
		dmu_tx_do_callbacks(&tx->tx_callbacks, ECANCELED);

	list_destroy(&tx->tx_callbacks);
	list_destroy(&tx->tx_holds);
#ifdef ZFS_DEBUG
	refcount_destroy_many(&tx->tx_space_written,
	    refcount_count(&tx->tx_space_written));
	refcount_destroy_many(&tx->tx_space_freed,
	    refcount_count(&tx->tx_space_freed));
#endif
	kmem_free(tx, sizeof (dmu_tx_t));
}

uint64_t
dmu_tx_get_txg(dmu_tx_t *tx)
{
	ASSERT(tx->tx_txg != 0);
	return (tx->tx_txg);
}

dsl_pool_t *
dmu_tx_pool(dmu_tx_t *tx)
{
	ASSERT(tx->tx_pool != NULL);
	return (tx->tx_pool);
}

void
dmu_tx_callback_register(dmu_tx_t *tx, dmu_tx_callback_func_t *func, void *data)
{
	dmu_tx_callback_t *dcb;

	dcb = kmem_alloc(sizeof (dmu_tx_callback_t), KM_SLEEP);

	dcb->dcb_func = func;
	dcb->dcb_data = data;

	list_insert_tail(&tx->tx_callbacks, dcb);
}
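
/*
 * Example (a sketch; "my_cb" and "my_arg" are hypothetical): a caller
 * that must defer work until the transaction reaches stable storage
 * registers a callback before dmu_tx_commit():
 *
 *	dmu_tx_callback_register(tx, my_cb, my_arg);
 *
 * my_cb(my_arg, error) then runs when the txg commits (error == 0) or
 * when the tx is aborted (e.g. error == ECANCELED from dmu_tx_abort()).
 */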

/*
 * Call all the commit callbacks on a list, with a given error code.
 */
void
dmu_tx_do_callbacks(list_t *cb_list, int error)
{
	dmu_tx_callback_t *dcb;

	while (dcb = list_head(cb_list)) {
		list_remove(cb_list, dcb);
		dcb->dcb_func(dcb->dcb_data, error);
		kmem_free(dcb, sizeof (dmu_tx_callback_t));
	}
}

/*
 * Interface to hold a bunch of attributes.
 * Used for creating new files.
 * attrsize is the total size of all attributes
 * to be added during object creation.
 *
 * For updating/adding a single attribute dmu_tx_hold_sa() should be used.
 */

/*
 * Hold the necessary attribute name for attribute registration.
 * It should be a very rare case where this is needed.  If it does
 * happen it would only happen on the first write to the file system.
 */
static void
dmu_tx_sa_registration_hold(sa_os_t *sa, dmu_tx_t *tx)
{
	int i;

	if (!sa->sa_need_attr_registration)
		return;

	for (i = 0; i != sa->sa_num_attrs; i++) {
		if (!sa->sa_attr_table[i].sa_registered) {
			if (sa->sa_reg_attr_obj)
				dmu_tx_hold_zap(tx, sa->sa_reg_attr_obj,
				    B_TRUE, sa->sa_attr_table[i].sa_name);
			else
				dmu_tx_hold_zap(tx, DMU_NEW_OBJECT,
				    B_TRUE, sa->sa_attr_table[i].sa_name);
		}
	}
}

void
dmu_tx_hold_spill(dmu_tx_t *tx, uint64_t object)
{
	dnode_t *dn;
	dmu_tx_hold_t *txh;

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset, object,
	    THT_SPILL, 0, 0);

	dn = txh->txh_dnode;

	if (dn == NULL)
		return;

	/* If blkptr doesn't exist then add space to towrite */
	if (!(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR)) {
		txh->txh_space_towrite += SPA_MAXBLOCKSIZE;
	} else {
		blkptr_t *bp;

		bp = &dn->dn_phys->dn_spill;
		if (dsl_dataset_block_freeable(dn->dn_objset->os_dsl_dataset,
		    bp, bp->blk_birth))
			txh->txh_space_tooverwrite += SPA_MAXBLOCKSIZE;
		else
			txh->txh_space_towrite += SPA_MAXBLOCKSIZE;
		if (!BP_IS_HOLE(bp))
			txh->txh_space_tounref += SPA_MAXBLOCKSIZE;
	}
}

void
dmu_tx_hold_sa_create(dmu_tx_t *tx, int attrsize)
{
	sa_os_t *sa = tx->tx_objset->os_sa;

	dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);

	if (tx->tx_objset->os_sa->sa_master_obj == 0)
		return;

	if (tx->tx_objset->os_sa->sa_layout_attr_obj)
		dmu_tx_hold_zap(tx, sa->sa_layout_attr_obj, B_TRUE, NULL);
	else {
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_LAYOUTS);
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_REGISTRY);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
	}

	dmu_tx_sa_registration_hold(sa, tx);

	if (attrsize <= DN_MAX_BONUSLEN && !sa->sa_force_spill)
		return;

	(void) dmu_tx_hold_object_impl(tx, tx->tx_objset, DMU_NEW_OBJECT,
	    THT_SPILL, 0, 0);
}

/*
 * Hold SA attribute
 *
 * dmu_tx_hold_sa(dmu_tx_t *tx, sa_handle_t *hdl, boolean_t may_grow)
 *
 * If may_grow is set, the object's SA data may grow beyond its current
 * size, so the layout ZAP and the spill block are held in addition to
 * the bonus buffer.
 */
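/*
 * For example (illustrative; "zp" stands in for a caller's znode),
 * updating an existing fixed-size attribute such as a timestamp:
 *
 *	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
 *
 * Callers pass B_TRUE instead when the update may enlarge the SA data.
 */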
void
dmu_tx_hold_sa(dmu_tx_t *tx, sa_handle_t *hdl, boolean_t may_grow)
{
	uint64_t object;
	sa_os_t *sa = tx->tx_objset->os_sa;

	ASSERT(hdl != NULL);

	object = sa_handle_object(hdl);

	dmu_tx_hold_bonus(tx, object);

	if (tx->tx_objset->os_sa->sa_master_obj == 0)
		return;

	if (tx->tx_objset->os_sa->sa_reg_attr_obj == 0 ||
	    tx->tx_objset->os_sa->sa_layout_attr_obj == 0) {
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_LAYOUTS);
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_REGISTRY);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
	}

	dmu_tx_sa_registration_hold(sa, tx);

	if (may_grow && tx->tx_objset->os_sa->sa_layout_attr_obj)
		dmu_tx_hold_zap(tx, sa->sa_layout_attr_obj, B_TRUE, NULL);

	if (sa->sa_force_spill || may_grow || hdl->sa_spill) {
		ASSERT(tx->tx_txg == 0);
		dmu_tx_hold_spill(tx, object);
	} else {
		dmu_buf_impl_t *db = (dmu_buf_impl_t *)hdl->sa_bonus;
		dnode_t *dn;

		DB_DNODE_ENTER(db);
		dn = DB_DNODE(db);
		if (dn->dn_have_spill) {
			ASSERT(tx->tx_txg == 0);
			dmu_tx_hold_spill(tx, object);
		}
		DB_DNODE_EXIT(db);
	}
}
1401