dmu_tx.c revision 256259
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
 * Copyright (c) 2013 by Delphix. All rights reserved.
 */

#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dbuf.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h> /* for dsl_dataset_block_freeable() */
#include <sys/dsl_dir.h> /* for dsl_dir_tempreserve_*() */
#include <sys/dsl_pool.h>
#include <sys/zap_impl.h> /* for fzap_default_block_shift */
#include <sys/spa.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/zfs_context.h>
#include <sys/varargs.h>

typedef void (*dmu_tx_hold_func_t)(dmu_tx_t *tx, struct dnode *dn,
    uint64_t arg1, uint64_t arg2);

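/*
 * Create a tx rooted at the given dsl_dir_t.  dd may be NULL, as in
 * dmu_tx_create_assigned(), for transactions not tied to a dataset.
 */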
dmu_tx_t *
dmu_tx_create_dd(dsl_dir_t *dd)
{
	dmu_tx_t *tx = kmem_zalloc(sizeof (dmu_tx_t), KM_SLEEP);
	tx->tx_dir = dd;
	if (dd != NULL)
		tx->tx_pool = dd->dd_pool;
	list_create(&tx->tx_holds, sizeof (dmu_tx_hold_t),
	    offsetof(dmu_tx_hold_t, txh_node));
	list_create(&tx->tx_callbacks, sizeof (dmu_tx_callback_t),
	    offsetof(dmu_tx_callback_t, dcb_node));
#ifdef ZFS_DEBUG
	refcount_create(&tx->tx_space_written);
	refcount_create(&tx->tx_space_freed);
#endif
	return (tx);
}

dmu_tx_t *
dmu_tx_create(objset_t *os)
{
	dmu_tx_t *tx = dmu_tx_create_dd(os->os_dsl_dataset->ds_dir);
	tx->tx_objset = os;
	tx->tx_lastsnap_txg = dsl_dataset_prev_snap_txg(os->os_dsl_dataset);
	return (tx);
}

dmu_tx_t *
dmu_tx_create_assigned(struct dsl_pool *dp, uint64_t txg)
{
	dmu_tx_t *tx = dmu_tx_create_dd(NULL);

	ASSERT3U(txg, <=, dp->dp_tx.tx_open_txg);
	tx->tx_pool = dp;
	tx->tx_txg = txg;
	tx->tx_anyobj = TRUE;

	return (tx);
}

int
dmu_tx_is_syncing(dmu_tx_t *tx)
{
	return (tx->tx_anyobj);
}

int
dmu_tx_private_ok(dmu_tx_t *tx)
{
	return (tx->tx_anyobj);
}

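/*
 * Common hold bookkeeping: acquire a dnode hold (unless object is
 * DMU_NEW_OBJECT), bind the dnode to this tx, and append a
 * dmu_tx_hold_t recording the intended operation.
 */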
static dmu_tx_hold_t *
dmu_tx_hold_object_impl(dmu_tx_t *tx, objset_t *os, uint64_t object,
    enum dmu_tx_hold_type type, uint64_t arg1, uint64_t arg2)
{
	dmu_tx_hold_t *txh;
	dnode_t *dn = NULL;
	int err;

	if (object != DMU_NEW_OBJECT) {
		err = dnode_hold(os, object, tx, &dn);
		if (err) {
			tx->tx_err = err;
			return (NULL);
		}

		if (tx->tx_txg != 0) {
			mutex_enter(&dn->dn_mtx);
			/*
			 * dn->dn_assigned_txg == tx->tx_txg doesn't pose a
			 * problem, but there's no way for it to happen (for
			 * now, at least).
			 */
			ASSERT(dn->dn_assigned_txg == 0);
			dn->dn_assigned_txg = tx->tx_txg;
			(void) refcount_add(&dn->dn_tx_holds, tx);
			mutex_exit(&dn->dn_mtx);
		}
	}

	txh = kmem_zalloc(sizeof (dmu_tx_hold_t), KM_SLEEP);
	txh->txh_tx = tx;
	txh->txh_dnode = dn;
#ifdef ZFS_DEBUG
	txh->txh_type = type;
	txh->txh_arg1 = arg1;
	txh->txh_arg2 = arg2;
#endif
	list_insert_tail(&tx->tx_holds, txh);

	return (txh);
}

void
dmu_tx_add_new_object(dmu_tx_t *tx, objset_t *os, uint64_t object)
{
	/*
	 * If we're syncing, they can manipulate any object anyhow, and
	 * the hold on the dnode_t can cause problems.
	 */
	if (!dmu_tx_is_syncing(tx)) {
		(void) dmu_tx_hold_object_impl(tx, os,
		    object, THT_NEWOBJECT, 0, 0);
	}
}

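/*
 * Read the given block now, in open context, so that any i/o error is
 * discovered while the tx can still fail cleanly, rather than in
 * syncing context where write errors are fatal.
 */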
static int
dmu_tx_check_ioerr(zio_t *zio, dnode_t *dn, int level, uint64_t blkid)
{
	int err;
	dmu_buf_impl_t *db;

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	db = dbuf_hold_level(dn, level, blkid, FTAG);
	rw_exit(&dn->dn_struct_rwlock);
	if (db == NULL)
		return (SET_ERROR(EIO));
	err = dbuf_read(db, zio, DB_RF_CANFAIL | DB_RF_NOPREFETCH);
	dbuf_rele(db, FTAG);
	return (err);
}

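/*
 * Charge one twig (the chain of indirects from a level-0 block up to
 * the dnode) to this hold's overwrite or write estimate, recursing to
 * the parent and using history[] to avoid counting a given level's
 * block more than once for adjacent twigs.
 */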
168
169static void
170dmu_tx_count_twig(dmu_tx_hold_t *txh, dnode_t *dn, dmu_buf_impl_t *db,
171    int level, uint64_t blkid, boolean_t freeable, uint64_t *history)
172{
173	objset_t *os = dn->dn_objset;
174	dsl_dataset_t *ds = os->os_dsl_dataset;
175	int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
176	dmu_buf_impl_t *parent = NULL;
177	blkptr_t *bp = NULL;
178	uint64_t space;
179
180	if (level >= dn->dn_nlevels || history[level] == blkid)
181		return;
182
183	history[level] = blkid;
184
185	space = (level == 0) ? dn->dn_datablksz : (1ULL << dn->dn_indblkshift);
186
187	if (db == NULL || db == dn->dn_dbuf) {
188		ASSERT(level != 0);
189		db = NULL;
190	} else {
191		ASSERT(DB_DNODE(db) == dn);
192		ASSERT(db->db_level == level);
193		ASSERT(db->db.db_size == space);
194		ASSERT(db->db_blkid == blkid);
195		bp = db->db_blkptr;
196		parent = db->db_parent;
197	}
198
199	freeable = (bp && (freeable ||
200	    dsl_dataset_block_freeable(ds, bp, bp->blk_birth)));
201
202	if (freeable)
203		txh->txh_space_tooverwrite += space;
204	else
205		txh->txh_space_towrite += space;
206	if (bp)
207		txh->txh_space_tounref += bp_get_dsize(os->os_spa, bp);
208
209	dmu_tx_count_twig(txh, dn, parent, level + 1,
210	    blkid >> epbs, freeable, history);
211}
212
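/*
 * Estimate the space needed to write 'len' bytes at 'off': charge the
 * data blocks and every indirect above them to txh_space_towrite, or
 * to txh_space_tooverwrite where the existing blocks are known to be
 * freeable.  If the dnode's block size is not yet fixed, assume
 * worst-case block and indirect-block sizes.
 */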
/* ARGSUSED */
static void
dmu_tx_count_write(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
{
	dnode_t *dn = txh->txh_dnode;
	uint64_t start, end, i;
	int min_bs, max_bs, min_ibs, max_ibs, epbs, bits;
	int err = 0;

	if (len == 0)
		return;

	min_bs = SPA_MINBLOCKSHIFT;
	max_bs = SPA_MAXBLOCKSHIFT;
	min_ibs = DN_MIN_INDBLKSHIFT;
	max_ibs = DN_MAX_INDBLKSHIFT;

	if (dn) {
		uint64_t history[DN_MAX_LEVELS];
		int nlvls = dn->dn_nlevels;
		int delta;

		/*
		 * For i/o error checking, read the first and last level-0
		 * blocks (if they are not aligned), and all the level-1 blocks.
		 */
		if (dn->dn_maxblkid == 0) {
			delta = dn->dn_datablksz;
			start = (off < dn->dn_datablksz) ? 0 : 1;
			end = (off+len <= dn->dn_datablksz) ? 0 : 1;
			if (start == 0 && (off > 0 || len < dn->dn_datablksz)) {
				err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
				if (err)
					goto out;
				delta -= off;
			}
		} else {
			zio_t *zio = zio_root(dn->dn_objset->os_spa,
			    NULL, NULL, ZIO_FLAG_CANFAIL);

			/* first level-0 block */
			start = off >> dn->dn_datablkshift;
			if (P2PHASE(off, dn->dn_datablksz) ||
			    len < dn->dn_datablksz) {
				err = dmu_tx_check_ioerr(zio, dn, 0, start);
				if (err)
					goto out;
			}

			/* last level-0 block */
			end = (off+len-1) >> dn->dn_datablkshift;
			if (end != start && end <= dn->dn_maxblkid &&
			    P2PHASE(off+len, dn->dn_datablksz)) {
				err = dmu_tx_check_ioerr(zio, dn, 0, end);
				if (err)
					goto out;
			}

			/* level-1 blocks */
			if (nlvls > 1) {
				int shft = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
				for (i = (start>>shft)+1; i < end>>shft; i++) {
					err = dmu_tx_check_ioerr(zio, dn, 1, i);
					if (err)
						goto out;
				}
			}

			err = zio_wait(zio);
			if (err)
				goto out;
			delta = P2NPHASE(off, dn->dn_datablksz);
		}

		min_ibs = max_ibs = dn->dn_indblkshift;
		if (dn->dn_maxblkid > 0) {
			/*
			 * The blocksize can't change,
			 * so we can make a more precise estimate.
			 */
			ASSERT(dn->dn_datablkshift != 0);
			min_bs = max_bs = dn->dn_datablkshift;
		}

		/*
		 * If this write is not off the end of the file
		 * we need to account for overwrites/unref.
		 */
		if (start <= dn->dn_maxblkid) {
			for (int l = 0; l < DN_MAX_LEVELS; l++)
				history[l] = -1ULL;
		}
		while (start <= dn->dn_maxblkid) {
			dmu_buf_impl_t *db;

			rw_enter(&dn->dn_struct_rwlock, RW_READER);
			err = dbuf_hold_impl(dn, 0, start, FALSE, FTAG, &db);
			rw_exit(&dn->dn_struct_rwlock);

			if (err) {
				txh->txh_tx->tx_err = err;
				return;
			}

			dmu_tx_count_twig(txh, dn, db, 0, start, B_FALSE,
			    history);
			dbuf_rele(db, FTAG);
			if (++start > end) {
				/*
				 * Account for new indirects appearing
				 * before this IO gets assigned into a txg.
				 */
				bits = 64 - min_bs;
				epbs = min_ibs - SPA_BLKPTRSHIFT;
				for (bits -= epbs * (nlvls - 1);
				    bits >= 0; bits -= epbs)
					txh->txh_fudge += 1ULL << max_ibs;
				goto out;
			}
			off += delta;
			if (len >= delta)
				len -= delta;
			delta = dn->dn_datablksz;
		}
	}

	/*
	 * 'end' is the last thing we will access, not one past.
	 * This way we won't overflow when accessing the last byte.
	 */
	start = P2ALIGN(off, 1ULL << max_bs);
	end = P2ROUNDUP(off + len, 1ULL << max_bs) - 1;
	txh->txh_space_towrite += end - start + 1;

	start >>= min_bs;
	end >>= min_bs;

	epbs = min_ibs - SPA_BLKPTRSHIFT;

	/*
	 * The object contains at most 2^(64 - min_bs) blocks,
	 * and each indirect level maps 2^epbs.
	 */
	for (bits = 64 - min_bs; bits >= 0; bits -= epbs) {
		start >>= epbs;
		end >>= epbs;
		ASSERT3U(end, >=, start);
		txh->txh_space_towrite += (end - start + 1) << max_ibs;
		if (start != 0) {
			/*
			 * We also need a new blkid=0 indirect block
			 * to reference any existing file data.
			 */
			txh->txh_space_towrite += 1ULL << max_ibs;
		}
	}

out:
	if (txh->txh_space_towrite + txh->txh_space_tooverwrite >
	    2 * DMU_MAX_ACCESS)
		err = SET_ERROR(EFBIG);

	if (err)
		txh->txh_tx->tx_err = err;
}

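/*
 * Charge the space needed to dirty this dnode in the meta-dnode: one
 * dnode block plus one indirect block at each meta-dnode level above
 * it.
 */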
static void
dmu_tx_count_dnode(dmu_tx_hold_t *txh)
{
	dnode_t *dn = txh->txh_dnode;
	dnode_t *mdn = DMU_META_DNODE(txh->txh_tx->tx_objset);
	uint64_t space = mdn->dn_datablksz +
	    ((mdn->dn_nlevels-1) << mdn->dn_indblkshift);

	if (dn && dn->dn_dbuf->db_blkptr &&
	    dsl_dataset_block_freeable(dn->dn_objset->os_dsl_dataset,
	    dn->dn_dbuf->db_blkptr, dn->dn_dbuf->db_blkptr->blk_birth)) {
		txh->txh_space_tooverwrite += space;
		txh->txh_space_tounref += space;
	} else {
		txh->txh_space_towrite += space;
		if (dn && dn->dn_dbuf->db_blkptr)
			txh->txh_space_tounref += space;
	}
}

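/*
 * Hold [off, off+len) of 'object' for write.  len must be less than
 * DMU_MAX_ACCESS; larger writes need multiple holds or transactions.
 */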
void
dmu_tx_hold_write(dmu_tx_t *tx, uint64_t object, uint64_t off, int len)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);
	ASSERT(len < DMU_MAX_ACCESS);
	ASSERT(len == 0 || UINT64_MAX - off >= len - 1);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_WRITE, off, len);
	if (txh == NULL)
		return;

	dmu_tx_count_write(txh, off, len);
	dmu_tx_count_dnode(txh);
}

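/*
 * Estimate the space reclaimed by freeing [off, off+len), along with
 * the indirect blocks that must be read (and thus held in memory) to
 * carry out the free in syncing context.
 */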
static void
dmu_tx_count_free(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
{
	uint64_t blkid, nblks, lastblk;
	uint64_t space = 0, unref = 0, skipped = 0;
	dnode_t *dn = txh->txh_dnode;
	dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
	spa_t *spa = txh->txh_tx->tx_pool->dp_spa;
	int epbs;
	uint64_t l0span = 0, nl1blks = 0;

	if (dn->dn_nlevels == 0)
		return;

	/*
	 * The struct_rwlock protects us against dn_nlevels
	 * changing, in case (against all odds) we manage to dirty &
	 * sync out the changes after we check for being dirty.
	 * Also, dbuf_hold_impl() wants us to have the struct_rwlock.
	 */
	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
	if (dn->dn_maxblkid == 0) {
		if (off == 0 && len >= dn->dn_datablksz) {
			blkid = 0;
			nblks = 1;
		} else {
			rw_exit(&dn->dn_struct_rwlock);
			return;
		}
	} else {
		blkid = off >> dn->dn_datablkshift;
		nblks = (len + dn->dn_datablksz - 1) >> dn->dn_datablkshift;

		if (blkid > dn->dn_maxblkid) {
			rw_exit(&dn->dn_struct_rwlock);
			return;
		}
		if (blkid + nblks > dn->dn_maxblkid)
			nblks = dn->dn_maxblkid - blkid + 1;
	}
	l0span = nblks;    /* save for later use to calc level > 1 overhead */
	if (dn->dn_nlevels == 1) {
		int i;
		for (i = 0; i < nblks; i++) {
			blkptr_t *bp = dn->dn_phys->dn_blkptr;
			ASSERT3U(blkid + i, <, dn->dn_nblkptr);
			bp += blkid + i;
			if (dsl_dataset_block_freeable(ds, bp, bp->blk_birth)) {
				dprintf_bp(bp, "can free old%s", "");
				space += bp_get_dsize(spa, bp);
			}
			unref += BP_GET_ASIZE(bp);
		}
		nl1blks = 1;
		nblks = 0;
	}

	lastblk = blkid + nblks - 1;
	while (nblks) {
		dmu_buf_impl_t *dbuf;
		uint64_t ibyte, new_blkid;
		int epb = 1 << epbs;
		int err, i, blkoff, tochk;
		blkptr_t *bp;

		ibyte = blkid << dn->dn_datablkshift;
		err = dnode_next_offset(dn,
		    DNODE_FIND_HAVELOCK, &ibyte, 2, 1, 0);
		new_blkid = ibyte >> dn->dn_datablkshift;
		if (err == ESRCH) {
			skipped += (lastblk >> epbs) - (blkid >> epbs) + 1;
			break;
		}
		if (err) {
			txh->txh_tx->tx_err = err;
			break;
		}
		if (new_blkid > lastblk) {
			skipped += (lastblk >> epbs) - (blkid >> epbs) + 1;
			break;
		}

		if (new_blkid > blkid) {
			ASSERT((new_blkid >> epbs) > (blkid >> epbs));
			skipped += (new_blkid >> epbs) - (blkid >> epbs) - 1;
			nblks -= new_blkid - blkid;
			blkid = new_blkid;
		}
		blkoff = P2PHASE(blkid, epb);
		tochk = MIN(epb - blkoff, nblks);

		err = dbuf_hold_impl(dn, 1, blkid >> epbs, FALSE, FTAG, &dbuf);
		if (err) {
			txh->txh_tx->tx_err = err;
			break;
		}

		txh->txh_memory_tohold += dbuf->db.db_size;

		/*
		 * We don't check memory_tohold against DMU_MAX_ACCESS because
		 * memory_tohold is an over-estimation (especially the >L1
		 * indirect blocks), so it could fail.  Callers should have
		 * already verified that they will not be holding too much
		 * memory.
		 */

		err = dbuf_read(dbuf, NULL, DB_RF_HAVESTRUCT | DB_RF_CANFAIL);
		if (err != 0) {
			txh->txh_tx->tx_err = err;
			dbuf_rele(dbuf, FTAG);
			break;
		}

		bp = dbuf->db.db_data;
		bp += blkoff;

		for (i = 0; i < tochk; i++) {
			if (dsl_dataset_block_freeable(ds, &bp[i],
			    bp[i].blk_birth)) {
				dprintf_bp(&bp[i], "can free old%s", "");
				space += bp_get_dsize(spa, &bp[i]);
			}
			unref += BP_GET_ASIZE(&bp[i]);
		}
		dbuf_rele(dbuf, FTAG);

		++nl1blks;
		blkid += tochk;
		nblks -= tochk;
	}
	rw_exit(&dn->dn_struct_rwlock);

	/*
	 * Add in memory requirements of higher-level indirects.
	 * This assumes a worst-possible scenario for dn_nlevels and a
	 * worst-possible distribution of l1-blocks over the region to free.
	 */
	{
		uint64_t blkcnt = 1 + ((l0span >> epbs) >> epbs);
		int level = 2;
		/*
		 * Here we don't use DN_MAX_LEVEL, but calculate it with the
		 * given datablkshift and indblkshift. This makes the
		 * difference between 19 and 8 on large files.
		 */
		int maxlevel = 2 + (DN_MAX_OFFSET_SHIFT - dn->dn_datablkshift) /
		    (dn->dn_indblkshift - SPA_BLKPTRSHIFT);

		while (level++ < maxlevel) {
			txh->txh_memory_tohold += MAX(MIN(blkcnt, nl1blks), 1)
			    << dn->dn_indblkshift;
			blkcnt = 1 + (blkcnt >> epbs);
		}
	}

	/* account for new level 1 indirect blocks that might show up */
	if (skipped > 0) {
		txh->txh_fudge += skipped << dn->dn_indblkshift;
		skipped = MIN(skipped, DMU_MAX_DELETEBLKCNT >> epbs);
		txh->txh_memory_tohold += skipped << dn->dn_indblkshift;
	}
	txh->txh_space_tofree += space;
	txh->txh_space_tounref += unref;
}

void
dmu_tx_hold_free(dmu_tx_t *tx, uint64_t object, uint64_t off, uint64_t len)
{
	dmu_tx_hold_t *txh;
	dnode_t *dn;
	int err;
	zio_t *zio;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_FREE, off, len);
	if (txh == NULL)
		return;
	dn = txh->txh_dnode;

	if (off >= (dn->dn_maxblkid+1) * dn->dn_datablksz)
		return;
	if (len == DMU_OBJECT_END)
		len = (dn->dn_maxblkid+1) * dn->dn_datablksz - off;

	dmu_tx_count_dnode(txh);

	/*
	 * For i/o error checking, we read the first and last level-0
	 * blocks if they are not aligned, and all the level-1 blocks.
	 *
	 * Note:  dbuf_free_range() assumes that we have not instantiated
	 * any level-0 dbufs that will be completely freed.  Therefore we must
	 * exercise care to not read or count the first and last blocks
	 * if they are blocksize-aligned.
	 */
	if (dn->dn_datablkshift == 0) {
		if (off != 0 || len < dn->dn_datablksz)
			dmu_tx_count_write(txh, 0, dn->dn_datablksz);
	} else {
		/* first block will be modified if it is not aligned */
		if (!IS_P2ALIGNED(off, 1 << dn->dn_datablkshift))
			dmu_tx_count_write(txh, off, 1);
		/* last block will be modified if it is not aligned */
		if (!IS_P2ALIGNED(off + len, 1 << dn->dn_datablkshift))
			dmu_tx_count_write(txh, off+len, 1);
	}

	/*
	 * Check level-1 blocks.
	 */
	if (dn->dn_nlevels > 1) {
		int shift = dn->dn_datablkshift + dn->dn_indblkshift -
		    SPA_BLKPTRSHIFT;
		uint64_t start = off >> shift;
		uint64_t end = (off + len) >> shift;

		ASSERT(dn->dn_datablkshift != 0);
		ASSERT(dn->dn_indblkshift != 0);

		zio = zio_root(tx->tx_pool->dp_spa,
		    NULL, NULL, ZIO_FLAG_CANFAIL);
		for (uint64_t i = start; i <= end; i++) {
			uint64_t ibyte = i << shift;
			err = dnode_next_offset(dn, 0, &ibyte, 2, 1, 0);
			i = ibyte >> shift;
			if (err == ESRCH)
				break;
			if (err) {
				tx->tx_err = err;
				return;
			}

			err = dmu_tx_check_ioerr(zio, dn, 1, i);
			if (err) {
				tx->tx_err = err;
				return;
			}
		}
		err = zio_wait(zio);
		if (err) {
			tx->tx_err = err;
			return;
		}
	}

	dmu_tx_count_free(txh, off, len);
}

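/*
 * Hold a ZAP object for modification.  'add' is nonzero if entries may
 * be added; 'name', if non-NULL, is the entry being touched, which lets
 * us check for i/o errors on exactly the leaf blocks involved.
 */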
void
dmu_tx_hold_zap(dmu_tx_t *tx, uint64_t object, int add, const char *name)
{
	dmu_tx_hold_t *txh;
	dnode_t *dn;
	uint64_t nblocks;
	int epbs, err;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_ZAP, add, (uintptr_t)name);
	if (txh == NULL)
		return;
	dn = txh->txh_dnode;

	dmu_tx_count_dnode(txh);

	if (dn == NULL) {
		/*
		 * We will be able to fit a new object's entries into one leaf
		 * block.  So there will be at most 2 blocks total,
		 * including the header block.
		 */
		dmu_tx_count_write(txh, 0, 2 << fzap_default_block_shift);
		return;
	}

	ASSERT3P(DMU_OT_BYTESWAP(dn->dn_type), ==, DMU_BSWAP_ZAP);

	if (dn->dn_maxblkid == 0 && !add) {
		blkptr_t *bp;

		/*
		 * If there is only one block (i.e. this is a micro-zap)
		 * and we are not adding anything, the accounting is simple.
		 */
		err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
		if (err) {
			tx->tx_err = err;
			return;
		}

		/*
		 * Use max block size here, since we don't know how much
		 * the size will change between now and the dbuf dirty call.
		 */
		bp = &dn->dn_phys->dn_blkptr[0];
		if (dsl_dataset_block_freeable(dn->dn_objset->os_dsl_dataset,
		    bp, bp->blk_birth))
			txh->txh_space_tooverwrite += SPA_MAXBLOCKSIZE;
		else
			txh->txh_space_towrite += SPA_MAXBLOCKSIZE;
		if (!BP_IS_HOLE(bp))
			txh->txh_space_tounref += SPA_MAXBLOCKSIZE;
		return;
	}

	if (dn->dn_maxblkid > 0 && name) {
		/*
		 * Access the name in this fat-zap so that we'll check
		 * for i/o errors to the leaf blocks, etc.
		 */
		err = zap_lookup(dn->dn_objset, dn->dn_object, name,
		    8, 0, NULL);
		if (err == EIO) {
			tx->tx_err = err;
			return;
		}
	}

	err = zap_count_write(dn->dn_objset, dn->dn_object, name, add,
	    &txh->txh_space_towrite, &txh->txh_space_tooverwrite);

	/*
	 * If the modified blocks are scattered to the four winds,
	 * we'll have to modify an indirect twig for each.
	 */
	epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
	for (nblocks = dn->dn_maxblkid >> epbs; nblocks != 0; nblocks >>= epbs)
		if (dn->dn_objset->os_dsl_dataset->ds_phys->ds_prev_snap_obj)
			txh->txh_space_towrite += 3 << dn->dn_indblkshift;
		else
			txh->txh_space_tooverwrite += 3 << dn->dn_indblkshift;
}

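/* Hold the bonus buffer (and therefore the dnode) of 'object'. */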
void
dmu_tx_hold_bonus(dmu_tx_t *tx, uint64_t object)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_BONUS, 0, 0);
	if (txh)
		dmu_tx_count_dnode(txh);
}

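/* Reserve 'space' bytes of raw write space, not tied to any object. */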
void
dmu_tx_hold_space(dmu_tx_t *tx, uint64_t space)
{
	dmu_tx_hold_t *txh;
	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    DMU_NEW_OBJECT, THT_SPACE, space, 0);

	txh->txh_space_towrite += space;
}

int
dmu_tx_holds(dmu_tx_t *tx, uint64_t object)
{
	dmu_tx_hold_t *txh;
	int holds = 0;

	/*
	 * By asserting that the tx is assigned, we're counting the
	 * number of dn_tx_holds, which is the same as the number of
	 * dn_holds.  Otherwise, we'd be counting dn_holds, but
	 * dn_tx_holds could be 0.
	 */
	ASSERT(tx->tx_txg != 0);

	/* if (tx->tx_anyobj == TRUE) */
		/* return (0); */

	for (txh = list_head(&tx->tx_holds); txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		if (txh->txh_dnode && txh->txh_dnode->dn_object == object)
			holds++;
	}

	return (holds);
}

#ifdef ZFS_DEBUG
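/*
 * Debug-only check that the dbuf being dirtied is covered by some hold
 * on this tx; panics otherwise, since dirtying an unheld buffer means
 * the space accounting above was bypassed.
 */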
void
dmu_tx_dirty_buf(dmu_tx_t *tx, dmu_buf_impl_t *db)
{
	dmu_tx_hold_t *txh;
	int match_object = FALSE, match_offset = FALSE;
	dnode_t *dn;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	ASSERT(tx->tx_txg != 0);
	ASSERT(tx->tx_objset == NULL || dn->dn_objset == tx->tx_objset);
	ASSERT3U(dn->dn_object, ==, db->db.db_object);

	if (tx->tx_anyobj) {
		DB_DNODE_EXIT(db);
		return;
	}

	/* XXX No checking on the meta dnode for now */
	if (db->db.db_object == DMU_META_DNODE_OBJECT) {
		DB_DNODE_EXIT(db);
		return;
	}

	for (txh = list_head(&tx->tx_holds); txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		ASSERT(dn == NULL || dn->dn_assigned_txg == tx->tx_txg);
		if (txh->txh_dnode == dn && txh->txh_type != THT_NEWOBJECT)
			match_object = TRUE;
		if (txh->txh_dnode == NULL || txh->txh_dnode == dn) {
			int datablkshift = dn->dn_datablkshift ?
			    dn->dn_datablkshift : SPA_MAXBLOCKSHIFT;
			int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
			int shift = datablkshift + epbs * db->db_level;
			uint64_t beginblk = shift >= 64 ? 0 :
			    (txh->txh_arg1 >> shift);
			uint64_t endblk = shift >= 64 ? 0 :
			    ((txh->txh_arg1 + txh->txh_arg2 - 1) >> shift);
			uint64_t blkid = db->db_blkid;

			/* XXX txh_arg2 better not be zero... */

			dprintf("found txh type %x beginblk=%llx endblk=%llx\n",
			    txh->txh_type, beginblk, endblk);

			switch (txh->txh_type) {
			case THT_WRITE:
				if (blkid >= beginblk && blkid <= endblk)
					match_offset = TRUE;
				/*
				 * We will let this hold work for the bonus
				 * or spill buffer so that we don't need to
				 * hold it when creating a new object.
				 */
				if (blkid == DMU_BONUS_BLKID ||
				    blkid == DMU_SPILL_BLKID)
					match_offset = TRUE;
				/*
				 * They might have to increase nlevels,
				 * thus dirtying the new TLIBs.  Or they
				 * might have to change the block size,
				 * thus dirtying the new lvl=0 blk=0.
				 */
				if (blkid == 0)
					match_offset = TRUE;
				break;
			case THT_FREE:
				/*
				 * We will dirty all the level 1 blocks in
				 * the free range and perhaps the first and
				 * last level 0 block.
				 */
				if (blkid >= beginblk && (blkid <= endblk ||
				    txh->txh_arg2 == DMU_OBJECT_END))
					match_offset = TRUE;
				break;
			case THT_SPILL:
				if (blkid == DMU_SPILL_BLKID)
					match_offset = TRUE;
				break;
			case THT_BONUS:
				if (blkid == DMU_BONUS_BLKID)
					match_offset = TRUE;
				break;
			case THT_ZAP:
				match_offset = TRUE;
				break;
			case THT_NEWOBJECT:
				match_object = TRUE;
				break;
			default:
				ASSERT(!"bad txh_type");
			}
		}
		if (match_object && match_offset) {
			DB_DNODE_EXIT(db);
			return;
		}
	}
	DB_DNODE_EXIT(db);
	panic("dirtying dbuf obj=%llx lvl=%u blkid=%llx but not tx_held\n",
	    (u_longlong_t)db->db.db_object, db->db_level,
	    (u_longlong_t)db->db_blkid);
}
#endif

static int
dmu_tx_try_assign(dmu_tx_t *tx, txg_how_t txg_how)
{
	dmu_tx_hold_t *txh;
	spa_t *spa = tx->tx_pool->dp_spa;
	uint64_t memory, asize, fsize, usize;
	uint64_t towrite, tofree, tooverwrite, tounref, tohold, fudge;

	ASSERT0(tx->tx_txg);

	if (tx->tx_err)
		return (tx->tx_err);

	if (spa_suspended(spa)) {
		/*
		 * If the user has indicated a blocking failure mode
		 * then return ERESTART which will block in dmu_tx_wait().
		 * Otherwise, return EIO so that an error can get
		 * propagated back to the VOP calls.
		 *
		 * Note that we always honor the txg_how flag regardless
		 * of the failuremode setting.
		 */
		if (spa_get_failmode(spa) == ZIO_FAILURE_MODE_CONTINUE &&
		    txg_how != TXG_WAIT)
			return (SET_ERROR(EIO));

		return (SET_ERROR(ERESTART));
	}

	tx->tx_txg = txg_hold_open(tx->tx_pool, &tx->tx_txgh);
	tx->tx_needassign_txh = NULL;

	/*
	 * NB: No error returns are allowed after txg_hold_open, but
	 * before processing the dnode holds, due to the
	 * dmu_tx_unassign() logic.
	 */

	towrite = tofree = tooverwrite = tounref = tohold = fudge = 0;
	for (txh = list_head(&tx->tx_holds); txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		dnode_t *dn = txh->txh_dnode;
		if (dn != NULL) {
			mutex_enter(&dn->dn_mtx);
			if (dn->dn_assigned_txg == tx->tx_txg - 1) {
				mutex_exit(&dn->dn_mtx);
				tx->tx_needassign_txh = txh;
				return (SET_ERROR(ERESTART));
			}
			if (dn->dn_assigned_txg == 0)
				dn->dn_assigned_txg = tx->tx_txg;
			ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);
			(void) refcount_add(&dn->dn_tx_holds, tx);
			mutex_exit(&dn->dn_mtx);
		}
		towrite += txh->txh_space_towrite;
		tofree += txh->txh_space_tofree;
		tooverwrite += txh->txh_space_tooverwrite;
		tounref += txh->txh_space_tounref;
		tohold += txh->txh_memory_tohold;
		fudge += txh->txh_fudge;
	}

	/*
	 * If a snapshot has been taken since we made our estimates,
	 * assume that we won't be able to free or overwrite anything.
	 */
	if (tx->tx_objset &&
	    dsl_dataset_prev_snap_txg(tx->tx_objset->os_dsl_dataset) >
	    tx->tx_lastsnap_txg) {
		towrite += tooverwrite;
		tooverwrite = tofree = 0;
	}

	/* needed allocation: worst-case estimate of write space */
	asize = spa_get_asize(tx->tx_pool->dp_spa, towrite + tooverwrite);
	/* freed space estimate: worst-case overwrite + free estimate */
	fsize = spa_get_asize(tx->tx_pool->dp_spa, tooverwrite) + tofree;
	/* convert unrefd space to worst-case estimate */
	usize = spa_get_asize(tx->tx_pool->dp_spa, tounref);
	/* calculate memory footprint estimate */
	memory = towrite + tooverwrite + tohold;

#ifdef ZFS_DEBUG
	/*
	 * Add in 'tohold' to account for our dirty holds on this memory
	 * XXX - the "fudge" factor is to account for skipped blocks that
	 * we missed because dnode_next_offset() misses in-core-only blocks.
	 */
	tx->tx_space_towrite = asize +
	    spa_get_asize(tx->tx_pool->dp_spa, tohold + fudge);
	tx->tx_space_tofree = tofree;
	tx->tx_space_tooverwrite = tooverwrite;
	tx->tx_space_tounref = tounref;
#endif

	if (tx->tx_dir && asize != 0) {
		int err = dsl_dir_tempreserve_space(tx->tx_dir, memory,
		    asize, fsize, usize, &tx->tx_tempreserve_cookie, tx);
		if (err)
			return (err);
	}

	return (0);
}

static void
dmu_tx_unassign(dmu_tx_t *tx)
{
	dmu_tx_hold_t *txh;

	if (tx->tx_txg == 0)
		return;

	txg_rele_to_quiesce(&tx->tx_txgh);

	/*
	 * Walk the transaction's hold list, removing the hold on the
	 * associated dnode, and notifying waiters if the refcount drops to 0.
	 */
	for (txh = list_head(&tx->tx_holds); txh != tx->tx_needassign_txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		dnode_t *dn = txh->txh_dnode;

		if (dn == NULL)
			continue;
		mutex_enter(&dn->dn_mtx);
		ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);

		if (refcount_remove(&dn->dn_tx_holds, tx) == 0) {
			dn->dn_assigned_txg = 0;
			cv_broadcast(&dn->dn_notxholds);
		}
		mutex_exit(&dn->dn_mtx);
	}

	txg_rele_to_sync(&tx->tx_txgh);

	tx->tx_lasttried_txg = tx->tx_txg;
	tx->tx_txg = 0;
}

/*
 * Assign tx to a transaction group.  txg_how can be one of:
 *
 * (1)	TXG_WAIT.  If the current open txg is full, waits until there's
 *	a new one.  This should be used when you're not holding locks.
 *	It will only fail if we're truly out of space (or over quota).
 *
 * (2)	TXG_NOWAIT.  If we can't assign into the current open txg without
 *	blocking, returns immediately with ERESTART.  This should be used
 *	whenever you're holding locks.  On an ERESTART error, the caller
 *	should drop locks, do a dmu_tx_wait(tx), and try again.
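 *
 * A typical TXG_NOWAIT consumer looks like the following sketch, which
 * mirrors the pattern used by callers such as zfs_vnops.c (illustrative
 * only; os, object, off, len and buf stand in for caller state, and any
 * locks must be dropped before retrying):
 *
 * top:
 *	tx = dmu_tx_create(os);
 *	dmu_tx_hold_write(tx, object, off, len);
 *	err = dmu_tx_assign(tx, TXG_NOWAIT);
 *	if (err != 0) {
 *		if (err == ERESTART) {
 *			dmu_tx_wait(tx);
 *			dmu_tx_abort(tx);
 *			goto top;
 *		}
 *		dmu_tx_abort(tx);
 *		return (err);
 *	}
 *	dmu_write(os, object, off, len, buf, tx);
 *	dmu_tx_commit(tx);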
 */
int
dmu_tx_assign(dmu_tx_t *tx, txg_how_t txg_how)
{
	int err;

	ASSERT(tx->tx_txg == 0);
	ASSERT(txg_how == TXG_WAIT || txg_how == TXG_NOWAIT);
	ASSERT(!dsl_pool_sync_context(tx->tx_pool));

	/* If we might wait, we must not hold the config lock. */
	ASSERT(txg_how != TXG_WAIT || !dsl_pool_config_held(tx->tx_pool));

	while ((err = dmu_tx_try_assign(tx, txg_how)) != 0) {
		dmu_tx_unassign(tx);

		if (err != ERESTART || txg_how != TXG_WAIT)
			return (err);

		dmu_tx_wait(tx);
	}

	txg_rele_to_quiesce(&tx->tx_txgh);

	return (0);
}

void
dmu_tx_wait(dmu_tx_t *tx)
{
	spa_t *spa = tx->tx_pool->dp_spa;

	ASSERT(tx->tx_txg == 0);
	ASSERT(!dsl_pool_config_held(tx->tx_pool));

	/*
	 * It's possible that the pool has become active after this thread
	 * has tried to obtain a tx.  If that's the case then its
	 * tx_lasttried_txg would not have been assigned.
	 */
	if (spa_suspended(spa) || tx->tx_lasttried_txg == 0) {
		txg_wait_synced(tx->tx_pool, spa_last_synced_txg(spa) + 1);
	} else if (tx->tx_needassign_txh) {
		dnode_t *dn = tx->tx_needassign_txh->txh_dnode;

		mutex_enter(&dn->dn_mtx);
		while (dn->dn_assigned_txg == tx->tx_lasttried_txg - 1)
			cv_wait(&dn->dn_notxholds, &dn->dn_mtx);
		mutex_exit(&dn->dn_mtx);
		tx->tx_needassign_txh = NULL;
	} else {
		txg_wait_open(tx->tx_pool, tx->tx_lasttried_txg + 1);
	}
}

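/*
 * Debug accounting: note space this tx is now known to use, so that
 * dmu_tx_commit() can compare actual usage against the estimates.
 */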
void
dmu_tx_willuse_space(dmu_tx_t *tx, int64_t delta)
{
#ifdef ZFS_DEBUG
	if (tx->tx_dir == NULL || delta == 0)
		return;

	if (delta > 0) {
		ASSERT3U(refcount_count(&tx->tx_space_written) + delta, <=,
		    tx->tx_space_towrite);
		(void) refcount_add_many(&tx->tx_space_written, delta, NULL);
	} else {
		(void) refcount_add_many(&tx->tx_space_freed, -delta, NULL);
	}
#endif
}

void
dmu_tx_commit(dmu_tx_t *tx)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg != 0);

	/*
	 * Go through the transaction's hold list and remove holds on
	 * associated dnodes, notifying waiters if no holds remain.
	 */
	while ((txh = list_head(&tx->tx_holds)) != NULL) {
		dnode_t *dn = txh->txh_dnode;

		list_remove(&tx->tx_holds, txh);
		kmem_free(txh, sizeof (dmu_tx_hold_t));
		if (dn == NULL)
			continue;
		mutex_enter(&dn->dn_mtx);
		ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);

		if (refcount_remove(&dn->dn_tx_holds, tx) == 0) {
			dn->dn_assigned_txg = 0;
			cv_broadcast(&dn->dn_notxholds);
		}
		mutex_exit(&dn->dn_mtx);
		dnode_rele(dn, tx);
	}

	if (tx->tx_tempreserve_cookie)
		dsl_dir_tempreserve_clear(tx->tx_tempreserve_cookie, tx);

	if (!list_is_empty(&tx->tx_callbacks))
		txg_register_callbacks(&tx->tx_txgh, &tx->tx_callbacks);

	if (tx->tx_anyobj == FALSE)
		txg_rele_to_sync(&tx->tx_txgh);

	list_destroy(&tx->tx_callbacks);
	list_destroy(&tx->tx_holds);
#ifdef ZFS_DEBUG
	dprintf("towrite=%llu written=%llu tofree=%llu freed=%llu\n",
	    tx->tx_space_towrite, refcount_count(&tx->tx_space_written),
	    tx->tx_space_tofree, refcount_count(&tx->tx_space_freed));
	refcount_destroy_many(&tx->tx_space_written,
	    refcount_count(&tx->tx_space_written));
	refcount_destroy_many(&tx->tx_space_freed,
	    refcount_count(&tx->tx_space_freed));
#endif
	kmem_free(tx, sizeof (dmu_tx_t));
}

void
dmu_tx_abort(dmu_tx_t *tx)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);

	while ((txh = list_head(&tx->tx_holds)) != NULL) {
		dnode_t *dn = txh->txh_dnode;

		list_remove(&tx->tx_holds, txh);
		kmem_free(txh, sizeof (dmu_tx_hold_t));
		if (dn != NULL)
			dnode_rele(dn, tx);
	}

	/*
	 * Call any registered callbacks with an error code.
	 */
	if (!list_is_empty(&tx->tx_callbacks))
		dmu_tx_do_callbacks(&tx->tx_callbacks, ECANCELED);

	list_destroy(&tx->tx_callbacks);
	list_destroy(&tx->tx_holds);
#ifdef ZFS_DEBUG
	refcount_destroy_many(&tx->tx_space_written,
	    refcount_count(&tx->tx_space_written));
	refcount_destroy_many(&tx->tx_space_freed,
	    refcount_count(&tx->tx_space_freed));
#endif
	kmem_free(tx, sizeof (dmu_tx_t));
}

uint64_t
dmu_tx_get_txg(dmu_tx_t *tx)
{
	ASSERT(tx->tx_txg != 0);
	return (tx->tx_txg);
}

dsl_pool_t *
dmu_tx_pool(dmu_tx_t *tx)
{
	ASSERT(tx->tx_pool != NULL);
	return (tx->tx_pool);
}

void
dmu_tx_callback_register(dmu_tx_t *tx, dmu_tx_callback_func_t *func, void *data)
{
	dmu_tx_callback_t *dcb;

	dcb = kmem_alloc(sizeof (dmu_tx_callback_t), KM_SLEEP);

	dcb->dcb_func = func;
	dcb->dcb_data = data;

	list_insert_tail(&tx->tx_callbacks, dcb);
}
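
/*
 * Illustrative use: arrange for a caller-supplied cb_done(arg, error)
 * to run once the txg carrying this tx syncs (error == 0) or the tx
 * is aborted (error == ECANCELED):
 *
 *	dmu_tx_callback_register(tx, cb_done, arg);
 */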

/*
 * Call all the commit callbacks on a list, with a given error code.
 */
void
dmu_tx_do_callbacks(list_t *cb_list, int error)
{
	dmu_tx_callback_t *dcb;

	while ((dcb = list_head(cb_list)) != NULL) {
		list_remove(cb_list, dcb);
		dcb->dcb_func(dcb->dcb_data, error);
		kmem_free(dcb, sizeof (dmu_tx_callback_t));
	}
}

/*
 * Interface to hold a bunch of attributes.  Used for creating new
 * files.  attrsize is the total size of all attributes to be added
 * during object creation.
 *
 * For updating/adding a single attribute dmu_tx_hold_sa() should be used.
 */

/*
 * Hold the necessary attribute name for attribute registration.
 * This should be a very rare case where this is needed.  If it does
 * happen it would only happen on the first write to the file system.
 */
static void
dmu_tx_sa_registration_hold(sa_os_t *sa, dmu_tx_t *tx)
{
	int i;

	if (!sa->sa_need_attr_registration)
		return;

	for (i = 0; i != sa->sa_num_attrs; i++) {
		if (!sa->sa_attr_table[i].sa_registered) {
			if (sa->sa_reg_attr_obj)
				dmu_tx_hold_zap(tx, sa->sa_reg_attr_obj,
				    B_TRUE, sa->sa_attr_table[i].sa_name);
			else
				dmu_tx_hold_zap(tx, DMU_NEW_OBJECT,
				    B_TRUE, sa->sa_attr_table[i].sa_name);
		}
	}
}

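/*
 * Hold the spill block of 'object' for update, charging a full
 * worst-case block since the spill's eventual size is not known here.
 */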
void
dmu_tx_hold_spill(dmu_tx_t *tx, uint64_t object)
{
	dnode_t *dn;
	dmu_tx_hold_t *txh;

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset, object,
	    THT_SPILL, 0, 0);

	dn = txh->txh_dnode;

	if (dn == NULL)
		return;

	/* If blkptr doesn't exist then add space to towrite */
	if (!(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR)) {
		txh->txh_space_towrite += SPA_MAXBLOCKSIZE;
	} else {
		blkptr_t *bp;

		bp = &dn->dn_phys->dn_spill;
		if (dsl_dataset_block_freeable(dn->dn_objset->os_dsl_dataset,
		    bp, bp->blk_birth))
			txh->txh_space_tooverwrite += SPA_MAXBLOCKSIZE;
		else
			txh->txh_space_towrite += SPA_MAXBLOCKSIZE;
		if (!BP_IS_HOLE(bp))
			txh->txh_space_tounref += SPA_MAXBLOCKSIZE;
	}
}

void
dmu_tx_hold_sa_create(dmu_tx_t *tx, int attrsize)
{
	sa_os_t *sa = tx->tx_objset->os_sa;

	dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);

	if (tx->tx_objset->os_sa->sa_master_obj == 0)
		return;

	if (tx->tx_objset->os_sa->sa_layout_attr_obj)
		dmu_tx_hold_zap(tx, sa->sa_layout_attr_obj, B_TRUE, NULL);
	else {
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_LAYOUTS);
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_REGISTRY);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
	}

	dmu_tx_sa_registration_hold(sa, tx);

	if (attrsize <= DN_MAX_BONUSLEN && !sa->sa_force_spill)
		return;

	(void) dmu_tx_hold_object_impl(tx, tx->tx_objset, DMU_NEW_OBJECT,
	    THT_SPILL, 0, 0);
}

/*
 * Hold the SA attributes of the object managed by 'hdl' for update.
 * 'may_grow' indicates that the attribute data may grow, in which case
 * the layout ZAP and the spill block must be held to cover the worst
 * case.
 */
void
dmu_tx_hold_sa(dmu_tx_t *tx, sa_handle_t *hdl, boolean_t may_grow)
{
	uint64_t object;
	sa_os_t *sa = tx->tx_objset->os_sa;

	ASSERT(hdl != NULL);

	object = sa_handle_object(hdl);

	dmu_tx_hold_bonus(tx, object);

	if (tx->tx_objset->os_sa->sa_master_obj == 0)
		return;

	if (tx->tx_objset->os_sa->sa_reg_attr_obj == 0 ||
	    tx->tx_objset->os_sa->sa_layout_attr_obj == 0) {
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_LAYOUTS);
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_REGISTRY);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
	}

	dmu_tx_sa_registration_hold(sa, tx);

	if (may_grow && tx->tx_objset->os_sa->sa_layout_attr_obj)
		dmu_tx_hold_zap(tx, sa->sa_layout_attr_obj, B_TRUE, NULL);

	if (sa->sa_force_spill || may_grow || hdl->sa_spill) {
		ASSERT(tx->tx_txg == 0);
		dmu_tx_hold_spill(tx, object);
	} else {
		dmu_buf_impl_t *db = (dmu_buf_impl_t *)hdl->sa_bonus;
		dnode_t *dn;

		DB_DNODE_ENTER(db);
		dn = DB_DNODE(db);
		if (dn->dn_have_spill) {
			ASSERT(tx->tx_txg == 0);
			dmu_tx_hold_spill(tx, object);
		}
		DB_DNODE_EXIT(db);
	}
}
1415