dmu_tx.c revision 240133
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
 * Copyright (c) 2012 by Delphix. All rights reserved.
 */

#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dbuf.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h> /* for dsl_dataset_block_freeable() */
#include <sys/dsl_dir.h> /* for dsl_dir_tempreserve_*() */
#include <sys/dsl_pool.h>
#include <sys/zap_impl.h> /* for fzap_default_block_shift */
#include <sys/spa.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/zfs_context.h>
#include <sys/varargs.h>

typedef void (*dmu_tx_hold_func_t)(dmu_tx_t *tx, struct dnode *dn,
    uint64_t arg1, uint64_t arg2);


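/*
 * Create a transaction against the given dsl_dir (which may be NULL, as
 * when called from dmu_tx_create_assigned()).  The tx starts out with
 * empty hold and callback lists.
 */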
dmu_tx_t *
dmu_tx_create_dd(dsl_dir_t *dd)
{
	dmu_tx_t *tx = kmem_zalloc(sizeof (dmu_tx_t), KM_SLEEP);
	tx->tx_dir = dd;
	if (dd)
		tx->tx_pool = dd->dd_pool;
	list_create(&tx->tx_holds, sizeof (dmu_tx_hold_t),
	    offsetof(dmu_tx_hold_t, txh_node));
	list_create(&tx->tx_callbacks, sizeof (dmu_tx_callback_t),
	    offsetof(dmu_tx_callback_t, dcb_node));
#ifdef ZFS_DEBUG
	refcount_create(&tx->tx_space_written);
	refcount_create(&tx->tx_space_freed);
#endif
	return (tx);
}

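/*
 * Create a transaction against the objset's dsl_dir and record the most
 * recent snapshot txg, so dmu_tx_try_assign() can tell whether a snapshot
 * was taken after the space estimates were made.
 */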
dmu_tx_t *
dmu_tx_create(objset_t *os)
{
	dmu_tx_t *tx = dmu_tx_create_dd(os->os_dsl_dataset->ds_dir);
	tx->tx_objset = os;
	tx->tx_lastsnap_txg = dsl_dataset_prev_snap_txg(os->os_dsl_dataset);
	return (tx);
}

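/*
 * Create a transaction that is already bound to the given txg.  Used by
 * the sync context, which may dirty any object ("anyobj") without going
 * through dmu_tx_assign().
 */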
dmu_tx_t *
dmu_tx_create_assigned(struct dsl_pool *dp, uint64_t txg)
{
	dmu_tx_t *tx = dmu_tx_create_dd(NULL);

	ASSERT3U(txg, <=, dp->dp_tx.tx_open_txg);
	tx->tx_pool = dp;
	tx->tx_txg = txg;
	tx->tx_anyobj = TRUE;

	return (tx);
}

int
dmu_tx_is_syncing(dmu_tx_t *tx)
{
	return (tx->tx_anyobj);
}

int
dmu_tx_private_ok(dmu_tx_t *tx)
{
	return (tx->tx_anyobj);
}

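/*
 * Allocate a hold record for the given object, take a dnode hold on it
 * (unless it is DMU_NEW_OBJECT), and append the record to the tx's hold
 * list.  Returns NULL and sets tx_err if the dnode cannot be held.
 */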
static dmu_tx_hold_t *
dmu_tx_hold_object_impl(dmu_tx_t *tx, objset_t *os, uint64_t object,
    enum dmu_tx_hold_type type, uint64_t arg1, uint64_t arg2)
{
	dmu_tx_hold_t *txh;
	dnode_t *dn = NULL;
	int err;

	if (object != DMU_NEW_OBJECT) {
		err = dnode_hold(os, object, tx, &dn);
		if (err) {
			tx->tx_err = err;
			return (NULL);
		}

		if (err == 0 && tx->tx_txg != 0) {
			mutex_enter(&dn->dn_mtx);
			/*
			 * dn->dn_assigned_txg == tx->tx_txg doesn't pose a
			 * problem, but there's no way for it to happen (for
			 * now, at least).
			 */
			ASSERT(dn->dn_assigned_txg == 0);
			dn->dn_assigned_txg = tx->tx_txg;
			(void) refcount_add(&dn->dn_tx_holds, tx);
			mutex_exit(&dn->dn_mtx);
		}
	}

	txh = kmem_zalloc(sizeof (dmu_tx_hold_t), KM_SLEEP);
	txh->txh_tx = tx;
	txh->txh_dnode = dn;
#ifdef ZFS_DEBUG
	txh->txh_type = type;
	txh->txh_arg1 = arg1;
	txh->txh_arg2 = arg2;
#endif
	list_insert_tail(&tx->tx_holds, txh);

	return (txh);
}

void
dmu_tx_add_new_object(dmu_tx_t *tx, objset_t *os, uint64_t object)
{
	/*
	 * If we're syncing, they can manipulate any object anyhow, and
	 * the hold on the dnode_t can cause problems.
	 */
	if (!dmu_tx_is_syncing(tx)) {
		(void) dmu_tx_hold_object_impl(tx, os,
		    object, THT_NEWOBJECT, 0, 0);
	}
}

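/*
 * Read the block at the given level and blkid so that any i/o error is
 * discovered now, while the hold is being built, rather than after the
 * tx has been assigned.  The read is issued against the caller's zio,
 * or synchronously if zio is NULL.
 */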
static int
dmu_tx_check_ioerr(zio_t *zio, dnode_t *dn, int level, uint64_t blkid)
{
	int err;
	dmu_buf_impl_t *db;

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	db = dbuf_hold_level(dn, level, blkid, FTAG);
	rw_exit(&dn->dn_struct_rwlock);
	if (db == NULL)
		return (EIO);
	err = dbuf_read(db, zio, DB_RF_CANFAIL | DB_RF_NOPREFETCH);
	dbuf_rele(db, FTAG);
	return (err);
}

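/*
 * Charge one block at the given level (and, recursively, the indirect
 * blocks above it) to the hold's space counts.  Blocks that are freeable
 * in this dataset are charged as overwrites; otherwise they are charged
 * as new writes.  The history array keeps the same indirect block from
 * being charged more than once.
 */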
static void
dmu_tx_count_twig(dmu_tx_hold_t *txh, dnode_t *dn, dmu_buf_impl_t *db,
    int level, uint64_t blkid, boolean_t freeable, uint64_t *history)
{
	objset_t *os = dn->dn_objset;
	dsl_dataset_t *ds = os->os_dsl_dataset;
	int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
	dmu_buf_impl_t *parent = NULL;
	blkptr_t *bp = NULL;
	uint64_t space;

	if (level >= dn->dn_nlevels || history[level] == blkid)
		return;

	history[level] = blkid;

	space = (level == 0) ? dn->dn_datablksz : (1ULL << dn->dn_indblkshift);

	if (db == NULL || db == dn->dn_dbuf) {
		ASSERT(level != 0);
		db = NULL;
	} else {
		ASSERT(DB_DNODE(db) == dn);
		ASSERT(db->db_level == level);
		ASSERT(db->db.db_size == space);
		ASSERT(db->db_blkid == blkid);
		bp = db->db_blkptr;
		parent = db->db_parent;
	}

	freeable = (bp && (freeable ||
	    dsl_dataset_block_freeable(ds, bp, bp->blk_birth)));

	if (freeable)
		txh->txh_space_tooverwrite += space;
	else
		txh->txh_space_towrite += space;
	if (bp)
		txh->txh_space_tounref += bp_get_dsize(os->os_spa, bp);

	dmu_tx_count_twig(txh, dn, parent, level + 1,
	    blkid >> epbs, freeable, history);
}

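/*
 * Estimate the space consumed by a write of 'len' bytes at offset 'off':
 * the level-0 data blocks plus the indirect blocks above them.  Existing
 * blocks in the range are checked for i/o errors and charged as
 * overwrites; blocks past the end of the file are charged as new writes
 * using worst-case block sizes.
 */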
/* ARGSUSED */
static void
dmu_tx_count_write(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
{
	dnode_t *dn = txh->txh_dnode;
	uint64_t start, end, i;
	int min_bs, max_bs, min_ibs, max_ibs, epbs, bits;
	int err = 0;

	if (len == 0)
		return;

	min_bs = SPA_MINBLOCKSHIFT;
	max_bs = SPA_MAXBLOCKSHIFT;
	min_ibs = DN_MIN_INDBLKSHIFT;
	max_ibs = DN_MAX_INDBLKSHIFT;

	if (dn) {
		uint64_t history[DN_MAX_LEVELS];
		int nlvls = dn->dn_nlevels;
		int delta;

		/*
		 * For i/o error checking, read the first and last level-0
		 * blocks (if they are not aligned), and all the level-1 blocks.
		 */
		if (dn->dn_maxblkid == 0) {
			delta = dn->dn_datablksz;
			start = (off < dn->dn_datablksz) ? 0 : 1;
			end = (off+len <= dn->dn_datablksz) ? 0 : 1;
			if (start == 0 && (off > 0 || len < dn->dn_datablksz)) {
				err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
				if (err)
					goto out;
				delta -= off;
			}
		} else {
			zio_t *zio = zio_root(dn->dn_objset->os_spa,
			    NULL, NULL, ZIO_FLAG_CANFAIL);

			/* first level-0 block */
			start = off >> dn->dn_datablkshift;
			if (P2PHASE(off, dn->dn_datablksz) ||
			    len < dn->dn_datablksz) {
				err = dmu_tx_check_ioerr(zio, dn, 0, start);
				if (err)
					goto out;
			}

			/* last level-0 block */
			end = (off+len-1) >> dn->dn_datablkshift;
			if (end != start && end <= dn->dn_maxblkid &&
			    P2PHASE(off+len, dn->dn_datablksz)) {
				err = dmu_tx_check_ioerr(zio, dn, 0, end);
				if (err)
					goto out;
			}

			/* level-1 blocks */
			if (nlvls > 1) {
				int shft = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
				for (i = (start>>shft)+1; i < end>>shft; i++) {
					err = dmu_tx_check_ioerr(zio, dn, 1, i);
					if (err)
						goto out;
				}
			}

			err = zio_wait(zio);
			if (err)
				goto out;
			delta = P2NPHASE(off, dn->dn_datablksz);
		}

		if (dn->dn_maxblkid > 0) {
			/*
			 * The blocksize can't change,
			 * so we can make a more precise estimate.
			 */
			ASSERT(dn->dn_datablkshift != 0);
			min_bs = max_bs = dn->dn_datablkshift;
			min_ibs = max_ibs = dn->dn_indblkshift;
		} else if (dn->dn_indblkshift > max_ibs) {
			/*
			 * This ensures that if we reduce DN_MAX_INDBLKSHIFT,
			 * the code will still work correctly on older pools.
			 */
			min_ibs = max_ibs = dn->dn_indblkshift;
		}

		/*
		 * If this write is not off the end of the file
		 * we need to account for overwrites/unref.
		 */
		if (start <= dn->dn_maxblkid) {
			for (int l = 0; l < DN_MAX_LEVELS; l++)
				history[l] = -1ULL;
		}
		while (start <= dn->dn_maxblkid) {
			dmu_buf_impl_t *db;

			rw_enter(&dn->dn_struct_rwlock, RW_READER);
			err = dbuf_hold_impl(dn, 0, start, FALSE, FTAG, &db);
			rw_exit(&dn->dn_struct_rwlock);

			if (err) {
				txh->txh_tx->tx_err = err;
				return;
			}

			dmu_tx_count_twig(txh, dn, db, 0, start, B_FALSE,
			    history);
			dbuf_rele(db, FTAG);
			if (++start > end) {
				/*
				 * Account for new indirects appearing
				 * before this IO gets assigned into a txg.
				 */
				bits = 64 - min_bs;
				epbs = min_ibs - SPA_BLKPTRSHIFT;
				for (bits -= epbs * (nlvls - 1);
				    bits >= 0; bits -= epbs)
					txh->txh_fudge += 1ULL << max_ibs;
				goto out;
			}
			off += delta;
			if (len >= delta)
				len -= delta;
			delta = dn->dn_datablksz;
		}
	}

	/*
	 * 'end' is the last thing we will access, not one past.
	 * This way we won't overflow when accessing the last byte.
	 */
	start = P2ALIGN(off, 1ULL << max_bs);
	end = P2ROUNDUP(off + len, 1ULL << max_bs) - 1;
	txh->txh_space_towrite += end - start + 1;

	start >>= min_bs;
	end >>= min_bs;

	epbs = min_ibs - SPA_BLKPTRSHIFT;

	/*
	 * The object contains at most 2^(64 - min_bs) blocks,
	 * and each indirect level maps 2^epbs.
	 */
	for (bits = 64 - min_bs; bits >= 0; bits -= epbs) {
		start >>= epbs;
		end >>= epbs;
		ASSERT3U(end, >=, start);
		txh->txh_space_towrite += (end - start + 1) << max_ibs;
		if (start != 0) {
			/*
			 * We also need a new blkid=0 indirect block
			 * to reference any existing file data.
			 */
			txh->txh_space_towrite += 1ULL << max_ibs;
		}
	}

out:
	if (txh->txh_space_towrite + txh->txh_space_tooverwrite >
	    2 * DMU_MAX_ACCESS)
		err = EFBIG;

	if (err)
		txh->txh_tx->tx_err = err;
}

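/*
 * Account for the dnode itself: one meta-dnode data block plus the
 * indirect blocks above it.  If the dnode's existing block is freeable,
 * charge the space as an overwrite rather than a new write.
 */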
static void
dmu_tx_count_dnode(dmu_tx_hold_t *txh)
{
	dnode_t *dn = txh->txh_dnode;
	dnode_t *mdn = DMU_META_DNODE(txh->txh_tx->tx_objset);
	uint64_t space = mdn->dn_datablksz +
	    ((mdn->dn_nlevels-1) << mdn->dn_indblkshift);

	if (dn && dn->dn_dbuf->db_blkptr &&
	    dsl_dataset_block_freeable(dn->dn_objset->os_dsl_dataset,
	    dn->dn_dbuf->db_blkptr, dn->dn_dbuf->db_blkptr->blk_birth)) {
		txh->txh_space_tooverwrite += space;
		txh->txh_space_tounref += space;
	} else {
		txh->txh_space_towrite += space;
		if (dn && dn->dn_dbuf->db_blkptr)
			txh->txh_space_tounref += space;
	}
}

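/*
 * Hold an object for a write of 'len' bytes at offset 'off'.  The write
 * must fit within DMU_MAX_ACCESS and must not wrap around the end of the
 * offset space.
 */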
void
dmu_tx_hold_write(dmu_tx_t *tx, uint64_t object, uint64_t off, int len)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);
	ASSERT(len < DMU_MAX_ACCESS);
	ASSERT(len == 0 || UINT64_MAX - off >= len - 1);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_WRITE, off, len);
	if (txh == NULL)
		return;

	dmu_tx_count_write(txh, off, len);
	dmu_tx_count_dnode(txh);
}

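/*
 * Estimate the space freed, the space unreferenced, and the memory needed
 * to hold level-1 (and higher) indirect blocks when freeing the range
 * [off, off+len) from this object.  dnode_next_offset() is used to skip
 * over holes so only allocated level-1 ranges are examined.
 */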
static void
dmu_tx_count_free(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
{
	uint64_t blkid, nblks, lastblk;
	uint64_t space = 0, unref = 0, skipped = 0;
	dnode_t *dn = txh->txh_dnode;
	dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
	spa_t *spa = txh->txh_tx->tx_pool->dp_spa;
	int epbs;
	uint64_t l0span = 0, nl1blks = 0;

	if (dn->dn_nlevels == 0)
		return;

	/*
	 * The struct_rwlock protects us against dn_nlevels
	 * changing, in case (against all odds) we manage to dirty &
	 * sync out the changes after we check for being dirty.
	 * Also, dbuf_hold_impl() wants us to have the struct_rwlock.
	 */
	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
	if (dn->dn_maxblkid == 0) {
		if (off == 0 && len >= dn->dn_datablksz) {
			blkid = 0;
			nblks = 1;
		} else {
			rw_exit(&dn->dn_struct_rwlock);
			return;
		}
	} else {
		blkid = off >> dn->dn_datablkshift;
		nblks = (len + dn->dn_datablksz - 1) >> dn->dn_datablkshift;

		if (blkid >= dn->dn_maxblkid) {
			rw_exit(&dn->dn_struct_rwlock);
			return;
		}
		if (blkid + nblks > dn->dn_maxblkid)
			nblks = dn->dn_maxblkid - blkid;

	}
	l0span = nblks;    /* save for later use to calc level > 1 overhead */
	if (dn->dn_nlevels == 1) {
		int i;
		for (i = 0; i < nblks; i++) {
			blkptr_t *bp = dn->dn_phys->dn_blkptr;
			ASSERT3U(blkid + i, <, dn->dn_nblkptr);
			bp += blkid + i;
			if (dsl_dataset_block_freeable(ds, bp, bp->blk_birth)) {
				dprintf_bp(bp, "can free old%s", "");
				space += bp_get_dsize(spa, bp);
			}
			unref += BP_GET_ASIZE(bp);
		}
		nl1blks = 1;
		nblks = 0;
	}

	lastblk = blkid + nblks - 1;
	while (nblks) {
		dmu_buf_impl_t *dbuf;
		uint64_t ibyte, new_blkid;
		int epb = 1 << epbs;
		int err, i, blkoff, tochk;
		blkptr_t *bp;

		ibyte = blkid << dn->dn_datablkshift;
		err = dnode_next_offset(dn,
		    DNODE_FIND_HAVELOCK, &ibyte, 2, 1, 0);
		new_blkid = ibyte >> dn->dn_datablkshift;
		if (err == ESRCH) {
			skipped += (lastblk >> epbs) - (blkid >> epbs) + 1;
			break;
		}
		if (err) {
			txh->txh_tx->tx_err = err;
			break;
		}
		if (new_blkid > lastblk) {
			skipped += (lastblk >> epbs) - (blkid >> epbs) + 1;
			break;
		}

		if (new_blkid > blkid) {
			ASSERT((new_blkid >> epbs) > (blkid >> epbs));
			skipped += (new_blkid >> epbs) - (blkid >> epbs) - 1;
			nblks -= new_blkid - blkid;
			blkid = new_blkid;
		}
		blkoff = P2PHASE(blkid, epb);
		tochk = MIN(epb - blkoff, nblks);

		err = dbuf_hold_impl(dn, 1, blkid >> epbs, FALSE, FTAG, &dbuf);
		if (err) {
			txh->txh_tx->tx_err = err;
			break;
		}

		txh->txh_memory_tohold += dbuf->db.db_size;

		/*
		 * We don't check memory_tohold against DMU_MAX_ACCESS because
		 * memory_tohold is an over-estimation (especially the >L1
		 * indirect blocks), so it could fail.  Callers should have
		 * already verified that they will not be holding too much
		 * memory.
		 */

		err = dbuf_read(dbuf, NULL, DB_RF_HAVESTRUCT | DB_RF_CANFAIL);
		if (err != 0) {
			txh->txh_tx->tx_err = err;
			dbuf_rele(dbuf, FTAG);
			break;
		}

		bp = dbuf->db.db_data;
		bp += blkoff;

		for (i = 0; i < tochk; i++) {
			if (dsl_dataset_block_freeable(ds, &bp[i],
			    bp[i].blk_birth)) {
				dprintf_bp(&bp[i], "can free old%s", "");
				space += bp_get_dsize(spa, &bp[i]);
			}
			unref += BP_GET_ASIZE(bp);
		}
		dbuf_rele(dbuf, FTAG);

		++nl1blks;
		blkid += tochk;
		nblks -= tochk;
	}
	rw_exit(&dn->dn_struct_rwlock);

	/*
	 * Add in memory requirements of higher-level indirects.
	 * This assumes a worst-possible scenario for dn_nlevels and a
	 * worst-possible distribution of l1-blocks over the region to free.
	 */
	{
		uint64_t blkcnt = 1 + ((l0span >> epbs) >> epbs);
		int level = 2;
		/*
		 * Here we don't use DN_MAX_LEVEL, but calculate it with the
		 * given datablkshift and indblkshift. This makes the
		 * difference between 19 and 8 on large files.
		 */
		int maxlevel = 2 + (DN_MAX_OFFSET_SHIFT - dn->dn_datablkshift) /
		    (dn->dn_indblkshift - SPA_BLKPTRSHIFT);

		while (level++ < maxlevel) {
			txh->txh_memory_tohold += MIN(blkcnt, (nl1blks >> epbs))
			    << dn->dn_indblkshift;
			blkcnt = 1 + (blkcnt >> epbs);
		}
	}

	/* account for new level 1 indirect blocks that might show up */
	if (skipped > 0) {
		txh->txh_fudge += skipped << dn->dn_indblkshift;
		skipped = MIN(skipped, DMU_MAX_DELETEBLKCNT >> epbs);
		txh->txh_memory_tohold += skipped << dn->dn_indblkshift;
	}
	txh->txh_space_tofree += space;
	txh->txh_space_tounref += unref;
}

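/*
 * Hold an object for freeing the range [off, off+len).  Partial first and
 * last blocks are charged as writes, and the level-1 indirect blocks
 * covering the range are read so i/o errors surface before the free is
 * estimated.
 */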
void
dmu_tx_hold_free(dmu_tx_t *tx, uint64_t object, uint64_t off, uint64_t len)
{
	dmu_tx_hold_t *txh;
	dnode_t *dn;
	uint64_t start, end, i;
	int err, shift;
	zio_t *zio;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_FREE, off, len);
	if (txh == NULL)
		return;
	dn = txh->txh_dnode;

	/* first block */
	if (off != 0)
		dmu_tx_count_write(txh, off, 1);
	/* last block */
	if (len != DMU_OBJECT_END)
		dmu_tx_count_write(txh, off+len, 1);

	dmu_tx_count_dnode(txh);

	if (off >= (dn->dn_maxblkid+1) * dn->dn_datablksz)
		return;
	if (len == DMU_OBJECT_END)
		len = (dn->dn_maxblkid+1) * dn->dn_datablksz - off;

	/*
	 * For i/o error checking, read the first and last level-0
	 * blocks, and all the level-1 blocks.  The above count_write's
	 * have already taken care of the level-0 blocks.
	 */
	if (dn->dn_nlevels > 1) {
		shift = dn->dn_datablkshift + dn->dn_indblkshift -
		    SPA_BLKPTRSHIFT;
		start = off >> shift;
		end = dn->dn_datablkshift ? ((off+len) >> shift) : 0;

		zio = zio_root(tx->tx_pool->dp_spa,
		    NULL, NULL, ZIO_FLAG_CANFAIL);
		for (i = start; i <= end; i++) {
			uint64_t ibyte = i << shift;
			err = dnode_next_offset(dn, 0, &ibyte, 2, 1, 0);
			i = ibyte >> shift;
			if (err == ESRCH)
				break;
			if (err) {
				tx->tx_err = err;
				return;
			}

			err = dmu_tx_check_ioerr(zio, dn, 1, i);
			if (err) {
				tx->tx_err = err;
				return;
			}
		}
		err = zio_wait(zio);
		if (err) {
			tx->tx_err = err;
			return;
		}
	}

	dmu_tx_count_free(txh, off, len);
}

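/*
 * Hold a ZAP object for adding (add != 0) or removing an entry named
 * 'name' (or an unknown entry if 'name' is NULL).  For an existing fat
 * zap, the named leaf blocks are looked up so i/o errors surface early,
 * and zap_count_write() refines the write/overwrite estimates.
 */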
void
dmu_tx_hold_zap(dmu_tx_t *tx, uint64_t object, int add, const char *name)
{
	dmu_tx_hold_t *txh;
	dnode_t *dn;
	uint64_t nblocks;
	int epbs, err;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_ZAP, add, (uintptr_t)name);
	if (txh == NULL)
		return;
	dn = txh->txh_dnode;

	dmu_tx_count_dnode(txh);

	if (dn == NULL) {
		/*
		 * We will be able to fit a new object's entries into one leaf
		 * block.  So there will be at most 2 blocks total,
		 * including the header block.
		 */
		dmu_tx_count_write(txh, 0, 2 << fzap_default_block_shift);
		return;
	}

	ASSERT3P(DMU_OT_BYTESWAP(dn->dn_type), ==, DMU_BSWAP_ZAP);

	if (dn->dn_maxblkid == 0 && !add) {
		blkptr_t *bp;

		/*
		 * If there is only one block (i.e. this is a micro-zap)
		 * and we are not adding anything, the accounting is simple.
		 */
		err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
		if (err) {
			tx->tx_err = err;
			return;
		}

		/*
		 * Use max block size here, since we don't know how much
		 * the size will change between now and the dbuf dirty call.
		 */
		bp = &dn->dn_phys->dn_blkptr[0];
		if (dsl_dataset_block_freeable(dn->dn_objset->os_dsl_dataset,
		    bp, bp->blk_birth))
			txh->txh_space_tooverwrite += SPA_MAXBLOCKSIZE;
		else
			txh->txh_space_towrite += SPA_MAXBLOCKSIZE;
		if (!BP_IS_HOLE(bp))
			txh->txh_space_tounref += SPA_MAXBLOCKSIZE;
		return;
	}

	if (dn->dn_maxblkid > 0 && name) {
		/*
		 * access the name in this fat-zap so that we'll check
		 * for i/o errors to the leaf blocks, etc.
		 */
		err = zap_lookup(dn->dn_objset, dn->dn_object, name,
		    8, 0, NULL);
		if (err == EIO) {
			tx->tx_err = err;
			return;
		}
	}

	err = zap_count_write(dn->dn_objset, dn->dn_object, name, add,
	    &txh->txh_space_towrite, &txh->txh_space_tooverwrite);

	/*
	 * If the modified blocks are scattered to the four winds,
	 * we'll have to modify an indirect twig for each.
	 */
	epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
	for (nblocks = dn->dn_maxblkid >> epbs; nblocks != 0; nblocks >>= epbs)
		if (dn->dn_objset->os_dsl_dataset->ds_phys->ds_prev_snap_obj)
			txh->txh_space_towrite += 3 << dn->dn_indblkshift;
		else
			txh->txh_space_tooverwrite += 3 << dn->dn_indblkshift;
}

void
dmu_tx_hold_bonus(dmu_tx_t *tx, uint64_t object)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_BONUS, 0, 0);
	if (txh)
		dmu_tx_count_dnode(txh);
}

void
dmu_tx_hold_space(dmu_tx_t *tx, uint64_t space)
{
	dmu_tx_hold_t *txh;
	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    DMU_NEW_OBJECT, THT_SPACE, space, 0);

	txh->txh_space_towrite += space;
}

int
dmu_tx_holds(dmu_tx_t *tx, uint64_t object)
{
	dmu_tx_hold_t *txh;
	int holds = 0;

	/*
	 * By asserting that the tx is assigned, we're counting the
	 * number of dn_tx_holds, which is the same as the number of
	 * dn_holds.  Otherwise, we'd be counting dn_holds, but
	 * dn_tx_holds could be 0.
	 */
	ASSERT(tx->tx_txg != 0);

	/* if (tx->tx_anyobj == TRUE) */
		/* return (0); */

	for (txh = list_head(&tx->tx_holds); txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		if (txh->txh_dnode && txh->txh_dnode->dn_object == object)
			holds++;
	}

	return (holds);
}

#ifdef ZFS_DEBUG
void
dmu_tx_dirty_buf(dmu_tx_t *tx, dmu_buf_impl_t *db)
{
	dmu_tx_hold_t *txh;
	int match_object = FALSE, match_offset = FALSE;
	dnode_t *dn;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	ASSERT(tx->tx_txg != 0);
	ASSERT(tx->tx_objset == NULL || dn->dn_objset == tx->tx_objset);
	ASSERT3U(dn->dn_object, ==, db->db.db_object);

	if (tx->tx_anyobj) {
		DB_DNODE_EXIT(db);
		return;
	}

	/* XXX No checking on the meta dnode for now */
	if (db->db.db_object == DMU_META_DNODE_OBJECT) {
		DB_DNODE_EXIT(db);
		return;
	}

	for (txh = list_head(&tx->tx_holds); txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		ASSERT(dn == NULL || dn->dn_assigned_txg == tx->tx_txg);
		if (txh->txh_dnode == dn && txh->txh_type != THT_NEWOBJECT)
			match_object = TRUE;
		if (txh->txh_dnode == NULL || txh->txh_dnode == dn) {
			int datablkshift = dn->dn_datablkshift ?
			    dn->dn_datablkshift : SPA_MAXBLOCKSHIFT;
			int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
			int shift = datablkshift + epbs * db->db_level;
			uint64_t beginblk = shift >= 64 ? 0 :
			    (txh->txh_arg1 >> shift);
			uint64_t endblk = shift >= 64 ? 0 :
			    ((txh->txh_arg1 + txh->txh_arg2 - 1) >> shift);
			uint64_t blkid = db->db_blkid;

			/* XXX txh_arg2 better not be zero... */

			dprintf("found txh type %x beginblk=%llx endblk=%llx\n",
			    txh->txh_type, beginblk, endblk);

			switch (txh->txh_type) {
			case THT_WRITE:
				if (blkid >= beginblk && blkid <= endblk)
					match_offset = TRUE;
				/*
				 * We will let this hold work for the bonus
				 * or spill buffer so that we don't need to
				 * hold it when creating a new object.
				 */
				if (blkid == DMU_BONUS_BLKID ||
				    blkid == DMU_SPILL_BLKID)
					match_offset = TRUE;
				/*
				 * They might have to increase nlevels,
				 * thus dirtying the new TLIBs.  Or they
				 * might have to change the block size,
				 * thus dirtying the new lvl=0 blk=0.
				 */
				if (blkid == 0)
					match_offset = TRUE;
				break;
			case THT_FREE:
				/*
				 * We will dirty all the level 1 blocks in
				 * the free range and perhaps the first and
				 * last level 0 block.
				 */
				if (blkid >= beginblk && (blkid <= endblk ||
				    txh->txh_arg2 == DMU_OBJECT_END))
					match_offset = TRUE;
				break;
			case THT_SPILL:
				if (blkid == DMU_SPILL_BLKID)
					match_offset = TRUE;
				break;
			case THT_BONUS:
				if (blkid == DMU_BONUS_BLKID)
					match_offset = TRUE;
				break;
			case THT_ZAP:
				match_offset = TRUE;
				break;
			case THT_NEWOBJECT:
				match_object = TRUE;
				break;
			default:
				ASSERT(!"bad txh_type");
			}
		}
		if (match_object && match_offset) {
			DB_DNODE_EXIT(db);
			return;
		}
	}
	DB_DNODE_EXIT(db);
	panic("dirtying dbuf obj=%llx lvl=%u blkid=%llx but not tx_held\n",
	    (u_longlong_t)db->db.db_object, db->db_level,
	    (u_longlong_t)db->db_blkid);
}
#endif

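/*
 * Try to assign the tx to the currently open txg: take dnode tx holds,
 * total up the per-hold space and memory estimates, and temporarily
 * reserve that space from the dsl_dir.  Returns ERESTART (or EIO, if the
 * pool is suspended with failmode=continue) when the caller should back
 * off; dmu_tx_unassign() undoes any partial work in that case.
 */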
static int
dmu_tx_try_assign(dmu_tx_t *tx, uint64_t txg_how)
{
	dmu_tx_hold_t *txh;
	spa_t *spa = tx->tx_pool->dp_spa;
	uint64_t memory, asize, fsize, usize;
	uint64_t towrite, tofree, tooverwrite, tounref, tohold, fudge;

	ASSERT3U(tx->tx_txg, ==, 0);

	if (tx->tx_err)
		return (tx->tx_err);

	if (spa_suspended(spa)) {
		/*
		 * If the user has indicated a blocking failure mode
		 * then return ERESTART which will block in dmu_tx_wait().
		 * Otherwise, return EIO so that an error can get
		 * propagated back to the VOP calls.
		 *
		 * Note that we always honor the txg_how flag regardless
		 * of the failuremode setting.
		 */
		if (spa_get_failmode(spa) == ZIO_FAILURE_MODE_CONTINUE &&
		    txg_how != TXG_WAIT)
			return (EIO);

		return (ERESTART);
	}

	tx->tx_txg = txg_hold_open(tx->tx_pool, &tx->tx_txgh);
	tx->tx_needassign_txh = NULL;

	/*
	 * NB: No error returns are allowed after txg_hold_open, but
	 * before processing the dnode holds, due to the
	 * dmu_tx_unassign() logic.
	 */

	towrite = tofree = tooverwrite = tounref = tohold = fudge = 0;
	for (txh = list_head(&tx->tx_holds); txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		dnode_t *dn = txh->txh_dnode;
		if (dn != NULL) {
			mutex_enter(&dn->dn_mtx);
			if (dn->dn_assigned_txg == tx->tx_txg - 1) {
				mutex_exit(&dn->dn_mtx);
				tx->tx_needassign_txh = txh;
				return (ERESTART);
			}
			if (dn->dn_assigned_txg == 0)
				dn->dn_assigned_txg = tx->tx_txg;
			ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);
			(void) refcount_add(&dn->dn_tx_holds, tx);
			mutex_exit(&dn->dn_mtx);
		}
		towrite += txh->txh_space_towrite;
		tofree += txh->txh_space_tofree;
		tooverwrite += txh->txh_space_tooverwrite;
		tounref += txh->txh_space_tounref;
		tohold += txh->txh_memory_tohold;
		fudge += txh->txh_fudge;
	}

	/*
	 * NB: This check must be after we've held the dnodes, so that
	 * the dmu_tx_unassign() logic will work properly
	 */
	if (txg_how >= TXG_INITIAL && txg_how != tx->tx_txg)
		return (ERESTART);

	/*
	 * If a snapshot has been taken since we made our estimates,
	 * assume that we won't be able to free or overwrite anything.
	 */
	if (tx->tx_objset &&
	    dsl_dataset_prev_snap_txg(tx->tx_objset->os_dsl_dataset) >
	    tx->tx_lastsnap_txg) {
		towrite += tooverwrite;
		tooverwrite = tofree = 0;
	}

	/* needed allocation: worst-case estimate of write space */
	asize = spa_get_asize(tx->tx_pool->dp_spa, towrite + tooverwrite);
	/* freed space estimate: worst-case overwrite + free estimate */
	fsize = spa_get_asize(tx->tx_pool->dp_spa, tooverwrite) + tofree;
	/* convert unrefd space to worst-case estimate */
	usize = spa_get_asize(tx->tx_pool->dp_spa, tounref);
	/* calculate memory footprint estimate */
	memory = towrite + tooverwrite + tohold;

#ifdef ZFS_DEBUG
	/*
	 * Add in 'tohold' to account for our dirty holds on this memory
	 * XXX - the "fudge" factor is to account for skipped blocks that
	 * we missed because dnode_next_offset() misses in-core-only blocks.
	 */
	tx->tx_space_towrite = asize +
	    spa_get_asize(tx->tx_pool->dp_spa, tohold + fudge);
	tx->tx_space_tofree = tofree;
	tx->tx_space_tooverwrite = tooverwrite;
	tx->tx_space_tounref = tounref;
#endif

	if (tx->tx_dir && asize != 0) {
		int err = dsl_dir_tempreserve_space(tx->tx_dir, memory,
		    asize, fsize, usize, &tx->tx_tempreserve_cookie, tx);
		if (err)
			return (err);
	}

	return (0);
}

static void
dmu_tx_unassign(dmu_tx_t *tx)
{
	dmu_tx_hold_t *txh;

	if (tx->tx_txg == 0)
		return;

	txg_rele_to_quiesce(&tx->tx_txgh);

	for (txh = list_head(&tx->tx_holds); txh != tx->tx_needassign_txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		dnode_t *dn = txh->txh_dnode;

		if (dn == NULL)
			continue;
		mutex_enter(&dn->dn_mtx);
		ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);

		if (refcount_remove(&dn->dn_tx_holds, tx) == 0) {
			dn->dn_assigned_txg = 0;
			cv_broadcast(&dn->dn_notxholds);
		}
		mutex_exit(&dn->dn_mtx);
	}

	txg_rele_to_sync(&tx->tx_txgh);

	tx->tx_lasttried_txg = tx->tx_txg;
	tx->tx_txg = 0;
}

/*
 * Assign tx to a transaction group.  txg_how can be one of:
 *
 * (1)	TXG_WAIT.  If the current open txg is full, waits until there's
 *	a new one.  This should be used when you're not holding locks.
 *	It will only fail if we're truly out of space (or over quota).
 *
 * (2)	TXG_NOWAIT.  If we can't assign into the current open txg without
 *	blocking, returns immediately with ERESTART.  This should be used
 *	whenever you're holding locks.  On an ERESTART error, the caller
 *	should drop locks, do a dmu_tx_wait(tx), and try again.
 *
 * (3)	A specific txg.  Use this if you need to ensure that multiple
 *	transactions all sync in the same txg.  Like TXG_NOWAIT, it
 *	returns ERESTART if it can't assign you into the requested txg.
 */
int
dmu_tx_assign(dmu_tx_t *tx, uint64_t txg_how)
{
	int err;

	ASSERT(tx->tx_txg == 0);
	ASSERT(txg_how != 0);
	ASSERT(!dsl_pool_sync_context(tx->tx_pool));

	while ((err = dmu_tx_try_assign(tx, txg_how)) != 0) {
		dmu_tx_unassign(tx);

		if (err != ERESTART || txg_how != TXG_WAIT)
			return (err);

		dmu_tx_wait(tx);
	}

	txg_rele_to_quiesce(&tx->tx_txgh);

	return (0);
}

void
dmu_tx_wait(dmu_tx_t *tx)
{
	spa_t *spa = tx->tx_pool->dp_spa;

	ASSERT(tx->tx_txg == 0);

	/*
	 * It's possible that the pool has become active after this thread
	 * has tried to obtain a tx. If that's the case then its
	 * tx_lasttried_txg would not have been assigned.
	 */
	if (spa_suspended(spa) || tx->tx_lasttried_txg == 0) {
		txg_wait_synced(tx->tx_pool, spa_last_synced_txg(spa) + 1);
	} else if (tx->tx_needassign_txh) {
		dnode_t *dn = tx->tx_needassign_txh->txh_dnode;

		mutex_enter(&dn->dn_mtx);
		while (dn->dn_assigned_txg == tx->tx_lasttried_txg - 1)
			cv_wait(&dn->dn_notxholds, &dn->dn_mtx);
		mutex_exit(&dn->dn_mtx);
		tx->tx_needassign_txh = NULL;
	} else {
		txg_wait_open(tx->tx_pool, tx->tx_lasttried_txg + 1);
	}
}

void
dmu_tx_willuse_space(dmu_tx_t *tx, int64_t delta)
{
#ifdef ZFS_DEBUG
	if (tx->tx_dir == NULL || delta == 0)
		return;

	if (delta > 0) {
		ASSERT3U(refcount_count(&tx->tx_space_written) + delta, <=,
		    tx->tx_space_towrite);
		(void) refcount_add_many(&tx->tx_space_written, delta, NULL);
	} else {
		(void) refcount_add_many(&tx->tx_space_freed, -delta, NULL);
	}
#endif
}

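/*
 * Commit the transaction: drop the dnode tx holds, clear the temporary
 * space reservation, hand any registered callbacks to the txg, release
 * the txg hold, and free the tx.
 */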
void
dmu_tx_commit(dmu_tx_t *tx)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg != 0);

	while (txh = list_head(&tx->tx_holds)) {
		dnode_t *dn = txh->txh_dnode;

		list_remove(&tx->tx_holds, txh);
		kmem_free(txh, sizeof (dmu_tx_hold_t));
		if (dn == NULL)
			continue;
		mutex_enter(&dn->dn_mtx);
		ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);

		if (refcount_remove(&dn->dn_tx_holds, tx) == 0) {
			dn->dn_assigned_txg = 0;
			cv_broadcast(&dn->dn_notxholds);
		}
		mutex_exit(&dn->dn_mtx);
		dnode_rele(dn, tx);
	}

	if (tx->tx_tempreserve_cookie)
		dsl_dir_tempreserve_clear(tx->tx_tempreserve_cookie, tx);

	if (!list_is_empty(&tx->tx_callbacks))
		txg_register_callbacks(&tx->tx_txgh, &tx->tx_callbacks);

	if (tx->tx_anyobj == FALSE)
		txg_rele_to_sync(&tx->tx_txgh);

	list_destroy(&tx->tx_callbacks);
	list_destroy(&tx->tx_holds);
#ifdef ZFS_DEBUG
	dprintf("towrite=%llu written=%llu tofree=%llu freed=%llu\n",
	    tx->tx_space_towrite, refcount_count(&tx->tx_space_written),
	    tx->tx_space_tofree, refcount_count(&tx->tx_space_freed));
	refcount_destroy_many(&tx->tx_space_written,
	    refcount_count(&tx->tx_space_written));
	refcount_destroy_many(&tx->tx_space_freed,
	    refcount_count(&tx->tx_space_freed));
#endif
	kmem_free(tx, sizeof (dmu_tx_t));
}

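/*
 * Abort an unassigned transaction: drop the dnode holds, invoke any
 * registered callbacks with ECANCELED, and free the tx.
 */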
void
dmu_tx_abort(dmu_tx_t *tx)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);

	while (txh = list_head(&tx->tx_holds)) {
		dnode_t *dn = txh->txh_dnode;

		list_remove(&tx->tx_holds, txh);
		kmem_free(txh, sizeof (dmu_tx_hold_t));
		if (dn != NULL)
			dnode_rele(dn, tx);
	}

	/*
	 * Call any registered callbacks with an error code.
	 */
	if (!list_is_empty(&tx->tx_callbacks))
		dmu_tx_do_callbacks(&tx->tx_callbacks, ECANCELED);

	list_destroy(&tx->tx_callbacks);
	list_destroy(&tx->tx_holds);
#ifdef ZFS_DEBUG
	refcount_destroy_many(&tx->tx_space_written,
	    refcount_count(&tx->tx_space_written));
	refcount_destroy_many(&tx->tx_space_freed,
	    refcount_count(&tx->tx_space_freed));
#endif
	kmem_free(tx, sizeof (dmu_tx_t));
}

uint64_t
dmu_tx_get_txg(dmu_tx_t *tx)
{
	ASSERT(tx->tx_txg != 0);
	return (tx->tx_txg);
}

void
dmu_tx_callback_register(dmu_tx_t *tx, dmu_tx_callback_func_t *func, void *data)
{
	dmu_tx_callback_t *dcb;

	dcb = kmem_alloc(sizeof (dmu_tx_callback_t), KM_SLEEP);

	dcb->dcb_func = func;
	dcb->dcb_data = data;

	list_insert_tail(&tx->tx_callbacks, dcb);
}

/*
 * Call all the commit callbacks on a list, with a given error code.
 */
void
dmu_tx_do_callbacks(list_t *cb_list, int error)
{
	dmu_tx_callback_t *dcb;

	while (dcb = list_head(cb_list)) {
		list_remove(cb_list, dcb);
		dcb->dcb_func(dcb->dcb_data, error);
		kmem_free(dcb, sizeof (dmu_tx_callback_t));
	}
}

/*
 * Interface to hold a bunch of attributes.
 * Used for creating new files.
 * attrsize is the total size of all attributes
 * to be added during object creation.
 *
 * For updating/adding a single attribute dmu_tx_hold_sa() should be used.
 */

/*
 * Hold the necessary attribute name for attribute registration.
 * This should be a very rare case where it is needed.  If it does
 * happen it would only happen on the first write to the file system.
 */
static void
dmu_tx_sa_registration_hold(sa_os_t *sa, dmu_tx_t *tx)
{
	int i;

	if (!sa->sa_need_attr_registration)
		return;

	for (i = 0; i != sa->sa_num_attrs; i++) {
		if (!sa->sa_attr_table[i].sa_registered) {
			if (sa->sa_reg_attr_obj)
				dmu_tx_hold_zap(tx, sa->sa_reg_attr_obj,
				    B_TRUE, sa->sa_attr_table[i].sa_name);
			else
				dmu_tx_hold_zap(tx, DMU_NEW_OBJECT,
				    B_TRUE, sa->sa_attr_table[i].sa_name);
		}
	}
}


void
dmu_tx_hold_spill(dmu_tx_t *tx, uint64_t object)
{
	dnode_t *dn;
	dmu_tx_hold_t *txh;

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset, object,
	    THT_SPILL, 0, 0);

	dn = txh->txh_dnode;

	if (dn == NULL)
		return;

	/* If blkptr doesn't exist then add space to towrite */
	if (!(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR)) {
		txh->txh_space_towrite += SPA_MAXBLOCKSIZE;
	} else {
		blkptr_t *bp;

		bp = &dn->dn_phys->dn_spill;
		if (dsl_dataset_block_freeable(dn->dn_objset->os_dsl_dataset,
		    bp, bp->blk_birth))
			txh->txh_space_tooverwrite += SPA_MAXBLOCKSIZE;
		else
			txh->txh_space_towrite += SPA_MAXBLOCKSIZE;
		if (!BP_IS_HOLE(bp))
			txh->txh_space_tounref += SPA_MAXBLOCKSIZE;
	}
}

void
dmu_tx_hold_sa_create(dmu_tx_t *tx, int attrsize)
{
	sa_os_t *sa = tx->tx_objset->os_sa;

	dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);

	if (tx->tx_objset->os_sa->sa_master_obj == 0)
		return;

	if (tx->tx_objset->os_sa->sa_layout_attr_obj)
		dmu_tx_hold_zap(tx, sa->sa_layout_attr_obj, B_TRUE, NULL);
	else {
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_LAYOUTS);
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_REGISTRY);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
	}

	dmu_tx_sa_registration_hold(sa, tx);

	if (attrsize <= DN_MAX_BONUSLEN && !sa->sa_force_spill)
		return;

	(void) dmu_tx_hold_object_impl(tx, tx->tx_objset, DMU_NEW_OBJECT,
	    THT_SPILL, 0, 0);
}

/*
 * Hold SA attribute
 *
 * dmu_tx_hold_sa(dmu_tx_t *tx, sa_handle_t *hdl, boolean_t may_grow)
 *
 * The object's bonus buffer is held; the spill block is held as well
 * when the attributes may grow or a spill block is already in use.
 */
void
dmu_tx_hold_sa(dmu_tx_t *tx, sa_handle_t *hdl, boolean_t may_grow)
{
	uint64_t object;
	sa_os_t *sa = tx->tx_objset->os_sa;

	ASSERT(hdl != NULL);

	object = sa_handle_object(hdl);

	dmu_tx_hold_bonus(tx, object);

	if (tx->tx_objset->os_sa->sa_master_obj == 0)
		return;

	if (tx->tx_objset->os_sa->sa_reg_attr_obj == 0 ||
	    tx->tx_objset->os_sa->sa_layout_attr_obj == 0) {
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_LAYOUTS);
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_REGISTRY);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
	}

	dmu_tx_sa_registration_hold(sa, tx);

	if (may_grow && tx->tx_objset->os_sa->sa_layout_attr_obj)
		dmu_tx_hold_zap(tx, sa->sa_layout_attr_obj, B_TRUE, NULL);

	if (sa->sa_force_spill || may_grow || hdl->sa_spill) {
		ASSERT(tx->tx_txg == 0);
		dmu_tx_hold_spill(tx, object);
	} else {
		dmu_buf_impl_t *db = (dmu_buf_impl_t *)hdl->sa_bonus;
		dnode_t *dn;

		DB_DNODE_ENTER(db);
		dn = DB_DNODE(db);
		if (dn->dn_have_spill) {
			ASSERT(tx->tx_txg == 0);
			dmu_tx_hold_spill(tx, object);
		}
		DB_DNODE_EXIT(db);
	}
}
1398