/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
 * Copyright (c) 2012, 2017 by Delphix. All rights reserved.
 */

#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dbuf.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_pool.h>
#include <sys/zap_impl.h>
#include <sys/spa.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/zfs_context.h>
#include <sys/trace_zfs.h>

typedef void (*dmu_tx_hold_func_t)(dmu_tx_t *tx, struct dnode *dn,
    uint64_t arg1, uint64_t arg2);

dmu_tx_stats_t dmu_tx_stats = {
	{ "dmu_tx_assigned",		KSTAT_DATA_UINT64 },
	{ "dmu_tx_delay",		KSTAT_DATA_UINT64 },
	{ "dmu_tx_error",		KSTAT_DATA_UINT64 },
	{ "dmu_tx_suspended",		KSTAT_DATA_UINT64 },
	{ "dmu_tx_group",		KSTAT_DATA_UINT64 },
	{ "dmu_tx_memory_reserve",	KSTAT_DATA_UINT64 },
	{ "dmu_tx_memory_reclaim",	KSTAT_DATA_UINT64 },
	{ "dmu_tx_dirty_throttle",	KSTAT_DATA_UINT64 },
	{ "dmu_tx_dirty_delay",		KSTAT_DATA_UINT64 },
	{ "dmu_tx_dirty_over_max",	KSTAT_DATA_UINT64 },
	{ "dmu_tx_dirty_frees_delay",	KSTAT_DATA_UINT64 },
	{ "dmu_tx_wrlog_delay",		KSTAT_DATA_UINT64 },
	{ "dmu_tx_quota",		KSTAT_DATA_UINT64 },
};

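/*
 * A usage note (the paths below are an assumption and vary by platform):
 * these counters are exported as a virtual kstat, which typically surfaces
 * on Linux as /proc/spl/kstat/zfs/dmu_tx and on FreeBSD under the
 * kstat.zfs.misc.dmu_tx sysctl tree, e.g.:
 *
 *	# cat /proc/spl/kstat/zfs/dmu_tx
 *	# sysctl kstat.zfs.misc.dmu_tx
 */
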
static kstat_t *dmu_tx_ksp;

dmu_tx_t *
dmu_tx_create_dd(dsl_dir_t *dd)
{
	dmu_tx_t *tx = kmem_zalloc(sizeof (dmu_tx_t), KM_SLEEP);
	tx->tx_dir = dd;
	if (dd != NULL)
		tx->tx_pool = dd->dd_pool;
	list_create(&tx->tx_holds, sizeof (dmu_tx_hold_t),
	    offsetof(dmu_tx_hold_t, txh_node));
	list_create(&tx->tx_callbacks, sizeof (dmu_tx_callback_t),
	    offsetof(dmu_tx_callback_t, dcb_node));
	tx->tx_start = gethrtime();
	return (tx);
}

dmu_tx_t *
dmu_tx_create(objset_t *os)
{
	dmu_tx_t *tx = dmu_tx_create_dd(os->os_dsl_dataset->ds_dir);
	tx->tx_objset = os;
	return (tx);
}

dmu_tx_t *
dmu_tx_create_assigned(struct dsl_pool *dp, uint64_t txg)
{
	dmu_tx_t *tx = dmu_tx_create_dd(NULL);

	TXG_VERIFY(dp->dp_spa, txg);
	tx->tx_pool = dp;
	tx->tx_txg = txg;
	tx->tx_anyobj = TRUE;

	return (tx);
}

int
dmu_tx_is_syncing(dmu_tx_t *tx)
{
	return (tx->tx_anyobj);
}

int
dmu_tx_private_ok(dmu_tx_t *tx)
{
	return (tx->tx_anyobj);
}

static dmu_tx_hold_t *
dmu_tx_hold_dnode_impl(dmu_tx_t *tx, dnode_t *dn, enum dmu_tx_hold_type type,
    uint64_t arg1, uint64_t arg2)
{
	dmu_tx_hold_t *txh;

	if (dn != NULL) {
		(void) zfs_refcount_add(&dn->dn_holds, tx);
		if (tx->tx_txg != 0) {
			mutex_enter(&dn->dn_mtx);
			/*
			 * dn->dn_assigned_txg == tx->tx_txg doesn't pose a
			 * problem, but there's no way for it to happen (for
			 * now, at least).
			 */
			ASSERT(dn->dn_assigned_txg == 0);
			dn->dn_assigned_txg = tx->tx_txg;
			(void) zfs_refcount_add(&dn->dn_tx_holds, tx);
			mutex_exit(&dn->dn_mtx);
		}
	}

	txh = kmem_zalloc(sizeof (dmu_tx_hold_t), KM_SLEEP);
	txh->txh_tx = tx;
	txh->txh_dnode = dn;
	zfs_refcount_create(&txh->txh_space_towrite);
	zfs_refcount_create(&txh->txh_memory_tohold);
	txh->txh_type = type;
	txh->txh_arg1 = arg1;
	txh->txh_arg2 = arg2;
	list_insert_tail(&tx->tx_holds, txh);

	return (txh);
}

static dmu_tx_hold_t *
dmu_tx_hold_object_impl(dmu_tx_t *tx, objset_t *os, uint64_t object,
    enum dmu_tx_hold_type type, uint64_t arg1, uint64_t arg2)
{
	dnode_t *dn = NULL;
	dmu_tx_hold_t *txh;
	int err;

	if (object != DMU_NEW_OBJECT) {
		err = dnode_hold(os, object, FTAG, &dn);
		if (err != 0) {
			tx->tx_err = err;
			return (NULL);
		}
	}
	txh = dmu_tx_hold_dnode_impl(tx, dn, type, arg1, arg2);
	if (dn != NULL)
		dnode_rele(dn, FTAG);
	return (txh);
}

void
dmu_tx_add_new_object(dmu_tx_t *tx, dnode_t *dn)
{
	/*
	 * If we're syncing, they can manipulate any object anyhow, and
	 * the hold on the dnode_t can cause problems.
	 */
	if (!dmu_tx_is_syncing(tx))
		(void) dmu_tx_hold_dnode_impl(tx, dn, THT_NEWOBJECT, 0, 0);
}

/*
 * This function reads specified data from disk.  The specified data will
 * be needed to perform the transaction -- i.e., it will be read after
 * we do dmu_tx_assign().  There are two reasons that we read the data now
 * (before dmu_tx_assign()):
 *
 * 1. Reading it now has potentially better performance.  The transaction
 * has not yet been assigned, so the TXG is not held open, and also the
 * caller typically has less locks held when calling dmu_tx_hold_*() than
 * after the transaction has been assigned.  This reduces the lock (and txg)
 * hold times, thus reducing lock contention.
 *
 * 2. It is easier for callers (primarily the ZPL) to handle i/o errors
 * that are detected before they start making changes to the DMU state
 * (i.e. now).  Once the transaction has been assigned, and some DMU
 * state has been changed, it can be difficult to recover from an i/o
 * error (e.g. to undo the changes already made in memory at the DMU
 * layer).  Typically code to do so does not exist in the caller -- it
 * assumes that the data has already been cached and thus i/o errors are
 * not possible.
 *
 * It has been observed that the i/o initiated here can be a performance
 * problem, and it appears to be optional, because we don't look at the
 * data which is read.  However, removing this read would only serve to
 * move the work elsewhere (after the dmu_tx_assign()), where it may
 * have a greater impact on performance (in addition to the impact on
 * fault tolerance noted above).
 */
static int
dmu_tx_check_ioerr(zio_t *zio, dnode_t *dn, int level, uint64_t blkid)
{
	int err;
	dmu_buf_impl_t *db;

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	err = dbuf_hold_impl(dn, level, blkid, TRUE, FALSE, FTAG, &db);
	rw_exit(&dn->dn_struct_rwlock);
	if (err == ENOENT)
		return (0);
	if (err != 0)
		return (err);
	/*
	 * PARTIAL_FIRST allows caching for uncacheable blocks.  It will
	 * be cleared after dmu_buf_will_dirty() calls dbuf_read() again.
	 */
	err = dbuf_read(db, zio, DB_RF_CANFAIL | DB_RF_NOPREFETCH |
	    (level == 0 ? DB_RF_PARTIAL_FIRST : 0));
	dbuf_rele(db, FTAG);
	return (err);
}

static void
dmu_tx_count_write(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
{
	dnode_t *dn = txh->txh_dnode;
	int err = 0;

	if (len == 0)
		return;

	(void) zfs_refcount_add_many(&txh->txh_space_towrite, len, FTAG);

	if (dn == NULL)
		return;

	/*
	 * For i/o error checking, read the blocks that will be needed
	 * to perform the write: the first and last level-0 blocks (if
	 * they are not aligned, i.e. if they are partial-block writes),
	 * and all the level-1 blocks.
	 */
	if (dn->dn_maxblkid == 0) {
		if (off < dn->dn_datablksz &&
		    (off > 0 || len < dn->dn_datablksz)) {
			err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
			if (err != 0) {
				txh->txh_tx->tx_err = err;
			}
		}
	} else {
		zio_t *zio = zio_root(dn->dn_objset->os_spa,
		    NULL, NULL, ZIO_FLAG_CANFAIL);

		/* first level-0 block */
		uint64_t start = off >> dn->dn_datablkshift;
		if (P2PHASE(off, dn->dn_datablksz) || len < dn->dn_datablksz) {
			err = dmu_tx_check_ioerr(zio, dn, 0, start);
			if (err != 0) {
				txh->txh_tx->tx_err = err;
			}
		}

		/* last level-0 block */
		uint64_t end = (off + len - 1) >> dn->dn_datablkshift;
		if (end != start && end <= dn->dn_maxblkid &&
		    P2PHASE(off + len, dn->dn_datablksz)) {
			err = dmu_tx_check_ioerr(zio, dn, 0, end);
			if (err != 0) {
				txh->txh_tx->tx_err = err;
			}
		}

		/* level-1 blocks */
		if (dn->dn_nlevels > 1) {
			int shft = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
			for (uint64_t i = (start >> shft) + 1;
			    i < end >> shft; i++) {
				err = dmu_tx_check_ioerr(zio, dn, 1, i);
				if (err != 0) {
					txh->txh_tx->tx_err = err;
				}
			}
		}

		err = zio_wait(zio);
		if (err != 0) {
			txh->txh_tx->tx_err = err;
		}
	}
}

static void
dmu_tx_count_append(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
{
	dnode_t *dn = txh->txh_dnode;
	int err = 0;

	if (len == 0)
		return;

	(void) zfs_refcount_add_many(&txh->txh_space_towrite, len, FTAG);

	if (dn == NULL)
		return;

	/*
	 * For i/o error checking, read the block that will be needed
	 * to perform the append: the first level-0 block, if the write
	 * is not block-aligned (i.e. a partial-block write).  No
	 * additional blocks are read.
	 */
	if (dn->dn_maxblkid == 0) {
		if (off < dn->dn_datablksz &&
		    (off > 0 || len < dn->dn_datablksz)) {
			err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
			if (err != 0) {
				txh->txh_tx->tx_err = err;
			}
		}
	} else {
		zio_t *zio = zio_root(dn->dn_objset->os_spa,
		    NULL, NULL, ZIO_FLAG_CANFAIL);

		/* first level-0 block */
		uint64_t start = off >> dn->dn_datablkshift;
		if (P2PHASE(off, dn->dn_datablksz) || len < dn->dn_datablksz) {
			err = dmu_tx_check_ioerr(zio, dn, 0, start);
			if (err != 0) {
				txh->txh_tx->tx_err = err;
			}
		}

		err = zio_wait(zio);
		if (err != 0) {
			txh->txh_tx->tx_err = err;
		}
	}
}

static void
dmu_tx_count_dnode(dmu_tx_hold_t *txh)
{
	(void) zfs_refcount_add_many(&txh->txh_space_towrite,
	    DNODE_MIN_SIZE, FTAG);
}

void
dmu_tx_hold_write(dmu_tx_t *tx, uint64_t object, uint64_t off, int len)
{
	dmu_tx_hold_t *txh;

	ASSERT0(tx->tx_txg);
	ASSERT3U(len, <=, DMU_MAX_ACCESS);
	ASSERT(len == 0 || UINT64_MAX - off >= len - 1);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_WRITE, off, len);
	if (txh != NULL) {
		dmu_tx_count_write(txh, off, len);
		dmu_tx_count_dnode(txh);
	}
}

void
dmu_tx_hold_write_by_dnode(dmu_tx_t *tx, dnode_t *dn, uint64_t off, int len)
{
	dmu_tx_hold_t *txh;

	ASSERT0(tx->tx_txg);
	ASSERT3U(len, <=, DMU_MAX_ACCESS);
	ASSERT(len == 0 || UINT64_MAX - off >= len - 1);

	txh = dmu_tx_hold_dnode_impl(tx, dn, THT_WRITE, off, len);
	if (txh != NULL) {
		dmu_tx_count_write(txh, off, len);
		dmu_tx_count_dnode(txh);
	}
}

/*
 * Should be used when appending to an object and the exact offset is unknown.
 * The write must occur at or beyond the specified offset.  Only the L0 block
 * at the provided offset will be prefetched.
 */
void
dmu_tx_hold_append(dmu_tx_t *tx, uint64_t object, uint64_t off, int len)
{
	dmu_tx_hold_t *txh;

	ASSERT0(tx->tx_txg);
	ASSERT3U(len, <=, DMU_MAX_ACCESS);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_APPEND, off, DMU_OBJECT_END);
	if (txh != NULL) {
		dmu_tx_count_append(txh, off, len);
		dmu_tx_count_dnode(txh);
	}
}

void
dmu_tx_hold_append_by_dnode(dmu_tx_t *tx, dnode_t *dn, uint64_t off, int len)
{
	dmu_tx_hold_t *txh;

	ASSERT0(tx->tx_txg);
	ASSERT3U(len, <=, DMU_MAX_ACCESS);

	txh = dmu_tx_hold_dnode_impl(tx, dn, THT_APPEND, off, DMU_OBJECT_END);
	if (txh != NULL) {
		dmu_tx_count_append(txh, off, len);
		dmu_tx_count_dnode(txh);
	}
}

/*
 * This function marks the transaction as being a "net free".  The end
 * result is that refquotas will be disabled for this transaction, and
 * this transaction will be able to use half of the pool space overhead
 * (see dsl_pool_adjustedsize()).  Therefore this function should only
 * be called for transactions that we expect will not cause a net increase
 * in the amount of space used (but it's OK if that is occasionally not true).
 */
void
dmu_tx_mark_netfree(dmu_tx_t *tx)
{
	tx->tx_netfree = B_TRUE;
}
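
/*
 * A minimal usage sketch (hypothetical caller): when a transaction only
 * frees space, e.g. when deleting an object, it can be marked as a net
 * free before assignment:
 *
 *	dmu_tx_t *tx = dmu_tx_create(os);
 *	dmu_tx_hold_free(tx, object, 0, DMU_OBJECT_END);
 *	dmu_tx_mark_netfree(tx);
 *	error = dmu_tx_assign(tx, TXG_WAIT);
 */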

static void
dmu_tx_count_free(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
{
	dmu_tx_t *tx = txh->txh_tx;
	dnode_t *dn = txh->txh_dnode;
	int err;

	ASSERT(tx->tx_txg == 0);

	if (off >= (dn->dn_maxblkid + 1) * dn->dn_datablksz)
		return;
	if (len == DMU_OBJECT_END)
		len = (dn->dn_maxblkid + 1) * dn->dn_datablksz - off;

	/*
	 * For i/o error checking, we read the first and last level-0
	 * blocks if they are not aligned, and all the level-1 blocks.
	 *
	 * Note:  dbuf_free_range() assumes that we have not instantiated
	 * any level-0 dbufs that will be completely freed.  Therefore we must
	 * exercise care to not read or count the first and last blocks
	 * if they are blocksize-aligned.
	 */
	if (dn->dn_datablkshift == 0) {
		if (off != 0 || len < dn->dn_datablksz)
			dmu_tx_count_write(txh, 0, dn->dn_datablksz);
	} else {
		/* first block will be modified if it is not aligned */
		if (!IS_P2ALIGNED(off, 1 << dn->dn_datablkshift))
			dmu_tx_count_write(txh, off, 1);
		/* last block will be modified if it is not aligned */
		if (!IS_P2ALIGNED(off + len, 1 << dn->dn_datablkshift))
			dmu_tx_count_write(txh, off + len, 1);
	}

	/*
	 * Check level-1 blocks.
	 */
	if (dn->dn_nlevels > 1) {
		int shift = dn->dn_datablkshift + dn->dn_indblkshift -
		    SPA_BLKPTRSHIFT;
		uint64_t start = off >> shift;
		uint64_t end = (off + len) >> shift;

		ASSERT(dn->dn_indblkshift != 0);

		/*
		 * dnode_reallocate() can result in an object with indirect
		 * blocks having an odd data block size.  In this case,
		 * just check the single block.
		 */
		if (dn->dn_datablkshift == 0)
			start = end = 0;

		zio_t *zio = zio_root(tx->tx_pool->dp_spa,
		    NULL, NULL, ZIO_FLAG_CANFAIL);
		for (uint64_t i = start; i <= end; i++) {
			uint64_t ibyte = i << shift;
			err = dnode_next_offset(dn, 0, &ibyte, 2, 1, 0);
			i = ibyte >> shift;
			if (err == ESRCH || i > end)
				break;
			if (err != 0) {
				tx->tx_err = err;
				(void) zio_wait(zio);
				return;
			}

			(void) zfs_refcount_add_many(&txh->txh_memory_tohold,
			    1 << dn->dn_indblkshift, FTAG);

			err = dmu_tx_check_ioerr(zio, dn, 1, i);
			if (err != 0) {
				tx->tx_err = err;
				(void) zio_wait(zio);
				return;
			}
		}
		err = zio_wait(zio);
		if (err != 0) {
			tx->tx_err = err;
			return;
		}
	}
}

void
dmu_tx_hold_free(dmu_tx_t *tx, uint64_t object, uint64_t off, uint64_t len)
{
	dmu_tx_hold_t *txh;

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_FREE, off, len);
	if (txh != NULL) {
		dmu_tx_count_dnode(txh);
		dmu_tx_count_free(txh, off, len);
	}
}

void
dmu_tx_hold_free_by_dnode(dmu_tx_t *tx, dnode_t *dn, uint64_t off, uint64_t len)
{
	dmu_tx_hold_t *txh;

	txh = dmu_tx_hold_dnode_impl(tx, dn, THT_FREE, off, len);
	if (txh != NULL) {
		dmu_tx_count_dnode(txh);
		dmu_tx_count_free(txh, off, len);
	}
}

static void
dmu_tx_count_clone(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
{
	/*
	 * Reuse dmu_tx_count_free(); it does exactly what we need for clone.
	 */
	dmu_tx_count_free(txh, off, len);
}

void
dmu_tx_hold_clone_by_dnode(dmu_tx_t *tx, dnode_t *dn, uint64_t off, int len)
{
	dmu_tx_hold_t *txh;

	ASSERT0(tx->tx_txg);
	ASSERT(len == 0 || UINT64_MAX - off >= len - 1);

	txh = dmu_tx_hold_dnode_impl(tx, dn, THT_CLONE, off, len);
	if (txh != NULL) {
		dmu_tx_count_dnode(txh);
		dmu_tx_count_clone(txh, off, len);
	}
}

static void
dmu_tx_hold_zap_impl(dmu_tx_hold_t *txh, const char *name)
{
	dmu_tx_t *tx = txh->txh_tx;
	dnode_t *dn = txh->txh_dnode;
	int err;
	extern int zap_micro_max_size;

	ASSERT(tx->tx_txg == 0);

	dmu_tx_count_dnode(txh);

	/*
	 * Modifying an almost-full microzap is around the worst case (128KB).
	 *
	 * If it is a fat zap, the worst case would be 7*16KB=112KB:
	 * - 3 blocks overwritten: target leaf, ptrtbl block, header block
	 * - 4 new blocks written if adding:
	 *    - 2 blocks for possibly split leaves,
	 *    - 2 grown ptrtbl blocks
	 */
	(void) zfs_refcount_add_many(&txh->txh_space_towrite,
	    zap_micro_max_size, FTAG);

	if (dn == NULL)
		return;

	ASSERT3U(DMU_OT_BYTESWAP(dn->dn_type), ==, DMU_BSWAP_ZAP);

	if (dn->dn_maxblkid == 0 || name == NULL) {
		/*
		 * This is a microzap (only one block), or we don't know
		 * the name.  Check the first block for i/o errors.
		 */
		err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
		if (err != 0) {
			tx->tx_err = err;
		}
	} else {
		/*
		 * Access the name so that we'll check for i/o errors to
		 * the leaf blocks, etc.  We ignore ENOENT, as this name
		 * may not yet exist.
		 */
		err = zap_lookup_by_dnode(dn, name, 8, 0, NULL);
		if (err == EIO || err == ECKSUM || err == ENXIO) {
			tx->tx_err = err;
		}
	}
}

void
dmu_tx_hold_zap(dmu_tx_t *tx, uint64_t object, int add, const char *name)
{
	dmu_tx_hold_t *txh;

	ASSERT0(tx->tx_txg);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_ZAP, add, (uintptr_t)name);
	if (txh != NULL)
		dmu_tx_hold_zap_impl(txh, name);
}

void
dmu_tx_hold_zap_by_dnode(dmu_tx_t *tx, dnode_t *dn, int add, const char *name)
{
	dmu_tx_hold_t *txh;

	ASSERT0(tx->tx_txg);
	ASSERT(dn != NULL);

	txh = dmu_tx_hold_dnode_impl(tx, dn, THT_ZAP, add, (uintptr_t)name);
	if (txh != NULL)
		dmu_tx_hold_zap_impl(txh, name);
}

void
dmu_tx_hold_bonus(dmu_tx_t *tx, uint64_t object)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_BONUS, 0, 0);
	if (txh)
		dmu_tx_count_dnode(txh);
}

void
dmu_tx_hold_bonus_by_dnode(dmu_tx_t *tx, dnode_t *dn)
{
	dmu_tx_hold_t *txh;

	ASSERT0(tx->tx_txg);

	txh = dmu_tx_hold_dnode_impl(tx, dn, THT_BONUS, 0, 0);
	if (txh)
		dmu_tx_count_dnode(txh);
}

void
dmu_tx_hold_space(dmu_tx_t *tx, uint64_t space)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    DMU_NEW_OBJECT, THT_SPACE, space, 0);
	if (txh) {
		(void) zfs_refcount_add_many(
		    &txh->txh_space_towrite, space, FTAG);
	}
}

#ifdef ZFS_DEBUG
void
dmu_tx_dirty_buf(dmu_tx_t *tx, dmu_buf_impl_t *db)
{
	boolean_t match_object = B_FALSE;
	boolean_t match_offset = B_FALSE;

	DB_DNODE_ENTER(db);
	dnode_t *dn = DB_DNODE(db);
	ASSERT(tx->tx_txg != 0);
	ASSERT(tx->tx_objset == NULL || dn->dn_objset == tx->tx_objset);
	ASSERT3U(dn->dn_object, ==, db->db.db_object);

	if (tx->tx_anyobj) {
		DB_DNODE_EXIT(db);
		return;
	}

	/* XXX No checking on the meta dnode for now */
	if (db->db.db_object == DMU_META_DNODE_OBJECT) {
		DB_DNODE_EXIT(db);
		return;
	}

	for (dmu_tx_hold_t *txh = list_head(&tx->tx_holds); txh != NULL;
	    txh = list_next(&tx->tx_holds, txh)) {
		ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);
		if (txh->txh_dnode == dn && txh->txh_type != THT_NEWOBJECT)
			match_object = TRUE;
		if (txh->txh_dnode == NULL || txh->txh_dnode == dn) {
			int datablkshift = dn->dn_datablkshift ?
			    dn->dn_datablkshift : SPA_MAXBLOCKSHIFT;
			int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
			int shift = datablkshift + epbs * db->db_level;
			uint64_t beginblk = shift >= 64 ? 0 :
			    (txh->txh_arg1 >> shift);
			uint64_t endblk = shift >= 64 ? 0 :
			    ((txh->txh_arg1 + txh->txh_arg2 - 1) >> shift);
			uint64_t blkid = db->db_blkid;

			/* XXX txh_arg2 better not be zero... */

			dprintf("found txh type %x beginblk=%llx endblk=%llx\n",
			    txh->txh_type, (u_longlong_t)beginblk,
			    (u_longlong_t)endblk);

			switch (txh->txh_type) {
			case THT_WRITE:
				if (blkid >= beginblk && blkid <= endblk)
					match_offset = TRUE;
				/*
				 * We will let this hold work for the bonus
				 * or spill buffer so that we don't need to
				 * hold it when creating a new object.
				 */
				if (blkid == DMU_BONUS_BLKID ||
				    blkid == DMU_SPILL_BLKID)
					match_offset = TRUE;
				/*
				 * They might have to increase nlevels,
				 * thus dirtying the new TLIBs.  Or they
				 * might have to change the block size,
				 * thus dirtying the new lvl=0 blk=0.
				 */
				if (blkid == 0)
					match_offset = TRUE;
				break;
			case THT_APPEND:
				if (blkid >= beginblk && (blkid <= endblk ||
				    txh->txh_arg2 == DMU_OBJECT_END))
					match_offset = TRUE;

				/*
				 * THT_WRITE is used for bonus and spill blocks.
				 */
				ASSERT(blkid != DMU_BONUS_BLKID &&
				    blkid != DMU_SPILL_BLKID);

				/*
				 * They might have to increase nlevels,
				 * thus dirtying the new TLIBs.  Or they
				 * might have to change the block size,
				 * thus dirtying the new lvl=0 blk=0.
				 */
				if (blkid == 0)
					match_offset = TRUE;
				break;
			case THT_FREE:
				/*
				 * We will dirty all the level 1 blocks in
				 * the free range and perhaps the first and
				 * last level 0 block.
				 */
				if (blkid >= beginblk && (blkid <= endblk ||
				    txh->txh_arg2 == DMU_OBJECT_END))
					match_offset = TRUE;
				break;
			case THT_SPILL:
				if (blkid == DMU_SPILL_BLKID)
					match_offset = TRUE;
				break;
			case THT_BONUS:
				if (blkid == DMU_BONUS_BLKID)
					match_offset = TRUE;
				break;
			case THT_ZAP:
				match_offset = TRUE;
				break;
			case THT_NEWOBJECT:
				match_object = TRUE;
				break;
			case THT_CLONE:
				if (blkid >= beginblk && blkid <= endblk)
					match_offset = TRUE;
				break;
			default:
				cmn_err(CE_PANIC, "bad txh_type %d",
				    txh->txh_type);
			}
		}
		if (match_object && match_offset) {
			DB_DNODE_EXIT(db);
			return;
		}
	}
	DB_DNODE_EXIT(db);
	panic("dirtying dbuf obj=%llx lvl=%u blkid=%llx but not tx_held\n",
	    (u_longlong_t)db->db.db_object, db->db_level,
	    (u_longlong_t)db->db_blkid);
}
#endif

/*
 * If we can't do 10 iops, something is wrong.  Let us go ahead
 * and hit zfs_dirty_data_max.
 */
static const hrtime_t zfs_delay_max_ns = 100 * MICROSEC; /* 100 milliseconds */

/*
 * We delay transactions when we've determined that the backend storage
 * isn't able to accommodate the rate of incoming writes.
 *
 * If there is already a transaction waiting, we delay relative to when
 * that transaction finishes waiting.  This way the calculated min_time
 * is independent of the number of threads concurrently executing
 * transactions.
 *
 * If we are the only waiter, wait relative to when the transaction
 * started, rather than the current time.  This credits the transaction for
 * "time already served", e.g. reading indirect blocks.
 *
 * The minimum time for a transaction to take is calculated as:
 *     min_time = scale * (dirty - min) / (max - dirty)
 *     min_time is then capped at zfs_delay_max_ns.
 *
 * The delay has two degrees of freedom that can be adjusted via tunables.
 * The percentage of dirty data at which we start to delay is defined by
 * zfs_delay_min_dirty_percent. This should typically be at or above
 * zfs_vdev_async_write_active_max_dirty_percent so that we only start to
 * delay after writing at full speed has failed to keep up with the incoming
 * write rate. The scale of the curve is defined by zfs_delay_scale. Roughly
 * speaking, this variable determines the amount of delay at the midpoint of
 * the curve.
 *
 * delay
 *  10ms +-------------------------------------------------------------*+
 *       |                                                             *|
 *   9ms +                                                             *+
 *       |                                                             *|
 *   8ms +                                                             *+
 *       |                                                            * |
 *   7ms +                                                            * +
 *       |                                                            * |
 *   6ms +                                                            * +
 *       |                                                            * |
 *   5ms +                                                           *  +
 *       |                                                           *  |
 *   4ms +                                                           *  +
 *       |                                                           *  |
 *   3ms +                                                          *   +
 *       |                                                          *   |
 *   2ms +                                              (midpoint) *    +
 *       |                                                  |    **     |
 *   1ms +                                                  v ***       +
 *       |             zfs_delay_scale ---------->     ********         |
 *     0 +-------------------------------------*********----------------+
 *       0%                    <- zfs_dirty_data_max ->               100%
 *
 * Note that since the delay is added to the outstanding time remaining on the
 * most recent transaction, the delay is effectively the inverse of IOPS.
 * Here the midpoint of 500us translates to 2000 IOPS. The shape of the curve
 * was chosen such that small changes in the amount of accumulated dirty data
 * in the first 3/4 of the curve yield relatively small differences in the
 * amount of delay.
 *
 * The effects can be easier to understand when the amount of delay is
 * represented on a log scale:
 *
 * delay
 * 100ms +-------------------------------------------------------------++
 *       +                                                              +
 *       |                                                              |
 *       +                                                             *+
 *  10ms +                                                             *+
 *       +                                                           ** +
 *       |                                              (midpoint)  **  |
 *       +                                                  |     **    +
 *   1ms +                                                  v ****      +
 *       +             zfs_delay_scale ---------->        *****         +
 *       |                                             ****             |
 *       +                                          ****                +
 * 100us +                                        **                    +
 *       +                                       *                      +
 *       |                                      *                       |
 *       +                                     *                        +
 *  10us +                                     *                        +
 *       +                                                              +
 *       |                                                              |
 *       +                                                              +
 *       +--------------------------------------------------------------+
 *       0%                    <- zfs_dirty_data_max ->               100%
 *
 * Note here that only as the amount of dirty data approaches its limit does
 * the delay start to increase rapidly. The goal of a properly tuned system
 * should be to keep the amount of dirty data out of that range by first
 * ensuring that the appropriate limits are set for the I/O scheduler to reach
 * optimal throughput on the backend storage, and then by changing the value
 * of zfs_delay_scale to increase the steepness of the curve.
 */
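
/*
 * A worked example of the min_time formula above, assuming the default
 * zfs_delay_scale (500,000) and zfs_delay_min_dirty_percent (60), with a
 * hypothetical zfs_dirty_data_max of 1000 MB (so delay begins at 600 MB
 * of dirty data):
 *
 *	dirty = 800 MB: 500000 * (800 - 600) / (1000 - 800) =  500 us
 *	    (the midpoint of the curve, i.e. roughly 2000 IOPS)
 *	dirty = 900 MB: 500000 * (900 - 600) / (1000 - 900) =  1.5 ms
 *	dirty = 990 MB: 500000 * (990 - 600) / (1000 - 990) = 19.5 ms
 *
 * The result is then capped at zfs_delay_max_ns (100 ms).
 */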
static void
dmu_tx_delay(dmu_tx_t *tx, uint64_t dirty)
{
	dsl_pool_t *dp = tx->tx_pool;
	uint64_t delay_min_bytes, wrlog;
	hrtime_t wakeup, tx_time = 0, now;

	/* Calculate minimum transaction time for the dirty data amount. */
	delay_min_bytes =
	    zfs_dirty_data_max * zfs_delay_min_dirty_percent / 100;
	if (dirty > delay_min_bytes) {
		/*
		 * The caller has already waited until we are under the max.
		 * We make them pass us the amount of dirty data so we don't
		 * have to handle the case of it being >= the max, which
		 * could cause a divide-by-zero if it's == the max.
		 */
		ASSERT3U(dirty, <, zfs_dirty_data_max);

		tx_time = zfs_delay_scale * (dirty - delay_min_bytes) /
		    (zfs_dirty_data_max - dirty);
	}

	/* Calculate minimum transaction time for the TX_WRITE log size. */
	wrlog = aggsum_upper_bound(&dp->dp_wrlog_total);
	delay_min_bytes =
	    zfs_wrlog_data_max * zfs_delay_min_dirty_percent / 100;
	if (wrlog >= zfs_wrlog_data_max) {
		tx_time = zfs_delay_max_ns;
	} else if (wrlog > delay_min_bytes) {
		tx_time = MAX(zfs_delay_scale * (wrlog - delay_min_bytes) /
		    (zfs_wrlog_data_max - wrlog), tx_time);
	}

	if (tx_time == 0)
		return;

	tx_time = MIN(tx_time, zfs_delay_max_ns);
	now = gethrtime();
	if (now > tx->tx_start + tx_time)
		return;

	DTRACE_PROBE3(delay__mintime, dmu_tx_t *, tx, uint64_t, dirty,
	    uint64_t, tx_time);

	mutex_enter(&dp->dp_lock);
	wakeup = MAX(tx->tx_start + tx_time, dp->dp_last_wakeup + tx_time);
	dp->dp_last_wakeup = wakeup;
	mutex_exit(&dp->dp_lock);

	zfs_sleep_until(wakeup);
}

/*
 * This routine attempts to assign the transaction to a transaction group.
 * To do so, we must determine if there is sufficient free space on disk.
 *
 * If this is a "netfree" transaction (i.e. we called dmu_tx_mark_netfree()
 * on it), then it is assumed that there is sufficient free space,
 * unless there's insufficient slop space in the pool (see the comment
 * above spa_slop_shift in spa_misc.c).
 *
 * If it is not a "netfree" transaction, then if the data already on disk
 * is over the allowed usage (e.g. quota), this will fail with EDQUOT or
 * ENOSPC.  Otherwise, if the current rough estimate of pending changes,
 * plus the rough estimate of this transaction's changes, may exceed the
 * allowed usage, then this will fail with ERESTART, which will cause the
 * caller to wait for the pending changes to be written to disk (by waiting
 * for the next TXG to open), and then check the space usage again.
 *
 * The rough estimate of pending changes is the sum of:
 *
 *  - this transaction's holds' txh_space_towrite
 *
 *  - dd_tempreserved[], which is the sum of in-flight transactions'
 *    holds' txh_space_towrite (i.e. those transactions that have called
 *    dmu_tx_assign() but not yet called dmu_tx_commit()).
 *
 *  - dd_space_towrite[], which is the amount of dirtied dbufs.
 *
 * Note that all of these values are inflated by spa_get_worst_case_asize(),
 * which means that we may get ERESTART well before we are actually in danger
 * of running out of space, but this also mitigates any small inaccuracies
 * in the rough estimate (e.g. txh_space_towrite doesn't take into account
 * indirect blocks, and dd_space_towrite[] doesn't take into account changes
 * to the MOS).
 *
 * Note that due to this algorithm, it is possible to exceed the allowed
 * usage by one transaction.  Also, as we approach the allowed usage,
 * we will allow a very limited amount of changes into each TXG, thus
 * decreasing performance.
 */
static int
dmu_tx_try_assign(dmu_tx_t *tx, uint64_t txg_how)
{
	spa_t *spa = tx->tx_pool->dp_spa;

	ASSERT0(tx->tx_txg);

	if (tx->tx_err) {
		DMU_TX_STAT_BUMP(dmu_tx_error);
		return (tx->tx_err);
	}

	if (spa_suspended(spa)) {
		DMU_TX_STAT_BUMP(dmu_tx_suspended);

		/*
		 * If the user has indicated a blocking failure mode
		 * then return ERESTART which will block in dmu_tx_wait().
		 * Otherwise, return EIO so that an error can get
		 * propagated back to the VOP calls.
		 *
		 * Note that we always honor the txg_how flag regardless
		 * of the failuremode setting.
		 */
		if (spa_get_failmode(spa) == ZIO_FAILURE_MODE_CONTINUE &&
		    !(txg_how & TXG_WAIT))
			return (SET_ERROR(EIO));

		return (SET_ERROR(ERESTART));
	}

	if (!tx->tx_dirty_delayed &&
	    dsl_pool_need_wrlog_delay(tx->tx_pool)) {
		tx->tx_wait_dirty = B_TRUE;
		DMU_TX_STAT_BUMP(dmu_tx_wrlog_delay);
		return (SET_ERROR(ERESTART));
	}

	if (!tx->tx_dirty_delayed &&
	    dsl_pool_need_dirty_delay(tx->tx_pool)) {
		tx->tx_wait_dirty = B_TRUE;
		DMU_TX_STAT_BUMP(dmu_tx_dirty_delay);
		return (SET_ERROR(ERESTART));
	}

	tx->tx_txg = txg_hold_open(tx->tx_pool, &tx->tx_txgh);
	tx->tx_needassign_txh = NULL;

	/*
	 * NB: No error returns are allowed after txg_hold_open, but
	 * before processing the dnode holds, due to the
	 * dmu_tx_unassign() logic.
	 */

	uint64_t towrite = 0;
	uint64_t tohold = 0;
	for (dmu_tx_hold_t *txh = list_head(&tx->tx_holds); txh != NULL;
	    txh = list_next(&tx->tx_holds, txh)) {
		dnode_t *dn = txh->txh_dnode;
		if (dn != NULL) {
			/*
			 * This thread can't hold the dn_struct_rwlock
			 * while assigning the tx, because this can lead to
			 * deadlock. Specifically, if this dnode is already
			 * assigned to an earlier txg, this thread may need
			 * to wait for that txg to sync (the ERESTART case
			 * below).  The other thread that has assigned this
			 * dnode to an earlier txg prevents this txg from
			 * syncing until its tx can complete (calling
			 * dmu_tx_commit()), but it may need to acquire the
			 * dn_struct_rwlock to do so (e.g. via
			 * dmu_buf_hold*()).
			 *
			 * Note that this thread can't hold the lock for
			 * read either, but the rwlock doesn't record
			 * enough information to make that assertion.
			 */
			ASSERT(!RW_WRITE_HELD(&dn->dn_struct_rwlock));

			mutex_enter(&dn->dn_mtx);
			if (dn->dn_assigned_txg == tx->tx_txg - 1) {
				mutex_exit(&dn->dn_mtx);
				tx->tx_needassign_txh = txh;
				DMU_TX_STAT_BUMP(dmu_tx_group);
				return (SET_ERROR(ERESTART));
			}
			if (dn->dn_assigned_txg == 0)
				dn->dn_assigned_txg = tx->tx_txg;
			ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);
			(void) zfs_refcount_add(&dn->dn_tx_holds, tx);
			mutex_exit(&dn->dn_mtx);
		}
		towrite += zfs_refcount_count(&txh->txh_space_towrite);
		tohold += zfs_refcount_count(&txh->txh_memory_tohold);
	}

	/* needed allocation: worst-case estimate of write space */
	uint64_t asize = spa_get_worst_case_asize(tx->tx_pool->dp_spa, towrite);
	/* calculate memory footprint estimate */
	uint64_t memory = towrite + tohold;

	if (tx->tx_dir != NULL && asize != 0) {
		int err = dsl_dir_tempreserve_space(tx->tx_dir, memory,
		    asize, tx->tx_netfree, &tx->tx_tempreserve_cookie, tx);
		if (err != 0)
			return (err);
	}

	DMU_TX_STAT_BUMP(dmu_tx_assigned);

	return (0);
}

static void
dmu_tx_unassign(dmu_tx_t *tx)
{
	if (tx->tx_txg == 0)
		return;

	txg_rele_to_quiesce(&tx->tx_txgh);

	/*
	 * Walk the transaction's hold list, removing the hold on the
	 * associated dnode, and notifying waiters if the refcount drops to 0.
	 */
	for (dmu_tx_hold_t *txh = list_head(&tx->tx_holds);
	    txh && txh != tx->tx_needassign_txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		dnode_t *dn = txh->txh_dnode;

		if (dn == NULL)
			continue;
		mutex_enter(&dn->dn_mtx);
		ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);

		if (zfs_refcount_remove(&dn->dn_tx_holds, tx) == 0) {
			dn->dn_assigned_txg = 0;
			cv_broadcast(&dn->dn_notxholds);
		}
		mutex_exit(&dn->dn_mtx);
	}

	txg_rele_to_sync(&tx->tx_txgh);

	tx->tx_lasttried_txg = tx->tx_txg;
	tx->tx_txg = 0;
}

/*
 * Assign tx to a transaction group; txg_how is a bitmask:
 *
 * If TXG_WAIT is set and the currently open txg is full, this function
 * will wait until there's a new txg. This should be used when no locks
 * are being held. With this bit set, this function will only fail if
 * we're truly out of space (or over quota).
 *
 * If TXG_WAIT is *not* set and we can't assign into the currently open
 * txg without blocking, this function will return immediately with
 * ERESTART. This should be used whenever locks are being held.  On an
 * ERESTART error, the caller should drop all locks, call dmu_tx_wait(),
 * and try again.
 *
 * If TXG_NOTHROTTLE is set, this indicates that this tx should not be
 * delayed due to the ZFS Write Throttle (see comments in dsl_pool.c for
 * details on the throttle). This is used by the VFS operations, after
 * they have already called dmu_tx_wait() (though most likely on a
 * different tx).
 *
 * It is guaranteed that subsequent successful calls to dmu_tx_assign()
 * will assign the tx to monotonically increasing txgs. Of course this is
 * not strong monotonicity, because the same txg can be returned multiple
 * times in a row. This guarantee holds both for subsequent calls from
 * one thread and for multiple threads. For example, it is impossible to
 * observe the following sequence of events:
 *
 *          Thread 1                            Thread 2
 *
 *     dmu_tx_assign(T1, ...)
 *     1 <- dmu_tx_get_txg(T1)
 *                                       dmu_tx_assign(T2, ...)
 *                                       2 <- dmu_tx_get_txg(T2)
 *     dmu_tx_assign(T3, ...)
 *     1 <- dmu_tx_get_txg(T3)
 */
int
dmu_tx_assign(dmu_tx_t *tx, uint64_t txg_how)
{
	int err;

	ASSERT(tx->tx_txg == 0);
	ASSERT0(txg_how & ~(TXG_WAIT | TXG_NOTHROTTLE));
	ASSERT(!dsl_pool_sync_context(tx->tx_pool));

	/* If we might wait, we must not hold the config lock. */
	IMPLY((txg_how & TXG_WAIT), !dsl_pool_config_held(tx->tx_pool));

	if ((txg_how & TXG_NOTHROTTLE))
		tx->tx_dirty_delayed = B_TRUE;

	while ((err = dmu_tx_try_assign(tx, txg_how)) != 0) {
		dmu_tx_unassign(tx);

		if (err != ERESTART || !(txg_how & TXG_WAIT))
			return (err);

		dmu_tx_wait(tx);
	}

	txg_rele_to_quiesce(&tx->tx_txgh);

	return (0);
}
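
/*
 * A minimal caller sketch (object, off, len, and buf are hypothetical).
 * With no locks held, TXG_WAIT makes assignment fail only if the pool is
 * truly out of space (or over quota):
 *
 *	dmu_tx_t *tx = dmu_tx_create(os);
 *	dmu_tx_hold_write(tx, object, off, len);
 *	int err = dmu_tx_assign(tx, TXG_WAIT);
 *	if (err != 0) {
 *		dmu_tx_abort(tx);
 *		return (err);
 *	}
 *	dmu_write(os, object, off, len, buf, tx);
 *	dmu_tx_commit(tx);
 */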

void
dmu_tx_wait(dmu_tx_t *tx)
{
	spa_t *spa = tx->tx_pool->dp_spa;
	dsl_pool_t *dp = tx->tx_pool;
	hrtime_t before;

	ASSERT(tx->tx_txg == 0);
	ASSERT(!dsl_pool_config_held(tx->tx_pool));

	before = gethrtime();

	if (tx->tx_wait_dirty) {
		uint64_t dirty;

		/*
		 * dmu_tx_try_assign() has determined that we need to wait
		 * because we've consumed much or all of the dirty buffer
		 * space.
		 */
		mutex_enter(&dp->dp_lock);
		if (dp->dp_dirty_total >= zfs_dirty_data_max)
			DMU_TX_STAT_BUMP(dmu_tx_dirty_over_max);
		while (dp->dp_dirty_total >= zfs_dirty_data_max)
			cv_wait(&dp->dp_spaceavail_cv, &dp->dp_lock);
		dirty = dp->dp_dirty_total;
		mutex_exit(&dp->dp_lock);

		dmu_tx_delay(tx, dirty);

		tx->tx_wait_dirty = B_FALSE;

		/*
		 * Note: setting tx_dirty_delayed only has effect if the
		 * caller used TXG_WAIT.  Otherwise they are going to
		 * destroy this tx and try again.  The common case,
		 * zfs_write(), uses TXG_WAIT.
		 */
		tx->tx_dirty_delayed = B_TRUE;
	} else if (spa_suspended(spa) || tx->tx_lasttried_txg == 0) {
		/*
		 * If the pool is suspended we need to wait until it
		 * is resumed.  Note that it's possible that the pool
		 * has become active after this thread has tried to
		 * obtain a tx.  If that's the case then tx_lasttried_txg
		 * would not have been set.
		 */
		txg_wait_synced(dp, spa_last_synced_txg(spa) + 1);
	} else if (tx->tx_needassign_txh) {
		dnode_t *dn = tx->tx_needassign_txh->txh_dnode;

		mutex_enter(&dn->dn_mtx);
		while (dn->dn_assigned_txg == tx->tx_lasttried_txg - 1)
			cv_wait(&dn->dn_notxholds, &dn->dn_mtx);
		mutex_exit(&dn->dn_mtx);
		tx->tx_needassign_txh = NULL;
	} else {
		/*
		 * If we have a lot of dirty data just wait until we sync
		 * out a TXG at which point we'll hopefully have synced
		 * a portion of the changes.
		 */
		txg_wait_synced(dp, spa_last_synced_txg(spa) + 1);
	}

	spa_tx_assign_add_nsecs(spa, gethrtime() - before);
}

static void
dmu_tx_destroy(dmu_tx_t *tx)
{
	dmu_tx_hold_t *txh;

	while ((txh = list_head(&tx->tx_holds)) != NULL) {
		dnode_t *dn = txh->txh_dnode;

		list_remove(&tx->tx_holds, txh);
		zfs_refcount_destroy_many(&txh->txh_space_towrite,
		    zfs_refcount_count(&txh->txh_space_towrite));
		zfs_refcount_destroy_many(&txh->txh_memory_tohold,
		    zfs_refcount_count(&txh->txh_memory_tohold));
		kmem_free(txh, sizeof (dmu_tx_hold_t));
		if (dn != NULL)
			dnode_rele(dn, tx);
	}

	list_destroy(&tx->tx_callbacks);
	list_destroy(&tx->tx_holds);
	kmem_free(tx, sizeof (dmu_tx_t));
}

void
dmu_tx_commit(dmu_tx_t *tx)
{
	ASSERT(tx->tx_txg != 0);

	/*
	 * Go through the transaction's hold list and remove holds on
	 * associated dnodes, notifying waiters if no holds remain.
	 */
	for (dmu_tx_hold_t *txh = list_head(&tx->tx_holds); txh != NULL;
	    txh = list_next(&tx->tx_holds, txh)) {
		dnode_t *dn = txh->txh_dnode;

		if (dn == NULL)
			continue;

		mutex_enter(&dn->dn_mtx);
		ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);

		if (zfs_refcount_remove(&dn->dn_tx_holds, tx) == 0) {
			dn->dn_assigned_txg = 0;
			cv_broadcast(&dn->dn_notxholds);
		}
		mutex_exit(&dn->dn_mtx);
	}

	if (tx->tx_tempreserve_cookie)
		dsl_dir_tempreserve_clear(tx->tx_tempreserve_cookie, tx);

	if (!list_is_empty(&tx->tx_callbacks))
		txg_register_callbacks(&tx->tx_txgh, &tx->tx_callbacks);

	if (tx->tx_anyobj == FALSE)
		txg_rele_to_sync(&tx->tx_txgh);

	dmu_tx_destroy(tx);
}

void
dmu_tx_abort(dmu_tx_t *tx)
{
	ASSERT(tx->tx_txg == 0);

	/*
	 * Call any registered callbacks with an error code.
	 */
	if (!list_is_empty(&tx->tx_callbacks))
		dmu_tx_do_callbacks(&tx->tx_callbacks, SET_ERROR(ECANCELED));

	dmu_tx_destroy(tx);
}

uint64_t
dmu_tx_get_txg(dmu_tx_t *tx)
{
	ASSERT(tx->tx_txg != 0);
	return (tx->tx_txg);
}

dsl_pool_t *
dmu_tx_pool(dmu_tx_t *tx)
{
	ASSERT(tx->tx_pool != NULL);
	return (tx->tx_pool);
}

void
dmu_tx_callback_register(dmu_tx_t *tx, dmu_tx_callback_func_t *func, void *data)
{
	dmu_tx_callback_t *dcb;

	dcb = kmem_alloc(sizeof (dmu_tx_callback_t), KM_SLEEP);

	dcb->dcb_func = func;
	dcb->dcb_data = data;

	list_insert_tail(&tx->tx_callbacks, dcb);
}
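
/*
 * A sketch of commit-callback usage (my_cb and my_data are hypothetical).
 * The callback fires with error == 0 once the assigned txg is on stable
 * storage, or with an error (e.g. ECANCELED) if the tx is aborted:
 *
 *	static void
 *	my_cb(void *data, int error)
 *	{
 *		if (error == 0)
 *			... the changes reached stable storage ...
 *	}
 *
 *	dmu_tx_callback_register(tx, my_cb, my_data);
 */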

/*
 * Call all the commit callbacks on a list, with a given error code.
 */
void
dmu_tx_do_callbacks(list_t *cb_list, int error)
{
	dmu_tx_callback_t *dcb;

	while ((dcb = list_remove_tail(cb_list)) != NULL) {
		dcb->dcb_func(dcb->dcb_data, error);
		kmem_free(dcb, sizeof (dmu_tx_callback_t));
	}
}

/*
 * Interface to hold a bunch of attributes; used for creating new files.
 * attrsize is the total size of all attributes to be added during
 * object creation.
 *
 * For updating/adding a single attribute, dmu_tx_hold_sa() should be used.
 */

/*
 * Hold the necessary attribute name for attribute registration.
 * It should be a very rare case where this is needed.  If it does
 * happen, it would only happen on the first write to the file system.
 */
static void
dmu_tx_sa_registration_hold(sa_os_t *sa, dmu_tx_t *tx)
{
	if (!sa->sa_need_attr_registration)
		return;

	for (int i = 0; i != sa->sa_num_attrs; i++) {
		if (!sa->sa_attr_table[i].sa_registered) {
			if (sa->sa_reg_attr_obj)
				dmu_tx_hold_zap(tx, sa->sa_reg_attr_obj,
				    B_TRUE, sa->sa_attr_table[i].sa_name);
			else
				dmu_tx_hold_zap(tx, DMU_NEW_OBJECT,
				    B_TRUE, sa->sa_attr_table[i].sa_name);
		}
	}
}

void
dmu_tx_hold_spill(dmu_tx_t *tx, uint64_t object)
{
	dmu_tx_hold_t *txh;

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset, object,
	    THT_SPILL, 0, 0);
	if (txh != NULL)
		(void) zfs_refcount_add_many(&txh->txh_space_towrite,
		    SPA_OLD_MAXBLOCKSIZE, FTAG);
}

void
dmu_tx_hold_sa_create(dmu_tx_t *tx, int attrsize)
{
	sa_os_t *sa = tx->tx_objset->os_sa;

	dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);

	if (tx->tx_objset->os_sa->sa_master_obj == 0)
		return;

	if (tx->tx_objset->os_sa->sa_layout_attr_obj) {
		dmu_tx_hold_zap(tx, sa->sa_layout_attr_obj, B_TRUE, NULL);
	} else {
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_LAYOUTS);
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_REGISTRY);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
	}

	dmu_tx_sa_registration_hold(sa, tx);

	if (attrsize <= DN_OLD_MAX_BONUSLEN && !sa->sa_force_spill)
		return;

	(void) dmu_tx_hold_object_impl(tx, tx->tx_objset, DMU_NEW_OBJECT,
	    THT_SPILL, 0, 0);
}

/*
 * Hold SA attribute
 *
 * dmu_tx_hold_sa(dmu_tx_t *tx, sa_handle_t *hdl, boolean_t may_grow)
 *
 * may_grow indicates that the write to this attribute may grow it beyond
 * its current size, in which case the layout ZAP and the spill block may
 * need to be dirtied as well.
 */
void
dmu_tx_hold_sa(dmu_tx_t *tx, sa_handle_t *hdl, boolean_t may_grow)
{
	uint64_t object;
	sa_os_t *sa = tx->tx_objset->os_sa;

	ASSERT(hdl != NULL);

	object = sa_handle_object(hdl);

	dmu_buf_impl_t *db = (dmu_buf_impl_t *)hdl->sa_bonus;
	DB_DNODE_ENTER(db);
	dmu_tx_hold_bonus_by_dnode(tx, DB_DNODE(db));
	DB_DNODE_EXIT(db);

	if (tx->tx_objset->os_sa->sa_master_obj == 0)
		return;

	if (tx->tx_objset->os_sa->sa_reg_attr_obj == 0 ||
	    tx->tx_objset->os_sa->sa_layout_attr_obj == 0) {
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_LAYOUTS);
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_REGISTRY);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
	}

	dmu_tx_sa_registration_hold(sa, tx);

	if (may_grow && tx->tx_objset->os_sa->sa_layout_attr_obj)
		dmu_tx_hold_zap(tx, sa->sa_layout_attr_obj, B_TRUE, NULL);

	if (sa->sa_force_spill || may_grow || hdl->sa_spill) {
		ASSERT(tx->tx_txg == 0);
		dmu_tx_hold_spill(tx, object);
	} else {
		dnode_t *dn;

		DB_DNODE_ENTER(db);
		dn = DB_DNODE(db);
		if (dn->dn_have_spill) {
			ASSERT(tx->tx_txg == 0);
			dmu_tx_hold_spill(tx, object);
		}
		DB_DNODE_EXIT(db);
	}
}

void
dmu_tx_init(void)
{
	dmu_tx_ksp = kstat_create("zfs", 0, "dmu_tx", "misc",
	    KSTAT_TYPE_NAMED, sizeof (dmu_tx_stats) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);

	if (dmu_tx_ksp != NULL) {
		dmu_tx_ksp->ks_data = &dmu_tx_stats;
		kstat_install(dmu_tx_ksp);
	}
}

void
dmu_tx_fini(void)
{
	if (dmu_tx_ksp != NULL) {
		kstat_delete(dmu_tx_ksp);
		dmu_tx_ksp = NULL;
	}
}

#if defined(_KERNEL)
EXPORT_SYMBOL(dmu_tx_create);
EXPORT_SYMBOL(dmu_tx_hold_write);
EXPORT_SYMBOL(dmu_tx_hold_write_by_dnode);
EXPORT_SYMBOL(dmu_tx_hold_append);
EXPORT_SYMBOL(dmu_tx_hold_append_by_dnode);
EXPORT_SYMBOL(dmu_tx_hold_free);
EXPORT_SYMBOL(dmu_tx_hold_free_by_dnode);
EXPORT_SYMBOL(dmu_tx_hold_zap);
EXPORT_SYMBOL(dmu_tx_hold_zap_by_dnode);
EXPORT_SYMBOL(dmu_tx_hold_bonus);
EXPORT_SYMBOL(dmu_tx_hold_bonus_by_dnode);
EXPORT_SYMBOL(dmu_tx_abort);
EXPORT_SYMBOL(dmu_tx_assign);
EXPORT_SYMBOL(dmu_tx_wait);
EXPORT_SYMBOL(dmu_tx_commit);
EXPORT_SYMBOL(dmu_tx_mark_netfree);
EXPORT_SYMBOL(dmu_tx_get_txg);
EXPORT_SYMBOL(dmu_tx_callback_register);
EXPORT_SYMBOL(dmu_tx_do_callbacks);
EXPORT_SYMBOL(dmu_tx_hold_spill);
EXPORT_SYMBOL(dmu_tx_hold_sa_create);
EXPORT_SYMBOL(dmu_tx_hold_sa);
#endif