zil.c revision 288549
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2014 by Delphix. All rights reserved.
 */

/* Portions Copyright 2010 Robert Milkowski */

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/dmu.h>
#include <sys/zap.h>
#include <sys/arc.h>
#include <sys/stat.h>
#include <sys/resource.h>
#include <sys/zil.h>
#include <sys/zil_impl.h>
#include <sys/dsl_dataset.h>
#include <sys/vdev_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_pool.h>

/*
 * The zfs intent log (ZIL) saves transaction records of system calls
 * that change the file system in memory with enough information
 * to be able to replay them. These are stored in memory until
 * either the DMU transaction group (txg) commits them to the stable pool
 * and they can be discarded, or they are flushed to the stable log
 * (also in the pool) due to an fsync, O_DSYNC, or other synchronous
 * requirement. In the event of a panic or power failure, those log
 * records (transactions) are replayed.
 *
 * There is one ZIL per file system. Its on-disk (pool) format consists
 * of 3 parts:
 *
 * 	- ZIL header
 * 	- ZIL blocks
 * 	- ZIL records
 *
 * A log record holds a system call transaction. Log blocks can
 * hold many log records and the blocks are chained together.
 * Each ZIL block contains a block pointer (blkptr_t) to the next
 * ZIL block in the chain. The ZIL header points to the first
 * block in the chain. Note there is not a fixed place in the pool
 * to hold blocks. They are dynamically allocated and freed as
 * needed from the blocks available. The sketch below shows the
 * ZIL structure:
 */
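/*
 * An illustrative sketch of the chain described above (added here for
 * clarity; not part of the original source, and block contents are
 * simplified):
 *
 *	zil_header_t         ZIL block             ZIL block
 *	+-----------+        +-------------+       +-------------+
 *	| zh_log ---+------->| log records |   +-->| log records |
 *	+-----------+        | ...         |   |   | ...         |
 *	                     | blkptr_t ---+---+   | blkptr_t ...|
 *	                     +-------------+       +-------------+
 */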

/*
 * Disable intent logging replay.  This global ZIL switch affects all pools.
 */
int zil_replay_disable = 0;
SYSCTL_DECL(_vfs_zfs);
TUNABLE_INT("vfs.zfs.zil_replay_disable", &zil_replay_disable);
SYSCTL_INT(_vfs_zfs, OID_AUTO, zil_replay_disable, CTLFLAG_RW,
    &zil_replay_disable, 0, "Disable intent logging replay");

/*
 * Tunable parameter for debugging or performance analysis.  Setting
 * zfs_nocacheflush will cause corruption on power loss if a volatile
 * out-of-order write cache is enabled.
 */
boolean_t zfs_nocacheflush = B_FALSE;
TUNABLE_INT("vfs.zfs.cache_flush_disable", &zfs_nocacheflush);
SYSCTL_INT(_vfs_zfs, OID_AUTO, cache_flush_disable, CTLFLAG_RDTUN,
    &zfs_nocacheflush, 0, "Disable cache flush");
boolean_t zfs_trim_enabled = B_TRUE;
SYSCTL_DECL(_vfs_zfs_trim);
TUNABLE_INT("vfs.zfs.trim.enabled", &zfs_trim_enabled);
SYSCTL_INT(_vfs_zfs_trim, OID_AUTO, enabled, CTLFLAG_RDTUN, &zfs_trim_enabled, 0,
    "Enable ZFS TRIM");

static kmem_cache_t *zil_lwb_cache;

static void zil_async_to_sync(zilog_t *zilog, uint64_t foid);

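/*
 * True when no log records have been appended to the lwb yet: the free
 * space (lwb_sz - lwb_nused) still equals the block's record capacity,
 * i.e. its allocated size less the zil_chain_t, in either chain format.
 */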
#define	LWB_EMPTY(lwb) ((BP_GET_LSIZE(&lwb->lwb_blk) - \
    sizeof (zil_chain_t)) == (lwb->lwb_sz - lwb->lwb_nused))


/*
 * ziltest is by and large an ugly hack, but very useful in
 * checking replay without tedious work.
 * When running ziltest we want to keep all itx's and so maintain
 * a single list in the zl_itxg[] that uses a high txg: ZILTEST_TXG.
 * We subtract TXG_CONCURRENT_STATES to allow for common code.
 */
#define	ZILTEST_TXG (UINT64_MAX - TXG_CONCURRENT_STATES)

static int
zil_bp_compare(const void *x1, const void *x2)
{
	const dva_t *dva1 = &((zil_bp_node_t *)x1)->zn_dva;
	const dva_t *dva2 = &((zil_bp_node_t *)x2)->zn_dva;

	if (DVA_GET_VDEV(dva1) < DVA_GET_VDEV(dva2))
		return (-1);
	if (DVA_GET_VDEV(dva1) > DVA_GET_VDEV(dva2))
		return (1);

	if (DVA_GET_OFFSET(dva1) < DVA_GET_OFFSET(dva2))
		return (-1);
	if (DVA_GET_OFFSET(dva1) > DVA_GET_OFFSET(dva2))
		return (1);

	return (0);
}

static void
zil_bp_tree_init(zilog_t *zilog)
{
	avl_create(&zilog->zl_bp_tree, zil_bp_compare,
	    sizeof (zil_bp_node_t), offsetof(zil_bp_node_t, zn_node));
}

static void
zil_bp_tree_fini(zilog_t *zilog)
{
	avl_tree_t *t = &zilog->zl_bp_tree;
	zil_bp_node_t *zn;
	void *cookie = NULL;

	while ((zn = avl_destroy_nodes(t, &cookie)) != NULL)
		kmem_free(zn, sizeof (zil_bp_node_t));

	avl_destroy(t);
}

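/*
 * Record a block pointer's DVA in zl_bp_tree so that each log block is
 * claimed or freed exactly once during zil_parse().  Returns EEXIST if
 * the block was seen before; embedded bps carry no DVA and are skipped.
 */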
int
zil_bp_tree_add(zilog_t *zilog, const blkptr_t *bp)
{
	avl_tree_t *t = &zilog->zl_bp_tree;
	const dva_t *dva;
	zil_bp_node_t *zn;
	avl_index_t where;

	if (BP_IS_EMBEDDED(bp))
		return (0);

	dva = BP_IDENTITY(bp);

	if (avl_find(t, dva, &where) != NULL)
		return (SET_ERROR(EEXIST));

	zn = kmem_alloc(sizeof (zil_bp_node_t), KM_SLEEP);
	zn->zn_dva = *dva;
	avl_insert(t, zn, where);

	return (0);
}

static zil_header_t *
zil_header_in_syncing_context(zilog_t *zilog)
{
	return ((zil_header_t *)zilog->zl_header);
}

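/*
 * Seed a new log chain's self-describing checksum: two random GUID words
 * to distinguish this chain from stale blocks, the objset id, and an
 * initial sequence number of 1.  zil_read_log_block() later verifies
 * each block by matching its embedded checksum against its predecessor's
 * with ZIL_ZC_SEQ incremented by one.
 */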
static void
zil_init_log_chain(zilog_t *zilog, blkptr_t *bp)
{
	zio_cksum_t *zc = &bp->blk_cksum;

	zc->zc_word[ZIL_ZC_GUID_0] = spa_get_random(-1ULL);
	zc->zc_word[ZIL_ZC_GUID_1] = spa_get_random(-1ULL);
	zc->zc_word[ZIL_ZC_OBJSET] = dmu_objset_id(zilog->zl_os);
	zc->zc_word[ZIL_ZC_SEQ] = 1ULL;
}

/*
 * Read a log block and make sure it's valid.
 */
static int
zil_read_log_block(zilog_t *zilog, const blkptr_t *bp, blkptr_t *nbp, void *dst,
    char **end)
{
	enum zio_flag zio_flags = ZIO_FLAG_CANFAIL;
	arc_flags_t aflags = ARC_FLAG_WAIT;
	arc_buf_t *abuf = NULL;
	zbookmark_phys_t zb;
	int error;

	if (zilog->zl_header->zh_claim_txg == 0)
		zio_flags |= ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB;

	if (!(zilog->zl_header->zh_flags & ZIL_CLAIM_LR_SEQ_VALID))
		zio_flags |= ZIO_FLAG_SPECULATIVE;

	SET_BOOKMARK(&zb, bp->blk_cksum.zc_word[ZIL_ZC_OBJSET],
	    ZB_ZIL_OBJECT, ZB_ZIL_LEVEL, bp->blk_cksum.zc_word[ZIL_ZC_SEQ]);

	error = arc_read(NULL, zilog->zl_spa, bp, arc_getbuf_func, &abuf,
	    ZIO_PRIORITY_SYNC_READ, zio_flags, &aflags, &zb);

	if (error == 0) {
		zio_cksum_t cksum = bp->blk_cksum;

		/*
		 * Validate the checksummed log block.
		 *
		 * Sequence numbers should be... sequential.  The checksum
		 * verifier for the next block should be bp's checksum plus 1.
		 *
		 * Also check the log chain linkage and size used.
		 */
		cksum.zc_word[ZIL_ZC_SEQ]++;

		if (BP_GET_CHECKSUM(bp) == ZIO_CHECKSUM_ZILOG2) {
			zil_chain_t *zilc = abuf->b_data;
			char *lr = (char *)(zilc + 1);
			uint64_t len = zilc->zc_nused - sizeof (zil_chain_t);

			if (bcmp(&cksum, &zilc->zc_next_blk.blk_cksum,
			    sizeof (cksum)) || BP_IS_HOLE(&zilc->zc_next_blk)) {
				error = SET_ERROR(ECKSUM);
			} else {
				ASSERT3U(len, <=, SPA_OLD_MAXBLOCKSIZE);
				bcopy(lr, dst, len);
				*end = (char *)dst + len;
				*nbp = zilc->zc_next_blk;
			}
		} else {
			char *lr = abuf->b_data;
			uint64_t size = BP_GET_LSIZE(bp);
			zil_chain_t *zilc = (zil_chain_t *)(lr + size) - 1;

			if (bcmp(&cksum, &zilc->zc_next_blk.blk_cksum,
			    sizeof (cksum)) || BP_IS_HOLE(&zilc->zc_next_blk) ||
			    (zilc->zc_nused > (size - sizeof (*zilc)))) {
				error = SET_ERROR(ECKSUM);
			} else {
				ASSERT3U(zilc->zc_nused, <=,
				    SPA_OLD_MAXBLOCKSIZE);
				bcopy(lr, dst, zilc->zc_nused);
				*end = (char *)dst + zilc->zc_nused;
				*nbp = zilc->zc_next_blk;
			}
		}

		VERIFY(arc_buf_remove_ref(abuf, &abuf));
	}

	return (error);
}

/*
 * Read a TX_WRITE log data block.
 */
static int
zil_read_log_data(zilog_t *zilog, const lr_write_t *lr, void *wbuf)
{
	enum zio_flag zio_flags = ZIO_FLAG_CANFAIL;
	const blkptr_t *bp = &lr->lr_blkptr;
	arc_flags_t aflags = ARC_FLAG_WAIT;
	arc_buf_t *abuf = NULL;
	zbookmark_phys_t zb;
	int error;

	if (BP_IS_HOLE(bp)) {
		if (wbuf != NULL)
			bzero(wbuf, MAX(BP_GET_LSIZE(bp), lr->lr_length));
		return (0);
	}

	if (zilog->zl_header->zh_claim_txg == 0)
		zio_flags |= ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB;

	SET_BOOKMARK(&zb, dmu_objset_id(zilog->zl_os), lr->lr_foid,
	    ZB_ZIL_LEVEL, lr->lr_offset / BP_GET_LSIZE(bp));

	error = arc_read(NULL, zilog->zl_spa, bp, arc_getbuf_func, &abuf,
	    ZIO_PRIORITY_SYNC_READ, zio_flags, &aflags, &zb);

	if (error == 0) {
		if (wbuf != NULL)
			bcopy(abuf->b_data, wbuf, arc_buf_size(abuf));
		(void) arc_buf_remove_ref(abuf, &abuf);
	}

	return (error);
}

/*
 * Parse the intent log, and call parse_func for each valid record within.
 */
int
zil_parse(zilog_t *zilog, zil_parse_blk_func_t *parse_blk_func,
    zil_parse_lr_func_t *parse_lr_func, void *arg, uint64_t txg)
{
	const zil_header_t *zh = zilog->zl_header;
	boolean_t claimed = !!zh->zh_claim_txg;
	uint64_t claim_blk_seq = claimed ? zh->zh_claim_blk_seq : UINT64_MAX;
	uint64_t claim_lr_seq = claimed ? zh->zh_claim_lr_seq : UINT64_MAX;
	uint64_t max_blk_seq = 0;
	uint64_t max_lr_seq = 0;
	uint64_t blk_count = 0;
	uint64_t lr_count = 0;
	blkptr_t blk, next_blk;
	char *lrbuf, *lrp;
	int error = 0;

	/*
	 * Old logs didn't record the maximum zh_claim_lr_seq.
	 */
	if (!(zh->zh_flags & ZIL_CLAIM_LR_SEQ_VALID))
		claim_lr_seq = UINT64_MAX;

	/*
	 * Starting at the block pointed to by zh_log we read the log chain.
	 * For each block in the chain we strongly check that block to
	 * ensure its validity.  We stop when an invalid block is found.
	 * For each block pointer in the chain we call parse_blk_func().
	 * For each record in each valid block we call parse_lr_func().
	 * If the log has been claimed, stop if we encounter a sequence
	 * number greater than the highest claimed sequence number.
	 */
	lrbuf = zio_buf_alloc(SPA_OLD_MAXBLOCKSIZE);
	zil_bp_tree_init(zilog);

	for (blk = zh->zh_log; !BP_IS_HOLE(&blk); blk = next_blk) {
		uint64_t blk_seq = blk.blk_cksum.zc_word[ZIL_ZC_SEQ];
		int reclen;
		char *end;

		if (blk_seq > claim_blk_seq)
			break;
		if ((error = parse_blk_func(zilog, &blk, arg, txg)) != 0)
			break;
		ASSERT3U(max_blk_seq, <, blk_seq);
		max_blk_seq = blk_seq;
		blk_count++;

		if (max_lr_seq == claim_lr_seq && max_blk_seq == claim_blk_seq)
			break;

		error = zil_read_log_block(zilog, &blk, &next_blk, lrbuf, &end);
		if (error != 0)
			break;

		for (lrp = lrbuf; lrp < end; lrp += reclen) {
			lr_t *lr = (lr_t *)lrp;
			reclen = lr->lrc_reclen;
			ASSERT3U(reclen, >=, sizeof (lr_t));
			if (lr->lrc_seq > claim_lr_seq)
				goto done;
			if ((error = parse_lr_func(zilog, lr, arg, txg)) != 0)
				goto done;
			ASSERT3U(max_lr_seq, <, lr->lrc_seq);
			max_lr_seq = lr->lrc_seq;
			lr_count++;
		}
	}
done:
	zilog->zl_parse_error = error;
	zilog->zl_parse_blk_seq = max_blk_seq;
	zilog->zl_parse_lr_seq = max_lr_seq;
	zilog->zl_parse_blk_count = blk_count;
	zilog->zl_parse_lr_count = lr_count;

	ASSERT(!claimed || !(zh->zh_flags & ZIL_CLAIM_LR_SEQ_VALID) ||
	    (max_blk_seq == claim_blk_seq && max_lr_seq == claim_lr_seq));

	zil_bp_tree_fini(zilog);
	zio_buf_free(lrbuf, SPA_OLD_MAXBLOCKSIZE);

	return (error);
}

static int
zil_claim_log_block(zilog_t *zilog, blkptr_t *bp, void *tx, uint64_t first_txg)
{
	/*
	 * Claim log block if not already committed and not already claimed.
	 * If tx == NULL, just verify that the block is claimable.
	 */
	if (BP_IS_HOLE(bp) || bp->blk_birth < first_txg ||
	    zil_bp_tree_add(zilog, bp) != 0)
		return (0);

	return (zio_wait(zio_claim(NULL, zilog->zl_spa,
	    tx == NULL ? 0 : first_txg, bp, spa_claim_notify, NULL,
	    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB)));
}

static int
zil_claim_log_record(zilog_t *zilog, lr_t *lrc, void *tx, uint64_t first_txg)
{
	lr_write_t *lr = (lr_write_t *)lrc;
	int error;

	if (lrc->lrc_txtype != TX_WRITE)
		return (0);

	/*
	 * If the block is not readable, don't claim it.  This can happen
	 * in normal operation when a log block is written to disk before
	 * some of the dmu_sync() blocks it points to.  In this case, the
	 * transaction cannot have been committed to anyone (we would have
	 * waited for all writes to be stable first), so it is semantically
	 * correct to declare this the end of the log.
	 */
	if (lr->lr_blkptr.blk_birth >= first_txg &&
	    (error = zil_read_log_data(zilog, lr, NULL)) != 0)
		return (error);
	return (zil_claim_log_block(zilog, &lr->lr_blkptr, tx, first_txg));
}

/* ARGSUSED */
static int
zil_free_log_block(zilog_t *zilog, blkptr_t *bp, void *tx, uint64_t claim_txg)
{
	zio_free_zil(zilog->zl_spa, dmu_tx_get_txg(tx), bp);

	return (0);
}

static int
zil_free_log_record(zilog_t *zilog, lr_t *lrc, void *tx, uint64_t claim_txg)
{
	lr_write_t *lr = (lr_write_t *)lrc;
	blkptr_t *bp = &lr->lr_blkptr;

	/*
	 * If we previously claimed it, we need to free it.
	 */
	if (claim_txg != 0 && lrc->lrc_txtype == TX_WRITE &&
	    bp->blk_birth >= claim_txg && zil_bp_tree_add(zilog, bp) == 0 &&
	    !BP_IS_HOLE(bp))
		zio_free(zilog->zl_spa, dmu_tx_get_txg(tx), bp);

	return (0);
}

static lwb_t *
zil_alloc_lwb(zilog_t *zilog, blkptr_t *bp, uint64_t txg)
{
	lwb_t *lwb;

	lwb = kmem_cache_alloc(zil_lwb_cache, KM_SLEEP);
	lwb->lwb_zilog = zilog;
	lwb->lwb_blk = *bp;
	lwb->lwb_buf = zio_buf_alloc(BP_GET_LSIZE(bp));
	lwb->lwb_max_txg = txg;
	lwb->lwb_zio = NULL;
	lwb->lwb_tx = NULL;
	if (BP_GET_CHECKSUM(bp) == ZIO_CHECKSUM_ZILOG2) {
		lwb->lwb_nused = sizeof (zil_chain_t);
		lwb->lwb_sz = BP_GET_LSIZE(bp);
	} else {
		lwb->lwb_nused = 0;
		lwb->lwb_sz = BP_GET_LSIZE(bp) - sizeof (zil_chain_t);
	}

	mutex_enter(&zilog->zl_lock);
	list_insert_tail(&zilog->zl_lwb_list, lwb);
	mutex_exit(&zilog->zl_lock);

	return (lwb);
}

/*
 * Called when we create in-memory log transactions so that we know
 * to clean up the itxs at the end of spa_sync().
 */
void
zilog_dirty(zilog_t *zilog, uint64_t txg)
{
	dsl_pool_t *dp = zilog->zl_dmu_pool;
	dsl_dataset_t *ds = dmu_objset_ds(zilog->zl_os);

	if (ds->ds_is_snapshot)
		panic("dirtying snapshot!");

	if (txg_list_add(&dp->dp_dirty_zilogs, zilog, txg)) {
		/* up the hold count until we can be written out */
		dmu_buf_add_ref(ds->ds_dbuf, zilog);
	}
}

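/*
 * Report whether this zilog is on the pool's dirty-zilog list in any
 * open txg, i.e. whether it has itxs that spa_sync() has yet to clean.
 */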
boolean_t
zilog_is_dirty(zilog_t *zilog)
{
	dsl_pool_t *dp = zilog->zl_dmu_pool;

	for (int t = 0; t < TXG_SIZE; t++) {
		if (txg_list_member(&dp->dp_dirty_zilogs, zilog, t))
			return (B_TRUE);
	}
	return (B_FALSE);
}

/*
 * Create an on-disk intent log.
 */
static lwb_t *
zil_create(zilog_t *zilog)
{
	const zil_header_t *zh = zilog->zl_header;
	lwb_t *lwb = NULL;
	uint64_t txg = 0;
	dmu_tx_t *tx = NULL;
	blkptr_t blk;
	int error = 0;

	/*
	 * Wait for any previous destroy to complete.
	 */
	txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);

	ASSERT(zh->zh_claim_txg == 0);
	ASSERT(zh->zh_replay_seq == 0);

	blk = zh->zh_log;

	/*
	 * Allocate an initial log block if:
	 *    - there isn't one already
	 *    - the existing block is the wrong endianness
	 */
	if (BP_IS_HOLE(&blk) || BP_SHOULD_BYTESWAP(&blk)) {
		tx = dmu_tx_create(zilog->zl_os);
		VERIFY(dmu_tx_assign(tx, TXG_WAIT) == 0);
		dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
		txg = dmu_tx_get_txg(tx);

		if (!BP_IS_HOLE(&blk)) {
			zio_free_zil(zilog->zl_spa, txg, &blk);
			BP_ZERO(&blk);
		}

		error = zio_alloc_zil(zilog->zl_spa, txg, &blk, NULL,
		    ZIL_MIN_BLKSZ, zilog->zl_logbias == ZFS_LOGBIAS_LATENCY);

		if (error == 0)
			zil_init_log_chain(zilog, &blk);
	}

	/*
	 * Allocate a log write buffer (lwb) for the first log block.
	 */
	if (error == 0)
		lwb = zil_alloc_lwb(zilog, &blk, txg);

	/*
	 * If we just allocated the first log block, commit our transaction
	 * and wait for zil_sync() to stuff the block pointer into zh_log.
	 * (zh is part of the MOS, so we cannot modify it in open context.)
	 */
	if (tx != NULL) {
		dmu_tx_commit(tx);
		txg_wait_synced(zilog->zl_dmu_pool, txg);
	}

	ASSERT(bcmp(&blk, &zh->zh_log, sizeof (blk)) == 0);

	return (lwb);
}

/*
 * In one tx, free all log blocks and clear the log header.
 * If keep_first is set, then we're replaying a log with no content.
 * We want to keep the first block, however, so that the first
 * synchronous transaction doesn't require a txg_wait_synced()
 * in zil_create().  We don't need to txg_wait_synced() here either
 * when keep_first is set, because both zil_create() and zil_destroy()
 * will wait for any in-progress destroys to complete.
 */
void
zil_destroy(zilog_t *zilog, boolean_t keep_first)
{
	const zil_header_t *zh = zilog->zl_header;
	lwb_t *lwb;
	dmu_tx_t *tx;
	uint64_t txg;

	/*
	 * Wait for any previous destroy to complete.
	 */
	txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);

	zilog->zl_old_header = *zh;		/* debugging aid */

	if (BP_IS_HOLE(&zh->zh_log))
		return;

	tx = dmu_tx_create(zilog->zl_os);
	VERIFY(dmu_tx_assign(tx, TXG_WAIT) == 0);
	dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
	txg = dmu_tx_get_txg(tx);

	mutex_enter(&zilog->zl_lock);

	ASSERT3U(zilog->zl_destroy_txg, <, txg);
	zilog->zl_destroy_txg = txg;
	zilog->zl_keep_first = keep_first;

	if (!list_is_empty(&zilog->zl_lwb_list)) {
		ASSERT(zh->zh_claim_txg == 0);
		VERIFY(!keep_first);
		while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
			list_remove(&zilog->zl_lwb_list, lwb);
			if (lwb->lwb_buf != NULL)
				zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
			zio_free_zil(zilog->zl_spa, txg, &lwb->lwb_blk);
			kmem_cache_free(zil_lwb_cache, lwb);
		}
	} else if (!keep_first) {
		zil_destroy_sync(zilog, tx);
	}
	mutex_exit(&zilog->zl_lock);

	dmu_tx_commit(tx);
}

void
zil_destroy_sync(zilog_t *zilog, dmu_tx_t *tx)
{
	ASSERT(list_is_empty(&zilog->zl_lwb_list));
	(void) zil_parse(zilog, zil_free_log_block,
	    zil_free_log_record, tx, zilog->zl_header->zh_claim_txg);
}

int
zil_claim(const char *osname, void *txarg)
{
	dmu_tx_t *tx = txarg;
	uint64_t first_txg = dmu_tx_get_txg(tx);
	zilog_t *zilog;
	zil_header_t *zh;
	objset_t *os;
	int error;

	error = dmu_objset_own(osname, DMU_OST_ANY, B_FALSE, FTAG, &os);
	if (error != 0) {
		/*
		 * EBUSY indicates that the objset is inconsistent, in which
		 * case it cannot have a ZIL.
		 */
		if (error != EBUSY) {
			cmn_err(CE_WARN, "can't open objset for %s, error %u",
			    osname, error);
		}
		return (0);
	}

	zilog = dmu_objset_zil(os);
	zh = zil_header_in_syncing_context(zilog);

	if (spa_get_log_state(zilog->zl_spa) == SPA_LOG_CLEAR) {
		if (!BP_IS_HOLE(&zh->zh_log))
			zio_free_zil(zilog->zl_spa, first_txg, &zh->zh_log);
		BP_ZERO(&zh->zh_log);
		dsl_dataset_dirty(dmu_objset_ds(os), tx);
		dmu_objset_disown(os, FTAG);
		return (0);
	}

	/*
	 * Claim all log blocks if we haven't already done so, and remember
	 * the highest claimed sequence number.  This ensures that if we can
	 * read only part of the log now (e.g. due to a missing device),
	 * but we can read the entire log later, we will not try to replay
	 * or destroy beyond the last block we successfully claimed.
	 */
	ASSERT3U(zh->zh_claim_txg, <=, first_txg);
	if (zh->zh_claim_txg == 0 && !BP_IS_HOLE(&zh->zh_log)) {
		(void) zil_parse(zilog, zil_claim_log_block,
		    zil_claim_log_record, tx, first_txg);
		zh->zh_claim_txg = first_txg;
		zh->zh_claim_blk_seq = zilog->zl_parse_blk_seq;
		zh->zh_claim_lr_seq = zilog->zl_parse_lr_seq;
		if (zilog->zl_parse_lr_count || zilog->zl_parse_blk_count > 1)
			zh->zh_flags |= ZIL_REPLAY_NEEDED;
		zh->zh_flags |= ZIL_CLAIM_LR_SEQ_VALID;
		dsl_dataset_dirty(dmu_objset_ds(os), tx);
	}

	ASSERT3U(first_txg, ==, (spa_last_synced_txg(zilog->zl_spa) + 1));
	dmu_objset_disown(os, FTAG);
	return (0);
}

/*
 * Check the log by walking the log chain.
 * Checksum errors are ok as they indicate the end of the chain.
 * Any other error (no device or read failure) returns an error.
 */
int
zil_check_log_chain(const char *osname, void *tx)
{
	zilog_t *zilog;
	objset_t *os;
	blkptr_t *bp;
	int error;

	ASSERT(tx == NULL);

	error = dmu_objset_hold(osname, FTAG, &os);
	if (error != 0) {
		cmn_err(CE_WARN, "can't open objset for %s", osname);
		return (0);
	}

	zilog = dmu_objset_zil(os);
	bp = (blkptr_t *)&zilog->zl_header->zh_log;

	/*
	 * Check the first block and determine if it's on a log device
	 * which may have been removed or faulted prior to loading this
	 * pool.  If so, there's no point in checking the rest of the log
	 * as its content should have already been synced to the pool.
	 */
	if (!BP_IS_HOLE(bp)) {
		vdev_t *vd;
		boolean_t valid = B_TRUE;

		spa_config_enter(os->os_spa, SCL_STATE, FTAG, RW_READER);
		vd = vdev_lookup_top(os->os_spa, DVA_GET_VDEV(&bp->blk_dva[0]));
		if (vd->vdev_islog && vdev_is_dead(vd))
			valid = vdev_log_state_valid(vd);
		spa_config_exit(os->os_spa, SCL_STATE, FTAG);

		if (!valid) {
			dmu_objset_rele(os, FTAG);
			return (0);
		}
	}

	/*
	 * Because tx == NULL, zil_claim_log_block() will not actually claim
	 * any blocks, but just determine whether it is possible to do so.
	 * In addition to checking the log chain, zil_claim_log_block()
	 * will invoke zio_claim() with a done func of spa_claim_notify(),
	 * which will update spa_max_claim_txg.  See spa_load() for details.
	 */
	error = zil_parse(zilog, zil_claim_log_block, zil_claim_log_record, tx,
	    zilog->zl_header->zh_claim_txg ? -1ULL : spa_first_txg(os->os_spa));

	dmu_objset_rele(os, FTAG);

	return ((error == ECKSUM || error == ENOENT) ? 0 : error);
}

static int
zil_vdev_compare(const void *x1, const void *x2)
{
	const uint64_t v1 = ((zil_vdev_node_t *)x1)->zv_vdev;
	const uint64_t v2 = ((zil_vdev_node_t *)x2)->zv_vdev;

	if (v1 < v2)
		return (-1);
	if (v1 > v2)
		return (1);

	return (0);
}

void
zil_add_block(zilog_t *zilog, const blkptr_t *bp)
{
	avl_tree_t *t = &zilog->zl_vdev_tree;
	avl_index_t where;
	zil_vdev_node_t *zv, zvsearch;
	int ndvas = BP_GET_NDVAS(bp);
	int i;

	if (zfs_nocacheflush)
		return;

	ASSERT(zilog->zl_writer);

	/*
	 * Even though we're zl_writer, we still need a lock because the
	 * zl_get_data() callbacks may have dmu_sync() done callbacks
	 * that will run concurrently.
	 */
	mutex_enter(&zilog->zl_vdev_lock);
	for (i = 0; i < ndvas; i++) {
		zvsearch.zv_vdev = DVA_GET_VDEV(&bp->blk_dva[i]);
		if (avl_find(t, &zvsearch, &where) == NULL) {
			zv = kmem_alloc(sizeof (*zv), KM_SLEEP);
			zv->zv_vdev = zvsearch.zv_vdev;
			avl_insert(t, zv, where);
		}
	}
	mutex_exit(&zilog->zl_vdev_lock);
}

static void
zil_flush_vdevs(zilog_t *zilog)
{
	spa_t *spa = zilog->zl_spa;
	avl_tree_t *t = &zilog->zl_vdev_tree;
	void *cookie = NULL;
	zil_vdev_node_t *zv;
	zio_t *zio;

	ASSERT(zilog->zl_writer);

	/*
	 * We don't need zl_vdev_lock here because we're the zl_writer,
	 * and all zl_get_data() callbacks are done.
	 */
	if (avl_numnodes(t) == 0)
		return;

	spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);

	zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);

	while ((zv = avl_destroy_nodes(t, &cookie)) != NULL) {
		vdev_t *vd = vdev_lookup_top(spa, zv->zv_vdev);
		if (vd != NULL)
			zio_flush(zio, vd);
		kmem_free(zv, sizeof (*zv));
	}

	/*
	 * Wait for all the flushes to complete.  Not all devices actually
	 * support the DKIOCFLUSHWRITECACHE ioctl, so it's OK if it fails.
	 */
	(void) zio_wait(zio);

	spa_config_exit(spa, SCL_STATE, FTAG);
}

/*
 * Function called when a log block write completes
 */
static void
zil_lwb_write_done(zio_t *zio)
{
	lwb_t *lwb = zio->io_private;
	zilog_t *zilog = lwb->lwb_zilog;
	dmu_tx_t *tx = lwb->lwb_tx;

	ASSERT(BP_GET_COMPRESS(zio->io_bp) == ZIO_COMPRESS_OFF);
	ASSERT(BP_GET_TYPE(zio->io_bp) == DMU_OT_INTENT_LOG);
	ASSERT(BP_GET_LEVEL(zio->io_bp) == 0);
	ASSERT(BP_GET_BYTEORDER(zio->io_bp) == ZFS_HOST_BYTEORDER);
	ASSERT(!BP_IS_GANG(zio->io_bp));
	ASSERT(!BP_IS_HOLE(zio->io_bp));
	ASSERT(BP_GET_FILL(zio->io_bp) == 0);

	/*
	 * Ensure the lwb buffer pointer is cleared before releasing
	 * the txg. If we have had an allocation failure and
	 * the txg is waiting to sync then we want zil_sync()
	 * to remove the lwb so that it's not picked up as the next new
	 * one in zil_commit_writer(). zil_sync() will only remove
	 * the lwb if lwb_buf is null.
	 */
	zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
	mutex_enter(&zilog->zl_lock);
	lwb->lwb_buf = NULL;
	lwb->lwb_tx = NULL;
	mutex_exit(&zilog->zl_lock);

	/*
	 * Now that we've written this log block, we have a stable pointer
	 * to the next block in the chain, so it's OK to let the txg in
	 * which we allocated the next block sync.
	 */
	dmu_tx_commit(tx);
}

/*
 * Initialize the io for a log block.
 */
static void
zil_lwb_write_init(zilog_t *zilog, lwb_t *lwb)
{
	zbookmark_phys_t zb;

	SET_BOOKMARK(&zb, lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_OBJSET],
	    ZB_ZIL_OBJECT, ZB_ZIL_LEVEL,
	    lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_SEQ]);

	if (zilog->zl_root_zio == NULL) {
		zilog->zl_root_zio = zio_root(zilog->zl_spa, NULL, NULL,
		    ZIO_FLAG_CANFAIL);
	}
	if (lwb->lwb_zio == NULL) {
		lwb->lwb_zio = zio_rewrite(zilog->zl_root_zio, zilog->zl_spa,
		    0, &lwb->lwb_blk, lwb->lwb_buf, BP_GET_LSIZE(&lwb->lwb_blk),
		    zil_lwb_write_done, lwb, ZIO_PRIORITY_SYNC_WRITE,
		    ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_PROPAGATE, &zb);
	}
}

/*
 * Define a limited set of intent log block sizes.
 *
 * These must be a multiple of 4KB. Note only the amount used (again
 * aligned to 4KB) actually gets written. However, we can't always just
 * allocate SPA_OLD_MAXBLOCKSIZE as the slog space could be exhausted.
 */
uint64_t zil_block_buckets[] = {
    4096,		/* non TX_WRITE */
    8192+4096,		/* database */
    32*1024 + 4096, 	/* NFS writes */
    UINT64_MAX
};
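/*
 * Illustrative example (not from the original source): a ~20KB commit
 * plus the zil_chain_t header exceeds the 8K+4K bucket, so the size
 * selection loop in zil_lwb_write_start() picks the 32K+4K bucket;
 * anything larger than the last finite bucket falls through to
 * SPA_OLD_MAXBLOCKSIZE.
 */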

/*
 * Use the slog as long as the logbias is 'latency' and the current commit size
 * is less than the limit or the total list size is less than 2X the limit.
 * Limit checking is disabled by setting zil_slog_limit to UINT64_MAX.
 */
uint64_t zil_slog_limit = 1024 * 1024;
#define	USE_SLOG(zilog) (((zilog)->zl_logbias == ZFS_LOGBIAS_LATENCY) && \
	(((zilog)->zl_cur_used < zil_slog_limit) || \
	((zilog)->zl_itx_list_sz < (zil_slog_limit << 1))))

/*
 * Start a log block write and advance to the next log block.
 * Calls are serialized.
 */
static lwb_t *
zil_lwb_write_start(zilog_t *zilog, lwb_t *lwb)
{
	lwb_t *nlwb = NULL;
	zil_chain_t *zilc;
	spa_t *spa = zilog->zl_spa;
	blkptr_t *bp;
	dmu_tx_t *tx;
	uint64_t txg;
	uint64_t zil_blksz, wsz;
	int i, error;

	if (BP_GET_CHECKSUM(&lwb->lwb_blk) == ZIO_CHECKSUM_ZILOG2) {
		zilc = (zil_chain_t *)lwb->lwb_buf;
		bp = &zilc->zc_next_blk;
	} else {
		zilc = (zil_chain_t *)(lwb->lwb_buf + lwb->lwb_sz);
		bp = &zilc->zc_next_blk;
	}

	ASSERT(lwb->lwb_nused <= lwb->lwb_sz);

	/*
	 * Allocate the next block and save its address in this block
	 * before writing it in order to establish the log chain.
	 * Note that if the allocation of nlwb synced before we wrote
	 * the block that points at it (lwb), we'd leak it if we crashed.
	 * Therefore, we don't do dmu_tx_commit() until zil_lwb_write_done().
	 * We dirty the dataset to ensure that zil_sync() will be called
	 * to clean up in the event of allocation failure or I/O failure.
	 */
	tx = dmu_tx_create(zilog->zl_os);
	VERIFY(dmu_tx_assign(tx, TXG_WAIT) == 0);
	dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
	txg = dmu_tx_get_txg(tx);

	lwb->lwb_tx = tx;

	/*
	 * Log blocks are pre-allocated. Here we select the size of the next
	 * block, based on size used in the last block.
	 * - first find the smallest bucket that will fit the block from a
	 *   limited set of block sizes. This is because it's faster to write
	 *   blocks allocated from the same metaslab as they are adjacent or
	 *   close.
	 * - next find the maximum from the new suggested size and an array of
	 *   previous sizes. This lessens a picket fence effect of wrongly
	 *   guessing the size if we have a stream of say 2k, 64k, 2k, 64k
	 *   requests.
	 *
	 * Note we only write what is used, but we can't just allocate
	 * the maximum block size because we can exhaust the available
	 * pool log space.
	 */
	zil_blksz = zilog->zl_cur_used + sizeof (zil_chain_t);
	for (i = 0; zil_blksz > zil_block_buckets[i]; i++)
		continue;
	zil_blksz = zil_block_buckets[i];
	if (zil_blksz == UINT64_MAX)
		zil_blksz = SPA_OLD_MAXBLOCKSIZE;
	zilog->zl_prev_blks[zilog->zl_prev_rotor] = zil_blksz;
	for (i = 0; i < ZIL_PREV_BLKS; i++)
		zil_blksz = MAX(zil_blksz, zilog->zl_prev_blks[i]);
	zilog->zl_prev_rotor = (zilog->zl_prev_rotor + 1) & (ZIL_PREV_BLKS - 1);

	BP_ZERO(bp);
	/* pass the old blkptr in order to spread log blocks across devs */
	error = zio_alloc_zil(spa, txg, bp, &lwb->lwb_blk, zil_blksz,
	    USE_SLOG(zilog));
	if (error == 0) {
		ASSERT3U(bp->blk_birth, ==, txg);
		bp->blk_cksum = lwb->lwb_blk.blk_cksum;
		bp->blk_cksum.zc_word[ZIL_ZC_SEQ]++;

		/*
		 * Allocate a new log write buffer (lwb).
		 */
		nlwb = zil_alloc_lwb(zilog, bp, txg);

		/* Record the block for later vdev flushing */
		zil_add_block(zilog, &lwb->lwb_blk);
	}

	if (BP_GET_CHECKSUM(&lwb->lwb_blk) == ZIO_CHECKSUM_ZILOG2) {
		/* For Slim ZIL only write what is used. */
		wsz = P2ROUNDUP_TYPED(lwb->lwb_nused, ZIL_MIN_BLKSZ, uint64_t);
		ASSERT3U(wsz, <=, lwb->lwb_sz);
		zio_shrink(lwb->lwb_zio, wsz);

	} else {
		wsz = lwb->lwb_sz;
	}

	zilc->zc_pad = 0;
	zilc->zc_nused = lwb->lwb_nused;
	zilc->zc_eck.zec_cksum = lwb->lwb_blk.blk_cksum;

	/*
	 * clear unused data for security
	 */
	bzero(lwb->lwb_buf + lwb->lwb_nused, wsz - lwb->lwb_nused);

	zio_nowait(lwb->lwb_zio); /* Kick off the write for the old log block */

	/*
	 * If there was an allocation failure then nlwb will be null which
	 * forces a txg_wait_synced().
	 */
	return (nlwb);
}

static lwb_t *
zil_lwb_commit(zilog_t *zilog, itx_t *itx, lwb_t *lwb)
{
	lr_t *lrc = &itx->itx_lr; /* common log record */
	lr_write_t *lrw = (lr_write_t *)lrc;
	char *lr_buf;
	uint64_t txg = lrc->lrc_txg;
	uint64_t reclen = lrc->lrc_reclen;
	uint64_t dlen = 0;

	if (lwb == NULL)
		return (NULL);

	ASSERT(lwb->lwb_buf != NULL);
	ASSERT(zilog_is_dirty(zilog) ||
	    spa_freeze_txg(zilog->zl_spa) != UINT64_MAX);

	if (lrc->lrc_txtype == TX_WRITE && itx->itx_wr_state == WR_NEED_COPY)
		dlen = P2ROUNDUP_TYPED(
		    lrw->lr_length, sizeof (uint64_t), uint64_t);

	zilog->zl_cur_used += (reclen + dlen);

	zil_lwb_write_init(zilog, lwb);

	/*
	 * If this record won't fit in the current log block, start a new one.
	 */
	if (lwb->lwb_nused + reclen + dlen > lwb->lwb_sz) {
		lwb = zil_lwb_write_start(zilog, lwb);
		if (lwb == NULL)
			return (NULL);
		zil_lwb_write_init(zilog, lwb);
		ASSERT(LWB_EMPTY(lwb));
		if (lwb->lwb_nused + reclen + dlen > lwb->lwb_sz) {
			txg_wait_synced(zilog->zl_dmu_pool, txg);
			return (lwb);
		}
	}

	lr_buf = lwb->lwb_buf + lwb->lwb_nused;
	bcopy(lrc, lr_buf, reclen);
	lrc = (lr_t *)lr_buf;
	lrw = (lr_write_t *)lrc;

	/*
	 * If it's a write, fetch the data or get its blkptr as appropriate.
	 */
	if (lrc->lrc_txtype == TX_WRITE) {
		if (txg > spa_freeze_txg(zilog->zl_spa))
			txg_wait_synced(zilog->zl_dmu_pool, txg);
		if (itx->itx_wr_state != WR_COPIED) {
			char *dbuf;
			int error;

			if (dlen) {
				ASSERT(itx->itx_wr_state == WR_NEED_COPY);
				dbuf = lr_buf + reclen;
				lrw->lr_common.lrc_reclen += dlen;
			} else {
				ASSERT(itx->itx_wr_state == WR_INDIRECT);
				dbuf = NULL;
			}
			error = zilog->zl_get_data(
			    itx->itx_private, lrw, dbuf, lwb->lwb_zio);
			if (error == EIO) {
				txg_wait_synced(zilog->zl_dmu_pool, txg);
				return (lwb);
			}
			if (error != 0) {
				ASSERT(error == ENOENT || error == EEXIST ||
				    error == EALREADY);
				return (lwb);
			}
		}
	}

	/*
	 * We're actually making an entry, so update lrc_seq to be the
	 * log record sequence number.  Note that this is generally not
	 * equal to the itx sequence number because not all transactions
	 * are synchronous, and sometimes spa_sync() gets there first.
	 */
	lrc->lrc_seq = ++zilog->zl_lr_seq; /* we are single threaded */
	lwb->lwb_nused += reclen + dlen;
	lwb->lwb_max_txg = MAX(lwb->lwb_max_txg, txg);
	ASSERT3U(lwb->lwb_nused, <=, lwb->lwb_sz);
	ASSERT0(P2PHASE(lwb->lwb_nused, sizeof (uint64_t)));

	return (lwb);
}

itx_t *
zil_itx_create(uint64_t txtype, size_t lrsize)
{
	itx_t *itx;

	lrsize = P2ROUNDUP_TYPED(lrsize, sizeof (uint64_t), size_t);

	itx = kmem_alloc(offsetof(itx_t, itx_lr) + lrsize, KM_SLEEP);
	itx->itx_lr.lrc_txtype = txtype;
	itx->itx_lr.lrc_reclen = lrsize;
	itx->itx_sod = lrsize; /* if write & WR_NEED_COPY will be increased */
	itx->itx_lr.lrc_seq = 0;	/* defensive */
	itx->itx_sync = B_TRUE;		/* default is synchronous */

	return (itx);
}

void
zil_itx_destroy(itx_t *itx)
{
	kmem_free(itx, offsetof(itx_t, itx_lr) + itx->itx_lr.lrc_reclen);
}

/*
 * Free up the sync and async itxs. The itxs_t has already been detached
 * so no locks are needed.
 */
static void
zil_itxg_clean(itxs_t *itxs)
{
	itx_t *itx;
	list_t *list;
	avl_tree_t *t;
	void *cookie;
	itx_async_node_t *ian;

	list = &itxs->i_sync_list;
	while ((itx = list_head(list)) != NULL) {
		list_remove(list, itx);
		kmem_free(itx, offsetof(itx_t, itx_lr) +
		    itx->itx_lr.lrc_reclen);
	}

	cookie = NULL;
	t = &itxs->i_async_tree;
	while ((ian = avl_destroy_nodes(t, &cookie)) != NULL) {
		list = &ian->ia_list;
		while ((itx = list_head(list)) != NULL) {
			list_remove(list, itx);
			kmem_free(itx, offsetof(itx_t, itx_lr) +
			    itx->itx_lr.lrc_reclen);
		}
		list_destroy(list);
		kmem_free(ian, sizeof (itx_async_node_t));
	}
	avl_destroy(t);

	kmem_free(itxs, sizeof (itxs_t));
}

static int
zil_aitx_compare(const void *x1, const void *x2)
{
	const uint64_t o1 = ((itx_async_node_t *)x1)->ia_foid;
	const uint64_t o2 = ((itx_async_node_t *)x2)->ia_foid;

	if (o1 < o2)
		return (-1);
	if (o1 > o2)
		return (1);

	return (0);
}

/*
 * Remove all async itx with the given oid.
 */
static void
zil_remove_async(zilog_t *zilog, uint64_t oid)
{
	uint64_t otxg, txg;
	itx_async_node_t *ian;
	avl_tree_t *t;
	avl_index_t where;
	list_t clean_list;
	itx_t *itx;

	ASSERT(oid != 0);
	list_create(&clean_list, sizeof (itx_t), offsetof(itx_t, itx_node));

	if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */
		otxg = ZILTEST_TXG;
	else
		otxg = spa_last_synced_txg(zilog->zl_spa) + 1;

	for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) {
		itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK];

		mutex_enter(&itxg->itxg_lock);
		if (itxg->itxg_txg != txg) {
			mutex_exit(&itxg->itxg_lock);
			continue;
		}

		/*
		 * Locate the object node and append its list.
		 */
		t = &itxg->itxg_itxs->i_async_tree;
		ian = avl_find(t, &oid, &where);
		if (ian != NULL)
			list_move_tail(&clean_list, &ian->ia_list);
		mutex_exit(&itxg->itxg_lock);
	}
	while ((itx = list_head(&clean_list)) != NULL) {
		list_remove(&clean_list, itx);
		kmem_free(itx, offsetof(itx_t, itx_lr) +
		    itx->itx_lr.lrc_reclen);
	}
	list_destroy(&clean_list);
}

void
zil_itx_assign(zilog_t *zilog, itx_t *itx, dmu_tx_t *tx)
{
	uint64_t txg;
	itxg_t *itxg;
	itxs_t *itxs, *clean = NULL;

	/*
	 * Object ids can be re-instantiated in the next txg so
	 * remove any async transactions to avoid future leaks.
	 * This can happen if an fsync occurs on the re-instantiated
	 * object for a WR_INDIRECT or WR_NEED_COPY write, which gets
	 * the new file data and flushes a write record for the old object.
	 */
	if ((itx->itx_lr.lrc_txtype & ~TX_CI) == TX_REMOVE)
		zil_remove_async(zilog, itx->itx_oid);

	/*
	 * Ensure the data of a renamed file is committed before the rename.
	 */
	if ((itx->itx_lr.lrc_txtype & ~TX_CI) == TX_RENAME)
		zil_async_to_sync(zilog, itx->itx_oid);

	if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX)
		txg = ZILTEST_TXG;
	else
		txg = dmu_tx_get_txg(tx);

	itxg = &zilog->zl_itxg[txg & TXG_MASK];
	mutex_enter(&itxg->itxg_lock);
	itxs = itxg->itxg_itxs;
	if (itxg->itxg_txg != txg) {
		if (itxs != NULL) {
			/*
			 * The zil_clean callback hasn't got around to cleaning
			 * this itxg. Save the itxs for release below.
			 * This should be rare.
			 */
			atomic_add_64(&zilog->zl_itx_list_sz, -itxg->itxg_sod);
			itxg->itxg_sod = 0;
			clean = itxg->itxg_itxs;
		}
		ASSERT(itxg->itxg_sod == 0);
		itxg->itxg_txg = txg;
		itxs = itxg->itxg_itxs = kmem_zalloc(sizeof (itxs_t), KM_SLEEP);

		list_create(&itxs->i_sync_list, sizeof (itx_t),
		    offsetof(itx_t, itx_node));
		avl_create(&itxs->i_async_tree, zil_aitx_compare,
		    sizeof (itx_async_node_t),
		    offsetof(itx_async_node_t, ia_node));
	}
	if (itx->itx_sync) {
		list_insert_tail(&itxs->i_sync_list, itx);
		atomic_add_64(&zilog->zl_itx_list_sz, itx->itx_sod);
		itxg->itxg_sod += itx->itx_sod;
	} else {
		avl_tree_t *t = &itxs->i_async_tree;
		uint64_t foid = ((lr_ooo_t *)&itx->itx_lr)->lr_foid;
		itx_async_node_t *ian;
		avl_index_t where;

		ian = avl_find(t, &foid, &where);
		if (ian == NULL) {
			ian = kmem_alloc(sizeof (itx_async_node_t), KM_SLEEP);
			list_create(&ian->ia_list, sizeof (itx_t),
			    offsetof(itx_t, itx_node));
			ian->ia_foid = foid;
			avl_insert(t, ian, where);
		}
		list_insert_tail(&ian->ia_list, itx);
	}

	itx->itx_lr.lrc_txg = dmu_tx_get_txg(tx);
	zilog_dirty(zilog, txg);
	mutex_exit(&itxg->itxg_lock);

	/* Release the old itxs now we've dropped the lock */
	if (clean != NULL)
		zil_itxg_clean(clean);
}

/*
 * If there are any in-memory intent log transactions which have now been
 * synced then start up a taskq to free them. We should only do this after we
 * have written out the uberblocks (i.e. txg has been committed) so that we
 * don't inadvertently clean out in-memory log records that would be required
 * by zil_commit().
 */
void
zil_clean(zilog_t *zilog, uint64_t synced_txg)
{
	itxg_t *itxg = &zilog->zl_itxg[synced_txg & TXG_MASK];
	itxs_t *clean_me;

	mutex_enter(&itxg->itxg_lock);
	if (itxg->itxg_itxs == NULL || itxg->itxg_txg == ZILTEST_TXG) {
		mutex_exit(&itxg->itxg_lock);
		return;
	}
	ASSERT3U(itxg->itxg_txg, <=, synced_txg);
	ASSERT(itxg->itxg_txg != 0);
	ASSERT(zilog->zl_clean_taskq != NULL);
	atomic_add_64(&zilog->zl_itx_list_sz, -itxg->itxg_sod);
	itxg->itxg_sod = 0;
	clean_me = itxg->itxg_itxs;
	itxg->itxg_itxs = NULL;
	itxg->itxg_txg = 0;
	mutex_exit(&itxg->itxg_lock);
	/*
	 * Preferably start a task queue to free up the old itxs but
	 * if taskq_dispatch can't allocate resources to do that then
	 * free it in-line. This should be rare. Note, using TQ_SLEEP
	 * created a bad performance problem.
	 */
	if (taskq_dispatch(zilog->zl_clean_taskq,
	    (void (*)(void *))zil_itxg_clean, clean_me, TQ_NOSLEEP) == 0)
		zil_itxg_clean(clean_me);
}

1381219089Spjd/*
1382219089Spjd * Get the list of itxs to commit into zl_itx_commit_list.
1383219089Spjd */
1384185029Spjdstatic void
1385219089Spjdzil_get_commit_list(zilog_t *zilog)
1386168404Spjd{
1387219089Spjd	uint64_t otxg, txg;
1388219089Spjd	list_t *commit_list = &zilog->zl_itx_commit_list;
1389219089Spjd	uint64_t push_sod = 0;
1390219089Spjd
1391219089Spjd	if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */
1392219089Spjd		otxg = ZILTEST_TXG;
1393219089Spjd	else
1394219089Spjd		otxg = spa_last_synced_txg(zilog->zl_spa) + 1;
1395219089Spjd
1396219089Spjd	for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) {
1397219089Spjd		itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK];
1398219089Spjd
1399219089Spjd		mutex_enter(&itxg->itxg_lock);
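		/*
		 * An itxg whose recorded txg doesn't match is either unused
		 * or has already been cleaned; in both cases there is
		 * nothing here to commit.
		 */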
1400219089Spjd		if (itxg->itxg_txg != txg) {
1401219089Spjd			mutex_exit(&itxg->itxg_lock);
1402219089Spjd			continue;
1403219089Spjd		}
1404219089Spjd
1405219089Spjd		list_move_tail(commit_list, &itxg->itxg_itxs->i_sync_list);
1406219089Spjd		push_sod += itxg->itxg_sod;
1407219089Spjd		itxg->itxg_sod = 0;
1408219089Spjd
1409219089Spjd		mutex_exit(&itxg->itxg_lock);
1410219089Spjd	}
1411219089Spjd	atomic_add_64(&zilog->zl_itx_list_sz, -push_sod);
1412219089Spjd}
1413219089Spjd
1414219089Spjd/*
1415219089Spjd * Move the async itxs for a specified object to commit into sync lists.
1416219089Spjd */
1417219089Spjdstatic void
1418219089Spjdzil_async_to_sync(zilog_t *zilog, uint64_t foid)
1419219089Spjd{
1420219089Spjd	uint64_t otxg, txg;
1421219089Spjd	itx_async_node_t *ian;
1422219089Spjd	avl_tree_t *t;
1423219089Spjd	avl_index_t where;
1424219089Spjd
1425219089Spjd	if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */
1426219089Spjd		otxg = ZILTEST_TXG;
1427219089Spjd	else
1428219089Spjd		otxg = spa_last_synced_txg(zilog->zl_spa) + 1;
1429219089Spjd
1430219089Spjd	for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) {
1431219089Spjd		itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK];
1432219089Spjd
1433219089Spjd		mutex_enter(&itxg->itxg_lock);
1434219089Spjd		if (itxg->itxg_txg != txg) {
1435219089Spjd			mutex_exit(&itxg->itxg_lock);
1436219089Spjd			continue;
1437219089Spjd		}
1438219089Spjd
1439219089Spjd		/*
1440219089Spjd		 * If a foid is specified then find that node and append its
1441219089Spjd		 * list. Otherwise walk the tree, appending all the lists
1442219089Spjd		 * to the sync list. We add to the end rather than the
1443219089Spjd		 * beginning to ensure that the create has already happened.
1444219089Spjd		 */
1445219089Spjd		t = &itxg->itxg_itxs->i_async_tree;
1446219089Spjd		if (foid != 0) {
1447219089Spjd			ian = avl_find(t, &foid, &where);
1448219089Spjd			if (ian != NULL) {
1449219089Spjd				list_move_tail(&itxg->itxg_itxs->i_sync_list,
1450219089Spjd				    &ian->ia_list);
1451219089Spjd			}
1452219089Spjd		} else {
1453219089Spjd			void *cookie = NULL;
1454219089Spjd
1455219089Spjd			while ((ian = avl_destroy_nodes(t, &cookie)) != NULL) {
1456219089Spjd				list_move_tail(&itxg->itxg_itxs->i_sync_list,
1457219089Spjd				    &ian->ia_list);
1458219089Spjd				list_destroy(&ian->ia_list);
1459219089Spjd				kmem_free(ian, sizeof (itx_async_node_t));
1460219089Spjd			}
1461219089Spjd		}
1462219089Spjd		mutex_exit(&itxg->itxg_lock);
1463219089Spjd	}
1464219089Spjd}
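
/*
 * An illustrative note, not from the original source: the foid passed to
 * zil_commit() (and from there to zil_async_to_sync()) controls how many
 * async itxs are promoted.  The znode field name below is an assumption
 * based on ZPL convention.
 *
 *	zil_commit(zilog, zp->z_id);	promote async itxs for zp only
 *	zil_commit(zilog, 0);		promote all async itxs
 */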
1465219089Spjd
1466219089Spjdstatic void
1467219089Spjdzil_commit_writer(zilog_t *zilog)
1468219089Spjd{
1469168404Spjd	uint64_t txg;
1470219089Spjd	itx_t *itx;
1471168404Spjd	lwb_t *lwb;
1472219089Spjd	spa_t *spa = zilog->zl_spa;
1473219089Spjd	int error = 0;
1474168404Spjd
1475185029Spjd	ASSERT(zilog->zl_root_zio == NULL);
1476168404Spjd
1477219089Spjd	mutex_exit(&zilog->zl_lock);
1478219089Spjd
1479219089Spjd	zil_get_commit_list(zilog);
1480219089Spjd
1481219089Spjd	/*
1482219089Spjd	 * Return if there's nothing to commit before we dirty the fs by
1483219089Spjd	 * calling zil_create().
1484219089Spjd	 */
1485219089Spjd	if (list_head(&zilog->zl_itx_commit_list) == NULL) {
1486219089Spjd		mutex_enter(&zilog->zl_lock);
1487219089Spjd		return;
1488219089Spjd	}
1489219089Spjd
1490168404Spjd	if (zilog->zl_suspend) {
1491168404Spjd		lwb = NULL;
1492168404Spjd	} else {
1493168404Spjd		lwb = list_tail(&zilog->zl_lwb_list);
1494219089Spjd		if (lwb == NULL)
1495219089Spjd			lwb = zil_create(zilog);
1496168404Spjd	}
1497168404Spjd
1498168404Spjd	DTRACE_PROBE1(zil__cw1, zilog_t *, zilog);
1499219089Spjd	while ((itx = list_head(&zilog->zl_itx_commit_list)) != NULL) {
1500168404Spjd		txg = itx->itx_lr.lrc_txg;
1501168404Spjd		ASSERT(txg);
1502168404Spjd
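		/*
		 * Log the record only if its txg has not already made it to
		 * stable storage via the DMU.  The spa_freeze_txg() clause
		 * keeps ziltest records flowing to the log even though their
		 * txg will never sync.
		 */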
1503219089Spjd		if (txg > spa_last_synced_txg(spa) || txg > spa_freeze_txg(spa))
1504168404Spjd			lwb = zil_lwb_commit(zilog, itx, lwb);
1505219089Spjd		list_remove(&zilog->zl_itx_commit_list, itx);
1506168404Spjd		kmem_free(itx, offsetof(itx_t, itx_lr)
1507168404Spjd		    + itx->itx_lr.lrc_reclen);
1508168404Spjd	}
1509168404Spjd	DTRACE_PROBE1(zil__cw2, zilog_t *, zilog);
1510168404Spjd
1511168404Spjd	/* write the last block out */
1512168404Spjd	if (lwb != NULL && lwb->lwb_zio != NULL)
1513168404Spjd		lwb = zil_lwb_write_start(zilog, lwb);
1514168404Spjd
1515168404Spjd	zilog->zl_cur_used = 0;
1516168404Spjd
1517168404Spjd	/*
1518168404Spjd	 * Wait if necessary for the log blocks to be on stable storage.
1519168404Spjd	 */
1520168404Spjd	if (zilog->zl_root_zio) {
1521219089Spjd		error = zio_wait(zilog->zl_root_zio);
1522185029Spjd		zilog->zl_root_zio = NULL;
1523185029Spjd		zil_flush_vdevs(zilog);
1524168404Spjd	}
1525168404Spjd
1526219089Spjd	if (error || lwb == NULL)
1527168404Spjd		txg_wait_synced(zilog->zl_dmu_pool, 0);
1528168404Spjd
1529168404Spjd	mutex_enter(&zilog->zl_lock);
1530168404Spjd
1531219089Spjd	/*
1532219089Spjd	 * Remember the highest committed log sequence number for ztest.
1533219089Spjd	 * We only update this value when all the log writes succeeded,
1534219089Spjd	 * because ztest wants to ASSERT that it got the whole log chain.
1535219089Spjd	 */
1536219089Spjd	if (error == 0 && lwb != NULL)
1537219089Spjd		zilog->zl_commit_lr_seq = zilog->zl_lr_seq;
1538168404Spjd}
1539168404Spjd
1540168404Spjd/*
1541219089Spjd * Commit zfs transactions to stable storage.
1542168404Spjd * If foid is 0 push out all transactions, otherwise push only those
1543219089Spjd * for that object or those that might reference that object.
1544219089Spjd *
1545219089Spjd * itxs are committed in batches. In a heavily stressed zil there will be
1546219089Spjd * a commit writer thread that is writing out a bunch of itxs to the log
1547219089Spjd * for a set of committing threads (cthreads) in the same batch as the writer.
1548219089Spjd * Those cthreads are all waiting on the same cv for that batch.
1549219089Spjd *
1550219089Spjd * There will also be a different and growing batch of threads that are
1551219089Spjd * waiting to commit (qthreads). When the committing batch completes
1552219089Spjd * a transition occurs such that the cthreads exit and the qthreads become
1553219089Spjd * cthreads. One of the new cthreads becomes the writer thread for the
1554219089Spjd * batch. Any new threads arriving become new qthreads.
1555219089Spjd *
1556219089Spjd * Only 2 condition variables are needed, and no transition between
1557219089Spjd * the two cvs is required. They just flip-flop between serving the
1558219089Spjd * qthreads and the cthreads.
1559219089Spjd *
1560219089Spjd * Using this scheme we can efficiently wake up only those threads
1561219089Spjd * whose transactions have been committed.
1562168404Spjd */
1563168404Spjdvoid
1564219089Spjdzil_commit(zilog_t *zilog, uint64_t foid)
1565168404Spjd{
1566219089Spjd	uint64_t mybatch;
1567219089Spjd
1568219089Spjd	if (zilog->zl_sync == ZFS_SYNC_DISABLED)
1569168404Spjd		return;
1570168404Spjd
1571219089Spjd	/* move the async itxs for the foid to the sync queues */
1572219089Spjd	zil_async_to_sync(zilog, foid);
1573219089Spjd
1574168404Spjd	mutex_enter(&zilog->zl_lock);
1575219089Spjd	mybatch = zilog->zl_next_batch;
1576168404Spjd	while (zilog->zl_writer) {
1577219089Spjd		cv_wait(&zilog->zl_cv_batch[mybatch & 1], &zilog->zl_lock);
1578219089Spjd		if (mybatch <= zilog->zl_com_batch) {
1579168404Spjd			mutex_exit(&zilog->zl_lock);
1580168404Spjd			return;
1581168404Spjd		}
1582168404Spjd	}
1583219089Spjd
1584219089Spjd	zilog->zl_next_batch++;
1585219089Spjd	zilog->zl_writer = B_TRUE;
1586219089Spjd	zil_commit_writer(zilog);
1587219089Spjd	zilog->zl_com_batch = mybatch;
1588219089Spjd	zilog->zl_writer = B_FALSE;
1589168404Spjd	mutex_exit(&zilog->zl_lock);
1590219089Spjd
1591219089Spjd	/* wake up one thread to become the next writer */
1592219089Spjd	cv_signal(&zilog->zl_cv_batch[(mybatch+1) & 1]);
1593219089Spjd
1594219089Spjd	/* wake up all threads waiting for this batch to be committed */
1595219089Spjd	cv_broadcast(&zilog->zl_cv_batch[mybatch & 1]);
1596168404Spjd}
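
/*
 * A hedged usage sketch, not part of the original source: the typical
 * zil_commit() call sites.  The ZPL names (zfsvfs, zp) are assumptions
 * based on convention.
 *
 *	zfs_fsync():	zil_commit(zfsvfs->z_log, zp->z_id);
 *	zfs_sync():	zil_commit(zfsvfs->z_log, 0);
 *
 * Each caller blocks until its itxs reach stable storage, riding
 * whichever commit batch it joined under the scheme described above.
 */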
1597168404Spjd
1598168404Spjd/*
1599168404Spjd * Called in syncing context to free committed log blocks and update the log header.
1600168404Spjd */
1601168404Spjdvoid
1602168404Spjdzil_sync(zilog_t *zilog, dmu_tx_t *tx)
1603168404Spjd{
1604168404Spjd	zil_header_t *zh = zil_header_in_syncing_context(zilog);
1605168404Spjd	uint64_t txg = dmu_tx_get_txg(tx);
1606168404Spjd	spa_t *spa = zilog->zl_spa;
1607219089Spjd	uint64_t *replayed_seq = &zilog->zl_replayed_seq[txg & TXG_MASK];
1608168404Spjd	lwb_t *lwb;
1609168404Spjd
1610209962Smm	/*
1611209962Smm	 * We don't zero out zl_destroy_txg, so make sure we don't try
1612209962Smm	 * to destroy it twice.
1613209962Smm	 */
1614209962Smm	if (spa_sync_pass(spa) != 1)
1615209962Smm		return;
1616209962Smm
1617168404Spjd	mutex_enter(&zilog->zl_lock);
1618168404Spjd
1619168404Spjd	ASSERT(zilog->zl_stop_sync == 0);
1620168404Spjd
1621219089Spjd	if (*replayed_seq != 0) {
1622219089Spjd		ASSERT(zh->zh_replay_seq < *replayed_seq);
1623219089Spjd		zh->zh_replay_seq = *replayed_seq;
1624219089Spjd		*replayed_seq = 0;
1625219089Spjd	}
1626168404Spjd
1627168404Spjd	if (zilog->zl_destroy_txg == txg) {
1628168404Spjd		blkptr_t blk = zh->zh_log;
1629168404Spjd
1630168404Spjd		ASSERT(list_head(&zilog->zl_lwb_list) == NULL);
1631168404Spjd
1632168404Spjd		bzero(zh, sizeof (zil_header_t));
1633209962Smm		bzero(zilog->zl_replayed_seq, sizeof (zilog->zl_replayed_seq));
1634168404Spjd
1635168404Spjd		if (zilog->zl_keep_first) {
1636168404Spjd			/*
1637168404Spjd			 * If this block was part of a log chain that couldn't
1638168404Spjd			 * be claimed because a device was missing during
1639168404Spjd			 * zil_claim(), but that device later returns,
1640168404Spjd			 * then this block could erroneously appear valid.
1641168404Spjd			 * To guard against this, assign a new GUID to the new
1642168404Spjd			 * log chain so it doesn't matter what blk points to.
1643168404Spjd			 */
1644168404Spjd			zil_init_log_chain(zilog, &blk);
1645168404Spjd			zh->zh_log = blk;
1646168404Spjd		}
1647168404Spjd	}
1648168404Spjd
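	/*
	 * Walk the lwb list, pointing the on-disk header at each block in
	 * turn.  An lwb can be freed only once its write has completed
	 * (lwb_buf is NULL) and nothing beyond this txg still needs it
	 * (lwb_max_txg <= txg); stop at the first lwb failing either test.
	 */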
1649213197Smm	while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
1650168404Spjd		zh->zh_log = lwb->lwb_blk;
1651168404Spjd		if (lwb->lwb_buf != NULL || lwb->lwb_max_txg > txg)
1652168404Spjd			break;
1653168404Spjd		list_remove(&zilog->zl_lwb_list, lwb);
1654219089Spjd		zio_free_zil(spa, txg, &lwb->lwb_blk);
1655168404Spjd		kmem_cache_free(zil_lwb_cache, lwb);
1656168404Spjd
1657168404Spjd		/*
1658168404Spjd		 * If we don't have anything left in the lwb list then
1659168404Spjd		 * we've had an allocation failure and we need to zero
1660168404Spjd		 * out the zil_header blkptr so that we don't end
1661168404Spjd		 * up freeing the same block twice.
1662168404Spjd		 */
1663168404Spjd		if (list_head(&zilog->zl_lwb_list) == NULL)
1664168404Spjd			BP_ZERO(&zh->zh_log);
1665168404Spjd	}
1666168404Spjd	mutex_exit(&zilog->zl_lock);
1667168404Spjd}
1668168404Spjd
1669168404Spjdvoid
1670168404Spjdzil_init(void)
1671168404Spjd{
1672168404Spjd	zil_lwb_cache = kmem_cache_create("zil_lwb_cache",
1673168404Spjd	    sizeof (struct lwb), 0, NULL, NULL, NULL, NULL, NULL, 0);
1674168404Spjd}
1675168404Spjd
1676168404Spjdvoid
1677168404Spjdzil_fini(void)
1678168404Spjd{
1679168404Spjd	kmem_cache_destroy(zil_lwb_cache);
1680168404Spjd}
1681168404Spjd
1682219089Spjdvoid
1683219089Spjdzil_set_sync(zilog_t *zilog, uint64_t sync)
1684219089Spjd{
1685219089Spjd	zilog->zl_sync = sync;
1686219089Spjd}
1687219089Spjd
1688219089Spjdvoid
1689219089Spjdzil_set_logbias(zilog_t *zilog, uint64_t logbias)
1690219089Spjd{
1691219089Spjd	zilog->zl_logbias = logbias;
1692219089Spjd}
1693219089Spjd
1694168404Spjdzilog_t *
1695168404Spjdzil_alloc(objset_t *os, zil_header_t *zh_phys)
1696168404Spjd{
1697168404Spjd	zilog_t *zilog;
1698168404Spjd
1699168404Spjd	zilog = kmem_zalloc(sizeof (zilog_t), KM_SLEEP);
1700168404Spjd
1701168404Spjd	zilog->zl_header = zh_phys;
1702168404Spjd	zilog->zl_os = os;
1703168404Spjd	zilog->zl_spa = dmu_objset_spa(os);
1704168404Spjd	zilog->zl_dmu_pool = dmu_objset_pool(os);
1705168404Spjd	zilog->zl_destroy_txg = TXG_INITIAL - 1;
1706219089Spjd	zilog->zl_logbias = dmu_objset_logbias(os);
1707219089Spjd	zilog->zl_sync = dmu_objset_syncprop(os);
1708219089Spjd	zilog->zl_next_batch = 1;
1709168404Spjd
1710168404Spjd	mutex_init(&zilog->zl_lock, NULL, MUTEX_DEFAULT, NULL);
1711168404Spjd
1712219089Spjd	for (int i = 0; i < TXG_SIZE; i++) {
1713219089Spjd		mutex_init(&zilog->zl_itxg[i].itxg_lock, NULL,
1714219089Spjd		    MUTEX_DEFAULT, NULL);
1715219089Spjd	}
1716168404Spjd
1717168404Spjd	list_create(&zilog->zl_lwb_list, sizeof (lwb_t),
1718168404Spjd	    offsetof(lwb_t, lwb_node));
1719168404Spjd
1720219089Spjd	list_create(&zilog->zl_itx_commit_list, sizeof (itx_t),
1721219089Spjd	    offsetof(itx_t, itx_node));
1722219089Spjd
1723185029Spjd	mutex_init(&zilog->zl_vdev_lock, NULL, MUTEX_DEFAULT, NULL);
1724168404Spjd
1725185029Spjd	avl_create(&zilog->zl_vdev_tree, zil_vdev_compare,
1726185029Spjd	    sizeof (zil_vdev_node_t), offsetof(zil_vdev_node_t, zv_node));
1727185029Spjd
1728185029Spjd	cv_init(&zilog->zl_cv_writer, NULL, CV_DEFAULT, NULL);
1729185029Spjd	cv_init(&zilog->zl_cv_suspend, NULL, CV_DEFAULT, NULL);
1730219089Spjd	cv_init(&zilog->zl_cv_batch[0], NULL, CV_DEFAULT, NULL);
1731219089Spjd	cv_init(&zilog->zl_cv_batch[1], NULL, CV_DEFAULT, NULL);
1732185029Spjd
1733168404Spjd	return (zilog);
1734168404Spjd}
1735168404Spjd
1736168404Spjdvoid
1737168404Spjdzil_free(zilog_t *zilog)
1738168404Spjd{
1739168404Spjd	zilog->zl_stop_sync = 1;
1740168404Spjd
1741248571Smm	ASSERT0(zilog->zl_suspend);
1742248571Smm	ASSERT0(zilog->zl_suspending);
1743248571Smm
1744224526Smm	ASSERT(list_is_empty(&zilog->zl_lwb_list));
1745168404Spjd	list_destroy(&zilog->zl_lwb_list);
1746168404Spjd
1747185029Spjd	avl_destroy(&zilog->zl_vdev_tree);
1748185029Spjd	mutex_destroy(&zilog->zl_vdev_lock);
1749168404Spjd
1750219089Spjd	ASSERT(list_is_empty(&zilog->zl_itx_commit_list));
1751219089Spjd	list_destroy(&zilog->zl_itx_commit_list);
1752219089Spjd
1753219089Spjd	for (int i = 0; i < TXG_SIZE; i++) {
1754219089Spjd		/*
1755219089Spjd		 * It's possible for an itx to be generated that doesn't dirty
1756219089Spjd		 * a txg (e.g. ztest TX_TRUNCATE). So there's no zil_clean()
1757219089Spjd		 * callback to remove the entry. We remove those here.
1758219089Spjd		 *
1759219089Spjd		 * Also free up the ziltest itxs.
1760219089Spjd		 */
1761219089Spjd		if (zilog->zl_itxg[i].itxg_itxs)
1762219089Spjd			zil_itxg_clean(zilog->zl_itxg[i].itxg_itxs);
1763219089Spjd		mutex_destroy(&zilog->zl_itxg[i].itxg_lock);
1764219089Spjd	}
1765219089Spjd
1766168404Spjd	mutex_destroy(&zilog->zl_lock);
1767168404Spjd
1768185029Spjd	cv_destroy(&zilog->zl_cv_writer);
1769185029Spjd	cv_destroy(&zilog->zl_cv_suspend);
1770219089Spjd	cv_destroy(&zilog->zl_cv_batch[0]);
1771219089Spjd	cv_destroy(&zilog->zl_cv_batch[1]);
1772185029Spjd
1773168404Spjd	kmem_free(zilog, sizeof (zilog_t));
1774168404Spjd}
1775168404Spjd
1776168404Spjd/*
1777168404Spjd * Open an intent log.
1778168404Spjd */
1779168404Spjdzilog_t *
1780168404Spjdzil_open(objset_t *os, zil_get_data_t *get_data)
1781168404Spjd{
1782168404Spjd	zilog_t *zilog = dmu_objset_zil(os);
1783168404Spjd
1784224526Smm	ASSERT(zilog->zl_clean_taskq == NULL);
1785224526Smm	ASSERT(zilog->zl_get_data == NULL);
1786224526Smm	ASSERT(list_is_empty(&zilog->zl_lwb_list));
1787224526Smm
1788168404Spjd	zilog->zl_get_data = get_data;
1789168404Spjd	zilog->zl_clean_taskq = taskq_create("zil_clean", 1, minclsyspri,
1790168404Spjd	    2, 2, TASKQ_PREPOPULATE);
1791168404Spjd
1792168404Spjd	return (zilog);
1793168404Spjd}
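
/*
 * A hedged usage sketch, not part of the original source: an objset
 * owner pairs zil_open() with zil_close(), passing a callback used to
 * fetch file data for indirect log writes.  zfs_get_data is the ZPL's
 * real callback; the surrounding lines are schematic.
 *
 *	zfsvfs->z_log = zil_open(zfsvfs->z_os, zfs_get_data);
 *	...
 *	zil_close(zfsvfs->z_log);
 */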
1794168404Spjd
1795168404Spjd/*
1796168404Spjd * Close an intent log.
1797168404Spjd */
1798168404Spjdvoid
1799168404Spjdzil_close(zilog_t *zilog)
1800168404Spjd{
1801224526Smm	lwb_t *lwb;
1802219089Spjd	uint64_t txg = 0;
1803219089Spjd
1804219089Spjd	zil_commit(zilog, 0); /* commit all itx */
1805219089Spjd
1806168404Spjd	/*
1807219089Spjd	 * The lwb_max_txg for the stubby lwb will reflect the last activity
1808219089Spjd	 * for the zil.  After a txg_wait_synced() on the txg we know all the
1809219089Spjd	 * callbacks have occurred that may clean the zil.  Only then can we
1810219089Spjd	 * destroy the zl_clean_taskq.
1811168404Spjd	 */
1812219089Spjd	mutex_enter(&zilog->zl_lock);
1813224526Smm	lwb = list_tail(&zilog->zl_lwb_list);
1814224526Smm	if (lwb != NULL)
1815224526Smm		txg = lwb->lwb_max_txg;
1816219089Spjd	mutex_exit(&zilog->zl_lock);
1817219089Spjd	if (txg)
1818168404Spjd		txg_wait_synced(zilog->zl_dmu_pool, txg);
1819239620Smm	ASSERT(!zilog_is_dirty(zilog));
1820168404Spjd
1821168404Spjd	taskq_destroy(zilog->zl_clean_taskq);
1822168404Spjd	zilog->zl_clean_taskq = NULL;
1823168404Spjd	zilog->zl_get_data = NULL;
1824224526Smm
1825224526Smm	/*
1826224526Smm	 * We should have only one LWB left on the list; remove it now.
1827224526Smm	 */
1828224526Smm	mutex_enter(&zilog->zl_lock);
1829224526Smm	lwb = list_head(&zilog->zl_lwb_list);
1830224526Smm	if (lwb != NULL) {
1831224526Smm		ASSERT(lwb == list_tail(&zilog->zl_lwb_list));
1832224526Smm		list_remove(&zilog->zl_lwb_list, lwb);
1833224526Smm		zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
1834224526Smm		kmem_cache_free(zil_lwb_cache, lwb);
1835224526Smm	}
1836224526Smm	mutex_exit(&zilog->zl_lock);
1837168404Spjd}
1838168404Spjd
1839248571Smmstatic char *suspend_tag = "zil suspending";
1840248571Smm
1841168404Spjd/*
1842168404Spjd * Suspend an intent log.  While in suspended mode, we still honor
1843168404Spjd * synchronous semantics, but we rely on txg_wait_synced() to do it.
1844248571Smm * On old version pools, we suspend the log briefly when taking a
1845248571Smm * snapshot so that it will have an empty intent log.
1846248571Smm *
1847248571Smm * Long holds are not really intended to be used the way we do here --
1848248571Smm * held for such a short time.  A concurrent caller of dsl_dataset_long_held()
1849248571Smm * could fail.  Therefore we take pains to only put a long hold if it is
1850248571Smm * actually necessary.  Fortunately, it will only be necessary if the
1851248571Smm * objset is currently mounted (or the ZVOL equivalent).  In that case it
1852248571Smm * will already have a long hold, so we are not really making things any worse.
1853248571Smm *
1854248571Smm * Ideally, we would locate the existing long-holder (i.e. the zfsvfs_t or
1855248571Smm * zvol_state_t), and use their mechanism to prevent their hold from being
1856248571Smm * dropped (e.g. VFS_HOLD()).  However, that would be even more pain for
1857248571Smm * very little gain.
1858248571Smm *
1859248571Smm * If cookiep == NULL, this does both the suspend & resume.
1860248571Smm * Otherwise, it returns with the dataset "long held", and the cookie
1861248571Smm * should be passed into zil_resume().
1862168404Spjd */
1863168404Spjdint
1864248571Smmzil_suspend(const char *osname, void **cookiep)
1865168404Spjd{
1866248571Smm	objset_t *os;
1867248571Smm	zilog_t *zilog;
1868248571Smm	const zil_header_t *zh;
1869248571Smm	int error;
1870168404Spjd
1871248571Smm	error = dmu_objset_hold(osname, suspend_tag, &os);
1872248571Smm	if (error != 0)
1873248571Smm		return (error);
1874248571Smm	zilog = dmu_objset_zil(os);
1875248571Smm
1876168404Spjd	mutex_enter(&zilog->zl_lock);
1877248571Smm	zh = zilog->zl_header;
1878248571Smm
1879200724Sdelphij	if (zh->zh_flags & ZIL_REPLAY_NEEDED) {		/* unplayed log */
1880168404Spjd		mutex_exit(&zilog->zl_lock);
1881248571Smm		dmu_objset_rele(os, suspend_tag);
1882249195Smm		return (SET_ERROR(EBUSY));
1883168404Spjd	}
1884248571Smm
1885248571Smm	/*
1886248571Smm	 * Don't put a long hold in the cases where we can avoid it.  This
1887248571Smm	 * is when there is no cookie, so we are doing a suspend & resume
1888248571Smm	 * (i.e. called from zil_vdev_offline()), and there's nothing to do for
1889248571Smm	 * the suspend because the ZIL is already suspended or there is no ZIL.
1890248571Smm	 */
1891248571Smm	if (cookiep == NULL && !zilog->zl_suspending &&
1892248571Smm	    (zilog->zl_suspend > 0 || BP_IS_HOLE(&zh->zh_log))) {
1893248571Smm		mutex_exit(&zilog->zl_lock);
1894248571Smm		dmu_objset_rele(os, suspend_tag);
1895248571Smm		return (0);
1896248571Smm	}
1897248571Smm
1898248571Smm	dsl_dataset_long_hold(dmu_objset_ds(os), suspend_tag);
1899248571Smm	dsl_pool_rele(dmu_objset_pool(os), suspend_tag);
1900248571Smm
1901248571Smm	zilog->zl_suspend++;
1902248571Smm
1903248571Smm	if (zilog->zl_suspend > 1) {
1904168404Spjd		/*
1905248571Smm		 * Someone else is already suspending it.
1906168404Spjd		 * Just wait for them to finish.
1907168404Spjd		 */
1908248571Smm
1909168404Spjd		while (zilog->zl_suspending)
1910168404Spjd			cv_wait(&zilog->zl_cv_suspend, &zilog->zl_lock);
1911168404Spjd		mutex_exit(&zilog->zl_lock);
1912248571Smm
1913248571Smm		if (cookiep == NULL)
1914248571Smm			zil_resume(os);
1915248571Smm		else
1916248571Smm			*cookiep = os;
1917168404Spjd		return (0);
1918168404Spjd	}
1919248571Smm
1920248571Smm	/*
1921248571Smm	 * If there is no pointer to an on-disk block, this ZIL must not
1922248571Smm	 * be active (e.g. filesystem not mounted), so there's nothing
1923248571Smm	 * to clean up.
1924248571Smm	 */
1925248571Smm	if (BP_IS_HOLE(&zh->zh_log)) {
1926248571Smm		ASSERT(cookiep != NULL); /* fast path already handled */
1927248571Smm
1928248571Smm		*cookiep = os;
1929248571Smm		mutex_exit(&zilog->zl_lock);
1930248571Smm		return (0);
1931248571Smm	}
1932248571Smm
1933168404Spjd	zilog->zl_suspending = B_TRUE;
1934168404Spjd	mutex_exit(&zilog->zl_lock);
1935168404Spjd
1936219089Spjd	zil_commit(zilog, 0);
1937168404Spjd
1938168404Spjd	zil_destroy(zilog, B_FALSE);
1939168404Spjd
1940168404Spjd	mutex_enter(&zilog->zl_lock);
1941168404Spjd	zilog->zl_suspending = B_FALSE;
1942168404Spjd	cv_broadcast(&zilog->zl_cv_suspend);
1943168404Spjd	mutex_exit(&zilog->zl_lock);
1944168404Spjd
1945248571Smm	if (cookiep == NULL)
1946248571Smm		zil_resume(os);
1947248571Smm	else
1948248571Smm		*cookiep = os;
1949168404Spjd	return (0);
1950168404Spjd}
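
/*
 * A hedged usage sketch, not part of the original source: the two
 * zil_suspend() calling conventions described above.
 *
 *	void *cookie;
 *	error = zil_suspend(osname, &cookie);	keeps the dataset long-held
 *	if (error == 0) {
 *		... (the ZIL stays empty here) ...
 *		zil_resume(cookie);
 *	}
 *
 *	error = zil_suspend(osname, NULL);	suspend & resume in one call
 */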
1951168404Spjd
1952168404Spjdvoid
1953248571Smmzil_resume(void *cookie)
1954168404Spjd{
1955248571Smm	objset_t *os = cookie;
1956248571Smm	zilog_t *zilog = dmu_objset_zil(os);
1957248571Smm
1958168404Spjd	mutex_enter(&zilog->zl_lock);
1959168404Spjd	ASSERT(zilog->zl_suspend != 0);
1960168404Spjd	zilog->zl_suspend--;
1961168404Spjd	mutex_exit(&zilog->zl_lock);
1962248571Smm	dsl_dataset_long_rele(dmu_objset_ds(os), suspend_tag);
1963248571Smm	dsl_dataset_rele(dmu_objset_ds(os), suspend_tag);
1964168404Spjd}
1965168404Spjd
1966219089Spjdtypedef struct zil_replay_arg {
1967219089Spjd	zil_replay_func_t **zr_replay;
1968219089Spjd	void		*zr_arg;
1969219089Spjd	boolean_t	zr_byteswap;
1970219089Spjd	char		*zr_lr;
1971219089Spjd} zil_replay_arg_t;
1972219089Spjd
1973219089Spjdstatic int
1974219089Spjdzil_replay_error(zilog_t *zilog, lr_t *lr, int error)
1975209962Smm{
1976219089Spjd	char name[MAXNAMELEN];
1977209962Smm
1978219089Spjd	zilog->zl_replaying_seq--;	/* didn't actually replay this one */
1979209962Smm
1980219089Spjd	dmu_objset_name(zilog->zl_os, name);
1981209962Smm
1982219089Spjd	cmn_err(CE_WARN, "ZFS replay transaction error %d, "
1983219089Spjd	    "dataset %s, seq 0x%llx, txtype %llu %s\n", error, name,
1984219089Spjd	    (u_longlong_t)lr->lrc_seq,
1985219089Spjd	    (u_longlong_t)(lr->lrc_txtype & ~TX_CI),
1986219089Spjd	    (lr->lrc_txtype & TX_CI) ? "CI" : "");
1987219089Spjd
1988219089Spjd	return (error);
1989209962Smm}
1990209962Smm
1991219089Spjdstatic int
1992168404Spjdzil_replay_log_record(zilog_t *zilog, lr_t *lr, void *zra, uint64_t claim_txg)
1993168404Spjd{
1994168404Spjd	zil_replay_arg_t *zr = zra;
1995168404Spjd	const zil_header_t *zh = zilog->zl_header;
1996168404Spjd	uint64_t reclen = lr->lrc_reclen;
1997168404Spjd	uint64_t txtype = lr->lrc_txtype;
1998219089Spjd	int error = 0;
1999168404Spjd
2000219089Spjd	zilog->zl_replaying_seq = lr->lrc_seq;
2001168404Spjd
2002219089Spjd	if (lr->lrc_seq <= zh->zh_replay_seq)	/* already replayed */
2003219089Spjd		return (0);
2004219089Spjd
2005168404Spjd	if (lr->lrc_txg < claim_txg)		/* already committed */
2006219089Spjd		return (0);
2007168404Spjd
2008185029Spjd	/* Strip case-insensitive bit, still present in log record */
2009185029Spjd	txtype &= ~TX_CI;
2010185029Spjd
2011219089Spjd	if (txtype == 0 || txtype >= TX_MAX_TYPE)
2012219089Spjd		return (zil_replay_error(zilog, lr, EINVAL));
2013219089Spjd
2014219089Spjd	/*
2015219089Spjd	 * If this record type can be logged out of order, the object
2016219089Spjd	 * (lr_foid) may no longer exist.  That's legitimate, not an error.
2017219089Spjd	 */
2018219089Spjd	if (TX_OOO(txtype)) {
2019219089Spjd		error = dmu_object_info(zilog->zl_os,
2020219089Spjd		    ((lr_ooo_t *)lr)->lr_foid, NULL);
2021219089Spjd		if (error == ENOENT || error == EEXIST)
2022219089Spjd			return (0);
2023209962Smm	}
2024209962Smm
2025168404Spjd	/*
2026168404Spjd	 * Make a copy of the data so we can revise and extend it.
2027168404Spjd	 */
2028219089Spjd	bcopy(lr, zr->zr_lr, reclen);
2029168404Spjd
2030168404Spjd	/*
2031219089Spjd	 * If this is a TX_WRITE with a blkptr, suck in the data.
2032219089Spjd	 */
2033219089Spjd	if (txtype == TX_WRITE && reclen == sizeof (lr_write_t)) {
2034219089Spjd		error = zil_read_log_data(zilog, (lr_write_t *)lr,
2035219089Spjd		    zr->zr_lr + reclen);
2036248571Smm		if (error != 0)
2037219089Spjd			return (zil_replay_error(zilog, lr, error));
2038219089Spjd	}
2039219089Spjd
2040219089Spjd	/*
2041168404Spjd	 * The log block containing this lr may have been byteswapped
2042168404Spjd	 * so that we can easily examine common fields like lrc_txtype.
2043219089Spjd	 * However, the log is a mix of different record types, and only the
2044168404Spjd	 * replay vectors know how to byteswap their records.  Therefore, if
2045168404Spjd	 * the lr was byteswapped, undo it before invoking the replay vector.
2046168404Spjd	 */
2047168404Spjd	if (zr->zr_byteswap)
2048219089Spjd		byteswap_uint64_array(zr->zr_lr, reclen);
2049168404Spjd
2050168404Spjd	/*
2051168404Spjd	 * We must now do two things atomically: replay this log record,
2052209962Smm	 * and update the log header sequence number to reflect the fact that
2053209962Smm	 * we did so. At the end of each replay function the sequence number
2054209962Smm	 * is updated if we are in replay mode.
2055168404Spjd	 */
2056219089Spjd	error = zr->zr_replay[txtype](zr->zr_arg, zr->zr_lr, zr->zr_byteswap);
2057248571Smm	if (error != 0) {
2058168404Spjd		/*
2059168404Spjd		 * The DMU's dnode layer doesn't see removes until the txg
2060168404Spjd		 * commits, so a subsequent claim can spuriously fail with
2061209962Smm		 * EEXIST. So if we receive any error we try syncing out
2062219089Spjd		 * any removes and then retry the transaction.  Note that we
2063219089Spjd		 * specify B_FALSE for byteswap now, so we don't do it twice.
2064168404Spjd		 */
2065219089Spjd		txg_wait_synced(spa_get_dsl(zilog->zl_spa), 0);
2066219089Spjd		error = zr->zr_replay[txtype](zr->zr_arg, zr->zr_lr, B_FALSE);
2067248571Smm		if (error != 0)
2068219089Spjd			return (zil_replay_error(zilog, lr, error));
2069168404Spjd	}
2070219089Spjd	return (0);
2071168404Spjd}
2072168404Spjd
2073168404Spjd/* ARGSUSED */
2074219089Spjdstatic int
2075168404Spjdzil_incr_blks(zilog_t *zilog, blkptr_t *bp, void *arg, uint64_t claim_txg)
2076168404Spjd{
2077168404Spjd	zilog->zl_replay_blks++;
2078219089Spjd
2079219089Spjd	return (0);
2080168404Spjd}
2081168404Spjd
2082168404Spjd/*
2083168404Spjd * If this dataset has a non-empty intent log, replay it and destroy it.
2084168404Spjd */
2085168404Spjdvoid
2086209962Smmzil_replay(objset_t *os, void *arg, zil_replay_func_t *replay_func[TX_MAX_TYPE])
2087168404Spjd{
2088168404Spjd	zilog_t *zilog = dmu_objset_zil(os);
2089168404Spjd	const zil_header_t *zh = zilog->zl_header;
2090168404Spjd	zil_replay_arg_t zr;
2091168404Spjd
2092200724Sdelphij	if ((zh->zh_flags & ZIL_REPLAY_NEEDED) == 0) {
2093168404Spjd		zil_destroy(zilog, B_TRUE);
2094168404Spjd		return;
2095168404Spjd	}
2096168404Spjd	//printf("ZFS: Replaying ZIL on %s...\n", os->os->os_spa->spa_name);
2097168404Spjd
2098168404Spjd	zr.zr_replay = replay_func;
2099168404Spjd	zr.zr_arg = arg;
2100168404Spjd	zr.zr_byteswap = BP_SHOULD_BYTESWAP(&zh->zh_log);
2101219089Spjd	zr.zr_lr = kmem_alloc(2 * SPA_MAXBLOCKSIZE, KM_SLEEP);
2102168404Spjd
2103168404Spjd	/*
2104168404Spjd	 * Wait for in-progress removes to sync before starting replay.
2105168404Spjd	 */
2106168404Spjd	txg_wait_synced(zilog->zl_dmu_pool, 0);
2107168404Spjd
2108209962Smm	zilog->zl_replay = B_TRUE;
2109219089Spjd	zilog->zl_replay_time = ddi_get_lbolt();
2110168404Spjd	ASSERT(zilog->zl_replay_blks == 0);
2111168404Spjd	(void) zil_parse(zilog, zil_incr_blks, zil_replay_log_record, &zr,
2112168404Spjd	    zh->zh_claim_txg);
2113219089Spjd	kmem_free(zr.zr_lr, 2 * SPA_MAXBLOCKSIZE);
2114168404Spjd
2115168404Spjd	zil_destroy(zilog, B_FALSE);
2116185029Spjd	txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);
2117209962Smm	zilog->zl_replay = B_FALSE;
2118168404Spjd	//printf("ZFS: Replay of ZIL on %s finished.\n", os->os->os_spa->spa_name);
2119168404Spjd}
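
/*
 * A hedged usage sketch, not part of the original source: a consumer
 * supplies a TX_MAX_TYPE-sized table of replay vectors at mount time.
 * zfs_replay_vector is the ZPL's real table; the call is schematic.
 *
 *	zil_replay(zfsvfs->z_os, zfsvfs, zfs_replay_vector);
 */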
2120168404Spjd
2121219089Spjdboolean_t
2122219089Spjdzil_replaying(zilog_t *zilog, dmu_tx_t *tx)
2123168404Spjd{
2124219089Spjd	if (zilog->zl_sync == ZFS_SYNC_DISABLED)
2125219089Spjd		return (B_TRUE);
2126168404Spjd
2127219089Spjd	if (zilog->zl_replay) {
2128219089Spjd		dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
2129219089Spjd		zilog->zl_replayed_seq[dmu_tx_get_txg(tx) & TXG_MASK] =
2130219089Spjd		    zilog->zl_replaying_seq;
2131219089Spjd		return (B_TRUE);
2132168404Spjd	}
2133168404Spjd
2134219089Spjd	return (B_FALSE);
2135168404Spjd}
2136213197Smm
2137213197Smm/* ARGSUSED */
2138213197Smmint
2139219089Spjdzil_vdev_offline(const char *osname, void *arg)
2140213197Smm{
2141213197Smm	int error;
2142213197Smm
2143248571Smm	error = zil_suspend(osname, NULL);
2144248571Smm	if (error != 0)
2145249195Smm		return (SET_ERROR(EEXIST));
2146248571Smm	return (0);
2147213197Smm}
2148