zil.c revision 246666
1168404Spjd/*
2168404Spjd * CDDL HEADER START
3168404Spjd *
4168404Spjd * The contents of this file are subject to the terms of the
5168404Spjd * Common Development and Distribution License (the "License").
6168404Spjd * You may not use this file except in compliance with the License.
7168404Spjd *
8168404Spjd * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9168404Spjd * or http://www.opensolaris.org/os/licensing.
10168404Spjd * See the License for the specific language governing permissions
11168404Spjd * and limitations under the License.
12168404Spjd *
13168404Spjd * When distributing Covered Code, include this CDDL HEADER in each
14168404Spjd * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15168404Spjd * If applicable, add the following below this CDDL HEADER, with the
16168404Spjd * fields enclosed by brackets "[]" replaced with your own identifying
17168404Spjd * information: Portions Copyright [yyyy] [name of copyright owner]
18168404Spjd *
19168404Spjd * CDDL HEADER END
20168404Spjd */
21168404Spjd/*
22219089Spjd * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23239620Smm * Copyright (c) 2012 by Delphix. All rights reserved.
24168404Spjd */
25168404Spjd
26219089Spjd/* Portions Copyright 2010 Robert Milkowski */
27219089Spjd
28168404Spjd#include <sys/zfs_context.h>
29168404Spjd#include <sys/spa.h>
30168404Spjd#include <sys/dmu.h>
31168404Spjd#include <sys/zap.h>
32168404Spjd#include <sys/arc.h>
33168404Spjd#include <sys/stat.h>
34168404Spjd#include <sys/resource.h>
35168404Spjd#include <sys/zil.h>
36168404Spjd#include <sys/zil_impl.h>
37168404Spjd#include <sys/dsl_dataset.h>
38219089Spjd#include <sys/vdev_impl.h>
39168404Spjd#include <sys/dmu_tx.h>
40219089Spjd#include <sys/dsl_pool.h>
41168404Spjd
42168404Spjd/*
43168404Spjd * The zfs intent log (ZIL) saves, in memory, transaction records of
44168404Spjd * system calls that change the file system, with enough information
45168404Spjd * to be able to replay them. These are stored in memory until
46168404Spjd * either the DMU transaction group (txg) commits them to the stable pool
47168404Spjd * and they can be discarded, or they are flushed to the stable log
48168404Spjd * (also in the pool) due to an fsync, O_DSYNC or other synchronous
49168404Spjd * requirement. In the event of a panic or power failure, those log
50168404Spjd * records (transactions) are replayed.
51168404Spjd *
52168404Spjd * There is one ZIL per file system. Its on-disk (pool) format consists
53168404Spjd * of 3 parts:
54168404Spjd *
55168404Spjd * 	- ZIL header
56168404Spjd * 	- ZIL blocks
57168404Spjd * 	- ZIL records
58168404Spjd *
59168404Spjd * A log record holds a system call transaction. Log blocks can
60168404Spjd * hold many log records and the blocks are chained together.
61168404Spjd * Each ZIL block contains a block pointer (blkptr_t) to the next
62168404Spjd * ZIL block in the chain. The ZIL header points to the first
63168404Spjd * block in the chain. Note there is not a fixed place in the pool
64168404Spjd * to hold blocks. They are dynamically allocated and freed as
65168404Spjd * needed from the blocks available. The sketch below shows the ZIL layout:
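 *
 * As an illustrative sketch (not to scale):
 *
 *      +------------+
 *      | ZIL header |
 *      +-----+------+
 *            | zh_log
 *            v
 *      +------------+  blkptr   +------------+  blkptr
 *      | ZIL block  |---------->| ZIL block  |----------> ...
 *      | (records)  |           | (records)  |
 *      +------------+           +------------+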
66168404Spjd */
67168404Spjd
68168404Spjd/*
69168404Spjd * This global ZIL switch affects all pools
70168404Spjd */
71219089Spjdint zil_replay_disable = 0;    /* disable intent logging replay */
72168404SpjdSYSCTL_DECL(_vfs_zfs);
73219089SpjdTUNABLE_INT("vfs.zfs.zil_replay_disable", &zil_replay_disable);
74219089SpjdSYSCTL_INT(_vfs_zfs, OID_AUTO, zil_replay_disable, CTLFLAG_RW,
75219089Spjd    &zil_replay_disable, 0, "Disable intent logging replay");
76168404Spjd
77168404Spjd/*
78168404Spjd * Tunable parameter for debugging or performance analysis.  Setting
79168404Spjd * zfs_nocacheflush will cause corruption on power loss if a volatile
80168404Spjd * out-of-order write cache is enabled.
81168404Spjd */
82168404Spjdboolean_t zfs_nocacheflush = B_FALSE;
83168404SpjdTUNABLE_INT("vfs.zfs.cache_flush_disable", &zfs_nocacheflush);
84168404SpjdSYSCTL_INT(_vfs_zfs, OID_AUTO, cache_flush_disable, CTLFLAG_RDTUN,
85168404Spjd    &zfs_nocacheflush, 0, "Disable cache flush");
86240868Spjdboolean_t zfs_notrim = B_TRUE;
87240868SpjdTUNABLE_INT("vfs.zfs.trim_disable", &zfs_notrim);
88240868SpjdSYSCTL_INT(_vfs_zfs, OID_AUTO, trim_disable, CTLFLAG_RDTUN, &zfs_notrim, 0,
89240868Spjd    "Disable trim");
90168404Spjd
91168404Spjdstatic kmem_cache_t *zil_lwb_cache;
92168404Spjd
93219089Spjdstatic void zil_async_to_sync(zilog_t *zilog, uint64_t foid);
94219089Spjd
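/*
 * LWB_EMPTY() is true when no log records have been copied into the lwb yet,
 * i.e. its free space (lwb_sz - lwb_nused) still equals the block's full
 * record area (block size minus the zil_chain_t), in either block format.
 */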
95219089Spjd#define	LWB_EMPTY(lwb) ((BP_GET_LSIZE(&lwb->lwb_blk) - \
96219089Spjd    sizeof (zil_chain_t)) == (lwb->lwb_sz - lwb->lwb_nused))
97219089Spjd
98219089Spjd
99219089Spjd/*
100219089Spjd * ziltest is by and large an ugly hack, but very useful in
101219089Spjd * checking replay without tedious work.
102219089Spjd * When running ziltest we want to keep all itx's and so maintain
103219089Spjd * a single list in the zl_itxg[] that uses a high txg: ZILTEST_TXG.
104219089Spjd * We subtract TXG_CONCURRENT_STATES to allow for common code.
105219089Spjd */
106219089Spjd#define	ZILTEST_TXG (UINT64_MAX - TXG_CONCURRENT_STATES)
107219089Spjd
108168404Spjdstatic int
109219089Spjdzil_bp_compare(const void *x1, const void *x2)
110168404Spjd{
111219089Spjd	const dva_t *dva1 = &((zil_bp_node_t *)x1)->zn_dva;
112219089Spjd	const dva_t *dva2 = &((zil_bp_node_t *)x2)->zn_dva;
113168404Spjd
114168404Spjd	if (DVA_GET_VDEV(dva1) < DVA_GET_VDEV(dva2))
115168404Spjd		return (-1);
116168404Spjd	if (DVA_GET_VDEV(dva1) > DVA_GET_VDEV(dva2))
117168404Spjd		return (1);
118168404Spjd
119168404Spjd	if (DVA_GET_OFFSET(dva1) < DVA_GET_OFFSET(dva2))
120168404Spjd		return (-1);
121168404Spjd	if (DVA_GET_OFFSET(dva1) > DVA_GET_OFFSET(dva2))
122168404Spjd		return (1);
123168404Spjd
124168404Spjd	return (0);
125168404Spjd}
126168404Spjd
127168404Spjdstatic void
128219089Spjdzil_bp_tree_init(zilog_t *zilog)
129168404Spjd{
130219089Spjd	avl_create(&zilog->zl_bp_tree, zil_bp_compare,
131219089Spjd	    sizeof (zil_bp_node_t), offsetof(zil_bp_node_t, zn_node));
132168404Spjd}
133168404Spjd
134168404Spjdstatic void
135219089Spjdzil_bp_tree_fini(zilog_t *zilog)
136168404Spjd{
137219089Spjd	avl_tree_t *t = &zilog->zl_bp_tree;
138219089Spjd	zil_bp_node_t *zn;
139168404Spjd	void *cookie = NULL;
140168404Spjd
141168404Spjd	while ((zn = avl_destroy_nodes(t, &cookie)) != NULL)
142219089Spjd		kmem_free(zn, sizeof (zil_bp_node_t));
143168404Spjd
144168404Spjd	avl_destroy(t);
145168404Spjd}
146168404Spjd
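/*
 * The zl_bp_tree is keyed on the block's first DVA and is used while
 * parsing/claiming/freeing the log so that each block is processed at most
 * once; EEXIST from zil_bp_tree_add() means the block was already seen.
 */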
147219089Spjdint
148219089Spjdzil_bp_tree_add(zilog_t *zilog, const blkptr_t *bp)
149168404Spjd{
150219089Spjd	avl_tree_t *t = &zilog->zl_bp_tree;
151219089Spjd	const dva_t *dva = BP_IDENTITY(bp);
152219089Spjd	zil_bp_node_t *zn;
153168404Spjd	avl_index_t where;
154168404Spjd
155168404Spjd	if (avl_find(t, dva, &where) != NULL)
156168404Spjd		return (EEXIST);
157168404Spjd
158219089Spjd	zn = kmem_alloc(sizeof (zil_bp_node_t), KM_SLEEP);
159168404Spjd	zn->zn_dva = *dva;
160168404Spjd	avl_insert(t, zn, where);
161168404Spjd
162168404Spjd	return (0);
163168404Spjd}
164168404Spjd
165168404Spjdstatic zil_header_t *
166168404Spjdzil_header_in_syncing_context(zilog_t *zilog)
167168404Spjd{
168168404Spjd	return ((zil_header_t *)zilog->zl_header);
169168404Spjd}
170168404Spjd
171168404Spjdstatic void
172168404Spjdzil_init_log_chain(zilog_t *zilog, blkptr_t *bp)
173168404Spjd{
174168404Spjd	zio_cksum_t *zc = &bp->blk_cksum;
175168404Spjd
176168404Spjd	zc->zc_word[ZIL_ZC_GUID_0] = spa_get_random(-1ULL);
177168404Spjd	zc->zc_word[ZIL_ZC_GUID_1] = spa_get_random(-1ULL);
178168404Spjd	zc->zc_word[ZIL_ZC_OBJSET] = dmu_objset_id(zilog->zl_os);
179168404Spjd	zc->zc_word[ZIL_ZC_SEQ] = 1ULL;
180168404Spjd}
181168404Spjd
182168404Spjd/*
183219089Spjd * Read a log block and make sure it's valid.
184168404Spjd */
185168404Spjdstatic int
186219089Spjdzil_read_log_block(zilog_t *zilog, const blkptr_t *bp, blkptr_t *nbp, void *dst,
187219089Spjd    char **end)
188168404Spjd{
189219089Spjd	enum zio_flag zio_flags = ZIO_FLAG_CANFAIL;
190219089Spjd	uint32_t aflags = ARC_WAIT;
191219089Spjd	arc_buf_t *abuf = NULL;
192168404Spjd	zbookmark_t zb;
193168404Spjd	int error;
194168404Spjd
195219089Spjd	if (zilog->zl_header->zh_claim_txg == 0)
196219089Spjd		zio_flags |= ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB;
197168404Spjd
198219089Spjd	if (!(zilog->zl_header->zh_flags & ZIL_CLAIM_LR_SEQ_VALID))
199219089Spjd		zio_flags |= ZIO_FLAG_SPECULATIVE;
200168404Spjd
201219089Spjd	SET_BOOKMARK(&zb, bp->blk_cksum.zc_word[ZIL_ZC_OBJSET],
202219089Spjd	    ZB_ZIL_OBJECT, ZB_ZIL_LEVEL, bp->blk_cksum.zc_word[ZIL_ZC_SEQ]);
203168404Spjd
204246666Smm	error = arc_read(NULL, zilog->zl_spa, bp, arc_getbuf_func, &abuf,
205219089Spjd	    ZIO_PRIORITY_SYNC_READ, zio_flags, &aflags, &zb);
206219089Spjd
207168404Spjd	if (error == 0) {
208168404Spjd		zio_cksum_t cksum = bp->blk_cksum;
209168404Spjd
210168404Spjd		/*
211185029Spjd		 * Validate the checksummed log block.
212185029Spjd		 *
213168404Spjd		 * Sequence numbers should be... sequential.  The checksum
214168404Spjd		 * verifier for the next block should be bp's checksum plus 1.
215185029Spjd		 *
216185029Spjd		 * Also check the log chain linkage and size used.
217168404Spjd		 */
218168404Spjd		cksum.zc_word[ZIL_ZC_SEQ]++;
219168404Spjd
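		/*
		 * ZIO_CHECKSUM_ZILOG2 ("slim ZIL") blocks keep the
		 * zil_chain_t at the start of the block, while older ZILOG
		 * blocks keep it as a trailer at the end, so the two
		 * layouts are decoded differently below.
		 */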
220219089Spjd		if (BP_GET_CHECKSUM(bp) == ZIO_CHECKSUM_ZILOG2) {
221219089Spjd			zil_chain_t *zilc = abuf->b_data;
222219089Spjd			char *lr = (char *)(zilc + 1);
223219089Spjd			uint64_t len = zilc->zc_nused - sizeof (zil_chain_t);
224219089Spjd
225219089Spjd			if (bcmp(&cksum, &zilc->zc_next_blk.blk_cksum,
226219089Spjd			    sizeof (cksum)) || BP_IS_HOLE(&zilc->zc_next_blk)) {
227219089Spjd				error = ECKSUM;
228219089Spjd			} else {
229219089Spjd				bcopy(lr, dst, len);
230219089Spjd				*end = (char *)dst + len;
231219089Spjd				*nbp = zilc->zc_next_blk;
232219089Spjd			}
233219089Spjd		} else {
234219089Spjd			char *lr = abuf->b_data;
235219089Spjd			uint64_t size = BP_GET_LSIZE(bp);
236219089Spjd			zil_chain_t *zilc = (zil_chain_t *)(lr + size) - 1;
237219089Spjd
238219089Spjd			if (bcmp(&cksum, &zilc->zc_next_blk.blk_cksum,
239219089Spjd			    sizeof (cksum)) || BP_IS_HOLE(&zilc->zc_next_blk) ||
240219089Spjd			    (zilc->zc_nused > (size - sizeof (*zilc)))) {
241219089Spjd				error = ECKSUM;
242219089Spjd			} else {
243219089Spjd				bcopy(lr, dst, zilc->zc_nused);
244219089Spjd				*end = (char *)dst + zilc->zc_nused;
245219089Spjd				*nbp = zilc->zc_next_blk;
246219089Spjd			}
247185029Spjd		}
248168404Spjd
249219089Spjd		VERIFY(arc_buf_remove_ref(abuf, &abuf) == 1);
250168404Spjd	}
251168404Spjd
252219089Spjd	return (error);
253219089Spjd}
254168404Spjd
255219089Spjd/*
256219089Spjd * Read a TX_WRITE log data block.
257219089Spjd */
258219089Spjdstatic int
259219089Spjdzil_read_log_data(zilog_t *zilog, const lr_write_t *lr, void *wbuf)
260219089Spjd{
261219089Spjd	enum zio_flag zio_flags = ZIO_FLAG_CANFAIL;
262219089Spjd	const blkptr_t *bp = &lr->lr_blkptr;
263219089Spjd	uint32_t aflags = ARC_WAIT;
264219089Spjd	arc_buf_t *abuf = NULL;
265219089Spjd	zbookmark_t zb;
266219089Spjd	int error;
267219089Spjd
268219089Spjd	if (BP_IS_HOLE(bp)) {
269219089Spjd		if (wbuf != NULL)
270219089Spjd			bzero(wbuf, MAX(BP_GET_LSIZE(bp), lr->lr_length));
271219089Spjd		return (0);
272219089Spjd	}
273219089Spjd
274219089Spjd	if (zilog->zl_header->zh_claim_txg == 0)
275219089Spjd		zio_flags |= ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB;
276219089Spjd
277219089Spjd	SET_BOOKMARK(&zb, dmu_objset_id(zilog->zl_os), lr->lr_foid,
278219089Spjd	    ZB_ZIL_LEVEL, lr->lr_offset / BP_GET_LSIZE(bp));
279219089Spjd
280246666Smm	error = arc_read(NULL, zilog->zl_spa, bp, arc_getbuf_func, &abuf,
281219089Spjd	    ZIO_PRIORITY_SYNC_READ, zio_flags, &aflags, &zb);
282219089Spjd
283219089Spjd	if (error == 0) {
284219089Spjd		if (wbuf != NULL)
285219089Spjd			bcopy(abuf->b_data, wbuf, arc_buf_size(abuf));
286219089Spjd		(void) arc_buf_remove_ref(abuf, &abuf);
287219089Spjd	}
288219089Spjd
289168404Spjd	return (error);
290168404Spjd}
291168404Spjd
292168404Spjd/*
293168404Spjd * Parse the intent log, and call parse_func for each valid record within.
294168404Spjd */
295219089Spjdint
296168404Spjdzil_parse(zilog_t *zilog, zil_parse_blk_func_t *parse_blk_func,
297168404Spjd    zil_parse_lr_func_t *parse_lr_func, void *arg, uint64_t txg)
298168404Spjd{
299168404Spjd	const zil_header_t *zh = zilog->zl_header;
300219089Spjd	boolean_t claimed = !!zh->zh_claim_txg;
301219089Spjd	uint64_t claim_blk_seq = claimed ? zh->zh_claim_blk_seq : UINT64_MAX;
302219089Spjd	uint64_t claim_lr_seq = claimed ? zh->zh_claim_lr_seq : UINT64_MAX;
303219089Spjd	uint64_t max_blk_seq = 0;
304219089Spjd	uint64_t max_lr_seq = 0;
305219089Spjd	uint64_t blk_count = 0;
306219089Spjd	uint64_t lr_count = 0;
307219089Spjd	blkptr_t blk, next_blk;
308168404Spjd	char *lrbuf, *lrp;
309219089Spjd	int error = 0;
310168404Spjd
311219089Spjd	/*
312219089Spjd	 * Old logs didn't record the maximum zh_claim_lr_seq.
313219089Spjd	 */
314219089Spjd	if (!(zh->zh_flags & ZIL_CLAIM_LR_SEQ_VALID))
315219089Spjd		claim_lr_seq = UINT64_MAX;
316168404Spjd
317168404Spjd	/*
318168404Spjd	 * Starting at the block pointed to by zh_log we read the log chain.
319168404Spjd	 * For each block in the chain we strongly check that block to
320168404Spjd	 * ensure its validity.  We stop when an invalid block is found.
321168404Spjd	 * For each block pointer in the chain we call parse_blk_func().
322168404Spjd	 * For each record in each valid block we call parse_lr_func().
323168404Spjd	 * If the log has been claimed, stop if we encounter a sequence
324168404Spjd	 * number greater than the highest claimed sequence number.
325168404Spjd	 */
326219089Spjd	lrbuf = zio_buf_alloc(SPA_MAXBLOCKSIZE);
327219089Spjd	zil_bp_tree_init(zilog);
328168404Spjd
329219089Spjd	for (blk = zh->zh_log; !BP_IS_HOLE(&blk); blk = next_blk) {
330219089Spjd		uint64_t blk_seq = blk.blk_cksum.zc_word[ZIL_ZC_SEQ];
331219089Spjd		int reclen;
332219089Spjd		char *end;
333219089Spjd
334219089Spjd		if (blk_seq > claim_blk_seq)
335168404Spjd			break;
336219089Spjd		if ((error = parse_blk_func(zilog, &blk, arg, txg)) != 0)
337219089Spjd			break;
338219089Spjd		ASSERT3U(max_blk_seq, <, blk_seq);
339219089Spjd		max_blk_seq = blk_seq;
340219089Spjd		blk_count++;
341168404Spjd
342219089Spjd		if (max_lr_seq == claim_lr_seq && max_blk_seq == claim_blk_seq)
343219089Spjd			break;
344168404Spjd
345219089Spjd		error = zil_read_log_block(zilog, &blk, &next_blk, lrbuf, &end);
346168404Spjd		if (error)
347168404Spjd			break;
348168404Spjd
349219089Spjd		for (lrp = lrbuf; lrp < end; lrp += reclen) {
350168404Spjd			lr_t *lr = (lr_t *)lrp;
351168404Spjd			reclen = lr->lrc_reclen;
352168404Spjd			ASSERT3U(reclen, >=, sizeof (lr_t));
353219089Spjd			if (lr->lrc_seq > claim_lr_seq)
354219089Spjd				goto done;
355219089Spjd			if ((error = parse_lr_func(zilog, lr, arg, txg)) != 0)
356219089Spjd				goto done;
357219089Spjd			ASSERT3U(max_lr_seq, <, lr->lrc_seq);
358219089Spjd			max_lr_seq = lr->lrc_seq;
359219089Spjd			lr_count++;
360168404Spjd		}
361168404Spjd	}
362219089Spjddone:
363219089Spjd	zilog->zl_parse_error = error;
364219089Spjd	zilog->zl_parse_blk_seq = max_blk_seq;
365219089Spjd	zilog->zl_parse_lr_seq = max_lr_seq;
366219089Spjd	zilog->zl_parse_blk_count = blk_count;
367219089Spjd	zilog->zl_parse_lr_count = lr_count;
368168404Spjd
369219089Spjd	ASSERT(!claimed || !(zh->zh_flags & ZIL_CLAIM_LR_SEQ_VALID) ||
370219089Spjd	    (max_blk_seq == claim_blk_seq && max_lr_seq == claim_lr_seq));
371219089Spjd
372219089Spjd	zil_bp_tree_fini(zilog);
373219089Spjd	zio_buf_free(lrbuf, SPA_MAXBLOCKSIZE);
374219089Spjd
375219089Spjd	return (error);
376168404Spjd}
377168404Spjd
378219089Spjdstatic int
379168404Spjdzil_claim_log_block(zilog_t *zilog, blkptr_t *bp, void *tx, uint64_t first_txg)
380168404Spjd{
381168404Spjd	/*
382168404Spjd	 * Claim log block if not already committed and not already claimed.
383219089Spjd	 * If tx == NULL, just verify that the block is claimable.
384168404Spjd	 */
385219089Spjd	if (bp->blk_birth < first_txg || zil_bp_tree_add(zilog, bp) != 0)
386219089Spjd		return (0);
387219089Spjd
388219089Spjd	return (zio_wait(zio_claim(NULL, zilog->zl_spa,
389219089Spjd	    tx == NULL ? 0 : first_txg, bp, spa_claim_notify, NULL,
390219089Spjd	    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB)));
391168404Spjd}
392168404Spjd
393219089Spjdstatic int
394168404Spjdzil_claim_log_record(zilog_t *zilog, lr_t *lrc, void *tx, uint64_t first_txg)
395168404Spjd{
396219089Spjd	lr_write_t *lr = (lr_write_t *)lrc;
397219089Spjd	int error;
398219089Spjd
399219089Spjd	if (lrc->lrc_txtype != TX_WRITE)
400219089Spjd		return (0);
401219089Spjd
402219089Spjd	/*
403219089Spjd	 * If the block is not readable, don't claim it.  This can happen
404219089Spjd	 * in normal operation when a log block is written to disk before
405219089Spjd	 * some of the dmu_sync() blocks it points to.  In this case, the
406219089Spjd	 * transaction cannot have been committed to anyone (we would have
407219089Spjd	 * waited for all writes to be stable first), so it is semantically
408219089Spjd	 * correct to declare this the end of the log.
409219089Spjd	 */
410219089Spjd	if (lr->lr_blkptr.blk_birth >= first_txg &&
411219089Spjd	    (error = zil_read_log_data(zilog, lr, NULL)) != 0)
412219089Spjd		return (error);
413219089Spjd	return (zil_claim_log_block(zilog, &lr->lr_blkptr, tx, first_txg));
414168404Spjd}
415168404Spjd
416168404Spjd/* ARGSUSED */
417219089Spjdstatic int
418168404Spjdzil_free_log_block(zilog_t *zilog, blkptr_t *bp, void *tx, uint64_t claim_txg)
419168404Spjd{
420219089Spjd	zio_free_zil(zilog->zl_spa, dmu_tx_get_txg(tx), bp);
421219089Spjd
422219089Spjd	return (0);
423168404Spjd}
424168404Spjd
425219089Spjdstatic int
426168404Spjdzil_free_log_record(zilog_t *zilog, lr_t *lrc, void *tx, uint64_t claim_txg)
427168404Spjd{
428219089Spjd	lr_write_t *lr = (lr_write_t *)lrc;
429219089Spjd	blkptr_t *bp = &lr->lr_blkptr;
430219089Spjd
431168404Spjd	/*
432168404Spjd	 * If we previously claimed it, we need to free it.
433168404Spjd	 */
434219089Spjd	if (claim_txg != 0 && lrc->lrc_txtype == TX_WRITE &&
435219089Spjd	    bp->blk_birth >= claim_txg && zil_bp_tree_add(zilog, bp) == 0)
436219089Spjd		zio_free(zilog->zl_spa, dmu_tx_get_txg(tx), bp);
437219089Spjd
438219089Spjd	return (0);
439219089Spjd}
440219089Spjd
441219089Spjdstatic lwb_t *
442219089Spjdzil_alloc_lwb(zilog_t *zilog, blkptr_t *bp, uint64_t txg)
443219089Spjd{
444219089Spjd	lwb_t *lwb;
445219089Spjd
446219089Spjd	lwb = kmem_cache_alloc(zil_lwb_cache, KM_SLEEP);
447219089Spjd	lwb->lwb_zilog = zilog;
448219089Spjd	lwb->lwb_blk = *bp;
449219089Spjd	lwb->lwb_buf = zio_buf_alloc(BP_GET_LSIZE(bp));
450219089Spjd	lwb->lwb_max_txg = txg;
451219089Spjd	lwb->lwb_zio = NULL;
452219089Spjd	lwb->lwb_tx = NULL;
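	/*
	 * For ZIO_CHECKSUM_ZILOG2 (slim ZIL) blocks the zil_chain_t sits at
	 * the front of the buffer, so lwb_nused starts just past it and
	 * lwb_sz spans the whole block; for older blocks the chain info is
	 * a trailer, so lwb_sz excludes it instead.
	 */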
453219089Spjd	if (BP_GET_CHECKSUM(bp) == ZIO_CHECKSUM_ZILOG2) {
454219089Spjd		lwb->lwb_nused = sizeof (zil_chain_t);
455219089Spjd		lwb->lwb_sz = BP_GET_LSIZE(bp);
456219089Spjd	} else {
457219089Spjd		lwb->lwb_nused = 0;
458219089Spjd		lwb->lwb_sz = BP_GET_LSIZE(bp) - sizeof (zil_chain_t);
459168404Spjd	}
460219089Spjd
461219089Spjd	mutex_enter(&zilog->zl_lock);
462219089Spjd	list_insert_tail(&zilog->zl_lwb_list, lwb);
463219089Spjd	mutex_exit(&zilog->zl_lock);
464219089Spjd
465219089Spjd	return (lwb);
466168404Spjd}
467168404Spjd
468168404Spjd/*
469239620Smm * Called when we create in-memory log transactions so that we know
470239620Smm * to clean up the itxs at the end of spa_sync().
471239620Smm */
472239620Smmvoid
473239620Smmzilog_dirty(zilog_t *zilog, uint64_t txg)
474239620Smm{
475239620Smm	dsl_pool_t *dp = zilog->zl_dmu_pool;
476239620Smm	dsl_dataset_t *ds = dmu_objset_ds(zilog->zl_os);
477239620Smm
478239620Smm	if (dsl_dataset_is_snapshot(ds))
479239620Smm		panic("dirtying snapshot!");
480239620Smm
481239620Smm	if (txg_list_add(&dp->dp_dirty_zilogs, zilog, txg) == 0) {
482239620Smm		/* up the hold count until we can be written out */
483239620Smm		dmu_buf_add_ref(ds->ds_dbuf, zilog);
484239620Smm	}
485239620Smm}
486239620Smm
487239620Smmboolean_t
488239620Smmzilog_is_dirty(zilog_t *zilog)
489239620Smm{
490239620Smm	dsl_pool_t *dp = zilog->zl_dmu_pool;
491239620Smm
492239620Smm	for (int t = 0; t < TXG_SIZE; t++) {
493239620Smm		if (txg_list_member(&dp->dp_dirty_zilogs, zilog, t))
494239620Smm			return (B_TRUE);
495239620Smm	}
496239620Smm	return (B_FALSE);
497239620Smm}
498239620Smm
499239620Smm/*
500168404Spjd * Create an on-disk intent log.
501168404Spjd */
502219089Spjdstatic lwb_t *
503168404Spjdzil_create(zilog_t *zilog)
504168404Spjd{
505168404Spjd	const zil_header_t *zh = zilog->zl_header;
506219089Spjd	lwb_t *lwb = NULL;
507168404Spjd	uint64_t txg = 0;
508168404Spjd	dmu_tx_t *tx = NULL;
509168404Spjd	blkptr_t blk;
510168404Spjd	int error = 0;
511168404Spjd
512168404Spjd	/*
513168404Spjd	 * Wait for any previous destroy to complete.
514168404Spjd	 */
515168404Spjd	txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);
516168404Spjd
517168404Spjd	ASSERT(zh->zh_claim_txg == 0);
518168404Spjd	ASSERT(zh->zh_replay_seq == 0);
519168404Spjd
520168404Spjd	blk = zh->zh_log;
521168404Spjd
522168404Spjd	/*
523219089Spjd	 * Allocate an initial log block if:
524219089Spjd	 *    - there isn't one already
525219089Spjd	 *    - the existing block is the wrong endianness
526168404Spjd	 */
527207908Smm	if (BP_IS_HOLE(&blk) || BP_SHOULD_BYTESWAP(&blk)) {
528168404Spjd		tx = dmu_tx_create(zilog->zl_os);
529219089Spjd		VERIFY(dmu_tx_assign(tx, TXG_WAIT) == 0);
530168404Spjd		dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
531168404Spjd		txg = dmu_tx_get_txg(tx);
532168404Spjd
533207908Smm		if (!BP_IS_HOLE(&blk)) {
534219089Spjd			zio_free_zil(zilog->zl_spa, txg, &blk);
535207908Smm			BP_ZERO(&blk);
536207908Smm		}
537207908Smm
538219089Spjd		error = zio_alloc_zil(zilog->zl_spa, txg, &blk, NULL,
539219089Spjd		    ZIL_MIN_BLKSZ, zilog->zl_logbias == ZFS_LOGBIAS_LATENCY);
540168404Spjd
541168404Spjd		if (error == 0)
542168404Spjd			zil_init_log_chain(zilog, &blk);
543168404Spjd	}
544168404Spjd
545168404Spjd	/*
546168404Spjd	 * Allocate a log write buffer (lwb) for the first log block.
547168404Spjd	 */
548219089Spjd	if (error == 0)
549219089Spjd		lwb = zil_alloc_lwb(zilog, &blk, txg);
550168404Spjd
551168404Spjd	/*
552168404Spjd	 * If we just allocated the first log block, commit our transaction
553168404Spjd	 * and wait for zil_sync() to stuff the block pointer into zh_log.
554168404Spjd	 * (zh is part of the MOS, so we cannot modify it in open context.)
555168404Spjd	 */
556168404Spjd	if (tx != NULL) {
557168404Spjd		dmu_tx_commit(tx);
558168404Spjd		txg_wait_synced(zilog->zl_dmu_pool, txg);
559168404Spjd	}
560168404Spjd
561168404Spjd	ASSERT(bcmp(&blk, &zh->zh_log, sizeof (blk)) == 0);
562219089Spjd
563219089Spjd	return (lwb);
564168404Spjd}
565168404Spjd
566168404Spjd/*
567168404Spjd * In one tx, free all log blocks and clear the log header.
568168404Spjd * If keep_first is set, then we're replaying a log with no content.
569168404Spjd * We want to keep the first block, however, so that the first
570168404Spjd * synchronous transaction doesn't require a txg_wait_synced()
571168404Spjd * in zil_create().  We don't need to txg_wait_synced() here either
572168404Spjd * when keep_first is set, because both zil_create() and zil_destroy()
573168404Spjd * will wait for any in-progress destroys to complete.
574168404Spjd */
575168404Spjdvoid
576168404Spjdzil_destroy(zilog_t *zilog, boolean_t keep_first)
577168404Spjd{
578168404Spjd	const zil_header_t *zh = zilog->zl_header;
579168404Spjd	lwb_t *lwb;
580168404Spjd	dmu_tx_t *tx;
581168404Spjd	uint64_t txg;
582168404Spjd
583168404Spjd	/*
584168404Spjd	 * Wait for any previous destroy to complete.
585168404Spjd	 */
586168404Spjd	txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);
587168404Spjd
588219089Spjd	zilog->zl_old_header = *zh;		/* debugging aid */
589219089Spjd
590168404Spjd	if (BP_IS_HOLE(&zh->zh_log))
591168404Spjd		return;
592168404Spjd
593168404Spjd	tx = dmu_tx_create(zilog->zl_os);
594219089Spjd	VERIFY(dmu_tx_assign(tx, TXG_WAIT) == 0);
595168404Spjd	dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
596168404Spjd	txg = dmu_tx_get_txg(tx);
597168404Spjd
598168404Spjd	mutex_enter(&zilog->zl_lock);
599168404Spjd
600168404Spjd	ASSERT3U(zilog->zl_destroy_txg, <, txg);
601168404Spjd	zilog->zl_destroy_txg = txg;
602168404Spjd	zilog->zl_keep_first = keep_first;
603168404Spjd
604168404Spjd	if (!list_is_empty(&zilog->zl_lwb_list)) {
605168404Spjd		ASSERT(zh->zh_claim_txg == 0);
606224526Smm		VERIFY(!keep_first);
607168404Spjd		while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
608168404Spjd			list_remove(&zilog->zl_lwb_list, lwb);
609168404Spjd			if (lwb->lwb_buf != NULL)
610168404Spjd				zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
611219089Spjd			zio_free_zil(zilog->zl_spa, txg, &lwb->lwb_blk);
612168404Spjd			kmem_cache_free(zil_lwb_cache, lwb);
613168404Spjd		}
614219089Spjd	} else if (!keep_first) {
615239620Smm		zil_destroy_sync(zilog, tx);
616168404Spjd	}
617168404Spjd	mutex_exit(&zilog->zl_lock);
618168404Spjd
619168404Spjd	dmu_tx_commit(tx);
620185029Spjd}
621168404Spjd
622239620Smmvoid
623239620Smmzil_destroy_sync(zilog_t *zilog, dmu_tx_t *tx)
624239620Smm{
625239620Smm	ASSERT(list_is_empty(&zilog->zl_lwb_list));
626239620Smm	(void) zil_parse(zilog, zil_free_log_block,
627239620Smm	    zil_free_log_record, tx, zilog->zl_header->zh_claim_txg);
628239620Smm}
629239620Smm
630168404Spjdint
631219089Spjdzil_claim(const char *osname, void *txarg)
632168404Spjd{
633168404Spjd	dmu_tx_t *tx = txarg;
634168404Spjd	uint64_t first_txg = dmu_tx_get_txg(tx);
635168404Spjd	zilog_t *zilog;
636168404Spjd	zil_header_t *zh;
637168404Spjd	objset_t *os;
638168404Spjd	int error;
639168404Spjd
640219089Spjd	error = dmu_objset_hold(osname, FTAG, &os);
641168404Spjd	if (error) {
642185029Spjd		cmn_err(CE_WARN, "can't open objset for %s", osname);
643168404Spjd		return (0);
644168404Spjd	}
645168404Spjd
646168404Spjd	zilog = dmu_objset_zil(os);
647168404Spjd	zh = zil_header_in_syncing_context(zilog);
648168404Spjd
649219089Spjd	if (spa_get_log_state(zilog->zl_spa) == SPA_LOG_CLEAR) {
650213197Smm		if (!BP_IS_HOLE(&zh->zh_log))
651219089Spjd			zio_free_zil(zilog->zl_spa, first_txg, &zh->zh_log);
652213197Smm		BP_ZERO(&zh->zh_log);
653213197Smm		dsl_dataset_dirty(dmu_objset_ds(os), tx);
654219089Spjd		dmu_objset_rele(os, FTAG);
655219089Spjd		return (0);
656213197Smm	}
657213197Smm
658168404Spjd	/*
659168404Spjd	 * Claim all log blocks if we haven't already done so, and remember
660168404Spjd	 * the highest claimed sequence number.  This ensures that if we can
661168404Spjd	 * read only part of the log now (e.g. due to a missing device),
662168404Spjd	 * but we can read the entire log later, we will not try to replay
663168404Spjd	 * or destroy beyond the last block we successfully claimed.
664168404Spjd	 */
665168404Spjd	ASSERT3U(zh->zh_claim_txg, <=, first_txg);
666168404Spjd	if (zh->zh_claim_txg == 0 && !BP_IS_HOLE(&zh->zh_log)) {
667219089Spjd		(void) zil_parse(zilog, zil_claim_log_block,
668219089Spjd		    zil_claim_log_record, tx, first_txg);
669168404Spjd		zh->zh_claim_txg = first_txg;
670219089Spjd		zh->zh_claim_blk_seq = zilog->zl_parse_blk_seq;
671219089Spjd		zh->zh_claim_lr_seq = zilog->zl_parse_lr_seq;
672219089Spjd		if (zilog->zl_parse_lr_count || zilog->zl_parse_blk_count > 1)
673219089Spjd			zh->zh_flags |= ZIL_REPLAY_NEEDED;
674219089Spjd		zh->zh_flags |= ZIL_CLAIM_LR_SEQ_VALID;
675168404Spjd		dsl_dataset_dirty(dmu_objset_ds(os), tx);
676168404Spjd	}
677168404Spjd
678168404Spjd	ASSERT3U(first_txg, ==, (spa_last_synced_txg(zilog->zl_spa) + 1));
679219089Spjd	dmu_objset_rele(os, FTAG);
680168404Spjd	return (0);
681168404Spjd}
682168404Spjd
683185029Spjd/*
684185029Spjd * Check the log by walking the log chain.
685185029Spjd * Checksum errors are ok as they indicate the end of the chain.
686185029Spjd * Any other error (no device or read failure) returns an error.
687185029Spjd */
688185029Spjdint
689219089Spjdzil_check_log_chain(const char *osname, void *tx)
690168404Spjd{
691185029Spjd	zilog_t *zilog;
692185029Spjd	objset_t *os;
693219089Spjd	blkptr_t *bp;
694185029Spjd	int error;
695168404Spjd
696219089Spjd	ASSERT(tx == NULL);
697219089Spjd
698219089Spjd	error = dmu_objset_hold(osname, FTAG, &os);
699185029Spjd	if (error) {
700185029Spjd		cmn_err(CE_WARN, "can't open objset for %s", osname);
701185029Spjd		return (0);
702185029Spjd	}
703168404Spjd
704185029Spjd	zilog = dmu_objset_zil(os);
705219089Spjd	bp = (blkptr_t *)&zilog->zl_header->zh_log;
706219089Spjd
707219089Spjd	/*
708219089Spjd	 * Check the first block and determine if it's on a log device
709219089Spjd	 * which may have been removed or faulted prior to loading this
710219089Spjd	 * pool.  If so, there's no point in checking the rest of the log
711219089Spjd	 * as its content should have already been synced to the pool.
712219089Spjd	 */
713219089Spjd	if (!BP_IS_HOLE(bp)) {
714219089Spjd		vdev_t *vd;
715219089Spjd		boolean_t valid = B_TRUE;
716219089Spjd
717219089Spjd		spa_config_enter(os->os_spa, SCL_STATE, FTAG, RW_READER);
718219089Spjd		vd = vdev_lookup_top(os->os_spa, DVA_GET_VDEV(&bp->blk_dva[0]));
719219089Spjd		if (vd->vdev_islog && vdev_is_dead(vd))
720219089Spjd			valid = vdev_log_state_valid(vd);
721219089Spjd		spa_config_exit(os->os_spa, SCL_STATE, FTAG);
722219089Spjd
723219089Spjd		if (!valid) {
724219089Spjd			dmu_objset_rele(os, FTAG);
725219089Spjd			return (0);
726219089Spjd		}
727168404Spjd	}
728185029Spjd
729219089Spjd	/*
730219089Spjd	 * Because tx == NULL, zil_claim_log_block() will not actually claim
731219089Spjd	 * any blocks, but just determine whether it is possible to do so.
732219089Spjd	 * In addition to checking the log chain, zil_claim_log_block()
733219089Spjd	 * will invoke zio_claim() with a done func of spa_claim_notify(),
734219089Spjd	 * which will update spa_max_claim_txg.  See spa_load() for details.
735219089Spjd	 */
736219089Spjd	error = zil_parse(zilog, zil_claim_log_block, zil_claim_log_record, tx,
737219089Spjd	    zilog->zl_header->zh_claim_txg ? -1ULL : spa_first_txg(os->os_spa));
738219089Spjd
739219089Spjd	dmu_objset_rele(os, FTAG);
740219089Spjd
741219089Spjd	return ((error == ECKSUM || error == ENOENT) ? 0 : error);
742168404Spjd}
743168404Spjd
744185029Spjdstatic int
745185029Spjdzil_vdev_compare(const void *x1, const void *x2)
746185029Spjd{
747219089Spjd	const uint64_t v1 = ((zil_vdev_node_t *)x1)->zv_vdev;
748219089Spjd	const uint64_t v2 = ((zil_vdev_node_t *)x2)->zv_vdev;
749185029Spjd
750185029Spjd	if (v1 < v2)
751185029Spjd		return (-1);
752185029Spjd	if (v1 > v2)
753185029Spjd		return (1);
754185029Spjd
755185029Spjd	return (0);
756185029Spjd}
757185029Spjd
758168404Spjdvoid
759219089Spjdzil_add_block(zilog_t *zilog, const blkptr_t *bp)
760168404Spjd{
761185029Spjd	avl_tree_t *t = &zilog->zl_vdev_tree;
762185029Spjd	avl_index_t where;
763185029Spjd	zil_vdev_node_t *zv, zvsearch;
764185029Spjd	int ndvas = BP_GET_NDVAS(bp);
765185029Spjd	int i;
766168404Spjd
767185029Spjd	if (zfs_nocacheflush)
768185029Spjd		return;
769168404Spjd
770185029Spjd	ASSERT(zilog->zl_writer);
771168404Spjd
772185029Spjd	/*
773185029Spjd	 * Even though we're zl_writer, we still need a lock because the
774185029Spjd	 * zl_get_data() callbacks may have dmu_sync() done callbacks
775185029Spjd	 * that will run concurrently.
776185029Spjd	 */
777185029Spjd	mutex_enter(&zilog->zl_vdev_lock);
778185029Spjd	for (i = 0; i < ndvas; i++) {
779185029Spjd		zvsearch.zv_vdev = DVA_GET_VDEV(&bp->blk_dva[i]);
780185029Spjd		if (avl_find(t, &zvsearch, &where) == NULL) {
781185029Spjd			zv = kmem_alloc(sizeof (*zv), KM_SLEEP);
782185029Spjd			zv->zv_vdev = zvsearch.zv_vdev;
783185029Spjd			avl_insert(t, zv, where);
784185029Spjd		}
785185029Spjd	}
786185029Spjd	mutex_exit(&zilog->zl_vdev_lock);
787168404Spjd}
788168404Spjd
789219089Spjdstatic void
790168404Spjdzil_flush_vdevs(zilog_t *zilog)
791168404Spjd{
792168404Spjd	spa_t *spa = zilog->zl_spa;
793185029Spjd	avl_tree_t *t = &zilog->zl_vdev_tree;
794185029Spjd	void *cookie = NULL;
795185029Spjd	zil_vdev_node_t *zv;
796185029Spjd	zio_t *zio;
797168404Spjd
798168404Spjd	ASSERT(zilog->zl_writer);
799168404Spjd
800185029Spjd	/*
801185029Spjd	 * We don't need zl_vdev_lock here because we're the zl_writer,
802185029Spjd	 * and all zl_get_data() callbacks are done.
803185029Spjd	 */
804185029Spjd	if (avl_numnodes(t) == 0)
805185029Spjd		return;
806185029Spjd
807185029Spjd	spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
808185029Spjd
809185029Spjd	zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
810185029Spjd
811185029Spjd	while ((zv = avl_destroy_nodes(t, &cookie)) != NULL) {
812185029Spjd		vdev_t *vd = vdev_lookup_top(spa, zv->zv_vdev);
813185029Spjd		if (vd != NULL)
814185029Spjd			zio_flush(zio, vd);
815185029Spjd		kmem_free(zv, sizeof (*zv));
816168404Spjd	}
817168404Spjd
818168404Spjd	/*
819168404Spjd	 * Wait for all the flushes to complete.  Not all devices actually
820168404Spjd	 * support the DKIOCFLUSHWRITECACHE ioctl, so it's OK if it fails.
821168404Spjd	 */
822185029Spjd	(void) zio_wait(zio);
823185029Spjd
824185029Spjd	spa_config_exit(spa, SCL_STATE, FTAG);
825168404Spjd}
826168404Spjd
827168404Spjd/*
828168404Spjd * Function called when a log block write completes
829168404Spjd */
830168404Spjdstatic void
831168404Spjdzil_lwb_write_done(zio_t *zio)
832168404Spjd{
833168404Spjd	lwb_t *lwb = zio->io_private;
834168404Spjd	zilog_t *zilog = lwb->lwb_zilog;
835219089Spjd	dmu_tx_t *tx = lwb->lwb_tx;
836168404Spjd
837185029Spjd	ASSERT(BP_GET_COMPRESS(zio->io_bp) == ZIO_COMPRESS_OFF);
838185029Spjd	ASSERT(BP_GET_TYPE(zio->io_bp) == DMU_OT_INTENT_LOG);
839185029Spjd	ASSERT(BP_GET_LEVEL(zio->io_bp) == 0);
840185029Spjd	ASSERT(BP_GET_BYTEORDER(zio->io_bp) == ZFS_HOST_BYTEORDER);
841185029Spjd	ASSERT(!BP_IS_GANG(zio->io_bp));
842185029Spjd	ASSERT(!BP_IS_HOLE(zio->io_bp));
843185029Spjd	ASSERT(zio->io_bp->blk_fill == 0);
844185029Spjd
845168404Spjd	/*
846209962Smm	 * Ensure the lwb buffer pointer is cleared before releasing
847209962Smm	 * the txg. If we have had an allocation failure and
848209962Smm	 * the txg is waiting to sync then we want zil_sync()
849209962Smm	 * to remove the lwb so that it's not picked up as the next new
850209962Smm	 * one in zil_commit_writer(). zil_sync() will only remove
851209962Smm	 * the lwb if lwb_buf is null.
852168404Spjd	 */
853168404Spjd	zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
854168404Spjd	mutex_enter(&zilog->zl_lock);
855168404Spjd	lwb->lwb_buf = NULL;
856219089Spjd	lwb->lwb_tx = NULL;
857219089Spjd	mutex_exit(&zilog->zl_lock);
858209962Smm
859209962Smm	/*
860209962Smm	 * Now that we've written this log block, we have a stable pointer
861209962Smm	 * to the next block in the chain, so it's OK to let the txg in
862219089Spjd	 * which we allocated the next block sync.
863209962Smm	 */
864219089Spjd	dmu_tx_commit(tx);
865168404Spjd}
866168404Spjd
867168404Spjd/*
868168404Spjd * Initialize the io for a log block.
869168404Spjd */
870168404Spjdstatic void
871168404Spjdzil_lwb_write_init(zilog_t *zilog, lwb_t *lwb)
872168404Spjd{
873168404Spjd	zbookmark_t zb;
874168404Spjd
875219089Spjd	SET_BOOKMARK(&zb, lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_OBJSET],
876219089Spjd	    ZB_ZIL_OBJECT, ZB_ZIL_LEVEL,
877219089Spjd	    lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_SEQ]);
878168404Spjd
879168404Spjd	if (zilog->zl_root_zio == NULL) {
880168404Spjd		zilog->zl_root_zio = zio_root(zilog->zl_spa, NULL, NULL,
881168404Spjd		    ZIO_FLAG_CANFAIL);
882168404Spjd	}
883168404Spjd	if (lwb->lwb_zio == NULL) {
884168404Spjd		lwb->lwb_zio = zio_rewrite(zilog->zl_root_zio, zilog->zl_spa,
885219089Spjd		    0, &lwb->lwb_blk, lwb->lwb_buf, BP_GET_LSIZE(&lwb->lwb_blk),
886213197Smm		    zil_lwb_write_done, lwb, ZIO_PRIORITY_LOG_WRITE,
887219089Spjd		    ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_PROPAGATE, &zb);
888168404Spjd	}
889168404Spjd}
890168404Spjd
891168404Spjd/*
892219089Spjd * Define a limited set of intent log block sizes.
893219089Spjd * These must be a multiple of 4KB. Note only the amount used (again
894219089Spjd * aligned to 4KB) actually gets written. However, we can't always just
895219089Spjd * allocate SPA_MAXBLOCKSIZE as the slog space could be exhausted.
896219089Spjd */
897219089Spjduint64_t zil_block_buckets[] = {
898219089Spjd    4096,		/* non TX_WRITE */
899219089Spjd    8192+4096,		/* data base */
900219089Spjd    32*1024 + 4096, 	/* NFS writes */
901219089Spjd    UINT64_MAX
902219089Spjd};
903219089Spjd
904219089Spjd/*
905219089Spjd * Use the slog as long as the logbias is 'latency' and the current commit size
906219089Spjd * is less than the limit or the total list size is less than 2X the limit.
907219089Spjd * Limit checking is disabled by setting zil_slog_limit to UINT64_MAX.
908219089Spjd */
909219089Spjduint64_t zil_slog_limit = 1024 * 1024;
910219089Spjd#define	USE_SLOG(zilog) (((zilog)->zl_logbias == ZFS_LOGBIAS_LATENCY) && \
911219089Spjd	(((zilog)->zl_cur_used < zil_slog_limit) || \
912219089Spjd	((zilog)->zl_itx_list_sz < (zil_slog_limit << 1))))
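
/*
 * Illustrative example: with the default 1MB zil_slog_limit, a latency-biased
 * commit keeps using the slog only while the current commit size is under 1MB
 * or the total itx list size is under 2MB; once both thresholds are exceeded
 * the write is directed to the main pool instead.
 */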
913219089Spjd
914219089Spjd/*
915168404Spjd * Start a log block write and advance to the next log block.
916168404Spjd * Calls are serialized.
917168404Spjd */
918168404Spjdstatic lwb_t *
919168404Spjdzil_lwb_write_start(zilog_t *zilog, lwb_t *lwb)
920168404Spjd{
921219089Spjd	lwb_t *nlwb = NULL;
922219089Spjd	zil_chain_t *zilc;
923168404Spjd	spa_t *spa = zilog->zl_spa;
924219089Spjd	blkptr_t *bp;
925219089Spjd	dmu_tx_t *tx;
926168404Spjd	uint64_t txg;
927219089Spjd	uint64_t zil_blksz, wsz;
928219089Spjd	int i, error;
929168404Spjd
930219089Spjd	if (BP_GET_CHECKSUM(&lwb->lwb_blk) == ZIO_CHECKSUM_ZILOG2) {
931219089Spjd		zilc = (zil_chain_t *)lwb->lwb_buf;
932219089Spjd		bp = &zilc->zc_next_blk;
933219089Spjd	} else {
934219089Spjd		zilc = (zil_chain_t *)(lwb->lwb_buf + lwb->lwb_sz);
935219089Spjd		bp = &zilc->zc_next_blk;
936219089Spjd	}
937168404Spjd
938219089Spjd	ASSERT(lwb->lwb_nused <= lwb->lwb_sz);
939219089Spjd
940168404Spjd	/*
941168404Spjd	 * Allocate the next block and save its address in this block
942168404Spjd	 * before writing it in order to establish the log chain.
943168404Spjd	 * Note that if the allocation of nlwb synced before we wrote
944168404Spjd	 * the block that points at it (lwb), we'd leak it if we crashed.
945219089Spjd	 * Therefore, we don't do dmu_tx_commit() until zil_lwb_write_done().
946219089Spjd	 * We dirty the dataset to ensure that zil_sync() will be called
947219089Spjd	 * to clean up in the event of allocation failure or I/O failure.
948168404Spjd	 */
949219089Spjd	tx = dmu_tx_create(zilog->zl_os);
950219089Spjd	VERIFY(dmu_tx_assign(tx, TXG_WAIT) == 0);
951219089Spjd	dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
952219089Spjd	txg = dmu_tx_get_txg(tx);
953168404Spjd
954219089Spjd	lwb->lwb_tx = tx;
955219089Spjd
956168404Spjd	/*
957219089Spjd	 * Log blocks are pre-allocated. Here we select the size of the next
958219089Spjd	 * block, based on size used in the last block.
959219089Spjd	 * - first find the smallest bucket that will fit the block from a
960219089Spjd	 *   limited set of block sizes. This is because it's faster to write
961219089Spjd	 *   blocks allocated from the same metaslab as they are adjacent or
962219089Spjd	 *   close.
963219089Spjd	 * - next find the maximum from the new suggested size and an array of
964219089Spjd	 *   previous sizes. This lessens a picket fence effect of wrongly
965219089Spjd	 *   guessing the size if we have a stream of say 2k, 64k, 2k, 64k
966219089Spjd	 *   requests.
967219089Spjd	 *
968219089Spjd	 * Note we only write what is used, but we can't just allocate
969219089Spjd	 * the maximum block size because we can exhaust the available
970219089Spjd	 * pool log space.
971168404Spjd	 */
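	/*
	 * Illustrative example: a commit of roughly 20KB plus the
	 * zil_chain_t header lands above the 12KB bucket, so the 36KB
	 * (32K + 4K) bucket is chosen; anything beyond the largest bucket
	 * falls through to SPA_MAXBLOCKSIZE.
	 */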
972219089Spjd	zil_blksz = zilog->zl_cur_used + sizeof (zil_chain_t);
973219089Spjd	for (i = 0; zil_blksz > zil_block_buckets[i]; i++)
974219089Spjd		continue;
975219089Spjd	zil_blksz = zil_block_buckets[i];
976219089Spjd	if (zil_blksz == UINT64_MAX)
977219089Spjd		zil_blksz = SPA_MAXBLOCKSIZE;
978219089Spjd	zilog->zl_prev_blks[zilog->zl_prev_rotor] = zil_blksz;
979219089Spjd	for (i = 0; i < ZIL_PREV_BLKS; i++)
980219089Spjd		zil_blksz = MAX(zil_blksz, zilog->zl_prev_blks[i]);
981219089Spjd	zilog->zl_prev_rotor = (zilog->zl_prev_rotor + 1) & (ZIL_PREV_BLKS - 1);
982168404Spjd
983168404Spjd	BP_ZERO(bp);
984168404Spjd	/* pass the old blkptr in order to spread log blocks across devs */
985219089Spjd	error = zio_alloc_zil(spa, txg, bp, &lwb->lwb_blk, zil_blksz,
986219089Spjd	    USE_SLOG(zilog));
987219089Spjd	if (!error) {
988219089Spjd		ASSERT3U(bp->blk_birth, ==, txg);
989219089Spjd		bp->blk_cksum = lwb->lwb_blk.blk_cksum;
990219089Spjd		bp->blk_cksum.zc_word[ZIL_ZC_SEQ]++;
991168404Spjd
992168404Spjd		/*
993219089Spjd		 * Allocate a new log write buffer (lwb).
994168404Spjd		 */
995219089Spjd		nlwb = zil_alloc_lwb(zilog, bp, txg);
996168404Spjd
997219089Spjd		/* Record the block for later vdev flushing */
998219089Spjd		zil_add_block(zilog, &lwb->lwb_blk);
999168404Spjd	}
1000168404Spjd
1001219089Spjd	if (BP_GET_CHECKSUM(&lwb->lwb_blk) == ZIO_CHECKSUM_ZILOG2) {
1002219089Spjd		/* For Slim ZIL only write what is used. */
1003219089Spjd		wsz = P2ROUNDUP_TYPED(lwb->lwb_nused, ZIL_MIN_BLKSZ, uint64_t);
1004219089Spjd		ASSERT3U(wsz, <=, lwb->lwb_sz);
1005219089Spjd		zio_shrink(lwb->lwb_zio, wsz);
1006168404Spjd
1007219089Spjd	} else {
1008219089Spjd		wsz = lwb->lwb_sz;
1009219089Spjd	}
1010168404Spjd
1011219089Spjd	zilc->zc_pad = 0;
1012219089Spjd	zilc->zc_nused = lwb->lwb_nused;
1013219089Spjd	zilc->zc_eck.zec_cksum = lwb->lwb_blk.blk_cksum;
1014168404Spjd
1015168404Spjd	/*
1016219089Spjd	 * clear unused data for security
1017168404Spjd	 */
1018219089Spjd	bzero(lwb->lwb_buf + lwb->lwb_nused, wsz - lwb->lwb_nused);
1019168404Spjd
1020219089Spjd	zio_nowait(lwb->lwb_zio); /* Kick off the write for the old log block */
1021168404Spjd
1022168404Spjd	/*
1023219089Spjd	 * If there was an allocation failure then nlwb will be null which
1024219089Spjd	 * forces a txg_wait_synced().
1025168404Spjd	 */
1026168404Spjd	return (nlwb);
1027168404Spjd}
1028168404Spjd
1029168404Spjdstatic lwb_t *
1030168404Spjdzil_lwb_commit(zilog_t *zilog, itx_t *itx, lwb_t *lwb)
1031168404Spjd{
1032168404Spjd	lr_t *lrc = &itx->itx_lr; /* common log record */
1033219089Spjd	lr_write_t *lrw = (lr_write_t *)lrc;
1034219089Spjd	char *lr_buf;
1035168404Spjd	uint64_t txg = lrc->lrc_txg;
1036168404Spjd	uint64_t reclen = lrc->lrc_reclen;
1037219089Spjd	uint64_t dlen = 0;
1038168404Spjd
1039168404Spjd	if (lwb == NULL)
1040168404Spjd		return (NULL);
1041219089Spjd
1042168404Spjd	ASSERT(lwb->lwb_buf != NULL);
1043239620Smm	ASSERT(zilog_is_dirty(zilog) ||
1044239620Smm	    spa_freeze_txg(zilog->zl_spa) != UINT64_MAX);
1045168404Spjd
1046168404Spjd	if (lrc->lrc_txtype == TX_WRITE && itx->itx_wr_state == WR_NEED_COPY)
1047168404Spjd		dlen = P2ROUNDUP_TYPED(
1048219089Spjd		    lrw->lr_length, sizeof (uint64_t), uint64_t);
1049168404Spjd
1050168404Spjd	zilog->zl_cur_used += (reclen + dlen);
1051168404Spjd
1052168404Spjd	zil_lwb_write_init(zilog, lwb);
1053168404Spjd
1054168404Spjd	/*
1055168404Spjd	 * If this record won't fit in the current log block, start a new one.
1056168404Spjd	 */
1057219089Spjd	if (lwb->lwb_nused + reclen + dlen > lwb->lwb_sz) {
1058168404Spjd		lwb = zil_lwb_write_start(zilog, lwb);
1059168404Spjd		if (lwb == NULL)
1060168404Spjd			return (NULL);
1061168404Spjd		zil_lwb_write_init(zilog, lwb);
1062219089Spjd		ASSERT(LWB_EMPTY(lwb));
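		/*
		 * If the record does not fit even in a freshly started
		 * (empty) block, there is no point retrying; fall back to
		 * txg_wait_synced() so the change is committed by the DMU
		 * instead.
		 */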
1063219089Spjd		if (lwb->lwb_nused + reclen + dlen > lwb->lwb_sz) {
1064168404Spjd			txg_wait_synced(zilog->zl_dmu_pool, txg);
1065168404Spjd			return (lwb);
1066168404Spjd		}
1067168404Spjd	}
1068168404Spjd
1069219089Spjd	lr_buf = lwb->lwb_buf + lwb->lwb_nused;
1070219089Spjd	bcopy(lrc, lr_buf, reclen);
1071219089Spjd	lrc = (lr_t *)lr_buf;
1072219089Spjd	lrw = (lr_write_t *)lrc;
1073168404Spjd
1074168404Spjd	/*
1075168404Spjd	 * If it's a write, fetch the data or get its blkptr as appropriate.
1076168404Spjd	 */
1077168404Spjd	if (lrc->lrc_txtype == TX_WRITE) {
1078168404Spjd		if (txg > spa_freeze_txg(zilog->zl_spa))
1079168404Spjd			txg_wait_synced(zilog->zl_dmu_pool, txg);
1080168404Spjd		if (itx->itx_wr_state != WR_COPIED) {
1081168404Spjd			char *dbuf;
1082168404Spjd			int error;
1083168404Spjd
1084168404Spjd			if (dlen) {
1085168404Spjd				ASSERT(itx->itx_wr_state == WR_NEED_COPY);
1086219089Spjd				dbuf = lr_buf + reclen;
1087219089Spjd				lrw->lr_common.lrc_reclen += dlen;
1088168404Spjd			} else {
1089168404Spjd				ASSERT(itx->itx_wr_state == WR_INDIRECT);
1090168404Spjd				dbuf = NULL;
1091168404Spjd			}
1092168404Spjd			error = zilog->zl_get_data(
1093219089Spjd			    itx->itx_private, lrw, dbuf, lwb->lwb_zio);
1094214378Smm			if (error == EIO) {
1095214378Smm				txg_wait_synced(zilog->zl_dmu_pool, txg);
1096214378Smm				return (lwb);
1097214378Smm			}
1098168404Spjd			if (error) {
1099168404Spjd				ASSERT(error == ENOENT || error == EEXIST ||
1100168404Spjd				    error == EALREADY);
1101168404Spjd				return (lwb);
1102168404Spjd			}
1103168404Spjd		}
1104168404Spjd	}
1105168404Spjd
1106219089Spjd	/*
1107219089Spjd	 * We're actually making an entry, so update lrc_seq to be the
1108219089Spjd	 * log record sequence number.  Note that this is generally not
1109219089Spjd	 * equal to the itx sequence number because not all transactions
1110219089Spjd	 * are synchronous, and sometimes spa_sync() gets there first.
1111219089Spjd	 */
1112219089Spjd	lrc->lrc_seq = ++zilog->zl_lr_seq; /* we are single threaded */
1113168404Spjd	lwb->lwb_nused += reclen + dlen;
1114168404Spjd	lwb->lwb_max_txg = MAX(lwb->lwb_max_txg, txg);
1115219089Spjd	ASSERT3U(lwb->lwb_nused, <=, lwb->lwb_sz);
1116240415Smm	ASSERT0(P2PHASE(lwb->lwb_nused, sizeof (uint64_t)));
1117168404Spjd
1118168404Spjd	return (lwb);
1119168404Spjd}
1120168404Spjd
1121168404Spjditx_t *
1122185029Spjdzil_itx_create(uint64_t txtype, size_t lrsize)
1123168404Spjd{
1124168404Spjd	itx_t *itx;
1125168404Spjd
1126168404Spjd	lrsize = P2ROUNDUP_TYPED(lrsize, sizeof (uint64_t), size_t);
1127168404Spjd
1128168404Spjd	itx = kmem_alloc(offsetof(itx_t, itx_lr) + lrsize, KM_SLEEP);
1129168404Spjd	itx->itx_lr.lrc_txtype = txtype;
1130168404Spjd	itx->itx_lr.lrc_reclen = lrsize;
1131185029Spjd	itx->itx_sod = lrsize; /* if write & WR_NEED_COPY will be increased */
1132168404Spjd	itx->itx_lr.lrc_seq = 0;	/* defensive */
1133219089Spjd	itx->itx_sync = B_TRUE;		/* default is synchronous */
1134168404Spjd
1135168404Spjd	return (itx);
1136168404Spjd}
1137168404Spjd
1138219089Spjdvoid
1139219089Spjdzil_itx_destroy(itx_t *itx)
1140168404Spjd{
1141219089Spjd	kmem_free(itx, offsetof(itx_t, itx_lr) + itx->itx_lr.lrc_reclen);
1142219089Spjd}
1143168404Spjd
1144219089Spjd/*
1145219089Spjd * Free up the sync and async itxs. The itxs_t has already been detached
1146219089Spjd * so no locks are needed.
1147219089Spjd */
1148219089Spjdstatic void
1149219089Spjdzil_itxg_clean(itxs_t *itxs)
1150219089Spjd{
1151219089Spjd	itx_t *itx;
1152219089Spjd	list_t *list;
1153219089Spjd	avl_tree_t *t;
1154219089Spjd	void *cookie;
1155219089Spjd	itx_async_node_t *ian;
1156168404Spjd
1157219089Spjd	list = &itxs->i_sync_list;
1158219089Spjd	while ((itx = list_head(list)) != NULL) {
1159219089Spjd		list_remove(list, itx);
1160219089Spjd		kmem_free(itx, offsetof(itx_t, itx_lr) +
1161219089Spjd		    itx->itx_lr.lrc_reclen);
1162219089Spjd	}
1163168404Spjd
1164219089Spjd	cookie = NULL;
1165219089Spjd	t = &itxs->i_async_tree;
1166219089Spjd	while ((ian = avl_destroy_nodes(t, &cookie)) != NULL) {
1167219089Spjd		list = &ian->ia_list;
1168219089Spjd		while ((itx = list_head(list)) != NULL) {
1169219089Spjd			list_remove(list, itx);
1170219089Spjd			kmem_free(itx, offsetof(itx_t, itx_lr) +
1171219089Spjd			    itx->itx_lr.lrc_reclen);
1172219089Spjd		}
1173219089Spjd		list_destroy(list);
1174219089Spjd		kmem_free(ian, sizeof (itx_async_node_t));
1175219089Spjd	}
1176219089Spjd	avl_destroy(t);
1177219089Spjd
1178219089Spjd	kmem_free(itxs, sizeof (itxs_t));
1179168404Spjd}
1180168404Spjd
1181219089Spjdstatic int
1182219089Spjdzil_aitx_compare(const void *x1, const void *x2)
1183219089Spjd{
1184219089Spjd	const uint64_t o1 = ((itx_async_node_t *)x1)->ia_foid;
1185219089Spjd	const uint64_t o2 = ((itx_async_node_t *)x2)->ia_foid;
1186219089Spjd
1187219089Spjd	if (o1 < o2)
1188219089Spjd		return (-1);
1189219089Spjd	if (o1 > o2)
1190219089Spjd		return (1);
1191219089Spjd
1192219089Spjd	return (0);
1193219089Spjd}
1194219089Spjd
1195168404Spjd/*
1196219089Spjd * Remove all async itx with the given oid.
1197168404Spjd */
1198168404Spjdstatic void
1199219089Spjdzil_remove_async(zilog_t *zilog, uint64_t oid)
1200168404Spjd{
1201219089Spjd	uint64_t otxg, txg;
1202219089Spjd	itx_async_node_t *ian;
1203219089Spjd	avl_tree_t *t;
1204219089Spjd	avl_index_t where;
1205168404Spjd	list_t clean_list;
1206168404Spjd	itx_t *itx;
1207168404Spjd
1208219089Spjd	ASSERT(oid != 0);
1209168404Spjd	list_create(&clean_list, sizeof (itx_t), offsetof(itx_t, itx_node));
1210168404Spjd
1211219089Spjd	if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */
1212219089Spjd		otxg = ZILTEST_TXG;
1213219089Spjd	else
1214219089Spjd		otxg = spa_last_synced_txg(zilog->zl_spa) + 1;
1215219089Spjd
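	/*
	 * Only the last TXG_CONCURRENT_STATES txgs (open, quiescing, syncing)
	 * can still hold unsynced itxs, so walking those zl_itxg[] slots
	 * starting at otxg covers every list that could contain async itxs
	 * for this object.
	 */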
1216219089Spjd	for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) {
1217219089Spjd		itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK];
1218219089Spjd
1219219089Spjd		mutex_enter(&itxg->itxg_lock);
1220219089Spjd		if (itxg->itxg_txg != txg) {
1221219089Spjd			mutex_exit(&itxg->itxg_lock);
1222219089Spjd			continue;
1223219089Spjd		}
1224219089Spjd
1225219089Spjd		/*
1226219089Spjd		 * Locate the object node and append its list.
1227219089Spjd		 */
1228219089Spjd		t = &itxg->itxg_itxs->i_async_tree;
1229219089Spjd		ian = avl_find(t, &oid, &where);
1230219089Spjd		if (ian != NULL)
1231219089Spjd			list_move_tail(&clean_list, &ian->ia_list);
1232219089Spjd		mutex_exit(&itxg->itxg_lock);
1233168404Spjd	}
1234219089Spjd	while ((itx = list_head(&clean_list)) != NULL) {
1235219089Spjd		list_remove(&clean_list, itx);
1236219089Spjd		kmem_free(itx, offsetof(itx_t, itx_lr) +
1237219089Spjd		    itx->itx_lr.lrc_reclen);
1238219089Spjd	}
1239219089Spjd	list_destroy(&clean_list);
1240219089Spjd}
1241168404Spjd
1242219089Spjdvoid
1243219089Spjdzil_itx_assign(zilog_t *zilog, itx_t *itx, dmu_tx_t *tx)
1244219089Spjd{
1245219089Spjd	uint64_t txg;
1246219089Spjd	itxg_t *itxg;
1247219089Spjd	itxs_t *itxs, *clean = NULL;
1248219089Spjd
1249168404Spjd	/*
1250219089Spjd	 * Object ids can be re-instantiated in the next txg so
1251219089Spjd	 * remove any async transactions to avoid future leaks.
1252219089Spjd	 * This can happen if an fsync occurs on the re-instantiated
1253219089Spjd	 * object for a WR_INDIRECT or WR_NEED_COPY write, which gets
1254219089Spjd	 * the new file data and flushes a write record for the old object.
1255168404Spjd	 */
1256219089Spjd	if ((itx->itx_lr.lrc_txtype & ~TX_CI) == TX_REMOVE)
1257219089Spjd		zil_remove_async(zilog, itx->itx_oid);
1258219089Spjd
1259219089Spjd	/*
1260219089Spjd	 * Ensure the data of a renamed file is committed before the rename.
1261219089Spjd	 */
1262219089Spjd	if ((itx->itx_lr.lrc_txtype & ~TX_CI) == TX_RENAME)
1263219089Spjd		zil_async_to_sync(zilog, itx->itx_oid);
1264219089Spjd
1265239620Smm	if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX)
1266219089Spjd		txg = ZILTEST_TXG;
1267219089Spjd	else
1268219089Spjd		txg = dmu_tx_get_txg(tx);
1269219089Spjd
1270219089Spjd	itxg = &zilog->zl_itxg[txg & TXG_MASK];
1271219089Spjd	mutex_enter(&itxg->itxg_lock);
1272219089Spjd	itxs = itxg->itxg_itxs;
1273219089Spjd	if (itxg->itxg_txg != txg) {
1274219089Spjd		if (itxs != NULL) {
1275219089Spjd			/*
1276219089Spjd			 * The zil_clean callback hasn't got around to cleaning
1277219089Spjd			 * this itxg. Save the itxs for release below.
1278219089Spjd			 * This should be rare.
1279219089Spjd			 */
1280219089Spjd			atomic_add_64(&zilog->zl_itx_list_sz, -itxg->itxg_sod);
1281219089Spjd			itxg->itxg_sod = 0;
1282219089Spjd			clean = itxg->itxg_itxs;
1283219089Spjd		}
1284219089Spjd		ASSERT(itxg->itxg_sod == 0);
1285219089Spjd		itxg->itxg_txg = txg;
1286219089Spjd		itxs = itxg->itxg_itxs = kmem_zalloc(sizeof (itxs_t), KM_SLEEP);
1287219089Spjd
1288219089Spjd		list_create(&itxs->i_sync_list, sizeof (itx_t),
1289219089Spjd		    offsetof(itx_t, itx_node));
1290219089Spjd		avl_create(&itxs->i_async_tree, zil_aitx_compare,
1291219089Spjd		    sizeof (itx_async_node_t),
1292219089Spjd		    offsetof(itx_async_node_t, ia_node));
1293168404Spjd	}
1294219089Spjd	if (itx->itx_sync) {
1295219089Spjd		list_insert_tail(&itxs->i_sync_list, itx);
1296219089Spjd		atomic_add_64(&zilog->zl_itx_list_sz, itx->itx_sod);
1297219089Spjd		itxg->itxg_sod += itx->itx_sod;
1298219089Spjd	} else {
1299219089Spjd		avl_tree_t *t = &itxs->i_async_tree;
1300219089Spjd		uint64_t foid = ((lr_ooo_t *)&itx->itx_lr)->lr_foid;
1301219089Spjd		itx_async_node_t *ian;
1302219089Spjd		avl_index_t where;
1303168404Spjd
1304219089Spjd		ian = avl_find(t, &foid, &where);
1305219089Spjd		if (ian == NULL) {
1306219089Spjd			ian = kmem_alloc(sizeof (itx_async_node_t), KM_SLEEP);
1307219089Spjd			list_create(&ian->ia_list, sizeof (itx_t),
1308219089Spjd			    offsetof(itx_t, itx_node));
1309219089Spjd			ian->ia_foid = foid;
1310219089Spjd			avl_insert(t, ian, where);
1311219089Spjd		}
1312219089Spjd		list_insert_tail(&ian->ia_list, itx);
1313168404Spjd	}
1314219089Spjd
1315219089Spjd	itx->itx_lr.lrc_txg = dmu_tx_get_txg(tx);
1316239620Smm	zilog_dirty(zilog, txg);
1317219089Spjd	mutex_exit(&itxg->itxg_lock);
1318219089Spjd
1319219089Spjd	/* Release the old itxs now we've dropped the lock */
1320219089Spjd	if (clean != NULL)
1321219089Spjd		zil_itxg_clean(clean);
1322168404Spjd}
1323168404Spjd
1324168404Spjd/*
1325168404Spjd * If there are any in-memory intent log transactions which have now been
1326239620Smm * synced then start up a taskq to free them. We should only do this after we
1327239620Smm * have written out the uberblocks (i.e. the txg has been committed) so that
1328239620Smm * we don't inadvertently clean out in-memory log records that would be required
1329239620Smm * by zil_commit().
1330168404Spjd */
1331168404Spjdvoid
1332219089Spjdzil_clean(zilog_t *zilog, uint64_t synced_txg)
1333168404Spjd{
1334219089Spjd	itxg_t *itxg = &zilog->zl_itxg[synced_txg & TXG_MASK];
1335219089Spjd	itxs_t *clean_me;
1336168404Spjd
1337219089Spjd	mutex_enter(&itxg->itxg_lock);
1338219089Spjd	if (itxg->itxg_itxs == NULL || itxg->itxg_txg == ZILTEST_TXG) {
1339219089Spjd		mutex_exit(&itxg->itxg_lock);
1340219089Spjd		return;
1341168404Spjd	}
1342219089Spjd	ASSERT3U(itxg->itxg_txg, <=, synced_txg);
1343219089Spjd	ASSERT(itxg->itxg_txg != 0);
1344219089Spjd	ASSERT(zilog->zl_clean_taskq != NULL);
1345219089Spjd	atomic_add_64(&zilog->zl_itx_list_sz, -itxg->itxg_sod);
1346219089Spjd	itxg->itxg_sod = 0;
1347219089Spjd	clean_me = itxg->itxg_itxs;
1348219089Spjd	itxg->itxg_itxs = NULL;
1349219089Spjd	itxg->itxg_txg = 0;
1350219089Spjd	mutex_exit(&itxg->itxg_lock);
1351219089Spjd	/*
1352219089Spjd	 * Preferably start a task queue to free up the old itxs but
1353219089Spjd	 * if taskq_dispatch can't allocate resources to do that then
1354219089Spjd	 * free it in-line. This should be rare. Note, using TQ_SLEEP
1355219089Spjd	 * created a bad performance problem.
1356219089Spjd	 */
1357219089Spjd	if (taskq_dispatch(zilog->zl_clean_taskq,
1358219089Spjd	    (void (*)(void *))zil_itxg_clean, clean_me, TQ_NOSLEEP) == 0)
1359219089Spjd		zil_itxg_clean(clean_me);
1360168404Spjd}
1361168404Spjd
1362219089Spjd/*
1363219089Spjd * Get the list of itxs to commit into zl_itx_commit_list.
1364219089Spjd */
1365185029Spjdstatic void
1366219089Spjdzil_get_commit_list(zilog_t *zilog)
1367168404Spjd{
1368219089Spjd	uint64_t otxg, txg;
1369219089Spjd	list_t *commit_list = &zilog->zl_itx_commit_list;
1370219089Spjd	uint64_t push_sod = 0;
1371219089Spjd
1372219089Spjd	if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */
1373219089Spjd		otxg = ZILTEST_TXG;
1374219089Spjd	else
1375219089Spjd		otxg = spa_last_synced_txg(zilog->zl_spa) + 1;
1376219089Spjd
1377219089Spjd	for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) {
1378219089Spjd		itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK];
1379219089Spjd
1380219089Spjd		mutex_enter(&itxg->itxg_lock);
1381219089Spjd		if (itxg->itxg_txg != txg) {
1382219089Spjd			mutex_exit(&itxg->itxg_lock);
1383219089Spjd			continue;
1384219089Spjd		}
1385219089Spjd
1386219089Spjd		list_move_tail(commit_list, &itxg->itxg_itxs->i_sync_list);
1387219089Spjd		push_sod += itxg->itxg_sod;
1388219089Spjd		itxg->itxg_sod = 0;
1389219089Spjd
1390219089Spjd		mutex_exit(&itxg->itxg_lock);
1391219089Spjd	}
1392219089Spjd	atomic_add_64(&zilog->zl_itx_list_sz, -push_sod);
1393219089Spjd}
1394219089Spjd
1395219089Spjd/*
1396219089Spjd * Move the async itxs for a specified object to commit into sync lists.
1397219089Spjd */
1398219089Spjdstatic void
1399219089Spjdzil_async_to_sync(zilog_t *zilog, uint64_t foid)
1400219089Spjd{
1401219089Spjd	uint64_t otxg, txg;
1402219089Spjd	itx_async_node_t *ian;
1403219089Spjd	avl_tree_t *t;
1404219089Spjd	avl_index_t where;
1405219089Spjd
1406219089Spjd	if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */
1407219089Spjd		otxg = ZILTEST_TXG;
1408219089Spjd	else
1409219089Spjd		otxg = spa_last_synced_txg(zilog->zl_spa) + 1;
1410219089Spjd
1411219089Spjd	for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) {
1412219089Spjd		itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK];
1413219089Spjd
1414219089Spjd		mutex_enter(&itxg->itxg_lock);
1415219089Spjd		if (itxg->itxg_txg != txg) {
1416219089Spjd			mutex_exit(&itxg->itxg_lock);
1417219089Spjd			continue;
1418219089Spjd		}
1419219089Spjd
1420219089Spjd		/*
1421219089Spjd		 * If a foid is specified then find that node and append its
1422219089Spjd		 * list. Otherwise walk the tree appending all the lists
1423219089Spjd		 * to the sync list. We add to the end rather than the
1424219089Spjd		 * beginning to ensure that the create of the object has already happened.
1425219089Spjd		 */
1426219089Spjd		t = &itxg->itxg_itxs->i_async_tree;
1427219089Spjd		if (foid != 0) {
1428219089Spjd			ian = avl_find(t, &foid, &where);
1429219089Spjd			if (ian != NULL) {
1430219089Spjd				list_move_tail(&itxg->itxg_itxs->i_sync_list,
1431219089Spjd				    &ian->ia_list);
1432219089Spjd			}
1433219089Spjd		} else {
1434219089Spjd			void *cookie = NULL;
1435219089Spjd
1436219089Spjd			while ((ian = avl_destroy_nodes(t, &cookie)) != NULL) {
1437219089Spjd				list_move_tail(&itxg->itxg_itxs->i_sync_list,
1438219089Spjd				    &ian->ia_list);
1439219089Spjd				list_destroy(&ian->ia_list);
1440219089Spjd				kmem_free(ian, sizeof (itx_async_node_t));
1441219089Spjd			}
1442219089Spjd		}
1443219089Spjd		mutex_exit(&itxg->itxg_lock);
1444219089Spjd	}
1445219089Spjd}
1446219089Spjd
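/*
 * Write out the itxs for the current commit batch.  Called by zil_commit()
 * with zl_lock held and zl_writer set; the lock is dropped while the commit
 * list is gathered and the log writes are issued, and reacquired before
 * returning.
 */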
1447219089Spjdstatic void
1448219089Spjdzil_commit_writer(zilog_t *zilog)
1449219089Spjd{
1450168404Spjd	uint64_t txg;
1451219089Spjd	itx_t *itx;
1452168404Spjd	lwb_t *lwb;
1453219089Spjd	spa_t *spa = zilog->zl_spa;
1454219089Spjd	int error = 0;
1455168404Spjd
1456185029Spjd	ASSERT(zilog->zl_root_zio == NULL);
1457168404Spjd
1458219089Spjd	mutex_exit(&zilog->zl_lock);
1459219089Spjd
1460219089Spjd	zil_get_commit_list(zilog);
1461219089Spjd
1462219089Spjd	/*
1463219089Spjd	 * Return if there's nothing to commit before we dirty the fs by
1464219089Spjd	 * calling zil_create().
1465219089Spjd	 */
1466219089Spjd	if (list_head(&zilog->zl_itx_commit_list) == NULL) {
1467219089Spjd		mutex_enter(&zilog->zl_lock);
1468219089Spjd		return;
1469219089Spjd	}
1470219089Spjd
1471168404Spjd	if (zilog->zl_suspend) {
1472168404Spjd		lwb = NULL;
1473168404Spjd	} else {
1474168404Spjd		lwb = list_tail(&zilog->zl_lwb_list);
1475219089Spjd		if (lwb == NULL)
1476219089Spjd			lwb = zil_create(zilog);
1477168404Spjd	}
1478168404Spjd
1479168404Spjd	DTRACE_PROBE1(zil__cw1, zilog_t *, zilog);
1480219089Spjd	while ((itx = list_head(&zilog->zl_itx_commit_list)) != NULL) {
1481168404Spjd		txg = itx->itx_lr.lrc_txg;
1482168404Spjd		ASSERT(txg);
1483168404Spjd
1484219089Spjd		if (txg > spa_last_synced_txg(spa) || txg > spa_freeze_txg(spa))
1485168404Spjd			lwb = zil_lwb_commit(zilog, itx, lwb);
1486219089Spjd		list_remove(&zilog->zl_itx_commit_list, itx);
1487168404Spjd		kmem_free(itx, offsetof(itx_t, itx_lr)
1488168404Spjd		    + itx->itx_lr.lrc_reclen);
1489168404Spjd	}
1490168404Spjd	DTRACE_PROBE1(zil__cw2, zilog_t *, zilog);
1491168404Spjd
1492168404Spjd	/* write the last block out */
1493168404Spjd	if (lwb != NULL && lwb->lwb_zio != NULL)
1494168404Spjd		lwb = zil_lwb_write_start(zilog, lwb);
1495168404Spjd
1496168404Spjd	zilog->zl_cur_used = 0;
1497168404Spjd
1498168404Spjd	/*
1499168404Spjd	 * Wait if necessary for the log blocks to be on stable storage.
1500168404Spjd	 */
1501168404Spjd	if (zilog->zl_root_zio) {
1502219089Spjd		error = zio_wait(zilog->zl_root_zio);
1503185029Spjd		zilog->zl_root_zio = NULL;
1504185029Spjd		zil_flush_vdevs(zilog);
1505168404Spjd	}
1506168404Spjd
1507219089Spjd	if (error || lwb == NULL)
1508168404Spjd		txg_wait_synced(zilog->zl_dmu_pool, 0);
1509168404Spjd
1510168404Spjd	mutex_enter(&zilog->zl_lock);
1511168404Spjd
1512219089Spjd	/*
1513219089Spjd	 * Remember the highest committed log sequence number for ztest.
1514219089Spjd	 * We only update this value when all the log writes succeeded,
1515219089Spjd	 * because ztest wants to ASSERT that it got the whole log chain.
1516219089Spjd	 */
1517219089Spjd	if (error == 0 && lwb != NULL)
1518219089Spjd		zilog->zl_commit_lr_seq = zilog->zl_lr_seq;
1519168404Spjd}
1520168404Spjd
1521168404Spjd/*
1522219089Spjd * Commit zfs transactions to stable storage.
1523168404Spjd * If foid is 0 push out all transactions, otherwise push only those
1524219089Spjd * for that object or those that might reference that object.
1525219089Spjd *
1526219089Spjd * itxs are committed in batches. In a heavily stressed zil there will be
1527219089Spjd * a commit writer thread that is writing out a batch of itxs to the log
1528219089Spjd * for a set of committing threads (cthreads) in the same batch as the writer.
1529219089Spjd * Those cthreads are all waiting on the same cv for that batch.
1530219089Spjd *
1531219089Spjd * There will also be a different and growing batch of threads that are
1532219089Spjd * waiting to commit (qthreads). When the committing batch completes
1533219089Spjd * a transition occurs such that the cthreads exit and the qthreads become
1534219089Spjd * cthreads. One of the new cthreads becomes the writer thread for the
1535219089Spjd * batch. Any new threads arriving become new qthreads.
1536219089Spjd *
1537219089Spjd * Only two condition variables are needed, and no handoff between
1538219089Spjd * them is required; they simply flip-flop between serving the
1539219089Spjd * qthreads and the cthreads.
1540219089Spjd *
1541219089Spjd * Using this scheme we can efficiently wake up only those threads
1542219089Spjd * whose transactions have been committed.
1543168404Spjd */
1544168404Spjdvoid
1545219089Spjdzil_commit(zilog_t *zilog, uint64_t foid)
1546168404Spjd{
1547219089Spjd	uint64_t mybatch;
1548219089Spjd
1549219089Spjd	if (zilog->zl_sync == ZFS_SYNC_DISABLED)
1550168404Spjd		return;
1551168404Spjd
1552219089Spjd	/* move the async itxs for the foid to the sync queues */
1553219089Spjd	zil_async_to_sync(zilog, foid);
1554219089Spjd
1555168404Spjd	mutex_enter(&zilog->zl_lock);
1556219089Spjd	mybatch = zilog->zl_next_batch;
1557168404Spjd	while (zilog->zl_writer) {
1558219089Spjd		cv_wait(&zilog->zl_cv_batch[mybatch & 1], &zilog->zl_lock);
1559219089Spjd		if (mybatch <= zilog->zl_com_batch) {
1560168404Spjd			mutex_exit(&zilog->zl_lock);
1561168404Spjd			return;
1562168404Spjd		}
1563168404Spjd	}
1564219089Spjd
1565219089Spjd	zilog->zl_next_batch++;
1566219089Spjd	zilog->zl_writer = B_TRUE;
1567219089Spjd	zil_commit_writer(zilog);
1568219089Spjd	zilog->zl_com_batch = mybatch;
1569219089Spjd	zilog->zl_writer = B_FALSE;
1570168404Spjd	mutex_exit(&zilog->zl_lock);
1571219089Spjd
1572219089Spjd	/* wake up one thread to become the next writer */
1573219089Spjd	cv_signal(&zilog->zl_cv_batch[(mybatch+1) & 1]);
1574219089Spjd
1575219089Spjd	/* wake up all threads waiting for this batch to be committed */
1576219089Spjd	cv_broadcast(&zilog->zl_cv_batch[mybatch & 1]);
1577168404Spjd}
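/*
 * Usage note (illustrative): a synchronous path such as fsync or an
 * O_DSYNC write typically calls zil_commit(zilog, object-id) after its
 * itxs have been assigned; passing a foid of 0 pushes every outstanding
 * itx, which is what zil_close() and zil_suspend() do below.
 */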
1578168404Spjd
1579168404Spjd/*
1580168404Spjd * Called in syncing context to free committed log blocks and update log header.
1581168404Spjd */
1582168404Spjdvoid
1583168404Spjdzil_sync(zilog_t *zilog, dmu_tx_t *tx)
1584168404Spjd{
1585168404Spjd	zil_header_t *zh = zil_header_in_syncing_context(zilog);
1586168404Spjd	uint64_t txg = dmu_tx_get_txg(tx);
1587168404Spjd	spa_t *spa = zilog->zl_spa;
1588219089Spjd	uint64_t *replayed_seq = &zilog->zl_replayed_seq[txg & TXG_MASK];
1589168404Spjd	lwb_t *lwb;
1590168404Spjd
1591209962Smm	/*
1592209962Smm	 * We don't zero out zl_destroy_txg, so make sure we don't try
1593209962Smm	 * to destroy it twice.
1594209962Smm	 */
1595209962Smm	if (spa_sync_pass(spa) != 1)
1596209962Smm		return;
1597209962Smm
1598168404Spjd	mutex_enter(&zilog->zl_lock);
1599168404Spjd
1600168404Spjd	ASSERT(zilog->zl_stop_sync == 0);
1601168404Spjd
1602219089Spjd	if (*replayed_seq != 0) {
1603219089Spjd		ASSERT(zh->zh_replay_seq < *replayed_seq);
1604219089Spjd		zh->zh_replay_seq = *replayed_seq;
1605219089Spjd		*replayed_seq = 0;
1606219089Spjd	}
1607168404Spjd
1608168404Spjd	if (zilog->zl_destroy_txg == txg) {
1609168404Spjd		blkptr_t blk = zh->zh_log;
1610168404Spjd
1611168404Spjd		ASSERT(list_head(&zilog->zl_lwb_list) == NULL);
1612168404Spjd
1613168404Spjd		bzero(zh, sizeof (zil_header_t));
1614209962Smm		bzero(zilog->zl_replayed_seq, sizeof (zilog->zl_replayed_seq));
1615168404Spjd
1616168404Spjd		if (zilog->zl_keep_first) {
1617168404Spjd			/*
1618168404Spjd			 * If this block was part of a log chain that couldn't
1619168404Spjd			 * be claimed because a device was missing during
1620168404Spjd			 * zil_claim(), but that device later returns,
1621168404Spjd			 * then this block could erroneously appear valid.
1622168404Spjd			 * To guard against this, assign a new GUID to the new
1623168404Spjd			 * log chain so it doesn't matter what blk points to.
1624168404Spjd			 */
1625168404Spjd			zil_init_log_chain(zilog, &blk);
1626168404Spjd			zh->zh_log = blk;
1627168404Spjd		}
1628168404Spjd	}
1629168404Spjd
1630213197Smm	while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
1631168404Spjd		zh->zh_log = lwb->lwb_blk;
1632168404Spjd		if (lwb->lwb_buf != NULL || lwb->lwb_max_txg > txg)
1633168404Spjd			break;
1634168404Spjd		list_remove(&zilog->zl_lwb_list, lwb);
1635219089Spjd		zio_free_zil(spa, txg, &lwb->lwb_blk);
1636168404Spjd		kmem_cache_free(zil_lwb_cache, lwb);
1637168404Spjd
1638168404Spjd		/*
1639168404Spjd		 * If we don't have anything left in the lwb list then
1640168404Spjd		 * we've had an allocation failure and we need to zero
1641168404Spjd		 * out the zil_header blkptr so that we don't end
1642168404Spjd		 * up freeing the same block twice.
1643168404Spjd		 */
1644168404Spjd		if (list_head(&zilog->zl_lwb_list) == NULL)
1645168404Spjd			BP_ZERO(&zh->zh_log);
1646168404Spjd	}
1647168404Spjd	mutex_exit(&zilog->zl_lock);
1648168404Spjd}
1649168404Spjd
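/*
 * Create the kmem cache used for log write blocks (lwbs); zil_fini()
 * below destroys it.
 */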
1650168404Spjdvoid
1651168404Spjdzil_init(void)
1652168404Spjd{
1653168404Spjd	zil_lwb_cache = kmem_cache_create("zil_lwb_cache",
1654168404Spjd	    sizeof (struct lwb), 0, NULL, NULL, NULL, NULL, NULL, 0);
1655168404Spjd}
1656168404Spjd
1657168404Spjdvoid
1658168404Spjdzil_fini(void)
1659168404Spjd{
1660168404Spjd	kmem_cache_destroy(zil_lwb_cache);
1661168404Spjd}
1662168404Spjd
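/*
 * Cache the current values of the sync and logbias dataset properties
 * on the in-memory zilog (called when the properties change).
 */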
1663219089Spjdvoid
1664219089Spjdzil_set_sync(zilog_t *zilog, uint64_t sync)
1665219089Spjd{
1666219089Spjd	zilog->zl_sync = sync;
1667219089Spjd}
1668219089Spjd
1669219089Spjdvoid
1670219089Spjdzil_set_logbias(zilog_t *zilog, uint64_t logbias)
1671219089Spjd{
1672219089Spjd	zilog->zl_logbias = logbias;
1673219089Spjd}
1674219089Spjd
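/*
 * Allocate and initialize the in-memory zilog for an objset.  The on-disk
 * state is described by zh_phys, which zil_sync() keeps up to date.
 */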
1675168404Spjdzilog_t *
1676168404Spjdzil_alloc(objset_t *os, zil_header_t *zh_phys)
1677168404Spjd{
1678168404Spjd	zilog_t *zilog;
1679168404Spjd
1680168404Spjd	zilog = kmem_zalloc(sizeof (zilog_t), KM_SLEEP);
1681168404Spjd
1682168404Spjd	zilog->zl_header = zh_phys;
1683168404Spjd	zilog->zl_os = os;
1684168404Spjd	zilog->zl_spa = dmu_objset_spa(os);
1685168404Spjd	zilog->zl_dmu_pool = dmu_objset_pool(os);
1686168404Spjd	zilog->zl_destroy_txg = TXG_INITIAL - 1;
1687219089Spjd	zilog->zl_logbias = dmu_objset_logbias(os);
1688219089Spjd	zilog->zl_sync = dmu_objset_syncprop(os);
1689219089Spjd	zilog->zl_next_batch = 1;
1690168404Spjd
1691168404Spjd	mutex_init(&zilog->zl_lock, NULL, MUTEX_DEFAULT, NULL);
1692168404Spjd
1693219089Spjd	for (int i = 0; i < TXG_SIZE; i++) {
1694219089Spjd		mutex_init(&zilog->zl_itxg[i].itxg_lock, NULL,
1695219089Spjd		    MUTEX_DEFAULT, NULL);
1696219089Spjd	}
1697168404Spjd
1698168404Spjd	list_create(&zilog->zl_lwb_list, sizeof (lwb_t),
1699168404Spjd	    offsetof(lwb_t, lwb_node));
1700168404Spjd
1701219089Spjd	list_create(&zilog->zl_itx_commit_list, sizeof (itx_t),
1702219089Spjd	    offsetof(itx_t, itx_node));
1703219089Spjd
1704185029Spjd	mutex_init(&zilog->zl_vdev_lock, NULL, MUTEX_DEFAULT, NULL);
1705168404Spjd
1706185029Spjd	avl_create(&zilog->zl_vdev_tree, zil_vdev_compare,
1707185029Spjd	    sizeof (zil_vdev_node_t), offsetof(zil_vdev_node_t, zv_node));
1708185029Spjd
1709185029Spjd	cv_init(&zilog->zl_cv_writer, NULL, CV_DEFAULT, NULL);
1710185029Spjd	cv_init(&zilog->zl_cv_suspend, NULL, CV_DEFAULT, NULL);
1711219089Spjd	cv_init(&zilog->zl_cv_batch[0], NULL, CV_DEFAULT, NULL);
1712219089Spjd	cv_init(&zilog->zl_cv_batch[1], NULL, CV_DEFAULT, NULL);
1713185029Spjd
1714168404Spjd	return (zilog);
1715168404Spjd}
1716168404Spjd
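/*
 * Free an in-memory zilog allocated by zil_alloc().  The lwb list and the
 * commit list must already be empty; any per-txg itxs that never dirtied
 * a txg (or ziltest itxs) are cleaned up here.
 */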
1717168404Spjdvoid
1718168404Spjdzil_free(zilog_t *zilog)
1719168404Spjd{
1720168404Spjd	zilog->zl_stop_sync = 1;
1721168404Spjd
1722224526Smm	ASSERT(list_is_empty(&zilog->zl_lwb_list));
1723168404Spjd	list_destroy(&zilog->zl_lwb_list);
1724168404Spjd
1725185029Spjd	avl_destroy(&zilog->zl_vdev_tree);
1726185029Spjd	mutex_destroy(&zilog->zl_vdev_lock);
1727168404Spjd
1728219089Spjd	ASSERT(list_is_empty(&zilog->zl_itx_commit_list));
1729219089Spjd	list_destroy(&zilog->zl_itx_commit_list);
1730219089Spjd
1731219089Spjd	for (int i = 0; i < TXG_SIZE; i++) {
1732219089Spjd		/*
1733219089Spjd		 * It's possible for an itx to be generated that doesn't dirty
1734219089Spjd		 * a txg (e.g. ztest TX_TRUNCATE). So there's no zil_clean()
1735219089Spjd		 * callback to remove the entry. We remove those here.
1736219089Spjd		 *
1737219089Spjd		 * Also free up the ziltest itxs.
1738219089Spjd		 */
1739219089Spjd		if (zilog->zl_itxg[i].itxg_itxs)
1740219089Spjd			zil_itxg_clean(zilog->zl_itxg[i].itxg_itxs);
1741219089Spjd		mutex_destroy(&zilog->zl_itxg[i].itxg_lock);
1742219089Spjd	}
1743219089Spjd
1744168404Spjd	mutex_destroy(&zilog->zl_lock);
1745168404Spjd
1746185029Spjd	cv_destroy(&zilog->zl_cv_writer);
1747185029Spjd	cv_destroy(&zilog->zl_cv_suspend);
1748219089Spjd	cv_destroy(&zilog->zl_cv_batch[0]);
1749219089Spjd	cv_destroy(&zilog->zl_cv_batch[1]);
1750185029Spjd
1751168404Spjd	kmem_free(zilog, sizeof (zilog_t));
1752168404Spjd}
1753168404Spjd
1754168404Spjd/*
1755168404Spjd * Open an intent log.
1756168404Spjd */
1757168404Spjdzilog_t *
1758168404Spjdzil_open(objset_t *os, zil_get_data_t *get_data)
1759168404Spjd{
1760168404Spjd	zilog_t *zilog = dmu_objset_zil(os);
1761168404Spjd
1762224526Smm	ASSERT(zilog->zl_clean_taskq == NULL);
1763224526Smm	ASSERT(zilog->zl_get_data == NULL);
1764224526Smm	ASSERT(list_is_empty(&zilog->zl_lwb_list));
1765224526Smm
1766168404Spjd	zilog->zl_get_data = get_data;
1767168404Spjd	zilog->zl_clean_taskq = taskq_create("zil_clean", 1, minclsyspri,
1768168404Spjd	    2, 2, TASKQ_PREPOPULATE);
1769168404Spjd
1770168404Spjd	return (zilog);
1771168404Spjd}
1772168404Spjd
1773168404Spjd/*
1774168404Spjd * Close an intent log.
1775168404Spjd */
1776168404Spjdvoid
1777168404Spjdzil_close(zilog_t *zilog)
1778168404Spjd{
1779224526Smm	lwb_t *lwb;
1780219089Spjd	uint64_t txg = 0;
1781219089Spjd
1782219089Spjd	zil_commit(zilog, 0); /* commit all itx */
1783219089Spjd
1784168404Spjd	/*
1785219089Spjd	 * The lwb_max_txg for the stubby lwb will reflect the last activity
1786219089Spjd	 * for the zil.  After a txg_wait_synced() on the txg we know all the
1787219089Spjd	 * callbacks have occurred that may clean the zil.  Only then can we
1788219089Spjd	 * destroy the zl_clean_taskq.
1789168404Spjd	 */
1790219089Spjd	mutex_enter(&zilog->zl_lock);
1791224526Smm	lwb = list_tail(&zilog->zl_lwb_list);
1792224526Smm	if (lwb != NULL)
1793224526Smm		txg = lwb->lwb_max_txg;
1794219089Spjd	mutex_exit(&zilog->zl_lock);
1795219089Spjd	if (txg)
1796168404Spjd		txg_wait_synced(zilog->zl_dmu_pool, txg);
1797239620Smm	ASSERT(!zilog_is_dirty(zilog));
1798168404Spjd
1799168404Spjd	taskq_destroy(zilog->zl_clean_taskq);
1800168404Spjd	zilog->zl_clean_taskq = NULL;
1801168404Spjd	zilog->zl_get_data = NULL;
1802224526Smm
1803224526Smm	/*
1804224526Smm	 * We should have only one LWB left on the list; remove it now.
1805224526Smm	 */
1806224526Smm	mutex_enter(&zilog->zl_lock);
1807224526Smm	lwb = list_head(&zilog->zl_lwb_list);
1808224526Smm	if (lwb != NULL) {
1809224526Smm		ASSERT(lwb == list_tail(&zilog->zl_lwb_list));
1810224526Smm		list_remove(&zilog->zl_lwb_list, lwb);
1811224526Smm		zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
1812224526Smm		kmem_cache_free(zil_lwb_cache, lwb);
1813224526Smm	}
1814224526Smm	mutex_exit(&zilog->zl_lock);
1815168404Spjd}
1816168404Spjd
1817168404Spjd/*
1818168404Spjd * Suspend an intent log.  While in suspended mode, we still honor
1819168404Spjd * synchronous semantics, but we rely on txg_wait_synced() to do it.
1820168404Spjd * We suspend the log briefly when taking a snapshot so that the snapshot
1821168404Spjd * contains all the data it's supposed to, and has an empty intent log.
1822168404Spjd */
1823168404Spjdint
1824168404Spjdzil_suspend(zilog_t *zilog)
1825168404Spjd{
1826168404Spjd	const zil_header_t *zh = zilog->zl_header;
1827168404Spjd
1828168404Spjd	mutex_enter(&zilog->zl_lock);
1829200724Sdelphij	if (zh->zh_flags & ZIL_REPLAY_NEEDED) {		/* unplayed log */
1830168404Spjd		mutex_exit(&zilog->zl_lock);
1831168404Spjd		return (EBUSY);
1832168404Spjd	}
1833168404Spjd	if (zilog->zl_suspend++ != 0) {
1834168404Spjd		/*
1835168404Spjd		 * Someone else already began a suspend.
1836168404Spjd		 * Just wait for them to finish.
1837168404Spjd		 */
1838168404Spjd		while (zilog->zl_suspending)
1839168404Spjd			cv_wait(&zilog->zl_cv_suspend, &zilog->zl_lock);
1840168404Spjd		mutex_exit(&zilog->zl_lock);
1841168404Spjd		return (0);
1842168404Spjd	}
1843168404Spjd	zilog->zl_suspending = B_TRUE;
1844168404Spjd	mutex_exit(&zilog->zl_lock);
1845168404Spjd
1846219089Spjd	zil_commit(zilog, 0);
1847168404Spjd
1848168404Spjd	zil_destroy(zilog, B_FALSE);
1849168404Spjd
1850168404Spjd	mutex_enter(&zilog->zl_lock);
1851168404Spjd	zilog->zl_suspending = B_FALSE;
1852168404Spjd	cv_broadcast(&zilog->zl_cv_suspend);
1853168404Spjd	mutex_exit(&zilog->zl_lock);
1854168404Spjd
1855168404Spjd	return (0);
1856168404Spjd}
1857168404Spjd
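/*
 * Drop a suspend reference taken by zil_suspend().
 */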
1858168404Spjdvoid
1859168404Spjdzil_resume(zilog_t *zilog)
1860168404Spjd{
1861168404Spjd	mutex_enter(&zilog->zl_lock);
1862168404Spjd	ASSERT(zilog->zl_suspend != 0);
1863168404Spjd	zilog->zl_suspend--;
1864168404Spjd	mutex_exit(&zilog->zl_lock);
1865168404Spjd}
1866168404Spjd
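/*
 * State threaded through zil_parse() to zil_replay_log_record() during
 * replay: the per-txtype replay vectors, their argument, whether the log
 * needs byteswapping, and a scratch buffer for the record being replayed.
 */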
1867219089Spjdtypedef struct zil_replay_arg {
1868219089Spjd	zil_replay_func_t **zr_replay;
1869219089Spjd	void		*zr_arg;
1870219089Spjd	boolean_t	zr_byteswap;
1871219089Spjd	char		*zr_lr;
1872219089Spjd} zil_replay_arg_t;
1873219089Spjd
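/*
 * Report a failed replay of a log record and undo the bump of
 * zl_replaying_seq so the record is not counted as replayed.
 */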
1874219089Spjdstatic int
1875219089Spjdzil_replay_error(zilog_t *zilog, lr_t *lr, int error)
1876209962Smm{
1877219089Spjd	char name[MAXNAMELEN];
1878209962Smm
1879219089Spjd	zilog->zl_replaying_seq--;	/* didn't actually replay this one */
1880209962Smm
1881219089Spjd	dmu_objset_name(zilog->zl_os, name);
1882209962Smm
1883219089Spjd	cmn_err(CE_WARN, "ZFS replay transaction error %d, "
1884219089Spjd	    "dataset %s, seq 0x%llx, txtype %llu %s\n", error, name,
1885219089Spjd	    (u_longlong_t)lr->lrc_seq,
1886219089Spjd	    (u_longlong_t)(lr->lrc_txtype & ~TX_CI),
1887219089Spjd	    (lr->lrc_txtype & TX_CI) ? "CI" : "");
1888219089Spjd
1889219089Spjd	return (error);
1890209962Smm}
1891209962Smm
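/*
 * Replay a single log record.  Invoked by zil_parse() for each record in
 * the chain; records that were already replayed, or that were committed
 * before the claim txg, are skipped.
 */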
1892219089Spjdstatic int
1893168404Spjdzil_replay_log_record(zilog_t *zilog, lr_t *lr, void *zra, uint64_t claim_txg)
1894168404Spjd{
1895168404Spjd	zil_replay_arg_t *zr = zra;
1896168404Spjd	const zil_header_t *zh = zilog->zl_header;
1897168404Spjd	uint64_t reclen = lr->lrc_reclen;
1898168404Spjd	uint64_t txtype = lr->lrc_txtype;
1899219089Spjd	int error = 0;
1900168404Spjd
1901219089Spjd	zilog->zl_replaying_seq = lr->lrc_seq;
1902168404Spjd
1903219089Spjd	if (lr->lrc_seq <= zh->zh_replay_seq)	/* already replayed */
1904219089Spjd		return (0);
1905219089Spjd
1906168404Spjd	if (lr->lrc_txg < claim_txg)		/* already committed */
1907219089Spjd		return (0);
1908168404Spjd
1909185029Spjd	/* Strip case-insensitive bit, still present in log record */
1910185029Spjd	txtype &= ~TX_CI;
1911185029Spjd
1912219089Spjd	if (txtype == 0 || txtype >= TX_MAX_TYPE)
1913219089Spjd		return (zil_replay_error(zilog, lr, EINVAL));
1914219089Spjd
1915219089Spjd	/*
1916219089Spjd	 * If this record type can be logged out of order, the object
1917219089Spjd	 * (lr_foid) may no longer exist.  That's legitimate, not an error.
1918219089Spjd	 */
1919219089Spjd	if (TX_OOO(txtype)) {
1920219089Spjd		error = dmu_object_info(zilog->zl_os,
1921219089Spjd		    ((lr_ooo_t *)lr)->lr_foid, NULL);
1922219089Spjd		if (error == ENOENT || error == EEXIST)
1923219089Spjd			return (0);
1924209962Smm	}
1925209962Smm
1926168404Spjd	/*
1927168404Spjd	 * Make a copy of the data so we can revise and extend it.
1928168404Spjd	 */
1929219089Spjd	bcopy(lr, zr->zr_lr, reclen);
1930168404Spjd
1931168404Spjd	/*
1932219089Spjd	 * If this is a TX_WRITE with a blkptr, suck in the data.
1933219089Spjd	 */
1934219089Spjd	if (txtype == TX_WRITE && reclen == sizeof (lr_write_t)) {
1935219089Spjd		error = zil_read_log_data(zilog, (lr_write_t *)lr,
1936219089Spjd		    zr->zr_lr + reclen);
1937219089Spjd		if (error)
1938219089Spjd			return (zil_replay_error(zilog, lr, error));
1939219089Spjd	}
1940219089Spjd
1941219089Spjd	/*
1942168404Spjd	 * The log block containing this lr may have been byteswapped
1943168404Spjd	 * so that we can easily examine common fields like lrc_txtype.
1944219089Spjd	 * However, the log is a mix of different record types, and only the
1945168404Spjd	 * replay vectors know how to byteswap their records.  Therefore, if
1946168404Spjd	 * the lr was byteswapped, undo it before invoking the replay vector.
1947168404Spjd	 */
1948168404Spjd	if (zr->zr_byteswap)
1949219089Spjd		byteswap_uint64_array(zr->zr_lr, reclen);
1950168404Spjd
1951168404Spjd	/*
1952168404Spjd	 * We must now do two things atomically: replay this log record,
1953209962Smm	 * and update the log header sequence number to reflect the fact that
1954209962Smm	 * we did so. At the end of each replay function the sequence number
1955209962Smm	 * is updated if we are in replay mode.
1956168404Spjd	 */
1957219089Spjd	error = zr->zr_replay[txtype](zr->zr_arg, zr->zr_lr, zr->zr_byteswap);
1958219089Spjd	if (error) {
1959168404Spjd		/*
1960168404Spjd		 * The DMU's dnode layer doesn't see removes until the txg
1961168404Spjd		 * commits, so a subsequent claim can spuriously fail with
1962209962Smm		 * EEXIST. So if we receive any error we try syncing out
1963219089Spjd		 * any removes and then retry the transaction.  Note that we
1964219089Spjd		 * specify B_FALSE for byteswap now, so we don't do it twice.
1965168404Spjd		 */
1966219089Spjd		txg_wait_synced(spa_get_dsl(zilog->zl_spa), 0);
1967219089Spjd		error = zr->zr_replay[txtype](zr->zr_arg, zr->zr_lr, B_FALSE);
1968219089Spjd		if (error)
1969219089Spjd			return (zil_replay_error(zilog, lr, error));
1970168404Spjd	}
1971219089Spjd	return (0);
1972168404Spjd}
1973168404Spjd
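/*
 * zil_parse() block callback used during replay; it simply counts the
 * log blocks visited.
 */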
1974168404Spjd/* ARGSUSED */
1975219089Spjdstatic int
1976168404Spjdzil_incr_blks(zilog_t *zilog, blkptr_t *bp, void *arg, uint64_t claim_txg)
1977168404Spjd{
1978168404Spjd	zilog->zl_replay_blks++;
1979219089Spjd
1980219089Spjd	return (0);
1981168404Spjd}
1982168404Spjd
1983168404Spjd/*
1984168404Spjd * If this dataset has a non-empty intent log, replay it and destroy it.
1985168404Spjd */
1986168404Spjdvoid
1987209962Smmzil_replay(objset_t *os, void *arg, zil_replay_func_t *replay_func[TX_MAX_TYPE])
1988168404Spjd{
1989168404Spjd	zilog_t *zilog = dmu_objset_zil(os);
1990168404Spjd	const zil_header_t *zh = zilog->zl_header;
1991168404Spjd	zil_replay_arg_t zr;
1992168404Spjd
1993200724Sdelphij	if ((zh->zh_flags & ZIL_REPLAY_NEEDED) == 0) {
1994168404Spjd		zil_destroy(zilog, B_TRUE);
1995168404Spjd		return;
1996168404Spjd	}
1997168404Spjd	//printf("ZFS: Replaying ZIL on %s...\n", os->os->os_spa->spa_name);
1998168404Spjd
1999168404Spjd	zr.zr_replay = replay_func;
2000168404Spjd	zr.zr_arg = arg;
2001168404Spjd	zr.zr_byteswap = BP_SHOULD_BYTESWAP(&zh->zh_log);
2002219089Spjd	zr.zr_lr = kmem_alloc(2 * SPA_MAXBLOCKSIZE, KM_SLEEP);
2003168404Spjd
2004168404Spjd	/*
2005168404Spjd	 * Wait for in-progress removes to sync before starting replay.
2006168404Spjd	 */
2007168404Spjd	txg_wait_synced(zilog->zl_dmu_pool, 0);
2008168404Spjd
2009209962Smm	zilog->zl_replay = B_TRUE;
2010219089Spjd	zilog->zl_replay_time = ddi_get_lbolt();
2011168404Spjd	ASSERT(zilog->zl_replay_blks == 0);
2012168404Spjd	(void) zil_parse(zilog, zil_incr_blks, zil_replay_log_record, &zr,
2013168404Spjd	    zh->zh_claim_txg);
2014219089Spjd	kmem_free(zr.zr_lr, 2 * SPA_MAXBLOCKSIZE);
2015168404Spjd
2016168404Spjd	zil_destroy(zilog, B_FALSE);
2017185029Spjd	txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);
2018209962Smm	zilog->zl_replay = B_FALSE;
2019168404Spjd	//printf("ZFS: Replay of ZIL on %s finished.\n", os->os->os_spa->spa_name);
2020168404Spjd}
2021168404Spjd
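/*
 * Return B_TRUE if intent logging should be bypassed, i.e. the dataset
 * has sync=disabled or a replay is in progress.  While replaying, also
 * record the sequence number being replayed for this txg so that
 * zil_sync() can update the on-disk replay sequence.
 */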
2022219089Spjdboolean_t
2023219089Spjdzil_replaying(zilog_t *zilog, dmu_tx_t *tx)
2024168404Spjd{
2025219089Spjd	if (zilog->zl_sync == ZFS_SYNC_DISABLED)
2026219089Spjd		return (B_TRUE);
2027168404Spjd
2028219089Spjd	if (zilog->zl_replay) {
2029219089Spjd		dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
2030219089Spjd		zilog->zl_replayed_seq[dmu_tx_get_txg(tx) & TXG_MASK] =
2031219089Spjd		    zilog->zl_replaying_seq;
2032219089Spjd		return (B_TRUE);
2033168404Spjd	}
2034168404Spjd
2035219089Spjd	return (B_FALSE);
2036168404Spjd}
2037213197Smm
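/*
 * Per-dataset callback (e.g. via dmu_objset_find()) used when taking a
 * log device offline: verify that the ZIL for the named objset can be
 * suspended, resuming it right away.  Returns EEXIST if the log cannot
 * be suspended, e.g. because it still needs replay.
 */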
2038213197Smm/* ARGSUSED */
2039213197Smmint
2040219089Spjdzil_vdev_offline(const char *osname, void *arg)
2041213197Smm{
2042213197Smm	objset_t *os;
2043213197Smm	zilog_t *zilog;
2044213197Smm	int error;
2045213197Smm
2046219089Spjd	error = dmu_objset_hold(osname, FTAG, &os);
2047213197Smm	if (error)
2048213197Smm		return (error);
2049213197Smm
2050213197Smm	zilog = dmu_objset_zil(os);
2051213197Smm	if (zil_suspend(zilog) != 0)
2052213197Smm		error = EEXIST;
2053213197Smm	else
2054213197Smm		zil_resume(zilog);
2055219089Spjd	dmu_objset_rele(os, FTAG);
2056213197Smm	return (error);
2057213197Smm}
2058