zil.c revision 258632
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013 by Delphix. All rights reserved.
 */

/* Portions Copyright 2010 Robert Milkowski */

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/dmu.h>
#include <sys/zap.h>
#include <sys/arc.h>
#include <sys/stat.h>
#include <sys/resource.h>
#include <sys/zil.h>
#include <sys/zil_impl.h>
#include <sys/dsl_dataset.h>
#include <sys/vdev_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_pool.h>

/*
 * The ZFS intent log (ZIL) saves, in memory, transaction records of
 * system calls that change the file system, with enough information
 * to be able to replay them. These records are held in memory until
 * either the DMU transaction group (txg) commits them to the stable pool
 * and they can be discarded, or they are flushed to the stable log
 * (also in the pool) due to an fsync, O_DSYNC, or other synchronous
 * requirement. In the event of a panic or power failure, those log
 * records (transactions) are replayed.
 *
 * There is one ZIL per file system. Its on-disk (pool) format consists
 * of 3 parts:
 *
 * 	- ZIL header
 * 	- ZIL blocks
 * 	- ZIL records
 *
 * A log record holds a system call transaction. Log blocks can
 * hold many log records, and the blocks are chained together.
 * Each ZIL block contains a block pointer (blkptr_t) to the next
 * ZIL block in the chain. The ZIL header points to the first
 * block in the chain. Note there is not a fixed place in the pool
 * to hold blocks; they are dynamically allocated and freed as
 * needed from the blocks available.
 */
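/*
 * A rough sketch of the chain described above (not an exact on-disk
 * layout):
 *
 *	zh_log
 *	   |
 *	   v
 *	+-----------------+-------+     +-----------------+-------+
 *	| log records ... | chain |---->| log records ... | chain |----> ...
 *	+-----------------+-------+     +-----------------+-------+
 *
 * where "chain" is the zil_chain_t holding the blkptr_t of the next block.
 */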

/*
 * Disable intent logging replay.  This global ZIL switch affects all pools.
 */
int zil_replay_disable = 0;
SYSCTL_DECL(_vfs_zfs);
TUNABLE_INT("vfs.zfs.zil_replay_disable", &zil_replay_disable);
SYSCTL_INT(_vfs_zfs, OID_AUTO, zil_replay_disable, CTLFLAG_RW,
    &zil_replay_disable, 0, "Disable intent logging replay");

/*
 * Tunable parameter for debugging or performance analysis.  Setting
 * zfs_nocacheflush will cause corruption on power loss if a volatile
 * out-of-order write cache is enabled.
 */
boolean_t zfs_nocacheflush = B_FALSE;
TUNABLE_INT("vfs.zfs.cache_flush_disable", &zfs_nocacheflush);
SYSCTL_INT(_vfs_zfs, OID_AUTO, cache_flush_disable, CTLFLAG_RDTUN,
    &zfs_nocacheflush, 0, "Disable cache flush");
boolean_t zfs_trim_enabled = B_TRUE;
SYSCTL_DECL(_vfs_zfs_trim);
TUNABLE_INT("vfs.zfs.trim.enabled", &zfs_trim_enabled);
SYSCTL_INT(_vfs_zfs_trim, OID_AUTO, enabled, CTLFLAG_RDTUN, &zfs_trim_enabled, 0,
    "Enable ZFS TRIM");

static kmem_cache_t *zil_lwb_cache;

static void zil_async_to_sync(zilog_t *zilog, uint64_t foid);

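/*
 * An lwb is considered empty when no log records have been committed to
 * it yet: its unused space (lwb_sz - lwb_nused) still equals the block's
 * record capacity (logical size minus the zil_chain_t header).  This
 * holds for both the slim-ZIL and the original block layouts.
 */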
#define	LWB_EMPTY(lwb) ((BP_GET_LSIZE(&lwb->lwb_blk) - \
    sizeof (zil_chain_t)) == (lwb->lwb_sz - lwb->lwb_nused))


/*
 * ziltest is by and large an ugly hack, but very useful in
 * checking replay without tedious work.
 * When running ziltest we want to keep all itxs, so we maintain
 * a single list in zl_itxg[] that uses a high txg: ZILTEST_TXG.
 * We subtract TXG_CONCURRENT_STATES to allow for common code.
 */
#define	ZILTEST_TXG (UINT64_MAX - TXG_CONCURRENT_STATES)

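/*
 * The zl_bp_tree is an AVL tree of the DVAs of log blocks visited while
 * claiming or freeing a log chain; zil_bp_tree_add() returns EEXIST for a
 * block already seen, which lets callers skip duplicates.  Nodes are
 * ordered by vdev id, then by offset within the vdev.
 */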
static int
zil_bp_compare(const void *x1, const void *x2)
{
	const dva_t *dva1 = &((zil_bp_node_t *)x1)->zn_dva;
	const dva_t *dva2 = &((zil_bp_node_t *)x2)->zn_dva;

	if (DVA_GET_VDEV(dva1) < DVA_GET_VDEV(dva2))
		return (-1);
	if (DVA_GET_VDEV(dva1) > DVA_GET_VDEV(dva2))
		return (1);

	if (DVA_GET_OFFSET(dva1) < DVA_GET_OFFSET(dva2))
		return (-1);
	if (DVA_GET_OFFSET(dva1) > DVA_GET_OFFSET(dva2))
		return (1);

	return (0);
}

static void
zil_bp_tree_init(zilog_t *zilog)
{
	avl_create(&zilog->zl_bp_tree, zil_bp_compare,
	    sizeof (zil_bp_node_t), offsetof(zil_bp_node_t, zn_node));
}

static void
zil_bp_tree_fini(zilog_t *zilog)
{
	avl_tree_t *t = &zilog->zl_bp_tree;
	zil_bp_node_t *zn;
	void *cookie = NULL;

	while ((zn = avl_destroy_nodes(t, &cookie)) != NULL)
		kmem_free(zn, sizeof (zil_bp_node_t));

	avl_destroy(t);
}

int
zil_bp_tree_add(zilog_t *zilog, const blkptr_t *bp)
{
	avl_tree_t *t = &zilog->zl_bp_tree;
	const dva_t *dva = BP_IDENTITY(bp);
	zil_bp_node_t *zn;
	avl_index_t where;

	if (avl_find(t, dva, &where) != NULL)
		return (SET_ERROR(EEXIST));

	zn = kmem_alloc(sizeof (zil_bp_node_t), KM_SLEEP);
	zn->zn_dva = *dva;
	avl_insert(t, zn, where);

	return (0);
}

static zil_header_t *
zil_header_in_syncing_context(zilog_t *zilog)
{
	return ((zil_header_t *)zilog->zl_header);
}

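/*
 * Seed the embedded checksum that links a new chain: two random words
 * serve as the chain's GUID, the objset id identifies its owner, and the
 * block sequence starts at 1.  Each later block's expected checksum is
 * its predecessor's checksum with ZIL_ZC_SEQ incremented.
 */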
static void
zil_init_log_chain(zilog_t *zilog, blkptr_t *bp)
{
	zio_cksum_t *zc = &bp->blk_cksum;

	zc->zc_word[ZIL_ZC_GUID_0] = spa_get_random(-1ULL);
	zc->zc_word[ZIL_ZC_GUID_1] = spa_get_random(-1ULL);
	zc->zc_word[ZIL_ZC_OBJSET] = dmu_objset_id(zilog->zl_os);
	zc->zc_word[ZIL_ZC_SEQ] = 1ULL;
}

/*
 * Read a log block and make sure it's valid.
 */
static int
zil_read_log_block(zilog_t *zilog, const blkptr_t *bp, blkptr_t *nbp, void *dst,
    char **end)
{
	enum zio_flag zio_flags = ZIO_FLAG_CANFAIL;
	uint32_t aflags = ARC_WAIT;
	arc_buf_t *abuf = NULL;
	zbookmark_t zb;
	int error;

	if (zilog->zl_header->zh_claim_txg == 0)
		zio_flags |= ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB;

	if (!(zilog->zl_header->zh_flags & ZIL_CLAIM_LR_SEQ_VALID))
		zio_flags |= ZIO_FLAG_SPECULATIVE;

	SET_BOOKMARK(&zb, bp->blk_cksum.zc_word[ZIL_ZC_OBJSET],
	    ZB_ZIL_OBJECT, ZB_ZIL_LEVEL, bp->blk_cksum.zc_word[ZIL_ZC_SEQ]);

	error = arc_read(NULL, zilog->zl_spa, bp, arc_getbuf_func, &abuf,
	    ZIO_PRIORITY_SYNC_READ, zio_flags, &aflags, &zb);

	if (error == 0) {
		zio_cksum_t cksum = bp->blk_cksum;

		/*
		 * Validate the checksummed log block.
		 *
		 * Sequence numbers should be... sequential.  The checksum
		 * verifier for the next block should be bp's checksum plus 1.
		 *
		 * Also check the log chain linkage and size used.
		 */
		cksum.zc_word[ZIL_ZC_SEQ]++;

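		/*
		 * Two block layouts exist: with a ZILOG2 (slim ZIL)
		 * checksum, the zil_chain_t header sits at the start of
		 * the block and records how much of the block is used;
		 * with the original layout, it sits at the very end.
		 */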
		if (BP_GET_CHECKSUM(bp) == ZIO_CHECKSUM_ZILOG2) {
			zil_chain_t *zilc = abuf->b_data;
			char *lr = (char *)(zilc + 1);
			uint64_t len = zilc->zc_nused - sizeof (zil_chain_t);

			if (bcmp(&cksum, &zilc->zc_next_blk.blk_cksum,
			    sizeof (cksum)) || BP_IS_HOLE(&zilc->zc_next_blk)) {
				error = SET_ERROR(ECKSUM);
			} else {
				bcopy(lr, dst, len);
				*end = (char *)dst + len;
				*nbp = zilc->zc_next_blk;
			}
		} else {
			char *lr = abuf->b_data;
			uint64_t size = BP_GET_LSIZE(bp);
			zil_chain_t *zilc = (zil_chain_t *)(lr + size) - 1;

			if (bcmp(&cksum, &zilc->zc_next_blk.blk_cksum,
			    sizeof (cksum)) || BP_IS_HOLE(&zilc->zc_next_blk) ||
			    (zilc->zc_nused > (size - sizeof (*zilc)))) {
				error = SET_ERROR(ECKSUM);
			} else {
				bcopy(lr, dst, zilc->zc_nused);
				*end = (char *)dst + zilc->zc_nused;
				*nbp = zilc->zc_next_blk;
			}
		}

		VERIFY(arc_buf_remove_ref(abuf, &abuf));
	}

	return (error);
}

/*
 * Read a TX_WRITE log data block.
 */
static int
zil_read_log_data(zilog_t *zilog, const lr_write_t *lr, void *wbuf)
{
	enum zio_flag zio_flags = ZIO_FLAG_CANFAIL;
	const blkptr_t *bp = &lr->lr_blkptr;
	uint32_t aflags = ARC_WAIT;
	arc_buf_t *abuf = NULL;
	zbookmark_t zb;
	int error;

	if (BP_IS_HOLE(bp)) {
		if (wbuf != NULL)
			bzero(wbuf, MAX(BP_GET_LSIZE(bp), lr->lr_length));
		return (0);
	}

	if (zilog->zl_header->zh_claim_txg == 0)
		zio_flags |= ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB;

	SET_BOOKMARK(&zb, dmu_objset_id(zilog->zl_os), lr->lr_foid,
	    ZB_ZIL_LEVEL, lr->lr_offset / BP_GET_LSIZE(bp));

	error = arc_read(NULL, zilog->zl_spa, bp, arc_getbuf_func, &abuf,
	    ZIO_PRIORITY_SYNC_READ, zio_flags, &aflags, &zb);

	if (error == 0) {
		if (wbuf != NULL)
			bcopy(abuf->b_data, wbuf, arc_buf_size(abuf));
		(void) arc_buf_remove_ref(abuf, &abuf);
	}

	return (error);
}

/*
 * Parse the intent log, and call parse_func for each valid record within.
 */
int
zil_parse(zilog_t *zilog, zil_parse_blk_func_t *parse_blk_func,
    zil_parse_lr_func_t *parse_lr_func, void *arg, uint64_t txg)
{
	const zil_header_t *zh = zilog->zl_header;
	boolean_t claimed = !!zh->zh_claim_txg;
	uint64_t claim_blk_seq = claimed ? zh->zh_claim_blk_seq : UINT64_MAX;
	uint64_t claim_lr_seq = claimed ? zh->zh_claim_lr_seq : UINT64_MAX;
	uint64_t max_blk_seq = 0;
	uint64_t max_lr_seq = 0;
	uint64_t blk_count = 0;
	uint64_t lr_count = 0;
	blkptr_t blk, next_blk;
	char *lrbuf, *lrp;
	int error = 0;

	/*
	 * Old logs didn't record the maximum zh_claim_lr_seq.
	 */
	if (!(zh->zh_flags & ZIL_CLAIM_LR_SEQ_VALID))
		claim_lr_seq = UINT64_MAX;

	/*
	 * Starting at the block pointed to by zh_log we read the log chain.
	 * For each block in the chain we strongly check that block to
	 * ensure its validity.  We stop when an invalid block is found.
	 * For each block pointer in the chain we call parse_blk_func().
	 * For each record in each valid block we call parse_lr_func().
	 * If the log has been claimed, stop if we encounter a sequence
	 * number greater than the highest claimed sequence number.
	 */
	lrbuf = zio_buf_alloc(SPA_MAXBLOCKSIZE);
	zil_bp_tree_init(zilog);

	for (blk = zh->zh_log; !BP_IS_HOLE(&blk); blk = next_blk) {
		uint64_t blk_seq = blk.blk_cksum.zc_word[ZIL_ZC_SEQ];
		int reclen;
		char *end;

		if (blk_seq > claim_blk_seq)
			break;
		if ((error = parse_blk_func(zilog, &blk, arg, txg)) != 0)
			break;
		ASSERT3U(max_blk_seq, <, blk_seq);
		max_blk_seq = blk_seq;
		blk_count++;

		if (max_lr_seq == claim_lr_seq && max_blk_seq == claim_blk_seq)
			break;

		error = zil_read_log_block(zilog, &blk, &next_blk, lrbuf, &end);
		if (error != 0)
			break;

		for (lrp = lrbuf; lrp < end; lrp += reclen) {
			lr_t *lr = (lr_t *)lrp;
			reclen = lr->lrc_reclen;
			ASSERT3U(reclen, >=, sizeof (lr_t));
			if (lr->lrc_seq > claim_lr_seq)
				goto done;
			if ((error = parse_lr_func(zilog, lr, arg, txg)) != 0)
				goto done;
			ASSERT3U(max_lr_seq, <, lr->lrc_seq);
			max_lr_seq = lr->lrc_seq;
			lr_count++;
		}
	}
done:
	zilog->zl_parse_error = error;
	zilog->zl_parse_blk_seq = max_blk_seq;
	zilog->zl_parse_lr_seq = max_lr_seq;
	zilog->zl_parse_blk_count = blk_count;
	zilog->zl_parse_lr_count = lr_count;

	ASSERT(!claimed || !(zh->zh_flags & ZIL_CLAIM_LR_SEQ_VALID) ||
	    (max_blk_seq == claim_blk_seq && max_lr_seq == claim_lr_seq));

	zil_bp_tree_fini(zilog);
	zio_buf_free(lrbuf, SPA_MAXBLOCKSIZE);

	return (error);
}

static int
zil_claim_log_block(zilog_t *zilog, blkptr_t *bp, void *tx, uint64_t first_txg)
{
	/*
	 * Claim log block if not already committed and not already claimed.
	 * If tx == NULL, just verify that the block is claimable.
	 */
	if (bp->blk_birth < first_txg || zil_bp_tree_add(zilog, bp) != 0)
		return (0);

	return (zio_wait(zio_claim(NULL, zilog->zl_spa,
	    tx == NULL ? 0 : first_txg, bp, spa_claim_notify, NULL,
	    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB)));
}

static int
zil_claim_log_record(zilog_t *zilog, lr_t *lrc, void *tx, uint64_t first_txg)
{
	lr_write_t *lr = (lr_write_t *)lrc;
	int error;

	if (lrc->lrc_txtype != TX_WRITE)
		return (0);

	/*
	 * If the block is not readable, don't claim it.  This can happen
	 * in normal operation when a log block is written to disk before
	 * some of the dmu_sync() blocks it points to.  In this case, the
	 * transaction cannot have been committed to anyone (we would have
	 * waited for all writes to be stable first), so it is semantically
	 * correct to declare this the end of the log.
	 */
	if (lr->lr_blkptr.blk_birth >= first_txg &&
	    (error = zil_read_log_data(zilog, lr, NULL)) != 0)
		return (error);
	return (zil_claim_log_block(zilog, &lr->lr_blkptr, tx, first_txg));
}

/* ARGSUSED */
static int
zil_free_log_block(zilog_t *zilog, blkptr_t *bp, void *tx, uint64_t claim_txg)
{
	zio_free_zil(zilog->zl_spa, dmu_tx_get_txg(tx), bp);

	return (0);
}

static int
zil_free_log_record(zilog_t *zilog, lr_t *lrc, void *tx, uint64_t claim_txg)
{
	lr_write_t *lr = (lr_write_t *)lrc;
	blkptr_t *bp = &lr->lr_blkptr;

	/*
	 * If we previously claimed it, we need to free it.
	 */
	if (claim_txg != 0 && lrc->lrc_txtype == TX_WRITE &&
	    bp->blk_birth >= claim_txg && zil_bp_tree_add(zilog, bp) == 0)
		zio_free(zilog->zl_spa, dmu_tx_get_txg(tx), bp);

	return (0);
}

static lwb_t *
zil_alloc_lwb(zilog_t *zilog, blkptr_t *bp, uint64_t txg)
{
	lwb_t *lwb;

	lwb = kmem_cache_alloc(zil_lwb_cache, KM_SLEEP);
	lwb->lwb_zilog = zilog;
	lwb->lwb_blk = *bp;
	lwb->lwb_buf = zio_buf_alloc(BP_GET_LSIZE(bp));
	lwb->lwb_max_txg = txg;
	lwb->lwb_zio = NULL;
	lwb->lwb_tx = NULL;
	if (BP_GET_CHECKSUM(bp) == ZIO_CHECKSUM_ZILOG2) {
		lwb->lwb_nused = sizeof (zil_chain_t);
		lwb->lwb_sz = BP_GET_LSIZE(bp);
	} else {
		lwb->lwb_nused = 0;
		lwb->lwb_sz = BP_GET_LSIZE(bp) - sizeof (zil_chain_t);
	}

	mutex_enter(&zilog->zl_lock);
	list_insert_tail(&zilog->zl_lwb_list, lwb);
	mutex_exit(&zilog->zl_lock);

	return (lwb);
}

/*
 * Called when we create in-memory log transactions so that we know
 * to clean up the itxs at the end of spa_sync().
 */
void
zilog_dirty(zilog_t *zilog, uint64_t txg)
{
	dsl_pool_t *dp = zilog->zl_dmu_pool;
	dsl_dataset_t *ds = dmu_objset_ds(zilog->zl_os);

	if (dsl_dataset_is_snapshot(ds))
		panic("dirtying snapshot!");

	if (txg_list_add(&dp->dp_dirty_zilogs, zilog, txg)) {
		/* up the hold count until we can be written out */
		dmu_buf_add_ref(ds->ds_dbuf, zilog);
	}
}

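/*
 * Determine if the zil is dirty.  The zil is considered dirty if it is
 * on the list of dirty zilogs in any open transaction group.
 */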
boolean_t
zilog_is_dirty(zilog_t *zilog)
{
	dsl_pool_t *dp = zilog->zl_dmu_pool;

	for (int t = 0; t < TXG_SIZE; t++) {
		if (txg_list_member(&dp->dp_dirty_zilogs, zilog, t))
			return (B_TRUE);
	}
	return (B_FALSE);
}

/*
 * Create an on-disk intent log.
 */
static lwb_t *
zil_create(zilog_t *zilog)
{
	const zil_header_t *zh = zilog->zl_header;
	lwb_t *lwb = NULL;
	uint64_t txg = 0;
	dmu_tx_t *tx = NULL;
	blkptr_t blk;
	int error = 0;

	/*
	 * Wait for any previous destroy to complete.
	 */
	txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);

	ASSERT(zh->zh_claim_txg == 0);
	ASSERT(zh->zh_replay_seq == 0);

	blk = zh->zh_log;

	/*
	 * Allocate an initial log block if:
	 *    - there isn't one already
	 *    - the existing block is the wrong endianness
	 */
	if (BP_IS_HOLE(&blk) || BP_SHOULD_BYTESWAP(&blk)) {
		tx = dmu_tx_create(zilog->zl_os);
		VERIFY(dmu_tx_assign(tx, TXG_WAIT) == 0);
		dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
		txg = dmu_tx_get_txg(tx);

		if (!BP_IS_HOLE(&blk)) {
			zio_free_zil(zilog->zl_spa, txg, &blk);
			BP_ZERO(&blk);
		}

		error = zio_alloc_zil(zilog->zl_spa, txg, &blk, NULL,
		    ZIL_MIN_BLKSZ, zilog->zl_logbias == ZFS_LOGBIAS_LATENCY);

		if (error == 0)
			zil_init_log_chain(zilog, &blk);
	}

	/*
	 * Allocate a log write buffer (lwb) for the first log block.
	 */
	if (error == 0)
		lwb = zil_alloc_lwb(zilog, &blk, txg);

	/*
	 * If we just allocated the first log block, commit our transaction
	 * and wait for zil_sync() to stuff the block pointer into zh_log.
	 * (zh is part of the MOS, so we cannot modify it in open context.)
	 */
	if (tx != NULL) {
		dmu_tx_commit(tx);
		txg_wait_synced(zilog->zl_dmu_pool, txg);
	}

	ASSERT(bcmp(&blk, &zh->zh_log, sizeof (blk)) == 0);

	return (lwb);
}

/*
 * In one tx, free all log blocks and clear the log header.
 * If keep_first is set, then we're replaying a log with no content.
 * We want to keep the first block, however, so that the first
 * synchronous transaction doesn't require a txg_wait_synced()
 * in zil_create().  We don't need to txg_wait_synced() here either
 * when keep_first is set, because both zil_create() and zil_destroy()
 * will wait for any in-progress destroys to complete.
 */
void
zil_destroy(zilog_t *zilog, boolean_t keep_first)
{
	const zil_header_t *zh = zilog->zl_header;
	lwb_t *lwb;
	dmu_tx_t *tx;
	uint64_t txg;

	/*
	 * Wait for any previous destroy to complete.
	 */
	txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);

	zilog->zl_old_header = *zh;		/* debugging aid */

	if (BP_IS_HOLE(&zh->zh_log))
		return;

	tx = dmu_tx_create(zilog->zl_os);
	VERIFY(dmu_tx_assign(tx, TXG_WAIT) == 0);
	dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
	txg = dmu_tx_get_txg(tx);

	mutex_enter(&zilog->zl_lock);

	ASSERT3U(zilog->zl_destroy_txg, <, txg);
	zilog->zl_destroy_txg = txg;
	zilog->zl_keep_first = keep_first;

	if (!list_is_empty(&zilog->zl_lwb_list)) {
		ASSERT(zh->zh_claim_txg == 0);
		VERIFY(!keep_first);
		while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
			list_remove(&zilog->zl_lwb_list, lwb);
			if (lwb->lwb_buf != NULL)
				zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
			zio_free_zil(zilog->zl_spa, txg, &lwb->lwb_blk);
			kmem_cache_free(zil_lwb_cache, lwb);
		}
	} else if (!keep_first) {
		zil_destroy_sync(zilog, tx);
	}
	mutex_exit(&zilog->zl_lock);

	dmu_tx_commit(tx);
}

void
zil_destroy_sync(zilog_t *zilog, dmu_tx_t *tx)
{
	ASSERT(list_is_empty(&zilog->zl_lwb_list));
	(void) zil_parse(zilog, zil_free_log_block,
	    zil_free_log_record, tx, zilog->zl_header->zh_claim_txg);
}

int
zil_claim(const char *osname, void *txarg)
{
	dmu_tx_t *tx = txarg;
	uint64_t first_txg = dmu_tx_get_txg(tx);
	zilog_t *zilog;
	zil_header_t *zh;
	objset_t *os;
	int error;

	error = dmu_objset_own(osname, DMU_OST_ANY, B_FALSE, FTAG, &os);
	if (error != 0) {
		cmn_err(CE_WARN, "can't open objset for %s", osname);
		return (0);
	}

	zilog = dmu_objset_zil(os);
	zh = zil_header_in_syncing_context(zilog);

	if (spa_get_log_state(zilog->zl_spa) == SPA_LOG_CLEAR) {
		if (!BP_IS_HOLE(&zh->zh_log))
			zio_free_zil(zilog->zl_spa, first_txg, &zh->zh_log);
		BP_ZERO(&zh->zh_log);
		dsl_dataset_dirty(dmu_objset_ds(os), tx);
		dmu_objset_disown(os, FTAG);
		return (0);
	}

	/*
	 * Claim all log blocks if we haven't already done so, and remember
	 * the highest claimed sequence number.  This ensures that if we can
	 * read only part of the log now (e.g. due to a missing device),
	 * but we can read the entire log later, we will not try to replay
	 * or destroy beyond the last block we successfully claimed.
	 */
	ASSERT3U(zh->zh_claim_txg, <=, first_txg);
	if (zh->zh_claim_txg == 0 && !BP_IS_HOLE(&zh->zh_log)) {
		(void) zil_parse(zilog, zil_claim_log_block,
		    zil_claim_log_record, tx, first_txg);
		zh->zh_claim_txg = first_txg;
		zh->zh_claim_blk_seq = zilog->zl_parse_blk_seq;
		zh->zh_claim_lr_seq = zilog->zl_parse_lr_seq;
		if (zilog->zl_parse_lr_count || zilog->zl_parse_blk_count > 1)
			zh->zh_flags |= ZIL_REPLAY_NEEDED;
		zh->zh_flags |= ZIL_CLAIM_LR_SEQ_VALID;
		dsl_dataset_dirty(dmu_objset_ds(os), tx);
	}

	ASSERT3U(first_txg, ==, (spa_last_synced_txg(zilog->zl_spa) + 1));
	dmu_objset_disown(os, FTAG);
	return (0);
}

/*
 * Check the log by walking the log chain.
 * Checksum errors are ok as they indicate the end of the chain.
 * Any other error (no device or read failure) returns an error.
 */
int
zil_check_log_chain(const char *osname, void *tx)
{
	zilog_t *zilog;
	objset_t *os;
	blkptr_t *bp;
	int error;

	ASSERT(tx == NULL);

	error = dmu_objset_hold(osname, FTAG, &os);
	if (error != 0) {
		cmn_err(CE_WARN, "can't open objset for %s", osname);
		return (0);
	}

	zilog = dmu_objset_zil(os);
	bp = (blkptr_t *)&zilog->zl_header->zh_log;

	/*
	 * Check the first block and determine if it's on a log device
	 * which may have been removed or faulted prior to loading this
	 * pool.  If so, there's no point in checking the rest of the log
	 * as its content should have already been synced to the pool.
	 */
	if (!BP_IS_HOLE(bp)) {
		vdev_t *vd;
		boolean_t valid = B_TRUE;

		spa_config_enter(os->os_spa, SCL_STATE, FTAG, RW_READER);
		vd = vdev_lookup_top(os->os_spa, DVA_GET_VDEV(&bp->blk_dva[0]));
		if (vd->vdev_islog && vdev_is_dead(vd))
			valid = vdev_log_state_valid(vd);
		spa_config_exit(os->os_spa, SCL_STATE, FTAG);

		if (!valid) {
			dmu_objset_rele(os, FTAG);
			return (0);
		}
	}

	/*
	 * Because tx == NULL, zil_claim_log_block() will not actually claim
	 * any blocks, but just determine whether it is possible to do so.
	 * In addition to checking the log chain, zil_claim_log_block()
	 * will invoke zio_claim() with a done func of spa_claim_notify(),
	 * which will update spa_max_claim_txg.  See spa_load() for details.
	 */
	error = zil_parse(zilog, zil_claim_log_block, zil_claim_log_record, tx,
	    zilog->zl_header->zh_claim_txg ? -1ULL : spa_first_txg(os->os_spa));

	dmu_objset_rele(os, FTAG);

	return ((error == ECKSUM || error == ENOENT) ? 0 : error);
}

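/*
 * AVL comparator for zl_vdev_tree, the set of top-level vdevs touched by
 * log writes and still awaiting a write-cache flush; nodes are ordered by
 * vdev id.
 */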
static int
zil_vdev_compare(const void *x1, const void *x2)
{
	const uint64_t v1 = ((zil_vdev_node_t *)x1)->zv_vdev;
	const uint64_t v2 = ((zil_vdev_node_t *)x2)->zv_vdev;

	if (v1 < v2)
		return (-1);
	if (v1 > v2)
		return (1);

	return (0);
}

void
zil_add_block(zilog_t *zilog, const blkptr_t *bp)
{
	avl_tree_t *t = &zilog->zl_vdev_tree;
	avl_index_t where;
	zil_vdev_node_t *zv, zvsearch;
	int ndvas = BP_GET_NDVAS(bp);
	int i;

	if (zfs_nocacheflush)
		return;

	ASSERT(zilog->zl_writer);

	/*
	 * Even though we're zl_writer, we still need a lock because the
	 * zl_get_data() callbacks may have dmu_sync() done callbacks
	 * that will run concurrently.
	 */
	mutex_enter(&zilog->zl_vdev_lock);
	for (i = 0; i < ndvas; i++) {
		zvsearch.zv_vdev = DVA_GET_VDEV(&bp->blk_dva[i]);
		if (avl_find(t, &zvsearch, &where) == NULL) {
			zv = kmem_alloc(sizeof (*zv), KM_SLEEP);
			zv->zv_vdev = zvsearch.zv_vdev;
			avl_insert(t, zv, where);
		}
	}
	mutex_exit(&zilog->zl_vdev_lock);
}

static void
zil_flush_vdevs(zilog_t *zilog)
{
	spa_t *spa = zilog->zl_spa;
	avl_tree_t *t = &zilog->zl_vdev_tree;
	void *cookie = NULL;
	zil_vdev_node_t *zv;
	zio_t *zio;

	ASSERT(zilog->zl_writer);

	/*
	 * We don't need zl_vdev_lock here because we're the zl_writer,
	 * and all zl_get_data() callbacks are done.
	 */
	if (avl_numnodes(t) == 0)
		return;

	spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);

	zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);

	while ((zv = avl_destroy_nodes(t, &cookie)) != NULL) {
		vdev_t *vd = vdev_lookup_top(spa, zv->zv_vdev);
		if (vd != NULL)
			zio_flush(zio, vd);
		kmem_free(zv, sizeof (*zv));
	}

	/*
	 * Wait for all the flushes to complete.  Not all devices actually
	 * support the DKIOCFLUSHWRITECACHE ioctl, so it's OK if it fails.
	 */
	(void) zio_wait(zio);

	spa_config_exit(spa, SCL_STATE, FTAG);
}

/*
 * Function called when a log block write completes
 */
static void
zil_lwb_write_done(zio_t *zio)
{
	lwb_t *lwb = zio->io_private;
	zilog_t *zilog = lwb->lwb_zilog;
	dmu_tx_t *tx = lwb->lwb_tx;

	ASSERT(BP_GET_COMPRESS(zio->io_bp) == ZIO_COMPRESS_OFF);
	ASSERT(BP_GET_TYPE(zio->io_bp) == DMU_OT_INTENT_LOG);
	ASSERT(BP_GET_LEVEL(zio->io_bp) == 0);
	ASSERT(BP_GET_BYTEORDER(zio->io_bp) == ZFS_HOST_BYTEORDER);
	ASSERT(!BP_IS_GANG(zio->io_bp));
	ASSERT(!BP_IS_HOLE(zio->io_bp));
	ASSERT(zio->io_bp->blk_fill == 0);

	/*
	 * Ensure the lwb buffer pointer is cleared before releasing
	 * the txg. If we have had an allocation failure and
	 * the txg is waiting to sync then we want zil_sync()
	 * to remove the lwb so that it's not picked up as the next new
	 * one in zil_commit_writer(). zil_sync() will only remove
	 * the lwb if lwb_buf is null.
	 */
	zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
	mutex_enter(&zilog->zl_lock);
	lwb->lwb_buf = NULL;
	lwb->lwb_tx = NULL;
	mutex_exit(&zilog->zl_lock);

	/*
	 * Now that we've written this log block, we have a stable pointer
	 * to the next block in the chain, so it's OK to let the txg in
	 * which we allocated the next block sync.
	 */
	dmu_tx_commit(tx);
}

/*
 * Initialize the io for a log block.
 */
static void
zil_lwb_write_init(zilog_t *zilog, lwb_t *lwb)
{
	zbookmark_t zb;

	SET_BOOKMARK(&zb, lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_OBJSET],
	    ZB_ZIL_OBJECT, ZB_ZIL_LEVEL,
	    lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_SEQ]);

	if (zilog->zl_root_zio == NULL) {
		zilog->zl_root_zio = zio_root(zilog->zl_spa, NULL, NULL,
		    ZIO_FLAG_CANFAIL);
	}
	if (lwb->lwb_zio == NULL) {
		lwb->lwb_zio = zio_rewrite(zilog->zl_root_zio, zilog->zl_spa,
		    0, &lwb->lwb_blk, lwb->lwb_buf, BP_GET_LSIZE(&lwb->lwb_blk),
		    zil_lwb_write_done, lwb, ZIO_PRIORITY_SYNC_WRITE,
		    ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_PROPAGATE, &zb);
	}
}

/*
 * Define a limited set of intent log block sizes.
 *
 * These must be a multiple of 4KB. Note only the amount used (again
 * aligned to 4KB) actually gets written. However, we can't always just
 * allocate SPA_MAXBLOCKSIZE as the slog space could be exhausted.
 */
uint64_t zil_block_buckets[] = {
    4096,		/* non TX_WRITE */
    8192+4096,		/* database */
    32*1024 + 4096,	/* NFS writes */
    UINT64_MAX
};
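
/*
 * For example, a commit holding 2KB of log records selects the 4KB
 * bucket, 10KB of records (plus the zil_chain_t header) selects the
 * 12KB bucket, and anything over 36KB falls through to
 * SPA_MAXBLOCKSIZE; see zil_lwb_write_start() below.
 */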

/*
 * Use the slog as long as the logbias is 'latency' and the current commit size
 * is less than the limit or the total list size is less than 2X the limit.
 * Limit checking is disabled by setting zil_slog_limit to UINT64_MAX.
 */
uint64_t zil_slog_limit = 1024 * 1024;
#define	USE_SLOG(zilog) (((zilog)->zl_logbias == ZFS_LOGBIAS_LATENCY) && \
	(((zilog)->zl_cur_used < zil_slog_limit) || \
	((zilog)->zl_itx_list_sz < (zil_slog_limit << 1))))
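
/*
 * With the default 1MB zil_slog_limit, for example, a latency-biased
 * dataset keeps allocating from the separate log device while the current
 * commit has accumulated less than 1MB or the total itx list holds less
 * than 2MB; beyond that, log blocks come from the main pool.
 */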

/*
 * Start a log block write and advance to the next log block.
 * Calls are serialized.
 */
static lwb_t *
zil_lwb_write_start(zilog_t *zilog, lwb_t *lwb)
{
	lwb_t *nlwb = NULL;
	zil_chain_t *zilc;
	spa_t *spa = zilog->zl_spa;
	blkptr_t *bp;
	dmu_tx_t *tx;
	uint64_t txg;
	uint64_t zil_blksz, wsz;
	int i, error;

	if (BP_GET_CHECKSUM(&lwb->lwb_blk) == ZIO_CHECKSUM_ZILOG2) {
		zilc = (zil_chain_t *)lwb->lwb_buf;
		bp = &zilc->zc_next_blk;
	} else {
		zilc = (zil_chain_t *)(lwb->lwb_buf + lwb->lwb_sz);
		bp = &zilc->zc_next_blk;
	}

	ASSERT(lwb->lwb_nused <= lwb->lwb_sz);

	/*
	 * Allocate the next block and save its address in this block
	 * before writing it in order to establish the log chain.
	 * Note that if the allocation of nlwb synced before we wrote
	 * the block that points at it (lwb), we'd leak it if we crashed.
	 * Therefore, we don't do dmu_tx_commit() until zil_lwb_write_done().
	 * We dirty the dataset to ensure that zil_sync() will be called
	 * to clean up in the event of allocation failure or I/O failure.
	 */
	tx = dmu_tx_create(zilog->zl_os);
	VERIFY(dmu_tx_assign(tx, TXG_WAIT) == 0);
	dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
	txg = dmu_tx_get_txg(tx);

	lwb->lwb_tx = tx;

	/*
	 * Log blocks are pre-allocated. Here we select the size of the next
	 * block, based on size used in the last block.
	 * - first find the smallest bucket that will fit the block from a
	 *   limited set of block sizes. This is because it's faster to write
	 *   blocks allocated from the same metaslab as they are adjacent or
	 *   close.
	 * - next find the maximum from the new suggested size and an array of
	 *   previous sizes. This lessens a picket fence effect of wrongly
	 *   guessing the size if we have a stream of say 2k, 64k, 2k, 64k
	 *   requests.
	 *
	 * Note we only write what is used, but we can't just allocate
	 * the maximum block size because we can exhaust the available
	 * pool log space.
	 */
	zil_blksz = zilog->zl_cur_used + sizeof (zil_chain_t);
	for (i = 0; zil_blksz > zil_block_buckets[i]; i++)
		continue;
	zil_blksz = zil_block_buckets[i];
	if (zil_blksz == UINT64_MAX)
		zil_blksz = SPA_MAXBLOCKSIZE;
	zilog->zl_prev_blks[zilog->zl_prev_rotor] = zil_blksz;
	for (i = 0; i < ZIL_PREV_BLKS; i++)
		zil_blksz = MAX(zil_blksz, zilog->zl_prev_blks[i]);
	zilog->zl_prev_rotor = (zilog->zl_prev_rotor + 1) & (ZIL_PREV_BLKS - 1);

	BP_ZERO(bp);
	/* pass the old blkptr in order to spread log blocks across devs */
	error = zio_alloc_zil(spa, txg, bp, &lwb->lwb_blk, zil_blksz,
	    USE_SLOG(zilog));
	if (error == 0) {
		ASSERT3U(bp->blk_birth, ==, txg);
		bp->blk_cksum = lwb->lwb_blk.blk_cksum;
		bp->blk_cksum.zc_word[ZIL_ZC_SEQ]++;

		/*
		 * Allocate a new log write buffer (lwb).
		 */
		nlwb = zil_alloc_lwb(zilog, bp, txg);

		/* Record the block for later vdev flushing */
		zil_add_block(zilog, &lwb->lwb_blk);
	}

	if (BP_GET_CHECKSUM(&lwb->lwb_blk) == ZIO_CHECKSUM_ZILOG2) {
		/* For Slim ZIL only write what is used. */
		wsz = P2ROUNDUP_TYPED(lwb->lwb_nused, ZIL_MIN_BLKSZ, uint64_t);
		ASSERT3U(wsz, <=, lwb->lwb_sz);
		zio_shrink(lwb->lwb_zio, wsz);

	} else {
		wsz = lwb->lwb_sz;
	}

	zilc->zc_pad = 0;
	zilc->zc_nused = lwb->lwb_nused;
	zilc->zc_eck.zec_cksum = lwb->lwb_blk.blk_cksum;

	/*
	 * clear unused data for security
	 */
	bzero(lwb->lwb_buf + lwb->lwb_nused, wsz - lwb->lwb_nused);

	zio_nowait(lwb->lwb_zio); /* Kick off the write for the old log block */

	/*
	 * If there was an allocation failure then nlwb will be null which
	 * forces a txg_wait_synced().
	 */
	return (nlwb);
}

static lwb_t *
zil_lwb_commit(zilog_t *zilog, itx_t *itx, lwb_t *lwb)
{
	lr_t *lrc = &itx->itx_lr; /* common log record */
	lr_write_t *lrw = (lr_write_t *)lrc;
	char *lr_buf;
	uint64_t txg = lrc->lrc_txg;
	uint64_t reclen = lrc->lrc_reclen;
	uint64_t dlen = 0;

	if (lwb == NULL)
		return (NULL);

	ASSERT(lwb->lwb_buf != NULL);
	ASSERT(zilog_is_dirty(zilog) ||
	    spa_freeze_txg(zilog->zl_spa) != UINT64_MAX);

	if (lrc->lrc_txtype == TX_WRITE && itx->itx_wr_state == WR_NEED_COPY)
		dlen = P2ROUNDUP_TYPED(
		    lrw->lr_length, sizeof (uint64_t), uint64_t);

	zilog->zl_cur_used += (reclen + dlen);

	zil_lwb_write_init(zilog, lwb);

	/*
	 * If this record won't fit in the current log block, start a new one.
	 */
	if (lwb->lwb_nused + reclen + dlen > lwb->lwb_sz) {
		lwb = zil_lwb_write_start(zilog, lwb);
		if (lwb == NULL)
			return (NULL);
		zil_lwb_write_init(zilog, lwb);
		ASSERT(LWB_EMPTY(lwb));
		if (lwb->lwb_nused + reclen + dlen > lwb->lwb_sz) {
			txg_wait_synced(zilog->zl_dmu_pool, txg);
			return (lwb);
		}
	}

	lr_buf = lwb->lwb_buf + lwb->lwb_nused;
	bcopy(lrc, lr_buf, reclen);
	lrc = (lr_t *)lr_buf;
	lrw = (lr_write_t *)lrc;

	/*
	 * If it's a write, fetch the data or get its blkptr as appropriate.
	 */
	if (lrc->lrc_txtype == TX_WRITE) {
		if (txg > spa_freeze_txg(zilog->zl_spa))
			txg_wait_synced(zilog->zl_dmu_pool, txg);
		if (itx->itx_wr_state != WR_COPIED) {
			char *dbuf;
			int error;

			if (dlen) {
				ASSERT(itx->itx_wr_state == WR_NEED_COPY);
				dbuf = lr_buf + reclen;
				lrw->lr_common.lrc_reclen += dlen;
			} else {
				ASSERT(itx->itx_wr_state == WR_INDIRECT);
				dbuf = NULL;
			}
			error = zilog->zl_get_data(
			    itx->itx_private, lrw, dbuf, lwb->lwb_zio);
			if (error == EIO) {
				txg_wait_synced(zilog->zl_dmu_pool, txg);
				return (lwb);
			}
			if (error != 0) {
				ASSERT(error == ENOENT || error == EEXIST ||
				    error == EALREADY);
				return (lwb);
			}
		}
	}

	/*
	 * We're actually making an entry, so update lrc_seq to be the
	 * log record sequence number.  Note that this is generally not
	 * equal to the itx sequence number because not all transactions
	 * are synchronous, and sometimes spa_sync() gets there first.
	 */
	lrc->lrc_seq = ++zilog->zl_lr_seq; /* we are single threaded */
	lwb->lwb_nused += reclen + dlen;
	lwb->lwb_max_txg = MAX(lwb->lwb_max_txg, txg);
	ASSERT3U(lwb->lwb_nused, <=, lwb->lwb_sz);
	ASSERT0(P2PHASE(lwb->lwb_nused, sizeof (uint64_t)));

	return (lwb);
}

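/*
 * Allocate an in-memory intent log transaction (itx).  lrsize is the size
 * of the log record, including the lr_t header; it is rounded up to 8-byte
 * alignment and the caller fills in the record body behind the itx header.
 */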
itx_t *
zil_itx_create(uint64_t txtype, size_t lrsize)
{
	itx_t *itx;

	lrsize = P2ROUNDUP_TYPED(lrsize, sizeof (uint64_t), size_t);

	itx = kmem_alloc(offsetof(itx_t, itx_lr) + lrsize, KM_SLEEP);
	itx->itx_lr.lrc_txtype = txtype;
	itx->itx_lr.lrc_reclen = lrsize;
	itx->itx_sod = lrsize; /* if write & WR_NEED_COPY will be increased */
	itx->itx_lr.lrc_seq = 0;	/* defensive */
	itx->itx_sync = B_TRUE;		/* default is synchronous */

	return (itx);
}

void
zil_itx_destroy(itx_t *itx)
{
	kmem_free(itx, offsetof(itx_t, itx_lr) + itx->itx_lr.lrc_reclen);
}

/*
 * Free up the sync and async itxs. The itxs_t has already been detached
 * so no locks are needed.
 */
static void
zil_itxg_clean(itxs_t *itxs)
{
	itx_t *itx;
	list_t *list;
	avl_tree_t *t;
	void *cookie;
	itx_async_node_t *ian;

	list = &itxs->i_sync_list;
	while ((itx = list_head(list)) != NULL) {
		list_remove(list, itx);
		kmem_free(itx, offsetof(itx_t, itx_lr) +
		    itx->itx_lr.lrc_reclen);
	}

	cookie = NULL;
	t = &itxs->i_async_tree;
	while ((ian = avl_destroy_nodes(t, &cookie)) != NULL) {
		list = &ian->ia_list;
		while ((itx = list_head(list)) != NULL) {
			list_remove(list, itx);
			kmem_free(itx, offsetof(itx_t, itx_lr) +
			    itx->itx_lr.lrc_reclen);
		}
		list_destroy(list);
		kmem_free(ian, sizeof (itx_async_node_t));
	}
	avl_destroy(t);

	kmem_free(itxs, sizeof (itxs_t));
}

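/*
 * AVL comparator for the per-txg i_async_tree: async itx nodes are
 * ordered by the object id (ia_foid) they apply to.
 */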
static int
zil_aitx_compare(const void *x1, const void *x2)
{
	const uint64_t o1 = ((itx_async_node_t *)x1)->ia_foid;
	const uint64_t o2 = ((itx_async_node_t *)x2)->ia_foid;

	if (o1 < o2)
		return (-1);
	if (o1 > o2)
		return (1);

	return (0);
}

/*
 * Remove all async itx with the given oid.
 */
static void
zil_remove_async(zilog_t *zilog, uint64_t oid)
{
	uint64_t otxg, txg;
	itx_async_node_t *ian;
	avl_tree_t *t;
	avl_index_t where;
	list_t clean_list;
	itx_t *itx;

	ASSERT(oid != 0);
	list_create(&clean_list, sizeof (itx_t), offsetof(itx_t, itx_node));

	if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */
		otxg = ZILTEST_TXG;
	else
		otxg = spa_last_synced_txg(zilog->zl_spa) + 1;

	for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) {
		itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK];

		mutex_enter(&itxg->itxg_lock);
		if (itxg->itxg_txg != txg) {
			mutex_exit(&itxg->itxg_lock);
			continue;
		}

		/*
		 * Locate the object node and append its list.
		 */
		t = &itxg->itxg_itxs->i_async_tree;
		ian = avl_find(t, &oid, &where);
		if (ian != NULL)
			list_move_tail(&clean_list, &ian->ia_list);
		mutex_exit(&itxg->itxg_lock);
	}
	while ((itx = list_head(&clean_list)) != NULL) {
		list_remove(&clean_list, itx);
		kmem_free(itx, offsetof(itx_t, itx_lr) +
		    itx->itx_lr.lrc_reclen);
	}
	list_destroy(&clean_list);
}

void
zil_itx_assign(zilog_t *zilog, itx_t *itx, dmu_tx_t *tx)
{
	uint64_t txg;
	itxg_t *itxg;
	itxs_t *itxs, *clean = NULL;

	/*
	 * Object ids can be re-instantiated in the next txg so
	 * remove any async transactions to avoid future leaks.
	 * This can happen if a fsync occurs on the re-instantiated
	 * object for a WR_INDIRECT or WR_NEED_COPY write, which gets
	 * the new file data and flushes a write record for the old object.
	 */
	if ((itx->itx_lr.lrc_txtype & ~TX_CI) == TX_REMOVE)
		zil_remove_async(zilog, itx->itx_oid);

	/*
	 * Ensure the data of a renamed file is committed before the rename.
	 */
	if ((itx->itx_lr.lrc_txtype & ~TX_CI) == TX_RENAME)
		zil_async_to_sync(zilog, itx->itx_oid);

	if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX)
		txg = ZILTEST_TXG;
	else
		txg = dmu_tx_get_txg(tx);

	itxg = &zilog->zl_itxg[txg & TXG_MASK];
	mutex_enter(&itxg->itxg_lock);
	itxs = itxg->itxg_itxs;
	if (itxg->itxg_txg != txg) {
		if (itxs != NULL) {
			/*
			 * The zil_clean callback hasn't got around to cleaning
			 * this itxg. Save the itxs for release below.
			 * This should be rare.
			 */
			atomic_add_64(&zilog->zl_itx_list_sz, -itxg->itxg_sod);
			itxg->itxg_sod = 0;
			clean = itxg->itxg_itxs;
		}
		ASSERT(itxg->itxg_sod == 0);
		itxg->itxg_txg = txg;
		itxs = itxg->itxg_itxs = kmem_zalloc(sizeof (itxs_t), KM_SLEEP);

		list_create(&itxs->i_sync_list, sizeof (itx_t),
		    offsetof(itx_t, itx_node));
		avl_create(&itxs->i_async_tree, zil_aitx_compare,
		    sizeof (itx_async_node_t),
		    offsetof(itx_async_node_t, ia_node));
	}
	if (itx->itx_sync) {
		list_insert_tail(&itxs->i_sync_list, itx);
		atomic_add_64(&zilog->zl_itx_list_sz, itx->itx_sod);
		itxg->itxg_sod += itx->itx_sod;
	} else {
		avl_tree_t *t = &itxs->i_async_tree;
		uint64_t foid = ((lr_ooo_t *)&itx->itx_lr)->lr_foid;
		itx_async_node_t *ian;
		avl_index_t where;

		ian = avl_find(t, &foid, &where);
		if (ian == NULL) {
			ian = kmem_alloc(sizeof (itx_async_node_t), KM_SLEEP);
			list_create(&ian->ia_list, sizeof (itx_t),
			    offsetof(itx_t, itx_node));
			ian->ia_foid = foid;
			avl_insert(t, ian, where);
		}
		list_insert_tail(&ian->ia_list, itx);
	}

	itx->itx_lr.lrc_txg = dmu_tx_get_txg(tx);
	zilog_dirty(zilog, txg);
	mutex_exit(&itxg->itxg_lock);

	/* Release the old itxs now we've dropped the lock */
	if (clean != NULL)
		zil_itxg_clean(clean);
}

/*
 * If there are any in-memory intent log transactions which have now been
 * synced then start up a taskq to free them. We should only do this after we
 * have written out the uberblocks (i.e. txg has been committed) so that we
 * don't inadvertently clean out in-memory log records that would be required
 * by zil_commit().
 */
1333168404Spjdvoid
1334219089Spjdzil_clean(zilog_t *zilog, uint64_t synced_txg)
1335168404Spjd{
1336219089Spjd	itxg_t *itxg = &zilog->zl_itxg[synced_txg & TXG_MASK];
1337219089Spjd	itxs_t *clean_me;
1338168404Spjd
1339219089Spjd	mutex_enter(&itxg->itxg_lock);
1340219089Spjd	if (itxg->itxg_itxs == NULL || itxg->itxg_txg == ZILTEST_TXG) {
1341219089Spjd		mutex_exit(&itxg->itxg_lock);
1342219089Spjd		return;
1343168404Spjd	}
1344219089Spjd	ASSERT3U(itxg->itxg_txg, <=, synced_txg);
1345219089Spjd	ASSERT(itxg->itxg_txg != 0);
1346219089Spjd	ASSERT(zilog->zl_clean_taskq != NULL);
1347219089Spjd	atomic_add_64(&zilog->zl_itx_list_sz, -itxg->itxg_sod);
1348219089Spjd	itxg->itxg_sod = 0;
1349219089Spjd	clean_me = itxg->itxg_itxs;
1350219089Spjd	itxg->itxg_itxs = NULL;
1351219089Spjd	itxg->itxg_txg = 0;
1352219089Spjd	mutex_exit(&itxg->itxg_lock);
1353219089Spjd	/*
1354219089Spjd	 * Preferably start a task queue to free up the old itxs but
1355219089Spjd	 * if taskq_dispatch can't allocate resources to do that then
1356219089Spjd	 * free it in-line. This should be rare. Note, using TQ_SLEEP
1357219089Spjd	 * created a bad performance problem.
1358219089Spjd	 */
1359219089Spjd	if (taskq_dispatch(zilog->zl_clean_taskq,
1360219089Spjd	    (void (*)(void *))zil_itxg_clean, clean_me, TQ_NOSLEEP) == 0)
1361219089Spjd		zil_itxg_clean(clean_me);
1362168404Spjd}
1363168404Spjd
1364219089Spjd/*
1365219089Spjd * Get the list of itxs to commit into zl_itx_commit_list.
1366219089Spjd */
1367185029Spjdstatic void
1368219089Spjdzil_get_commit_list(zilog_t *zilog)
1369168404Spjd{
1370219089Spjd	uint64_t otxg, txg;
1371219089Spjd	list_t *commit_list = &zilog->zl_itx_commit_list;
1372219089Spjd	uint64_t push_sod = 0;
1373219089Spjd
1374219089Spjd	if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */
1375219089Spjd		otxg = ZILTEST_TXG;
1376219089Spjd	else
1377219089Spjd		otxg = spa_last_synced_txg(zilog->zl_spa) + 1;
1378219089Spjd
1379219089Spjd	for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) {
1380219089Spjd		itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK];
1381219089Spjd
1382219089Spjd		mutex_enter(&itxg->itxg_lock);
1383219089Spjd		if (itxg->itxg_txg != txg) {
1384219089Spjd			mutex_exit(&itxg->itxg_lock);
1385219089Spjd			continue;
1386219089Spjd		}
1387219089Spjd
1388219089Spjd		list_move_tail(commit_list, &itxg->itxg_itxs->i_sync_list);
1389219089Spjd		push_sod += itxg->itxg_sod;
1390219089Spjd		itxg->itxg_sod = 0;
1391219089Spjd
1392219089Spjd		mutex_exit(&itxg->itxg_lock);
1393219089Spjd	}
1394219089Spjd	atomic_add_64(&zilog->zl_itx_list_sz, -push_sod);
1395219089Spjd}
1396219089Spjd
1397219089Spjd/*
1398219089Spjd * Move the async itxs for a specified object to commit into sync lists.
1399219089Spjd */
1400219089Spjdstatic void
1401219089Spjdzil_async_to_sync(zilog_t *zilog, uint64_t foid)
1402219089Spjd{
1403219089Spjd	uint64_t otxg, txg;
1404219089Spjd	itx_async_node_t *ian;
1405219089Spjd	avl_tree_t *t;
1406219089Spjd	avl_index_t where;
1407219089Spjd
1408219089Spjd	if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */
1409219089Spjd		otxg = ZILTEST_TXG;
1410219089Spjd	else
1411219089Spjd		otxg = spa_last_synced_txg(zilog->zl_spa) + 1;
1412219089Spjd
1413219089Spjd	for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) {
1414219089Spjd		itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK];
1415219089Spjd
1416219089Spjd		mutex_enter(&itxg->itxg_lock);
1417219089Spjd		if (itxg->itxg_txg != txg) {
1418219089Spjd			mutex_exit(&itxg->itxg_lock);
1419219089Spjd			continue;
1420219089Spjd		}
1421219089Spjd
1422219089Spjd		/*
1423219089Spjd		 * If a foid is specified then find that node and append its
1424219089Spjd		 * list to the sync list. Otherwise walk the tree appending
1425219089Spjd		 * all the lists to the sync list. We add to the end rather
1426219089Spjd		 * than the beginning to ensure the create has happened first.
1427219089Spjd		 */
1428219089Spjd		t = &itxg->itxg_itxs->i_async_tree;
1429219089Spjd		if (foid != 0) {
1430219089Spjd			ian = avl_find(t, &foid, &where);
1431219089Spjd			if (ian != NULL) {
1432219089Spjd				list_move_tail(&itxg->itxg_itxs->i_sync_list,
1433219089Spjd				    &ian->ia_list);
1434219089Spjd			}
1435219089Spjd		} else {
1436219089Spjd			void *cookie = NULL;
1437219089Spjd
1438219089Spjd			while ((ian = avl_destroy_nodes(t, &cookie)) != NULL) {
1439219089Spjd				list_move_tail(&itxg->itxg_itxs->i_sync_list,
1440219089Spjd				    &ian->ia_list);
1441219089Spjd				list_destroy(&ian->ia_list);
1442219089Spjd				kmem_free(ian, sizeof (itx_async_node_t));
1443219089Spjd			}
1444219089Spjd		}
1445219089Spjd		mutex_exit(&itxg->itxg_lock);
1446219089Spjd	}
1447219089Spjd}
1448219089Spjd
1449219089Spjdstatic void
1450219089Spjdzil_commit_writer(zilog_t *zilog)
1451219089Spjd{
1452168404Spjd	uint64_t txg;
1453219089Spjd	itx_t *itx;
1454168404Spjd	lwb_t *lwb;
1455219089Spjd	spa_t *spa = zilog->zl_spa;
1456219089Spjd	int error = 0;
1457168404Spjd
1458185029Spjd	ASSERT(zilog->zl_root_zio == NULL);
1459168404Spjd
1460219089Spjd	mutex_exit(&zilog->zl_lock);
1461219089Spjd
1462219089Spjd	zil_get_commit_list(zilog);
1463219089Spjd
1464219089Spjd	/*
1465219089Spjd	 * Return if there's nothing to commit, before we dirty the fs by
1466219089Spjd	 * calling zil_create().
1467219089Spjd	 */
1468219089Spjd	if (list_head(&zilog->zl_itx_commit_list) == NULL) {
1469219089Spjd		mutex_enter(&zilog->zl_lock);
1470219089Spjd		return;
1471219089Spjd	}
1472219089Spjd
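	/*
	 * While the zil is suspended we must not write new log blocks, so
	 * lwb is left NULL; the txg_wait_synced() below then provides the
	 * synchronous guarantee instead.
	 */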
1473168404Spjd	if (zilog->zl_suspend) {
1474168404Spjd		lwb = NULL;
1475168404Spjd	} else {
1476168404Spjd		lwb = list_tail(&zilog->zl_lwb_list);
1477219089Spjd		if (lwb == NULL)
1478219089Spjd			lwb = zil_create(zilog);
1479168404Spjd	}
1480168404Spjd
1481168404Spjd	DTRACE_PROBE1(zil__cw1, zilog_t *, zilog);
1482219089Spjd	while ((itx = list_head(&zilog->zl_itx_commit_list)) != NULL) {
1483168404Spjd		txg = itx->itx_lr.lrc_txg;
1484168404Spjd		ASSERT(txg);
1485168404Spjd
1486219089Spjd		if (txg > spa_last_synced_txg(spa) || txg > spa_freeze_txg(spa))
1487168404Spjd			lwb = zil_lwb_commit(zilog, itx, lwb);
1488219089Spjd		list_remove(&zilog->zl_itx_commit_list, itx);
1489168404Spjd		kmem_free(itx, offsetof(itx_t, itx_lr)
1490168404Spjd		    + itx->itx_lr.lrc_reclen);
1491168404Spjd	}
1492168404Spjd	DTRACE_PROBE1(zil__cw2, zilog_t *, zilog);
1493168404Spjd
1494168404Spjd	/* write the last block out */
1495168404Spjd	if (lwb != NULL && lwb->lwb_zio != NULL)
1496168404Spjd		lwb = zil_lwb_write_start(zilog, lwb);
1497168404Spjd
1498168404Spjd	zilog->zl_cur_used = 0;
1499168404Spjd
1500168404Spjd	/*
1501168404Spjd	 * Wait if necessary for the log blocks to be on stable storage.
1502168404Spjd	 */
1503168404Spjd	if (zilog->zl_root_zio) {
1504219089Spjd		error = zio_wait(zilog->zl_root_zio);
1505185029Spjd		zilog->zl_root_zio = NULL;
1506185029Spjd		zil_flush_vdevs(zilog);
1507168404Spjd	}
1508168404Spjd
1509219089Spjd	if (error || lwb == NULL)
1510168404Spjd		txg_wait_synced(zilog->zl_dmu_pool, 0);
1511168404Spjd
1512168404Spjd	mutex_enter(&zilog->zl_lock);
1513168404Spjd
1514219089Spjd	/*
1515219089Spjd	 * Remember the highest committed log sequence number for ztest.
1516219089Spjd	 * We only update this value when all the log writes succeeded,
1517219089Spjd	 * because ztest wants to ASSERT that it got the whole log chain.
1518219089Spjd	 */
1519219089Spjd	if (error == 0 && lwb != NULL)
1520219089Spjd		zilog->zl_commit_lr_seq = zilog->zl_lr_seq;
1521168404Spjd}
1522168404Spjd
1523168404Spjd/*
1524219089Spjd * Commit zfs transactions to stable storage.
1525168404Spjd * If foid is 0 push out all transactions, otherwise push only those
1526219089Spjd * for that object or that might reference that object.
1527219089Spjd *
1528219089Spjd * itxs are committed in batches. In a heavily stressed zil there will be
1529219089Spjd * a commit writer thread that is writing out a bunch of itxs to the log
1530219089Spjd * for a set of committing threads (cthreads) in the same batch as the writer.
1531219089Spjd * Those cthreads are all waiting on the same cv for that batch.
1532219089Spjd *
1533219089Spjd * There will also be a different and growing batch of threads that are
1534219089Spjd * waiting to commit (qthreads). When the committing batch completes
1535219089Spjd * a transition occurs such that the cthreads exit and the qthreads become
1536219089Spjd * cthreads. One of the new cthreads becomes the writer thread for the
1537219089Spjd * batch. Any new threads arriving become new qthreads.
1538219089Spjd *
1539219089Spjd * Only 2 condition variables are needed, and no transition between
1540219089Spjd * them is required: they just flip-flop between qthreads and
1541219089Spjd * cthreads.
1542219089Spjd *
1543219089Spjd * Using this scheme we can efficiently wake up only those threads
1544219089Spjd * whose transactions have been committed.
1545168404Spjd */
1546168404Spjdvoid
1547219089Spjdzil_commit(zilog_t *zilog, uint64_t foid)
1548168404Spjd{
1549219089Spjd	uint64_t mybatch;
1550219089Spjd
1551219089Spjd	if (zilog->zl_sync == ZFS_SYNC_DISABLED)
1552168404Spjd		return;
1553168404Spjd
1554219089Spjd	/* move the async itxs for the foid to the sync queues */
1555219089Spjd	zil_async_to_sync(zilog, foid);
1556219089Spjd
1557168404Spjd	mutex_enter(&zilog->zl_lock);
1558219089Spjd	mybatch = zilog->zl_next_batch;
1559168404Spjd	while (zilog->zl_writer) {
1560219089Spjd		cv_wait(&zilog->zl_cv_batch[mybatch & 1], &zilog->zl_lock);
1561219089Spjd		if (mybatch <= zilog->zl_com_batch) {
1562168404Spjd			mutex_exit(&zilog->zl_lock);
1563168404Spjd			return;
1564168404Spjd		}
1565168404Spjd	}
1566219089Spjd
1567219089Spjd	zilog->zl_next_batch++;
1568219089Spjd	zilog->zl_writer = B_TRUE;
1569219089Spjd	zil_commit_writer(zilog);
1570219089Spjd	zilog->zl_com_batch = mybatch;
1571219089Spjd	zilog->zl_writer = B_FALSE;
1572168404Spjd	mutex_exit(&zilog->zl_lock);
1573219089Spjd
1574219089Spjd	/* wake up one thread to become the next writer */
1575219089Spjd	cv_signal(&zilog->zl_cv_batch[(mybatch+1) & 1]);
1576219089Spjd
1577219089Spjd	/* wake up all threads waiting for this batch to be committed */
1578219089Spjd	cv_broadcast(&zilog->zl_cv_batch[mybatch & 1]);
1579168404Spjd}
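
/*
 * A minimal usage sketch (illustrative only; zp->z_id is an assumption
 * borrowed from the ZPL and is not defined in this file): an fsync(2)
 * style caller pushes just the itxs for the one object,
 *
 *	zil_commit(zilog, zp->z_id);
 *
 * while a foid of 0 pushes every outstanding itx for the dataset.
 */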
1580168404Spjd
1581168404Spjd/*
1582168404Spjd * Called in syncing context to free committed log blocks and update the log header.
1583168404Spjd */
1584168404Spjdvoid
1585168404Spjdzil_sync(zilog_t *zilog, dmu_tx_t *tx)
1586168404Spjd{
1587168404Spjd	zil_header_t *zh = zil_header_in_syncing_context(zilog);
1588168404Spjd	uint64_t txg = dmu_tx_get_txg(tx);
1589168404Spjd	spa_t *spa = zilog->zl_spa;
1590219089Spjd	uint64_t *replayed_seq = &zilog->zl_replayed_seq[txg & TXG_MASK];
1591168404Spjd	lwb_t *lwb;
1592168404Spjd
1593209962Smm	/*
1594209962Smm	 * We don't zero out zl_destroy_txg, so make sure we don't try
1595209962Smm	 * to destroy it twice.
1596209962Smm	 */
1597209962Smm	if (spa_sync_pass(spa) != 1)
1598209962Smm		return;
1599209962Smm
1600168404Spjd	mutex_enter(&zilog->zl_lock);
1601168404Spjd
1602168404Spjd	ASSERT(zilog->zl_stop_sync == 0);
1603168404Spjd
1604219089Spjd	if (*replayed_seq != 0) {
1605219089Spjd		ASSERT(zh->zh_replay_seq < *replayed_seq);
1606219089Spjd		zh->zh_replay_seq = *replayed_seq;
1607219089Spjd		*replayed_seq = 0;
1608219089Spjd	}
1609168404Spjd
1610168404Spjd	if (zilog->zl_destroy_txg == txg) {
1611168404Spjd		blkptr_t blk = zh->zh_log;
1612168404Spjd
1613168404Spjd		ASSERT(list_head(&zilog->zl_lwb_list) == NULL);
1614168404Spjd
1615168404Spjd		bzero(zh, sizeof (zil_header_t));
1616209962Smm		bzero(zilog->zl_replayed_seq, sizeof (zilog->zl_replayed_seq));
1617168404Spjd
1618168404Spjd		if (zilog->zl_keep_first) {
1619168404Spjd			/*
1620168404Spjd			 * If this block was part of log chain that couldn't
1621168404Spjd			 * be claimed because a device was missing during
1622168404Spjd			 * zil_claim(), but that device later returns,
1623168404Spjd			 * then this block could erroneously appear valid.
1624168404Spjd			 * To guard against this, assign a new GUID to the new
1625168404Spjd			 * log chain so it doesn't matter what blk points to.
1626168404Spjd			 */
1627168404Spjd			zil_init_log_chain(zilog, &blk);
1628168404Spjd			zh->zh_log = blk;
1629168404Spjd		}
1630168404Spjd	}
1631168404Spjd
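	/*
	 * Free lwbs whose blocks have made it to stable storage, keeping
	 * zh_log pointed at the oldest lwb still in flight.
	 */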
1632213197Smm	while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
1633168404Spjd		zh->zh_log = lwb->lwb_blk;
1634168404Spjd		if (lwb->lwb_buf != NULL || lwb->lwb_max_txg > txg)
1635168404Spjd			break;
1636168404Spjd		list_remove(&zilog->zl_lwb_list, lwb);
1637219089Spjd		zio_free_zil(spa, txg, &lwb->lwb_blk);
1638168404Spjd		kmem_cache_free(zil_lwb_cache, lwb);
1639168404Spjd
1640168404Spjd		/*
1641168404Spjd		 * If we don't have anything left in the lwb list then
1642168404Spjd		 * we've had an allocation failure and we need to zero
1643168404Spjd		 * out the zil_header blkptr so that we don't end
1644168404Spjd		 * up freeing the same block twice.
1645168404Spjd		 */
1646168404Spjd		if (list_head(&zilog->zl_lwb_list) == NULL)
1647168404Spjd			BP_ZERO(&zh->zh_log);
1648168404Spjd	}
1649168404Spjd	mutex_exit(&zilog->zl_lock);
1650168404Spjd}
1651168404Spjd
1652168404Spjdvoid
1653168404Spjdzil_init(void)
1654168404Spjd{
1655168404Spjd	zil_lwb_cache = kmem_cache_create("zil_lwb_cache",
1656168404Spjd	    sizeof (struct lwb), 0, NULL, NULL, NULL, NULL, NULL, 0);
1657168404Spjd}
1658168404Spjd
1659168404Spjdvoid
1660168404Spjdzil_fini(void)
1661168404Spjd{
1662168404Spjd	kmem_cache_destroy(zil_lwb_cache);
1663168404Spjd}
1664168404Spjd
1665219089Spjdvoid
1666219089Spjdzil_set_sync(zilog_t *zilog, uint64_t sync)
1667219089Spjd{
1668219089Spjd	zilog->zl_sync = sync;
1669219089Spjd}
1670219089Spjd
1671219089Spjdvoid
1672219089Spjdzil_set_logbias(zilog_t *zilog, uint64_t logbias)
1673219089Spjd{
1674219089Spjd	zilog->zl_logbias = logbias;
1675219089Spjd}
1676219089Spjd
1677168404Spjdzilog_t *
1678168404Spjdzil_alloc(objset_t *os, zil_header_t *zh_phys)
1679168404Spjd{
1680168404Spjd	zilog_t *zilog;
1681168404Spjd
1682168404Spjd	zilog = kmem_zalloc(sizeof (zilog_t), KM_SLEEP);
1683168404Spjd
1684168404Spjd	zilog->zl_header = zh_phys;
1685168404Spjd	zilog->zl_os = os;
1686168404Spjd	zilog->zl_spa = dmu_objset_spa(os);
1687168404Spjd	zilog->zl_dmu_pool = dmu_objset_pool(os);
1688168404Spjd	zilog->zl_destroy_txg = TXG_INITIAL - 1;
1689219089Spjd	zilog->zl_logbias = dmu_objset_logbias(os);
1690219089Spjd	zilog->zl_sync = dmu_objset_syncprop(os);
1691219089Spjd	zilog->zl_next_batch = 1;
1692168404Spjd
1693168404Spjd	mutex_init(&zilog->zl_lock, NULL, MUTEX_DEFAULT, NULL);
1694168404Spjd
1695219089Spjd	for (int i = 0; i < TXG_SIZE; i++) {
1696219089Spjd		mutex_init(&zilog->zl_itxg[i].itxg_lock, NULL,
1697219089Spjd		    MUTEX_DEFAULT, NULL);
1698219089Spjd	}
1699168404Spjd
1700168404Spjd	list_create(&zilog->zl_lwb_list, sizeof (lwb_t),
1701168404Spjd	    offsetof(lwb_t, lwb_node));
1702168404Spjd
1703219089Spjd	list_create(&zilog->zl_itx_commit_list, sizeof (itx_t),
1704219089Spjd	    offsetof(itx_t, itx_node));
1705219089Spjd
1706185029Spjd	mutex_init(&zilog->zl_vdev_lock, NULL, MUTEX_DEFAULT, NULL);
1707168404Spjd
1708185029Spjd	avl_create(&zilog->zl_vdev_tree, zil_vdev_compare,
1709185029Spjd	    sizeof (zil_vdev_node_t), offsetof(zil_vdev_node_t, zv_node));
1710185029Spjd
1711185029Spjd	cv_init(&zilog->zl_cv_writer, NULL, CV_DEFAULT, NULL);
1712185029Spjd	cv_init(&zilog->zl_cv_suspend, NULL, CV_DEFAULT, NULL);
1713219089Spjd	cv_init(&zilog->zl_cv_batch[0], NULL, CV_DEFAULT, NULL);
1714219089Spjd	cv_init(&zilog->zl_cv_batch[1], NULL, CV_DEFAULT, NULL);
1715185029Spjd
1716168404Spjd	return (zilog);
1717168404Spjd}
1718168404Spjd
1719168404Spjdvoid
1720168404Spjdzil_free(zilog_t *zilog)
1721168404Spjd{
1722168404Spjd	zilog->zl_stop_sync = 1;
1723168404Spjd
1724248571Smm	ASSERT0(zilog->zl_suspend);
1725248571Smm	ASSERT0(zilog->zl_suspending);
1726248571Smm
1727224526Smm	ASSERT(list_is_empty(&zilog->zl_lwb_list));
1728168404Spjd	list_destroy(&zilog->zl_lwb_list);
1729168404Spjd
1730185029Spjd	avl_destroy(&zilog->zl_vdev_tree);
1731185029Spjd	mutex_destroy(&zilog->zl_vdev_lock);
1732168404Spjd
1733219089Spjd	ASSERT(list_is_empty(&zilog->zl_itx_commit_list));
1734219089Spjd	list_destroy(&zilog->zl_itx_commit_list);
1735219089Spjd
1736219089Spjd	for (int i = 0; i < TXG_SIZE; i++) {
1737219089Spjd		/*
1738219089Spjd		 * It's possible for an itx to be generated that doesn't dirty
1739219089Spjd		 * a txg (e.g. ztest TX_TRUNCATE). So there's no zil_clean()
1740219089Spjd		 * callback to remove the entry. We remove those here.
1741219089Spjd		 *
1742219089Spjd		 * Also free up the ziltest itxs.
1743219089Spjd		 */
1744219089Spjd		if (zilog->zl_itxg[i].itxg_itxs)
1745219089Spjd			zil_itxg_clean(zilog->zl_itxg[i].itxg_itxs);
1746219089Spjd		mutex_destroy(&zilog->zl_itxg[i].itxg_lock);
1747219089Spjd	}
1748219089Spjd
1749168404Spjd	mutex_destroy(&zilog->zl_lock);
1750168404Spjd
1751185029Spjd	cv_destroy(&zilog->zl_cv_writer);
1752185029Spjd	cv_destroy(&zilog->zl_cv_suspend);
1753219089Spjd	cv_destroy(&zilog->zl_cv_batch[0]);
1754219089Spjd	cv_destroy(&zilog->zl_cv_batch[1]);
1755185029Spjd
1756168404Spjd	kmem_free(zilog, sizeof (zilog_t));
1757168404Spjd}
1758168404Spjd
1759168404Spjd/*
1760168404Spjd * Open an intent log.
1761168404Spjd */
1762168404Spjdzilog_t *
1763168404Spjdzil_open(objset_t *os, zil_get_data_t *get_data)
1764168404Spjd{
1765168404Spjd	zilog_t *zilog = dmu_objset_zil(os);
1766168404Spjd
1767224526Smm	ASSERT(zilog->zl_clean_taskq == NULL);
1768224526Smm	ASSERT(zilog->zl_get_data == NULL);
1769224526Smm	ASSERT(list_is_empty(&zilog->zl_lwb_list));
1770224526Smm
1771168404Spjd	zilog->zl_get_data = get_data;
1772168404Spjd	zilog->zl_clean_taskq = taskq_create("zil_clean", 1, minclsyspri,
1773168404Spjd	    2, 2, TASKQ_PREPOPULATE);
1774168404Spjd
1775168404Spjd	return (zilog);
1776168404Spjd}
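
/*
 * A lifecycle sketch (illustrative; zfs_get_data is an assumption based
 * on the ZPL callback of that name): one zilog per mounted objset,
 *
 *	zilog = zil_open(os, zfs_get_data);
 *	...
 *	zil_commit(zilog, foid);	// on each synchronous request
 *	...
 *	zil_close(zilog);
 *
 * Any zil_get_data_t callback may be supplied in place of zfs_get_data.
 */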
1777168404Spjd
1778168404Spjd/*
1779168404Spjd * Close an intent log.
1780168404Spjd */
1781168404Spjdvoid
1782168404Spjdzil_close(zilog_t *zilog)
1783168404Spjd{
1784224526Smm	lwb_t *lwb;
1785219089Spjd	uint64_t txg = 0;
1786219089Spjd
1787219089Spjd	zil_commit(zilog, 0); /* commit all itxs */
1788219089Spjd
1789168404Spjd	/*
1790219089Spjd	 * The lwb_max_txg for the stubby lwb will reflect the last activity
1791219089Spjd	 * for the zil.  After a txg_wait_synced() on the txg we know all the
1792219089Spjd	 * callbacks have occurred that may clean the zil.  Only then can we
1793219089Spjd	 * destroy the zl_clean_taskq.
1794168404Spjd	 */
1795219089Spjd	mutex_enter(&zilog->zl_lock);
1796224526Smm	lwb = list_tail(&zilog->zl_lwb_list);
1797224526Smm	if (lwb != NULL)
1798224526Smm		txg = lwb->lwb_max_txg;
1799219089Spjd	mutex_exit(&zilog->zl_lock);
1800219089Spjd	if (txg)
1801168404Spjd		txg_wait_synced(zilog->zl_dmu_pool, txg);
1802239620Smm	ASSERT(!zilog_is_dirty(zilog));
1803168404Spjd
1804168404Spjd	taskq_destroy(zilog->zl_clean_taskq);
1805168404Spjd	zilog->zl_clean_taskq = NULL;
1806168404Spjd	zilog->zl_get_data = NULL;
1807224526Smm
1808224526Smm	/*
1809224526Smm	 * We should have only one LWB left on the list; remove it now.
1810224526Smm	 */
1811224526Smm	mutex_enter(&zilog->zl_lock);
1812224526Smm	lwb = list_head(&zilog->zl_lwb_list);
1813224526Smm	if (lwb != NULL) {
1814224526Smm		ASSERT(lwb == list_tail(&zilog->zl_lwb_list));
1815224526Smm		list_remove(&zilog->zl_lwb_list, lwb);
1816224526Smm		zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
1817224526Smm		kmem_cache_free(zil_lwb_cache, lwb);
1818224526Smm	}
1819224526Smm	mutex_exit(&zilog->zl_lock);
1820168404Spjd}
1821168404Spjd
1822248571Smmstatic char *suspend_tag = "zil suspending";
1823248571Smm
1824168404Spjd/*
1825168404Spjd * Suspend an intent log.  While in suspended mode, we still honor
1826168404Spjd * synchronous semantics, but we rely on txg_wait_synced() to do it.
1827248571Smm * On old version pools, we suspend the log briefly when taking a
1828248571Smm * snapshot so that it will have an empty intent log.
1829248571Smm *
1830248571Smm * Long holds are not really intended to be used the way we do here --
1831248571Smm * held for such a short time.  A concurrent caller of dsl_dataset_long_held()
1832248571Smm * could fail.  Therefore we take pains to only put a long hold if it is
1833248571Smm * actually necessary.  Fortunately, it will only be necessary if the
1834248571Smm * objset is currently mounted (or the ZVOL equivalent).  In that case it
1835248571Smm * will already have a long hold, so we are not really making things any worse.
1836248571Smm *
1837248571Smm * Ideally, we would locate the existing long-holder (i.e. the zfsvfs_t or
1838248571Smm * zvol_state_t), and use their mechanism to prevent their hold from being
1839248571Smm * dropped (e.g. VFS_HOLD()).  However, that would be even more pain for
1840248571Smm * very little gain.
1841248571Smm *
1842248571Smm * If cookiep == NULL, this does both the suspend & resume.
1843248571Smm * Otherwise, it returns with the dataset "long held", and the cookie
1844248571Smm * should be passed into zil_resume().
1845168404Spjd */
1846168404Spjdint
1847248571Smmzil_suspend(const char *osname, void **cookiep)
1848168404Spjd{
1849248571Smm	objset_t *os;
1850248571Smm	zilog_t *zilog;
1851248571Smm	const zil_header_t *zh;
1852248571Smm	int error;
1853168404Spjd
1854248571Smm	error = dmu_objset_hold(osname, suspend_tag, &os);
1855248571Smm	if (error != 0)
1856248571Smm		return (error);
1857248571Smm	zilog = dmu_objset_zil(os);
1858248571Smm
1859168404Spjd	mutex_enter(&zilog->zl_lock);
1860248571Smm	zh = zilog->zl_header;
1861248571Smm
1862200724Sdelphij	if (zh->zh_flags & ZIL_REPLAY_NEEDED) {		/* unplayed log */
1863168404Spjd		mutex_exit(&zilog->zl_lock);
1864248571Smm		dmu_objset_rele(os, suspend_tag);
1865249195Smm		return (SET_ERROR(EBUSY));
1866168404Spjd	}
1867248571Smm
1868248571Smm	/*
1869248571Smm	 * Don't put a long hold in the cases where we can avoid it.  This
1870248571Smm	 * is when there is no cookie, so we are doing a suspend & resume
1871248571Smm	 * (i.e. called from zil_vdev_offline()), and there's nothing to do
1872248571Smm	 * for the suspend because the ZIL is already suspended or absent.
1873248571Smm	 */
1874248571Smm	if (cookiep == NULL && !zilog->zl_suspending &&
1875248571Smm	    (zilog->zl_suspend > 0 || BP_IS_HOLE(&zh->zh_log))) {
1876248571Smm		mutex_exit(&zilog->zl_lock);
1877248571Smm		dmu_objset_rele(os, suspend_tag);
1878248571Smm		return (0);
1879248571Smm	}
1880248571Smm
1881248571Smm	dsl_dataset_long_hold(dmu_objset_ds(os), suspend_tag);
1882248571Smm	dsl_pool_rele(dmu_objset_pool(os), suspend_tag);
1883248571Smm
1884248571Smm	zilog->zl_suspend++;
1885248571Smm
1886248571Smm	if (zilog->zl_suspend > 1) {
1887168404Spjd		/*
1888248571Smm		 * Someone else is already suspending it.
1889168404Spjd		 * Just wait for them to finish.
1890168404Spjd		 */
1891248571Smm
1892168404Spjd		while (zilog->zl_suspending)
1893168404Spjd			cv_wait(&zilog->zl_cv_suspend, &zilog->zl_lock);
1894168404Spjd		mutex_exit(&zilog->zl_lock);
1895248571Smm
1896248571Smm		if (cookiep == NULL)
1897248571Smm			zil_resume(os);
1898248571Smm		else
1899248571Smm			*cookiep = os;
1900168404Spjd		return (0);
1901168404Spjd	}
1902248571Smm
1903248571Smm	/*
1904248571Smm	 * If there is no pointer to an on-disk block, this ZIL must not
1905248571Smm	 * be active (e.g. filesystem not mounted), so there's nothing
1906248571Smm	 * to clean up.
1907248571Smm	 */
1908248571Smm	if (BP_IS_HOLE(&zh->zh_log)) {
1909248571Smm		ASSERT(cookiep != NULL); /* fast path already handled */
1910248571Smm
1911248571Smm		*cookiep = os;
1912248571Smm		mutex_exit(&zilog->zl_lock);
1913248571Smm		return (0);
1914248571Smm	}
1915248571Smm
1916168404Spjd	zilog->zl_suspending = B_TRUE;
1917168404Spjd	mutex_exit(&zilog->zl_lock);
1918168404Spjd
1919219089Spjd	zil_commit(zilog, 0);
1920168404Spjd
1921168404Spjd	zil_destroy(zilog, B_FALSE);
1922168404Spjd
1923168404Spjd	mutex_enter(&zilog->zl_lock);
1924168404Spjd	zilog->zl_suspending = B_FALSE;
1925168404Spjd	cv_broadcast(&zilog->zl_cv_suspend);
1926168404Spjd	mutex_exit(&zilog->zl_lock);
1927168404Spjd
1928248571Smm	if (cookiep == NULL)
1929248571Smm		zil_resume(os);
1930248571Smm	else
1931248571Smm		*cookiep = os;
1932168404Spjd	return (0);
1933168404Spjd}
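
/*
 * The two calling conventions described above, sketched (illustrative
 * only):
 *
 *	(void) zil_suspend(osname, NULL);	// suspend & resume in one call
 *
 *	void *cookie;
 *	error = zil_suspend(osname, &cookie);
 *	if (error == 0) {
 *		// dataset is long held with an empty, suspended intent log
 *		zil_resume(cookie);
 *	}
 */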
1934168404Spjd
1935168404Spjdvoid
1936248571Smmzil_resume(void *cookie)
1937168404Spjd{
1938248571Smm	objset_t *os = cookie;
1939248571Smm	zilog_t *zilog = dmu_objset_zil(os);
1940248571Smm
1941168404Spjd	mutex_enter(&zilog->zl_lock);
1942168404Spjd	ASSERT(zilog->zl_suspend != 0);
1943168404Spjd	zilog->zl_suspend--;
1944168404Spjd	mutex_exit(&zilog->zl_lock);
1945248571Smm	dsl_dataset_long_rele(dmu_objset_ds(os), suspend_tag);
1946248571Smm	dsl_dataset_rele(dmu_objset_ds(os), suspend_tag);
1947168404Spjd}
1948168404Spjd
1949219089Spjdtypedef struct zil_replay_arg {
1950219089Spjd	zil_replay_func_t **zr_replay;
1951219089Spjd	void		*zr_arg;
1952219089Spjd	boolean_t	zr_byteswap;
1953219089Spjd	char		*zr_lr;
1954219089Spjd} zil_replay_arg_t;
1955219089Spjd
1956219089Spjdstatic int
1957219089Spjdzil_replay_error(zilog_t *zilog, lr_t *lr, int error)
1958209962Smm{
1959219089Spjd	char name[MAXNAMELEN];
1960209962Smm
1961219089Spjd	zilog->zl_replaying_seq--;	/* didn't actually replay this one */
1962209962Smm
1963219089Spjd	dmu_objset_name(zilog->zl_os, name);
1964209962Smm
1965219089Spjd	cmn_err(CE_WARN, "ZFS replay transaction error %d, "
1966219089Spjd	    "dataset %s, seq 0x%llx, txtype %llu %s\n", error, name,
1967219089Spjd	    (u_longlong_t)lr->lrc_seq,
1968219089Spjd	    (u_longlong_t)(lr->lrc_txtype & ~TX_CI),
1969219089Spjd	    (lr->lrc_txtype & TX_CI) ? "CI" : "");
1970219089Spjd
1971219089Spjd	return (error);
1972209962Smm}
1973209962Smm
1974219089Spjdstatic int
1975168404Spjdzil_replay_log_record(zilog_t *zilog, lr_t *lr, void *zra, uint64_t claim_txg)
1976168404Spjd{
1977168404Spjd	zil_replay_arg_t *zr = zra;
1978168404Spjd	const zil_header_t *zh = zilog->zl_header;
1979168404Spjd	uint64_t reclen = lr->lrc_reclen;
1980168404Spjd	uint64_t txtype = lr->lrc_txtype;
1981219089Spjd	int error = 0;
1982168404Spjd
1983219089Spjd	zilog->zl_replaying_seq = lr->lrc_seq;
1984168404Spjd
1985219089Spjd	if (lr->lrc_seq <= zh->zh_replay_seq)	/* already replayed */
1986219089Spjd		return (0);
1987219089Spjd
1988168404Spjd	if (lr->lrc_txg < claim_txg)		/* already committed */
1989219089Spjd		return (0);
1990168404Spjd
1991185029Spjd	/* Strip case-insensitive bit, still present in log record */
1992185029Spjd	txtype &= ~TX_CI;
1993185029Spjd
1994219089Spjd	if (txtype == 0 || txtype >= TX_MAX_TYPE)
1995219089Spjd		return (zil_replay_error(zilog, lr, EINVAL));
1996219089Spjd
1997219089Spjd	/*
1998219089Spjd	 * If this record type can be logged out of order, the object
1999219089Spjd	 * (lr_foid) may no longer exist.  That's legitimate, not an error.
2000219089Spjd	 */
2001219089Spjd	if (TX_OOO(txtype)) {
2002219089Spjd		error = dmu_object_info(zilog->zl_os,
2003219089Spjd		    ((lr_ooo_t *)lr)->lr_foid, NULL);
2004219089Spjd		if (error == ENOENT || error == EEXIST)
2005219089Spjd			return (0);
2006209962Smm	}
2007209962Smm
2008168404Spjd	/*
2009168404Spjd	 * Make a copy of the data so we can revise and extend it.
2010168404Spjd	 */
2011219089Spjd	bcopy(lr, zr->zr_lr, reclen);
2012168404Spjd
2013168404Spjd	/*
2014219089Spjd	 * If this is a TX_WRITE with a blkptr, suck in the data.
2015219089Spjd	 */
2016219089Spjd	if (txtype == TX_WRITE && reclen == sizeof (lr_write_t)) {
2017219089Spjd		error = zil_read_log_data(zilog, (lr_write_t *)lr,
2018219089Spjd		    zr->zr_lr + reclen);
2019248571Smm		if (error != 0)
2020219089Spjd			return (zil_replay_error(zilog, lr, error));
2021219089Spjd	}
2022219089Spjd
2023219089Spjd	/*
2024168404Spjd	 * The log block containing this lr may have been byteswapped
2025168404Spjd	 * so that we can easily examine common fields like lrc_txtype.
2026219089Spjd	 * However, the log is a mix of different record types, and only the
2027168404Spjd	 * replay vectors know how to byteswap their records.  Therefore, if
2028168404Spjd	 * the lr was byteswapped, undo it before invoking the replay vector.
2029168404Spjd	 */
2030168404Spjd	if (zr->zr_byteswap)
2031219089Spjd		byteswap_uint64_array(zr->zr_lr, reclen);
2032168404Spjd
2033168404Spjd	/*
2034168404Spjd	 * We must now do two things atomically: replay this log record,
2035209962Smm	 * and update the log header sequence number to reflect the fact that
2036209962Smm	 * we did so. At the end of each replay function the sequence number
2037209962Smm	 * is updated if we are in replay mode.
2038168404Spjd	 */
2039219089Spjd	error = zr->zr_replay[txtype](zr->zr_arg, zr->zr_lr, zr->zr_byteswap);
2040248571Smm	if (error != 0) {
2041168404Spjd		/*
2042168404Spjd		 * The DMU's dnode layer doesn't see removes until the txg
2043168404Spjd		 * commits, so a subsequent claim can spuriously fail with
2044209962Smm		 * EEXIST. So if we receive any error we try syncing out
2045219089Spjd		 * any removes and then retry the transaction.  Note that we
2046219089Spjd		 * specify B_FALSE for byteswap now, so we don't do it twice.
2047168404Spjd		 */
2048219089Spjd		txg_wait_synced(spa_get_dsl(zilog->zl_spa), 0);
2049219089Spjd		error = zr->zr_replay[txtype](zr->zr_arg, zr->zr_lr, B_FALSE);
2050248571Smm		if (error != 0)
2051219089Spjd			return (zil_replay_error(zilog, lr, error));
2052168404Spjd	}
2053219089Spjd	return (0);
2054168404Spjd}
2055168404Spjd
2056168404Spjd/* ARGSUSED */
2057219089Spjdstatic int
2058168404Spjdzil_incr_blks(zilog_t *zilog, blkptr_t *bp, void *arg, uint64_t claim_txg)
2059168404Spjd{
2060168404Spjd	zilog->zl_replay_blks++;
2061219089Spjd
2062219089Spjd	return (0);
2063168404Spjd}
2064168404Spjd
2065168404Spjd/*
2066168404Spjd * If this dataset has a non-empty intent log, replay it and destroy it.
2067168404Spjd */
2068168404Spjdvoid
2069209962Smmzil_replay(objset_t *os, void *arg, zil_replay_func_t *replay_func[TX_MAX_TYPE])
2070168404Spjd{
2071168404Spjd	zilog_t *zilog = dmu_objset_zil(os);
2072168404Spjd	const zil_header_t *zh = zilog->zl_header;
2073168404Spjd	zil_replay_arg_t zr;
2074168404Spjd
2075200724Sdelphij	if ((zh->zh_flags & ZIL_REPLAY_NEEDED) == 0) {
2076168404Spjd		zil_destroy(zilog, B_TRUE);
2077168404Spjd		return;
2078168404Spjd	}
2079168404Spjd	//printf("ZFS: Replaying ZIL on %s...\n", os->os->os_spa->spa_name);
2080168404Spjd
2081168404Spjd	zr.zr_replay = replay_func;
2082168404Spjd	zr.zr_arg = arg;
2083168404Spjd	zr.zr_byteswap = BP_SHOULD_BYTESWAP(&zh->zh_log);
2084219089Spjd	zr.zr_lr = kmem_alloc(2 * SPA_MAXBLOCKSIZE, KM_SLEEP);
2085168404Spjd
2086168404Spjd	/*
2087168404Spjd	 * Wait for in-progress removes to sync before starting replay.
2088168404Spjd	 */
2089168404Spjd	txg_wait_synced(zilog->zl_dmu_pool, 0);
2090168404Spjd
2091209962Smm	zilog->zl_replay = B_TRUE;
2092219089Spjd	zilog->zl_replay_time = ddi_get_lbolt();
2093168404Spjd	ASSERT(zilog->zl_replay_blks == 0);
2094168404Spjd	(void) zil_parse(zilog, zil_incr_blks, zil_replay_log_record, &zr,
2095168404Spjd	    zh->zh_claim_txg);
2096219089Spjd	kmem_free(zr.zr_lr, 2 * SPA_MAXBLOCKSIZE);
2097168404Spjd
2098168404Spjd	zil_destroy(zilog, B_FALSE);
2099185029Spjd	txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);
2100209962Smm	zilog->zl_replay = B_FALSE;
2101168404Spjd	//printf("ZFS: Replay of ZIL on %s finished.\n", os->os->os_spa->spa_name);
2102168404Spjd}
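
/*
 * A caller sketch (illustrative; zfs_replay_vector and zfsvfs are
 * assumptions based on the ZPL): replay_func is a TX_MAX_TYPE-sized
 * vector of handlers indexed by txtype,
 *
 *	zil_replay(os, zfsvfs, zfs_replay_vector);
 *
 * with slot 0 acting as an error handler, since a txtype of 0 is
 * invalid (see zil_replay_log_record() above).
 */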
2103168404Spjd
2104219089Spjdboolean_t
2105219089Spjdzil_replaying(zilog_t *zilog, dmu_tx_t *tx)
2106168404Spjd{
2107219089Spjd	if (zilog->zl_sync == ZFS_SYNC_DISABLED)
2108219089Spjd		return (B_TRUE);
2109168404Spjd
2110219089Spjd	if (zilog->zl_replay) {
2111219089Spjd		dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
2112219089Spjd		zilog->zl_replayed_seq[dmu_tx_get_txg(tx) & TXG_MASK] =
2113219089Spjd		    zilog->zl_replaying_seq;
2114219089Spjd		return (B_TRUE);
2115168404Spjd	}
2116168404Spjd
2117219089Spjd	return (B_FALSE);
2118168404Spjd}
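
/*
 * A sketch of the intended use of zil_replaying() (illustrative;
 * zfs_log_write() is an assumption based on the ZPL): log-building
 * code checks it first, so nothing new is logged while a replay is in
 * progress or while sync is disabled:
 *
 *	if (zil_replaying(zilog, tx))
 *		return;		// don't build an itx
 */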
2119213197Smm
2120213197Smm/* ARGSUSED */
2121213197Smmint
2122219089Spjdzil_vdev_offline(const char *osname, void *arg)
2123213197Smm{
2124213197Smm	int error;
2125213197Smm
2126248571Smm	error = zil_suspend(osname, NULL);
2127248571Smm	if (error != 0)
2128249195Smm		return (SET_ERROR(EEXIST));
2129248571Smm	return (0);
2130213197Smm}
2131