zil.c revision 200724
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/dmu.h>
#include <sys/zap.h>
#include <sys/arc.h>
#include <sys/stat.h>
#include <sys/resource.h>
#include <sys/zil.h>
#include <sys/zil_impl.h>
#include <sys/dsl_dataset.h>
#include <sys/vdev.h>
#include <sys/dmu_tx.h>

/*
 * The zfs intent log (ZIL) saves, in memory, transaction records of
 * system calls that change the file system, with enough information
 * to be able to replay them. These are stored in memory until
 * either the DMU transaction group (txg) commits them to the stable pool
 * and they can be discarded, or they are flushed to the stable log
 * (also in the pool) due to an fsync, O_DSYNC or other synchronous
 * requirement. In the event of a panic or power failure, those log
 * records (transactions) are replayed.
 *
 * There is one ZIL per file system. Its on-disk (pool) format consists
 * of 3 parts:
 *
 * 	- ZIL header
 * 	- ZIL blocks
 * 	- ZIL records
 *
 * A log record holds a system call transaction. Log blocks can
 * hold many log records and the blocks are chained together.
 * Each ZIL block contains a block pointer (blkptr_t) to the next
 * ZIL block in the chain. The ZIL header points to the first
 * block in the chain. Note there is not a fixed place in the pool
 * to hold blocks. They are dynamically allocated and freed as
 * needed from the blocks available.
 */

/*
 * This global ZIL switch affects all pools
 */
int zil_disable = 0;	/* disable intent logging */
SYSCTL_DECL(_vfs_zfs);
TUNABLE_INT("vfs.zfs.zil_disable", &zil_disable);
SYSCTL_INT(_vfs_zfs, OID_AUTO, zil_disable, CTLFLAG_RW, &zil_disable, 0,
    "Disable ZFS Intent Log (ZIL)");

/*
 * Tunable parameter for debugging or performance analysis.  Setting
 * zfs_nocacheflush will cause corruption on power loss if a volatile
 * out-of-order write cache is enabled.
 */
boolean_t zfs_nocacheflush = B_FALSE;
TUNABLE_INT("vfs.zfs.cache_flush_disable", &zfs_nocacheflush);
SYSCTL_INT(_vfs_zfs, OID_AUTO, cache_flush_disable, CTLFLAG_RDTUN,
    &zfs_nocacheflush, 0, "Disable cache flush");

static kmem_cache_t *zil_lwb_cache;

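/*
 * Compare two DVAs by vdev and then by offset.  This orders the
 * zl_dva_tree, which is used to detect log blocks that have already
 * been claimed or freed.
 */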
static int
zil_dva_compare(const void *x1, const void *x2)
{
	const dva_t *dva1 = x1;
	const dva_t *dva2 = x2;

	if (DVA_GET_VDEV(dva1) < DVA_GET_VDEV(dva2))
		return (-1);
	if (DVA_GET_VDEV(dva1) > DVA_GET_VDEV(dva2))
		return (1);

	if (DVA_GET_OFFSET(dva1) < DVA_GET_OFFSET(dva2))
		return (-1);
	if (DVA_GET_OFFSET(dva1) > DVA_GET_OFFSET(dva2))
		return (1);

	return (0);
}

static void
zil_dva_tree_init(avl_tree_t *t)
{
	avl_create(t, zil_dva_compare, sizeof (zil_dva_node_t),
	    offsetof(zil_dva_node_t, zn_node));
}

static void
zil_dva_tree_fini(avl_tree_t *t)
{
	zil_dva_node_t *zn;
	void *cookie = NULL;

	while ((zn = avl_destroy_nodes(t, &cookie)) != NULL)
		kmem_free(zn, sizeof (zil_dva_node_t));

	avl_destroy(t);
}

static int
zil_dva_tree_add(avl_tree_t *t, dva_t *dva)
{
	zil_dva_node_t *zn;
	avl_index_t where;

	if (avl_find(t, dva, &where) != NULL)
		return (EEXIST);

	zn = kmem_alloc(sizeof (zil_dva_node_t), KM_SLEEP);
	zn->zn_dva = *dva;
	avl_insert(t, zn, where);

	return (0);
}

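/*
 * The zil_header_t hanging off the zilog is const in open context;
 * only syncing context may cast away the const and modify it.
 */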
static zil_header_t *
zil_header_in_syncing_context(zilog_t *zilog)
{
	return ((zil_header_t *)zilog->zl_header);
}

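/*
 * Seed the checksum words embedded in the block pointer: a random
 * GUID identifying this log chain, the objset id, and an initial
 * block sequence number of 1.
 */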
static void
zil_init_log_chain(zilog_t *zilog, blkptr_t *bp)
{
	zio_cksum_t *zc = &bp->blk_cksum;

	zc->zc_word[ZIL_ZC_GUID_0] = spa_get_random(-1ULL);
	zc->zc_word[ZIL_ZC_GUID_1] = spa_get_random(-1ULL);
	zc->zc_word[ZIL_ZC_OBJSET] = dmu_objset_id(zilog->zl_os);
	zc->zc_word[ZIL_ZC_SEQ] = 1ULL;
}

/*
 * Read a log block, make sure it's valid, and byteswap it if necessary.
 */
static int
zil_read_log_block(zilog_t *zilog, const blkptr_t *bp, arc_buf_t **abufpp)
{
	blkptr_t blk = *bp;
	zbookmark_t zb;
	uint32_t aflags = ARC_WAIT;
	int error;

	zb.zb_objset = bp->blk_cksum.zc_word[ZIL_ZC_OBJSET];
	zb.zb_object = 0;
	zb.zb_level = -1;
	zb.zb_blkid = bp->blk_cksum.zc_word[ZIL_ZC_SEQ];

	*abufpp = NULL;

	/*
	 * We shouldn't be doing any scrubbing while we're doing log
	 * replay, so it's OK not to lock.
	 */
	error = arc_read_nolock(NULL, zilog->zl_spa, &blk,
	    arc_getbuf_func, abufpp, ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL |
	    ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB, &aflags, &zb);

	if (error == 0) {
		char *data = (*abufpp)->b_data;
		uint64_t blksz = BP_GET_LSIZE(bp);
		zil_trailer_t *ztp = (zil_trailer_t *)(data + blksz) - 1;
		zio_cksum_t cksum = bp->blk_cksum;

		/*
		 * Validate the checksummed log block.
		 *
		 * Sequence numbers should be... sequential.  The checksum
		 * verifier for the next block should be bp's checksum plus 1.
		 *
		 * Also check the log chain linkage and size used.
		 */
		cksum.zc_word[ZIL_ZC_SEQ]++;

		if (bcmp(&cksum, &ztp->zit_next_blk.blk_cksum,
		    sizeof (cksum)) || BP_IS_HOLE(&ztp->zit_next_blk) ||
		    (ztp->zit_nused > (blksz - sizeof (zil_trailer_t)))) {
			error = ECKSUM;
		}

		if (error) {
			VERIFY(arc_buf_remove_ref(*abufpp, abufpp) == 1);
			*abufpp = NULL;
		}
	}

	dprintf("error %d on %llu:%llu\n", error, zb.zb_objset, zb.zb_blkid);

	return (error);
}

/*
 * Parse the intent log, and call parse_func for each valid record within.
 * Return the highest sequence number.
 */
uint64_t
zil_parse(zilog_t *zilog, zil_parse_blk_func_t *parse_blk_func,
    zil_parse_lr_func_t *parse_lr_func, void *arg, uint64_t txg)
{
	const zil_header_t *zh = zilog->zl_header;
	uint64_t claim_seq = zh->zh_claim_seq;
	uint64_t seq = 0;
	uint64_t max_seq = 0;
	blkptr_t blk = zh->zh_log;
	arc_buf_t *abuf;
	char *lrbuf, *lrp;
	zil_trailer_t *ztp;
	int reclen, error;

	if (BP_IS_HOLE(&blk))
		return (max_seq);

	/*
	 * Starting at the block pointed to by zh_log we read the log chain.
	 * For each block in the chain we strongly check that block to
	 * ensure its validity.  We stop when an invalid block is found.
	 * For each block pointer in the chain we call parse_blk_func().
	 * For each record in each valid block we call parse_lr_func().
	 * If the log has been claimed, stop if we encounter a sequence
	 * number greater than the highest claimed sequence number.
	 */
	zil_dva_tree_init(&zilog->zl_dva_tree);
	for (;;) {
		seq = blk.blk_cksum.zc_word[ZIL_ZC_SEQ];

		if (claim_seq != 0 && seq > claim_seq)
			break;

		ASSERT(max_seq < seq);
		max_seq = seq;

		error = zil_read_log_block(zilog, &blk, &abuf);

		if (parse_blk_func != NULL)
			parse_blk_func(zilog, &blk, arg, txg);

		if (error)
			break;

		lrbuf = abuf->b_data;
		ztp = (zil_trailer_t *)(lrbuf + BP_GET_LSIZE(&blk)) - 1;
		blk = ztp->zit_next_blk;

		if (parse_lr_func == NULL) {
			VERIFY(arc_buf_remove_ref(abuf, &abuf) == 1);
			continue;
		}

		for (lrp = lrbuf; lrp < lrbuf + ztp->zit_nused; lrp += reclen) {
			lr_t *lr = (lr_t *)lrp;
			reclen = lr->lrc_reclen;
			ASSERT3U(reclen, >=, sizeof (lr_t));
			parse_lr_func(zilog, lr, arg, txg);
		}
		VERIFY(arc_buf_remove_ref(abuf, &abuf) == 1);
	}
	zil_dva_tree_fini(&zilog->zl_dva_tree);

	return (max_seq);
}

/* ARGSUSED */
static void
zil_claim_log_block(zilog_t *zilog, blkptr_t *bp, void *tx, uint64_t first_txg)
{
	spa_t *spa = zilog->zl_spa;
	int err;

	/*
	 * Claim log block if not already committed and not already claimed.
	 */
	if (bp->blk_birth >= first_txg &&
	    zil_dva_tree_add(&zilog->zl_dva_tree, BP_IDENTITY(bp)) == 0) {
		err = zio_wait(zio_claim(NULL, spa, first_txg, bp, NULL, NULL,
		    ZIO_FLAG_MUSTSUCCEED));
		ASSERT(err == 0);
	}
}

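/*
 * A TX_WRITE record may reference a data block written out of line
 * (e.g. by dmu_sync()); claim that block as well.
 */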
static void
zil_claim_log_record(zilog_t *zilog, lr_t *lrc, void *tx, uint64_t first_txg)
{
	if (lrc->lrc_txtype == TX_WRITE) {
		lr_write_t *lr = (lr_write_t *)lrc;
		zil_claim_log_block(zilog, &lr->lr_blkptr, tx, first_txg);
	}
}

/* ARGSUSED */
static void
zil_free_log_block(zilog_t *zilog, blkptr_t *bp, void *tx, uint64_t claim_txg)
{
	zio_free_blk(zilog->zl_spa, bp, dmu_tx_get_txg(tx));
}

static void
zil_free_log_record(zilog_t *zilog, lr_t *lrc, void *tx, uint64_t claim_txg)
{
	/*
	 * If we previously claimed it, we need to free it.
	 */
	if (claim_txg != 0 && lrc->lrc_txtype == TX_WRITE) {
		lr_write_t *lr = (lr_write_t *)lrc;
		blkptr_t *bp = &lr->lr_blkptr;
		if (bp->blk_birth >= claim_txg &&
		    !zil_dva_tree_add(&zilog->zl_dva_tree, BP_IDENTITY(bp))) {
			(void) arc_free(NULL, zilog->zl_spa,
			    dmu_tx_get_txg(tx), bp, NULL, NULL, ARC_WAIT);
		}
	}
}

/*
 * Create an on-disk intent log.
 */
static void
zil_create(zilog_t *zilog)
{
	const zil_header_t *zh = zilog->zl_header;
	lwb_t *lwb;
	uint64_t txg = 0;
	dmu_tx_t *tx = NULL;
	blkptr_t blk;
	int error = 0;

	/*
	 * Wait for any previous destroy to complete.
	 */
	txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);

	ASSERT(zh->zh_claim_txg == 0);
	ASSERT(zh->zh_replay_seq == 0);

	blk = zh->zh_log;

	/*
	 * If we don't already have an initial log block, allocate one now.
	 */
	if (BP_IS_HOLE(&blk)) {
		tx = dmu_tx_create(zilog->zl_os);
		(void) dmu_tx_assign(tx, TXG_WAIT);
		dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
		txg = dmu_tx_get_txg(tx);

		error = zio_alloc_blk(zilog->zl_spa, ZIL_MIN_BLKSZ, &blk,
		    NULL, txg);

		if (error == 0)
			zil_init_log_chain(zilog, &blk);
	}

	/*
	 * Allocate a log write buffer (lwb) for the first log block.
	 */
	if (error == 0) {
		lwb = kmem_cache_alloc(zil_lwb_cache, KM_SLEEP);
		lwb->lwb_zilog = zilog;
		lwb->lwb_blk = blk;
		lwb->lwb_nused = 0;
		lwb->lwb_sz = BP_GET_LSIZE(&lwb->lwb_blk);
		lwb->lwb_buf = zio_buf_alloc(lwb->lwb_sz);
		lwb->lwb_max_txg = txg;
		lwb->lwb_zio = NULL;

		mutex_enter(&zilog->zl_lock);
		list_insert_tail(&zilog->zl_lwb_list, lwb);
		mutex_exit(&zilog->zl_lock);
	}

	/*
	 * If we just allocated the first log block, commit our transaction
	 * and wait for zil_sync() to stuff the block pointer into zh_log.
	 * (zh is part of the MOS, so we cannot modify it in open context.)
	 */
	if (tx != NULL) {
		dmu_tx_commit(tx);
		txg_wait_synced(zilog->zl_dmu_pool, txg);
	}

	ASSERT(bcmp(&blk, &zh->zh_log, sizeof (blk)) == 0);
}

/*
 * In one tx, free all log blocks and clear the log header.
 * If keep_first is set, then we're replaying a log with no content.
 * We want to keep the first block, however, so that the first
 * synchronous transaction doesn't require a txg_wait_synced()
 * in zil_create().  We don't need to txg_wait_synced() here either
 * when keep_first is set, because both zil_create() and zil_destroy()
 * will wait for any in-progress destroys to complete.
 */
void
zil_destroy(zilog_t *zilog, boolean_t keep_first)
{
	const zil_header_t *zh = zilog->zl_header;
	lwb_t *lwb;
	dmu_tx_t *tx;
	uint64_t txg;

	/*
	 * Wait for any previous destroy to complete.
	 */
	txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);

	if (BP_IS_HOLE(&zh->zh_log))
		return;

	tx = dmu_tx_create(zilog->zl_os);
	(void) dmu_tx_assign(tx, TXG_WAIT);
	dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
	txg = dmu_tx_get_txg(tx);

	mutex_enter(&zilog->zl_lock);

	/*
	 * It is possible for the ZIL to get the previously mounted zilog
	 * structure of the same dataset if it is quickly remounted and the
	 * dbuf eviction has not completed.  In this case we can see a
	 * non-empty lwb list and keep_first will be set.  We fix this by
	 * clearing keep_first.  This will be slower, but it's very rare.
	 */
	if (!list_is_empty(&zilog->zl_lwb_list) && keep_first)
		keep_first = B_FALSE;

	ASSERT3U(zilog->zl_destroy_txg, <, txg);
	zilog->zl_destroy_txg = txg;
	zilog->zl_keep_first = keep_first;

	if (!list_is_empty(&zilog->zl_lwb_list)) {
		ASSERT(zh->zh_claim_txg == 0);
		ASSERT(!keep_first);
		while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
			list_remove(&zilog->zl_lwb_list, lwb);
			if (lwb->lwb_buf != NULL)
				zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
			zio_free_blk(zilog->zl_spa, &lwb->lwb_blk, txg);
			kmem_cache_free(zil_lwb_cache, lwb);
		}
	} else {
		if (!keep_first) {
			(void) zil_parse(zilog, zil_free_log_block,
			    zil_free_log_record, tx, zh->zh_claim_txg);
		}
	}
	mutex_exit(&zilog->zl_lock);

	dmu_tx_commit(tx);
}

/*
 * zil_rollback_destroy() is only called by the rollback code.
 * We already have a syncing tx. Rollback has exclusive access to the
 * dataset, so we don't have to worry about concurrent zil access.
 * The actual freeing of any log blocks occurs in zil_sync() later in
 * this txg syncing phase.
 */
void
zil_rollback_destroy(zilog_t *zilog, dmu_tx_t *tx)
{
	const zil_header_t *zh = zilog->zl_header;
	uint64_t txg;

	if (BP_IS_HOLE(&zh->zh_log))
		return;

	txg = dmu_tx_get_txg(tx);
	ASSERT3U(zilog->zl_destroy_txg, <, txg);
	zilog->zl_destroy_txg = txg;
	zilog->zl_keep_first = B_FALSE;

	/*
	 * Ensure there's no outstanding ZIL IO.  Having no lwbs, or just
	 * the unused one that was allocated in advance, is ok.
	 */
	ASSERT(zilog->zl_lwb_list.list_head.list_next ==
	    zilog->zl_lwb_list.list_head.list_prev);
	(void) zil_parse(zilog, zil_free_log_block, zil_free_log_record,
	    tx, zh->zh_claim_txg);
}

/*
 * Return B_TRUE if the initial log block is not valid.
 */
static boolean_t
zil_empty(zilog_t *zilog)
{
	const zil_header_t *zh = zilog->zl_header;
	arc_buf_t *abuf = NULL;

	if (BP_IS_HOLE(&zh->zh_log))
		return (B_TRUE);

	if (zil_read_log_block(zilog, &zh->zh_log, &abuf) != 0)
		return (B_TRUE);

	VERIFY(arc_buf_remove_ref(abuf, &abuf) == 1);
	return (B_FALSE);
}

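/*
 * Claim the intent log for this dataset: record in the header whether
 * the log needs replay, then claim all its log blocks so they aren't
 * reallocated before we get a chance to replay them.
 */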
int
zil_claim(char *osname, void *txarg)
{
	dmu_tx_t *tx = txarg;
	uint64_t first_txg = dmu_tx_get_txg(tx);
	zilog_t *zilog;
	zil_header_t *zh;
	objset_t *os;
	int error;

	error = dmu_objset_open(osname, DMU_OST_ANY, DS_MODE_USER, &os);
	if (error) {
		cmn_err(CE_WARN, "can't open objset for %s", osname);
		return (0);
	}

	zilog = dmu_objset_zil(os);
	zh = zil_header_in_syncing_context(zilog);

	/*
	 * Record here whether the zil has any records to replay.
	 * If the header block pointer is null or the block points
	 * to the stubby then we know there are no valid log records.
	 * We use the header to store this state as the zilog gets
	 * freed later in dmu_objset_close().
	 * The flags (and the rest of the header fields) are cleared in
	 * zil_sync() as a result of a zil_destroy(), after replaying the log.
	 *
	 * Note, the intent log can be empty but still need the
	 * stubby to be claimed.
	 */
	if (!zil_empty(zilog))
		zh->zh_flags |= ZIL_REPLAY_NEEDED;

	/*
	 * Claim all log blocks if we haven't already done so, and remember
	 * the highest claimed sequence number.  This ensures that if we can
	 * read only part of the log now (e.g. due to a missing device),
	 * but we can read the entire log later, we will not try to replay
	 * or destroy beyond the last block we successfully claimed.
	 */
	ASSERT3U(zh->zh_claim_txg, <=, first_txg);
	if (zh->zh_claim_txg == 0 && !BP_IS_HOLE(&zh->zh_log)) {
		zh->zh_claim_txg = first_txg;
		zh->zh_claim_seq = zil_parse(zilog, zil_claim_log_block,
		    zil_claim_log_record, tx, first_txg);
		dsl_dataset_dirty(dmu_objset_ds(os), tx);
	}

	ASSERT3U(first_txg, ==, (spa_last_synced_txg(zilog->zl_spa) + 1));
	dmu_objset_close(os);
	return (0);
}

/*
 * Check the log by walking the log chain.
 * Checksum errors are ok as they indicate the end of the chain.
 * Any other error (no device or read failure) returns an error.
 */
/* ARGSUSED */
int
zil_check_log_chain(char *osname, void *txarg)
{
	zilog_t *zilog;
	zil_header_t *zh;
	blkptr_t blk;
	arc_buf_t *abuf;
	objset_t *os;
	char *lrbuf;
	zil_trailer_t *ztp;
	int error;

	error = dmu_objset_open(osname, DMU_OST_ANY, DS_MODE_USER, &os);
	if (error) {
		cmn_err(CE_WARN, "can't open objset for %s", osname);
		return (0);
	}

	zilog = dmu_objset_zil(os);
	zh = zil_header_in_syncing_context(zilog);
	blk = zh->zh_log;
	if (BP_IS_HOLE(&blk)) {
		dmu_objset_close(os);
		return (0); /* no chain */
	}

	for (;;) {
		error = zil_read_log_block(zilog, &blk, &abuf);
		if (error)
			break;
		lrbuf = abuf->b_data;
		ztp = (zil_trailer_t *)(lrbuf + BP_GET_LSIZE(&blk)) - 1;
		blk = ztp->zit_next_blk;
		VERIFY(arc_buf_remove_ref(abuf, &abuf) == 1);
	}
	dmu_objset_close(os);
	if (error == ECKSUM)
		return (0); /* normal end of chain */
	return (error);
}

/*
 * Clear a log chain
 */
/* ARGSUSED */
int
zil_clear_log_chain(char *osname, void *txarg)
{
	zilog_t *zilog;
	zil_header_t *zh;
	objset_t *os;
	dmu_tx_t *tx;
	int error;

	error = dmu_objset_open(osname, DMU_OST_ANY, DS_MODE_USER, &os);
	if (error) {
		cmn_err(CE_WARN, "can't open objset for %s", osname);
		return (0);
	}

	zilog = dmu_objset_zil(os);
	tx = dmu_tx_create(zilog->zl_os);
	(void) dmu_tx_assign(tx, TXG_WAIT);
	zh = zil_header_in_syncing_context(zilog);
	BP_ZERO(&zh->zh_log);
	dsl_dataset_dirty(dmu_objset_ds(os), tx);
	dmu_tx_commit(tx);
	dmu_objset_close(os);
	return (0);
}

static int
zil_vdev_compare(const void *x1, const void *x2)
{
	uint64_t v1 = ((zil_vdev_node_t *)x1)->zv_vdev;
	uint64_t v2 = ((zil_vdev_node_t *)x2)->zv_vdev;

	if (v1 < v2)
		return (-1);
	if (v1 > v2)
		return (1);

	return (0);
}

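/*
 * Remember which top-level vdevs this block was written to, so that
 * zil_flush_vdevs() can later flush their write caches.
 */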
void
zil_add_block(zilog_t *zilog, blkptr_t *bp)
{
	avl_tree_t *t = &zilog->zl_vdev_tree;
	avl_index_t where;
	zil_vdev_node_t *zv, zvsearch;
	int ndvas = BP_GET_NDVAS(bp);
	int i;

	if (zfs_nocacheflush)
		return;

	ASSERT(zilog->zl_writer);

	/*
	 * Even though we're zl_writer, we still need a lock because the
	 * zl_get_data() callbacks may have dmu_sync() done callbacks
	 * that will run concurrently.
	 */
	mutex_enter(&zilog->zl_vdev_lock);
	for (i = 0; i < ndvas; i++) {
		zvsearch.zv_vdev = DVA_GET_VDEV(&bp->blk_dva[i]);
		if (avl_find(t, &zvsearch, &where) == NULL) {
			zv = kmem_alloc(sizeof (*zv), KM_SLEEP);
			zv->zv_vdev = zvsearch.zv_vdev;
			avl_insert(t, zv, where);
		}
	}
	mutex_exit(&zilog->zl_vdev_lock);
}

void
zil_flush_vdevs(zilog_t *zilog)
{
	spa_t *spa = zilog->zl_spa;
	avl_tree_t *t = &zilog->zl_vdev_tree;
	void *cookie = NULL;
	zil_vdev_node_t *zv;
	zio_t *zio;

	ASSERT(zilog->zl_writer);

	/*
	 * We don't need zl_vdev_lock here because we're the zl_writer,
	 * and all zl_get_data() callbacks are done.
	 */
	if (avl_numnodes(t) == 0)
		return;

	spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);

	zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);

	while ((zv = avl_destroy_nodes(t, &cookie)) != NULL) {
		vdev_t *vd = vdev_lookup_top(spa, zv->zv_vdev);
		if (vd != NULL)
			zio_flush(zio, vd);
		kmem_free(zv, sizeof (*zv));
	}

	/*
	 * Wait for all the flushes to complete.  Not all devices actually
	 * support the DKIOCFLUSHWRITECACHE ioctl, so it's OK if it fails.
	 */
	(void) zio_wait(zio);

	spa_config_exit(spa, SCL_STATE, FTAG);
}

/*
 * Function called when a log block write completes
 */
static void
zil_lwb_write_done(zio_t *zio)
{
	lwb_t *lwb = zio->io_private;
	zilog_t *zilog = lwb->lwb_zilog;

	ASSERT(BP_GET_COMPRESS(zio->io_bp) == ZIO_COMPRESS_OFF);
	ASSERT(BP_GET_CHECKSUM(zio->io_bp) == ZIO_CHECKSUM_ZILOG);
	ASSERT(BP_GET_TYPE(zio->io_bp) == DMU_OT_INTENT_LOG);
	ASSERT(BP_GET_LEVEL(zio->io_bp) == 0);
	ASSERT(BP_GET_BYTEORDER(zio->io_bp) == ZFS_HOST_BYTEORDER);
	ASSERT(!BP_IS_GANG(zio->io_bp));
	ASSERT(!BP_IS_HOLE(zio->io_bp));
	ASSERT(zio->io_bp->blk_fill == 0);

	/*
	 * Now that we've written this log block, we have a stable pointer
	 * to the next block in the chain, so it's OK to let the txg in
	 * which we allocated the next block sync.
	 */
	txg_rele_to_sync(&lwb->lwb_txgh);

	zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
	mutex_enter(&zilog->zl_lock);
	lwb->lwb_buf = NULL;
	if (zio->io_error)
		zilog->zl_log_error = B_TRUE;
	mutex_exit(&zilog->zl_lock);
}

/*
 * Initialize the io for a log block.
 */
static void
zil_lwb_write_init(zilog_t *zilog, lwb_t *lwb)
{
	zbookmark_t zb;

	zb.zb_objset = lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_OBJSET];
	zb.zb_object = 0;
	zb.zb_level = -1;
	zb.zb_blkid = lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_SEQ];

	if (zilog->zl_root_zio == NULL) {
		zilog->zl_root_zio = zio_root(zilog->zl_spa, NULL, NULL,
		    ZIO_FLAG_CANFAIL);
	}
	if (lwb->lwb_zio == NULL) {
		lwb->lwb_zio = zio_rewrite(zilog->zl_root_zio, zilog->zl_spa,
		    0, &lwb->lwb_blk, lwb->lwb_buf,
		    lwb->lwb_sz, zil_lwb_write_done, lwb,
		    ZIO_PRIORITY_LOG_WRITE, ZIO_FLAG_CANFAIL, &zb);
	}
}

/*
 * Start a log block write and advance to the next log block.
 * Calls are serialized.
 */
static lwb_t *
zil_lwb_write_start(zilog_t *zilog, lwb_t *lwb)
{
	lwb_t *nlwb;
	zil_trailer_t *ztp = (zil_trailer_t *)(lwb->lwb_buf + lwb->lwb_sz) - 1;
	spa_t *spa = zilog->zl_spa;
	blkptr_t *bp = &ztp->zit_next_blk;
	uint64_t txg;
	uint64_t zil_blksz;
	int error;

	ASSERT(lwb->lwb_nused <= ZIL_BLK_DATA_SZ(lwb));

	/*
	 * Allocate the next block and save its address in this block
	 * before writing it in order to establish the log chain.
	 * Note that if the allocation of nlwb synced before we wrote
	 * the block that points at it (lwb), we'd leak it if we crashed.
	 * Therefore, we don't do txg_rele_to_sync() until zil_lwb_write_done().
	 */
	txg = txg_hold_open(zilog->zl_dmu_pool, &lwb->lwb_txgh);
	txg_rele_to_quiesce(&lwb->lwb_txgh);

	/*
	 * Pick a ZIL blocksize. We request a size that is the
	 * maximum of the previous used size, the current used size and
	 * the amount waiting in the queue.
	 */
	zil_blksz = MAX(zilog->zl_prev_used,
	    zilog->zl_cur_used + sizeof (*ztp));
	zil_blksz = MAX(zil_blksz, zilog->zl_itx_list_sz + sizeof (*ztp));
	zil_blksz = P2ROUNDUP_TYPED(zil_blksz, ZIL_MIN_BLKSZ, uint64_t);
	if (zil_blksz > ZIL_MAX_BLKSZ)
		zil_blksz = ZIL_MAX_BLKSZ;

	BP_ZERO(bp);
	/* pass the old blkptr in order to spread log blocks across devs */
	error = zio_alloc_blk(spa, zil_blksz, bp, &lwb->lwb_blk, txg);
	if (error) {
		dmu_tx_t *tx = dmu_tx_create_assigned(zilog->zl_dmu_pool, txg);

		/*
		 * We dirty the dataset to ensure that zil_sync() will
		 * be called to remove this lwb from our zl_lwb_list.
		 * Failing to do so may leave an lwb with a NULL lwb_buf
		 * hanging around on the zl_lwb_list.
		 */
		dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
		dmu_tx_commit(tx);

		/*
		 * We've just experienced an allocation failure, so we
		 * terminate the current lwb and send it on its way.
		 */
		ztp->zit_pad = 0;
		ztp->zit_nused = lwb->lwb_nused;
		ztp->zit_bt.zbt_cksum = lwb->lwb_blk.blk_cksum;
		zio_nowait(lwb->lwb_zio);

		/*
		 * By returning NULL the caller will call txg_wait_synced().
		 */
		return (NULL);
	}

	ASSERT3U(bp->blk_birth, ==, txg);
	ztp->zit_pad = 0;
	ztp->zit_nused = lwb->lwb_nused;
	ztp->zit_bt.zbt_cksum = lwb->lwb_blk.blk_cksum;
	bp->blk_cksum = lwb->lwb_blk.blk_cksum;
	bp->blk_cksum.zc_word[ZIL_ZC_SEQ]++;

	/*
	 * Allocate a new log write buffer (lwb).
	 */
	nlwb = kmem_cache_alloc(zil_lwb_cache, KM_SLEEP);

	nlwb->lwb_zilog = zilog;
	nlwb->lwb_blk = *bp;
	nlwb->lwb_nused = 0;
	nlwb->lwb_sz = BP_GET_LSIZE(&nlwb->lwb_blk);
	nlwb->lwb_buf = zio_buf_alloc(nlwb->lwb_sz);
	nlwb->lwb_max_txg = txg;
	nlwb->lwb_zio = NULL;

	/*
	 * Put new lwb at the end of the log chain
	 */
	mutex_enter(&zilog->zl_lock);
	list_insert_tail(&zilog->zl_lwb_list, nlwb);
	mutex_exit(&zilog->zl_lock);

	/* Record the block for later vdev flushing */
	zil_add_block(zilog, &lwb->lwb_blk);

	/*
	 * kick off the write for the old log block
	 */
	dprintf_bp(&lwb->lwb_blk, "lwb %p txg %llu: ", lwb, txg);
	ASSERT(lwb->lwb_zio);
	zio_nowait(lwb->lwb_zio);

	return (nlwb);
}

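/*
 * Copy one intent log record into the current log write buffer,
 * starting a new buffer if the record doesn't fit.  For TX_WRITE
 * records the data is fetched, or its block pointer filled in,
 * via the zl_get_data callback.
 */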
static lwb_t *
zil_lwb_commit(zilog_t *zilog, itx_t *itx, lwb_t *lwb)
{
	lr_t *lrc = &itx->itx_lr; /* common log record */
	lr_write_t *lr = (lr_write_t *)lrc;
	uint64_t txg = lrc->lrc_txg;
	uint64_t reclen = lrc->lrc_reclen;
	uint64_t dlen;

	if (lwb == NULL)
		return (NULL);
	ASSERT(lwb->lwb_buf != NULL);

	if (lrc->lrc_txtype == TX_WRITE && itx->itx_wr_state == WR_NEED_COPY)
		dlen = P2ROUNDUP_TYPED(
		    lr->lr_length, sizeof (uint64_t), uint64_t);
	else
		dlen = 0;

	zilog->zl_cur_used += (reclen + dlen);

	zil_lwb_write_init(zilog, lwb);

	/*
	 * If this record won't fit in the current log block, start a new one.
	 */
	if (lwb->lwb_nused + reclen + dlen > ZIL_BLK_DATA_SZ(lwb)) {
		lwb = zil_lwb_write_start(zilog, lwb);
		if (lwb == NULL)
			return (NULL);
		zil_lwb_write_init(zilog, lwb);
		ASSERT(lwb->lwb_nused == 0);
		if (reclen + dlen > ZIL_BLK_DATA_SZ(lwb)) {
			txg_wait_synced(zilog->zl_dmu_pool, txg);
			return (lwb);
		}
	}

	/*
	 * Update lrc_seq to be the log record sequence number (see zil.h),
	 * then copy the record to the log buffer.
	 */
	lrc->lrc_seq = ++zilog->zl_lr_seq; /* we are single threaded */
	bcopy(lrc, lwb->lwb_buf + lwb->lwb_nused, reclen);

	/*
	 * If it's a write, fetch the data or get its blkptr as appropriate.
	 */
	if (lrc->lrc_txtype == TX_WRITE) {
		if (txg > spa_freeze_txg(zilog->zl_spa))
			txg_wait_synced(zilog->zl_dmu_pool, txg);
		if (itx->itx_wr_state != WR_COPIED) {
			char *dbuf;
			int error;

			/* alignment is guaranteed */
			lr = (lr_write_t *)(lwb->lwb_buf + lwb->lwb_nused);
			if (dlen) {
				ASSERT(itx->itx_wr_state == WR_NEED_COPY);
				dbuf = lwb->lwb_buf + lwb->lwb_nused + reclen;
				lr->lr_common.lrc_reclen += dlen;
			} else {
				ASSERT(itx->itx_wr_state == WR_INDIRECT);
				dbuf = NULL;
			}
			error = zilog->zl_get_data(
			    itx->itx_private, lr, dbuf, lwb->lwb_zio);
			if (error) {
				ASSERT(error == ENOENT || error == EEXIST ||
				    error == EALREADY);
				return (lwb);
			}
		}
	}

	lwb->lwb_nused += reclen + dlen;
	lwb->lwb_max_txg = MAX(lwb->lwb_max_txg, txg);
	ASSERT3U(lwb->lwb_nused, <=, ZIL_BLK_DATA_SZ(lwb));
	ASSERT3U(P2PHASE(lwb->lwb_nused, sizeof (uint64_t)), ==, 0);

	return (lwb);
}

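/*
 * Allocate an in-memory intent log transaction large enough for a
 * log record of the requested size, rounded up to a multiple of
 * 8 bytes.
 */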
itx_t *
zil_itx_create(uint64_t txtype, size_t lrsize)
{
	itx_t *itx;

	lrsize = P2ROUNDUP_TYPED(lrsize, sizeof (uint64_t), size_t);

	itx = kmem_alloc(offsetof(itx_t, itx_lr) + lrsize, KM_SLEEP);
	itx->itx_lr.lrc_txtype = txtype;
	itx->itx_lr.lrc_reclen = lrsize;
	itx->itx_sod = lrsize; /* if write & WR_NEED_COPY will be increased */
	itx->itx_lr.lrc_seq = 0;	/* defensive */

	return (itx);
}

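/*
 * Append an itx to the in-memory list, stamping it with its txg and
 * the next itx sequence number.  Return the assigned sequence number,
 * which may later be passed to zil_commit().
 */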
uint64_t
zil_itx_assign(zilog_t *zilog, itx_t *itx, dmu_tx_t *tx)
{
	uint64_t seq;

	ASSERT(itx->itx_lr.lrc_seq == 0);

	mutex_enter(&zilog->zl_lock);
	list_insert_tail(&zilog->zl_itx_list, itx);
	zilog->zl_itx_list_sz += itx->itx_sod;
	itx->itx_lr.lrc_txg = dmu_tx_get_txg(tx);
	itx->itx_lr.lrc_seq = seq = ++zilog->zl_itx_seq;
	mutex_exit(&zilog->zl_lock);

	return (seq);
}

/*
 * Free up all in-memory intent log transactions that have now been synced.
 */
static void
zil_itx_clean(zilog_t *zilog)
{
	uint64_t synced_txg = spa_last_synced_txg(zilog->zl_spa);
	uint64_t freeze_txg = spa_freeze_txg(zilog->zl_spa);
	list_t clean_list;
	itx_t *itx;

	list_create(&clean_list, sizeof (itx_t), offsetof(itx_t, itx_node));

	mutex_enter(&zilog->zl_lock);
	/* wait for a log writer to finish walking list */
	while (zilog->zl_writer) {
		cv_wait(&zilog->zl_cv_writer, &zilog->zl_lock);
	}

	/*
	 * Move the sync'd log transactions to a separate list so we can call
	 * kmem_free without holding the zl_lock.
	 *
	 * There is no need to set zl_writer as we don't drop zl_lock here
	 */
	while ((itx = list_head(&zilog->zl_itx_list)) != NULL &&
	    itx->itx_lr.lrc_txg <= MIN(synced_txg, freeze_txg)) {
		list_remove(&zilog->zl_itx_list, itx);
		zilog->zl_itx_list_sz -= itx->itx_sod;
		list_insert_tail(&clean_list, itx);
	}
	cv_broadcast(&zilog->zl_cv_writer);
	mutex_exit(&zilog->zl_lock);

	/* destroy sync'd log transactions */
	while ((itx = list_head(&clean_list)) != NULL) {
		list_remove(&clean_list, itx);
		kmem_free(itx, offsetof(itx_t, itx_lr)
		    + itx->itx_lr.lrc_reclen);
	}
	list_destroy(&clean_list);
}

/*
 * If there are any in-memory intent log transactions which have now been
 * synced then start up a taskq to free them.
 */
void
zil_clean(zilog_t *zilog)
{
	itx_t *itx;

	mutex_enter(&zilog->zl_lock);
	itx = list_head(&zilog->zl_itx_list);
	if ((itx != NULL) &&
	    (itx->itx_lr.lrc_txg <= spa_last_synced_txg(zilog->zl_spa))) {
		(void) taskq_dispatch(zilog->zl_clean_taskq,
		    (task_func_t *)zil_itx_clean, zilog, TQ_SLEEP);
	}
	mutex_exit(&zilog->zl_lock);
}

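/*
 * Called with zl_lock held (it is dropped and reacquired internally).
 * Walks the itx list, fills and issues log blocks, and waits for them
 * to reach stable storage before recording the commit sequence number.
 */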
static void
zil_commit_writer(zilog_t *zilog, uint64_t seq, uint64_t foid)
{
	uint64_t txg;
	uint64_t commit_seq = 0;
	itx_t *itx, *itx_next = (itx_t *)-1;
	lwb_t *lwb;
	spa_t *spa;

	zilog->zl_writer = B_TRUE;
	ASSERT(zilog->zl_root_zio == NULL);
	spa = zilog->zl_spa;

	if (zilog->zl_suspend) {
		lwb = NULL;
	} else {
		lwb = list_tail(&zilog->zl_lwb_list);
		if (lwb == NULL) {
			/*
			 * Return if there's nothing to flush before we
			 * dirty the fs by calling zil_create()
			 */
			if (list_is_empty(&zilog->zl_itx_list)) {
				zilog->zl_writer = B_FALSE;
				return;
			}
			mutex_exit(&zilog->zl_lock);
			zil_create(zilog);
			mutex_enter(&zilog->zl_lock);
			lwb = list_tail(&zilog->zl_lwb_list);
		}
	}

	/* Loop through in-memory log transactions filling log blocks. */
	DTRACE_PROBE1(zil__cw1, zilog_t *, zilog);
	for (;;) {
		/*
		 * Find the next itx to push:
		 * Push all transactions related to specified foid and all
		 * other transactions except TX_WRITE, TX_TRUNCATE,
		 * TX_SETATTR and TX_ACL for all other files.
		 */
		if (itx_next != (itx_t *)-1)
			itx = itx_next;
		else
			itx = list_head(&zilog->zl_itx_list);
		for (; itx != NULL; itx = list_next(&zilog->zl_itx_list, itx)) {
			if (foid == 0) /* push all foids? */
				break;
			if (itx->itx_sync) /* push all O_[D]SYNC */
				break;
			switch (itx->itx_lr.lrc_txtype) {
			case TX_SETATTR:
			case TX_WRITE:
			case TX_TRUNCATE:
			case TX_ACL:
				/* lr_foid is same offset for these records */
				if (((lr_write_t *)&itx->itx_lr)->lr_foid
				    != foid) {
					continue; /* skip this record */
				}
			}
			break;
		}
		if (itx == NULL)
			break;

		if ((itx->itx_lr.lrc_seq > seq) &&
		    ((lwb == NULL) || (lwb->lwb_nused == 0) ||
		    (lwb->lwb_nused + itx->itx_sod > ZIL_BLK_DATA_SZ(lwb)))) {
			break;
		}

		/*
		 * Save the next pointer.  Even though we soon drop
		 * zl_lock all threads that may change the list
		 * (another writer or zil_itx_clean) can't do so until
		 * they have zl_writer.
		 */
		itx_next = list_next(&zilog->zl_itx_list, itx);
		list_remove(&zilog->zl_itx_list, itx);
		zilog->zl_itx_list_sz -= itx->itx_sod;
		mutex_exit(&zilog->zl_lock);
		txg = itx->itx_lr.lrc_txg;
		ASSERT(txg);

		if (txg > spa_last_synced_txg(spa) ||
		    txg > spa_freeze_txg(spa))
			lwb = zil_lwb_commit(zilog, itx, lwb);
		kmem_free(itx, offsetof(itx_t, itx_lr)
		    + itx->itx_lr.lrc_reclen);
		mutex_enter(&zilog->zl_lock);
	}
	DTRACE_PROBE1(zil__cw2, zilog_t *, zilog);
	/* determine commit sequence number */
	itx = list_head(&zilog->zl_itx_list);
	if (itx)
		commit_seq = itx->itx_lr.lrc_seq;
	else
		commit_seq = zilog->zl_itx_seq;
	mutex_exit(&zilog->zl_lock);

	/* write the last block out */
	if (lwb != NULL && lwb->lwb_zio != NULL)
		lwb = zil_lwb_write_start(zilog, lwb);

	zilog->zl_prev_used = zilog->zl_cur_used;
	zilog->zl_cur_used = 0;

	/*
	 * Wait if necessary for the log blocks to be on stable storage.
	 */
	if (zilog->zl_root_zio) {
		DTRACE_PROBE1(zil__cw3, zilog_t *, zilog);
		(void) zio_wait(zilog->zl_root_zio);
		zilog->zl_root_zio = NULL;
		DTRACE_PROBE1(zil__cw4, zilog_t *, zilog);
		zil_flush_vdevs(zilog);
	}

	if (zilog->zl_log_error || lwb == NULL) {
		zilog->zl_log_error = 0;
		txg_wait_synced(zilog->zl_dmu_pool, 0);
	}

	mutex_enter(&zilog->zl_lock);
	zilog->zl_writer = B_FALSE;

	ASSERT3U(commit_seq, >=, zilog->zl_commit_seq);
	zilog->zl_commit_seq = commit_seq;
}

/*
 * Push zfs transactions to stable storage up to the supplied sequence number.
 * If foid is 0, push out all transactions; otherwise push only those
 * for that file or that might have been used to create that file.
 */
void
zil_commit(zilog_t *zilog, uint64_t seq, uint64_t foid)
{
	if (zilog == NULL || seq == 0)
		return;

	mutex_enter(&zilog->zl_lock);

	seq = MIN(seq, zilog->zl_itx_seq);	/* cap seq at largest itx seq */

	while (zilog->zl_writer) {
		cv_wait(&zilog->zl_cv_writer, &zilog->zl_lock);
		if (seq < zilog->zl_commit_seq) {
			mutex_exit(&zilog->zl_lock);
			return;
		}
	}
	zil_commit_writer(zilog, seq, foid); /* drops zl_lock */
	/* wake up others waiting on the commit */
	cv_broadcast(&zilog->zl_cv_writer);
	mutex_exit(&zilog->zl_lock);
}

/*
 * Called in syncing context to free committed log blocks and update log header.
 */
void
zil_sync(zilog_t *zilog, dmu_tx_t *tx)
{
	zil_header_t *zh = zil_header_in_syncing_context(zilog);
	uint64_t txg = dmu_tx_get_txg(tx);
	spa_t *spa = zilog->zl_spa;
	lwb_t *lwb;

	mutex_enter(&zilog->zl_lock);

	ASSERT(zilog->zl_stop_sync == 0);

	zh->zh_replay_seq = zilog->zl_replay_seq[txg & TXG_MASK];

	if (zilog->zl_destroy_txg == txg) {
		blkptr_t blk = zh->zh_log;

		ASSERT(list_head(&zilog->zl_lwb_list) == NULL);
		ASSERT(spa_sync_pass(spa) == 1);

		bzero(zh, sizeof (zil_header_t));
		bzero(zilog->zl_replay_seq, sizeof (zilog->zl_replay_seq));

		if (zilog->zl_keep_first) {
			/*
			 * If this block was part of a log chain that couldn't
			 * be claimed because a device was missing during
			 * zil_claim(), but that device later returns,
			 * then this block could erroneously appear valid.
			 * To guard against this, assign a new GUID to the new
			 * log chain so it doesn't matter what blk points to.
			 */
			zil_init_log_chain(zilog, &blk);
			zh->zh_log = blk;
		}
	}

	for (;;) {
		lwb = list_head(&zilog->zl_lwb_list);
		if (lwb == NULL) {
			mutex_exit(&zilog->zl_lock);
			return;
		}
		zh->zh_log = lwb->lwb_blk;
		if (lwb->lwb_buf != NULL || lwb->lwb_max_txg > txg)
			break;
		list_remove(&zilog->zl_lwb_list, lwb);
		zio_free_blk(spa, &lwb->lwb_blk, txg);
		kmem_cache_free(zil_lwb_cache, lwb);

		/*
		 * If we don't have anything left in the lwb list then
		 * we've had an allocation failure and we need to zero
		 * out the zil_header blkptr so that we don't end
		 * up freeing the same block twice.
		 */
		if (list_head(&zilog->zl_lwb_list) == NULL)
			BP_ZERO(&zh->zh_log);
	}
	mutex_exit(&zilog->zl_lock);
}

void
zil_init(void)
{
	zil_lwb_cache = kmem_cache_create("zil_lwb_cache",
	    sizeof (struct lwb), 0, NULL, NULL, NULL, NULL, NULL, 0);
}

void
zil_fini(void)
{
	kmem_cache_destroy(zil_lwb_cache);
}

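/*
 * Allocate and initialize the in-memory state for a dataset's ZIL,
 * pointing it at the on-disk header supplied by the objset layer.
 */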
zilog_t *
zil_alloc(objset_t *os, zil_header_t *zh_phys)
{
	zilog_t *zilog;

	zilog = kmem_zalloc(sizeof (zilog_t), KM_SLEEP);

	zilog->zl_header = zh_phys;
	zilog->zl_os = os;
	zilog->zl_spa = dmu_objset_spa(os);
	zilog->zl_dmu_pool = dmu_objset_pool(os);
	zilog->zl_destroy_txg = TXG_INITIAL - 1;

	mutex_init(&zilog->zl_lock, NULL, MUTEX_DEFAULT, NULL);

	list_create(&zilog->zl_itx_list, sizeof (itx_t),
	    offsetof(itx_t, itx_node));

	list_create(&zilog->zl_lwb_list, sizeof (lwb_t),
	    offsetof(lwb_t, lwb_node));

	mutex_init(&zilog->zl_vdev_lock, NULL, MUTEX_DEFAULT, NULL);

	avl_create(&zilog->zl_vdev_tree, zil_vdev_compare,
	    sizeof (zil_vdev_node_t), offsetof(zil_vdev_node_t, zv_node));

	cv_init(&zilog->zl_cv_writer, NULL, CV_DEFAULT, NULL);
	cv_init(&zilog->zl_cv_suspend, NULL, CV_DEFAULT, NULL);

	return (zilog);
}

void
zil_free(zilog_t *zilog)
{
	lwb_t *lwb;

	zilog->zl_stop_sync = 1;

	while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
		list_remove(&zilog->zl_lwb_list, lwb);
		if (lwb->lwb_buf != NULL)
			zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
		kmem_cache_free(zil_lwb_cache, lwb);
	}
	list_destroy(&zilog->zl_lwb_list);

	avl_destroy(&zilog->zl_vdev_tree);
	mutex_destroy(&zilog->zl_vdev_lock);

	ASSERT(list_head(&zilog->zl_itx_list) == NULL);
	list_destroy(&zilog->zl_itx_list);
	mutex_destroy(&zilog->zl_lock);

	cv_destroy(&zilog->zl_cv_writer);
	cv_destroy(&zilog->zl_cv_suspend);

	kmem_free(zilog, sizeof (zilog_t));
}

1381168404Spjd/*
1382168404Spjd * Open an intent log.
1383168404Spjd */
1384168404Spjdzilog_t *
1385168404Spjdzil_open(objset_t *os, zil_get_data_t *get_data)
1386168404Spjd{
1387168404Spjd	zilog_t *zilog = dmu_objset_zil(os);
1388168404Spjd
1389168404Spjd	zilog->zl_get_data = get_data;
1390168404Spjd	zilog->zl_clean_taskq = taskq_create("zil_clean", 1, minclsyspri,
1391168404Spjd	    2, 2, TASKQ_PREPOPULATE);
1392168404Spjd
1393168404Spjd	return (zilog);
1394168404Spjd}
1395168404Spjd
1396168404Spjd/*
1397168404Spjd * Close an intent log.
1398168404Spjd */
1399168404Spjdvoid
1400168404Spjdzil_close(zilog_t *zilog)
1401168404Spjd{
1402168404Spjd	/*
1403168404Spjd	 * If the log isn't already committed, mark the objset dirty
1404168404Spjd	 * (so zil_sync() will be called) and wait for that txg to sync.
1405168404Spjd	 */
1406168404Spjd	if (!zil_is_committed(zilog)) {
1407168404Spjd		uint64_t txg;
1408168404Spjd		dmu_tx_t *tx = dmu_tx_create(zilog->zl_os);
1409168404Spjd		(void) dmu_tx_assign(tx, TXG_WAIT);
1410168404Spjd		dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
1411168404Spjd		txg = dmu_tx_get_txg(tx);
1412168404Spjd		dmu_tx_commit(tx);
1413168404Spjd		txg_wait_synced(zilog->zl_dmu_pool, txg);
1414168404Spjd	}
1415168404Spjd
1416168404Spjd	taskq_destroy(zilog->zl_clean_taskq);
1417168404Spjd	zilog->zl_clean_taskq = NULL;
1418168404Spjd	zilog->zl_get_data = NULL;
1419168404Spjd
1420168404Spjd	zil_itx_clean(zilog);
1421168404Spjd	ASSERT(list_head(&zilog->zl_itx_list) == NULL);
1422168404Spjd}

/*
 * Suspend an intent log.  While in suspended mode, we still honor
 * synchronous semantics, but we rely on txg_wait_synced() to do it.
 * We suspend the log briefly when taking a snapshot so that the snapshot
 * contains all the data it's supposed to, and has an empty intent log.
 */
int
zil_suspend(zilog_t *zilog)
{
	const zil_header_t *zh = zilog->zl_header;

	mutex_enter(&zilog->zl_lock);
	if (zh->zh_flags & ZIL_REPLAY_NEEDED) {		/* unplayed log */
		mutex_exit(&zilog->zl_lock);
		return (EBUSY);
	}
	if (zilog->zl_suspend++ != 0) {
		/*
		 * Someone else already began a suspend.
		 * Just wait for them to finish.
		 */
		while (zilog->zl_suspending)
			cv_wait(&zilog->zl_cv_suspend, &zilog->zl_lock);
		mutex_exit(&zilog->zl_lock);
		return (0);
	}
	zilog->zl_suspending = B_TRUE;
	mutex_exit(&zilog->zl_lock);

	zil_commit(zilog, UINT64_MAX, 0);

	/*
	 * Wait for any in-flight log writes to complete.
	 */
	mutex_enter(&zilog->zl_lock);
	while (zilog->zl_writer)
		cv_wait(&zilog->zl_cv_writer, &zilog->zl_lock);
	mutex_exit(&zilog->zl_lock);

	zil_destroy(zilog, B_FALSE);

	mutex_enter(&zilog->zl_lock);
	zilog->zl_suspending = B_FALSE;
	cv_broadcast(&zilog->zl_cv_suspend);
	mutex_exit(&zilog->zl_lock);

	return (0);
}

void
zil_resume(zilog_t *zilog)
{
	mutex_enter(&zilog->zl_lock);
	ASSERT(zilog->zl_suspend != 0);
	zilog->zl_suspend--;
	mutex_exit(&zilog->zl_lock);
}
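
/*
 * Suspend/resume sketch (illustrative, not upstream code): a snapshot
 * path might bracket its work as below; on success zil_suspend() has
 * committed all pending records and destroyed the on-disk log.
 *
 *	if (zil_suspend(zilog) == 0) {
 *		... take the snapshot ...
 *		zil_resume(zilog);
 *	}
 */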

typedef struct zil_replay_arg {
	objset_t	*zr_os;
	zil_replay_func_t **zr_replay;
	zil_replay_cleaner_t *zr_replay_cleaner;
	void		*zr_arg;
	uint64_t	*zr_txgp;
	boolean_t	zr_byteswap;
	char		*zr_lrbuf;
} zil_replay_arg_t;

static void
zil_replay_log_record(zilog_t *zilog, lr_t *lr, void *zra, uint64_t claim_txg)
{
	zil_replay_arg_t *zr = zra;
	const zil_header_t *zh = zilog->zl_header;
	uint64_t reclen = lr->lrc_reclen;
	uint64_t txtype = lr->lrc_txtype;
	char *name;
	int pass, error, sunk;

	if (zilog->zl_stop_replay)
		return;

	if (lr->lrc_txg < claim_txg)		/* already committed */
		return;

	if (lr->lrc_seq <= zh->zh_replay_seq)	/* already replayed */
		return;

	/* Strip case-insensitive bit, still present in log record */
	txtype &= ~TX_CI;

	/*
	 * Make a copy of the data so we can revise and extend it.
	 */
	bcopy(lr, zr->zr_lrbuf, reclen);

	/*
	 * The log block containing this lr may have been byteswapped
	 * so that we can easily examine common fields like lrc_txtype.
	 * However, the log is a mix of different data types, and only the
	 * replay vectors know how to byteswap their records.  Therefore, if
	 * the lr was byteswapped, undo it before invoking the replay vector.
	 */
	if (zr->zr_byteswap)
		byteswap_uint64_array(zr->zr_lrbuf, reclen);

	/*
	 * If this is a TX_WRITE with a blkptr, suck in the data.
	 */
	if (txtype == TX_WRITE && reclen == sizeof (lr_write_t)) {
		lr_write_t *lrw = (lr_write_t *)lr;
		blkptr_t *wbp = &lrw->lr_blkptr;
		uint64_t wlen = lrw->lr_length;
		char *wbuf = zr->zr_lrbuf + reclen;

		if (BP_IS_HOLE(wbp)) {	/* compressed to a hole */
			bzero(wbuf, wlen);
		} else {
			/*
			 * A subsequent write may have overwritten this block,
			 * in which case wbp may have been freed and
			 * reallocated, and our read of wbp may fail with a
			 * checksum error.  We can safely ignore this because
			 * the later write will provide the correct data.
			 */
			zbookmark_t zb;

			zb.zb_objset = dmu_objset_id(zilog->zl_os);
			zb.zb_object = lrw->lr_foid;
			zb.zb_level = -1;
			zb.zb_blkid = lrw->lr_offset / BP_GET_LSIZE(wbp);

			(void) zio_wait(zio_read(NULL, zilog->zl_spa,
			    wbp, wbuf, BP_GET_LSIZE(wbp), NULL, NULL,
			    ZIO_PRIORITY_SYNC_READ,
			    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE, &zb));
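			/*
			 * The record's data begins lr_blkoff bytes into the
			 * log block just read; move it to the front of wbuf.
			 */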
			(void) memmove(wbuf, wbuf + lrw->lr_blkoff, wlen);
		}
	}

	/*
	 * We must now do two things atomically: replay this log record,
	 * and update the log header to reflect the fact that we did so.
	 * We use the DMU's ability to assign into a specific txg to do this.
	 */
	for (pass = 1, sunk = B_FALSE; /* CONSTANTCONDITION */; pass++) {
		uint64_t replay_txg;
		dmu_tx_t *replay_tx;

		replay_tx = dmu_tx_create(zr->zr_os);
		error = dmu_tx_assign(replay_tx, TXG_WAIT);
		if (error) {
			dmu_tx_abort(replay_tx);
			break;
		}

		replay_txg = dmu_tx_get_txg(replay_tx);

		if (txtype == 0 || txtype >= TX_MAX_TYPE) {
			error = EINVAL;
		} else {
			/*
			 * On the first pass, arrange for the replay vector
			 * to fail its dmu_tx_assign().  That's the only way
			 * to ensure that those code paths remain well tested.
			 *
			 * Only byteswap (if needed) on the 1st pass.
			 */
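			/*
			 * On pass 1 we hand the vector replay_txg - 1, a txg
			 * that has already closed, so its own dmu_tx_assign()
			 * cannot succeed and its error path gets exercised.
			 */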
			*zr->zr_txgp = replay_txg - (pass == 1);
			error = zr->zr_replay[txtype](zr->zr_arg, zr->zr_lrbuf,
			    zr->zr_byteswap && pass == 1);
			*zr->zr_txgp = TXG_NOWAIT;
		}

		if (error == 0) {
			dsl_dataset_dirty(dmu_objset_ds(zr->zr_os), replay_tx);
			zilog->zl_replay_seq[replay_txg & TXG_MASK] =
			    lr->lrc_seq;
		}

		dmu_tx_commit(replay_tx);

		if (!error)
			return;

		/*
		 * The DMU's dnode layer doesn't see removes until the txg
		 * commits, so a subsequent claim can spuriously fail with
		 * EEXIST.  To cope, on any error other than ERESTART we
		 * sync out any pending removes and then retry the
		 * transaction.
		 */
		if (error != ERESTART && !sunk) {
			if (zr->zr_replay_cleaner)
				zr->zr_replay_cleaner(zr->zr_arg);
			txg_wait_synced(spa_get_dsl(zilog->zl_spa), 0);
			sunk = B_TRUE;
			continue; /* retry */
		}

		if (error != ERESTART)
			break;

		if (pass != 1)
			txg_wait_open(spa_get_dsl(zilog->zl_spa),
			    replay_txg + 1);

		dprintf("pass %d, retrying\n", pass);
	}

	ASSERT(error && error != ERESTART);
	name = kmem_alloc(MAXNAMELEN, KM_SLEEP);
	dmu_objset_name(zr->zr_os, name);
	cmn_err(CE_WARN, "ZFS replay transaction error %d, "
	    "dataset %s, seq 0x%llx, txtype %llu %s\n",
	    error, name, (u_longlong_t)lr->lrc_seq, (u_longlong_t)txtype,
	    (lr->lrc_txtype & TX_CI) ? "CI" : "");
	zilog->zl_stop_replay = 1;
	kmem_free(name, MAXNAMELEN);
}

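/*
 * zil_parse() block callback: counts the log blocks seen while
 * replaying the log.
 */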
/* ARGSUSED */
static void
zil_incr_blks(zilog_t *zilog, blkptr_t *bp, void *arg, uint64_t claim_txg)
{
	zilog->zl_replay_blks++;
}

/*
 * If this dataset has a non-empty intent log, replay it and destroy it.
 */
void
zil_replay(objset_t *os, void *arg, uint64_t *txgp,
	zil_replay_func_t *replay_func[TX_MAX_TYPE],
	zil_replay_cleaner_t *replay_cleaner)
{
	zilog_t *zilog = dmu_objset_zil(os);
	const zil_header_t *zh = zilog->zl_header;
	zil_replay_arg_t zr;

	if ((zh->zh_flags & ZIL_REPLAY_NEEDED) == 0) {
		zil_destroy(zilog, B_TRUE);
		return;
	}
	//printf("ZFS: Replaying ZIL on %s...\n", os->os->os_spa->spa_name);

	zr.zr_os = os;
	zr.zr_replay = replay_func;
	zr.zr_replay_cleaner = replay_cleaner;
	zr.zr_arg = arg;
	zr.zr_txgp = txgp;
	zr.zr_byteswap = BP_SHOULD_BYTESWAP(&zh->zh_log);
	zr.zr_lrbuf = kmem_alloc(2 * SPA_MAXBLOCKSIZE, KM_SLEEP);

	/*
	 * Wait for in-progress removes to sync before starting replay.
	 */
	txg_wait_synced(zilog->zl_dmu_pool, 0);

	zilog->zl_stop_replay = 0;
	zilog->zl_replay_time = LBOLT;
	ASSERT(zilog->zl_replay_blks == 0);
	(void) zil_parse(zilog, zil_incr_blks, zil_replay_log_record, &zr,
	    zh->zh_claim_txg);
	kmem_free(zr.zr_lrbuf, 2 * SPA_MAXBLOCKSIZE);

	zil_destroy(zilog, B_FALSE);
	txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);
	//printf("ZFS: Replay of ZIL on %s finished.\n", os->os->os_spa->spa_name);
}
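
/*
 * Call sketch (illustrative; the ZPL names below are assumptions, not
 * taken from this file): a mount path hands zil_replay() its per-txtype
 * vector table roughly like so:
 *
 *	zil_replay(zfsvfs->z_os, zfsvfs, &zfsvfs->z_assign,
 *	    zfs_replay_vector, zfs_replay_cleaner);
 */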

/*
 * Report whether all transactions are committed
 */
int
zil_is_committed(zilog_t *zilog)
{
	lwb_t *lwb;
	int ret;

	mutex_enter(&zilog->zl_lock);
	while (zilog->zl_writer)
		cv_wait(&zilog->zl_cv_writer, &zilog->zl_lock);

	/* recent unpushed intent log transactions? */
	if (!list_is_empty(&zilog->zl_itx_list)) {
		ret = B_FALSE;
		goto out;
	}

	/* intent log never used? */
	lwb = list_head(&zilog->zl_lwb_list);
	if (lwb == NULL) {
		ret = B_TRUE;
		goto out;
	}

	/*
	 * more than 1 log buffer means zil_sync() hasn't yet freed
	 * entries after a txg has committed
	 */
	if (list_next(&zilog->zl_lwb_list, lwb)) {
		ret = B_FALSE;
		goto out;
	}

	ASSERT(zil_empty(zilog));
	ret = B_TRUE;
out:
	cv_broadcast(&zilog->zl_cv_writer);
	mutex_exit(&zilog->zl_lock);
	return (ret);
}
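
/*
 * Note: zil_close() above consults zil_is_committed() to decide whether
 * a final txg sync is required before the log is torn down.
 */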