zil.c revision 308595
1168404Spjd/* 2168404Spjd * CDDL HEADER START 3168404Spjd * 4168404Spjd * The contents of this file are subject to the terms of the 5168404Spjd * Common Development and Distribution License (the "License"). 6168404Spjd * You may not use this file except in compliance with the License. 7168404Spjd * 8168404Spjd * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9168404Spjd * or http://www.opensolaris.org/os/licensing. 10168404Spjd * See the License for the specific language governing permissions 11168404Spjd * and limitations under the License. 12168404Spjd * 13168404Spjd * When distributing Covered Code, include this CDDL HEADER in each 14168404Spjd * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15168404Spjd * If applicable, add the following below this CDDL HEADER, with the 16168404Spjd * fields enclosed by brackets "[]" replaced with your own identifying 17168404Spjd * information: Portions Copyright [yyyy] [name of copyright owner] 18168404Spjd * 19168404Spjd * CDDL HEADER END 20168404Spjd */ 21168404Spjd/* 22219089Spjd * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. 23307265Smav * Copyright (c) 2011, 2015 by Delphix. All rights reserved. 24268123Sdelphij * Copyright (c) 2011, 2014 by Delphix. All rights reserved. 25296519Smav * Copyright (c) 2014 Integros [integros.com] 26168404Spjd */ 27168404Spjd 28219089Spjd/* Portions Copyright 2010 Robert Milkowski */ 29219089Spjd 30168404Spjd#include <sys/zfs_context.h> 31168404Spjd#include <sys/spa.h> 32168404Spjd#include <sys/dmu.h> 33168404Spjd#include <sys/zap.h> 34168404Spjd#include <sys/arc.h> 35168404Spjd#include <sys/stat.h> 36168404Spjd#include <sys/resource.h> 37168404Spjd#include <sys/zil.h> 38168404Spjd#include <sys/zil_impl.h> 39168404Spjd#include <sys/dsl_dataset.h> 40219089Spjd#include <sys/vdev_impl.h> 41168404Spjd#include <sys/dmu_tx.h> 42219089Spjd#include <sys/dsl_pool.h> 43168404Spjd 44168404Spjd/* 45168404Spjd * The zfs intent log (ZIL) saves transaction records of system calls 46168404Spjd * that change the file system in memory with enough information 47168404Spjd * to be able to replay them. These are stored in memory until 48168404Spjd * either the DMU transaction group (txg) commits them to the stable pool 49168404Spjd * and they can be discarded, or they are flushed to the stable log 50168404Spjd * (also in the pool) due to a fsync, O_DSYNC or other synchronous 51168404Spjd * requirement. In the event of a panic or power fail then those log 52168404Spjd * records (transactions) are replayed. 53168404Spjd * 54168404Spjd * There is one ZIL per file system. Its on-disk (pool) format consists 55168404Spjd * of 3 parts: 56168404Spjd * 57168404Spjd * - ZIL header 58168404Spjd * - ZIL blocks 59168404Spjd * - ZIL records 60168404Spjd * 61168404Spjd * A log record holds a system call transaction. Log blocks can 62168404Spjd * hold many log records and the blocks are chained together. 63168404Spjd * Each ZIL block contains a block pointer (blkptr_t) to the next 64168404Spjd * ZIL block in the chain. The ZIL header points to the first 65168404Spjd * block in the chain. Note there is not a fixed place in the pool 66168404Spjd * to hold blocks. They are dynamically allocated and freed as 67168404Spjd * needed from the blocks available. Figure X shows the ZIL structure: 68168404Spjd */ 69168404Spjd 70168404Spjd/* 71251631Sdelphij * Disable intent logging replay. This global ZIL switch affects all pools. 72168404Spjd */ 73251631Sdelphijint zil_replay_disable = 0; 74168404SpjdSYSCTL_DECL(_vfs_zfs); 75267992ShselaskySYSCTL_INT(_vfs_zfs, OID_AUTO, zil_replay_disable, CTLFLAG_RWTUN, 76219089Spjd &zil_replay_disable, 0, "Disable intent logging replay"); 77168404Spjd 78168404Spjd/* 79168404Spjd * Tunable parameter for debugging or performance analysis. Setting 80168404Spjd * zfs_nocacheflush will cause corruption on power loss if a volatile 81168404Spjd * out-of-order write cache is enabled. 82168404Spjd */ 83168404Spjdboolean_t zfs_nocacheflush = B_FALSE; 84168404SpjdSYSCTL_INT(_vfs_zfs, OID_AUTO, cache_flush_disable, CTLFLAG_RDTUN, 85168404Spjd &zfs_nocacheflush, 0, "Disable cache flush"); 86249921Ssmhboolean_t zfs_trim_enabled = B_TRUE; 87249921SsmhSYSCTL_DECL(_vfs_zfs_trim); 88249921SsmhSYSCTL_INT(_vfs_zfs_trim, OID_AUTO, enabled, CTLFLAG_RDTUN, &zfs_trim_enabled, 0, 89249921Ssmh "Enable ZFS TRIM"); 90168404Spjd 91168404Spjdstatic kmem_cache_t *zil_lwb_cache; 92168404Spjd 93219089Spjd#define LWB_EMPTY(lwb) ((BP_GET_LSIZE(&lwb->lwb_blk) - \ 94219089Spjd sizeof (zil_chain_t)) == (lwb->lwb_sz - lwb->lwb_nused)) 95219089Spjd 96219089Spjd 97219089Spjd/* 98219089Spjd * ziltest is by and large an ugly hack, but very useful in 99219089Spjd * checking replay without tedious work. 100219089Spjd * When running ziltest we want to keep all itx's and so maintain 101219089Spjd * a single list in the zl_itxg[] that uses a high txg: ZILTEST_TXG 102219089Spjd * We subtract TXG_CONCURRENT_STATES to allow for common code. 103219089Spjd */ 104219089Spjd#define ZILTEST_TXG (UINT64_MAX - TXG_CONCURRENT_STATES) 105219089Spjd 106168404Spjdstatic int 107219089Spjdzil_bp_compare(const void *x1, const void *x2) 108168404Spjd{ 109219089Spjd const dva_t *dva1 = &((zil_bp_node_t *)x1)->zn_dva; 110219089Spjd const dva_t *dva2 = &((zil_bp_node_t *)x2)->zn_dva; 111168404Spjd 112168404Spjd if (DVA_GET_VDEV(dva1) < DVA_GET_VDEV(dva2)) 113168404Spjd return (-1); 114168404Spjd if (DVA_GET_VDEV(dva1) > DVA_GET_VDEV(dva2)) 115168404Spjd return (1); 116168404Spjd 117168404Spjd if (DVA_GET_OFFSET(dva1) < DVA_GET_OFFSET(dva2)) 118168404Spjd return (-1); 119168404Spjd if (DVA_GET_OFFSET(dva1) > DVA_GET_OFFSET(dva2)) 120168404Spjd return (1); 121168404Spjd 122168404Spjd return (0); 123168404Spjd} 124168404Spjd 125168404Spjdstatic void 126219089Spjdzil_bp_tree_init(zilog_t *zilog) 127168404Spjd{ 128219089Spjd avl_create(&zilog->zl_bp_tree, zil_bp_compare, 129219089Spjd sizeof (zil_bp_node_t), offsetof(zil_bp_node_t, zn_node)); 130168404Spjd} 131168404Spjd 132168404Spjdstatic void 133219089Spjdzil_bp_tree_fini(zilog_t *zilog) 134168404Spjd{ 135219089Spjd avl_tree_t *t = &zilog->zl_bp_tree; 136219089Spjd zil_bp_node_t *zn; 137168404Spjd void *cookie = NULL; 138168404Spjd 139168404Spjd while ((zn = avl_destroy_nodes(t, &cookie)) != NULL) 140219089Spjd kmem_free(zn, sizeof (zil_bp_node_t)); 141168404Spjd 142168404Spjd avl_destroy(t); 143168404Spjd} 144168404Spjd 145219089Spjdint 146219089Spjdzil_bp_tree_add(zilog_t *zilog, const blkptr_t *bp) 147168404Spjd{ 148219089Spjd avl_tree_t *t = &zilog->zl_bp_tree; 149268075Sdelphij const dva_t *dva; 150219089Spjd zil_bp_node_t *zn; 151168404Spjd avl_index_t where; 152168404Spjd 153268075Sdelphij if (BP_IS_EMBEDDED(bp)) 154268075Sdelphij return (0); 155268075Sdelphij 156268075Sdelphij dva = BP_IDENTITY(bp); 157268075Sdelphij 158168404Spjd if (avl_find(t, dva, &where) != NULL) 159249195Smm return (SET_ERROR(EEXIST)); 160168404Spjd 161219089Spjd zn = kmem_alloc(sizeof (zil_bp_node_t), KM_SLEEP); 162168404Spjd zn->zn_dva = *dva; 163168404Spjd avl_insert(t, zn, where); 164168404Spjd 165168404Spjd return (0); 166168404Spjd} 167168404Spjd 168168404Spjdstatic zil_header_t * 169168404Spjdzil_header_in_syncing_context(zilog_t *zilog) 170168404Spjd{ 171168404Spjd return ((zil_header_t *)zilog->zl_header); 172168404Spjd} 173168404Spjd 174168404Spjdstatic void 175168404Spjdzil_init_log_chain(zilog_t *zilog, blkptr_t *bp) 176168404Spjd{ 177168404Spjd zio_cksum_t *zc = &bp->blk_cksum; 178168404Spjd 179168404Spjd zc->zc_word[ZIL_ZC_GUID_0] = spa_get_random(-1ULL); 180168404Spjd zc->zc_word[ZIL_ZC_GUID_1] = spa_get_random(-1ULL); 181168404Spjd zc->zc_word[ZIL_ZC_OBJSET] = dmu_objset_id(zilog->zl_os); 182168404Spjd zc->zc_word[ZIL_ZC_SEQ] = 1ULL; 183168404Spjd} 184168404Spjd 185168404Spjd/* 186219089Spjd * Read a log block and make sure it's valid. 187168404Spjd */ 188168404Spjdstatic int 189219089Spjdzil_read_log_block(zilog_t *zilog, const blkptr_t *bp, blkptr_t *nbp, void *dst, 190219089Spjd char **end) 191168404Spjd{ 192219089Spjd enum zio_flag zio_flags = ZIO_FLAG_CANFAIL; 193275811Sdelphij arc_flags_t aflags = ARC_FLAG_WAIT; 194219089Spjd arc_buf_t *abuf = NULL; 195268123Sdelphij zbookmark_phys_t zb; 196168404Spjd int error; 197168404Spjd 198219089Spjd if (zilog->zl_header->zh_claim_txg == 0) 199219089Spjd zio_flags |= ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB; 200168404Spjd 201219089Spjd if (!(zilog->zl_header->zh_flags & ZIL_CLAIM_LR_SEQ_VALID)) 202219089Spjd zio_flags |= ZIO_FLAG_SPECULATIVE; 203168404Spjd 204219089Spjd SET_BOOKMARK(&zb, bp->blk_cksum.zc_word[ZIL_ZC_OBJSET], 205219089Spjd ZB_ZIL_OBJECT, ZB_ZIL_LEVEL, bp->blk_cksum.zc_word[ZIL_ZC_SEQ]); 206168404Spjd 207246666Smm error = arc_read(NULL, zilog->zl_spa, bp, arc_getbuf_func, &abuf, 208219089Spjd ZIO_PRIORITY_SYNC_READ, zio_flags, &aflags, &zb); 209219089Spjd 210168404Spjd if (error == 0) { 211168404Spjd zio_cksum_t cksum = bp->blk_cksum; 212168404Spjd 213168404Spjd /* 214185029Spjd * Validate the checksummed log block. 215185029Spjd * 216168404Spjd * Sequence numbers should be... sequential. The checksum 217168404Spjd * verifier for the next block should be bp's checksum plus 1. 218185029Spjd * 219185029Spjd * Also check the log chain linkage and size used. 220168404Spjd */ 221168404Spjd cksum.zc_word[ZIL_ZC_SEQ]++; 222168404Spjd 223219089Spjd if (BP_GET_CHECKSUM(bp) == ZIO_CHECKSUM_ZILOG2) { 224219089Spjd zil_chain_t *zilc = abuf->b_data; 225219089Spjd char *lr = (char *)(zilc + 1); 226219089Spjd uint64_t len = zilc->zc_nused - sizeof (zil_chain_t); 227219089Spjd 228219089Spjd if (bcmp(&cksum, &zilc->zc_next_blk.blk_cksum, 229219089Spjd sizeof (cksum)) || BP_IS_HOLE(&zilc->zc_next_blk)) { 230249195Smm error = SET_ERROR(ECKSUM); 231219089Spjd } else { 232274337Sdelphij ASSERT3U(len, <=, SPA_OLD_MAXBLOCKSIZE); 233219089Spjd bcopy(lr, dst, len); 234219089Spjd *end = (char *)dst + len; 235219089Spjd *nbp = zilc->zc_next_blk; 236219089Spjd } 237219089Spjd } else { 238219089Spjd char *lr = abuf->b_data; 239219089Spjd uint64_t size = BP_GET_LSIZE(bp); 240219089Spjd zil_chain_t *zilc = (zil_chain_t *)(lr + size) - 1; 241219089Spjd 242219089Spjd if (bcmp(&cksum, &zilc->zc_next_blk.blk_cksum, 243219089Spjd sizeof (cksum)) || BP_IS_HOLE(&zilc->zc_next_blk) || 244219089Spjd (zilc->zc_nused > (size - sizeof (*zilc)))) { 245249195Smm error = SET_ERROR(ECKSUM); 246219089Spjd } else { 247274337Sdelphij ASSERT3U(zilc->zc_nused, <=, 248274337Sdelphij SPA_OLD_MAXBLOCKSIZE); 249219089Spjd bcopy(lr, dst, zilc->zc_nused); 250219089Spjd *end = (char *)dst + zilc->zc_nused; 251219089Spjd *nbp = zilc->zc_next_blk; 252219089Spjd } 253185029Spjd } 254168404Spjd 255307265Smav arc_buf_destroy(abuf, &abuf); 256168404Spjd } 257168404Spjd 258219089Spjd return (error); 259219089Spjd} 260168404Spjd 261219089Spjd/* 262219089Spjd * Read a TX_WRITE log data block. 263219089Spjd */ 264219089Spjdstatic int 265219089Spjdzil_read_log_data(zilog_t *zilog, const lr_write_t *lr, void *wbuf) 266219089Spjd{ 267219089Spjd enum zio_flag zio_flags = ZIO_FLAG_CANFAIL; 268219089Spjd const blkptr_t *bp = &lr->lr_blkptr; 269275811Sdelphij arc_flags_t aflags = ARC_FLAG_WAIT; 270219089Spjd arc_buf_t *abuf = NULL; 271268123Sdelphij zbookmark_phys_t zb; 272219089Spjd int error; 273219089Spjd 274219089Spjd if (BP_IS_HOLE(bp)) { 275219089Spjd if (wbuf != NULL) 276219089Spjd bzero(wbuf, MAX(BP_GET_LSIZE(bp), lr->lr_length)); 277219089Spjd return (0); 278219089Spjd } 279219089Spjd 280219089Spjd if (zilog->zl_header->zh_claim_txg == 0) 281219089Spjd zio_flags |= ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB; 282219089Spjd 283219089Spjd SET_BOOKMARK(&zb, dmu_objset_id(zilog->zl_os), lr->lr_foid, 284219089Spjd ZB_ZIL_LEVEL, lr->lr_offset / BP_GET_LSIZE(bp)); 285219089Spjd 286246666Smm error = arc_read(NULL, zilog->zl_spa, bp, arc_getbuf_func, &abuf, 287219089Spjd ZIO_PRIORITY_SYNC_READ, zio_flags, &aflags, &zb); 288219089Spjd 289219089Spjd if (error == 0) { 290219089Spjd if (wbuf != NULL) 291219089Spjd bcopy(abuf->b_data, wbuf, arc_buf_size(abuf)); 292307265Smav arc_buf_destroy(abuf, &abuf); 293219089Spjd } 294219089Spjd 295168404Spjd return (error); 296168404Spjd} 297168404Spjd 298168404Spjd/* 299168404Spjd * Parse the intent log, and call parse_func for each valid record within. 300168404Spjd */ 301219089Spjdint 302168404Spjdzil_parse(zilog_t *zilog, zil_parse_blk_func_t *parse_blk_func, 303168404Spjd zil_parse_lr_func_t *parse_lr_func, void *arg, uint64_t txg) 304168404Spjd{ 305168404Spjd const zil_header_t *zh = zilog->zl_header; 306219089Spjd boolean_t claimed = !!zh->zh_claim_txg; 307219089Spjd uint64_t claim_blk_seq = claimed ? zh->zh_claim_blk_seq : UINT64_MAX; 308219089Spjd uint64_t claim_lr_seq = claimed ? zh->zh_claim_lr_seq : UINT64_MAX; 309219089Spjd uint64_t max_blk_seq = 0; 310219089Spjd uint64_t max_lr_seq = 0; 311219089Spjd uint64_t blk_count = 0; 312219089Spjd uint64_t lr_count = 0; 313219089Spjd blkptr_t blk, next_blk; 314168404Spjd char *lrbuf, *lrp; 315219089Spjd int error = 0; 316168404Spjd 317219089Spjd /* 318219089Spjd * Old logs didn't record the maximum zh_claim_lr_seq. 319219089Spjd */ 320219089Spjd if (!(zh->zh_flags & ZIL_CLAIM_LR_SEQ_VALID)) 321219089Spjd claim_lr_seq = UINT64_MAX; 322168404Spjd 323168404Spjd /* 324168404Spjd * Starting at the block pointed to by zh_log we read the log chain. 325168404Spjd * For each block in the chain we strongly check that block to 326168404Spjd * ensure its validity. We stop when an invalid block is found. 327168404Spjd * For each block pointer in the chain we call parse_blk_func(). 328168404Spjd * For each record in each valid block we call parse_lr_func(). 329168404Spjd * If the log has been claimed, stop if we encounter a sequence 330168404Spjd * number greater than the highest claimed sequence number. 331168404Spjd */ 332274337Sdelphij lrbuf = zio_buf_alloc(SPA_OLD_MAXBLOCKSIZE); 333219089Spjd zil_bp_tree_init(zilog); 334168404Spjd 335219089Spjd for (blk = zh->zh_log; !BP_IS_HOLE(&blk); blk = next_blk) { 336219089Spjd uint64_t blk_seq = blk.blk_cksum.zc_word[ZIL_ZC_SEQ]; 337219089Spjd int reclen; 338219089Spjd char *end; 339219089Spjd 340219089Spjd if (blk_seq > claim_blk_seq) 341168404Spjd break; 342219089Spjd if ((error = parse_blk_func(zilog, &blk, arg, txg)) != 0) 343219089Spjd break; 344219089Spjd ASSERT3U(max_blk_seq, <, blk_seq); 345219089Spjd max_blk_seq = blk_seq; 346219089Spjd blk_count++; 347168404Spjd 348219089Spjd if (max_lr_seq == claim_lr_seq && max_blk_seq == claim_blk_seq) 349219089Spjd break; 350168404Spjd 351219089Spjd error = zil_read_log_block(zilog, &blk, &next_blk, lrbuf, &end); 352248571Smm if (error != 0) 353168404Spjd break; 354168404Spjd 355219089Spjd for (lrp = lrbuf; lrp < end; lrp += reclen) { 356168404Spjd lr_t *lr = (lr_t *)lrp; 357168404Spjd reclen = lr->lrc_reclen; 358168404Spjd ASSERT3U(reclen, >=, sizeof (lr_t)); 359219089Spjd if (lr->lrc_seq > claim_lr_seq) 360219089Spjd goto done; 361219089Spjd if ((error = parse_lr_func(zilog, lr, arg, txg)) != 0) 362219089Spjd goto done; 363219089Spjd ASSERT3U(max_lr_seq, <, lr->lrc_seq); 364219089Spjd max_lr_seq = lr->lrc_seq; 365219089Spjd lr_count++; 366168404Spjd } 367168404Spjd } 368219089Spjddone: 369219089Spjd zilog->zl_parse_error = error; 370219089Spjd zilog->zl_parse_blk_seq = max_blk_seq; 371219089Spjd zilog->zl_parse_lr_seq = max_lr_seq; 372219089Spjd zilog->zl_parse_blk_count = blk_count; 373219089Spjd zilog->zl_parse_lr_count = lr_count; 374168404Spjd 375219089Spjd ASSERT(!claimed || !(zh->zh_flags & ZIL_CLAIM_LR_SEQ_VALID) || 376219089Spjd (max_blk_seq == claim_blk_seq && max_lr_seq == claim_lr_seq)); 377219089Spjd 378219089Spjd zil_bp_tree_fini(zilog); 379274337Sdelphij zio_buf_free(lrbuf, SPA_OLD_MAXBLOCKSIZE); 380219089Spjd 381219089Spjd return (error); 382168404Spjd} 383168404Spjd 384219089Spjdstatic int 385168404Spjdzil_claim_log_block(zilog_t *zilog, blkptr_t *bp, void *tx, uint64_t first_txg) 386168404Spjd{ 387168404Spjd /* 388168404Spjd * Claim log block if not already committed and not already claimed. 389219089Spjd * If tx == NULL, just verify that the block is claimable. 390168404Spjd */ 391260150Sdelphij if (BP_IS_HOLE(bp) || bp->blk_birth < first_txg || 392260150Sdelphij zil_bp_tree_add(zilog, bp) != 0) 393219089Spjd return (0); 394219089Spjd 395219089Spjd return (zio_wait(zio_claim(NULL, zilog->zl_spa, 396219089Spjd tx == NULL ? 0 : first_txg, bp, spa_claim_notify, NULL, 397219089Spjd ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB))); 398168404Spjd} 399168404Spjd 400219089Spjdstatic int 401168404Spjdzil_claim_log_record(zilog_t *zilog, lr_t *lrc, void *tx, uint64_t first_txg) 402168404Spjd{ 403219089Spjd lr_write_t *lr = (lr_write_t *)lrc; 404219089Spjd int error; 405219089Spjd 406219089Spjd if (lrc->lrc_txtype != TX_WRITE) 407219089Spjd return (0); 408219089Spjd 409219089Spjd /* 410219089Spjd * If the block is not readable, don't claim it. This can happen 411219089Spjd * in normal operation when a log block is written to disk before 412219089Spjd * some of the dmu_sync() blocks it points to. In this case, the 413219089Spjd * transaction cannot have been committed to anyone (we would have 414219089Spjd * waited for all writes to be stable first), so it is semantically 415219089Spjd * correct to declare this the end of the log. 416219089Spjd */ 417219089Spjd if (lr->lr_blkptr.blk_birth >= first_txg && 418219089Spjd (error = zil_read_log_data(zilog, lr, NULL)) != 0) 419219089Spjd return (error); 420219089Spjd return (zil_claim_log_block(zilog, &lr->lr_blkptr, tx, first_txg)); 421168404Spjd} 422168404Spjd 423168404Spjd/* ARGSUSED */ 424219089Spjdstatic int 425168404Spjdzil_free_log_block(zilog_t *zilog, blkptr_t *bp, void *tx, uint64_t claim_txg) 426168404Spjd{ 427219089Spjd zio_free_zil(zilog->zl_spa, dmu_tx_get_txg(tx), bp); 428219089Spjd 429219089Spjd return (0); 430168404Spjd} 431168404Spjd 432219089Spjdstatic int 433168404Spjdzil_free_log_record(zilog_t *zilog, lr_t *lrc, void *tx, uint64_t claim_txg) 434168404Spjd{ 435219089Spjd lr_write_t *lr = (lr_write_t *)lrc; 436219089Spjd blkptr_t *bp = &lr->lr_blkptr; 437219089Spjd 438168404Spjd /* 439168404Spjd * If we previously claimed it, we need to free it. 440168404Spjd */ 441219089Spjd if (claim_txg != 0 && lrc->lrc_txtype == TX_WRITE && 442260150Sdelphij bp->blk_birth >= claim_txg && zil_bp_tree_add(zilog, bp) == 0 && 443260150Sdelphij !BP_IS_HOLE(bp)) 444219089Spjd zio_free(zilog->zl_spa, dmu_tx_get_txg(tx), bp); 445219089Spjd 446219089Spjd return (0); 447219089Spjd} 448219089Spjd 449219089Spjdstatic lwb_t * 450219089Spjdzil_alloc_lwb(zilog_t *zilog, blkptr_t *bp, uint64_t txg) 451219089Spjd{ 452219089Spjd lwb_t *lwb; 453219089Spjd 454219089Spjd lwb = kmem_cache_alloc(zil_lwb_cache, KM_SLEEP); 455219089Spjd lwb->lwb_zilog = zilog; 456219089Spjd lwb->lwb_blk = *bp; 457219089Spjd lwb->lwb_buf = zio_buf_alloc(BP_GET_LSIZE(bp)); 458219089Spjd lwb->lwb_max_txg = txg; 459219089Spjd lwb->lwb_zio = NULL; 460219089Spjd lwb->lwb_tx = NULL; 461219089Spjd if (BP_GET_CHECKSUM(bp) == ZIO_CHECKSUM_ZILOG2) { 462219089Spjd lwb->lwb_nused = sizeof (zil_chain_t); 463219089Spjd lwb->lwb_sz = BP_GET_LSIZE(bp); 464219089Spjd } else { 465219089Spjd lwb->lwb_nused = 0; 466219089Spjd lwb->lwb_sz = BP_GET_LSIZE(bp) - sizeof (zil_chain_t); 467168404Spjd } 468219089Spjd 469219089Spjd mutex_enter(&zilog->zl_lock); 470219089Spjd list_insert_tail(&zilog->zl_lwb_list, lwb); 471219089Spjd mutex_exit(&zilog->zl_lock); 472219089Spjd 473219089Spjd return (lwb); 474168404Spjd} 475168404Spjd 476168404Spjd/* 477239620Smm * Called when we create in-memory log transactions so that we know 478239620Smm * to cleanup the itxs at the end of spa_sync(). 479239620Smm */ 480239620Smmvoid 481239620Smmzilog_dirty(zilog_t *zilog, uint64_t txg) 482239620Smm{ 483239620Smm dsl_pool_t *dp = zilog->zl_dmu_pool; 484239620Smm dsl_dataset_t *ds = dmu_objset_ds(zilog->zl_os); 485239620Smm 486286575Smav if (ds->ds_is_snapshot) 487239620Smm panic("dirtying snapshot!"); 488239620Smm 489248571Smm if (txg_list_add(&dp->dp_dirty_zilogs, zilog, txg)) { 490239620Smm /* up the hold count until we can be written out */ 491239620Smm dmu_buf_add_ref(ds->ds_dbuf, zilog); 492239620Smm } 493239620Smm} 494239620Smm 495239620Smmboolean_t 496239620Smmzilog_is_dirty(zilog_t *zilog) 497239620Smm{ 498239620Smm dsl_pool_t *dp = zilog->zl_dmu_pool; 499239620Smm 500239620Smm for (int t = 0; t < TXG_SIZE; t++) { 501239620Smm if (txg_list_member(&dp->dp_dirty_zilogs, zilog, t)) 502239620Smm return (B_TRUE); 503239620Smm } 504239620Smm return (B_FALSE); 505239620Smm} 506239620Smm 507239620Smm/* 508168404Spjd * Create an on-disk intent log. 509168404Spjd */ 510219089Spjdstatic lwb_t * 511168404Spjdzil_create(zilog_t *zilog) 512168404Spjd{ 513168404Spjd const zil_header_t *zh = zilog->zl_header; 514219089Spjd lwb_t *lwb = NULL; 515168404Spjd uint64_t txg = 0; 516168404Spjd dmu_tx_t *tx = NULL; 517168404Spjd blkptr_t blk; 518168404Spjd int error = 0; 519168404Spjd 520168404Spjd /* 521168404Spjd * Wait for any previous destroy to complete. 522168404Spjd */ 523168404Spjd txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg); 524168404Spjd 525168404Spjd ASSERT(zh->zh_claim_txg == 0); 526168404Spjd ASSERT(zh->zh_replay_seq == 0); 527168404Spjd 528168404Spjd blk = zh->zh_log; 529168404Spjd 530168404Spjd /* 531219089Spjd * Allocate an initial log block if: 532219089Spjd * - there isn't one already 533219089Spjd * - the existing block is the wrong endianess 534168404Spjd */ 535207908Smm if (BP_IS_HOLE(&blk) || BP_SHOULD_BYTESWAP(&blk)) { 536168404Spjd tx = dmu_tx_create(zilog->zl_os); 537219089Spjd VERIFY(dmu_tx_assign(tx, TXG_WAIT) == 0); 538168404Spjd dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx); 539168404Spjd txg = dmu_tx_get_txg(tx); 540168404Spjd 541207908Smm if (!BP_IS_HOLE(&blk)) { 542219089Spjd zio_free_zil(zilog->zl_spa, txg, &blk); 543207908Smm BP_ZERO(&blk); 544207908Smm } 545207908Smm 546219089Spjd error = zio_alloc_zil(zilog->zl_spa, txg, &blk, NULL, 547219089Spjd ZIL_MIN_BLKSZ, zilog->zl_logbias == ZFS_LOGBIAS_LATENCY); 548168404Spjd 549168404Spjd if (error == 0) 550168404Spjd zil_init_log_chain(zilog, &blk); 551168404Spjd } 552168404Spjd 553168404Spjd /* 554168404Spjd * Allocate a log write buffer (lwb) for the first log block. 555168404Spjd */ 556219089Spjd if (error == 0) 557219089Spjd lwb = zil_alloc_lwb(zilog, &blk, txg); 558168404Spjd 559168404Spjd /* 560168404Spjd * If we just allocated the first log block, commit our transaction 561168404Spjd * and wait for zil_sync() to stuff the block poiner into zh_log. 562168404Spjd * (zh is part of the MOS, so we cannot modify it in open context.) 563168404Spjd */ 564168404Spjd if (tx != NULL) { 565168404Spjd dmu_tx_commit(tx); 566168404Spjd txg_wait_synced(zilog->zl_dmu_pool, txg); 567168404Spjd } 568168404Spjd 569168404Spjd ASSERT(bcmp(&blk, &zh->zh_log, sizeof (blk)) == 0); 570219089Spjd 571219089Spjd return (lwb); 572168404Spjd} 573168404Spjd 574168404Spjd/* 575168404Spjd * In one tx, free all log blocks and clear the log header. 576168404Spjd * If keep_first is set, then we're replaying a log with no content. 577168404Spjd * We want to keep the first block, however, so that the first 578168404Spjd * synchronous transaction doesn't require a txg_wait_synced() 579168404Spjd * in zil_create(). We don't need to txg_wait_synced() here either 580168404Spjd * when keep_first is set, because both zil_create() and zil_destroy() 581168404Spjd * will wait for any in-progress destroys to complete. 582168404Spjd */ 583168404Spjdvoid 584168404Spjdzil_destroy(zilog_t *zilog, boolean_t keep_first) 585168404Spjd{ 586168404Spjd const zil_header_t *zh = zilog->zl_header; 587168404Spjd lwb_t *lwb; 588168404Spjd dmu_tx_t *tx; 589168404Spjd uint64_t txg; 590168404Spjd 591168404Spjd /* 592168404Spjd * Wait for any previous destroy to complete. 593168404Spjd */ 594168404Spjd txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg); 595168404Spjd 596219089Spjd zilog->zl_old_header = *zh; /* debugging aid */ 597219089Spjd 598168404Spjd if (BP_IS_HOLE(&zh->zh_log)) 599168404Spjd return; 600168404Spjd 601168404Spjd tx = dmu_tx_create(zilog->zl_os); 602219089Spjd VERIFY(dmu_tx_assign(tx, TXG_WAIT) == 0); 603168404Spjd dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx); 604168404Spjd txg = dmu_tx_get_txg(tx); 605168404Spjd 606168404Spjd mutex_enter(&zilog->zl_lock); 607168404Spjd 608168404Spjd ASSERT3U(zilog->zl_destroy_txg, <, txg); 609168404Spjd zilog->zl_destroy_txg = txg; 610168404Spjd zilog->zl_keep_first = keep_first; 611168404Spjd 612168404Spjd if (!list_is_empty(&zilog->zl_lwb_list)) { 613168404Spjd ASSERT(zh->zh_claim_txg == 0); 614224526Smm VERIFY(!keep_first); 615168404Spjd while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) { 616168404Spjd list_remove(&zilog->zl_lwb_list, lwb); 617168404Spjd if (lwb->lwb_buf != NULL) 618168404Spjd zio_buf_free(lwb->lwb_buf, lwb->lwb_sz); 619219089Spjd zio_free_zil(zilog->zl_spa, txg, &lwb->lwb_blk); 620168404Spjd kmem_cache_free(zil_lwb_cache, lwb); 621168404Spjd } 622219089Spjd } else if (!keep_first) { 623239620Smm zil_destroy_sync(zilog, tx); 624168404Spjd } 625168404Spjd mutex_exit(&zilog->zl_lock); 626168404Spjd 627168404Spjd dmu_tx_commit(tx); 628185029Spjd} 629168404Spjd 630239620Smmvoid 631239620Smmzil_destroy_sync(zilog_t *zilog, dmu_tx_t *tx) 632239620Smm{ 633239620Smm ASSERT(list_is_empty(&zilog->zl_lwb_list)); 634239620Smm (void) zil_parse(zilog, zil_free_log_block, 635239620Smm zil_free_log_record, tx, zilog->zl_header->zh_claim_txg); 636239620Smm} 637239620Smm 638168404Spjdint 639286686Smavzil_claim(dsl_pool_t *dp, dsl_dataset_t *ds, void *txarg) 640168404Spjd{ 641168404Spjd dmu_tx_t *tx = txarg; 642168404Spjd uint64_t first_txg = dmu_tx_get_txg(tx); 643168404Spjd zilog_t *zilog; 644168404Spjd zil_header_t *zh; 645168404Spjd objset_t *os; 646168404Spjd int error; 647168404Spjd 648286686Smav error = dmu_objset_own_obj(dp, ds->ds_object, 649286686Smav DMU_OST_ANY, B_FALSE, FTAG, &os); 650248571Smm if (error != 0) { 651271534Sdelphij /* 652271534Sdelphij * EBUSY indicates that the objset is inconsistent, in which 653271534Sdelphij * case it can not have a ZIL. 654271534Sdelphij */ 655271534Sdelphij if (error != EBUSY) { 656286686Smav cmn_err(CE_WARN, "can't open objset for %llu, error %u", 657286686Smav (unsigned long long)ds->ds_object, error); 658271534Sdelphij } 659168404Spjd return (0); 660168404Spjd } 661168404Spjd 662168404Spjd zilog = dmu_objset_zil(os); 663168404Spjd zh = zil_header_in_syncing_context(zilog); 664168404Spjd 665219089Spjd if (spa_get_log_state(zilog->zl_spa) == SPA_LOG_CLEAR) { 666213197Smm if (!BP_IS_HOLE(&zh->zh_log)) 667219089Spjd zio_free_zil(zilog->zl_spa, first_txg, &zh->zh_log); 668213197Smm BP_ZERO(&zh->zh_log); 669213197Smm dsl_dataset_dirty(dmu_objset_ds(os), tx); 670248571Smm dmu_objset_disown(os, FTAG); 671219089Spjd return (0); 672213197Smm } 673213197Smm 674168404Spjd /* 675168404Spjd * Claim all log blocks if we haven't already done so, and remember 676168404Spjd * the highest claimed sequence number. This ensures that if we can 677168404Spjd * read only part of the log now (e.g. due to a missing device), 678168404Spjd * but we can read the entire log later, we will not try to replay 679168404Spjd * or destroy beyond the last block we successfully claimed. 680168404Spjd */ 681168404Spjd ASSERT3U(zh->zh_claim_txg, <=, first_txg); 682168404Spjd if (zh->zh_claim_txg == 0 && !BP_IS_HOLE(&zh->zh_log)) { 683219089Spjd (void) zil_parse(zilog, zil_claim_log_block, 684219089Spjd zil_claim_log_record, tx, first_txg); 685168404Spjd zh->zh_claim_txg = first_txg; 686219089Spjd zh->zh_claim_blk_seq = zilog->zl_parse_blk_seq; 687219089Spjd zh->zh_claim_lr_seq = zilog->zl_parse_lr_seq; 688219089Spjd if (zilog->zl_parse_lr_count || zilog->zl_parse_blk_count > 1) 689219089Spjd zh->zh_flags |= ZIL_REPLAY_NEEDED; 690219089Spjd zh->zh_flags |= ZIL_CLAIM_LR_SEQ_VALID; 691168404Spjd dsl_dataset_dirty(dmu_objset_ds(os), tx); 692168404Spjd } 693168404Spjd 694168404Spjd ASSERT3U(first_txg, ==, (spa_last_synced_txg(zilog->zl_spa) + 1)); 695248571Smm dmu_objset_disown(os, FTAG); 696168404Spjd return (0); 697168404Spjd} 698168404Spjd 699185029Spjd/* 700185029Spjd * Check the log by walking the log chain. 701185029Spjd * Checksum errors are ok as they indicate the end of the chain. 702185029Spjd * Any other error (no device or read failure) returns an error. 703185029Spjd */ 704286686Smav/* ARGSUSED */ 705185029Spjdint 706286686Smavzil_check_log_chain(dsl_pool_t *dp, dsl_dataset_t *ds, void *tx) 707168404Spjd{ 708185029Spjd zilog_t *zilog; 709185029Spjd objset_t *os; 710219089Spjd blkptr_t *bp; 711185029Spjd int error; 712168404Spjd 713219089Spjd ASSERT(tx == NULL); 714219089Spjd 715286686Smav error = dmu_objset_from_ds(ds, &os); 716248571Smm if (error != 0) { 717286686Smav cmn_err(CE_WARN, "can't open objset %llu, error %d", 718286686Smav (unsigned long long)ds->ds_object, error); 719185029Spjd return (0); 720185029Spjd } 721168404Spjd 722185029Spjd zilog = dmu_objset_zil(os); 723219089Spjd bp = (blkptr_t *)&zilog->zl_header->zh_log; 724219089Spjd 725219089Spjd /* 726219089Spjd * Check the first block and determine if it's on a log device 727219089Spjd * which may have been removed or faulted prior to loading this 728219089Spjd * pool. If so, there's no point in checking the rest of the log 729219089Spjd * as its content should have already been synced to the pool. 730219089Spjd */ 731219089Spjd if (!BP_IS_HOLE(bp)) { 732219089Spjd vdev_t *vd; 733219089Spjd boolean_t valid = B_TRUE; 734219089Spjd 735219089Spjd spa_config_enter(os->os_spa, SCL_STATE, FTAG, RW_READER); 736219089Spjd vd = vdev_lookup_top(os->os_spa, DVA_GET_VDEV(&bp->blk_dva[0])); 737219089Spjd if (vd->vdev_islog && vdev_is_dead(vd)) 738219089Spjd valid = vdev_log_state_valid(vd); 739219089Spjd spa_config_exit(os->os_spa, SCL_STATE, FTAG); 740219089Spjd 741286686Smav if (!valid) 742219089Spjd return (0); 743168404Spjd } 744185029Spjd 745219089Spjd /* 746219089Spjd * Because tx == NULL, zil_claim_log_block() will not actually claim 747219089Spjd * any blocks, but just determine whether it is possible to do so. 748219089Spjd * In addition to checking the log chain, zil_claim_log_block() 749219089Spjd * will invoke zio_claim() with a done func of spa_claim_notify(), 750219089Spjd * which will update spa_max_claim_txg. See spa_load() for details. 751219089Spjd */ 752219089Spjd error = zil_parse(zilog, zil_claim_log_block, zil_claim_log_record, tx, 753219089Spjd zilog->zl_header->zh_claim_txg ? -1ULL : spa_first_txg(os->os_spa)); 754219089Spjd 755219089Spjd return ((error == ECKSUM || error == ENOENT) ? 0 : error); 756168404Spjd} 757168404Spjd 758185029Spjdstatic int 759185029Spjdzil_vdev_compare(const void *x1, const void *x2) 760185029Spjd{ 761219089Spjd const uint64_t v1 = ((zil_vdev_node_t *)x1)->zv_vdev; 762219089Spjd const uint64_t v2 = ((zil_vdev_node_t *)x2)->zv_vdev; 763185029Spjd 764185029Spjd if (v1 < v2) 765185029Spjd return (-1); 766185029Spjd if (v1 > v2) 767185029Spjd return (1); 768185029Spjd 769185029Spjd return (0); 770185029Spjd} 771185029Spjd 772168404Spjdvoid 773219089Spjdzil_add_block(zilog_t *zilog, const blkptr_t *bp) 774168404Spjd{ 775185029Spjd avl_tree_t *t = &zilog->zl_vdev_tree; 776185029Spjd avl_index_t where; 777185029Spjd zil_vdev_node_t *zv, zvsearch; 778185029Spjd int ndvas = BP_GET_NDVAS(bp); 779185029Spjd int i; 780168404Spjd 781185029Spjd if (zfs_nocacheflush) 782185029Spjd return; 783168404Spjd 784185029Spjd ASSERT(zilog->zl_writer); 785168404Spjd 786185029Spjd /* 787185029Spjd * Even though we're zl_writer, we still need a lock because the 788185029Spjd * zl_get_data() callbacks may have dmu_sync() done callbacks 789185029Spjd * that will run concurrently. 790185029Spjd */ 791185029Spjd mutex_enter(&zilog->zl_vdev_lock); 792185029Spjd for (i = 0; i < ndvas; i++) { 793185029Spjd zvsearch.zv_vdev = DVA_GET_VDEV(&bp->blk_dva[i]); 794185029Spjd if (avl_find(t, &zvsearch, &where) == NULL) { 795185029Spjd zv = kmem_alloc(sizeof (*zv), KM_SLEEP); 796185029Spjd zv->zv_vdev = zvsearch.zv_vdev; 797185029Spjd avl_insert(t, zv, where); 798185029Spjd } 799185029Spjd } 800185029Spjd mutex_exit(&zilog->zl_vdev_lock); 801168404Spjd} 802168404Spjd 803219089Spjdstatic void 804168404Spjdzil_flush_vdevs(zilog_t *zilog) 805168404Spjd{ 806168404Spjd spa_t *spa = zilog->zl_spa; 807185029Spjd avl_tree_t *t = &zilog->zl_vdev_tree; 808185029Spjd void *cookie = NULL; 809185029Spjd zil_vdev_node_t *zv; 810185029Spjd zio_t *zio; 811168404Spjd 812168404Spjd ASSERT(zilog->zl_writer); 813168404Spjd 814185029Spjd /* 815185029Spjd * We don't need zl_vdev_lock here because we're the zl_writer, 816185029Spjd * and all zl_get_data() callbacks are done. 817185029Spjd */ 818185029Spjd if (avl_numnodes(t) == 0) 819185029Spjd return; 820185029Spjd 821185029Spjd spa_config_enter(spa, SCL_STATE, FTAG, RW_READER); 822185029Spjd 823185029Spjd zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL); 824185029Spjd 825185029Spjd while ((zv = avl_destroy_nodes(t, &cookie)) != NULL) { 826185029Spjd vdev_t *vd = vdev_lookup_top(spa, zv->zv_vdev); 827185029Spjd if (vd != NULL) 828185029Spjd zio_flush(zio, vd); 829185029Spjd kmem_free(zv, sizeof (*zv)); 830168404Spjd } 831168404Spjd 832168404Spjd /* 833168404Spjd * Wait for all the flushes to complete. Not all devices actually 834168404Spjd * support the DKIOCFLUSHWRITECACHE ioctl, so it's OK if it fails. 835168404Spjd */ 836185029Spjd (void) zio_wait(zio); 837185029Spjd 838185029Spjd spa_config_exit(spa, SCL_STATE, FTAG); 839168404Spjd} 840168404Spjd 841168404Spjd/* 842168404Spjd * Function called when a log block write completes 843168404Spjd */ 844168404Spjdstatic void 845168404Spjdzil_lwb_write_done(zio_t *zio) 846168404Spjd{ 847168404Spjd lwb_t *lwb = zio->io_private; 848168404Spjd zilog_t *zilog = lwb->lwb_zilog; 849219089Spjd dmu_tx_t *tx = lwb->lwb_tx; 850168404Spjd 851185029Spjd ASSERT(BP_GET_COMPRESS(zio->io_bp) == ZIO_COMPRESS_OFF); 852185029Spjd ASSERT(BP_GET_TYPE(zio->io_bp) == DMU_OT_INTENT_LOG); 853185029Spjd ASSERT(BP_GET_LEVEL(zio->io_bp) == 0); 854185029Spjd ASSERT(BP_GET_BYTEORDER(zio->io_bp) == ZFS_HOST_BYTEORDER); 855185029Spjd ASSERT(!BP_IS_GANG(zio->io_bp)); 856185029Spjd ASSERT(!BP_IS_HOLE(zio->io_bp)); 857268075Sdelphij ASSERT(BP_GET_FILL(zio->io_bp) == 0); 858185029Spjd 859168404Spjd /* 860209962Smm * Ensure the lwb buffer pointer is cleared before releasing 861209962Smm * the txg. If we have had an allocation failure and 862209962Smm * the txg is waiting to sync then we want want zil_sync() 863209962Smm * to remove the lwb so that it's not picked up as the next new 864209962Smm * one in zil_commit_writer(). zil_sync() will only remove 865209962Smm * the lwb if lwb_buf is null. 866168404Spjd */ 867168404Spjd zio_buf_free(lwb->lwb_buf, lwb->lwb_sz); 868168404Spjd mutex_enter(&zilog->zl_lock); 869168404Spjd lwb->lwb_buf = NULL; 870219089Spjd lwb->lwb_tx = NULL; 871219089Spjd mutex_exit(&zilog->zl_lock); 872209962Smm 873209962Smm /* 874209962Smm * Now that we've written this log block, we have a stable pointer 875209962Smm * to the next block in the chain, so it's OK to let the txg in 876219089Spjd * which we allocated the next block sync. 877209962Smm */ 878219089Spjd dmu_tx_commit(tx); 879168404Spjd} 880168404Spjd 881168404Spjd/* 882168404Spjd * Initialize the io for a log block. 883168404Spjd */ 884168404Spjdstatic void 885168404Spjdzil_lwb_write_init(zilog_t *zilog, lwb_t *lwb) 886168404Spjd{ 887268123Sdelphij zbookmark_phys_t zb; 888168404Spjd 889219089Spjd SET_BOOKMARK(&zb, lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_OBJSET], 890219089Spjd ZB_ZIL_OBJECT, ZB_ZIL_LEVEL, 891219089Spjd lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_SEQ]); 892168404Spjd 893168404Spjd if (zilog->zl_root_zio == NULL) { 894168404Spjd zilog->zl_root_zio = zio_root(zilog->zl_spa, NULL, NULL, 895168404Spjd ZIO_FLAG_CANFAIL); 896168404Spjd } 897168404Spjd if (lwb->lwb_zio == NULL) { 898168404Spjd lwb->lwb_zio = zio_rewrite(zilog->zl_root_zio, zilog->zl_spa, 899219089Spjd 0, &lwb->lwb_blk, lwb->lwb_buf, BP_GET_LSIZE(&lwb->lwb_blk), 900258632Savg zil_lwb_write_done, lwb, ZIO_PRIORITY_SYNC_WRITE, 901219089Spjd ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_PROPAGATE, &zb); 902168404Spjd } 903168404Spjd} 904168404Spjd 905168404Spjd/* 906219089Spjd * Define a limited set of intent log block sizes. 907251631Sdelphij * 908219089Spjd * These must be a multiple of 4KB. Note only the amount used (again 909219089Spjd * aligned to 4KB) actually gets written. However, we can't always just 910274337Sdelphij * allocate SPA_OLD_MAXBLOCKSIZE as the slog space could be exhausted. 911219089Spjd */ 912219089Spjduint64_t zil_block_buckets[] = { 913219089Spjd 4096, /* non TX_WRITE */ 914219089Spjd 8192+4096, /* data base */ 915219089Spjd 32*1024 + 4096, /* NFS writes */ 916219089Spjd UINT64_MAX 917219089Spjd}; 918219089Spjd 919219089Spjd/* 920219089Spjd * Use the slog as long as the logbias is 'latency' and the current commit size 921219089Spjd * is less than the limit or the total list size is less than 2X the limit. 922219089Spjd * Limit checking is disabled by setting zil_slog_limit to UINT64_MAX. 923219089Spjd */ 924219089Spjduint64_t zil_slog_limit = 1024 * 1024; 925219089Spjd#define USE_SLOG(zilog) (((zilog)->zl_logbias == ZFS_LOGBIAS_LATENCY) && \ 926219089Spjd (((zilog)->zl_cur_used < zil_slog_limit) || \ 927219089Spjd ((zilog)->zl_itx_list_sz < (zil_slog_limit << 1)))) 928219089Spjd 929219089Spjd/* 930168404Spjd * Start a log block write and advance to the next log block. 931168404Spjd * Calls are serialized. 932168404Spjd */ 933168404Spjdstatic lwb_t * 934168404Spjdzil_lwb_write_start(zilog_t *zilog, lwb_t *lwb) 935168404Spjd{ 936219089Spjd lwb_t *nlwb = NULL; 937219089Spjd zil_chain_t *zilc; 938168404Spjd spa_t *spa = zilog->zl_spa; 939219089Spjd blkptr_t *bp; 940219089Spjd dmu_tx_t *tx; 941168404Spjd uint64_t txg; 942219089Spjd uint64_t zil_blksz, wsz; 943219089Spjd int i, error; 944168404Spjd 945219089Spjd if (BP_GET_CHECKSUM(&lwb->lwb_blk) == ZIO_CHECKSUM_ZILOG2) { 946219089Spjd zilc = (zil_chain_t *)lwb->lwb_buf; 947219089Spjd bp = &zilc->zc_next_blk; 948219089Spjd } else { 949219089Spjd zilc = (zil_chain_t *)(lwb->lwb_buf + lwb->lwb_sz); 950219089Spjd bp = &zilc->zc_next_blk; 951219089Spjd } 952168404Spjd 953219089Spjd ASSERT(lwb->lwb_nused <= lwb->lwb_sz); 954219089Spjd 955168404Spjd /* 956168404Spjd * Allocate the next block and save its address in this block 957168404Spjd * before writing it in order to establish the log chain. 958168404Spjd * Note that if the allocation of nlwb synced before we wrote 959168404Spjd * the block that points at it (lwb), we'd leak it if we crashed. 960219089Spjd * Therefore, we don't do dmu_tx_commit() until zil_lwb_write_done(). 961219089Spjd * We dirty the dataset to ensure that zil_sync() will be called 962219089Spjd * to clean up in the event of allocation failure or I/O failure. 963168404Spjd */ 964219089Spjd tx = dmu_tx_create(zilog->zl_os); 965219089Spjd VERIFY(dmu_tx_assign(tx, TXG_WAIT) == 0); 966219089Spjd dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx); 967219089Spjd txg = dmu_tx_get_txg(tx); 968168404Spjd 969219089Spjd lwb->lwb_tx = tx; 970219089Spjd 971168404Spjd /* 972219089Spjd * Log blocks are pre-allocated. Here we select the size of the next 973219089Spjd * block, based on size used in the last block. 974219089Spjd * - first find the smallest bucket that will fit the block from a 975219089Spjd * limited set of block sizes. This is because it's faster to write 976219089Spjd * blocks allocated from the same metaslab as they are adjacent or 977219089Spjd * close. 978219089Spjd * - next find the maximum from the new suggested size and an array of 979219089Spjd * previous sizes. This lessens a picket fence effect of wrongly 980219089Spjd * guesssing the size if we have a stream of say 2k, 64k, 2k, 64k 981219089Spjd * requests. 982219089Spjd * 983219089Spjd * Note we only write what is used, but we can't just allocate 984219089Spjd * the maximum block size because we can exhaust the available 985219089Spjd * pool log space. 986168404Spjd */ 987219089Spjd zil_blksz = zilog->zl_cur_used + sizeof (zil_chain_t); 988219089Spjd for (i = 0; zil_blksz > zil_block_buckets[i]; i++) 989219089Spjd continue; 990219089Spjd zil_blksz = zil_block_buckets[i]; 991219089Spjd if (zil_blksz == UINT64_MAX) 992274337Sdelphij zil_blksz = SPA_OLD_MAXBLOCKSIZE; 993219089Spjd zilog->zl_prev_blks[zilog->zl_prev_rotor] = zil_blksz; 994219089Spjd for (i = 0; i < ZIL_PREV_BLKS; i++) 995219089Spjd zil_blksz = MAX(zil_blksz, zilog->zl_prev_blks[i]); 996219089Spjd zilog->zl_prev_rotor = (zilog->zl_prev_rotor + 1) & (ZIL_PREV_BLKS - 1); 997168404Spjd 998168404Spjd BP_ZERO(bp); 999168404Spjd /* pass the old blkptr in order to spread log blocks across devs */ 1000219089Spjd error = zio_alloc_zil(spa, txg, bp, &lwb->lwb_blk, zil_blksz, 1001219089Spjd USE_SLOG(zilog)); 1002248571Smm if (error == 0) { 1003219089Spjd ASSERT3U(bp->blk_birth, ==, txg); 1004219089Spjd bp->blk_cksum = lwb->lwb_blk.blk_cksum; 1005219089Spjd bp->blk_cksum.zc_word[ZIL_ZC_SEQ]++; 1006168404Spjd 1007168404Spjd /* 1008219089Spjd * Allocate a new log write buffer (lwb). 1009168404Spjd */ 1010219089Spjd nlwb = zil_alloc_lwb(zilog, bp, txg); 1011168404Spjd 1012219089Spjd /* Record the block for later vdev flushing */ 1013219089Spjd zil_add_block(zilog, &lwb->lwb_blk); 1014168404Spjd } 1015168404Spjd 1016219089Spjd if (BP_GET_CHECKSUM(&lwb->lwb_blk) == ZIO_CHECKSUM_ZILOG2) { 1017219089Spjd /* For Slim ZIL only write what is used. */ 1018219089Spjd wsz = P2ROUNDUP_TYPED(lwb->lwb_nused, ZIL_MIN_BLKSZ, uint64_t); 1019219089Spjd ASSERT3U(wsz, <=, lwb->lwb_sz); 1020219089Spjd zio_shrink(lwb->lwb_zio, wsz); 1021168404Spjd 1022219089Spjd } else { 1023219089Spjd wsz = lwb->lwb_sz; 1024219089Spjd } 1025168404Spjd 1026219089Spjd zilc->zc_pad = 0; 1027219089Spjd zilc->zc_nused = lwb->lwb_nused; 1028219089Spjd zilc->zc_eck.zec_cksum = lwb->lwb_blk.blk_cksum; 1029168404Spjd 1030168404Spjd /* 1031219089Spjd * clear unused data for security 1032168404Spjd */ 1033219089Spjd bzero(lwb->lwb_buf + lwb->lwb_nused, wsz - lwb->lwb_nused); 1034168404Spjd 1035219089Spjd zio_nowait(lwb->lwb_zio); /* Kick off the write for the old log block */ 1036168404Spjd 1037168404Spjd /* 1038219089Spjd * If there was an allocation failure then nlwb will be null which 1039219089Spjd * forces a txg_wait_synced(). 1040168404Spjd */ 1041168404Spjd return (nlwb); 1042168404Spjd} 1043168404Spjd 1044168404Spjdstatic lwb_t * 1045168404Spjdzil_lwb_commit(zilog_t *zilog, itx_t *itx, lwb_t *lwb) 1046168404Spjd{ 1047168404Spjd lr_t *lrc = &itx->itx_lr; /* common log record */ 1048219089Spjd lr_write_t *lrw = (lr_write_t *)lrc; 1049219089Spjd char *lr_buf; 1050168404Spjd uint64_t txg = lrc->lrc_txg; 1051168404Spjd uint64_t reclen = lrc->lrc_reclen; 1052219089Spjd uint64_t dlen = 0; 1053168404Spjd 1054168404Spjd if (lwb == NULL) 1055168404Spjd return (NULL); 1056219089Spjd 1057168404Spjd ASSERT(lwb->lwb_buf != NULL); 1058239620Smm ASSERT(zilog_is_dirty(zilog) || 1059239620Smm spa_freeze_txg(zilog->zl_spa) != UINT64_MAX); 1060168404Spjd 1061168404Spjd if (lrc->lrc_txtype == TX_WRITE && itx->itx_wr_state == WR_NEED_COPY) 1062168404Spjd dlen = P2ROUNDUP_TYPED( 1063219089Spjd lrw->lr_length, sizeof (uint64_t), uint64_t); 1064168404Spjd 1065168404Spjd zilog->zl_cur_used += (reclen + dlen); 1066168404Spjd 1067168404Spjd zil_lwb_write_init(zilog, lwb); 1068168404Spjd 1069168404Spjd /* 1070168404Spjd * If this record won't fit in the current log block, start a new one. 1071168404Spjd */ 1072219089Spjd if (lwb->lwb_nused + reclen + dlen > lwb->lwb_sz) { 1073168404Spjd lwb = zil_lwb_write_start(zilog, lwb); 1074168404Spjd if (lwb == NULL) 1075168404Spjd return (NULL); 1076168404Spjd zil_lwb_write_init(zilog, lwb); 1077219089Spjd ASSERT(LWB_EMPTY(lwb)); 1078219089Spjd if (lwb->lwb_nused + reclen + dlen > lwb->lwb_sz) { 1079168404Spjd txg_wait_synced(zilog->zl_dmu_pool, txg); 1080168404Spjd return (lwb); 1081168404Spjd } 1082168404Spjd } 1083168404Spjd 1084219089Spjd lr_buf = lwb->lwb_buf + lwb->lwb_nused; 1085219089Spjd bcopy(lrc, lr_buf, reclen); 1086219089Spjd lrc = (lr_t *)lr_buf; 1087219089Spjd lrw = (lr_write_t *)lrc; 1088168404Spjd 1089168404Spjd /* 1090168404Spjd * If it's a write, fetch the data or get its blkptr as appropriate. 1091168404Spjd */ 1092168404Spjd if (lrc->lrc_txtype == TX_WRITE) { 1093168404Spjd if (txg > spa_freeze_txg(zilog->zl_spa)) 1094168404Spjd txg_wait_synced(zilog->zl_dmu_pool, txg); 1095168404Spjd if (itx->itx_wr_state != WR_COPIED) { 1096168404Spjd char *dbuf; 1097168404Spjd int error; 1098168404Spjd 1099168404Spjd if (dlen) { 1100168404Spjd ASSERT(itx->itx_wr_state == WR_NEED_COPY); 1101219089Spjd dbuf = lr_buf + reclen; 1102219089Spjd lrw->lr_common.lrc_reclen += dlen; 1103168404Spjd } else { 1104168404Spjd ASSERT(itx->itx_wr_state == WR_INDIRECT); 1105168404Spjd dbuf = NULL; 1106168404Spjd } 1107168404Spjd error = zilog->zl_get_data( 1108219089Spjd itx->itx_private, lrw, dbuf, lwb->lwb_zio); 1109214378Smm if (error == EIO) { 1110214378Smm txg_wait_synced(zilog->zl_dmu_pool, txg); 1111214378Smm return (lwb); 1112214378Smm } 1113248571Smm if (error != 0) { 1114168404Spjd ASSERT(error == ENOENT || error == EEXIST || 1115168404Spjd error == EALREADY); 1116168404Spjd return (lwb); 1117168404Spjd } 1118168404Spjd } 1119168404Spjd } 1120168404Spjd 1121219089Spjd /* 1122219089Spjd * We're actually making an entry, so update lrc_seq to be the 1123219089Spjd * log record sequence number. Note that this is generally not 1124219089Spjd * equal to the itx sequence number because not all transactions 1125219089Spjd * are synchronous, and sometimes spa_sync() gets there first. 1126219089Spjd */ 1127219089Spjd lrc->lrc_seq = ++zilog->zl_lr_seq; /* we are single threaded */ 1128168404Spjd lwb->lwb_nused += reclen + dlen; 1129168404Spjd lwb->lwb_max_txg = MAX(lwb->lwb_max_txg, txg); 1130219089Spjd ASSERT3U(lwb->lwb_nused, <=, lwb->lwb_sz); 1131240415Smm ASSERT0(P2PHASE(lwb->lwb_nused, sizeof (uint64_t))); 1132168404Spjd 1133168404Spjd return (lwb); 1134168404Spjd} 1135168404Spjd 1136168404Spjditx_t * 1137185029Spjdzil_itx_create(uint64_t txtype, size_t lrsize) 1138168404Spjd{ 1139168404Spjd itx_t *itx; 1140168404Spjd 1141168404Spjd lrsize = P2ROUNDUP_TYPED(lrsize, sizeof (uint64_t), size_t); 1142168404Spjd 1143168404Spjd itx = kmem_alloc(offsetof(itx_t, itx_lr) + lrsize, KM_SLEEP); 1144168404Spjd itx->itx_lr.lrc_txtype = txtype; 1145168404Spjd itx->itx_lr.lrc_reclen = lrsize; 1146185029Spjd itx->itx_sod = lrsize; /* if write & WR_NEED_COPY will be increased */ 1147168404Spjd itx->itx_lr.lrc_seq = 0; /* defensive */ 1148219089Spjd itx->itx_sync = B_TRUE; /* default is synchronous */ 1149168404Spjd 1150168404Spjd return (itx); 1151168404Spjd} 1152168404Spjd 1153219089Spjdvoid 1154219089Spjdzil_itx_destroy(itx_t *itx) 1155168404Spjd{ 1156219089Spjd kmem_free(itx, offsetof(itx_t, itx_lr) + itx->itx_lr.lrc_reclen); 1157219089Spjd} 1158168404Spjd 1159219089Spjd/* 1160219089Spjd * Free up the sync and async itxs. The itxs_t has already been detached 1161219089Spjd * so no locks are needed. 1162219089Spjd */ 1163219089Spjdstatic void 1164219089Spjdzil_itxg_clean(itxs_t *itxs) 1165219089Spjd{ 1166219089Spjd itx_t *itx; 1167219089Spjd list_t *list; 1168219089Spjd avl_tree_t *t; 1169219089Spjd void *cookie; 1170219089Spjd itx_async_node_t *ian; 1171168404Spjd 1172219089Spjd list = &itxs->i_sync_list; 1173219089Spjd while ((itx = list_head(list)) != NULL) { 1174219089Spjd list_remove(list, itx); 1175219089Spjd kmem_free(itx, offsetof(itx_t, itx_lr) + 1176219089Spjd itx->itx_lr.lrc_reclen); 1177219089Spjd } 1178168404Spjd 1179219089Spjd cookie = NULL; 1180219089Spjd t = &itxs->i_async_tree; 1181219089Spjd while ((ian = avl_destroy_nodes(t, &cookie)) != NULL) { 1182219089Spjd list = &ian->ia_list; 1183219089Spjd while ((itx = list_head(list)) != NULL) { 1184219089Spjd list_remove(list, itx); 1185219089Spjd kmem_free(itx, offsetof(itx_t, itx_lr) + 1186219089Spjd itx->itx_lr.lrc_reclen); 1187219089Spjd } 1188219089Spjd list_destroy(list); 1189219089Spjd kmem_free(ian, sizeof (itx_async_node_t)); 1190219089Spjd } 1191219089Spjd avl_destroy(t); 1192219089Spjd 1193219089Spjd kmem_free(itxs, sizeof (itxs_t)); 1194168404Spjd} 1195168404Spjd 1196219089Spjdstatic int 1197219089Spjdzil_aitx_compare(const void *x1, const void *x2) 1198219089Spjd{ 1199219089Spjd const uint64_t o1 = ((itx_async_node_t *)x1)->ia_foid; 1200219089Spjd const uint64_t o2 = ((itx_async_node_t *)x2)->ia_foid; 1201219089Spjd 1202219089Spjd if (o1 < o2) 1203219089Spjd return (-1); 1204219089Spjd if (o1 > o2) 1205219089Spjd return (1); 1206219089Spjd 1207219089Spjd return (0); 1208219089Spjd} 1209219089Spjd 1210168404Spjd/* 1211219089Spjd * Remove all async itx with the given oid. 1212168404Spjd */ 1213168404Spjdstatic void 1214219089Spjdzil_remove_async(zilog_t *zilog, uint64_t oid) 1215168404Spjd{ 1216219089Spjd uint64_t otxg, txg; 1217219089Spjd itx_async_node_t *ian; 1218219089Spjd avl_tree_t *t; 1219219089Spjd avl_index_t where; 1220168404Spjd list_t clean_list; 1221168404Spjd itx_t *itx; 1222168404Spjd 1223219089Spjd ASSERT(oid != 0); 1224168404Spjd list_create(&clean_list, sizeof (itx_t), offsetof(itx_t, itx_node)); 1225168404Spjd 1226219089Spjd if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */ 1227219089Spjd otxg = ZILTEST_TXG; 1228219089Spjd else 1229219089Spjd otxg = spa_last_synced_txg(zilog->zl_spa) + 1; 1230219089Spjd 1231219089Spjd for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) { 1232219089Spjd itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK]; 1233219089Spjd 1234219089Spjd mutex_enter(&itxg->itxg_lock); 1235219089Spjd if (itxg->itxg_txg != txg) { 1236219089Spjd mutex_exit(&itxg->itxg_lock); 1237219089Spjd continue; 1238219089Spjd } 1239219089Spjd 1240219089Spjd /* 1241219089Spjd * Locate the object node and append its list. 1242219089Spjd */ 1243219089Spjd t = &itxg->itxg_itxs->i_async_tree; 1244219089Spjd ian = avl_find(t, &oid, &where); 1245219089Spjd if (ian != NULL) 1246219089Spjd list_move_tail(&clean_list, &ian->ia_list); 1247219089Spjd mutex_exit(&itxg->itxg_lock); 1248168404Spjd } 1249219089Spjd while ((itx = list_head(&clean_list)) != NULL) { 1250219089Spjd list_remove(&clean_list, itx); 1251219089Spjd kmem_free(itx, offsetof(itx_t, itx_lr) + 1252219089Spjd itx->itx_lr.lrc_reclen); 1253219089Spjd } 1254219089Spjd list_destroy(&clean_list); 1255219089Spjd} 1256168404Spjd 1257219089Spjdvoid 1258219089Spjdzil_itx_assign(zilog_t *zilog, itx_t *itx, dmu_tx_t *tx) 1259219089Spjd{ 1260219089Spjd uint64_t txg; 1261219089Spjd itxg_t *itxg; 1262219089Spjd itxs_t *itxs, *clean = NULL; 1263219089Spjd 1264168404Spjd /* 1265219089Spjd * Object ids can be re-instantiated in the next txg so 1266219089Spjd * remove any async transactions to avoid future leaks. 1267219089Spjd * This can happen if a fsync occurs on the re-instantiated 1268219089Spjd * object for a WR_INDIRECT or WR_NEED_COPY write, which gets 1269219089Spjd * the new file data and flushes a write record for the old object. 1270168404Spjd */ 1271219089Spjd if ((itx->itx_lr.lrc_txtype & ~TX_CI) == TX_REMOVE) 1272219089Spjd zil_remove_async(zilog, itx->itx_oid); 1273219089Spjd 1274219089Spjd /* 1275219089Spjd * Ensure the data of a renamed file is committed before the rename. 1276219089Spjd */ 1277219089Spjd if ((itx->itx_lr.lrc_txtype & ~TX_CI) == TX_RENAME) 1278219089Spjd zil_async_to_sync(zilog, itx->itx_oid); 1279219089Spjd 1280239620Smm if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) 1281219089Spjd txg = ZILTEST_TXG; 1282219089Spjd else 1283219089Spjd txg = dmu_tx_get_txg(tx); 1284219089Spjd 1285219089Spjd itxg = &zilog->zl_itxg[txg & TXG_MASK]; 1286219089Spjd mutex_enter(&itxg->itxg_lock); 1287219089Spjd itxs = itxg->itxg_itxs; 1288219089Spjd if (itxg->itxg_txg != txg) { 1289219089Spjd if (itxs != NULL) { 1290219089Spjd /* 1291219089Spjd * The zil_clean callback hasn't got around to cleaning 1292219089Spjd * this itxg. Save the itxs for release below. 1293219089Spjd * This should be rare. 1294219089Spjd */ 1295219089Spjd atomic_add_64(&zilog->zl_itx_list_sz, -itxg->itxg_sod); 1296219089Spjd itxg->itxg_sod = 0; 1297219089Spjd clean = itxg->itxg_itxs; 1298219089Spjd } 1299219089Spjd ASSERT(itxg->itxg_sod == 0); 1300219089Spjd itxg->itxg_txg = txg; 1301219089Spjd itxs = itxg->itxg_itxs = kmem_zalloc(sizeof (itxs_t), KM_SLEEP); 1302219089Spjd 1303219089Spjd list_create(&itxs->i_sync_list, sizeof (itx_t), 1304219089Spjd offsetof(itx_t, itx_node)); 1305219089Spjd avl_create(&itxs->i_async_tree, zil_aitx_compare, 1306219089Spjd sizeof (itx_async_node_t), 1307219089Spjd offsetof(itx_async_node_t, ia_node)); 1308168404Spjd } 1309219089Spjd if (itx->itx_sync) { 1310219089Spjd list_insert_tail(&itxs->i_sync_list, itx); 1311219089Spjd atomic_add_64(&zilog->zl_itx_list_sz, itx->itx_sod); 1312219089Spjd itxg->itxg_sod += itx->itx_sod; 1313219089Spjd } else { 1314219089Spjd avl_tree_t *t = &itxs->i_async_tree; 1315219089Spjd uint64_t foid = ((lr_ooo_t *)&itx->itx_lr)->lr_foid; 1316219089Spjd itx_async_node_t *ian; 1317219089Spjd avl_index_t where; 1318168404Spjd 1319219089Spjd ian = avl_find(t, &foid, &where); 1320219089Spjd if (ian == NULL) { 1321219089Spjd ian = kmem_alloc(sizeof (itx_async_node_t), KM_SLEEP); 1322219089Spjd list_create(&ian->ia_list, sizeof (itx_t), 1323219089Spjd offsetof(itx_t, itx_node)); 1324219089Spjd ian->ia_foid = foid; 1325219089Spjd avl_insert(t, ian, where); 1326219089Spjd } 1327219089Spjd list_insert_tail(&ian->ia_list, itx); 1328168404Spjd } 1329219089Spjd 1330219089Spjd itx->itx_lr.lrc_txg = dmu_tx_get_txg(tx); 1331239620Smm zilog_dirty(zilog, txg); 1332219089Spjd mutex_exit(&itxg->itxg_lock); 1333219089Spjd 1334219089Spjd /* Release the old itxs now we've dropped the lock */ 1335219089Spjd if (clean != NULL) 1336219089Spjd zil_itxg_clean(clean); 1337168404Spjd} 1338168404Spjd 1339168404Spjd/* 1340168404Spjd * If there are any in-memory intent log transactions which have now been 1341239620Smm * synced then start up a taskq to free them. We should only do this after we 1342239620Smm * have written out the uberblocks (i.e. txg has been comitted) so that 1343239620Smm * don't inadvertently clean out in-memory log records that would be required 1344239620Smm * by zil_commit(). 1345168404Spjd */ 1346168404Spjdvoid 1347219089Spjdzil_clean(zilog_t *zilog, uint64_t synced_txg) 1348168404Spjd{ 1349219089Spjd itxg_t *itxg = &zilog->zl_itxg[synced_txg & TXG_MASK]; 1350219089Spjd itxs_t *clean_me; 1351168404Spjd 1352219089Spjd mutex_enter(&itxg->itxg_lock); 1353219089Spjd if (itxg->itxg_itxs == NULL || itxg->itxg_txg == ZILTEST_TXG) { 1354219089Spjd mutex_exit(&itxg->itxg_lock); 1355219089Spjd return; 1356168404Spjd } 1357219089Spjd ASSERT3U(itxg->itxg_txg, <=, synced_txg); 1358219089Spjd ASSERT(itxg->itxg_txg != 0); 1359219089Spjd ASSERT(zilog->zl_clean_taskq != NULL); 1360219089Spjd atomic_add_64(&zilog->zl_itx_list_sz, -itxg->itxg_sod); 1361219089Spjd itxg->itxg_sod = 0; 1362219089Spjd clean_me = itxg->itxg_itxs; 1363219089Spjd itxg->itxg_itxs = NULL; 1364219089Spjd itxg->itxg_txg = 0; 1365219089Spjd mutex_exit(&itxg->itxg_lock); 1366219089Spjd /* 1367219089Spjd * Preferably start a task queue to free up the old itxs but 1368219089Spjd * if taskq_dispatch can't allocate resources to do that then 1369219089Spjd * free it in-line. This should be rare. Note, using TQ_SLEEP 1370219089Spjd * created a bad performance problem. 1371219089Spjd */ 1372219089Spjd if (taskq_dispatch(zilog->zl_clean_taskq, 1373219089Spjd (void (*)(void *))zil_itxg_clean, clean_me, TQ_NOSLEEP) == 0) 1374219089Spjd zil_itxg_clean(clean_me); 1375168404Spjd} 1376168404Spjd 1377219089Spjd/* 1378219089Spjd * Get the list of itxs to commit into zl_itx_commit_list. 1379219089Spjd */ 1380185029Spjdstatic void 1381219089Spjdzil_get_commit_list(zilog_t *zilog) 1382168404Spjd{ 1383219089Spjd uint64_t otxg, txg; 1384219089Spjd list_t *commit_list = &zilog->zl_itx_commit_list; 1385219089Spjd uint64_t push_sod = 0; 1386219089Spjd 1387219089Spjd if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */ 1388219089Spjd otxg = ZILTEST_TXG; 1389219089Spjd else 1390219089Spjd otxg = spa_last_synced_txg(zilog->zl_spa) + 1; 1391219089Spjd 1392219089Spjd for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) { 1393219089Spjd itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK]; 1394219089Spjd 1395219089Spjd mutex_enter(&itxg->itxg_lock); 1396219089Spjd if (itxg->itxg_txg != txg) { 1397219089Spjd mutex_exit(&itxg->itxg_lock); 1398219089Spjd continue; 1399219089Spjd } 1400219089Spjd 1401219089Spjd list_move_tail(commit_list, &itxg->itxg_itxs->i_sync_list); 1402219089Spjd push_sod += itxg->itxg_sod; 1403219089Spjd itxg->itxg_sod = 0; 1404219089Spjd 1405219089Spjd mutex_exit(&itxg->itxg_lock); 1406219089Spjd } 1407219089Spjd atomic_add_64(&zilog->zl_itx_list_sz, -push_sod); 1408219089Spjd} 1409219089Spjd 1410219089Spjd/* 1411219089Spjd * Move the async itxs for a specified object to commit into sync lists. 1412219089Spjd */ 1413308595Smavvoid 1414219089Spjdzil_async_to_sync(zilog_t *zilog, uint64_t foid) 1415219089Spjd{ 1416219089Spjd uint64_t otxg, txg; 1417219089Spjd itx_async_node_t *ian; 1418219089Spjd avl_tree_t *t; 1419219089Spjd avl_index_t where; 1420219089Spjd 1421219089Spjd if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */ 1422219089Spjd otxg = ZILTEST_TXG; 1423219089Spjd else 1424219089Spjd otxg = spa_last_synced_txg(zilog->zl_spa) + 1; 1425219089Spjd 1426219089Spjd for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) { 1427219089Spjd itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK]; 1428219089Spjd 1429219089Spjd mutex_enter(&itxg->itxg_lock); 1430219089Spjd if (itxg->itxg_txg != txg) { 1431219089Spjd mutex_exit(&itxg->itxg_lock); 1432219089Spjd continue; 1433219089Spjd } 1434219089Spjd 1435219089Spjd /* 1436219089Spjd * If a foid is specified then find that node and append its 1437219089Spjd * list. Otherwise walk the tree appending all the lists 1438219089Spjd * to the sync list. We add to the end rather than the 1439219089Spjd * beginning to ensure the create has happened. 1440219089Spjd */ 1441219089Spjd t = &itxg->itxg_itxs->i_async_tree; 1442219089Spjd if (foid != 0) { 1443219089Spjd ian = avl_find(t, &foid, &where); 1444219089Spjd if (ian != NULL) { 1445219089Spjd list_move_tail(&itxg->itxg_itxs->i_sync_list, 1446219089Spjd &ian->ia_list); 1447219089Spjd } 1448219089Spjd } else { 1449219089Spjd void *cookie = NULL; 1450219089Spjd 1451219089Spjd while ((ian = avl_destroy_nodes(t, &cookie)) != NULL) { 1452219089Spjd list_move_tail(&itxg->itxg_itxs->i_sync_list, 1453219089Spjd &ian->ia_list); 1454219089Spjd list_destroy(&ian->ia_list); 1455219089Spjd kmem_free(ian, sizeof (itx_async_node_t)); 1456219089Spjd } 1457219089Spjd } 1458219089Spjd mutex_exit(&itxg->itxg_lock); 1459219089Spjd } 1460219089Spjd} 1461219089Spjd 1462219089Spjdstatic void 1463219089Spjdzil_commit_writer(zilog_t *zilog) 1464219089Spjd{ 1465168404Spjd uint64_t txg; 1466219089Spjd itx_t *itx; 1467168404Spjd lwb_t *lwb; 1468219089Spjd spa_t *spa = zilog->zl_spa; 1469219089Spjd int error = 0; 1470168404Spjd 1471185029Spjd ASSERT(zilog->zl_root_zio == NULL); 1472168404Spjd 1473219089Spjd mutex_exit(&zilog->zl_lock); 1474219089Spjd 1475219089Spjd zil_get_commit_list(zilog); 1476219089Spjd 1477219089Spjd /* 1478219089Spjd * Return if there's nothing to commit before we dirty the fs by 1479219089Spjd * calling zil_create(). 1480219089Spjd */ 1481219089Spjd if (list_head(&zilog->zl_itx_commit_list) == NULL) { 1482219089Spjd mutex_enter(&zilog->zl_lock); 1483219089Spjd return; 1484219089Spjd } 1485219089Spjd 1486168404Spjd if (zilog->zl_suspend) { 1487168404Spjd lwb = NULL; 1488168404Spjd } else { 1489168404Spjd lwb = list_tail(&zilog->zl_lwb_list); 1490219089Spjd if (lwb == NULL) 1491219089Spjd lwb = zil_create(zilog); 1492168404Spjd } 1493168404Spjd 1494168404Spjd DTRACE_PROBE1(zil__cw1, zilog_t *, zilog); 1495219089Spjd while (itx = list_head(&zilog->zl_itx_commit_list)) { 1496168404Spjd txg = itx->itx_lr.lrc_txg; 1497168404Spjd ASSERT(txg); 1498168404Spjd 1499219089Spjd if (txg > spa_last_synced_txg(spa) || txg > spa_freeze_txg(spa)) 1500168404Spjd lwb = zil_lwb_commit(zilog, itx, lwb); 1501219089Spjd list_remove(&zilog->zl_itx_commit_list, itx); 1502168404Spjd kmem_free(itx, offsetof(itx_t, itx_lr) 1503168404Spjd + itx->itx_lr.lrc_reclen); 1504168404Spjd } 1505168404Spjd DTRACE_PROBE1(zil__cw2, zilog_t *, zilog); 1506168404Spjd 1507168404Spjd /* write the last block out */ 1508168404Spjd if (lwb != NULL && lwb->lwb_zio != NULL) 1509168404Spjd lwb = zil_lwb_write_start(zilog, lwb); 1510168404Spjd 1511168404Spjd zilog->zl_cur_used = 0; 1512168404Spjd 1513168404Spjd /* 1514168404Spjd * Wait if necessary for the log blocks to be on stable storage. 1515168404Spjd */ 1516168404Spjd if (zilog->zl_root_zio) { 1517219089Spjd error = zio_wait(zilog->zl_root_zio); 1518185029Spjd zilog->zl_root_zio = NULL; 1519185029Spjd zil_flush_vdevs(zilog); 1520168404Spjd } 1521168404Spjd 1522219089Spjd if (error || lwb == NULL) 1523168404Spjd txg_wait_synced(zilog->zl_dmu_pool, 0); 1524168404Spjd 1525168404Spjd mutex_enter(&zilog->zl_lock); 1526168404Spjd 1527219089Spjd /* 1528219089Spjd * Remember the highest committed log sequence number for ztest. 1529219089Spjd * We only update this value when all the log writes succeeded, 1530219089Spjd * because ztest wants to ASSERT that it got the whole log chain. 1531219089Spjd */ 1532219089Spjd if (error == 0 && lwb != NULL) 1533219089Spjd zilog->zl_commit_lr_seq = zilog->zl_lr_seq; 1534168404Spjd} 1535168404Spjd 1536168404Spjd/* 1537219089Spjd * Commit zfs transactions to stable storage. 1538168404Spjd * If foid is 0 push out all transactions, otherwise push only those 1539219089Spjd * for that object or might reference that object. 1540219089Spjd * 1541219089Spjd * itxs are committed in batches. In a heavily stressed zil there will be 1542219089Spjd * a commit writer thread who is writing out a bunch of itxs to the log 1543219089Spjd * for a set of committing threads (cthreads) in the same batch as the writer. 1544219089Spjd * Those cthreads are all waiting on the same cv for that batch. 1545219089Spjd * 1546219089Spjd * There will also be a different and growing batch of threads that are 1547219089Spjd * waiting to commit (qthreads). When the committing batch completes 1548219089Spjd * a transition occurs such that the cthreads exit and the qthreads become 1549219089Spjd * cthreads. One of the new cthreads becomes the writer thread for the 1550219089Spjd * batch. Any new threads arriving become new qthreads. 1551219089Spjd * 1552219089Spjd * Only 2 condition variables are needed and there's no transition 1553219089Spjd * between the two cvs needed. They just flip-flop between qthreads 1554219089Spjd * and cthreads. 1555219089Spjd * 1556219089Spjd * Using this scheme we can efficiently wakeup up only those threads 1557219089Spjd * that have been committed. 1558168404Spjd */ 1559168404Spjdvoid 1560219089Spjdzil_commit(zilog_t *zilog, uint64_t foid) 1561168404Spjd{ 1562219089Spjd uint64_t mybatch; 1563219089Spjd 1564219089Spjd if (zilog->zl_sync == ZFS_SYNC_DISABLED) 1565168404Spjd return; 1566168404Spjd 1567219089Spjd /* move the async itxs for the foid to the sync queues */ 1568219089Spjd zil_async_to_sync(zilog, foid); 1569219089Spjd 1570168404Spjd mutex_enter(&zilog->zl_lock); 1571219089Spjd mybatch = zilog->zl_next_batch; 1572168404Spjd while (zilog->zl_writer) { 1573219089Spjd cv_wait(&zilog->zl_cv_batch[mybatch & 1], &zilog->zl_lock); 1574219089Spjd if (mybatch <= zilog->zl_com_batch) { 1575168404Spjd mutex_exit(&zilog->zl_lock); 1576168404Spjd return; 1577168404Spjd } 1578168404Spjd } 1579219089Spjd 1580219089Spjd zilog->zl_next_batch++; 1581219089Spjd zilog->zl_writer = B_TRUE; 1582219089Spjd zil_commit_writer(zilog); 1583219089Spjd zilog->zl_com_batch = mybatch; 1584219089Spjd zilog->zl_writer = B_FALSE; 1585168404Spjd mutex_exit(&zilog->zl_lock); 1586219089Spjd 1587219089Spjd /* wake up one thread to become the next writer */ 1588219089Spjd cv_signal(&zilog->zl_cv_batch[(mybatch+1) & 1]); 1589219089Spjd 1590219089Spjd /* wake up all threads waiting for this batch to be committed */ 1591219089Spjd cv_broadcast(&zilog->zl_cv_batch[mybatch & 1]); 1592168404Spjd} 1593168404Spjd 1594168404Spjd/* 1595168404Spjd * Called in syncing context to free committed log blocks and update log header. 1596168404Spjd */ 1597168404Spjdvoid 1598168404Spjdzil_sync(zilog_t *zilog, dmu_tx_t *tx) 1599168404Spjd{ 1600168404Spjd zil_header_t *zh = zil_header_in_syncing_context(zilog); 1601168404Spjd uint64_t txg = dmu_tx_get_txg(tx); 1602168404Spjd spa_t *spa = zilog->zl_spa; 1603219089Spjd uint64_t *replayed_seq = &zilog->zl_replayed_seq[txg & TXG_MASK]; 1604168404Spjd lwb_t *lwb; 1605168404Spjd 1606209962Smm /* 1607209962Smm * We don't zero out zl_destroy_txg, so make sure we don't try 1608209962Smm * to destroy it twice. 1609209962Smm */ 1610209962Smm if (spa_sync_pass(spa) != 1) 1611209962Smm return; 1612209962Smm 1613168404Spjd mutex_enter(&zilog->zl_lock); 1614168404Spjd 1615168404Spjd ASSERT(zilog->zl_stop_sync == 0); 1616168404Spjd 1617219089Spjd if (*replayed_seq != 0) { 1618219089Spjd ASSERT(zh->zh_replay_seq < *replayed_seq); 1619219089Spjd zh->zh_replay_seq = *replayed_seq; 1620219089Spjd *replayed_seq = 0; 1621219089Spjd } 1622168404Spjd 1623168404Spjd if (zilog->zl_destroy_txg == txg) { 1624168404Spjd blkptr_t blk = zh->zh_log; 1625168404Spjd 1626168404Spjd ASSERT(list_head(&zilog->zl_lwb_list) == NULL); 1627168404Spjd 1628168404Spjd bzero(zh, sizeof (zil_header_t)); 1629209962Smm bzero(zilog->zl_replayed_seq, sizeof (zilog->zl_replayed_seq)); 1630168404Spjd 1631168404Spjd if (zilog->zl_keep_first) { 1632168404Spjd /* 1633168404Spjd * If this block was part of log chain that couldn't 1634168404Spjd * be claimed because a device was missing during 1635168404Spjd * zil_claim(), but that device later returns, 1636168404Spjd * then this block could erroneously appear valid. 1637168404Spjd * To guard against this, assign a new GUID to the new 1638168404Spjd * log chain so it doesn't matter what blk points to. 1639168404Spjd */ 1640168404Spjd zil_init_log_chain(zilog, &blk); 1641168404Spjd zh->zh_log = blk; 1642168404Spjd } 1643168404Spjd } 1644168404Spjd 1645213197Smm while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) { 1646168404Spjd zh->zh_log = lwb->lwb_blk; 1647168404Spjd if (lwb->lwb_buf != NULL || lwb->lwb_max_txg > txg) 1648168404Spjd break; 1649168404Spjd list_remove(&zilog->zl_lwb_list, lwb); 1650219089Spjd zio_free_zil(spa, txg, &lwb->lwb_blk); 1651168404Spjd kmem_cache_free(zil_lwb_cache, lwb); 1652168404Spjd 1653168404Spjd /* 1654168404Spjd * If we don't have anything left in the lwb list then 1655168404Spjd * we've had an allocation failure and we need to zero 1656168404Spjd * out the zil_header blkptr so that we don't end 1657168404Spjd * up freeing the same block twice. 1658168404Spjd */ 1659168404Spjd if (list_head(&zilog->zl_lwb_list) == NULL) 1660168404Spjd BP_ZERO(&zh->zh_log); 1661168404Spjd } 1662168404Spjd mutex_exit(&zilog->zl_lock); 1663168404Spjd} 1664168404Spjd 1665168404Spjdvoid 1666168404Spjdzil_init(void) 1667168404Spjd{ 1668168404Spjd zil_lwb_cache = kmem_cache_create("zil_lwb_cache", 1669168404Spjd sizeof (struct lwb), 0, NULL, NULL, NULL, NULL, NULL, 0); 1670168404Spjd} 1671168404Spjd 1672168404Spjdvoid 1673168404Spjdzil_fini(void) 1674168404Spjd{ 1675168404Spjd kmem_cache_destroy(zil_lwb_cache); 1676168404Spjd} 1677168404Spjd 1678219089Spjdvoid 1679219089Spjdzil_set_sync(zilog_t *zilog, uint64_t sync) 1680219089Spjd{ 1681219089Spjd zilog->zl_sync = sync; 1682219089Spjd} 1683219089Spjd 1684219089Spjdvoid 1685219089Spjdzil_set_logbias(zilog_t *zilog, uint64_t logbias) 1686219089Spjd{ 1687219089Spjd zilog->zl_logbias = logbias; 1688219089Spjd} 1689219089Spjd 1690168404Spjdzilog_t * 1691168404Spjdzil_alloc(objset_t *os, zil_header_t *zh_phys) 1692168404Spjd{ 1693168404Spjd zilog_t *zilog; 1694168404Spjd 1695168404Spjd zilog = kmem_zalloc(sizeof (zilog_t), KM_SLEEP); 1696168404Spjd 1697168404Spjd zilog->zl_header = zh_phys; 1698168404Spjd zilog->zl_os = os; 1699168404Spjd zilog->zl_spa = dmu_objset_spa(os); 1700168404Spjd zilog->zl_dmu_pool = dmu_objset_pool(os); 1701168404Spjd zilog->zl_destroy_txg = TXG_INITIAL - 1; 1702219089Spjd zilog->zl_logbias = dmu_objset_logbias(os); 1703219089Spjd zilog->zl_sync = dmu_objset_syncprop(os); 1704219089Spjd zilog->zl_next_batch = 1; 1705168404Spjd 1706168404Spjd mutex_init(&zilog->zl_lock, NULL, MUTEX_DEFAULT, NULL); 1707168404Spjd 1708219089Spjd for (int i = 0; i < TXG_SIZE; i++) { 1709219089Spjd mutex_init(&zilog->zl_itxg[i].itxg_lock, NULL, 1710219089Spjd MUTEX_DEFAULT, NULL); 1711219089Spjd } 1712168404Spjd 1713168404Spjd list_create(&zilog->zl_lwb_list, sizeof (lwb_t), 1714168404Spjd offsetof(lwb_t, lwb_node)); 1715168404Spjd 1716219089Spjd list_create(&zilog->zl_itx_commit_list, sizeof (itx_t), 1717219089Spjd offsetof(itx_t, itx_node)); 1718219089Spjd 1719185029Spjd mutex_init(&zilog->zl_vdev_lock, NULL, MUTEX_DEFAULT, NULL); 1720168404Spjd 1721185029Spjd avl_create(&zilog->zl_vdev_tree, zil_vdev_compare, 1722185029Spjd sizeof (zil_vdev_node_t), offsetof(zil_vdev_node_t, zv_node)); 1723185029Spjd 1724185029Spjd cv_init(&zilog->zl_cv_writer, NULL, CV_DEFAULT, NULL); 1725185029Spjd cv_init(&zilog->zl_cv_suspend, NULL, CV_DEFAULT, NULL); 1726219089Spjd cv_init(&zilog->zl_cv_batch[0], NULL, CV_DEFAULT, NULL); 1727219089Spjd cv_init(&zilog->zl_cv_batch[1], NULL, CV_DEFAULT, NULL); 1728185029Spjd 1729168404Spjd return (zilog); 1730168404Spjd} 1731168404Spjd 1732168404Spjdvoid 1733168404Spjdzil_free(zilog_t *zilog) 1734168404Spjd{ 1735168404Spjd zilog->zl_stop_sync = 1; 1736168404Spjd 1737248571Smm ASSERT0(zilog->zl_suspend); 1738248571Smm ASSERT0(zilog->zl_suspending); 1739248571Smm 1740224526Smm ASSERT(list_is_empty(&zilog->zl_lwb_list)); 1741168404Spjd list_destroy(&zilog->zl_lwb_list); 1742168404Spjd 1743185029Spjd avl_destroy(&zilog->zl_vdev_tree); 1744185029Spjd mutex_destroy(&zilog->zl_vdev_lock); 1745168404Spjd 1746219089Spjd ASSERT(list_is_empty(&zilog->zl_itx_commit_list)); 1747219089Spjd list_destroy(&zilog->zl_itx_commit_list); 1748219089Spjd 1749219089Spjd for (int i = 0; i < TXG_SIZE; i++) { 1750219089Spjd /* 1751219089Spjd * It's possible for an itx to be generated that doesn't dirty 1752219089Spjd * a txg (e.g. ztest TX_TRUNCATE). So there's no zil_clean() 1753219089Spjd * callback to remove the entry. We remove those here. 1754219089Spjd * 1755219089Spjd * Also free up the ziltest itxs. 1756219089Spjd */ 1757219089Spjd if (zilog->zl_itxg[i].itxg_itxs) 1758219089Spjd zil_itxg_clean(zilog->zl_itxg[i].itxg_itxs); 1759219089Spjd mutex_destroy(&zilog->zl_itxg[i].itxg_lock); 1760219089Spjd } 1761219089Spjd 1762168404Spjd mutex_destroy(&zilog->zl_lock); 1763168404Spjd 1764185029Spjd cv_destroy(&zilog->zl_cv_writer); 1765185029Spjd cv_destroy(&zilog->zl_cv_suspend); 1766219089Spjd cv_destroy(&zilog->zl_cv_batch[0]); 1767219089Spjd cv_destroy(&zilog->zl_cv_batch[1]); 1768185029Spjd 1769168404Spjd kmem_free(zilog, sizeof (zilog_t)); 1770168404Spjd} 1771168404Spjd 1772168404Spjd/* 1773168404Spjd * Open an intent log. 1774168404Spjd */ 1775168404Spjdzilog_t * 1776168404Spjdzil_open(objset_t *os, zil_get_data_t *get_data) 1777168404Spjd{ 1778168404Spjd zilog_t *zilog = dmu_objset_zil(os); 1779168404Spjd 1780224526Smm ASSERT(zilog->zl_clean_taskq == NULL); 1781224526Smm ASSERT(zilog->zl_get_data == NULL); 1782224526Smm ASSERT(list_is_empty(&zilog->zl_lwb_list)); 1783224526Smm 1784168404Spjd zilog->zl_get_data = get_data; 1785168404Spjd zilog->zl_clean_taskq = taskq_create("zil_clean", 1, minclsyspri, 1786168404Spjd 2, 2, TASKQ_PREPOPULATE); 1787168404Spjd 1788168404Spjd return (zilog); 1789168404Spjd} 1790168404Spjd 1791168404Spjd/* 1792168404Spjd * Close an intent log. 1793168404Spjd */ 1794168404Spjdvoid 1795168404Spjdzil_close(zilog_t *zilog) 1796168404Spjd{ 1797224526Smm lwb_t *lwb; 1798219089Spjd uint64_t txg = 0; 1799219089Spjd 1800219089Spjd zil_commit(zilog, 0); /* commit all itx */ 1801219089Spjd 1802168404Spjd /* 1803219089Spjd * The lwb_max_txg for the stubby lwb will reflect the last activity 1804219089Spjd * for the zil. After a txg_wait_synced() on the txg we know all the 1805219089Spjd * callbacks have occurred that may clean the zil. Only then can we 1806219089Spjd * destroy the zl_clean_taskq. 1807168404Spjd */ 1808219089Spjd mutex_enter(&zilog->zl_lock); 1809224526Smm lwb = list_tail(&zilog->zl_lwb_list); 1810224526Smm if (lwb != NULL) 1811224526Smm txg = lwb->lwb_max_txg; 1812219089Spjd mutex_exit(&zilog->zl_lock); 1813219089Spjd if (txg) 1814168404Spjd txg_wait_synced(zilog->zl_dmu_pool, txg); 1815239620Smm ASSERT(!zilog_is_dirty(zilog)); 1816168404Spjd 1817168404Spjd taskq_destroy(zilog->zl_clean_taskq); 1818168404Spjd zilog->zl_clean_taskq = NULL; 1819168404Spjd zilog->zl_get_data = NULL; 1820224526Smm 1821224526Smm /* 1822224526Smm * We should have only one LWB left on the list; remove it now. 1823224526Smm */ 1824224526Smm mutex_enter(&zilog->zl_lock); 1825224526Smm lwb = list_head(&zilog->zl_lwb_list); 1826224526Smm if (lwb != NULL) { 1827224526Smm ASSERT(lwb == list_tail(&zilog->zl_lwb_list)); 1828224526Smm list_remove(&zilog->zl_lwb_list, lwb); 1829224526Smm zio_buf_free(lwb->lwb_buf, lwb->lwb_sz); 1830224526Smm kmem_cache_free(zil_lwb_cache, lwb); 1831224526Smm } 1832224526Smm mutex_exit(&zilog->zl_lock); 1833168404Spjd} 1834168404Spjd 1835248571Smmstatic char *suspend_tag = "zil suspending"; 1836248571Smm 1837168404Spjd/* 1838168404Spjd * Suspend an intent log. While in suspended mode, we still honor 1839168404Spjd * synchronous semantics, but we rely on txg_wait_synced() to do it. 1840248571Smm * On old version pools, we suspend the log briefly when taking a 1841248571Smm * snapshot so that it will have an empty intent log. 1842248571Smm * 1843248571Smm * Long holds are not really intended to be used the way we do here -- 1844248571Smm * held for such a short time. A concurrent caller of dsl_dataset_long_held() 1845248571Smm * could fail. Therefore we take pains to only put a long hold if it is 1846248571Smm * actually necessary. Fortunately, it will only be necessary if the 1847248571Smm * objset is currently mounted (or the ZVOL equivalent). In that case it 1848248571Smm * will already have a long hold, so we are not really making things any worse. 1849248571Smm * 1850248571Smm * Ideally, we would locate the existing long-holder (i.e. the zfsvfs_t or 1851248571Smm * zvol_state_t), and use their mechanism to prevent their hold from being 1852248571Smm * dropped (e.g. VFS_HOLD()). However, that would be even more pain for 1853248571Smm * very little gain. 1854248571Smm * 1855248571Smm * if cookiep == NULL, this does both the suspend & resume. 1856248571Smm * Otherwise, it returns with the dataset "long held", and the cookie 1857248571Smm * should be passed into zil_resume(). 1858168404Spjd */ 1859168404Spjdint 1860248571Smmzil_suspend(const char *osname, void **cookiep) 1861168404Spjd{ 1862248571Smm objset_t *os; 1863248571Smm zilog_t *zilog; 1864248571Smm const zil_header_t *zh; 1865248571Smm int error; 1866168404Spjd 1867248571Smm error = dmu_objset_hold(osname, suspend_tag, &os); 1868248571Smm if (error != 0) 1869248571Smm return (error); 1870248571Smm zilog = dmu_objset_zil(os); 1871248571Smm 1872168404Spjd mutex_enter(&zilog->zl_lock); 1873248571Smm zh = zilog->zl_header; 1874248571Smm 1875200724Sdelphij if (zh->zh_flags & ZIL_REPLAY_NEEDED) { /* unplayed log */ 1876168404Spjd mutex_exit(&zilog->zl_lock); 1877248571Smm dmu_objset_rele(os, suspend_tag); 1878249195Smm return (SET_ERROR(EBUSY)); 1879168404Spjd } 1880248571Smm 1881248571Smm /* 1882248571Smm * Don't put a long hold in the cases where we can avoid it. This 1883248571Smm * is when there is no cookie so we are doing a suspend & resume 1884248571Smm * (i.e. called from zil_vdev_offline()), and there's nothing to do 1885248571Smm * for the suspend because it's already suspended, or there's no ZIL. 1886248571Smm */ 1887248571Smm if (cookiep == NULL && !zilog->zl_suspending && 1888248571Smm (zilog->zl_suspend > 0 || BP_IS_HOLE(&zh->zh_log))) { 1889248571Smm mutex_exit(&zilog->zl_lock); 1890248571Smm dmu_objset_rele(os, suspend_tag); 1891248571Smm return (0); 1892248571Smm } 1893248571Smm 1894248571Smm dsl_dataset_long_hold(dmu_objset_ds(os), suspend_tag); 1895248571Smm dsl_pool_rele(dmu_objset_pool(os), suspend_tag); 1896248571Smm 1897248571Smm zilog->zl_suspend++; 1898248571Smm 1899248571Smm if (zilog->zl_suspend > 1) { 1900168404Spjd /* 1901248571Smm * Someone else is already suspending it. 1902168404Spjd * Just wait for them to finish. 1903168404Spjd */ 1904248571Smm 1905168404Spjd while (zilog->zl_suspending) 1906168404Spjd cv_wait(&zilog->zl_cv_suspend, &zilog->zl_lock); 1907168404Spjd mutex_exit(&zilog->zl_lock); 1908248571Smm 1909248571Smm if (cookiep == NULL) 1910248571Smm zil_resume(os); 1911248571Smm else 1912248571Smm *cookiep = os; 1913168404Spjd return (0); 1914168404Spjd } 1915248571Smm 1916248571Smm /* 1917248571Smm * If there is no pointer to an on-disk block, this ZIL must not 1918248571Smm * be active (e.g. filesystem not mounted), so there's nothing 1919248571Smm * to clean up. 1920248571Smm */ 1921248571Smm if (BP_IS_HOLE(&zh->zh_log)) { 1922248571Smm ASSERT(cookiep != NULL); /* fast path already handled */ 1923248571Smm 1924248571Smm *cookiep = os; 1925248571Smm mutex_exit(&zilog->zl_lock); 1926248571Smm return (0); 1927248571Smm } 1928248571Smm 1929168404Spjd zilog->zl_suspending = B_TRUE; 1930168404Spjd mutex_exit(&zilog->zl_lock); 1931168404Spjd 1932219089Spjd zil_commit(zilog, 0); 1933168404Spjd 1934168404Spjd zil_destroy(zilog, B_FALSE); 1935168404Spjd 1936168404Spjd mutex_enter(&zilog->zl_lock); 1937168404Spjd zilog->zl_suspending = B_FALSE; 1938168404Spjd cv_broadcast(&zilog->zl_cv_suspend); 1939168404Spjd mutex_exit(&zilog->zl_lock); 1940168404Spjd 1941248571Smm if (cookiep == NULL) 1942248571Smm zil_resume(os); 1943248571Smm else 1944248571Smm *cookiep = os; 1945168404Spjd return (0); 1946168404Spjd} 1947168404Spjd 1948168404Spjdvoid 1949248571Smmzil_resume(void *cookie) 1950168404Spjd{ 1951248571Smm objset_t *os = cookie; 1952248571Smm zilog_t *zilog = dmu_objset_zil(os); 1953248571Smm 1954168404Spjd mutex_enter(&zilog->zl_lock); 1955168404Spjd ASSERT(zilog->zl_suspend != 0); 1956168404Spjd zilog->zl_suspend--; 1957168404Spjd mutex_exit(&zilog->zl_lock); 1958248571Smm dsl_dataset_long_rele(dmu_objset_ds(os), suspend_tag); 1959248571Smm dsl_dataset_rele(dmu_objset_ds(os), suspend_tag); 1960168404Spjd} 1961168404Spjd 1962219089Spjdtypedef struct zil_replay_arg { 1963219089Spjd zil_replay_func_t **zr_replay; 1964219089Spjd void *zr_arg; 1965219089Spjd boolean_t zr_byteswap; 1966219089Spjd char *zr_lr; 1967219089Spjd} zil_replay_arg_t; 1968219089Spjd 1969219089Spjdstatic int 1970219089Spjdzil_replay_error(zilog_t *zilog, lr_t *lr, int error) 1971209962Smm{ 1972307108Smav char name[ZFS_MAX_DATASET_NAME_LEN]; 1973209962Smm 1974219089Spjd zilog->zl_replaying_seq--; /* didn't actually replay this one */ 1975209962Smm 1976219089Spjd dmu_objset_name(zilog->zl_os, name); 1977209962Smm 1978219089Spjd cmn_err(CE_WARN, "ZFS replay transaction error %d, " 1979219089Spjd "dataset %s, seq 0x%llx, txtype %llu %s\n", error, name, 1980219089Spjd (u_longlong_t)lr->lrc_seq, 1981219089Spjd (u_longlong_t)(lr->lrc_txtype & ~TX_CI), 1982219089Spjd (lr->lrc_txtype & TX_CI) ? "CI" : ""); 1983219089Spjd 1984219089Spjd return (error); 1985209962Smm} 1986209962Smm 1987219089Spjdstatic int 1988168404Spjdzil_replay_log_record(zilog_t *zilog, lr_t *lr, void *zra, uint64_t claim_txg) 1989168404Spjd{ 1990168404Spjd zil_replay_arg_t *zr = zra; 1991168404Spjd const zil_header_t *zh = zilog->zl_header; 1992168404Spjd uint64_t reclen = lr->lrc_reclen; 1993168404Spjd uint64_t txtype = lr->lrc_txtype; 1994219089Spjd int error = 0; 1995168404Spjd 1996219089Spjd zilog->zl_replaying_seq = lr->lrc_seq; 1997168404Spjd 1998219089Spjd if (lr->lrc_seq <= zh->zh_replay_seq) /* already replayed */ 1999219089Spjd return (0); 2000219089Spjd 2001168404Spjd if (lr->lrc_txg < claim_txg) /* already committed */ 2002219089Spjd return (0); 2003168404Spjd 2004185029Spjd /* Strip case-insensitive bit, still present in log record */ 2005185029Spjd txtype &= ~TX_CI; 2006185029Spjd 2007219089Spjd if (txtype == 0 || txtype >= TX_MAX_TYPE) 2008219089Spjd return (zil_replay_error(zilog, lr, EINVAL)); 2009219089Spjd 2010219089Spjd /* 2011219089Spjd * If this record type can be logged out of order, the object 2012219089Spjd * (lr_foid) may no longer exist. That's legitimate, not an error. 2013219089Spjd */ 2014219089Spjd if (TX_OOO(txtype)) { 2015219089Spjd error = dmu_object_info(zilog->zl_os, 2016219089Spjd ((lr_ooo_t *)lr)->lr_foid, NULL); 2017219089Spjd if (error == ENOENT || error == EEXIST) 2018219089Spjd return (0); 2019209962Smm } 2020209962Smm 2021168404Spjd /* 2022168404Spjd * Make a copy of the data so we can revise and extend it. 2023168404Spjd */ 2024219089Spjd bcopy(lr, zr->zr_lr, reclen); 2025168404Spjd 2026168404Spjd /* 2027219089Spjd * If this is a TX_WRITE with a blkptr, suck in the data. 2028219089Spjd */ 2029219089Spjd if (txtype == TX_WRITE && reclen == sizeof (lr_write_t)) { 2030219089Spjd error = zil_read_log_data(zilog, (lr_write_t *)lr, 2031219089Spjd zr->zr_lr + reclen); 2032248571Smm if (error != 0) 2033219089Spjd return (zil_replay_error(zilog, lr, error)); 2034219089Spjd } 2035219089Spjd 2036219089Spjd /* 2037168404Spjd * The log block containing this lr may have been byteswapped 2038168404Spjd * so that we can easily examine common fields like lrc_txtype. 2039219089Spjd * However, the log is a mix of different record types, and only the 2040168404Spjd * replay vectors know how to byteswap their records. Therefore, if 2041168404Spjd * the lr was byteswapped, undo it before invoking the replay vector. 2042168404Spjd */ 2043168404Spjd if (zr->zr_byteswap) 2044219089Spjd byteswap_uint64_array(zr->zr_lr, reclen); 2045168404Spjd 2046168404Spjd /* 2047168404Spjd * We must now do two things atomically: replay this log record, 2048209962Smm * and update the log header sequence number to reflect the fact that 2049209962Smm * we did so. At the end of each replay function the sequence number 2050209962Smm * is updated if we are in replay mode. 2051168404Spjd */ 2052219089Spjd error = zr->zr_replay[txtype](zr->zr_arg, zr->zr_lr, zr->zr_byteswap); 2053248571Smm if (error != 0) { 2054168404Spjd /* 2055168404Spjd * The DMU's dnode layer doesn't see removes until the txg 2056168404Spjd * commits, so a subsequent claim can spuriously fail with 2057209962Smm * EEXIST. So if we receive any error we try syncing out 2058219089Spjd * any removes then retry the transaction. Note that we 2059219089Spjd * specify B_FALSE for byteswap now, so we don't do it twice. 2060168404Spjd */ 2061219089Spjd txg_wait_synced(spa_get_dsl(zilog->zl_spa), 0); 2062219089Spjd error = zr->zr_replay[txtype](zr->zr_arg, zr->zr_lr, B_FALSE); 2063248571Smm if (error != 0) 2064219089Spjd return (zil_replay_error(zilog, lr, error)); 2065168404Spjd } 2066219089Spjd return (0); 2067168404Spjd} 2068168404Spjd 2069168404Spjd/* ARGSUSED */ 2070219089Spjdstatic int 2071168404Spjdzil_incr_blks(zilog_t *zilog, blkptr_t *bp, void *arg, uint64_t claim_txg) 2072168404Spjd{ 2073168404Spjd zilog->zl_replay_blks++; 2074219089Spjd 2075219089Spjd return (0); 2076168404Spjd} 2077168404Spjd 2078168404Spjd/* 2079168404Spjd * If this dataset has a non-empty intent log, replay it and destroy it. 2080168404Spjd */ 2081168404Spjdvoid 2082209962Smmzil_replay(objset_t *os, void *arg, zil_replay_func_t *replay_func[TX_MAX_TYPE]) 2083168404Spjd{ 2084168404Spjd zilog_t *zilog = dmu_objset_zil(os); 2085168404Spjd const zil_header_t *zh = zilog->zl_header; 2086168404Spjd zil_replay_arg_t zr; 2087168404Spjd 2088200724Sdelphij if ((zh->zh_flags & ZIL_REPLAY_NEEDED) == 0) { 2089168404Spjd zil_destroy(zilog, B_TRUE); 2090168404Spjd return; 2091168404Spjd } 2092168404Spjd 2093168404Spjd zr.zr_replay = replay_func; 2094168404Spjd zr.zr_arg = arg; 2095168404Spjd zr.zr_byteswap = BP_SHOULD_BYTESWAP(&zh->zh_log); 2096219089Spjd zr.zr_lr = kmem_alloc(2 * SPA_MAXBLOCKSIZE, KM_SLEEP); 2097168404Spjd 2098168404Spjd /* 2099168404Spjd * Wait for in-progress removes to sync before starting replay. 2100168404Spjd */ 2101168404Spjd txg_wait_synced(zilog->zl_dmu_pool, 0); 2102168404Spjd 2103209962Smm zilog->zl_replay = B_TRUE; 2104219089Spjd zilog->zl_replay_time = ddi_get_lbolt(); 2105168404Spjd ASSERT(zilog->zl_replay_blks == 0); 2106168404Spjd (void) zil_parse(zilog, zil_incr_blks, zil_replay_log_record, &zr, 2107168404Spjd zh->zh_claim_txg); 2108219089Spjd kmem_free(zr.zr_lr, 2 * SPA_MAXBLOCKSIZE); 2109168404Spjd 2110168404Spjd zil_destroy(zilog, B_FALSE); 2111185029Spjd txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg); 2112209962Smm zilog->zl_replay = B_FALSE; 2113168404Spjd} 2114168404Spjd 2115219089Spjdboolean_t 2116219089Spjdzil_replaying(zilog_t *zilog, dmu_tx_t *tx) 2117168404Spjd{ 2118219089Spjd if (zilog->zl_sync == ZFS_SYNC_DISABLED) 2119219089Spjd return (B_TRUE); 2120168404Spjd 2121219089Spjd if (zilog->zl_replay) { 2122219089Spjd dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx); 2123219089Spjd zilog->zl_replayed_seq[dmu_tx_get_txg(tx) & TXG_MASK] = 2124219089Spjd zilog->zl_replaying_seq; 2125219089Spjd return (B_TRUE); 2126168404Spjd } 2127168404Spjd 2128219089Spjd return (B_FALSE); 2129168404Spjd} 2130213197Smm 2131213197Smm/* ARGSUSED */ 2132213197Smmint 2133219089Spjdzil_vdev_offline(const char *osname, void *arg) 2134213197Smm{ 2135213197Smm int error; 2136213197Smm 2137248571Smm error = zil_suspend(osname, NULL); 2138248571Smm if (error != 0) 2139249195Smm return (SET_ERROR(EEXIST)); 2140248571Smm return (0); 2141213197Smm} 2142