zil.c revision 224526
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011 by Delphix. All rights reserved.
 */

/* Portions Copyright 2010 Robert Milkowski */

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/dmu.h>
#include <sys/zap.h>
#include <sys/arc.h>
#include <sys/stat.h>
#include <sys/resource.h>
#include <sys/zil.h>
#include <sys/zil_impl.h>
#include <sys/dsl_dataset.h>
#include <sys/vdev_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_pool.h>

/*
 * The zfs intent log (ZIL) saves transaction records of system calls
 * that change the file system in memory with enough information
 * to be able to replay them. These are stored in memory until
 * either the DMU transaction group (txg) commits them to the stable pool
 * and they can be discarded, or they are flushed to the stable log
 * (also in the pool) due to a fsync, O_DSYNC or other synchronous
 * requirement. In the event of a panic or power failure, those log
 * records (transactions) are replayed.
 *
 * There is one ZIL per file system. Its on-disk (pool) format consists
 * of 3 parts:
 *
 * 	- ZIL header
 * 	- ZIL blocks
 * 	- ZIL records
 *
 * A log record holds a system call transaction. Log blocks can
 * hold many log records and the blocks are chained together.
 * Each ZIL block contains a block pointer (blkptr_t) to the next
 * ZIL block in the chain. The ZIL header points to the first
 * block in the chain. Note there is not a fixed place in the pool
 * to hold blocks. They are dynamically allocated and freed as
 * needed from the blocks available. The sketch below illustrates
 * the ZIL structure.
 */
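/*
 * An illustrative sketch (the original comment referred to a figure that
 * was never included in this file). Exact layout varies with the checksum
 * type: the zil_chain_t sits at the head of a ZILOG2 block and at the tail
 * of an older ZILOG block.
 *
 *  +-------------+       +-----------+------+       +-----------+------+
 *  | ZIL header  |       |  log      | next |       |  log      | next |
 *  |  zh_log  ---+------>|  records  | bp --+------>|  records  | bp --+--> ...
 *  +-------------+       +-----------+------+       +-----------+------+
 *                            ZIL block                  ZIL block
 */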
/*
 * This global ZIL switch affects all pools.
 */
int zil_replay_disable = 0;	/* disable intent logging replay */
SYSCTL_DECL(_vfs_zfs);
TUNABLE_INT("vfs.zfs.zil_replay_disable", &zil_replay_disable);
SYSCTL_INT(_vfs_zfs, OID_AUTO, zil_replay_disable, CTLFLAG_RW,
    &zil_replay_disable, 0, "Disable intent logging replay");

/*
 * Tunable parameter for debugging or performance analysis. Setting
 * zfs_nocacheflush will cause corruption on power loss if a volatile
 * out-of-order write cache is enabled.
 */
boolean_t zfs_nocacheflush = B_FALSE;
TUNABLE_INT("vfs.zfs.cache_flush_disable", &zfs_nocacheflush);
SYSCTL_INT(_vfs_zfs, OID_AUTO, cache_flush_disable, CTLFLAG_RDTUN,
    &zfs_nocacheflush, 0, "Disable cache flush");

static kmem_cache_t *zil_lwb_cache;

static void zil_async_to_sync(zilog_t *zilog, uint64_t foid);

/* An lwb is empty when nothing beyond its zil_chain_t has been used. */
#define	LWB_EMPTY(lwb) ((BP_GET_LSIZE(&lwb->lwb_blk) - \
    sizeof (zil_chain_t)) == (lwb->lwb_sz - lwb->lwb_nused))

/*
 * ziltest is by and large an ugly hack, but very useful in
 * checking replay without tedious work.
 * When running ziltest we want to keep all itx's and so maintain
 * a single list in the zl_itxg[] that uses a high txg: ZILTEST_TXG.
 * We subtract TXG_CONCURRENT_STATES to allow for common code.
 */
#define	ZILTEST_TXG (UINT64_MAX - TXG_CONCURRENT_STATES)

static int
zil_bp_compare(const void *x1, const void *x2)
{
	const dva_t *dva1 = &((zil_bp_node_t *)x1)->zn_dva;
	const dva_t *dva2 = &((zil_bp_node_t *)x2)->zn_dva;

	if (DVA_GET_VDEV(dva1) < DVA_GET_VDEV(dva2))
		return (-1);
	if (DVA_GET_VDEV(dva1) > DVA_GET_VDEV(dva2))
		return (1);

	if (DVA_GET_OFFSET(dva1) < DVA_GET_OFFSET(dva2))
		return (-1);
	if (DVA_GET_OFFSET(dva1) > DVA_GET_OFFSET(dva2))
		return (1);

	return (0);
}

static void
zil_bp_tree_init(zilog_t *zilog)
{
	avl_create(&zilog->zl_bp_tree, zil_bp_compare,
	    sizeof (zil_bp_node_t), offsetof(zil_bp_node_t, zn_node));
}

static void
zil_bp_tree_fini(zilog_t *zilog)
{
	avl_tree_t *t = &zilog->zl_bp_tree;
	zil_bp_node_t *zn;
	void *cookie = NULL;

	while ((zn = avl_destroy_nodes(t, &cookie)) != NULL)
		kmem_free(zn, sizeof (zil_bp_node_t));

	avl_destroy(t);
}

int
zil_bp_tree_add(zilog_t *zilog, const blkptr_t *bp)
{
	avl_tree_t *t = &zilog->zl_bp_tree;
	const dva_t *dva = BP_IDENTITY(bp);
	zil_bp_node_t *zn;
	avl_index_t where;

	if (avl_find(t, dva, &where) != NULL)
		return (EEXIST);

	zn = kmem_alloc(sizeof (zil_bp_node_t), KM_SLEEP);
	zn->zn_dva = *dva;
	avl_insert(t, zn, where);

	return (0);
}

static zil_header_t *
zil_header_in_syncing_context(zilog_t *zilog)
{
	return ((zil_header_t *)zilog->zl_header);
}

static void
zil_init_log_chain(zilog_t *zilog, blkptr_t *bp)
{
	zio_cksum_t *zc = &bp->blk_cksum;

	zc->zc_word[ZIL_ZC_GUID_0] = spa_get_random(-1ULL);
	zc->zc_word[ZIL_ZC_GUID_1] = spa_get_random(-1ULL);
	zc->zc_word[ZIL_ZC_OBJSET] = dmu_objset_id(zilog->zl_os);
	zc->zc_word[ZIL_ZC_SEQ] = 1ULL;
}

/*
 * Read a log block and make sure it's valid.
 */
static int
zil_read_log_block(zilog_t *zilog, const blkptr_t *bp, blkptr_t *nbp, void *dst,
    char **end)
{
	enum zio_flag zio_flags = ZIO_FLAG_CANFAIL;
	uint32_t aflags = ARC_WAIT;
	arc_buf_t *abuf = NULL;
	zbookmark_t zb;
	int error;

	if (zilog->zl_header->zh_claim_txg == 0)
		zio_flags |= ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB;

	if (!(zilog->zl_header->zh_flags & ZIL_CLAIM_LR_SEQ_VALID))
		zio_flags |= ZIO_FLAG_SPECULATIVE;

	SET_BOOKMARK(&zb, bp->blk_cksum.zc_word[ZIL_ZC_OBJSET],
	    ZB_ZIL_OBJECT, ZB_ZIL_LEVEL, bp->blk_cksum.zc_word[ZIL_ZC_SEQ]);

	error = dsl_read_nolock(NULL, zilog->zl_spa, bp, arc_getbuf_func, &abuf,
	    ZIO_PRIORITY_SYNC_READ, zio_flags, &aflags, &zb);

	if (error == 0) {
		zio_cksum_t cksum = bp->blk_cksum;

		/*
		 * Validate the checksummed log block.
		 *
		 * Sequence numbers should be... sequential. The checksum
		 * verifier for the next block should be bp's checksum plus 1.
		 *
		 * Also check the log chain linkage and size used.
		 */
		cksum.zc_word[ZIL_ZC_SEQ]++;

		if (BP_GET_CHECKSUM(bp) == ZIO_CHECKSUM_ZILOG2) {
			zil_chain_t *zilc = abuf->b_data;
			char *lr = (char *)(zilc + 1);
			uint64_t len = zilc->zc_nused - sizeof (zil_chain_t);

			if (bcmp(&cksum, &zilc->zc_next_blk.blk_cksum,
			    sizeof (cksum)) || BP_IS_HOLE(&zilc->zc_next_blk)) {
				error = ECKSUM;
			} else {
				bcopy(lr, dst, len);
				*end = (char *)dst + len;
				*nbp = zilc->zc_next_blk;
			}
		} else {
			char *lr = abuf->b_data;
			uint64_t size = BP_GET_LSIZE(bp);
			zil_chain_t *zilc = (zil_chain_t *)(lr + size) - 1;

			if (bcmp(&cksum, &zilc->zc_next_blk.blk_cksum,
			    sizeof (cksum)) || BP_IS_HOLE(&zilc->zc_next_blk) ||
			    (zilc->zc_nused > (size - sizeof (*zilc)))) {
				error = ECKSUM;
			} else {
				bcopy(lr, dst, zilc->zc_nused);
				*end = (char *)dst + zilc->zc_nused;
				*nbp = zilc->zc_next_blk;
			}
		}

		VERIFY(arc_buf_remove_ref(abuf, &abuf) == 1);
	}

	return (error);
}

/*
 * Read a TX_WRITE log data block.
 */
static int
zil_read_log_data(zilog_t *zilog, const lr_write_t *lr, void *wbuf)
{
	enum zio_flag zio_flags = ZIO_FLAG_CANFAIL;
	const blkptr_t *bp = &lr->lr_blkptr;
	uint32_t aflags = ARC_WAIT;
	arc_buf_t *abuf = NULL;
	zbookmark_t zb;
	int error;

	if (BP_IS_HOLE(bp)) {
		if (wbuf != NULL)
			bzero(wbuf, MAX(BP_GET_LSIZE(bp), lr->lr_length));
		return (0);
	}

	if (zilog->zl_header->zh_claim_txg == 0)
		zio_flags |= ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB;

	SET_BOOKMARK(&zb, dmu_objset_id(zilog->zl_os), lr->lr_foid,
	    ZB_ZIL_LEVEL, lr->lr_offset / BP_GET_LSIZE(bp));

	error = arc_read_nolock(NULL, zilog->zl_spa, bp, arc_getbuf_func, &abuf,
	    ZIO_PRIORITY_SYNC_READ, zio_flags, &aflags, &zb);

	if (error == 0) {
		if (wbuf != NULL)
			bcopy(abuf->b_data, wbuf, arc_buf_size(abuf));
		(void) arc_buf_remove_ref(abuf, &abuf);
	}

	return (error);
}

/*
 * Parse the intent log, and call parse_func for each valid record within.
 */
int
zil_parse(zilog_t *zilog, zil_parse_blk_func_t *parse_blk_func,
    zil_parse_lr_func_t *parse_lr_func, void *arg, uint64_t txg)
{
	const zil_header_t *zh = zilog->zl_header;
	boolean_t claimed = !!zh->zh_claim_txg;
	uint64_t claim_blk_seq = claimed ? zh->zh_claim_blk_seq : UINT64_MAX;
	uint64_t claim_lr_seq = claimed ? zh->zh_claim_lr_seq : UINT64_MAX;
	uint64_t max_blk_seq = 0;
	uint64_t max_lr_seq = 0;
	uint64_t blk_count = 0;
	uint64_t lr_count = 0;
	blkptr_t blk, next_blk;
	char *lrbuf, *lrp;
	int error = 0;

	/*
	 * Old logs didn't record the maximum zh_claim_lr_seq.
	 */
	if (!(zh->zh_flags & ZIL_CLAIM_LR_SEQ_VALID))
		claim_lr_seq = UINT64_MAX;

	/*
	 * Starting at the block pointed to by zh_log we read the log chain.
	 * For each block in the chain we strongly check that block to
	 * ensure its validity. We stop when an invalid block is found.
	 * For each block pointer in the chain we call parse_blk_func().
	 * For each record in each valid block we call parse_lr_func().
	 * If the log has been claimed, stop if we encounter a sequence
	 * number greater than the highest claimed sequence number.
	 */
	lrbuf = zio_buf_alloc(SPA_MAXBLOCKSIZE);
	zil_bp_tree_init(zilog);

	for (blk = zh->zh_log; !BP_IS_HOLE(&blk); blk = next_blk) {
		uint64_t blk_seq = blk.blk_cksum.zc_word[ZIL_ZC_SEQ];
		int reclen;
		char *end;

		if (blk_seq > claim_blk_seq)
			break;
		if ((error = parse_blk_func(zilog, &blk, arg, txg)) != 0)
			break;
		ASSERT3U(max_blk_seq, <, blk_seq);
		max_blk_seq = blk_seq;
		blk_count++;

		if (max_lr_seq == claim_lr_seq && max_blk_seq == claim_blk_seq)
			break;

		error = zil_read_log_block(zilog, &blk, &next_blk, lrbuf, &end);
		if (error)
			break;

		for (lrp = lrbuf; lrp < end; lrp += reclen) {
			lr_t *lr = (lr_t *)lrp;
			reclen = lr->lrc_reclen;
			ASSERT3U(reclen, >=, sizeof (lr_t));
			if (lr->lrc_seq > claim_lr_seq)
				goto done;
			if ((error = parse_lr_func(zilog, lr, arg, txg)) != 0)
				goto done;
			ASSERT3U(max_lr_seq, <, lr->lrc_seq);
			max_lr_seq = lr->lrc_seq;
			lr_count++;
		}
	}
done:
	zilog->zl_parse_error = error;
	zilog->zl_parse_blk_seq = max_blk_seq;
	zilog->zl_parse_lr_seq = max_lr_seq;
	zilog->zl_parse_blk_count = blk_count;
	zilog->zl_parse_lr_count = lr_count;

	ASSERT(!claimed || !(zh->zh_flags & ZIL_CLAIM_LR_SEQ_VALID) ||
	    (max_blk_seq == claim_blk_seq && max_lr_seq == claim_lr_seq));

	zil_bp_tree_fini(zilog);
	zio_buf_free(lrbuf, SPA_MAXBLOCKSIZE);

	return (error);
}

static int
zil_claim_log_block(zilog_t *zilog, blkptr_t *bp, void *tx, uint64_t first_txg)
{
	/*
	 * Claim log block if not already committed and not already claimed.
	 * If tx == NULL, just verify that the block is claimable.
	 */
	if (bp->blk_birth < first_txg || zil_bp_tree_add(zilog, bp) != 0)
		return (0);

	return (zio_wait(zio_claim(NULL, zilog->zl_spa,
	    tx == NULL ? 0 : first_txg, bp, spa_claim_notify, NULL,
	    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB)));
}

static int
zil_claim_log_record(zilog_t *zilog, lr_t *lrc, void *tx, uint64_t first_txg)
{
	lr_write_t *lr = (lr_write_t *)lrc;
	int error;

	if (lrc->lrc_txtype != TX_WRITE)
		return (0);

	/*
	 * If the block is not readable, don't claim it. This can happen
	 * in normal operation when a log block is written to disk before
	 * some of the dmu_sync() blocks it points to. In this case, the
	 * transaction cannot have been committed to anyone (we would have
	 * waited for all writes to be stable first), so it is semantically
	 * correct to declare this the end of the log.
	 */
	if (lr->lr_blkptr.blk_birth >= first_txg &&
	    (error = zil_read_log_data(zilog, lr, NULL)) != 0)
		return (error);
	return (zil_claim_log_block(zilog, &lr->lr_blkptr, tx, first_txg));
}

/* ARGSUSED */
static int
zil_free_log_block(zilog_t *zilog, blkptr_t *bp, void *tx, uint64_t claim_txg)
{
	zio_free_zil(zilog->zl_spa, dmu_tx_get_txg(tx), bp);

	return (0);
}

static int
zil_free_log_record(zilog_t *zilog, lr_t *lrc, void *tx, uint64_t claim_txg)
{
	lr_write_t *lr = (lr_write_t *)lrc;
	blkptr_t *bp = &lr->lr_blkptr;

	/*
	 * If we previously claimed it, we need to free it.
	 */
	if (claim_txg != 0 && lrc->lrc_txtype == TX_WRITE &&
	    bp->blk_birth >= claim_txg && zil_bp_tree_add(zilog, bp) == 0)
		zio_free(zilog->zl_spa, dmu_tx_get_txg(tx), bp);

	return (0);
}

static lwb_t *
zil_alloc_lwb(zilog_t *zilog, blkptr_t *bp, uint64_t txg)
{
	lwb_t *lwb;

	lwb = kmem_cache_alloc(zil_lwb_cache, KM_SLEEP);
	lwb->lwb_zilog = zilog;
	lwb->lwb_blk = *bp;
	lwb->lwb_buf = zio_buf_alloc(BP_GET_LSIZE(bp));
	lwb->lwb_max_txg = txg;
	lwb->lwb_zio = NULL;
	lwb->lwb_tx = NULL;
	if (BP_GET_CHECKSUM(bp) == ZIO_CHECKSUM_ZILOG2) {
		lwb->lwb_nused = sizeof (zil_chain_t);
		lwb->lwb_sz = BP_GET_LSIZE(bp);
	} else {
		lwb->lwb_nused = 0;
		lwb->lwb_sz = BP_GET_LSIZE(bp) - sizeof (zil_chain_t);
	}

	mutex_enter(&zilog->zl_lock);
	list_insert_tail(&zilog->zl_lwb_list, lwb);
	mutex_exit(&zilog->zl_lock);

	return (lwb);
}

/*
 * Create an on-disk intent log.
 */
static lwb_t *
zil_create(zilog_t *zilog)
{
	const zil_header_t *zh = zilog->zl_header;
	lwb_t *lwb = NULL;
	uint64_t txg = 0;
	dmu_tx_t *tx = NULL;
	blkptr_t blk;
	int error = 0;

	/*
	 * Wait for any previous destroy to complete.
	 */
	txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);

	ASSERT(zh->zh_claim_txg == 0);
	ASSERT(zh->zh_replay_seq == 0);

	blk = zh->zh_log;

	/*
	 * Allocate an initial log block if:
	 *    - there isn't one already
	 *    - the existing block is the wrong endianness
	 */
	if (BP_IS_HOLE(&blk) || BP_SHOULD_BYTESWAP(&blk)) {
		tx = dmu_tx_create(zilog->zl_os);
		VERIFY(dmu_tx_assign(tx, TXG_WAIT) == 0);
		dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
		txg = dmu_tx_get_txg(tx);

		if (!BP_IS_HOLE(&blk)) {
			zio_free_zil(zilog->zl_spa, txg, &blk);
			BP_ZERO(&blk);
		}

		error = zio_alloc_zil(zilog->zl_spa, txg, &blk, NULL,
		    ZIL_MIN_BLKSZ, zilog->zl_logbias == ZFS_LOGBIAS_LATENCY);

		if (error == 0)
			zil_init_log_chain(zilog, &blk);
	}

	/*
	 * Allocate a log write buffer (lwb) for the first log block.
	 */
	if (error == 0)
		lwb = zil_alloc_lwb(zilog, &blk, txg);

	/*
	 * If we just allocated the first log block, commit our transaction
	 * and wait for zil_sync() to stuff the block pointer into zh_log.
	 * (zh is part of the MOS, so we cannot modify it in open context.)
	 */
	if (tx != NULL) {
		dmu_tx_commit(tx);
		txg_wait_synced(zilog->zl_dmu_pool, txg);
	}

	ASSERT(bcmp(&blk, &zh->zh_log, sizeof (blk)) == 0);

	return (lwb);
}
/*
 * In one tx, free all log blocks and clear the log header.
 * If keep_first is set, then we're replaying a log with no content.
 * We want to keep the first block, however, so that the first
 * synchronous transaction doesn't require a txg_wait_synced()
 * in zil_create(). We don't need to txg_wait_synced() here either
 * when keep_first is set, because both zil_create() and zil_destroy()
 * will wait for any in-progress destroys to complete.
 */
void
zil_destroy(zilog_t *zilog, boolean_t keep_first)
{
	const zil_header_t *zh = zilog->zl_header;
	lwb_t *lwb;
	dmu_tx_t *tx;
	uint64_t txg;

	/*
	 * Wait for any previous destroy to complete.
	 */
	txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);

	zilog->zl_old_header = *zh;		/* debugging aid */

	if (BP_IS_HOLE(&zh->zh_log))
		return;

	tx = dmu_tx_create(zilog->zl_os);
	VERIFY(dmu_tx_assign(tx, TXG_WAIT) == 0);
	dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
	txg = dmu_tx_get_txg(tx);

	mutex_enter(&zilog->zl_lock);

	ASSERT3U(zilog->zl_destroy_txg, <, txg);
	zilog->zl_destroy_txg = txg;
	zilog->zl_keep_first = keep_first;

	if (!list_is_empty(&zilog->zl_lwb_list)) {
		ASSERT(zh->zh_claim_txg == 0);
		VERIFY(!keep_first);
		while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
			list_remove(&zilog->zl_lwb_list, lwb);
			if (lwb->lwb_buf != NULL)
				zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
			zio_free_zil(zilog->zl_spa, txg, &lwb->lwb_blk);
			kmem_cache_free(zil_lwb_cache, lwb);
		}
	} else if (!keep_first) {
		(void) zil_parse(zilog, zil_free_log_block,
		    zil_free_log_record, tx, zh->zh_claim_txg);
	}
	mutex_exit(&zilog->zl_lock);

	dmu_tx_commit(tx);
}

int
zil_claim(const char *osname, void *txarg)
{
	dmu_tx_t *tx = txarg;
	uint64_t first_txg = dmu_tx_get_txg(tx);
	zilog_t *zilog;
	zil_header_t *zh;
	objset_t *os;
	int error;

	error = dmu_objset_hold(osname, FTAG, &os);
	if (error) {
		cmn_err(CE_WARN, "can't open objset for %s", osname);
		return (0);
	}

	zilog = dmu_objset_zil(os);
	zh = zil_header_in_syncing_context(zilog);

	if (spa_get_log_state(zilog->zl_spa) == SPA_LOG_CLEAR) {
		if (!BP_IS_HOLE(&zh->zh_log))
			zio_free_zil(zilog->zl_spa, first_txg, &zh->zh_log);
		BP_ZERO(&zh->zh_log);
		dsl_dataset_dirty(dmu_objset_ds(os), tx);
		dmu_objset_rele(os, FTAG);
		return (0);
	}

	/*
	 * Claim all log blocks if we haven't already done so, and remember
	 * the highest claimed sequence number. This ensures that if we can
	 * read only part of the log now (e.g. due to a missing device),
	 * but we can read the entire log later, we will not try to replay
	 * or destroy beyond the last block we successfully claimed.
	 */
	ASSERT3U(zh->zh_claim_txg, <=, first_txg);
	if (zh->zh_claim_txg == 0 && !BP_IS_HOLE(&zh->zh_log)) {
		(void) zil_parse(zilog, zil_claim_log_block,
		    zil_claim_log_record, tx, first_txg);
		zh->zh_claim_txg = first_txg;
		zh->zh_claim_blk_seq = zilog->zl_parse_blk_seq;
		zh->zh_claim_lr_seq = zilog->zl_parse_lr_seq;
		if (zilog->zl_parse_lr_count || zilog->zl_parse_blk_count > 1)
			zh->zh_flags |= ZIL_REPLAY_NEEDED;
		zh->zh_flags |= ZIL_CLAIM_LR_SEQ_VALID;
		dsl_dataset_dirty(dmu_objset_ds(os), tx);
	}

	ASSERT3U(first_txg, ==, (spa_last_synced_txg(zilog->zl_spa) + 1));
	dmu_objset_rele(os, FTAG);
	return (0);
}
/*
 * Check the log by walking the log chain.
 * Checksum errors are ok as they indicate the end of the chain.
 * Any other error (no device or read failure) returns an error.
 */
int
zil_check_log_chain(const char *osname, void *tx)
{
	zilog_t *zilog;
	objset_t *os;
	blkptr_t *bp;
	int error;

	ASSERT(tx == NULL);

	error = dmu_objset_hold(osname, FTAG, &os);
	if (error) {
		cmn_err(CE_WARN, "can't open objset for %s", osname);
		return (0);
	}

	zilog = dmu_objset_zil(os);
	bp = (blkptr_t *)&zilog->zl_header->zh_log;

	/*
	 * Check the first block and determine if it's on a log device
	 * which may have been removed or faulted prior to loading this
	 * pool. If so, there's no point in checking the rest of the log
	 * as its content should have already been synced to the pool.
	 */
	if (!BP_IS_HOLE(bp)) {
		vdev_t *vd;
		boolean_t valid = B_TRUE;

		spa_config_enter(os->os_spa, SCL_STATE, FTAG, RW_READER);
		vd = vdev_lookup_top(os->os_spa, DVA_GET_VDEV(&bp->blk_dva[0]));
		if (vd->vdev_islog && vdev_is_dead(vd))
			valid = vdev_log_state_valid(vd);
		spa_config_exit(os->os_spa, SCL_STATE, FTAG);

		if (!valid) {
			dmu_objset_rele(os, FTAG);
			return (0);
		}
	}

	/*
	 * Because tx == NULL, zil_claim_log_block() will not actually claim
	 * any blocks, but just determine whether it is possible to do so.
	 * In addition to checking the log chain, zil_claim_log_block()
	 * will invoke zio_claim() with a done func of spa_claim_notify(),
	 * which will update spa_max_claim_txg. See spa_load() for details.
	 */
	error = zil_parse(zilog, zil_claim_log_block, zil_claim_log_record, tx,
	    zilog->zl_header->zh_claim_txg ? -1ULL : spa_first_txg(os->os_spa));

	dmu_objset_rele(os, FTAG);

	return ((error == ECKSUM || error == ENOENT) ? 0 : error);
}

static int
zil_vdev_compare(const void *x1, const void *x2)
{
	const uint64_t v1 = ((zil_vdev_node_t *)x1)->zv_vdev;
	const uint64_t v2 = ((zil_vdev_node_t *)x2)->zv_vdev;

	if (v1 < v2)
		return (-1);
	if (v1 > v2)
		return (1);

	return (0);
}

void
zil_add_block(zilog_t *zilog, const blkptr_t *bp)
{
	avl_tree_t *t = &zilog->zl_vdev_tree;
	avl_index_t where;
	zil_vdev_node_t *zv, zvsearch;
	int ndvas = BP_GET_NDVAS(bp);
	int i;

	if (zfs_nocacheflush)
		return;

	ASSERT(zilog->zl_writer);

	/*
	 * Even though we're zl_writer, we still need a lock because the
	 * zl_get_data() callbacks may have dmu_sync() done callbacks
	 * that will run concurrently.
	 */
	mutex_enter(&zilog->zl_vdev_lock);
	for (i = 0; i < ndvas; i++) {
		zvsearch.zv_vdev = DVA_GET_VDEV(&bp->blk_dva[i]);
		if (avl_find(t, &zvsearch, &where) == NULL) {
			zv = kmem_alloc(sizeof (*zv), KM_SLEEP);
			zv->zv_vdev = zvsearch.zv_vdev;
			avl_insert(t, zv, where);
		}
	}
	mutex_exit(&zilog->zl_vdev_lock);
}

static void
zil_flush_vdevs(zilog_t *zilog)
{
	spa_t *spa = zilog->zl_spa;
	avl_tree_t *t = &zilog->zl_vdev_tree;
	void *cookie = NULL;
	zil_vdev_node_t *zv;
	zio_t *zio;

	ASSERT(zilog->zl_writer);

	/*
	 * We don't need zl_vdev_lock here because we're the zl_writer,
	 * and all zl_get_data() callbacks are done.
	 */
	if (avl_numnodes(t) == 0)
		return;

	spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);

	zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);

	while ((zv = avl_destroy_nodes(t, &cookie)) != NULL) {
		vdev_t *vd = vdev_lookup_top(spa, zv->zv_vdev);
		if (vd != NULL)
			zio_flush(zio, vd);
		kmem_free(zv, sizeof (*zv));
	}

	/*
	 * Wait for all the flushes to complete. Not all devices actually
	 * support the DKIOCFLUSHWRITECACHE ioctl, so it's OK if it fails.
	 */
	(void) zio_wait(zio);

	spa_config_exit(spa, SCL_STATE, FTAG);
}

/*
 * Function called when a log block write completes.
 */
static void
zil_lwb_write_done(zio_t *zio)
{
	lwb_t *lwb = zio->io_private;
	zilog_t *zilog = lwb->lwb_zilog;
	dmu_tx_t *tx = lwb->lwb_tx;

	ASSERT(BP_GET_COMPRESS(zio->io_bp) == ZIO_COMPRESS_OFF);
	ASSERT(BP_GET_TYPE(zio->io_bp) == DMU_OT_INTENT_LOG);
	ASSERT(BP_GET_LEVEL(zio->io_bp) == 0);
	ASSERT(BP_GET_BYTEORDER(zio->io_bp) == ZFS_HOST_BYTEORDER);
	ASSERT(!BP_IS_GANG(zio->io_bp));
	ASSERT(!BP_IS_HOLE(zio->io_bp));
	ASSERT(zio->io_bp->blk_fill == 0);

	/*
	 * Ensure the lwb buffer pointer is cleared before releasing
	 * the txg. If we have had an allocation failure and
	 * the txg is waiting to sync then we want zil_sync()
	 * to remove the lwb so that it's not picked up as the next new
	 * one in zil_commit_writer(). zil_sync() will only remove
	 * the lwb if lwb_buf is null.
	 */
	zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
	mutex_enter(&zilog->zl_lock);
	lwb->lwb_buf = NULL;
	lwb->lwb_tx = NULL;
	mutex_exit(&zilog->zl_lock);

	/*
	 * Now that we've written this log block, we have a stable pointer
	 * to the next block in the chain, so it's OK to let the txg in
	 * which we allocated the next block sync.
	 */
	dmu_tx_commit(tx);
}

/*
 * Initialize the io for a log block.
 */
static void
zil_lwb_write_init(zilog_t *zilog, lwb_t *lwb)
{
	zbookmark_t zb;

	SET_BOOKMARK(&zb, lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_OBJSET],
	    ZB_ZIL_OBJECT, ZB_ZIL_LEVEL,
	    lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_SEQ]);

	if (zilog->zl_root_zio == NULL) {
		zilog->zl_root_zio = zio_root(zilog->zl_spa, NULL, NULL,
		    ZIO_FLAG_CANFAIL);
	}
	if (lwb->lwb_zio == NULL) {
		lwb->lwb_zio = zio_rewrite(zilog->zl_root_zio, zilog->zl_spa,
		    0, &lwb->lwb_blk, lwb->lwb_buf, BP_GET_LSIZE(&lwb->lwb_blk),
		    zil_lwb_write_done, lwb, ZIO_PRIORITY_LOG_WRITE,
		    ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_PROPAGATE, &zb);
	}
}

/*
 * Define a limited set of intent log block sizes.
 * These must be a multiple of 4KB. Note only the amount used (again
 * aligned to 4KB) actually gets written. However, we can't always just
 * allocate SPA_MAXBLOCKSIZE as the slog space could be exhausted.
 */
uint64_t zil_block_buckets[] = {
    4096,		/* non TX_WRITE */
    8192+4096,		/* data base */
    32*1024 + 4096,	/* NFS writes */
    UINT64_MAX
};
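/*
 * Worked example of the bucket search in zil_lwb_write_start() below
 * (illustrative): a commit that has used 20KB needs
 * 20K + sizeof (zil_chain_t) bytes, so the smallest bucket that fits is
 * 32K + 4K. The chosen size is then smoothed by taking the maximum over
 * the last ZIL_PREV_BLKS block sizes, so an alternating 2K/64K workload
 * settles on the larger size instead of flapping between buckets.
 */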
/*
 * Use the slog as long as the logbias is 'latency' and the current commit size
 * is less than the limit or the total list size is less than 2X the limit.
 * Limit checking is disabled by setting zil_slog_limit to UINT64_MAX.
 */
uint64_t zil_slog_limit = 1024 * 1024;
#define	USE_SLOG(zilog) (((zilog)->zl_logbias == ZFS_LOGBIAS_LATENCY) && \
	(((zilog)->zl_cur_used < zil_slog_limit) || \
	((zilog)->zl_itx_list_sz < (zil_slog_limit << 1))))

/*
 * Start a log block write and advance to the next log block.
 * Calls are serialized.
 */
static lwb_t *
zil_lwb_write_start(zilog_t *zilog, lwb_t *lwb)
{
	lwb_t *nlwb = NULL;
	zil_chain_t *zilc;
	spa_t *spa = zilog->zl_spa;
	blkptr_t *bp;
	dmu_tx_t *tx;
	uint64_t txg;
	uint64_t zil_blksz, wsz;
	int i, error;

	if (BP_GET_CHECKSUM(&lwb->lwb_blk) == ZIO_CHECKSUM_ZILOG2) {
		zilc = (zil_chain_t *)lwb->lwb_buf;
		bp = &zilc->zc_next_blk;
	} else {
		zilc = (zil_chain_t *)(lwb->lwb_buf + lwb->lwb_sz);
		bp = &zilc->zc_next_blk;
	}

	ASSERT(lwb->lwb_nused <= lwb->lwb_sz);

	/*
	 * Allocate the next block and save its address in this block
	 * before writing it in order to establish the log chain.
	 * Note that if the allocation of nlwb synced before we wrote
	 * the block that points at it (lwb), we'd leak it if we crashed.
	 * Therefore, we don't do dmu_tx_commit() until zil_lwb_write_done().
	 * We dirty the dataset to ensure that zil_sync() will be called
	 * to clean up in the event of allocation failure or I/O failure.
	 */
	tx = dmu_tx_create(zilog->zl_os);
	VERIFY(dmu_tx_assign(tx, TXG_WAIT) == 0);
	dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
	txg = dmu_tx_get_txg(tx);

	lwb->lwb_tx = tx;

	/*
	 * Log blocks are pre-allocated. Here we select the size of the next
	 * block, based on size used in the last block.
	 * - first find the smallest bucket that will fit the block from a
	 *   limited set of block sizes. This is because it's faster to write
	 *   blocks allocated from the same metaslab as they are adjacent or
	 *   close.
	 * - next find the maximum from the new suggested size and an array of
	 *   previous sizes. This lessens a picket fence effect of wrongly
	 *   guessing the size if we have a stream of say 2k, 64k, 2k, 64k
	 *   requests.
	 *
	 * Note we only write what is used, but we can't just allocate
	 * the maximum block size because we can exhaust the available
	 * pool log space.
	 */
	zil_blksz = zilog->zl_cur_used + sizeof (zil_chain_t);
	for (i = 0; zil_blksz > zil_block_buckets[i]; i++)
		continue;
	zil_blksz = zil_block_buckets[i];
	if (zil_blksz == UINT64_MAX)
		zil_blksz = SPA_MAXBLOCKSIZE;
	zilog->zl_prev_blks[zilog->zl_prev_rotor] = zil_blksz;
	for (i = 0; i < ZIL_PREV_BLKS; i++)
		zil_blksz = MAX(zil_blksz, zilog->zl_prev_blks[i]);
	zilog->zl_prev_rotor = (zilog->zl_prev_rotor + 1) & (ZIL_PREV_BLKS - 1);

	BP_ZERO(bp);
	/* pass the old blkptr in order to spread log blocks across devs */
	error = zio_alloc_zil(spa, txg, bp, &lwb->lwb_blk, zil_blksz,
	    USE_SLOG(zilog));
	if (!error) {
		ASSERT3U(bp->blk_birth, ==, txg);
		bp->blk_cksum = lwb->lwb_blk.blk_cksum;
		bp->blk_cksum.zc_word[ZIL_ZC_SEQ]++;

		/*
		 * Allocate a new log write buffer (lwb).
		 */
		nlwb = zil_alloc_lwb(zilog, bp, txg);

		/* Record the block for later vdev flushing */
		zil_add_block(zilog, &lwb->lwb_blk);
	}
	if (BP_GET_CHECKSUM(&lwb->lwb_blk) == ZIO_CHECKSUM_ZILOG2) {
		/* For Slim ZIL only write what is used. */
		wsz = P2ROUNDUP_TYPED(lwb->lwb_nused, ZIL_MIN_BLKSZ, uint64_t);
		ASSERT3U(wsz, <=, lwb->lwb_sz);
		zio_shrink(lwb->lwb_zio, wsz);

	} else {
		wsz = lwb->lwb_sz;
	}

	zilc->zc_pad = 0;
	zilc->zc_nused = lwb->lwb_nused;
	zilc->zc_eck.zec_cksum = lwb->lwb_blk.blk_cksum;

	/*
	 * clear unused data for security
	 */
	bzero(lwb->lwb_buf + lwb->lwb_nused, wsz - lwb->lwb_nused);

	zio_nowait(lwb->lwb_zio); /* Kick off the write for the old log block */

	/*
	 * If there was an allocation failure then nlwb will be null which
	 * forces a txg_wait_synced().
	 */
	return (nlwb);
}

static lwb_t *
zil_lwb_commit(zilog_t *zilog, itx_t *itx, lwb_t *lwb)
{
	lr_t *lrc = &itx->itx_lr; /* common log record */
	lr_write_t *lrw = (lr_write_t *)lrc;
	char *lr_buf;
	uint64_t txg = lrc->lrc_txg;
	uint64_t reclen = lrc->lrc_reclen;
	uint64_t dlen = 0;

	if (lwb == NULL)
		return (NULL);

	ASSERT(lwb->lwb_buf != NULL);

	if (lrc->lrc_txtype == TX_WRITE && itx->itx_wr_state == WR_NEED_COPY)
		dlen = P2ROUNDUP_TYPED(
		    lrw->lr_length, sizeof (uint64_t), uint64_t);

	zilog->zl_cur_used += (reclen + dlen);

	zil_lwb_write_init(zilog, lwb);

	/*
	 * If this record won't fit in the current log block, start a new one.
	 */
	if (lwb->lwb_nused + reclen + dlen > lwb->lwb_sz) {
		lwb = zil_lwb_write_start(zilog, lwb);
		if (lwb == NULL)
			return (NULL);
		zil_lwb_write_init(zilog, lwb);
		ASSERT(LWB_EMPTY(lwb));
		if (lwb->lwb_nused + reclen + dlen > lwb->lwb_sz) {
			txg_wait_synced(zilog->zl_dmu_pool, txg);
			return (lwb);
		}
	}

	lr_buf = lwb->lwb_buf + lwb->lwb_nused;
	bcopy(lrc, lr_buf, reclen);
	lrc = (lr_t *)lr_buf;
	lrw = (lr_write_t *)lrc;

	/*
	 * If it's a write, fetch the data or get its blkptr as appropriate.
	 */
	if (lrc->lrc_txtype == TX_WRITE) {
		if (txg > spa_freeze_txg(zilog->zl_spa))
			txg_wait_synced(zilog->zl_dmu_pool, txg);
		if (itx->itx_wr_state != WR_COPIED) {
			char *dbuf;
			int error;

			if (dlen) {
				ASSERT(itx->itx_wr_state == WR_NEED_COPY);
				dbuf = lr_buf + reclen;
				lrw->lr_common.lrc_reclen += dlen;
			} else {
				ASSERT(itx->itx_wr_state == WR_INDIRECT);
				dbuf = NULL;
			}
			error = zilog->zl_get_data(
			    itx->itx_private, lrw, dbuf, lwb->lwb_zio);
			if (error == EIO) {
				txg_wait_synced(zilog->zl_dmu_pool, txg);
				return (lwb);
			}
			if (error) {
				ASSERT(error == ENOENT || error == EEXIST ||
				    error == EALREADY);
				return (lwb);
			}
		}
	}

	/*
	 * We're actually making an entry, so update lrc_seq to be the
	 * log record sequence number. Note that this is generally not
	 * equal to the itx sequence number because not all transactions
	 * are synchronous, and sometimes spa_sync() gets there first.
	 */
	lrc->lrc_seq = ++zilog->zl_lr_seq; /* we are single threaded */
	lwb->lwb_nused += reclen + dlen;
	lwb->lwb_max_txg = MAX(lwb->lwb_max_txg, txg);
	ASSERT3U(lwb->lwb_nused, <=, lwb->lwb_sz);
	ASSERT3U(P2PHASE(lwb->lwb_nused, sizeof (uint64_t)), ==, 0);

	return (lwb);
}

itx_t *
zil_itx_create(uint64_t txtype, size_t lrsize)
{
	itx_t *itx;

	lrsize = P2ROUNDUP_TYPED(lrsize, sizeof (uint64_t), size_t);

	itx = kmem_alloc(offsetof(itx_t, itx_lr) + lrsize, KM_SLEEP);
	itx->itx_lr.lrc_txtype = txtype;
	itx->itx_lr.lrc_reclen = lrsize;
	itx->itx_sod = lrsize; /* if write & WR_NEED_COPY will be increased */
	itx->itx_lr.lrc_seq = 0;	/* defensive */
	itx->itx_sync = B_TRUE;		/* default is synchronous */

	return (itx);
}

void
zil_itx_destroy(itx_t *itx)
{
	kmem_free(itx, offsetof(itx_t, itx_lr) + itx->itx_lr.lrc_reclen);
}
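/*
 * Illustrative itx life cycle (a sketch; the actual producers are the
 * zfs_log_*() functions elsewhere in ZFS):
 *
 *	itx = zil_itx_create(TX_WRITE, sizeof (lr_write_t));
 *	...fill in itx->itx_lr (and itx->itx_wr_state for writes)...
 *	zil_itx_assign(zilog, itx, tx);	  open context, per-txg lists
 *	zil_commit(zilog, foid);	  fsync/O_DSYNC pushes it to the log
 *	zil_clean(zilog, synced_txg);	  frees itxs once their txg syncs
 */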
/*
 * Free up the sync and async itxs. The itxs_t has already been detached
 * so no locks are needed.
 */
static void
zil_itxg_clean(itxs_t *itxs)
{
	itx_t *itx;
	list_t *list;
	avl_tree_t *t;
	void *cookie;
	itx_async_node_t *ian;

	list = &itxs->i_sync_list;
	while ((itx = list_head(list)) != NULL) {
		list_remove(list, itx);
		kmem_free(itx, offsetof(itx_t, itx_lr) +
		    itx->itx_lr.lrc_reclen);
	}

	cookie = NULL;
	t = &itxs->i_async_tree;
	while ((ian = avl_destroy_nodes(t, &cookie)) != NULL) {
		list = &ian->ia_list;
		while ((itx = list_head(list)) != NULL) {
			list_remove(list, itx);
			kmem_free(itx, offsetof(itx_t, itx_lr) +
			    itx->itx_lr.lrc_reclen);
		}
		list_destroy(list);
		kmem_free(ian, sizeof (itx_async_node_t));
	}
	avl_destroy(t);

	kmem_free(itxs, sizeof (itxs_t));
}

static int
zil_aitx_compare(const void *x1, const void *x2)
{
	const uint64_t o1 = ((itx_async_node_t *)x1)->ia_foid;
	const uint64_t o2 = ((itx_async_node_t *)x2)->ia_foid;

	if (o1 < o2)
		return (-1);
	if (o1 > o2)
		return (1);

	return (0);
}

/*
 * Remove all async itx with the given oid.
 */
static void
zil_remove_async(zilog_t *zilog, uint64_t oid)
{
	uint64_t otxg, txg;
	itx_async_node_t *ian;
	avl_tree_t *t;
	avl_index_t where;
	list_t clean_list;
	itx_t *itx;

	ASSERT(oid != 0);
	list_create(&clean_list, sizeof (itx_t), offsetof(itx_t, itx_node));

	if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */
		otxg = ZILTEST_TXG;
	else
		otxg = spa_last_synced_txg(zilog->zl_spa) + 1;

	for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) {
		itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK];

		mutex_enter(&itxg->itxg_lock);
		if (itxg->itxg_txg != txg) {
			mutex_exit(&itxg->itxg_lock);
			continue;
		}

		/*
		 * Locate the object node and append its list.
		 */
		t = &itxg->itxg_itxs->i_async_tree;
		ian = avl_find(t, &oid, &where);
		if (ian != NULL)
			list_move_tail(&clean_list, &ian->ia_list);
		mutex_exit(&itxg->itxg_lock);
	}
	while ((itx = list_head(&clean_list)) != NULL) {
		list_remove(&clean_list, itx);
		kmem_free(itx, offsetof(itx_t, itx_lr) +
		    itx->itx_lr.lrc_reclen);
	}
	list_destroy(&clean_list);
}

void
zil_itx_assign(zilog_t *zilog, itx_t *itx, dmu_tx_t *tx)
{
	uint64_t txg;
	itxg_t *itxg;
	itxs_t *itxs, *clean = NULL;

	/*
	 * Object ids can be re-instantiated in the next txg so
	 * remove any async transactions to avoid future leaks.
	 * This can happen if a fsync occurs on the re-instantiated
	 * object for a WR_INDIRECT or WR_NEED_COPY write, which gets
	 * the new file data and flushes a write record for the old object.
	 */
	if ((itx->itx_lr.lrc_txtype & ~TX_CI) == TX_REMOVE)
		zil_remove_async(zilog, itx->itx_oid);

	/*
	 * Ensure the data of a renamed file is committed before the rename.
	 */
	if ((itx->itx_lr.lrc_txtype & ~TX_CI) == TX_RENAME)
		zil_async_to_sync(zilog, itx->itx_oid);

	if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX)
		txg = ZILTEST_TXG;
	else
		txg = dmu_tx_get_txg(tx);

	itxg = &zilog->zl_itxg[txg & TXG_MASK];
	mutex_enter(&itxg->itxg_lock);
	itxs = itxg->itxg_itxs;
	if (itxg->itxg_txg != txg) {
		if (itxs != NULL) {
			/*
			 * The zil_clean callback hasn't got around to cleaning
			 * this itxg. Save the itxs for release below.
			 * This should be rare.
			 */
			atomic_add_64(&zilog->zl_itx_list_sz, -itxg->itxg_sod);
			itxg->itxg_sod = 0;
			clean = itxg->itxg_itxs;
		}
		ASSERT(itxg->itxg_sod == 0);
		itxg->itxg_txg = txg;
		itxs = itxg->itxg_itxs = kmem_zalloc(sizeof (itxs_t), KM_SLEEP);

		list_create(&itxs->i_sync_list, sizeof (itx_t),
		    offsetof(itx_t, itx_node));
		avl_create(&itxs->i_async_tree, zil_aitx_compare,
		    sizeof (itx_async_node_t),
		    offsetof(itx_async_node_t, ia_node));
	}
	if (itx->itx_sync) {
		list_insert_tail(&itxs->i_sync_list, itx);
		atomic_add_64(&zilog->zl_itx_list_sz, itx->itx_sod);
		itxg->itxg_sod += itx->itx_sod;
	} else {
		avl_tree_t *t = &itxs->i_async_tree;
		uint64_t foid = ((lr_ooo_t *)&itx->itx_lr)->lr_foid;
		itx_async_node_t *ian;
		avl_index_t where;

		ian = avl_find(t, &foid, &where);
		if (ian == NULL) {
			ian = kmem_alloc(sizeof (itx_async_node_t), KM_SLEEP);
			list_create(&ian->ia_list, sizeof (itx_t),
			    offsetof(itx_t, itx_node));
			ian->ia_foid = foid;
			avl_insert(t, ian, where);
		}
		list_insert_tail(&ian->ia_list, itx);
	}

	itx->itx_lr.lrc_txg = dmu_tx_get_txg(tx);
	mutex_exit(&itxg->itxg_lock);

	/* Release the old itxs now we've dropped the lock */
	if (clean != NULL)
		zil_itxg_clean(clean);
}

/*
 * If there are any in-memory intent log transactions which have now been
 * synced then start up a taskq to free them.
 */
void
zil_clean(zilog_t *zilog, uint64_t synced_txg)
{
	itxg_t *itxg = &zilog->zl_itxg[synced_txg & TXG_MASK];
	itxs_t *clean_me;

	mutex_enter(&itxg->itxg_lock);
	if (itxg->itxg_itxs == NULL || itxg->itxg_txg == ZILTEST_TXG) {
		mutex_exit(&itxg->itxg_lock);
		return;
	}
	ASSERT3U(itxg->itxg_txg, <=, synced_txg);
	ASSERT(itxg->itxg_txg != 0);
	ASSERT(zilog->zl_clean_taskq != NULL);
	atomic_add_64(&zilog->zl_itx_list_sz, -itxg->itxg_sod);
	itxg->itxg_sod = 0;
	clean_me = itxg->itxg_itxs;
	itxg->itxg_itxs = NULL;
	itxg->itxg_txg = 0;
	mutex_exit(&itxg->itxg_lock);
	/*
	 * Preferably start a task queue to free up the old itxs but
	 * if taskq_dispatch can't allocate resources to do that then
	 * free it in-line. This should be rare. Note, using TQ_SLEEP
	 * created a bad performance problem.
	 */
	if (taskq_dispatch(zilog->zl_clean_taskq,
	    (void (*)(void *))zil_itxg_clean, clean_me, TQ_NOSLEEP) == 0)
		zil_itxg_clean(clean_me);
}

/*
 * Get the list of itxs to commit into zl_itx_commit_list.
 */
static void
zil_get_commit_list(zilog_t *zilog)
{
	uint64_t otxg, txg;
	list_t *commit_list = &zilog->zl_itx_commit_list;
	uint64_t push_sod = 0;

	if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */
		otxg = ZILTEST_TXG;
	else
		otxg = spa_last_synced_txg(zilog->zl_spa) + 1;

	for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) {
		itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK];

		mutex_enter(&itxg->itxg_lock);
		if (itxg->itxg_txg != txg) {
			mutex_exit(&itxg->itxg_lock);
			continue;
		}

		list_move_tail(commit_list, &itxg->itxg_itxs->i_sync_list);
		push_sod += itxg->itxg_sod;
		itxg->itxg_sod = 0;

		mutex_exit(&itxg->itxg_lock);
	}
	atomic_add_64(&zilog->zl_itx_list_sz, -push_sod);
}

/*
 * Move the async itxs for a specified object to commit into sync lists.
 */
static void
zil_async_to_sync(zilog_t *zilog, uint64_t foid)
{
	uint64_t otxg, txg;
	itx_async_node_t *ian;
	avl_tree_t *t;
	avl_index_t where;

	if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */
		otxg = ZILTEST_TXG;
	else
		otxg = spa_last_synced_txg(zilog->zl_spa) + 1;

	for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) {
		itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK];

		mutex_enter(&itxg->itxg_lock);
		if (itxg->itxg_txg != txg) {
			mutex_exit(&itxg->itxg_lock);
			continue;
		}

		/*
		 * If a foid is specified then find that node and append its
		 * list. Otherwise walk the tree appending all the lists
		 * to the sync list. We add to the end rather than the
		 * beginning to ensure the create has happened.
		 */
		t = &itxg->itxg_itxs->i_async_tree;
		if (foid != 0) {
			ian = avl_find(t, &foid, &where);
			if (ian != NULL) {
				list_move_tail(&itxg->itxg_itxs->i_sync_list,
				    &ian->ia_list);
			}
		} else {
			void *cookie = NULL;

			while ((ian = avl_destroy_nodes(t, &cookie)) != NULL) {
				list_move_tail(&itxg->itxg_itxs->i_sync_list,
				    &ian->ia_list);
				list_destroy(&ian->ia_list);
				kmem_free(ian, sizeof (itx_async_node_t));
			}
		}
		mutex_exit(&itxg->itxg_lock);
	}
}

static void
zil_commit_writer(zilog_t *zilog)
{
	uint64_t txg;
	itx_t *itx;
	lwb_t *lwb;
	spa_t *spa = zilog->zl_spa;
	int error = 0;

	ASSERT(zilog->zl_root_zio == NULL);

	mutex_exit(&zilog->zl_lock);

	zil_get_commit_list(zilog);

	/*
	 * Return if there's nothing to commit before we dirty the fs by
	 * calling zil_create().
	 */
	if (list_head(&zilog->zl_itx_commit_list) == NULL) {
		mutex_enter(&zilog->zl_lock);
		return;
	}

	if (zilog->zl_suspend) {
		lwb = NULL;
	} else {
		lwb = list_tail(&zilog->zl_lwb_list);
		if (lwb == NULL)
			lwb = zil_create(zilog);
	}

	DTRACE_PROBE1(zil__cw1, zilog_t *, zilog);
	while (itx = list_head(&zilog->zl_itx_commit_list)) {
		txg = itx->itx_lr.lrc_txg;
		ASSERT(txg);

		if (txg > spa_last_synced_txg(spa) || txg > spa_freeze_txg(spa))
			lwb = zil_lwb_commit(zilog, itx, lwb);
		list_remove(&zilog->zl_itx_commit_list, itx);
		kmem_free(itx, offsetof(itx_t, itx_lr)
		    + itx->itx_lr.lrc_reclen);
	}
	DTRACE_PROBE1(zil__cw2, zilog_t *, zilog);

	/* write the last block out */
	if (lwb != NULL && lwb->lwb_zio != NULL)
		lwb = zil_lwb_write_start(zilog, lwb);

	zilog->zl_cur_used = 0;

	/*
	 * Wait if necessary for the log blocks to be on stable storage.
	 */
	if (zilog->zl_root_zio) {
		error = zio_wait(zilog->zl_root_zio);
		zilog->zl_root_zio = NULL;
		zil_flush_vdevs(zilog);
	}

	if (error || lwb == NULL)
		txg_wait_synced(zilog->zl_dmu_pool, 0);

	mutex_enter(&zilog->zl_lock);

	/*
	 * Remember the highest committed log sequence number for ztest.
	 * We only update this value when all the log writes succeeded,
	 * because ztest wants to ASSERT that it got the whole log chain.
	 */
	if (error == 0 && lwb != NULL)
		zilog->zl_commit_lr_seq = zilog->zl_lr_seq;
}

/*
 * Commit zfs transactions to stable storage.
 * If foid is 0 push out all transactions, otherwise push only those
 * for that object or which might reference that object.
 *
 * itxs are committed in batches. In a heavily stressed zil there will be
 * a commit writer thread who is writing out a bunch of itxs to the log
 * for a set of committing threads (cthreads) in the same batch as the writer.
 * Those cthreads are all waiting on the same cv for that batch.
 *
 * There will also be a different and growing batch of threads that are
 * waiting to commit (qthreads). When the committing batch completes
 * a transition occurs such that the cthreads exit and the qthreads become
 * cthreads. One of the new cthreads becomes the writer thread for the
 * batch. Any new threads arriving become new qthreads.
 *
 * Only two condition variables are needed and there's no transition
 * between the two cvs: they just flip-flop between qthreads and cthreads.
 *
 * Using this scheme we can efficiently wake up only those threads
 * whose batch has been committed.
 */
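/*
 * Batch/cv parity sketch (illustrative): batch N's waiters sleep on
 * zl_cv_batch[N & 1]. When batch N completes, cv_signal() on
 * zl_cv_batch[(N + 1) & 1] promotes one qthread to become the next
 * writer, and cv_broadcast() on zl_cv_batch[N & 1] releases batch N's
 * cthreads.
 */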
void
zil_commit(zilog_t *zilog, uint64_t foid)
{
	uint64_t mybatch;

	if (zilog->zl_sync == ZFS_SYNC_DISABLED)
		return;

	/* move the async itxs for the foid to the sync queues */
	zil_async_to_sync(zilog, foid);

	mutex_enter(&zilog->zl_lock);
	mybatch = zilog->zl_next_batch;
	while (zilog->zl_writer) {
		cv_wait(&zilog->zl_cv_batch[mybatch & 1], &zilog->zl_lock);
		if (mybatch <= zilog->zl_com_batch) {
			mutex_exit(&zilog->zl_lock);
			return;
		}
	}

	zilog->zl_next_batch++;
	zilog->zl_writer = B_TRUE;
	zil_commit_writer(zilog);
	zilog->zl_com_batch = mybatch;
	zilog->zl_writer = B_FALSE;
	mutex_exit(&zilog->zl_lock);

	/* wake up one thread to become the next writer */
	cv_signal(&zilog->zl_cv_batch[(mybatch+1) & 1]);

	/* wake up all threads waiting for this batch to be committed */
	cv_broadcast(&zilog->zl_cv_batch[mybatch & 1]);
}

/*
 * Called in syncing context to free committed log blocks and update log header.
 */
void
zil_sync(zilog_t *zilog, dmu_tx_t *tx)
{
	zil_header_t *zh = zil_header_in_syncing_context(zilog);
	uint64_t txg = dmu_tx_get_txg(tx);
	spa_t *spa = zilog->zl_spa;
	uint64_t *replayed_seq = &zilog->zl_replayed_seq[txg & TXG_MASK];
	lwb_t *lwb;

	/*
	 * We don't zero out zl_destroy_txg, so make sure we don't try
	 * to destroy it twice.
	 */
	if (spa_sync_pass(spa) != 1)
		return;

	mutex_enter(&zilog->zl_lock);

	ASSERT(zilog->zl_stop_sync == 0);

	if (*replayed_seq != 0) {
		ASSERT(zh->zh_replay_seq < *replayed_seq);
		zh->zh_replay_seq = *replayed_seq;
		*replayed_seq = 0;
	}

	if (zilog->zl_destroy_txg == txg) {
		blkptr_t blk = zh->zh_log;

		ASSERT(list_head(&zilog->zl_lwb_list) == NULL);

		bzero(zh, sizeof (zil_header_t));
		bzero(zilog->zl_replayed_seq, sizeof (zilog->zl_replayed_seq));

		if (zilog->zl_keep_first) {
			/*
			 * If this block was part of log chain that couldn't
			 * be claimed because a device was missing during
			 * zil_claim(), but that device later returns,
			 * then this block could erroneously appear valid.
			 * To guard against this, assign a new GUID to the new
			 * log chain so it doesn't matter what blk points to.
			 */
			zil_init_log_chain(zilog, &blk);
			zh->zh_log = blk;
		}
	}

	while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
		zh->zh_log = lwb->lwb_blk;
		if (lwb->lwb_buf != NULL || lwb->lwb_max_txg > txg)
			break;
		list_remove(&zilog->zl_lwb_list, lwb);
		zio_free_zil(spa, txg, &lwb->lwb_blk);
		kmem_cache_free(zil_lwb_cache, lwb);

		/*
		 * If we don't have anything left in the lwb list then
		 * we've had an allocation failure and we need to zero
		 * out the zil_header blkptr so that we don't end
		 * up freeing the same block twice.
		 */
		if (list_head(&zilog->zl_lwb_list) == NULL)
			BP_ZERO(&zh->zh_log);
	}
	mutex_exit(&zilog->zl_lock);
}

void
zil_init(void)
{
	zil_lwb_cache = kmem_cache_create("zil_lwb_cache",
	    sizeof (struct lwb), 0, NULL, NULL, NULL, NULL, NULL, 0);
}

void
zil_fini(void)
{
	kmem_cache_destroy(zil_lwb_cache);
}

void
zil_set_sync(zilog_t *zilog, uint64_t sync)
{
	zilog->zl_sync = sync;
}

void
zil_set_logbias(zilog_t *zilog, uint64_t logbias)
{
	zilog->zl_logbias = logbias;
}

zilog_t *
zil_alloc(objset_t *os, zil_header_t *zh_phys)
{
	zilog_t *zilog;

	zilog = kmem_zalloc(sizeof (zilog_t), KM_SLEEP);

	zilog->zl_header = zh_phys;
	zilog->zl_os = os;
	zilog->zl_spa = dmu_objset_spa(os);
	zilog->zl_dmu_pool = dmu_objset_pool(os);
	zilog->zl_destroy_txg = TXG_INITIAL - 1;
	zilog->zl_logbias = dmu_objset_logbias(os);
	zilog->zl_sync = dmu_objset_syncprop(os);
	zilog->zl_next_batch = 1;

	mutex_init(&zilog->zl_lock, NULL, MUTEX_DEFAULT, NULL);

	for (int i = 0; i < TXG_SIZE; i++) {
		mutex_init(&zilog->zl_itxg[i].itxg_lock, NULL,
		    MUTEX_DEFAULT, NULL);
	}

	list_create(&zilog->zl_lwb_list, sizeof (lwb_t),
	    offsetof(lwb_t, lwb_node));

	list_create(&zilog->zl_itx_commit_list, sizeof (itx_t),
	    offsetof(itx_t, itx_node));

	mutex_init(&zilog->zl_vdev_lock, NULL, MUTEX_DEFAULT, NULL);

	avl_create(&zilog->zl_vdev_tree, zil_vdev_compare,
	    sizeof (zil_vdev_node_t), offsetof(zil_vdev_node_t, zv_node));

	cv_init(&zilog->zl_cv_writer, NULL, CV_DEFAULT, NULL);
	cv_init(&zilog->zl_cv_suspend, NULL, CV_DEFAULT, NULL);
	cv_init(&zilog->zl_cv_batch[0], NULL, CV_DEFAULT, NULL);
	cv_init(&zilog->zl_cv_batch[1], NULL, CV_DEFAULT, NULL);

	return (zilog);
}

void
zil_free(zilog_t *zilog)
{
	zilog->zl_stop_sync = 1;

	ASSERT(list_is_empty(&zilog->zl_lwb_list));
	list_destroy(&zilog->zl_lwb_list);

	avl_destroy(&zilog->zl_vdev_tree);
	mutex_destroy(&zilog->zl_vdev_lock);

	ASSERT(list_is_empty(&zilog->zl_itx_commit_list));
	list_destroy(&zilog->zl_itx_commit_list);

	for (int i = 0; i < TXG_SIZE; i++) {
		/*
		 * It's possible for an itx to be generated that doesn't dirty
		 * a txg (e.g. ztest TX_TRUNCATE). So there's no zil_clean()
		 * callback to remove the entry. We remove those here.
		 *
		 * Also free up the ziltest itxs.
		 */
		if (zilog->zl_itxg[i].itxg_itxs)
			zil_itxg_clean(zilog->zl_itxg[i].itxg_itxs);
		mutex_destroy(&zilog->zl_itxg[i].itxg_lock);
	}

	mutex_destroy(&zilog->zl_lock);

	cv_destroy(&zilog->zl_cv_writer);
	cv_destroy(&zilog->zl_cv_suspend);
	cv_destroy(&zilog->zl_cv_batch[0]);
	cv_destroy(&zilog->zl_cv_batch[1]);

	kmem_free(zilog, sizeof (zilog_t));
}

/*
 * Open an intent log.
 */
zilog_t *
zil_open(objset_t *os, zil_get_data_t *get_data)
{
	zilog_t *zilog = dmu_objset_zil(os);

	ASSERT(zilog->zl_clean_taskq == NULL);
	ASSERT(zilog->zl_get_data == NULL);
	ASSERT(list_is_empty(&zilog->zl_lwb_list));

	zilog->zl_get_data = get_data;
	zilog->zl_clean_taskq = taskq_create("zil_clean", 1, minclsyspri,
	    2, 2, TASKQ_PREPOPULATE);

	return (zilog);
}
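/*
 * Illustrative call ordering (a sketch): a dataset consumer calls
 * zil_open() once, replays or discards any existing log via zil_replay()
 * or zil_destroy(), logs and commits itxs while active, and finally
 * calls zil_close(). The ASSERTs in zil_open() above rely on that
 * pairing.
 */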
/*
 * Close an intent log.
 */
void
zil_close(zilog_t *zilog)
{
	lwb_t *lwb;
	uint64_t txg = 0;

	zil_commit(zilog, 0); /* commit all itx */

	/*
	 * The lwb_max_txg for the stubby lwb will reflect the last activity
	 * for the zil. After a txg_wait_synced() on the txg we know all the
	 * callbacks have occurred that may clean the zil. Only then can we
	 * destroy the zl_clean_taskq.
	 */
	mutex_enter(&zilog->zl_lock);
	lwb = list_tail(&zilog->zl_lwb_list);
	if (lwb != NULL)
		txg = lwb->lwb_max_txg;
	mutex_exit(&zilog->zl_lock);
	if (txg)
		txg_wait_synced(zilog->zl_dmu_pool, txg);

	taskq_destroy(zilog->zl_clean_taskq);
	zilog->zl_clean_taskq = NULL;
	zilog->zl_get_data = NULL;

	/*
	 * We should have only one LWB left on the list; remove it now.
	 */
	mutex_enter(&zilog->zl_lock);
	lwb = list_head(&zilog->zl_lwb_list);
	if (lwb != NULL) {
		ASSERT(lwb == list_tail(&zilog->zl_lwb_list));
		list_remove(&zilog->zl_lwb_list, lwb);
		zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
		kmem_cache_free(zil_lwb_cache, lwb);
	}
	mutex_exit(&zilog->zl_lock);
}

/*
 * Suspend an intent log. While in suspended mode, we still honor
 * synchronous semantics, but we rely on txg_wait_synced() to do it.
 * We suspend the log briefly when taking a snapshot so that the snapshot
 * contains all the data it's supposed to, and has an empty intent log.
 */
int
zil_suspend(zilog_t *zilog)
{
	const zil_header_t *zh = zilog->zl_header;

	mutex_enter(&zilog->zl_lock);
	if (zh->zh_flags & ZIL_REPLAY_NEEDED) {		/* unplayed log */
		mutex_exit(&zilog->zl_lock);
		return (EBUSY);
	}
	if (zilog->zl_suspend++ != 0) {
		/*
		 * Someone else already began a suspend.
		 * Just wait for them to finish.
		 */
		while (zilog->zl_suspending)
			cv_wait(&zilog->zl_cv_suspend, &zilog->zl_lock);
		mutex_exit(&zilog->zl_lock);
		return (0);
	}
	zilog->zl_suspending = B_TRUE;
	mutex_exit(&zilog->zl_lock);

	zil_commit(zilog, 0);

	zil_destroy(zilog, B_FALSE);

	mutex_enter(&zilog->zl_lock);
	zilog->zl_suspending = B_FALSE;
	cv_broadcast(&zilog->zl_cv_suspend);
	mutex_exit(&zilog->zl_lock);

	return (0);
}

void
zil_resume(zilog_t *zilog)
{
	mutex_enter(&zilog->zl_lock);
	ASSERT(zilog->zl_suspend != 0);
	zilog->zl_suspend--;
	mutex_exit(&zilog->zl_lock);
}

typedef struct zil_replay_arg {
	zil_replay_func_t **zr_replay;
	void *zr_arg;
	boolean_t zr_byteswap;
	char *zr_lr;
} zil_replay_arg_t;

static int
zil_replay_error(zilog_t *zilog, lr_t *lr, int error)
{
	char name[MAXNAMELEN];

	zilog->zl_replaying_seq--;	/* didn't actually replay this one */

	dmu_objset_name(zilog->zl_os, name);

	cmn_err(CE_WARN, "ZFS replay transaction error %d, "
	    "dataset %s, seq 0x%llx, txtype %llu %s\n", error, name,
	    (u_longlong_t)lr->lrc_seq,
	    (u_longlong_t)(lr->lrc_txtype & ~TX_CI),
	    (lr->lrc_txtype & TX_CI) ? "CI" : "");

	return (error);
}
"CI" : ""); 1839 1840 return (error); 1841} 1842 1843static int 1844zil_replay_log_record(zilog_t *zilog, lr_t *lr, void *zra, uint64_t claim_txg) 1845{ 1846 zil_replay_arg_t *zr = zra; 1847 const zil_header_t *zh = zilog->zl_header; 1848 uint64_t reclen = lr->lrc_reclen; 1849 uint64_t txtype = lr->lrc_txtype; 1850 int error = 0; 1851 1852 zilog->zl_replaying_seq = lr->lrc_seq; 1853 1854 if (lr->lrc_seq <= zh->zh_replay_seq) /* already replayed */ 1855 return (0); 1856 1857 if (lr->lrc_txg < claim_txg) /* already committed */ 1858 return (0); 1859 1860 /* Strip case-insensitive bit, still present in log record */ 1861 txtype &= ~TX_CI; 1862 1863 if (txtype == 0 || txtype >= TX_MAX_TYPE) 1864 return (zil_replay_error(zilog, lr, EINVAL)); 1865 1866 /* 1867 * If this record type can be logged out of order, the object 1868 * (lr_foid) may no longer exist. That's legitimate, not an error. 1869 */ 1870 if (TX_OOO(txtype)) { 1871 error = dmu_object_info(zilog->zl_os, 1872 ((lr_ooo_t *)lr)->lr_foid, NULL); 1873 if (error == ENOENT || error == EEXIST) 1874 return (0); 1875 } 1876 1877 /* 1878 * Make a copy of the data so we can revise and extend it. 1879 */ 1880 bcopy(lr, zr->zr_lr, reclen); 1881 1882 /* 1883 * If this is a TX_WRITE with a blkptr, suck in the data. 1884 */ 1885 if (txtype == TX_WRITE && reclen == sizeof (lr_write_t)) { 1886 error = zil_read_log_data(zilog, (lr_write_t *)lr, 1887 zr->zr_lr + reclen); 1888 if (error) 1889 return (zil_replay_error(zilog, lr, error)); 1890 } 1891 1892 /* 1893 * The log block containing this lr may have been byteswapped 1894 * so that we can easily examine common fields like lrc_txtype. 1895 * However, the log is a mix of different record types, and only the 1896 * replay vectors know how to byteswap their records. Therefore, if 1897 * the lr was byteswapped, undo it before invoking the replay vector. 1898 */ 1899 if (zr->zr_byteswap) 1900 byteswap_uint64_array(zr->zr_lr, reclen); 1901 1902 /* 1903 * We must now do two things atomically: replay this log record, 1904 * and update the log header sequence number to reflect the fact that 1905 * we did so. At the end of each replay function the sequence number 1906 * is updated if we are in replay mode. 1907 */ 1908 error = zr->zr_replay[txtype](zr->zr_arg, zr->zr_lr, zr->zr_byteswap); 1909 if (error) { 1910 /* 1911 * The DMU's dnode layer doesn't see removes until the txg 1912 * commits, so a subsequent claim can spuriously fail with 1913 * EEXIST. So if we receive any error we try syncing out 1914 * any removes then retry the transaction. Note that we 1915 * specify B_FALSE for byteswap now, so we don't do it twice. 1916 */ 1917 txg_wait_synced(spa_get_dsl(zilog->zl_spa), 0); 1918 error = zr->zr_replay[txtype](zr->zr_arg, zr->zr_lr, B_FALSE); 1919 if (error) 1920 return (zil_replay_error(zilog, lr, error)); 1921 } 1922 return (0); 1923} 1924 1925/* ARGSUSED */ 1926static int 1927zil_incr_blks(zilog_t *zilog, blkptr_t *bp, void *arg, uint64_t claim_txg) 1928{ 1929 zilog->zl_replay_blks++; 1930 1931 return (0); 1932} 1933 1934/* 1935 * If this dataset has a non-empty intent log, replay it and destroy it. 
void
zil_replay(objset_t *os, void *arg, zil_replay_func_t *replay_func[TX_MAX_TYPE])
{
	zilog_t *zilog = dmu_objset_zil(os);
	const zil_header_t *zh = zilog->zl_header;
	zil_replay_arg_t zr;

	if ((zh->zh_flags & ZIL_REPLAY_NEEDED) == 0) {
		zil_destroy(zilog, B_TRUE);
		return;
	}
	//printf("ZFS: Replaying ZIL on %s...\n", os->os->os_spa->spa_name);

	zr.zr_replay = replay_func;
	zr.zr_arg = arg;
	zr.zr_byteswap = BP_SHOULD_BYTESWAP(&zh->zh_log);
	zr.zr_lr = kmem_alloc(2 * SPA_MAXBLOCKSIZE, KM_SLEEP);

	/*
	 * Wait for in-progress removes to sync before starting replay.
	 */
	txg_wait_synced(zilog->zl_dmu_pool, 0);

	zilog->zl_replay = B_TRUE;
	zilog->zl_replay_time = ddi_get_lbolt();
	ASSERT(zilog->zl_replay_blks == 0);
	(void) zil_parse(zilog, zil_incr_blks, zil_replay_log_record, &zr,
	    zh->zh_claim_txg);
	kmem_free(zr.zr_lr, 2 * SPA_MAXBLOCKSIZE);

	zil_destroy(zilog, B_FALSE);
	txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);
	zilog->zl_replay = B_FALSE;
	//printf("ZFS: Replay of ZIL on %s finished.\n", os->os->os_spa->spa_name);
}

boolean_t
zil_replaying(zilog_t *zilog, dmu_tx_t *tx)
{
	if (zilog->zl_sync == ZFS_SYNC_DISABLED)
		return (B_TRUE);

	if (zilog->zl_replay) {
		dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
		zilog->zl_replayed_seq[dmu_tx_get_txg(tx) & TXG_MASK] =
		    zilog->zl_replaying_seq;
		return (B_TRUE);
	}

	return (B_FALSE);
}

/* ARGSUSED */
int
zil_vdev_offline(const char *osname, void *arg)
{
	objset_t *os;
	zilog_t *zilog;
	int error;

	error = dmu_objset_hold(osname, FTAG, &os);
	if (error)
		return (error);

	zilog = dmu_objset_zil(os);
	if (zil_suspend(zilog) != 0)
		error = EEXIST;
	else
		zil_resume(zilog);
	dmu_objset_rele(os, FTAG);
	return (error);
}