zil.c revision 207908
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/dmu.h>
#include <sys/zap.h>
#include <sys/arc.h>
#include <sys/stat.h>
#include <sys/resource.h>
#include <sys/zil.h>
#include <sys/zil_impl.h>
#include <sys/dsl_dataset.h>
#include <sys/vdev.h>
#include <sys/dmu_tx.h>

/*
 * The zfs intent log (ZIL) saves transaction records of system calls
 * that change the file system in memory with enough information
 * to be able to replay them. These are stored in memory until
 * either the DMU transaction group (txg) commits them to the stable pool
 * and they can be discarded, or they are flushed to the stable log
 * (also in the pool) due to a fsync, O_DSYNC or other synchronous
 * requirement. In the event of a panic or power failure, those log
 * records (transactions) are replayed.
 *
 * There is one ZIL per file system. Its on-disk (pool) format consists
 * of 3 parts:
 *
 * 	- ZIL header
 * 	- ZIL blocks
 * 	- ZIL records
 *
 * A log record holds a system call transaction. Log blocks can
 * hold many log records and the blocks are chained together.
 * Each ZIL block contains a block pointer (blkptr_t) to the next
 * ZIL block in the chain. The ZIL header points to the first
 * block in the chain. Note there is not a fixed place in the pool
 * to hold blocks. They are dynamically allocated and freed as
 * needed from the blocks available. Figure X shows the ZIL structure:
 */

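/*
 * Illustrative sketch of the chain just described (the "Figure X"
 * referenced above is not reproduced here):
 *
 *	zil_header        ZIL block         ZIL block         ZIL block
 *	+---------+      +----------+      +----------+      +----------+
 *	| zh_log  |----->| records  |  +-->| records  |  +-->| records  |
 *	+---------+      |   ...    |  |   |   ...    |  |   |   ...    |
 *	                 | trailer: |  |   | trailer: |  |   | trailer: |
 *	                 |  blkptr  |--+   |  blkptr  |--+   |   ...    |
 *	                 +----------+      +----------+      +----------+
 *
 * Each block's trailer (zil_trailer_t) holds the blkptr_t of its
 * successor, which is how zil_parse() below walks the chain.
 */
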
/*
 * This global ZIL switch affects all pools
 */
int zil_disable = 0;	/* disable intent logging */
SYSCTL_DECL(_vfs_zfs);
TUNABLE_INT("vfs.zfs.zil_disable", &zil_disable);
SYSCTL_INT(_vfs_zfs, OID_AUTO, zil_disable, CTLFLAG_RW, &zil_disable, 0,
    "Disable ZFS Intent Log (ZIL)");

/*
 * Tunable parameter for debugging or performance analysis. Setting
 * zfs_nocacheflush will cause corruption on power loss if a volatile
 * out-of-order write cache is enabled.
 */
boolean_t zfs_nocacheflush = B_FALSE;
TUNABLE_INT("vfs.zfs.cache_flush_disable", &zfs_nocacheflush);
SYSCTL_INT(_vfs_zfs, OID_AUTO, cache_flush_disable, CTLFLAG_RDTUN,
    &zfs_nocacheflush, 0, "Disable cache flush");

static kmem_cache_t *zil_lwb_cache;

static int
zil_dva_compare(const void *x1, const void *x2)
{
	const dva_t *dva1 = x1;
	const dva_t *dva2 = x2;

	if (DVA_GET_VDEV(dva1) < DVA_GET_VDEV(dva2))
		return (-1);
	if (DVA_GET_VDEV(dva1) > DVA_GET_VDEV(dva2))
		return (1);

	if (DVA_GET_OFFSET(dva1) < DVA_GET_OFFSET(dva2))
		return (-1);
	if (DVA_GET_OFFSET(dva1) > DVA_GET_OFFSET(dva2))
		return (1);

	return (0);
}

static void
zil_dva_tree_init(avl_tree_t *t)
{
	avl_create(t, zil_dva_compare, sizeof (zil_dva_node_t),
	    offsetof(zil_dva_node_t, zn_node));
}

static void
zil_dva_tree_fini(avl_tree_t *t)
{
	zil_dva_node_t *zn;
	void *cookie = NULL;

	while ((zn = avl_destroy_nodes(t, &cookie)) != NULL)
		kmem_free(zn, sizeof (zil_dva_node_t));

	avl_destroy(t);
}

static int
zil_dva_tree_add(avl_tree_t *t, dva_t *dva)
{
	zil_dva_node_t *zn;
	avl_index_t where;

	if (avl_find(t, dva, &where) != NULL)
		return (EEXIST);

	zn = kmem_alloc(sizeof (zil_dva_node_t), KM_SLEEP);
	zn->zn_dva = *dva;
	avl_insert(t, zn, where);

	return (0);
}

static zil_header_t *
zil_header_in_syncing_context(zilog_t *zilog)
{
	return ((zil_header_t *)zilog->zl_header);
}

static void
zil_init_log_chain(zilog_t *zilog, blkptr_t *bp)
{
	zio_cksum_t *zc = &bp->blk_cksum;

	zc->zc_word[ZIL_ZC_GUID_0] = spa_get_random(-1ULL);
	zc->zc_word[ZIL_ZC_GUID_1] = spa_get_random(-1ULL);
	zc->zc_word[ZIL_ZC_OBJSET] = dmu_objset_id(zilog->zl_os);
	zc->zc_word[ZIL_ZC_SEQ] = 1ULL;
}

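/*
 * A worked example of the chain seeding above (illustrative, not from
 * the original source): zil_init_log_chain() stores a random GUID pair,
 * the objset id, and sequence number 1 in the first block's embedded
 * checksum.  Each subsequent block's expected blk_cksum is the previous
 * block's with ZIL_ZC_SEQ incremented by one, so a stale block left over
 * from an old, freed chain fails the bcmp() in zil_read_log_block()
 * below and cleanly terminates the walk.
 */
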
/*
 * Read a log block, make sure it's valid, and byteswap it if necessary.
 */
static int
zil_read_log_block(zilog_t *zilog, const blkptr_t *bp, arc_buf_t **abufpp)
{
	blkptr_t blk = *bp;
	zbookmark_t zb;
	uint32_t aflags = ARC_WAIT;
	int error;

	zb.zb_objset = bp->blk_cksum.zc_word[ZIL_ZC_OBJSET];
	zb.zb_object = 0;
	zb.zb_level = -1;
	zb.zb_blkid = bp->blk_cksum.zc_word[ZIL_ZC_SEQ];

	*abufpp = NULL;

	/*
	 * We shouldn't be doing any scrubbing while we're doing log
	 * replay, it's OK to not lock.
	 */
	error = arc_read_nolock(NULL, zilog->zl_spa, &blk,
	    arc_getbuf_func, abufpp, ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL |
	    ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB, &aflags, &zb);

	if (error == 0) {
		char *data = (*abufpp)->b_data;
		uint64_t blksz = BP_GET_LSIZE(bp);
		zil_trailer_t *ztp = (zil_trailer_t *)(data + blksz) - 1;
		zio_cksum_t cksum = bp->blk_cksum;

		/*
		 * Validate the checksummed log block.
		 *
		 * Sequence numbers should be... sequential. The checksum
		 * verifier for the next block should be bp's checksum plus 1.
		 *
		 * Also check the log chain linkage and size used.
		 */
		cksum.zc_word[ZIL_ZC_SEQ]++;

		if (bcmp(&cksum, &ztp->zit_next_blk.blk_cksum,
		    sizeof (cksum)) || BP_IS_HOLE(&ztp->zit_next_blk) ||
		    (ztp->zit_nused > (blksz - sizeof (zil_trailer_t)))) {
			error = ECKSUM;
		}

		if (error) {
			VERIFY(arc_buf_remove_ref(*abufpp, abufpp) == 1);
			*abufpp = NULL;
		}
	}

	dprintf("error %d on %llu:%llu\n", error, zb.zb_objset, zb.zb_blkid);

	return (error);
}

/*
 * Parse the intent log, and call parse_func for each valid record within.
 * Return the highest sequence number.
 */
uint64_t
zil_parse(zilog_t *zilog, zil_parse_blk_func_t *parse_blk_func,
    zil_parse_lr_func_t *parse_lr_func, void *arg, uint64_t txg)
{
	const zil_header_t *zh = zilog->zl_header;
	uint64_t claim_seq = zh->zh_claim_seq;
	uint64_t seq = 0;
	uint64_t max_seq = 0;
	blkptr_t blk = zh->zh_log;
	arc_buf_t *abuf;
	char *lrbuf, *lrp;
	zil_trailer_t *ztp;
	int reclen, error;

	if (BP_IS_HOLE(&blk))
		return (max_seq);

	/*
	 * Starting at the block pointed to by zh_log we read the log chain.
	 * For each block in the chain we strongly check that block to
	 * ensure its validity. We stop when an invalid block is found.
	 * For each block pointer in the chain we call parse_blk_func().
	 * For each record in each valid block we call parse_lr_func().
	 * If the log has been claimed, stop if we encounter a sequence
	 * number greater than the highest claimed sequence number.
	 */
	zil_dva_tree_init(&zilog->zl_dva_tree);
	for (;;) {
		seq = blk.blk_cksum.zc_word[ZIL_ZC_SEQ];

		if (claim_seq != 0 && seq > claim_seq)
			break;

		ASSERT(max_seq < seq);
		max_seq = seq;

		error = zil_read_log_block(zilog, &blk, &abuf);

		if (parse_blk_func != NULL)
			parse_blk_func(zilog, &blk, arg, txg);

		if (error)
			break;

		lrbuf = abuf->b_data;
		ztp = (zil_trailer_t *)(lrbuf + BP_GET_LSIZE(&blk)) - 1;
		blk = ztp->zit_next_blk;

		if (parse_lr_func == NULL) {
			VERIFY(arc_buf_remove_ref(abuf, &abuf) == 1);
			continue;
		}

		for (lrp = lrbuf; lrp < lrbuf + ztp->zit_nused; lrp += reclen) {
			lr_t *lr = (lr_t *)lrp;
			reclen = lr->lrc_reclen;
			ASSERT3U(reclen, >=, sizeof (lr_t));
			parse_lr_func(zilog, lr, arg, txg);
		}
		VERIFY(arc_buf_remove_ref(abuf, &abuf) == 1);
	}
	zil_dva_tree_fini(&zilog->zl_dva_tree);

	return (max_seq);
}

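/*
 * Illustrative use of zil_parse() (hypothetical callback names, not part
 * of this file): a caller supplies one function per level, e.g.
 *
 *	static void my_blk_cb(zilog_t *, blkptr_t *, void *, uint64_t);
 *	static void my_lr_cb(zilog_t *, lr_t *, void *, uint64_t);
 *
 *	max_seq = zil_parse(zilog, my_blk_cb, my_lr_cb, arg, first_txg);
 *
 * The claim, free, and replay paths below are the in-tree callers,
 * pairing their block and record callbacks in exactly this way.
 */
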
/* ARGSUSED */
static void
zil_claim_log_block(zilog_t *zilog, blkptr_t *bp, void *tx, uint64_t first_txg)
{
	spa_t *spa = zilog->zl_spa;
	int err;

	/*
	 * Claim log block if not already committed and not already claimed.
	 */
	if (bp->blk_birth >= first_txg &&
	    zil_dva_tree_add(&zilog->zl_dva_tree, BP_IDENTITY(bp)) == 0) {
		err = zio_wait(zio_claim(NULL, spa, first_txg, bp, NULL, NULL,
		    ZIO_FLAG_MUSTSUCCEED));
		ASSERT(err == 0);
	}
}

static void
zil_claim_log_record(zilog_t *zilog, lr_t *lrc, void *tx, uint64_t first_txg)
{
	if (lrc->lrc_txtype == TX_WRITE) {
		lr_write_t *lr = (lr_write_t *)lrc;
		zil_claim_log_block(zilog, &lr->lr_blkptr, tx, first_txg);
	}
}

/* ARGSUSED */
static void
zil_free_log_block(zilog_t *zilog, blkptr_t *bp, void *tx, uint64_t claim_txg)
{
	zio_free_blk(zilog->zl_spa, bp, dmu_tx_get_txg(tx));
}

static void
zil_free_log_record(zilog_t *zilog, lr_t *lrc, void *tx, uint64_t claim_txg)
{
	/*
	 * If we previously claimed it, we need to free it.
	 */
	if (claim_txg != 0 && lrc->lrc_txtype == TX_WRITE) {
		lr_write_t *lr = (lr_write_t *)lrc;
		blkptr_t *bp = &lr->lr_blkptr;
		if (bp->blk_birth >= claim_txg &&
		    !zil_dva_tree_add(&zilog->zl_dva_tree, BP_IDENTITY(bp))) {
			(void) arc_free(NULL, zilog->zl_spa,
			    dmu_tx_get_txg(tx), bp, NULL, NULL, ARC_WAIT);
		}
	}
}

/*
 * Create an on-disk intent log.
 */
static void
zil_create(zilog_t *zilog)
{
	const zil_header_t *zh = zilog->zl_header;
	lwb_t *lwb;
	uint64_t txg = 0;
	dmu_tx_t *tx = NULL;
	blkptr_t blk;
	int error = 0;

	/*
	 * Wait for any previous destroy to complete.
	 */
	txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);

	ASSERT(zh->zh_claim_txg == 0);
	ASSERT(zh->zh_replay_seq == 0);

	blk = zh->zh_log;

	/*
	 * If we don't already have an initial log block or we have one
	 * but it's the wrong endianness then allocate one.
	 */
	if (BP_IS_HOLE(&blk) || BP_SHOULD_BYTESWAP(&blk)) {
		tx = dmu_tx_create(zilog->zl_os);
		(void) dmu_tx_assign(tx, TXG_WAIT);
		dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
		txg = dmu_tx_get_txg(tx);

		if (!BP_IS_HOLE(&blk)) {
			zio_free_blk(zilog->zl_spa, &blk, txg);
			BP_ZERO(&blk);
		}

		error = zio_alloc_blk(zilog->zl_spa, ZIL_MIN_BLKSZ, &blk,
		    NULL, txg);

		if (error == 0)
			zil_init_log_chain(zilog, &blk);
	}

	/*
	 * Allocate a log write buffer (lwb) for the first log block.
	 */
	if (error == 0) {
		lwb = kmem_cache_alloc(zil_lwb_cache, KM_SLEEP);
		lwb->lwb_zilog = zilog;
		lwb->lwb_blk = blk;
		lwb->lwb_nused = 0;
		lwb->lwb_sz = BP_GET_LSIZE(&lwb->lwb_blk);
		lwb->lwb_buf = zio_buf_alloc(lwb->lwb_sz);
		lwb->lwb_max_txg = txg;
		lwb->lwb_zio = NULL;

		mutex_enter(&zilog->zl_lock);
		list_insert_tail(&zilog->zl_lwb_list, lwb);
		mutex_exit(&zilog->zl_lock);
	}

	/*
	 * If we just allocated the first log block, commit our transaction
	 * and wait for zil_sync() to stuff the block pointer into zh_log.
	 * (zh is part of the MOS, so we cannot modify it in open context.)
	 */
	if (tx != NULL) {
		dmu_tx_commit(tx);
		txg_wait_synced(zilog->zl_dmu_pool, txg);
	}

	ASSERT(bcmp(&blk, &zh->zh_log, sizeof (blk)) == 0);
}

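/*
 * Summary of the lwb life cycle implemented above and below (derived
 * from this file, for orientation): zil_create() or zil_lwb_write_start()
 * allocates the lwb and its data buffer; zil_lwb_commit() copies records
 * into lwb_buf; zil_lwb_write_start() chains the next block into the
 * trailer and issues the write with zio_nowait(); zil_lwb_write_done()
 * frees lwb_buf; and zil_sync() frees the on-disk block and the lwb
 * itself once its lwb_max_txg has synced.
 */
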
/*
 * In one tx, free all log blocks and clear the log header.
 * If keep_first is set, then we're replaying a log with no content.
 * We want to keep the first block, however, so that the first
 * synchronous transaction doesn't require a txg_wait_synced()
 * in zil_create(). We don't need to txg_wait_synced() here either
 * when keep_first is set, because both zil_create() and zil_destroy()
 * will wait for any in-progress destroys to complete.
 */
void
zil_destroy(zilog_t *zilog, boolean_t keep_first)
{
	const zil_header_t *zh = zilog->zl_header;
	lwb_t *lwb;
	dmu_tx_t *tx;
	uint64_t txg;

	/*
	 * Wait for any previous destroy to complete.
	 */
	txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);

	if (BP_IS_HOLE(&zh->zh_log))
		return;

	tx = dmu_tx_create(zilog->zl_os);
	(void) dmu_tx_assign(tx, TXG_WAIT);
	dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
	txg = dmu_tx_get_txg(tx);

	mutex_enter(&zilog->zl_lock);

	/*
	 * It is possible for the ZIL to get the previously mounted zilog
	 * structure of the same dataset if quickly remounted and the dbuf
	 * eviction has not completed. In this case we can see a non-empty
	 * lwb list and keep_first will be set. We fix this by clearing
	 * keep_first. This will be slower but it's very rare.
	 */
	if (!list_is_empty(&zilog->zl_lwb_list) && keep_first)
		keep_first = B_FALSE;

	ASSERT3U(zilog->zl_destroy_txg, <, txg);
	zilog->zl_destroy_txg = txg;
	zilog->zl_keep_first = keep_first;

	if (!list_is_empty(&zilog->zl_lwb_list)) {
		ASSERT(zh->zh_claim_txg == 0);
		ASSERT(!keep_first);
		while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
			list_remove(&zilog->zl_lwb_list, lwb);
			if (lwb->lwb_buf != NULL)
				zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
			zio_free_blk(zilog->zl_spa, &lwb->lwb_blk, txg);
			kmem_cache_free(zil_lwb_cache, lwb);
		}
	} else {
		if (!keep_first) {
			(void) zil_parse(zilog, zil_free_log_block,
			    zil_free_log_record, tx, zh->zh_claim_txg);
		}
	}
	mutex_exit(&zilog->zl_lock);

	dmu_tx_commit(tx);
}

/*
 * zil_rollback_destroy() is only called by the rollback code.
 * We already have a syncing tx. Rollback has exclusive access to the
 * dataset, so we don't have to worry about concurrent zil access.
 * The actual freeing of any log blocks occurs in zil_sync() later in
 * this txg syncing phase.
 */
void
zil_rollback_destroy(zilog_t *zilog, dmu_tx_t *tx)
{
	const zil_header_t *zh = zilog->zl_header;
	uint64_t txg;

	if (BP_IS_HOLE(&zh->zh_log))
		return;

	txg = dmu_tx_get_txg(tx);
	ASSERT3U(zilog->zl_destroy_txg, <, txg);
	zilog->zl_destroy_txg = txg;
	zilog->zl_keep_first = B_FALSE;

	/*
	 * Ensure there's no outstanding ZIL IO. Having no lwbs, or only
	 * the unused one allocated in advance, is OK.
	 */
	ASSERT(zilog->zl_lwb_list.list_head.list_next ==
	    zilog->zl_lwb_list.list_head.list_prev);
	(void) zil_parse(zilog, zil_free_log_block, zil_free_log_record,
	    tx, zh->zh_claim_txg);
}

/*
 * return true if the initial log block is not valid
 */
static boolean_t
zil_empty(zilog_t *zilog)
{
	const zil_header_t *zh = zilog->zl_header;
	arc_buf_t *abuf = NULL;

	if (BP_IS_HOLE(&zh->zh_log))
		return (B_TRUE);

	if (zil_read_log_block(zilog, &zh->zh_log, &abuf) != 0)
		return (B_TRUE);

	VERIFY(arc_buf_remove_ref(abuf, &abuf) == 1);
	return (B_FALSE);
}

int
zil_claim(char *osname, void *txarg)
{
	dmu_tx_t *tx = txarg;
	uint64_t first_txg = dmu_tx_get_txg(tx);
	zilog_t *zilog;
	zil_header_t *zh;
	objset_t *os;
	int error;

	error = dmu_objset_open(osname, DMU_OST_ANY, DS_MODE_USER, &os);
	if (error) {
		cmn_err(CE_WARN, "can't open objset for %s", osname);
		return (0);
	}

	zilog = dmu_objset_zil(os);
	zh = zil_header_in_syncing_context(zilog);

	/*
	 * Record here whether the zil has any records to replay.
	 * If the header block pointer is null or the block points
	 * to the stubby then we know there are no valid log records.
	 * We use the header to store this state as the zilog gets
	 * freed later in dmu_objset_close().
	 * The flags (and the rest of the header fields) are cleared in
	 * zil_sync() as a result of a zil_destroy(), after replaying the log.
	 *
	 * Note, the intent log can be empty but still need the
	 * stubby to be claimed.
	 */
	if (!zil_empty(zilog))
		zh->zh_flags |= ZIL_REPLAY_NEEDED;

	/*
	 * Claim all log blocks if we haven't already done so, and remember
	 * the highest claimed sequence number. This ensures that if we can
	 * read only part of the log now (e.g. due to a missing device),
	 * but we can read the entire log later, we will not try to replay
	 * or destroy beyond the last block we successfully claimed.
	 */
	ASSERT3U(zh->zh_claim_txg, <=, first_txg);
	if (zh->zh_claim_txg == 0 && !BP_IS_HOLE(&zh->zh_log)) {
		zh->zh_claim_txg = first_txg;
		zh->zh_claim_seq = zil_parse(zilog, zil_claim_log_block,
		    zil_claim_log_record, tx, first_txg);
		dsl_dataset_dirty(dmu_objset_ds(os), tx);
	}

	ASSERT3U(first_txg, ==, (spa_last_synced_txg(zilog->zl_spa) + 1));
	dmu_objset_close(os);
	return (0);
}

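/*
 * Note on zil_claim(): it runs once per dataset in syncing context
 * during pool import, which is why the ASSERT above ties first_txg to
 * spa_last_synced_txg() + 1.  The per-dataset iteration is driven by
 * the caller; this file only assumes claiming happens before any
 * replay or destroy of the log.
 */
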
/*
 * Check the log by walking the log chain.
 * Checksum errors are ok as they indicate the end of the chain.
 * Any other error (no device or read failure) returns an error.
 */
/* ARGSUSED */
int
zil_check_log_chain(char *osname, void *txarg)
{
	zilog_t *zilog;
	zil_header_t *zh;
	blkptr_t blk;
	arc_buf_t *abuf;
	objset_t *os;
	char *lrbuf;
	zil_trailer_t *ztp;
	int error;

	error = dmu_objset_open(osname, DMU_OST_ANY, DS_MODE_USER, &os);
	if (error) {
		cmn_err(CE_WARN, "can't open objset for %s", osname);
		return (0);
	}

	zilog = dmu_objset_zil(os);
	zh = zil_header_in_syncing_context(zilog);
	blk = zh->zh_log;
	if (BP_IS_HOLE(&blk)) {
		dmu_objset_close(os);
		return (0); /* no chain */
	}

	for (;;) {
		error = zil_read_log_block(zilog, &blk, &abuf);
		if (error)
			break;
		lrbuf = abuf->b_data;
		ztp = (zil_trailer_t *)(lrbuf + BP_GET_LSIZE(&blk)) - 1;
		blk = ztp->zit_next_blk;
		VERIFY(arc_buf_remove_ref(abuf, &abuf) == 1);
	}
	dmu_objset_close(os);
	if (error == ECKSUM)
		return (0); /* normal end of chain */
	return (error);
}

/*
 * Clear a log chain
 */
/* ARGSUSED */
int
zil_clear_log_chain(char *osname, void *txarg)
{
	zilog_t *zilog;
	zil_header_t *zh;
	objset_t *os;
	dmu_tx_t *tx;
	int error;

	error = dmu_objset_open(osname, DMU_OST_ANY, DS_MODE_USER, &os);
	if (error) {
		cmn_err(CE_WARN, "can't open objset for %s", osname);
		return (0);
	}

	zilog = dmu_objset_zil(os);
	tx = dmu_tx_create(zilog->zl_os);
	(void) dmu_tx_assign(tx, TXG_WAIT);
	zh = zil_header_in_syncing_context(zilog);
	BP_ZERO(&zh->zh_log);
	dsl_dataset_dirty(dmu_objset_ds(os), tx);
	dmu_tx_commit(tx);
	dmu_objset_close(os);
	return (0);
}

static int
zil_vdev_compare(const void *x1, const void *x2)
{
	uint64_t v1 = ((zil_vdev_node_t *)x1)->zv_vdev;
	uint64_t v2 = ((zil_vdev_node_t *)x2)->zv_vdev;

	if (v1 < v2)
		return (-1);
	if (v1 > v2)
		return (1);

	return (0);
}

void
zil_add_block(zilog_t *zilog, blkptr_t *bp)
{
	avl_tree_t *t = &zilog->zl_vdev_tree;
	avl_index_t where;
	zil_vdev_node_t *zv, zvsearch;
	int ndvas = BP_GET_NDVAS(bp);
	int i;

	if (zfs_nocacheflush)
		return;

	ASSERT(zilog->zl_writer);

	/*
	 * Even though we're zl_writer, we still need a lock because the
	 * zl_get_data() callbacks may have dmu_sync() done callbacks
	 * that will run concurrently.
	 */
	mutex_enter(&zilog->zl_vdev_lock);
	for (i = 0; i < ndvas; i++) {
		zvsearch.zv_vdev = DVA_GET_VDEV(&bp->blk_dva[i]);
		if (avl_find(t, &zvsearch, &where) == NULL) {
			zv = kmem_alloc(sizeof (*zv), KM_SLEEP);
			zv->zv_vdev = zvsearch.zv_vdev;
			avl_insert(t, zv, where);
		}
	}
	mutex_exit(&zilog->zl_vdev_lock);
}

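/*
 * The zl_vdev_tree built by zil_add_block() is keyed by top-level vdev
 * id, so each device is flushed at most once per commit regardless of
 * how many log blocks landed on it; zil_flush_vdevs() below empties the
 * tree as it issues the flushes.
 */
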
void
zil_flush_vdevs(zilog_t *zilog)
{
	spa_t *spa = zilog->zl_spa;
	avl_tree_t *t = &zilog->zl_vdev_tree;
	void *cookie = NULL;
	zil_vdev_node_t *zv;
	zio_t *zio;

	ASSERT(zilog->zl_writer);

	/*
	 * We don't need zl_vdev_lock here because we're the zl_writer,
	 * and all zl_get_data() callbacks are done.
	 */
	if (avl_numnodes(t) == 0)
		return;

	spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);

	zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);

	while ((zv = avl_destroy_nodes(t, &cookie)) != NULL) {
		vdev_t *vd = vdev_lookup_top(spa, zv->zv_vdev);
		if (vd != NULL)
			zio_flush(zio, vd);
		kmem_free(zv, sizeof (*zv));
	}

	/*
	 * Wait for all the flushes to complete. Not all devices actually
	 * support the DKIOCFLUSHWRITECACHE ioctl, so it's OK if it fails.
	 */
	(void) zio_wait(zio);

	spa_config_exit(spa, SCL_STATE, FTAG);
}

/*
 * Function called when a log block write completes
 */
static void
zil_lwb_write_done(zio_t *zio)
{
	lwb_t *lwb = zio->io_private;
	zilog_t *zilog = lwb->lwb_zilog;

	ASSERT(BP_GET_COMPRESS(zio->io_bp) == ZIO_COMPRESS_OFF);
	ASSERT(BP_GET_CHECKSUM(zio->io_bp) == ZIO_CHECKSUM_ZILOG);
	ASSERT(BP_GET_TYPE(zio->io_bp) == DMU_OT_INTENT_LOG);
	ASSERT(BP_GET_LEVEL(zio->io_bp) == 0);
	ASSERT(BP_GET_BYTEORDER(zio->io_bp) == ZFS_HOST_BYTEORDER);
	ASSERT(!BP_IS_GANG(zio->io_bp));
	ASSERT(!BP_IS_HOLE(zio->io_bp));
	ASSERT(zio->io_bp->blk_fill == 0);

	/*
	 * Now that we've written this log block, we have a stable pointer
	 * to the next block in the chain, so it's OK to let the txg in
	 * which we allocated the next block sync.
	 */
	txg_rele_to_sync(&lwb->lwb_txgh);

	zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
	mutex_enter(&zilog->zl_lock);
	lwb->lwb_buf = NULL;
	if (zio->io_error)
		zilog->zl_log_error = B_TRUE;
	mutex_exit(&zilog->zl_lock);
}

/*
 * Initialize the io for a log block.
 */
static void
zil_lwb_write_init(zilog_t *zilog, lwb_t *lwb)
{
	zbookmark_t zb;

	zb.zb_objset = lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_OBJSET];
	zb.zb_object = 0;
	zb.zb_level = -1;
	zb.zb_blkid = lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_SEQ];

	if (zilog->zl_root_zio == NULL) {
		zilog->zl_root_zio = zio_root(zilog->zl_spa, NULL, NULL,
		    ZIO_FLAG_CANFAIL);
	}
	if (lwb->lwb_zio == NULL) {
		lwb->lwb_zio = zio_rewrite(zilog->zl_root_zio, zilog->zl_spa,
		    0, &lwb->lwb_blk, lwb->lwb_buf,
		    lwb->lwb_sz, zil_lwb_write_done, lwb,
		    ZIO_PRIORITY_LOG_WRITE, ZIO_FLAG_CANFAIL, &zb);
	}
}

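/*
 * All lwb writes are created as children of zl_root_zio, so
 * zil_commit_writer() can wait for every outstanding log write with a
 * single zio_wait() on the root before asking the vdevs to flush their
 * write caches.
 */
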
/*
 * Start a log block write and advance to the next log block.
 * Calls are serialized.
 */
static lwb_t *
zil_lwb_write_start(zilog_t *zilog, lwb_t *lwb)
{
	lwb_t *nlwb;
	zil_trailer_t *ztp = (zil_trailer_t *)(lwb->lwb_buf + lwb->lwb_sz) - 1;
	spa_t *spa = zilog->zl_spa;
	blkptr_t *bp = &ztp->zit_next_blk;
	uint64_t txg;
	uint64_t zil_blksz;
	int error;

	ASSERT(lwb->lwb_nused <= ZIL_BLK_DATA_SZ(lwb));

	/*
	 * Allocate the next block and save its address in this block
	 * before writing it in order to establish the log chain.
	 * Note that if the allocation of nlwb synced before we wrote
	 * the block that points at it (lwb), we'd leak it if we crashed.
	 * Therefore, we don't do txg_rele_to_sync() until zil_lwb_write_done().
	 */
	txg = txg_hold_open(zilog->zl_dmu_pool, &lwb->lwb_txgh);
	txg_rele_to_quiesce(&lwb->lwb_txgh);

	/*
	 * Pick a ZIL blocksize. We request a size that is the
	 * maximum of the previous used size, the current used size and
	 * the amount waiting in the queue.
	 */
	zil_blksz = MAX(zilog->zl_prev_used,
	    zilog->zl_cur_used + sizeof (*ztp));
	zil_blksz = MAX(zil_blksz, zilog->zl_itx_list_sz + sizeof (*ztp));
	zil_blksz = P2ROUNDUP_TYPED(zil_blksz, ZIL_MIN_BLKSZ, uint64_t);
	if (zil_blksz > ZIL_MAX_BLKSZ)
		zil_blksz = ZIL_MAX_BLKSZ;

	BP_ZERO(bp);
	/* pass the old blkptr in order to spread log blocks across devs */
	error = zio_alloc_blk(spa, zil_blksz, bp, &lwb->lwb_blk, txg);
	if (error) {
		dmu_tx_t *tx = dmu_tx_create_assigned(zilog->zl_dmu_pool, txg);

		/*
		 * We dirty the dataset to ensure that zil_sync() will
		 * be called to remove this lwb from our zl_lwb_list.
		 * Failing to do so, may leave an lwb with a NULL lwb_buf
		 * hanging around on the zl_lwb_list.
		 */
		dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
		dmu_tx_commit(tx);

		/*
		 * We've just experienced an allocation failure, so
		 * terminate the current lwb and send it on its way.
		 */
		ztp->zit_pad = 0;
		ztp->zit_nused = lwb->lwb_nused;
		ztp->zit_bt.zbt_cksum = lwb->lwb_blk.blk_cksum;
		zio_nowait(lwb->lwb_zio);

		/*
		 * By returning NULL the caller will call tx_wait_synced()
		 */
		return (NULL);
	}

	ASSERT3U(bp->blk_birth, ==, txg);
	ztp->zit_pad = 0;
	ztp->zit_nused = lwb->lwb_nused;
	ztp->zit_bt.zbt_cksum = lwb->lwb_blk.blk_cksum;
	bp->blk_cksum = lwb->lwb_blk.blk_cksum;
	bp->blk_cksum.zc_word[ZIL_ZC_SEQ]++;

	/*
	 * Allocate a new log write buffer (lwb).
	 */
	nlwb = kmem_cache_alloc(zil_lwb_cache, KM_SLEEP);

	nlwb->lwb_zilog = zilog;
	nlwb->lwb_blk = *bp;
	nlwb->lwb_nused = 0;
	nlwb->lwb_sz = BP_GET_LSIZE(&nlwb->lwb_blk);
	nlwb->lwb_buf = zio_buf_alloc(nlwb->lwb_sz);
	nlwb->lwb_max_txg = txg;
	nlwb->lwb_zio = NULL;

	/*
	 * Put new lwb at the end of the log chain
	 */
	mutex_enter(&zilog->zl_lock);
	list_insert_tail(&zilog->zl_lwb_list, nlwb);
	mutex_exit(&zilog->zl_lock);

	/* Record the block for later vdev flushing */
	zil_add_block(zilog, &lwb->lwb_blk);

	/*
	 * kick off the write for the old log block
	 */
	dprintf_bp(&lwb->lwb_blk, "lwb %p txg %llu: ", lwb, txg);
	ASSERT(lwb->lwb_zio);
	zio_nowait(lwb->lwb_zio);

	return (nlwb);
}

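/*
 * A worked example of the block size heuristic above (numbers are
 * illustrative, assuming ZIL_MIN_BLKSZ is 4KB): with zl_prev_used of
 * 40KB, zl_cur_used of 20KB and zl_itx_list_sz of 70KB, the request is
 * the maximum of the three (plus the trailer), rounded up to the next
 * 4KB multiple -- 72KB here -- and capped at ZIL_MAX_BLKSZ.  The goal
 * is to size the next block so the currently queued itxs fit in a
 * single write.
 */
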
static lwb_t *
zil_lwb_commit(zilog_t *zilog, itx_t *itx, lwb_t *lwb)
{
	lr_t *lrc = &itx->itx_lr; /* common log record */
	lr_write_t *lr = (lr_write_t *)lrc;
	uint64_t txg = lrc->lrc_txg;
	uint64_t reclen = lrc->lrc_reclen;
	uint64_t dlen;

	if (lwb == NULL)
		return (NULL);
	ASSERT(lwb->lwb_buf != NULL);

	if (lrc->lrc_txtype == TX_WRITE && itx->itx_wr_state == WR_NEED_COPY)
		dlen = P2ROUNDUP_TYPED(
		    lr->lr_length, sizeof (uint64_t), uint64_t);
	else
		dlen = 0;

	zilog->zl_cur_used += (reclen + dlen);

	zil_lwb_write_init(zilog, lwb);

	/*
	 * If this record won't fit in the current log block, start a new one.
	 */
	if (lwb->lwb_nused + reclen + dlen > ZIL_BLK_DATA_SZ(lwb)) {
		lwb = zil_lwb_write_start(zilog, lwb);
		if (lwb == NULL)
			return (NULL);
		zil_lwb_write_init(zilog, lwb);
		ASSERT(lwb->lwb_nused == 0);
		if (reclen + dlen > ZIL_BLK_DATA_SZ(lwb)) {
			txg_wait_synced(zilog->zl_dmu_pool, txg);
			return (lwb);
		}
	}

	/*
	 * Update lrc_seq to be the log record sequence number (see zil.h),
	 * then copy the record to the log buffer.
	 */
	lrc->lrc_seq = ++zilog->zl_lr_seq; /* we are single threaded */
	bcopy(lrc, lwb->lwb_buf + lwb->lwb_nused, reclen);

	/*
	 * If it's a write, fetch the data or get its blkptr as appropriate.
	 */
	if (lrc->lrc_txtype == TX_WRITE) {
		if (txg > spa_freeze_txg(zilog->zl_spa))
			txg_wait_synced(zilog->zl_dmu_pool, txg);
		if (itx->itx_wr_state != WR_COPIED) {
			char *dbuf;
			int error;

			/* alignment is guaranteed */
			lr = (lr_write_t *)(lwb->lwb_buf + lwb->lwb_nused);
			if (dlen) {
				ASSERT(itx->itx_wr_state == WR_NEED_COPY);
				dbuf = lwb->lwb_buf + lwb->lwb_nused + reclen;
				lr->lr_common.lrc_reclen += dlen;
			} else {
				ASSERT(itx->itx_wr_state == WR_INDIRECT);
				dbuf = NULL;
			}
			error = zilog->zl_get_data(
			    itx->itx_private, lr, dbuf, lwb->lwb_zio);
			if (error) {
				ASSERT(error == ENOENT || error == EEXIST ||
				    error == EALREADY);
				return (lwb);
			}
		}
	}

	lwb->lwb_nused += reclen + dlen;
	lwb->lwb_max_txg = MAX(lwb->lwb_max_txg, txg);
	ASSERT3U(lwb->lwb_nused, <=, ZIL_BLK_DATA_SZ(lwb));
	ASSERT3U(P2PHASE(lwb->lwb_nused, sizeof (uint64_t)), ==, 0);

	return (lwb);
}

itx_t *
zil_itx_create(uint64_t txtype, size_t lrsize)
{
	itx_t *itx;

	lrsize = P2ROUNDUP_TYPED(lrsize, sizeof (uint64_t), size_t);

	itx = kmem_alloc(offsetof(itx_t, itx_lr) + lrsize, KM_SLEEP);
	itx->itx_lr.lrc_txtype = txtype;
	itx->itx_lr.lrc_reclen = lrsize;
	itx->itx_sod = lrsize; /* if write & WR_NEED_COPY will be increased */
	itx->itx_lr.lrc_seq = 0; /* defensive */

	return (itx);
}

uint64_t
zil_itx_assign(zilog_t *zilog, itx_t *itx, dmu_tx_t *tx)
{
	uint64_t seq;

	ASSERT(itx->itx_lr.lrc_seq == 0);

	mutex_enter(&zilog->zl_lock);
	list_insert_tail(&zilog->zl_itx_list, itx);
	zilog->zl_itx_list_sz += itx->itx_sod;
	itx->itx_lr.lrc_txg = dmu_tx_get_txg(tx);
	itx->itx_lr.lrc_seq = seq = ++zilog->zl_itx_seq;
	mutex_exit(&zilog->zl_lock);

	return (seq);
}

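/*
 * Illustrative caller sequence (simplified; the real callers live in
 * the ZPL's logging code, not in this file):
 *
 *	itx = zil_itx_create(TX_WRITE, sizeof (lr_write_t));
 *	... fill in the lr_write_t body, itx_wr_state, itx_private ...
 *	seq = zil_itx_assign(zilog, itx, tx);	/ * inside the dmu tx * /
 *	...
 *	zil_commit(zilog, seq, foid);		/ * at fsync/O_DSYNC time * /
 */
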
/*
 * Free up all in-memory intent log transactions that have now been synced.
 */
static void
zil_itx_clean(zilog_t *zilog)
{
	uint64_t synced_txg = spa_last_synced_txg(zilog->zl_spa);
	uint64_t freeze_txg = spa_freeze_txg(zilog->zl_spa);
	list_t clean_list;
	itx_t *itx;

	list_create(&clean_list, sizeof (itx_t), offsetof(itx_t, itx_node));

	mutex_enter(&zilog->zl_lock);
	/* wait for a log writer to finish walking list */
	while (zilog->zl_writer) {
		cv_wait(&zilog->zl_cv_writer, &zilog->zl_lock);
	}

	/*
	 * Move the sync'd log transactions to a separate list so we can call
	 * kmem_free without holding the zl_lock.
	 *
	 * There is no need to set zl_writer as we don't drop zl_lock here
	 */
	while ((itx = list_head(&zilog->zl_itx_list)) != NULL &&
	    itx->itx_lr.lrc_txg <= MIN(synced_txg, freeze_txg)) {
		list_remove(&zilog->zl_itx_list, itx);
		zilog->zl_itx_list_sz -= itx->itx_sod;
		list_insert_tail(&clean_list, itx);
	}
	cv_broadcast(&zilog->zl_cv_writer);
	mutex_exit(&zilog->zl_lock);

	/* destroy sync'd log transactions */
	while ((itx = list_head(&clean_list)) != NULL) {
		list_remove(&clean_list, itx);
		kmem_free(itx, offsetof(itx_t, itx_lr)
		    + itx->itx_lr.lrc_reclen);
	}
	list_destroy(&clean_list);
}

/*
 * If there are any in-memory intent log transactions which have now been
 * synced then start up a taskq to free them.
 */
void
zil_clean(zilog_t *zilog)
{
	itx_t *itx;

	mutex_enter(&zilog->zl_lock);
	itx = list_head(&zilog->zl_itx_list);
	if ((itx != NULL) &&
	    (itx->itx_lr.lrc_txg <= spa_last_synced_txg(zilog->zl_spa))) {
		(void) taskq_dispatch(zilog->zl_clean_taskq,
		    (task_func_t *)zil_itx_clean, zilog, TQ_SLEEP);
	}
	mutex_exit(&zilog->zl_lock);
}

static void
zil_commit_writer(zilog_t *zilog, uint64_t seq, uint64_t foid)
{
	uint64_t txg;
	uint64_t commit_seq = 0;
	itx_t *itx, *itx_next = (itx_t *)-1;
	lwb_t *lwb;
	spa_t *spa;

	zilog->zl_writer = B_TRUE;
	ASSERT(zilog->zl_root_zio == NULL);
	spa = zilog->zl_spa;

	if (zilog->zl_suspend) {
		lwb = NULL;
	} else {
		lwb = list_tail(&zilog->zl_lwb_list);
		if (lwb == NULL) {
			/*
			 * Return if there's nothing to flush before we
			 * dirty the fs by calling zil_create()
			 */
			if (list_is_empty(&zilog->zl_itx_list)) {
				zilog->zl_writer = B_FALSE;
				return;
			}
			mutex_exit(&zilog->zl_lock);
			zil_create(zilog);
			mutex_enter(&zilog->zl_lock);
			lwb = list_tail(&zilog->zl_lwb_list);
		}
	}

	/* Loop through in-memory log transactions filling log blocks. */
	DTRACE_PROBE1(zil__cw1, zilog_t *, zilog);
	for (;;) {
		/*
		 * Find the next itx to push:
		 * Push all transactions related to specified foid and all
		 * other transactions except TX_WRITE, TX_TRUNCATE,
		 * TX_SETATTR and TX_ACL for all other files.
		 */
		if (itx_next != (itx_t *)-1)
			itx = itx_next;
		else
			itx = list_head(&zilog->zl_itx_list);
		for (; itx != NULL; itx = list_next(&zilog->zl_itx_list, itx)) {
			if (foid == 0) /* push all foids? */
				break;
			if (itx->itx_sync) /* push all O_[D]SYNC */
				break;
			switch (itx->itx_lr.lrc_txtype) {
			case TX_SETATTR:
			case TX_WRITE:
			case TX_TRUNCATE:
			case TX_ACL:
				/* lr_foid is same offset for these records */
				if (((lr_write_t *)&itx->itx_lr)->lr_foid
				    != foid) {
					continue; /* skip this record */
				}
			}
			break;
		}
		if (itx == NULL)
			break;

		if ((itx->itx_lr.lrc_seq > seq) &&
		    ((lwb == NULL) || (lwb->lwb_nused == 0) ||
		    (lwb->lwb_nused + itx->itx_sod > ZIL_BLK_DATA_SZ(lwb)))) {
			break;
		}

		/*
		 * Save the next pointer. Even though we soon drop
		 * zl_lock all threads that may change the list
		 * (another writer or zil_itx_clean) can't do so until
		 * they have zl_writer.
		 */
		itx_next = list_next(&zilog->zl_itx_list, itx);
		list_remove(&zilog->zl_itx_list, itx);
		zilog->zl_itx_list_sz -= itx->itx_sod;
		mutex_exit(&zilog->zl_lock);
		txg = itx->itx_lr.lrc_txg;
		ASSERT(txg);

		if (txg > spa_last_synced_txg(spa) ||
		    txg > spa_freeze_txg(spa))
			lwb = zil_lwb_commit(zilog, itx, lwb);
		kmem_free(itx, offsetof(itx_t, itx_lr)
		    + itx->itx_lr.lrc_reclen);
		mutex_enter(&zilog->zl_lock);
	}
	DTRACE_PROBE1(zil__cw2, zilog_t *, zilog);
	/* determine commit sequence number */
	itx = list_head(&zilog->zl_itx_list);
	if (itx)
		commit_seq = itx->itx_lr.lrc_seq;
	else
		commit_seq = zilog->zl_itx_seq;
	mutex_exit(&zilog->zl_lock);

	/* write the last block out */
	if (lwb != NULL && lwb->lwb_zio != NULL)
		lwb = zil_lwb_write_start(zilog, lwb);

	zilog->zl_prev_used = zilog->zl_cur_used;
	zilog->zl_cur_used = 0;

	/*
	 * Wait if necessary for the log blocks to be on stable storage.
	 */
	if (zilog->zl_root_zio) {
		DTRACE_PROBE1(zil__cw3, zilog_t *, zilog);
		(void) zio_wait(zilog->zl_root_zio);
		zilog->zl_root_zio = NULL;
		DTRACE_PROBE1(zil__cw4, zilog_t *, zilog);
		zil_flush_vdevs(zilog);
	}

	if (zilog->zl_log_error || lwb == NULL) {
		zilog->zl_log_error = 0;
		txg_wait_synced(zilog->zl_dmu_pool, 0);
	}

	mutex_enter(&zilog->zl_lock);
	zilog->zl_writer = B_FALSE;

	ASSERT3U(commit_seq, >=, zilog->zl_commit_seq);
	zilog->zl_commit_seq = commit_seq;
}

/*
 * Push zfs transactions to stable storage up to the supplied sequence number.
 * If foid is 0 push out all transactions, otherwise push only those
 * for that file or those that might have been used to create that file.
 */
void
zil_commit(zilog_t *zilog, uint64_t seq, uint64_t foid)
{
	if (zilog == NULL || seq == 0)
		return;

	mutex_enter(&zilog->zl_lock);

	seq = MIN(seq, zilog->zl_itx_seq); /* cap seq at largest itx seq */

	while (zilog->zl_writer) {
		cv_wait(&zilog->zl_cv_writer, &zilog->zl_lock);
		if (seq < zilog->zl_commit_seq) {
			mutex_exit(&zilog->zl_lock);
			return;
		}
	}
	zil_commit_writer(zilog, seq, foid); /* drops zl_lock */
	/* wake up others waiting on the commit */
	cv_broadcast(&zilog->zl_cv_writer);
	mutex_exit(&zilog->zl_lock);
}

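/*
 * Concurrency note: only one thread at a time is zl_writer; other
 * committers block in the cv_wait() loop above.  A waiter re-checks
 * zl_commit_seq after each writer finishes, so a single writer pass
 * frequently satisfies a whole batch of waiting fsync() calls at once.
 */
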
/*
 * Called in syncing context to free committed log blocks and update log header.
 */
void
zil_sync(zilog_t *zilog, dmu_tx_t *tx)
{
	zil_header_t *zh = zil_header_in_syncing_context(zilog);
	uint64_t txg = dmu_tx_get_txg(tx);
	spa_t *spa = zilog->zl_spa;
	lwb_t *lwb;

	mutex_enter(&zilog->zl_lock);

	ASSERT(zilog->zl_stop_sync == 0);

	zh->zh_replay_seq = zilog->zl_replay_seq[txg & TXG_MASK];

	if (zilog->zl_destroy_txg == txg) {
		blkptr_t blk = zh->zh_log;

		ASSERT(list_head(&zilog->zl_lwb_list) == NULL);
		ASSERT(spa_sync_pass(spa) == 1);

		bzero(zh, sizeof (zil_header_t));
		bzero(zilog->zl_replay_seq, sizeof (zilog->zl_replay_seq));

		if (zilog->zl_keep_first) {
			/*
			 * If this block was part of log chain that couldn't
			 * be claimed because a device was missing during
			 * zil_claim(), but that device later returns,
			 * then this block could erroneously appear valid.
			 * To guard against this, assign a new GUID to the new
			 * log chain so it doesn't matter what blk points to.
			 */
			zil_init_log_chain(zilog, &blk);
			zh->zh_log = blk;
		}
	}

	for (;;) {
		lwb = list_head(&zilog->zl_lwb_list);
		if (lwb == NULL) {
			mutex_exit(&zilog->zl_lock);
			return;
		}
		zh->zh_log = lwb->lwb_blk;
		if (lwb->lwb_buf != NULL || lwb->lwb_max_txg > txg)
			break;
		list_remove(&zilog->zl_lwb_list, lwb);
		zio_free_blk(spa, &lwb->lwb_blk, txg);
		kmem_cache_free(zil_lwb_cache, lwb);

		/*
		 * If we don't have anything left in the lwb list then
		 * we've had an allocation failure and we need to zero
		 * out the zil_header blkptr so that we don't end
		 * up freeing the same block twice.
		 */
		if (list_head(&zilog->zl_lwb_list) == NULL)
			BP_ZERO(&zh->zh_log);
	}
	mutex_exit(&zilog->zl_lock);
}

void
zil_init(void)
{
	zil_lwb_cache = kmem_cache_create("zil_lwb_cache",
	    sizeof (struct lwb), 0, NULL, NULL, NULL, NULL, NULL, 0);
}

void
zil_fini(void)
{
	kmem_cache_destroy(zil_lwb_cache);
}

zilog_t *
zil_alloc(objset_t *os, zil_header_t *zh_phys)
{
	zilog_t *zilog;

	zilog = kmem_zalloc(sizeof (zilog_t), KM_SLEEP);

	zilog->zl_header = zh_phys;
	zilog->zl_os = os;
	zilog->zl_spa = dmu_objset_spa(os);
	zilog->zl_dmu_pool = dmu_objset_pool(os);
	zilog->zl_destroy_txg = TXG_INITIAL - 1;

	mutex_init(&zilog->zl_lock, NULL, MUTEX_DEFAULT, NULL);

	list_create(&zilog->zl_itx_list, sizeof (itx_t),
	    offsetof(itx_t, itx_node));

	list_create(&zilog->zl_lwb_list, sizeof (lwb_t),
	    offsetof(lwb_t, lwb_node));

	mutex_init(&zilog->zl_vdev_lock, NULL, MUTEX_DEFAULT, NULL);

	avl_create(&zilog->zl_vdev_tree, zil_vdev_compare,
	    sizeof (zil_vdev_node_t), offsetof(zil_vdev_node_t, zv_node));

	cv_init(&zilog->zl_cv_writer, NULL, CV_DEFAULT, NULL);
	cv_init(&zilog->zl_cv_suspend, NULL, CV_DEFAULT, NULL);

	return (zilog);
}

void
zil_free(zilog_t *zilog)
{
	lwb_t *lwb;

	zilog->zl_stop_sync = 1;

	while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
		list_remove(&zilog->zl_lwb_list, lwb);
		if (lwb->lwb_buf != NULL)
			zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
		kmem_cache_free(zil_lwb_cache, lwb);
	}
	list_destroy(&zilog->zl_lwb_list);

	avl_destroy(&zilog->zl_vdev_tree);
	mutex_destroy(&zilog->zl_vdev_lock);

	ASSERT(list_head(&zilog->zl_itx_list) == NULL);
	list_destroy(&zilog->zl_itx_list);
	mutex_destroy(&zilog->zl_lock);

	cv_destroy(&zilog->zl_cv_writer);
	cv_destroy(&zilog->zl_cv_suspend);

	kmem_free(zilog, sizeof (zilog_t));
}

/*
 * Open an intent log.
 */
zilog_t *
zil_open(objset_t *os, zil_get_data_t *get_data)
{
	zilog_t *zilog = dmu_objset_zil(os);

	zilog->zl_get_data = get_data;
	zilog->zl_clean_taskq = taskq_create("zil_clean", 1, minclsyspri,
	    2, 2, TASKQ_PREPOPULATE);

	return (zilog);
}

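/*
 * The get_data callback registered here is what zil_lwb_commit() uses
 * for TX_WRITE records that were not copied into the itx at create
 * time: for WR_NEED_COPY it copies the file data into the log block,
 * and for WR_INDIRECT it fills in the record's blkptr instead
 * (typically via dmu_sync()).  The callback may fail with
 * ENOENT/EEXIST/EALREADY, which zil_lwb_commit() treats as "the data
 * will come from the txg commit".
 */
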
/*
 * Close an intent log.
 */
void
zil_close(zilog_t *zilog)
{
	/*
	 * If the log isn't already committed, mark the objset dirty
	 * (so zil_sync() will be called) and wait for that txg to sync.
	 */
	if (!zil_is_committed(zilog)) {
		uint64_t txg;
		dmu_tx_t *tx = dmu_tx_create(zilog->zl_os);
		(void) dmu_tx_assign(tx, TXG_WAIT);
		dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
		txg = dmu_tx_get_txg(tx);
		dmu_tx_commit(tx);
		txg_wait_synced(zilog->zl_dmu_pool, txg);
	}

	taskq_destroy(zilog->zl_clean_taskq);
	zilog->zl_clean_taskq = NULL;
	zilog->zl_get_data = NULL;

	zil_itx_clean(zilog);
	ASSERT(list_head(&zilog->zl_itx_list) == NULL);
}

/*
 * Suspend an intent log. While in suspended mode, we still honor
 * synchronous semantics, but we rely on txg_wait_synced() to do it.
 * We suspend the log briefly when taking a snapshot so that the snapshot
 * contains all the data it's supposed to, and has an empty intent log.
 */
int
zil_suspend(zilog_t *zilog)
{
	const zil_header_t *zh = zilog->zl_header;

	mutex_enter(&zilog->zl_lock);
	if (zh->zh_flags & ZIL_REPLAY_NEEDED) { /* unplayed log */
		mutex_exit(&zilog->zl_lock);
		return (EBUSY);
	}
	if (zilog->zl_suspend++ != 0) {
		/*
		 * Someone else already began a suspend.
		 * Just wait for them to finish.
		 */
		while (zilog->zl_suspending)
			cv_wait(&zilog->zl_cv_suspend, &zilog->zl_lock);
		mutex_exit(&zilog->zl_lock);
		return (0);
	}
	zilog->zl_suspending = B_TRUE;
	mutex_exit(&zilog->zl_lock);

	zil_commit(zilog, UINT64_MAX, 0);

	/*
	 * Wait for any in-flight log writes to complete.
	 */
	mutex_enter(&zilog->zl_lock);
	while (zilog->zl_writer)
		cv_wait(&zilog->zl_cv_writer, &zilog->zl_lock);
	mutex_exit(&zilog->zl_lock);

	zil_destroy(zilog, B_FALSE);

	mutex_enter(&zilog->zl_lock);
	zilog->zl_suspending = B_FALSE;
	cv_broadcast(&zilog->zl_cv_suspend);
	mutex_exit(&zilog->zl_lock);

	return (0);
}

void
zil_resume(zilog_t *zilog)
{
	mutex_enter(&zilog->zl_lock);
	ASSERT(zilog->zl_suspend != 0);
	zilog->zl_suspend--;
	mutex_exit(&zilog->zl_lock);
}

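/*
 * Illustrative pairing (the snapshot code is the in-tree user):
 *
 *	if ((error = zil_suspend(zilog)) != 0)
 *		return (error);
 *	... take the snapshot ...
 *	zil_resume(zilog);
 *
 * While suspended, zil_commit_writer() sees zl_suspend != 0 and writes
 * no new log blocks; synchronous semantics fall back to
 * txg_wait_synced().
 */
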
typedef struct zil_replay_arg {
	objset_t *zr_os;
	zil_replay_func_t **zr_replay;
	zil_replay_cleaner_t *zr_replay_cleaner;
	void *zr_arg;
	uint64_t *zr_txgp;
	boolean_t zr_byteswap;
	char *zr_lrbuf;
} zil_replay_arg_t;

static void
zil_replay_log_record(zilog_t *zilog, lr_t *lr, void *zra, uint64_t claim_txg)
{
	zil_replay_arg_t *zr = zra;
	const zil_header_t *zh = zilog->zl_header;
	uint64_t reclen = lr->lrc_reclen;
	uint64_t txtype = lr->lrc_txtype;
	char *name;
	int pass, error, sunk;

	if (zilog->zl_stop_replay)
		return;

	if (lr->lrc_txg < claim_txg) /* already committed */
		return;

	if (lr->lrc_seq <= zh->zh_replay_seq) /* already replayed */
		return;

	/* Strip case-insensitive bit, still present in log record */
	txtype &= ~TX_CI;

	/*
	 * Make a copy of the data so we can revise and extend it.
	 */
	bcopy(lr, zr->zr_lrbuf, reclen);

	/*
	 * The log block containing this lr may have been byteswapped
	 * so that we can easily examine common fields like lrc_txtype.
	 * However, the log is a mix of different data types, and only the
	 * replay vectors know how to byteswap their records. Therefore, if
	 * the lr was byteswapped, undo it before invoking the replay vector.
	 */
	if (zr->zr_byteswap)
		byteswap_uint64_array(zr->zr_lrbuf, reclen);

	/*
	 * If this is a TX_WRITE with a blkptr, suck in the data.
	 */
	if (txtype == TX_WRITE && reclen == sizeof (lr_write_t)) {
		lr_write_t *lrw = (lr_write_t *)lr;
		blkptr_t *wbp = &lrw->lr_blkptr;
		uint64_t wlen = lrw->lr_length;
		char *wbuf = zr->zr_lrbuf + reclen;

		if (BP_IS_HOLE(wbp)) { /* compressed to a hole */
			bzero(wbuf, wlen);
		} else {
			/*
			 * A subsequent write may have overwritten this block,
			 * in which case wbp may have been freed and
			 * reallocated, and our read of wbp may fail with a
			 * checksum error. We can safely ignore this because
			 * the later write will provide the correct data.
			 */
			zbookmark_t zb;

			zb.zb_objset = dmu_objset_id(zilog->zl_os);
			zb.zb_object = lrw->lr_foid;
			zb.zb_level = -1;
			zb.zb_blkid = lrw->lr_offset / BP_GET_LSIZE(wbp);

			(void) zio_wait(zio_read(NULL, zilog->zl_spa,
			    wbp, wbuf, BP_GET_LSIZE(wbp), NULL, NULL,
			    ZIO_PRIORITY_SYNC_READ,
			    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE, &zb));
			(void) memmove(wbuf, wbuf + lrw->lr_blkoff, wlen);
		}
	}

	/*
	 * We must now do two things atomically: replay this log record,
	 * and update the log header to reflect the fact that we did so.
	 * We use the DMU's ability to assign into a specific txg to do this.
	 */
	for (pass = 1, sunk = B_FALSE; /* CONSTANTCONDITION */; pass++) {
		uint64_t replay_txg;
		dmu_tx_t *replay_tx;

		replay_tx = dmu_tx_create(zr->zr_os);
		error = dmu_tx_assign(replay_tx, TXG_WAIT);
		if (error) {
			dmu_tx_abort(replay_tx);
			break;
		}

		replay_txg = dmu_tx_get_txg(replay_tx);

		if (txtype == 0 || txtype >= TX_MAX_TYPE) {
			error = EINVAL;
		} else {
			/*
			 * On the first pass, arrange for the replay vector
			 * to fail its dmu_tx_assign(). That's the only way
			 * to ensure that those code paths remain well tested.
			 *
			 * Only byteswap (if needed) on the 1st pass.
			 */
			*zr->zr_txgp = replay_txg - (pass == 1);
			error = zr->zr_replay[txtype](zr->zr_arg, zr->zr_lrbuf,
			    zr->zr_byteswap && pass == 1);
			*zr->zr_txgp = TXG_NOWAIT;
		}

		if (error == 0) {
			dsl_dataset_dirty(dmu_objset_ds(zr->zr_os), replay_tx);
			zilog->zl_replay_seq[replay_txg & TXG_MASK] =
			    lr->lrc_seq;
		}

		dmu_tx_commit(replay_tx);

		if (!error)
			return;

		/*
		 * The DMU's dnode layer doesn't see removes until the txg
		 * commits, so a subsequent claim can spuriously fail with
		 * EEXIST. So if we receive any error other than ERESTART
		 * we try syncing out any removes then retrying the
		 * transaction.
		 */
		if (error != ERESTART && !sunk) {
			if (zr->zr_replay_cleaner)
				zr->zr_replay_cleaner(zr->zr_arg);
			txg_wait_synced(spa_get_dsl(zilog->zl_spa), 0);
			sunk = B_TRUE;
			continue; /* retry */
		}

		if (error != ERESTART)
			break;

		if (pass != 1)
			txg_wait_open(spa_get_dsl(zilog->zl_spa),
			    replay_txg + 1);

		dprintf("pass %d, retrying\n", pass);
	}

	ASSERT(error && error != ERESTART);
	name = kmem_alloc(MAXNAMELEN, KM_SLEEP);
	dmu_objset_name(zr->zr_os, name);
	cmn_err(CE_WARN, "ZFS replay transaction error %d, "
	    "dataset %s, seq 0x%llx, txtype %llu %s\n",
	    error, name, (u_longlong_t)lr->lrc_seq, (u_longlong_t)txtype,
	    (lr->lrc_txtype & TX_CI) ? "CI" : "");
	zilog->zl_stop_replay = 1;
	kmem_free(name, MAXNAMELEN);
}

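/*
 * Replay idempotency: zl_replay_seq[] is indexed by txg so that
 * zil_sync() publishes zh_replay_seq only when the txg that actually
 * applied a record commits.  If the system crashes mid-replay, records
 * up to the stored zh_replay_seq are skipped by the lrc_seq check at
 * the top of zil_replay_log_record() and the rest are replayed again.
 */
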
/* ARGSUSED */
static void
zil_incr_blks(zilog_t *zilog, blkptr_t *bp, void *arg, uint64_t claim_txg)
{
	zilog->zl_replay_blks++;
}

/*
 * If this dataset has a non-empty intent log, replay it and destroy it.
 */
void
zil_replay(objset_t *os, void *arg, uint64_t *txgp,
    zil_replay_func_t *replay_func[TX_MAX_TYPE],
    zil_replay_cleaner_t *replay_cleaner)
{
	zilog_t *zilog = dmu_objset_zil(os);
	const zil_header_t *zh = zilog->zl_header;
	zil_replay_arg_t zr;

	if ((zh->zh_flags & ZIL_REPLAY_NEEDED) == 0) {
		zil_destroy(zilog, B_TRUE);
		return;
	}
	//printf("ZFS: Replaying ZIL on %s...\n", os->os->os_spa->spa_name);

	zr.zr_os = os;
	zr.zr_replay = replay_func;
	zr.zr_replay_cleaner = replay_cleaner;
	zr.zr_arg = arg;
	zr.zr_txgp = txgp;
	zr.zr_byteswap = BP_SHOULD_BYTESWAP(&zh->zh_log);
	zr.zr_lrbuf = kmem_alloc(2 * SPA_MAXBLOCKSIZE, KM_SLEEP);

	/*
	 * Wait for in-progress removes to sync before starting replay.
	 */
	txg_wait_synced(zilog->zl_dmu_pool, 0);

	zilog->zl_stop_replay = 0;
	zilog->zl_replay_time = LBOLT;
	ASSERT(zilog->zl_replay_blks == 0);
	(void) zil_parse(zilog, zil_incr_blks, zil_replay_log_record, &zr,
	    zh->zh_claim_txg);
	kmem_free(zr.zr_lrbuf, 2 * SPA_MAXBLOCKSIZE);

	zil_destroy(zilog, B_FALSE);
	txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);
	//printf("ZFS: Replay of ZIL on %s finished.\n", os->os->os_spa->spa_name);
}

/*
 * Report whether all transactions are committed
 */
int
zil_is_committed(zilog_t *zilog)
{
	lwb_t *lwb;
	int ret;

	mutex_enter(&zilog->zl_lock);
	while (zilog->zl_writer)
		cv_wait(&zilog->zl_cv_writer, &zilog->zl_lock);

	/* recent unpushed intent log transactions? */
	if (!list_is_empty(&zilog->zl_itx_list)) {
		ret = B_FALSE;
		goto out;
	}

	/* intent log never used? */
	lwb = list_head(&zilog->zl_lwb_list);
	if (lwb == NULL) {
		ret = B_TRUE;
		goto out;
	}

	/*
	 * more than 1 log buffer means zil_sync() hasn't yet freed
	 * entries after a txg has committed
	 */
	if (list_next(&zilog->zl_lwb_list, lwb)) {
		ret = B_FALSE;
		goto out;
	}

	ASSERT(zil_empty(zilog));
	ret = B_TRUE;
out:
	cv_broadcast(&zilog->zl_cv_writer);
	mutex_exit(&zilog->zl_lock);
	return (ret);
}