zil.c revision 191900
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/dmu.h>
#include <sys/zap.h>
#include <sys/arc.h>
#include <sys/stat.h>
#include <sys/resource.h>
#include <sys/zil.h>
#include <sys/zil_impl.h>
#include <sys/dsl_dataset.h>
#include <sys/vdev.h>
#include <sys/dmu_tx.h>

/*
 * The zfs intent log (ZIL) saves transaction records of system calls
 * that change the file system in memory with enough information
 * to be able to replay them. These are stored in memory until
 * either the DMU transaction group (txg) commits them to the stable pool
 * and they can be discarded, or they are flushed to the stable log
 * (also in the pool) due to a fsync, O_DSYNC or other synchronous
 * requirement. In the event of a panic or power failure those log
 * records (transactions) are replayed.
 *
 * There is one ZIL per file system. Its on-disk (pool) format consists
 * of 3 parts:
 *
 *	- ZIL header
 *	- ZIL blocks
 *	- ZIL records
 *
 * A log record holds a system call transaction. Log blocks can
 * hold many log records and the blocks are chained together.
 * Each ZIL block contains a block pointer (blkptr_t) to the next
 * ZIL block in the chain. The ZIL header points to the first
 * block in the chain. Note there is not a fixed place in the pool
 * to hold blocks. They are dynamically allocated and freed as
 * needed from the blocks available.
 */

/*
 * This global ZIL switch affects all pools
 */
int zil_disable = 0;	/* disable intent logging */
SYSCTL_DECL(_vfs_zfs);
TUNABLE_INT("vfs.zfs.zil_disable", &zil_disable);
SYSCTL_INT(_vfs_zfs, OID_AUTO, zil_disable, CTLFLAG_RW, &zil_disable, 0,
    "Disable ZFS Intent Log (ZIL)");

/*
 * Tunable parameter for debugging or performance analysis. Setting
 * zfs_nocacheflush will cause corruption on power loss if a volatile
 * out-of-order write cache is enabled.
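 *
 * On FreeBSD this switch is exported below as the boot-time loader
 * tunable vfs.zfs.cache_flush_disable (CTLFLAG_RDTUN), so it is normally
 * set from /boot/loader.conf, e.g. vfs.zfs.cache_flush_disable="1".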
 */
boolean_t zfs_nocacheflush = B_FALSE;
TUNABLE_INT("vfs.zfs.cache_flush_disable", &zfs_nocacheflush);
SYSCTL_INT(_vfs_zfs, OID_AUTO, cache_flush_disable, CTLFLAG_RDTUN,
    &zfs_nocacheflush, 0, "Disable cache flush");

static kmem_cache_t *zil_lwb_cache;

static int
zil_dva_compare(const void *x1, const void *x2)
{
	const dva_t *dva1 = x1;
	const dva_t *dva2 = x2;

	if (DVA_GET_VDEV(dva1) < DVA_GET_VDEV(dva2))
		return (-1);
	if (DVA_GET_VDEV(dva1) > DVA_GET_VDEV(dva2))
		return (1);

	if (DVA_GET_OFFSET(dva1) < DVA_GET_OFFSET(dva2))
		return (-1);
	if (DVA_GET_OFFSET(dva1) > DVA_GET_OFFSET(dva2))
		return (1);

	return (0);
}

static void
zil_dva_tree_init(avl_tree_t *t)
{
	avl_create(t, zil_dva_compare, sizeof (zil_dva_node_t),
	    offsetof(zil_dva_node_t, zn_node));
}

static void
zil_dva_tree_fini(avl_tree_t *t)
{
	zil_dva_node_t *zn;
	void *cookie = NULL;

	while ((zn = avl_destroy_nodes(t, &cookie)) != NULL)
		kmem_free(zn, sizeof (zil_dva_node_t));

	avl_destroy(t);
}

static int
zil_dva_tree_add(avl_tree_t *t, dva_t *dva)
{
	zil_dva_node_t *zn;
	avl_index_t where;

	if (avl_find(t, dva, &where) != NULL)
		return (EEXIST);

	zn = kmem_alloc(sizeof (zil_dva_node_t), KM_SLEEP);
	zn->zn_dva = *dva;
	avl_insert(t, zn, where);

	return (0);
}

static zil_header_t *
zil_header_in_syncing_context(zilog_t *zilog)
{
	return ((zil_header_t *)zilog->zl_header);
}

static void
zil_init_log_chain(zilog_t *zilog, blkptr_t *bp)
{
	zio_cksum_t *zc = &bp->blk_cksum;

	zc->zc_word[ZIL_ZC_GUID_0] = spa_get_random(-1ULL);
	zc->zc_word[ZIL_ZC_GUID_1] = spa_get_random(-1ULL);
	zc->zc_word[ZIL_ZC_OBJSET] = dmu_objset_id(zilog->zl_os);
	zc->zc_word[ZIL_ZC_SEQ] = 1ULL;
}

/*
 * Read a log block, make sure it's valid, and byteswap it if necessary.
 */
static int
zil_read_log_block(zilog_t *zilog, const blkptr_t *bp, arc_buf_t **abufpp)
{
	blkptr_t blk = *bp;
	zbookmark_t zb;
	uint32_t aflags = ARC_WAIT;
	int error;

	zb.zb_objset = bp->blk_cksum.zc_word[ZIL_ZC_OBJSET];
	zb.zb_object = 0;
	zb.zb_level = -1;
	zb.zb_blkid = bp->blk_cksum.zc_word[ZIL_ZC_SEQ];

	*abufpp = NULL;

	/*
	 * We shouldn't be doing any scrubbing while we're doing log
	 * replay, it's OK to not lock.
	 */
	error = arc_read_nolock(NULL, zilog->zl_spa, &blk,
	    arc_getbuf_func, abufpp, ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL |
	    ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB, &aflags, &zb);

	if (error == 0) {
		char *data = (*abufpp)->b_data;
		uint64_t blksz = BP_GET_LSIZE(bp);
		zil_trailer_t *ztp = (zil_trailer_t *)(data + blksz) - 1;
		zio_cksum_t cksum = bp->blk_cksum;

		/*
		 * Validate the checksummed log block.
		 *
		 * Sequence numbers should be... sequential. The checksum
		 * verifier for the next block should be bp's checksum plus 1.
		 *
		 * Also check the log chain linkage and size used.
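		 *
		 * (The next block's checksum was seeded from this block's
		 * checksum, with ZIL_ZC_SEQ incremented by one, when it was
		 * allocated in zil_lwb_write_start(), so a stale or reused
		 * block fails this comparison and ends the chain walk.)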
		 */
		cksum.zc_word[ZIL_ZC_SEQ]++;

		if (bcmp(&cksum, &ztp->zit_next_blk.blk_cksum,
		    sizeof (cksum)) || BP_IS_HOLE(&ztp->zit_next_blk) ||
		    (ztp->zit_nused > (blksz - sizeof (zil_trailer_t)))) {
			error = ECKSUM;
		}

		if (error) {
			VERIFY(arc_buf_remove_ref(*abufpp, abufpp) == 1);
			*abufpp = NULL;
		}
	}

	dprintf("error %d on %llu:%llu\n", error, zb.zb_objset, zb.zb_blkid);

	return (error);
}

/*
 * Parse the intent log, and call parse_func for each valid record within.
 * Return the highest sequence number.
 */
uint64_t
zil_parse(zilog_t *zilog, zil_parse_blk_func_t *parse_blk_func,
    zil_parse_lr_func_t *parse_lr_func, void *arg, uint64_t txg)
{
	const zil_header_t *zh = zilog->zl_header;
	uint64_t claim_seq = zh->zh_claim_seq;
	uint64_t seq = 0;
	uint64_t max_seq = 0;
	blkptr_t blk = zh->zh_log;
	arc_buf_t *abuf;
	char *lrbuf, *lrp;
	zil_trailer_t *ztp;
	int reclen, error;

	if (BP_IS_HOLE(&blk))
		return (max_seq);

	/*
	 * Starting at the block pointed to by zh_log we read the log chain.
	 * For each block in the chain we strongly check that block to
	 * ensure its validity. We stop when an invalid block is found.
	 * For each block pointer in the chain we call parse_blk_func().
	 * For each record in each valid block we call parse_lr_func().
	 * If the log has been claimed, stop if we encounter a sequence
	 * number greater than the highest claimed sequence number.
	 */
	zil_dva_tree_init(&zilog->zl_dva_tree);
	for (;;) {
		seq = blk.blk_cksum.zc_word[ZIL_ZC_SEQ];

		if (claim_seq != 0 && seq > claim_seq)
			break;

		ASSERT(max_seq < seq);
		max_seq = seq;

		error = zil_read_log_block(zilog, &blk, &abuf);

		if (parse_blk_func != NULL)
			parse_blk_func(zilog, &blk, arg, txg);

		if (error)
			break;

		lrbuf = abuf->b_data;
		ztp = (zil_trailer_t *)(lrbuf + BP_GET_LSIZE(&blk)) - 1;
		blk = ztp->zit_next_blk;

		if (parse_lr_func == NULL) {
			VERIFY(arc_buf_remove_ref(abuf, &abuf) == 1);
			continue;
		}

		for (lrp = lrbuf; lrp < lrbuf + ztp->zit_nused; lrp += reclen) {
			lr_t *lr = (lr_t *)lrp;
			reclen = lr->lrc_reclen;
			ASSERT3U(reclen, >=, sizeof (lr_t));
			parse_lr_func(zilog, lr, arg, txg);
		}
		VERIFY(arc_buf_remove_ref(abuf, &abuf) == 1);
	}
	zil_dva_tree_fini(&zilog->zl_dva_tree);

	return (max_seq);
}

/* ARGSUSED */
static void
zil_claim_log_block(zilog_t *zilog, blkptr_t *bp, void *tx, uint64_t first_txg)
{
	spa_t *spa = zilog->zl_spa;
	int err;

	/*
	 * Claim log block if not already committed and not already claimed.
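	 *
	 * (Blocks born before first_txg are already part of a synced txg
	 * and need no claim; the zl_dva_tree keeps us from claiming the
	 * same DVA twice.)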
	 */
	if (bp->blk_birth >= first_txg &&
	    zil_dva_tree_add(&zilog->zl_dva_tree, BP_IDENTITY(bp)) == 0) {
		err = zio_wait(zio_claim(NULL, spa, first_txg, bp, NULL, NULL,
		    ZIO_FLAG_MUSTSUCCEED));
		ASSERT(err == 0);
	}
}

static void
zil_claim_log_record(zilog_t *zilog, lr_t *lrc, void *tx, uint64_t first_txg)
{
	if (lrc->lrc_txtype == TX_WRITE) {
		lr_write_t *lr = (lr_write_t *)lrc;
		zil_claim_log_block(zilog, &lr->lr_blkptr, tx, first_txg);
	}
}

/* ARGSUSED */
static void
zil_free_log_block(zilog_t *zilog, blkptr_t *bp, void *tx, uint64_t claim_txg)
{
	zio_free_blk(zilog->zl_spa, bp, dmu_tx_get_txg(tx));
}

static void
zil_free_log_record(zilog_t *zilog, lr_t *lrc, void *tx, uint64_t claim_txg)
{
	/*
	 * If we previously claimed it, we need to free it.
	 */
	if (claim_txg != 0 && lrc->lrc_txtype == TX_WRITE) {
		lr_write_t *lr = (lr_write_t *)lrc;
		blkptr_t *bp = &lr->lr_blkptr;
		if (bp->blk_birth >= claim_txg &&
		    !zil_dva_tree_add(&zilog->zl_dva_tree, BP_IDENTITY(bp))) {
			(void) arc_free(NULL, zilog->zl_spa,
			    dmu_tx_get_txg(tx), bp, NULL, NULL, ARC_WAIT);
		}
	}
}

/*
 * Create an on-disk intent log.
 */
static void
zil_create(zilog_t *zilog)
{
	const zil_header_t *zh = zilog->zl_header;
	lwb_t *lwb;
	uint64_t txg = 0;
	dmu_tx_t *tx = NULL;
	blkptr_t blk;
	int error = 0;

	/*
	 * Wait for any previous destroy to complete.
	 */
	txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);

	ASSERT(zh->zh_claim_txg == 0);
	ASSERT(zh->zh_replay_seq == 0);

	blk = zh->zh_log;

	/*
	 * If we don't already have an initial log block, allocate one now.
	 */
	if (BP_IS_HOLE(&blk)) {
		tx = dmu_tx_create(zilog->zl_os);
		(void) dmu_tx_assign(tx, TXG_WAIT);
		dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
		txg = dmu_tx_get_txg(tx);

		error = zio_alloc_blk(zilog->zl_spa, ZIL_MIN_BLKSZ, &blk,
		    NULL, txg);

		if (error == 0)
			zil_init_log_chain(zilog, &blk);
	}

	/*
	 * Allocate a log write buffer (lwb) for the first log block.
	 */
	if (error == 0) {
		lwb = kmem_cache_alloc(zil_lwb_cache, KM_SLEEP);
		lwb->lwb_zilog = zilog;
		lwb->lwb_blk = blk;
		lwb->lwb_nused = 0;
		lwb->lwb_sz = BP_GET_LSIZE(&lwb->lwb_blk);
		lwb->lwb_buf = zio_buf_alloc(lwb->lwb_sz);
		lwb->lwb_max_txg = txg;
		lwb->lwb_zio = NULL;

		mutex_enter(&zilog->zl_lock);
		list_insert_tail(&zilog->zl_lwb_list, lwb);
		mutex_exit(&zilog->zl_lock);
	}

	/*
	 * If we just allocated the first log block, commit our transaction
	 * and wait for zil_sync() to stuff the block pointer into zh_log.
	 * (zh is part of the MOS, so we cannot modify it in open context.)
	 */
	if (tx != NULL) {
		dmu_tx_commit(tx);
		txg_wait_synced(zilog->zl_dmu_pool, txg);
	}

	ASSERT(bcmp(&blk, &zh->zh_log, sizeof (blk)) == 0);
}

/*
 * In one tx, free all log blocks and clear the log header.
 * If keep_first is set, then we're replaying a log with no content.
 * We want to keep the first block, however, so that the first
 * synchronous transaction doesn't require a txg_wait_synced()
 * in zil_create(). We don't need to txg_wait_synced() here either
 * when keep_first is set, because both zil_create() and zil_destroy()
 * will wait for any in-progress destroys to complete.
 */
void
zil_destroy(zilog_t *zilog, boolean_t keep_first)
{
	const zil_header_t *zh = zilog->zl_header;
	lwb_t *lwb;
	dmu_tx_t *tx;
	uint64_t txg;

	/*
	 * Wait for any previous destroy to complete.
	 */
	txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);

	if (BP_IS_HOLE(&zh->zh_log))
		return;

	tx = dmu_tx_create(zilog->zl_os);
	(void) dmu_tx_assign(tx, TXG_WAIT);
	dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
	txg = dmu_tx_get_txg(tx);

	mutex_enter(&zilog->zl_lock);

	/*
	 * It is possible for the ZIL to get the previously mounted zilog
	 * structure of the same dataset if quickly remounted and the dbuf
	 * eviction has not completed. In this case we can see a non-empty
	 * lwb list and keep_first will be set. We fix this by clearing
	 * keep_first. This will be slower but it's very rare.
	 */
	if (!list_is_empty(&zilog->zl_lwb_list) && keep_first)
		keep_first = B_FALSE;

	ASSERT3U(zilog->zl_destroy_txg, <, txg);
	zilog->zl_destroy_txg = txg;
	zilog->zl_keep_first = keep_first;

	if (!list_is_empty(&zilog->zl_lwb_list)) {
		ASSERT(zh->zh_claim_txg == 0);
		ASSERT(!keep_first);
		while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
			list_remove(&zilog->zl_lwb_list, lwb);
			if (lwb->lwb_buf != NULL)
				zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
			zio_free_blk(zilog->zl_spa, &lwb->lwb_blk, txg);
			kmem_cache_free(zil_lwb_cache, lwb);
		}
	} else {
		if (!keep_first) {
			(void) zil_parse(zilog, zil_free_log_block,
			    zil_free_log_record, tx, zh->zh_claim_txg);
		}
	}
	mutex_exit(&zilog->zl_lock);

	dmu_tx_commit(tx);
}

/*
 * zil_rollback_destroy() is only called by the rollback code.
 * We already have a syncing tx. Rollback has exclusive access to the
 * dataset, so we don't have to worry about concurrent zil access.
 * The actual freeing of any log blocks occurs in zil_sync() later in
 * this txg syncing phase.
 */
void
zil_rollback_destroy(zilog_t *zilog, dmu_tx_t *tx)
{
	const zil_header_t *zh = zilog->zl_header;
	uint64_t txg;

	if (BP_IS_HOLE(&zh->zh_log))
		return;

	txg = dmu_tx_get_txg(tx);
	ASSERT3U(zilog->zl_destroy_txg, <, txg);
	zilog->zl_destroy_txg = txg;
	zilog->zl_keep_first = B_FALSE;

	/*
	 * Ensure there's no outstanding ZIL IO. Either no lwbs, or just
	 * the unused one allocated in advance, is OK.
	 */
	ASSERT(zilog->zl_lwb_list.list_head.list_next ==
	    zilog->zl_lwb_list.list_head.list_prev);
	(void) zil_parse(zilog, zil_free_log_block, zil_free_log_record,
	    tx, zh->zh_claim_txg);
}

int
zil_claim(char *osname, void *txarg)
{
	dmu_tx_t *tx = txarg;
	uint64_t first_txg = dmu_tx_get_txg(tx);
	zilog_t *zilog;
	zil_header_t *zh;
	objset_t *os;
	int error;

	error = dmu_objset_open(osname, DMU_OST_ANY, DS_MODE_USER, &os);
	if (error) {
		cmn_err(CE_WARN, "can't open objset for %s", osname);
		return (0);
	}

	zilog = dmu_objset_zil(os);
	zh = zil_header_in_syncing_context(zilog);

	/*
	 * Claim all log blocks if we haven't already done so, and remember
	 * the highest claimed sequence number. This ensures that if we can
	 * read only part of the log now (e.g. due to a missing device),
	 * but we can read the entire log later, we will not try to replay
	 * or destroy beyond the last block we successfully claimed.
	 */
	ASSERT3U(zh->zh_claim_txg, <=, first_txg);
	if (zh->zh_claim_txg == 0 && !BP_IS_HOLE(&zh->zh_log)) {
		zh->zh_claim_txg = first_txg;
		zh->zh_claim_seq = zil_parse(zilog, zil_claim_log_block,
		    zil_claim_log_record, tx, first_txg);
		dsl_dataset_dirty(dmu_objset_ds(os), tx);
	}

	ASSERT3U(first_txg, ==, (spa_last_synced_txg(zilog->zl_spa) + 1));
	dmu_objset_close(os);
	return (0);
}

/*
 * Check the log by walking the log chain.
 * Checksum errors are ok as they indicate the end of the chain.
 * Any other error (no device or read failure) returns an error.
 */
/* ARGSUSED */
int
zil_check_log_chain(char *osname, void *txarg)
{
	zilog_t *zilog;
	zil_header_t *zh;
	blkptr_t blk;
	arc_buf_t *abuf;
	objset_t *os;
	char *lrbuf;
	zil_trailer_t *ztp;
	int error;

	error = dmu_objset_open(osname, DMU_OST_ANY, DS_MODE_USER, &os);
	if (error) {
		cmn_err(CE_WARN, "can't open objset for %s", osname);
		return (0);
	}

	zilog = dmu_objset_zil(os);
	zh = zil_header_in_syncing_context(zilog);
	blk = zh->zh_log;
	if (BP_IS_HOLE(&blk)) {
		dmu_objset_close(os);
		return (0); /* no chain */
	}

	for (;;) {
		error = zil_read_log_block(zilog, &blk, &abuf);
		if (error)
			break;
		lrbuf = abuf->b_data;
		ztp = (zil_trailer_t *)(lrbuf + BP_GET_LSIZE(&blk)) - 1;
		blk = ztp->zit_next_blk;
		VERIFY(arc_buf_remove_ref(abuf, &abuf) == 1);
	}
	dmu_objset_close(os);
	if (error == ECKSUM)
		return (0); /* normal end of chain */
	return (error);
}

/*
 * Clear a log chain
 */
/* ARGSUSED */
int
zil_clear_log_chain(char *osname, void *txarg)
{
	zilog_t *zilog;
	zil_header_t *zh;
	objset_t *os;
	dmu_tx_t *tx;
	int error;

	error = dmu_objset_open(osname, DMU_OST_ANY, DS_MODE_USER, &os);
	if (error) {
		cmn_err(CE_WARN, "can't open objset for %s", osname);
		return (0);
	}

	zilog = dmu_objset_zil(os);
	tx = dmu_tx_create(zilog->zl_os);
	(void) dmu_tx_assign(tx, TXG_WAIT);
	zh = zil_header_in_syncing_context(zilog);
	BP_ZERO(&zh->zh_log);
	dsl_dataset_dirty(dmu_objset_ds(os), tx);
	dmu_tx_commit(tx);
	dmu_objset_close(os);
	return (0);
}

static int
zil_vdev_compare(const void *x1, const void *x2)
{
	uint64_t v1 = ((zil_vdev_node_t *)x1)->zv_vdev;
	uint64_t v2 = ((zil_vdev_node_t *)x2)->zv_vdev;

	if (v1 < v2)
		return (-1);
	if (v1 > v2)
		return (1);

	return (0);
}

void
zil_add_block(zilog_t *zilog, blkptr_t *bp)
{
	avl_tree_t *t = &zilog->zl_vdev_tree;
	avl_index_t where;
	zil_vdev_node_t *zv, zvsearch;
	int ndvas = BP_GET_NDVAS(bp);
	int i;

	if (zfs_nocacheflush)
		return;

	ASSERT(zilog->zl_writer);

	/*
	 * Even though we're zl_writer, we still need a lock because the
	 * zl_get_data() callbacks may have dmu_sync() done callbacks
	 * that will run concurrently.
	 */
	mutex_enter(&zilog->zl_vdev_lock);
	for (i = 0; i < ndvas; i++) {
		zvsearch.zv_vdev = DVA_GET_VDEV(&bp->blk_dva[i]);
		if (avl_find(t, &zvsearch, &where) == NULL) {
			zv = kmem_alloc(sizeof (*zv), KM_SLEEP);
			zv->zv_vdev = zvsearch.zv_vdev;
			avl_insert(t, zv, where);
		}
	}
	mutex_exit(&zilog->zl_vdev_lock);
}

void
zil_flush_vdevs(zilog_t *zilog)
{
	spa_t *spa = zilog->zl_spa;
	avl_tree_t *t = &zilog->zl_vdev_tree;
	void *cookie = NULL;
	zil_vdev_node_t *zv;
	zio_t *zio;

	ASSERT(zilog->zl_writer);

	/*
	 * We don't need zl_vdev_lock here because we're the zl_writer,
	 * and all zl_get_data() callbacks are done.
	 */
	if (avl_numnodes(t) == 0)
		return;

	spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);

	zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);

	while ((zv = avl_destroy_nodes(t, &cookie)) != NULL) {
		vdev_t *vd = vdev_lookup_top(spa, zv->zv_vdev);
		if (vd != NULL)
			zio_flush(zio, vd);
		kmem_free(zv, sizeof (*zv));
	}

	/*
	 * Wait for all the flushes to complete. Not all devices actually
	 * support the DKIOCFLUSHWRITECACHE ioctl, so it's OK if it fails.
	 */
	(void) zio_wait(zio);

	spa_config_exit(spa, SCL_STATE, FTAG);
}

/*
 * Function called when a log block write completes
 */
static void
zil_lwb_write_done(zio_t *zio)
{
	lwb_t *lwb = zio->io_private;
	zilog_t *zilog = lwb->lwb_zilog;

	ASSERT(BP_GET_COMPRESS(zio->io_bp) == ZIO_COMPRESS_OFF);
	ASSERT(BP_GET_CHECKSUM(zio->io_bp) == ZIO_CHECKSUM_ZILOG);
	ASSERT(BP_GET_TYPE(zio->io_bp) == DMU_OT_INTENT_LOG);
	ASSERT(BP_GET_LEVEL(zio->io_bp) == 0);
	ASSERT(BP_GET_BYTEORDER(zio->io_bp) == ZFS_HOST_BYTEORDER);
	ASSERT(!BP_IS_GANG(zio->io_bp));
	ASSERT(!BP_IS_HOLE(zio->io_bp));
	ASSERT(zio->io_bp->blk_fill == 0);

	/*
	 * Now that we've written this log block, we have a stable pointer
	 * to the next block in the chain, so it's OK to let the txg in
	 * which we allocated the next block sync.
	 */
	txg_rele_to_sync(&lwb->lwb_txgh);

	zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
	mutex_enter(&zilog->zl_lock);
	lwb->lwb_buf = NULL;
	if (zio->io_error)
		zilog->zl_log_error = B_TRUE;
	mutex_exit(&zilog->zl_lock);
}

/*
 * Initialize the io for a log block.
 */
static void
zil_lwb_write_init(zilog_t *zilog, lwb_t *lwb)
{
	zbookmark_t zb;

	zb.zb_objset = lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_OBJSET];
	zb.zb_object = 0;
	zb.zb_level = -1;
	zb.zb_blkid = lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_SEQ];

	if (zilog->zl_root_zio == NULL) {
		zilog->zl_root_zio = zio_root(zilog->zl_spa, NULL, NULL,
		    ZIO_FLAG_CANFAIL);
	}
	if (lwb->lwb_zio == NULL) {
		lwb->lwb_zio = zio_rewrite(zilog->zl_root_zio, zilog->zl_spa,
		    0, &lwb->lwb_blk, lwb->lwb_buf,
		    lwb->lwb_sz, zil_lwb_write_done, lwb,
		    ZIO_PRIORITY_LOG_WRITE, ZIO_FLAG_CANFAIL, &zb);
	}
}

/*
 * Start a log block write and advance to the next log block.
 * Calls are serialized.
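 *
 * (Only the single zl_writer thread calls this, which is what makes the
 * unlocked manipulation of the lwb and its trailer below safe.)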
 */
static lwb_t *
zil_lwb_write_start(zilog_t *zilog, lwb_t *lwb)
{
	lwb_t *nlwb;
	zil_trailer_t *ztp = (zil_trailer_t *)(lwb->lwb_buf + lwb->lwb_sz) - 1;
	spa_t *spa = zilog->zl_spa;
	blkptr_t *bp = &ztp->zit_next_blk;
	uint64_t txg;
	uint64_t zil_blksz;
	int error;

	ASSERT(lwb->lwb_nused <= ZIL_BLK_DATA_SZ(lwb));

	/*
	 * Allocate the next block and save its address in this block
	 * before writing it in order to establish the log chain.
	 * Note that if the allocation of nlwb synced before we wrote
	 * the block that points at it (lwb), we'd leak it if we crashed.
	 * Therefore, we don't do txg_rele_to_sync() until zil_lwb_write_done().
	 */
	txg = txg_hold_open(zilog->zl_dmu_pool, &lwb->lwb_txgh);
	txg_rele_to_quiesce(&lwb->lwb_txgh);

	/*
	 * Pick a ZIL blocksize. We request a size that is the
	 * maximum of the previous used size, the current used size and
	 * the amount waiting in the queue.
	 */
	zil_blksz = MAX(zilog->zl_prev_used,
	    zilog->zl_cur_used + sizeof (*ztp));
	zil_blksz = MAX(zil_blksz, zilog->zl_itx_list_sz + sizeof (*ztp));
	zil_blksz = P2ROUNDUP_TYPED(zil_blksz, ZIL_MIN_BLKSZ, uint64_t);
	if (zil_blksz > ZIL_MAX_BLKSZ)
		zil_blksz = ZIL_MAX_BLKSZ;

	BP_ZERO(bp);
	/* pass the old blkptr in order to spread log blocks across devs */
	error = zio_alloc_blk(spa, zil_blksz, bp, &lwb->lwb_blk, txg);
	if (error) {
		dmu_tx_t *tx = dmu_tx_create_assigned(zilog->zl_dmu_pool, txg);

		/*
		 * We dirty the dataset to ensure that zil_sync() will
		 * be called to remove this lwb from our zl_lwb_list.
		 * Failing to do so may leave an lwb with a NULL lwb_buf
		 * hanging around on the zl_lwb_list.
		 */
		dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
		dmu_tx_commit(tx);

		/*
		 * We've just experienced an allocation failure, so
		 * terminate the current lwb and send it on its way.
		 */
		ztp->zit_pad = 0;
		ztp->zit_nused = lwb->lwb_nused;
		ztp->zit_bt.zbt_cksum = lwb->lwb_blk.blk_cksum;
		zio_nowait(lwb->lwb_zio);

		/*
		 * By returning NULL the caller will call txg_wait_synced().
		 */
		return (NULL);
	}

	ASSERT3U(bp->blk_birth, ==, txg);
	ztp->zit_pad = 0;
	ztp->zit_nused = lwb->lwb_nused;
	ztp->zit_bt.zbt_cksum = lwb->lwb_blk.blk_cksum;
	bp->blk_cksum = lwb->lwb_blk.blk_cksum;
	bp->blk_cksum.zc_word[ZIL_ZC_SEQ]++;

	/*
	 * Allocate a new log write buffer (lwb).
	 */
	nlwb = kmem_cache_alloc(zil_lwb_cache, KM_SLEEP);

	nlwb->lwb_zilog = zilog;
	nlwb->lwb_blk = *bp;
	nlwb->lwb_nused = 0;
	nlwb->lwb_sz = BP_GET_LSIZE(&nlwb->lwb_blk);
	nlwb->lwb_buf = zio_buf_alloc(nlwb->lwb_sz);
	nlwb->lwb_max_txg = txg;
	nlwb->lwb_zio = NULL;

	/*
	 * Put new lwb at the end of the log chain
	 */
	mutex_enter(&zilog->zl_lock);
	list_insert_tail(&zilog->zl_lwb_list, nlwb);
	mutex_exit(&zilog->zl_lock);

	/* Record the block for later vdev flushing */
	zil_add_block(zilog, &lwb->lwb_blk);

	/*
	 * kick off the write for the old log block
	 */
	dprintf_bp(&lwb->lwb_blk, "lwb %p txg %llu: ", lwb, txg);
	ASSERT(lwb->lwb_zio);
	zio_nowait(lwb->lwb_zio);

	return (nlwb);
}

static lwb_t *
zil_lwb_commit(zilog_t *zilog, itx_t *itx, lwb_t *lwb)
{
	lr_t *lrc = &itx->itx_lr; /* common log record */
	lr_write_t *lr = (lr_write_t *)lrc;
	uint64_t txg = lrc->lrc_txg;
	uint64_t reclen = lrc->lrc_reclen;
	uint64_t dlen;

	if (lwb == NULL)
		return (NULL);
	ASSERT(lwb->lwb_buf != NULL);

	if (lrc->lrc_txtype == TX_WRITE && itx->itx_wr_state == WR_NEED_COPY)
		dlen = P2ROUNDUP_TYPED(
		    lr->lr_length, sizeof (uint64_t), uint64_t);
	else
		dlen = 0;

	zilog->zl_cur_used += (reclen + dlen);

	zil_lwb_write_init(zilog, lwb);

	/*
	 * If this record won't fit in the current log block, start a new one.
	 */
	if (lwb->lwb_nused + reclen + dlen > ZIL_BLK_DATA_SZ(lwb)) {
		lwb = zil_lwb_write_start(zilog, lwb);
		if (lwb == NULL)
			return (NULL);
		zil_lwb_write_init(zilog, lwb);
		ASSERT(lwb->lwb_nused == 0);
		if (reclen + dlen > ZIL_BLK_DATA_SZ(lwb)) {
			txg_wait_synced(zilog->zl_dmu_pool, txg);
			return (lwb);
		}
	}

	/*
	 * Update lrc_seq to be the log record sequence number (see zil.h),
	 * then copy the record to the log buffer.
	 */
	lrc->lrc_seq = ++zilog->zl_lr_seq; /* we are single threaded */
	bcopy(lrc, lwb->lwb_buf + lwb->lwb_nused, reclen);

	/*
	 * If it's a write, fetch the data or get its blkptr as appropriate.
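	 *
	 * (WR_COPIED records already carry their data inline; for
	 * WR_NEED_COPY the data is copied into the log block right after
	 * the record, and for WR_INDIRECT zl_get_data() only fills in the
	 * record's block pointer.)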
	 */
	if (lrc->lrc_txtype == TX_WRITE) {
		if (txg > spa_freeze_txg(zilog->zl_spa))
			txg_wait_synced(zilog->zl_dmu_pool, txg);
		if (itx->itx_wr_state != WR_COPIED) {
			char *dbuf;
			int error;

			/* alignment is guaranteed */
			lr = (lr_write_t *)(lwb->lwb_buf + lwb->lwb_nused);
			if (dlen) {
				ASSERT(itx->itx_wr_state == WR_NEED_COPY);
				dbuf = lwb->lwb_buf + lwb->lwb_nused + reclen;
				lr->lr_common.lrc_reclen += dlen;
			} else {
				ASSERT(itx->itx_wr_state == WR_INDIRECT);
				dbuf = NULL;
			}
			error = zilog->zl_get_data(
			    itx->itx_private, lr, dbuf, lwb->lwb_zio);
			if (error) {
				ASSERT(error == ENOENT || error == EEXIST ||
				    error == EALREADY);
				return (lwb);
			}
		}
	}

	lwb->lwb_nused += reclen + dlen;
	lwb->lwb_max_txg = MAX(lwb->lwb_max_txg, txg);
	ASSERT3U(lwb->lwb_nused, <=, ZIL_BLK_DATA_SZ(lwb));
	ASSERT3U(P2PHASE(lwb->lwb_nused, sizeof (uint64_t)), ==, 0);

	return (lwb);
}

itx_t *
zil_itx_create(uint64_t txtype, size_t lrsize)
{
	itx_t *itx;

	lrsize = P2ROUNDUP_TYPED(lrsize, sizeof (uint64_t), size_t);

	itx = kmem_alloc(offsetof(itx_t, itx_lr) + lrsize, KM_SLEEP);
	itx->itx_lr.lrc_txtype = txtype;
	itx->itx_lr.lrc_reclen = lrsize;
	itx->itx_sod = lrsize; /* if write & WR_NEED_COPY will be increased */
	itx->itx_lr.lrc_seq = 0; /* defensive */

	return (itx);
}

uint64_t
zil_itx_assign(zilog_t *zilog, itx_t *itx, dmu_tx_t *tx)
{
	uint64_t seq;

	ASSERT(itx->itx_lr.lrc_seq == 0);

	mutex_enter(&zilog->zl_lock);
	list_insert_tail(&zilog->zl_itx_list, itx);
	zilog->zl_itx_list_sz += itx->itx_sod;
	itx->itx_lr.lrc_txg = dmu_tx_get_txg(tx);
	itx->itx_lr.lrc_seq = seq = ++zilog->zl_itx_seq;
	mutex_exit(&zilog->zl_lock);

	return (seq);
}

/*
 * Free up all in-memory intent log transactions that have now been synced.
 */
static void
zil_itx_clean(zilog_t *zilog)
{
	uint64_t synced_txg = spa_last_synced_txg(zilog->zl_spa);
	uint64_t freeze_txg = spa_freeze_txg(zilog->zl_spa);
	list_t clean_list;
	itx_t *itx;

	list_create(&clean_list, sizeof (itx_t), offsetof(itx_t, itx_node));

	mutex_enter(&zilog->zl_lock);
	/* wait for a log writer to finish walking list */
	while (zilog->zl_writer) {
		cv_wait(&zilog->zl_cv_writer, &zilog->zl_lock);
	}

	/*
	 * Move the sync'd log transactions to a separate list so we can call
	 * kmem_free without holding the zl_lock.
	 *
	 * There is no need to set zl_writer as we don't drop zl_lock here
	 */
	while ((itx = list_head(&zilog->zl_itx_list)) != NULL &&
	    itx->itx_lr.lrc_txg <= MIN(synced_txg, freeze_txg)) {
		list_remove(&zilog->zl_itx_list, itx);
		zilog->zl_itx_list_sz -= itx->itx_sod;
		list_insert_tail(&clean_list, itx);
	}
	cv_broadcast(&zilog->zl_cv_writer);
	mutex_exit(&zilog->zl_lock);

	/* destroy sync'd log transactions */
	while ((itx = list_head(&clean_list)) != NULL) {
		list_remove(&clean_list, itx);
		kmem_free(itx, offsetof(itx_t, itx_lr)
		    + itx->itx_lr.lrc_reclen);
	}
	list_destroy(&clean_list);
}

/*
 * If there are any in-memory intent log transactions which have now been
 * synced then start up a taskq to free them.
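 *
 * (The actual walk and kmem_free() of the synced itxs is done by
 * zil_itx_clean() on zl_clean_taskq, keeping that work out of the
 * caller's context.)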
 */
void
zil_clean(zilog_t *zilog)
{
	itx_t *itx;

	mutex_enter(&zilog->zl_lock);
	itx = list_head(&zilog->zl_itx_list);
	if ((itx != NULL) &&
	    (itx->itx_lr.lrc_txg <= spa_last_synced_txg(zilog->zl_spa))) {
		(void) taskq_dispatch(zilog->zl_clean_taskq,
		    (task_func_t *)zil_itx_clean, zilog, TQ_SLEEP);
	}
	mutex_exit(&zilog->zl_lock);
}

static void
zil_commit_writer(zilog_t *zilog, uint64_t seq, uint64_t foid)
{
	uint64_t txg;
	uint64_t commit_seq = 0;
	itx_t *itx, *itx_next = (itx_t *)-1;
	lwb_t *lwb;
	spa_t *spa;

	zilog->zl_writer = B_TRUE;
	ASSERT(zilog->zl_root_zio == NULL);
	spa = zilog->zl_spa;

	if (zilog->zl_suspend) {
		lwb = NULL;
	} else {
		lwb = list_tail(&zilog->zl_lwb_list);
		if (lwb == NULL) {
			/*
			 * Return if there's nothing to flush before we
			 * dirty the fs by calling zil_create()
			 */
			if (list_is_empty(&zilog->zl_itx_list)) {
				zilog->zl_writer = B_FALSE;
				return;
			}
			mutex_exit(&zilog->zl_lock);
			zil_create(zilog);
			mutex_enter(&zilog->zl_lock);
			lwb = list_tail(&zilog->zl_lwb_list);
		}
	}

	/* Loop through in-memory log transactions filling log blocks. */
	DTRACE_PROBE1(zil__cw1, zilog_t *, zilog);
	for (;;) {
		/*
		 * Find the next itx to push:
		 * Push all transactions related to specified foid and all
		 * other transactions except TX_WRITE, TX_TRUNCATE,
		 * TX_SETATTR and TX_ACL for all other files.
		 */
		if (itx_next != (itx_t *)-1)
			itx = itx_next;
		else
			itx = list_head(&zilog->zl_itx_list);
		for (; itx != NULL; itx = list_next(&zilog->zl_itx_list, itx)) {
			if (foid == 0) /* push all foids? */
				break;
			if (itx->itx_sync) /* push all O_[D]SYNC */
				break;
			switch (itx->itx_lr.lrc_txtype) {
			case TX_SETATTR:
			case TX_WRITE:
			case TX_TRUNCATE:
			case TX_ACL:
				/* lr_foid is same offset for these records */
				if (((lr_write_t *)&itx->itx_lr)->lr_foid
				    != foid) {
					continue; /* skip this record */
				}
			}
			break;
		}
		if (itx == NULL)
			break;

		if ((itx->itx_lr.lrc_seq > seq) &&
		    ((lwb == NULL) || (lwb->lwb_nused == 0) ||
		    (lwb->lwb_nused + itx->itx_sod > ZIL_BLK_DATA_SZ(lwb)))) {
			break;
		}

		/*
		 * Save the next pointer. Even though we soon drop
		 * zl_lock all threads that may change the list
		 * (another writer or zil_itx_clean) can't do so until
		 * they have zl_writer.
		 */
		itx_next = list_next(&zilog->zl_itx_list, itx);
		list_remove(&zilog->zl_itx_list, itx);
		zilog->zl_itx_list_sz -= itx->itx_sod;
		mutex_exit(&zilog->zl_lock);
		txg = itx->itx_lr.lrc_txg;
		ASSERT(txg);

		if (txg > spa_last_synced_txg(spa) ||
		    txg > spa_freeze_txg(spa))
			lwb = zil_lwb_commit(zilog, itx, lwb);
		kmem_free(itx, offsetof(itx_t, itx_lr)
		    + itx->itx_lr.lrc_reclen);
		mutex_enter(&zilog->zl_lock);
	}
	DTRACE_PROBE1(zil__cw2, zilog_t *, zilog);
	/* determine commit sequence number */
	itx = list_head(&zilog->zl_itx_list);
	if (itx)
		commit_seq = itx->itx_lr.lrc_seq;
	else
		commit_seq = zilog->zl_itx_seq;
	mutex_exit(&zilog->zl_lock);

	/* write the last block out */
	if (lwb != NULL && lwb->lwb_zio != NULL)
		lwb = zil_lwb_write_start(zilog, lwb);

	zilog->zl_prev_used = zilog->zl_cur_used;
	zilog->zl_cur_used = 0;

	/*
	 * Wait if necessary for the log blocks to be on stable storage.
	 */
	if (zilog->zl_root_zio) {
		DTRACE_PROBE1(zil__cw3, zilog_t *, zilog);
		(void) zio_wait(zilog->zl_root_zio);
		zilog->zl_root_zio = NULL;
		DTRACE_PROBE1(zil__cw4, zilog_t *, zilog);
		zil_flush_vdevs(zilog);
	}

	if (zilog->zl_log_error || lwb == NULL) {
		zilog->zl_log_error = 0;
		txg_wait_synced(zilog->zl_dmu_pool, 0);
	}

	mutex_enter(&zilog->zl_lock);
	zilog->zl_writer = B_FALSE;

	ASSERT3U(commit_seq, >=, zilog->zl_commit_seq);
	zilog->zl_commit_seq = commit_seq;
}

/*
 * Push zfs transactions to stable storage up to the supplied sequence number.
 * If foid is 0 push out all transactions, otherwise push only those
 * for that file or that might have been used to create that file.
 */
void
zil_commit(zilog_t *zilog, uint64_t seq, uint64_t foid)
{
	if (zilog == NULL || seq == 0)
		return;

	mutex_enter(&zilog->zl_lock);

	seq = MIN(seq, zilog->zl_itx_seq); /* cap seq at largest itx seq */

	while (zilog->zl_writer) {
		cv_wait(&zilog->zl_cv_writer, &zilog->zl_lock);
		if (seq < zilog->zl_commit_seq) {
			mutex_exit(&zilog->zl_lock);
			return;
		}
	}
	zil_commit_writer(zilog, seq, foid); /* drops zl_lock */
	/* wake up others waiting on the commit */
	cv_broadcast(&zilog->zl_cv_writer);
	mutex_exit(&zilog->zl_lock);
}

/*
 * Called in syncing context to free committed log blocks and update log header.
 */
void
zil_sync(zilog_t *zilog, dmu_tx_t *tx)
{
	zil_header_t *zh = zil_header_in_syncing_context(zilog);
	uint64_t txg = dmu_tx_get_txg(tx);
	spa_t *spa = zilog->zl_spa;
	lwb_t *lwb;

	mutex_enter(&zilog->zl_lock);

	ASSERT(zilog->zl_stop_sync == 0);

	zh->zh_replay_seq = zilog->zl_replay_seq[txg & TXG_MASK];

	if (zilog->zl_destroy_txg == txg) {
		blkptr_t blk = zh->zh_log;

		ASSERT(list_head(&zilog->zl_lwb_list) == NULL);
		ASSERT(spa_sync_pass(spa) == 1);

		bzero(zh, sizeof (zil_header_t));
		bzero(zilog->zl_replay_seq, sizeof (zilog->zl_replay_seq));

		if (zilog->zl_keep_first) {
			/*
			 * If this block was part of a log chain that couldn't
			 * be claimed because a device was missing during
			 * zil_claim(), but that device later returns,
			 * then this block could erroneously appear valid.
			 * To guard against this, assign a new GUID to the new
			 * log chain so it doesn't matter what blk points to.
			 */
			zil_init_log_chain(zilog, &blk);
			zh->zh_log = blk;
		}
	}

	for (;;) {
		lwb = list_head(&zilog->zl_lwb_list);
		if (lwb == NULL) {
			mutex_exit(&zilog->zl_lock);
			return;
		}
		zh->zh_log = lwb->lwb_blk;
		if (lwb->lwb_buf != NULL || lwb->lwb_max_txg > txg)
			break;
		list_remove(&zilog->zl_lwb_list, lwb);
		zio_free_blk(spa, &lwb->lwb_blk, txg);
		kmem_cache_free(zil_lwb_cache, lwb);

		/*
		 * If we don't have anything left in the lwb list then
		 * we've had an allocation failure and we need to zero
		 * out the zil_header blkptr so that we don't end
		 * up freeing the same block twice.
		 */
		if (list_head(&zilog->zl_lwb_list) == NULL)
			BP_ZERO(&zh->zh_log);
	}
	mutex_exit(&zilog->zl_lock);
}

void
zil_init(void)
{
	zil_lwb_cache = kmem_cache_create("zil_lwb_cache",
	    sizeof (struct lwb), 0, NULL, NULL, NULL, NULL, NULL, 0);
}

void
zil_fini(void)
{
	kmem_cache_destroy(zil_lwb_cache);
}

zilog_t *
zil_alloc(objset_t *os, zil_header_t *zh_phys)
{
	zilog_t *zilog;

	zilog = kmem_zalloc(sizeof (zilog_t), KM_SLEEP);

	zilog->zl_header = zh_phys;
	zilog->zl_os = os;
	zilog->zl_spa = dmu_objset_spa(os);
	zilog->zl_dmu_pool = dmu_objset_pool(os);
	zilog->zl_destroy_txg = TXG_INITIAL - 1;

	mutex_init(&zilog->zl_lock, NULL, MUTEX_DEFAULT, NULL);

	list_create(&zilog->zl_itx_list, sizeof (itx_t),
	    offsetof(itx_t, itx_node));

	list_create(&zilog->zl_lwb_list, sizeof (lwb_t),
	    offsetof(lwb_t, lwb_node));

	mutex_init(&zilog->zl_vdev_lock, NULL, MUTEX_DEFAULT, NULL);

	avl_create(&zilog->zl_vdev_tree, zil_vdev_compare,
	    sizeof (zil_vdev_node_t), offsetof(zil_vdev_node_t, zv_node));

	cv_init(&zilog->zl_cv_writer, NULL, CV_DEFAULT, NULL);
	cv_init(&zilog->zl_cv_suspend, NULL, CV_DEFAULT, NULL);

	return (zilog);
}

void
zil_free(zilog_t *zilog)
{
	lwb_t *lwb;

	zilog->zl_stop_sync = 1;

	while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
		list_remove(&zilog->zl_lwb_list, lwb);
		if (lwb->lwb_buf != NULL)
			zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
		kmem_cache_free(zil_lwb_cache, lwb);
	}
	list_destroy(&zilog->zl_lwb_list);

	avl_destroy(&zilog->zl_vdev_tree);
	mutex_destroy(&zilog->zl_vdev_lock);

	ASSERT(list_head(&zilog->zl_itx_list) == NULL);
	list_destroy(&zilog->zl_itx_list);
	mutex_destroy(&zilog->zl_lock);

	cv_destroy(&zilog->zl_cv_writer);
	cv_destroy(&zilog->zl_cv_suspend);

	kmem_free(zilog, sizeof (zilog_t));
}

/*
 * return true if the initial log block is not valid
 */
static boolean_t
zil_empty(zilog_t *zilog)
{
	const zil_header_t *zh = zilog->zl_header;
	arc_buf_t *abuf = NULL;

	if (BP_IS_HOLE(&zh->zh_log))
		return (B_TRUE);

	if (zil_read_log_block(zilog, &zh->zh_log, &abuf) != 0)
		return (B_TRUE);

	VERIFY(arc_buf_remove_ref(abuf, &abuf) == 1);
	return (B_FALSE);
}

/*
 * Open an intent log.
 */
zilog_t *
zil_open(objset_t *os, zil_get_data_t *get_data)
{
	zilog_t *zilog = dmu_objset_zil(os);

	zilog->zl_get_data = get_data;
	zilog->zl_clean_taskq = taskq_create("zil_clean", 1, minclsyspri,
	    2, 2, TASKQ_PREPOPULATE);

	return (zilog);
}

/*
 * Close an intent log.
 */
void
zil_close(zilog_t *zilog)
{
	/*
	 * If the log isn't already committed, mark the objset dirty
	 * (so zil_sync() will be called) and wait for that txg to sync.
	 */
	if (!zil_is_committed(zilog)) {
		uint64_t txg;
		dmu_tx_t *tx = dmu_tx_create(zilog->zl_os);
		(void) dmu_tx_assign(tx, TXG_WAIT);
		dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
		txg = dmu_tx_get_txg(tx);
		dmu_tx_commit(tx);
		txg_wait_synced(zilog->zl_dmu_pool, txg);
	}

	taskq_destroy(zilog->zl_clean_taskq);
	zilog->zl_clean_taskq = NULL;
	zilog->zl_get_data = NULL;

	zil_itx_clean(zilog);
	ASSERT(list_head(&zilog->zl_itx_list) == NULL);
}

/*
 * Suspend an intent log. While in suspended mode, we still honor
 * synchronous semantics, but we rely on txg_wait_synced() to do it.
 * We suspend the log briefly when taking a snapshot so that the snapshot
 * contains all the data it's supposed to, and has an empty intent log.
 */
int
zil_suspend(zilog_t *zilog)
{
	const zil_header_t *zh = zilog->zl_header;

	mutex_enter(&zilog->zl_lock);
	if (zh->zh_claim_txg != 0) {		/* unplayed log */
		mutex_exit(&zilog->zl_lock);
		return (EBUSY);
	}
	if (zilog->zl_suspend++ != 0) {
		/*
		 * Someone else already began a suspend.
		 * Just wait for them to finish.
		 */
		while (zilog->zl_suspending)
			cv_wait(&zilog->zl_cv_suspend, &zilog->zl_lock);
		mutex_exit(&zilog->zl_lock);
		return (0);
	}
	zilog->zl_suspending = B_TRUE;
	mutex_exit(&zilog->zl_lock);

	zil_commit(zilog, UINT64_MAX, 0);

	/*
	 * Wait for any in-flight log writes to complete.
	 */
	mutex_enter(&zilog->zl_lock);
	while (zilog->zl_writer)
		cv_wait(&zilog->zl_cv_writer, &zilog->zl_lock);
	mutex_exit(&zilog->zl_lock);

	zil_destroy(zilog, B_FALSE);

	mutex_enter(&zilog->zl_lock);
	zilog->zl_suspending = B_FALSE;
	cv_broadcast(&zilog->zl_cv_suspend);
	mutex_exit(&zilog->zl_lock);

	return (0);
}

void
zil_resume(zilog_t *zilog)
{
	mutex_enter(&zilog->zl_lock);
	ASSERT(zilog->zl_suspend != 0);
	zilog->zl_suspend--;
	mutex_exit(&zilog->zl_lock);
}

typedef struct zil_replay_arg {
	objset_t *zr_os;
	zil_replay_func_t **zr_replay;
	zil_replay_cleaner_t *zr_replay_cleaner;
	void *zr_arg;
	uint64_t *zr_txgp;
	boolean_t zr_byteswap;
	char *zr_lrbuf;
} zil_replay_arg_t;

static void
zil_replay_log_record(zilog_t *zilog, lr_t *lr, void *zra, uint64_t claim_txg)
{
	zil_replay_arg_t *zr = zra;
	const zil_header_t *zh = zilog->zl_header;
	uint64_t reclen = lr->lrc_reclen;
	uint64_t txtype = lr->lrc_txtype;
	char *name;
	int pass, error, sunk;

	if (zilog->zl_stop_replay)
		return;

	if (lr->lrc_txg < claim_txg)		/* already committed */
		return;

	if (lr->lrc_seq <= zh->zh_replay_seq)	/* already replayed */
		return;

	/* Strip case-insensitive bit, still present in log record */
	txtype &= ~TX_CI;

	/*
	 * Make a copy of the data so we can revise and extend it.
	 */
	bcopy(lr, zr->zr_lrbuf, reclen);

	/*
	 * The log block containing this lr may have been byteswapped
	 * so that we can easily examine common fields like lrc_txtype.
	 * However, the log is a mix of different data types, and only the
	 * replay vectors know how to byteswap their records. Therefore, if
	 * the lr was byteswapped, undo it before invoking the replay vector.
	 */
	if (zr->zr_byteswap)
		byteswap_uint64_array(zr->zr_lrbuf, reclen);

	/*
	 * If this is a TX_WRITE with a blkptr, suck in the data.
	 */
	if (txtype == TX_WRITE && reclen == sizeof (lr_write_t)) {
		lr_write_t *lrw = (lr_write_t *)lr;
		blkptr_t *wbp = &lrw->lr_blkptr;
		uint64_t wlen = lrw->lr_length;
		char *wbuf = zr->zr_lrbuf + reclen;

		if (BP_IS_HOLE(wbp)) {	/* compressed to a hole */
			bzero(wbuf, wlen);
		} else {
			/*
			 * A subsequent write may have overwritten this block,
			 * in which case wbp may have been freed and
			 * reallocated, and our read of wbp may fail with a
			 * checksum error. We can safely ignore this because
			 * the later write will provide the correct data.
			 */
			zbookmark_t zb;

			zb.zb_objset = dmu_objset_id(zilog->zl_os);
			zb.zb_object = lrw->lr_foid;
			zb.zb_level = -1;
			zb.zb_blkid = lrw->lr_offset / BP_GET_LSIZE(wbp);

			(void) zio_wait(zio_read(NULL, zilog->zl_spa,
			    wbp, wbuf, BP_GET_LSIZE(wbp), NULL, NULL,
			    ZIO_PRIORITY_SYNC_READ,
			    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE, &zb));
			(void) memmove(wbuf, wbuf + lrw->lr_blkoff, wlen);
		}
	}

	/*
	 * We must now do two things atomically: replay this log record,
	 * and update the log header to reflect the fact that we did so.
	 * We use the DMU's ability to assign into a specific txg to do this.
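	 *
	 * (The replay vector's own tx is assigned into the txg we publish
	 * through *zr_txgp; zl_replay_seq[] for that txg is then updated
	 * below, so zil_sync() records the new zh_replay_seq in the same
	 * txg in which the replayed change itself syncs.)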
	 */
	for (pass = 1, sunk = B_FALSE; /* CONSTANTCONDITION */; pass++) {
		uint64_t replay_txg;
		dmu_tx_t *replay_tx;

		replay_tx = dmu_tx_create(zr->zr_os);
		error = dmu_tx_assign(replay_tx, TXG_WAIT);
		if (error) {
			dmu_tx_abort(replay_tx);
			break;
		}

		replay_txg = dmu_tx_get_txg(replay_tx);

		if (txtype == 0 || txtype >= TX_MAX_TYPE) {
			error = EINVAL;
		} else {
			/*
			 * On the first pass, arrange for the replay vector
			 * to fail its dmu_tx_assign(). That's the only way
			 * to ensure that those code paths remain well tested.
			 *
			 * Only byteswap (if needed) on the 1st pass.
			 */
			*zr->zr_txgp = replay_txg - (pass == 1);
			error = zr->zr_replay[txtype](zr->zr_arg, zr->zr_lrbuf,
			    zr->zr_byteswap && pass == 1);
			*zr->zr_txgp = TXG_NOWAIT;
		}

		if (error == 0) {
			dsl_dataset_dirty(dmu_objset_ds(zr->zr_os), replay_tx);
			zilog->zl_replay_seq[replay_txg & TXG_MASK] =
			    lr->lrc_seq;
		}

		dmu_tx_commit(replay_tx);

		if (!error)
			return;

		/*
		 * The DMU's dnode layer doesn't see removes until the txg
		 * commits, so a subsequent claim can spuriously fail with
		 * EEXIST. So if we receive any error other than ERESTART
		 * we try syncing out any removes then retrying the
		 * transaction.
		 */
		if (error != ERESTART && !sunk) {
			if (zr->zr_replay_cleaner)
				zr->zr_replay_cleaner(zr->zr_arg);
			txg_wait_synced(spa_get_dsl(zilog->zl_spa), 0);
			sunk = B_TRUE;
			continue; /* retry */
		}

		if (error != ERESTART)
			break;

		if (pass != 1)
			txg_wait_open(spa_get_dsl(zilog->zl_spa),
			    replay_txg + 1);

		dprintf("pass %d, retrying\n", pass);
	}

	ASSERT(error && error != ERESTART);
	name = kmem_alloc(MAXNAMELEN, KM_SLEEP);
	dmu_objset_name(zr->zr_os, name);
	cmn_err(CE_WARN, "ZFS replay transaction error %d, "
	    "dataset %s, seq 0x%llx, txtype %llu %s\n",
	    error, name, (u_longlong_t)lr->lrc_seq, (u_longlong_t)txtype,
	    (lr->lrc_txtype & TX_CI) ? "CI" : "");
	zilog->zl_stop_replay = 1;
	kmem_free(name, MAXNAMELEN);
}

/* ARGSUSED */
static void
zil_incr_blks(zilog_t *zilog, blkptr_t *bp, void *arg, uint64_t claim_txg)
{
	zilog->zl_replay_blks++;
}

/*
 * If this dataset has a non-empty intent log, replay it and destroy it.
 */
void
zil_replay(objset_t *os, void *arg, uint64_t *txgp,
    zil_replay_func_t *replay_func[TX_MAX_TYPE],
    zil_replay_cleaner_t *replay_cleaner)
{
	zilog_t *zilog = dmu_objset_zil(os);
	const zil_header_t *zh = zilog->zl_header;
	zil_replay_arg_t zr;

	if (zil_empty(zilog)) {
		zil_destroy(zilog, B_TRUE);
		return;
	}
	//printf("ZFS: Replaying ZIL on %s...\n", os->os->os_spa->spa_name);

	zr.zr_os = os;
	zr.zr_replay = replay_func;
	zr.zr_replay_cleaner = replay_cleaner;
	zr.zr_arg = arg;
	zr.zr_txgp = txgp;
	zr.zr_byteswap = BP_SHOULD_BYTESWAP(&zh->zh_log);
	zr.zr_lrbuf = kmem_alloc(2 * SPA_MAXBLOCKSIZE, KM_SLEEP);

	/*
	 * Wait for in-progress removes to sync before starting replay.
	 */
	txg_wait_synced(zilog->zl_dmu_pool, 0);

	zilog->zl_stop_replay = 0;
	zilog->zl_replay_time = LBOLT;
	ASSERT(zilog->zl_replay_blks == 0);
	(void) zil_parse(zilog, zil_incr_blks, zil_replay_log_record, &zr,
	    zh->zh_claim_txg);
	kmem_free(zr.zr_lrbuf, 2 * SPA_MAXBLOCKSIZE);

	zil_destroy(zilog, B_FALSE);
	txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);
	//printf("ZFS: Replay of ZIL on %s finished.\n", os->os->os_spa->spa_name);
}

/*
 * Report whether all transactions are committed
 */
int
zil_is_committed(zilog_t *zilog)
{
	lwb_t *lwb;
	int ret;

	mutex_enter(&zilog->zl_lock);
	while (zilog->zl_writer)
		cv_wait(&zilog->zl_cv_writer, &zilog->zl_lock);

	/* recent unpushed intent log transactions? */
	if (!list_is_empty(&zilog->zl_itx_list)) {
		ret = B_FALSE;
		goto out;
	}

	/* intent log never used? */
	lwb = list_head(&zilog->zl_lwb_list);
	if (lwb == NULL) {
		ret = B_TRUE;
		goto out;
	}

	/*
	 * more than 1 log buffer means zil_sync() hasn't yet freed
	 * entries after a txg has committed
	 */
	if (list_next(&zilog->zl_lwb_list, lwb)) {
		ret = B_FALSE;
		goto out;
	}

	ASSERT(zil_empty(zilog));
	ret = B_TRUE;
out:
	cv_broadcast(&zilog->zl_cv_writer);
	mutex_exit(&zilog->zl_lock);
	return (ret);
}