zil.c revision 315387
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2016 by Delphix. All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 */

/* Portions Copyright 2010 Robert Milkowski */

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/dmu.h>
#include <sys/zap.h>
#include <sys/arc.h>
#include <sys/stat.h>
#include <sys/resource.h>
#include <sys/zil.h>
#include <sys/zil_impl.h>
#include <sys/dsl_dataset.h>
#include <sys/vdev_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_pool.h>

/*
 * The zfs intent log (ZIL) saves transaction records of system calls
 * that change the file system in memory with enough information
 * to be able to replay them. These are stored in memory until
 * either the DMU transaction group (txg) commits them to the stable pool
 * and they can be discarded, or they are flushed to the stable log
 * (also in the pool) due to a fsync, O_DSYNC or other synchronous
 * requirement. In the event of a panic or power failure, those log
 * records (transactions) are replayed.
 *
 * There is one ZIL per file system. Its on-disk (pool) format consists
 * of 3 parts:
 *
 *	- ZIL header
 *	- ZIL blocks
 *	- ZIL records
 *
 * A log record holds a system call transaction. Log blocks can
 * hold many log records and the blocks are chained together.
 * Each ZIL block contains a block pointer (blkptr_t) to the next
 * ZIL block in the chain. The ZIL header points to the first
 * block in the chain. Note there is not a fixed place in the pool
 * to hold blocks. They are dynamically allocated and freed as
 * needed from the blocks available. The sketch below illustrates
 * the ZIL structure.
 */
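/*
 * An illustrative sketch of the chain described above (an editorial
 * aid, not part of the original source; "lr" abbreviates log record):
 *
 *	+------------+     +-----------+     +-----------+
 *	| ZIL header | --> | ZIL block | --> | ZIL block | --> ...
 *	|  (zh_log)  |     | lr lr lr  |     | lr lr lr  |
 *	+------------+     +-----------+     +-----------+
 *
 * Each arrow is a blkptr_t stored in the zil_chain_t of the pointing
 * block (or in the header for the first block).
 */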
/*
 * Disable intent logging replay. This global ZIL switch affects all pools.
 */
int zil_replay_disable = 0;
SYSCTL_DECL(_vfs_zfs);
SYSCTL_INT(_vfs_zfs, OID_AUTO, zil_replay_disable, CTLFLAG_RWTUN,
    &zil_replay_disable, 0, "Disable intent logging replay");

/*
 * Tunable parameter for debugging or performance analysis. Setting
 * zfs_nocacheflush will cause corruption on power loss if a volatile
 * out-of-order write cache is enabled.
 */
boolean_t zfs_nocacheflush = B_FALSE;
SYSCTL_INT(_vfs_zfs, OID_AUTO, cache_flush_disable, CTLFLAG_RDTUN,
    &zfs_nocacheflush, 0, "Disable cache flush");
boolean_t zfs_trim_enabled = B_TRUE;
SYSCTL_DECL(_vfs_zfs_trim);
SYSCTL_INT(_vfs_zfs_trim, OID_AUTO, enabled, CTLFLAG_RDTUN, &zfs_trim_enabled, 0,
    "Enable ZFS TRIM");

static kmem_cache_t *zil_lwb_cache;

#define	LWB_EMPTY(lwb) ((BP_GET_LSIZE(&lwb->lwb_blk) - \
    sizeof (zil_chain_t)) == (lwb->lwb_sz - lwb->lwb_nused))

/*
 * ziltest is by and large an ugly hack, but very useful in
 * checking replay without tedious work.
 * When running ziltest we want to keep all itx's and so maintain
 * a single list in the zl_itxg[] that uses a high txg: ZILTEST_TXG
 * We subtract TXG_CONCURRENT_STATES to allow for common code.
 */
#define	ZILTEST_TXG (UINT64_MAX - TXG_CONCURRENT_STATES)

static int
zil_bp_compare(const void *x1, const void *x2)
{
	const dva_t *dva1 = &((zil_bp_node_t *)x1)->zn_dva;
	const dva_t *dva2 = &((zil_bp_node_t *)x2)->zn_dva;

	if (DVA_GET_VDEV(dva1) < DVA_GET_VDEV(dva2))
		return (-1);
	if (DVA_GET_VDEV(dva1) > DVA_GET_VDEV(dva2))
		return (1);

	if (DVA_GET_OFFSET(dva1) < DVA_GET_OFFSET(dva2))
		return (-1);
	if (DVA_GET_OFFSET(dva1) > DVA_GET_OFFSET(dva2))
		return (1);

	return (0);
}

static void
zil_bp_tree_init(zilog_t *zilog)
{
	avl_create(&zilog->zl_bp_tree, zil_bp_compare,
	    sizeof (zil_bp_node_t), offsetof(zil_bp_node_t, zn_node));
}

static void
zil_bp_tree_fini(zilog_t *zilog)
{
	avl_tree_t *t = &zilog->zl_bp_tree;
	zil_bp_node_t *zn;
	void *cookie = NULL;

	while ((zn = avl_destroy_nodes(t, &cookie)) != NULL)
		kmem_free(zn, sizeof (zil_bp_node_t));

	avl_destroy(t);
}

int
zil_bp_tree_add(zilog_t *zilog, const blkptr_t *bp)
{
	avl_tree_t *t = &zilog->zl_bp_tree;
	const dva_t *dva;
	zil_bp_node_t *zn;
	avl_index_t where;

	if (BP_IS_EMBEDDED(bp))
		return (0);

	dva = BP_IDENTITY(bp);

	if (avl_find(t, dva, &where) != NULL)
		return (SET_ERROR(EEXIST));

	zn = kmem_alloc(sizeof (zil_bp_node_t), KM_SLEEP);
	zn->zn_dva = *dva;
	avl_insert(t, zn, where);

	return (0);
}

static zil_header_t *
zil_header_in_syncing_context(zilog_t *zilog)
{
	return ((zil_header_t *)zilog->zl_header);
}

static void
zil_init_log_chain(zilog_t *zilog, blkptr_t *bp)
{
	zio_cksum_t *zc = &bp->blk_cksum;

	zc->zc_word[ZIL_ZC_GUID_0] = spa_get_random(-1ULL);
	zc->zc_word[ZIL_ZC_GUID_1] = spa_get_random(-1ULL);
	zc->zc_word[ZIL_ZC_OBJSET] = dmu_objset_id(zilog->zl_os);
	zc->zc_word[ZIL_ZC_SEQ] = 1ULL;
}
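/*
 * Editorial sketch (not in the original source) of where the chaining
 * metadata lives, as parsed by zil_read_log_block() below.  With the
 * ZILOG2 ("Slim ZIL") checksum type the zil_chain_t sits at the head
 * of the block; with the older ZILOG type an equivalent trailer sits
 * at the tail:
 *
 *	ZILOG2:	| zil_chain_t | log records ...............      |
 *	ZILOG:	| log records ...............      | zil_chain_t |
 *
 * In both cases zc_next_blk points to the next block in the chain and
 * zc_nused gives the number of bytes actually used.
 */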
/*
 * Read a log block and make sure it's valid.
 */
static int
zil_read_log_block(zilog_t *zilog, const blkptr_t *bp, blkptr_t *nbp, void *dst,
    char **end)
{
	enum zio_flag zio_flags = ZIO_FLAG_CANFAIL;
	arc_flags_t aflags = ARC_FLAG_WAIT;
	arc_buf_t *abuf = NULL;
	zbookmark_phys_t zb;
	int error;

	if (zilog->zl_header->zh_claim_txg == 0)
		zio_flags |= ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB;

	if (!(zilog->zl_header->zh_flags & ZIL_CLAIM_LR_SEQ_VALID))
		zio_flags |= ZIO_FLAG_SPECULATIVE;

	SET_BOOKMARK(&zb, bp->blk_cksum.zc_word[ZIL_ZC_OBJSET],
	    ZB_ZIL_OBJECT, ZB_ZIL_LEVEL, bp->blk_cksum.zc_word[ZIL_ZC_SEQ]);

	error = arc_read(NULL, zilog->zl_spa, bp, arc_getbuf_func, &abuf,
	    ZIO_PRIORITY_SYNC_READ, zio_flags, &aflags, &zb);

	if (error == 0) {
		zio_cksum_t cksum = bp->blk_cksum;

		/*
		 * Validate the checksummed log block.
		 *
		 * Sequence numbers should be... sequential. The checksum
		 * verifier for the next block should be bp's checksum plus 1.
		 *
		 * Also check the log chain linkage and size used.
		 */
		cksum.zc_word[ZIL_ZC_SEQ]++;

		if (BP_GET_CHECKSUM(bp) == ZIO_CHECKSUM_ZILOG2) {
			zil_chain_t *zilc = abuf->b_data;
			char *lr = (char *)(zilc + 1);
			uint64_t len = zilc->zc_nused - sizeof (zil_chain_t);

			if (bcmp(&cksum, &zilc->zc_next_blk.blk_cksum,
			    sizeof (cksum)) || BP_IS_HOLE(&zilc->zc_next_blk)) {
				error = SET_ERROR(ECKSUM);
			} else {
				ASSERT3U(len, <=, SPA_OLD_MAXBLOCKSIZE);
				bcopy(lr, dst, len);
				*end = (char *)dst + len;
				*nbp = zilc->zc_next_blk;
			}
		} else {
			char *lr = abuf->b_data;
			uint64_t size = BP_GET_LSIZE(bp);
			zil_chain_t *zilc = (zil_chain_t *)(lr + size) - 1;

			if (bcmp(&cksum, &zilc->zc_next_blk.blk_cksum,
			    sizeof (cksum)) || BP_IS_HOLE(&zilc->zc_next_blk) ||
			    (zilc->zc_nused > (size - sizeof (*zilc)))) {
				error = SET_ERROR(ECKSUM);
			} else {
				ASSERT3U(zilc->zc_nused, <=,
				    SPA_OLD_MAXBLOCKSIZE);
				bcopy(lr, dst, zilc->zc_nused);
				*end = (char *)dst + zilc->zc_nused;
				*nbp = zilc->zc_next_blk;
			}
		}

		arc_buf_destroy(abuf, &abuf);
	}

	return (error);
}

/*
 * Read a TX_WRITE log data block.
 */
static int
zil_read_log_data(zilog_t *zilog, const lr_write_t *lr, void *wbuf)
{
	enum zio_flag zio_flags = ZIO_FLAG_CANFAIL;
	const blkptr_t *bp = &lr->lr_blkptr;
	arc_flags_t aflags = ARC_FLAG_WAIT;
	arc_buf_t *abuf = NULL;
	zbookmark_phys_t zb;
	int error;

	if (BP_IS_HOLE(bp)) {
		if (wbuf != NULL)
			bzero(wbuf, MAX(BP_GET_LSIZE(bp), lr->lr_length));
		return (0);
	}

	if (zilog->zl_header->zh_claim_txg == 0)
		zio_flags |= ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB;

	SET_BOOKMARK(&zb, dmu_objset_id(zilog->zl_os), lr->lr_foid,
	    ZB_ZIL_LEVEL, lr->lr_offset / BP_GET_LSIZE(bp));

	error = arc_read(NULL, zilog->zl_spa, bp, arc_getbuf_func, &abuf,
	    ZIO_PRIORITY_SYNC_READ, zio_flags, &aflags, &zb);

	if (error == 0) {
		if (wbuf != NULL)
			bcopy(abuf->b_data, wbuf, arc_buf_size(abuf));
		arc_buf_destroy(abuf, &abuf);
	}

	return (error);
}
/*
 * Parse the intent log, and call parse_func for each valid record within.
 */
int
zil_parse(zilog_t *zilog, zil_parse_blk_func_t *parse_blk_func,
    zil_parse_lr_func_t *parse_lr_func, void *arg, uint64_t txg)
{
	const zil_header_t *zh = zilog->zl_header;
	boolean_t claimed = !!zh->zh_claim_txg;
	uint64_t claim_blk_seq = claimed ? zh->zh_claim_blk_seq : UINT64_MAX;
	uint64_t claim_lr_seq = claimed ? zh->zh_claim_lr_seq : UINT64_MAX;
	uint64_t max_blk_seq = 0;
	uint64_t max_lr_seq = 0;
	uint64_t blk_count = 0;
	uint64_t lr_count = 0;
	blkptr_t blk, next_blk;
	char *lrbuf, *lrp;
	int error = 0;

	/*
	 * Old logs didn't record the maximum zh_claim_lr_seq.
	 */
	if (!(zh->zh_flags & ZIL_CLAIM_LR_SEQ_VALID))
		claim_lr_seq = UINT64_MAX;

	/*
	 * Starting at the block pointed to by zh_log we read the log chain.
	 * For each block in the chain we strongly check that block to
	 * ensure its validity. We stop when an invalid block is found.
	 * For each block pointer in the chain we call parse_blk_func().
	 * For each record in each valid block we call parse_lr_func().
	 * If the log has been claimed, stop if we encounter a sequence
	 * number greater than the highest claimed sequence number.
	 */
	lrbuf = zio_buf_alloc(SPA_OLD_MAXBLOCKSIZE);
	zil_bp_tree_init(zilog);

	for (blk = zh->zh_log; !BP_IS_HOLE(&blk); blk = next_blk) {
		uint64_t blk_seq = blk.blk_cksum.zc_word[ZIL_ZC_SEQ];
		int reclen;
		char *end;

		if (blk_seq > claim_blk_seq)
			break;
		if ((error = parse_blk_func(zilog, &blk, arg, txg)) != 0)
			break;
		ASSERT3U(max_blk_seq, <, blk_seq);
		max_blk_seq = blk_seq;
		blk_count++;

		if (max_lr_seq == claim_lr_seq && max_blk_seq == claim_blk_seq)
			break;

		error = zil_read_log_block(zilog, &blk, &next_blk, lrbuf, &end);
		if (error != 0)
			break;

		for (lrp = lrbuf; lrp < end; lrp += reclen) {
			lr_t *lr = (lr_t *)lrp;
			reclen = lr->lrc_reclen;
			ASSERT3U(reclen, >=, sizeof (lr_t));
			if (lr->lrc_seq > claim_lr_seq)
				goto done;
			if ((error = parse_lr_func(zilog, lr, arg, txg)) != 0)
				goto done;
			ASSERT3U(max_lr_seq, <, lr->lrc_seq);
			max_lr_seq = lr->lrc_seq;
			lr_count++;
		}
	}
done:
	zilog->zl_parse_error = error;
	zilog->zl_parse_blk_seq = max_blk_seq;
	zilog->zl_parse_lr_seq = max_lr_seq;
	zilog->zl_parse_blk_count = blk_count;
	zilog->zl_parse_lr_count = lr_count;

	ASSERT(!claimed || !(zh->zh_flags & ZIL_CLAIM_LR_SEQ_VALID) ||
	    (max_blk_seq == claim_blk_seq && max_lr_seq == claim_lr_seq));

	zil_bp_tree_fini(zilog);
	zio_buf_free(lrbuf, SPA_OLD_MAXBLOCKSIZE);

	return (error);
}

static int
zil_claim_log_block(zilog_t *zilog, blkptr_t *bp, void *tx, uint64_t first_txg)
{
	/*
	 * Claim log block if not already committed and not already claimed.
	 * If tx == NULL, just verify that the block is claimable.
	 */
	if (BP_IS_HOLE(bp) || bp->blk_birth < first_txg ||
	    zil_bp_tree_add(zilog, bp) != 0)
		return (0);

	return (zio_wait(zio_claim(NULL, zilog->zl_spa,
	    tx == NULL ? 0 : first_txg, bp, spa_claim_notify, NULL,
	    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB)));
}
static int
zil_claim_log_record(zilog_t *zilog, lr_t *lrc, void *tx, uint64_t first_txg)
{
	lr_write_t *lr = (lr_write_t *)lrc;
	int error;

	if (lrc->lrc_txtype != TX_WRITE)
		return (0);

	/*
	 * If the block is not readable, don't claim it. This can happen
	 * in normal operation when a log block is written to disk before
	 * some of the dmu_sync() blocks it points to. In this case, the
	 * transaction cannot have been committed to anyone (we would have
	 * waited for all writes to be stable first), so it is semantically
	 * correct to declare this the end of the log.
	 */
	if (lr->lr_blkptr.blk_birth >= first_txg &&
	    (error = zil_read_log_data(zilog, lr, NULL)) != 0)
		return (error);
	return (zil_claim_log_block(zilog, &lr->lr_blkptr, tx, first_txg));
}

/* ARGSUSED */
static int
zil_free_log_block(zilog_t *zilog, blkptr_t *bp, void *tx, uint64_t claim_txg)
{
	zio_free_zil(zilog->zl_spa, dmu_tx_get_txg(tx), bp);

	return (0);
}

static int
zil_free_log_record(zilog_t *zilog, lr_t *lrc, void *tx, uint64_t claim_txg)
{
	lr_write_t *lr = (lr_write_t *)lrc;
	blkptr_t *bp = &lr->lr_blkptr;

	/*
	 * If we previously claimed it, we need to free it.
	 */
	if (claim_txg != 0 && lrc->lrc_txtype == TX_WRITE &&
	    bp->blk_birth >= claim_txg && zil_bp_tree_add(zilog, bp) == 0 &&
	    !BP_IS_HOLE(bp))
		zio_free(zilog->zl_spa, dmu_tx_get_txg(tx), bp);

	return (0);
}

static lwb_t *
zil_alloc_lwb(zilog_t *zilog, blkptr_t *bp, uint64_t txg)
{
	lwb_t *lwb;

	lwb = kmem_cache_alloc(zil_lwb_cache, KM_SLEEP);
	lwb->lwb_zilog = zilog;
	lwb->lwb_blk = *bp;
	lwb->lwb_buf = zio_buf_alloc(BP_GET_LSIZE(bp));
	lwb->lwb_max_txg = txg;
	lwb->lwb_zio = NULL;
	lwb->lwb_tx = NULL;
	if (BP_GET_CHECKSUM(bp) == ZIO_CHECKSUM_ZILOG2) {
		lwb->lwb_nused = sizeof (zil_chain_t);
		lwb->lwb_sz = BP_GET_LSIZE(bp);
	} else {
		lwb->lwb_nused = 0;
		lwb->lwb_sz = BP_GET_LSIZE(bp) - sizeof (zil_chain_t);
	}

	mutex_enter(&zilog->zl_lock);
	list_insert_tail(&zilog->zl_lwb_list, lwb);
	mutex_exit(&zilog->zl_lock);

	return (lwb);
}

/*
 * Called when we create in-memory log transactions so that we know
 * to clean up the itxs at the end of spa_sync().
 */
void
zilog_dirty(zilog_t *zilog, uint64_t txg)
{
	dsl_pool_t *dp = zilog->zl_dmu_pool;
	dsl_dataset_t *ds = dmu_objset_ds(zilog->zl_os);

	if (ds->ds_is_snapshot)
		panic("dirtying snapshot!");

	if (txg_list_add(&dp->dp_dirty_zilogs, zilog, txg)) {
		/* up the hold count until we can be written out */
		dmu_buf_add_ref(ds->ds_dbuf, zilog);
	}
}

/*
 * Determine if the zil is dirty in the specified txg. Callers wanting to
 * ensure that the dirty state does not change must hold the itxg_lock for
 * the specified txg. Holding the lock will ensure that the zil cannot be
 * dirtied (zil_itx_assign) or cleaned (zil_clean) while we check its current
 * state.
 */
boolean_t
zilog_is_dirty_in_txg(zilog_t *zilog, uint64_t txg)
{
	dsl_pool_t *dp = zilog->zl_dmu_pool;

	if (txg_list_member(&dp->dp_dirty_zilogs, zilog, txg & TXG_MASK))
		return (B_TRUE);
	return (B_FALSE);
}

/*
 * Determine if the zil is dirty. The zil is considered dirty if it has
 * any pending itx records that have not been cleaned by zil_clean().
 */
boolean_t
zilog_is_dirty(zilog_t *zilog)
{
	dsl_pool_t *dp = zilog->zl_dmu_pool;

	for (int t = 0; t < TXG_SIZE; t++) {
		if (txg_list_member(&dp->dp_dirty_zilogs, zilog, t))
			return (B_TRUE);
	}
	return (B_FALSE);
}
/*
 * Create an on-disk intent log.
 */
static lwb_t *
zil_create(zilog_t *zilog)
{
	const zil_header_t *zh = zilog->zl_header;
	lwb_t *lwb = NULL;
	uint64_t txg = 0;
	dmu_tx_t *tx = NULL;
	blkptr_t blk;
	int error = 0;

	/*
	 * Wait for any previous destroy to complete.
	 */
	txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);

	ASSERT(zh->zh_claim_txg == 0);
	ASSERT(zh->zh_replay_seq == 0);

	blk = zh->zh_log;

	/*
	 * Allocate an initial log block if:
	 *    - there isn't one already
	 *    - the existing block is the wrong endianness
	 */
	if (BP_IS_HOLE(&blk) || BP_SHOULD_BYTESWAP(&blk)) {
		tx = dmu_tx_create(zilog->zl_os);
		VERIFY(dmu_tx_assign(tx, TXG_WAIT) == 0);
		dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
		txg = dmu_tx_get_txg(tx);

		if (!BP_IS_HOLE(&blk)) {
			zio_free_zil(zilog->zl_spa, txg, &blk);
			BP_ZERO(&blk);
		}

		error = zio_alloc_zil(zilog->zl_spa, txg, &blk, NULL,
		    ZIL_MIN_BLKSZ, zilog->zl_logbias == ZFS_LOGBIAS_LATENCY);

		if (error == 0)
			zil_init_log_chain(zilog, &blk);
	}

	/*
	 * Allocate a log write buffer (lwb) for the first log block.
	 */
	if (error == 0)
		lwb = zil_alloc_lwb(zilog, &blk, txg);

	/*
	 * If we just allocated the first log block, commit our transaction
	 * and wait for zil_sync() to stuff the block pointer into zh_log.
	 * (zh is part of the MOS, so we cannot modify it in open context.)
	 */
	if (tx != NULL) {
		dmu_tx_commit(tx);
		txg_wait_synced(zilog->zl_dmu_pool, txg);
	}

	ASSERT(bcmp(&blk, &zh->zh_log, sizeof (blk)) == 0);

	return (lwb);
}

/*
 * In one tx, free all log blocks and clear the log header.
 * If keep_first is set, then we're replaying a log with no content.
 * We want to keep the first block, however, so that the first
 * synchronous transaction doesn't require a txg_wait_synced()
 * in zil_create(). We don't need to txg_wait_synced() here either
 * when keep_first is set, because both zil_create() and zil_destroy()
 * will wait for any in-progress destroys to complete.
 */
void
zil_destroy(zilog_t *zilog, boolean_t keep_first)
{
	const zil_header_t *zh = zilog->zl_header;
	lwb_t *lwb;
	dmu_tx_t *tx;
	uint64_t txg;

	/*
	 * Wait for any previous destroy to complete.
	 */
	txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);

	zilog->zl_old_header = *zh;		/* debugging aid */

	if (BP_IS_HOLE(&zh->zh_log))
		return;

	tx = dmu_tx_create(zilog->zl_os);
	VERIFY(dmu_tx_assign(tx, TXG_WAIT) == 0);
	dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
	txg = dmu_tx_get_txg(tx);

	mutex_enter(&zilog->zl_lock);

	ASSERT3U(zilog->zl_destroy_txg, <, txg);
	zilog->zl_destroy_txg = txg;
	zilog->zl_keep_first = keep_first;

	if (!list_is_empty(&zilog->zl_lwb_list)) {
		ASSERT(zh->zh_claim_txg == 0);
		VERIFY(!keep_first);
		while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
			list_remove(&zilog->zl_lwb_list, lwb);
			if (lwb->lwb_buf != NULL)
				zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
			zio_free_zil(zilog->zl_spa, txg, &lwb->lwb_blk);
			kmem_cache_free(zil_lwb_cache, lwb);
		}
	} else if (!keep_first) {
		zil_destroy_sync(zilog, tx);
	}
	mutex_exit(&zilog->zl_lock);

	dmu_tx_commit(tx);
}

void
zil_destroy_sync(zilog_t *zilog, dmu_tx_t *tx)
{
	ASSERT(list_is_empty(&zilog->zl_lwb_list));
	(void) zil_parse(zilog, zil_free_log_block,
	    zil_free_log_record, tx, zilog->zl_header->zh_claim_txg);
}

int
zil_claim(dsl_pool_t *dp, dsl_dataset_t *ds, void *txarg)
{
	dmu_tx_t *tx = txarg;
	uint64_t first_txg = dmu_tx_get_txg(tx);
	zilog_t *zilog;
	zil_header_t *zh;
	objset_t *os;
	int error;

	error = dmu_objset_own_obj(dp, ds->ds_object,
	    DMU_OST_ANY, B_FALSE, FTAG, &os);
	if (error != 0) {
		/*
		 * EBUSY indicates that the objset is inconsistent, in which
		 * case it cannot have a ZIL.
		 */
		if (error != EBUSY) {
			cmn_err(CE_WARN, "can't open objset for %llu, error %u",
			    (unsigned long long)ds->ds_object, error);
		}
		return (0);
	}

	zilog = dmu_objset_zil(os);
	zh = zil_header_in_syncing_context(zilog);

	if (spa_get_log_state(zilog->zl_spa) == SPA_LOG_CLEAR) {
		if (!BP_IS_HOLE(&zh->zh_log))
			zio_free_zil(zilog->zl_spa, first_txg, &zh->zh_log);
		BP_ZERO(&zh->zh_log);
		dsl_dataset_dirty(dmu_objset_ds(os), tx);
		dmu_objset_disown(os, FTAG);
		return (0);
	}

	/*
	 * Claim all log blocks if we haven't already done so, and remember
	 * the highest claimed sequence number. This ensures that if we can
	 * read only part of the log now (e.g. due to a missing device),
	 * but we can read the entire log later, we will not try to replay
	 * or destroy beyond the last block we successfully claimed.
	 */
	ASSERT3U(zh->zh_claim_txg, <=, first_txg);
	if (zh->zh_claim_txg == 0 && !BP_IS_HOLE(&zh->zh_log)) {
		(void) zil_parse(zilog, zil_claim_log_block,
		    zil_claim_log_record, tx, first_txg);
		zh->zh_claim_txg = first_txg;
		zh->zh_claim_blk_seq = zilog->zl_parse_blk_seq;
		zh->zh_claim_lr_seq = zilog->zl_parse_lr_seq;
		if (zilog->zl_parse_lr_count || zilog->zl_parse_blk_count > 1)
			zh->zh_flags |= ZIL_REPLAY_NEEDED;
		zh->zh_flags |= ZIL_CLAIM_LR_SEQ_VALID;
		dsl_dataset_dirty(dmu_objset_ds(os), tx);
	}

	ASSERT3U(first_txg, ==, (spa_last_synced_txg(zilog->zl_spa) + 1));
	dmu_objset_disown(os, FTAG);
	return (0);
}

/*
 * Check the log by walking the log chain.
 * Checksum errors are ok as they indicate the end of the chain.
 * Any other error (no device or read failure) returns an error.
 */
/* ARGSUSED */
int
zil_check_log_chain(dsl_pool_t *dp, dsl_dataset_t *ds, void *tx)
{
	zilog_t *zilog;
	objset_t *os;
	blkptr_t *bp;
	int error;

	ASSERT(tx == NULL);

	error = dmu_objset_from_ds(ds, &os);
	if (error != 0) {
		cmn_err(CE_WARN, "can't open objset %llu, error %d",
		    (unsigned long long)ds->ds_object, error);
		return (0);
	}

	zilog = dmu_objset_zil(os);
	bp = (blkptr_t *)&zilog->zl_header->zh_log;

	/*
	 * Check the first block and determine if it's on a log device
	 * which may have been removed or faulted prior to loading this
	 * pool. If so, there's no point in checking the rest of the log
	 * as its content should have already been synced to the pool.
	 */
	if (!BP_IS_HOLE(bp)) {
		vdev_t *vd;
		boolean_t valid = B_TRUE;

		spa_config_enter(os->os_spa, SCL_STATE, FTAG, RW_READER);
		vd = vdev_lookup_top(os->os_spa, DVA_GET_VDEV(&bp->blk_dva[0]));
		if (vd->vdev_islog && vdev_is_dead(vd))
			valid = vdev_log_state_valid(vd);
		spa_config_exit(os->os_spa, SCL_STATE, FTAG);

		if (!valid)
			return (0);
	}

	/*
	 * Because tx == NULL, zil_claim_log_block() will not actually claim
	 * any blocks, but just determine whether it is possible to do so.
	 * In addition to checking the log chain, zil_claim_log_block()
	 * will invoke zio_claim() with a done func of spa_claim_notify(),
	 * which will update spa_max_claim_txg. See spa_load() for details.
	 */
	error = zil_parse(zilog, zil_claim_log_block, zil_claim_log_record, tx,
	    zilog->zl_header->zh_claim_txg ? -1ULL : spa_first_txg(os->os_spa));

	return ((error == ECKSUM || error == ENOENT) ? 0 : error);
}

static int
zil_vdev_compare(const void *x1, const void *x2)
{
	const uint64_t v1 = ((zil_vdev_node_t *)x1)->zv_vdev;
	const uint64_t v2 = ((zil_vdev_node_t *)x2)->zv_vdev;

	if (v1 < v2)
		return (-1);
	if (v1 > v2)
		return (1);

	return (0);
}

void
zil_add_block(zilog_t *zilog, const blkptr_t *bp)
{
	avl_tree_t *t = &zilog->zl_vdev_tree;
	avl_index_t where;
	zil_vdev_node_t *zv, zvsearch;
	int ndvas = BP_GET_NDVAS(bp);
	int i;

	if (zfs_nocacheflush)
		return;

	ASSERT(zilog->zl_writer);

	/*
	 * Even though we're zl_writer, we still need a lock because the
	 * zl_get_data() callbacks may have dmu_sync() done callbacks
	 * that will run concurrently.
	 */
	mutex_enter(&zilog->zl_vdev_lock);
	for (i = 0; i < ndvas; i++) {
		zvsearch.zv_vdev = DVA_GET_VDEV(&bp->blk_dva[i]);
		if (avl_find(t, &zvsearch, &where) == NULL) {
			zv = kmem_alloc(sizeof (*zv), KM_SLEEP);
			zv->zv_vdev = zvsearch.zv_vdev;
			avl_insert(t, zv, where);
		}
	}
	mutex_exit(&zilog->zl_vdev_lock);
}

static void
zil_flush_vdevs(zilog_t *zilog)
{
	spa_t *spa = zilog->zl_spa;
	avl_tree_t *t = &zilog->zl_vdev_tree;
	void *cookie = NULL;
	zil_vdev_node_t *zv;
	zio_t *zio = NULL;

	ASSERT(zilog->zl_writer);

	/*
	 * We don't need zl_vdev_lock here because we're the zl_writer,
	 * and all zl_get_data() callbacks are done.
	 */
	if (avl_numnodes(t) == 0)
		return;

	spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);

	while ((zv = avl_destroy_nodes(t, &cookie)) != NULL) {
		vdev_t *vd = vdev_lookup_top(spa, zv->zv_vdev);
		if (vd != NULL && !vd->vdev_nowritecache) {
			if (zio == NULL)
				zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
			zio_flush(zio, vd);
		}
		kmem_free(zv, sizeof (*zv));
	}

	/*
	 * Wait for all the flushes to complete. Not all devices actually
	 * support the DKIOCFLUSHWRITECACHE ioctl, so it's OK if it fails.
	 */
	if (zio)
		(void) zio_wait(zio);

	spa_config_exit(spa, SCL_STATE, FTAG);
}

/*
 * Function called when a log block write completes.
 */
static void
zil_lwb_write_done(zio_t *zio)
{
	lwb_t *lwb = zio->io_private;
	zilog_t *zilog = lwb->lwb_zilog;
	dmu_tx_t *tx = lwb->lwb_tx;

	ASSERT(BP_GET_COMPRESS(zio->io_bp) == ZIO_COMPRESS_OFF);
	ASSERT(BP_GET_TYPE(zio->io_bp) == DMU_OT_INTENT_LOG);
	ASSERT(BP_GET_LEVEL(zio->io_bp) == 0);
	ASSERT(BP_GET_BYTEORDER(zio->io_bp) == ZFS_HOST_BYTEORDER);
	ASSERT(!BP_IS_GANG(zio->io_bp));
	ASSERT(!BP_IS_HOLE(zio->io_bp));
	ASSERT(BP_GET_FILL(zio->io_bp) == 0);

	/*
	 * Ensure the lwb buffer pointer is cleared before releasing
	 * the txg. If we have had an allocation failure and
	 * the txg is waiting to sync then we want zil_sync()
	 * to remove the lwb so that it's not picked up as the next new
	 * one in zil_commit_writer(). zil_sync() will only remove
	 * the lwb if lwb_buf is null.
	 */
	zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
	mutex_enter(&zilog->zl_lock);
	lwb->lwb_buf = NULL;
	lwb->lwb_tx = NULL;
	mutex_exit(&zilog->zl_lock);

	/*
	 * Now that we've written this log block, we have a stable pointer
	 * to the next block in the chain, so it's OK to let the txg in
	 * which we allocated the next block sync.
	 */
	dmu_tx_commit(tx);
}

/*
 * Initialize the io for a log block.
 */
static void
zil_lwb_write_init(zilog_t *zilog, lwb_t *lwb)
{
	zbookmark_phys_t zb;

	SET_BOOKMARK(&zb, lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_OBJSET],
	    ZB_ZIL_OBJECT, ZB_ZIL_LEVEL,
	    lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_SEQ]);

	if (zilog->zl_root_zio == NULL) {
		zilog->zl_root_zio = zio_root(zilog->zl_spa, NULL, NULL,
		    ZIO_FLAG_CANFAIL);
	}
	if (lwb->lwb_zio == NULL) {
		lwb->lwb_zio = zio_rewrite(zilog->zl_root_zio, zilog->zl_spa,
		    0, &lwb->lwb_blk, lwb->lwb_buf, BP_GET_LSIZE(&lwb->lwb_blk),
		    zil_lwb_write_done, lwb, ZIO_PRIORITY_SYNC_WRITE,
		    ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_PROPAGATE, &zb);
	}
}

/*
 * Define a limited set of intent log block sizes.
 *
 * These must be a multiple of 4KB. Note only the amount used (again
 * aligned to 4KB) actually gets written. However, we can't always just
 * allocate SPA_OLD_MAXBLOCKSIZE as the slog space could be exhausted.
 */
uint64_t zil_block_buckets[] = {
    4096,		/* non TX_WRITE */
    8192+4096,		/* data base */
    32*1024 + 4096,	/* NFS writes */
    UINT64_MAX
};
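/*
 * Editorial example (not in the original source) of the selection done
 * in zil_lwb_write_start() below: a commit carrying 20KB of records
 * needs 20KB + sizeof (zil_chain_t), so the smallest bucket that fits
 * is 32K+4K; a 200KB commit overflows every bucket and falls back to
 * SPA_OLD_MAXBLOCKSIZE.  The chosen size is then maximized against the
 * ZIL_PREV_BLKS most recent sizes to smooth out alternating small and
 * large commits.
 */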
/*
 * Use the slog as long as the logbias is 'latency' and the current commit size
 * is less than the limit or the total list size is less than 2X the limit.
 * Limit checking is disabled by setting zil_slog_limit to UINT64_MAX.
 */
uint64_t zil_slog_limit = 1024 * 1024;
#define	USE_SLOG(zilog) (((zilog)->zl_logbias == ZFS_LOGBIAS_LATENCY) && \
	(((zilog)->zl_cur_used < zil_slog_limit) || \
	((zilog)->zl_itx_list_sz < (zil_slog_limit << 1))))

/*
 * Start a log block write and advance to the next log block.
 * Calls are serialized.
 */
static lwb_t *
zil_lwb_write_start(zilog_t *zilog, lwb_t *lwb, boolean_t last)
{
	lwb_t *nlwb = NULL;
	zil_chain_t *zilc;
	spa_t *spa = zilog->zl_spa;
	blkptr_t *bp;
	dmu_tx_t *tx;
	uint64_t txg;
	uint64_t zil_blksz, wsz;
	int i, error;

	if (BP_GET_CHECKSUM(&lwb->lwb_blk) == ZIO_CHECKSUM_ZILOG2) {
		zilc = (zil_chain_t *)lwb->lwb_buf;
		bp = &zilc->zc_next_blk;
	} else {
		zilc = (zil_chain_t *)(lwb->lwb_buf + lwb->lwb_sz);
		bp = &zilc->zc_next_blk;
	}

	ASSERT(lwb->lwb_nused <= lwb->lwb_sz);

	/*
	 * Allocate the next block and save its address in this block
	 * before writing it in order to establish the log chain.
	 * Note that if the allocation of nlwb synced before we wrote
	 * the block that points at it (lwb), we'd leak it if we crashed.
	 * Therefore, we don't do dmu_tx_commit() until zil_lwb_write_done().
	 * We dirty the dataset to ensure that zil_sync() will be called
	 * to clean up in the event of allocation failure or I/O failure.
	 */
	tx = dmu_tx_create(zilog->zl_os);
	VERIFY(dmu_tx_assign(tx, TXG_WAIT) == 0);
	dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
	txg = dmu_tx_get_txg(tx);

	lwb->lwb_tx = tx;

	/*
	 * Log blocks are pre-allocated. Here we select the size of the next
	 * block, based on size used in the last block.
	 * - first find the smallest bucket that will fit the block from a
	 *   limited set of block sizes. This is because it's faster to write
	 *   blocks allocated from the same metaslab as they are adjacent or
	 *   close.
	 * - next find the maximum from the new suggested size and an array of
	 *   previous sizes. This lessens a picket fence effect of wrongly
	 *   guessing the size if we have a stream of say 2k, 64k, 2k, 64k
	 *   requests.
	 *
	 * Note we only write what is used, but we can't just allocate
	 * the maximum block size because we can exhaust the available
	 * pool log space.
	 */
	zil_blksz = zilog->zl_cur_used + sizeof (zil_chain_t);
	for (i = 0; zil_blksz > zil_block_buckets[i]; i++)
		continue;
	zil_blksz = zil_block_buckets[i];
	if (zil_blksz == UINT64_MAX)
		zil_blksz = SPA_OLD_MAXBLOCKSIZE;
	zilog->zl_prev_blks[zilog->zl_prev_rotor] = zil_blksz;
	for (i = 0; i < ZIL_PREV_BLKS; i++)
		zil_blksz = MAX(zil_blksz, zilog->zl_prev_blks[i]);
	zilog->zl_prev_rotor = (zilog->zl_prev_rotor + 1) & (ZIL_PREV_BLKS - 1);

	BP_ZERO(bp);
	/* pass the old blkptr in order to spread log blocks across devs */
	error = zio_alloc_zil(spa, txg, bp, &lwb->lwb_blk, zil_blksz,
	    USE_SLOG(zilog));
	if (error == 0) {
		ASSERT3U(bp->blk_birth, ==, txg);
		bp->blk_cksum = lwb->lwb_blk.blk_cksum;
		bp->blk_cksum.zc_word[ZIL_ZC_SEQ]++;

		/*
		 * Allocate a new log write buffer (lwb).
		 */
		nlwb = zil_alloc_lwb(zilog, bp, txg);

		/* Record the block for later vdev flushing */
		zil_add_block(zilog, &lwb->lwb_blk);
	}

	if (BP_GET_CHECKSUM(&lwb->lwb_blk) == ZIO_CHECKSUM_ZILOG2) {
		/* For Slim ZIL only write what is used. */
		wsz = P2ROUNDUP_TYPED(lwb->lwb_nused, ZIL_MIN_BLKSZ, uint64_t);
		ASSERT3U(wsz, <=, lwb->lwb_sz);
		zio_shrink(lwb->lwb_zio, wsz);

	} else {
		wsz = lwb->lwb_sz;
	}

	zilc->zc_pad = 0;
	zilc->zc_nused = lwb->lwb_nused;
	zilc->zc_eck.zec_cksum = lwb->lwb_blk.blk_cksum;

	/*
	 * clear unused data for security
	 */
	bzero(lwb->lwb_buf + lwb->lwb_nused, wsz - lwb->lwb_nused);

	if (last)
		lwb->lwb_zio->io_pipeline &= ~ZIO_STAGE_ISSUE_ASYNC;
	zio_nowait(lwb->lwb_zio); /* Kick off the write for the old log block */

	/*
	 * If there was an allocation failure then nlwb will be null which
	 * forces a txg_wait_synced().
	 */
	return (nlwb);
}
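/*
 * Editorial note (not in the original source): zil_lwb_commit() below
 * handles three TX_WRITE states.  WR_COPIED records already carry their
 * data in the itx; WR_NEED_COPY records have zl_get_data() copy the file
 * data into the log block just after the record; WR_INDIRECT records are
 * passed a NULL buffer, so zl_get_data() instead fills in lr_blkptr and
 * the data stays outside the log (see the dmu_sync() discussion in
 * zil_claim_log_record() above).
 */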
static lwb_t *
zil_lwb_commit(zilog_t *zilog, itx_t *itx, lwb_t *lwb)
{
	lr_t *lrc = &itx->itx_lr; /* common log record */
	lr_write_t *lrw = (lr_write_t *)lrc;
	char *lr_buf;
	uint64_t txg = lrc->lrc_txg;
	uint64_t reclen = lrc->lrc_reclen;
	uint64_t dlen = 0;

	if (lwb == NULL)
		return (NULL);

	ASSERT(lwb->lwb_buf != NULL);

	if (lrc->lrc_txtype == TX_WRITE && itx->itx_wr_state == WR_NEED_COPY)
		dlen = P2ROUNDUP_TYPED(
		    lrw->lr_length, sizeof (uint64_t), uint64_t);

	zilog->zl_cur_used += (reclen + dlen);

	zil_lwb_write_init(zilog, lwb);

	/*
	 * If this record won't fit in the current log block, start a new one.
	 */
	if (lwb->lwb_nused + reclen + dlen > lwb->lwb_sz) {
		lwb = zil_lwb_write_start(zilog, lwb, B_FALSE);
		if (lwb == NULL)
			return (NULL);
		zil_lwb_write_init(zilog, lwb);
		ASSERT(LWB_EMPTY(lwb));
		if (lwb->lwb_nused + reclen + dlen > lwb->lwb_sz) {
			txg_wait_synced(zilog->zl_dmu_pool, txg);
			return (lwb);
		}
	}

	lr_buf = lwb->lwb_buf + lwb->lwb_nused;
	bcopy(lrc, lr_buf, reclen);
	lrc = (lr_t *)lr_buf;
	lrw = (lr_write_t *)lrc;

	/*
	 * If it's a write, fetch the data or get its blkptr as appropriate.
	 */
	if (lrc->lrc_txtype == TX_WRITE) {
		if (txg > spa_freeze_txg(zilog->zl_spa))
			txg_wait_synced(zilog->zl_dmu_pool, txg);
		if (itx->itx_wr_state != WR_COPIED) {
			char *dbuf;
			int error;

			if (dlen) {
				ASSERT(itx->itx_wr_state == WR_NEED_COPY);
				dbuf = lr_buf + reclen;
				lrw->lr_common.lrc_reclen += dlen;
			} else {
				ASSERT(itx->itx_wr_state == WR_INDIRECT);
				dbuf = NULL;
			}
			error = zilog->zl_get_data(
			    itx->itx_private, lrw, dbuf, lwb->lwb_zio);
			if (error == EIO) {
				txg_wait_synced(zilog->zl_dmu_pool, txg);
				return (lwb);
			}
			if (error != 0) {
				ASSERT(error == ENOENT || error == EEXIST ||
				    error == EALREADY);
				return (lwb);
			}
		}
	}

	/*
	 * We're actually making an entry, so update lrc_seq to be the
	 * log record sequence number. Note that this is generally not
	 * equal to the itx sequence number because not all transactions
	 * are synchronous, and sometimes spa_sync() gets there first.
	 */
	lrc->lrc_seq = ++zilog->zl_lr_seq; /* we are single threaded */
	lwb->lwb_nused += reclen + dlen;
	lwb->lwb_max_txg = MAX(lwb->lwb_max_txg, txg);
	ASSERT3U(lwb->lwb_nused, <=, lwb->lwb_sz);
	ASSERT0(P2PHASE(lwb->lwb_nused, sizeof (uint64_t)));

	return (lwb);
}
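/*
 * Editorial overview (not in the original source) of the in-memory itx
 * lifecycle implemented by the functions that follow:
 *
 *	zil_itx_create()  - allocate a log record for a system call
 *	zil_itx_assign()  - file it on the per-txg sync/async lists
 *	zil_commit()      - write sync itxs out to the on-disk log
 *	zil_clean()       - discard itxs whose txg has synced
 */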
itx_t *
zil_itx_create(uint64_t txtype, size_t lrsize)
{
	itx_t *itx;

	lrsize = P2ROUNDUP_TYPED(lrsize, sizeof (uint64_t), size_t);

	itx = kmem_alloc(offsetof(itx_t, itx_lr) + lrsize, KM_SLEEP);
	itx->itx_lr.lrc_txtype = txtype;
	itx->itx_lr.lrc_reclen = lrsize;
	itx->itx_sod = lrsize; /* if write & WR_NEED_COPY will be increased */
	itx->itx_lr.lrc_seq = 0;	/* defensive */
	itx->itx_sync = B_TRUE;		/* default is synchronous */

	return (itx);
}

void
zil_itx_destroy(itx_t *itx)
{
	kmem_free(itx, offsetof(itx_t, itx_lr) + itx->itx_lr.lrc_reclen);
}

/*
 * Free up the sync and async itxs. The itxs_t has already been detached
 * so no locks are needed.
 */
static void
zil_itxg_clean(itxs_t *itxs)
{
	itx_t *itx;
	list_t *list;
	avl_tree_t *t;
	void *cookie;
	itx_async_node_t *ian;

	list = &itxs->i_sync_list;
	while ((itx = list_head(list)) != NULL) {
		list_remove(list, itx);
		kmem_free(itx, offsetof(itx_t, itx_lr) +
		    itx->itx_lr.lrc_reclen);
	}

	cookie = NULL;
	t = &itxs->i_async_tree;
	while ((ian = avl_destroy_nodes(t, &cookie)) != NULL) {
		list = &ian->ia_list;
		while ((itx = list_head(list)) != NULL) {
			list_remove(list, itx);
			kmem_free(itx, offsetof(itx_t, itx_lr) +
			    itx->itx_lr.lrc_reclen);
		}
		list_destroy(list);
		kmem_free(ian, sizeof (itx_async_node_t));
	}
	avl_destroy(t);

	kmem_free(itxs, sizeof (itxs_t));
}

static int
zil_aitx_compare(const void *x1, const void *x2)
{
	const uint64_t o1 = ((itx_async_node_t *)x1)->ia_foid;
	const uint64_t o2 = ((itx_async_node_t *)x2)->ia_foid;

	if (o1 < o2)
		return (-1);
	if (o1 > o2)
		return (1);

	return (0);
}

/*
 * Remove all async itx with the given oid.
 */
static void
zil_remove_async(zilog_t *zilog, uint64_t oid)
{
	uint64_t otxg, txg;
	itx_async_node_t *ian;
	avl_tree_t *t;
	avl_index_t where;
	list_t clean_list;
	itx_t *itx;

	ASSERT(oid != 0);
	list_create(&clean_list, sizeof (itx_t), offsetof(itx_t, itx_node));

	if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */
		otxg = ZILTEST_TXG;
	else
		otxg = spa_last_synced_txg(zilog->zl_spa) + 1;

	for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) {
		itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK];

		mutex_enter(&itxg->itxg_lock);
		if (itxg->itxg_txg != txg) {
			mutex_exit(&itxg->itxg_lock);
			continue;
		}

		/*
		 * Locate the object node and append its list.
		 */
		t = &itxg->itxg_itxs->i_async_tree;
		ian = avl_find(t, &oid, &where);
		if (ian != NULL)
			list_move_tail(&clean_list, &ian->ia_list);
		mutex_exit(&itxg->itxg_lock);
	}
	while ((itx = list_head(&clean_list)) != NULL) {
		list_remove(&clean_list, itx);
		kmem_free(itx, offsetof(itx_t, itx_lr) +
		    itx->itx_lr.lrc_reclen);
	}
	list_destroy(&clean_list);
}

void
zil_itx_assign(zilog_t *zilog, itx_t *itx, dmu_tx_t *tx)
{
	uint64_t txg;
	itxg_t *itxg;
	itxs_t *itxs, *clean = NULL;

	/*
	 * Object ids can be re-instantiated in the next txg so
	 * remove any async transactions to avoid future leaks.
	 * This can happen if a fsync occurs on the re-instantiated
	 * object for a WR_INDIRECT or WR_NEED_COPY write, which gets
	 * the new file data and flushes a write record for the old object.
	 */
	if ((itx->itx_lr.lrc_txtype & ~TX_CI) == TX_REMOVE)
		zil_remove_async(zilog, itx->itx_oid);

	/*
	 * Ensure the data of a renamed file is committed before the rename.
	 */
	if ((itx->itx_lr.lrc_txtype & ~TX_CI) == TX_RENAME)
		zil_async_to_sync(zilog, itx->itx_oid);

	if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX)
		txg = ZILTEST_TXG;
	else
		txg = dmu_tx_get_txg(tx);

	itxg = &zilog->zl_itxg[txg & TXG_MASK];
	mutex_enter(&itxg->itxg_lock);
	itxs = itxg->itxg_itxs;
	if (itxg->itxg_txg != txg) {
		if (itxs != NULL) {
			/*
			 * The zil_clean callback hasn't got around to cleaning
			 * this itxg. Save the itxs for release below.
			 * This should be rare.
			 */
			atomic_add_64(&zilog->zl_itx_list_sz, -itxg->itxg_sod);
			itxg->itxg_sod = 0;
			clean = itxg->itxg_itxs;
		}
		ASSERT(itxg->itxg_sod == 0);
		itxg->itxg_txg = txg;
		itxs = itxg->itxg_itxs = kmem_zalloc(sizeof (itxs_t), KM_SLEEP);

		list_create(&itxs->i_sync_list, sizeof (itx_t),
		    offsetof(itx_t, itx_node));
		avl_create(&itxs->i_async_tree, zil_aitx_compare,
		    sizeof (itx_async_node_t),
		    offsetof(itx_async_node_t, ia_node));
	}
	if (itx->itx_sync) {
		list_insert_tail(&itxs->i_sync_list, itx);
		atomic_add_64(&zilog->zl_itx_list_sz, itx->itx_sod);
		itxg->itxg_sod += itx->itx_sod;
	} else {
		avl_tree_t *t = &itxs->i_async_tree;
		uint64_t foid = ((lr_ooo_t *)&itx->itx_lr)->lr_foid;
		itx_async_node_t *ian;
		avl_index_t where;

		ian = avl_find(t, &foid, &where);
		if (ian == NULL) {
			ian = kmem_alloc(sizeof (itx_async_node_t), KM_SLEEP);
			list_create(&ian->ia_list, sizeof (itx_t),
			    offsetof(itx_t, itx_node));
			ian->ia_foid = foid;
			avl_insert(t, ian, where);
		}
		list_insert_tail(&ian->ia_list, itx);
	}

	itx->itx_lr.lrc_txg = dmu_tx_get_txg(tx);
	zilog_dirty(zilog, txg);
	mutex_exit(&itxg->itxg_lock);

	/* Release the old itxs now we've dropped the lock */
	if (clean != NULL)
		zil_itxg_clean(clean);
}
/*
 * If there are any in-memory intent log transactions which have now been
 * synced then start up a taskq to free them. We should only do this after we
 * have written out the uberblocks (i.e. txg has been committed) so that we
 * don't inadvertently clean out in-memory log records that would be required
 * by zil_commit().
 */
void
zil_clean(zilog_t *zilog, uint64_t synced_txg)
{
	itxg_t *itxg = &zilog->zl_itxg[synced_txg & TXG_MASK];
	itxs_t *clean_me;

	mutex_enter(&itxg->itxg_lock);
	if (itxg->itxg_itxs == NULL || itxg->itxg_txg == ZILTEST_TXG) {
		mutex_exit(&itxg->itxg_lock);
		return;
	}
	ASSERT3U(itxg->itxg_txg, <=, synced_txg);
	ASSERT(itxg->itxg_txg != 0);
	ASSERT(zilog->zl_clean_taskq != NULL);
	atomic_add_64(&zilog->zl_itx_list_sz, -itxg->itxg_sod);
	itxg->itxg_sod = 0;
	clean_me = itxg->itxg_itxs;
	itxg->itxg_itxs = NULL;
	itxg->itxg_txg = 0;
	mutex_exit(&itxg->itxg_lock);
	/*
	 * Preferably start a task queue to free up the old itxs but
	 * if taskq_dispatch can't allocate resources to do that then
	 * free it in-line. This should be rare. Note, using TQ_SLEEP
	 * created a bad performance problem.
	 */
	if (taskq_dispatch(zilog->zl_clean_taskq,
	    (void (*)(void *))zil_itxg_clean, clean_me, TQ_NOSLEEP) == 0)
		zil_itxg_clean(clean_me);
}

/*
 * Get the list of itxs to commit into zl_itx_commit_list.
 */
static void
zil_get_commit_list(zilog_t *zilog)
{
	uint64_t otxg, txg;
	list_t *commit_list = &zilog->zl_itx_commit_list;
	uint64_t push_sod = 0;

	if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */
		otxg = ZILTEST_TXG;
	else
		otxg = spa_last_synced_txg(zilog->zl_spa) + 1;

	/*
	 * This is inherently racy, since there is nothing to prevent
	 * the last synced txg from changing. That's okay since we'll
	 * only commit things in the future.
	 */
	for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) {
		itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK];

		mutex_enter(&itxg->itxg_lock);
		if (itxg->itxg_txg != txg) {
			mutex_exit(&itxg->itxg_lock);
			continue;
		}

		/*
		 * If we're adding itx records to the zl_itx_commit_list,
		 * then the zil better be dirty in this "txg". We can assert
		 * that here since we're holding the itxg_lock which will
		 * prevent spa_sync from cleaning it. Once we add the itxs
		 * to the zl_itx_commit_list we must commit it to disk even
		 * if it's unnecessary (i.e. the txg was synced).
		 */
		ASSERT(zilog_is_dirty_in_txg(zilog, txg) ||
		    spa_freeze_txg(zilog->zl_spa) != UINT64_MAX);
		list_move_tail(commit_list, &itxg->itxg_itxs->i_sync_list);
		push_sod += itxg->itxg_sod;
		itxg->itxg_sod = 0;

		mutex_exit(&itxg->itxg_lock);
	}
	atomic_add_64(&zilog->zl_itx_list_sz, -push_sod);
}

/*
 * Move the async itxs for a specified object to commit into sync lists.
 */
void
zil_async_to_sync(zilog_t *zilog, uint64_t foid)
{
	uint64_t otxg, txg;
	itx_async_node_t *ian;
	avl_tree_t *t;
	avl_index_t where;

	if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */
		otxg = ZILTEST_TXG;
	else
		otxg = spa_last_synced_txg(zilog->zl_spa) + 1;

	/*
	 * This is inherently racy, since there is nothing to prevent
	 * the last synced txg from changing.
	 */
	for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) {
		itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK];

		mutex_enter(&itxg->itxg_lock);
		if (itxg->itxg_txg != txg) {
			mutex_exit(&itxg->itxg_lock);
			continue;
		}

		/*
		 * If a foid is specified then find that node and append its
		 * list. Otherwise walk the tree appending all the lists
		 * to the sync list. We add to the end rather than the
		 * beginning to ensure the create has happened.
		 */
		t = &itxg->itxg_itxs->i_async_tree;
		if (foid != 0) {
			ian = avl_find(t, &foid, &where);
			if (ian != NULL) {
				list_move_tail(&itxg->itxg_itxs->i_sync_list,
				    &ian->ia_list);
			}
		} else {
			void *cookie = NULL;

			while ((ian = avl_destroy_nodes(t, &cookie)) != NULL) {
				list_move_tail(&itxg->itxg_itxs->i_sync_list,
				    &ian->ia_list);
				list_destroy(&ian->ia_list);
				kmem_free(ian, sizeof (itx_async_node_t));
			}
		}
		mutex_exit(&itxg->itxg_lock);
	}
}

static void
zil_commit_writer(zilog_t *zilog)
{
	uint64_t txg;
	itx_t *itx;
	lwb_t *lwb;
	spa_t *spa = zilog->zl_spa;
	int error = 0;

	ASSERT(zilog->zl_root_zio == NULL);

	mutex_exit(&zilog->zl_lock);

	zil_get_commit_list(zilog);

	/*
	 * Return if there's nothing to commit before we dirty the fs by
	 * calling zil_create().
	 */
	if (list_head(&zilog->zl_itx_commit_list) == NULL) {
		mutex_enter(&zilog->zl_lock);
		return;
	}

	if (zilog->zl_suspend) {
		lwb = NULL;
	} else {
		lwb = list_tail(&zilog->zl_lwb_list);
		if (lwb == NULL)
			lwb = zil_create(zilog);
	}

	DTRACE_PROBE1(zil__cw1, zilog_t *, zilog);
	while ((itx = list_head(&zilog->zl_itx_commit_list)) != NULL) {
		txg = itx->itx_lr.lrc_txg;
		ASSERT3U(txg, !=, 0);

		/*
		 * This is inherently racy and may result in us writing
		 * out a log block for a txg that was just synced. This is
		 * ok since we'll end up cleaning up that log block the next
		 * time we call zil_sync().
		 */
		if (txg > spa_last_synced_txg(spa) || txg > spa_freeze_txg(spa))
			lwb = zil_lwb_commit(zilog, itx, lwb);
		list_remove(&zilog->zl_itx_commit_list, itx);
		kmem_free(itx, offsetof(itx_t, itx_lr)
		    + itx->itx_lr.lrc_reclen);
	}
	DTRACE_PROBE1(zil__cw2, zilog_t *, zilog);

	/* write the last block out */
	if (lwb != NULL && lwb->lwb_zio != NULL)
		lwb = zil_lwb_write_start(zilog, lwb, B_TRUE);

	zilog->zl_cur_used = 0;

	/*
	 * Wait if necessary for the log blocks to be on stable storage.
	 */
	if (zilog->zl_root_zio) {
		error = zio_wait(zilog->zl_root_zio);
		zilog->zl_root_zio = NULL;
		zil_flush_vdevs(zilog);
	}

	if (error || lwb == NULL)
		txg_wait_synced(zilog->zl_dmu_pool, 0);

	mutex_enter(&zilog->zl_lock);

	/*
	 * Remember the highest committed log sequence number for ztest.
	 * We only update this value when all the log writes succeeded,
	 * because ztest wants to ASSERT that it got the whole log chain.
	 */
	if (error == 0 && lwb != NULL)
		zilog->zl_commit_lr_seq = zilog->zl_lr_seq;
}

/*
 * Commit zfs transactions to stable storage.
 * If foid is 0 push out all transactions, otherwise push only those
 * for that object or that might reference that object.
 *
 * itxs are committed in batches. In a heavily stressed zil there will be
 * a commit writer thread that is writing out a bunch of itxs to the log
 * for a set of committing threads (cthreads) in the same batch as the writer.
 * Those cthreads are all waiting on the same cv for that batch.
 *
 * There will also be a different and growing batch of threads that are
 * waiting to commit (qthreads). When the committing batch completes
 * a transition occurs such that the cthreads exit and the qthreads become
 * cthreads. One of the new cthreads becomes the writer thread for the
 * batch. Any new threads arriving become new qthreads.
 *
 * Only 2 condition variables are needed and there's no transition
 * between the two cvs needed. They just flip-flop between qthreads
 * and cthreads.
 *
 * Using this scheme we can efficiently wake up only those threads
 * whose batch has been committed.
 */
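/*
 * Editorial sketch (not in the original source) of the flip-flop in
 * zil_commit() below.  A thread joining batch N waits on
 * zl_cv_batch[N & 1]; when the writer finishes batch N it broadcasts
 * that cv to release batch N's waiters and signals zl_cv_batch[(N+1) & 1]
 * so one waiter from batch N+1 becomes the next writer:
 *
 *	batch N   (cthreads) --waits on--> zl_cv_batch[N & 1]
 *	batch N+1 (qthreads) --waits on--> zl_cv_batch[(N+1) & 1]
 */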
void
zil_commit(zilog_t *zilog, uint64_t foid)
{
	uint64_t mybatch;

	if (zilog->zl_sync == ZFS_SYNC_DISABLED)
		return;

	/* move the async itxs for the foid to the sync queues */
	zil_async_to_sync(zilog, foid);

	mutex_enter(&zilog->zl_lock);
	mybatch = zilog->zl_next_batch;
	while (zilog->zl_writer) {
		cv_wait(&zilog->zl_cv_batch[mybatch & 1], &zilog->zl_lock);
		if (mybatch <= zilog->zl_com_batch) {
			mutex_exit(&zilog->zl_lock);
			return;
		}
	}

	zilog->zl_next_batch++;
	zilog->zl_writer = B_TRUE;
	zil_commit_writer(zilog);
	zilog->zl_com_batch = mybatch;
	zilog->zl_writer = B_FALSE;
	mutex_exit(&zilog->zl_lock);

	/* wake up one thread to become the next writer */
	cv_signal(&zilog->zl_cv_batch[(mybatch+1) & 1]);

	/* wake up all threads waiting for this batch to be committed */
	cv_broadcast(&zilog->zl_cv_batch[mybatch & 1]);
}

/*
 * Called in syncing context to free committed log blocks and update log header.
 */
void
zil_sync(zilog_t *zilog, dmu_tx_t *tx)
{
	zil_header_t *zh = zil_header_in_syncing_context(zilog);
	uint64_t txg = dmu_tx_get_txg(tx);
	spa_t *spa = zilog->zl_spa;
	uint64_t *replayed_seq = &zilog->zl_replayed_seq[txg & TXG_MASK];
	lwb_t *lwb;

	/*
	 * We don't zero out zl_destroy_txg, so make sure we don't try
	 * to destroy it twice.
	 */
	if (spa_sync_pass(spa) != 1)
		return;

	mutex_enter(&zilog->zl_lock);

	ASSERT(zilog->zl_stop_sync == 0);

	if (*replayed_seq != 0) {
		ASSERT(zh->zh_replay_seq < *replayed_seq);
		zh->zh_replay_seq = *replayed_seq;
		*replayed_seq = 0;
	}

	if (zilog->zl_destroy_txg == txg) {
		blkptr_t blk = zh->zh_log;

		ASSERT(list_head(&zilog->zl_lwb_list) == NULL);

		bzero(zh, sizeof (zil_header_t));
		bzero(zilog->zl_replayed_seq, sizeof (zilog->zl_replayed_seq));

		if (zilog->zl_keep_first) {
			/*
			 * If this block was part of log chain that couldn't
			 * be claimed because a device was missing during
			 * zil_claim(), but that device later returns,
			 * then this block could erroneously appear valid.
			 * To guard against this, assign a new GUID to the new
			 * log chain so it doesn't matter what blk points to.
			 */
			zil_init_log_chain(zilog, &blk);
			zh->zh_log = blk;
		}
	}

	while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
		zh->zh_log = lwb->lwb_blk;
		if (lwb->lwb_buf != NULL || lwb->lwb_max_txg > txg)
			break;
		list_remove(&zilog->zl_lwb_list, lwb);
		zio_free_zil(spa, txg, &lwb->lwb_blk);
		kmem_cache_free(zil_lwb_cache, lwb);

		/*
		 * If we don't have anything left in the lwb list then
		 * we've had an allocation failure and we need to zero
		 * out the zil_header blkptr so that we don't end
		 * up freeing the same block twice.
		 */
		if (list_head(&zilog->zl_lwb_list) == NULL)
			BP_ZERO(&zh->zh_log);
	}
	mutex_exit(&zilog->zl_lock);
}

void
zil_init(void)
{
	zil_lwb_cache = kmem_cache_create("zil_lwb_cache",
	    sizeof (struct lwb), 0, NULL, NULL, NULL, NULL, NULL, 0);
}

void
zil_fini(void)
{
	kmem_cache_destroy(zil_lwb_cache);
}

void
zil_set_sync(zilog_t *zilog, uint64_t sync)
{
	zilog->zl_sync = sync;
}

void
zil_set_logbias(zilog_t *zilog, uint64_t logbias)
{
	zilog->zl_logbias = logbias;
}

zilog_t *
zil_alloc(objset_t *os, zil_header_t *zh_phys)
{
	zilog_t *zilog;

	zilog = kmem_zalloc(sizeof (zilog_t), KM_SLEEP);

	zilog->zl_header = zh_phys;
	zilog->zl_os = os;
	zilog->zl_spa = dmu_objset_spa(os);
	zilog->zl_dmu_pool = dmu_objset_pool(os);
	zilog->zl_destroy_txg = TXG_INITIAL - 1;
	zilog->zl_logbias = dmu_objset_logbias(os);
	zilog->zl_sync = dmu_objset_syncprop(os);
	zilog->zl_next_batch = 1;

	mutex_init(&zilog->zl_lock, NULL, MUTEX_DEFAULT, NULL);

	for (int i = 0; i < TXG_SIZE; i++) {
		mutex_init(&zilog->zl_itxg[i].itxg_lock, NULL,
		    MUTEX_DEFAULT, NULL);
	}

	list_create(&zilog->zl_lwb_list, sizeof (lwb_t),
	    offsetof(lwb_t, lwb_node));

	list_create(&zilog->zl_itx_commit_list, sizeof (itx_t),
	    offsetof(itx_t, itx_node));

	mutex_init(&zilog->zl_vdev_lock, NULL, MUTEX_DEFAULT, NULL);

	avl_create(&zilog->zl_vdev_tree, zil_vdev_compare,
	    sizeof (zil_vdev_node_t), offsetof(zil_vdev_node_t, zv_node));

	cv_init(&zilog->zl_cv_writer, NULL, CV_DEFAULT, NULL);
	cv_init(&zilog->zl_cv_suspend, NULL, CV_DEFAULT, NULL);
	cv_init(&zilog->zl_cv_batch[0], NULL, CV_DEFAULT, NULL);
	cv_init(&zilog->zl_cv_batch[1], NULL, CV_DEFAULT, NULL);

	return (zilog);
}

void
zil_free(zilog_t *zilog)
{
	zilog->zl_stop_sync = 1;

	ASSERT0(zilog->zl_suspend);
	ASSERT0(zilog->zl_suspending);

	ASSERT(list_is_empty(&zilog->zl_lwb_list));
	list_destroy(&zilog->zl_lwb_list);

	avl_destroy(&zilog->zl_vdev_tree);
	mutex_destroy(&zilog->zl_vdev_lock);

	ASSERT(list_is_empty(&zilog->zl_itx_commit_list));
	list_destroy(&zilog->zl_itx_commit_list);

	for (int i = 0; i < TXG_SIZE; i++) {
		/*
		 * It's possible for an itx to be generated that doesn't dirty
		 * a txg (e.g. ztest TX_TRUNCATE). So there's no zil_clean()
		 * callback to remove the entry. We remove those here.
		 *
		 * Also free up the ziltest itxs.
		 */
		if (zilog->zl_itxg[i].itxg_itxs)
			zil_itxg_clean(zilog->zl_itxg[i].itxg_itxs);
		mutex_destroy(&zilog->zl_itxg[i].itxg_lock);
	}

	mutex_destroy(&zilog->zl_lock);

	cv_destroy(&zilog->zl_cv_writer);
	cv_destroy(&zilog->zl_cv_suspend);
	cv_destroy(&zilog->zl_cv_batch[0]);
	cv_destroy(&zilog->zl_cv_batch[1]);

	kmem_free(zilog, sizeof (zilog_t));
}

/*
 * Open an intent log.
 */
zilog_t *
zil_open(objset_t *os, zil_get_data_t *get_data)
{
	zilog_t *zilog = dmu_objset_zil(os);

	ASSERT(zilog->zl_clean_taskq == NULL);
	ASSERT(zilog->zl_get_data == NULL);
	ASSERT(list_is_empty(&zilog->zl_lwb_list));

	zilog->zl_get_data = get_data;
	zilog->zl_clean_taskq = taskq_create("zil_clean", 1, minclsyspri,
	    2, 2, TASKQ_PREPOPULATE);

	return (zilog);
}

/*
 * Close an intent log.
 */
void
zil_close(zilog_t *zilog)
{
	lwb_t *lwb;
	uint64_t txg = 0;

	zil_commit(zilog, 0); /* commit all itx */

	/*
	 * The lwb_max_txg for the stubby lwb will reflect the last activity
	 * for the zil. After a txg_wait_synced() on the txg we know all the
	 * callbacks have occurred that may clean the zil. Only then can we
	 * destroy the zl_clean_taskq.
	 */
	mutex_enter(&zilog->zl_lock);
	lwb = list_tail(&zilog->zl_lwb_list);
	if (lwb != NULL)
		txg = lwb->lwb_max_txg;
	mutex_exit(&zilog->zl_lock);
	if (txg)
		txg_wait_synced(zilog->zl_dmu_pool, txg);

	if (zilog_is_dirty(zilog))
		zfs_dbgmsg("zil (%p) is dirty, txg %llu", zilog, txg);
	VERIFY(!zilog_is_dirty(zilog));

	taskq_destroy(zilog->zl_clean_taskq);
	zilog->zl_clean_taskq = NULL;
	zilog->zl_get_data = NULL;

	/*
	 * We should have only one LWB left on the list; remove it now.
	 */
	mutex_enter(&zilog->zl_lock);
	lwb = list_head(&zilog->zl_lwb_list);
	if (lwb != NULL) {
		ASSERT(lwb == list_tail(&zilog->zl_lwb_list));
		list_remove(&zilog->zl_lwb_list, lwb);
		zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
		kmem_cache_free(zil_lwb_cache, lwb);
	}
	mutex_exit(&zilog->zl_lock);
}

static char *suspend_tag = "zil suspending";

/*
 * Suspend an intent log. While in suspended mode, we still honor
 * synchronous semantics, but we rely on txg_wait_synced() to do it.
 * On old version pools, we suspend the log briefly when taking a
 * snapshot so that it will have an empty intent log.
 *
 * Long holds are not really intended to be used the way we do here --
 * held for such a short time. A concurrent caller of dsl_dataset_long_held()
 * could fail. Therefore we take pains to only put a long hold if it is
 * actually necessary. Fortunately, it will only be necessary if the
 * objset is currently mounted (or the ZVOL equivalent). In that case it
 * will already have a long hold, so we are not really making things any worse.
 *
 * Ideally, we would locate the existing long-holder (i.e. the zfsvfs_t or
 * zvol_state_t), and use their mechanism to prevent their hold from being
 * dropped (e.g. VFS_HOLD()). However, that would be even more pain for
 * very little gain.
 *
 * If cookiep == NULL, this does both the suspend & resume.
 * Otherwise, it returns with the dataset "long held", and the cookie
 * should be passed into zil_resume().
 */
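/*
 * Editorial usage sketch (not in the original source), per the contract
 * described above:
 *
 *	void *cookie;
 *	int error = zil_suspend("pool/fs", &cookie);
 *	if (error == 0) {
 *		...		(ZIL is empty and stays suspended)
 *		zil_resume(cookie);
 *	}
 *
 * Passing cookiep == NULL collapses the suspend and resume into one call.
 */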
static char *suspend_tag = "zil suspending";

/*
 * Suspend an intent log.  While in suspended mode, we still honor
 * synchronous semantics, but we rely on txg_wait_synced() to do it.
 * On old version pools, we suspend the log briefly when taking a
 * snapshot so that it will have an empty intent log.
 *
 * Long holds are not really intended to be used the way we do here --
 * held for such a short time.  A concurrent caller of
 * dsl_dataset_long_held() could fail.  Therefore we take pains to only
 * put a long hold if it is actually necessary.  Fortunately, it will
 * only be necessary if the objset is currently mounted (or the ZVOL
 * equivalent).  In that case it will already have a long hold, so we
 * are not really making things any worse.
 *
 * Ideally, we would locate the existing long-holder (i.e. the zfsvfs_t or
 * zvol_state_t), and use their mechanism to prevent their hold from being
 * dropped (e.g. VFS_HOLD()).  However, that would be even more pain for
 * very little gain.
 *
 * if cookiep == NULL, this does both the suspend & resume.
 * Otherwise, it returns with the dataset "long held", and the cookie
 * should be passed into zil_resume().
 */
int
zil_suspend(const char *osname, void **cookiep)
{
	objset_t *os;
	zilog_t *zilog;
	const zil_header_t *zh;
	int error;

	error = dmu_objset_hold(osname, suspend_tag, &os);
	if (error != 0)
		return (error);
	zilog = dmu_objset_zil(os);

	mutex_enter(&zilog->zl_lock);
	zh = zilog->zl_header;

	if (zh->zh_flags & ZIL_REPLAY_NEEDED) {		/* unplayed log */
		mutex_exit(&zilog->zl_lock);
		dmu_objset_rele(os, suspend_tag);
		return (SET_ERROR(EBUSY));
	}

	/*
	 * Don't put a long hold in the cases where we can avoid it.  This
	 * is when there is no cookie so we are doing a suspend & resume
	 * (i.e. called from zil_vdev_offline()), and there's nothing to do
	 * for the suspend because it's already suspended, or there's no ZIL.
	 */
	if (cookiep == NULL && !zilog->zl_suspending &&
	    (zilog->zl_suspend > 0 || BP_IS_HOLE(&zh->zh_log))) {
		mutex_exit(&zilog->zl_lock);
		dmu_objset_rele(os, suspend_tag);
		return (0);
	}

	dsl_dataset_long_hold(dmu_objset_ds(os), suspend_tag);
	dsl_pool_rele(dmu_objset_pool(os), suspend_tag);

	zilog->zl_suspend++;

	if (zilog->zl_suspend > 1) {
		/*
		 * Someone else is already suspending it.
		 * Just wait for them to finish.
		 */

		while (zilog->zl_suspending)
			cv_wait(&zilog->zl_cv_suspend, &zilog->zl_lock);
		mutex_exit(&zilog->zl_lock);

		if (cookiep == NULL)
			zil_resume(os);
		else
			*cookiep = os;
		return (0);
	}

	/*
	 * If there is no pointer to an on-disk block, this ZIL must not
	 * be active (e.g. filesystem not mounted), so there's nothing
	 * to clean up.
	 */
	if (BP_IS_HOLE(&zh->zh_log)) {
		ASSERT(cookiep != NULL); /* fast path already handled */

		*cookiep = os;
		mutex_exit(&zilog->zl_lock);
		return (0);
	}

	zilog->zl_suspending = B_TRUE;
	mutex_exit(&zilog->zl_lock);

	zil_commit(zilog, 0);

	zil_destroy(zilog, B_FALSE);

	mutex_enter(&zilog->zl_lock);
	zilog->zl_suspending = B_FALSE;
	cv_broadcast(&zilog->zl_cv_suspend);
	mutex_exit(&zilog->zl_lock);

	if (cookiep == NULL)
		zil_resume(os);
	else
		*cookiep = os;
	return (0);
}

void
zil_resume(void *cookie)
{
	objset_t *os = cookie;
	zilog_t *zilog = dmu_objset_zil(os);

	mutex_enter(&zilog->zl_lock);
	ASSERT(zilog->zl_suspend != 0);
	zilog->zl_suspend--;
	mutex_exit(&zilog->zl_lock);
	dsl_dataset_long_rele(dmu_objset_ds(os), suspend_tag);
	dsl_dataset_rele(dmu_objset_ds(os), suspend_tag);
}
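/*
 * Illustrative sketch (assumption): the two calling conventions for
 * zil_suspend() described above.  With a cookie, the suspension is held
 * across some operation and released by zil_resume(); with a NULL
 * cookiep, the suspend and resume happen back to back:
 *
 *	void *cookie;
 *	int error = zil_suspend(osname, &cookie);
 *	if (error == 0) {
 *		...ZIL is empty and the dataset is long held...
 *		zil_resume(cookie);
 *	}
 *
 *	(void) zil_suspend(osname, NULL);  -- suspend & resume in one call
 */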
"CI" : ""); 2033 2034 return (error); 2035} 2036 2037static int 2038zil_replay_log_record(zilog_t *zilog, lr_t *lr, void *zra, uint64_t claim_txg) 2039{ 2040 zil_replay_arg_t *zr = zra; 2041 const zil_header_t *zh = zilog->zl_header; 2042 uint64_t reclen = lr->lrc_reclen; 2043 uint64_t txtype = lr->lrc_txtype; 2044 int error = 0; 2045 2046 zilog->zl_replaying_seq = lr->lrc_seq; 2047 2048 if (lr->lrc_seq <= zh->zh_replay_seq) /* already replayed */ 2049 return (0); 2050 2051 if (lr->lrc_txg < claim_txg) /* already committed */ 2052 return (0); 2053 2054 /* Strip case-insensitive bit, still present in log record */ 2055 txtype &= ~TX_CI; 2056 2057 if (txtype == 0 || txtype >= TX_MAX_TYPE) 2058 return (zil_replay_error(zilog, lr, EINVAL)); 2059 2060 /* 2061 * If this record type can be logged out of order, the object 2062 * (lr_foid) may no longer exist. That's legitimate, not an error. 2063 */ 2064 if (TX_OOO(txtype)) { 2065 error = dmu_object_info(zilog->zl_os, 2066 ((lr_ooo_t *)lr)->lr_foid, NULL); 2067 if (error == ENOENT || error == EEXIST) 2068 return (0); 2069 } 2070 2071 /* 2072 * Make a copy of the data so we can revise and extend it. 2073 */ 2074 bcopy(lr, zr->zr_lr, reclen); 2075 2076 /* 2077 * If this is a TX_WRITE with a blkptr, suck in the data. 2078 */ 2079 if (txtype == TX_WRITE && reclen == sizeof (lr_write_t)) { 2080 error = zil_read_log_data(zilog, (lr_write_t *)lr, 2081 zr->zr_lr + reclen); 2082 if (error != 0) 2083 return (zil_replay_error(zilog, lr, error)); 2084 } 2085 2086 /* 2087 * The log block containing this lr may have been byteswapped 2088 * so that we can easily examine common fields like lrc_txtype. 2089 * However, the log is a mix of different record types, and only the 2090 * replay vectors know how to byteswap their records. Therefore, if 2091 * the lr was byteswapped, undo it before invoking the replay vector. 2092 */ 2093 if (zr->zr_byteswap) 2094 byteswap_uint64_array(zr->zr_lr, reclen); 2095 2096 /* 2097 * We must now do two things atomically: replay this log record, 2098 * and update the log header sequence number to reflect the fact that 2099 * we did so. At the end of each replay function the sequence number 2100 * is updated if we are in replay mode. 2101 */ 2102 error = zr->zr_replay[txtype](zr->zr_arg, zr->zr_lr, zr->zr_byteswap); 2103 if (error != 0) { 2104 /* 2105 * The DMU's dnode layer doesn't see removes until the txg 2106 * commits, so a subsequent claim can spuriously fail with 2107 * EEXIST. So if we receive any error we try syncing out 2108 * any removes then retry the transaction. Note that we 2109 * specify B_FALSE for byteswap now, so we don't do it twice. 2110 */ 2111 txg_wait_synced(spa_get_dsl(zilog->zl_spa), 0); 2112 error = zr->zr_replay[txtype](zr->zr_arg, zr->zr_lr, B_FALSE); 2113 if (error != 0) 2114 return (zil_replay_error(zilog, lr, error)); 2115 } 2116 return (0); 2117} 2118 2119/* ARGSUSED */ 2120static int 2121zil_incr_blks(zilog_t *zilog, blkptr_t *bp, void *arg, uint64_t claim_txg) 2122{ 2123 zilog->zl_replay_blks++; 2124 2125 return (0); 2126} 2127 2128/* 2129 * If this dataset has a non-empty intent log, replay it and destroy it. 
/*
 * If this dataset has a non-empty intent log, replay it and destroy it.
 */
void
zil_replay(objset_t *os, void *arg, zil_replay_func_t *replay_func[TX_MAX_TYPE])
{
	zilog_t *zilog = dmu_objset_zil(os);
	const zil_header_t *zh = zilog->zl_header;
	zil_replay_arg_t zr;

	if ((zh->zh_flags & ZIL_REPLAY_NEEDED) == 0) {
		zil_destroy(zilog, B_TRUE);
		return;
	}

	zr.zr_replay = replay_func;
	zr.zr_arg = arg;
	zr.zr_byteswap = BP_SHOULD_BYTESWAP(&zh->zh_log);
	zr.zr_lr = kmem_alloc(2 * SPA_MAXBLOCKSIZE, KM_SLEEP);

	/*
	 * Wait for in-progress removes to sync before starting replay.
	 */
	txg_wait_synced(zilog->zl_dmu_pool, 0);

	zilog->zl_replay = B_TRUE;
	zilog->zl_replay_time = ddi_get_lbolt();
	ASSERT(zilog->zl_replay_blks == 0);
	(void) zil_parse(zilog, zil_incr_blks, zil_replay_log_record, &zr,
	    zh->zh_claim_txg);
	kmem_free(zr.zr_lr, 2 * SPA_MAXBLOCKSIZE);

	zil_destroy(zilog, B_FALSE);
	txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);
	zilog->zl_replay = B_FALSE;
}

boolean_t
zil_replaying(zilog_t *zilog, dmu_tx_t *tx)
{
	if (zilog->zl_sync == ZFS_SYNC_DISABLED)
		return (B_TRUE);

	if (zilog->zl_replay) {
		dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
		zilog->zl_replayed_seq[dmu_tx_get_txg(tx) & TXG_MASK] =
		    zilog->zl_replaying_seq;
		return (B_TRUE);
	}

	return (B_FALSE);
}

/* ARGSUSED */
int
zil_vdev_offline(const char *osname, void *arg)
{
	int error;

	error = zil_suspend(osname, NULL);
	if (error != 0)
		return (SET_ERROR(EEXIST));
	return (0);
}
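/*
 * Illustrative sketch (assumption): zil_vdev_offline() is shaped as a
 * dmu_objset_find() callback so that taking a log device offline can
 * suspend (and thereby empty) the ZIL of every dataset in the pool:
 *
 *	(void) dmu_objset_find(spa_name(spa), zil_vdev_offline,
 *	    NULL, DS_FIND_CHILDREN);
 */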