/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2018 by Delphix. All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 */

/* Portions Copyright 2010 Robert Milkowski */

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/dmu.h>
#include <sys/zap.h>
#include <sys/arc.h>
#include <sys/stat.h>
#include <sys/resource.h>
#include <sys/zil.h>
#include <sys/zil_impl.h>
#include <sys/dsl_dataset.h>
#include <sys/vdev_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_pool.h>
#include <sys/abd.h>

/*
 * The ZFS Intent Log (ZIL) saves "transaction records" (itxs) of system
 * calls that change the file system. Each itx has enough information to
 * be able to replay them after a system crash, power loss, or
 * equivalent failure mode. These are stored in memory until either:
 *
 *   1. they are committed to the pool by the DMU transaction group
 *      (txg), at which point they can be discarded; or
 *   2. they are committed to the on-disk ZIL for the dataset being
 *      modified (e.g. due to an fsync, O_DSYNC, or other synchronous
 *      requirement).
 *
 * In the event of a crash or power loss, the itxs contained by each
 * dataset's on-disk ZIL will be replayed when that dataset is first
 * instantiated (e.g. if the dataset is a normal filesystem, when it is
 * first mounted).
 *
 * As hinted at above, there is one ZIL per dataset (both the in-memory
 * representation, and the on-disk representation). The on-disk format
 * consists of 3 parts:
 *
 *	- a single, per-dataset, ZIL header; which points to a chain of
 *	- zero or more ZIL blocks; each of which contains
 *	- zero or more ZIL records
 *
 * A ZIL record holds the information necessary to replay a single
 * system call transaction. A ZIL block can hold many ZIL records, and
 * the blocks are chained together, similarly to a singly linked list.
 *
 * Each ZIL block contains a block pointer (blkptr_t) to the next ZIL
 * block in the chain, and the ZIL header points to the first block in
 * the chain.
 *
 * Note, there is not a fixed place in the pool to hold these ZIL
 * blocks; they are dynamically allocated and freed as needed from the
 * blocks available on the pool, though they can be preferentially
 * allocated from a dedicated "log" vdev.
 */

/*
 * This controls the amount of time that a ZIL block (lwb) will remain
 * "open" when it isn't "full", and it has a thread waiting for it to be
 * committed to stable storage.
Please refer to the zil_commit_waiter() 88 * function (and the comments within it) for more details. 89 */ 90int zfs_commit_timeout_pct = 5; 91 92/* 93 * Disable intent logging replay. This global ZIL switch affects all pools. 94 */ 95int zil_replay_disable = 0; 96SYSCTL_DECL(_vfs_zfs); 97SYSCTL_INT(_vfs_zfs, OID_AUTO, zil_replay_disable, CTLFLAG_RWTUN, 98 &zil_replay_disable, 0, "Disable intent logging replay"); 99 100/* 101 * Tunable parameter for debugging or performance analysis. Setting 102 * zfs_nocacheflush will cause corruption on power loss if a volatile 103 * out-of-order write cache is enabled. 104 */ 105boolean_t zfs_nocacheflush = B_FALSE; 106SYSCTL_INT(_vfs_zfs, OID_AUTO, cache_flush_disable, CTLFLAG_RWTUN, 107 &zfs_nocacheflush, 0, "Disable cache flush"); 108boolean_t zfs_trim_enabled = B_TRUE; 109SYSCTL_DECL(_vfs_zfs_trim); 110SYSCTL_INT(_vfs_zfs_trim, OID_AUTO, enabled, CTLFLAG_RDTUN, &zfs_trim_enabled, 0, 111 "Enable ZFS TRIM"); 112 113/* 114 * Limit SLOG write size per commit executed with synchronous priority. 115 * Any writes above that will be executed with lower (asynchronous) priority 116 * to limit potential SLOG device abuse by single active ZIL writer. 117 */ 118uint64_t zil_slog_bulk = 768 * 1024; 119SYSCTL_QUAD(_vfs_zfs, OID_AUTO, zil_slog_bulk, CTLFLAG_RWTUN, 120 &zil_slog_bulk, 0, "Maximal SLOG commit size with sync priority"); 121 122static kmem_cache_t *zil_lwb_cache; 123static kmem_cache_t *zil_zcw_cache; 124 125#define LWB_EMPTY(lwb) ((BP_GET_LSIZE(&lwb->lwb_blk) - \ 126 sizeof (zil_chain_t)) == (lwb->lwb_sz - lwb->lwb_nused)) 127 128static int 129zil_bp_compare(const void *x1, const void *x2) 130{ 131 const dva_t *dva1 = &((zil_bp_node_t *)x1)->zn_dva; 132 const dva_t *dva2 = &((zil_bp_node_t *)x2)->zn_dva; 133 134 int cmp = AVL_CMP(DVA_GET_VDEV(dva1), DVA_GET_VDEV(dva2)); 135 if (likely(cmp)) 136 return (cmp); 137 138 return (AVL_CMP(DVA_GET_OFFSET(dva1), DVA_GET_OFFSET(dva2))); 139} 140 141static void 142zil_bp_tree_init(zilog_t *zilog) 143{ 144 avl_create(&zilog->zl_bp_tree, zil_bp_compare, 145 sizeof (zil_bp_node_t), offsetof(zil_bp_node_t, zn_node)); 146} 147 148static void 149zil_bp_tree_fini(zilog_t *zilog) 150{ 151 avl_tree_t *t = &zilog->zl_bp_tree; 152 zil_bp_node_t *zn; 153 void *cookie = NULL; 154 155 while ((zn = avl_destroy_nodes(t, &cookie)) != NULL) 156 kmem_free(zn, sizeof (zil_bp_node_t)); 157 158 avl_destroy(t); 159} 160 161int 162zil_bp_tree_add(zilog_t *zilog, const blkptr_t *bp) 163{ 164 avl_tree_t *t = &zilog->zl_bp_tree; 165 const dva_t *dva; 166 zil_bp_node_t *zn; 167 avl_index_t where; 168 169 if (BP_IS_EMBEDDED(bp)) 170 return (0); 171 172 dva = BP_IDENTITY(bp); 173 174 if (avl_find(t, dva, &where) != NULL) 175 return (SET_ERROR(EEXIST)); 176 177 zn = kmem_alloc(sizeof (zil_bp_node_t), KM_SLEEP); 178 zn->zn_dva = *dva; 179 avl_insert(t, zn, where); 180 181 return (0); 182} 183 184static zil_header_t * 185zil_header_in_syncing_context(zilog_t *zilog) 186{ 187 return ((zil_header_t *)zilog->zl_header); 188} 189 190static void 191zil_init_log_chain(zilog_t *zilog, blkptr_t *bp) 192{ 193 zio_cksum_t *zc = &bp->blk_cksum; 194 195 zc->zc_word[ZIL_ZC_GUID_0] = spa_get_random(-1ULL); 196 zc->zc_word[ZIL_ZC_GUID_1] = spa_get_random(-1ULL); 197 zc->zc_word[ZIL_ZC_OBJSET] = dmu_objset_id(zilog->zl_os); 198 zc->zc_word[ZIL_ZC_SEQ] = 1ULL; 199} 200 201/* 202 * Read a log block and make sure it's valid. 
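 *
 * Two block layouts are handled below: with the ZILOG2 checksum the
 * zil_chain_t sits at the start of the block and its zc_nused field
 * records how much of the block is in use; with the older ZILOG
 * checksum the zil_chain_t sits at the very end of the block instead.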
203 */ 204static int 205zil_read_log_block(zilog_t *zilog, const blkptr_t *bp, blkptr_t *nbp, void *dst, 206 char **end) 207{ 208 enum zio_flag zio_flags = ZIO_FLAG_CANFAIL; 209 arc_flags_t aflags = ARC_FLAG_WAIT; 210 arc_buf_t *abuf = NULL; 211 zbookmark_phys_t zb; 212 int error; 213 214 if (zilog->zl_header->zh_claim_txg == 0) 215 zio_flags |= ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB; 216 217 if (!(zilog->zl_header->zh_flags & ZIL_CLAIM_LR_SEQ_VALID)) 218 zio_flags |= ZIO_FLAG_SPECULATIVE; 219 220 SET_BOOKMARK(&zb, bp->blk_cksum.zc_word[ZIL_ZC_OBJSET], 221 ZB_ZIL_OBJECT, ZB_ZIL_LEVEL, bp->blk_cksum.zc_word[ZIL_ZC_SEQ]); 222 223 error = arc_read(NULL, zilog->zl_spa, bp, arc_getbuf_func, &abuf, 224 ZIO_PRIORITY_SYNC_READ, zio_flags, &aflags, &zb); 225 226 if (error == 0) { 227 zio_cksum_t cksum = bp->blk_cksum; 228 229 /* 230 * Validate the checksummed log block. 231 * 232 * Sequence numbers should be... sequential. The checksum 233 * verifier for the next block should be bp's checksum plus 1. 234 * 235 * Also check the log chain linkage and size used. 236 */ 237 cksum.zc_word[ZIL_ZC_SEQ]++; 238 239 if (BP_GET_CHECKSUM(bp) == ZIO_CHECKSUM_ZILOG2) { 240 zil_chain_t *zilc = abuf->b_data; 241 char *lr = (char *)(zilc + 1); 242 uint64_t len = zilc->zc_nused - sizeof (zil_chain_t); 243 244 if (bcmp(&cksum, &zilc->zc_next_blk.blk_cksum, 245 sizeof (cksum)) || BP_IS_HOLE(&zilc->zc_next_blk)) { 246 error = SET_ERROR(ECKSUM); 247 } else { 248 ASSERT3U(len, <=, SPA_OLD_MAXBLOCKSIZE); 249 bcopy(lr, dst, len); 250 *end = (char *)dst + len; 251 *nbp = zilc->zc_next_blk; 252 } 253 } else { 254 char *lr = abuf->b_data; 255 uint64_t size = BP_GET_LSIZE(bp); 256 zil_chain_t *zilc = (zil_chain_t *)(lr + size) - 1; 257 258 if (bcmp(&cksum, &zilc->zc_next_blk.blk_cksum, 259 sizeof (cksum)) || BP_IS_HOLE(&zilc->zc_next_blk) || 260 (zilc->zc_nused > (size - sizeof (*zilc)))) { 261 error = SET_ERROR(ECKSUM); 262 } else { 263 ASSERT3U(zilc->zc_nused, <=, 264 SPA_OLD_MAXBLOCKSIZE); 265 bcopy(lr, dst, zilc->zc_nused); 266 *end = (char *)dst + zilc->zc_nused; 267 *nbp = zilc->zc_next_blk; 268 } 269 } 270 271 arc_buf_destroy(abuf, &abuf); 272 } 273 274 return (error); 275} 276 277/* 278 * Read a TX_WRITE log data block. 279 */ 280static int 281zil_read_log_data(zilog_t *zilog, const lr_write_t *lr, void *wbuf) 282{ 283 enum zio_flag zio_flags = ZIO_FLAG_CANFAIL; 284 const blkptr_t *bp = &lr->lr_blkptr; 285 arc_flags_t aflags = ARC_FLAG_WAIT; 286 arc_buf_t *abuf = NULL; 287 zbookmark_phys_t zb; 288 int error; 289 290 if (BP_IS_HOLE(bp)) { 291 if (wbuf != NULL) 292 bzero(wbuf, MAX(BP_GET_LSIZE(bp), lr->lr_length)); 293 return (0); 294 } 295 296 if (zilog->zl_header->zh_claim_txg == 0) 297 zio_flags |= ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB; 298 299 SET_BOOKMARK(&zb, dmu_objset_id(zilog->zl_os), lr->lr_foid, 300 ZB_ZIL_LEVEL, lr->lr_offset / BP_GET_LSIZE(bp)); 301 302 error = arc_read(NULL, zilog->zl_spa, bp, arc_getbuf_func, &abuf, 303 ZIO_PRIORITY_SYNC_READ, zio_flags, &aflags, &zb); 304 305 if (error == 0) { 306 if (wbuf != NULL) 307 bcopy(abuf->b_data, wbuf, arc_buf_size(abuf)); 308 arc_buf_destroy(abuf, &abuf); 309 } 310 311 return (error); 312} 313 314/* 315 * Parse the intent log, and call parse_func for each valid record within. 316 */ 317int 318zil_parse(zilog_t *zilog, zil_parse_blk_func_t *parse_blk_func, 319 zil_parse_lr_func_t *parse_lr_func, void *arg, uint64_t txg) 320{ 321 const zil_header_t *zh = zilog->zl_header; 322 boolean_t claimed = !!zh->zh_claim_txg; 323 uint64_t claim_blk_seq = claimed ? 
zh->zh_claim_blk_seq : UINT64_MAX; 324 uint64_t claim_lr_seq = claimed ? zh->zh_claim_lr_seq : UINT64_MAX; 325 uint64_t max_blk_seq = 0; 326 uint64_t max_lr_seq = 0; 327 uint64_t blk_count = 0; 328 uint64_t lr_count = 0; 329 blkptr_t blk, next_blk; 330 char *lrbuf, *lrp; 331 int error = 0; 332 333 /* 334 * Old logs didn't record the maximum zh_claim_lr_seq. 335 */ 336 if (!(zh->zh_flags & ZIL_CLAIM_LR_SEQ_VALID)) 337 claim_lr_seq = UINT64_MAX; 338 339 /* 340 * Starting at the block pointed to by zh_log we read the log chain. 341 * For each block in the chain we strongly check that block to 342 * ensure its validity. We stop when an invalid block is found. 343 * For each block pointer in the chain we call parse_blk_func(). 344 * For each record in each valid block we call parse_lr_func(). 345 * If the log has been claimed, stop if we encounter a sequence 346 * number greater than the highest claimed sequence number. 347 */ 348 lrbuf = zio_buf_alloc(SPA_OLD_MAXBLOCKSIZE); 349 zil_bp_tree_init(zilog); 350 351 for (blk = zh->zh_log; !BP_IS_HOLE(&blk); blk = next_blk) { 352 uint64_t blk_seq = blk.blk_cksum.zc_word[ZIL_ZC_SEQ]; 353 int reclen; 354 char *end; 355 356 if (blk_seq > claim_blk_seq) 357 break; 358 if ((error = parse_blk_func(zilog, &blk, arg, txg)) != 0) 359 break; 360 ASSERT3U(max_blk_seq, <, blk_seq); 361 max_blk_seq = blk_seq; 362 blk_count++; 363 364 if (max_lr_seq == claim_lr_seq && max_blk_seq == claim_blk_seq) 365 break; 366 367 error = zil_read_log_block(zilog, &blk, &next_blk, lrbuf, &end); 368 if (error != 0) 369 break; 370 371 for (lrp = lrbuf; lrp < end; lrp += reclen) { 372 lr_t *lr = (lr_t *)lrp; 373 reclen = lr->lrc_reclen; 374 ASSERT3U(reclen, >=, sizeof (lr_t)); 375 if (lr->lrc_seq > claim_lr_seq) 376 goto done; 377 if ((error = parse_lr_func(zilog, lr, arg, txg)) != 0) 378 goto done; 379 ASSERT3U(max_lr_seq, <, lr->lrc_seq); 380 max_lr_seq = lr->lrc_seq; 381 lr_count++; 382 } 383 } 384done: 385 zilog->zl_parse_error = error; 386 zilog->zl_parse_blk_seq = max_blk_seq; 387 zilog->zl_parse_lr_seq = max_lr_seq; 388 zilog->zl_parse_blk_count = blk_count; 389 zilog->zl_parse_lr_count = lr_count; 390 391 ASSERT(!claimed || !(zh->zh_flags & ZIL_CLAIM_LR_SEQ_VALID) || 392 (max_blk_seq == claim_blk_seq && max_lr_seq == claim_lr_seq)); 393 394 zil_bp_tree_fini(zilog); 395 zio_buf_free(lrbuf, SPA_OLD_MAXBLOCKSIZE); 396 397 return (error); 398} 399 400/* ARGSUSED */ 401static int 402zil_clear_log_block(zilog_t *zilog, blkptr_t *bp, void *tx, uint64_t first_txg) 403{ 404 ASSERT(!BP_IS_HOLE(bp)); 405 406 /* 407 * As we call this function from the context of a rewind to a 408 * checkpoint, each ZIL block whose txg is later than the txg 409 * that we rewind to is invalid. Thus, we return -1 so 410 * zil_parse() doesn't attempt to read it. 411 */ 412 if (bp->blk_birth >= first_txg) 413 return (-1); 414 415 if (zil_bp_tree_add(zilog, bp) != 0) 416 return (0); 417 418 zio_free(zilog->zl_spa, first_txg, bp); 419 return (0); 420} 421 422/* ARGSUSED */ 423static int 424zil_noop_log_record(zilog_t *zilog, lr_t *lrc, void *tx, uint64_t first_txg) 425{ 426 return (0); 427} 428 429static int 430zil_claim_log_block(zilog_t *zilog, blkptr_t *bp, void *tx, uint64_t first_txg) 431{ 432 /* 433 * Claim log block if not already committed and not already claimed. 434 * If tx == NULL, just verify that the block is claimable. 
435 */ 436 if (BP_IS_HOLE(bp) || bp->blk_birth < first_txg || 437 zil_bp_tree_add(zilog, bp) != 0) 438 return (0); 439 440 return (zio_wait(zio_claim(NULL, zilog->zl_spa, 441 tx == NULL ? 0 : first_txg, bp, spa_claim_notify, NULL, 442 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB))); 443} 444 445static int 446zil_claim_log_record(zilog_t *zilog, lr_t *lrc, void *tx, uint64_t first_txg) 447{ 448 lr_write_t *lr = (lr_write_t *)lrc; 449 int error; 450 451 if (lrc->lrc_txtype != TX_WRITE) 452 return (0); 453 454 /* 455 * If the block is not readable, don't claim it. This can happen 456 * in normal operation when a log block is written to disk before 457 * some of the dmu_sync() blocks it points to. In this case, the 458 * transaction cannot have been committed to anyone (we would have 459 * waited for all writes to be stable first), so it is semantically 460 * correct to declare this the end of the log. 461 */ 462 if (lr->lr_blkptr.blk_birth >= first_txg && 463 (error = zil_read_log_data(zilog, lr, NULL)) != 0) 464 return (error); 465 return (zil_claim_log_block(zilog, &lr->lr_blkptr, tx, first_txg)); 466} 467 468/* ARGSUSED */ 469static int 470zil_free_log_block(zilog_t *zilog, blkptr_t *bp, void *tx, uint64_t claim_txg) 471{ 472 zio_free(zilog->zl_spa, dmu_tx_get_txg(tx), bp); 473 474 return (0); 475} 476 477static int 478zil_free_log_record(zilog_t *zilog, lr_t *lrc, void *tx, uint64_t claim_txg) 479{ 480 lr_write_t *lr = (lr_write_t *)lrc; 481 blkptr_t *bp = &lr->lr_blkptr; 482 483 /* 484 * If we previously claimed it, we need to free it. 485 */ 486 if (claim_txg != 0 && lrc->lrc_txtype == TX_WRITE && 487 bp->blk_birth >= claim_txg && zil_bp_tree_add(zilog, bp) == 0 && 488 !BP_IS_HOLE(bp)) 489 zio_free(zilog->zl_spa, dmu_tx_get_txg(tx), bp); 490 491 return (0); 492} 493 494static int 495zil_lwb_vdev_compare(const void *x1, const void *x2) 496{ 497 const uint64_t v1 = ((zil_vdev_node_t *)x1)->zv_vdev; 498 const uint64_t v2 = ((zil_vdev_node_t *)x2)->zv_vdev; 499 500 return (AVL_CMP(v1, v2)); 501} 502 503static lwb_t * 504zil_alloc_lwb(zilog_t *zilog, blkptr_t *bp, boolean_t slog, uint64_t txg) 505{ 506 lwb_t *lwb; 507 508 lwb = kmem_cache_alloc(zil_lwb_cache, KM_SLEEP); 509 lwb->lwb_zilog = zilog; 510 lwb->lwb_blk = *bp; 511 lwb->lwb_slog = slog; 512 lwb->lwb_state = LWB_STATE_CLOSED; 513 lwb->lwb_buf = zio_buf_alloc(BP_GET_LSIZE(bp)); 514 lwb->lwb_max_txg = txg; 515 lwb->lwb_write_zio = NULL; 516 lwb->lwb_root_zio = NULL; 517 lwb->lwb_tx = NULL; 518 lwb->lwb_issued_timestamp = 0; 519 if (BP_GET_CHECKSUM(bp) == ZIO_CHECKSUM_ZILOG2) { 520 lwb->lwb_nused = sizeof (zil_chain_t); 521 lwb->lwb_sz = BP_GET_LSIZE(bp); 522 } else { 523 lwb->lwb_nused = 0; 524 lwb->lwb_sz = BP_GET_LSIZE(bp) - sizeof (zil_chain_t); 525 } 526 527 mutex_enter(&zilog->zl_lock); 528 list_insert_tail(&zilog->zl_lwb_list, lwb); 529 mutex_exit(&zilog->zl_lock); 530 531 ASSERT(!MUTEX_HELD(&lwb->lwb_vdev_lock)); 532 ASSERT(avl_is_empty(&lwb->lwb_vdev_tree)); 533 VERIFY(list_is_empty(&lwb->lwb_waiters)); 534 535 return (lwb); 536} 537 538static void 539zil_free_lwb(zilog_t *zilog, lwb_t *lwb) 540{ 541 ASSERT(MUTEX_HELD(&zilog->zl_lock)); 542 ASSERT(!MUTEX_HELD(&lwb->lwb_vdev_lock)); 543 VERIFY(list_is_empty(&lwb->lwb_waiters)); 544 ASSERT(avl_is_empty(&lwb->lwb_vdev_tree)); 545 ASSERT3P(lwb->lwb_write_zio, ==, NULL); 546 ASSERT3P(lwb->lwb_root_zio, ==, NULL); 547 ASSERT3U(lwb->lwb_max_txg, <=, spa_syncing_txg(zilog->zl_spa)); 548 ASSERT(lwb->lwb_state == LWB_STATE_CLOSED || 549 lwb->lwb_state == LWB_STATE_DONE); 550 
	/*
	 * Clear the zilog's field to indicate this lwb is no longer
	 * valid, and prevent use-after-free errors.
	 */
	if (zilog->zl_last_lwb_opened == lwb)
		zilog->zl_last_lwb_opened = NULL;

	kmem_cache_free(zil_lwb_cache, lwb);
}

/*
 * Called when we create in-memory log transactions so that we know
 * to cleanup the itxs at the end of spa_sync().
 */
void
zilog_dirty(zilog_t *zilog, uint64_t txg)
{
	dsl_pool_t *dp = zilog->zl_dmu_pool;
	dsl_dataset_t *ds = dmu_objset_ds(zilog->zl_os);

	ASSERT(spa_writeable(zilog->zl_spa));

	if (ds->ds_is_snapshot)
		panic("dirtying snapshot!");

	if (txg_list_add(&dp->dp_dirty_zilogs, zilog, txg)) {
		/* up the hold count until we can be written out */
		dmu_buf_add_ref(ds->ds_dbuf, zilog);

		zilog->zl_dirty_max_txg = MAX(txg, zilog->zl_dirty_max_txg);
	}
}

/*
 * Determine if the zil is dirty in the specified txg. Callers wanting to
 * ensure that the dirty state does not change must hold the itxg_lock for
 * the specified txg. Holding the lock will ensure that the zil cannot be
 * dirtied (zil_itx_assign) or cleaned (zil_clean) while we check its current
 * state.
 */
boolean_t
zilog_is_dirty_in_txg(zilog_t *zilog, uint64_t txg)
{
	dsl_pool_t *dp = zilog->zl_dmu_pool;

	if (txg_list_member(&dp->dp_dirty_zilogs, zilog, txg & TXG_MASK))
		return (B_TRUE);
	return (B_FALSE);
}

/*
 * Determine if the zil is dirty. The zil is considered dirty if it has
 * any pending itx records that have not been cleaned by zil_clean().
 */
boolean_t
zilog_is_dirty(zilog_t *zilog)
{
	dsl_pool_t *dp = zilog->zl_dmu_pool;

	for (int t = 0; t < TXG_SIZE; t++) {
		if (txg_list_member(&dp->dp_dirty_zilogs, zilog, t))
			return (B_TRUE);
	}
	return (B_FALSE);
}

/*
 * Create an on-disk intent log.
 */
static lwb_t *
zil_create(zilog_t *zilog)
{
	const zil_header_t *zh = zilog->zl_header;
	lwb_t *lwb = NULL;
	uint64_t txg = 0;
	dmu_tx_t *tx = NULL;
	blkptr_t blk;
	int error = 0;
	boolean_t slog = FALSE;

	/*
	 * Wait for any previous destroy to complete.
	 */
	txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);

	ASSERT(zh->zh_claim_txg == 0);
	ASSERT(zh->zh_replay_seq == 0);

	blk = zh->zh_log;

	/*
	 * Allocate an initial log block if:
	 *    - there isn't one already
	 *    - the existing block is the wrong endianness
	 */
	if (BP_IS_HOLE(&blk) || BP_SHOULD_BYTESWAP(&blk)) {
		tx = dmu_tx_create(zilog->zl_os);
		VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
		dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
		txg = dmu_tx_get_txg(tx);

		if (!BP_IS_HOLE(&blk)) {
			zio_free(zilog->zl_spa, txg, &blk);
			BP_ZERO(&blk);
		}

		error = zio_alloc_zil(zilog->zl_spa,
		    zilog->zl_os->os_dsl_dataset->ds_object, txg, &blk, NULL,
		    ZIL_MIN_BLKSZ, &slog);

		if (error == 0)
			zil_init_log_chain(zilog, &blk);
	}

	/*
	 * Allocate a log write block (lwb) for the first log block.
	 */
	if (error == 0)
		lwb = zil_alloc_lwb(zilog, &blk, slog, txg);

	/*
	 * If we just allocated the first log block, commit our transaction
	 * and wait for zil_sync() to stuff the block pointer into zh_log.
	 * (zh is part of the MOS, so we cannot modify it in open context.)
675 */ 676 if (tx != NULL) { 677 dmu_tx_commit(tx); 678 txg_wait_synced(zilog->zl_dmu_pool, txg); 679 } 680 681 ASSERT(bcmp(&blk, &zh->zh_log, sizeof (blk)) == 0); 682 683 return (lwb); 684} 685 686/* 687 * In one tx, free all log blocks and clear the log header. If keep_first 688 * is set, then we're replaying a log with no content. We want to keep the 689 * first block, however, so that the first synchronous transaction doesn't 690 * require a txg_wait_synced() in zil_create(). We don't need to 691 * txg_wait_synced() here either when keep_first is set, because both 692 * zil_create() and zil_destroy() will wait for any in-progress destroys 693 * to complete. 694 */ 695void 696zil_destroy(zilog_t *zilog, boolean_t keep_first) 697{ 698 const zil_header_t *zh = zilog->zl_header; 699 lwb_t *lwb; 700 dmu_tx_t *tx; 701 uint64_t txg; 702 703 /* 704 * Wait for any previous destroy to complete. 705 */ 706 txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg); 707 708 zilog->zl_old_header = *zh; /* debugging aid */ 709 710 if (BP_IS_HOLE(&zh->zh_log)) 711 return; 712 713 tx = dmu_tx_create(zilog->zl_os); 714 VERIFY0(dmu_tx_assign(tx, TXG_WAIT)); 715 dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx); 716 txg = dmu_tx_get_txg(tx); 717 718 mutex_enter(&zilog->zl_lock); 719 720 ASSERT3U(zilog->zl_destroy_txg, <, txg); 721 zilog->zl_destroy_txg = txg; 722 zilog->zl_keep_first = keep_first; 723 724 if (!list_is_empty(&zilog->zl_lwb_list)) { 725 ASSERT(zh->zh_claim_txg == 0); 726 VERIFY(!keep_first); 727 while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) { 728 list_remove(&zilog->zl_lwb_list, lwb); 729 if (lwb->lwb_buf != NULL) 730 zio_buf_free(lwb->lwb_buf, lwb->lwb_sz); 731 zio_free(zilog->zl_spa, txg, &lwb->lwb_blk); 732 zil_free_lwb(zilog, lwb); 733 } 734 } else if (!keep_first) { 735 zil_destroy_sync(zilog, tx); 736 } 737 mutex_exit(&zilog->zl_lock); 738 739 dmu_tx_commit(tx); 740} 741 742void 743zil_destroy_sync(zilog_t *zilog, dmu_tx_t *tx) 744{ 745 ASSERT(list_is_empty(&zilog->zl_lwb_list)); 746 (void) zil_parse(zilog, zil_free_log_block, 747 zil_free_log_record, tx, zilog->zl_header->zh_claim_txg); 748} 749 750int 751zil_claim(dsl_pool_t *dp, dsl_dataset_t *ds, void *txarg) 752{ 753 dmu_tx_t *tx = txarg; 754 zilog_t *zilog; 755 uint64_t first_txg; 756 zil_header_t *zh; 757 objset_t *os; 758 int error; 759 760 error = dmu_objset_own_obj(dp, ds->ds_object, 761 DMU_OST_ANY, B_FALSE, FTAG, &os); 762 if (error != 0) { 763 /* 764 * EBUSY indicates that the objset is inconsistent, in which 765 * case it can not have a ZIL. 766 */ 767 if (error != EBUSY) { 768 cmn_err(CE_WARN, "can't open objset for %llu, error %u", 769 (unsigned long long)ds->ds_object, error); 770 } 771 return (0); 772 } 773 774 zilog = dmu_objset_zil(os); 775 zh = zil_header_in_syncing_context(zilog); 776 ASSERT3U(tx->tx_txg, ==, spa_first_txg(zilog->zl_spa)); 777 first_txg = spa_min_claim_txg(zilog->zl_spa); 778 779 /* 780 * If the spa_log_state is not set to be cleared, check whether 781 * the current uberblock is a checkpoint one and if the current 782 * header has been claimed before moving on. 783 * 784 * If the current uberblock is a checkpointed uberblock then 785 * one of the following scenarios took place: 786 * 787 * 1] We are currently rewinding to the checkpoint of the pool. 
788 * 2] We crashed in the middle of a checkpoint rewind but we 789 * did manage to write the checkpointed uberblock to the 790 * vdev labels, so when we tried to import the pool again 791 * the checkpointed uberblock was selected from the import 792 * procedure. 793 * 794 * In both cases we want to zero out all the ZIL blocks, except 795 * the ones that have been claimed at the time of the checkpoint 796 * (their zh_claim_txg != 0). The reason is that these blocks 797 * may be corrupted since we may have reused their locations on 798 * disk after we took the checkpoint. 799 * 800 * We could try to set spa_log_state to SPA_LOG_CLEAR earlier 801 * when we first figure out whether the current uberblock is 802 * checkpointed or not. Unfortunately, that would discard all 803 * the logs, including the ones that are claimed, and we would 804 * leak space. 805 */ 806 if (spa_get_log_state(zilog->zl_spa) == SPA_LOG_CLEAR || 807 (zilog->zl_spa->spa_uberblock.ub_checkpoint_txg != 0 && 808 zh->zh_claim_txg == 0)) { 809 if (!BP_IS_HOLE(&zh->zh_log)) { 810 (void) zil_parse(zilog, zil_clear_log_block, 811 zil_noop_log_record, tx, first_txg); 812 } 813 BP_ZERO(&zh->zh_log); 814 dsl_dataset_dirty(dmu_objset_ds(os), tx); 815 dmu_objset_disown(os, FTAG); 816 return (0); 817 } 818 819 /* 820 * If we are not rewinding and opening the pool normally, then 821 * the min_claim_txg should be equal to the first txg of the pool. 822 */ 823 ASSERT3U(first_txg, ==, spa_first_txg(zilog->zl_spa)); 824 825 /* 826 * Claim all log blocks if we haven't already done so, and remember 827 * the highest claimed sequence number. This ensures that if we can 828 * read only part of the log now (e.g. due to a missing device), 829 * but we can read the entire log later, we will not try to replay 830 * or destroy beyond the last block we successfully claimed. 831 */ 832 ASSERT3U(zh->zh_claim_txg, <=, first_txg); 833 if (zh->zh_claim_txg == 0 && !BP_IS_HOLE(&zh->zh_log)) { 834 (void) zil_parse(zilog, zil_claim_log_block, 835 zil_claim_log_record, tx, first_txg); 836 zh->zh_claim_txg = first_txg; 837 zh->zh_claim_blk_seq = zilog->zl_parse_blk_seq; 838 zh->zh_claim_lr_seq = zilog->zl_parse_lr_seq; 839 if (zilog->zl_parse_lr_count || zilog->zl_parse_blk_count > 1) 840 zh->zh_flags |= ZIL_REPLAY_NEEDED; 841 zh->zh_flags |= ZIL_CLAIM_LR_SEQ_VALID; 842 dsl_dataset_dirty(dmu_objset_ds(os), tx); 843 } 844 845 ASSERT3U(first_txg, ==, (spa_last_synced_txg(zilog->zl_spa) + 1)); 846 dmu_objset_disown(os, FTAG); 847 return (0); 848} 849 850/* 851 * Check the log by walking the log chain. 852 * Checksum errors are ok as they indicate the end of the chain. 853 * Any other error (no device or read failure) returns an error. 854 */ 855/* ARGSUSED */ 856int 857zil_check_log_chain(dsl_pool_t *dp, dsl_dataset_t *ds, void *tx) 858{ 859 zilog_t *zilog; 860 objset_t *os; 861 blkptr_t *bp; 862 int error; 863 864 ASSERT(tx == NULL); 865 866 error = dmu_objset_from_ds(ds, &os); 867 if (error != 0) { 868 cmn_err(CE_WARN, "can't open objset %llu, error %d", 869 (unsigned long long)ds->ds_object, error); 870 return (0); 871 } 872 873 zilog = dmu_objset_zil(os); 874 bp = (blkptr_t *)&zilog->zl_header->zh_log; 875 876 if (!BP_IS_HOLE(bp)) { 877 vdev_t *vd; 878 boolean_t valid = B_TRUE; 879 880 /* 881 * Check the first block and determine if it's on a log device 882 * which may have been removed or faulted prior to loading this 883 * pool. If so, there's no point in checking the rest of the 884 * log as its content should have already been synced to the 885 * pool. 
		 */
		spa_config_enter(os->os_spa, SCL_STATE, FTAG, RW_READER);
		vd = vdev_lookup_top(os->os_spa, DVA_GET_VDEV(&bp->blk_dva[0]));
		if (vd->vdev_islog && vdev_is_dead(vd))
			valid = vdev_log_state_valid(vd);
		spa_config_exit(os->os_spa, SCL_STATE, FTAG);

		if (!valid)
			return (0);

		/*
		 * Check whether the current uberblock is checkpointed (e.g.
		 * we are rewinding) and whether the current header has been
		 * claimed or not. If it hasn't then skip verifying it. We
		 * do this because its ZIL blocks may be part of the pool's
		 * state before the rewind, which is no longer valid.
		 */
		zil_header_t *zh = zil_header_in_syncing_context(zilog);
		if (zilog->zl_spa->spa_uberblock.ub_checkpoint_txg != 0 &&
		    zh->zh_claim_txg == 0)
			return (0);
	}

	/*
	 * Because tx == NULL, zil_claim_log_block() will not actually claim
	 * any blocks, but just determine whether it is possible to do so.
	 * In addition to checking the log chain, zil_claim_log_block()
	 * will invoke zio_claim() with a done func of spa_claim_notify(),
	 * which will update spa_max_claim_txg. See spa_load() for details.
	 */
	error = zil_parse(zilog, zil_claim_log_block, zil_claim_log_record, tx,
	    zilog->zl_header->zh_claim_txg ? -1ULL :
	    spa_min_claim_txg(os->os_spa));

	return ((error == ECKSUM || error == ENOENT) ? 0 : error);
}

/*
 * When an itx is "skipped", this function is used to properly mark the
 * waiter as "done", and signal any thread(s) waiting on it. An itx can
 * be skipped (and not committed to an lwb) for a variety of reasons,
 * one of them being that the itx was committed via spa_sync(), prior to
 * it being committed to an lwb; this can happen if a thread calling
 * zil_commit() is racing with spa_sync().
 */
static void
zil_commit_waiter_skip(zil_commit_waiter_t *zcw)
{
	mutex_enter(&zcw->zcw_lock);
	ASSERT3B(zcw->zcw_done, ==, B_FALSE);
	zcw->zcw_done = B_TRUE;
	cv_broadcast(&zcw->zcw_cv);
	mutex_exit(&zcw->zcw_lock);
}

/*
 * This function is used when the given waiter is to be linked into an
 * lwb's "lwb_waiter" list; i.e. when the itx is committed to the lwb.
 * At this point, the waiter will no longer be referenced by the itx,
 * and instead, will be referenced by the lwb.
 */
static void
zil_commit_waiter_link_lwb(zil_commit_waiter_t *zcw, lwb_t *lwb)
{
	/*
	 * The lwb_waiters field of the lwb is protected by the zilog's
	 * zl_lock, thus it must be held when calling this function.
	 */
	ASSERT(MUTEX_HELD(&lwb->lwb_zilog->zl_lock));

	mutex_enter(&zcw->zcw_lock);
	ASSERT(!list_link_active(&zcw->zcw_node));
	ASSERT3P(zcw->zcw_lwb, ==, NULL);
	ASSERT3P(lwb, !=, NULL);
	ASSERT(lwb->lwb_state == LWB_STATE_OPENED ||
	    lwb->lwb_state == LWB_STATE_ISSUED);

	list_insert_tail(&lwb->lwb_waiters, zcw);
	zcw->zcw_lwb = lwb;
	mutex_exit(&zcw->zcw_lock);
}

/*
 * This function is used when zio_alloc_zil() fails to allocate a ZIL
 * block, and the given waiter must be linked to the "nolwb waiters"
 * list inside of zil_process_commit_list().
972 */ 973static void 974zil_commit_waiter_link_nolwb(zil_commit_waiter_t *zcw, list_t *nolwb) 975{ 976 mutex_enter(&zcw->zcw_lock); 977 ASSERT(!list_link_active(&zcw->zcw_node)); 978 ASSERT3P(zcw->zcw_lwb, ==, NULL); 979 list_insert_tail(nolwb, zcw); 980 mutex_exit(&zcw->zcw_lock); 981} 982 983void 984zil_lwb_add_block(lwb_t *lwb, const blkptr_t *bp) 985{ 986 avl_tree_t *t = &lwb->lwb_vdev_tree; 987 avl_index_t where; 988 zil_vdev_node_t *zv, zvsearch; 989 int ndvas = BP_GET_NDVAS(bp); 990 int i; 991 992 if (zfs_nocacheflush) 993 return; 994 995 mutex_enter(&lwb->lwb_vdev_lock); 996 for (i = 0; i < ndvas; i++) { 997 zvsearch.zv_vdev = DVA_GET_VDEV(&bp->blk_dva[i]); 998 if (avl_find(t, &zvsearch, &where) == NULL) { 999 zv = kmem_alloc(sizeof (*zv), KM_SLEEP); 1000 zv->zv_vdev = zvsearch.zv_vdev; 1001 avl_insert(t, zv, where); 1002 } 1003 } 1004 mutex_exit(&lwb->lwb_vdev_lock); 1005} 1006 1007void 1008zil_lwb_add_txg(lwb_t *lwb, uint64_t txg) 1009{ 1010 lwb->lwb_max_txg = MAX(lwb->lwb_max_txg, txg); 1011} 1012 1013/* 1014 * This function is a called after all VDEVs associated with a given lwb 1015 * write have completed their DKIOCFLUSHWRITECACHE command; or as soon 1016 * as the lwb write completes, if "zfs_nocacheflush" is set. 1017 * 1018 * The intention is for this function to be called as soon as the 1019 * contents of an lwb are considered "stable" on disk, and will survive 1020 * any sudden loss of power. At this point, any threads waiting for the 1021 * lwb to reach this state are signalled, and the "waiter" structures 1022 * are marked "done". 1023 */ 1024static void 1025zil_lwb_flush_vdevs_done(zio_t *zio) 1026{ 1027 lwb_t *lwb = zio->io_private; 1028 zilog_t *zilog = lwb->lwb_zilog; 1029 dmu_tx_t *tx = lwb->lwb_tx; 1030 zil_commit_waiter_t *zcw; 1031 1032 spa_config_exit(zilog->zl_spa, SCL_STATE, lwb); 1033 1034 zio_buf_free(lwb->lwb_buf, lwb->lwb_sz); 1035 1036 mutex_enter(&zilog->zl_lock); 1037 1038 /* 1039 * Ensure the lwb buffer pointer is cleared before releasing the 1040 * txg. If we have had an allocation failure and the txg is 1041 * waiting to sync then we want zil_sync() to remove the lwb so 1042 * that it's not picked up as the next new one in 1043 * zil_process_commit_list(). zil_sync() will only remove the 1044 * lwb if lwb_buf is null. 1045 */ 1046 lwb->lwb_buf = NULL; 1047 lwb->lwb_tx = NULL; 1048 1049 ASSERT3U(lwb->lwb_issued_timestamp, >, 0); 1050 zilog->zl_last_lwb_latency = gethrtime() - lwb->lwb_issued_timestamp; 1051 1052 lwb->lwb_root_zio = NULL; 1053 lwb->lwb_state = LWB_STATE_DONE; 1054 1055 if (zilog->zl_last_lwb_opened == lwb) { 1056 /* 1057 * Remember the highest committed log sequence number 1058 * for ztest. We only update this value when all the log 1059 * writes succeeded, because ztest wants to ASSERT that 1060 * it got the whole log chain. 
1061 */ 1062 zilog->zl_commit_lr_seq = zilog->zl_lr_seq; 1063 } 1064 1065 while ((zcw = list_head(&lwb->lwb_waiters)) != NULL) { 1066 mutex_enter(&zcw->zcw_lock); 1067 1068 ASSERT(list_link_active(&zcw->zcw_node)); 1069 list_remove(&lwb->lwb_waiters, zcw); 1070 1071 ASSERT3P(zcw->zcw_lwb, ==, lwb); 1072 zcw->zcw_lwb = NULL; 1073 1074 zcw->zcw_zio_error = zio->io_error; 1075 1076 ASSERT3B(zcw->zcw_done, ==, B_FALSE); 1077 zcw->zcw_done = B_TRUE; 1078 cv_broadcast(&zcw->zcw_cv); 1079 1080 mutex_exit(&zcw->zcw_lock); 1081 } 1082 1083 mutex_exit(&zilog->zl_lock); 1084 1085 /* 1086 * Now that we've written this log block, we have a stable pointer 1087 * to the next block in the chain, so it's OK to let the txg in 1088 * which we allocated the next block sync. 1089 */ 1090 dmu_tx_commit(tx); 1091} 1092 1093/* 1094 * This is called when an lwb write completes. This means, this specific 1095 * lwb was written to disk, and all dependent lwb have also been 1096 * written to disk. 1097 * 1098 * At this point, a DKIOCFLUSHWRITECACHE command hasn't been issued to 1099 * the VDEVs involved in writing out this specific lwb. The lwb will be 1100 * "done" once zil_lwb_flush_vdevs_done() is called, which occurs in the 1101 * zio completion callback for the lwb's root zio. 1102 */ 1103static void 1104zil_lwb_write_done(zio_t *zio) 1105{ 1106 lwb_t *lwb = zio->io_private; 1107 spa_t *spa = zio->io_spa; 1108 zilog_t *zilog = lwb->lwb_zilog; 1109 avl_tree_t *t = &lwb->lwb_vdev_tree; 1110 void *cookie = NULL; 1111 zil_vdev_node_t *zv; 1112 1113 ASSERT3S(spa_config_held(spa, SCL_STATE, RW_READER), !=, 0); 1114 1115 ASSERT(BP_GET_COMPRESS(zio->io_bp) == ZIO_COMPRESS_OFF); 1116 ASSERT(BP_GET_TYPE(zio->io_bp) == DMU_OT_INTENT_LOG); 1117 ASSERT(BP_GET_LEVEL(zio->io_bp) == 0); 1118 ASSERT(BP_GET_BYTEORDER(zio->io_bp) == ZFS_HOST_BYTEORDER); 1119 ASSERT(!BP_IS_GANG(zio->io_bp)); 1120 ASSERT(!BP_IS_HOLE(zio->io_bp)); 1121 ASSERT(BP_GET_FILL(zio->io_bp) == 0); 1122 1123 abd_put(zio->io_abd); 1124 1125 ASSERT3S(lwb->lwb_state, ==, LWB_STATE_ISSUED); 1126 1127 mutex_enter(&zilog->zl_lock); 1128 lwb->lwb_write_zio = NULL; 1129 mutex_exit(&zilog->zl_lock); 1130 1131 if (avl_numnodes(t) == 0) 1132 return; 1133 1134 /* 1135 * If there was an IO error, we're not going to call zio_flush() 1136 * on these vdevs, so we simply empty the tree and free the 1137 * nodes. We avoid calling zio_flush() since there isn't any 1138 * good reason for doing so, after the lwb block failed to be 1139 * written out. 1140 */ 1141 if (zio->io_error != 0) { 1142 while ((zv = avl_destroy_nodes(t, &cookie)) != NULL) 1143 kmem_free(zv, sizeof (*zv)); 1144 return; 1145 } 1146 1147 while ((zv = avl_destroy_nodes(t, &cookie)) != NULL) { 1148 vdev_t *vd = vdev_lookup_top(spa, zv->zv_vdev); 1149 if (vd != NULL) 1150 zio_flush(lwb->lwb_root_zio, vd); 1151 kmem_free(zv, sizeof (*zv)); 1152 } 1153} 1154 1155/* 1156 * This function's purpose is to "open" an lwb such that it is ready to 1157 * accept new itxs being committed to it. To do this, the lwb's zio 1158 * structures are created, and linked to the lwb. This function is 1159 * idempotent; if the passed in lwb has already been opened, this 1160 * function is essentially a no-op. 
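 *
 * Idempotence falls out of the lwb_root_zio check below: if the root
 * zio already exists, the lwb must already be in the OPENED state and
 * the zio setup is simply skipped.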
1161 */ 1162static void 1163zil_lwb_write_open(zilog_t *zilog, lwb_t *lwb) 1164{ 1165 zbookmark_phys_t zb; 1166 zio_priority_t prio; 1167 1168 ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock)); 1169 ASSERT3P(lwb, !=, NULL); 1170 EQUIV(lwb->lwb_root_zio == NULL, lwb->lwb_state == LWB_STATE_CLOSED); 1171 EQUIV(lwb->lwb_root_zio != NULL, lwb->lwb_state == LWB_STATE_OPENED); 1172 1173 SET_BOOKMARK(&zb, lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_OBJSET], 1174 ZB_ZIL_OBJECT, ZB_ZIL_LEVEL, 1175 lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_SEQ]); 1176 1177 if (lwb->lwb_root_zio == NULL) { 1178 abd_t *lwb_abd = abd_get_from_buf(lwb->lwb_buf, 1179 BP_GET_LSIZE(&lwb->lwb_blk)); 1180 1181 if (!lwb->lwb_slog || zilog->zl_cur_used <= zil_slog_bulk) 1182 prio = ZIO_PRIORITY_SYNC_WRITE; 1183 else 1184 prio = ZIO_PRIORITY_ASYNC_WRITE; 1185 1186 lwb->lwb_root_zio = zio_root(zilog->zl_spa, 1187 zil_lwb_flush_vdevs_done, lwb, ZIO_FLAG_CANFAIL); 1188 ASSERT3P(lwb->lwb_root_zio, !=, NULL); 1189 1190 lwb->lwb_write_zio = zio_rewrite(lwb->lwb_root_zio, 1191 zilog->zl_spa, 0, &lwb->lwb_blk, lwb_abd, 1192 BP_GET_LSIZE(&lwb->lwb_blk), zil_lwb_write_done, lwb, 1193 prio, ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_PROPAGATE, &zb); 1194 ASSERT3P(lwb->lwb_write_zio, !=, NULL); 1195 1196 lwb->lwb_state = LWB_STATE_OPENED; 1197 1198 mutex_enter(&zilog->zl_lock); 1199 1200 /* 1201 * The zilog's "zl_last_lwb_opened" field is used to 1202 * build the lwb/zio dependency chain, which is used to 1203 * preserve the ordering of lwb completions that is 1204 * required by the semantics of the ZIL. Each new lwb 1205 * zio becomes a parent of the "previous" lwb zio, such 1206 * that the new lwb's zio cannot complete until the 1207 * "previous" lwb's zio completes. 1208 * 1209 * This is required by the semantics of zil_commit(); 1210 * the commit waiters attached to the lwbs will be woken 1211 * in the lwb zio's completion callback, so this zio 1212 * dependency graph ensures the waiters are woken in the 1213 * correct order (the same order the lwbs were created). 1214 */ 1215 lwb_t *last_lwb_opened = zilog->zl_last_lwb_opened; 1216 if (last_lwb_opened != NULL && 1217 last_lwb_opened->lwb_state != LWB_STATE_DONE) { 1218 ASSERT(last_lwb_opened->lwb_state == LWB_STATE_OPENED || 1219 last_lwb_opened->lwb_state == LWB_STATE_ISSUED); 1220 ASSERT3P(last_lwb_opened->lwb_root_zio, !=, NULL); 1221 zio_add_child(lwb->lwb_root_zio, 1222 last_lwb_opened->lwb_root_zio); 1223 } 1224 zilog->zl_last_lwb_opened = lwb; 1225 1226 mutex_exit(&zilog->zl_lock); 1227 } 1228 1229 ASSERT3P(lwb->lwb_root_zio, !=, NULL); 1230 ASSERT3P(lwb->lwb_write_zio, !=, NULL); 1231 ASSERT3S(lwb->lwb_state, ==, LWB_STATE_OPENED); 1232} 1233 1234/* 1235 * Define a limited set of intent log block sizes. 1236 * 1237 * These must be a multiple of 4KB. Note only the amount used (again 1238 * aligned to 4KB) actually gets written. However, we can't always just 1239 * allocate SPA_OLD_MAXBLOCKSIZE as the slog space could be exhausted. 1240 */ 1241struct { 1242 uint64_t limit; 1243 uint64_t blksz; 1244} zil_block_buckets[] = { 1245 { 4096, 4096 }, /* non TX_WRITE */ 1246 { 8192 + 4096, 8192 + 4096 }, /* database */ 1247 { 32768 + 4096, 32768 + 4096 }, /* NFS writes */ 1248 { 65536 + 4096, 65536 + 4096 }, /* 64KB writes */ 1249 { 131072, 131072 }, /* < 128KB writes */ 1250 { 131072 + 4096, 65536 + 4096 }, /* 128KB writes */ 1251 { UINT64_MAX, SPA_OLD_MAXBLOCKSIZE}, /* > 128KB writes */ 1252}; 1253 1254/* 1255 * Maximum block size used by the ZIL. This is picked up when the ZIL is 1256 * initialized. 
 * Otherwise this should not be used directly; see zl_max_block_size
 * instead.
 */
int zil_maxblocksize = SPA_OLD_MAXBLOCKSIZE;
SYSCTL_INT(_vfs_zfs, OID_AUTO, zil_maxblocksize, CTLFLAG_RWTUN,
    &zil_maxblocksize, 0, "Limit in bytes of ZIL log block size");

/*
 * Start a log block write and advance to the next log block.
 * Calls are serialized.
 */
static lwb_t *
zil_lwb_write_issue(zilog_t *zilog, lwb_t *lwb)
{
	lwb_t *nlwb = NULL;
	zil_chain_t *zilc;
	spa_t *spa = zilog->zl_spa;
	blkptr_t *bp;
	dmu_tx_t *tx;
	uint64_t txg;
	uint64_t zil_blksz, wsz;
	int i, error;
	boolean_t slog;

	ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock));
	ASSERT3P(lwb->lwb_root_zio, !=, NULL);
	ASSERT3P(lwb->lwb_write_zio, !=, NULL);
	ASSERT3S(lwb->lwb_state, ==, LWB_STATE_OPENED);

	if (BP_GET_CHECKSUM(&lwb->lwb_blk) == ZIO_CHECKSUM_ZILOG2) {
		zilc = (zil_chain_t *)lwb->lwb_buf;
		bp = &zilc->zc_next_blk;
	} else {
		zilc = (zil_chain_t *)(lwb->lwb_buf + lwb->lwb_sz);
		bp = &zilc->zc_next_blk;
	}

	ASSERT(lwb->lwb_nused <= lwb->lwb_sz);

	/*
	 * Allocate the next block and save its address in this block
	 * before writing it in order to establish the log chain.
	 * Note that if the allocation of nlwb synced before we wrote
	 * the block that points at it (lwb), we'd leak it if we crashed.
	 * Therefore, we don't do dmu_tx_commit() until zil_lwb_write_done().
	 * We dirty the dataset to ensure that zil_sync() will be called
	 * to clean up in the event of allocation failure or I/O failure.
	 */

	tx = dmu_tx_create(zilog->zl_os);

	/*
	 * Since we are not going to create any new dirty data, and we
	 * can even help with clearing the existing dirty data, we
	 * should not be subject to the dirty data based delays. We
	 * use TXG_NOTHROTTLE to bypass the delay mechanism.
	 */
	VERIFY0(dmu_tx_assign(tx, TXG_WAIT | TXG_NOTHROTTLE));

	dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
	txg = dmu_tx_get_txg(tx);

	lwb->lwb_tx = tx;

	/*
	 * Log blocks are pre-allocated. Here we select the size of the next
	 * block, based on size used in the last block.
	 * - first find the smallest bucket that will fit the block from a
	 *   limited set of block sizes. This is because it's faster to write
	 *   blocks allocated from the same metaslab as they are adjacent or
	 *   close.
	 * - next find the maximum from the new suggested size and an array of
	 *   previous sizes. This lessens a picket fence effect of wrongly
	 *   guessing the size if we have a stream of say 2k, 64k, 2k, 64k
	 *   requests.
	 *
	 * Note we only write what is used, but we can't just allocate
	 * the maximum block size because we can exhaust the available
	 * pool log space.
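	 *
	 * For example (illustrative numbers only): if zl_cur_used is
	 * roughly 20KB, zil_blksz starts out as 20KB + sizeof (zil_chain_t),
	 * the first bucket whose limit covers that is the 32KB + 4KB
	 * "NFS writes" bucket, and the rotor of ZIL_PREV_BLKS previous
	 * sizes may then round the final choice up further if recent
	 * blocks were larger.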
1335 */ 1336 zil_blksz = zilog->zl_cur_used + sizeof (zil_chain_t); 1337 for (i = 0; zil_blksz > zil_block_buckets[i].limit; i++) 1338 continue; 1339 zil_blksz = MIN(zil_block_buckets[i].blksz, zilog->zl_max_block_size); 1340 zilog->zl_prev_blks[zilog->zl_prev_rotor] = zil_blksz; 1341 for (i = 0; i < ZIL_PREV_BLKS; i++) 1342 zil_blksz = MAX(zil_blksz, zilog->zl_prev_blks[i]); 1343 zilog->zl_prev_rotor = (zilog->zl_prev_rotor + 1) & (ZIL_PREV_BLKS - 1); 1344 1345 BP_ZERO(bp); 1346 1347 /* pass the old blkptr in order to spread log blocks across devs */ 1348 error = zio_alloc_zil(spa, zilog->zl_os->os_dsl_dataset->ds_object, 1349 txg, bp, &lwb->lwb_blk, zil_blksz, &slog); 1350 if (error == 0) { 1351 ASSERT3U(bp->blk_birth, ==, txg); 1352 bp->blk_cksum = lwb->lwb_blk.blk_cksum; 1353 bp->blk_cksum.zc_word[ZIL_ZC_SEQ]++; 1354 1355 /* 1356 * Allocate a new log write block (lwb). 1357 */ 1358 nlwb = zil_alloc_lwb(zilog, bp, slog, txg); 1359 } 1360 1361 if (BP_GET_CHECKSUM(&lwb->lwb_blk) == ZIO_CHECKSUM_ZILOG2) { 1362 /* For Slim ZIL only write what is used. */ 1363 wsz = P2ROUNDUP_TYPED(lwb->lwb_nused, ZIL_MIN_BLKSZ, uint64_t); 1364 ASSERT3U(wsz, <=, lwb->lwb_sz); 1365 zio_shrink(lwb->lwb_write_zio, wsz); 1366 1367 } else { 1368 wsz = lwb->lwb_sz; 1369 } 1370 1371 zilc->zc_pad = 0; 1372 zilc->zc_nused = lwb->lwb_nused; 1373 zilc->zc_eck.zec_cksum = lwb->lwb_blk.blk_cksum; 1374 1375 /* 1376 * clear unused data for security 1377 */ 1378 bzero(lwb->lwb_buf + lwb->lwb_nused, wsz - lwb->lwb_nused); 1379 1380 spa_config_enter(zilog->zl_spa, SCL_STATE, lwb, RW_READER); 1381 1382 zil_lwb_add_block(lwb, &lwb->lwb_blk); 1383 lwb->lwb_issued_timestamp = gethrtime(); 1384 lwb->lwb_state = LWB_STATE_ISSUED; 1385 1386 zio_nowait(lwb->lwb_root_zio); 1387 zio_nowait(lwb->lwb_write_zio); 1388 1389 /* 1390 * If there was an allocation failure then nlwb will be null which 1391 * forces a txg_wait_synced(). 1392 */ 1393 return (nlwb); 1394} 1395 1396/* 1397 * Maximum amount of write data that can be put into single log block. 1398 */ 1399uint64_t 1400zil_max_log_data(zilog_t *zilog) 1401{ 1402 return (zilog->zl_max_block_size - 1403 sizeof (zil_chain_t) - sizeof (lr_write_t)); 1404} 1405 1406/* 1407 * Maximum amount of log space we agree to waste to reduce number of 1408 * WR_NEED_COPY chunks to reduce zl_get_data() overhead (~12%). 1409 */ 1410static inline uint64_t 1411zil_max_waste_space(zilog_t *zilog) 1412{ 1413 return (zil_max_log_data(zilog) / 8); 1414} 1415 1416/* 1417 * Maximum amount of write data for WR_COPIED. For correctness, consumers 1418 * must fall back to WR_NEED_COPY if we can't fit the entire record into one 1419 * maximum sized log block, because each WR_COPIED record must fit in a 1420 * single log block. For space efficiency, we want to fit two records into a 1421 * max-sized log block. 
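 *
 * For example, with the default 128KB zl_max_block_size this works out
 * to (131072 - sizeof (zil_chain_t)) / 2 - sizeof (lr_write_t), i.e. a
 * little under 64KB, so two maximal WR_COPIED records plus their
 * headers still fit in a single max-sized log block.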
1422 */ 1423uint64_t 1424zil_max_copied_data(zilog_t *zilog) 1425{ 1426 return ((zilog->zl_max_block_size - sizeof (zil_chain_t)) / 2 - 1427 sizeof (lr_write_t)); 1428} 1429 1430static lwb_t * 1431zil_lwb_commit(zilog_t *zilog, itx_t *itx, lwb_t *lwb) 1432{ 1433 lr_t *lrcb, *lrc; 1434 lr_write_t *lrwb, *lrw; 1435 char *lr_buf; 1436 uint64_t dlen, dnow, lwb_sp, reclen, txg, max_log_data; 1437 1438 ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock)); 1439 ASSERT3P(lwb, !=, NULL); 1440 ASSERT3P(lwb->lwb_buf, !=, NULL); 1441 1442 zil_lwb_write_open(zilog, lwb); 1443 1444 lrc = &itx->itx_lr; 1445 lrw = (lr_write_t *)lrc; 1446 1447 /* 1448 * A commit itx doesn't represent any on-disk state; instead 1449 * it's simply used as a place holder on the commit list, and 1450 * provides a mechanism for attaching a "commit waiter" onto the 1451 * correct lwb (such that the waiter can be signalled upon 1452 * completion of that lwb). Thus, we don't process this itx's 1453 * log record if it's a commit itx (these itx's don't have log 1454 * records), and instead link the itx's waiter onto the lwb's 1455 * list of waiters. 1456 * 1457 * For more details, see the comment above zil_commit(). 1458 */ 1459 if (lrc->lrc_txtype == TX_COMMIT) { 1460 mutex_enter(&zilog->zl_lock); 1461 zil_commit_waiter_link_lwb(itx->itx_private, lwb); 1462 itx->itx_private = NULL; 1463 mutex_exit(&zilog->zl_lock); 1464 return (lwb); 1465 } 1466 1467 if (lrc->lrc_txtype == TX_WRITE && itx->itx_wr_state == WR_NEED_COPY) { 1468 dlen = P2ROUNDUP_TYPED( 1469 lrw->lr_length, sizeof (uint64_t), uint64_t); 1470 } else { 1471 dlen = 0; 1472 } 1473 reclen = lrc->lrc_reclen; 1474 zilog->zl_cur_used += (reclen + dlen); 1475 txg = lrc->lrc_txg; 1476 1477 ASSERT3U(zilog->zl_cur_used, <, UINT64_MAX - (reclen + dlen)); 1478 1479cont: 1480 /* 1481 * If this record won't fit in the current log block, start a new one. 1482 * For WR_NEED_COPY optimize layout for minimal number of chunks. 1483 */ 1484 lwb_sp = lwb->lwb_sz - lwb->lwb_nused; 1485 max_log_data = zil_max_log_data(zilog); 1486 if (reclen > lwb_sp || (reclen + dlen > lwb_sp && 1487 lwb_sp < zil_max_waste_space(zilog) && 1488 (dlen % max_log_data == 0 || 1489 lwb_sp < reclen + dlen % max_log_data))) { 1490 lwb = zil_lwb_write_issue(zilog, lwb); 1491 if (lwb == NULL) 1492 return (NULL); 1493 zil_lwb_write_open(zilog, lwb); 1494 ASSERT(LWB_EMPTY(lwb)); 1495 lwb_sp = lwb->lwb_sz - lwb->lwb_nused; 1496 1497 /* 1498 * There must be enough space in the new, empty log block to 1499 * hold reclen. For WR_COPIED, we need to fit the whole 1500 * record in one block, and reclen is the header size + the 1501 * data size. For WR_NEED_COPY, we can create multiple 1502 * records, splitting the data into multiple blocks, so we 1503 * only need to fit one word of data per block; in this case 1504 * reclen is just the header size (no data). 1505 */ 1506 ASSERT3U(reclen + MIN(dlen, sizeof (uint64_t)), <=, lwb_sp); 1507 } 1508 1509 dnow = MIN(dlen, lwb_sp - reclen); 1510 lr_buf = lwb->lwb_buf + lwb->lwb_nused; 1511 bcopy(lrc, lr_buf, reclen); 1512 lrcb = (lr_t *)lr_buf; /* Like lrc, but inside lwb. */ 1513 lrwb = (lr_write_t *)lrcb; /* Like lrw, but inside lwb. */ 1514 1515 /* 1516 * If it's a write, fetch the data or get its blkptr as appropriate. 
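	 *
	 * WR_COPIED records already carry their data inside the itx, so
	 * nothing more is needed here. WR_NEED_COPY records have their
	 * data copied into the lwb by the zl_get_data callback (splitting
	 * across multiple records if necessary), while WR_INDIRECT records
	 * instead have zl_get_data fill in the record's block pointer (via
	 * dmu_sync()) rather than copying the data into the log block.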
1517 */ 1518 if (lrc->lrc_txtype == TX_WRITE) { 1519 if (txg > spa_freeze_txg(zilog->zl_spa)) 1520 txg_wait_synced(zilog->zl_dmu_pool, txg); 1521 if (itx->itx_wr_state != WR_COPIED) { 1522 char *dbuf; 1523 int error; 1524 1525 if (itx->itx_wr_state == WR_NEED_COPY) { 1526 dbuf = lr_buf + reclen; 1527 lrcb->lrc_reclen += dnow; 1528 if (lrwb->lr_length > dnow) 1529 lrwb->lr_length = dnow; 1530 lrw->lr_offset += dnow; 1531 lrw->lr_length -= dnow; 1532 } else { 1533 ASSERT(itx->itx_wr_state == WR_INDIRECT); 1534 dbuf = NULL; 1535 } 1536 1537 /* 1538 * We pass in the "lwb_write_zio" rather than 1539 * "lwb_root_zio" so that the "lwb_write_zio" 1540 * becomes the parent of any zio's created by 1541 * the "zl_get_data" callback. The vdevs are 1542 * flushed after the "lwb_write_zio" completes, 1543 * so we want to make sure that completion 1544 * callback waits for these additional zio's, 1545 * such that the vdevs used by those zio's will 1546 * be included in the lwb's vdev tree, and those 1547 * vdevs will be properly flushed. If we passed 1548 * in "lwb_root_zio" here, then these additional 1549 * vdevs may not be flushed; e.g. if these zio's 1550 * completed after "lwb_write_zio" completed. 1551 */ 1552 error = zilog->zl_get_data(itx->itx_private, 1553 lrwb, dbuf, lwb, lwb->lwb_write_zio); 1554 1555 if (error == EIO) { 1556 txg_wait_synced(zilog->zl_dmu_pool, txg); 1557 return (lwb); 1558 } 1559 if (error != 0) { 1560 ASSERT(error == ENOENT || error == EEXIST || 1561 error == EALREADY); 1562 return (lwb); 1563 } 1564 } 1565 } 1566 1567 /* 1568 * We're actually making an entry, so update lrc_seq to be the 1569 * log record sequence number. Note that this is generally not 1570 * equal to the itx sequence number because not all transactions 1571 * are synchronous, and sometimes spa_sync() gets there first. 1572 */ 1573 lrcb->lrc_seq = ++zilog->zl_lr_seq; 1574 lwb->lwb_nused += reclen + dnow; 1575 1576 zil_lwb_add_txg(lwb, txg); 1577 1578 ASSERT3U(lwb->lwb_nused, <=, lwb->lwb_sz); 1579 ASSERT0(P2PHASE(lwb->lwb_nused, sizeof (uint64_t))); 1580 1581 dlen -= dnow; 1582 if (dlen > 0) { 1583 zilog->zl_cur_used += reclen; 1584 goto cont; 1585 } 1586 1587 return (lwb); 1588} 1589 1590itx_t * 1591zil_itx_create(uint64_t txtype, size_t lrsize) 1592{ 1593 itx_t *itx; 1594 1595 lrsize = P2ROUNDUP_TYPED(lrsize, sizeof (uint64_t), size_t); 1596 1597 itx = kmem_alloc(offsetof(itx_t, itx_lr) + lrsize, KM_SLEEP); 1598 itx->itx_lr.lrc_txtype = txtype; 1599 itx->itx_lr.lrc_reclen = lrsize; 1600 itx->itx_lr.lrc_seq = 0; /* defensive */ 1601 itx->itx_sync = B_TRUE; /* default is synchronous */ 1602 1603 return (itx); 1604} 1605 1606void 1607zil_itx_destroy(itx_t *itx) 1608{ 1609 kmem_free(itx, offsetof(itx_t, itx_lr) + itx->itx_lr.lrc_reclen); 1610} 1611 1612/* 1613 * Free up the sync and async itxs. The itxs_t has already been detached 1614 * so no locks are needed. 1615 */ 1616static void 1617zil_itxg_clean(itxs_t *itxs) 1618{ 1619 itx_t *itx; 1620 list_t *list; 1621 avl_tree_t *t; 1622 void *cookie; 1623 itx_async_node_t *ian; 1624 1625 list = &itxs->i_sync_list; 1626 while ((itx = list_head(list)) != NULL) { 1627 /* 1628 * In the general case, commit itxs will not be found 1629 * here, as they'll be committed to an lwb via 1630 * zil_lwb_commit(), and free'd in that function. 
Having 1631 * said that, it is still possible for commit itxs to be 1632 * found here, due to the following race: 1633 * 1634 * - a thread calls zil_commit() which assigns the 1635 * commit itx to a per-txg i_sync_list 1636 * - zil_itxg_clean() is called (e.g. via spa_sync()) 1637 * while the waiter is still on the i_sync_list 1638 * 1639 * There's nothing to prevent syncing the txg while the 1640 * waiter is on the i_sync_list. This normally doesn't 1641 * happen because spa_sync() is slower than zil_commit(), 1642 * but if zil_commit() calls txg_wait_synced() (e.g. 1643 * because zil_create() or zil_commit_writer_stall() is 1644 * called) we will hit this case. 1645 */ 1646 if (itx->itx_lr.lrc_txtype == TX_COMMIT) 1647 zil_commit_waiter_skip(itx->itx_private); 1648 1649 list_remove(list, itx); 1650 zil_itx_destroy(itx); 1651 } 1652 1653 cookie = NULL; 1654 t = &itxs->i_async_tree; 1655 while ((ian = avl_destroy_nodes(t, &cookie)) != NULL) { 1656 list = &ian->ia_list; 1657 while ((itx = list_head(list)) != NULL) { 1658 list_remove(list, itx); 1659 /* commit itxs should never be on the async lists. */ 1660 ASSERT3U(itx->itx_lr.lrc_txtype, !=, TX_COMMIT); 1661 zil_itx_destroy(itx); 1662 } 1663 list_destroy(list); 1664 kmem_free(ian, sizeof (itx_async_node_t)); 1665 } 1666 avl_destroy(t); 1667 1668 kmem_free(itxs, sizeof (itxs_t)); 1669} 1670 1671static int 1672zil_aitx_compare(const void *x1, const void *x2) 1673{ 1674 const uint64_t o1 = ((itx_async_node_t *)x1)->ia_foid; 1675 const uint64_t o2 = ((itx_async_node_t *)x2)->ia_foid; 1676 1677 return (AVL_CMP(o1, o2)); 1678} 1679 1680/* 1681 * Remove all async itx with the given oid. 1682 */ 1683static void 1684zil_remove_async(zilog_t *zilog, uint64_t oid) 1685{ 1686 uint64_t otxg, txg; 1687 itx_async_node_t *ian; 1688 avl_tree_t *t; 1689 avl_index_t where; 1690 list_t clean_list; 1691 itx_t *itx; 1692 1693 ASSERT(oid != 0); 1694 list_create(&clean_list, sizeof (itx_t), offsetof(itx_t, itx_node)); 1695 1696 if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */ 1697 otxg = ZILTEST_TXG; 1698 else 1699 otxg = spa_last_synced_txg(zilog->zl_spa) + 1; 1700 1701 for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) { 1702 itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK]; 1703 1704 mutex_enter(&itxg->itxg_lock); 1705 if (itxg->itxg_txg != txg) { 1706 mutex_exit(&itxg->itxg_lock); 1707 continue; 1708 } 1709 1710 /* 1711 * Locate the object node and append its list. 1712 */ 1713 t = &itxg->itxg_itxs->i_async_tree; 1714 ian = avl_find(t, &oid, &where); 1715 if (ian != NULL) 1716 list_move_tail(&clean_list, &ian->ia_list); 1717 mutex_exit(&itxg->itxg_lock); 1718 } 1719 while ((itx = list_head(&clean_list)) != NULL) { 1720 list_remove(&clean_list, itx); 1721 /* commit itxs should never be on the async lists. */ 1722 ASSERT3U(itx->itx_lr.lrc_txtype, !=, TX_COMMIT); 1723 zil_itx_destroy(itx); 1724 } 1725 list_destroy(&clean_list); 1726} 1727 1728void 1729zil_itx_assign(zilog_t *zilog, itx_t *itx, dmu_tx_t *tx) 1730{ 1731 uint64_t txg; 1732 itxg_t *itxg; 1733 itxs_t *itxs, *clean = NULL; 1734 1735 /* 1736 * Object ids can be re-instantiated in the next txg so 1737 * remove any async transactions to avoid future leaks. 1738 * This can happen if a fsync occurs on the re-instantiated 1739 * object for a WR_INDIRECT or WR_NEED_COPY write, which gets 1740 * the new file data and flushes a write record for the old object. 
	 */
	if ((itx->itx_lr.lrc_txtype & ~TX_CI) == TX_REMOVE)
		zil_remove_async(zilog, itx->itx_oid);

	/*
	 * Ensure the data of a renamed file is committed before the rename.
	 */
	if ((itx->itx_lr.lrc_txtype & ~TX_CI) == TX_RENAME)
		zil_async_to_sync(zilog, itx->itx_oid);

	if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX)
		txg = ZILTEST_TXG;
	else
		txg = dmu_tx_get_txg(tx);

	itxg = &zilog->zl_itxg[txg & TXG_MASK];
	mutex_enter(&itxg->itxg_lock);
	itxs = itxg->itxg_itxs;
	if (itxg->itxg_txg != txg) {
		if (itxs != NULL) {
			/*
			 * The zil_clean callback hasn't got around to cleaning
			 * this itxg. Save the itxs for release below.
			 * This should be rare.
			 */
			zfs_dbgmsg("zil_itx_assign: missed itx cleanup for "
			    "txg %llu", itxg->itxg_txg);
			clean = itxg->itxg_itxs;
		}
		itxg->itxg_txg = txg;
		itxs = itxg->itxg_itxs = kmem_zalloc(sizeof (itxs_t), KM_SLEEP);

		list_create(&itxs->i_sync_list, sizeof (itx_t),
		    offsetof(itx_t, itx_node));
		avl_create(&itxs->i_async_tree, zil_aitx_compare,
		    sizeof (itx_async_node_t),
		    offsetof(itx_async_node_t, ia_node));
	}
	if (itx->itx_sync) {
		list_insert_tail(&itxs->i_sync_list, itx);
	} else {
		avl_tree_t *t = &itxs->i_async_tree;
		uint64_t foid = ((lr_ooo_t *)&itx->itx_lr)->lr_foid;
		itx_async_node_t *ian;
		avl_index_t where;

		ian = avl_find(t, &foid, &where);
		if (ian == NULL) {
			ian = kmem_alloc(sizeof (itx_async_node_t), KM_SLEEP);
			list_create(&ian->ia_list, sizeof (itx_t),
			    offsetof(itx_t, itx_node));
			ian->ia_foid = foid;
			avl_insert(t, ian, where);
		}
		list_insert_tail(&ian->ia_list, itx);
	}

	itx->itx_lr.lrc_txg = dmu_tx_get_txg(tx);

	/*
	 * We don't want to dirty the ZIL using ZILTEST_TXG, because
	 * zil_clean() will never be called using ZILTEST_TXG. Thus, we
	 * need to be careful to always dirty the ZIL using the "real"
	 * TXG (not itxg_txg) even when the SPA is frozen.
	 */
	zilog_dirty(zilog, dmu_tx_get_txg(tx));
	mutex_exit(&itxg->itxg_lock);

	/* Release the old itxs now we've dropped the lock */
	if (clean != NULL)
		zil_itxg_clean(clean);
}

/*
 * If there are any in-memory intent log transactions which have now been
 * synced then start up a taskq to free them. We should only do this after we
 * have written out the uberblocks (i.e. txg has been committed) so that we
 * don't inadvertently clean out in-memory log records that would be required
 * by zil_commit().
 */
void
zil_clean(zilog_t *zilog, uint64_t synced_txg)
{
	itxg_t *itxg = &zilog->zl_itxg[synced_txg & TXG_MASK];
	itxs_t *clean_me;

	ASSERT3U(synced_txg, <, ZILTEST_TXG);

	mutex_enter(&itxg->itxg_lock);
	if (itxg->itxg_itxs == NULL || itxg->itxg_txg == ZILTEST_TXG) {
		mutex_exit(&itxg->itxg_lock);
		return;
	}
	ASSERT3U(itxg->itxg_txg, <=, synced_txg);
	ASSERT3U(itxg->itxg_txg, !=, 0);
	clean_me = itxg->itxg_itxs;
	itxg->itxg_itxs = NULL;
	itxg->itxg_txg = 0;
	mutex_exit(&itxg->itxg_lock);
	/*
	 * Preferably start a task queue to free up the old itxs but
	 * if taskq_dispatch can't allocate resources to do that then
	 * free it in-line. This should be rare. Note, using TQ_SLEEP
	 * created a bad performance problem.
1845 */ 1846 ASSERT3P(zilog->zl_dmu_pool, !=, NULL); 1847 ASSERT3P(zilog->zl_dmu_pool->dp_zil_clean_taskq, !=, NULL); 1848 if (taskq_dispatch(zilog->zl_dmu_pool->dp_zil_clean_taskq, 1849 (void (*)(void *))zil_itxg_clean, clean_me, TQ_NOSLEEP) == 0) 1850 zil_itxg_clean(clean_me); 1851} 1852 1853/* 1854 * This function will traverse the queue of itxs that need to be 1855 * committed, and move them onto the ZIL's zl_itx_commit_list. 1856 */ 1857static void 1858zil_get_commit_list(zilog_t *zilog) 1859{ 1860 uint64_t otxg, txg; 1861 list_t *commit_list = &zilog->zl_itx_commit_list; 1862 1863 ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock)); 1864 1865 if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */ 1866 otxg = ZILTEST_TXG; 1867 else 1868 otxg = spa_last_synced_txg(zilog->zl_spa) + 1; 1869 1870 /* 1871 * This is inherently racy, since there is nothing to prevent 1872 * the last synced txg from changing. That's okay since we'll 1873 * only commit things in the future. 1874 */ 1875 for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) { 1876 itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK]; 1877 1878 mutex_enter(&itxg->itxg_lock); 1879 if (itxg->itxg_txg != txg) { 1880 mutex_exit(&itxg->itxg_lock); 1881 continue; 1882 } 1883 1884 /* 1885 * If we're adding itx records to the zl_itx_commit_list, 1886 * then the zil better be dirty in this "txg". We can assert 1887 * that here since we're holding the itxg_lock which will 1888 * prevent spa_sync from cleaning it. Once we add the itxs 1889 * to the zl_itx_commit_list we must commit it to disk even 1890 * if it's unnecessary (i.e. the txg was synced). 1891 */ 1892 ASSERT(zilog_is_dirty_in_txg(zilog, txg) || 1893 spa_freeze_txg(zilog->zl_spa) != UINT64_MAX); 1894 list_move_tail(commit_list, &itxg->itxg_itxs->i_sync_list); 1895 1896 mutex_exit(&itxg->itxg_lock); 1897 } 1898} 1899 1900/* 1901 * Move the async itxs for a specified object to commit into sync lists. 1902 */ 1903void 1904zil_async_to_sync(zilog_t *zilog, uint64_t foid) 1905{ 1906 uint64_t otxg, txg; 1907 itx_async_node_t *ian; 1908 avl_tree_t *t; 1909 avl_index_t where; 1910 1911 if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */ 1912 otxg = ZILTEST_TXG; 1913 else 1914 otxg = spa_last_synced_txg(zilog->zl_spa) + 1; 1915 1916 /* 1917 * This is inherently racy, since there is nothing to prevent 1918 * the last synced txg from changing. 1919 */ 1920 for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) { 1921 itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK]; 1922 1923 mutex_enter(&itxg->itxg_lock); 1924 if (itxg->itxg_txg != txg) { 1925 mutex_exit(&itxg->itxg_lock); 1926 continue; 1927 } 1928 1929 /* 1930 * If a foid is specified then find that node and append its 1931 * list. Otherwise walk the tree appending all the lists 1932 * to the sync list. We add to the end rather than the 1933 * beginning to ensure the create has happened. 
1934 */ 1935 t = &itxg->itxg_itxs->i_async_tree; 1936 if (foid != 0) { 1937 ian = avl_find(t, &foid, &where); 1938 if (ian != NULL) { 1939 list_move_tail(&itxg->itxg_itxs->i_sync_list, 1940 &ian->ia_list); 1941 } 1942 } else { 1943 void *cookie = NULL; 1944 1945 while ((ian = avl_destroy_nodes(t, &cookie)) != NULL) { 1946 list_move_tail(&itxg->itxg_itxs->i_sync_list, 1947 &ian->ia_list); 1948 list_destroy(&ian->ia_list); 1949 kmem_free(ian, sizeof (itx_async_node_t)); 1950 } 1951 } 1952 mutex_exit(&itxg->itxg_lock); 1953 } 1954} 1955 1956/* 1957 * This function will prune commit itxs that are at the head of the 1958 * commit list (it won't prune past the first non-commit itx), and 1959 * either: a) attach them to the last lwb that's still pending 1960 * completion, or b) skip them altogether. 1961 * 1962 * This is used as a performance optimization to prevent commit itxs 1963 * from generating new lwbs when it's unnecessary to do so. 1964 */ 1965static void 1966zil_prune_commit_list(zilog_t *zilog) 1967{ 1968 itx_t *itx; 1969 1970 ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock)); 1971 1972 while (itx = list_head(&zilog->zl_itx_commit_list)) { 1973 lr_t *lrc = &itx->itx_lr; 1974 if (lrc->lrc_txtype != TX_COMMIT) 1975 break; 1976 1977 mutex_enter(&zilog->zl_lock); 1978 1979 lwb_t *last_lwb = zilog->zl_last_lwb_opened; 1980 if (last_lwb == NULL || last_lwb->lwb_state == LWB_STATE_DONE) { 1981 /* 1982 * All of the itxs this waiter was waiting on 1983 * must have already completed (or there were 1984 * never any itx's for it to wait on), so it's 1985 * safe to skip this waiter and mark it done. 1986 */ 1987 zil_commit_waiter_skip(itx->itx_private); 1988 } else { 1989 zil_commit_waiter_link_lwb(itx->itx_private, last_lwb); 1990 itx->itx_private = NULL; 1991 } 1992 1993 mutex_exit(&zilog->zl_lock); 1994 1995 list_remove(&zilog->zl_itx_commit_list, itx); 1996 zil_itx_destroy(itx); 1997 } 1998 1999 IMPLY(itx != NULL, itx->itx_lr.lrc_txtype != TX_COMMIT); 2000} 2001 2002static void 2003zil_commit_writer_stall(zilog_t *zilog) 2004{ 2005 /* 2006 * When zio_alloc_zil() fails to allocate the next lwb block on 2007 * disk, we must call txg_wait_synced() to ensure all of the 2008 * lwbs in the zilog's zl_lwb_list are synced and then freed (in 2009 * zil_sync()), such that any subsequent ZIL writer (i.e. a call 2010 * to zil_process_commit_list()) will have to call zil_create(), 2011 * and start a new ZIL chain. 2012 * 2013 * Since zio_alloc_zil() failed, the lwb that was previously 2014 * issued does not have a pointer to the "next" lwb on disk. 2015 * Thus, if another ZIL writer thread was to allocate the "next" 2016 * on-disk lwb, that block could be leaked in the event of a 2017 * crash (because the previous lwb on-disk would not point to 2018 * it). 2019 * 2020 * We must hold the zilog's zl_issuer_lock while we do this, to 2021 * ensure no new threads enter zil_process_commit_list() until 2022 * all lwb's in the zl_lwb_list have been synced and freed 2023 * (which is achieved via the txg_wait_synced() call). 2024 */ 2025 ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock)); 2026 txg_wait_synced(zilog->zl_dmu_pool, 0); 2027 ASSERT3P(list_tail(&zilog->zl_lwb_list), ==, NULL); 2028} 2029 2030/* 2031 * This function will traverse the commit list, creating new lwbs as 2032 * needed, and committing the itxs from the commit list to these newly 2033 * created lwbs. Additionally, as a new lwb is created, the previous 2034 * lwb will be issued to the zio layer to be written to disk. 
2035 */ 2036static void 2037 zil_process_commit_list(zilog_t *zilog) 2038{ 2039 spa_t *spa = zilog->zl_spa; 2040 list_t nolwb_waiters; 2041 lwb_t *lwb; 2042 itx_t *itx; 2043 2044 ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock)); 2045 2046 /* 2047 * Return if there's nothing to commit before we dirty the fs by 2048 * calling zil_create(). 2049 */ 2050 if (list_head(&zilog->zl_itx_commit_list) == NULL) 2051 return; 2052 2053 list_create(&nolwb_waiters, sizeof (zil_commit_waiter_t), 2054 offsetof(zil_commit_waiter_t, zcw_node)); 2055 2056 lwb = list_tail(&zilog->zl_lwb_list); 2057 if (lwb == NULL) { 2058 lwb = zil_create(zilog); 2059 } else { 2060 ASSERT3S(lwb->lwb_state, !=, LWB_STATE_ISSUED); 2061 ASSERT3S(lwb->lwb_state, !=, LWB_STATE_DONE); 2062 } 2063 2064 while (itx = list_head(&zilog->zl_itx_commit_list)) { 2065 lr_t *lrc = &itx->itx_lr; 2066 uint64_t txg = lrc->lrc_txg; 2067 2068 ASSERT3U(txg, !=, 0); 2069 2070 if (lrc->lrc_txtype == TX_COMMIT) { 2071 DTRACE_PROBE2(zil__process__commit__itx, 2072 zilog_t *, zilog, itx_t *, itx); 2073 } else { 2074 DTRACE_PROBE2(zil__process__normal__itx, 2075 zilog_t *, zilog, itx_t *, itx); 2076 } 2077 2078 boolean_t synced = txg <= spa_last_synced_txg(spa); 2079 boolean_t frozen = txg > spa_freeze_txg(spa); 2080 2081 /* 2082 * If the txg of this itx has already been synced out, then 2083 * we don't need to commit this itx to an lwb. This is 2084 * because the data of this itx will have already been 2085 * written to the main pool. This is inherently racy, and 2086 * it's still ok to commit an itx whose txg has already 2087 * been synced; this will result in a write that's 2088 * unnecessary, but will do no harm. 2089 * 2090 * With that said, we always want to commit TX_COMMIT itxs 2091 * to an lwb, regardless of whether or not that itx's txg 2092 * has been synced out. We do this to ensure any OPENED lwb 2093 * will always have at least one zil_commit_waiter_t linked 2094 * to the lwb. 2095 * 2096 * As a counter-example, if we skipped TX_COMMIT itx's 2097 * whose txg had already been synced, the following 2098 * situation could occur if we happened to be racing with 2099 * spa_sync: 2100 * 2101 * 1. we commit a non-TX_COMMIT itx to an lwb, where the 2102 * itx's txg is 10 and the last synced txg is 9. 2103 * 2. spa_sync finishes syncing out txg 10. 2104 * 3. we move to the next itx in the list, it's a TX_COMMIT 2105 * whose txg is 10, so we skip it rather than committing 2106 * it to the lwb used in (1). 2107 * 2108 * If the itx that is skipped in (3) is the last TX_COMMIT 2109 * itx in the commit list, then it's possible for the lwb 2110 * used in (1) to remain in the OPENED state indefinitely. 2111 * 2112 * To prevent the above scenario from occurring, ensuring 2113 * that once an lwb is OPENED it will transition to ISSUED 2114 * and eventually DONE, we always commit TX_COMMIT itx's to 2115 * an lwb here, even if that itx's txg has already been 2116 * synced. 2117 * 2118 * Finally, if the pool is frozen, we _always_ commit the 2119 * itx. The point of freezing the pool is to prevent data 2120 * from being written to the main pool via spa_sync, and 2121 * instead rely solely on the ZIL to persistently store the 2122 * data; i.e. when the pool is frozen, the last synced txg 2123 * value can't be trusted. 
2124 */ 2125 if (frozen || !synced || lrc->lrc_txtype == TX_COMMIT) { 2126 if (lwb != NULL) { 2127 lwb = zil_lwb_commit(zilog, itx, lwb); 2128 } else if (lrc->lrc_txtype == TX_COMMIT) { 2129 ASSERT3P(lwb, ==, NULL); 2130 zil_commit_waiter_link_nolwb( 2131 itx->itx_private, &nolwb_waiters); 2132 } 2133 } 2134 2135 list_remove(&zilog->zl_itx_commit_list, itx); 2136 zil_itx_destroy(itx); 2137 } 2138 2139 if (lwb == NULL) { 2140 /* 2141 * This indicates zio_alloc_zil() failed to allocate the 2142 * "next" lwb on-disk. When this happens, we must stall 2143 * the ZIL write pipeline; see the comment within 2144 * zil_commit_writer_stall() for more details. 2145 */ 2146 zil_commit_writer_stall(zilog); 2147 2148 /* 2149 * Additionally, we have to signal and mark the "nolwb" 2150 * waiters as "done" here, since without an lwb, we 2151 * can't do this via zil_lwb_flush_vdevs_done() like 2152 * normal. 2153 */ 2154 zil_commit_waiter_t *zcw; 2155 while (zcw = list_head(&nolwb_waiters)) { 2156 zil_commit_waiter_skip(zcw); 2157 list_remove(&nolwb_waiters, zcw); 2158 } 2159 } else { 2160 ASSERT(list_is_empty(&nolwb_waiters)); 2161 ASSERT3P(lwb, !=, NULL); 2162 ASSERT3S(lwb->lwb_state, !=, LWB_STATE_ISSUED); 2163 ASSERT3S(lwb->lwb_state, !=, LWB_STATE_DONE); 2164 2165 /* 2166 * At this point, the ZIL block pointed at by the "lwb" 2167 * variable is in one of the following states: "closed" 2168 * or "open". 2169 * 2170 * If it's "closed", then no itxs have been committed to 2171 * it, so there's no point in issuing its zio (i.e. 2172 * it's "empty"). 2173 * 2174 * If it's in the "open" state, then it contains one or more 2175 * itxs that eventually need to be committed to stable 2176 * storage. In this case we intentionally do not issue 2177 * the lwb's zio to disk yet, and instead rely on one of 2178 * the following two mechanisms for issuing the zio: 2179 * 2180 * 1. Ideally, there will be more ZIL activity occurring 2181 * on the system, such that this function will be 2182 * immediately called again (not necessarily by the same 2183 * thread) and this lwb's zio will be issued via 2184 * zil_lwb_commit(). This way, the lwb is guaranteed to 2185 * be "full" when it is issued to disk, and we'll make 2186 * use of the lwb's size the best we can. 2187 * 2188 * 2. If there isn't sufficient ZIL activity occurring on 2189 * the system, such that this lwb's zio isn't issued via 2190 * zil_lwb_commit(), zil_commit_waiter() will issue the 2191 * lwb's zio. If this occurs, the lwb is not guaranteed 2192 * to be "full" by the time its zio is issued, and means 2193 * the size of the lwb was "too large" given the amount 2194 * of ZIL activity occurring on the system at that time. 2195 * 2196 * We do this for a couple of reasons: 2197 * 2198 * 1. To try and reduce the number of IOPs needed to 2199 * write the same number of itxs. If an lwb has space 2200 * available in its buffer for more itxs, and more itxs 2201 * will be committed relatively soon (relative to the 2202 * latency of performing a write), then it's beneficial 2203 * to wait for these "next" itxs. This way, more itxs 2204 * can be committed to stable storage with fewer writes. 2205 * 2206 * 2. To try and use the largest lwb block size that the 2207 * incoming rate of itxs can support. Again, this is to 2208 * try and pack as many itxs into as few lwbs as 2209 * possible, without significantly impacting the latency 2210 * of each individual itx. 
2211 */ 2212 } 2213} 2214 2215/* 2216 * This function is responsible for ensuring the passed in commit waiter 2217 * (and associated commit itx) is committed to an lwb. If the waiter is 2218 * not already committed to an lwb, all itxs in the zilog's queue of 2219 * itxs will be processed. The assumption is the passed in waiter's 2220 * commit itx will be found in the queue just like the other non-commit 2221 * itxs, such that when the entire queue is processed, the waiter will 2222 * have been committed to an lwb. 2223 * 2224 * The lwb associated with the passed in waiter is not guaranteed to 2225 * have been issued by the time this function completes. If the lwb is 2226 * not issued, we rely on future calls to zil_commit_writer() to issue 2227 * the lwb, or the timeout mechanism found in zil_commit_waiter(). 2228 */ 2229static void 2230zil_commit_writer(zilog_t *zilog, zil_commit_waiter_t *zcw) 2231{ 2232 ASSERT(!MUTEX_HELD(&zilog->zl_lock)); 2233 ASSERT(spa_writeable(zilog->zl_spa)); 2234 2235 mutex_enter(&zilog->zl_issuer_lock); 2236 2237 if (zcw->zcw_lwb != NULL || zcw->zcw_done) { 2238 /* 2239 * It's possible that, while we were waiting to acquire 2240 * the "zl_issuer_lock", another thread committed this 2241 * waiter to an lwb. If that occurs, we bail out early, 2242 * without processing any of the zilog's queue of itxs. 2243 * 2244 * On certain workloads and system configurations, the 2245 * "zl_issuer_lock" can become highly contended. In an 2246 * attempt to reduce this contention, we immediately drop 2247 * the lock if the waiter has already been processed. 2248 * 2249 * We've measured this optimization to reduce CPU spent 2250 * contending on this lock by up to 5%, using a system 2251 * with 32 CPUs, low latency storage (~50 usec writes), 2252 * and 1024 threads performing sync writes. 2253 */ 2254 goto out; 2255 } 2256 2257 zil_get_commit_list(zilog); 2258 zil_prune_commit_list(zilog); 2259 zil_process_commit_list(zilog); 2260 2261out: 2262 mutex_exit(&zilog->zl_issuer_lock); 2263} 2264 2265static void 2266zil_commit_waiter_timeout(zilog_t *zilog, zil_commit_waiter_t *zcw) 2267{ 2268 ASSERT(!MUTEX_HELD(&zilog->zl_issuer_lock)); 2269 ASSERT(MUTEX_HELD(&zcw->zcw_lock)); 2270 ASSERT3B(zcw->zcw_done, ==, B_FALSE); 2271 2272 lwb_t *lwb = zcw->zcw_lwb; 2273 ASSERT3P(lwb, !=, NULL); 2274 ASSERT3S(lwb->lwb_state, !=, LWB_STATE_CLOSED); 2275 2276 /* 2277 * If the lwb has already been issued by another thread, we can 2278 * immediately return since there's no work to be done (the 2279 * point of this function is to issue the lwb). Additionally, we 2280 * do this prior to acquiring the zl_issuer_lock, to avoid 2281 * acquiring it when it's not necessary to do so. 2282 */ 2283 if (lwb->lwb_state == LWB_STATE_ISSUED || 2284 lwb->lwb_state == LWB_STATE_DONE) 2285 return; 2286 2287 /* 2288 * In order to call zil_lwb_write_issue() we must hold the 2289 * zilog's "zl_issuer_lock". We can't simply acquire that lock, 2290 * since we're already holding the commit waiter's "zcw_lock", 2291 * and those two locks are acquired in the opposite order 2292 * elsewhere. 2293 */ 2294 mutex_exit(&zcw->zcw_lock); 2295 mutex_enter(&zilog->zl_issuer_lock); 2296 mutex_enter(&zcw->zcw_lock); 2297 2298 /* 2299 * Since we just dropped and re-acquired the commit waiter's 2300 * lock, we have to re-check to see if the waiter was marked 2301 * "done" during that process. 
If the waiter was marked "done", 2302 * the "lwb" pointer is no longer valid (it can be free'd after 2303 * the waiter is marked "done"), so without this check we could 2304 * wind up with a use-after-free error below. 2305 */ 2306 if (zcw->zcw_done) 2307 goto out; 2308 2309 ASSERT3P(lwb, ==, zcw->zcw_lwb); 2310 2311 /* 2312 * We've already checked this above, but since we hadn't acquired 2313 * the zilog's zl_issuer_lock, we have to perform this check a 2314 * second time while holding the lock. 2315 * 2316 * We don't need to hold the zl_lock since the lwb cannot transition 2317 * from OPENED to ISSUED while we hold the zl_issuer_lock. The lwb 2318 * _can_ transition from ISSUED to DONE, but it's OK to race with 2319 * that transition since we treat the lwb the same, whether it's in 2320 * the ISSUED or DONE states. 2321 * 2322 * The important thing is we treat the lwb differently depending on 2323 * if it's ISSUED or OPENED, and block any other threads that might 2324 * attempt to issue this lwb. For that reason we hold the 2325 * zl_issuer_lock when checking the lwb_state; we must not call 2326 * zil_lwb_write_issue() if the lwb had already been issued. 2327 * 2328 * See the comment above the lwb_state_t structure definition for 2329 * more details on the lwb states, and locking requirements. 2330 */ 2331 if (lwb->lwb_state == LWB_STATE_ISSUED || 2332 lwb->lwb_state == LWB_STATE_DONE) 2333 goto out; 2334 2335 ASSERT3S(lwb->lwb_state, ==, LWB_STATE_OPENED); 2336 2337 /* 2338 * As described in the comments above zil_commit_waiter() and 2339 * zil_process_commit_list(), we need to issue this lwb's zio 2340 * since we've reached the commit waiter's timeout and it still 2341 * hasn't been issued. 2342 */ 2343 lwb_t *nlwb = zil_lwb_write_issue(zilog, lwb); 2344 2345 IMPLY(nlwb != NULL, lwb->lwb_state != LWB_STATE_OPENED); 2346 2347 /* 2348 * Since the lwb's zio hadn't been issued by the time this thread 2349 * reached its timeout, we reset the zilog's "zl_cur_used" field 2350 * to influence the zil block size selection algorithm. 2351 * 2352 * By having to issue the lwb's zio here, it means the size of the 2353 * lwb was too large, given the incoming throughput of itxs. By 2354 * setting "zl_cur_used" to zero, we communicate this fact to the 2355 * block size selection algorithm, so it can take this information 2356 * into account, and potentially select a smaller size for the 2357 * next lwb block that is allocated. 2358 */ 2359 zilog->zl_cur_used = 0; 2360 2361 if (nlwb == NULL) { 2362 /* 2363 * When zil_lwb_write_issue() returns NULL, this 2364 * indicates zio_alloc_zil() failed to allocate the 2365 * "next" lwb on-disk. When this occurs, the ZIL write 2366 * pipeline must be stalled; see the comment within the 2367 * zil_commit_writer_stall() function for more details. 2368 * 2369 * We must drop the commit waiter's lock prior to 2370 * calling zil_commit_writer_stall() or else we can wind 2371 * up with the following deadlock: 2372 * 2373 * - This thread is waiting for the txg to sync while 2374 * holding the waiter's lock; txg_wait_synced() is 2375 * used within zil_commit_writer_stall(). 2376 * 2377 * - The txg can't sync because it is waiting for this 2378 * lwb's zio callback to call dmu_tx_commit(). 
2379 * 2380 * - The lwb's zio callback can't call dmu_tx_commit() 2381 * because it's blocked trying to acquire the waiter's 2382 * lock, which occurs prior to calling dmu_tx_commit() 2383 */ 2384 mutex_exit(&zcw->zcw_lock); 2385 zil_commit_writer_stall(zilog); 2386 mutex_enter(&zcw->zcw_lock); 2387 } 2388 2389out: 2390 mutex_exit(&zilog->zl_issuer_lock); 2391 ASSERT(MUTEX_HELD(&zcw->zcw_lock)); 2392} 2393 2394/* 2395 * This function is responsible for performing the following two tasks: 2396 * 2397 * 1. its primary responsibility is to block until the given "commit 2398 * waiter" is considered "done". 2399 * 2400 * 2. its secondary responsibility is to issue the zio for the lwb that 2401 * the given "commit waiter" is waiting on, if this function has 2402 * waited "long enough" and the lwb is still in the "open" state. 2403 * 2404 * Given a sufficient amount of itxs being generated and written using 2405 * the ZIL, the lwb's zio will be issued via the zil_lwb_commit() 2406 * function. If this does not occur, this secondary responsibility will 2407 * ensure the lwb is issued even if there is not other synchronous 2408 * activity on the system. 2409 * 2410 * For more details, see zil_process_commit_list(); more specifically, 2411 * the comment at the bottom of that function. 2412 */ 2413static void 2414zil_commit_waiter(zilog_t *zilog, zil_commit_waiter_t *zcw) 2415{ 2416 ASSERT(!MUTEX_HELD(&zilog->zl_lock)); 2417 ASSERT(!MUTEX_HELD(&zilog->zl_issuer_lock)); 2418 ASSERT(spa_writeable(zilog->zl_spa)); 2419 2420 mutex_enter(&zcw->zcw_lock); 2421 2422 /* 2423 * The timeout is scaled based on the lwb latency to avoid 2424 * significantly impacting the latency of each individual itx. 2425 * For more details, see the comment at the bottom of the 2426 * zil_process_commit_list() function. 2427 */ 2428 int pct = MAX(zfs_commit_timeout_pct, 1); 2429#if defined(illumos) || !defined(_KERNEL) 2430 hrtime_t sleep = (zilog->zl_last_lwb_latency * pct) / 100; 2431 hrtime_t wakeup = gethrtime() + sleep; 2432#else 2433 sbintime_t sleep = nstosbt((zilog->zl_last_lwb_latency * pct) / 100); 2434 sbintime_t wakeup = getsbinuptime() + sleep; 2435#endif 2436 boolean_t timedout = B_FALSE; 2437 2438 while (!zcw->zcw_done) { 2439 ASSERT(MUTEX_HELD(&zcw->zcw_lock)); 2440 2441 lwb_t *lwb = zcw->zcw_lwb; 2442 2443 /* 2444 * Usually, the waiter will have a non-NULL lwb field here, 2445 * but it's possible for it to be NULL as a result of 2446 * zil_commit() racing with spa_sync(). 2447 * 2448 * When zil_clean() is called, it's possible for the itxg 2449 * list (which may be cleaned via a taskq) to contain 2450 * commit itxs. When this occurs, the commit waiters linked 2451 * off of these commit itxs will not be committed to an 2452 * lwb. Additionally, these commit waiters will not be 2453 * marked done until zil_commit_waiter_skip() is called via 2454 * zil_itxg_clean(). 2455 * 2456 * Thus, it's possible for this commit waiter (i.e. the 2457 * "zcw" variable) to be found in this "in between" state; 2458 * where it's "zcw_lwb" field is NULL, and it hasn't yet 2459 * been skipped, so it's "zcw_done" field is still B_FALSE. 
2460 */ 2461 IMPLY(lwb != NULL, lwb->lwb_state != LWB_STATE_CLOSED); 2462 2463 if (lwb != NULL && lwb->lwb_state == LWB_STATE_OPENED) { 2464 ASSERT3B(timedout, ==, B_FALSE); 2465 2466 /* 2467 * If the lwb hasn't been issued yet, then we 2468 * need to wait with a timeout, in case this 2469 * function needs to issue the lwb after the 2470 * timeout is reached; responsibility (2) from 2471 * the comment above this function. 2472 */ 2473#if defined(illumos) || !defined(_KERNEL) 2474 clock_t timeleft = cv_timedwait_hires(&zcw->zcw_cv, 2475 &zcw->zcw_lock, wakeup, USEC2NSEC(1), 2476 CALLOUT_FLAG_ABSOLUTE); 2477 2478 if (timeleft >= 0 || zcw->zcw_done) 2479 continue; 2480#else 2481 int wait_err = cv_timedwait_sbt(&zcw->zcw_cv, 2482 &zcw->zcw_lock, wakeup, SBT_1NS, C_ABSOLUTE); 2483 if (wait_err != EWOULDBLOCK || zcw->zcw_done) 2484 continue; 2485#endif 2486 2487 timedout = B_TRUE; 2488 zil_commit_waiter_timeout(zilog, zcw); 2489 2490 if (!zcw->zcw_done) { 2491 /* 2492 * If the commit waiter has already been 2493 * marked "done", it's possible for the 2494 * waiter's lwb structure to have already 2495 * been freed. Thus, we can only reliably 2496 * make these assertions if the waiter 2497 * isn't done. 2498 */ 2499 ASSERT3P(lwb, ==, zcw->zcw_lwb); 2500 ASSERT3S(lwb->lwb_state, !=, LWB_STATE_OPENED); 2501 } 2502 } else { 2503 /* 2504 * If the lwb isn't open, then it must have already 2505 * been issued. In that case, there's no need to 2506 * use a timeout when waiting for the lwb to 2507 * complete. 2508 * 2509 * Additionally, if the lwb is NULL, the waiter 2510 * will soon be signalled and marked done via 2511 * zil_clean() and zil_itxg_clean(), so no timeout 2512 * is required. 2513 */ 2514 2515 IMPLY(lwb != NULL, 2516 lwb->lwb_state == LWB_STATE_ISSUED || 2517 lwb->lwb_state == LWB_STATE_DONE); 2518 cv_wait(&zcw->zcw_cv, &zcw->zcw_lock); 2519 } 2520 } 2521 2522 mutex_exit(&zcw->zcw_lock); 2523} 2524 2525static zil_commit_waiter_t * 2526zil_alloc_commit_waiter() 2527{ 2528 zil_commit_waiter_t *zcw = kmem_cache_alloc(zil_zcw_cache, KM_SLEEP); 2529 2530 cv_init(&zcw->zcw_cv, NULL, CV_DEFAULT, NULL); 2531 mutex_init(&zcw->zcw_lock, NULL, MUTEX_DEFAULT, NULL); 2532 list_link_init(&zcw->zcw_node); 2533 zcw->zcw_lwb = NULL; 2534 zcw->zcw_done = B_FALSE; 2535 zcw->zcw_zio_error = 0; 2536 2537 return (zcw); 2538} 2539 2540static void 2541zil_free_commit_waiter(zil_commit_waiter_t *zcw) 2542{ 2543 ASSERT(!list_link_active(&zcw->zcw_node)); 2544 ASSERT3P(zcw->zcw_lwb, ==, NULL); 2545 ASSERT3B(zcw->zcw_done, ==, B_TRUE); 2546 mutex_destroy(&zcw->zcw_lock); 2547 cv_destroy(&zcw->zcw_cv); 2548 kmem_cache_free(zil_zcw_cache, zcw); 2549} 2550 2551/* 2552 * This function is used to create a TX_COMMIT itx and assign it. This 2553 * way, it will be linked into the ZIL's list of synchronous itxs, and 2554 * then later committed to an lwb (or skipped) when 2555 * zil_process_commit_list() is called. 2556 */ 2557static void 2558zil_commit_itx_assign(zilog_t *zilog, zil_commit_waiter_t *zcw) 2559{ 2560 dmu_tx_t *tx = dmu_tx_create(zilog->zl_os); 2561 VERIFY0(dmu_tx_assign(tx, TXG_WAIT)); 2562 2563 itx_t *itx = zil_itx_create(TX_COMMIT, sizeof (lr_t)); 2564 itx->itx_sync = B_TRUE; 2565 itx->itx_private = zcw; 2566 2567 zil_itx_assign(zilog, itx, tx); 2568 2569 dmu_tx_commit(tx); 2570} 2571 2572/* 2573 * Commit ZFS Intent Log transactions (itxs) to stable storage. 2574 * 2575 * When writing ZIL transactions to the on-disk representation of the 2576 * ZIL, the itxs are committed to a Log Write Block (lwb). 
Multiple 2577 * itxs can be committed to a single lwb. Once an lwb is written and 2578 * committed to stable storage (i.e. the lwb is written, and vdevs have 2579 * been flushed), each itx that was committed to that lwb is also 2580 * considered to be committed to stable storage. 2581 * 2582 * When an itx is committed to an lwb, the log record (lr_t) contained 2583 * by the itx is copied into the lwb's zio buffer, and once this buffer 2584 * is written to disk, it becomes an on-disk ZIL block. 2585 * 2586 * As itxs are generated, they're inserted into the ZIL's queue of 2587 * uncommitted itxs. The semantics of zil_commit() are such that it will 2588 * block until all itxs that were in the queue when it was called, are 2589 * committed to stable storage. 2590 * 2591 * If "foid" is zero, this means all "synchronous" and "asynchronous" 2592 * itxs, for all objects in the dataset, will be committed to stable 2593 * storage prior to zil_commit() returning. If "foid" is non-zero, all 2594 * "synchronous" itxs for all objects, but only "asynchronous" itxs 2595 * that correspond to the foid passed in, will be committed to stable 2596 * storage prior to zil_commit() returning. 2597 * 2598 * Generally speaking, when zil_commit() is called, the consumer doesn't 2599 * actually care about _all_ of the uncommitted itxs. Instead, they're 2600 * simply trying to wait for a specific itx to be committed to disk, 2601 * but the interface(s) for interacting with the ZIL don't allow such 2602 * fine-grained communication. A better interface would allow a consumer 2603 * to create and assign an itx, and then pass a reference to this itx to 2604 * zil_commit(); such that zil_commit() would return as soon as that 2605 * specific itx was committed to disk (instead of waiting for _all_ 2606 * itxs to be committed). 2607 * 2608 * When a thread calls zil_commit() a special "commit itx" will be 2609 * generated, along with a corresponding "waiter" for this commit itx. 2610 * zil_commit() will wait on this waiter's CV, such that when the waiter 2611 * is marked done, and signalled, zil_commit() will return. 2612 * 2613 * This commit itx is inserted into the queue of uncommitted itxs. This 2614 * provides an easy mechanism for determining which itxs were in the 2615 * queue prior to zil_commit() having been called, and which itxs were 2616 * added after zil_commit() was called. 2617 * 2618 * The commit itx is special; it doesn't have any on-disk representation. 2619 * When a commit itx is "committed" to an lwb, the waiter associated 2620 * with it is linked onto the lwb's list of waiters. Then, when that lwb 2621 * completes, each waiter on the lwb's list is marked done and signalled 2622 * -- allowing the thread waiting on the waiter to return from zil_commit(). 2623 * 2624 * It's important to point out a few critical factors that allow us 2625 * to make use of the commit itxs, commit waiters, per-lwb lists of 2626 * commit waiters, and zio completion callbacks like we're doing: 2627 * 2628 * 1. The list of waiters for each lwb is traversed, and each commit 2629 * waiter is marked "done" and signalled, in the zio completion 2630 * callback of the lwb's zio[*]. 2631 * 2632 * * Actually, the waiters are signalled in the zio completion 2633 * callback of the root zio for the DKIOCFLUSHWRITECACHE commands 2634 * that are sent to the vdevs upon completion of the lwb zio. 2635 * 2636 * 2. 
When the itxs are inserted into the ZIL's queue of uncommitted 2637 * itxs, the order in which they are inserted is preserved[*]; as 2638 * itxs are added to the queue, they are added to the tail of 2639 * in-memory linked lists. 2640 * 2641 * When committing the itxs to lwbs (to be written to disk), they 2642 * are committed in the same order in which the itxs were added to 2643 * the uncommitted queue's linked list(s); i.e. the linked list of 2644 * itxs to commit is traversed from head to tail, and each itx is 2645 * committed to an lwb in that order. 2646 * 2647 * * To clarify: 2648 * 2649 * - the order of "sync" itxs is preserved w.r.t. other 2650 * "sync" itxs, regardless of the corresponding objects. 2651 * - the order of "async" itxs is preserved w.r.t. other 2652 * "async" itxs corresponding to the same object. 2653 * - the order of "async" itxs is *not* preserved w.r.t. other 2654 * "async" itxs corresponding to different objects. 2655 * - the order of "sync" itxs w.r.t. "async" itxs (or vice 2656 * versa) is *not* preserved, even for itxs that correspond 2657 * to the same object. 2658 * 2659 * For more details, see: zil_itx_assign(), zil_async_to_sync(), 2660 * zil_get_commit_list(), and zil_process_commit_list(). 2661 * 2662 * 3. The lwbs represent a linked list of blocks on disk. Thus, any 2663 * lwb cannot be considered committed to stable storage, until its 2664 * "previous" lwb is also committed to stable storage. This fact, 2665 * coupled with the fact described above, means that itxs are 2666 * committed in (roughly) the order in which they were generated. 2667 * This is essential because itxs are dependent on prior itxs. 2668 * Thus, we *must not* deem an itx as being committed to stable 2669 * storage, until *all* prior itxs have also been committed to 2670 * stable storage. 2671 * 2672 * To enforce this ordering of lwb zio's, while still leveraging as 2673 * much of the underlying storage performance as possible, we rely 2674 * on two fundamental concepts: 2675 * 2676 * 1. The creation and issuance of lwb zio's is protected by 2677 * the zilog's "zl_issuer_lock", which ensures only a single 2678 * thread is creating and/or issuing lwb's at a time 2679 * 2. The "previous" lwb is a child of the "current" lwb 2680 * (leveraging the zio parent-child dependency graph) 2681 * 2682 * By relying on this parent-child zio relationship, we can have 2683 * many lwb zio's concurrently issued to the underlying storage, 2684 * but the order in which they complete will be the same order in 2685 * which they were created. 2686 */ 2687void 2688zil_commit(zilog_t *zilog, uint64_t foid) 2689{ 2690 /* 2691 * We should never attempt to call zil_commit on a snapshot for 2692 * a couple of reasons: 2693 * 2694 * 1. A snapshot may never be modified, thus it cannot have any 2695 * in-flight itxs that would have modified the dataset. 2696 * 2697 * 2. By design, when zil_commit() is called, a commit itx will 2698 * be assigned to this zilog; as a result, the zilog will be 2699 * dirtied. We must not dirty the zilog of a snapshot; there are 2700 * checks in the code that enforce this invariant, and they will 2701 * cause a panic if it's not upheld. 2702 */ 2703 ASSERT3B(dmu_objset_is_snapshot(zilog->zl_os), ==, B_FALSE); 2704 2705 if (zilog->zl_sync == ZFS_SYNC_DISABLED) 2706 return; 2707 2708 if (!spa_writeable(zilog->zl_spa)) { 2709 /* 2710 * If the SPA is not writable, there should never be any 2711 * pending itxs waiting to be committed to disk. 
If that 2712 * weren't true, we'd skip writing those itxs out, and 2713 * would break the semantics of zil_commit(); thus, we're 2714 * verifying that truth before we return to the caller. 2715 */ 2716 ASSERT(list_is_empty(&zilog->zl_lwb_list)); 2717 ASSERT3P(zilog->zl_last_lwb_opened, ==, NULL); 2718 for (int i = 0; i < TXG_SIZE; i++) 2719 ASSERT3P(zilog->zl_itxg[i].itxg_itxs, ==, NULL); 2720 return; 2721 } 2722 2723 /* 2724 * If the ZIL is suspended, we don't want to dirty it by calling 2725 * zil_commit_itx_assign() below, nor can we write out 2726 * lwbs as would be done in zil_commit_writer(). Thus, we 2727 * simply rely on txg_wait_synced() to maintain the necessary 2728 * semantics, and avoid calling those functions altogether. 2729 */ 2730 if (zilog->zl_suspend > 0) { 2731 txg_wait_synced(zilog->zl_dmu_pool, 0); 2732 return; 2733 } 2734 2735 zil_commit_impl(zilog, foid); 2736} 2737 2738void 2739zil_commit_impl(zilog_t *zilog, uint64_t foid) 2740{ 2741 /* 2742 * Move the "async" itxs for the specified foid to the "sync" 2743 * queues, such that they will be later committed (or skipped) 2744 * to an lwb when zil_process_commit_list() is called. 2745 * 2746 * Since these "async" itxs must be committed prior to this 2747 * call to zil_commit returning, we must perform this operation 2748 * before we call zil_commit_itx_assign(). 2749 */ 2750 zil_async_to_sync(zilog, foid); 2751 2752 /* 2753 * We allocate a new "waiter" structure which will initially be 2754 * linked to the commit itx using the itx's "itx_private" field. 2755 * Since the commit itx doesn't represent any on-disk state, 2756 * when it's committed to an lwb, rather than copying its 2757 * lr_t into the lwb's buffer, the commit itx's "waiter" will be 2758 * added to the lwb's list of waiters. Then, when the lwb is 2759 * committed to stable storage, each waiter in the lwb's list of 2760 * waiters will be marked "done", and signalled. 2761 * 2762 * We must create the waiter and assign the commit itx prior to 2763 * calling zil_commit_writer(), or else our specific commit itx 2764 * is not guaranteed to be committed to an lwb prior to calling 2765 * zil_commit_waiter(). 2766 */ 2767 zil_commit_waiter_t *zcw = zil_alloc_commit_waiter(); 2768 zil_commit_itx_assign(zilog, zcw); 2769 2770 zil_commit_writer(zilog, zcw); 2771 zil_commit_waiter(zilog, zcw); 2772 2773 if (zcw->zcw_zio_error != 0) { 2774 /* 2775 * If there was an error writing out the ZIL blocks that 2776 * this thread is waiting on, then we fall back to 2777 * relying on spa_sync() to write out the data this 2778 * thread is waiting on. Obviously this has performance 2779 * implications, but the expectation is for this to be 2780 * an exceptional case, and shouldn't occur often. 2781 */ 2782 DTRACE_PROBE2(zil__commit__io__error, 2783 zilog_t *, zilog, zil_commit_waiter_t *, zcw); 2784 txg_wait_synced(zilog->zl_dmu_pool, 0); 2785 } 2786 2787 zil_free_commit_waiter(zcw); 2788} 2789 2790/* 2791 * Called in syncing context to free committed log blocks and update log header. 2792 */ 2793void 2794zil_sync(zilog_t *zilog, dmu_tx_t *tx) 2795{ 2796 zil_header_t *zh = zil_header_in_syncing_context(zilog); 2797 uint64_t txg = dmu_tx_get_txg(tx); 2798 spa_t *spa = zilog->zl_spa; 2799 uint64_t *replayed_seq = &zilog->zl_replayed_seq[txg & TXG_MASK]; 2800 lwb_t *lwb; 2801 2802 /* 2803 * We don't zero out zl_destroy_txg, so make sure we don't try 2804 * to destroy it twice. 
2805 */ 2806 if (spa_sync_pass(spa) != 1) 2807 return; 2808 2809 mutex_enter(&zilog->zl_lock); 2810 2811 ASSERT(zilog->zl_stop_sync == 0); 2812 2813 if (*replayed_seq != 0) { 2814 ASSERT(zh->zh_replay_seq < *replayed_seq); 2815 zh->zh_replay_seq = *replayed_seq; 2816 *replayed_seq = 0; 2817 } 2818 2819 if (zilog->zl_destroy_txg == txg) { 2820 blkptr_t blk = zh->zh_log; 2821 2822 ASSERT(list_head(&zilog->zl_lwb_list) == NULL); 2823 2824 bzero(zh, sizeof (zil_header_t)); 2825 bzero(zilog->zl_replayed_seq, sizeof (zilog->zl_replayed_seq)); 2826 2827 if (zilog->zl_keep_first) { 2828 /* 2829 * If this block was part of log chain that couldn't 2830 * be claimed because a device was missing during 2831 * zil_claim(), but that device later returns, 2832 * then this block could erroneously appear valid. 2833 * To guard against this, assign a new GUID to the new 2834 * log chain so it doesn't matter what blk points to. 2835 */ 2836 zil_init_log_chain(zilog, &blk); 2837 zh->zh_log = blk; 2838 } 2839 } 2840 2841 while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) { 2842 zh->zh_log = lwb->lwb_blk; 2843 if (lwb->lwb_buf != NULL || lwb->lwb_max_txg > txg) 2844 break; 2845 list_remove(&zilog->zl_lwb_list, lwb); 2846 zio_free(spa, txg, &lwb->lwb_blk); 2847 zil_free_lwb(zilog, lwb); 2848 2849 /* 2850 * If we don't have anything left in the lwb list then 2851 * we've had an allocation failure and we need to zero 2852 * out the zil_header blkptr so that we don't end 2853 * up freeing the same block twice. 2854 */ 2855 if (list_head(&zilog->zl_lwb_list) == NULL) 2856 BP_ZERO(&zh->zh_log); 2857 } 2858 mutex_exit(&zilog->zl_lock); 2859} 2860 2861/* ARGSUSED */ 2862static int 2863zil_lwb_cons(void *vbuf, void *unused, int kmflag) 2864{ 2865 lwb_t *lwb = vbuf; 2866 list_create(&lwb->lwb_waiters, sizeof (zil_commit_waiter_t), 2867 offsetof(zil_commit_waiter_t, zcw_node)); 2868 avl_create(&lwb->lwb_vdev_tree, zil_lwb_vdev_compare, 2869 sizeof (zil_vdev_node_t), offsetof(zil_vdev_node_t, zv_node)); 2870 mutex_init(&lwb->lwb_vdev_lock, NULL, MUTEX_DEFAULT, NULL); 2871 return (0); 2872} 2873 2874/* ARGSUSED */ 2875static void 2876zil_lwb_dest(void *vbuf, void *unused) 2877{ 2878 lwb_t *lwb = vbuf; 2879 mutex_destroy(&lwb->lwb_vdev_lock); 2880 avl_destroy(&lwb->lwb_vdev_tree); 2881 list_destroy(&lwb->lwb_waiters); 2882} 2883 2884void 2885zil_init(void) 2886{ 2887 zil_lwb_cache = kmem_cache_create("zil_lwb_cache", 2888 sizeof (lwb_t), 0, zil_lwb_cons, zil_lwb_dest, NULL, NULL, NULL, 0); 2889 2890 zil_zcw_cache = kmem_cache_create("zil_zcw_cache", 2891 sizeof (zil_commit_waiter_t), 0, NULL, NULL, NULL, NULL, NULL, 0); 2892} 2893 2894void 2895zil_fini(void) 2896{ 2897 kmem_cache_destroy(zil_zcw_cache); 2898 kmem_cache_destroy(zil_lwb_cache); 2899} 2900 2901void 2902zil_set_sync(zilog_t *zilog, uint64_t sync) 2903{ 2904 zilog->zl_sync = sync; 2905} 2906 2907void 2908zil_set_logbias(zilog_t *zilog, uint64_t logbias) 2909{ 2910 zilog->zl_logbias = logbias; 2911} 2912 2913zilog_t * 2914zil_alloc(objset_t *os, zil_header_t *zh_phys) 2915{ 2916 zilog_t *zilog; 2917 2918 zilog = kmem_zalloc(sizeof (zilog_t), KM_SLEEP); 2919 2920 zilog->zl_header = zh_phys; 2921 zilog->zl_os = os; 2922 zilog->zl_spa = dmu_objset_spa(os); 2923 zilog->zl_dmu_pool = dmu_objset_pool(os); 2924 zilog->zl_destroy_txg = TXG_INITIAL - 1; 2925 zilog->zl_logbias = dmu_objset_logbias(os); 2926 zilog->zl_sync = dmu_objset_syncprop(os); 2927 zilog->zl_dirty_max_txg = 0; 2928 zilog->zl_last_lwb_opened = NULL; 2929 zilog->zl_last_lwb_latency = 0; 2930 
zilog->zl_max_block_size = zil_maxblocksize; 2931 2932 mutex_init(&zilog->zl_lock, NULL, MUTEX_DEFAULT, NULL); 2933 mutex_init(&zilog->zl_issuer_lock, NULL, MUTEX_DEFAULT, NULL); 2934 2935 for (int i = 0; i < TXG_SIZE; i++) { 2936 mutex_init(&zilog->zl_itxg[i].itxg_lock, NULL, 2937 MUTEX_DEFAULT, NULL); 2938 } 2939 2940 list_create(&zilog->zl_lwb_list, sizeof (lwb_t), 2941 offsetof(lwb_t, lwb_node)); 2942 2943 list_create(&zilog->zl_itx_commit_list, sizeof (itx_t), 2944 offsetof(itx_t, itx_node)); 2945 2946 cv_init(&zilog->zl_cv_suspend, NULL, CV_DEFAULT, NULL); 2947 2948 return (zilog); 2949} 2950 2951void 2952zil_free(zilog_t *zilog) 2953{ 2954 zilog->zl_stop_sync = 1; 2955 2956 ASSERT0(zilog->zl_suspend); 2957 ASSERT0(zilog->zl_suspending); 2958 2959 ASSERT(list_is_empty(&zilog->zl_lwb_list)); 2960 list_destroy(&zilog->zl_lwb_list); 2961 2962 ASSERT(list_is_empty(&zilog->zl_itx_commit_list)); 2963 list_destroy(&zilog->zl_itx_commit_list); 2964 2965 for (int i = 0; i < TXG_SIZE; i++) { 2966 /* 2967 * It's possible for an itx to be generated that doesn't dirty 2968 * a txg (e.g. ztest TX_TRUNCATE). So there's no zil_clean() 2969 * callback to remove the entry. We remove those here. 2970 * 2971 * Also free up the ziltest itxs. 2972 */ 2973 if (zilog->zl_itxg[i].itxg_itxs) 2974 zil_itxg_clean(zilog->zl_itxg[i].itxg_itxs); 2975 mutex_destroy(&zilog->zl_itxg[i].itxg_lock); 2976 } 2977 2978 mutex_destroy(&zilog->zl_issuer_lock); 2979 mutex_destroy(&zilog->zl_lock); 2980 2981 cv_destroy(&zilog->zl_cv_suspend); 2982 2983 kmem_free(zilog, sizeof (zilog_t)); 2984} 2985 2986/* 2987 * Open an intent log. 2988 */ 2989zilog_t * 2990zil_open(objset_t *os, zil_get_data_t *get_data) 2991{ 2992 zilog_t *zilog = dmu_objset_zil(os); 2993 2994 ASSERT3P(zilog->zl_get_data, ==, NULL); 2995 ASSERT3P(zilog->zl_last_lwb_opened, ==, NULL); 2996 ASSERT(list_is_empty(&zilog->zl_lwb_list)); 2997 2998 zilog->zl_get_data = get_data; 2999 3000 return (zilog); 3001} 3002 3003/* 3004 * Close an intent log. 3005 */ 3006void 3007zil_close(zilog_t *zilog) 3008{ 3009 lwb_t *lwb; 3010 uint64_t txg; 3011 3012 if (!dmu_objset_is_snapshot(zilog->zl_os)) { 3013 zil_commit(zilog, 0); 3014 } else { 3015 ASSERT3P(list_tail(&zilog->zl_lwb_list), ==, NULL); 3016 ASSERT0(zilog->zl_dirty_max_txg); 3017 ASSERT3B(zilog_is_dirty(zilog), ==, B_FALSE); 3018 } 3019 3020 mutex_enter(&zilog->zl_lock); 3021 lwb = list_tail(&zilog->zl_lwb_list); 3022 if (lwb == NULL) 3023 txg = zilog->zl_dirty_max_txg; 3024 else 3025 txg = MAX(zilog->zl_dirty_max_txg, lwb->lwb_max_txg); 3026 mutex_exit(&zilog->zl_lock); 3027 3028 /* 3029 * We need to use txg_wait_synced() to wait long enough for the 3030 * ZIL to be clean, and to wait for all pending lwbs to be 3031 * written out. 3032 */ 3033 if (txg != 0) 3034 txg_wait_synced(zilog->zl_dmu_pool, txg); 3035 3036 if (zilog_is_dirty(zilog)) 3037 zfs_dbgmsg("zil (%p) is dirty, txg %llu", zilog, txg); 3038 VERIFY(!zilog_is_dirty(zilog)); 3039 3040 zilog->zl_get_data = NULL; 3041 3042 /* 3043 * We should have only one lwb left on the list; remove it now. 
3044 */ 3045 mutex_enter(&zilog->zl_lock); 3046 lwb = list_head(&zilog->zl_lwb_list); 3047 if (lwb != NULL) { 3048 ASSERT3P(lwb, ==, list_tail(&zilog->zl_lwb_list)); 3049 ASSERT3S(lwb->lwb_state, !=, LWB_STATE_ISSUED); 3050 list_remove(&zilog->zl_lwb_list, lwb); 3051 zio_buf_free(lwb->lwb_buf, lwb->lwb_sz); 3052 zil_free_lwb(zilog, lwb); 3053 } 3054 mutex_exit(&zilog->zl_lock); 3055} 3056 3057static char *suspend_tag = "zil suspending"; 3058 3059/* 3060 * Suspend an intent log. While in suspended mode, we still honor 3061 * synchronous semantics, but we rely on txg_wait_synced() to do it. 3062 * On old version pools, we suspend the log briefly when taking a 3063 * snapshot so that it will have an empty intent log. 3064 * 3065 * Long holds are not really intended to be used the way we do here -- 3066 * held for such a short time. A concurrent caller of dsl_dataset_long_held() 3067 * could fail. Therefore we take pains to only put a long hold if it is 3068 * actually necessary. Fortunately, it will only be necessary if the 3069 * objset is currently mounted (or the ZVOL equivalent). In that case it 3070 * will already have a long hold, so we are not really making things any worse. 3071 * 3072 * Ideally, we would locate the existing long-holder (i.e. the zfsvfs_t or 3073 * zvol_state_t), and use their mechanism to prevent their hold from being 3074 * dropped (e.g. VFS_HOLD()). However, that would be even more pain for 3075 * very little gain. 3076 * 3077 * if cookiep == NULL, this does both the suspend & resume. 3078 * Otherwise, it returns with the dataset "long held", and the cookie 3079 * should be passed into zil_resume(). 3080 */ 3081int 3082zil_suspend(const char *osname, void **cookiep) 3083{ 3084 objset_t *os; 3085 zilog_t *zilog; 3086 const zil_header_t *zh; 3087 int error; 3088 3089 error = dmu_objset_hold(osname, suspend_tag, &os); 3090 if (error != 0) 3091 return (error); 3092 zilog = dmu_objset_zil(os); 3093 3094 mutex_enter(&zilog->zl_lock); 3095 zh = zilog->zl_header; 3096 3097 if (zh->zh_flags & ZIL_REPLAY_NEEDED) { /* unplayed log */ 3098 mutex_exit(&zilog->zl_lock); 3099 dmu_objset_rele(os, suspend_tag); 3100 return (SET_ERROR(EBUSY)); 3101 } 3102 3103 /* 3104 * Don't put a long hold in the cases where we can avoid it. This 3105 * is when there is no cookie so we are doing a suspend & resume 3106 * (i.e. called from zil_vdev_offline()), and there's nothing to do 3107 * for the suspend because it's already suspended, or there's no ZIL. 3108 */ 3109 if (cookiep == NULL && !zilog->zl_suspending && 3110 (zilog->zl_suspend > 0 || BP_IS_HOLE(&zh->zh_log))) { 3111 mutex_exit(&zilog->zl_lock); 3112 dmu_objset_rele(os, suspend_tag); 3113 return (0); 3114 } 3115 3116 dsl_dataset_long_hold(dmu_objset_ds(os), suspend_tag); 3117 dsl_pool_rele(dmu_objset_pool(os), suspend_tag); 3118 3119 zilog->zl_suspend++; 3120 3121 if (zilog->zl_suspend > 1) { 3122 /* 3123 * Someone else is already suspending it. 3124 * Just wait for them to finish. 3125 */ 3126 3127 while (zilog->zl_suspending) 3128 cv_wait(&zilog->zl_cv_suspend, &zilog->zl_lock); 3129 mutex_exit(&zilog->zl_lock); 3130 3131 if (cookiep == NULL) 3132 zil_resume(os); 3133 else 3134 *cookiep = os; 3135 return (0); 3136 } 3137 3138 /* 3139 * If there is no pointer to an on-disk block, this ZIL must not 3140 * be active (e.g. filesystem not mounted), so there's nothing 3141 * to clean up. 
3142 */ 3143 if (BP_IS_HOLE(&zh->zh_log)) { 3144 ASSERT(cookiep != NULL); /* fast path already handled */ 3145 3146 *cookiep = os; 3147 mutex_exit(&zilog->zl_lock); 3148 return (0); 3149 } 3150 3151 zilog->zl_suspending = B_TRUE; 3152 mutex_exit(&zilog->zl_lock); 3153 3154 /* 3155 * We need to use zil_commit_impl to ensure we wait for all 3156 * LWB_STATE_OPENED and LWB_STATE_ISSUED lwb's to be committed 3157 * to disk before proceeding. If we used zil_commit instead, it 3158 * would just call txg_wait_synced(), because zl_suspend is set. 3159 * txg_wait_synced() doesn't wait for these lwb's to be 3160 * LWB_STATE_DONE before returning. 3161 */ 3162 zil_commit_impl(zilog, 0); 3163 3164 /* 3165 * Now that we've ensured all lwb's are LWB_STATE_DONE, we use 3166 * txg_wait_synced() to ensure the data from the zilog has 3167 * migrated to the main pool before calling zil_destroy(). 3168 */ 3169 txg_wait_synced(zilog->zl_dmu_pool, 0); 3170 3171 zil_destroy(zilog, B_FALSE); 3172 3173 mutex_enter(&zilog->zl_lock); 3174 zilog->zl_suspending = B_FALSE; 3175 cv_broadcast(&zilog->zl_cv_suspend); 3176 mutex_exit(&zilog->zl_lock); 3177 3178 if (cookiep == NULL) 3179 zil_resume(os); 3180 else 3181 *cookiep = os; 3182 return (0); 3183} 3184 3185void 3186zil_resume(void *cookie) 3187{ 3188 objset_t *os = cookie; 3189 zilog_t *zilog = dmu_objset_zil(os); 3190 3191 mutex_enter(&zilog->zl_lock); 3192 ASSERT(zilog->zl_suspend != 0); 3193 zilog->zl_suspend--; 3194 mutex_exit(&zilog->zl_lock); 3195 dsl_dataset_long_rele(dmu_objset_ds(os), suspend_tag); 3196 dsl_dataset_rele(dmu_objset_ds(os), suspend_tag); 3197} 3198 3199typedef struct zil_replay_arg { 3200 zil_replay_func_t **zr_replay; 3201 void *zr_arg; 3202 boolean_t zr_byteswap; 3203 char *zr_lr; 3204} zil_replay_arg_t; 3205 3206static int 3207zil_replay_error(zilog_t *zilog, lr_t *lr, int error) 3208{ 3209 char name[ZFS_MAX_DATASET_NAME_LEN]; 3210 3211 zilog->zl_replaying_seq--; /* didn't actually replay this one */ 3212 3213 dmu_objset_name(zilog->zl_os, name); 3214 3215 cmn_err(CE_WARN, "ZFS replay transaction error %d, " 3216 "dataset %s, seq 0x%llx, txtype %llu %s\n", error, name, 3217 (u_longlong_t)lr->lrc_seq, 3218 (u_longlong_t)(lr->lrc_txtype & ~TX_CI), 3219 (lr->lrc_txtype & TX_CI) ? "CI" : ""); 3220 3221 return (error); 3222} 3223 3224static int 3225zil_replay_log_record(zilog_t *zilog, lr_t *lr, void *zra, uint64_t claim_txg) 3226{ 3227 zil_replay_arg_t *zr = zra; 3228 const zil_header_t *zh = zilog->zl_header; 3229 uint64_t reclen = lr->lrc_reclen; 3230 uint64_t txtype = lr->lrc_txtype; 3231 int error = 0; 3232 3233 zilog->zl_replaying_seq = lr->lrc_seq; 3234 3235 if (lr->lrc_seq <= zh->zh_replay_seq) /* already replayed */ 3236 return (0); 3237 3238 if (lr->lrc_txg < claim_txg) /* already committed */ 3239 return (0); 3240 3241 /* Strip case-insensitive bit, still present in log record */ 3242 txtype &= ~TX_CI; 3243 3244 if (txtype == 0 || txtype >= TX_MAX_TYPE) 3245 return (zil_replay_error(zilog, lr, EINVAL)); 3246 3247 /* 3248 * If this record type can be logged out of order, the object 3249 * (lr_foid) may no longer exist. That's legitimate, not an error. 3250 */ 3251 if (TX_OOO(txtype)) { 3252 error = dmu_object_info(zilog->zl_os, 3253 ((lr_ooo_t *)lr)->lr_foid, NULL); 3254 if (error == ENOENT || error == EEXIST) 3255 return (0); 3256 } 3257 3258 /* 3259 * Make a copy of the data so we can revise and extend it. 3260 */ 3261 bcopy(lr, zr->zr_lr, reclen); 3262 3263 /* 3264 * If this is a TX_WRITE with a blkptr, suck in the data. 
3265 */ 3266 if (txtype == TX_WRITE && reclen == sizeof (lr_write_t)) { 3267 error = zil_read_log_data(zilog, (lr_write_t *)lr, 3268 zr->zr_lr + reclen); 3269 if (error != 0) 3270 return (zil_replay_error(zilog, lr, error)); 3271 } 3272 3273 /* 3274 * The log block containing this lr may have been byteswapped 3275 * so that we can easily examine common fields like lrc_txtype. 3276 * However, the log is a mix of different record types, and only the 3277 * replay vectors know how to byteswap their records. Therefore, if 3278 * the lr was byteswapped, undo it before invoking the replay vector. 3279 */ 3280 if (zr->zr_byteswap) 3281 byteswap_uint64_array(zr->zr_lr, reclen); 3282 3283 /* 3284 * We must now do two things atomically: replay this log record, 3285 * and update the log header sequence number to reflect the fact that 3286 * we did so. At the end of each replay function the sequence number 3287 * is updated if we are in replay mode. 3288 */ 3289 error = zr->zr_replay[txtype](zr->zr_arg, zr->zr_lr, zr->zr_byteswap); 3290 if (error != 0) { 3291 /* 3292 * The DMU's dnode layer doesn't see removes until the txg 3293 * commits, so a subsequent claim can spuriously fail with 3294 * EEXIST. So if we receive any error we try syncing out 3295 * any removes then retry the transaction. Note that we 3296 * specify B_FALSE for byteswap now, so we don't do it twice. 3297 */ 3298 txg_wait_synced(spa_get_dsl(zilog->zl_spa), 0); 3299 error = zr->zr_replay[txtype](zr->zr_arg, zr->zr_lr, B_FALSE); 3300 if (error != 0) 3301 return (zil_replay_error(zilog, lr, error)); 3302 } 3303 return (0); 3304} 3305 3306/* ARGSUSED */ 3307static int 3308zil_incr_blks(zilog_t *zilog, blkptr_t *bp, void *arg, uint64_t claim_txg) 3309{ 3310 zilog->zl_replay_blks++; 3311 3312 return (0); 3313} 3314 3315/* 3316 * If this dataset has a non-empty intent log, replay it and destroy it. 3317 */ 3318void 3319zil_replay(objset_t *os, void *arg, zil_replay_func_t *replay_func[TX_MAX_TYPE]) 3320{ 3321 zilog_t *zilog = dmu_objset_zil(os); 3322 const zil_header_t *zh = zilog->zl_header; 3323 zil_replay_arg_t zr; 3324 3325 if ((zh->zh_flags & ZIL_REPLAY_NEEDED) == 0) { 3326 zil_destroy(zilog, B_TRUE); 3327 return; 3328 } 3329 3330 zr.zr_replay = replay_func; 3331 zr.zr_arg = arg; 3332 zr.zr_byteswap = BP_SHOULD_BYTESWAP(&zh->zh_log); 3333 zr.zr_lr = kmem_alloc(2 * SPA_MAXBLOCKSIZE, KM_SLEEP); 3334 3335 /* 3336 * Wait for in-progress removes to sync before starting replay. 3337 */ 3338 txg_wait_synced(zilog->zl_dmu_pool, 0); 3339 3340 zilog->zl_replay = B_TRUE; 3341 zilog->zl_replay_time = ddi_get_lbolt(); 3342 ASSERT(zilog->zl_replay_blks == 0); 3343 (void) zil_parse(zilog, zil_incr_blks, zil_replay_log_record, &zr, 3344 zh->zh_claim_txg); 3345 kmem_free(zr.zr_lr, 2 * SPA_MAXBLOCKSIZE); 3346 3347 zil_destroy(zilog, B_FALSE); 3348 txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg); 3349 zilog->zl_replay = B_FALSE; 3350} 3351 3352boolean_t 3353zil_replaying(zilog_t *zilog, dmu_tx_t *tx) 3354{ 3355 if (zilog->zl_sync == ZFS_SYNC_DISABLED) 3356 return (B_TRUE); 3357 3358 if (zilog->zl_replay) { 3359 dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx); 3360 zilog->zl_replayed_seq[dmu_tx_get_txg(tx) & TXG_MASK] = 3361 zilog->zl_replaying_seq; 3362 return (B_TRUE); 3363 } 3364 3365 return (B_FALSE); 3366} 3367 3368/* ARGSUSED */ 3369int 3370zil_reset(const char *osname, void *arg) 3371{ 3372 int error; 3373 3374 error = zil_suspend(osname, NULL); 3375 if (error != 0) 3376 return (SET_ERROR(EEXIST)); 3377 return (0); 3378} 3379