Lines matching defs:zilog (zil.c)

94 static void zil_async_to_sync(zilog_t *zilog, uint64_t foid);
129 zil_bp_tree_init(zilog_t *zilog)
131 avl_create(&zilog->zl_bp_tree, zil_bp_compare,
136 zil_bp_tree_fini(zilog_t *zilog)
138 avl_tree_t *t = &zilog->zl_bp_tree;
149 zil_bp_tree_add(zilog_t *zilog, const blkptr_t *bp)
151 avl_tree_t *t = &zilog->zl_bp_tree;
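
zil_bp_tree_init/add/fini (above) keep an AVL set of the block pointers already visited while claiming or freeing a log, so a block reached twice is processed only once; zil_bp_tree_add returns nonzero for a duplicate. Below is a minimal stand-alone sketch of that seen-set idea, using an unbalanced binary search tree and a single integer key in place of the real AVL tree and blkptr key; all names here are illustrative, not the ZFS API:

    #include <stdlib.h>
    #include <errno.h>
    #include <stdio.h>

    typedef struct bp_node {
        unsigned long long bn_key;      /* stand-in for the blkptr key */
        struct bp_node *bn_left, *bn_right;
    } bp_node_t;

    /* Insert key if absent: 0 when newly added, EEXIST when seen
     * before, mirroring how zil_bp_tree_add lets callers skip dups. */
    static int
    bp_tree_add(bp_node_t **root, unsigned long long key)
    {
        while (*root != NULL) {
            if (key == (*root)->bn_key)
                return (EEXIST);
            root = (key < (*root)->bn_key) ?
                &(*root)->bn_left : &(*root)->bn_right;
        }
        if ((*root = calloc(1, sizeof (bp_node_t))) == NULL)
            return (ENOMEM);
        (*root)->bn_key = key;
        return (0);
    }

    int
    main(void)
    {
        bp_node_t *root = NULL;
        int first = bp_tree_add(&root, 7);
        int again = bp_tree_add(&root, 7);

        printf("%d %d\n", first, again);    /* 0, then EEXIST */
        return (0);
    }
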
172 zil_header_in_syncing_context(zilog_t *zilog)
174 return ((zil_header_t *)zilog->zl_header);
178 zil_init_log_chain(zilog_t *zilog, blkptr_t *bp)
184 zc->zc_word[ZIL_ZC_OBJSET] = dmu_objset_id(zilog->zl_os);
192 zil_read_log_block(zilog_t *zilog, const blkptr_t *bp, blkptr_t *nbp, void *dst,
201 if (zilog->zl_header->zh_claim_txg == 0)
204 if (!(zilog->zl_header->zh_flags & ZIL_CLAIM_LR_SEQ_VALID))
210 error = arc_read(NULL, zilog->zl_spa, bp, arc_getbuf_func, &abuf,
268 zil_read_log_data(zilog_t *zilog, const lr_write_t *lr, void *wbuf)
283 if (zilog->zl_header->zh_claim_txg == 0)
286 SET_BOOKMARK(&zb, dmu_objset_id(zilog->zl_os), lr->lr_foid,
289 error = arc_read(NULL, zilog->zl_spa, bp, arc_getbuf_func, &abuf,
305 zil_parse(zilog_t *zilog, zil_parse_blk_func_t *parse_blk_func,
308 const zil_header_t *zh = zilog->zl_header;
336 zil_bp_tree_init(zilog);
345 if ((error = parse_blk_func(zilog, &blk, arg, txg)) != 0)
354 error = zil_read_log_block(zilog, &blk, &next_blk, lrbuf, &end);
364 if ((error = parse_lr_func(zilog, lr, arg, txg)) != 0)
372 zilog->zl_parse_error = error;
373 zilog->zl_parse_blk_seq = max_blk_seq;
374 zilog->zl_parse_lr_seq = max_lr_seq;
375 zilog->zl_parse_blk_count = blk_count;
376 zilog->zl_parse_lr_count = lr_count;
381 zil_bp_tree_fini(zilog);
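
zil_parse (above) is the single walker over the on-disk log chain: it visits every log block through parse_blk_func and every record inside each block through parse_lr_func, stops at the first error or at the end of the chain, and stashes the counts and highest sequence numbers seen (lines 372-376). The same walker is reused with claim callbacks (line 686), free callbacks (lines 637-638), and replay callbacks (line 2110). A compile-clean sketch of the loop shape over a simplified in-memory chain; the types and names are mine, not the ZFS ones:

    #include <stdio.h>
    #include <stddef.h>

    typedef struct blk { struct blk *next; int nrec; int recs[4]; } blk_t;
    typedef int (*parse_blk_f)(const blk_t *);
    typedef int (*parse_rec_f)(int);

    /* Visit each block, then each record in it; stop on first error. */
    static int
    log_parse(const blk_t *head, parse_blk_f on_blk, parse_rec_f on_rec,
        unsigned long *blk_count, unsigned long *rec_count)
    {
        int error = 0;

        for (const blk_t *b = head; b != NULL && error == 0; b = b->next) {
            if ((error = on_blk(b)) != 0)
                break;
            (*blk_count)++;
            for (int i = 0; i < b->nrec && error == 0; i++) {
                if ((error = on_rec(b->recs[i])) == 0)
                    (*rec_count)++;
            }
        }
        return (error);
    }

    static int count_blk(const blk_t *b) { (void) b; return (0); }
    static int count_rec(int r) { (void) r; return (0); }

    int
    main(void)
    {
        blk_t b2 = { NULL, 1, { 3 } };
        blk_t b1 = { &b2, 2, { 1, 2 } };
        unsigned long blks = 0, recs = 0;

        (void) log_parse(&b1, count_blk, count_rec, &blks, &recs);
        printf("%lu blocks, %lu records\n", blks, recs);    /* 2, 3 */
        return (0);
    }
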
388 zil_claim_log_block(zilog_t *zilog, blkptr_t *bp, void *tx, uint64_t first_txg)
395 zil_bp_tree_add(zilog, bp) != 0)
398 return (zio_wait(zio_claim(NULL, zilog->zl_spa,
404 zil_claim_log_record(zilog_t *zilog, lr_t *lrc, void *tx, uint64_t first_txg)
421 (error = zil_read_log_data(zilog, lr, NULL)) != 0)
423 return (zil_claim_log_block(zilog, &lr->lr_blkptr, tx, first_txg));
428 zil_free_log_block(zilog_t *zilog, blkptr_t *bp, void *tx, uint64_t claim_txg)
430 zio_free_zil(zilog->zl_spa, dmu_tx_get_txg(tx), bp);
436 zil_free_log_record(zilog_t *zilog, lr_t *lrc, void *tx, uint64_t claim_txg)
445 bp->blk_birth >= claim_txg && zil_bp_tree_add(zilog, bp) == 0 &&
447 zio_free(zilog->zl_spa, dmu_tx_get_txg(tx), bp);
453 zil_alloc_lwb(zilog_t *zilog, blkptr_t *bp, uint64_t txg)
458 lwb->lwb_zilog = zilog;
472 mutex_enter(&zilog->zl_lock);
473 list_insert_tail(&zilog->zl_lwb_list, lwb);
474 mutex_exit(&zilog->zl_lock);
484 zilog_dirty(zilog_t *zilog, uint64_t txg)
486 dsl_pool_t *dp = zilog->zl_dmu_pool;
487 dsl_dataset_t *ds = dmu_objset_ds(zilog->zl_os);
492 if (txg_list_add(&dp->dp_dirty_zilogs, zilog, txg)) {
494 dmu_buf_add_ref(ds->ds_dbuf, zilog);
499 zilog_is_dirty(zilog_t *zilog)
501 dsl_pool_t *dp = zilog->zl_dmu_pool;
504 if (txg_list_member(&dp->dp_dirty_zilogs, zilog, t))
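
zilog_dirty/zilog_is_dirty (above) track whether the zilog is on the pool's per-txg dirty list; txg_list_add succeeds only on the first call for a given txg, and only that first call takes a hold on the dataset (line 494). A toy model of that add-once-per-txg behavior using a TXG_MASK ring (constants and names assumed for illustration):

    #include <stdio.h>

    #define TXG_SIZE 4              /* same power-of-two idea as ZFS */
    #define TXG_MASK (TXG_SIZE - 1)

    static int dirty[TXG_SIZE];     /* nonzero: already on the list */

    /* Return 1 only the first time a txg is dirtied (when the real
     * code also takes a hold on the dataset), 0 on every repeat. */
    static int
    mark_dirty(unsigned long long txg)
    {
        int *slot = &dirty[txg & TXG_MASK];

        if (*slot)
            return (0);
        *slot = 1;
        return (1);
    }

    int
    main(void)
    {
        int first = mark_dirty(42);
        int again = mark_dirty(42);

        printf("%d %d\n", first, again);    /* 1 0 */
        return (0);
    }
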
514 zil_create(zilog_t *zilog)
516 const zil_header_t *zh = zilog->zl_header;
526 txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);
539 tx = dmu_tx_create(zilog->zl_os);
541 dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
545 zio_free_zil(zilog->zl_spa, txg, &blk);
549 error = zio_alloc_zil(zilog->zl_spa, txg, &blk, NULL,
550 ZIL_MIN_BLKSZ, zilog->zl_logbias == ZFS_LOGBIAS_LATENCY);
553 zil_init_log_chain(zilog, &blk);
560 lwb = zil_alloc_lwb(zilog, &blk, txg);
569 txg_wait_synced(zilog->zl_dmu_pool, txg);
587 zil_destroy(zilog_t *zilog, boolean_t keep_first)
589 const zil_header_t *zh = zilog->zl_header;
597 txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);
599 zilog->zl_old_header = *zh; /* debugging aid */
604 tx = dmu_tx_create(zilog->zl_os);
606 dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
609 mutex_enter(&zilog->zl_lock);
611 ASSERT3U(zilog->zl_destroy_txg, <, txg);
612 zilog->zl_destroy_txg = txg;
613 zilog->zl_keep_first = keep_first;
615 if (!list_is_empty(&zilog->zl_lwb_list)) {
618 while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
619 list_remove(&zilog->zl_lwb_list, lwb);
622 zio_free_zil(zilog->zl_spa, txg, &lwb->lwb_blk);
626 zil_destroy_sync(zilog, tx);
628 mutex_exit(&zilog->zl_lock);
634 zil_destroy_sync(zilog_t *zilog, dmu_tx_t *tx)
636 ASSERT(list_is_empty(&zilog->zl_lwb_list));
637 (void) zil_parse(zilog, zil_free_log_block,
638 zil_free_log_record, tx, zilog->zl_header->zh_claim_txg);
646 zilog_t *zilog;
665 zilog = dmu_objset_zil(os);
666 zh = zil_header_in_syncing_context(zilog);
668 if (spa_get_log_state(zilog->zl_spa) == SPA_LOG_CLEAR) {
670 zio_free_zil(zilog->zl_spa, first_txg, &zh->zh_log);
686 (void) zil_parse(zilog, zil_claim_log_block,
689 zh->zh_claim_blk_seq = zilog->zl_parse_blk_seq;
690 zh->zh_claim_lr_seq = zilog->zl_parse_lr_seq;
691 if (zilog->zl_parse_lr_count || zilog->zl_parse_blk_count > 1)
697 ASSERT3U(first_txg, ==, (spa_last_synced_txg(zilog->zl_spa) + 1));
711 zilog_t *zilog;
725 zilog = dmu_objset_zil(os);
726 bp = (blkptr_t *)&zilog->zl_header->zh_log;
755 error = zil_parse(zilog, zil_claim_log_block, zil_claim_log_record, tx,
756 zilog->zl_header->zh_claim_txg ? -1ULL : spa_first_txg(os->os_spa));
776 zil_add_block(zilog_t *zilog, const blkptr_t *bp)
778 avl_tree_t *t = &zilog->zl_vdev_tree;
787 ASSERT(zilog->zl_writer);
794 mutex_enter(&zilog->zl_vdev_lock);
803 mutex_exit(&zilog->zl_vdev_lock);
807 zil_flush_vdevs(zilog_t *zilog)
809 spa_t *spa = zilog->zl_spa;
810 avl_tree_t *t = &zilog->zl_vdev_tree;
815 ASSERT(zilog->zl_writer);
851 zilog_t *zilog = lwb->lwb_zilog;
871 mutex_enter(&zilog->zl_lock);
874 mutex_exit(&zilog->zl_lock);
888 zil_lwb_write_init(zilog_t *zilog, lwb_t *lwb)
896 if (zilog->zl_root_zio == NULL) {
897 zilog->zl_root_zio = zio_root(zilog->zl_spa, NULL, NULL,
901 lwb->lwb_zio = zio_rewrite(zilog->zl_root_zio, zilog->zl_spa,
928 #define USE_SLOG(zilog) (((zilog)->zl_logbias == ZFS_LOGBIAS_LATENCY) && \
929 (((zilog)->zl_cur_used < zil_slog_limit) || \
930 ((zilog)->zl_itx_list_sz < (zil_slog_limit << 1))))
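
The USE_SLOG() macro above gates allocation from the separate log device: only latency-biased datasets qualify, and only while the log stream is small, either under zil_slog_limit in the current block or under twice the limit queued in itxs, so throughput-biased or very busy logs fall back to the main pool. The same predicate spelled out as a plain function, with the macro's zilog fields turned into parameters (the ZFS_LOGBIAS_LATENCY value and the default limit here are stand-ins):

    #include <stdio.h>

    #define ZFS_LOGBIAS_LATENCY 0ULL    /* stand-in for the zfs.h value */
    static unsigned long long zil_slog_limit = 1024 * 1024;    /* tunable */

    /* use_slog() == USE_SLOG(zilog), with the fields as parameters. */
    static int
    use_slog(unsigned long long logbias, unsigned long long cur_used,
        unsigned long long itx_list_sz)
    {
        return (logbias == ZFS_LOGBIAS_LATENCY &&
            (cur_used < zil_slog_limit ||
            itx_list_sz < (zil_slog_limit << 1)));
    }

    int
    main(void)
    {
        printf("%d\n", use_slog(ZFS_LOGBIAS_LATENCY, 4096, 8192));  /* 1 */
        return (0);
    }
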
937 zil_lwb_write_start(zilog_t *zilog, lwb_t *lwb)
941 spa_t *spa = zilog->zl_spa;
967 tx = dmu_tx_create(zilog->zl_os);
969 dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
990 zil_blksz = zilog->zl_cur_used + sizeof (zil_chain_t);
996 zilog->zl_prev_blks[zilog->zl_prev_rotor] = zil_blksz;
998 zil_blksz = MAX(zil_blksz, zilog->zl_prev_blks[i]);
999 zilog->zl_prev_rotor = (zilog->zl_prev_rotor + 1) & (ZIL_PREV_BLKS - 1);
1004 USE_SLOG(zilog));
1013 nlwb = zil_alloc_lwb(zilog, bp, txg);
1016 zil_add_block(zilog, &lwb->lwb_blk);
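
Within zil_lwb_write_start, lines 990-999 size the next log block adaptively: the size the current block needed goes into a small ring (zl_prev_blks), the next block gets the maximum over that ring, and the rotor advances, so one quiet interval does not shrink the block size the moment a burst returns. A stand-alone sketch of the rotor (the real code additionally rounds the result to a supported block size):

    #include <stdio.h>

    #define ZIL_PREV_BLKS 16    /* power of two, as the mask requires */

    static unsigned long long prev_blks[ZIL_PREV_BLKS];
    static int prev_rotor;

    /* Record what the current block needed; return max over history. */
    static unsigned long long
    next_blk_size(unsigned long long needed_now)
    {
        unsigned long long blksz = needed_now;

        prev_blks[prev_rotor] = blksz;
        for (int i = 0; i < ZIL_PREV_BLKS; i++) {
            if (prev_blks[i] > blksz)
                blksz = prev_blks[i];
        }
        prev_rotor = (prev_rotor + 1) & (ZIL_PREV_BLKS - 1);
        return (blksz);
    }

    int
    main(void)
    {
        (void) next_blk_size(131072);
        printf("%llu\n", next_blk_size(4096));  /* still 131072 */
        return (0);
    }
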
1048 zil_lwb_commit(zilog_t *zilog, itx_t *itx, lwb_t *lwb)
1061 ASSERT(zilog_is_dirty(zilog) ||
1062 spa_freeze_txg(zilog->zl_spa) != UINT64_MAX);
1068 zilog->zl_cur_used += (reclen + dlen);
1070 zil_lwb_write_init(zilog, lwb);
1076 lwb = zil_lwb_write_start(zilog, lwb);
1079 zil_lwb_write_init(zilog, lwb);
1082 txg_wait_synced(zilog->zl_dmu_pool, txg);
1096 if (txg > spa_freeze_txg(zilog->zl_spa))
1097 txg_wait_synced(zilog->zl_dmu_pool, txg);
1110 error = zilog->zl_get_data(
1113 txg_wait_synced(zilog->zl_dmu_pool, txg);
1130 lrc->lrc_seq = ++zilog->zl_lr_seq; /* we are single threaded */
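
zil_lwb_commit (lines 1048-1130 above) is fit-or-flush: if the record does not fit in the open log block, the block is shipped via zil_lwb_write_start and a fresh one is opened before the record is copied in. A toy version of just that buffering step (the real function also handles WR_INDIRECT records via the zl_get_data callback, plus freeze and txg edge cases):

    #include <stdio.h>
    #include <string.h>

    #define BLKSZ 64        /* toy block size */

    static char blk[BLKSZ];
    static size_t used;

    static void
    ship_block(void)        /* stand-in for zil_lwb_write_start() */
    {
        printf("shipping %zu bytes\n", used);
        used = 0;
    }

    /* Append a record, shipping the open block first when it is
     * full; assumes reclen <= BLKSZ, unlike the real code. */
    static void
    commit_record(const char *rec, size_t reclen)
    {
        if (used + reclen > BLKSZ)
            ship_block();
        memcpy(blk + used, rec, reclen);
        used += reclen;
    }

    int
    main(void)
    {
        for (int i = 0; i < 5; i++)
            commit_record("0123456789abcdef", 16);  /* ships at i == 4 */
        return (0);
    }
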
1217 zil_remove_async(zilog_t *zilog, uint64_t oid)
1229 if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */
1232 otxg = spa_last_synced_txg(zilog->zl_spa) + 1;
1235 itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK];
1261 zil_itx_assign(zilog_t *zilog, itx_t *itx, dmu_tx_t *tx)
1275 zil_remove_async(zilog, itx->itx_oid);
1281 zil_async_to_sync(zilog, itx->itx_oid);
1283 if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX)
1288 itxg = &zilog->zl_itxg[txg & TXG_MASK];
1298 atomic_add_64(&zilog->zl_itx_list_sz, -itxg->itxg_sod);
1314 atomic_add_64(&zilog->zl_itx_list_sz, itx->itx_sod);
1334 zilog_dirty(zilog, txg);
1350 zil_clean(zilog_t *zilog, uint64_t synced_txg)
1352 itxg_t *itxg = &zilog->zl_itxg[synced_txg & TXG_MASK];
1362 ASSERT(zilog->zl_clean_taskq != NULL);
1363 atomic_add_64(&zilog->zl_itx_list_sz, -itxg->itxg_sod);
1375 if (taskq_dispatch(zilog->zl_clean_taskq,
1384 zil_get_commit_list(zilog_t *zilog)
1387 list_t *commit_list = &zilog->zl_itx_commit_list;
1390 if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */
1393 otxg = spa_last_synced_txg(zilog->zl_spa) + 1;
1396 itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK];
1410 atomic_add_64(&zilog->zl_itx_list_sz, -push_sod);
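
zil_get_commit_list (above) drains each open txg's pending sync itxs onto the single zl_itx_commit_list, oldest txg first, adjusting zl_itx_list_sz as it goes. A simplified splice over a TXG_SIZE ring of singly linked lists (no locking here; the real code takes each itxg_lock):

    #include <stdio.h>
    #include <stddef.h>

    #define TXG_SIZE 4
    #define TXG_MASK (TXG_SIZE - 1)

    typedef struct itx { struct itx *next; int payload; } itx_t;

    static itx_t *itxg_list[TXG_SIZE];  /* one pending list per open txg */

    /* Splice every open txg's pending itxs onto one commit list,
     * walking otxg .. otxg + TXG_SIZE - 1 so older txgs come first. */
    static itx_t *
    get_commit_list(unsigned long long otxg)
    {
        itx_t *head = NULL, **tail = &head;

        for (unsigned long long txg = otxg; txg < otxg + TXG_SIZE; txg++) {
            itx_t **slot = &itxg_list[txg & TXG_MASK];

            *tail = *slot;
            *slot = NULL;
            while (*tail != NULL)
                tail = &(*tail)->next;
        }
        return (head);
    }

    int
    main(void)
    {
        static itx_t a = { NULL, 1 }, b = { NULL, 2 };

        itxg_list[1 & TXG_MASK] = &a;
        itxg_list[2 & TXG_MASK] = &b;
        for (itx_t *i = get_commit_list(1); i != NULL; i = i->next)
            printf("itx %d\n", i->payload);     /* 1, then 2 */
        return (0);
    }
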
1417 zil_async_to_sync(zilog_t *zilog, uint64_t foid)
1424 if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */
1427 otxg = spa_last_synced_txg(zilog->zl_spa) + 1;
1430 itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK];
1466 zil_commit_writer(zilog_t *zilog)
1471 spa_t *spa = zilog->zl_spa;
1474 ASSERT(zilog->zl_root_zio == NULL);
1476 mutex_exit(&zilog->zl_lock);
1478 zil_get_commit_list(zilog);
1484 if (list_head(&zilog->zl_itx_commit_list) == NULL) {
1485 mutex_enter(&zilog->zl_lock);
1489 if (zilog->zl_suspend) {
1492 lwb = list_tail(&zilog->zl_lwb_list);
1494 lwb = zil_create(zilog);
1497 DTRACE_PROBE1(zil__cw1, zilog_t *, zilog);
1498 while ((itx = list_head(&zilog->zl_itx_commit_list)) != NULL) {
1503 lwb = zil_lwb_commit(zilog, itx, lwb);
1504 list_remove(&zilog->zl_itx_commit_list, itx);
1508 DTRACE_PROBE1(zil__cw2, zilog_t *, zilog);
1512 lwb = zil_lwb_write_start(zilog, lwb);
1514 zilog->zl_cur_used = 0;
1519 if (zilog->zl_root_zio) {
1520 error = zio_wait(zilog->zl_root_zio);
1521 zilog->zl_root_zio = NULL;
1522 zil_flush_vdevs(zilog);
1526 txg_wait_synced(zilog->zl_dmu_pool, 0);
1528 mutex_enter(&zilog->zl_lock);
1536 zilog->zl_commit_lr_seq = zilog->zl_lr_seq;
1563 zil_commit(zilog_t *zilog, uint64_t foid)
1567 if (zilog->zl_sync == ZFS_SYNC_DISABLED)
1571 zil_async_to_sync(zilog, foid);
1573 mutex_enter(&zilog->zl_lock);
1574 mybatch = zilog->zl_next_batch;
1575 while (zilog->zl_writer) {
1576 cv_wait(&zilog->zl_cv_batch[mybatch & 1], &zilog->zl_lock);
1577 if (mybatch <= zilog->zl_com_batch) {
1578 mutex_exit(&zilog->zl_lock);
1583 zilog->zl_next_batch++;
1584 zilog->zl_writer = B_TRUE;
1585 zil_commit_writer(zilog);
1586 zilog->zl_com_batch = mybatch;
1587 zilog->zl_writer = B_FALSE;
1588 mutex_exit(&zilog->zl_lock);
1591 cv_signal(&zilog->zl_cv_batch[(mybatch+1) & 1]);
1594 cv_broadcast(&zilog->zl_cv_batch[mybatch & 1]);
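
Lines 1563-1594 above are the batched commit protocol: a caller latches zl_next_batch, sleeps on the condvar for its batch's parity while another writer runs, and returns early if its batch was committed on its behalf; otherwise it becomes the writer, writes the batch, then signals the usher of the next batch and broadcasts to its own batch's waiters. A minimal pthread model of that control flow (names simplified; the real writer drops zl_lock while doing I/O, which this sketch omits):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lk = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cv_batch[2] = {
        PTHREAD_COND_INITIALIZER, PTHREAD_COND_INITIALIZER
    };
    static unsigned long long next_batch = 1, com_batch;
    static int writer;

    static void write_batch(void) { }   /* stand-in for zil_commit_writer() */

    static void
    commit(void)
    {
        pthread_mutex_lock(&lk);
        unsigned long long mybatch = next_batch;

        while (writer) {
            pthread_cond_wait(&cv_batch[mybatch & 1], &lk);
            if (mybatch <= com_batch) {     /* committed for us */
                pthread_mutex_unlock(&lk);
                return;
            }
        }
        next_batch++;
        writer = 1;
        write_batch();
        com_batch = mybatch;
        writer = 0;
        pthread_mutex_unlock(&lk);

        /* wake the usher of the next batch, then everyone in ours */
        pthread_cond_signal(&cv_batch[(mybatch + 1) & 1]);
        pthread_cond_broadcast(&cv_batch[mybatch & 1]);
    }

    int
    main(void)
    {
        commit();
        printf("committed through batch %llu\n", com_batch);
        return (0);
    }
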
1601 zil_sync(zilog_t *zilog, dmu_tx_t *tx)
1603 zil_header_t *zh = zil_header_in_syncing_context(zilog);
1605 spa_t *spa = zilog->zl_spa;
1606 uint64_t *replayed_seq = &zilog->zl_replayed_seq[txg & TXG_MASK];
1616 mutex_enter(&zilog->zl_lock);
1618 ASSERT(zilog->zl_stop_sync == 0);
1626 if (zilog->zl_destroy_txg == txg) {
1629 ASSERT(list_head(&zilog->zl_lwb_list) == NULL);
1632 bzero(zilog->zl_replayed_seq, sizeof (zilog->zl_replayed_seq));
1634 if (zilog->zl_keep_first) {
1643 zil_init_log_chain(zilog, &blk);
1648 while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
1652 list_remove(&zilog->zl_lwb_list, lwb);
1662 if (list_head(&zilog->zl_lwb_list) == NULL)
1665 mutex_exit(&zilog->zl_lock);
1682 zil_set_sync(zilog_t *zilog, uint64_t sync)
1684 zilog->zl_sync = sync;
1688 zil_set_logbias(zilog_t *zilog, uint64_t logbias)
1690 zilog->zl_logbias = logbias;
1696 zilog_t *zilog;
1698 zilog = kmem_zalloc(sizeof (zilog_t), KM_SLEEP);
1700 zilog->zl_header = zh_phys;
1701 zilog->zl_os = os;
1702 zilog->zl_spa = dmu_objset_spa(os);
1703 zilog->zl_dmu_pool = dmu_objset_pool(os);
1704 zilog->zl_destroy_txg = TXG_INITIAL - 1;
1705 zilog->zl_logbias = dmu_objset_logbias(os);
1706 zilog->zl_sync = dmu_objset_syncprop(os);
1707 zilog->zl_next_batch = 1;
1709 mutex_init(&zilog->zl_lock, NULL, MUTEX_DEFAULT, NULL);
1712 mutex_init(&zilog->zl_itxg[i].itxg_lock, NULL,
1716 list_create(&zilog->zl_lwb_list, sizeof (lwb_t),
1719 list_create(&zilog->zl_itx_commit_list, sizeof (itx_t),
1722 mutex_init(&zilog->zl_vdev_lock, NULL, MUTEX_DEFAULT, NULL);
1724 avl_create(&zilog->zl_vdev_tree, zil_vdev_compare,
1727 cv_init(&zilog->zl_cv_writer, NULL, CV_DEFAULT, NULL);
1728 cv_init(&zilog->zl_cv_suspend, NULL, CV_DEFAULT, NULL);
1729 cv_init(&zilog->zl_cv_batch[0], NULL, CV_DEFAULT, NULL);
1730 cv_init(&zilog->zl_cv_batch[1], NULL, CV_DEFAULT, NULL);
1732 return (zilog);
1736 zil_free(zilog_t *zilog)
1738 zilog->zl_stop_sync = 1;
1740 ASSERT0(zilog->zl_suspend);
1741 ASSERT0(zilog->zl_suspending);
1743 ASSERT(list_is_empty(&zilog->zl_lwb_list));
1744 list_destroy(&zilog->zl_lwb_list);
1746 avl_destroy(&zilog->zl_vdev_tree);
1747 mutex_destroy(&zilog->zl_vdev_lock);
1749 ASSERT(list_is_empty(&zilog->zl_itx_commit_list));
1750 list_destroy(&zilog->zl_itx_commit_list);
1760 if (zilog->zl_itxg[i].itxg_itxs)
1761 zil_itxg_clean(zilog->zl_itxg[i].itxg_itxs);
1762 mutex_destroy(&zilog->zl_itxg[i].itxg_lock);
1765 mutex_destroy(&zilog->zl_lock);
1767 cv_destroy(&zilog->zl_cv_writer);
1768 cv_destroy(&zilog->zl_cv_suspend);
1769 cv_destroy(&zilog->zl_cv_batch[0]);
1770 cv_destroy(&zilog->zl_cv_batch[1]);
1772 kmem_free(zilog, sizeof (zilog_t));
1781 zilog_t *zilog = dmu_objset_zil(os);
1783 ASSERT(zilog->zl_clean_taskq == NULL);
1784 ASSERT(zilog->zl_get_data == NULL);
1785 ASSERT(list_is_empty(&zilog->zl_lwb_list));
1787 zilog->zl_get_data = get_data;
1788 zilog->zl_clean_taskq = taskq_create("zil_clean", 1, minclsyspri,
1791 return (zilog);
1798 zil_close(zilog_t *zilog)
1803 zil_commit(zilog, 0); /* commit all itx */
1811 mutex_enter(&zilog->zl_lock);
1812 lwb = list_tail(&zilog->zl_lwb_list);
1815 mutex_exit(&zilog->zl_lock);
1817 txg_wait_synced(zilog->zl_dmu_pool, txg);
1818 ASSERT(!zilog_is_dirty(zilog));
1820 taskq_destroy(zilog->zl_clean_taskq);
1821 zilog->zl_clean_taskq = NULL;
1822 zilog->zl_get_data = NULL;
1827 mutex_enter(&zilog->zl_lock);
1828 lwb = list_head(&zilog->zl_lwb_list);
1830 ASSERT(lwb == list_tail(&zilog->zl_lwb_list));
1831 list_remove(&zilog->zl_lwb_list, lwb);
1835 mutex_exit(&zilog->zl_lock);
1866 zilog_t *zilog;
1873 zilog = dmu_objset_zil(os);
1875 mutex_enter(&zilog->zl_lock);
1876 zh = zilog->zl_header;
1879 mutex_exit(&zilog->zl_lock);
1890 if (cookiep == NULL && !zilog->zl_suspending &&
1891 (zilog->zl_suspend > 0 || BP_IS_HOLE(&zh->zh_log))) {
1892 mutex_exit(&zilog->zl_lock);
1900 zilog->zl_suspend++;
1902 if (zilog->zl_suspend > 1) {
1908 while (zilog->zl_suspending)
1909 cv_wait(&zilog->zl_cv_suspend, &zilog->zl_lock);
1910 mutex_exit(&zilog->zl_lock);
1928 mutex_exit(&zilog->zl_lock);
1932 zilog->zl_suspending = B_TRUE;
1933 mutex_exit(&zilog->zl_lock);
1935 zil_commit(zilog, 0);
1937 zil_destroy(zilog, B_FALSE);
1939 mutex_enter(&zilog->zl_lock);
1940 zilog->zl_suspending = B_FALSE;
1941 cv_broadcast(&zilog->zl_cv_suspend);
1942 mutex_exit(&zilog->zl_lock);
1955 zilog_t *zilog = dmu_objset_zil(os);
1957 mutex_enter(&zilog->zl_lock);
1958 ASSERT(zilog->zl_suspend != 0);
1959 zilog->zl_suspend--;
1960 mutex_exit(&zilog->zl_lock);
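
zil_suspend/zil_resume (lines 1900-1960 above) are a refcount with a one-shot worker: the first suspender sets zl_suspending, drops the lock, commits and destroys the log, then broadcasts zl_cv_suspend; later suspenders just bump the count and wait for zl_suspending to clear, and zil_resume decrements. A pthread model of that handshake (simplified: the error paths and header checks are left out):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lk = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cv_suspend = PTHREAD_COND_INITIALIZER;
    static int suspend_cnt, suspending;

    static void destroy_log(void) { }   /* stand-in for commit + destroy */

    static void
    suspend(void)
    {
        pthread_mutex_lock(&lk);
        suspend_cnt++;
        if (suspend_cnt > 1) {          /* someone else does the work */
            while (suspending)
                pthread_cond_wait(&cv_suspend, &lk);
            pthread_mutex_unlock(&lk);
            return;
        }
        suspending = 1;
        pthread_mutex_unlock(&lk);
        destroy_log();                  /* done without holding the lock */
        pthread_mutex_lock(&lk);
        suspending = 0;
        pthread_cond_broadcast(&cv_suspend);
        pthread_mutex_unlock(&lk);
    }

    static void
    resume(void)
    {
        pthread_mutex_lock(&lk);
        suspend_cnt--;
        pthread_mutex_unlock(&lk);
    }

    int
    main(void)
    {
        suspend();
        resume();
        printf("suspend count %d\n", suspend_cnt);  /* 0 */
        return (0);
    }
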
1973 zil_replay_error(zilog_t *zilog, lr_t *lr, int error)
1977 zilog->zl_replaying_seq--; /* didn't actually replay this one */
1979 dmu_objset_name(zilog->zl_os, name);
1991 zil_replay_log_record(zilog_t *zilog, lr_t *lr, void *zra, uint64_t claim_txg)
1994 const zil_header_t *zh = zilog->zl_header;
1999 zilog->zl_replaying_seq = lr->lrc_seq;
2011 return (zil_replay_error(zilog, lr, EINVAL));
2018 error = dmu_object_info(zilog->zl_os,
2033 error = zil_read_log_data(zilog, (lr_write_t *)lr,
2036 return (zil_replay_error(zilog, lr, error));
2064 txg_wait_synced(spa_get_dsl(zilog->zl_spa), 0);
2067 return (zil_replay_error(zilog, lr, error));
2074 zil_incr_blks(zilog_t *zilog, blkptr_t *bp, void *arg, uint64_t claim_txg)
2076 zilog->zl_replay_blks++;
2087 zilog_t *zilog = dmu_objset_zil(os);
2088 const zil_header_t *zh = zilog->zl_header;
2092 zil_destroy(zilog, B_TRUE);
2105 txg_wait_synced(zilog->zl_dmu_pool, 0);
2107 zilog->zl_replay = B_TRUE;
2108 zilog->zl_replay_time = ddi_get_lbolt();
2109 ASSERT(zilog->zl_replay_blks == 0);
2110 (void) zil_parse(zilog, zil_incr_blks, zil_replay_log_record, &zr,
2114 zil_destroy(zilog, B_FALSE);
2115 txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);
2116 zilog->zl_replay = B_FALSE;
2121 zil_replaying(zilog_t *zilog, dmu_tx_t *tx)
2123 if (zilog->zl_sync == ZFS_SYNC_DISABLED)
2126 if (zilog->zl_replay) {
2127 dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
2128 zilog->zl_replayed_seq[dmu_tx_get_txg(tx) & TXG_MASK] =
2129 zilog->zl_replaying_seq;