Lines Matching defs:txg

47  * either the DMU transaction group (txg) commits them to the stable pool
112 * a single list in the zl_itxg[] that uses a high txg: ZILTEST_TXG
314 zil_parse_lr_func_t *parse_lr_func, void *arg, uint64_t txg)
353 if ((error = parse_blk_func(zilog, &blk, arg, txg)) != 0)
372 if ((error = parse_lr_func(zilog, lr, arg, txg)) != 0)
461 zil_alloc_lwb(zilog_t *zilog, blkptr_t *bp, boolean_t slog, uint64_t txg)
470 lwb->lwb_max_txg = txg;
493 zilog_dirty(zilog_t *zilog, uint64_t txg)
501 if (txg_list_add(&dp->dp_dirty_zilogs, zilog, txg)) {
508 * Determine if the zil is dirty in the specified txg. Callers wanting to
510 * the specified txg. Holding the lock will ensure that the zil cannot be
515 zilog_is_dirty_in_txg(zilog_t *zilog, uint64_t txg)
519 if (txg_list_member(&dp->dp_dirty_zilogs, zilog, txg & TXG_MASK))
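The two fragments above, zilog_dirty() around 493-501 and zilog_is_dirty_in_txg() around 508-519, rely on the same trick: only TXG_SIZE transaction groups can be in flight at once, so per-txg state lives in a small ring indexed with txg & TXG_MASK (the comment at 508-510 also notes that a caller who needs the answer to stay valid must hold a lock that keeps the txg from syncing). The following is a minimal, self-contained user-space sketch of that indexing; model_zilog and its helpers are illustrative stand-ins, not the real dsl_pool/txg_list_t machinery.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TXG_SIZE 4                    /* txgs that can be in flight at once */
#define TXG_MASK (TXG_SIZE - 1)

struct model_zilog {
        /* one membership flag per in-flight txg, indexed by txg & TXG_MASK */
        bool dirty[TXG_SIZE];
};

/* analogous to zilog_dirty(): put the zilog on txg's dirty list */
static void
model_zilog_dirty(struct model_zilog *zl, uint64_t txg)
{
        zl->dirty[txg & TXG_MASK] = true;
}

/* analogous to zilog_is_dirty_in_txg(): is the zilog dirty in this txg? */
static bool
model_zilog_is_dirty_in_txg(const struct model_zilog *zl, uint64_t txg)
{
        return (zl->dirty[txg & TXG_MASK]);
}

int
main(void)
{
        struct model_zilog zl = { .dirty = { false } };

        model_zilog_dirty(&zl, 42);
        printf("dirty in txg 42: %d\n", model_zilog_is_dirty_in_txg(&zl, 42));
        printf("dirty in txg 43: %d\n", model_zilog_is_dirty_in_txg(&zl, 43));
        return (0);
}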
548 uint64_t txg = 0;
573 txg = dmu_tx_get_txg(tx);
576 zio_free_zil(zilog->zl_spa, txg, &blk);
580 error = zio_alloc_zil(zilog->zl_spa, txg, &blk, NULL,
591 lwb = zil_alloc_lwb(zilog, &blk, slog, txg);
600 txg_wait_synced(zilog->zl_dmu_pool, txg);
623 uint64_t txg;
638 txg = dmu_tx_get_txg(tx);
642 ASSERT3U(zilog->zl_destroy_txg, <, txg);
643 zilog->zl_destroy_txg = txg;
653 zio_free_zil(zilog->zl_spa, txg, &lwb->lwb_blk);
897 * the txg. If we have had an allocation failure and
898 * the txg is waiting to sync then we want zil_sync()
911 * to the next block in the chain, so it's OK to let the txg in
972 uint64_t txg;
1007 txg = dmu_tx_get_txg(tx);
1040 error = zio_alloc_zil(spa, txg, bp, &lwb->lwb_blk, zil_blksz, &slog);
1042 ASSERT3U(bp->blk_birth, ==, txg);
1049 nlwb = zil_alloc_lwb(zilog, bp, slog, txg);
1091 uint64_t txg = lrc->lrc_txg;
1138 if (txg > spa_freeze_txg(zilog->zl_spa))
1139 txg_wait_synced(zilog->zl_dmu_pool, txg);
1158 txg_wait_synced(zilog->zl_dmu_pool, txg);
1177 lwb->lwb_max_txg = MAX(lwb->lwb_max_txg, txg);
1269 uint64_t otxg, txg;
1284 for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) {
1285 itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK];
1288 if (itxg->itxg_txg != txg) {
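The loop at 1284-1288 is the scan pattern the ZIL uses whenever it must visit every itx bucket that might still hold work: at most TXG_CONCURRENT_STATES txgs are open at once, each maps to a zl_itxg[] slot via txg & TXG_MASK, and a slot whose recorded itxg_txg does not match the txg being scanned is leftover state from an older, already-synced txg and is skipped. Below is a self-contained model of that pattern, with illustrative types rather than the real itxg_t.

#include <stdint.h>
#include <stdio.h>

#define TXG_SIZE                4
#define TXG_MASK                (TXG_SIZE - 1)
#define TXG_CONCURRENT_STATES   3       /* open, quiescing, syncing */

struct model_itxg {
        uint64_t itxg_txg;      /* txg this bucket currently belongs to */
        int      itxg_count;    /* stand-in for the bucket's itx lists */
};

struct model_zilog {
        struct model_itxg zl_itxg[TXG_SIZE];
};

static void
model_scan_itxgs(const struct model_zilog *zl, uint64_t otxg)
{
        uint64_t txg;

        for (txg = otxg; txg < otxg + TXG_CONCURRENT_STATES; txg++) {
                const struct model_itxg *itxg = &zl->zl_itxg[txg & TXG_MASK];

                if (itxg->itxg_txg != txg)
                        continue;       /* stale bucket from an older txg */

                printf("txg %llu: %d queued itxs\n",
                    (unsigned long long)txg, itxg->itxg_count);
        }
}

int
main(void)
{
        struct model_zilog zl = { .zl_itxg = {
                [2] = { .itxg_txg = 102, .itxg_count = 3 },
                [3] = { .itxg_txg = 99,  .itxg_count = 7 },     /* stale */
        } };

        model_scan_itxgs(&zl, 101);     /* visits txgs 101, 102, 103 */
        return (0);
}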
1313 uint64_t txg;
1318 * Object ids can be re-instantiated in the next txg so
1334 txg = ZILTEST_TXG;
1336 txg = dmu_tx_get_txg(tx);
1338 itxg = &zilog->zl_itxg[txg & TXG_MASK];
1341 if (itxg->itxg_txg != txg) {
1350 itxg->itxg_txg = txg;
1379 zilog_dirty(zilog, txg);
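The fragments at 1313-1379 come from the itx-assignment path: an itx is normally filed under the txg of its DMU transaction, but under ziltest (a frozen pool) everything is collected in the single bucket keyed by ZILTEST_TXG mentioned in the header comment at 112. If the target bucket still carries a stale itxg_txg, it is recycled for the new txg before the itx is appended, and the zilog is then marked dirty in that txg. A compact model of that flow follows; MODEL_ZILTEST_TXG is a stand-in value rather than the real ZILTEST_TXG definition, and the cleanup of a recycled bucket is reduced to a counter reset.

#include <stdbool.h>
#include <stdint.h>

#define TXG_SIZE        4
#define TXG_MASK        (TXG_SIZE - 1)
/* stand-in only: any value guaranteed to exceed every real txg */
#define MODEL_ZILTEST_TXG UINT64_MAX

struct model_itxg {
        uint64_t itxg_txg;      /* txg this bucket currently belongs to */
        int      itxg_count;    /* stand-in for the bucket's itx lists */
};

struct model_zilog {
        struct model_itxg zl_itxg[TXG_SIZE];
        bool zl_dirty[TXG_SIZE];
};

/* analogous to zil_itx_assign(): file one itx under the right txg bucket */
static void
model_itx_assign(struct model_zilog *zl, uint64_t dmu_tx_txg, bool ziltest)
{
        uint64_t txg = ziltest ? MODEL_ZILTEST_TXG : dmu_tx_txg;
        struct model_itxg *itxg = &zl->zl_itxg[txg & TXG_MASK];

        if (itxg->itxg_txg != txg) {
                /* bucket still holds an older txg's state: recycle it */
                itxg->itxg_count = 0;
                itxg->itxg_txg = txg;
        }
        itxg->itxg_count++;                     /* "append" the itx */
        zl->zl_dirty[txg & TXG_MASK] = true;    /* analogous to zilog_dirty() */
}

int
main(void)
{
        struct model_zilog zl = { 0 };

        model_itx_assign(&zl, 1234, false);     /* normal path */
        model_itx_assign(&zl, 1234, true);      /* ziltest path */
        return (0);
}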
1390 * have written out the uberblocks (i.e. txg has been committed) so that
1429 uint64_t otxg, txg;
1439 * the last synced txg from changing. That's okay since we'll
1442 for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) {
1443 itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK];
1446 if (itxg->itxg_txg != txg) {
1453 * then the zil better be dirty in this "txg". We can assert
1457 * if it's unnecessary (i.e. the txg was synced).
1459 ASSERT(zilog_is_dirty_in_txg(zilog, txg) ||
1473 uint64_t otxg, txg;
1485 * the last synced txg from changing.
1487 for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) {
1488 itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK];
1491 if (itxg->itxg_txg != txg) {
1526 uint64_t txg;
1557 txg = itx->itx_lr.lrc_txg;
1558 ASSERT3U(txg, !=, 0);
1562 * out a log block for a txg that was just synced. This is
1566 if (txg > spa_last_synced_txg(spa) || txg > spa_freeze_txg(spa))
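The check at 1557-1566 is the commit writer deciding whether a queued record still has to reach the on-disk log: once its txg has synced, the data is already on stable storage through the normal txg path, so writing a log block for it would be wasted work (the comment at 1562 notes the race is harmless), while txgs past the freeze txg never sync and must always be logged. A small predicate captures the rule; the name is illustrative.

#include <stdbool.h>
#include <stdint.h>

/*
 * Model of the test at 1566, not the ZFS code: a record must still be
 * written to the ZIL unless its txg has already been committed by the DMU.
 * When the pool is frozen for ZIL testing, txgs past the freeze point never
 * sync, so they always need logging; on a normal pool spa_freeze_txg() is
 * UINT64_MAX and the second clause never fires.
 */
bool
model_itx_needs_logging(uint64_t itx_txg, uint64_t last_synced_txg,
    uint64_t freeze_txg)
{
        return (itx_txg > last_synced_txg || itx_txg > freeze_txg);
}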
1668 uint64_t txg = dmu_tx_get_txg(tx);
1670 uint64_t *replayed_seq = &zilog->zl_replayed_seq[txg & TXG_MASK];
1690 if (zilog->zl_destroy_txg == txg) {
1714 if (lwb->lwb_buf != NULL || lwb->lwb_max_txg > txg)
1717 zio_free_zil(spa, txg, &lwb->lwb_blk);
1819 * a txg (e.g. ztest TX_TRUNCATE). So there's no zil_clean()
1865 uint64_t txg = 0;
1871 * for the zil. After a txg_wait_synced() on the txg we know all the
1878 txg = lwb->lwb_max_txg;
1880 if (txg)
1881 txg_wait_synced(zilog->zl_dmu_pool, txg);
1884 zfs_dbgmsg("zil (%p) is dirty, txg %llu", zilog, txg);
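Several of the fragments above trace the life of lwb_max_txg: it is set when an lwb is allocated (461-470), raised as records are committed into the block (1177), checked by zil_sync() before an lwb's block may be freed (1714-1717), and read by zil_close() to decide which txg to wait for before tearing the log down (1865-1884, where a single lwb's lwb_max_txg, presumably the list tail's, is used). A hedged sketch of the rule with illustrative names; the max-scan below is an equivalent, more explicit formulation of that teardown wait.

#include <stdbool.h>
#include <stdint.h>

/*
 * Illustrative model, not the ZFS code: every log write block (lwb)
 * remembers the highest txg of any record written into it.  zil_sync()
 * running in txg N may only free an lwb whose buffer has been written out
 * (lwb_buf == NULL) and whose lwb_max_txg <= N; zil_close() waits for the
 * largest outstanding lwb_max_txg to sync before tearing the log down.
 */
struct model_lwb {
        const void *lwb_buf;            /* non-NULL while the write is in flight */
        uint64_t    lwb_max_txg;        /* highest txg with a record in this lwb */
};

/* analogous to the guard at 1714: may zil_sync() in synced_txg free this lwb? */
bool
model_lwb_can_free(const struct model_lwb *lwb, uint64_t synced_txg)
{
        return (lwb->lwb_buf == NULL && lwb->lwb_max_txg <= synced_txg);
}

/* analogous to zil_close() at 1865-1881: the txg that must sync before teardown */
uint64_t
model_lwb_wait_txg(const struct model_lwb *lwbs, int nlwbs)
{
        uint64_t txg = 0;

        for (int i = 0; i < nlwbs; i++)
                if (lwbs[i].lwb_max_txg > txg)
                        txg = lwbs[i].lwb_max_txg;
        return (txg);   /* 0 means there is nothing to wait for */
}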
2125 * The DMU's dnode layer doesn't see removes until the txg