Lines Matching defs:txg

45  * either the DMU transaction group (txg) commits them to the stable pool
91 * a single list in the zl_itxg[] that uses a high txg: ZILTEST_TXG
285 zil_parse_lr_func_t *parse_lr_func, void *arg, uint64_t txg)
324 if ((error = parse_blk_func(zilog, &blk, arg, txg)) != 0)
343 if ((error = parse_lr_func(zilog, lr, arg, txg)) != 0)
430 zil_alloc_lwb(zilog_t *zilog, blkptr_t *bp, uint64_t txg)
438 lwb->lwb_max_txg = txg;
464 uint64_t txg = 0;
488 txg = dmu_tx_get_txg(tx);
491 zio_free_zil(zilog->zl_spa, txg, &blk);
495 error = zio_alloc_zil(zilog->zl_spa, txg, &blk, NULL,
506 lwb = zil_alloc_lwb(zilog, &blk, txg);
515 txg_wait_synced(zilog->zl_dmu_pool, txg);
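
The zil_create-style path matched at lines 464-515 allocates the first log block inside a DMU transaction's txg and then calls txg_wait_synced() on that txg, so the block is on stable storage before the log is used. As an annotation (not one of the matched lines), here is a minimal sketch of what that wait amounts to; dsl_pool_sketch_t and txg_wait_synced_sketch() are made-up stand-ins, not the real DMU interfaces:

#include <pthread.h>
#include <stdint.h>

typedef struct dsl_pool_sketch {
	pthread_mutex_t dp_lock;
	pthread_cond_t  dp_synced_cv;		/* would be signalled as each txg syncs */
	uint64_t        dp_last_synced_txg;	/* highest txg committed to disk */
} dsl_pool_sketch_t;

/* Block until the pool has synced at least the requested txg. */
static void
txg_wait_synced_sketch(dsl_pool_sketch_t *dp, uint64_t txg)
{
	pthread_mutex_lock(&dp->dp_lock);
	while (dp->dp_last_synced_txg < txg)
		pthread_cond_wait(&dp->dp_synced_cv, &dp->dp_lock);
	pthread_mutex_unlock(&dp->dp_lock);
}

int
main(void)
{
	dsl_pool_sketch_t dp;

	pthread_mutex_init(&dp.dp_lock, NULL);
	pthread_cond_init(&dp.dp_synced_cv, NULL);
	dp.dp_last_synced_txg = 5;

	/* txg 3 has already synced, so this returns immediately. */
	txg_wait_synced_sketch(&dp, 3);
	return (0);
}
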
538 uint64_t txg;
553 txg = dmu_tx_get_txg(tx);
557 ASSERT3U(zilog->zl_destroy_txg, <, txg);
558 zilog->zl_destroy_txg = txg;
568 zio_free_zil(zilog->zl_spa, txg, &lwb->lwb_blk);
797 * the txg. If we have had an allocation failure and
798 * the txg is waiting to sync then we want zil_sync()
811 * to the next block in the chain, so it's OK to let the txg in
876 uint64_t txg;
902 txg = dmu_tx_get_txg(tx);
935 error = zio_alloc_zil(spa, txg, bp, &lwb->lwb_blk, zil_blksz,
938 ASSERT3U(bp->blk_birth, ==, txg);
945 nlwb = zil_alloc_lwb(zilog, bp, txg);
985 uint64_t txg = lrc->lrc_txg;
1012 txg_wait_synced(zilog->zl_dmu_pool, txg);
1026 if (txg > spa_freeze_txg(zilog->zl_spa))
1027 txg_wait_synced(zilog->zl_dmu_pool, txg);
1043 txg_wait_synced(zilog->zl_dmu_pool, txg);
1062 lwb->lwb_max_txg = MAX(lwb->lwb_max_txg, txg);
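
The rollover path matched at lines 876-945 allocates the next log block in the current open txg, asserts its birth txg matches (line 938), and creates a new lwb stamped with that txg; line 1062 then raises lwb_max_txg as records from later txgs are copied into the block. A small sketch of that stamping, using simplified stand-in types rather than the real zil_impl.h structures:

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

#define	MAX(a, b)	((a) > (b) ? (a) : (b))

/* Simplified stand-ins for blkptr_t and lwb_t. */
typedef struct blkptr_sketch {
	uint64_t blk_birth;		/* txg in which the block was allocated */
} blkptr_sketch_t;

typedef struct lwb_sketch {
	blkptr_sketch_t lwb_blk;
	uint64_t        lwb_max_txg;	/* highest txg this block depends on */
} lwb_sketch_t;

/* Loosely mirrors the zil_alloc_lwb(zilog, bp, txg) call at line 945. */
static lwb_sketch_t *
alloc_lwb_sketch(const blkptr_sketch_t *bp, uint64_t txg)
{
	lwb_sketch_t *lwb = calloc(1, sizeof (*lwb));

	assert(lwb != NULL);
	assert(bp->blk_birth == txg);	/* cf. the ASSERT3U at line 938 */
	lwb->lwb_blk = *bp;
	lwb->lwb_max_txg = txg;		/* cf. line 438 */
	return (lwb);
}

/* Loosely mirrors line 1062: a record from txg was copied into the block. */
static void
lwb_note_record_txg(lwb_sketch_t *lwb, uint64_t txg)
{
	lwb->lwb_max_txg = MAX(lwb->lwb_max_txg, txg);
}

int
main(void)
{
	blkptr_sketch_t bp = { .blk_birth = 10 };
	lwb_sketch_t *lwb = alloc_lwb_sketch(&bp, 10);

	lwb_note_record_txg(lwb, 12);	/* lwb_max_txg becomes 12 */
	free(lwb);
	return (0);
}
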
1149 uint64_t otxg, txg;
1164 for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) {
1165 itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK];
1168 if (itxg->itxg_txg != txg) {
1193 uint64_t txg;
1198 * Object ids can be re-instantiated in the next txg so
1214 txg = ZILTEST_TXG;
1216 txg = dmu_tx_get_txg(tx);
1218 itxg = &zilog->zl_itxg[txg & TXG_MASK];
1221 if (itxg->itxg_txg != txg) {
1233 itxg->itxg_txg = txg;
1312 uint64_t otxg, txg;
1321 for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) {
1322 itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK];
1325 if (itxg->itxg_txg != txg) {
1345 uint64_t otxg, txg;
1355 for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) {
1356 itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK];
1359 if (itxg->itxg_txg != txg) {
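
The assignment path at lines 1193-1233 and the three scan loops at 1149-1168, 1312-1325, and 1345-1359 all index the per-txg itx lists the same way: zl_itxg[] has one slot per txg that can be in flight, selected with txg & TXG_MASK, and each slot records which txg currently owns it so a stale slot left over from an older txg can be detected. Below is a minimal, self-contained sketch of that indexing; the constants and reduced types are stand-ins for the authoritative sys/txg.h and zil_impl.h definitions:

#include <stdint.h>
#include <stdio.h>

/* Stand-in definitions; the authoritative values live in sys/txg.h. */
#define	TXG_SIZE		4		/* power of two, so & works as modulo */
#define	TXG_MASK		(TXG_SIZE - 1)
#define	TXG_CONCURRENT_STATES	3		/* open, quiescing, syncing */

/* Reduced itxg_t: just the owning txg, no itx lists or locks. */
typedef struct itxg_sketch {
	uint64_t itxg_txg;
} itxg_sketch_t;

typedef struct zilog_sketch {
	itxg_sketch_t zl_itxg[TXG_SIZE];
} zilog_sketch_t;

/* Assign-side pattern (cf. lines 1218-1233): claim the slot for txg. */
static itxg_sketch_t *
itxg_for_txg(zilog_sketch_t *zl, uint64_t txg)
{
	itxg_sketch_t *itxg = &zl->zl_itxg[txg & TXG_MASK];

	if (itxg->itxg_txg != txg) {
		/* Slot still holds an older txg; the real code cleans it here. */
		itxg->itxg_txg = txg;
	}
	return (itxg);
}

/* Scan-side pattern (cf. lines 1164-1168): walk the currently open txgs. */
static void
scan_open_txgs(zilog_sketch_t *zl, uint64_t otxg)
{
	for (uint64_t txg = otxg; txg < otxg + TXG_CONCURRENT_STATES; txg++) {
		itxg_sketch_t *itxg = &zl->zl_itxg[txg & TXG_MASK];

		if (itxg->itxg_txg != txg)
			continue;	/* nothing queued for this txg */
		printf("txg %llu has queued itxs\n", (unsigned long long)txg);
	}
}

int
main(void)
{
	zilog_sketch_t zl = { 0 };

	(void) itxg_for_txg(&zl, 7);
	scan_open_txgs(&zl, 6);		/* reports only txg 7 */
	return (0);
}
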
1394 uint64_t txg;
1425 txg = itx->itx_lr.lrc_txg;
1426 ASSERT(txg);
1428 if (txg > spa_last_synced_txg(spa) || txg > spa_freeze_txg(spa))
1530 uint64_t txg = dmu_tx_get_txg(tx);
1532 uint64_t *replayed_seq = &zilog->zl_replayed_seq[txg & TXG_MASK];
1552 if (zilog->zl_destroy_txg == txg) {
1576 if (lwb->lwb_buf != NULL || lwb->lwb_max_txg > txg)
1579 zio_free_zil(spa, txg, &lwb->lwb_blk);
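
In the zil_sync() matches at lines 1530-1579, an old log block is freed only once its buffer has been written out (lwb_buf is NULL) and the syncing txg has caught up with everything the block covers (lwb_max_txg <= txg); otherwise the walk stops, as at line 1576. A small sketch of that gate with a reduced lwb type (an assumption, not the real structure):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Reduced lwb_t: just the fields the free decision looks at. */
typedef struct lwb_sketch {
	void     *lwb_buf;	/* NULL once the block has been written */
	uint64_t  lwb_max_txg;	/* highest txg of any record in the block */
} lwb_sketch_t;

/*
 * Mirrors the check at line 1576: keep the lwb if it is still being
 * written, or if some record it holds belongs to a txg that has not
 * synced yet; otherwise its on-disk block can be freed in this txg.
 */
static bool
lwb_can_free(const lwb_sketch_t *lwb, uint64_t syncing_txg)
{
	if (lwb->lwb_buf != NULL || lwb->lwb_max_txg > syncing_txg)
		return (false);
	return (true);
}

int
main(void)
{
	lwb_sketch_t lwb = { .lwb_buf = NULL, .lwb_max_txg = 12 };

	/* Freeable only once the pool has synced txg 12 or later. */
	return (lwb_can_free(&lwb, 12) ? 0 : 1);
}
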
1689 * a txg (e.g. ztest TX_TRUNCATE). So there's no zil_clean()
1731 uint64_t txg = 0;
1737 * for the zil. After a txg_wait_synced() on the txg we know all the
1744 txg = tail_lwb->lwb_max_txg;
1746 if (txg)
1747 txg_wait_synced(zilog->zl_dmu_pool, txg);
1897 * The DMU's dnode layer doesn't see removes until the txg