Lines Matching defs:txg

40 int zfs_txg_timeout = 5;	/* max seconds worth of delta per txg */
43 * Prepare the txg subsystem.
46 txg_init(dsl_pool_t *dp, uint64_t txg)
75 tx->tx_open_txg = txg;
79 * Close down the txg subsystem.
222 uint64_t txg;
226 txg = tx->tx_open_txg;
227 tc->tc_count[txg & TXG_MASK]++;
230 th->th_txg = txg;
232 return (txg);
270 txg_quiesce(dsl_pool_t *dp, uint64_t txg)
273 int g = txg & TXG_MASK;
277 * Grab all tx_cpu locks so nobody else can get into this txg.
282 ASSERT(txg == tx->tx_open_txg);
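The hold fragments (lines 222-232) bump a per-CPU counter slot selected with txg & TXG_MASK, and txg_quiesce() (lines 270-282) locks every tx_cpu so no new holds can enter, advances the open txg, then drains the old slot. Below is a minimal single-file sketch of that pattern, assuming TXG_SIZE == 4 and using pthread primitives with simplified stand-in structs; the real tx_cpu_t/tx_state_t layouts, lock lifetimes, and cv usage differ.

#include <assert.h>
#include <pthread.h>
#include <stdint.h>

#define	TXG_SIZE	4		/* assumption: 4 txg slots, as upstream */
#define	TXG_MASK	(TXG_SIZE - 1)
#define	NCPUS		4		/* illustrative CPU count */

typedef struct tx_cpu {			/* simplified stand-in for tx_cpu_t */
	pthread_mutex_t tc_lock;
	pthread_cond_t	tc_cv[TXG_SIZE];
	int		tc_count[TXG_SIZE];	/* open holds per txg slot */
} tx_cpu_t;

typedef struct tx_state {		/* simplified stand-in for tx_state_t */
	tx_cpu_t tx_cpu[NCPUS];
	uint64_t tx_open_txg;
} tx_state_t;

/* Take a hold on the open txg in one CPU's slot (cf. lines 222-232). */
uint64_t
hold_open(tx_state_t *tx, int cpu)
{
	tx_cpu_t *tc = &tx->tx_cpu[cpu];
	uint64_t txg;

	pthread_mutex_lock(&tc->tc_lock);
	txg = tx->tx_open_txg;
	tc->tc_count[txg & TXG_MASK]++;
	pthread_mutex_unlock(&tc->tc_lock);
	return (txg);
}

/* Release a hold; wake the quiesce path if this slot just drained. */
void
rele(tx_state_t *tx, int cpu, uint64_t txg)
{
	tx_cpu_t *tc = &tx->tx_cpu[cpu];
	int g = txg & TXG_MASK;

	pthread_mutex_lock(&tc->tc_lock);
	if (--tc->tc_count[g] == 0)
		pthread_cond_broadcast(&tc->tc_cv[g]);
	pthread_mutex_unlock(&tc->tc_lock);
}

/* Quiesce txg: block new holds, advance tx_open_txg, drain the old slot. */
void
quiesce(tx_state_t *tx, uint64_t txg)
{
	int g = txg & TXG_MASK;
	int c;

	/* Grab all tx_cpu locks so nobody else can get into this txg. */
	for (c = 0; c < NCPUS; c++)
		pthread_mutex_lock(&tx->tx_cpu[c].tc_lock);
	assert(txg == tx->tx_open_txg);
	tx->tx_open_txg++;		/* new holds now land in txg+1 */
	for (c = 0; c < NCPUS; c++)
		pthread_mutex_unlock(&tx->tx_cpu[c].tc_lock);

	/* Wait for existing holds on slot g to drain, one CPU at a time. */
	for (c = 0; c < NCPUS; c++) {
		tx_cpu_t *tc = &tx->tx_cpu[c];

		pthread_mutex_lock(&tc->tc_lock);
		while (tc->tc_count[g] != 0)
			pthread_cond_wait(&tc->tc_cv[g], &tc->tc_lock);
		pthread_mutex_unlock(&tc->tc_lock);
	}
}

Because only TXG_SIZE slots exist, only a few txgs (open, quiescing, syncing) are in flight at once, which is what makes the & TXG_MASK indexing safe.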
315 * Dispatch the commit callbacks registered on this txg to worker threads.
318 txg_dispatch_callbacks(dsl_pool_t *dp, uint64_t txg)
328 int g = txg & TXG_MASK;
366 uint64_t txg;
370 * on us, or the quiesce thread has handed off a txg to
386 * Wait until the quiesce thread hands off a txg to us,
400 * Consume the quiesced txg which has been handed off to
402 * able to quiesce another txg, so we must signal it.
404 txg = tx->tx_quiesced_txg;
406 tx->tx_syncing_txg = txg;
409 dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
410 txg, tx->tx_quiesce_txg_waiting, tx->tx_sync_txg_waiting);
414 spa_sync(spa, txg);
418 tx->tx_synced_txg = txg;
425 txg_dispatch_callbacks(dp, txg);
438 uint64_t txg;
442 * However, we can only have one txg in "quiescing" or
444 * the "quiesced, waiting to sync" txg has been consumed
455 txg = tx->tx_open_txg;
456 dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
457 txg, tx->tx_quiesce_txg_waiting,
460 txg_quiesce(dp, txg);
464 * Hand this txg off to the sync thread.
466 dprintf("quiesce done, handing off txg %llu\n", txg);
467 tx->tx_quiesced_txg = txg;
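The sync-thread fragments (lines 366-425) and quiesce-thread fragments (lines 438-467) form a two-stage pipeline: the quiesce thread may have only one txg in the "quiescing" or "quiesced, waiting to sync" state, and it publishes each quiesced txg in tx_quiesced_txg; the sync thread consumes it, runs spa_sync(), records tx_synced_txg, and dispatches the commit callbacks. A hedged sketch of that handoff follows, with pthread condition variables standing in for the kernel cv_* calls; the cv names (tx_handoff_cv), tx_exiting flag, and the stubs are illustrative assumptions, not the upstream identifiers.

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the tx_state_t fields the two threads share. */
typedef struct tx_state {
	pthread_mutex_t tx_sync_lock;
	pthread_cond_t	tx_quiesce_more_cv;	/* "quiesce another txg, please" */
	pthread_cond_t	tx_handoff_cv;		/* a quiesced txg was handed off */
	pthread_cond_t	tx_sync_done_cv;	/* a txg finished syncing */
	uint64_t tx_open_txg;
	uint64_t tx_quiesced_txg;		/* 0 = nothing waiting to sync */
	uint64_t tx_syncing_txg;
	uint64_t tx_synced_txg;
	int	 tx_exiting;
} tx_state_t;

/* Stubs for the heavy lifting the real threads delegate. */
static void spa_sync_stub(uint64_t txg) { printf("sync %llu\n", (unsigned long long)txg); }
static void quiesce_stub(tx_state_t *tx, uint64_t txg) { (void)tx; (void)txg; }

/* Sync thread: consume quiesced txgs one at a time, in order. */
void *
sync_thread_sketch(void *arg)
{
	tx_state_t *tx = arg;

	pthread_mutex_lock(&tx->tx_sync_lock);
	while (!tx->tx_exiting) {
		uint64_t txg;

		/* Wait until the quiesce thread hands off a txg to us. */
		while (tx->tx_quiesced_txg == 0 && !tx->tx_exiting)
			pthread_cond_wait(&tx->tx_handoff_cv, &tx->tx_sync_lock);
		if (tx->tx_exiting)
			break;

		/*
		 * Consume the quiesced txg; the quiesce thread is now able
		 * to quiesce another txg, so signal it.
		 */
		txg = tx->tx_quiesced_txg;
		tx->tx_quiesced_txg = 0;
		tx->tx_syncing_txg = txg;
		pthread_cond_broadcast(&tx->tx_quiesce_more_cv);

		/* Sync outside the lock so new txgs can open meanwhile. */
		pthread_mutex_unlock(&tx->tx_sync_lock);
		spa_sync_stub(txg);
		pthread_mutex_lock(&tx->tx_sync_lock);

		tx->tx_syncing_txg = 0;
		tx->tx_synced_txg = txg;
		pthread_cond_broadcast(&tx->tx_sync_done_cv);
		/* The real thread now dispatches this txg's commit callbacks. */
	}
	pthread_mutex_unlock(&tx->tx_sync_lock);
	return (NULL);
}

/* Quiesce thread: at most one txg may be quiescing or quiesced at a time. */
void *
quiesce_thread_sketch(void *arg)
{
	tx_state_t *tx = arg;

	pthread_mutex_lock(&tx->tx_sync_lock);
	while (!tx->tx_exiting) {
		uint64_t txg;

		/* Wait until the previously quiesced txg has been consumed. */
		while (tx->tx_quiesced_txg != 0 && !tx->tx_exiting)
			pthread_cond_wait(&tx->tx_quiesce_more_cv,
			    &tx->tx_sync_lock);
		if (tx->tx_exiting)
			break;

		/* The real thread also waits here until a new txg is wanted. */
		txg = tx->tx_open_txg;
		pthread_mutex_unlock(&tx->tx_sync_lock);
		quiesce_stub(tx, txg);	/* see the quiesce sketch above */
		pthread_mutex_lock(&tx->tx_sync_lock);

		/* Hand this txg off to the sync thread. */
		tx->tx_quiesced_txg = txg;
		pthread_cond_broadcast(&tx->tx_handoff_cv);
	}
	pthread_mutex_unlock(&tx->tx_sync_lock);
	return (NULL);
}

Syncing outside the lock is the point of the handoff: the open and quiescing stages keep moving while spa_sync() does the slow I/O.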
475 * group and there is already a waiting txg quiescing or quiesced. Abort
476 * the delay if this txg stalls or enters the quiescing state.
479 txg_delay(dsl_pool_t *dp, uint64_t txg, int ticks)
484 /* don't delay if this txg could transition to quiescing immediately */
485 if (tx->tx_open_txg > txg ||
486 tx->tx_syncing_txg == txg-1 || tx->tx_synced_txg == txg-1)
490 if (tx->tx_open_txg > txg || tx->tx_synced_txg == txg-1) {
496 tx->tx_syncing_txg < txg-1 && !txg_stalled(dp))
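txg_delay() (lines 475-496) throttles a writer for up to 'ticks' only while its txg is still open and the previous txg has neither started nor finished syncing, i.e. only when the pipeline is genuinely backed up, and it bails out as soon as the txg could quiesce. A rough userland sketch of that logic using pthread_cond_timedwait(); the txg_stalled() check from the original is omitted here, and the field and cv names are assumptions.

#include <errno.h>
#include <pthread.h>
#include <stdint.h>
#include <time.h>

/* Simplified txg bookkeeping used by the delay sketch (assumed fields). */
typedef struct tx_state {
	pthread_mutex_t tx_sync_lock;
	pthread_cond_t	tx_quiesce_more_cv;
	uint64_t tx_open_txg, tx_syncing_txg, tx_synced_txg;
} tx_state_t;

/* Stall a writer for up to delay_ms while the sync pipeline is backed up. */
void
txg_delay_sketch(tx_state_t *tx, uint64_t txg, unsigned delay_ms)
{
	struct timespec deadline;

	/* Don't delay if this txg could transition to quiescing immediately. */
	if (tx->tx_open_txg > txg ||
	    tx->tx_syncing_txg == txg - 1 || tx->tx_synced_txg == txg - 1)
		return;

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += delay_ms / 1000;
	deadline.tv_nsec += (long)(delay_ms % 1000) * 1000000L;
	if (deadline.tv_nsec >= 1000000000L) {
		deadline.tv_sec++;
		deadline.tv_nsec -= 1000000000L;
	}

	pthread_mutex_lock(&tx->tx_sync_lock);

	/* Re-check under the lock before committing to the wait. */
	if (tx->tx_open_txg > txg || tx->tx_synced_txg == txg - 1) {
		pthread_mutex_unlock(&tx->tx_sync_lock);
		return;
	}

	/* Abort the delay once the previous txg starts syncing or we time out. */
	while (tx->tx_syncing_txg < txg - 1 &&
	    pthread_cond_timedwait(&tx->tx_quiesce_more_cv,
	    &tx->tx_sync_lock, &deadline) != ETIMEDOUT)
		;

	pthread_mutex_unlock(&tx->tx_sync_lock);
}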
504 txg_wait_synced(dsl_pool_t *dp, uint64_t txg)
510 if (txg == 0)
511 txg = tx->tx_open_txg + TXG_DEFER_SIZE;
512 if (tx->tx_sync_txg_waiting < txg)
513 tx->tx_sync_txg_waiting = txg;
514 dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
515 txg, tx->tx_quiesce_txg_waiting, tx->tx_sync_txg_waiting);
516 while (tx->tx_synced_txg < txg) {
527 txg_wait_open(dsl_pool_t *dp, uint64_t txg)
533 if (txg == 0)
534 txg = tx->tx_open_txg + 1;
535 if (tx->tx_quiesce_txg_waiting < txg)
536 tx->tx_quiesce_txg_waiting = txg;
537 dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
538 txg, tx->tx_quiesce_txg_waiting, tx->tx_sync_txg_waiting);
539 while (tx->tx_open_txg < txg) {
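txg_wait_synced() and txg_wait_open() (lines 504-539) share one pattern: raise a "waiting" watermark so the background threads know how far to push, then loop, kicking the relevant thread and sleeping on a done cv until the synced (or open) txg counter passes the target. A sketch of both follows, assuming TXG_DEFER_SIZE == 2 and illustrative cv names; the real code uses the kernel cv_* primitives.

#include <pthread.h>
#include <stdint.h>

#define	TXG_DEFER_SIZE	2	/* assumption: value of the upstream constant */

/* Simplified stand-in for the fields the two wait functions use. */
typedef struct tx_state {
	pthread_mutex_t tx_sync_lock;
	pthread_cond_t	tx_sync_more_cv;	/* kick the sync thread */
	pthread_cond_t	tx_sync_done_cv;	/* sync thread finished a txg */
	pthread_cond_t	tx_quiesce_more_cv;	/* kick the quiesce thread */
	pthread_cond_t	tx_quiesce_done_cv;	/* a new txg was opened */
	uint64_t tx_open_txg;
	uint64_t tx_synced_txg;
	uint64_t tx_sync_txg_waiting;
	uint64_t tx_quiesce_txg_waiting;
} tx_state_t;

/* Block until txg is synced; 0 means the open txg plus the defer window. */
void
wait_synced_sketch(tx_state_t *tx, uint64_t txg)
{
	pthread_mutex_lock(&tx->tx_sync_lock);
	if (txg == 0)
		txg = tx->tx_open_txg + TXG_DEFER_SIZE;
	if (tx->tx_sync_txg_waiting < txg)
		tx->tx_sync_txg_waiting = txg;		/* raise the watermark */
	while (tx->tx_synced_txg < txg) {
		pthread_cond_broadcast(&tx->tx_sync_more_cv);
		pthread_cond_wait(&tx->tx_sync_done_cv, &tx->tx_sync_lock);
	}
	pthread_mutex_unlock(&tx->tx_sync_lock);
}

/* Block until txg is the open txg; 0 means the next one. */
void
wait_open_sketch(tx_state_t *tx, uint64_t txg)
{
	pthread_mutex_lock(&tx->tx_sync_lock);
	if (txg == 0)
		txg = tx->tx_open_txg + 1;
	if (tx->tx_quiesce_txg_waiting < txg)
		tx->tx_quiesce_txg_waiting = txg;
	while (tx->tx_open_txg < txg) {
		pthread_cond_broadcast(&tx->tx_quiesce_more_cv);
		pthread_cond_wait(&tx->tx_quiesce_done_cv, &tx->tx_sync_lock);
	}
	pthread_mutex_unlock(&tx->tx_sync_lock);
}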
563 * Per-txg object lists.
590 txg_list_empty(txg_list_t *tl, uint64_t txg)
592 return (tl->tl_head[txg & TXG_MASK] == NULL);
600 txg_list_add(txg_list_t *tl, void *p, uint64_t txg)
602 int t = txg & TXG_MASK;
623 txg_list_add_tail(txg_list_t *tl, void *p, uint64_t txg)
625 int t = txg & TXG_MASK;
650 txg_list_remove(txg_list_t *tl, uint64_t txg)
652 int t = txg & TXG_MASK;
672 txg_list_remove_this(txg_list_t *tl, void *p, uint64_t txg)
674 int t = txg & TXG_MASK;
695 txg_list_member(txg_list_t *tl, void *p, uint64_t txg)
697 int t = txg & TXG_MASK;
704 * Walk a txg list -- only safe if you know it's not changing.
707 txg_list_head(txg_list_t *tl, uint64_t txg)
709 int t = txg & TXG_MASK;
716 txg_list_next(txg_list_t *tl, void *p, uint64_t txg)
718 int t = txg & TXG_MASK;
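The txg_list_* fragments (lines 563-718) all index a per-txg head array with txg & TXG_MASK. Below is a simplified sketch of such a list in which each object embeds a node carrying a next pointer and a membership flag per slot, so one object can be queued for several txgs at once; this mirrors how the real txg_list_t behaves, but the exact field names, the tl_offset trick as shown, and the omitted per-list mutex are assumptions.

#include <stddef.h>
#include <stdint.h>

#define	TXG_SIZE	4		/* assumption: matches TXG_MASK + 1 */
#define	TXG_MASK	(TXG_SIZE - 1)

typedef struct txg_node {
	struct txg_node	*tn_next[TXG_SIZE];
	uint8_t		 tn_member[TXG_SIZE];
} txg_node_t;

typedef struct txg_list {
	size_t		 tl_offset;		/* where the node sits in the object */
	txg_node_t	*tl_head[TXG_SIZE];	/* one singly linked list per slot */
} txg_list_t;

#define	TXG_NODE(tl, p)	((txg_node_t *)((char *)(p) + (tl)->tl_offset))

/* Nonzero if nothing is queued for this txg's slot. */
int
list_empty_sketch(txg_list_t *tl, uint64_t txg)
{
	return (tl->tl_head[txg & TXG_MASK] == NULL);
}

/* Add p to the list for txg; returns nonzero if it was already a member. */
int
list_add_sketch(txg_list_t *tl, void *p, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn = TXG_NODE(tl, p);

	if (tn->tn_member[t])
		return (1);
	tn->tn_member[t] = 1;
	tn->tn_next[t] = tl->tl_head[t];
	tl->tl_head[t] = tn;
	return (0);
}

/* Pop the head object for txg, or return NULL if the slot is empty. */
void *
list_remove_sketch(txg_list_t *tl, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn = tl->tl_head[t];

	if (tn == NULL)
		return (NULL);
	tl->tl_head[t] = tn->tn_next[t];
	tn->tn_member[t] = 0;
	tn->tn_next[t] = NULL;
	return ((void *)((char *)tn - tl->tl_offset));
}

Storing the node inside the object (located via tl_offset) avoids any allocation when queueing, which matters because objects are queued on these lists from the middle of transaction processing.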