Searched refs:txg (Results 1 - 25 of 67) sorted by relevance

/freebsd-11-stable/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/
txg.h
42 #define TXG_INITIAL TXG_SIZE /* initial txg */
43 #define TXG_IDX (txg & TXG_MASK)
69 extern void txg_init(struct dsl_pool *dp, uint64_t txg);
78 extern void txg_delay(struct dsl_pool *dp, uint64_t txg, hrtime_t delta,
85 * necessary syncs immediately). If txg==0, wait for the currently open
86 * txg to finish syncing.
88 extern void txg_wait_synced(struct dsl_pool *dp, uint64_t txg);
94 * If txg == 0, wait for the next open txg.
96 extern void txg_wait_open(struct dsl_pool *dp, uint64_t txg);
[all...]
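
The txg.h declarations above (txg_wait_synced(), txg_wait_open(), txg_delay()) are the consumer-facing half of the transaction-group API. As a rough illustration, not taken from the indexed files, the sketch below shows the common pattern of assigning a DMU transaction to the currently open txg, remembering its txg number, and later waiting for that txg to reach stable storage; the helper name and the error handling are hypothetical.

/*
 * Minimal sketch, assuming the in-kernel ZFS environment; sync_one_change()
 * is an invented helper, not part of the indexed sources.
 */
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_pool.h>
#include <sys/txg.h>

static int
sync_one_change(objset_t *os, uint64_t object)
{
	dmu_tx_t *tx = dmu_tx_create(os);
	uint64_t txg;
	int error;

	dmu_tx_hold_bonus(tx, object);		/* declare what will be dirtied */
	error = dmu_tx_assign(tx, TXG_WAIT);	/* join the currently open txg */
	if (error != 0) {
		dmu_tx_abort(tx);
		return (error);
	}
	txg = dmu_tx_get_txg(tx);		/* remember which txg we joined */

	/* ... modify the object's bonus buffer here ... */

	dmu_tx_commit(tx);

	/* Block until that txg (and everything in it) is on stable storage. */
	txg_wait_synced(dmu_objset_pool(os), txg);
	return (0);
}
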
trim_map.h
39 extern void trim_map_free(vdev_t *vd, uint64_t offset, uint64_t size, uint64_t txg);
vdev.h
60 extern int vdev_create(vdev_t *, uint64_t txg, boolean_t isreplace);
70 uint64_t txg, uint64_t size);
72 uint64_t txg, uint64_t size);
75 extern void vdev_dtl_reassess(vdev_t *vd, uint64_t txg, uint64_t scrub_txg,
93 extern int vdev_metaslab_init(vdev_t *vd, uint64_t txg);
97 extern void vdev_expand(vdev_t *vd, uint64_t txg);
146 extern int vdev_config_sync(vdev_t **svd, int svdcount, uint64_t txg);
169 extern nvlist_t *vdev_label_read_config(vdev_t *vd, uint64_t txg);
181 extern int vdev_label_init(vdev_t *vd, uint64_t txg, vdev_labeltype_t reason);
zfeature.h
30 #include <sys/txg.h>
53 uint64_t *txg);
dsl_pool.h
31 #include <sys/txg.h>
143 int dsl_pool_init(spa_t *spa, uint64_t txg, dsl_pool_t **dpp);
146 dsl_pool_t *dsl_pool_create(spa_t *spa, nvlist_t *zplprops, uint64_t txg);
147 void dsl_pool_sync(dsl_pool_t *dp, uint64_t txg);
148 void dsl_pool_sync_done(dsl_pool_t *dp, uint64_t txg);
154 void dsl_pool_undirty_space(dsl_pool_t *dp, int64_t space, uint64_t txg);
155 void dsl_free(dsl_pool_t *dp, uint64_t txg, const blkptr_t *bpp);
156 void dsl_free_sync(zio_t *pio, dsl_pool_t *dp, uint64_t txg,
txg_impl.h
35 #include <sys/txg.h>
74 uint64_t tc_count[TXG_SIZE]; /* tx hold count on each txg */
94 uint64_t tx_open_txg; /* currently open txg id */
95 uint64_t tx_quiescing_txg; /* currently quiescing txg id */
96 uint64_t tx_quiesced_txg; /* quiesced txg waiting for sync */
97 uint64_t tx_syncing_txg; /* currently syncing txg id */
98 uint64_t tx_synced_txg; /* last synced txg id */
102 uint64_t tx_sync_txg_waiting; /* txg we're waiting to sync */
103 uint64_t tx_quiesce_txg_waiting; /* txg we're waiting to open */
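
The tc_count[TXG_SIZE] array above is one instance of a pattern that recurs throughout these results (dp_dirty_pertxg[], ms_allocating[], zl_itxg[]): per-txg state lives in a small ring indexed by (txg & TXG_MASK), because at most TXG_SIZE transaction groups can be active at once. A minimal, purely illustrative sketch of that indexing; the structure and function are invented for the example.

#include <sys/txg.h>			/* TXG_SIZE, TXG_MASK */

typedef struct per_txg_counters {	/* illustrative only */
	uint64_t ptc_dirty[TXG_SIZE];	/* one slot per possibly-active txg */
} per_txg_counters_t;

static void
account_dirty(per_txg_counters_t *ptc, uint64_t txg, uint64_t bytes)
{
	/*
	 * txg & TXG_MASK maps the 64-bit txg id onto one of TXG_SIZE slots;
	 * a slot is safe to reuse once the txg that owned it has fully synced.
	 */
	ptc->ptc_dirty[txg & TXG_MASK] += bytes;
}
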
dsl_synctask.h
29 #include <sys/txg.h>
dmu_tx.h
33 #include <sys/txg.h>
130 extern dmu_tx_t *dmu_tx_create_assigned(struct dsl_pool *dp, uint64_t txg);
dsl_scan.h
87 * scn_suspending - a scan that cannot be completed in a single txg or
93 * a scan at the specified txg value.
96 * the completion txg to the next txg. This is necessary
124 boolean_t scn_suspending; /* scan is suspending until next txg */
137 /* per txg statistics */
138 uint64_t scn_visited_this_txg; /* total bps visited this txg */
162 int dsl_scan_init(struct dsl_pool *dp, uint64_t txg);
169 void dsl_resilver_restart(struct dsl_pool *, uint64_t txg);
vdev_indirect_births.h
68 uint64_t offset, uint64_t txg, dmu_tx_t *tx);
zil.h
54 * with a common structure that defines the type, length, and txg.
62 uint64_t zh_claim_txg; /* txg in which log blocks were claimed */
103 * a single list in the zl_itxg[] that uses a high txg: ZILTEST_TXG
235 uint64_t lr_gen; /* generation (txg of creation) */
354 * When the txg commits the block is linked in.
389 uint64_t txg);
391 uint64_t txg);
397 zil_parse_lr_func_t *parse_lr_func, void *arg, uint64_t txg);
435 extern void zil_lwb_add_txg(struct lwb *lwb, uint64_t txg);
arc.h
187 zio_t *arc_write(zio_t *pio, spa_t *spa, uint64_t txg,
197 int arc_tempreserve_space(spa_t *spa, uint64_t reserve, uint64_t txg);
bpobj.h
31 #include <sys/txg.h>
/freebsd-11-stable/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/
uberblock.c
47 uberblock_update(uberblock_t *ub, vdev_t *rvd, uint64_t txg) argument
49 ASSERT(ub->ub_txg < txg);
56 ub->ub_txg = txg;
62 return (ub->ub_rootbp.blk_birth == txg);
txg.c
42 * these transaction groups. Each successive transaction group (txg) is
45 * there may be an active txg associated with each state; each active txg may
47 * be up to three active txgs, and there is always a txg in the open state
50 * accepted into the txg in the open state, and are completed while the txg is
56 * When a new txg becomes active, it first enters the open state. New
58 * currently open txg. There is always a txg in the open state so that ZFS can
59 * accept new changes (though the txg ma
123 txg_init(dsl_pool_t *dp, uint64_t txg) argument
301 uint64_t txg; local
358 txg_quiesce(dsl_pool_t *dp, uint64_t txg) argument
415 txg_dispatch_callbacks(dsl_pool_t *dp, uint64_t txg) argument
492 uint64_t txg; local
569 uint64_t txg; local
614 txg_delay(dsl_pool_t *dp, uint64_t txg, hrtime_t delay, hrtime_t resolution) argument
640 txg_wait_synced(dsl_pool_t *dp, uint64_t txg) argument
665 txg_wait_open(dsl_pool_t *dp, uint64_t txg) argument
730 txg_verify(spa_t *spa, uint64_t txg) argument
769 txg_list_empty(txg_list_t *tl, uint64_t txg) argument
798 txg_list_add(txg_list_t *tl, void *p, uint64_t txg) argument
823 txg_list_add_tail(txg_list_t *tl, void *p, uint64_t txg) argument
851 txg_list_remove(txg_list_t *tl, uint64_t txg) argument
876 txg_list_remove_this(txg_list_t *tl, void *p, uint64_t txg) argument
900 txg_list_member(txg_list_t *tl, void *p, uint64_t txg) argument
913 txg_list_head(txg_list_t *tl, uint64_t txg) argument
923 txg_list_next(txg_list_t *tl, void *p, uint64_t txg) argument
[all...]
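
Besides the open/quiescing/syncing state machine described in its header comment, txg.c also provides the txg_list helpers matched above (txg_list_add(), txg_list_remove(), txg_list_head(), txg_list_next()). The sketch below is a hedged illustration of how sync-side code typically drains such a per-txg dirty list (compare dsl_pool_sync() and vdev_sync()); the function and its body are illustrative, not taken from this file.

#include <sys/txg.h>

static void
drain_dirty_list(txg_list_t *tl, uint64_t txg)
{
	void *obj;

	/*
	 * Entries were queued with txg_list_add(tl, obj, txg) while the txg
	 * was open; only the bucket for this txg is walked and emptied here.
	 */
	while ((obj = txg_list_remove(tl, txg)) != NULL) {
		/* ... write obj out as part of this txg's sync ... */
	}
}
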
vdev_removal.c
36 #include <sys/txg.h>
221 * This is called as a synctask in the txg in which we will mark this vdev
240 uint64_t txg = dmu_tx_get_txg(tx); local
297 * Space which we are freeing this txg does not need to
312 ASSERT3P(txg_list_head(&vd->vdev_ms_list, TXG_CLEAN(txg)), ==, NULL);
341 zfs_dbgmsg("starting removal thread for vdev %llu (%p) in txg %llu "
487 uint64_t txg = spa_syncing_txg(spa); local
550 * this txg and iterating forward, we might find that this region
554 int txgoff = (txg + i) & TXG_MASK;
558 * will be synced in txg
721 uint64_t txg = dmu_tx_get_txg(tx); local
909 spa_vdev_copy_segment(vdev_t *vd, range_tree_t *segs, uint64_t maxalloc, uint64_t txg, vdev_copy_arg_t *vca, zio_alloc_list_t *zal) argument
1093 vdev_remove_replace_with_indirect(vdev_t *vd, uint64_t txg) argument
1140 uint64_t txg; local
1213 uint64_t txg = dmu_tx_get_txg(tx); local
1476 uint64_t txg = dmu_tx_get_txg(tx); local
1772 spa_vdev_remove_log(vdev_t *vd, uint64_t *txg) argument
1951 spa_vdev_remove_top(vdev_t *vd, uint64_t *txg) argument
2040 uint64_t txg = 0; local
[all...]
dsl_pool.c
83 * txg (dp_dirty_pertxg[]) and poolwide (dp_dirty_total) accounting of
86 * relevant, the per-txg value is useful for debugging. The tunable
91 * ensure that there is a txg syncing (see the comment in txg.c for a full
111 * If there is at least this much dirty data, push out a txg.
195 "Force a txg if the number of dirty buffer bytes exceed this value");
283 dsl_pool_open_impl(spa_t *spa, uint64_t txg) argument
292 txg_init(dp, txg);
325 dsl_pool_init(spa_t *spa, uint64_t txg, dsl_pool_t **dpp) argument
328 dsl_pool_t *dp = dsl_pool_open_impl(spa, txg);
538 dsl_pool_create(spa_t *spa, nvlist_t *zplprops, uint64_t txg) argument
656 dsl_early_sync_task_verify(dsl_pool_t *dp, uint64_t txg) argument
677 dsl_pool_sync(dsl_pool_t *dp, uint64_t txg) argument
830 dsl_pool_sync_done(dsl_pool_t *dp, uint64_t txg) argument
951 dsl_pool_undirty_space(dsl_pool_t *dp, int64_t space, uint64_t txg) argument
[all...]
zil.c
52 * (txg), at which point they can be discarded; or
319 zil_parse_lr_func_t *parse_lr_func, void *arg, uint64_t txg)
358 if ((error = parse_blk_func(zilog, &blk, arg, txg)) != 0)
377 if ((error = parse_lr_func(zilog, lr, arg, txg)) != 0)
408 * checkpoint, each ZIL block whose txg is later than the txg
504 zil_alloc_lwb(zilog_t *zilog, blkptr_t *bp, boolean_t slog, uint64_t txg) argument
514 lwb->lwb_max_txg = txg;
566 zilog_dirty(zilog_t *zilog, uint64_t txg) argument
576 if (txg_list_add(&dp->dp_dirty_zilogs, zilog, txg)) {
318 zil_parse(zilog_t *zilog, zil_parse_blk_func_t *parse_blk_func, zil_parse_lr_func_t *parse_lr_func, void *arg, uint64_t txg) argument
592 zilog_is_dirty_in_txg(zilog_t *zilog, uint64_t txg) argument
625 uint64_t txg = 0; local
701 uint64_t txg; local
1008 zil_lwb_add_txg(lwb_t *lwb, uint64_t txg) argument
1275 uint64_t txg; local
1436 uint64_t dlen, dnow, lwb_sp, reclen, txg, max_log_data; local
1686 uint64_t otxg, txg; local
1731 uint64_t txg; local
1860 uint64_t otxg, txg; local
1906 uint64_t otxg, txg; local
2066 uint64_t txg = lrc->lrc_txg; local
2797 uint64_t txg = dmu_tx_get_txg(tx); local
3010 uint64_t txg; local
[all...]
trim_map.c
59 list_t tm_head; /* List of segments sorted by txg. */
73 uint64_t ts_txg; /* Segment creation txg. */
204 trim_map_segment_add(trim_map_t *tm, uint64_t start, uint64_t end, uint64_t txg) argument
221 trim_map_segment_add(tm, start, ts->ts_start, txg);
223 trim_map_segment_add(tm, ts->ts_end, end, txg);
238 ts_after->ts_txg = txg;
245 ts_before->ts_txg = txg;
251 ts_after->ts_txg = txg;
258 ts->ts_txg = txg;
301 trim_map_free_locked(trim_map_t *tm, uint64_t start, uint64_t end, uint64_t txg) argument
322 trim_map_free(vdev_t *vd, uint64_t offset, uint64_t size, uint64_t txg) argument
414 trim_map_first(trim_map_t *tm, uint64_t txg, uint64_t txgsafe, hrtime_t time, boolean_t force) argument
[all...]
vdev_label.c
123 * txg Transaction group in which this label was written
545 * which don't have a txg value stored on their label (i.e. spares/cache)
546 * or have not been completely initialized (txg = 0) just return
549 * 'txg' value.
552 vdev_label_read_config(vdev_t *vd, uint64_t txg) argument
587 * Auxiliary vdevs won't have txg values in their
598 } else if (label_txg <= txg && label_txg > best_txg) {
617 * We found a valid label but it didn't pass txg restrictions.
620 vdev_dbgmsg(vd, "label discarded as txg is too large "
622 (u_longlong_t)txg);
639 uint64_t state, pool_guid, device_guid, txg, spare_pool; local
899 uint64_t txg = 0ULL; local
1285 vdev_label_sync(zio_t *zio, uint64_t *good_writes, vdev_t *vd, int l, uint64_t txg, int flags) argument
1332 vdev_label_sync_list(spa_t *spa, int l, uint64_t txg, int flags) argument
1385 vdev_config_sync(vdev_t **svd, int svdcount, uint64_t txg) argument
[all...]
spa_config.c
386 spa_config_generate(spa_t *spa, vdev_t *vd, uint64_t txg, int getstats) argument
405 * If txg is -1, report the current value of spa->spa_config_txg.
407 if (txg == -1ULL)
408 txg = spa->spa_config_txg;
432 fnvlist_add_uint64(config, ZPOOL_CONFIG_POOL_TXG, txg);
543 uint64_t txg; local
549 txg = spa_last_synced_txg(spa) + 1;
578 vdev_expand(tvd, txg);
586 txg_wait_synced(spa->spa_dsl_pool, txg);
metaslab.c
584 metaslab_verify_space(metaslab_t *msp, uint64_t txg) argument
602 if (txg != spa_syncing_txg(spa) || msp->ms_sm == NULL ||
615 range_tree_space(msp->ms_allocating[(txg + t) & TXG_MASK]);
661 * have any space until we finish syncing out this txg.
1543 metaslab_init(metaslab_group_t *mg, uint64_t id, uint64_t object, uint64_t txg, argument
1592 * If we're opening an existing pool (txg == 0) or creating
1593 * a new one (txg == TXG_INITIAL), all space is available now.
1595 * does not become available until after this txg has synced.
1597 * out this txg. This ensures that we don't attempt to allocate
1600 if (txg <
1736 uint64_t txg = spa_syncing_txg(spa); local
2324 metaslab_condense(metaslab_t *msp, uint64_t txg, dmu_tx_t *tx) argument
2402 metaslab_sync(metaslab_t *msp, uint64_t txg) argument
2618 metaslab_sync_done(metaslab_t *msp, uint64_t txg) argument
2976 metaslab_block_alloc(metaslab_t *msp, uint64_t size, uint64_t txg) argument
3084 metaslab_group_alloc_normal(metaslab_group_t *mg, zio_alloc_list_t *zal, uint64_t asize, uint64_t txg, uint64_t min_distance, dva_t *dva, int d, int allocator) argument
3286 metaslab_group_alloc(metaslab_group_t *mg, zio_alloc_list_t *zal, uint64_t asize, uint64_t txg, uint64_t min_distance, dva_t *dva, int d, int allocator) argument
3333 metaslab_alloc_dva(spa_t *spa, metaslab_class_t *mc, uint64_t psize, dva_t *dva, int d, dva_t *hintdva, uint64_t txg, int flags, zio_alloc_list_t *zal, int allocator) argument
3766 metaslab_unalloc_dva(spa_t *spa, const dva_t *dva, uint64_t txg) argument
3889 metaslab_claim_concrete(vdev_t *vd, uint64_t offset, uint64_t size, uint64_t txg) argument
3960 metaslab_claim_impl(vdev_t *vd, uint64_t offset, uint64_t size, uint64_t txg) argument
3994 metaslab_claim_dva(spa_t *spa, const dva_t *dva, uint64_t txg) argument
4014 metaslab_alloc(spa_t *spa, metaslab_class_t *mc, uint64_t psize, blkptr_t *bp, int ndvas, uint64_t txg, blkptr_t *hintbp, int flags, zio_alloc_list_t *zal, zio_t *zio, int allocator) argument
4071 metaslab_free(spa_t *spa, const blkptr_t *bp, uint64_t txg, boolean_t now) argument
4102 ASSERT3U(spa_syncing_txg(spa), ==, txg); local
4121 metaslab_claim(spa_t *spa, const blkptr_t *bp, uint64_t txg) argument
[all...]
spa.c
62 #include <sys/txg.h>
1993 * current txg so that the "stubby" block can be removed
2677 spa_load_note(spa, "using uberblock with txg=%llu",
3430 * This must all happen in a single txg.
3625 * We need to update the txg and timestamp of the checkpointed
3794 * Retrieve the checkpoint txg if the pool has a checkpoint.
3903 * In case of a checkpoint rewind, log the original txg
3908 NULL, "rewound state to txg=%llu",
3991 spa_load_note(spa, "spa_load_retry: rewind, max txg: %llu",
3998 * If spa_load() fails this function will try loading prior txg'
4720 uint64_t txg = TXG_INITIAL; local
5034 spa_alt_rootvdev(vdev_t *vd, vdev_t **avd, uint64_t *txg) argument
5079 uint64_t guid, txg; local
5211 uint64_t txg; local
5308 uint64_t txg; local
5860 uint64_t txg, id; local
6005 uint64_t txg, dtl_max_txg; local
6219 uint64_t txg; local
6559 uint64_t txg, *glist; local
7925 spa_sync(spa_t *spa, uint64_t txg) argument
[all...]
vdev.c
1180 vdev_metaslab_init(vdev_t *vd, uint64_t txg) argument
1190 ASSERT(txg == 0 || spa_config_held(spa, SCL_ALLOC, RW_WRITER));
1219 if (txg == 0 && vd->vdev_ms_array != 0) {
1230 error = metaslab_init(vd->vdev_mg, m, object, txg,
1239 if (txg == 0)
1250 if (txg == 0)
1770 uint64_t txg; local
1789 * was modified at a point after the current txg.
1790 * If config lock is not held do not check for the txg. spa_sync could
1795 txg
2166 vdev_create(vdev_t *vd, uint64_t txg, boolean_t isreplacing) argument
2281 vdev_dirty(vdev_t *vd, int flags, void *arg, uint64_t txg) argument
2299 vdev_dirty_leaves(vdev_t *vd, int flags, uint64_t txg) argument
2347 vdev_dtl_dirty(vdev_t *vd, vdev_dtl_type_t t, uint64_t txg, uint64_t size) argument
2362 vdev_dtl_contains(vdev_t *vd, vdev_dtl_type_t t, uint64_t txg, uint64_t size) argument
2493 vdev_dtl_reassess(vdev_t *vd, uint64_t txg, uint64_t scrub_txg, int scrub_done) argument
2690 vdev_dtl_sync(vdev_t *vd, uint64_t txg) argument
3034 vdev_remove_empty(vdev_t *vd, uint64_t txg) argument
3093 vdev_sync_done(vdev_t *vd, uint64_t txg) argument
3109 vdev_sync(vdev_t *vd, uint64_t txg) argument
3717 uint64_t txg = zio->io_txg; local
4306 vdev_expand(vdev_t *vd, uint64_t txg) argument
[all...]
/freebsd-11-stable/cddl/contrib/opensolaris/cmd/ztest/
ztest.c
66 * the transaction group number is less than the current, open txg.
92 #include <sys/txg.h>
404 * The callbacks are ordered by txg number.
1242 uint64_t txg; local
1260 txg = dmu_tx_get_txg(tx);
1261 ASSERT(txg != 0);
1262 return (txg);
1290 uint64_t offset, uint64_t gen, uint64_t txg, uint64_t crtxg)
1297 bt->bt_txg = txg;
1303 uint64_t offset, uint64_t gen, uint64_t txg, uint64_
1289 ztest_bt_generate(ztest_block_tag_t *bt, objset_t *os, uint64_t object, uint64_t offset, uint64_t gen, uint64_t txg, uint64_t crtxg) argument
1302 ztest_bt_verify(ztest_block_tag_t *bt, objset_t *os, uint64_t object, uint64_t offset, uint64_t gen, uint64_t txg, uint64_t crtxg) argument
1450 uint64_t txg; local
1535 uint64_t object, txg; local
1589 uint64_t gen, txg, lrtxg, crtxg; local
1713 uint64_t txg; local
1756 uint64_t txg, lrtxg, crtxg; local
1874 uint64_t txg = lr->lr_common.lrc_txg; local
2167 uint64_t txg; local
2184 txg_wait_synced(dmu_objset_pool(os), txg); local
3753 uint64_t n, s, txg; local
3972 compare_and_update_pbbufs(uint64_t s, bufwad_t *packbuf, bufwad_t *bigbuf, uint64_t bigsize, uint64_t n, uint64_t chunksize, uint64_t txg) argument
4028 uint64_t n, s, txg; local
4316 uint64_t txg, last_txg; local
4455 uint64_t object, txg; local
4496 uint64_t txg, object, count, wsize, wc, zl_wsize, zl_wc; local
4659 ztest_create_cb_data(objset_t *os, uint64_t txg) argument
4688 uint64_t old_txg, txg; local
5278 uint64_t object, blocksize, txg, pattern, psize; local
5348 txg_wait_synced(spa_get_dsl(spa), txg); local
[all...]
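
The ztest.c hits above also exercise DMU commit callbacks ("The callbacks are ordered by txg number"), which fire once the txg carrying a transaction has made it to disk. The sketch below assumes the dmu_tx_callback_register() interface from sys/dmu.h; the callback and its argument are invented for the example.

#include <sys/dmu.h>
#include <sys/dmu_tx.h>

static void
my_commit_cb(void *arg, int error)
{
	/*
	 * Invoked after the tx's txg has synced (error == 0), or with a
	 * nonzero error (e.g. ECANCELED) if the transaction was aborted.
	 */
}

static void
register_example_cb(dmu_tx_t *tx, void *arg)
{
	/* Typically registered after dmu_tx_assign() and before commit. */
	dmu_tx_callback_register(tx, my_commit_cb, arg);
}
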

Completed in 360 milliseconds
