Lines matching refs:trans

22 static void verify_update_old_key(struct btree_trans *trans, struct btree_insert_entry *i)
25 struct bch_fs *c = trans->c;
27 struct bkey_s_c k = bch2_btree_path_peek_slot_exact(trans->paths + i->path, &u);
29 if (unlikely(trans->journal_replay_not_finished)) {
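
verify_update_old_key() above is a debug recheck: it peeks the slot the update points at and confirms that the old key cached in the insert entry still matches what is in the tree. A minimal standalone sketch of that recheck, using a hypothetical simplified struct key in place of a real bkey:

    #include <assert.h>

    /* Hypothetical simplified key; the real check compares full bkeys. */
    struct key { int id; int val; };

    /* Re-read the key at the update's position and confirm the old value
     * cached in the insert entry still matches what's in the tree. */
    static void verify_old_key(const struct key *cached_old,
                               const struct key *in_tree)
    {
        assert(cached_old->id == in_tree->id &&
               cached_old->val == in_tree->val);
    }

    int main(void)
    {
        struct key in_tree = { 1, 42 };
        struct key cached  = in_tree;   /* snapshot taken when the update was queued */

        verify_old_key(&cached, &in_tree);
        return 0;
    }
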
45 static inline struct btree_path_level *insert_l(struct btree_trans *trans, struct btree_insert_entry *i)
47 return (trans->paths + i->path)->l + i->level;
50 static inline bool same_leaf_as_prev(struct btree_trans *trans,
53 return i != trans->updates &&
54 insert_l(trans, &i[0])->b == insert_l(trans, &i[-1])->b;
57 static inline bool same_leaf_as_next(struct btree_trans *trans,
60 return i + 1 < trans->updates + trans->nr_updates &&
61 insert_l(trans, &i[0])->b == insert_l(trans, &i[1])->b;
64 inline void bch2_btree_node_prep_for_write(struct btree_trans *trans,
68 struct bch_fs *c = trans->c;
72 bch2_trans_node_reinit_iter(trans, b);
79 bch2_btree_init_next(trans, b);
82 static noinline int trans_lock_write_fail(struct btree_trans *trans, struct btree_insert_entry *i)
84 while (--i >= trans->updates) {
85 if (same_leaf_as_prev(trans, i))
88 bch2_btree_node_unlock_write(trans, trans->paths + i->path, insert_l(trans, i)->b);
91 trace_and_count(trans->c, trans_restart_would_deadlock_write, trans);
92 return btree_trans_restart(trans, BCH_ERR_transaction_restart_would_deadlock_write);
95 static inline int bch2_trans_lock_write(struct btree_trans *trans)
97 EBUG_ON(trans->write_locked);
99 trans_for_each_update(trans, i) {
100 if (same_leaf_as_prev(trans, i))
103 if (bch2_btree_node_lock_write(trans, trans->paths + i->path, &insert_l(trans, i)->b->c))
104 return trans_lock_write_fail(trans, i);
107 bch2_btree_node_prep_for_write(trans, trans->paths + i->path, insert_l(trans, i)->b);
110 trans->write_locked = true;
114 static inline void bch2_trans_unlock_write(struct btree_trans *trans)
116 if (likely(trans->write_locked)) {
117 trans_for_each_update(trans, i)
118 if (!same_leaf_as_prev(trans, i))
119 bch2_btree_node_unlock_write_inlined(trans,
120 trans->paths + i->path, insert_l(trans, i)->b);
121 trans->write_locked = false;
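
Taken together, bch2_trans_lock_write(), trans_lock_write_fail() and bch2_trans_unlock_write() implement an all-or-nothing write-locking protocol over the sorted update list: lock each distinct leaf once (skipping entries that share a leaf with the previous entry), unwind in reverse on failure, and release with the same skip rule on the way out. A compilable sketch of that shape, with hypothetical stand-ins (struct update, try_lock_write()) rather than the real six-lock API:

    #include <stdbool.h>
    #include <stdio.h>

    struct update { int leaf; };   /* hypothetical: leaf node this update lands in */

    static int same_leaf_as_prev(struct update *upd, struct update *i)
    {
        /* Updates are sorted, so equal leaves are always adjacent. */
        return i != upd && i[0].leaf == i[-1].leaf;
    }

    /* Hypothetical stubs; the real code takes write locks on btree nodes. */
    static bool try_lock_write(int leaf) { printf("lock %d\n", leaf); return true; }
    static void unlock_write(int leaf)   { printf("unlock %d\n", leaf); }

    /* i points at the first entry whose write lock could NOT be taken. */
    static int lock_write_fail(struct update *upd, struct update *i)
    {
        /* Unwind in reverse, skipping entries whose leaf was locked by an
         * earlier entry; mirrors trans_lock_write_fail() above. */
        while (--i >= upd)
            if (!same_leaf_as_prev(upd, i))
                unlock_write(i->leaf);
        return -1;                   /* stand-in for the would-deadlock restart */
    }

    static int trans_lock_write(struct update *upd, int n)
    {
        for (struct update *i = upd; i < upd + n; i++) {
            if (same_leaf_as_prev(upd, i))
                continue;            /* leaf already locked via the previous entry */
            if (!try_lock_write(i->leaf))
                return lock_write_fail(upd, i);
        }
        return 0;
    }

    int main(void)
    {
        struct update upd[] = { {1}, {1}, {2}, {3} };   /* sorted by leaf */
        return trans_lock_write(upd, 4);
    }
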
128 bool bch2_btree_bset_insert_key(struct btree_trans *trans,
170 bch2_btree_path_fix_key_modified(trans, b, k);
188 bch2_btree_path_fix_key_modified(trans, b, k);
198 bch2_btree_node_iter_fix(trans, path, b, node_iter, k,
209 struct btree_trans *trans = bch2_trans_get(c);
213 btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_read);
232 bch2_trans_put(trans);
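
The fragment above also shows the transaction lifecycle: bch2_trans_get(c) allocates the transaction, work happens under node locks, and bch2_trans_put(trans) releases it. A trivial sketch of that paired get/put discipline with hypothetical stubs:

    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical stand-ins for bch2_trans_get()/bch2_trans_put(); the
     * real calls allocate and release a btree_trans tied to the fs. */
    struct trans { int n_locks; };

    static struct trans *trans_get(void)   { return calloc(1, sizeof(struct trans)); }
    static void trans_put(struct trans *t) { free(t); }

    int main(void)
    {
        struct trans *trans = trans_get();
        if (!trans)
            return 1;

        /* ... lock a node for read, inspect it, unlock ... */
        printf("working under trans %p\n", (void *) trans);

        trans_put(trans);   /* every trans_get() is paired with a trans_put() */
        return 0;
    }
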
259 * @trans: btree transaction object
264 inline void bch2_btree_insert_key_leaf(struct btree_trans *trans,
269 struct bch_fs *c = trans->c;
277 if (unlikely(!bch2_btree_bset_insert_key(trans, path, b,
300 bch2_trans_node_reinit_iter(trans, b);
307 static inline void btree_insert_entry_checks(struct btree_trans *trans,
310 struct btree_path *path = trans->paths + i->path;
319 test_bit(JOURNAL_REPLAY_DONE, &trans->c->journal.flags) &&
321 bch2_snapshot_is_internal_node(trans->c, i->k->k.p.snapshot) > 0);
324 static __always_inline int bch2_trans_journal_res_get(struct btree_trans *trans,
327 return bch2_journal_res_get(&trans->c->journal, &trans->journal_res,
328 trans->journal_u64s, flags);
333 static noinline void journal_transaction_name(struct btree_trans *trans)
335 struct bch_fs *c = trans->c;
338 bch2_journal_add_entry(j, &trans->journal_res,
344 strncpy(l->d, trans->fn, JSET_ENTRY_LOG_U64s * sizeof(u64));
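
journal_transaction_name() copies trans->fn into a journal log entry whose size is expressed in u64s, which is why the strncpy bound is JSET_ENTRY_LOG_U64s * sizeof(u64). A standalone sketch of that bounded copy (LOG_U64S here is a hypothetical stand-in for the real constant); note that strncpy() does not NUL-terminate when it truncates:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define LOG_U64S 4   /* hypothetical; the real bound is JSET_ENTRY_LOG_U64s */

    int main(void)
    {
        /* Log entries are sized in u64s, so the byte budget for the
         * transaction-name string is u64s * sizeof(u64). */
        char d[LOG_U64S * sizeof(uint64_t)];

        strncpy(d, "bch2_example_transaction_fn", sizeof(d));

        /* strncpy() does not NUL-terminate on truncation, so print with
         * an explicit bound rather than trusting a terminator. */
        printf("%.*s\n", (int) sizeof(d), d);
        return 0;
    }
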
347 static inline int btree_key_can_insert(struct btree_trans *trans,
357 btree_key_can_insert_cached_slowpath(struct btree_trans *trans, unsigned flags,
364 bch2_trans_unlock_write(trans);
365 bch2_trans_unlock(trans);
369 bch_err(trans->c, "error allocating memory for key cache key, btree %s u64s %u",
374 ret = bch2_trans_relock(trans) ?:
375 bch2_trans_lock_write(trans);
383 trans_for_each_update(trans, i)
393 static int btree_key_can_insert_cached(struct btree_trans *trans, unsigned flags,
396 struct bch_fs *c = trans->c;
421 return btree_key_can_insert_cached_slowpath(trans, flags, path, new_u64s);
423 trans_for_each_update(trans, i)
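
btree_key_can_insert_cached_slowpath() follows a pattern that recurs in this file: drop the write locks and then all transaction locks before a potentially blocking allocation, then relock and propagate a restart if relocking fails. A hedged standalone sketch with hypothetical lock stubs:

    #include <errno.h>
    #include <stdlib.h>

    /* Hypothetical stubs for the trans lock API. */
    static void unlock_all(void) {}
    static int  relock_all(void) { return 0; }   /* nonzero would mean "restart" */

    /* Shape of the slowpath: drop locks before a blocking allocation,
     * then relock and propagate any restart to the caller. */
    static int can_insert_slowpath(void **bufp, size_t new_bytes)
    {
        unlock_all();            /* don't hold node locks across a blocking alloc */

        void *buf = malloc(new_bytes);
        if (!buf)
            return -ENOMEM;

        int ret = relock_all();  /* may fail: caller must handle a restart */
        if (ret) {
            free(buf);
            return ret;
        }

        *bufp = buf;
        return 0;
    }

    int main(void)
    {
        void *buf = NULL;
        int ret = can_insert_slowpath(&buf, 128);

        free(buf);
        return ret;
    }
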
434 static int run_one_mem_trigger(struct btree_trans *trans,
444 verify_update_old_key(trans, i);
450 ret = bch2_key_trigger(trans, i->btree_id, i->level,
454 ret = bch2_key_trigger_new(trans, i->btree_id, i->level,
456 bch2_key_trigger_old(trans, i->btree_id, i->level,
463 static int run_one_trans_trigger(struct btree_trans *trans, struct btree_insert_entry *i,
477 verify_update_old_key(trans, i);
488 return bch2_key_trigger(trans, i->btree_id, i->level, old, bkey_i_to_s(i->k),
493 return bch2_key_trigger_old(trans, i->btree_id, i->level, old, flags) ?: 1;
496 return bch2_key_trigger_new(trans, i->btree_id, i->level, bkey_i_to_s(i->k), flags) ?: 1;
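
The "?: 1" returns in run_one_trans_trigger() use the GNU C conditional with omitted middle operand: "a ?: b" evaluates to a when a is nonzero, else b, so a successful trigger (0) becomes 1, meaning "a trigger ran", while errors pass through unchanged. A small demonstration (requires GNU C, i.e. gcc or clang in their default mode):

    #include <stdio.h>

    /* Hypothetical trigger returning 0 on success, negative on error. */
    static int trigger_old(void) { return 0; }

    int main(void)
    {
        /* 0 -> 1 ("a trigger ran"); a nonzero error would pass through. */
        int ret = trigger_old() ?: 1;

        printf("%d\n", ret);   /* prints 1 */
        return 0;
    }
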
502 static int run_btree_triggers(struct btree_trans *trans, enum btree_id btree_id,
518 i < trans->nr_updates && trans->updates[i].btree_id <= btree_id;
520 if (trans->updates[i].btree_id != btree_id)
523 ret = run_one_trans_trigger(trans, trans->updates + i, overwrite);
535 static int bch2_trans_commit_run_triggers(struct btree_trans *trans)
551 while (btree_id_start < trans->nr_updates &&
552 trans->updates[btree_id_start].btree_id < btree_id)
555 ret = run_btree_triggers(trans, btree_id, btree_id_start);
560 for (unsigned idx = 0; idx < trans->nr_updates; idx++) {
561 struct btree_insert_entry *i = trans->updates + idx;
566 ret = run_btree_triggers(trans, BTREE_ID_alloc, idx);
574 trans_for_each_update(trans, i)
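
run_btree_triggers() and bch2_trans_commit_run_triggers() walk the update list, which is kept sorted by btree_id, with a two-pointer scan: advance btree_id_start past smaller ids, then run every update matching the current id. A simplified standalone sketch of that scan (the real loop also makes repeated passes and handles the alloc btree specially):

    #include <stdio.h>

    struct update { int btree_id; };   /* hypothetical simplified entry */

    /* Run triggers for all updates with a given btree_id, starting from
     * the first index that could match; mirrors run_btree_triggers(). */
    static void run_btree_triggers(struct update *upd, int n,
                                   int btree_id, int start)
    {
        for (int i = start; i < n && upd[i].btree_id <= btree_id; i++) {
            if (upd[i].btree_id != btree_id)
                continue;
            printf("trigger: update %d (btree %d)\n", i, btree_id);
        }
    }

    int main(void)
    {
        struct update upd[] = { {0}, {0}, {2}, {5} };   /* sorted by btree_id */
        int n = 4, start = 0;

        for (int btree_id = 0; btree_id <= 5; btree_id++) {
            while (start < n && upd[start].btree_id < btree_id)
                start++;   /* skip past smaller ids: the array is sorted */
            run_btree_triggers(upd, n, btree_id, start);
        }
        return 0;
    }
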
582 static noinline int bch2_trans_commit_run_gc_triggers(struct btree_trans *trans)
584 trans_for_each_update(trans, i) {
592 gc_visited(trans->c, gc_pos_btree_node(insert_l(trans, i)->b))) {
593 int ret = run_one_mem_trigger(trans, i, i->flags|BTREE_TRIGGER_GC);
603 bch2_trans_commit_write_locked(struct btree_trans *trans, unsigned flags,
607 struct bch_fs *c = trans->c;
613 trace_and_count(c, trans_restart_fault_inject, trans, trace_ip);
614 return btree_trans_restart_nounlock(trans, BCH_ERR_transaction_restart_fault_inject);
623 prefetch(&trans->c->journal.flags);
625 trans_for_each_update(trans, i) {
627 if (!same_leaf_as_prev(trans, i))
632 ? btree_key_can_insert(trans, insert_l(trans, i)->b, u64s)
633 : btree_key_can_insert_cached(trans, flags, trans->paths + i->path, u64s);
647 ret = bch2_trans_journal_res_get(trans,
653 if (unlikely(trans->journal_transaction_names))
654 journal_transaction_name(trans);
665 trans_for_each_update(trans, i)
666 i->k->k.version.lo = trans->journal_res.seq;
668 trans_for_each_update(trans, i)
672 if (trans->fs_usage_deltas &&
673 bch2_trans_fs_usage_apply(trans, trans->fs_usage_deltas))
677 bch2_trans_account_disk_usage_change(trans);
679 h = trans->hooks;
681 ret = h->fn(trans, h);
687 trans_for_each_update(trans, i)
689 ret = run_one_mem_trigger(trans, i, BTREE_TRIGGER_ATOMIC|i->flags);
695 ret = bch2_trans_commit_run_gc_triggers(trans);
704 trans_for_each_update(trans, i) {
711 verify_update_old_key(trans, i);
713 if (trans->journal_transaction_names) {
714 entry = bch2_journal_add_entry(j, &trans->journal_res,
722 entry = bch2_journal_add_entry(j, &trans->journal_res,
729 memcpy_u64s_small(journal_res_entry(&c->journal, &trans->journal_res),
730 trans->journal_entries,
731 trans->journal_entries_u64s);
733 trans->journal_res.offset += trans->journal_entries_u64s;
734 trans->journal_res.u64s -= trans->journal_entries_u64s;
736 if (trans->journal_seq)
737 *trans->journal_seq = trans->journal_res.seq;
740 trans_for_each_update(trans, i) {
741 struct btree_path *path = trans->paths + i->path;
744 bch2_btree_insert_key_leaf(trans, path, i->k, trans->journal_res.seq);
746 bch2_btree_insert_key_cached(trans, flags, i);
748 bch2_btree_key_cache_drop(trans, path);
757 if (trans->fs_usage_deltas)
758 bch2_trans_fs_usage_revert(trans, trans->fs_usage_deltas);
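
bch2_trans_commit_write_locked() applies the accumulated fs-usage deltas before running triggers and hooks, and the failure path reverts them, keeping accounting transactional with the rest of the commit. A minimal apply/revert sketch with a hypothetical counter:

    #include <stdbool.h>
    #include <stdio.h>

    static long fs_usage;   /* hypothetical stand-in for fs usage accounting */

    static void usage_apply(long delta)  { fs_usage += delta; }
    static void usage_revert(long delta) { fs_usage -= delta; }

    /* Everything between apply and the commit point must be revertible. */
    static int commit_write_locked(long delta, bool fail_later)
    {
        usage_apply(delta);

        if (fail_later) {            /* e.g. a trigger or commit hook errored */
            usage_revert(delta);
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        commit_write_locked(10, true);
        printf("fs_usage after failed commit: %ld\n", fs_usage);   /* 0 */
        return 0;
    }
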
762 static noinline void bch2_drop_overwrites_from_journal(struct btree_trans *trans)
764 trans_for_each_update(trans, i)
765 bch2_journal_key_overwritten(trans->c, i->btree_id, i->level, i->k->k.p);
768 static noinline int bch2_trans_commit_bkey_invalid(struct btree_trans *trans,
773 struct bch_fs *c = trans->c;
777 trans->fn, (void *) i->ip_allocated);
788 bch2_dump_trans_updates(trans);
793 static noinline int bch2_trans_commit_journal_entry_invalid(struct btree_trans *trans,
796 struct bch_fs *c = trans->c;
799 prt_printf(&buf, "invalid bkey on insert from %s", trans->fn);
809 bch2_dump_trans_updates(trans);
823 static inline int do_bch2_trans_commit(struct btree_trans *trans, unsigned flags,
827 struct bch_fs *c = trans->c;
830 for (unsigned idx = 0; idx < trans->nr_updates; idx++) {
831 struct btree_insert_entry *i = trans->updates + idx;
838 if (!same_leaf_as_next(trans, i)) {
840 ret = bch2_foreground_maybe_merge(trans, i->path,
850 ret = bch2_trans_lock_write(trans);
854 ret = bch2_trans_commit_write_locked(trans, flags, stopped_at, trace_ip);
856 if (!ret && unlikely(trans->journal_replay_not_finished))
857 bch2_drop_overwrites_from_journal(trans);
859 bch2_trans_unlock_write(trans);
861 if (!ret && trans->journal_pin)
862 bch2_journal_pin_add(&c->journal, trans->journal_res.seq,
863 trans->journal_pin,
871 bch2_journal_res_put(&c->journal, &trans->journal_res);
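
do_bch2_trans_commit() fixes the ordering around the critical section: take the write locks, run the locked commit, drop the locks whether or not it succeeded, and only on success pin the journal sequence number the commit landed in. A sketch of that shape with hypothetical stubs:

    #include <stdio.h>

    /* Hypothetical stubs mirroring the shape of do_bch2_trans_commit(). */
    static int  lock_write(void)          { return 0; }
    static int  commit_locked(void)       { return 0; }
    static void unlock_write(void)        { }
    static void journal_pin_add(long seq) { printf("pin seq %ld\n", seq); }

    static int do_commit(long *journal_seq)
    {
        int ret = lock_write();
        if (ret)
            return ret;

        ret = commit_locked();   /* journal reservation + key insertion */
        unlock_write();          /* locks drop whether or not the commit failed */

        if (!ret)
            journal_pin_add(*journal_seq);   /* pin only on success */
        return ret;
    }

    int main(void)
    {
        long seq = 42;
        return do_commit(&seq);
    }
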
887 int bch2_trans_commit_error(struct btree_trans *trans, unsigned flags,
891 struct bch_fs *c = trans->c;
896 ret = bch2_btree_split_leaf(trans, i->path, flags);
898 trace_and_count(c, trans_restart_btree_node_split, trans,
899 trace_ip, trans->paths + i->path);
902 ret = drop_locks_do(trans,
903 bch2_replicas_delta_list_mark(c, trans->fs_usage_deltas));
916 ret = drop_locks_do(trans,
917 bch2_trans_journal_res_get(trans,
922 bch2_trans_unlock(trans);
924 trace_and_count(c, trans_blocked_journal_reclaim, trans, trace_ip);
931 ret = bch2_trans_relock(trans);
938 BUG_ON(bch2_err_matches(ret, BCH_ERR_transaction_restart) != !!trans->restarted);
948 bch2_trans_commit_get_rw_cold(struct btree_trans *trans, unsigned flags)
950 struct bch_fs *c = trans->c;
957 ret = drop_locks_do(trans, bch2_fs_read_write_early(c));
971 do_bch2_trans_commit_to_journal_replay(struct btree_trans *trans)
973 struct bch_fs *c = trans->c;
976 trans_for_each_update(trans, i) {
985 int __bch2_trans_commit(struct btree_trans *trans, unsigned flags)
988 struct bch_fs *c = trans->c;
991 if (!trans->nr_updates &&
992 !trans->journal_entries_u64s)
995 memset(&trans->fs_usage_delta, 0, sizeof(trans->fs_usage_delta));
997 ret = bch2_trans_commit_run_triggers(trans);
1001 trans_for_each_update(trans, i) {
1010 ret = bch2_trans_commit_bkey_invalid(trans, invalid_flags, i, &buf);
1011 btree_insert_entry_checks(trans, i);
1018 for (struct jset_entry *i = trans->journal_entries;
1019 i != (void *) ((u64 *) trans->journal_entries + trans->journal_entries_u64s);
1029 ret = bch2_trans_commit_journal_entry_invalid(trans, i);
1036 ret = do_bch2_trans_commit_to_journal_replay(trans);
1042 ret = bch2_trans_commit_get_rw_cold(trans, flags);
1049 trans->journal_u64s = trans->journal_entries_u64s;
1050 trans->journal_transaction_names = READ_ONCE(c->opts.journal_transaction_names);
1051 if (trans->journal_transaction_names)
1052 trans->journal_u64s += jset_u64s(JSET_ENTRY_LOG_U64s);
1054 trans_for_each_update(trans, i) {
1055 struct btree_path *path = trans->paths + i->path;
1059 ret = bch2_btree_path_upgrade(trans, path, i->level + 1);
1072 trans->journal_u64s += jset_u64s(i->k->k.u64s);
1075 if (trans->journal_transaction_names)
1076 trans->journal_u64s += jset_u64s(i->old_k.u64s);
1079 if (trans->extra_disk_res) {
1080 ret = bch2_disk_reservation_add(c, trans->disk_res,
1081 trans->extra_disk_res,
1089 bch2_trans_verify_not_in_restart(trans);
1091 memset(&trans->journal_res, 0, sizeof(trans->journal_res));
1093 ret = do_bch2_trans_commit(trans, flags, &errored_at, _RET_IP_);
1096 bch2_trans_verify_locks(trans);
1101 trace_and_count(c, transaction_commit, trans, _RET_IP_);
1107 bch2_trans_downgrade(trans);
1108 bch2_trans_reset_updates(trans);
1112 ret = bch2_trans_commit_error(trans, flags, errored_at, ret, _RET_IP_);
1119 * trans->journal_res, but with BCH_TRANS_COMMIT_no_journal_res that is
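
Finally, the journal_u64s accounting in __bch2_trans_commit() sizes the journal reservation by summing jset_u64s() over every key (plus the old key when transaction names are enabled, plus the log-entry overhead). A standalone sketch of that accounting; the one-u64 header size below is an assumption, the real value comes from sizeof(struct jset_entry):

    #include <stdio.h>

    /* Assumed: each journal entry is a one-u64 header plus the payload,
     * so the reservation is payload + header per entry (cf. jset_u64s()). */
    #define JSET_ENTRY_HDR_U64S 1u

    static unsigned jset_u64s(unsigned u64s)
    {
        return u64s + JSET_ENTRY_HDR_U64S;
    }

    int main(void)
    {
        unsigned journal_u64s = 0;
        unsigned key_u64s[] = { 5, 8, 3 };   /* hypothetical key sizes in u64s */

        for (int i = 0; i < 3; i++)
            journal_u64s += jset_u64s(key_u64s[i]);

        printf("reserve %u u64s\n", journal_u64s);   /* 5+8+3 + 3 headers = 19 */
        return 0;
    }
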