Lines Matching defs:paths

261 bch2_btree_path_verify(trans, &trans->paths[iter->update_path]);
659 i->old_v = bch2_btree_path_peek_slot(trans->paths + i->path, &i->old_k).v;
1021 if (trans->paths[idx].uptodate) {
1022 __btree_path_get(&trans->paths[idx], false);
1024 __btree_path_put(&trans->paths[idx], false);
1037 * We used to assert that all paths had been traversed here
1133 struct btree_path *path = &trans->paths[path_idx];
1157 path = &trans->paths[path_idx];
1236 btree_path_copy(trans, trans->paths + new, trans->paths + src);
1237 __btree_path_get(trans->paths + new, intent);
1239 trans->paths[new].ip_allocated = ip;
1248 __btree_path_put(trans->paths + path, intent);
1250 trans->paths[path].preserve = false;
1259 int cmp = bpos_cmp(new_pos, trans->paths[path_idx].pos);
1262 EBUG_ON(!trans->paths[path_idx].ref);
1266 struct btree_path *path = trans->paths + path_idx;
1344 __bch2_btree_path_unlock(trans, trans->paths + path);
1345 btree_path_list_remove(trans, trans->paths + path);
1371 struct btree_path *path = trans->paths + path_idx, *dup;
1404 if (!__btree_path_put(trans->paths + path, intent))
1474 struct btree_path *path = trans->paths + path_idx;
1510 struct btree_path *path = trans->paths + path_idx;
1630 struct btree_path *paths = p;
1631 *trans_paths_nr(paths) = nr;
1632 memcpy(paths, trans->paths, trans->nr_paths * sizeof(struct btree_path));
1645 rcu_assign_pointer(trans->paths, paths);
1678 struct btree_path *path = &trans->paths[idx];
1717 trans->paths[path_pos].cached == cached &&
1718 trans->paths[path_pos].btree_id == btree_id &&
1719 trans->paths[path_pos].level == level) {
1720 __btree_path_get(trans->paths + path_pos, intent);
1722 path = trans->paths + path_idx;
1725 path = trans->paths + path_idx;
1775 struct btree_path *path = trans->paths + path_idx;
1874 EBUG_ON(trans->paths[iter->path].cached);
1922 EBUG_ON(trans->paths[iter->path].cached);
2154 btree_path_set_should_be_locked(trans->paths + iter->key_cache_path);
2156 k = bch2_btree_path_peek_slot(trans->paths + iter->key_cache_path, &u);
2302 !bkey_eq(trans->paths[iter->update_path].pos, k.k->p)) {
2325 __btree_path_get(trans->paths + iter->path, iter->flags & BTREE_ITER_intent);
2384 ret = bch2_btree_path_relock(trans, trans->paths + iter->update_path, _THIS_IP_);
2388 btree_path_set_should_be_locked(trans->paths + iter->update_path);
2627 k = bch2_btree_path_peek_slot(trans->paths + iter->path, &iter->k);
2742 BUG_ON(trans->paths[idx].sorted_idx != i);
2757 panic("trans paths out of order!\n");
2787 if (btree_path_cmp(trans->paths + trans->sorted[i],
2788 trans->paths + trans->sorted[i + 1]) > 0) {
2790 trans->paths[trans->sorted[i]].sorted_idx = i;
2791 trans->paths[trans->sorted[i + 1]].sorted_idx = i + 1;
2822 trans->paths[trans->sorted[i]].sorted_idx = i;
2829 struct btree_path *path = trans->paths + path_idx;
2831 path->sorted_idx = pos ? trans->paths[pos].sorted_idx + 1 : trans->nr_sorted;
2845 trans->paths[trans->sorted[i]].sorted_idx = i;
2910 __btree_path_get(trans->paths + src->path, src->flags & BTREE_ITER_intent);
2912 __btree_path_get(trans->paths + src->update_path, src->flags & BTREE_ITER_intent);
3175 trans->paths = trans->_paths;
3178 *trans_paths_nr(trans->paths) = BTREE_ITER_INITIAL;
3217 bch_err(c, "btree paths leaked from %s!", trans->fn);
3236 __btree_path_put(trans->paths + i->path, true);
3261 trans->paths = NULL;
3324 /* trans->paths is rcu protected vs. freeing */
3328 struct btree_path *paths = rcu_dereference(trans->paths);
3329 if (!paths)
3332 unsigned long *paths_allocated = trans_paths_allocated(paths);
3334 trans_for_each_path_idx_from(paths_allocated, *trans_paths_nr(paths), idx, 1) {
3335 struct btree_path *path = paths + idx;
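
Taken together, the matches above show one consistent access pattern: trans->paths is an array indexed by small path indices, each slot carries a reference count adjusted through __btree_path_get()/__btree_path_put(), free versus allocated slots are tracked in a bitmap (trans_paths_allocated()), and readers that may race with a resize walk the array through rcu_dereference()/rcu_assign_pointer() (lines 1630-1645 and 3324-3335 above). What follows is a minimal, hypothetical userspace sketch of that idea only, not bcachefs code: the names path_slab, path_alloc(), path_get(), and path_put() are invented for illustration, the structures are heavily simplified, and real RCU protection is replaced by plain pointer access.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical, simplified stand-in for struct btree_path. */
struct path {
	uint8_t  ref;   /* reference count, as __btree_path_get()/__btree_path_put() adjust */
	uint64_t pos;   /* stand-in for the search position */
};

/* Hypothetical container: a fixed array of slots plus an allocated-slots bitmap. */
struct path_slab {
	unsigned     nr;         /* capacity, analogous to *trans_paths_nr(paths) */
	uint64_t     allocated;  /* one bit per slot, analogous to trans_paths_allocated() */
	struct path *paths;      /* analogous to trans->paths */
};

static unsigned path_alloc(struct path_slab *s, uint64_t pos)
{
	for (unsigned i = 0; i < s->nr; i++)
		if (!(s->allocated & (1ULL << i))) {
			s->allocated |= 1ULL << i;
			s->paths[i] = (struct path) { .ref = 1, .pos = pos };
			return i;
		}
	abort(); /* a real implementation would grow the array here (cf. lines 1630-1645) */
}

static void path_get(struct path_slab *s, unsigned idx)
{
	s->paths[idx].ref++;
}

static void path_put(struct path_slab *s, unsigned idx)
{
	if (!--s->paths[idx].ref)
		s->allocated &= ~(1ULL << idx); /* last reference dropped: free the slot */
}

int main(void)
{
	struct path_slab s = {
		.nr    = 8,
		.paths = calloc(8, sizeof(struct path)),
	};

	unsigned a = path_alloc(&s, 100);
	unsigned b = path_alloc(&s, 200);

	path_get(&s, a);  /* second reference, e.g. an update_path aliasing the same slot */
	path_put(&s, a);
	path_put(&s, a);  /* last put frees slot a */

	/*
	 * Slot b is deliberately left allocated; walking the bitmap afterwards
	 * is loosely analogous to the leak check near line 3217 above.
	 */
	for (unsigned i = 0; i < s.nr; i++)
		if (s.allocated & (1ULL << i))
			printf("slot %u still allocated\n", i);

	free(s.paths);
	return 0;
}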