Lines matching refs: bp

20 			      struct bch_backpointer bp)
35 !memcmp(&bp, &bp2, sizeof(bp)))
46 struct bkey_s_c_backpointer bp = bkey_s_c_to_backpointer(k);
49 if (!bch2_dev_exists2(c, bp.k->p.inode))
52 struct bch_dev *ca = bch_dev_bkey_exists(c, bp.k->p.inode);
53 struct bpos bucket = bp_pos_to_bucket(c, bp.k->p);
56 bkey_fsck_err_on((bp.v->bucket_offset >> MAX_EXTENT_COMPRESS_RATIO_SHIFT) >= ca->mi.bucket_size ||
57 !bpos_eq(bp.k->p, bucket_pos_to_bp_noerror(ca, bucket, bp.v->bucket_offset)),
65 void bch2_backpointer_to_text(struct printbuf *out, const struct bch_backpointer *bp)
68 bch2_btree_id_str(bp->btree_id),
69 bp->level,
70 (u64) (bp->bucket_offset >> MAX_EXTENT_COMPRESS_RATIO_SHIFT),
71 (u32) bp->bucket_offset & ~(~0U << MAX_EXTENT_COMPRESS_RATIO_SHIFT),
72 bp->bucket_len);
73 bch2_bpos_to_text(out, bp->pos);
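
The validate check (lines 46-57) and bch2_backpointer_to_text() (lines 65-73) both rely on the same encoding: the offset of a backpointer key packs the bucket's starting sector together with bucket_offset, and the low MAX_EXTENT_COMPRESS_RATIO_SHIFT bits of bucket_offset carry the sub-sector remainder used for compressed extents. A minimal standalone sketch of that round trip, using toy_* stand-ins and an assumed shift value rather than the real bcachefs helpers:

#include <assert.h>
#include <stdint.h>

/* Assumed value, for illustration only; the real constant is defined in the
 * bcachefs headers. */
#define MAX_EXTENT_COMPRESS_RATIO_SHIFT	10

struct toy_dev { uint64_t bucket_size; };	/* bucket size in sectors */

/* bucket index + bucket_offset -> offset field of the backpointer key */
static uint64_t toy_bucket_pos_to_bp(const struct toy_dev *ca,
				     uint64_t bucket, uint64_t bucket_offset)
{
	return ((bucket * ca->bucket_size) << MAX_EXTENT_COMPRESS_RATIO_SHIFT) +
		bucket_offset;
}

/* offset field of the backpointer key -> bucket index */
static uint64_t toy_bp_pos_to_bucket(const struct toy_dev *ca, uint64_t bp_offset)
{
	return (bp_offset >> MAX_EXTENT_COMPRESS_RATIO_SHIFT) / ca->bucket_size;
}

int main(void)
{
	struct toy_dev ca = { .bucket_size = 512 };
	uint64_t bucket = 123;
	/* 45 sectors into the bucket, with a zero sub-sector remainder */
	uint64_t bucket_offset = 45ULL << MAX_EXTENT_COMPRESS_RATIO_SHIFT;
	uint64_t bp_offset = toy_bucket_pos_to_bp(&ca, bucket, bucket_offset);

	/* the validate check at lines 56-57 enforces this kind of round trip */
	assert(toy_bp_pos_to_bucket(&ca, bp_offset) == bucket);
	assert((bucket_offset >> MAX_EXTENT_COMPRESS_RATIO_SHIFT) < ca.bucket_size);

	/* the to_text output at lines 70-71 splits bucket_offset the same way */
	uint64_t sectors  = bucket_offset >> MAX_EXTENT_COMPRESS_RATIO_SHIFT;
	uint32_t fraction = (uint32_t) bucket_offset &
			    ~(~0U << MAX_EXTENT_COMPRESS_RATIO_SHIFT);
	assert(sectors == 45 && fraction == 0);
	return 0;
}

Keying backpointers this way presumably keeps every backpointer belonging to one bucket adjacent in the backpointers btree.
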
89 struct bkey_s_backpointer bp = bkey_s_to_backpointer(k);
91 bp.v->bucket_offset = swab40(bp.v->bucket_offset);
92 bp.v->bucket_len = swab32(bp.v->bucket_len);
93 bch2_bpos_swab(&bp.v->pos);
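
bucket_offset is a 40-bit field, so bch2_backpointer_swab() (lines 89-93) cannot use a standard 16/32/64-bit byte swap and calls swab40() instead. A standalone sketch of what a 40-bit byte swap does (toy_swab40 is my own illustration; the kernel's swab40 may be defined differently):

#include <assert.h>
#include <stdint.h>

/* Reverse the byte order of a 40-bit (5-byte) value kept in a u64. */
static uint64_t toy_swab40(uint64_t x)
{
	return ((x & 0xffULL)         << 32) |
	       ((x & 0xff00ULL)       << 16) |
	        (x & 0xff0000ULL)            |
	       ((x & 0xff000000ULL)   >> 16) |
	       ((x & 0xff00000000ULL) >> 32);
}

int main(void)
{
	uint64_t v = 0x1122334455ULL;

	/* byte reversal, and swapping twice is the identity */
	assert(toy_swab40(v) == 0x5544332211ULL);
	assert(toy_swab40(toy_swab40(v)) == v);
	return 0;
}
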
97 struct bch_backpointer bp,
107 bch2_backpointer_to_text(&buf, &bp);
125 bch2_backpointer_to_text(&buf, &bp);
149 struct bch_backpointer bp,
164 bp_k->k.p = bucket_pos_to_bp(trans->c, bucket, bp.bucket_offset);
165 bp_k->v = bp;
184 memcmp(bkey_s_c_to_backpointer(k).v, &bp, sizeof(bp)))) {
185 ret = backpointer_mod_err(trans, bp, k, orig_k, insert);
202 struct bch_backpointer *bp,
234 *bp = *bkey_s_c_to_backpointer(k).v;
247 struct bch_backpointer bp,
263 bp.level ? "btree node" : "extent");
272 bch2_backpointer_to_text(&buf, &bp);
286 struct bch_backpointer bp,
289 if (likely(!bp.level)) {
295 bp.btree_id,
296 bp.pos,
305 if (k.k && extent_matches_bp(c, bp.btree_id, bp.level, k, bucket, bp))
309 backpointer_not_found(trans, bp_pos, bp, k);
312 struct btree *b = bch2_backpointer_get_node(trans, iter, bp_pos, bp);
325 struct bch_backpointer bp)
331 BUG_ON(!bp.level);
334 bp.btree_id,
335 bp.pos,
337 bp.level - 1,
343 BUG_ON(b->c.level != bp.level - 1);
345 if (extent_matches_bp(c, bp.btree_id, bp.level,
347 bucket, bp))
353 backpointer_not_found(trans, bp_pos, bp, bkey_i_to_s_c(&b->key));
505 struct bch_backpointer bp,
532 bucket_pos_to_bp(c, bucket, bp.bucket_offset),
539 memcmp(bkey_s_c_to_backpointer(bp_k).v, &bp, sizeof(bp))) {
543 if (bp.level) {
599 ret = drop_dev_and_update(trans, bp.btree_id, orig_k, bucket.inode);
606 ret = check_extent_checksum(trans, other_bp.btree_id, other_extent, bp.btree_id, orig_k, bucket.inode);
614 ret = check_extent_checksum(trans, bp.btree_id, orig_k, other_bp.btree_id, other_extent, bucket.inode);
633 bch2_btree_id_str(bp.btree_id), bp.level);
640 n_bp_k.k.p = bucket_pos_to_bp(trans->c, bucket, bp.bucket_offset);
641 n_bp_k.v = bp;
646 ret = bch2_bucket_backpointer_mod(trans, bucket, bp, orig_k, true);
665 struct bch_backpointer bp;
670 bch2_extent_ptr_to_bp(c, btree, level, k, p, entry, &bucket_pos, &bp);
672 ret = check_bp_exists(trans, s, bucket_pos, bp, k);
712 static inline struct bbpos bp_to_bbpos(struct bch_backpointer bp)
715 .btree = bp.btree_id,
716 .pos = bp.pos,
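
bp_to_bbpos() (lines 712-716) reduces a backpointer to a (btree id, position) pair; check_one_backpointer() (line 894) appears to use that pair to decide whether the target lies inside the portion of the keyspace currently being checked. A toy sketch of the pair and an ordering over it (all toy_* names are stand-ins; the real bbpos comparison may differ):

#include <assert.h>
#include <stdint.h>

struct toy_bpos  { uint64_t inode, offset; };
struct toy_bbpos { unsigned btree; struct toy_bpos pos; };

struct toy_backpointer {
	unsigned	btree_id;
	unsigned	level;
	struct toy_bpos	pos;
};

/* mirrors bp_to_bbpos(): keep only (btree_id, pos) */
static struct toy_bbpos toy_bp_to_bbpos(struct toy_backpointer bp)
{
	return (struct toy_bbpos) {
		.btree	= bp.btree_id,
		.pos	= bp.pos,
	};
}

/* order by btree id first, then by position within that btree */
static int toy_bbpos_cmp(struct toy_bbpos a, struct toy_bbpos b)
{
	if (a.btree != b.btree)
		return a.btree < b.btree ? -1 : 1;
	if (a.pos.inode != b.pos.inode)
		return a.pos.inode < b.pos.inode ? -1 : 1;
	if (a.pos.offset != b.pos.offset)
		return a.pos.offset < b.pos.offset ? -1 : 1;
	return 0;
}

int main(void)
{
	struct toy_backpointer bp = {
		.btree_id = 2,
		.level	  = 0,
		.pos	  = { .inode = 7, .offset = 4096 },
	};
	struct toy_bbpos start = { .btree = 0, .pos = { 0, 0 } };

	/* a backpointer into btree 2 sorts after the start of btree 0 */
	assert(toy_bbpos_cmp(toy_bp_to_bbpos(bp), start) > 0);
	return 0;
}
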
889 struct bkey_s_c_backpointer bp,
894 struct bbpos pos = bp_to_bbpos(*bp.v);
903 k = bch2_backpointer_get_key(trans, &iter, bp.k->p, *bp.v, 0);
910 if (!k.k && !bpos_eq(*last_flushed_pos, bp.k->p)) {
911 *last_flushed_pos = bp.k->p;
920 bp.v->level ? "btree node" : "extent",
921 (bch2_bkey_val_to_text(&buf, c, bp.s_c), buf.buf))) {
922 ret = bch2_btree_delete_at_buffered(trans, BTREE_ID_backpointers, bp.k->p);
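
The check_one_backpointer() fragments (lines 889-922) show a retry-after-flush pattern: a missing target is only reported, and the backpointer deleted, after the btree write buffer has been flushed once for that backpointer position, since the first miss may simply mean the key has not been flushed yet. A simplified, self-contained sketch of that control flow, with stand-in helpers in place of the real transaction and iterator APIs:

#include <stdbool.h>
#include <stdio.h>

struct pos { unsigned long long inode, offset; };

static bool pos_eq(struct pos a, struct pos b)
{
	return a.inode == b.inode && a.offset == b.offset;
}

/* stand-ins for the real lookup, write buffer flush, and buffered delete */
static bool target_exists(struct pos bp_pos)		{ return false; }
static void flush_btree_write_buffer(void)		{ }
static void delete_backpointer(struct pos bp_pos)
{
	printf("deleting dangling backpointer at %llu:%llu\n",
	       bp_pos.inode, bp_pos.offset);
}

/*
 * Returns true if the caller should retry: the first miss at a given
 * position only triggers a write buffer flush.
 */
static bool check_one_backpointer(struct pos bp_pos, struct pos *last_flushed_pos)
{
	if (target_exists(bp_pos))
		return false;				/* target found, nothing to do */

	if (!pos_eq(*last_flushed_pos, bp_pos)) {
		*last_flushed_pos = bp_pos;
		flush_btree_write_buffer();
		return true;				/* look again after the flush */
	}

	delete_backpointer(bp_pos);			/* still missing: dangling */
	return false;
}

int main(void)
{
	struct pos bp_pos = { .inode = 1, .offset = 4096 };
	struct pos last_flushed_pos = { 0, 0 };

	while (check_one_backpointer(bp_pos, &last_flushed_pos))
		;
	return 0;
}

Remembering last_flushed_pos means each position triggers at most one flush before the backpointer is treated as dangling.
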