Lines Matching defs:rbio

142 static void promote_start(struct promote_op *op, struct bch_read_bio *rbio)
146 trace_and_count(op->write.op.c, read_promote, &rbio->bio);
149 BUG_ON(!rbio->bounce);
150 BUG_ON(rbio->bio.bi_vcnt > bio->bi_max_vecs);
152 memcpy(bio->bi_io_vec, rbio->bio.bi_io_vec,
153 sizeof(struct bio_vec) * rbio->bio.bi_vcnt);
154 swap(bio->bi_vcnt, rbio->bio.bi_vcnt);
156 bch2_data_update_read_done(&op->write, rbio->pick.crc);
166 struct bch_read_bio **rbio)
190 *rbio = kzalloc(sizeof(struct bch_read_bio) +
193 if (!*rbio) {
198 rbio_init(&(*rbio)->bio, opts);
199 bio_init(&(*rbio)->bio, NULL, (*rbio)->bio.bi_inline_vecs, pages, 0);
201 if (bch2_bio_alloc_pages(&(*rbio)->bio, sectors << 9, GFP_KERNEL)) {
206 (*rbio)->bounce = true;
207 (*rbio)->split = true;
208 (*rbio)->kmalloc = true;
242 if (*rbio)
243 bio_free_pages(&(*rbio)->bio);
244 kfree(*rbio);
245 *rbio = NULL;
258 struct bch_read_bio **rbio,
282 k, pos, pick, opts, sectors, rbio);
308 bch2_rbio_parent(struct bch_read_bio *rbio)
310 return rbio->split ? rbio->parent : rbio;
314 static void bch2_rbio_punt(struct bch_read_bio *rbio, work_func_t fn,
318 if (context <= rbio->context) {
319 fn(&rbio->work);
321 rbio->work.func = fn;
322 rbio->context = context;
323 queue_work(wq, &rbio->work);
327 static inline struct bch_read_bio *bch2_rbio_free(struct bch_read_bio *rbio)
329 BUG_ON(rbio->bounce && !rbio->split);
331 if (rbio->promote)
332 promote_free(rbio->c, rbio->promote);
333 rbio->promote = NULL;
335 if (rbio->bounce)
336 bch2_bio_free_pages_pool(rbio->c, &rbio->bio);
338 if (rbio->split) {
339 struct bch_read_bio *parent = rbio->parent;
341 if (rbio->kmalloc)
342 kfree(rbio);
344 bio_put(&rbio->bio);
346 rbio = parent;
349 return rbio;
356 static void bch2_rbio_done(struct bch_read_bio *rbio)
358 if (rbio->start_time)
359 bch2_time_stats_update(&rbio->c->times[BCH_TIME_data_read],
360 rbio->start_time);
361 bio_endio(&rbio->bio);
364 static void bch2_read_retry_nodecode(struct bch_fs *c, struct bch_read_bio *rbio,
380 bch2_trans_iter_init(trans, &iter, rbio->data_btree,
381 rbio->read_pos, BTREE_ITER_SLOTS);
383 rbio->bio.bi_status = 0;
394 rbio->pick.ptr,
395 rbio->data_pos.offset -
396 rbio->pick.crc.offset)) {
398 rbio->hole = true;
402 ret = __bch2_read_extent(trans, rbio, bvec_iter,
403 rbio->read_pos,
404 rbio->data_btree,
411 bch2_rbio_done(rbio);
417 rbio->bio.bi_status = BLK_STS_IOERR;
423 struct bch_read_bio *rbio =
425 struct bch_fs *c = rbio->c;
426 struct bvec_iter iter = rbio->bvec_iter;
427 unsigned flags = rbio->flags;
429 .subvol = rbio->subvol,
430 .inum = rbio->read_pos.inode,
434 trace_and_count(c, read_retry, &rbio->bio);
436 if (rbio->retry == READ_RETRY_AVOID)
437 bch2_mark_io_failure(&failed, &rbio->pick);
439 rbio->bio.bi_status = 0;
441 rbio = bch2_rbio_free(rbio);
447 bch2_read_retry_nodecode(c, rbio, iter, &failed, flags);
452 __bch2_read(c, rbio, iter, inum, &failed, flags);
456 static void bch2_rbio_error(struct bch_read_bio *rbio, int retry,
459 rbio->retry = retry;
461 if (rbio->flags & BCH_READ_IN_RETRY)
465 rbio = bch2_rbio_free(rbio);
467 rbio->bio.bi_status = error;
468 bch2_rbio_done(rbio);
470 bch2_rbio_punt(rbio, bch2_rbio_retry,
476 struct bch_read_bio *rbio)
478 struct bch_fs *c = rbio->c;
479 u64 data_offset = rbio->data_pos.offset - rbio->pick.crc.offset;
486 if (crc_is_compressed(rbio->pick.crc))
489 k = bch2_bkey_get_iter(trans, &iter, rbio->data_btree, rbio->data_pos,
494 if (bversion_cmp(k.k->version, rbio->version) ||
495 !bch2_bkey_matches_ptr(c, k, rbio->pick.ptr, data_offset))
500 k.k->p.offset > data_offset + rbio->pick.crc.uncompressed_size)
503 if (bch2_rechecksum_bio(c, &rbio->bio, rbio->version,
504 rbio->pick.crc, NULL, &new_crc,
506 rbio->pick.crc.csum_type)) {
532 static noinline void bch2_rbio_narrow_crcs(struct bch_read_bio *rbio)
534 bch2_trans_do(rbio->c, NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
535 __bch2_rbio_narrow_crcs(trans, rbio));
541 struct bch_read_bio *rbio =
543 struct bch_fs *c = rbio->c;
544 struct bch_dev *ca = bch_dev_bkey_exists(c, rbio->pick.ptr.dev);
545 struct bio *src = &rbio->bio;
546 struct bio *dst = &bch2_rbio_parent(rbio)->bio;
547 struct bvec_iter dst_iter = rbio->bvec_iter;
548 struct bch_extent_crc_unpacked crc = rbio->pick.crc;
549 struct nonce nonce = extent_nonce(rbio->version, crc);
557 if (rbio->bounce) {
562 src->bi_iter = rbio->bvec_iter;
566 if (bch2_crc_cmp(csum, rbio->pick.crc.csum) && !c->opts.no_data_io)
576 if (unlikely(rbio->narrow_crcs))
577 bch2_rbio_narrow_crcs(rbio);
579 if (rbio->flags & BCH_READ_NODECODE)
583 crc.offset += rbio->offset_into_extent;
584 crc.live_size = bvec_iter_sectors(rbio->bvec_iter);
606 if (rbio->bounce) {
613 if (rbio->promote) {
616 * rbio->crc:
622 promote_start(rbio->promote, rbio);
623 rbio->promote = NULL;
626 if (likely(!(rbio->flags & BCH_READ_IN_RETRY))) {
627 rbio = bch2_rbio_free(rbio);
628 bch2_rbio_done(rbio);
639 if (!rbio->bounce && (rbio->flags & BCH_READ_USER_MAPPED)) {
640 rbio->flags |= BCH_READ_MUST_BOUNCE;
641 bch2_rbio_error(rbio, READ_RETRY, BLK_STS_IOERR);
648 bch2_csum_err_msg(&buf, crc.csum_type, rbio->pick.crc.csum, csum);
651 rbio->read_pos.inode,
652 rbio->read_pos.offset << 9,
657 bch2_rbio_error(rbio, READ_RETRY_AVOID, BLK_STS_IOERR);
660 bch_err_inum_offset_ratelimited(c, rbio->read_pos.inode,
661 rbio->read_pos.offset << 9,
663 bch2_rbio_error(rbio, READ_ERR, BLK_STS_IOERR);
666 bch_err_inum_offset_ratelimited(c, rbio->read_pos.inode,
667 rbio->read_pos.offset << 9,
669 bch2_rbio_error(rbio, READ_ERR, BLK_STS_IOERR);
675 struct bch_read_bio *rbio =
677 struct bch_fs *c = rbio->c;
678 struct bch_dev *ca = bch_dev_bkey_exists(c, rbio->pick.ptr.dev);
682 if (rbio->have_ioref) {
683 bch2_latency_acct(ca, rbio->submit_time, READ);
687 if (!rbio->split)
688 rbio->bio.bi_end_io = rbio->end_io;
691 rbio->read_pos.inode,
692 rbio->read_pos.offset,
695 bch2_rbio_error(rbio, READ_RETRY_AVOID, bio->bi_status);
699 if (((rbio->flags & BCH_READ_RETRY_IF_STALE) && race_fault()) ||
700 ptr_stale(ca, &rbio->pick.ptr)) {
701 trace_and_count(c, read_reuse_race, &rbio->bio);
703 if (rbio->flags & BCH_READ_RETRY_IF_STALE)
704 bch2_rbio_error(rbio, READ_RETRY, BLK_STS_AGAIN);
706 bch2_rbio_error(rbio, READ_ERR, BLK_STS_AGAIN);
710 if (rbio->narrow_crcs ||
711 rbio->promote ||
712 crc_is_compressed(rbio->pick.crc) ||
713 bch2_csum_type_is_encryption(rbio->pick.crc.csum_type))
715 else if (rbio->pick.crc.csum_type)
718 bch2_rbio_punt(rbio, __bch2_read_endio, context, wq);
803 struct bch_read_bio *rbio = NULL;
893 &rbio, &bounce, &read_full);
913 if (rbio) {
915 * promote already allocated bounce rbio:
920 EBUG_ON(rbio->bio.bi_iter.bi_size <
922 rbio->bio.bi_iter.bi_size =
927 rbio = rbio_init(bio_alloc_bioset(NULL,
934 bch2_bio_alloc_pages_pool(c, &rbio->bio, sectors << 9);
935 rbio->bounce = true;
936 rbio->split = true;
946 rbio = rbio_init(bio_alloc_clone(NULL, &orig->bio, GFP_NOFS,
949 rbio->bio.bi_iter = iter;
950 rbio->split = true;
952 rbio = orig;
953 rbio->bio.bi_iter = iter;
954 EBUG_ON(bio_flagged(&rbio->bio, BIO_CHAIN));
957 EBUG_ON(bio_sectors(&rbio->bio) != pick.crc.compressed_size);
959 rbio->c = c;
960 rbio->submit_time = local_clock();
961 if (rbio->split)
962 rbio->parent = orig;
964 rbio->end_io = orig->bio.bi_end_io;
965 rbio->bvec_iter = iter;
966 rbio->offset_into_extent = offset_into_extent;
967 rbio->flags = flags;
968 rbio->have_ioref = pick_ret > 0 && bch2_dev_get_ioref(ca, READ);
969 rbio->narrow_crcs = narrow_crcs;
970 rbio->hole = 0;
971 rbio->retry = 0;
972 rbio->context = 0;
974 rbio->devs_have = bch2_bkey_devs(k);
975 rbio->pick = pick;
976 rbio->subvol = orig->subvol;
977 rbio->read_pos = read_pos;
978 rbio->data_btree = data_btree;
979 rbio->data_pos = data_pos;
980 rbio->version = k.k->version;
981 rbio->promote = promote;
982 INIT_WORK(&rbio->work, NULL);
984 rbio->bio.bi_opf = orig->bio.bi_opf;
985 rbio->bio.bi_iter.bi_sector = pick.ptr.offset;
986 rbio->bio.bi_end_io = bch2_read_endio;
988 if (rbio->bounce)
989 trace_and_count(c, read_bounce, &rbio->bio);
991 this_cpu_add(c->counters[BCH_COUNTER_io_read], bio_sectors(&rbio->bio));
992 bch2_increment_clock(c, bio_sectors(&rbio->bio), READ);
1007 if (!rbio->pick.idx) {
1008 if (!rbio->have_ioref) {
1013 bch2_rbio_error(rbio, READ_RETRY_AVOID, BLK_STS_IOERR);
1018 bio_sectors(&rbio->bio));
1019 bio_set_dev(&rbio->bio, ca->disk_sb.bdev);
1023 bio_endio(&rbio->bio);
1026 submit_bio(&rbio->bio);
1028 submit_bio_wait(&rbio->bio);
1038 if (bch2_ec_read_extent(trans, rbio)) {
1039 bch2_rbio_error(rbio, READ_RETRY_AVOID, BLK_STS_IOERR);
1044 bio_endio(&rbio->bio);
1052 rbio->context = RBIO_CONTEXT_UNBOUND;
1053 bch2_read_endio(&rbio->bio);
1055 ret = rbio->retry;
1056 rbio = bch2_rbio_free(rbio);
1092 void __bch2_read(struct bch_fs *c, struct bch_read_bio *rbio,
1162 ret = __bch2_read_extent(trans, rbio, bvec_iter, iter.pos,
1172 bio_advance_iter(&rbio->bio, &bvec_iter, bytes);
1193 rbio->bio.bi_status = BLK_STS_IOERR;
1194 bch2_rbio_done(rbio);