Lines matching refs:bi in drivers/md/raid5.c (uses of struct bio *bi in the md RAID5/6 driver)

1135 raid5_end_read_request(struct bio *bi);
1137 raid5_end_write_request(struct bio *bi);
1159 struct bio *bi, *rbi;
1182 bi = &dev->req;
1250 bio_init(bi, rdev->bdev, &dev->vec, 1, op | op_flags);
1251 bi->bi_end_io = op_is_write(op)
1254 bi->bi_private = sh;
1258 bi->bi_opf, i);
1263 bi->bi_iter.bi_sector = (sh->sector
1266 bi->bi_iter.bi_sector = (sh->sector
1269 bi->bi_opf |= REQ_NOMERGE;
1284 bi->bi_vcnt = 1;
1285 bi->bi_io_vec[0].bv_len = RAID5_STRIPE_SIZE(conf);
1286 bi->bi_io_vec[0].bv_offset = sh->dev[i].offset;
1287 bi->bi_iter.bi_size = RAID5_STRIPE_SIZE(conf);
1293 bi->bi_vcnt = 0;
1297 mddev_trace_remap(conf->mddev, bi, sh->dev[i].sector);
1299 bio_list_add(&pending_bios, bi);
1301 submit_bio_noacct(bi);
1351 bi->bi_opf, i, (unsigned long long)sh->sector);
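
The matches at 1159-1351 come from ops_run_io(), where each stripe device's embedded request bio is (re)initialized against the member rdev and submitted, either batched on a bio_list or passed straight to submit_bio_noacct(). Below is a minimal kernel-context sketch of that pattern, not raid5's code: my_submit_one(), the hardcoded REQ_OP_READ, and the use of __bio_add_page() are illustrative assumptions (raid5 fills the single bio_vec by hand and selects the end_io handler with op_is_write()).

#include <linux/bio.h>
#include <linux/blkdev.h>

/* Sketch: initialize a one-segment bio that lives inside a longer-lived
 * structure (as raid5's struct r5dev does) and hand it to the block layer. */
static void my_submit_one(struct block_device *bdev, struct bio *bio,
                          struct bio_vec *vec, struct page *page,
                          sector_t sector, void *owner, bio_end_io_t *end_io)
{
        bio_init(bio, bdev, vec, 1, REQ_OP_READ);  /* one preallocated bio_vec */
        bio->bi_end_io = end_io;                   /* completion callback */
        bio->bi_private = owner;                   /* back-pointer, cf. sh in raid5 */
        bio->bi_iter.bi_sector = sector;           /* device-relative start sector */
        __bio_add_page(bio, page, PAGE_SIZE, 0);   /* fill the single segment */
        submit_bio_noacct(bio);                    /* hand off to the block layer */
}
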
2717 static void raid5_end_read_request(struct bio * bi)
2719 struct stripe_head *sh = bi->bi_private;
2726 if (bi == &sh->dev[i].req)
2731 bi->bi_status);
2750 if (!bi->bi_status) {
2782 if (!(bi->bi_status == BLK_STS_PROTECTION))
2839 bio_uninit(bi);
2845 static void raid5_end_write_request(struct bio *bi)
2847 struct stripe_head *sh = bi->bi_private;
2854 if (bi == &sh->dev[i].req) {
2858 if (bi == &sh->dev[i].rreq) {
2873 bi->bi_status);
2880 if (bi->bi_status)
2886 if (bi->bi_status) {
2906 if (sh->batch_head && bi->bi_status && !replacement)
2909 bio_uninit(bi);
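
raid5_end_read_request() and raid5_end_write_request() (2717-2909) are the bi_end_io completion handlers for those embedded bios: they recover the stripe from bi->bi_private, identify the member device by comparing bi against the embedded req/rreq bios, inspect bi->bi_status, and release the bio with bio_uninit(). A generic sketch of that callback shape follows; struct my_stripe and its fields are invented stand-ins for struct stripe_head.

#include <linux/bio.h>

/* Sketch of an end_io handler for bios embedded in a larger object. */
struct my_stripe {
        struct bio req[2];              /* embedded per-leg request bios */
        /* ... reference counts, state flags ... */
};

static void my_end_io(struct bio *bio)
{
        struct my_stripe *st = bio->bi_private; /* set before submission */
        int leg = (bio == &st->req[1]);         /* which embedded bio finished? */

        if (bio->bi_status)                     /* non-zero blk_status_t => error */
                pr_warn("my_end_io: leg %d failed\n", leg);

        bio_uninit(bio);        /* pairs with bio_init(); no bio_put() for embedded bios */
        /* ...record completion for 'leg' and kick the owner's state machine... */
}
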
3444 static bool stripe_bio_overlaps(struct stripe_head *sh, struct bio *bi,
3450 pr_debug("checking bi b#%llu to stripe s#%llu\n",
3451 bi->bi_iter.bi_sector, sh->sector);
3462 while (*bip && (*bip)->bi_iter.bi_sector < bi->bi_iter.bi_sector) {
3463 if (bio_end_sector(*bip) > bi->bi_iter.bi_sector)
3468 if (*bip && (*bip)->bi_iter.bi_sector < bio_end_sector(bi))
3504 static void __add_stripe_bio(struct stripe_head *sh, struct bio *bi,
3519 while (*bip && (*bip)->bi_iter.bi_sector < bi->bi_iter.bi_sector)
3525 BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next);
3527 bi->bi_next = *bip;
3528 *bip = bi;
3529 bio_inc_remaining(bi);
3530 md_write_inc(conf->mddev, bi);
3535 for (bi=sh->dev[dd_idx].towrite;
3537 bi && bi->bi_iter.bi_sector <= sector;
3538 bi = r5_next_bio(conf, bi, sh->dev[dd_idx].sector)) {
3539 if (bio_end_sector(bi) >= sector)
3540 sector = bio_end_sector(bi);
3547 pr_debug("added bi b#%llu to stripe s#%llu, disk %d, logical %llu\n",
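
stripe_bio_overlaps() and __add_stripe_bio() (3444-3547) manage each stripe device's toread/towrite queue: a singly linked list threaded through ->bi_next, kept sorted by bi_iter.bi_sector, with bio_inc_remaining() taking an extra completion reference for every stripe the bio is attached to. The sketch below shows the same overlap test and sorted insert in isolation; it ignores r5_next_bio()'s stripe-boundary handling and the conf/dd_idx bookkeeping, and the my_chain_* names are illustrative.

#include <linux/bio.h>

/* Does 'bi' overlap any bio already queued on the sorted ->bi_next chain? */
static bool my_chain_overlaps(struct bio *head, struct bio *bi)
{
        struct bio **bip = &head;

        while (*bip && (*bip)->bi_iter.bi_sector < bi->bi_iter.bi_sector) {
                if (bio_end_sector(*bip) > bi->bi_iter.bi_sector)
                        return true;            /* predecessor runs into bi */
                bip = &(*bip)->bi_next;
        }
        /* successor starts before bi ends? */
        return *bip && (*bip)->bi_iter.bi_sector < bio_end_sector(bi);
}

/* Splice 'bi' into the chain, keeping it sorted by start sector. */
static void my_chain_insert(struct bio **headp, struct bio *bi)
{
        struct bio **bip = headp;

        while (*bip && (*bip)->bi_iter.bi_sector < bi->bi_iter.bi_sector)
                bip = &(*bip)->bi_next;
        bi->bi_next = *bip;
        *bip = bi;
        bio_inc_remaining(bi);  /* one extra bio_endio() is now required */
}
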
3582 static bool add_stripe_bio(struct stripe_head *sh, struct bio *bi,
3587 if (stripe_bio_overlaps(sh, bi, dd_idx, forwrite)) {
3593 __add_stripe_bio(sh, bi, dd_idx, forwrite, previous);
3623 struct bio *bi;
3645 bi = sh->dev[i].towrite;
3649 if (bi)
3657 while (bi && bi->bi_iter.bi_sector <
3659 struct bio *nextbi = r5_next_bio(conf, bi, sh->dev[i].sector);
3662 bio_io_error(bi);
3663 bi = nextbi;
3670 bi = sh->dev[i].written;
3677 if (bi) bitmap_end = 1;
3678 while (bi && bi->bi_iter.bi_sector <
3680 struct bio *bi2 = r5_next_bio(conf, bi, sh->dev[i].sector);
3683 bio_io_error(bi);
3684 bi = bi2;
3695 bi = sh->dev[i].toread;
3700 if (bi)
3702 while (bi && bi->bi_iter.bi_sector <
3705 r5_next_bio(conf, bi, sh->dev[i].sector);
3707 bio_io_error(bi);
3708 bi = nextbi;
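
The matches at 3623-3708 are from handle_failed_stripe(): when a stripe can no longer be serviced, each device's towrite/written/toread chain is detached and every bio on it is completed with bio_io_error(). The sketch keeps only that detach-and-fail walk; the real code also advances with r5_next_bio(), handles md_write_end()/bitmap state, and runs under the stripe lock.

#include <linux/bio.h>

/* Detach a device's bio chain and fail everything on it that starts
 * before 'end_sector' (cf. RAID5_STRIPE_SECTORS() in handle_failed_stripe). */
static void my_fail_chain(struct bio **headp, sector_t end_sector)
{
        struct bio *bi = *headp;

        *headp = NULL;                          /* detach under the owner's lock */
        while (bi && bi->bi_iter.bi_sector < end_sector) {
                struct bio *next = bi->bi_next; /* save first: bio_io_error() may complete and free bi */

                bio_io_error(bi);               /* BLK_STS_IOERR + bio_endio() */
                bi = next;
        }
}
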
5384 static void add_bio_to_retry(struct bio *bi,struct r5conf *conf)
5390 bi->bi_next = conf->retry_read_aligned_list;
5391 conf->retry_read_aligned_list = bi;
5400 struct bio *bi;
5402 bi = conf->retry_read_aligned;
5403 if (bi) {
5406 return bi;
5408 bi = conf->retry_read_aligned_list;
5409 if(bi) {
5410 conf->retry_read_aligned_list = bi->bi_next;
5411 bi->bi_next = NULL;
5415 return bi;
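
add_bio_to_retry() and remove_bio_from_retry() (5384-5415) keep failed chunk-aligned reads on a simple stack threaded through ->bi_next, protected by conf->device_lock, so raid5d can resubmit them through the stripe cache. A minimal sketch of that push/pop, with 'lock' and 'listp' standing in for the conf fields:

#include <linux/bio.h>
#include <linux/spinlock.h>

/* Push a bio onto a ->bi_next-threaded retry stack. */
static void my_retry_push(spinlock_t *lock, struct bio **listp, struct bio *bi)
{
        unsigned long flags;

        spin_lock_irqsave(lock, flags);
        bi->bi_next = *listp;
        *listp = bi;
        spin_unlock_irqrestore(lock, flags);
}

/* Pop one bio, or NULL if the stack is empty. */
static struct bio *my_retry_pop(spinlock_t *lock, struct bio **listp)
{
        struct bio *bi;
        unsigned long flags;

        spin_lock_irqsave(lock, flags);
        bi = *listp;
        if (bi) {
                *listp = bi->bi_next;
                bi->bi_next = NULL;     /* detach before resubmission */
        }
        spin_unlock_irqrestore(lock, flags);
        return bi;
}
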
5424 static void raid5_align_endio(struct bio *bi)
5426 struct bio *raid_bi = bi->bi_private;
5430 blk_status_t error = bi->bi_status;
5432 bio_put(bi);
5721 static void make_discard_request(struct mddev *mddev, struct bio *bi)
5729 if (WARN_ON_ONCE(bi->bi_opf & REQ_NOWAIT))
5736 logical_sector = bi->bi_iter.bi_sector & ~((sector_t)RAID5_STRIPE_SECTORS(conf)-1);
5737 last_sector = bio_end_sector(bi);
5739 bi->bi_next = NULL;
5783 sh->dev[d].towrite = bi;
5785 bio_inc_remaining(bi);
5786 md_write_inc(mddev, bi);
5809 bio_endio(bi);
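
make_discard_request() (5721-5809) rounds the discard down to a stripe boundary, attaches the same bio as ->towrite on every data device of every affected stripe (taking one bio_inc_remaining() reference per attachment), and finally drops the submitter's reference with bio_endio(); the bio only completes once every reference has been matched. A sketch of just the range arithmetic, assuming stripe_sectors is a power of two as RAID5_STRIPE_SECTORS() is; my_discard_span() is an illustrative helper, not raid5 code.

#include <linux/bio.h>

/* Compute the stripe-aligned range covered by a discard bio. */
static void my_discard_span(struct bio *bi, sector_t stripe_sectors,
                            sector_t *first, sector_t *last)
{
        *first = bi->bi_iter.bi_sector & ~(stripe_sectors - 1);  /* round down */
        *last  = bio_end_sector(bi);    /* one past the final sector */
        /* per stripe device: dev->towrite = bi; bio_inc_remaining(bi);
         * when done: bio_endio(bi) releases the submitter's reference. */
}
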
5855 struct bio *bi, int forwrite, int previous)
5872 if (stripe_bio_overlaps(sh, bi, dd_idx, forwrite)) {
5892 __add_stripe_bio(sh, bi, dd_idx, forwrite, previous);
5904 sector_t logical_sector, struct bio *bi)
5906 const int rw = bio_data_dir(bi);
5947 if (bi->bi_opf & REQ_RAHEAD)
5952 bi->bi_status = BLK_STS_IOERR;
5977 !add_all_stripe_bios(conf, ctx, sh, bi, rw, previous)) {
6004 (bi->bi_opf & REQ_SYNC) &&
6015 bi->bi_status = BLK_STS_RESOURCE;
6027 struct bio *bi)
6034 sector_t r_sector = bi->bi_iter.bi_sector & ~((sector_t)RAID5_STRIPE_SECTORS(conf)-1);
6040 if (sectors_per_chunk - chunk_offset >= bio_sectors(bi))
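
raid5_bio_lowest_chunk_sector() (6027-6040) first checks whether the whole bio fits inside one chunk after rounding its start down to a stripe boundary, in which case no reordering is needed; raid5_make_request() uses the result (see 6133) to start issuing stripes from the chunk that maps lowest on the member disks. A sketch of that arithmetic, with sectors_per_chunk assumed to be a power of two and my_fits_in_one_chunk() purely illustrative:

#include <linux/bio.h>
#include <linux/blkdev.h>

/* Does the bio fit inside the remainder of the chunk that contains its
 * stripe-aligned start sector? */
static bool my_fits_in_one_chunk(struct bio *bi, sector_t stripe_sectors,
                                 unsigned int sectors_per_chunk)
{
        sector_t r_sector = bi->bi_iter.bi_sector & ~(stripe_sectors - 1);
        unsigned int chunk_offset = sector_div(r_sector, sectors_per_chunk);

        return sectors_per_chunk - chunk_offset >= bio_sectors(bi);
}
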
6054 static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
6060 const int rw = bio_data_dir(bi);
6064 if (unlikely(bi->bi_opf & REQ_PREFLUSH)) {
6065 int ret = log_handle_flush_request(conf, bi);
6070 if (md_flush_request(mddev, bi))
6078 ctx.do_flush = bi->bi_opf & REQ_PREFLUSH;
6081 if (!md_write_start(mddev, bi))
6090 bi = chunk_aligned_read(mddev, bi);
6091 if (!bi)
6095 if (unlikely(bio_op(bi) == REQ_OP_DISCARD)) {
6096 make_discard_request(mddev, bi);
6101 logical_sector = bi->bi_iter.bi_sector & ~((sector_t)RAID5_STRIPE_SECTORS(conf)-1);
6103 ctx.last_sector = bio_end_sector(bi);
6104 bi->bi_next = NULL;
6111 bi->bi_iter.bi_sector, ctx.last_sector);
6114 if ((bi->bi_opf & REQ_NOWAIT) &&
6118 bio_wouldblock_error(bi);
6123 md_account_bio(mddev, &bi);
6133 logical_sector = raid5_bio_lowest_chunk_sector(conf, bi);
6139 bi);
6179 md_free_cloned_bio(bi);
6183 bio_endio(bi);
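
raid5_make_request() (6054-6183) is the submission entry point tying the above together: it special-cases REQ_PREFLUSH (via log_handle_flush_request()/md_flush_request()), tries the chunk_aligned_read() fast path, branches REQ_OP_DISCARD off to make_discard_request(), honours REQ_NOWAIT with bio_wouldblock_error(), and otherwise walks the stripe range computed from bi_iter.bi_sector and bio_end_sector(). The skeleton below only shows the broad dispatch shape, not the exact ordering around md_write_start()/chunk_aligned_read(); every my_* helper is a hypothetical stand-in for the raid5 functions matched above.

#include <linux/bio.h>

static bool my_handle_flush(struct bio *bi);    /* cf. log_handle_flush_request/md_flush_request */
static void my_handle_discard(struct bio *bi);  /* cf. make_discard_request */
static bool my_would_block(struct bio *bi);     /* cf. the stripe-cache availability check */
static void my_handle_rw(struct bio *bi);       /* cf. the per-stripe read/write loop */

/* Broad dispatch shape, simplified from raid5_make_request(). */
static void my_make_request(struct bio *bi)
{
        if (unlikely(bi->bi_opf & REQ_PREFLUSH) && my_handle_flush(bi))
                return;                                 /* flush fully handled */

        if (unlikely(bio_op(bi) == REQ_OP_DISCARD)) {
                my_handle_discard(bi);
                return;
        }

        if ((bi->bi_opf & REQ_NOWAIT) && my_would_block(bi)) {
                bio_wouldblock_error(bi);               /* BLK_STS_AGAIN back to the caller */
                return;
        }

        my_handle_rw(bi);                               /* normal read/write path */
}
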