Lines Matching refs:bio

7 #include <linux/bio.h>
21 static inline void bio_get_first_bvec(struct bio *bio, struct bio_vec *bv)
23 *bv = mp_bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
26 static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv)
28 struct bvec_iter iter = bio->bi_iter;
31 bio_get_first_bvec(bio, bv);
32 if (bv->bv_len == bio->bi_iter.bi_size)
33 return; /* this bio only has a single bvec */
35 bio_advance_iter(bio, &iter, iter.bi_size);
42 *bv = bio->bi_io_vec[idx];
46 * if this bio ends in the middle of one io vector
53 struct request *prev_rq, struct bio *prev, struct bio *next)
61 * Don't merge if the 1st bio starts with non-zero offset, otherwise it
66 bio_get_first_bvec(prev_rq->bio, &pb);
77 * - if 'pb' ends unaligned, the next bio must include
88 static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
90 return bio_will_gap(req->q, req, req->biotail, bio);
93 static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
95 return bio_will_gap(req->q, NULL, bio, req->bio);
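The comments inside bio_will_gap() above describe the SG-gap rule these two helpers enforce: two bios may only share a request if the earlier one ends on the device's virtual boundary and the later one starts on it. A minimal userspace sketch of that arithmetic (the mask and offsets stand in for queue_limits.virt_boundary_mask and the two boundary bvecs; this is a simplification, not the kernel helper):

#include <stdbool.h>

/* Simplified gap test: the previous segment must end on the virt
 * boundary and the next one must start on it, otherwise the bios
 * would leave a gap the hardware cannot DMA across. */
static bool segments_will_gap(unsigned long virt_boundary_mask,
                              unsigned int prev_offset, unsigned int prev_len,
                              unsigned int next_offset)
{
        if (!virt_boundary_mask)
                return false;   /* no SG gap restriction on this device */
        return ((prev_offset + prev_len) & virt_boundary_mask) ||
               (next_offset & virt_boundary_mask);
}
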
99 * The max size one bio can handle is UINT_MAX because bvec_iter.bi_size
108 static struct bio *bio_split_discard(struct bio *bio,
126 if (bio_sectors(bio) <= max_discard_sectors)
135 tmp = bio->bi_iter.bi_sector + split_sectors -
142 return bio_split(bio, split_sectors, GFP_NOIO, bs);
145 static struct bio *bio_split_write_zeroes(struct bio *bio,
152 if (bio_sectors(bio) <= lim->max_write_zeroes_sectors)
154 return bio_split(bio, lim->max_write_zeroes_sectors, GFP_NOIO, bs);
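bio_split_discard() above does more than cap the bio at max_discard_sectors: it also pulls the split point back so that the remainder starts on a discard-granularity boundary (the computation at line 135). A simplified userspace sketch of that rounding, with everything in sectors and 'alignment' standing in for (lim->discard_alignment >> 9) reduced modulo the granularity:

/* Cap a discard at max_sectors, then shrink the first piece so that the
 * remainder starts granularity-aligned, as bio_split_discard() does. */
static unsigned int discard_split_sectors(unsigned long long start_sector,
                                          unsigned int nr_sectors,
                                          unsigned int max_sectors,
                                          unsigned int granularity,
                                          unsigned int alignment)
{
        unsigned int split = max_sectors;
        unsigned long long rem;

        if (nr_sectors <= max_sectors)
                return 0;               /* fits as a single discard, no split */

        rem = (start_sector + split - alignment) % granularity;
        if (split > rem)
                split -= rem;
        return split;
}
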
158 * Return the maximum number of sectors from the start of a bio that may be
162 * requests that are submitted to a block device if the start of a bio is not
165 static inline unsigned get_max_io_size(struct bio *bio,
174 blk_chunk_sectors_left(bio->bi_iter.bi_sector,
178 start = bio->bi_iter.bi_sector & (pbs - 1);
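get_max_io_size() combines two caps, as its comment says: stay within the current chunk (line 174) and keep the split point aligned to the physical block size (line 178). A rough userspace model of that arithmetic, assuming power-of-two chunk and block sizes; the kernel additionally falls back to logical-block rounding when the aligned window collapses:

/* Sectors left before the next chunk boundary (power-of-two chunks). */
static unsigned int chunk_sectors_left(unsigned long long sector,
                                       unsigned int chunk_sectors)
{
        return chunk_sectors - (sector & (chunk_sectors - 1));
}

/* Largest I/O, in sectors, that may start at 'sector' without crossing a
 * chunk boundary or ending on a physically misaligned sector. */
static unsigned int max_io_sectors(unsigned long long sector,
                                   unsigned int max_sectors,
                                   unsigned int chunk_sectors,
                                   unsigned int pbs_sectors)
{
        unsigned int start, end;

        if (chunk_sectors) {
                unsigned int left = chunk_sectors_left(sector, chunk_sectors);

                if (left < max_sectors)
                        max_sectors = left;
        }

        start = sector & (pbs_sectors - 1);
        end = (start + max_sectors) & ~(pbs_sectors - 1);
        if (end > start)
                return end - start;
        return max_sectors;     /* misaligned case; the kernel rounds this to the logical block */
}
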
211 * @nsegs: [in,out] Number of segments in the bio being built. Incremented
213 * bio without exceeding @max_segs
214 * @bytes: [in,out] Number of bytes in the bio being built. Incremented
216 * bio without exceeding @max_bytes
220 * When splitting a bio, it can happen that a bvec is encountered that is too
223 * %true is returned if and only if appending the entire @bv to a bio with
224 * *@nsegs segments and *@sectors sectors would make that bio unacceptable for
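The kernel-doc above defines an accumulate-and-report contract: advance *@nsegs and *@bytes by however much of @bv fits, and return %true exactly when the whole bvec could not be appended, i.e. the bio has to be split at this bvec. A simplified sketch of that contract (one segment per bvec here; the real bvec_split_segs() also splits large bvecs by the per-segment size limit):

#include <stdbool.h>

/* Account one bvec of 'bv_len' bytes against the running totals.
 * Returns true if the whole bvec could not be accommodated, i.e. the
 * caller must split here; the counters are advanced by what did fit. */
static bool split_segs(unsigned int bv_len,
                       unsigned int *nsegs, unsigned int max_segs,
                       unsigned int *bytes, unsigned int max_bytes)
{
        unsigned int room = max_bytes - *bytes;
        unsigned int len = bv_len < room ? bv_len : room;

        if (*nsegs >= max_segs || !room)
                return true;            /* nothing more fits */

        *nsegs += 1;
        *bytes += len;
        return len < bv_len;            /* true: only part of the bvec fit */
}
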
256 * bio_split_rw - split a bio in two bios
257 * @bio: [in] bio to be split
259 * @segs: [out] number of segments in the bio with the first half of the sectors
260 * @bs: [in] bio set to allocate the clone from
261 * @max_bytes: [in] maximum number of bytes per bio
263 * Clone @bio, update the bi_iter of the clone to represent the first sectors
264 * of @bio and update @bio->bi_iter to represent the remaining sectors. The
265 * following is guaranteed for the cloned bio:
269 * Except for discard requests the cloned bio will point at the bi_io_vec of
270 * the original bio. It is the responsibility of the caller to ensure that the
271 * original bio is not freed before the cloned bio. The caller is also
273 * split bio has finished.
275 struct bio *bio_split_rw(struct bio *bio, const struct queue_limits *lim,
282 bio_for_each_bvec(bv, bio, iter) {
309 * We can't sanely support splitting for a REQ_NOWAIT bio. End it
312 if (bio->bi_opf & REQ_NOWAIT) {
313 bio->bi_status = BLK_STS_AGAIN;
314 bio_endio(bio);
322 * split size so that each bio is properly block size aligned, even if
332 bio_clear_polled(bio);
333 return bio_split(bio, bytes >> SECTOR_SHIFT, GFP_NOIO, bs);
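The tail of bio_split_rw() is visible above: a REQ_NOWAIT bio that would need splitting is failed with BLK_STS_AGAIN instead (lines 312-314), otherwise the accumulated byte count is rounded down so both halves stay block-size aligned (line 322) and converted to sectors for bio_split() (line 333). A small model of that final rounding step, with ALIGN_DOWN written out and a power-of-two logical block size assumed:

#include <stdio.h>

#define SECTOR_SHIFT 9

/* Round a split point down to a logical-block boundary and convert it to
 * sectors, mirroring the tail of bio_split_rw(). */
static unsigned int split_point_sectors(unsigned int bytes,
                                        unsigned int logical_block_size)
{
        bytes &= ~(logical_block_size - 1);     /* ALIGN_DOWN(bytes, lbs) */
        return bytes >> SECTOR_SHIFT;
}

int main(void)
{
        /* 1 MiB plus a 300-byte tail with 4k logical blocks: split at 2048 sectors */
        printf("%u\n", split_point_sectors((1 << 20) + 300, 4096));
        return 0;
}
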
338 * __bio_split_to_limits - split a bio to fit the queue limits
339 * @bio: bio to be split
341 * @nr_segs: returns the number of segments in the returned bio
343 * Check if @bio needs splitting based on the queue limits, and if so split off
344 * a bio fitting the limits from the beginning of @bio and return it. @bio is
347 * The split bio is allocated from @q->bio_split, which is provided by the
350 struct bio *__bio_split_to_limits(struct bio *bio,
354 struct bio_set *bs = &bio->bi_bdev->bd_disk->bio_split;
355 struct bio *split;
357 switch (bio_op(bio)) {
360 split = bio_split_discard(bio, lim, nr_segs, bs);
363 split = bio_split_write_zeroes(bio, lim, nr_segs, bs);
366 split = bio_split_rw(bio, lim, nr_segs, bs,
367 get_max_io_size(bio, lim) << SECTOR_SHIFT);
374 /* there is no chance to merge the split bio */
378 bio_chain(split, bio);
379 trace_block_split(split, bio->bi_iter.bi_sector);
380 submit_bio_noacct(bio);
383 return bio;
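The chain-and-resubmit at lines 378-380 is what keeps __bio_split_to_limits() simple: only one limit-sized head is split off per pass, while the shortened original is chained behind it and sent back through submit_bio_noacct() to be split again if it is still too large. A toy model of that loop-by-resubmission, using plain sector counts instead of bios:

#include <stdio.h>

/* Toy split-and-resubmit: 'remaining' plays the original bio, each pass
 * peels off a head no larger than 'limit' (the split) and "resubmits"
 * the rest, the way __bio_split_to_limits() loops via submit_bio_noacct(). */
static void split_to_limits(unsigned int remaining, unsigned int limit)
{
        while (remaining > limit) {
                printf("process head of %u sectors\n", limit);
                remaining -= limit;     /* the shortened bio is resubmitted */
        }
        printf("process final %u sectors\n", remaining);
}

int main(void)
{
        split_to_limits(10000, 2560);   /* e.g. a 1280 KiB per-bio limit */
        return 0;
}
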
387 * bio_split_to_limits - split a bio to fit the queue limits
388 * @bio: bio to be split
390 * Check if @bio needs splitting based on the queue limits of @bio->bi_bdev, and
391 * if so split off a bio fitting the limits from the beginning of @bio and
392 * return it. @bio is shortened to the remainder and re-submitted.
394 * The split bio is allocated from @q->bio_split, which is provided by the
397 struct bio *bio_split_to_limits(struct bio *bio)
399 const struct queue_limits *lim = &bdev_get_queue(bio->bi_bdev)->limits;
402 if (bio_may_exceed_limits(bio, lim))
403 return __bio_split_to_limits(bio, lim, &nr_segs);
404 return bio;
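bio_split_to_limits() is the exported entry point that bio-based stacking drivers call from their ->submit_bio handler before doing anything else with the bio. A hedged sketch of that calling convention (the handler name is hypothetical; the NULL check covers the case where the split path already failed and ended the bio):

/* Hypothetical ->submit_bio handler of a bio-based driver: split to the
 * queue limits first, and stop if the bio was already completed with an
 * error by the split path. */
static void example_submit_bio(struct bio *bio)
{
        bio = bio_split_to_limits(bio);
        if (!bio)
                return;

        /* ... remap @bio to the underlying device(s) and resubmit ... */
}
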
415 if (!rq->bio)
418 switch (bio_op(rq->bio)) {
422 struct bio *bio = rq->bio;
424 for_each_bio(bio)
523 static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
532 for_each_bio(bio) {
533 bio_for_each_bvec(bvec, bio, iter) {
536 * have done bio internal merge when adding pages
537 * to bio
550 if (likely(bio->bi_iter.bi_size)) {
570 else if (rq->bio)
571 nsegs = __blk_bios_map_sg(q, rq->bio, sglist, last_sg);
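The comment at lines 536-537 explains why __blk_bios_map_sg() can walk multi-page bvecs directly: physically contiguous pages were already merged into one bvec when they were added to the bio, so mapping only has to cut each bvec into scatterlist entries no larger than the segment limit. A trivial model of that per-bvec arithmetic (the real code also honours the segment boundary mask):

/* Number of scatterlist entries one bvec of 'bv_len' bytes needs when a
 * single entry may carry at most max_segment_size bytes. */
static unsigned int bvec_to_sg_entries(unsigned int bv_len,
                                       unsigned int max_segment_size)
{
        return (bv_len + max_segment_size - 1) / max_segment_size;
}
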
604 static inline int ll_new_hw_segment(struct request *req, struct bio *bio,
607 if (!blk_cgroup_mergeable(req, bio))
610 if (blk_integrity_merge_bio(req->q, req, bio) == false)
632 int ll_back_merge_fn(struct request *req, struct bio *bio, unsigned int nr_segs)
634 if (req_gap_back_merge(req, bio))
637 integrity_req_gap_back_merge(req, bio))
639 if (!bio_crypt_ctx_back_mergeable(req, bio))
641 if (blk_rq_sectors(req) + bio_sectors(bio) >
647 return ll_new_hw_segment(req, bio, nr_segs);
650 static int ll_front_merge_fn(struct request *req, struct bio *bio,
653 if (req_gap_front_merge(req, bio))
656 integrity_req_gap_front_merge(req, bio))
658 if (!bio_crypt_ctx_front_mergeable(req, bio))
660 if (blk_rq_sectors(req) + bio_sectors(bio) >
661 blk_rq_get_max_sectors(req, bio->bi_iter.bi_sector)) {
666 return ll_new_hw_segment(req, bio, nr_segs);
676 if (blk_rq_sectors(req) + bio_sectors(next->bio) >
692 if (req_gap_back_merge(req, next->bio))
706 if (!blk_cgroup_mergeable(req, next->bio))
726 * which can be mixed are set in each bio and mark @rq as mixed
732 struct bio *bio;
740 * Distributes the attributes to each bio.
742 for (bio = rq->bio; bio; bio = bio->bi_next) {
743 WARN_ON_ONCE((bio->bi_opf & REQ_FAILFAST_MASK) &&
744 (bio->bi_opf & REQ_FAILFAST_MASK) != ff);
745 bio->bi_opf |= ff;
750 static inline blk_opf_t bio_failfast(const struct bio *bio)
752 if (bio->bi_opf & REQ_RAHEAD)
755 return bio->bi_opf & REQ_FAILFAST_MASK;
759 * After we are marked as MIXED_MERGE, any new RA bio has to be updated
764 struct bio *bio, bool front_merge)
767 if (bio->bi_opf & REQ_RAHEAD)
768 bio->bi_opf |= REQ_FAILFAST_MASK;
772 req->cmd_flags |= bio->bi_opf & REQ_FAILFAST_MASK;
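The two functions above implement the "mixed merge" bookkeeping described at line 726: once requests with different failfast settings are merged, the common failfast bits are pushed down into every bio, and a read-ahead bio is treated as failfast in every way so it never blocks such a merge. A toy model of that flag handling (the bit values are illustrative, not the real REQ_* encoding):

#define FAILFAST_MASK   0x7u    /* illustrative: the failfast bits */
#define RAHEAD          0x8u    /* illustrative: read-ahead marker */

/* bio_failfast() analogue: a read-ahead bio may always fail fast. */
static unsigned int effective_failfast(unsigned int bio_flags)
{
        if (bio_flags & RAHEAD)
                return FAILFAST_MASK;
        return bio_flags & FAILFAST_MASK;
}

/* blk_rq_set_mixed_merge() analogue: push the request's failfast bits
 * into each bio so later re-splits keep consistent semantics. */
static void set_mixed_merge(unsigned int rq_flags,
                            unsigned int *bio_flags, int nr_bios)
{
        unsigned int ff = rq_flags & FAILFAST_MASK;

        for (int i = 0; i < nr_bios; i++)
                bio_flags[i] |= ff;
}
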
821 * If we are allowed to merge, then append bio list
862 req->biotail->bi_next = next->bio;
880 * ownership of bio passed from next to req, return 'next' for
883 next->bio = NULL;
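The back merge of two requests (lines 862 and 883 above) is a singly linked list splice with head and tail pointers: hang next's first bio off req's biotail, adopt next's biotail, and clear next->bio so the transferred bios are not freed when 'next' is released. A self-contained model of that splice:

#include <stddef.h>

struct node { struct node *next; };
struct list { struct node *head, *tail; };

/* Toy version of the bio-list splice in attempt_merge(): append all of
 * 'next' to 'req' and leave 'next' empty, transferring ownership. */
static void splice_lists(struct list *req, struct list *next)
{
        if (!next->head)
                return;
        if (req->tail)
                req->tail->next = next->head;
        else
                req->head = next->head;
        req->tail = next->tail;
        next->head = next->tail = NULL;
}
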
920 bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
922 if (!rq_mergeable(rq) || !bio_mergeable(bio))
925 if (req_op(rq) != bio_op(bio))
929 if (bio_data_dir(bio) != rq_data_dir(rq))
933 if (!blk_cgroup_mergeable(rq, bio))
936 /* only merge integrity protected bio into ditto rq */
937 if (blk_integrity_merge_bio(rq->q, rq, bio) == false)
941 if (!bio_crypt_rq_ctx_compatible(rq, bio))
945 if (rq->write_hint != bio->bi_write_hint)
948 if (rq->ioprio != bio_prio(bio))
954 enum elv_merge blk_try_merge(struct request *rq, struct bio *bio)
958 else if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
960 else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
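blk_rq_merge_ok() filters on op, data direction, cgroup, integrity, crypto context, write hint and priority; blk_try_merge() then picks the merge direction purely from sector positions, as the two comparisons at lines 958 and 960 show. A toy version of that position test, written with additions only (equivalent to the kernel's subtraction form but immune to unsigned wrap):

enum merge_type { NO_MERGE, BACK_MERGE, FRONT_MERGE };

/* Toy blk_try_merge(): back-merge if the bio starts exactly where the
 * request ends, front-merge if it ends exactly where the request starts. */
static enum merge_type try_merge(unsigned long long rq_pos,
                                 unsigned int rq_sectors,
                                 unsigned long long bio_pos,
                                 unsigned int bio_sectors)
{
        if (rq_pos + rq_sectors == bio_pos)
                return BACK_MERGE;
        if (bio_pos + bio_sectors == rq_pos)
                return FRONT_MERGE;
        return NO_MERGE;
}
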
982 struct bio *bio, unsigned int nr_segs)
984 const blk_opf_t ff = bio_failfast(bio);
986 if (!ll_back_merge_fn(req, bio, nr_segs))
989 trace_block_bio_backmerge(bio);
990 rq_qos_merge(req->q, req, bio);
995 blk_update_mixed_merge(req, bio, false);
997 req->biotail->bi_next = bio;
998 req->biotail = bio;
999 req->__data_len += bio->bi_iter.bi_size;
1001 bio_crypt_free_ctx(bio);
1008 struct bio *bio, unsigned int nr_segs)
1010 const blk_opf_t ff = bio_failfast(bio);
1012 if (!ll_front_merge_fn(req, bio, nr_segs))
1015 trace_block_bio_frontmerge(bio);
1016 rq_qos_merge(req->q, req, bio);
1021 blk_update_mixed_merge(req, bio, true);
1023 bio->bi_next = req->bio;
1024 req->bio = bio;
1026 req->__sector = bio->bi_iter.bi_sector;
1027 req->__data_len += bio->bi_iter.bi_size;
1029 bio_crypt_do_front_merge(req, bio);
1036 struct request *req, struct bio *bio)
1042 if (blk_rq_sectors(req) + bio_sectors(bio) >
1046 rq_qos_merge(q, req, bio);
1048 req->biotail->bi_next = bio;
1049 req->biotail = bio;
1050 req->__data_len += bio->bi_iter.bi_size;
1062 struct bio *bio,
1066 if (!blk_rq_merge_ok(rq, bio))
1069 switch (blk_try_merge(rq, bio)) {
1071 if (!sched_allow_merge || blk_mq_sched_allow_merge(q, rq, bio))
1072 return bio_attempt_back_merge(rq, bio, nr_segs);
1075 if (!sched_allow_merge || blk_mq_sched_allow_merge(q, rq, bio))
1076 return bio_attempt_front_merge(rq, bio, nr_segs);
1079 return bio_attempt_discard_merge(q, rq, bio);
1089 * @q: request_queue new bio is being queued at
1090 * @bio: new bio being queued
1091 * @nr_segs: number of segments in @bio
1094 * Determine whether @bio being queued on @q can be merged with the previous
1107 bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
1113 plug = blk_mq_plug(bio);
1119 if (blk_attempt_bio_merge(q, rq, bio, nr_segs, false) ==
1136 * Iterate list of requests and see if we can merge this bio with any
1140 struct bio *bio, unsigned int nr_segs)
1149 switch (blk_attempt_bio_merge(q, rq, bio, nr_segs, true)) {
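blk_attempt_plug_merge() and blk_bio_list_merge() deliberately keep the scan short and bounded, looking only at the most recently queued requests rather than the whole list, trading a few merge opportunities for a cheap lockless check. A toy model of that bounded reverse scan (the request list is reduced to an array of end positions, and the scan bound is illustrative):

#include <stdbool.h>

#define PLUG_SCAN_LIMIT 8       /* illustrative bound on how far back to look */

/* Toy plug-merge scan: walk the queued requests newest-first and accept a
 * back merge if the new bio starts exactly where one of them ends. */
static bool plug_try_merge(const unsigned long long *end_pos, int nr_reqs,
                           unsigned long long bio_pos)
{
        int checked = PLUG_SCAN_LIMIT;

        for (int i = nr_reqs - 1; i >= 0 && checked--; i--) {
                if (end_pos[i] == bio_pos)
                        return true;
        }
        return false;
}
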
1164 bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
1169 switch (elv_merge(q, &rq, bio)) {
1171 if (!blk_mq_sched_allow_merge(q, rq, bio))
1173 if (bio_attempt_back_merge(rq, bio, nr_segs) != BIO_MERGE_OK)
1180 if (!blk_mq_sched_allow_merge(q, rq, bio))
1182 if (bio_attempt_front_merge(rq, bio, nr_segs) != BIO_MERGE_OK)
1189 return bio_attempt_discard_merge(q, rq, bio) == BIO_MERGE_OK;