Lines Matching refs:rq (identifier cross-reference hits; the line numbers and fragments below appear to come from the Linux kernel's block/blk-merge.c)

18 #include "blk-rq-qos.h"
409 unsigned int blk_recalc_rq_segments(struct request *rq)
416 if (!rq->bio)
419 switch (bio_op(rq->bio)) {
422 if (queue_max_discard_segments(rq->q) > 1) {
423 struct bio *bio = rq->bio;
436 rq_for_each_bvec(bv, rq, iter)
437 bvec_split_segs(&rq->q->limits, &bv, &nr_phys_segs, &bytes,
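
blk_recalc_rq_segments() (lines 409-437 above) walks every bio_vec in the request with rq_for_each_bvec() and lets bvec_split_segs() decide how many physical segments each one contributes under the queue's limits. A minimal user-space sketch of that counting loop, assuming a single max-segment-size limit in place of the full struct queue_limits (struct vec, split_segs, and seg_max are stand-ins, not kernel names):

#include <stddef.h>
#include <stdio.h>

/* User-space stand-in for a bio_vec: one contiguous data range. */
struct vec { size_t len; };

/* Count how many segments @v contributes when no single segment may
 * exceed @seg_max bytes -- the core idea of bvec_split_segs(). */
static unsigned int split_segs(const struct vec *v, size_t seg_max)
{
    return (unsigned int)((v->len + seg_max - 1) / seg_max);
}

int main(void)
{
    struct vec vecs[] = { { 4096 }, { 12288 }, { 512 } };
    unsigned int nr_phys_segs = 0;

    /* Analog of rq_for_each_bvec(): accumulate per-bvec segment counts. */
    for (size_t i = 0; i < sizeof(vecs) / sizeof(vecs[0]); i++)
        nr_phys_segs += split_segs(&vecs[i], 8192);

    printf("%u physical segments\n", nr_phys_segs); /* prints 4 */
    return 0;
}
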
562 * must make sure sg can hold rq->nr_phys_segments entries
564 int __blk_rq_map_sg(struct request_queue *q, struct request *rq,
569 if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
570 nsegs = __blk_bvec_map_sg(rq->special_vec, sglist, last_sg);
571 else if (rq->bio)
572 nsegs = __blk_bios_map_sg(q, rq->bio, sglist, last_sg);
581 WARN_ON(nsegs > blk_rq_nr_phys_segments(rq));
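
__blk_rq_map_sg() (lines 562-581) flattens either the request's special payload (the RQF_SPECIAL_PAYLOAD branch maps the single rq->special_vec) or its bio chain into a scatter-gather table, then checks that it produced no more entries than promised, which is why the caller "must make sure sg can hold rq->nr_phys_segments entries". A rough sketch of that contract, with a toy sg_entry type standing in for struct scatterlist and none of the kernel's segment coalescing:

#include <assert.h>
#include <stddef.h>

struct sg_entry { void *addr; size_t len; };
struct vec { void *addr; size_t len; };

/* Copy each data range into the next free sg slot; return entries used.
 * Mirrors the shape of __blk_bios_map_sg(), not its merging logic. */
static int map_sg(const struct vec *vecs, int nvecs, struct sg_entry *sg)
{
    int nsegs = 0;

    for (int i = 0; i < nvecs; i++) {
        sg[nsegs].addr = vecs[i].addr;
        sg[nsegs].len = vecs[i].len;
        nsegs++;
    }
    return nsegs;
}

int main(void)
{
    char a[512], b[1024];
    struct vec vecs[] = { { a, sizeof(a) }, { b, sizeof(b) } };
    struct sg_entry sg[2]; /* caller sized this from nr_phys_segments */
    int nsegs = map_sg(vecs, 2, sg);

    /* Analog of WARN_ON(nsegs > blk_rq_nr_phys_segments(rq)). */
    assert(nsegs <= 2);
    return 0;
}
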
587 static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
590 struct request_queue *q = rq->q;
593 if (blk_rq_is_passthrough(rq))
596 max_sectors = blk_queue_get_max_sectors(q, req_op(rq));
598 req_op(rq) == REQ_OP_DISCARD ||
599 req_op(rq) == REQ_OP_SECURE_ERASE)
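
blk_rq_get_max_sectors() (lines 587-599) picks a per-operation ceiling: passthrough requests bypass the normal limits, and discard/secure-erase requests get their own cap. A condensed sketch of that selection; it folds blk_queue_get_max_sectors()'s per-op lookup inline, ignores the chunk_sectors alignment the kernel also applies, and the numeric limits are invented for illustration (real values come from struct queue_limits):

#include <stdbool.h>
#include <stdio.h>

enum req_op { OP_READ, OP_WRITE, OP_DISCARD, OP_SECURE_ERASE };

/* Illustrative limits only, not the kernel's defaults. */
#define MAX_HW_SECTORS      65535u  /* passthrough ceiling */
#define MAX_SECTORS         2560u   /* normal read/write ceiling */
#define MAX_DISCARD_SECTORS 8388607u

static unsigned int get_max_sectors(enum req_op op, bool passthrough)
{
    if (passthrough)                 /* blk_rq_is_passthrough() */
        return MAX_HW_SECTORS;
    if (op == OP_DISCARD || op == OP_SECURE_ERASE)
        return MAX_DISCARD_SECTORS;  /* per-op lookup */
    return MAX_SECTORS;
}

int main(void)
{
    printf("write cap: %u sectors\n", get_max_sectors(OP_WRITE, false));
    printf("discard cap: %u sectors\n", get_max_sectors(OP_DISCARD, false));
    return 0;
}
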
723 * @rq: request to mark as mixed merge
726 * @rq is about to be mixed merged. Make sure the attributes
727 * which can be mixed are set in each bio and mark @rq as mixed
730 static void blk_rq_set_mixed_merge(struct request *rq)
732 blk_opf_t ff = rq->cmd_flags & REQ_FAILFAST_MASK;
735 if (rq->rq_flags & RQF_MIXED_MERGE)
739 * @rq will no longer represent mixable attributes for all the
743 for (bio = rq->bio; bio; bio = bio->bi_next) {
748 rq->rq_flags |= RQF_MIXED_MERGE;
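
blk_rq_set_mixed_merge() (lines 723-748) normalizes a request before bios with different fail-fast attributes are merged into it: the REQ_FAILFAST_* bits from rq->cmd_flags are stamped onto every bio in the chain, and RQF_MIXED_MERGE marks the request so this happens only once. A sketch of that flag propagation over a singly linked bio chain (the mask values below are illustrative, not the kernel's):

#include <stdio.h>

#define REQ_FAILFAST_MASK 0x7u  /* illustrative bit mask */
#define RQF_MIXED_MERGE   0x1u

struct bio { unsigned int bi_opf; struct bio *bi_next; };
struct request { unsigned int cmd_flags, rq_flags; struct bio *bio; };

static void set_mixed_merge(struct request *rq)
{
    unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;

    if (rq->rq_flags & RQF_MIXED_MERGE) /* already normalized */
        return;

    /* Stamp the request's fail-fast bits onto every bio in the chain,
     * so each bio carries attributes valid for the whole request. */
    for (struct bio *bio = rq->bio; bio; bio = bio->bi_next)
        bio->bi_opf |= ff;
    rq->rq_flags |= RQF_MIXED_MERGE;
}

int main(void)
{
    struct bio b2 = { 0, NULL }, b1 = { 0, &b2 };
    struct request rq = { .cmd_flags = 0x3, .rq_flags = 0, .bio = &b1 };

    set_mixed_merge(&rq);
    printf("b1=%#x b2=%#x\n", b1.bi_opf, b2.bi_opf); /* both 0x3 */
    return 0;
}
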
825 * from next to rq and release next. merge_requests_fn
891 struct request *rq)
893 struct request *next = elv_latter_request(q, rq);
896 return attempt_merge(q, rq, next);
902 struct request *rq)
904 struct request *prev = elv_former_request(q, rq);
907 return attempt_merge(q, prev, rq);
913 * Try to merge 'next' into 'rq'. Return true if the merge happened, false
917 bool blk_attempt_req_merge(struct request_queue *q, struct request *rq,
920 return attempt_merge(q, rq, next);
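
attempt_back_merge() and attempt_front_merge() (lines 891-907) are thin wrappers: the elevator names the request sitting after (elv_latter_request()) or before (elv_former_request()) @rq in its ordering, and attempt_merge() decides whether the two requests can be fused; blk_attempt_req_merge() (line 917) exposes the same attempt_merge() path directly. A sketch of that division of labor, with a toy adjacent-on-disk rule standing in for the kernel's full attempt_merge() checks, and a bool result where the kernel returns the merged request:

#include <stdbool.h>
#include <stdio.h>

struct request {
    unsigned long long pos;      /* start sector */
    unsigned int sectors;        /* length in sectors */
    struct request *next;        /* scheduler ordering (toy elevator) */
};

/* Toy attempt_merge(): fuse @next into @rq if they touch on disk. */
static bool attempt_merge(struct request *rq, struct request *next)
{
    if (!next || rq->pos + rq->sectors != next->pos)
        return false;
    rq->sectors += next->sectors; /* absorb next's data */
    return true;
}

static bool attempt_back_merge(struct request *rq)
{
    /* elv_latter_request() analog: whoever follows rq in the order. */
    return attempt_merge(rq, rq->next);
}

int main(void)
{
    struct request b = { 128, 8, NULL }, a = { 120, 8, &b };

    printf("merged: %d, a now %u sectors\n",
           attempt_back_merge(&a), a.sectors); /* merged: 1, 16 sectors */
    return 0;
}
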
923 bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
925 if (!rq_mergeable(rq) || !bio_mergeable(bio))
928 if (req_op(rq) != bio_op(bio))
932 if (bio_data_dir(bio) != rq_data_dir(rq))
936 if (!blk_cgroup_mergeable(rq, bio))
939 /* only merge integrity protected bio into ditto rq */
940 if (blk_integrity_merge_bio(rq->q, rq, bio) == false)
944 if (!bio_crypt_rq_ctx_compatible(rq, bio))
948 if (rq->write_hint != bio->bi_write_hint)
951 if (rq->ioprio != bio_prio(bio))
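
blk_rq_merge_ok() (lines 923-951) is a pure gatekeeper: a chain of early returns requiring that the bio and request agree on operation, data direction, cgroup, integrity profile, crypto context, write hint, and I/O priority. A condensed user-space version of that checklist shape, keeping only the fields visible in the excerpt:

#include <stdbool.h>

enum op { OP_READ, OP_WRITE };

struct io_desc {
    enum op op;
    int write_hint;
    int ioprio;
};

/* Shape of blk_rq_merge_ok(): every attribute must match or no merge.
 * The kernel additionally checks mergeability flags, data direction,
 * cgroup, integrity, and crypto context at this point. */
static bool merge_ok(const struct io_desc *rq, const struct io_desc *bio)
{
    if (rq->op != bio->op)                 /* req_op(rq) != bio_op(bio) */
        return false;
    if (rq->write_hint != bio->write_hint)
        return false;
    if (rq->ioprio != bio->ioprio)
        return false;
    return true;
}

int main(void)
{
    struct io_desc rq = { OP_WRITE, 0, 4 }, bio = { OP_WRITE, 0, 4 };

    return merge_ok(&rq, &bio) ? 0 : 1; /* exit 0: mergeable */
}
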
957 enum elv_merge blk_try_merge(struct request *rq, struct bio *bio)
959 if (blk_discard_mergable(rq))
961 else if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
963 else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
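
The two comparisons in blk_try_merge() (lines 961-963) are plain sector arithmetic: a bio that starts exactly where the request ends is a back merge, and a bio that ends exactly where the request starts is a front merge (the discard special case checked first at line 959 is omitted here). A worked sketch of that classification:

#include <stdio.h>

enum elv_merge { ELEVATOR_NO_MERGE, ELEVATOR_BACK_MERGE,
                 ELEVATOR_FRONT_MERGE };

/* rq_pos/rq_sectors describe the request, bio_pos/bio_sectors the bio. */
static enum elv_merge try_merge(unsigned long long rq_pos,
                                unsigned int rq_sectors,
                                unsigned long long bio_pos,
                                unsigned int bio_sectors)
{
    if (rq_pos + rq_sectors == bio_pos)   /* bio begins at rq's end */
        return ELEVATOR_BACK_MERGE;
    if (rq_pos - bio_sectors == bio_pos)  /* bio ends at rq's start */
        return ELEVATOR_FRONT_MERGE;
    return ELEVATOR_NO_MERGE;
}

int main(void)
{
    /* Request covers sectors [100, 108): an 8-sector bio at 108
     * back-merges, an 8-sector bio at 92 front-merges. */
    printf("%d %d %d\n",
           try_merge(100, 8, 108, 8),  /* 1: back  */
           try_merge(100, 8, 92, 8),   /* 2: front */
           try_merge(100, 8, 200, 8)); /* 0: none  */
    return 0;
}
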
1069 struct request *rq,
1074 if (!blk_rq_merge_ok(rq, bio))
1077 switch (blk_try_merge(rq, bio)) {
1079 if (!sched_allow_merge || blk_mq_sched_allow_merge(q, rq, bio))
1080 return bio_attempt_back_merge(rq, bio, nr_segs);
1083 if (!sched_allow_merge || blk_mq_sched_allow_merge(q, rq, bio))
1084 return bio_attempt_front_merge(rq, bio, nr_segs);
1087 return bio_attempt_discard_merge(q, rq, bio);
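
blk_attempt_bio_merge() (lines 1069-1087) first applies blk_rq_merge_ok(), then dispatches on blk_try_merge()'s verdict, letting the scheduler veto back and front merges through blk_mq_sched_allow_merge() when sched_allow_merge is set; discard merges skip that hook. A self-contained sketch of the dispatch structure (the merge helpers are stubs, not the kernel's bio_attempt_* functions, and the blk_rq_merge_ok() precheck is elided):

#include <stdbool.h>

enum merge_type { NO_MERGE, BACK_MERGE, FRONT_MERGE, DISCARD_MERGE };
enum merge_result { BIO_MERGE_NONE, BIO_MERGE_OK, BIO_MERGE_FAILED };

struct ctx { enum merge_type type; bool sched_allows; };

static enum merge_result do_back_merge(struct ctx *c)  { (void)c; return BIO_MERGE_OK; }
static enum merge_result do_front_merge(struct ctx *c) { (void)c; return BIO_MERGE_OK; }

/* Dispatch shape of blk_attempt_bio_merge(): classify the merge,
 * consult the scheduler only when @ask_sched is set, then run the
 * matching merge helper. */
static enum merge_result attempt_bio_merge(struct ctx *c, bool ask_sched)
{
    switch (c->type) {
    case BACK_MERGE:
        if (!ask_sched || c->sched_allows)
            return do_back_merge(c);
        break;
    case FRONT_MERGE:
        if (!ask_sched || c->sched_allows)
            return do_front_merge(c);
        break;
    case DISCARD_MERGE:
        return BIO_MERGE_OK;    /* discard merges skip the sched hook */
    default:
        return BIO_MERGE_NONE;  /* nothing adjacent to merge with */
    }
    return BIO_MERGE_FAILED;    /* adjacent, but scheduler said no */
}

int main(void)
{
    struct ctx c = { BACK_MERGE, false };

    /* Scheduler vetoes, so the merge is reported as failed. */
    return attempt_bio_merge(&c, true) == BIO_MERGE_FAILED ? 0 : 1;
}
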
1119 struct request *rq;
1124 rq_list_for_each(&plug->mq_list, rq) {
1125 if (rq->q == q) {
1126 if (blk_attempt_bio_merge(q, rq, bio, nr_segs, false) ==
1149 struct request *rq;
1152 list_for_each_entry_reverse(rq, list, queuelist) {
1156 switch (blk_attempt_bio_merge(q, rq, bio, nr_segs, true)) {
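
blk_attempt_plug_merge() (lines 1119-1126) and blk_bio_list_merge() (lines 1149-1156) share one idea: scan a short list of recently queued requests, newest first, and hand each candidate to blk_attempt_bio_merge(), giving up after a bounded number of checks. A sketch of that reverse scan over a plain array standing in for the plug list, assuming a bound of eight candidates like the kernel's checked counter; scanning newest-first pays off because sequential I/O most often merges with the request queued immediately before it:

#include <stdbool.h>
#include <stdio.h>

struct request { unsigned long long pos; unsigned int sectors; };

/* Toy merge test: does the new I/O start exactly where @rq ends? */
static bool try_merge(struct request *rq, unsigned long long bio_pos,
                      unsigned int bio_sectors)
{
    if (rq->pos + rq->sectors != bio_pos)
        return false;
    rq->sectors += bio_sectors;
    return true;
}

/* Scan newest-to-oldest, checking at most @max_checks candidates,
 * the way blk_bio_list_merge() bounds its reverse walk. */
static bool list_merge(struct request *rqs, int n,
                       unsigned long long bio_pos, unsigned int bio_sectors,
                       int max_checks)
{
    for (int i = n - 1; i >= 0 && max_checks-- > 0; i--)
        if (try_merge(&rqs[i], bio_pos, bio_sectors))
            return true;
    return false;
}

int main(void)
{
    struct request rqs[] = { { 0, 8 }, { 64, 8 }, { 128, 8 } };

    /* A bio at sector 72 merges into the middle request (64 + 8). */
    printf("merged: %d\n", list_merge(rqs, 3, 72, 8, 8));
    return 0;
}
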
1174 struct request *rq;
1176 switch (elv_merge(q, &rq, bio)) {
1178 if (!blk_mq_sched_allow_merge(q, rq, bio))
1180 if (bio_attempt_back_merge(rq, bio, nr_segs) != BIO_MERGE_OK)
1182 *merged_request = attempt_back_merge(q, rq);
1184 elv_merged_request(q, rq, ELEVATOR_BACK_MERGE);
1187 if (!blk_mq_sched_allow_merge(q, rq, bio))
1189 if (bio_attempt_front_merge(rq, bio, nr_segs) != BIO_MERGE_OK)
1191 *merged_request = attempt_front_merge(q, rq);
1193 elv_merged_request(q, rq, ELEVATOR_FRONT_MERGE);
1196 return bio_attempt_discard_merge(q, rq, bio) == BIO_MERGE_OK;
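
The final hits fall in blk_mq_sched_try_merge() (lines 1174-1196), which inverts the flow: elv_merge() locates the candidate request and names the merge direction, the bio is attached via bio_attempt_back_merge() or bio_attempt_front_merge(), and attempt_back_merge()/attempt_front_merge() then try to additionally collapse the now-adjacent requests, reported back through *merged_request. A two-stage sketch of that flow, with toy analogs throughout and the request-collapsing stage only noted in comments:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

enum elv_merge { NO_MERGE, BACK_MERGE, FRONT_MERGE };

struct request { unsigned long long pos; unsigned int sectors; };

/* Stage 1 analog of elv_merge(): the scheduler hands back a candidate
 * request plus the direction the bio attaches to it. */
static enum elv_merge elv_merge(struct request *rqs, int n,
                                unsigned long long bio_pos,
                                unsigned int bio_sectors,
                                struct request **rq)
{
    for (int i = 0; i < n; i++) {
        if (rqs[i].pos + rqs[i].sectors == bio_pos) {
            *rq = &rqs[i];
            return BACK_MERGE;
        }
        if (rqs[i].pos == bio_pos + bio_sectors) {
            *rq = &rqs[i];
            return FRONT_MERGE;
        }
    }
    return NO_MERGE;
}

/* Stage 2: apply the merge the elevator named. */
static bool sched_try_merge(struct request *rqs, int n,
                            unsigned long long bio_pos,
                            unsigned int bio_sectors)
{
    struct request *rq = NULL;

    switch (elv_merge(rqs, n, bio_pos, bio_sectors, &rq)) {
    case BACK_MERGE:                /* bio appended at rq's tail */
        rq->sectors += bio_sectors;
        return true;                /* kernel next tries attempt_back_merge() */
    case FRONT_MERGE:               /* bio prepended at rq's head */
        rq->pos = bio_pos;
        rq->sectors += bio_sectors;
        return true;                /* kernel next tries attempt_front_merge() */
    default:
        return false;
    }
}

int main(void)
{
    struct request rqs[] = { { 100, 8 } };

    printf("front merge: %d, rq now [%llu, +%u)\n",
           sched_try_merge(rqs, 1, 92, 8), rqs[0].pos, rqs[0].sectors);
    return 0;
}
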