Lines matching refs:rq — cross-reference hits for the request identifier "rq", apparently from the Linux kernel's blk-mq core (block/blk-mq.c); each entry is the source line number followed by the matching line.

42 #include "blk-rq-qos.h"
47 static void blk_mq_insert_request(struct request *rq, blk_insert_t flags);
48 static void blk_mq_request_bypass_insert(struct request *rq,
91 static bool blk_mq_check_inflight(struct request *rq, void *priv)
95 if (rq->part && blk_do_io_stat(rq) &&
96 (!bdev_is_partition(mi->part) || rq->part == mi->part) &&
97 blk_mq_rq_state(rq) == MQ_RQ_IN_FLIGHT)
98 mi->inflight[rq_data_dir(rq)]++;
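
The fragment above is the internal in-flight accounting callback. A driver wanting a similar per-direction count over its own tag set can use the exported iterator blk_mq_tagset_busy_iter(); a minimal sketch, assuming the modern callback signature without the old "reserved" argument (all "my_" names are made up):

struct my_inflight_counts {
        unsigned int inflight[2];
};

static bool my_count_inflight(struct request *rq, void *priv)
{
        struct my_inflight_counts *c = priv;

        /* count started, not-yet-completed requests per data direction */
        if (blk_mq_request_started(rq) && !blk_mq_request_completed(rq))
                c->inflight[rq_data_dir(rq)]++;
        return true;    /* keep iterating */
}

/* usage:
 *      struct my_inflight_counts c = {};
 *      blk_mq_tagset_busy_iter(&my_tag_set, my_count_inflight, &c);
 */
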
314 void blk_rq_init(struct request_queue *q, struct request *rq)
316 memset(rq, 0, sizeof(*rq));
318 INIT_LIST_HEAD(&rq->queuelist);
319 rq->q = q;
320 rq->__sector = (sector_t) -1;
321 INIT_HLIST_NODE(&rq->hash);
322 RB_CLEAR_NODE(&rq->rb_node);
323 rq->tag = BLK_MQ_NO_TAG;
324 rq->internal_tag = BLK_MQ_NO_TAG;
325 rq->start_time_ns = blk_time_get_ns();
326 rq->part = NULL;
327 blk_crypto_rq_set_defaults(rq);
332 static inline void blk_mq_rq_time_init(struct request *rq, u64 alloc_time_ns)
334 if (blk_mq_need_time_stamp(rq))
335 rq->start_time_ns = blk_time_get_ns();
337 rq->start_time_ns = 0;
340 if (blk_queue_rq_alloc_time(rq->q))
341 rq->alloc_time_ns = alloc_time_ns ?: rq->start_time_ns;
343 rq->alloc_time_ns = 0;
353 struct request *rq = tags->static_rqs[tag];
355 rq->q = q;
356 rq->mq_ctx = ctx;
357 rq->mq_hctx = hctx;
358 rq->cmd_flags = data->cmd_flags;
364 rq->rq_flags = data->rq_flags;
367 rq->tag = BLK_MQ_NO_TAG;
368 rq->internal_tag = tag;
370 rq->tag = tag;
371 rq->internal_tag = BLK_MQ_NO_TAG;
373 rq->timeout = 0;
375 rq->part = NULL;
376 rq->io_start_time_ns = 0;
377 rq->stats_sectors = 0;
378 rq->nr_phys_segments = 0;
380 rq->nr_integrity_segments = 0;
382 rq->end_io = NULL;
383 rq->end_io_data = NULL;
385 blk_crypto_rq_set_defaults(rq);
386 INIT_LIST_HEAD(&rq->queuelist);
388 WRITE_ONCE(rq->deadline, 0);
389 req_ref_set(rq, 1);
391 if (rq->rq_flags & RQF_USE_SCHED) {
394 INIT_HLIST_NODE(&rq->hash);
395 RB_CLEAR_NODE(&rq->rb_node);
398 e->type->ops.prepare_request(rq);
401 return rq;
409 struct request *rq;
424 rq = blk_mq_rq_ctx_init(data, tags, tag);
425 rq_list_add(data->cached_rq, rq);
441 struct request *rq;
487 rq = __blk_mq_alloc_requests_batch(data);
488 if (rq) {
489 blk_mq_rq_time_init(rq, alloc_time_ns);
490 return rq;
516 rq = blk_mq_rq_ctx_init(data, blk_mq_tags_from_data(data), tag);
517 blk_mq_rq_time_init(rq, alloc_time_ns);
518 return rq;
533 struct request *rq;
540 rq = __blk_mq_alloc_requests(&data);
541 if (unlikely(!rq))
543 return rq;
551 struct request *rq;
559 rq = blk_mq_rq_cache_fill(q, plug, opf, flags);
560 if (!rq)
563 rq = rq_list_peek(&plug->cached_rq);
564 if (!rq || rq->q != q)
567 if (blk_mq_get_hctx_type(opf) != rq->mq_hctx->type)
569 if (op_is_flush(rq->cmd_flags) != op_is_flush(opf))
572 plug->cached_rq = rq_list_next(rq);
573 blk_mq_rq_time_init(rq, 0);
576 rq->cmd_flags = opf;
577 INIT_LIST_HEAD(&rq->queuelist);
578 return rq;
584 struct request *rq;
586 rq = blk_mq_alloc_cached_request(q, opf, flags);
587 if (!rq) {
600 rq = __blk_mq_alloc_requests(&data);
601 if (!rq)
604 rq->__data_len = 0;
605 rq->__sector = (sector_t) -1;
606 rq->bio = rq->biotail = NULL;
607 return rq;
624 struct request *rq;
677 rq = blk_mq_rq_ctx_init(&data, blk_mq_tags_from_data(&data), tag);
678 blk_mq_rq_time_init(rq, alloc_time_ns);
679 rq->__data_len = 0;
680 rq->__sector = (sector_t) -1;
681 rq->bio = rq->biotail = NULL;
682 return rq;
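
The two allocation paths above (plain and per-hctx) back the exported blk_mq_alloc_request*() interface. A minimal synchronous passthrough sketch, assuming a queue whose driver understands REQ_OP_DRV_IN and a caller-supplied kernel buffer ("my_issue_sync", buf and len are assumptions):

static int my_issue_sync(struct request_queue *q, void *buf, unsigned int len)
{
        struct request *rq;
        blk_status_t status;
        int err;

        rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
        if (IS_ERR(rq))
                return PTR_ERR(rq);

        err = blk_rq_map_kern(q, rq, buf, len, GFP_KERNEL);
        if (err)
                goto out_free;

        status = blk_execute_rq(rq, false);     /* sleeps until completion */
        err = blk_status_to_errno(status);
out_free:
        blk_mq_free_request(rq);
        return err;
}
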
690 static void blk_mq_finish_request(struct request *rq)
692 struct request_queue *q = rq->q;
694 blk_zone_finish_request(rq);
696 if (rq->rq_flags & RQF_USE_SCHED) {
697 q->elevator->type->ops.finish_request(rq);
701 * to avoid double finish_request() on the rq.
703 rq->rq_flags &= ~RQF_USE_SCHED;
707 static void __blk_mq_free_request(struct request *rq)
709 struct request_queue *q = rq->q;
710 struct blk_mq_ctx *ctx = rq->mq_ctx;
711 struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
712 const int sched_tag = rq->internal_tag;
714 blk_crypto_free_request(rq);
715 blk_pm_mark_last_busy(rq);
716 rq->mq_hctx = NULL;
718 if (rq->tag != BLK_MQ_NO_TAG) {
720 blk_mq_put_tag(hctx->tags, ctx, rq->tag);
728 void blk_mq_free_request(struct request *rq)
730 struct request_queue *q = rq->q;
732 blk_mq_finish_request(rq);
734 if (unlikely(laptop_mode && !blk_rq_is_passthrough(rq)))
737 rq_qos_done(q, rq);
739 WRITE_ONCE(rq->state, MQ_RQ_IDLE);
740 if (req_ref_put_and_test(rq))
741 __blk_mq_free_request(rq);
747 struct request *rq;
749 while ((rq = rq_list_pop(&plug->cached_rq)) != NULL)
750 blk_mq_free_request(rq);
753 void blk_dump_rq_flags(struct request *rq, char *msg)
756 rq->q->disk ? rq->q->disk->disk_name : "?",
757 (__force unsigned long long) rq->cmd_flags);
760 (unsigned long long)blk_rq_pos(rq),
761 blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
763 rq->bio, rq->biotail, blk_rq_bytes(rq));
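
blk_dump_rq_flags() above is the exported debug dump for a request; a hedged one-line usage sketch (the driver prefix in the message is made up):

        blk_dump_rq_flags(rq, "mydrv: device rejected request");
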
1029 static inline void __blk_mq_end_request_acct(struct request *rq, u64 now)
1031 if (rq->rq_flags & RQF_STATS)
1032 blk_stat_add(rq, now);
1034 blk_mq_sched_completed_request(rq, now);
1035 blk_account_io_done(rq, now);
1038 inline void __blk_mq_end_request(struct request *rq, blk_status_t error)
1040 if (blk_mq_need_time_stamp(rq))
1041 __blk_mq_end_request_acct(rq, blk_time_get_ns());
1043 blk_mq_finish_request(rq);
1045 if (rq->end_io) {
1046 rq_qos_done(rq->q, rq);
1047 if (rq->end_io(rq, error) == RQ_END_IO_FREE)
1048 blk_mq_free_request(rq);
1050 blk_mq_free_request(rq);
1055 void blk_mq_end_request(struct request *rq, blk_status_t error)
1057 if (blk_update_request(rq, error, blk_rq_bytes(rq)))
1059 __blk_mq_end_request(rq, error);
1080 struct request *rq;
1086 while ((rq = rq_list_pop(&iob->req_list)) != NULL) {
1087 prefetch(rq->bio);
1088 prefetch(rq->rq_next);
1090 blk_complete_request(rq);
1092 __blk_mq_end_request_acct(rq, now);
1094 blk_mq_finish_request(rq);
1096 rq_qos_done(rq->q, rq);
1102 if (rq->end_io && rq->end_io(rq, 0) == RQ_END_IO_NONE)
1105 WRITE_ONCE(rq->state, MQ_RQ_IDLE);
1106 if (!req_ref_put_and_test(rq))
1109 blk_crypto_free_request(rq);
1110 blk_pm_mark_last_busy(rq);
1112 if (nr_tags == TAG_COMP_BATCH || cur_hctx != rq->mq_hctx) {
1116 cur_hctx = rq->mq_hctx;
1118 tags[nr_tags++] = rq->tag;
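
The batch-completion path above (blk_mq_end_request_batch()) is normally fed from a driver's ->poll() hook via blk_mq_add_to_batch(); a hedged sketch, where my_poll_one() is a made-up hardware poll helper returning the next completed request or NULL:

static int my_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
{
        struct request *rq;
        int found = 0;

        while ((rq = my_poll_one(hctx))) {      /* hypothetical HW poll */
                found++;
                /* queue onto the batch; fall back to individual completion
                 * when batching is not possible (no iob, request has an
                 * end_io, ...) */
                if (!blk_mq_add_to_batch(rq, iob, 0, blk_mq_end_request_batch))
                        blk_mq_end_request(rq, BLK_STS_OK);
        }
        return found;
}

/* wired up via  .poll = my_poll  in struct blk_mq_ops */
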
1129 struct request *rq, *next;
1131 llist_for_each_entry_safe(rq, next, entry, ipi_list)
1132 rq->q->mq_ops->complete(rq);
1151 static inline bool blk_mq_complete_need_ipi(struct request *rq)
1156 !test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags))
1168 if (cpu == rq->mq_ctx->cpu ||
1169 (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags) &&
1170 cpus_share_cache(cpu, rq->mq_ctx->cpu) &&
1171 cpus_equal_capacity(cpu, rq->mq_ctx->cpu)))
1175 return cpu_online(rq->mq_ctx->cpu);
1178 static void blk_mq_complete_send_ipi(struct request *rq)
1182 cpu = rq->mq_ctx->cpu;
1183 if (llist_add(&rq->ipi_list, &per_cpu(blk_cpu_done, cpu)))
1187 static void blk_mq_raise_softirq(struct request *rq)
1193 if (llist_add(&rq->ipi_list, list))
1198 bool blk_mq_complete_request_remote(struct request *rq)
1200 WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
1207 if ((rq->mq_hctx->nr_ctx == 1 &&
1208 rq->mq_ctx->cpu == raw_smp_processor_id()) ||
1209 rq->cmd_flags & REQ_POLLED)
1212 if (blk_mq_complete_need_ipi(rq)) {
1213 blk_mq_complete_send_ipi(rq);
1217 if (rq->q->nr_hw_queues == 1) {
1218 blk_mq_raise_softirq(rq);
1227 * @rq: the request being processed
1232 void blk_mq_complete_request(struct request *rq)
1234 if (!blk_mq_complete_request_remote(rq))
1235 rq->q->mq_ops->complete(rq);
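
blk_mq_complete_request() above is what a driver calls from its interrupt handler; the heavier completion work then runs in the ->complete() callback on the appropriate CPU. A hedged sketch ("my_fetch_completed_rq" is a made-up per-device lookup helper):

static irqreturn_t my_irq_handler(int irq, void *data)
{
        struct request *rq = my_fetch_completed_rq(data);  /* hypothetical */

        if (!rq)
                return IRQ_NONE;
        blk_mq_complete_request(rq);    /* defers real work to ->complete() */
        return IRQ_HANDLED;
}

static void my_complete(struct request *rq)
{
        blk_mq_end_request(rq, BLK_STS_OK);
}

/* wired up via  .complete = my_complete  in struct blk_mq_ops */
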
1241 * @rq: Pointer to request to be started
1247 void blk_mq_start_request(struct request *rq)
1249 struct request_queue *q = rq->q;
1251 trace_block_rq_issue(rq);
1254 !blk_rq_is_passthrough(rq)) {
1255 rq->io_start_time_ns = blk_time_get_ns();
1256 rq->stats_sectors = blk_rq_sectors(rq);
1257 rq->rq_flags |= RQF_STATS;
1258 rq_qos_issue(q, rq);
1261 WARN_ON_ONCE(blk_mq_rq_state(rq) != MQ_RQ_IDLE);
1263 blk_add_timer(rq);
1264 WRITE_ONCE(rq->state, MQ_RQ_IN_FLIGHT);
1265 rq->mq_hctx->tags->rqs[rq->tag] = rq;
1268 if (blk_integrity_rq(rq) && req_op(rq) == REQ_OP_WRITE)
1269 q->integrity.profile->prepare_fn(rq);
1271 if (rq->bio && rq->bio->bi_opf & REQ_POLLED)
1272 WRITE_ONCE(rq->bio->bi_cookie, rq->mq_hctx->queue_num);
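
blk_mq_start_request() above must be called by the driver's ->queue_rq() before the hardware can see the command; a minimal hedged sketch ("my_submit_to_hw" is a made-up submission helper returning non-zero when the device is busy):

static blk_status_t my_queue_rq(struct blk_mq_hw_ctx *hctx,
                                const struct blk_mq_queue_data *bd)
{
        struct request *rq = bd->rq;

        blk_mq_start_request(rq);       /* required before touching HW */

        if (my_submit_to_hw(rq))        /* hypothetical, non-zero == busy */
                return BLK_STS_RESOURCE;        /* core re-dispatches later */

        return BLK_STS_OK;              /* completed later from the IRQ path */
}
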
1288 static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
1293 trace_block_plug(rq->q);
1295 (!blk_queue_nomerges(rq->q) &&
1299 trace_block_plug(rq->q);
1302 if (!plug->multiple_queues && last && last->q != rq->q)
1308 if (!plug->has_elevator && (rq->rq_flags & RQF_SCHED_TAGS))
1310 rq->rq_next = NULL;
1311 rq_list_add(&plug->mq_list, rq);
1317 * @rq: request to insert
1327 void blk_execute_rq_nowait(struct request *rq, bool at_head)
1329 struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
1332 WARN_ON(!blk_rq_is_passthrough(rq));
1334 blk_account_io_start(rq);
1337 blk_add_rq_to_plug(current->plug, rq);
1341 blk_mq_insert_request(rq, at_head ? BLK_MQ_INSERT_AT_HEAD : 0);
1351 static enum rq_end_io_ret blk_end_sync_rq(struct request *rq, blk_status_t ret)
1353 struct blk_rq_wait *wait = rq->end_io_data;
1360 bool blk_rq_is_poll(struct request *rq)
1362 if (!rq->mq_hctx)
1364 if (rq->mq_hctx->type != HCTX_TYPE_POLL)
1370 static void blk_rq_poll_completion(struct request *rq, struct completion *wait)
1373 blk_hctx_poll(rq->q, rq->mq_hctx, NULL, 0);
1380 * @rq: request to insert
1388 blk_status_t blk_execute_rq(struct request *rq, bool at_head)
1390 struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
1396 WARN_ON(!blk_rq_is_passthrough(rq));
1398 rq->end_io_data = &wait;
1399 rq->end_io = blk_end_sync_rq;
1401 blk_account_io_start(rq);
1402 blk_mq_insert_request(rq, at_head ? BLK_MQ_INSERT_AT_HEAD : 0);
1405 if (blk_rq_is_poll(rq))
1406 blk_rq_poll_completion(rq, &wait.done);
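
blk_execute_rq_nowait() and blk_execute_rq() above are the passthrough submission entry points; an asynchronous sketch with a private end_io hook ("my_" names and the per-command ctx are assumptions):

static enum rq_end_io_ret my_async_end_io(struct request *rq,
                                          blk_status_t error)
{
        kfree(rq->end_io_data);         /* hypothetical per-command context */
        return RQ_END_IO_FREE;          /* let the core free the request */
}

static void my_issue_async(struct request *rq, void *ctx)
{
        rq->end_io      = my_async_end_io;
        rq->end_io_data = ctx;
        blk_execute_rq_nowait(rq, false);       /* completion runs later */
}
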
1414 static void __blk_mq_requeue_request(struct request *rq)
1416 struct request_queue *q = rq->q;
1418 blk_mq_put_driver_tag(rq);
1420 trace_block_rq_requeue(rq);
1421 rq_qos_requeue(q, rq);
1423 if (blk_mq_request_started(rq)) {
1424 WRITE_ONCE(rq->state, MQ_RQ_IDLE);
1425 rq->rq_flags &= ~RQF_TIMED_OUT;
1429 void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list)
1431 struct request_queue *q = rq->q;
1434 __blk_mq_requeue_request(rq);
1437 blk_mq_sched_requeue_request(rq);
1440 list_add_tail(&rq->queuelist, &q->requeue_list);
1454 struct request *rq;
1462 rq = list_entry(rq_list.next, struct request, queuelist);
1469 if (rq->rq_flags & RQF_DONTPREP) {
1470 list_del_init(&rq->queuelist);
1471 blk_mq_request_bypass_insert(rq, 0);
1473 list_del_init(&rq->queuelist);
1474 blk_mq_insert_request(rq, BLK_MQ_INSERT_AT_HEAD);
1479 rq = list_entry(flush_list.next, struct request, queuelist);
1480 list_del_init(&rq->queuelist);
1481 blk_mq_insert_request(rq, 0);
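
blk_mq_requeue_request() above is also exported for drivers that want to retry an already-started request rather than fail it, roughly as SCSI and dm do; a hedged sketch ("my_finish_cmd" and the transient_error flag are assumptions):

static void my_finish_cmd(struct request *rq, bool transient_error)
{
        if (transient_error) {
                /* put the started request back and kick the requeue work */
                blk_mq_requeue_request(rq, true);
                return;
        }
        blk_mq_end_request(rq, BLK_STS_OK);
}
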
1501 static bool blk_is_flush_data_rq(struct request *rq)
1503 return (rq->rq_flags & RQF_FLUSH_SEQ) && !is_flush_rq(rq);
1506 static bool blk_mq_rq_inflight(struct request *rq, void *priv)
1518 if (blk_mq_request_started(rq) && !(blk_queue_quiesced(rq->q) &&
1519 blk_is_flush_data_rq(rq) &&
1520 blk_mq_request_completed(rq))) {
1560 static bool blk_mq_req_expired(struct request *rq, struct blk_expired_data *expired)
1564 if (blk_mq_rq_state(rq) != MQ_RQ_IN_FLIGHT)
1566 if (rq->rq_flags & RQF_TIMED_OUT)
1569 deadline = READ_ONCE(rq->deadline);
1580 void blk_mq_put_rq_ref(struct request *rq)
1582 if (is_flush_rq(rq)) {
1583 if (rq->end_io(rq, 0) == RQ_END_IO_FREE)
1584 blk_mq_free_request(rq);
1585 } else if (req_ref_put_and_test(rq)) {
1586 __blk_mq_free_request(rq);
1590 static bool blk_mq_check_expired(struct request *rq, void *priv)
1601 if (blk_mq_req_expired(rq, expired)) {
1608 static bool blk_mq_handle_expired(struct request *rq, void *priv)
1612 if (blk_mq_req_expired(rq, expired))
1613 blk_mq_rq_timed_out(rq);
1712 struct request *rq;
1725 dispatch_data->rq = list_entry_rq(ctx->rq_lists[type].next);
1726 list_del_init(&dispatch_data->rq->queuelist);
1732 return !dispatch_data->rq;
1741 .rq = NULL,
1747 return data.rq;
1750 bool __blk_mq_alloc_driver_tag(struct request *rq)
1752 struct sbitmap_queue *bt = &rq->mq_hctx->tags->bitmap_tags;
1753 unsigned int tag_offset = rq->mq_hctx->tags->nr_reserved_tags;
1756 blk_mq_tag_busy(rq->mq_hctx);
1758 if (blk_mq_tag_is_reserved(rq->mq_hctx->sched_tags, rq->internal_tag)) {
1759 bt = &rq->mq_hctx->tags->breserved_tags;
1762 if (!hctx_may_queue(rq->mq_hctx, bt))
1770 rq->tag = tag + tag_offset;
1771 blk_mq_inc_active_requests(rq->mq_hctx);
1803 struct request *rq)
1822 return blk_mq_get_driver_tag(rq);
1829 if (blk_mq_tag_is_reserved(rq->mq_hctx->sched_tags, rq->internal_tag))
1868 ret = blk_mq_get_driver_tag(rq);
1915 static void blk_mq_handle_dev_resource(struct request *rq,
1918 list_add(&rq->queuelist, list);
1919 __blk_mq_requeue_request(rq);
1928 static enum prep_dispatch blk_mq_prep_dispatch_rq(struct request *rq,
1931 struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
1935 budget_token = blk_mq_get_dispatch_budget(rq->q);
1937 blk_mq_put_driver_tag(rq);
1940 blk_mq_set_rq_budget_token(rq, budget_token);
1943 if (!blk_mq_get_driver_tag(rq)) {
1951 if (!blk_mq_mark_tag_wait(hctx, rq)) {
1957 blk_mq_put_dispatch_budget(rq->q, budget_token);
1969 struct request *rq;
1971 list_for_each_entry(rq, list, queuelist) {
1972 int budget_token = blk_mq_get_rq_budget_token(rq);
2004 struct request *rq;
2019 rq = list_first_entry(list, struct request, queuelist);
2021 WARN_ON_ONCE(hctx != rq->mq_hctx);
2022 prep = blk_mq_prep_dispatch_rq(rq, !nr_budgets);
2026 list_del_init(&rq->queuelist);
2028 bd.rq = rq;
2046 blk_mq_handle_dev_resource(rq, list);
2049 blk_mq_end_request(rq, ret);
2104 * and dm-rq.
2426 * @rq: Pointer to request to be inserted.
2432 static void blk_mq_request_bypass_insert(struct request *rq, blk_insert_t flags)
2434 struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
2438 list_add(&rq->queuelist, &hctx->dispatch);
2440 list_add_tail(&rq->queuelist, &hctx->dispatch);
2448 struct request *rq;
2466 list_for_each_entry(rq, list, queuelist) {
2467 BUG_ON(rq->mq_ctx != ctx);
2468 trace_block_rq_insert(rq);
2469 if (rq->cmd_flags & REQ_NOWAIT)
2481 static void blk_mq_insert_request(struct request *rq, blk_insert_t flags)
2483 struct request_queue *q = rq->q;
2484 struct blk_mq_ctx *ctx = rq->mq_ctx;
2485 struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
2487 if (blk_rq_is_passthrough(rq)) {
2498 blk_mq_request_bypass_insert(rq, flags);
2499 } else if (req_op(rq) == REQ_OP_FLUSH) {
2510 * rq to the front of hctx->dispatch, it is easier to introduce
2511 * extra time to flush rq's latency because of S_SCHED_RESTART
2516 * drive when adding flush rq to the front of hctx->dispatch.
2518 * Simply queue flush rq to the front of hctx->dispatch so that
2521 blk_mq_request_bypass_insert(rq, BLK_MQ_INSERT_AT_HEAD);
2525 WARN_ON_ONCE(rq->tag != BLK_MQ_NO_TAG);
2527 list_add(&rq->queuelist, &list);
2530 trace_block_rq_insert(rq);
2534 list_add(&rq->queuelist, &ctx->rq_lists[hctx->type]);
2536 list_add_tail(&rq->queuelist,
2543 static void blk_mq_bio_to_request(struct request *rq, struct bio *bio,
2549 rq->cmd_flags |= REQ_FAILFAST_MASK;
2551 rq->__sector = bio->bi_iter.bi_sector;
2552 rq->write_hint = bio->bi_write_hint;
2553 blk_rq_bio_prep(rq, bio, nr_segs);
2556 err = blk_crypto_rq_bio_prep(rq, bio, GFP_NOIO);
2559 blk_account_io_start(rq);
2563 struct request *rq, bool last)
2565 struct request_queue *q = rq->q;
2567 .rq = rq,
2585 __blk_mq_requeue_request(rq);
2595 static bool blk_mq_get_budget_and_tag(struct request *rq)
2599 budget_token = blk_mq_get_dispatch_budget(rq->q);
2602 blk_mq_set_rq_budget_token(rq, budget_token);
2603 if (!blk_mq_get_driver_tag(rq)) {
2604 blk_mq_put_dispatch_budget(rq->q, budget_token);
2613 * @rq: Pointer to request to be sent.
2621 struct request *rq)
2625 if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(rq->q)) {
2626 blk_mq_insert_request(rq, 0);
2630 if ((rq->rq_flags & RQF_USE_SCHED) || !blk_mq_get_budget_and_tag(rq)) {
2631 blk_mq_insert_request(rq, 0);
2632 blk_mq_run_hw_queue(hctx, rq->cmd_flags & REQ_NOWAIT);
2636 ret = __blk_mq_issue_directly(hctx, rq, true);
2642 blk_mq_request_bypass_insert(rq, 0);
2646 blk_mq_end_request(rq, ret);
2651 static blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last)
2653 struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
2655 if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(rq->q)) {
2656 blk_mq_insert_request(rq, 0);
2660 if (!blk_mq_get_budget_and_tag(rq))
2662 return __blk_mq_issue_directly(hctx, rq, last);
2668 struct request *rq;
2672 while ((rq = rq_list_pop(&plug->mq_list))) {
2675 if (hctx != rq->mq_hctx) {
2680 hctx = rq->mq_hctx;
2683 ret = blk_mq_request_issue_directly(rq, last);
2690 blk_mq_request_bypass_insert(rq, 0);
2694 blk_mq_end_request(rq, ret);
2723 struct request *rq = rq_list_pop(&plug->mq_list);
2726 this_hctx = rq->mq_hctx;
2727 this_ctx = rq->mq_ctx;
2728 is_passthrough = blk_rq_is_passthrough(rq);
2729 } else if (this_hctx != rq->mq_hctx || this_ctx != rq->mq_ctx ||
2730 is_passthrough != blk_rq_is_passthrough(rq)) {
2731 rq_list_add_tail(&requeue_lastp, rq);
2734 list_add(&rq->queuelist, &list);
2760 struct request *rq;
2776 rq = rq_list_peek(&plug->mq_list);
2777 q = rq->q;
2810 struct request *rq = list_first_entry(list, struct request,
2813 list_del_init(&rq->queuelist);
2814 ret = blk_mq_request_issue_directly(rq, list_empty(list));
2821 blk_mq_request_bypass_insert(rq, 0);
2826 blk_mq_end_request(rq, ret);
2858 struct request *rq;
2868 rq = __blk_mq_alloc_requests(&data);
2869 if (rq)
2870 return rq;
2884 struct request *rq;
2888 rq = rq_list_peek(&plug->cached_rq);
2889 if (!rq || rq->q != q)
2891 if (type != rq->mq_hctx->type &&
2892 (type != HCTX_TYPE_READ || rq->mq_hctx->type != HCTX_TYPE_DEFAULT))
2894 if (op_is_flush(rq->cmd_flags) != op_is_flush(opf))
2896 return rq;
2899 static void blk_mq_use_cached_rq(struct request *rq, struct blk_plug *plug,
2902 WARN_ON_ONCE(rq_list_peek(&plug->cached_rq) != rq);
2909 plug->cached_rq = rq_list_next(rq);
2910 rq_qos_throttle(rq->q, bio);
2912 blk_mq_rq_time_init(rq, 0);
2913 rq->cmd_flags = bio->bi_opf;
2914 INIT_LIST_HEAD(&rq->queuelist);
2937 struct request *rq;
2943 rq = blk_mq_peek_cached_request(plug, q, bio->bi_opf);
2953 if (rq)
2964 if (!rq) {
2984 if (!rq) {
2985 rq = blk_mq_get_new_requests(q, plug, bio, nr_segs);
2986 if (unlikely(!rq))
2989 blk_mq_use_cached_rq(rq, plug, bio);
2994 rq_qos_track(q, rq, bio);
2996 blk_mq_bio_to_request(rq, bio, nr_segs);
2998 ret = blk_crypto_rq_get_keyslot(rq);
3002 blk_mq_free_request(rq);
3007 blk_zone_write_plug_init_request(rq);
3009 if (op_is_flush(bio->bi_opf) && blk_insert_flush(rq))
3013 blk_add_rq_to_plug(plug, rq);
3017 hctx = rq->mq_hctx;
3018 if ((rq->rq_flags & RQF_USE_SCHED) ||
3020 blk_mq_insert_request(rq, 0);
3023 blk_mq_run_dispatch_ops(q, blk_mq_try_issue_directly(hctx, rq));
3032 if (!rq)
3039 * @rq: the request being queued
3041 blk_status_t blk_insert_cloned_request(struct request *rq)
3043 struct request_queue *q = rq->q;
3044 unsigned int max_sectors = blk_queue_get_max_sectors(q, req_op(rq));
3045 unsigned int max_segments = blk_rq_get_max_segments(rq);
3048 if (blk_rq_sectors(rq) > max_sectors) {
3063 __func__, blk_rq_sectors(rq), max_sectors);
3071 rq->nr_phys_segments = blk_recalc_rq_segments(rq);
3072 if (rq->nr_phys_segments > max_segments) {
3074 __func__, rq->nr_phys_segments, max_segments);
3078 if (q->disk && should_fail_request(q->disk->part0, blk_rq_bytes(rq)))
3081 ret = blk_crypto_rq_get_keyslot(rq);
3085 blk_account_io_start(rq);
3093 ret = blk_mq_request_issue_directly(rq, true));
3095 blk_account_io_done(rq, blk_time_get_ns());
3102 * @rq: the clone request to be cleaned up
3105 * Free all bios in @rq for a cloned request.
3107 void blk_rq_unprep_clone(struct request *rq)
3111 while ((bio = rq->bio) != NULL) {
3112 rq->bio = bio->bi_next;
3121 * @rq: the request to be setup
3130 * Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq.
3134 * the caller must complete @rq before @rq_src.
3136 int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
3147 bio = bio_alloc_clone(rq->q->disk->part0, bio_src, gfp_mask,
3155 if (rq->bio) {
3156 rq->biotail->bi_next = bio;
3157 rq->biotail = bio;
3159 rq->bio = rq->biotail = bio;
3165 rq->__sector = blk_rq_pos(rq_src);
3166 rq->__data_len = blk_rq_bytes(rq_src);
3168 rq->rq_flags |= RQF_SPECIAL_PAYLOAD;
3169 rq->special_vec = rq_src->special_vec;
3171 rq->nr_phys_segments = rq_src->nr_phys_segments;
3172 rq->ioprio = rq_src->ioprio;
3173 rq->write_hint = rq_src->write_hint;
3175 if (rq->bio && blk_crypto_rq_bio_prep(rq, rq->bio, gfp_mask) < 0)
3183 blk_rq_unprep_clone(rq);
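
The three helpers above (blk_insert_cloned_request(), blk_rq_unprep_clone(), blk_rq_prep_clone()) serve request-based stacking drivers. A hedged sketch of that pattern, loosely modelled on dm-rq; the "my_" names, lower_q and bs are assumptions, and the original request is ended from the clone's end_io:

static void my_clone_bio_end_io(struct bio *bio)
{
        bio_put(bio);                   /* drop the cloned bio */
}

static int my_clone_bio_ctr(struct bio *bio, struct bio *bio_src, void *data)
{
        bio->bi_end_io = my_clone_bio_end_io;
        return 0;
}

static enum rq_end_io_ret my_clone_end_io(struct request *clone,
                                          blk_status_t error)
{
        struct request *rq_src = clone->end_io_data;

        blk_mq_end_request(rq_src, error);      /* finish the original */
        return RQ_END_IO_FREE;                  /* core frees the clone */
}

static blk_status_t my_clone_and_issue(struct request *rq_src,
                                       struct request_queue *lower_q,
                                       struct bio_set *bs)
{
        struct request *clone;
        blk_status_t ret;

        clone = blk_mq_alloc_request(lower_q, rq_src->cmd_flags,
                                     BLK_MQ_REQ_NOWAIT);
        if (IS_ERR(clone))
                return BLK_STS_RESOURCE;

        if (blk_rq_prep_clone(clone, rq_src, bs, GFP_ATOMIC,
                              my_clone_bio_ctr, NULL)) {
                blk_mq_free_request(clone);
                return BLK_STS_RESOURCE;
        }
        clone->end_io      = my_clone_end_io;
        clone->end_io_data = rq_src;

        ret = blk_insert_cloned_request(clone);
        if (ret != BLK_STS_OK) {
                blk_rq_unprep_clone(clone);
                blk_mq_free_request(clone);
        }
        return ret;
}
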
3194 void blk_steal_bios(struct bio_list *list, struct request *rq)
3196 if (rq->bio) {
3198 list->tail->bi_next = rq->bio;
3200 list->head = rq->bio;
3201 list->tail = rq->biotail;
3203 rq->bio = NULL;
3204 rq->biotail = NULL;
3207 rq->__data_len = 0;
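
blk_steal_bios() above lets a stacking driver take the bios off a request so they can be retried elsewhere (the nvme multipath failover pattern); a hedged sketch ("my_failover" is a made-up name, and resubmission would normally happen from process context):

static void my_failover(struct request *rq)
{
        struct bio_list bios;
        struct bio *bio;

        bio_list_init(&bios);
        blk_steal_bios(&bios, rq);      /* rq is now empty */
        blk_mq_end_request(rq, BLK_STS_OK);

        while ((bio = bio_list_pop(&bios)))
                submit_bio_noacct(bio); /* re-drive on another path */
}
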
3236 struct request *rq = drv_tags->rqs[i];
3237 unsigned long rq_addr = (unsigned long)rq;
3240 WARN_ON_ONCE(req_ref_read(rq) != 0);
3241 cmpxchg(&drv_tags->rqs[i], rq, NULL);
3274 struct request *rq = tags->static_rqs[i];
3276 if (!rq)
3278 set->ops->exit_request(set, rq, hctx_idx);
3371 static int blk_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
3377 ret = set->ops->init_request(set, rq, hctx_idx, node);
3382 WRITE_ONCE(rq->state, MQ_RQ_IDLE);
3444 struct request *rq = p;
3446 tags->static_rqs[i] = rq;
3447 if (blk_mq_init_request(set, rq, hctx_idx, node)) {
3468 static bool blk_mq_has_request(struct request *rq, void *data)
3472 if (rq->mq_hctx != iter_data->hctx)
4878 int blk_rq_poll(struct request *rq, struct io_comp_batch *iob,
4881 struct request_queue *q = rq->q;
4884 if (!blk_rq_is_poll(rq))
4889 ret = blk_hctx_poll(q, rq->mq_hctx, iob, poll_flags);
4896 unsigned int blk_mq_rq_cpu(struct request *rq)
4898 return rq->mq_ctx->cpu;
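
blk_rq_poll() above is the exported polling entry for passthrough requests sitting on a poll hctx; a hedged sketch that busy-polls until a completion signalled from the request's end_io fires ("done" is an assumed completion set by that end_io):

static void my_poll_for_completion(struct request *rq, struct completion *done)
{
        if (!blk_rq_is_poll(rq)) {
                wait_for_completion(done);      /* not a polled request */
                return;
        }
        while (!completion_done(done)) {
                blk_rq_poll(rq, NULL, 0);       /* drive the poll hctx */
                cond_resched();
        }
}
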