Lines Matching refs:rq

231 #define BFQ_RQ_SEEKY(bfqd, last_pos, rq) \
232 (get_sdist(last_pos, rq) > \
235 blk_rq_sectors(rq) < BFQQ_SECT_THR_NONROT))
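
The BFQ_RQ_SEEKY macro above is only partially visible (lines 233-234 contain no "rq" and are omitted). Below is a minimal userspace sketch of the idea it implements, with hypothetical names and threshold values standing in for BFQQ_SEEK_THR and BFQQ_SECT_THR_NONROT: a request is classified as seeky when it lands far from the last I/O position, and on non-rotational queues only small requests are treated that way, since large transfers pay no real seek penalty there.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_nr;   /* illustrative stand-in for sector_t */

/* hypothetical thresholds, stand-ins for BFQQ_SEEK_THR / BFQQ_SECT_THR_NONROT */
#define SEEK_THR_SECTORS  (8 * 1024)
#define SECT_THR_NONROT   32

static sector_nr sdist(sector_nr last_pos, sector_nr rq_pos)
{
    /* absolute distance between this request and the previous I/O position */
    return rq_pos > last_pos ? rq_pos - last_pos : last_pos - rq_pos;
}

static bool rq_seeky(bool nonrot_queue, sector_nr last_pos,
                     sector_nr rq_pos, unsigned int rq_sectors)
{
    return sdist(last_pos, rq_pos) > SEEK_THR_SECTORS &&
           (!nonrot_queue || rq_sectors < SECT_THR_NONROT);
}

int main(void)
{
    printf("%d\n", rq_seeky(false, 0, 1 << 20, 8));   /* 1: far away, rotational dev */
    printf("%d\n", rq_seeky(true,  0, 1 << 20, 256)); /* 0: non-rotational, large rq */
    return 0;
}
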
376 #define RQ_BIC(rq) ((struct bfq_io_cq *)((rq)->elv.priv[0]))
377 #define RQ_BFQQ(rq) ((rq)->elv.priv[1])
999 struct request *rq;
1006 rq = rq_entry_fifo(bfqq->fifo.next);
1008 if (rq == last || blk_time_get_ns() < rq->fifo_time)
1011 bfq_log_bfqq(bfqq->bfqd, bfqq, "check_fifo: returned %p", rq);
1012 return rq;
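
The fragments at lines 999-1012 come from the FIFO-expiration check. Here is a self-contained model of that check under simplified, non-kernel types: the oldest queued request is only eligible once its fifo_time deadline has passed, and never if it is the one request the caller excluded.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct sim_rq {
    uint64_t fifo_time;     /* expiration deadline, in ns */
};

/*
 * Return the oldest queued request once its deadline has passed,
 * NULL if it has not expired yet or if it is the excluded request.
 */
static struct sim_rq *check_fifo(struct sim_rq *oldest, struct sim_rq *last,
                                 uint64_t now_ns)
{
    if (!oldest || oldest == last || now_ns < oldest->fifo_time)
        return NULL;
    return oldest;
}

int main(void)
{
    struct sim_rq rq = { .fifo_time = 1000 };

    printf("%s\n", check_fifo(&rq, NULL,  500) ? "expired" : "not yet"); /* not yet */
    printf("%s\n", check_fifo(&rq, NULL, 2000) ? "expired" : "not yet"); /* expired */
    return 0;
}
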
1043 static unsigned long bfq_serv_to_charge(struct request *rq,
1048 return blk_rq_sectors(rq);
1050 return blk_rq_sectors(rq) * bfq_async_charge_factor;
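
Lines 1043-1050 show the service-charging rule. A hedged sketch of it follows, with ASYNC_CHARGE_FACTOR as a stand-in for bfq_async_charge_factor and the hidden condition at lines 1044-1047 reduced to a plain sync test: async requests are charged more service than they actually consume, which shrinks the async share of the device time.

#include <stdio.h>

#define ASYNC_CHARGE_FACTOR 3   /* illustrative stand-in for bfq_async_charge_factor */

static unsigned long serv_to_charge(unsigned int rq_sectors, int queue_is_sync)
{
    if (queue_is_sync)
        return rq_sectors;
    /* async I/O is billed for more service than it receives */
    return (unsigned long)rq_sectors * ASYNC_CHARGE_FACTOR;
}

int main(void)
{
    /* a 256-sector request: billed at face value if sync, inflated if async */
    printf("sync: %lu  async: %lu\n", serv_to_charge(256, 1), serv_to_charge(256, 0));
    return 0;
}
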
1087 bfq_log_bfqq(bfqd, bfqq, "updated next rq: new budget %lu",
1821 struct request *rq,
1835 unsigned int act_idx = bfq_actuator_index(bfqd, rq->bio);
1837 bfqq->bic || RQ_BIC(rq)->bfqq_data[act_idx].stably_merged;
2204 static void bfq_add_request(struct request *rq)
2206 struct bfq_queue *bfqq = RQ_BFQQ(rq);
2213 bfq_log_bfqq(bfqd, bfqq, "add_request %d", rq_is_sync(rq));
2214 bfqq->queued[rq_is_sync(rq)]++;
2221 if (bfq_bfqq_sync(bfqq) && RQ_BIC(rq)->requests <= 1) {
2268 * total service time of rq: setting
2270 * be set when rq will be dispatched.
2276 * arrival of rq will not affect the total
2277 * service time of rq. So the injection limit
2280 * occurs before rq is completed. To have the
2284 * on bfqq before rq is completed).
2294 elv_rb_add(&bfqq->sort_list, rq);
2300 next_rq = bfq_choose_req(bfqd, bfqq->next_rq, rq, bfqd->last_position);
2312 rq, &interactive);
2314 if (bfqd->low_latency && old_wr_coeff == 1 && !rq_is_sync(rq) &&
2372 static sector_t get_sdist(sector_t last_pos, struct request *rq)
2375 return abs(blk_rq_pos(rq) - last_pos);
2381 struct request *rq)
2383 struct bfq_queue *bfqq = RQ_BFQQ(rq);
2385 const int sync = rq_is_sync(rq);
2387 if (bfqq->next_rq == rq) {
2388 bfqq->next_rq = bfq_find_next_rq(bfqd, bfqq, rq);
2392 if (rq->queuelist.prev != &rq->queuelist)
2393 list_del_init(&rq->queuelist);
2400 elv_rb_del(&bfqq->sort_list, rq);
2402 elv_rqhash_del(q, rq);
2403 if (q->last_merge == rq)
2440 if (rq->cmd_flags & REQ_META)
2548 * rq and 'next' have been merged, with 'next' going away. BFQ
2550 fifo_time lower than rq, then the fifo_time of rq must be set to
2553 NOTE: in this function we assume that rq is in a bfq_queue, based
2554 on the fact that rq is picked from the hash table q->elevator->hash, which,
2560 static void bfq_requests_merged(struct request_queue *q, struct request *rq,
2563 struct bfq_queue *bfqq = RQ_BFQQ(rq),
2570 * If next and rq belong to the same bfq_queue and next is older
2571 * than rq, then reposition rq in the fifo (by substituting next
2572 * with rq). Otherwise, if next and rq belong to different
2573 * bfq_queues, never reposition rq: in fact, we would have to
2579 !list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
2580 next->fifo_time < rq->fifo_time) {
2581 list_del_init(&rq->queuelist);
2582 list_replace_init(&next->queuelist, &rq->queuelist);
2583 rq->fifo_time = next->fifo_time;
2587 bfqq->next_rq = rq;
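
The comment and code at lines 2548-2587 describe how a merge preserves the age of the request that goes away. Below is a standalone model (a toy list implementation in the style of the kernel's list_head, not kernel code) of that repositioning: when next is older than rq, rq takes next's slot in the FIFO and inherits next's earlier fifo_time.

#include <stdint.h>
#include <stdio.h>

/* toy circular doubly-linked list, modeled after struct list_head */
struct list_node { struct list_node *prev, *next; };

static void list_init(struct list_node *h) { h->prev = h->next = h; }

static void list_add_tail(struct list_node *n, struct list_node *h)
{
    n->prev = h->prev;
    n->next = h;
    h->prev->next = n;
    h->prev = n;
}

static void list_del_init(struct list_node *n)
{
    n->prev->next = n->next;
    n->next->prev = n->prev;
    list_init(n);
}

/* put new_entry where old was, leaving old detached and re-initialized */
static void list_replace_init(struct list_node *old, struct list_node *new_entry)
{
    new_entry->prev = old->prev;
    new_entry->next = old->next;
    old->prev->next = new_entry;
    old->next->prev = new_entry;
    list_init(old);
}

struct sim_rq { uint64_t fifo_time; struct list_node fifo; };

/* on a merge where 'next' goes away: keep its age if it was the older one */
static void requests_merged(struct sim_rq *rq, struct sim_rq *next)
{
    if (next->fifo_time < rq->fifo_time) {
        list_del_init(&rq->fifo);
        list_replace_init(&next->fifo, &rq->fifo);
        rq->fifo_time = next->fifo_time;
    }
}

int main(void)
{
    struct list_node fifo;
    struct sim_rq a = { .fifo_time = 100 }, b = { .fifo_time = 200 };

    list_init(&fifo);
    list_add_tail(&a.fifo, &fifo);  /* a queued first, so it is older */
    list_add_tail(&b.fifo, &fifo);
    requests_merged(&b, &a);        /* a merged into b, a goes away   */
    printf("b expires at %llu, b at FIFO head: %d\n",
           (unsigned long long)b.fifo_time, fifo.next == &b.fifo);
    return 0;
}
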
3227 static bool bfq_allow_bio_merge(struct request_queue *q, struct request *rq,
3237 if (is_sync && !rq_is_sync(rq))
3242 * merge only if rq is queued there.
3264 * so use new_bfqq to decide whether bio and rq can be
3278 return bfqq == RQ_BFQQ(rq);
3378 * seeks. So allow a little bit of time for it to submit a new rq.
3433 struct request *rq)
3435 if (rq != NULL) { /* new rq dispatch now, reset accordingly */
3440 blk_rq_sectors(rq);
3441 } else /* no new rq dispatched, just reset the number of samples */
3450 static void bfq_update_rate_reset(struct bfq_data *bfqd, struct request *rq)
3556 bfq_reset_rate_computation(bfqd, rq);
3591 static void bfq_update_peak_rate(struct bfq_data *bfqd, struct request *rq)
3598 bfq_reset_rate_computation(bfqd, rq);
3623 && !BFQ_RQ_SEEKY(bfqd, bfqd->last_position, rq))
3626 bfqd->tot_sectors_dispatched += blk_rq_sectors(rq);
3628 /* Reset max observed rq size every 32 dispatches */
3631 max_t(u32, blk_rq_sectors(rq), bfqd->last_rq_max_size);
3633 bfqd->last_rq_max_size = blk_rq_sectors(rq);
3642 bfq_update_rate_reset(bfqd, rq);
3644 bfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
3645 if (RQ_BFQQ(rq) == bfqd->in_service_queue)
3653 static void bfq_dispatch_remove(struct request_queue *q, struct request *rq)
3655 struct bfq_queue *bfqq = RQ_BFQQ(rq);
3670 bfq_update_peak_rate(q->elevator->elevator_data, rq);
3672 bfq_remove_request(q, rq);
4428 * Not setting service to 0, because, if the next rq
5106 struct request *rq = bfqq->next_rq;
5109 service_to_charge = bfq_serv_to_charge(rq, bfqq);
5115 bfqd->waited_rq = rq;
5118 bfq_dispatch_remove(bfqd->queue, rq);
5121 return rq;
5144 return rq;
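
Lines 5106-5144 belong to the per-queue dispatch step. A simplified sketch under assumed types follows (the budget and waited_rq handling visible at lines 5109-5115 is only hinted at): the queue's chosen next request is charged to the queue, detached, and returned for dispatch.

#include <stddef.h>
#include <stdio.h>

struct sim_rq { unsigned int sectors; struct sim_rq *next_in_queue; };
struct sim_bfqq {
    struct sim_rq *next_rq;          /* request chosen for dispatch next */
    unsigned long entity_service;    /* service charged so far           */
};

static struct sim_rq *dispatch_from_queue(struct sim_bfqq *bfqq)
{
    struct sim_rq *rq = bfqq->next_rq;

    if (!rq)
        return NULL;

    /* charge the service this request will consume (sync case: its size) */
    bfqq->entity_service += rq->sectors;

    /* detach it from the queue; the real code also recomputes next_rq */
    bfqq->next_rq = rq->next_in_queue;
    return rq;
}

int main(void)
{
    struct sim_rq r = { .sectors = 64, .next_in_queue = NULL };
    struct sim_bfqq q = { .next_rq = &r, .entity_service = 0 };

    dispatch_from_queue(&q);
    printf("service charged: %lu\n", q.entity_service);
    return 0;
}
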
5162 struct request *rq = NULL;
5166 rq = list_first_entry(&bfqd->dispatch, struct request,
5168 list_del_init(&rq->queuelist);
5170 bfqq = RQ_BFQQ(rq);
5235 rq = bfq_dispatch_rq_from_bfqq(bfqd, bfqq);
5237 if (rq) {
5242 rq->rq_flags |= RQF_STARTED;
5245 return rq;
5250 struct request *rq,
5254 struct bfq_queue *bfqq = rq ? RQ_BFQQ(rq) : NULL;
5260 * rq and bfqq are guaranteed to exist until this function
5261 * ends, for the following reasons. First, rq can be
5263 * freed, only after this function ends. Second, rq cannot be
5265 * because it has already started. Thus rq cannot be freed
5266 * before this function ends, and, since rq has a reference to
5278 * implies that rq was picked exactly from
5289 bfqg_stats_update_io_remove(bfqg, rq->cmd_flags);
5295 struct request *rq,
5303 struct request *rq;
5312 rq = __bfq_dispatch_request(hctx);
5319 bfq_update_dispatch_stats(hctx->queue, rq,
5323 return rq;
5327 * Task holds one reference to the queue, dropped when task exits. Each rq
5328 * in-flight on this queue also holds a reference, dropped when rq is freed.
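
The comment at lines 5327-5328 states the reference rule for a queue. Here is a toy model of that rule (not the bfq_queue API): the owning task holds one reference and every in-flight request holds another, so the queue is freed only when the last of them is dropped.

#include <stdio.h>
#include <stdlib.h>

struct sim_queue { int ref; };

static struct sim_queue *queue_get(struct sim_queue *q) { q->ref++; return q; }

static void queue_put(struct sim_queue *q)
{
    if (--q->ref == 0) {
        printf("last reference dropped, freeing queue\n");
        free(q);
    }
}

int main(void)
{
    struct sim_queue *q = calloc(1, sizeof(*q));

    queue_get(q);   /* the task's reference              */
    queue_get(q);   /* one in-flight request's reference */
    queue_put(q);   /* request completes and is freed    */
    queue_put(q);   /* task exits: queue goes away here  */
    return 0;
}
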
5917 struct request *rq)
5920 bfqq->seek_history |= BFQ_RQ_SEEKY(bfqd, bfqq->last_request_pos, rq);
6075 * Called when a new fs request (rq) is added to bfqq. Check if there's
6079 struct request *rq)
6081 if (rq->cmd_flags & REQ_META)
6084 bfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
6087 bool small_req = bfqq->queued[rq_is_sync(rq)] == 1 &&
6088 blk_rq_sectors(rq) < 32;
6151 static bool __bfq_insert_request(struct bfq_data *bfqd, struct request *rq)
6153 struct bfq_queue *bfqq = RQ_BFQQ(rq),
6154 *new_bfqq = bfq_setup_cooperator(bfqd, bfqq, rq, true,
6155 RQ_BIC(rq));
6174 if (bic_to_bfqq(RQ_BIC(rq), true,
6175 bfq_actuator_index(bfqd, rq->bio)) == bfqq)
6176 bfq_merge_bfqqs(bfqd, RQ_BIC(rq),
6181 * rq is about to be enqueued into new_bfqq,
6182 * release rq reference on bfqq
6185 rq->elv.priv[1] = new_bfqq;
6190 bfq_update_has_short_ttime(bfqd, bfqq, RQ_BIC(rq));
6191 bfq_update_io_seektime(bfqd, bfqq, rq);
6194 bfq_add_request(rq);
6197 rq->fifo_time = blk_time_get_ns() + bfqd->bfq_fifo_expire[rq_is_sync(rq)];
6198 list_add_tail(&rq->queuelist, &bfqq->fifo);
6200 bfq_rq_enqueued(bfqd, bfqq, rq);
6237 static struct bfq_queue *bfq_init_rq(struct request *rq);
6239 static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
6250 if (!cgroup_subsys_on_dfl(io_cgrp_subsys) && rq->bio)
6251 bfqg_stats_update_legacy_io(q, rq);
6254 bfqq = bfq_init_rq(rq);
6255 if (blk_mq_sched_try_insert_merge(q, rq, &free)) {
6261 trace_block_rq_insert(rq);
6264 list_add(&rq->queuelist, &bfqd->dispatch);
6266 list_add_tail(&rq->queuelist, &bfqd->dispatch);
6268 idle_timer_disabled = __bfq_insert_request(bfqd, rq);
6271 * in __bfq_insert_request, then rq has been
6274 bfqq = RQ_BFQQ(rq);
6276 if (rq_mergeable(rq)) {
6277 elv_rqhash_add(q, rq);
6279 q->last_merge = rq;
6284 * Cache cmd_flags before releasing scheduler lock, because rq
6288 cmd_flags = rq->cmd_flags;
6300 struct request *rq;
6302 rq = list_first_entry(list, struct request, queuelist);
6303 list_del_init(&rq->queuelist);
6304 bfq_insert_request(hctx, rq, flags);
6643 * Handle either a requeue or a finish for rq. The things to do are
6644 * the same in both cases: all references to rq are to be dropped. In
6645 * particular, rq is considered completed from the point of view of
6648 static void bfq_finish_requeue_request(struct request *rq)
6650 struct bfq_queue *bfqq = RQ_BFQQ(rq);
6655 * rq either is not associated with any icq, or is an already
6659 if (!rq->elv.icq || !bfqq)
6664 if (rq->rq_flags & RQF_STARTED)
6666 rq->start_time_ns,
6667 rq->io_start_time_ns,
6668 rq->cmd_flags);
6671 if (likely(rq->rq_flags & RQF_STARTED)) {
6672 if (rq == bfqd->waited_rq)
6679 RQ_BIC(rq)->requests--;
6692 * request-insertion logic if rq is re-inserted into a bfq
6699 rq->elv.priv[0] = NULL;
6700 rq->elv.priv[1] = NULL;
6703 static void bfq_finish_request(struct request *rq)
6705 bfq_finish_requeue_request(rq);
6707 if (rq->elv.icq) {
6708 put_io_context(rq->elv.icq->ioc);
6709 rq->elv.icq = NULL;
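
Lines 6643-6709 cover the cleanup that is common to requeue and completion. A condensed model with illustrative stand-in fields (bic/bfqq for elv.priv[0]/priv[1], started for RQF_STARTED): once a started request finishes or is requeued, its in-flight accounting is dropped and the scheduler-private pointers are cleared so a later re-insertion starts from a clean state.

#include <stddef.h>
#include <stdio.h>

struct sim_bic { int requests; };
struct sim_request {
    struct sim_bic *bic;    /* stands in for rq->elv.priv[0] */
    void *bfqq;             /* stands in for rq->elv.priv[1] */
    int started;            /* stands in for RQF_STARTED     */
};

static void finish_or_requeue(struct sim_request *rq)
{
    if (!rq->bic || !rq->bfqq)  /* never prepared: nothing to undo */
        return;

    if (rq->started)
        rq->bic->requests--;    /* drop the in-flight accounting   */

    /* detach the request from scheduler state in both cases */
    rq->bic = NULL;
    rq->bfqq = NULL;
}

int main(void)
{
    struct sim_bic bic = { .requests = 1 };
    int dummy_bfqq;
    struct sim_request rq = { .bic = &bic, .bfqq = &dummy_bfqq, .started = 1 };

    finish_or_requeue(&rq);
    printf("requests now %d, priv cleared: %d\n",
           bic.requests, rq.bic == NULL && rq.bfqq == NULL);
    return 0;
}
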
6806 * performed by bfq_init_rq, when rq is either inserted or merged. See
6810 static void bfq_prepare_request(struct request *rq)
6812 rq->elv.icq = ioc_find_get_icq(rq->q);
6819 rq->elv.priv[0] = rq->elv.priv[1] = NULL;
6823 * If needed, init rq, allocate bfq data structures associated with
6824 * rq, and increment reference counters in the destination bfq_queue
6825 for rq. Return the destination bfq_queue for rq, or NULL if rq is
6828 * This function is invoked by the functions that perform rq insertion
6830 * to be performed in bfq_prepare_request, and not delayed to when rq
6833 * rq, rq may still be transformed into a request with no icq, i.e., a
6837 * is invoked, and should rq be transformed one moment later, bfq
6839 * incremented some queue counters for an rq destined to
6842 * rq after rq has been inserted or merged. So, it is safe to execute
6843 * these preparation operations when rq is finally inserted or merged.
6845 static struct bfq_queue *bfq_init_rq(struct request *rq)
6847 struct request_queue *q = rq->q;
6848 struct bio *bio = rq->bio;
6851 const int is_sync = rq_is_sync(rq);
6857 if (unlikely(!rq->elv.icq))
6861 * Assuming that RQ_BFQQ(rq) is set only if everything is set
6862 * for this rq. This holds true, because this function is
6867 if (RQ_BFQQ(rq))
6868 return RQ_BFQQ(rq);
6870 bic = icq_to_bic(rq->elv.icq);
6924 rq, bfqq, bfqq->ref);
6926 rq->elv.priv[0] = bic;
6927 rq->elv.priv[1] = bfqq;
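
The comment at lines 6823-6843 explains why per-queue setup is deferred from bfq_prepare_request to bfq_init_rq. Below is a schematic of that two-phase pattern with illustrative names (none of these are the kernel interfaces): prepare only records the I/O context and clears the private fields; the queue association and its reference are taken at insertion time, and skipped if the request has meanwhile become one that carries no icq.

#include <stddef.h>
#include <stdio.h>

struct sim_icq { int dummy; };
struct sim_queue { int ref; };
struct sim_request {
    struct sim_icq *icq;
    void *priv0, *priv1;    /* bic and bfqq in the real code */
};

static struct sim_queue the_queue;  /* stand-in for the destination queue */

static void prepare(struct sim_request *rq, struct sim_icq *icq)
{
    rq->icq = icq;                  /* may legitimately stay NULL */
    rq->priv0 = rq->priv1 = NULL;   /* nothing allocated yet      */
}

/*
 * Called at insertion/merge time: associate the request with its queue
 * and take a reference, but only if it still has an I/O context and
 * has not already been set up.
 */
static struct sim_queue *init_rq(struct sim_request *rq)
{
    if (!rq->icq)
        return NULL;
    if (rq->priv1)                  /* already initialized for this rq */
        return rq->priv1;

    the_queue.ref++;
    rq->priv0 = rq->icq;            /* bic stand-in */
    rq->priv1 = &the_queue;
    return &the_queue;
}

int main(void)
{
    struct sim_icq icq;
    struct sim_request rq;

    prepare(&rq, &icq);
    printf("queue ref after init: %d\n", init_rq(&rq) ? the_queue.ref : -1);
    return 0;
}
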