Lines Matching defs:rwb

145 static inline bool rwb_enabled(struct rq_wb *rwb)
147 return rwb && rwb->enable_state != WBT_STATE_OFF_DEFAULT &&
148 rwb->enable_state != WBT_STATE_OFF_MANUAL;
151 static void wb_timestamp(struct rq_wb *rwb, unsigned long *var)
153 if (rwb_enabled(rwb)) {
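
Only the rwb_enabled() gate of wb_timestamp() shows up in the matches; a minimal sketch of the presumed body, which stamps the caller-supplied slot with the current jiffies value (the store and the dirty-cacheline check are assumptions, not part of the matched lines):

    static void wb_timestamp(struct rq_wb *rwb, unsigned long *var)
    {
            if (rwb_enabled(rwb)) {
                    unsigned long cur = jiffies;

                    /* only dirty the cacheline when the stamp actually changes */
                    if (cur != *var)
                            *var = cur;
            }
    }
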
165 static bool wb_recent_wait(struct rq_wb *rwb)
167 struct backing_dev_info *bdi = rwb->rqos.disk->bdi;
172 static inline struct rq_wait *get_rq_wait(struct rq_wb *rwb,
176 return &rwb->rq_wait[WBT_RWQ_KSWAPD];
178 return &rwb->rq_wait[WBT_RWQ_DISCARD];
180 return &rwb->rq_wait[WBT_RWQ_BG];
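
get_rq_wait() picks one of the three rq_wait queues from the wbt_flags of the request; the flag tests themselves are not among the matches, so this is a hedged reconstruction:

    static inline struct rq_wait *get_rq_wait(struct rq_wb *rwb,
                                              enum wbt_flags wb_acct)
    {
            if (wb_acct & WBT_KSWAPD)
                    return &rwb->rq_wait[WBT_RWQ_KSWAPD];
            if (wb_acct & WBT_DISCARD)
                    return &rwb->rq_wait[WBT_RWQ_DISCARD];

            /* everything else is accounted as background writeback */
            return &rwb->rq_wait[WBT_RWQ_BG];
    }
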
183 static void rwb_wake_all(struct rq_wb *rwb)
188 struct rq_wait *rqw = &rwb->rq_wait[i];
195 static void wbt_rqw_done(struct rq_wb *rwb, struct rq_wait *rqw,
208 limit = rwb->wb_background;
209 else if (test_bit(QUEUE_FLAG_WC, &rwb->rqos.disk->queue->queue_flags) &&
210 !wb_recent_wait(rwb))
213 limit = rwb->wb_normal;
224 if (!inflight || diff >= rwb->wb_background / 2)
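
The limit selection at lines 208-213 and the `diff >= rwb->wb_background / 2` test at line 224 belong to the waiter wake-up batching in wbt_rqw_done(); a sketch of how they presumably fit together (the inflight bookkeeping around them is assumed):

    inflight = atomic_dec_return(&rqw->inflight);

    /* limit chosen as in lines 208-213 above */
    if (inflight && inflight >= limit)
            return;

    if (wq_has_sleeper(&rqw->wait)) {
            int diff = limit - inflight;

            /* only wake waiters once a meaningful amount of room has opened up */
            if (!inflight || diff >= rwb->wb_background / 2)
                    wake_up_all(&rqw->wait);
    }
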
231 struct rq_wb *rwb = RQWB(rqos);
237 rqw = get_rq_wait(rwb, wb_acct);
238 wbt_rqw_done(rwb, rqw, wb_acct);
247 struct rq_wb *rwb = RQWB(rqos);
250 if (rwb->sync_cookie == rq) {
251 rwb->sync_issue = 0;
252 rwb->sync_cookie = NULL;
256 wb_timestamp(rwb, &rwb->last_comp);
258 WARN_ON_ONCE(rq == rwb->sync_cookie);
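
Lines 250-258 come from the two branches of wbt_done(): requests wbt never tracked only clear the sync-issue cookie and stamp last_comp, while tracked ones are accounted through __wbt_done(). A hedged sketch of the presumed structure:

    if (!wbt_is_tracked(rq)) {
            if (rwb->sync_cookie == rq) {
                    rwb->sync_issue = 0;
                    rwb->sync_cookie = NULL;
            }
            if (wbt_is_read(rq))
                    wb_timestamp(rwb, &rwb->last_comp);
    } else {
            WARN_ON_ONCE(rq == rwb->sync_cookie);
            __wbt_done(rqos, wbt_flags(rq));
    }
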
276 static u64 rwb_sync_issue_lat(struct rq_wb *rwb)
278 u64 issue = READ_ONCE(rwb->sync_issue);
280 if (!issue || !rwb->sync_cookie)
286 static inline unsigned int wbt_inflight(struct rq_wb *rwb)
291 ret += atomic_read(&rwb->rq_wait[i].inflight);
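
wbt_inflight() simply sums the inflight counters of all rq_wait queues; the loop header is not among the matches, so this is a presumed reconstruction:

    static inline unsigned int wbt_inflight(struct rq_wb *rwb)
    {
            unsigned int i, ret = 0;

            for (i = 0; i < WBT_NUM_RWQ; i++)
                    ret += atomic_read(&rwb->rq_wait[i].inflight);

            return ret;
    }
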
303 static int latency_exceeded(struct rq_wb *rwb, struct blk_rq_stat *stat)
305 struct backing_dev_info *bdi = rwb->rqos.disk->bdi;
306 struct rq_depth *rqd = &rwb->rq_depth;
318 thislat = rwb_sync_issue_lat(rwb);
319 if (thislat > rwb->cur_win_nsec ||
320 (thislat > rwb->min_lat_nsec && !stat[READ].nr_samples)) {
335 if (stat[WRITE].nr_samples || wb_recent_wait(rwb) ||
336 wbt_inflight(rwb))
344 if (stat[READ].min > rwb->min_lat_nsec) {
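
The three checks matched in latency_exceeded() (lines 318-344) presumably map to a small set of verdicts consumed by the timer; the LAT_* names and the exact control flow below are assumptions:

    /*
     * thislat > cur_win_nsec, or > min_lat_nsec with no read samples
     *         -> LAT_EXCEEDED (a sync read has been stuck behind writeback)
     * no valid read/write mix, but writes, recent waits, or inflight IO
     *         -> LAT_UNKNOWN_WRITES (window was write-only)
     * no valid samples at all
     *         -> LAT_UNKNOWN
     * stat[READ].min > min_lat_nsec
     *         -> LAT_EXCEEDED (measured read latency is over target)
     * otherwise
     *         -> LAT_OK
     */
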
356 static void rwb_trace_step(struct rq_wb *rwb, const char *msg)
358 struct backing_dev_info *bdi = rwb->rqos.disk->bdi;
359 struct rq_depth *rqd = &rwb->rq_depth;
361 trace_wbt_step(bdi, msg, rqd->scale_step, rwb->cur_win_nsec,
362 rwb->wb_background, rwb->wb_normal, rqd->max_depth);
365 static void calc_wb_limits(struct rq_wb *rwb)
367 if (rwb->min_lat_nsec == 0) {
368 rwb->wb_normal = rwb->wb_background = 0;
369 } else if (rwb->rq_depth.max_depth <= 2) {
370 rwb->wb_normal = rwb->rq_depth.max_depth;
371 rwb->wb_background = 1;
373 rwb->wb_normal = (rwb->rq_depth.max_depth + 1) / 2;
374 rwb->wb_background = (rwb->rq_depth.max_depth + 3) / 4;
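
A worked example of the calc_wb_limits() arithmetic: with max_depth == 16, wb_normal = (16 + 1) / 2 = 8 and wb_background = (16 + 3) / 4 = 4; once scaling pushes max_depth to 2 or less, wb_normal tracks max_depth and wb_background is pinned at 1, and min_lat_nsec == 0 zeroes both limits, effectively disabling throttling.
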
378 static void scale_up(struct rq_wb *rwb)
380 if (!rq_depth_scale_up(&rwb->rq_depth))
382 calc_wb_limits(rwb);
383 rwb->unknown_cnt = 0;
384 rwb_wake_all(rwb);
385 rwb_trace_step(rwb, tracepoint_string("scale up"));
388 static void scale_down(struct rq_wb *rwb, bool hard_throttle)
390 if (!rq_depth_scale_down(&rwb->rq_depth, hard_throttle))
392 calc_wb_limits(rwb);
393 rwb->unknown_cnt = 0;
394 rwb_trace_step(rwb, tracepoint_string("scale down"));
397 static void rwb_arm_timer(struct rq_wb *rwb)
399 struct rq_depth *rqd = &rwb->rq_depth;
408 rwb->cur_win_nsec = div_u64(rwb->win_nsec << 4,
415 rwb->cur_win_nsec = rwb->win_nsec;
418 blk_stat_activate_nsecs(rwb->cb, rwb->cur_win_nsec);
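
The shifted division at line 408 works out to cur_win_nsec = win_nsec * 16 / int_sqrt((scale_step + 1) * 256), so the monitoring window shrinks roughly with the square root of how far throttling has scaled down. For example, at scale_step == 0 the divisor is int_sqrt(256) = 16 and the window stays at win_nsec; at scale_step == 3 it is int_sqrt(1024) = 32 and the window halves, so a misbehaving device gets re-evaluated more often.
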
423 struct rq_wb *rwb = cb->data;
424 struct rq_depth *rqd = &rwb->rq_depth;
425 unsigned int inflight = wbt_inflight(rwb);
428 if (!rwb->rqos.disk)
431 status = latency_exceeded(rwb, cb->stat);
433 trace_wbt_timer(rwb->rqos.disk->bdi, status, rqd->scale_step, inflight);
442 scale_down(rwb, true);
445 scale_up(rwb);
453 scale_up(rwb);
456 if (++rwb->unknown_cnt < RWB_UNKNOWN_BUMP)
464 scale_up(rwb);
466 scale_down(rwb, false);
476 rwb_arm_timer(rwb);
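
The scale_up()/scale_down() calls at lines 442-466 presumably sit in a switch on the latency_exceeded() verdict; a hedged sketch of the mapping (case comments are assumptions):

    switch (status) {
    case LAT_EXCEEDED:              /* reads over target: throttle hard */
            scale_down(rwb, true);
            break;
    case LAT_OK:                    /* reads within target: open up */
            scale_up(rwb);
            break;
    case LAT_UNKNOWN_WRITES:        /* write-only window: favor throughput */
            scale_up(rwb);
            break;
    case LAT_UNKNOWN:               /* no samples: slowly drift back to step 0 */
            if (++rwb->unknown_cnt < RWB_UNKNOWN_BUMP)
                    break;
            if (rqd->scale_step > 0)
                    scale_up(rwb);
            else if (rqd->scale_step < 0)
                    scale_down(rwb, false);
            break;
    }

The re-arm at line 476 presumably fires only while scale_step is non-zero or IO is still in flight, so an idle, unthrottled queue does not keep the stats window running.
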
479 static void wbt_update_limits(struct rq_wb *rwb)
481 struct rq_depth *rqd = &rwb->rq_depth;
487 calc_wb_limits(rwb);
489 rwb_wake_all(rwb);
523 static bool close_io(struct rq_wb *rwb)
527 return time_before(now, rwb->last_issue + HZ / 10) ||
528 time_before(now, rwb->last_comp + HZ / 10);
533 static inline unsigned int get_limit(struct rq_wb *rwb, blk_opf_t opf)
538 return rwb->wb_background;
548 if ((opf & REQ_HIPRIO) || wb_recent_wait(rwb) || current_is_kswapd())
549 limit = rwb->rq_depth.max_depth;
550 else if ((opf & REQ_BACKGROUND) || close_io(rwb)) {
555 limit = rwb->wb_background;
557 limit = rwb->wb_normal;
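
Putting the get_limit() tiers together with the calc_wb_limits() math above, and assuming rq_depth.max_depth happens to be 16 (a hypothetical value for illustration):

    /* assumed max_depth == 16, no scaling applied */
    REQ_HIPRIO, recent wait, or kswapd   -> limit = 16  (rq_depth.max_depth)
    discard                              -> limit = 4   (wb_background, early return at line 538)
    REQ_BACKGROUND or close_io()         -> limit = 4   (wb_background)
    everything else                      -> limit = 8   (wb_normal)
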
563 struct rq_wb *rwb;
571 return rq_wait_inc_below(rqw, get_limit(data->rwb, data->opf));
577 wbt_rqw_done(data->rwb, rqw, data->wb_acct);
584 static void __wbt_wait(struct rq_wb *rwb, enum wbt_flags wb_acct,
587 struct rq_wait *rqw = get_rq_wait(rwb, wb_acct);
589 .rwb = rwb,
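
__wbt_wait() packages the rwb pointer into the wbt_wait_data used by the callbacks at lines 571 and 577, then blocks in the rq_qos wait machinery. The sketch below is a presumed reconstruction; the remaining struct members and the callback names passed to rq_qos_wait() are assumptions:

    static void __wbt_wait(struct rq_wb *rwb, enum wbt_flags wb_acct,
                           blk_opf_t opf)
    {
            struct rq_wait *rqw = get_rq_wait(rwb, wb_acct);
            struct wbt_wait_data data = {
                    .rwb = rwb,
                    .wb_acct = wb_acct,
                    .opf = opf,
            };

            /* sleeps until rq_wait_inc_below() admits us (line 571 above) */
            rq_qos_wait(rqw, &data, wbt_inflight_cb, wbt_cleanup_cb);
    }
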
615 static enum wbt_flags bio_to_wbt_flags(struct rq_wb *rwb, struct bio *bio)
619 if (!rwb_enabled(rwb))
636 struct rq_wb *rwb = RQWB(rqos);
637 enum wbt_flags flags = bio_to_wbt_flags(rwb, bio);
648 struct rq_wb *rwb = RQWB(rqos);
651 flags = bio_to_wbt_flags(rwb, bio);
654 wb_timestamp(rwb, &rwb->last_issue);
658 __wbt_wait(rwb, flags, bio->bi_opf);
660 if (!blk_stat_is_active(rwb->cb))
661 rwb_arm_timer(rwb);
666 struct rq_wb *rwb = RQWB(rqos);
667 rq->wbt_flags |= bio_to_wbt_flags(rwb, bio);
672 struct rq_wb *rwb = RQWB(rqos);
674 if (!rwb_enabled(rwb))
684 if (wbt_is_read(rq) && !rwb->sync_issue) {
685 rwb->sync_cookie = rq;
686 rwb->sync_issue = rq->io_start_time_ns;
692 struct rq_wb *rwb = RQWB(rqos);
693 if (!rwb_enabled(rwb))
695 if (rq == rwb->sync_cookie) {
696 rwb->sync_issue = 0;
697 rwb->sync_cookie = NULL;
764 struct rq_wb *rwb = RQWB(rqos);
766 blk_stat_remove_callback(rqos->disk->queue, rwb->cb);
767 blk_stat_free_callback(rwb->cb);
768 kfree(rwb);
777 struct rq_wb *rwb;
780 rwb = RQWB(rqos);
781 if (rwb->enable_state == WBT_STATE_ON_DEFAULT) {
782 blk_stat_deactivate(rwb->cb);
783 rwb->enable_state = WBT_STATE_OFF_DEFAULT;
792 struct rq_wb *rwb = RQWB(rqos);
794 seq_printf(m, "%llu\n", rwb->cur_win_nsec);
801 struct rq_wb *rwb = RQWB(rqos);
803 seq_printf(m, "%d\n", rwb->enable_state);
818 struct rq_wb *rwb = RQWB(rqos);
823 atomic_read(&rwb->rq_wait[i].inflight));
830 struct rq_wb *rwb = RQWB(rqos);
832 seq_printf(m, "%lu\n", rwb->min_lat_nsec);
839 struct rq_wb *rwb = RQWB(rqos);
841 seq_printf(m, "%u\n", rwb->unknown_cnt);
848 struct rq_wb *rwb = RQWB(rqos);
850 seq_printf(m, "%u\n", rwb->wb_normal);
857 struct rq_wb *rwb = RQWB(rqos);
859 seq_printf(m, "%u\n", rwb->wb_background);
893 struct rq_wb *rwb;
897 rwb = kzalloc(sizeof(*rwb), GFP_KERNEL);
898 if (!rwb)
901 rwb->cb = blk_stat_alloc_callback(wb_timer_fn, wbt_data_dir, 2, rwb);
902 if (!rwb->cb) {
903 kfree(rwb);
908 rq_wait_init(&rwb->rq_wait[i]);
910 rwb->last_comp = rwb->last_issue = jiffies;
911 rwb->win_nsec = RWB_WINDOW_NSEC;
912 rwb->enable_state = WBT_STATE_ON_DEFAULT;
913 rwb->rq_depth.default_depth = RWB_DEF_DEPTH;
914 rwb->min_lat_nsec = wbt_default_latency_nsec(q);
915 rwb->rq_depth.queue_depth = blk_queue_depth(q);
916 wbt_update_limits(rwb);
919 * Assign rwb and add the stats callback.
922 ret = rq_qos_add(&rwb->rqos, disk, RQ_QOS_WBT, &wbt_rqos_ops);
927 blk_stat_add_callback(q, rwb->cb);
932 blk_stat_free_callback(rwb->cb);
933 kfree(rwb);
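
The final two matches (lines 932-933) are presumably the error path of wbt_init(): if attaching the rq_qos at line 922 fails, the stats callback and the rq_wb allocated at lines 897-901 are unwound. A hedged sketch of the tail (the label name is an assumption):

    ret = rq_qos_add(&rwb->rqos, disk, RQ_QOS_WBT, &wbt_rqos_ops);
    if (ret)
            goto err_free;

    blk_stat_add_callback(q, rwb->cb);

    return 0;

    err_free:
            blk_stat_free_callback(rwb->cb);
            kfree(rwb);
            return ret;
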