// SPDX-License-Identifier: GPL-2.0
/*
 * blk-mq scheduling framework
 *
 * Copyright (C) 2016 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list_sort.h>

#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"
#include "blk-wbt.h"

/*
 * Mark a hardware queue as needing a restart.
 */
void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
{
	if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
		return;

	set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
}
EXPORT_SYMBOL_GPL(blk_mq_sched_mark_restart_hctx);

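/*
 * Clear the SCHED_RESTART bit for @hctx and kick the hardware queue
 * asynchronously so that dispatch is retried.
 */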
void __blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx)
{
	clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);

	/*
	 * Order clearing SCHED_RESTART against the list_empty_careful()
	 * check of hctx->dispatch in blk_mq_run_hw_queue(). The pairing
	 * barrier is in blk_mq_dispatch_rq_list(). Without it, the dispatch
	 * code might not see SCHED_RESTART while, at the same time, a new
	 * request added to hctx->dispatch is missed by the check in
	 * blk_mq_run_hw_queue().
	 */
	smp_mb();

	blk_mq_run_hw_queue(hctx, true);
}

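/*
 * list_sort() comparator: order requests by their hardware queue so that
 * requests sharing an hctx end up next to each other in the list.
 */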
static int sched_rq_cmp(void *priv, const struct list_head *a,
			const struct list_head *b)
{
	struct request *rqa = container_of(a, struct request, queuelist);
	struct request *rqb = container_of(b, struct request, queuelist);

	return rqa->mq_hctx > rqb->mq_hctx;
}

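/*
 * Split the leading run of requests that share the first request's hctx off
 * @rq_list and dispatch it as one batch. Returns the result of
 * blk_mq_dispatch_rq_list() for that batch.
 */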
static bool blk_mq_dispatch_hctx_list(struct list_head *rq_list)
{
	struct blk_mq_hw_ctx *hctx =
		list_first_entry(rq_list, struct request, queuelist)->mq_hctx;
	struct request *rq;
	LIST_HEAD(hctx_list);
	unsigned int count = 0;

	list_for_each_entry(rq, rq_list, queuelist) {
		if (rq->mq_hctx != hctx) {
			list_cut_before(&hctx_list, rq_list, &rq->queuelist);
			goto dispatch;
		}
		count++;
	}
	list_splice_tail_init(rq_list, &hctx_list);

dispatch:
	return blk_mq_dispatch_rq_list(hctx, &hctx_list, count);
}

#define BLK_MQ_BUDGET_DELAY	3		/* ms units */

/*
 * Only SCSI implements .get_budget and .put_budget, and SCSI restarts
 * its queue by itself in its completion handler, so we don't need to
 * restart the queue if .get_budget() fails to get the budget.
 *
 * Returns -EAGAIN if hctx->dispatch was found non-empty and run_work has to
 * be run again.  This is necessary to avoid starving flushes.
 */
static int __blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct elevator_queue *e = q->elevator;
	bool multi_hctxs = false, run_queue = false;
	bool dispatched = false, busy = false;
	unsigned int max_dispatch;
	LIST_HEAD(rq_list);
	int count = 0;

	if (hctx->dispatch_busy)
		max_dispatch = 1;
	else
		max_dispatch = hctx->queue->nr_requests;

	do {
		struct request *rq;
		int budget_token;

		if (e->type->ops.has_work && !e->type->ops.has_work(hctx))
			break;

		if (!list_empty_careful(&hctx->dispatch)) {
			busy = true;
			break;
		}

		budget_token = blk_mq_get_dispatch_budget(q);
		if (budget_token < 0)
			break;

		rq = e->type->ops.dispatch_request(hctx);
		if (!rq) {
			blk_mq_put_dispatch_budget(q, budget_token);
			/*
			 * We're releasing the budget without dispatching.
			 * While we held it, other hctxs on the same queue may
			 * have been blocked, and since we didn't dispatch
			 * there's no guarantee anyone will kick the queue.
			 * Kick it ourselves.
			 */
			run_queue = true;
			break;
		}

		blk_mq_set_rq_budget_token(rq, budget_token);

		/*
		 * Now this rq owns the budget, which has to be released if
		 * this rq won't be queued to the driver via .queue_rq() in
		 * blk_mq_dispatch_rq_list().
		 */
		list_add_tail(&rq->queuelist, &rq_list);
		count++;
		if (rq->mq_hctx != hctx)
			multi_hctxs = true;

		/*
		 * If we cannot get a tag for the request, stop dequeueing
		 * requests from the IO scheduler. We are unlikely to be able
		 * to submit them anyway, and it creates a false impression
		 * for the scheduling heuristics that the device can take
		 * more IO.
		 */
		if (!blk_mq_get_driver_tag(rq))
			break;
	} while (count < max_dispatch);

	if (!count) {
		if (run_queue)
			blk_mq_delay_run_hw_queues(q, BLK_MQ_BUDGET_DELAY);
	} else if (multi_hctxs) {
		/*
		 * Some schedulers, such as bfq and deadline, may dequeue
		 * requests that belong to different hctxs.
		 *
		 * Sort the requests in the list by hctx so that requests
		 * sharing an hctx are dispatched as one batch at a time.
		 */
		list_sort(NULL, &rq_list, sched_rq_cmp);
		do {
			dispatched |= blk_mq_dispatch_hctx_list(&rq_list);
		} while (!list_empty(&rq_list));
	} else {
		dispatched = blk_mq_dispatch_rq_list(hctx, &rq_list, count);
	}

	if (busy)
		return -EAGAIN;
	return !!dispatched;
}

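/*
 * Repeatedly call __blk_mq_do_dispatch_sched() while it keeps making
 * progress, but give up after roughly one second (or when rescheduling is
 * needed) and punt the remaining work to an asynchronous queue run.
 */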
static int blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
{
	unsigned long end = jiffies + HZ;
	int ret;

	do {
		ret = __blk_mq_do_dispatch_sched(hctx);
		if (ret != 1)
			break;
		if (need_resched() || time_is_before_jiffies(end)) {
			blk_mq_delay_run_hw_queue(hctx, 0);
			break;
		}
	} while (1);

	return ret;
}

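/* Return the software queue that follows @ctx on @hctx, wrapping around. */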
static struct blk_mq_ctx *blk_mq_next_ctx(struct blk_mq_hw_ctx *hctx,
					  struct blk_mq_ctx *ctx)
{
	unsigned short idx = ctx->index_hw[hctx->type];

	if (++idx == hctx->nr_ctx)
		idx = 0;

	return hctx->ctxs[idx];
}

/*
 * Only SCSI implements .get_budget and .put_budget, and SCSI restarts
 * its queue by itself in its completion handler, so we don't need to
 * restart the queue if .get_budget() fails to get the budget.
 *
 * Returns -EAGAIN if hctx->dispatch was found non-empty and run_work has to
 * be run again.  This is necessary to avoid starving flushes.
 */
static int blk_mq_do_dispatch_ctx(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	LIST_HEAD(rq_list);
	struct blk_mq_ctx *ctx = READ_ONCE(hctx->dispatch_from);
	int ret = 0;
	struct request *rq;

	do {
		int budget_token;

		if (!list_empty_careful(&hctx->dispatch)) {
			ret = -EAGAIN;
			break;
		}

		if (!sbitmap_any_bit_set(&hctx->ctx_map))
			break;

		budget_token = blk_mq_get_dispatch_budget(q);
		if (budget_token < 0)
			break;

		rq = blk_mq_dequeue_from_ctx(hctx, ctx);
		if (!rq) {
			blk_mq_put_dispatch_budget(q, budget_token);
			/*
			 * We're releasing the budget without dispatching.
			 * While we held it, other hctxs on the same queue may
			 * have been blocked, and since we didn't dispatch
			 * there's no guarantee anyone will kick the queue.
			 * Kick it ourselves.
			 */
			blk_mq_delay_run_hw_queues(q, BLK_MQ_BUDGET_DELAY);
			break;
		}

		blk_mq_set_rq_budget_token(rq, budget_token);

		/*
		 * Now this rq owns the budget, which has to be released if
		 * this rq won't be queued to the driver via .queue_rq() in
		 * blk_mq_dispatch_rq_list().
		 */
		list_add(&rq->queuelist, &rq_list);

		/* round robin for fair dispatch */
		ctx = blk_mq_next_ctx(hctx, rq->mq_ctx);

	} while (blk_mq_dispatch_rq_list(rq->mq_hctx, &rq_list, 1));

	WRITE_ONCE(hctx->dispatch_from, ctx);
	return ret;
}

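/*
 * Dispatch requests for @hctx: drain the hctx dispatch list first, then pull
 * from the I/O scheduler or the software queues as appropriate. Returns
 * -EAGAIN when hctx->dispatch turned out to be non-empty and the queue must
 * be run again to avoid starving flushes.
 */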
static int __blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
{
	bool need_dispatch = false;
	LIST_HEAD(rq_list);

	/*
	 * If we have previous entries on our dispatch list, grab them first for
	 * more fair dispatch.
	 */
	if (!list_empty_careful(&hctx->dispatch)) {
		spin_lock(&hctx->lock);
		if (!list_empty(&hctx->dispatch))
			list_splice_init(&hctx->dispatch, &rq_list);
		spin_unlock(&hctx->lock);
	}

	/*
	 * Only ask the scheduler for requests if we didn't have residual
	 * requests from the dispatch list. This is to avoid the case where
	 * we only ever dispatch a fraction of the requests available because
	 * of low device queue depth. Once we pull requests out of the IO
	 * scheduler, we can no longer merge or sort them. So it's best to
	 * leave them there for as long as we can. Mark the hw queue as
	 * needing a restart in that case.
	 *
	 * We want to dispatch from the scheduler if there was nothing
	 * on the dispatch list or we were able to dispatch from the
	 * dispatch list.
	 */
	if (!list_empty(&rq_list)) {
		blk_mq_sched_mark_restart_hctx(hctx);
		if (!blk_mq_dispatch_rq_list(hctx, &rq_list, 0))
			return 0;
		need_dispatch = true;
	} else {
		need_dispatch = hctx->dispatch_busy;
	}

	if (hctx->queue->elevator)
		return blk_mq_do_dispatch_sched(hctx);

	/* dequeue requests one by one from the sw queue if the queue is busy */
	if (need_dispatch)
		return blk_mq_do_dispatch_ctx(hctx);
	blk_mq_flush_busy_ctxs(hctx, &rq_list);
	blk_mq_dispatch_rq_list(hctx, &rq_list, 0);
	return 0;
}

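/*
 * Main entry point for running a hardware queue: bail out if the queue is
 * stopped or quiesced, otherwise dispatch, retrying once on -EAGAIN and, if
 * it persists, punting to an asynchronous queue run.
 */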
void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;

	/* RCU or SRCU read lock is needed before checking quiesced flag */
	if (unlikely(blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)))
		return;

	/*
	 * A return of -EAGAIN is an indication that hctx->dispatch is not
	 * empty and we must run again in order to avoid starving flushes.
	 */
	if (__blk_mq_sched_dispatch_requests(hctx) == -EAGAIN) {
		if (__blk_mq_sched_dispatch_requests(hctx) == -EAGAIN)
			blk_mq_run_hw_queue(hctx, true);
	}
}

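/*
 * Try to merge @bio into a pending request, either via the elevator's
 * ->bio_merge() hook or, when there is no elevator hook, against the
 * per-sw-queue request list. Returns true if the bio was merged.
 */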
bool blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs)
{
	struct elevator_queue *e = q->elevator;
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;
	bool ret = false;
	enum hctx_type type;

	if (e && e->type->ops.bio_merge) {
		ret = e->type->ops.bio_merge(q, bio, nr_segs);
		goto out_put;
	}

	ctx = blk_mq_get_ctx(q);
	hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
	type = hctx->type;
	if (!(hctx->flags & BLK_MQ_F_SHOULD_MERGE) ||
	    list_empty_careful(&ctx->rq_lists[type]))
		goto out_put;

	/* default per sw-queue merge */
	spin_lock(&ctx->lock);
	/*
	 * Reverse check our software queue for entries that we could
	 * potentially merge with. Currently includes a hand-wavy stop
	 * count of 8, to not spend too much time checking for merges.
	 */
	if (blk_bio_list_merge(q, &ctx->rq_lists[type], bio, nr_segs))
		ret = true;

	spin_unlock(&ctx->lock);
out_put:
	return ret;
}

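/*
 * Ask the elevator to merge @rq into an already queued request instead of
 * inserting it. Requests freed as part of the merge are collected on @free.
 */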
bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq,
				   struct list_head *free)
{
	return rq_mergeable(rq) && elv_attempt_insert_merge(q, rq, free);
}
EXPORT_SYMBOL_GPL(blk_mq_sched_try_insert_merge);

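/*
 * Set up scheduler tags for @hctx: reuse the queue-wide shared tags if the
 * tag set uses shared tags, otherwise allocate a per-hctx map sized to
 * nr_requests.
 */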
static int blk_mq_sched_alloc_map_and_rqs(struct request_queue *q,
					  struct blk_mq_hw_ctx *hctx,
					  unsigned int hctx_idx)
{
	if (blk_mq_is_shared_tags(q->tag_set->flags)) {
		hctx->sched_tags = q->sched_shared_tags;
		return 0;
	}

	hctx->sched_tags = blk_mq_alloc_map_and_rqs(q->tag_set, hctx_idx,
						    q->nr_requests);

	if (!hctx->sched_tags)
		return -ENOMEM;
	return 0;
}

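/* Free the queue-wide shared scheduler tag map. */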
static void blk_mq_exit_sched_shared_tags(struct request_queue *queue)
{
	blk_mq_free_rq_map(queue->sched_shared_tags);
	queue->sched_shared_tags = NULL;
}

/* called in queue's release handler, tagset has gone away */
static void blk_mq_sched_tags_teardown(struct request_queue *q, unsigned int flags)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (hctx->sched_tags) {
			if (!blk_mq_is_shared_tags(flags))
				blk_mq_free_rq_map(hctx->sched_tags);
			hctx->sched_tags = NULL;
		}
	}

	if (blk_mq_is_shared_tags(flags))
		blk_mq_exit_sched_shared_tags(q);
}

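/*
 * Allocate the queue-wide shared scheduler tags at the maximum depth (so a
 * later nr_requests update never needs a reallocation) and let
 * blk_mq_tag_update_sched_shared_tags() adjust the usable depth.
 */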
static int blk_mq_init_sched_shared_tags(struct request_queue *queue)
{
	struct blk_mq_tag_set *set = queue->tag_set;

	/*
	 * Set the initial depth to the max so that we don't need to
	 * reallocate when updating nr_requests.
	 */
	queue->sched_shared_tags = blk_mq_alloc_map_and_rqs(set,
						BLK_MQ_NO_HCTX_IDX,
						MAX_SCHED_RQ);
	if (!queue->sched_shared_tags)
		return -ENOMEM;

	blk_mq_tag_update_sched_shared_tags(queue);

	return 0;
}

/* caller must have a reference to @e, will grab another one if successful */
int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
{
	unsigned int flags = q->tag_set->flags;
	struct blk_mq_hw_ctx *hctx;
	struct elevator_queue *eq;
	unsigned long i;
	int ret;

	/*
	 * Default to twice the smaller of the hardware queue depth and 128,
	 * since we don't split into sync/async like the old code did.
	 * Additionally, this is a per-hw-queue depth.
	 */
	q->nr_requests = 2 * min_t(unsigned int, q->tag_set->queue_depth,
				   BLKDEV_DEFAULT_RQ);

	if (blk_mq_is_shared_tags(flags)) {
		ret = blk_mq_init_sched_shared_tags(q);
		if (ret)
			return ret;
	}

	queue_for_each_hw_ctx(q, hctx, i) {
		ret = blk_mq_sched_alloc_map_and_rqs(q, hctx, i);
		if (ret)
			goto err_free_map_and_rqs;
	}

	ret = e->ops.init_sched(q, e);
	if (ret)
		goto err_free_map_and_rqs;

	mutex_lock(&q->debugfs_mutex);
	blk_mq_debugfs_register_sched(q);
	mutex_unlock(&q->debugfs_mutex);

	queue_for_each_hw_ctx(q, hctx, i) {
		if (e->ops.init_hctx) {
			ret = e->ops.init_hctx(hctx, i);
			if (ret) {
				eq = q->elevator;
				blk_mq_sched_free_rqs(q);
				blk_mq_exit_sched(q, eq);
				kobject_put(&eq->kobj);
				return ret;
			}
		}
		mutex_lock(&q->debugfs_mutex);
		blk_mq_debugfs_register_sched_hctx(q, hctx);
		mutex_unlock(&q->debugfs_mutex);
	}

	return 0;

err_free_map_and_rqs:
	blk_mq_sched_free_rqs(q);
	blk_mq_sched_tags_teardown(q, flags);

	q->elevator = NULL;
	return ret;
}

/*
 * called from either blk_queue_cleanup or elevator_switch; the tagset is
 * required for freeing the requests
 */
void blk_mq_sched_free_rqs(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	if (blk_mq_is_shared_tags(q->tag_set->flags)) {
		blk_mq_free_rqs(q->tag_set, q->sched_shared_tags,
				BLK_MQ_NO_HCTX_IDX);
	} else {
		queue_for_each_hw_ctx(q, hctx, i) {
			if (hctx->sched_tags)
				blk_mq_free_rqs(q->tag_set,
						hctx->sched_tags, i);
		}
	}
}

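/*
 * Tear down the elevator for @q: run the per-hctx and per-queue exit hooks,
 * remove the debugfs entries, free the scheduler tags and clear q->elevator.
 */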
void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;
	unsigned int flags = 0;

	queue_for_each_hw_ctx(q, hctx, i) {
		mutex_lock(&q->debugfs_mutex);
		blk_mq_debugfs_unregister_sched_hctx(hctx);
		mutex_unlock(&q->debugfs_mutex);

		if (e->type->ops.exit_hctx && hctx->sched_data) {
			e->type->ops.exit_hctx(hctx, i);
			hctx->sched_data = NULL;
		}
		flags = hctx->flags;
	}

	mutex_lock(&q->debugfs_mutex);
	blk_mq_debugfs_unregister_sched(q);
	mutex_unlock(&q->debugfs_mutex);

	if (e->type->ops.exit_sched)
		e->type->ops.exit_sched(e);
	blk_mq_sched_tags_teardown(q, flags);
	q->elevator = NULL;
}