1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Deadline Scheduling Class (SCHED_DEADLINE)
4 *
5 * Earliest Deadline First (EDF) + Constant Bandwidth Server (CBS).
6 *
 * Tasks that periodically execute their instances for less than their
 * runtime won't miss any of their deadlines.
 * Tasks that are not periodic or sporadic, or that try to execute more
 * than their reserved bandwidth, will be slowed down (and may potentially
 * miss some of their deadlines), and won't affect any other task.
12 *
13 * Copyright (C) 2012 Dario Faggioli <raistlin@linux.it>,
14 *                    Juri Lelli <juri.lelli@gmail.com>,
15 *                    Michael Trimarchi <michael@amarulasolutions.com>,
16 *                    Fabio Checconi <fchecconi@gmail.com>
17 */
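
/*
 * For reference, a task asks for a -deadline reservation from userspace
 * with sched_setattr(2).  A minimal sketch (illustrative only, all values
 * in nanoseconds), requesting 10ms of runtime every 100ms:
 *
 *	struct sched_attr attr = {
 *		.size		= sizeof(attr),
 *		.sched_policy	= SCHED_DEADLINE,
 *		.sched_runtime	=  10 * 1000 * 1000,
 *		.sched_deadline	= 100 * 1000 * 1000,
 *		.sched_period	= 100 * 1000 * 1000,
 *	};
 *	syscall(__NR_sched_setattr, 0, &attr, 0);
 *
 * See Documentation/scheduler/sched-deadline.rst for the full story.
 */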
18
19#include <linux/cpuset.h>
20
21/*
22 * Default limits for DL period; on the top end we guard against small util
23 * tasks still getting ridiculously long effective runtimes, on the bottom end we
24 * guard against timer DoS.
25 */
26static unsigned int sysctl_sched_dl_period_max = 1 << 22; /* ~4 seconds */
27static unsigned int sysctl_sched_dl_period_min = 100;     /* 100 us */
28#ifdef CONFIG_SYSCTL
29static struct ctl_table sched_dl_sysctls[] = {
30	{
31		.procname       = "sched_deadline_period_max_us",
32		.data           = &sysctl_sched_dl_period_max,
33		.maxlen         = sizeof(unsigned int),
34		.mode           = 0644,
35		.proc_handler   = proc_douintvec_minmax,
36		.extra1         = (void *)&sysctl_sched_dl_period_min,
37	},
38	{
39		.procname       = "sched_deadline_period_min_us",
40		.data           = &sysctl_sched_dl_period_min,
41		.maxlen         = sizeof(unsigned int),
42		.mode           = 0644,
43		.proc_handler   = proc_douintvec_minmax,
44		.extra2         = (void *)&sysctl_sched_dl_period_max,
45	},
46	{}
47};
48
49static int __init sched_dl_sysctl_init(void)
50{
51	register_sysctl_init("kernel", sched_dl_sysctls);
52	return 0;
53}
54late_initcall(sched_dl_sysctl_init);
55#endif
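
/*
 * Example usage (assuming the sysctls above are compiled in): they show up
 * under /proc/sys/kernel/, so an administrator could raise the minimum
 * accepted period to 1ms with:
 *
 *	# echo 1000 > /proc/sys/kernel/sched_deadline_period_min_us
 *
 * Values are in microseconds, matching the _us suffix of the knobs.
 */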
56
57static bool dl_server(struct sched_dl_entity *dl_se)
58{
59	return dl_se->dl_server;
60}
61
62static inline struct task_struct *dl_task_of(struct sched_dl_entity *dl_se)
63{
64	BUG_ON(dl_server(dl_se));
65	return container_of(dl_se, struct task_struct, dl);
66}
67
68static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq)
69{
70	return container_of(dl_rq, struct rq, dl);
71}
72
73static inline struct rq *rq_of_dl_se(struct sched_dl_entity *dl_se)
74{
75	struct rq *rq = dl_se->rq;
76
77	if (!dl_server(dl_se))
78		rq = task_rq(dl_task_of(dl_se));
79
80	return rq;
81}
82
83static inline struct dl_rq *dl_rq_of_se(struct sched_dl_entity *dl_se)
84{
85	return &rq_of_dl_se(dl_se)->dl;
86}
87
88static inline int on_dl_rq(struct sched_dl_entity *dl_se)
89{
90	return !RB_EMPTY_NODE(&dl_se->rb_node);
91}
92
93#ifdef CONFIG_RT_MUTEXES
94static inline struct sched_dl_entity *pi_of(struct sched_dl_entity *dl_se)
95{
96	return dl_se->pi_se;
97}
98
99static inline bool is_dl_boosted(struct sched_dl_entity *dl_se)
100{
101	return pi_of(dl_se) != dl_se;
102}
103#else
104static inline struct sched_dl_entity *pi_of(struct sched_dl_entity *dl_se)
105{
106	return dl_se;
107}
108
109static inline bool is_dl_boosted(struct sched_dl_entity *dl_se)
110{
111	return false;
112}
113#endif
114
115#ifdef CONFIG_SMP
116static inline struct dl_bw *dl_bw_of(int i)
117{
118	RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
119			 "sched RCU must be held");
120	return &cpu_rq(i)->rd->dl_bw;
121}
122
123static inline int dl_bw_cpus(int i)
124{
125	struct root_domain *rd = cpu_rq(i)->rd;
126	int cpus;
127
128	RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
129			 "sched RCU must be held");
130
131	if (cpumask_subset(rd->span, cpu_active_mask))
132		return cpumask_weight(rd->span);
133
134	cpus = 0;
135
136	for_each_cpu_and(i, rd->span, cpu_active_mask)
137		cpus++;
138
139	return cpus;
140}
141
142static inline unsigned long __dl_bw_capacity(const struct cpumask *mask)
143{
144	unsigned long cap = 0;
145	int i;
146
147	for_each_cpu_and(i, mask, cpu_active_mask)
148		cap += arch_scale_cpu_capacity(i);
149
150	return cap;
151}
152
153/*
154 * XXX Fix: If 'rq->rd == def_root_domain' perform AC against capacity
155 * of the CPU the task is running on rather rd's \Sum CPU capacity.
156 */
157static inline unsigned long dl_bw_capacity(int i)
158{
159	if (!sched_asym_cpucap_active() &&
160	    arch_scale_cpu_capacity(i) == SCHED_CAPACITY_SCALE) {
161		return dl_bw_cpus(i) << SCHED_CAPACITY_SHIFT;
162	} else {
163		RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
164				 "sched RCU must be held");
165
166		return __dl_bw_capacity(cpu_rq(i)->rd->span);
167	}
168}
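
/*
 * Example: on a root domain with 4 fully active, symmetric CPUs the fast
 * path above returns 4 << SCHED_CAPACITY_SHIFT == 4096; on an asymmetric
 * (e.g. big.LITTLE) system the slow path instead sums the individual
 * arch_scale_cpu_capacity() values of the active CPUs.
 */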
169
170static inline bool dl_bw_visited(int cpu, u64 gen)
171{
172	struct root_domain *rd = cpu_rq(cpu)->rd;
173
174	if (rd->visit_gen == gen)
175		return true;
176
177	rd->visit_gen = gen;
178	return false;
179}
180
181static inline
182void __dl_update(struct dl_bw *dl_b, s64 bw)
183{
184	struct root_domain *rd = container_of(dl_b, struct root_domain, dl_bw);
185	int i;
186
187	RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
188			 "sched RCU must be held");
189	for_each_cpu_and(i, rd->span, cpu_active_mask) {
190		struct rq *rq = cpu_rq(i);
191
192		rq->dl.extra_bw += bw;
193	}
194}
195#else
196static inline struct dl_bw *dl_bw_of(int i)
197{
198	return &cpu_rq(i)->dl.dl_bw;
199}
200
201static inline int dl_bw_cpus(int i)
202{
203	return 1;
204}
205
206static inline unsigned long dl_bw_capacity(int i)
207{
208	return SCHED_CAPACITY_SCALE;
209}
210
211static inline bool dl_bw_visited(int cpu, u64 gen)
212{
213	return false;
214}
215
216static inline
217void __dl_update(struct dl_bw *dl_b, s64 bw)
218{
219	struct dl_rq *dl = container_of(dl_b, struct dl_rq, dl_bw);
220
221	dl->extra_bw += bw;
222}
223#endif
224
225static inline
226void __dl_sub(struct dl_bw *dl_b, u64 tsk_bw, int cpus)
227{
228	dl_b->total_bw -= tsk_bw;
229	__dl_update(dl_b, (s32)tsk_bw / cpus);
230}
231
232static inline
233void __dl_add(struct dl_bw *dl_b, u64 tsk_bw, int cpus)
234{
235	dl_b->total_bw += tsk_bw;
236	__dl_update(dl_b, -((s32)tsk_bw / cpus));
237}
238
239static inline bool
240__dl_overflow(struct dl_bw *dl_b, unsigned long cap, u64 old_bw, u64 new_bw)
241{
242	return dl_b->bw != -1 &&
243	       cap_scale(dl_b->bw, cap) < dl_b->total_bw - old_bw + new_bw;
244}
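
/*
 * Worked example for the check above (illustrative numbers): with the
 * default 95% limit, dl_b->bw encodes 0.95 and, on a 4-CPU root domain at
 * full capacity, cap_scale(dl_b->bw, cap) corresponds to 3.8 CPUs' worth
 * of bandwidth.  A change is rejected if the summed bandwidth of admitted
 * tasks, after replacing old_bw with new_bw, would exceed that.
 */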
245
246static inline
247void __add_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
248{
249	u64 old = dl_rq->running_bw;
250
251	lockdep_assert_rq_held(rq_of_dl_rq(dl_rq));
252	dl_rq->running_bw += dl_bw;
253	SCHED_WARN_ON(dl_rq->running_bw < old); /* overflow */
254	SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw);
255	/* kick cpufreq (see the comment in kernel/sched/sched.h). */
256	cpufreq_update_util(rq_of_dl_rq(dl_rq), 0);
257}
258
259static inline
260void __sub_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
261{
262	u64 old = dl_rq->running_bw;
263
264	lockdep_assert_rq_held(rq_of_dl_rq(dl_rq));
265	dl_rq->running_bw -= dl_bw;
266	SCHED_WARN_ON(dl_rq->running_bw > old); /* underflow */
267	if (dl_rq->running_bw > old)
268		dl_rq->running_bw = 0;
269	/* kick cpufreq (see the comment in kernel/sched/sched.h). */
270	cpufreq_update_util(rq_of_dl_rq(dl_rq), 0);
271}
272
273static inline
274void __add_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
275{
276	u64 old = dl_rq->this_bw;
277
278	lockdep_assert_rq_held(rq_of_dl_rq(dl_rq));
279	dl_rq->this_bw += dl_bw;
280	SCHED_WARN_ON(dl_rq->this_bw < old); /* overflow */
281}
282
283static inline
284void __sub_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
285{
286	u64 old = dl_rq->this_bw;
287
288	lockdep_assert_rq_held(rq_of_dl_rq(dl_rq));
289	dl_rq->this_bw -= dl_bw;
290	SCHED_WARN_ON(dl_rq->this_bw > old); /* underflow */
291	if (dl_rq->this_bw > old)
292		dl_rq->this_bw = 0;
293	SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw);
294}
295
296static inline
297void add_rq_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
298{
299	if (!dl_entity_is_special(dl_se))
300		__add_rq_bw(dl_se->dl_bw, dl_rq);
301}
302
303static inline
304void sub_rq_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
305{
306	if (!dl_entity_is_special(dl_se))
307		__sub_rq_bw(dl_se->dl_bw, dl_rq);
308}
309
310static inline
311void add_running_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
312{
313	if (!dl_entity_is_special(dl_se))
314		__add_running_bw(dl_se->dl_bw, dl_rq);
315}
316
317static inline
318void sub_running_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
319{
320	if (!dl_entity_is_special(dl_se))
321		__sub_running_bw(dl_se->dl_bw, dl_rq);
322}
323
324static void dl_change_utilization(struct task_struct *p, u64 new_bw)
325{
326	struct rq *rq;
327
328	WARN_ON_ONCE(p->dl.flags & SCHED_FLAG_SUGOV);
329
330	if (task_on_rq_queued(p))
331		return;
332
333	rq = task_rq(p);
334	if (p->dl.dl_non_contending) {
335		sub_running_bw(&p->dl, &rq->dl);
336		p->dl.dl_non_contending = 0;
337		/*
338		 * If the timer handler is currently running and the
339		 * timer cannot be canceled, inactive_task_timer()
		 * will see that dl_non_contending is not set, and
341		 * will not touch the rq's active utilization,
342		 * so we are still safe.
343		 */
344		if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
345			put_task_struct(p);
346	}
347	__sub_rq_bw(p->dl.dl_bw, &rq->dl);
348	__add_rq_bw(new_bw, &rq->dl);
349}
350
351static void __dl_clear_params(struct sched_dl_entity *dl_se);
352
353/*
354 * The utilization of a task cannot be immediately removed from
355 * the rq active utilization (running_bw) when the task blocks.
356 * Instead, we have to wait for the so called "0-lag time".
357 *
358 * If a task blocks before the "0-lag time", a timer (the inactive
359 * timer) is armed, and running_bw is decreased when the timer
360 * fires.
361 *
362 * If the task wakes up again before the inactive timer fires,
363 * the timer is canceled, whereas if the task wakes up after the
364 * inactive timer fired (and running_bw has been decreased) the
365 * task's utilization has to be added to running_bw again.
366 * A flag in the deadline scheduling entity (dl_non_contending)
367 * is used to avoid race conditions between the inactive timer handler
368 * and task wakeups.
369 *
370 * The following diagram shows how running_bw is updated. A task is
371 * "ACTIVE" when its utilization contributes to running_bw; an
372 * "ACTIVE contending" task is in the TASK_RUNNING state, while an
373 * "ACTIVE non contending" task is a blocked task for which the "0-lag time"
374 * has not passed yet. An "INACTIVE" task is a task for which the "0-lag"
375 * time already passed, which does not contribute to running_bw anymore.
376 *                              +------------------+
377 *             wakeup           |    ACTIVE        |
378 *          +------------------>+   contending     |
379 *          | add_running_bw    |                  |
380 *          |                   +----+------+------+
381 *          |                        |      ^
382 *          |                dequeue |      |
383 * +--------+-------+                |      |
384 * |                |   t >= 0-lag   |      | wakeup
385 * |    INACTIVE    |<---------------+      |
386 * |                | sub_running_bw |      |
387 * +--------+-------+                |      |
388 *          ^                        |      |
389 *          |              t < 0-lag |      |
390 *          |                        |      |
391 *          |                        V      |
392 *          |                   +----+------+------+
393 *          | sub_running_bw    |    ACTIVE        |
394 *          +-------------------+                  |
395 *            inactive timer    |  non contending  |
396 *            fired             +------------------+
397 *
398 * The task_non_contending() function is invoked when a task
399 * blocks, and checks if the 0-lag time already passed or
400 * not (in the first case, it directly updates running_bw;
401 * in the second case, it arms the inactive timer).
402 *
403 * The task_contending() function is invoked when a task wakes
404 * up, and checks if the task is still in the "ACTIVE non contending"
405 * state or not (in the second case, it updates running_bw).
406 */
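
/*
 * Worked example of the "0-lag time" (illustrative numbers): a task with
 * dl_runtime = 10ms and dl_period = 100ms blocks with 4ms of runtime left
 * and its absolute deadline 50ms away.  Its 0-lag time is
 * deadline - (4ms * 100ms / 10ms) = deadline - 40ms, i.e. 10ms in the
 * future, so the inactive timer is armed to fire in 10ms; only then is
 * its utilization removed from running_bw.
 */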
407static void task_non_contending(struct sched_dl_entity *dl_se)
408{
409	struct hrtimer *timer = &dl_se->inactive_timer;
410	struct rq *rq = rq_of_dl_se(dl_se);
411	struct dl_rq *dl_rq = &rq->dl;
412	s64 zerolag_time;
413
414	/*
415	 * If this is a non-deadline task that has been boosted,
416	 * do nothing
417	 */
418	if (dl_se->dl_runtime == 0)
419		return;
420
421	if (dl_entity_is_special(dl_se))
422		return;
423
424	WARN_ON(dl_se->dl_non_contending);
425
426	zerolag_time = dl_se->deadline -
427		 div64_long((dl_se->runtime * dl_se->dl_period),
428			dl_se->dl_runtime);
429
430	/*
431	 * Using relative times instead of the absolute "0-lag time"
	 * allows us to simplify the code.
433	 */
434	zerolag_time -= rq_clock(rq);
435
436	/*
437	 * If the "0-lag time" already passed, decrease the active
438	 * utilization now, instead of starting a timer
439	 */
440	if ((zerolag_time < 0) || hrtimer_active(&dl_se->inactive_timer)) {
441		if (dl_server(dl_se)) {
442			sub_running_bw(dl_se, dl_rq);
443		} else {
444			struct task_struct *p = dl_task_of(dl_se);
445
446			if (dl_task(p))
447				sub_running_bw(dl_se, dl_rq);
448
449			if (!dl_task(p) || READ_ONCE(p->__state) == TASK_DEAD) {
450				struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
451
452				if (READ_ONCE(p->__state) == TASK_DEAD)
453					sub_rq_bw(dl_se, &rq->dl);
454				raw_spin_lock(&dl_b->lock);
455				__dl_sub(dl_b, dl_se->dl_bw, dl_bw_cpus(task_cpu(p)));
456				raw_spin_unlock(&dl_b->lock);
457				__dl_clear_params(dl_se);
458			}
459		}
460
461		return;
462	}
463
464	dl_se->dl_non_contending = 1;
465	if (!dl_server(dl_se))
466		get_task_struct(dl_task_of(dl_se));
467
468	hrtimer_start(timer, ns_to_ktime(zerolag_time), HRTIMER_MODE_REL_HARD);
469}
470
471static void task_contending(struct sched_dl_entity *dl_se, int flags)
472{
473	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
474
475	/*
476	 * If this is a non-deadline task that has been boosted,
477	 * do nothing
478	 */
479	if (dl_se->dl_runtime == 0)
480		return;
481
482	if (flags & ENQUEUE_MIGRATED)
483		add_rq_bw(dl_se, dl_rq);
484
485	if (dl_se->dl_non_contending) {
486		dl_se->dl_non_contending = 0;
487		/*
488		 * If the timer handler is currently running and the
489		 * timer cannot be canceled, inactive_task_timer()
		 * will see that dl_non_contending is not set, and
491		 * will not touch the rq's active utilization,
492		 * so we are still safe.
493		 */
494		if (hrtimer_try_to_cancel(&dl_se->inactive_timer) == 1) {
495			if (!dl_server(dl_se))
496				put_task_struct(dl_task_of(dl_se));
497		}
498	} else {
499		/*
500		 * Since "dl_non_contending" is not set, the
501		 * task's utilization has already been removed from
		 * active utilization (either when the task blocked,
		 * or when the "inactive timer" fired).
504		 * So, add it back.
505		 */
506		add_running_bw(dl_se, dl_rq);
507	}
508}
509
510static inline int is_leftmost(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
511{
512	return rb_first_cached(&dl_rq->root) == &dl_se->rb_node;
513}
514
515static void init_dl_rq_bw_ratio(struct dl_rq *dl_rq);
516
517void init_dl_bw(struct dl_bw *dl_b)
518{
519	raw_spin_lock_init(&dl_b->lock);
520	if (global_rt_runtime() == RUNTIME_INF)
521		dl_b->bw = -1;
522	else
523		dl_b->bw = to_ratio(global_rt_period(), global_rt_runtime());
524	dl_b->total_bw = 0;
525}
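
/*
 * Example: with the default rt limits (global_rt_runtime() = 950000us,
 * global_rt_period() = 1000000us), to_ratio() above stores 0.95 in
 * fixed point (i.e. 0.95 << BW_SHIFT), which is the maximum fraction of
 * CPU time -deadline tasks may reserve.
 */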
526
527void init_dl_rq(struct dl_rq *dl_rq)
528{
529	dl_rq->root = RB_ROOT_CACHED;
530
531#ifdef CONFIG_SMP
532	/* zero means no -deadline tasks */
533	dl_rq->earliest_dl.curr = dl_rq->earliest_dl.next = 0;
534
535	dl_rq->overloaded = 0;
536	dl_rq->pushable_dl_tasks_root = RB_ROOT_CACHED;
537#else
538	init_dl_bw(&dl_rq->dl_bw);
539#endif
540
541	dl_rq->running_bw = 0;
542	dl_rq->this_bw = 0;
543	init_dl_rq_bw_ratio(dl_rq);
544}
545
546#ifdef CONFIG_SMP
547
548static inline int dl_overloaded(struct rq *rq)
549{
550	return atomic_read(&rq->rd->dlo_count);
551}
552
553static inline void dl_set_overload(struct rq *rq)
554{
555	if (!rq->online)
556		return;
557
558	cpumask_set_cpu(rq->cpu, rq->rd->dlo_mask);
559	/*
560	 * Must be visible before the overload count is
561	 * set (as in sched_rt.c).
562	 *
563	 * Matched by the barrier in pull_dl_task().
564	 */
565	smp_wmb();
566	atomic_inc(&rq->rd->dlo_count);
567}
568
569static inline void dl_clear_overload(struct rq *rq)
570{
571	if (!rq->online)
572		return;
573
574	atomic_dec(&rq->rd->dlo_count);
575	cpumask_clear_cpu(rq->cpu, rq->rd->dlo_mask);
576}
577
578#define __node_2_pdl(node) \
579	rb_entry((node), struct task_struct, pushable_dl_tasks)
580
581static inline bool __pushable_less(struct rb_node *a, const struct rb_node *b)
582{
583	return dl_entity_preempt(&__node_2_pdl(a)->dl, &__node_2_pdl(b)->dl);
584}
585
586static inline int has_pushable_dl_tasks(struct rq *rq)
587{
588	return !RB_EMPTY_ROOT(&rq->dl.pushable_dl_tasks_root.rb_root);
589}
590
591/*
 * Unlike in sched_rt.c, the list of pushable -deadline tasks is not a
 * plist; it is an rb-tree with tasks ordered by deadline.
594 */
595static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
596{
597	struct rb_node *leftmost;
598
599	WARN_ON_ONCE(!RB_EMPTY_NODE(&p->pushable_dl_tasks));
600
601	leftmost = rb_add_cached(&p->pushable_dl_tasks,
602				 &rq->dl.pushable_dl_tasks_root,
603				 __pushable_less);
604	if (leftmost)
605		rq->dl.earliest_dl.next = p->dl.deadline;
606
607	if (!rq->dl.overloaded) {
608		dl_set_overload(rq);
609		rq->dl.overloaded = 1;
610	}
611}
612
613static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
614{
615	struct dl_rq *dl_rq = &rq->dl;
616	struct rb_root_cached *root = &dl_rq->pushable_dl_tasks_root;
617	struct rb_node *leftmost;
618
619	if (RB_EMPTY_NODE(&p->pushable_dl_tasks))
620		return;
621
622	leftmost = rb_erase_cached(&p->pushable_dl_tasks, root);
623	if (leftmost)
624		dl_rq->earliest_dl.next = __node_2_pdl(leftmost)->dl.deadline;
625
626	RB_CLEAR_NODE(&p->pushable_dl_tasks);
627
628	if (!has_pushable_dl_tasks(rq) && rq->dl.overloaded) {
629		dl_clear_overload(rq);
630		rq->dl.overloaded = 0;
631	}
632}
633
634static int push_dl_task(struct rq *rq);
635
636static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
637{
638	return rq->online && dl_task(prev);
639}
640
641static DEFINE_PER_CPU(struct balance_callback, dl_push_head);
642static DEFINE_PER_CPU(struct balance_callback, dl_pull_head);
643
644static void push_dl_tasks(struct rq *);
645static void pull_dl_task(struct rq *);
646
647static inline void deadline_queue_push_tasks(struct rq *rq)
648{
649	if (!has_pushable_dl_tasks(rq))
650		return;
651
652	queue_balance_callback(rq, &per_cpu(dl_push_head, rq->cpu), push_dl_tasks);
653}
654
655static inline void deadline_queue_pull_task(struct rq *rq)
656{
657	queue_balance_callback(rq, &per_cpu(dl_pull_head, rq->cpu), pull_dl_task);
658}
659
660static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq);
661
662static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p)
663{
664	struct rq *later_rq = NULL;
665	struct dl_bw *dl_b;
666
667	later_rq = find_lock_later_rq(p, rq);
668	if (!later_rq) {
669		int cpu;
670
671		/*
672		 * If we cannot preempt any rq, fall back to pick any
673		 * online CPU:
674		 */
675		cpu = cpumask_any_and(cpu_active_mask, p->cpus_ptr);
676		if (cpu >= nr_cpu_ids) {
677			/*
678			 * Failed to find any suitable CPU.
679			 * The task will never come back!
680			 */
681			WARN_ON_ONCE(dl_bandwidth_enabled());
682
683			/*
684			 * If admission control is disabled we
685			 * try a little harder to let the task
686			 * run.
687			 */
688			cpu = cpumask_any(cpu_active_mask);
689		}
690		later_rq = cpu_rq(cpu);
691		double_lock_balance(rq, later_rq);
692	}
693
694	if (p->dl.dl_non_contending || p->dl.dl_throttled) {
695		/*
696		 * Inactive timer is armed (or callback is running, but
		 * waiting for us to release rq locks). In any case, when it
		 * fires (or continues), it will see the running_bw of this
699		 * task migrated to later_rq (and correctly handle it).
700		 */
701		sub_running_bw(&p->dl, &rq->dl);
702		sub_rq_bw(&p->dl, &rq->dl);
703
704		add_rq_bw(&p->dl, &later_rq->dl);
705		add_running_bw(&p->dl, &later_rq->dl);
706	} else {
707		sub_rq_bw(&p->dl, &rq->dl);
708		add_rq_bw(&p->dl, &later_rq->dl);
709	}
710
711	/*
712	 * And we finally need to fixup root_domain(s) bandwidth accounting,
713	 * since p is still hanging out in the old (now moved to default) root
714	 * domain.
715	 */
716	dl_b = &rq->rd->dl_bw;
717	raw_spin_lock(&dl_b->lock);
718	__dl_sub(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));
719	raw_spin_unlock(&dl_b->lock);
720
721	dl_b = &later_rq->rd->dl_bw;
722	raw_spin_lock(&dl_b->lock);
723	__dl_add(dl_b, p->dl.dl_bw, cpumask_weight(later_rq->rd->span));
724	raw_spin_unlock(&dl_b->lock);
725
726	set_task_cpu(p, later_rq->cpu);
727	double_unlock_balance(later_rq, rq);
728
729	return later_rq;
730}
731
732#else
733
734static inline
735void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
736{
737}
738
739static inline
740void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
741{
742}
743
744static inline
745void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
746{
747}
748
749static inline
750void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
751{
752}
753
754static inline void deadline_queue_push_tasks(struct rq *rq)
755{
756}
757
758static inline void deadline_queue_pull_task(struct rq *rq)
759{
760}
761#endif /* CONFIG_SMP */
762
763static void
764enqueue_dl_entity(struct sched_dl_entity *dl_se, int flags);
765static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags);
766static void dequeue_dl_entity(struct sched_dl_entity *dl_se, int flags);
767static void wakeup_preempt_dl(struct rq *rq, struct task_struct *p, int flags);
768
769static inline void replenish_dl_new_period(struct sched_dl_entity *dl_se,
770					    struct rq *rq)
771{
772	/* for non-boosted task, pi_of(dl_se) == dl_se */
773	dl_se->deadline = rq_clock(rq) + pi_of(dl_se)->dl_deadline;
774	dl_se->runtime = pi_of(dl_se)->dl_runtime;
775}
776
777/*
778 * We are being explicitly informed that a new instance is starting,
779 * and this means that:
780 *  - the absolute deadline of the entity has to be placed at
781 *    current time + relative deadline;
782 *  - the runtime of the entity has to be set to the maximum value.
783 *
 * The ability to signal such an event is useful whenever a -deadline
 * entity wants to (try to!) synchronize its behaviour with the scheduler's,
 * and to (try to!) reconcile itself with its own scheduling
 * parameters.
788 */
789static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se)
790{
791	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
792	struct rq *rq = rq_of_dl_rq(dl_rq);
793
794	WARN_ON(is_dl_boosted(dl_se));
795	WARN_ON(dl_time_before(rq_clock(rq), dl_se->deadline));
796
797	/*
798	 * We are racing with the deadline timer. So, do nothing because
799	 * the deadline timer handler will take care of properly recharging
800	 * the runtime and postponing the deadline
801	 */
802	if (dl_se->dl_throttled)
803		return;
804
805	/*
806	 * We use the regular wall clock time to set deadlines in the
807	 * future; in fact, we must consider execution overheads (time
808	 * spent on hardirq context, etc.).
809	 */
810	replenish_dl_new_period(dl_se, rq);
811}
812
813/*
 * Pure Earliest Deadline First (EDF) scheduling does not deal with the
 * possibility of an entity lasting more than what it declared, and thus
 * exhausting its runtime.
 *
 * Here we are interested in making runtime overrun possible, but we do
 * not want a misbehaving entity to affect the scheduling of all
 * other entities.
 * Therefore, a budgeting strategy called Constant Bandwidth Server (CBS)
 * is used, in order to confine each entity within its own bandwidth.
 *
 * This function deals exactly with that, and ensures that when the runtime
 * of an entity is replenished, its deadline is also postponed. That ensures
 * the overrunning entity can't interfere with other entities in the system
 * and can't make them miss their deadlines. Typical reasons why this kind
 * of overrun could happen are an entity voluntarily trying to exceed its
 * runtime, or having underestimated it during sched_setattr().
830 */
831static void replenish_dl_entity(struct sched_dl_entity *dl_se)
832{
833	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
834	struct rq *rq = rq_of_dl_rq(dl_rq);
835
836	WARN_ON_ONCE(pi_of(dl_se)->dl_runtime <= 0);
837
838	/*
839	 * This could be the case for a !-dl task that is boosted.
840	 * Just go with full inherited parameters.
841	 */
842	if (dl_se->dl_deadline == 0)
843		replenish_dl_new_period(dl_se, rq);
844
845	if (dl_se->dl_yielded && dl_se->runtime > 0)
846		dl_se->runtime = 0;
847
848	/*
849	 * We keep moving the deadline away until we get some
850	 * available runtime for the entity. This ensures correct
851	 * handling of situations where the runtime overrun is
	 * arbitrarily large.
853	 */
854	while (dl_se->runtime <= 0) {
855		dl_se->deadline += pi_of(dl_se)->dl_period;
856		dl_se->runtime += pi_of(dl_se)->dl_runtime;
857	}
858
859	/*
860	 * At this point, the deadline really should be "in
861	 * the future" with respect to rq->clock. If it's
862	 * not, we are, for some reason, lagging too much!
	 * Anyway, after having warned userspace about that,
	 * we still try to keep things running by
	 * resetting the deadline and the budget of the
	 * entity.
867	 */
868	if (dl_time_before(dl_se->deadline, rq_clock(rq))) {
869		printk_deferred_once("sched: DL replenish lagged too much\n");
870		replenish_dl_new_period(dl_se, rq);
871	}
872
873	if (dl_se->dl_yielded)
874		dl_se->dl_yielded = 0;
875	if (dl_se->dl_throttled)
876		dl_se->dl_throttled = 0;
877}
878
879/*
880 * Here we check if --at time t-- an entity (which is probably being
881 * [re]activated or, in general, enqueued) can use its remaining runtime
882 * and its current deadline _without_ exceeding the bandwidth it is
883 * assigned (function returns true if it can't). We are in fact applying
884 * one of the CBS rules: when a task wakes up, if the residual runtime
885 * over residual deadline fits within the allocated bandwidth, then we
886 * can keep the current (absolute) deadline and residual budget without
887 * disrupting the schedulability of the system. Otherwise, we should
888 * refill the runtime and set the deadline a period in the future,
889 * because keeping the current (absolute) deadline of the task would
890 * result in breaking guarantees promised to other tasks (refer to
891 * Documentation/scheduler/sched-deadline.rst for more information).
892 *
893 * This function returns true if:
894 *
895 *   runtime / (deadline - t) > dl_runtime / dl_deadline ,
896 *
897 * IOW we can't recycle current parameters.
898 *
899 * Notice that the bandwidth check is done against the deadline. For
 * a task with deadline equal to period this is the same as using
 * dl_period instead of dl_deadline in the equation above.
902 */
903static bool dl_entity_overflow(struct sched_dl_entity *dl_se, u64 t)
904{
905	u64 left, right;
906
907	/*
908	 * left and right are the two sides of the equation above,
909	 * after a bit of shuffling to use multiplications instead
910	 * of divisions.
911	 *
912	 * Note that none of the time values involved in the two
913	 * multiplications are absolute: dl_deadline and dl_runtime
914	 * are the relative deadline and the maximum runtime of each
915	 * instance, runtime is the runtime left for the last instance
916	 * and (deadline - t), since t is rq->clock, is the time left
917	 * to the (absolute) deadline. Even if overflowing the u64 type
918	 * is very unlikely to occur in both cases, here we scale down
919	 * as we want to avoid that risk at all. Scaling down by 10
920	 * means that we reduce granularity to 1us. We are fine with it,
921	 * since this is only a true/false check and, anyway, thinking
922	 * of anything below microseconds resolution is actually fiction
923	 * (but still we want to give the user that illusion >;).
924	 */
925	left = (pi_of(dl_se)->dl_deadline >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
926	right = ((dl_se->deadline - t) >> DL_SCALE) *
927		(pi_of(dl_se)->dl_runtime >> DL_SCALE);
928
929	return dl_time_before(right, left);
930}
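
/*
 * Worked example (illustrative numbers): dl_runtime = 10ms,
 * dl_deadline = 30ms, so the reserved density is 1/3.  Waking up with 4ms
 * of runtime left and 15ms to the deadline gives 4/15 < 1/3: no overflow,
 * the current deadline and runtime can be kept.  With 7ms left instead,
 * 7/15 > 1/3: the entity cannot keep its current parameters (see
 * update_dl_entity() for what happens next).
 */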
931
932/*
 * Revised wakeup rule [1]: For self-suspending tasks, rather than
 * re-initializing the task's runtime and deadline, the revised wakeup
 * rule adjusts the task's runtime so that the task cannot overrun its
 * density.
937 *
938 * Reasoning: a task may overrun the density if:
939 *    runtime / (deadline - t) > dl_runtime / dl_deadline
940 *
941 * Therefore, runtime can be adjusted to:
942 *     runtime = (dl_runtime / dl_deadline) * (deadline - t)
943 *
 * This way, runtime will be equal to the maximum budget the task can
 * use without exceeding its density.
946 *
947 * [1] Luca Abeni, Giuseppe Lipari, and Juri Lelli. 2015. Constant
948 * bandwidth server revisited. SIGBED Rev. 11, 4 (January 2015), 19-24.
949 */
950static void
951update_dl_revised_wakeup(struct sched_dl_entity *dl_se, struct rq *rq)
952{
953	u64 laxity = dl_se->deadline - rq_clock(rq);
954
955	/*
956	 * If the task has deadline < period, and the deadline is in the past,
957	 * it should already be throttled before this check.
958	 *
959	 * See update_dl_entity() comments for further details.
960	 */
961	WARN_ON(dl_time_before(dl_se->deadline, rq_clock(rq)));
962
963	dl_se->runtime = (dl_se->dl_density * laxity) >> BW_SHIFT;
964}
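
/*
 * Worked example (illustrative numbers): with dl_runtime = 10ms and
 * dl_deadline = 30ms the density is 1/3.  If the task wakes up 15ms before
 * its current deadline (laxity = 15ms), its remaining runtime is clipped
 * to 1/3 * 15ms = 5ms, so it cannot exceed its reserved density before
 * the deadline.
 */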
965
966/*
967 * Regarding the deadline, a task with implicit deadline has a relative
968 * deadline == relative period. A task with constrained deadline has a
969 * relative deadline <= relative period.
970 *
971 * We support constrained deadline tasks. However, there are some restrictions
972 * applied only for tasks which do not have an implicit deadline. See
973 * update_dl_entity() to know more about such restrictions.
974 *
 * dl_is_implicit() returns true if the task has an implicit deadline.
976 */
977static inline bool dl_is_implicit(struct sched_dl_entity *dl_se)
978{
979	return dl_se->dl_deadline == dl_se->dl_period;
980}
981
982/*
983 * When a deadline entity is placed in the runqueue, its runtime and deadline
984 * might need to be updated. This is done by a CBS wake up rule. There are two
985 * different rules: 1) the original CBS; and 2) the Revisited CBS.
986 *
987 * When the task is starting a new period, the Original CBS is used. In this
988 * case, the runtime is replenished and a new absolute deadline is set.
989 *
 * When a task is queued before the beginning of the next period, using the
 * remaining runtime and deadline could make the entity overflow its
 * bandwidth; see dl_entity_overflow() for more about runtime overflow. When
 * such a case is detected, the runtime and deadline need to be updated.
994 *
995 * If the task has an implicit deadline, i.e., deadline == period, the Original
 * CBS is applied. The runtime is replenished and a new absolute deadline is
997 * set, as in the previous cases.
998 *
999 * However, the Original CBS does not work properly for tasks with
1000 * deadline < period, which are said to have a constrained deadline. By
1001 * applying the Original CBS, a constrained deadline task would be able to run
1002 * runtime/deadline in a period. With deadline < period, the task would
1003 * overrun the runtime/period allowed bandwidth, breaking the admission test.
1004 *
 * In order to prevent this misbehaviour, the Revisited CBS is used for
1006 * constrained deadline tasks when a runtime overflow is detected. In the
1007 * Revisited CBS, rather than replenishing & setting a new absolute deadline,
1008 * the remaining runtime of the task is reduced to avoid runtime overflow.
 * Please refer to the comments of the update_dl_revised_wakeup() function
 * for more about the Revised CBS rule.
1011 */
1012static void update_dl_entity(struct sched_dl_entity *dl_se)
1013{
1014	struct rq *rq = rq_of_dl_se(dl_se);
1015
1016	if (dl_time_before(dl_se->deadline, rq_clock(rq)) ||
1017	    dl_entity_overflow(dl_se, rq_clock(rq))) {
1018
1019		if (unlikely(!dl_is_implicit(dl_se) &&
1020			     !dl_time_before(dl_se->deadline, rq_clock(rq)) &&
1021			     !is_dl_boosted(dl_se))) {
1022			update_dl_revised_wakeup(dl_se, rq);
1023			return;
1024		}
1025
1026		replenish_dl_new_period(dl_se, rq);
1027	}
1028}
1029
1030static inline u64 dl_next_period(struct sched_dl_entity *dl_se)
1031{
1032	return dl_se->deadline - dl_se->dl_deadline + dl_se->dl_period;
1033}
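
/*
 * Example: for a constrained task with dl_deadline = 30ms and
 * dl_period = 100ms whose current absolute deadline is D, the current
 * period started at D - 30ms and the next one begins at D + 70ms, which
 * is what the expression above computes.
 */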
1034
1035/*
1036 * If the entity depleted all its runtime, and if we want it to sleep
1037 * while waiting for some new execution time to become available, we
1038 * set the bandwidth replenishment timer to the replenishment instant
1039 * and try to activate it.
1040 *
1041 * Notice that it is important for the caller to know if the timer
1042 * actually started or not (i.e., the replenishment instant is in
1043 * the future or in the past).
1044 */
1045static int start_dl_timer(struct sched_dl_entity *dl_se)
1046{
1047	struct hrtimer *timer = &dl_se->dl_timer;
1048	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
1049	struct rq *rq = rq_of_dl_rq(dl_rq);
1050	ktime_t now, act;
1051	s64 delta;
1052
1053	lockdep_assert_rq_held(rq);
1054
1055	/*
	 * We want the timer to fire at the deadline, but considering
	 * that the deadline is actually computed from rq->clock and not from
	 * the hrtimer's time base reading.
1059	 */
1060	act = ns_to_ktime(dl_next_period(dl_se));
1061	now = hrtimer_cb_get_time(timer);
1062	delta = ktime_to_ns(now) - rq_clock(rq);
1063	act = ktime_add_ns(act, delta);
1064
1065	/*
1066	 * If the expiry time already passed, e.g., because the value
1067	 * chosen as the deadline is too small, don't even try to
1068	 * start the timer in the past!
1069	 */
1070	if (ktime_us_delta(act, now) < 0)
1071		return 0;
1072
1073	/*
1074	 * !enqueued will guarantee another callback; even if one is already in
1075	 * progress. This ensures a balanced {get,put}_task_struct().
1076	 *
1077	 * The race against __run_timer() clearing the enqueued state is
1078	 * harmless because we're holding task_rq()->lock, therefore the timer
1079	 * expiring after we've done the check will wait on its task_rq_lock()
1080	 * and observe our state.
1081	 */
1082	if (!hrtimer_is_queued(timer)) {
1083		if (!dl_server(dl_se))
1084			get_task_struct(dl_task_of(dl_se));
1085		hrtimer_start(timer, act, HRTIMER_MODE_ABS_HARD);
1086	}
1087
1088	return 1;
1089}
1090
1091static void __push_dl_task(struct rq *rq, struct rq_flags *rf)
1092{
1093#ifdef CONFIG_SMP
1094	/*
1095	 * Queueing this task back might have overloaded rq, check if we need
1096	 * to kick someone away.
1097	 */
1098	if (has_pushable_dl_tasks(rq)) {
1099		/*
		 * Nothing relies on rq->lock after this, so it's safe to drop
1101		 * rq->lock.
1102		 */
1103		rq_unpin_lock(rq, rf);
1104		push_dl_task(rq);
1105		rq_repin_lock(rq, rf);
1106	}
1107#endif
1108}
1109
1110/*
1111 * This is the bandwidth enforcement timer callback. If here, we know
1112 * a task is not on its dl_rq, since the fact that the timer was running
1113 * means the task is throttled and needs a runtime replenishment.
1114 *
 * However, what we actually do depends on whether the task is still active
 * (it is on its rq) or has been removed from there by a call to
1117 * dequeue_task_dl(). In the former case we must issue the runtime
1118 * replenishment and add the task back to the dl_rq; in the latter, we just
1119 * do nothing but clearing dl_throttled, so that runtime and deadline
1120 * updating (and the queueing back to dl_rq) will be done by the
1121 * next call to enqueue_task_dl().
1122 */
1123static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
1124{
1125	struct sched_dl_entity *dl_se = container_of(timer,
1126						     struct sched_dl_entity,
1127						     dl_timer);
1128	struct task_struct *p;
1129	struct rq_flags rf;
1130	struct rq *rq;
1131
1132	if (dl_server(dl_se)) {
1133		struct rq *rq = rq_of_dl_se(dl_se);
1134		struct rq_flags rf;
1135
1136		rq_lock(rq, &rf);
1137		if (dl_se->dl_throttled) {
1138			sched_clock_tick();
1139			update_rq_clock(rq);
1140
1141			if (dl_se->server_has_tasks(dl_se)) {
1142				enqueue_dl_entity(dl_se, ENQUEUE_REPLENISH);
1143				resched_curr(rq);
1144				__push_dl_task(rq, &rf);
1145			} else {
1146				replenish_dl_entity(dl_se);
1147			}
1148
1149		}
1150		rq_unlock(rq, &rf);
1151
1152		return HRTIMER_NORESTART;
1153	}
1154
1155	p = dl_task_of(dl_se);
1156	rq = task_rq_lock(p, &rf);
1157
1158	/*
1159	 * The task might have changed its scheduling policy to something
1160	 * different than SCHED_DEADLINE (through switched_from_dl()).
1161	 */
1162	if (!dl_task(p))
1163		goto unlock;
1164
1165	/*
1166	 * The task might have been boosted by someone else and might be in the
	 * boosting/deboosting path, in which case it's not throttled.
1168	 */
1169	if (is_dl_boosted(dl_se))
1170		goto unlock;
1171
1172	/*
1173	 * Spurious timer due to start_dl_timer() race; or we already received
1174	 * a replenishment from rt_mutex_setprio().
1175	 */
1176	if (!dl_se->dl_throttled)
1177		goto unlock;
1178
1179	sched_clock_tick();
1180	update_rq_clock(rq);
1181
1182	/*
1183	 * If the throttle happened during sched-out; like:
1184	 *
1185	 *   schedule()
1186	 *     deactivate_task()
1187	 *       dequeue_task_dl()
1188	 *         update_curr_dl()
1189	 *           start_dl_timer()
1190	 *         __dequeue_task_dl()
1191	 *     prev->on_rq = 0;
1192	 *
1193	 * We can be both throttled and !queued. Replenish the counter
1194	 * but do not enqueue -- wait for our wakeup to do that.
1195	 */
1196	if (!task_on_rq_queued(p)) {
1197		replenish_dl_entity(dl_se);
1198		goto unlock;
1199	}
1200
1201#ifdef CONFIG_SMP
1202	if (unlikely(!rq->online)) {
1203		/*
1204		 * If the runqueue is no longer available, migrate the
1205		 * task elsewhere. This necessarily changes rq.
1206		 */
1207		lockdep_unpin_lock(__rq_lockp(rq), rf.cookie);
1208		rq = dl_task_offline_migration(rq, p);
1209		rf.cookie = lockdep_pin_lock(__rq_lockp(rq));
1210		update_rq_clock(rq);
1211
1212		/*
1213		 * Now that the task has been migrated to the new RQ and we
1214		 * have that locked, proceed as normal and enqueue the task
1215		 * there.
1216		 */
1217	}
1218#endif
1219
1220	enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
1221	if (dl_task(rq->curr))
1222		wakeup_preempt_dl(rq, p, 0);
1223	else
1224		resched_curr(rq);
1225
1226	__push_dl_task(rq, &rf);
1227
1228unlock:
1229	task_rq_unlock(rq, p, &rf);
1230
1231	/*
1232	 * This can free the task_struct, including this hrtimer, do not touch
1233	 * anything related to that after this.
1234	 */
1235	put_task_struct(p);
1236
1237	return HRTIMER_NORESTART;
1238}
1239
1240static void init_dl_task_timer(struct sched_dl_entity *dl_se)
1241{
1242	struct hrtimer *timer = &dl_se->dl_timer;
1243
1244	hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
1245	timer->function = dl_task_timer;
1246}
1247
1248/*
1249 * During the activation, CBS checks if it can reuse the current task's
1250 * runtime and period. If the deadline of the task is in the past, CBS
1251 * cannot use the runtime, and so it replenishes the task. This rule
1252 * works fine for implicit deadline tasks (deadline == period), and the
1253 * CBS was designed for implicit deadline tasks. However, a task with
1254 * constrained deadline (deadline < period) might be awakened after the
1255 * deadline, but before the next period. In this case, replenishing the
1256 * task would allow it to run for runtime / deadline. As in this case
1257 * deadline < period, CBS enables a task to run for more than the
1258 * runtime / period. In a very loaded system, this can cause a domino
1259 * effect, making other tasks miss their deadlines.
1260 *
1261 * To avoid this problem, in the activation of a constrained deadline
1262 * task after the deadline but before the next period, throttle the
 * task and set the replenishing timer to the beginning of the next period,
1264 * unless it is boosted.
1265 */
1266static inline void dl_check_constrained_dl(struct sched_dl_entity *dl_se)
1267{
1268	struct rq *rq = rq_of_dl_se(dl_se);
1269
1270	if (dl_time_before(dl_se->deadline, rq_clock(rq)) &&
1271	    dl_time_before(rq_clock(rq), dl_next_period(dl_se))) {
1272		if (unlikely(is_dl_boosted(dl_se) || !start_dl_timer(dl_se)))
1273			return;
1274		dl_se->dl_throttled = 1;
1275		if (dl_se->runtime > 0)
1276			dl_se->runtime = 0;
1277	}
1278}
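
/*
 * Example (illustrative numbers): a constrained task with
 * dl_deadline = 30ms and dl_period = 100ms wakes up 40ms into its period,
 * i.e. after its deadline but 60ms before the next period.  It is
 * throttled with zero runtime and the replenishment timer is set to the
 * start of the next period, where it gets a fresh deadline and budget.
 */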
1279
1280static
1281int dl_runtime_exceeded(struct sched_dl_entity *dl_se)
1282{
1283	return (dl_se->runtime <= 0);
1284}
1285
1286/*
1287 * This function implements the GRUB accounting rule. According to the
1288 * GRUB reclaiming algorithm, the runtime is not decreased as "dq = -dt",
1289 * but as "dq = -(max{u, (Umax - Uinact - Uextra)} / Umax) dt",
1290 * where u is the utilization of the task, Umax is the maximum reclaimable
1291 * utilization, Uinact is the (per-runqueue) inactive utilization, computed
1292 * as the difference between the "total runqueue utilization" and the
1293 * "runqueue active utilization", and Uextra is the (per runqueue) extra
1294 * reclaimable utilization.
1295 * Since rq->dl.running_bw and rq->dl.this_bw contain utilizations multiplied
1296 * by 2^BW_SHIFT, the result has to be shifted right by BW_SHIFT.
1297 * Since rq->dl.bw_ratio contains 1 / Umax multiplied by 2^RATIO_SHIFT, dl_bw
 * is multiplied by rq->dl.bw_ratio and shifted right by RATIO_SHIFT.
1299 * Since delta is a 64 bit variable, to have an overflow its value should be
1300 * larger than 2^(64 - 20 - 8), which is more than 64 seconds. So, overflow is
1301 * not an issue here.
1302 */
1303static u64 grub_reclaim(u64 delta, struct rq *rq, struct sched_dl_entity *dl_se)
1304{
1305	u64 u_act;
1306	u64 u_inact = rq->dl.this_bw - rq->dl.running_bw; /* Utot - Uact */
1307
1308	/*
1309	 * Instead of computing max{u, (u_max - u_inact - u_extra)}, we
1310	 * compare u_inact + u_extra with u_max - u, because u_inact + u_extra
1311	 * can be larger than u_max. So, u_max - u_inact - u_extra would be
1312	 * negative leading to wrong results.
1313	 */
1314	if (u_inact + rq->dl.extra_bw > rq->dl.max_bw - dl_se->dl_bw)
1315		u_act = dl_se->dl_bw;
1316	else
1317		u_act = rq->dl.max_bw - u_inact - rq->dl.extra_bw;
1318
1319	u_act = (u_act * rq->dl.bw_ratio) >> RATIO_SHIFT;
1320	return (delta * u_act) >> BW_SHIFT;
1321}
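
/*
 * Worked example for grub_reclaim() (illustrative numbers): assume
 * max_bw = Umax = 0.95, a task with u = dl_bw = 0.25, u_inact = 0.10 and
 * extra_bw = 0.20.  Since 0.10 + 0.20 <= 0.95 - 0.25, we get
 * u_act = 0.95 - 0.10 - 0.20 = 0.65; after the 1/Umax scaling this charges
 * roughly 0.65/0.95 ~= 0.68 units of budget per unit of execution time,
 * i.e. the task reclaims otherwise unused bandwidth and drains its budget
 * more slowly than wall-clock time.
 */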
1322
1323static inline void
1324update_stats_dequeue_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se,
1325                        int flags);
1326static void update_curr_dl_se(struct rq *rq, struct sched_dl_entity *dl_se, s64 delta_exec)
1327{
1328	s64 scaled_delta_exec;
1329
1330	if (unlikely(delta_exec <= 0)) {
1331		if (unlikely(dl_se->dl_yielded))
1332			goto throttle;
1333		return;
1334	}
1335
1336	if (dl_entity_is_special(dl_se))
1337		return;
1338
1339	/*
1340	 * For tasks that participate in GRUB, we implement GRUB-PA: the
1341	 * spare reclaimed bandwidth is used to clock down frequency.
1342	 *
1343	 * For the others, we still need to scale reservation parameters
1344	 * according to current frequency and CPU maximum capacity.
1345	 */
1346	if (unlikely(dl_se->flags & SCHED_FLAG_RECLAIM)) {
1347		scaled_delta_exec = grub_reclaim(delta_exec, rq, dl_se);
1348	} else {
1349		int cpu = cpu_of(rq);
1350		unsigned long scale_freq = arch_scale_freq_capacity(cpu);
1351		unsigned long scale_cpu = arch_scale_cpu_capacity(cpu);
1352
1353		scaled_delta_exec = cap_scale(delta_exec, scale_freq);
1354		scaled_delta_exec = cap_scale(scaled_delta_exec, scale_cpu);
1355	}
1356
1357	dl_se->runtime -= scaled_delta_exec;
1358
1359throttle:
1360	if (dl_runtime_exceeded(dl_se) || dl_se->dl_yielded) {
1361		dl_se->dl_throttled = 1;
1362
1363		/* If requested, inform the user about runtime overruns. */
1364		if (dl_runtime_exceeded(dl_se) &&
1365		    (dl_se->flags & SCHED_FLAG_DL_OVERRUN))
1366			dl_se->dl_overrun = 1;
1367
1368		dequeue_dl_entity(dl_se, 0);
1369		if (!dl_server(dl_se)) {
1370			update_stats_dequeue_dl(&rq->dl, dl_se, 0);
1371			dequeue_pushable_dl_task(rq, dl_task_of(dl_se));
1372		}
1373
1374		if (unlikely(is_dl_boosted(dl_se) || !start_dl_timer(dl_se))) {
1375			if (dl_server(dl_se))
1376				enqueue_dl_entity(dl_se, ENQUEUE_REPLENISH);
1377			else
1378				enqueue_task_dl(rq, dl_task_of(dl_se), ENQUEUE_REPLENISH);
1379		}
1380
1381		if (!is_leftmost(dl_se, &rq->dl))
1382			resched_curr(rq);
1383	}
1384
1385	/*
1386	 * Because -- for now -- we share the rt bandwidth, we need to
1387	 * account our runtime there too, otherwise actual rt tasks
1388	 * would be able to exceed the shared quota.
1389	 *
1390	 * Account to the root rt group for now.
1391	 *
1392	 * The solution we're working towards is having the RT groups scheduled
1393	 * using deadline servers -- however there's a few nasties to figure
1394	 * out before that can happen.
1395	 */
1396	if (rt_bandwidth_enabled()) {
1397		struct rt_rq *rt_rq = &rq->rt;
1398
1399		raw_spin_lock(&rt_rq->rt_runtime_lock);
1400		/*
1401		 * We'll let actual RT tasks worry about the overflow here, we
		 * have our own CBS to keep us in line; only account when RT
1403		 * bandwidth is relevant.
1404		 */
1405		if (sched_rt_bandwidth_account(rt_rq))
1406			rt_rq->rt_time += delta_exec;
1407		raw_spin_unlock(&rt_rq->rt_runtime_lock);
1408	}
1409}
1410
1411void dl_server_update(struct sched_dl_entity *dl_se, s64 delta_exec)
1412{
1413	update_curr_dl_se(dl_se->rq, dl_se, delta_exec);
1414}
1415
1416void dl_server_start(struct sched_dl_entity *dl_se)
1417{
1418	if (!dl_server(dl_se)) {
1419		dl_se->dl_server = 1;
1420		setup_new_dl_entity(dl_se);
1421	}
1422	enqueue_dl_entity(dl_se, ENQUEUE_WAKEUP);
1423}
1424
1425void dl_server_stop(struct sched_dl_entity *dl_se)
1426{
1427	dequeue_dl_entity(dl_se, DEQUEUE_SLEEP);
1428}
1429
1430void dl_server_init(struct sched_dl_entity *dl_se, struct rq *rq,
1431		    dl_server_has_tasks_f has_tasks,
1432		    dl_server_pick_f pick)
1433{
1434	dl_se->rq = rq;
1435	dl_se->server_has_tasks = has_tasks;
1436	dl_se->server_pick = pick;
1437}
1438
1439/*
1440 * Update the current task's runtime statistics (provided it is still
1441 * a -deadline task and has not been removed from the dl_rq).
1442 */
1443static void update_curr_dl(struct rq *rq)
1444{
1445	struct task_struct *curr = rq->curr;
1446	struct sched_dl_entity *dl_se = &curr->dl;
1447	s64 delta_exec;
1448
1449	if (!dl_task(curr) || !on_dl_rq(dl_se))
1450		return;
1451
1452	/*
1453	 * Consumed budget is computed considering the time as
1454	 * observed by schedulable tasks (excluding time spent
1455	 * in hardirq context, etc.). Deadlines are instead
1456	 * computed using hard walltime. This seems to be the more
1457	 * natural solution, but the full ramifications of this
1458	 * approach need further study.
1459	 */
1460	delta_exec = update_curr_common(rq);
1461	update_curr_dl_se(rq, dl_se, delta_exec);
1462}
1463
1464static enum hrtimer_restart inactive_task_timer(struct hrtimer *timer)
1465{
1466	struct sched_dl_entity *dl_se = container_of(timer,
1467						     struct sched_dl_entity,
1468						     inactive_timer);
1469	struct task_struct *p = NULL;
1470	struct rq_flags rf;
1471	struct rq *rq;
1472
1473	if (!dl_server(dl_se)) {
1474		p = dl_task_of(dl_se);
1475		rq = task_rq_lock(p, &rf);
1476	} else {
1477		rq = dl_se->rq;
1478		rq_lock(rq, &rf);
1479	}
1480
1481	sched_clock_tick();
1482	update_rq_clock(rq);
1483
1484	if (dl_server(dl_se))
1485		goto no_task;
1486
1487	if (!dl_task(p) || READ_ONCE(p->__state) == TASK_DEAD) {
1488		struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
1489
1490		if (READ_ONCE(p->__state) == TASK_DEAD && dl_se->dl_non_contending) {
1491			sub_running_bw(&p->dl, dl_rq_of_se(&p->dl));
1492			sub_rq_bw(&p->dl, dl_rq_of_se(&p->dl));
1493			dl_se->dl_non_contending = 0;
1494		}
1495
1496		raw_spin_lock(&dl_b->lock);
1497		__dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
1498		raw_spin_unlock(&dl_b->lock);
1499		__dl_clear_params(dl_se);
1500
1501		goto unlock;
1502	}
1503
1504no_task:
1505	if (dl_se->dl_non_contending == 0)
1506		goto unlock;
1507
1508	sub_running_bw(dl_se, &rq->dl);
1509	dl_se->dl_non_contending = 0;
1510unlock:
1511
1512	if (!dl_server(dl_se)) {
1513		task_rq_unlock(rq, p, &rf);
1514		put_task_struct(p);
1515	} else {
1516		rq_unlock(rq, &rf);
1517	}
1518
1519	return HRTIMER_NORESTART;
1520}
1521
1522static void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se)
1523{
1524	struct hrtimer *timer = &dl_se->inactive_timer;
1525
1526	hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
1527	timer->function = inactive_task_timer;
1528}
1529
1530#define __node_2_dle(node) \
1531	rb_entry((node), struct sched_dl_entity, rb_node)
1532
1533#ifdef CONFIG_SMP
1534
1535static void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
1536{
1537	struct rq *rq = rq_of_dl_rq(dl_rq);
1538
1539	if (dl_rq->earliest_dl.curr == 0 ||
1540	    dl_time_before(deadline, dl_rq->earliest_dl.curr)) {
1541		if (dl_rq->earliest_dl.curr == 0)
1542			cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_HIGHER);
1543		dl_rq->earliest_dl.curr = deadline;
1544		cpudl_set(&rq->rd->cpudl, rq->cpu, deadline);
1545	}
1546}
1547
1548static void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
1549{
1550	struct rq *rq = rq_of_dl_rq(dl_rq);
1551
1552	/*
1553	 * Since we may have removed our earliest (and/or next earliest)
1554	 * task we must recompute them.
1555	 */
1556	if (!dl_rq->dl_nr_running) {
1557		dl_rq->earliest_dl.curr = 0;
1558		dl_rq->earliest_dl.next = 0;
1559		cpudl_clear(&rq->rd->cpudl, rq->cpu);
1560		cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
1561	} else {
1562		struct rb_node *leftmost = rb_first_cached(&dl_rq->root);
1563		struct sched_dl_entity *entry = __node_2_dle(leftmost);
1564
1565		dl_rq->earliest_dl.curr = entry->deadline;
1566		cpudl_set(&rq->rd->cpudl, rq->cpu, entry->deadline);
1567	}
1568}
1569
1570#else
1571
1572static inline void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
1573static inline void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
1574
1575#endif /* CONFIG_SMP */
1576
1577static inline
1578void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
1579{
1580	u64 deadline = dl_se->deadline;
1581
1582	dl_rq->dl_nr_running++;
1583	add_nr_running(rq_of_dl_rq(dl_rq), 1);
1584
1585	inc_dl_deadline(dl_rq, deadline);
1586}
1587
1588static inline
1589void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
1590{
1591	WARN_ON(!dl_rq->dl_nr_running);
1592	dl_rq->dl_nr_running--;
1593	sub_nr_running(rq_of_dl_rq(dl_rq), 1);
1594
1595	dec_dl_deadline(dl_rq, dl_se->deadline);
1596}
1597
1598static inline bool __dl_less(struct rb_node *a, const struct rb_node *b)
1599{
1600	return dl_time_before(__node_2_dle(a)->deadline, __node_2_dle(b)->deadline);
1601}
1602
1603static inline struct sched_statistics *
1604__schedstats_from_dl_se(struct sched_dl_entity *dl_se)
1605{
1606	return &dl_task_of(dl_se)->stats;
1607}
1608
1609static inline void
1610update_stats_wait_start_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se)
1611{
1612	struct sched_statistics *stats;
1613
1614	if (!schedstat_enabled())
1615		return;
1616
1617	stats = __schedstats_from_dl_se(dl_se);
1618	__update_stats_wait_start(rq_of_dl_rq(dl_rq), dl_task_of(dl_se), stats);
1619}
1620
1621static inline void
1622update_stats_wait_end_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se)
1623{
1624	struct sched_statistics *stats;
1625
1626	if (!schedstat_enabled())
1627		return;
1628
1629	stats = __schedstats_from_dl_se(dl_se);
1630	__update_stats_wait_end(rq_of_dl_rq(dl_rq), dl_task_of(dl_se), stats);
1631}
1632
1633static inline void
1634update_stats_enqueue_sleeper_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se)
1635{
1636	struct sched_statistics *stats;
1637
1638	if (!schedstat_enabled())
1639		return;
1640
1641	stats = __schedstats_from_dl_se(dl_se);
1642	__update_stats_enqueue_sleeper(rq_of_dl_rq(dl_rq), dl_task_of(dl_se), stats);
1643}
1644
1645static inline void
1646update_stats_enqueue_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se,
1647			int flags)
1648{
1649	if (!schedstat_enabled())
1650		return;
1651
1652	if (flags & ENQUEUE_WAKEUP)
1653		update_stats_enqueue_sleeper_dl(dl_rq, dl_se);
1654}
1655
1656static inline void
1657update_stats_dequeue_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se,
1658			int flags)
1659{
1660	struct task_struct *p = dl_task_of(dl_se);
1661
1662	if (!schedstat_enabled())
1663		return;
1664
1665	if ((flags & DEQUEUE_SLEEP)) {
1666		unsigned int state;
1667
1668		state = READ_ONCE(p->__state);
1669		if (state & TASK_INTERRUPTIBLE)
1670			__schedstat_set(p->stats.sleep_start,
1671					rq_clock(rq_of_dl_rq(dl_rq)));
1672
1673		if (state & TASK_UNINTERRUPTIBLE)
1674			__schedstat_set(p->stats.block_start,
1675					rq_clock(rq_of_dl_rq(dl_rq)));
1676	}
1677}
1678
1679static void __enqueue_dl_entity(struct sched_dl_entity *dl_se)
1680{
1681	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
1682
1683	WARN_ON_ONCE(!RB_EMPTY_NODE(&dl_se->rb_node));
1684
1685	rb_add_cached(&dl_se->rb_node, &dl_rq->root, __dl_less);
1686
1687	inc_dl_tasks(dl_se, dl_rq);
1688}
1689
1690static void __dequeue_dl_entity(struct sched_dl_entity *dl_se)
1691{
1692	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
1693
1694	if (RB_EMPTY_NODE(&dl_se->rb_node))
1695		return;
1696
1697	rb_erase_cached(&dl_se->rb_node, &dl_rq->root);
1698
1699	RB_CLEAR_NODE(&dl_se->rb_node);
1700
1701	dec_dl_tasks(dl_se, dl_rq);
1702}
1703
1704static void
1705enqueue_dl_entity(struct sched_dl_entity *dl_se, int flags)
1706{
1707	WARN_ON_ONCE(on_dl_rq(dl_se));
1708
1709	update_stats_enqueue_dl(dl_rq_of_se(dl_se), dl_se, flags);
1710
1711	/*
1712	 * Check if a constrained deadline task was activated
1713	 * after the deadline but before the next period.
1714	 * If that is the case, the task will be throttled and
1715	 * the replenishment timer will be set to the next period.
1716	 */
1717	if (!dl_se->dl_throttled && !dl_is_implicit(dl_se))
1718		dl_check_constrained_dl(dl_se);
1719
1720	if (flags & (ENQUEUE_RESTORE|ENQUEUE_MIGRATING)) {
1721		struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
1722
1723		add_rq_bw(dl_se, dl_rq);
1724		add_running_bw(dl_se, dl_rq);
1725	}
1726
1727	/*
1728	 * If p is throttled, we do not enqueue it. In fact, if it exhausted
1729	 * its budget it needs a replenishment and, since it now is on
1730	 * its rq, the bandwidth timer callback (which clearly has not
1731	 * run yet) will take care of this.
	 * However, the active utilization does not depend on whether
	 * the task is on the runqueue or not (but depends on the
1734	 * task's state - in GRUB parlance, "inactive" vs "active contending").
1735	 * In other words, even if a task is throttled its utilization must
1736	 * be counted in the active utilization; hence, we need to call
1737	 * add_running_bw().
1738	 */
1739	if (dl_se->dl_throttled && !(flags & ENQUEUE_REPLENISH)) {
1740		if (flags & ENQUEUE_WAKEUP)
1741			task_contending(dl_se, flags);
1742
1743		return;
1744	}
1745
1746	/*
1747	 * If this is a wakeup or a new instance, the scheduling
1748	 * parameters of the task might need updating. Otherwise,
1749	 * we want a replenishment of its runtime.
1750	 */
1751	if (flags & ENQUEUE_WAKEUP) {
1752		task_contending(dl_se, flags);
1753		update_dl_entity(dl_se);
1754	} else if (flags & ENQUEUE_REPLENISH) {
1755		replenish_dl_entity(dl_se);
1756	} else if ((flags & ENQUEUE_RESTORE) &&
1757		   dl_time_before(dl_se->deadline, rq_clock(rq_of_dl_se(dl_se)))) {
1758		setup_new_dl_entity(dl_se);
1759	}
1760
1761	__enqueue_dl_entity(dl_se);
1762}
1763
1764static void dequeue_dl_entity(struct sched_dl_entity *dl_se, int flags)
1765{
1766	__dequeue_dl_entity(dl_se);
1767
1768	if (flags & (DEQUEUE_SAVE|DEQUEUE_MIGRATING)) {
1769		struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
1770
1771		sub_running_bw(dl_se, dl_rq);
1772		sub_rq_bw(dl_se, dl_rq);
1773	}
1774
1775	/*
	 * This check allows us to start the inactive timer (or to immediately
1777	 * decrease the active utilization, if needed) in two cases:
1778	 * when the task blocks and when it is terminating
1779	 * (p->state == TASK_DEAD). We can handle the two cases in the same
1780	 * way, because from GRUB's point of view the same thing is happening
1781	 * (the task moves from "active contending" to "active non contending"
1782	 * or "inactive")
1783	 */
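	/*
	 * For reference, the 0-lag time that task_non_contending() checks
	 * against is roughly:
	 *
	 *   t_0lag = deadline - (runtime * dl_period) / dl_runtime
	 *
	 * e.g. (hypothetical numbers) with 4ms of runtime left out of a
	 * 10ms/100ms reservation, the utilization stays accounted as
	 * "active" until 40ms before the absolute deadline.
	 */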
1784	if (flags & DEQUEUE_SLEEP)
1785		task_non_contending(dl_se);
1786}
1787
1788static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
1789{
1790	if (is_dl_boosted(&p->dl)) {
1791		/*
1792		 * Because of delays in the detection of the overrun of a
1793		 * thread's runtime, it might be the case that a thread
1794		 * goes to sleep in a rt mutex with negative runtime. As
1795		 * a consequence, the thread will be throttled.
1796		 *
1797		 * While waiting for the mutex, this thread can also be
1798		 * boosted via PI, resulting in a thread that is throttled
1799		 * and boosted at the same time.
1800		 *
1801		 * In this case, the boost overrides the throttle.
1802		 */
1803		if (p->dl.dl_throttled) {
1804			/*
1805			 * The replenish timer needs to be canceled. No
1806			 * problem if it fires concurrently: boosted threads
1807			 * are ignored in dl_task_timer().
1808			 */
1809			hrtimer_try_to_cancel(&p->dl.dl_timer);
1810			p->dl.dl_throttled = 0;
1811		}
1812	} else if (!dl_prio(p->normal_prio)) {
		/*
		 * Special case in which we have a !SCHED_DEADLINE task that is going
		 * to be deboosted, but exceeds its runtime while doing so. No point in
		 * replenishing it, as it's going to return to its original
		 * scheduling class after this. If it has been throttled, we need to
		 * clear the flag; otherwise the task may wake up as throttled after
		 * being boosted again with no means to replenish the runtime and clear
		 * the throttle.
		 */
1822		p->dl.dl_throttled = 0;
1823		if (!(flags & ENQUEUE_REPLENISH))
1824			printk_deferred_once("sched: DL de-boosted task PID %d: REPLENISH flag missing\n",
1825					     task_pid_nr(p));
1826
1827		return;
1828	}
1829
1830	check_schedstat_required();
1831	update_stats_wait_start_dl(dl_rq_of_se(&p->dl), &p->dl);
1832
1833	if (p->on_rq == TASK_ON_RQ_MIGRATING)
1834		flags |= ENQUEUE_MIGRATING;
1835
1836	enqueue_dl_entity(&p->dl, flags);
1837
1838	if (dl_server(&p->dl))
1839		return;
1840
1841	if (!task_current(rq, p) && !p->dl.dl_throttled && p->nr_cpus_allowed > 1)
1842		enqueue_pushable_dl_task(rq, p);
1843}
1844
1845static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
1846{
1847	update_curr_dl(rq);
1848
1849	if (p->on_rq == TASK_ON_RQ_MIGRATING)
1850		flags |= DEQUEUE_MIGRATING;
1851
1852	dequeue_dl_entity(&p->dl, flags);
1853	if (!p->dl.dl_throttled && !dl_server(&p->dl))
1854		dequeue_pushable_dl_task(rq, p);
1855}
1856
/*
 * Yield task semantic for -deadline tasks is:
 *
 *   get off the CPU until our next instance, with
 *   a new runtime. This is of little use now, since we
 *   don't have a bandwidth reclaiming mechanism. Anyway,
 *   bandwidth reclaiming is planned for the future, and
 *   yield_task_dl will indicate that some spare budget
 *   is available for other task instances to use.
 */
1867static void yield_task_dl(struct rq *rq)
1868{
1869	/*
1870	 * We make the task go to sleep until its current deadline by
1871	 * forcing its runtime to zero. This way, update_curr_dl() stops
1872	 * it and the bandwidth timer will wake it up and will give it
1873	 * new scheduling parameters (thanks to dl_yielded=1).
1874	 */
1875	rq->curr->dl.dl_yielded = 1;
1876
1877	update_rq_clock(rq);
1878	update_curr_dl(rq);
1879	/*
1880	 * Tell update_rq_clock() that we've just updated,
1881	 * so we don't do microscopic update in schedule()
1882	 * and double the fastpath cost.
1883	 */
1884	rq_clock_skip_update(rq);
1885}
1886
1887#ifdef CONFIG_SMP
1888
1889static inline bool dl_task_is_earliest_deadline(struct task_struct *p,
1890						 struct rq *rq)
1891{
1892	return (!rq->dl.dl_nr_running ||
1893		dl_time_before(p->dl.deadline,
1894			       rq->dl.earliest_dl.curr));
1895}
1896
1897static int find_later_rq(struct task_struct *task);
1898
1899static int
1900select_task_rq_dl(struct task_struct *p, int cpu, int flags)
1901{
1902	struct task_struct *curr;
1903	bool select_rq;
1904	struct rq *rq;
1905
1906	if (!(flags & WF_TTWU))
1907		goto out;
1908
1909	rq = cpu_rq(cpu);
1910
1911	rcu_read_lock();
1912	curr = READ_ONCE(rq->curr); /* unlocked access */
1913
1914	/*
1915	 * If we are dealing with a -deadline task, we must
1916	 * decide where to wake it up.
1917	 * If it has a later deadline and the current task
1918	 * on this rq can't move (provided the waking task
1919	 * can!) we prefer to send it somewhere else. On the
1920	 * other hand, if it has a shorter deadline, we
1921	 * try to make it stay here, it might be important.
1922	 */
1923	select_rq = unlikely(dl_task(curr)) &&
1924		    (curr->nr_cpus_allowed < 2 ||
1925		     !dl_entity_preempt(&p->dl, &curr->dl)) &&
1926		    p->nr_cpus_allowed > 1;
1927
1928	/*
1929	 * Take the capacity of the CPU into account to
1930	 * ensure it fits the requirement of the task.
1931	 */
1932	if (sched_asym_cpucap_active())
1933		select_rq |= !dl_task_fits_capacity(p, cpu);
1934
1935	if (select_rq) {
1936		int target = find_later_rq(p);
1937
1938		if (target != -1 &&
1939		    dl_task_is_earliest_deadline(p, cpu_rq(target)))
1940			cpu = target;
1941	}
1942	rcu_read_unlock();
1943
1944out:
1945	return cpu;
1946}
1947
1948static void migrate_task_rq_dl(struct task_struct *p, int new_cpu __maybe_unused)
1949{
1950	struct rq_flags rf;
1951	struct rq *rq;
1952
1953	if (READ_ONCE(p->__state) != TASK_WAKING)
1954		return;
1955
1956	rq = task_rq(p);
1957	/*
1958	 * Since p->state == TASK_WAKING, set_task_cpu() has been called
1959	 * from try_to_wake_up(). Hence, p->pi_lock is locked, but
1960	 * rq->lock is not... So, lock it
1961	 */
1962	rq_lock(rq, &rf);
1963	if (p->dl.dl_non_contending) {
1964		update_rq_clock(rq);
1965		sub_running_bw(&p->dl, &rq->dl);
1966		p->dl.dl_non_contending = 0;
1967		/*
1968		 * If the timer handler is currently running and the
1969		 * timer cannot be canceled, inactive_task_timer()
1970		 * will see that dl_not_contending is not set, and
1971		 * will not touch the rq's active utilization,
1972		 * so we are still safe.
1973		 */
1974		if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
1975			put_task_struct(p);
1976	}
1977	sub_rq_bw(&p->dl, &rq->dl);
1978	rq_unlock(rq, &rf);
1979}
1980
1981static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
1982{
1983	/*
1984	 * Current can't be migrated, useless to reschedule,
1985	 * let's hope p can move out.
1986	 */
1987	if (rq->curr->nr_cpus_allowed == 1 ||
1988	    !cpudl_find(&rq->rd->cpudl, rq->curr, NULL))
1989		return;
1990
1991	/*
1992	 * p is migratable, so let's not schedule it and
1993	 * see if it is pushed or pulled somewhere else.
1994	 */
1995	if (p->nr_cpus_allowed != 1 &&
1996	    cpudl_find(&rq->rd->cpudl, p, NULL))
1997		return;
1998
1999	resched_curr(rq);
2000}
2001
2002static int balance_dl(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
2003{
2004	if (!on_dl_rq(&p->dl) && need_pull_dl_task(rq, p)) {
		/*
		 * This is OK: current is on_cpu, which avoids it being
		 * picked for load balancing; preemption/IRQs are still
		 * disabled, avoiding further scheduler activity on it; and
		 * we've not yet started the picking loop.
		 */
2011		rq_unpin_lock(rq, rf);
2012		pull_dl_task(rq);
2013		rq_repin_lock(rq, rf);
2014	}
2015
2016	return sched_stop_runnable(rq) || sched_dl_runnable(rq);
2017}
2018#endif /* CONFIG_SMP */
2019
2020/*
2021 * Only called when both the current and waking task are -deadline
2022 * tasks.
2023 */
2024static void wakeup_preempt_dl(struct rq *rq, struct task_struct *p,
2025				  int flags)
2026{
2027	if (dl_entity_preempt(&p->dl, &rq->curr->dl)) {
2028		resched_curr(rq);
2029		return;
2030	}
2031
2032#ifdef CONFIG_SMP
2033	/*
2034	 * In the unlikely case current and p have the same deadline
2035	 * let us try to decide what's the best thing to do...
2036	 */
2037	if ((p->dl.deadline == rq->curr->dl.deadline) &&
2038	    !test_tsk_need_resched(rq->curr))
2039		check_preempt_equal_dl(rq, p);
2040#endif /* CONFIG_SMP */
2041}
2042
2043#ifdef CONFIG_SCHED_HRTICK
2044static void start_hrtick_dl(struct rq *rq, struct sched_dl_entity *dl_se)
2045{
2046	hrtick_start(rq, dl_se->runtime);
2047}
2048#else /* !CONFIG_SCHED_HRTICK */
2049static void start_hrtick_dl(struct rq *rq, struct sched_dl_entity *dl_se)
2050{
2051}
2052#endif
2053
2054static void set_next_task_dl(struct rq *rq, struct task_struct *p, bool first)
2055{
2056	struct sched_dl_entity *dl_se = &p->dl;
2057	struct dl_rq *dl_rq = &rq->dl;
2058
2059	p->se.exec_start = rq_clock_task(rq);
2060	if (on_dl_rq(&p->dl))
2061		update_stats_wait_end_dl(dl_rq, dl_se);
2062
2063	/* You can't push away the running task */
2064	dequeue_pushable_dl_task(rq, p);
2065
2066	if (!first)
2067		return;
2068
2069	if (rq->curr->sched_class != &dl_sched_class)
2070		update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 0);
2071
2072	deadline_queue_push_tasks(rq);
2073}
2074
2075static struct sched_dl_entity *pick_next_dl_entity(struct dl_rq *dl_rq)
2076{
2077	struct rb_node *left = rb_first_cached(&dl_rq->root);
2078
2079	if (!left)
2080		return NULL;
2081
2082	return __node_2_dle(left);
2083}
2084
2085static struct task_struct *pick_task_dl(struct rq *rq)
2086{
2087	struct sched_dl_entity *dl_se;
2088	struct dl_rq *dl_rq = &rq->dl;
2089	struct task_struct *p;
2090
2091again:
2092	if (!sched_dl_runnable(rq))
2093		return NULL;
2094
2095	dl_se = pick_next_dl_entity(dl_rq);
2096	WARN_ON_ONCE(!dl_se);
2097
2098	if (dl_server(dl_se)) {
2099		p = dl_se->server_pick(dl_se);
2100		if (!p) {
2101			WARN_ON_ONCE(1);
2102			dl_se->dl_yielded = 1;
2103			update_curr_dl_se(rq, dl_se, 0);
2104			goto again;
2105		}
2106		p->dl_server = dl_se;
2107	} else {
2108		p = dl_task_of(dl_se);
2109	}
2110
2111	return p;
2112}
2113
2114static struct task_struct *pick_next_task_dl(struct rq *rq)
2115{
2116	struct task_struct *p;
2117
2118	p = pick_task_dl(rq);
2119	if (!p)
2120		return p;
2121
2122	if (!p->dl_server)
2123		set_next_task_dl(rq, p, true);
2124
2125	if (hrtick_enabled(rq))
2126		start_hrtick_dl(rq, &p->dl);
2127
2128	return p;
2129}
2130
2131static void put_prev_task_dl(struct rq *rq, struct task_struct *p)
2132{
2133	struct sched_dl_entity *dl_se = &p->dl;
2134	struct dl_rq *dl_rq = &rq->dl;
2135
2136	if (on_dl_rq(&p->dl))
2137		update_stats_wait_start_dl(dl_rq, dl_se);
2138
2139	update_curr_dl(rq);
2140
2141	update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 1);
2142	if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1)
2143		enqueue_pushable_dl_task(rq, p);
2144}
2145
2146/*
2147 * scheduler tick hitting a task of our scheduling class.
2148 *
2149 * NOTE: This function can be called remotely by the tick offload that
2150 * goes along full dynticks. Therefore no local assumption can be made
2151 * and everything must be accessed through the @rq and @curr passed in
2152 * parameters.
2153 */
2154static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued)
2155{
2156	update_curr_dl(rq);
2157
2158	update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 1);
2159	/*
2160	 * Even when we have runtime, update_curr_dl() might have resulted in us
2161	 * not being the leftmost task anymore. In that case NEED_RESCHED will
2162	 * be set and schedule() will start a new hrtick for the next task.
2163	 */
2164	if (hrtick_enabled_dl(rq) && queued && p->dl.runtime > 0 &&
2165	    is_leftmost(&p->dl, &rq->dl))
2166		start_hrtick_dl(rq, &p->dl);
2167}
2168
2169static void task_fork_dl(struct task_struct *p)
2170{
	/*
	 * SCHED_DEADLINE tasks cannot fork and this is achieved through
	 * sched_fork().
	 */
2175}
2176
2177#ifdef CONFIG_SMP
2178
2179/* Only try algorithms three times */
2180#define DL_MAX_TRIES 3
2181
2182static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
2183{
2184	if (!task_on_cpu(rq, p) &&
2185	    cpumask_test_cpu(cpu, &p->cpus_mask))
2186		return 1;
2187	return 0;
2188}
2189
/*
 * Return the earliest pushable task on this rq that is suitable to be
 * executed on the given CPU, or NULL if there is none.
 */
static struct task_struct *pick_earliest_pushable_dl_task(struct rq *rq, int cpu)
{
	struct rb_node *node;

	if (!has_pushable_dl_tasks(rq))
		return NULL;

	for (node = rb_first_cached(&rq->dl.pushable_dl_tasks_root); node;
	     node = rb_next(node)) {
		struct task_struct *p = __node_2_pdl(node);

		if (pick_dl_task(rq, p, cpu))
			return p;
	}

	return NULL;
}
2217
2218static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask_dl);
2219
2220static int find_later_rq(struct task_struct *task)
2221{
2222	struct sched_domain *sd;
2223	struct cpumask *later_mask = this_cpu_cpumask_var_ptr(local_cpu_mask_dl);
2224	int this_cpu = smp_processor_id();
2225	int cpu = task_cpu(task);
2226
2227	/* Make sure the mask is initialized first */
2228	if (unlikely(!later_mask))
2229		return -1;
2230
2231	if (task->nr_cpus_allowed == 1)
2232		return -1;
2233
2234	/*
2235	 * We have to consider system topology and task affinity
2236	 * first, then we can look for a suitable CPU.
2237	 */
2238	if (!cpudl_find(&task_rq(task)->rd->cpudl, task, later_mask))
2239		return -1;
2240
	/*
	 * If we are here, some targets have been found: among the
	 * runqueues whose current tasks have later deadlines than our
	 * task's, the most suitable one is the rq with the latest such
	 * deadline.
	 *
	 * Now we check how well this matches with the task's
	 * affinity and the system topology.
	 *
	 * The last CPU where the task ran is our first
	 * guess, since it is most likely cache-hot there.
	 */
2253	if (cpumask_test_cpu(cpu, later_mask))
2254		return cpu;
2255	/*
2256	 * Check if this_cpu is to be skipped (i.e., it is
2257	 * not in the mask) or not.
2258	 */
2259	if (!cpumask_test_cpu(this_cpu, later_mask))
2260		this_cpu = -1;
2261
2262	rcu_read_lock();
2263	for_each_domain(cpu, sd) {
2264		if (sd->flags & SD_WAKE_AFFINE) {
2265			int best_cpu;
2266
2267			/*
2268			 * If possible, preempting this_cpu is
2269			 * cheaper than migrating.
2270			 */
2271			if (this_cpu != -1 &&
2272			    cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
2273				rcu_read_unlock();
2274				return this_cpu;
2275			}
2276
2277			best_cpu = cpumask_any_and_distribute(later_mask,
2278							      sched_domain_span(sd));
			/*
			 * Last chance: if a CPU in both later_mask
			 * and the current sd span is valid, that becomes our
			 * choice. Of course, the latest possible CPU is
			 * already under consideration through later_mask.
			 */
2285			if (best_cpu < nr_cpu_ids) {
2286				rcu_read_unlock();
2287				return best_cpu;
2288			}
2289		}
2290	}
2291	rcu_read_unlock();
2292
	/*
	 * At this point, all our guesses failed; we just return
	 * 'something' and let the caller sort things out.
	 */
2297	if (this_cpu != -1)
2298		return this_cpu;
2299
2300	cpu = cpumask_any_distribute(later_mask);
2301	if (cpu < nr_cpu_ids)
2302		return cpu;
2303
2304	return -1;
2305}
2306
2307/* Locks the rq it finds */
2308static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
2309{
2310	struct rq *later_rq = NULL;
2311	int tries;
2312	int cpu;
2313
2314	for (tries = 0; tries < DL_MAX_TRIES; tries++) {
2315		cpu = find_later_rq(task);
2316
2317		if ((cpu == -1) || (cpu == rq->cpu))
2318			break;
2319
2320		later_rq = cpu_rq(cpu);
2321
2322		if (!dl_task_is_earliest_deadline(task, later_rq)) {
2323			/*
2324			 * Target rq has tasks of equal or earlier deadline,
2325			 * retrying does not release any lock and is unlikely
2326			 * to yield a different result.
2327			 */
2328			later_rq = NULL;
2329			break;
2330		}
2331
2332		/* Retry if something changed. */
2333		if (double_lock_balance(rq, later_rq)) {
2334			if (unlikely(task_rq(task) != rq ||
2335				     !cpumask_test_cpu(later_rq->cpu, &task->cpus_mask) ||
2336				     task_on_cpu(rq, task) ||
2337				     !dl_task(task) ||
2338				     is_migration_disabled(task) ||
2339				     !task_on_rq_queued(task))) {
2340				double_unlock_balance(rq, later_rq);
2341				later_rq = NULL;
2342				break;
2343			}
2344		}
2345
2346		/*
2347		 * If the rq we found has no -deadline task, or
2348		 * its earliest one has a later deadline than our
2349		 * task, the rq is a good one.
2350		 */
2351		if (dl_task_is_earliest_deadline(task, later_rq))
2352			break;
2353
2354		/* Otherwise we try again. */
2355		double_unlock_balance(rq, later_rq);
2356		later_rq = NULL;
2357	}
2358
2359	return later_rq;
2360}
2361
2362static struct task_struct *pick_next_pushable_dl_task(struct rq *rq)
2363{
2364	struct task_struct *p;
2365
2366	if (!has_pushable_dl_tasks(rq))
2367		return NULL;
2368
2369	p = __node_2_pdl(rb_first_cached(&rq->dl.pushable_dl_tasks_root));
2370
2371	WARN_ON_ONCE(rq->cpu != task_cpu(p));
2372	WARN_ON_ONCE(task_current(rq, p));
2373	WARN_ON_ONCE(p->nr_cpus_allowed <= 1);
2374
2375	WARN_ON_ONCE(!task_on_rq_queued(p));
2376	WARN_ON_ONCE(!dl_task(p));
2377
2378	return p;
2379}
2380
/*
 * See if the non-running -deadline tasks on this rq
 * can be sent to some other CPU where they can preempt
 * and start executing.
 */
2386static int push_dl_task(struct rq *rq)
2387{
2388	struct task_struct *next_task;
2389	struct rq *later_rq;
2390	int ret = 0;
2391
2392	next_task = pick_next_pushable_dl_task(rq);
2393	if (!next_task)
2394		return 0;
2395
2396retry:
2397	/*
2398	 * If next_task preempts rq->curr, and rq->curr
2399	 * can move away, it makes sense to just reschedule
2400	 * without going further in pushing next_task.
2401	 */
2402	if (dl_task(rq->curr) &&
2403	    dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) &&
2404	    rq->curr->nr_cpus_allowed > 1) {
2405		resched_curr(rq);
2406		return 0;
2407	}
2408
2409	if (is_migration_disabled(next_task))
2410		return 0;
2411
2412	if (WARN_ON(next_task == rq->curr))
2413		return 0;
2414
2415	/* We might release rq lock */
2416	get_task_struct(next_task);
2417
2418	/* Will lock the rq it'll find */
2419	later_rq = find_lock_later_rq(next_task, rq);
2420	if (!later_rq) {
2421		struct task_struct *task;
2422
2423		/*
2424		 * We must check all this again, since
2425		 * find_lock_later_rq releases rq->lock and it is
2426		 * then possible that next_task has migrated.
2427		 */
2428		task = pick_next_pushable_dl_task(rq);
2429		if (task == next_task) {
2430			/*
2431			 * The task is still there. We don't try
2432			 * again, some other CPU will pull it when ready.
2433			 */
2434			goto out;
2435		}
2436
2437		if (!task)
2438			/* No more tasks */
2439			goto out;
2440
2441		put_task_struct(next_task);
2442		next_task = task;
2443		goto retry;
2444	}
2445
2446	deactivate_task(rq, next_task, 0);
2447	set_task_cpu(next_task, later_rq->cpu);
2448	activate_task(later_rq, next_task, 0);
2449	ret = 1;
2450
2451	resched_curr(later_rq);
2452
2453	double_unlock_balance(rq, later_rq);
2454
2455out:
2456	put_task_struct(next_task);
2457
2458	return ret;
2459}
2460
2461static void push_dl_tasks(struct rq *rq)
2462{
2463	/* push_dl_task() will return true if it moved a -deadline task */
2464	while (push_dl_task(rq))
2465		;
2466}
2467
2468static void pull_dl_task(struct rq *this_rq)
2469{
2470	int this_cpu = this_rq->cpu, cpu;
2471	struct task_struct *p, *push_task;
2472	bool resched = false;
2473	struct rq *src_rq;
2474	u64 dmin = LONG_MAX;
2475
2476	if (likely(!dl_overloaded(this_rq)))
2477		return;
2478
2479	/*
2480	 * Match the barrier from dl_set_overloaded; this guarantees that if we
2481	 * see overloaded we must also see the dlo_mask bit.
2482	 */
2483	smp_rmb();
2484
2485	for_each_cpu(cpu, this_rq->rd->dlo_mask) {
2486		if (this_cpu == cpu)
2487			continue;
2488
2489		src_rq = cpu_rq(cpu);
2490
		/*
		 * It looks racy, and it is! However, as in sched_rt.c,
		 * we are fine with this.
		 */
2495		if (this_rq->dl.dl_nr_running &&
2496		    dl_time_before(this_rq->dl.earliest_dl.curr,
2497				   src_rq->dl.earliest_dl.next))
2498			continue;
2499
2500		/* Might drop this_rq->lock */
2501		push_task = NULL;
2502		double_lock_balance(this_rq, src_rq);
2503
2504		/*
2505		 * If there are no more pullable tasks on the
2506		 * rq, we're done with it.
2507		 */
2508		if (src_rq->dl.dl_nr_running <= 1)
2509			goto skip;
2510
2511		p = pick_earliest_pushable_dl_task(src_rq, this_cpu);
2512
2513		/*
2514		 * We found a task to be pulled if:
2515		 *  - it preempts our current (if there's one),
2516		 *  - it will preempt the last one we pulled (if any).
2517		 */
2518		if (p && dl_time_before(p->dl.deadline, dmin) &&
2519		    dl_task_is_earliest_deadline(p, this_rq)) {
2520			WARN_ON(p == src_rq->curr);
2521			WARN_ON(!task_on_rq_queued(p));
2522
2523			/*
2524			 * Then we pull iff p has actually an earlier
2525			 * deadline than the current task of its runqueue.
2526			 */
2527			if (dl_time_before(p->dl.deadline,
2528					   src_rq->curr->dl.deadline))
2529				goto skip;
2530
2531			if (is_migration_disabled(p)) {
2532				push_task = get_push_task(src_rq);
2533			} else {
2534				deactivate_task(src_rq, p, 0);
2535				set_task_cpu(p, this_cpu);
2536				activate_task(this_rq, p, 0);
2537				dmin = p->dl.deadline;
2538				resched = true;
2539			}
2540
2541			/* Is there any other task even earlier? */
2542		}
2543skip:
2544		double_unlock_balance(this_rq, src_rq);
2545
2546		if (push_task) {
2547			preempt_disable();
2548			raw_spin_rq_unlock(this_rq);
2549			stop_one_cpu_nowait(src_rq->cpu, push_cpu_stop,
2550					    push_task, &src_rq->push_work);
2551			preempt_enable();
2552			raw_spin_rq_lock(this_rq);
2553		}
2554	}
2555
2556	if (resched)
2557		resched_curr(this_rq);
2558}
2559
2560/*
2561 * Since the task is not running and a reschedule is not going to happen
2562 * anytime soon on its runqueue, we try pushing it away now.
2563 */
2564static void task_woken_dl(struct rq *rq, struct task_struct *p)
2565{
2566	if (!task_on_cpu(rq, p) &&
2567	    !test_tsk_need_resched(rq->curr) &&
2568	    p->nr_cpus_allowed > 1 &&
2569	    dl_task(rq->curr) &&
2570	    (rq->curr->nr_cpus_allowed < 2 ||
2571	     !dl_entity_preempt(&p->dl, &rq->curr->dl))) {
2572		push_dl_tasks(rq);
2573	}
2574}
2575
2576static void set_cpus_allowed_dl(struct task_struct *p,
2577				struct affinity_context *ctx)
2578{
2579	struct root_domain *src_rd;
2580	struct rq *rq;
2581
2582	WARN_ON_ONCE(!dl_task(p));
2583
2584	rq = task_rq(p);
2585	src_rd = rq->rd;
2586	/*
2587	 * Migrating a SCHED_DEADLINE task between exclusive
2588	 * cpusets (different root_domains) entails a bandwidth
2589	 * update. We already made space for us in the destination
2590	 * domain (see cpuset_can_attach()).
2591	 */
2592	if (!cpumask_intersects(src_rd->span, ctx->new_mask)) {
2593		struct dl_bw *src_dl_b;
2594
2595		src_dl_b = dl_bw_of(cpu_of(rq));
		/*
		 * We now free resources of the root_domain we are migrating
		 * off. In the worst case, sched_setattr() may temporarily fail
		 * until we complete the update.
		 */
2601		raw_spin_lock(&src_dl_b->lock);
2602		__dl_sub(src_dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
2603		raw_spin_unlock(&src_dl_b->lock);
2604	}
2605
2606	set_cpus_allowed_common(p, ctx);
2607}
2608
2609/* Assumes rq->lock is held */
2610static void rq_online_dl(struct rq *rq)
2611{
2612	if (rq->dl.overloaded)
2613		dl_set_overload(rq);
2614
2615	cpudl_set_freecpu(&rq->rd->cpudl, rq->cpu);
2616	if (rq->dl.dl_nr_running > 0)
2617		cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr);
2618}
2619
2620/* Assumes rq->lock is held */
2621static void rq_offline_dl(struct rq *rq)
2622{
2623	if (rq->dl.overloaded)
2624		dl_clear_overload(rq);
2625
2626	cpudl_clear(&rq->rd->cpudl, rq->cpu);
2627	cpudl_clear_freecpu(&rq->rd->cpudl, rq->cpu);
2628}
2629
2630void __init init_sched_dl_class(void)
2631{
2632	unsigned int i;
2633
2634	for_each_possible_cpu(i)
2635		zalloc_cpumask_var_node(&per_cpu(local_cpu_mask_dl, i),
2636					GFP_KERNEL, cpu_to_node(i));
2637}
2638
2639void dl_add_task_root_domain(struct task_struct *p)
2640{
2641	struct rq_flags rf;
2642	struct rq *rq;
2643	struct dl_bw *dl_b;
2644
2645	raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
2646	if (!dl_task(p)) {
2647		raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags);
2648		return;
2649	}
2650
2651	rq = __task_rq_lock(p, &rf);
2652
2653	dl_b = &rq->rd->dl_bw;
2654	raw_spin_lock(&dl_b->lock);
2655
2656	__dl_add(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));
2657
2658	raw_spin_unlock(&dl_b->lock);
2659
2660	task_rq_unlock(rq, p, &rf);
2661}
2662
2663void dl_clear_root_domain(struct root_domain *rd)
2664{
2665	unsigned long flags;
2666
2667	raw_spin_lock_irqsave(&rd->dl_bw.lock, flags);
2668	rd->dl_bw.total_bw = 0;
2669	raw_spin_unlock_irqrestore(&rd->dl_bw.lock, flags);
2670}
2671
2672#endif /* CONFIG_SMP */
2673
2674static void switched_from_dl(struct rq *rq, struct task_struct *p)
2675{
2676	/*
2677	 * task_non_contending() can start the "inactive timer" (if the 0-lag
2678	 * time is in the future). If the task switches back to dl before
2679	 * the "inactive timer" fires, it can continue to consume its current
2680	 * runtime using its current deadline. If it stays outside of
2681	 * SCHED_DEADLINE until the 0-lag time passes, inactive_task_timer()
2682	 * will reset the task parameters.
2683	 */
2684	if (task_on_rq_queued(p) && p->dl.dl_runtime)
2685		task_non_contending(&p->dl);
2686
2687	/*
2688	 * In case a task is setscheduled out from SCHED_DEADLINE we need to
2689	 * keep track of that on its cpuset (for correct bandwidth tracking).
2690	 */
2691	dec_dl_tasks_cs(p);
2692
2693	if (!task_on_rq_queued(p)) {
2694		/*
2695		 * Inactive timer is armed. However, p is leaving DEADLINE and
2696		 * might migrate away from this rq while continuing to run on
2697		 * some other class. We need to remove its contribution from
2698		 * this rq running_bw now, or sub_rq_bw (below) will complain.
2699		 */
2700		if (p->dl.dl_non_contending)
2701			sub_running_bw(&p->dl, &rq->dl);
2702		sub_rq_bw(&p->dl, &rq->dl);
2703	}
2704
	/*
	 * We cannot use inactive_task_timer() to invoke sub_running_bw()
	 * at the 0-lag time, because the task could have been migrated
	 * in the meantime while running as SCHED_OTHER.
	 */
2710	if (p->dl.dl_non_contending)
2711		p->dl.dl_non_contending = 0;
2712
2713	/*
2714	 * Since this might be the only -deadline task on the rq,
2715	 * this is the right place to try to pull some other one
2716	 * from an overloaded CPU, if any.
2717	 */
2718	if (!task_on_rq_queued(p) || rq->dl.dl_nr_running)
2719		return;
2720
2721	deadline_queue_pull_task(rq);
2722}
2723
/*
 * When switching to -deadline, we may overload the rq; in that case
 * we try to push some other task away, if possible.
 */
2728static void switched_to_dl(struct rq *rq, struct task_struct *p)
2729{
2730	if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
2731		put_task_struct(p);
2732
2733	/*
2734	 * In case a task is setscheduled to SCHED_DEADLINE we need to keep
2735	 * track of that on its cpuset (for correct bandwidth tracking).
2736	 */
2737	inc_dl_tasks_cs(p);
2738
2739	/* If p is not queued we will update its parameters at next wakeup. */
2740	if (!task_on_rq_queued(p)) {
2741		add_rq_bw(&p->dl, &rq->dl);
2742
2743		return;
2744	}
2745
2746	if (rq->curr != p) {
2747#ifdef CONFIG_SMP
2748		if (p->nr_cpus_allowed > 1 && rq->dl.overloaded)
2749			deadline_queue_push_tasks(rq);
2750#endif
2751		if (dl_task(rq->curr))
2752			wakeup_preempt_dl(rq, p, 0);
2753		else
2754			resched_curr(rq);
2755	} else {
2756		update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 0);
2757	}
2758}
2759
2760/*
2761 * If the scheduling parameters of a -deadline task changed,
2762 * a push or pull operation might be needed.
2763 */
2764static void prio_changed_dl(struct rq *rq, struct task_struct *p,
2765			    int oldprio)
2766{
2767	if (!task_on_rq_queued(p))
2768		return;
2769
2770#ifdef CONFIG_SMP
	/*
	 * This might be too much, but unfortunately
	 * we don't have the old deadline value, and
	 * we can't tell whether the task is raising
	 * or lowering its prio, so...
	 */
2777	if (!rq->dl.overloaded)
2778		deadline_queue_pull_task(rq);
2779
2780	if (task_current(rq, p)) {
2781		/*
		 * If we now have an earlier deadline task than p,
2783		 * then reschedule, provided p is still on this
2784		 * runqueue.
2785		 */
2786		if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline))
2787			resched_curr(rq);
2788	} else {
2789		/*
2790		 * Current may not be deadline in case p was throttled but we
2791		 * have just replenished it (e.g. rt_mutex_setprio()).
2792		 *
2793		 * Otherwise, if p was given an earlier deadline, reschedule.
2794		 */
2795		if (!dl_task(rq->curr) ||
2796		    dl_time_before(p->dl.deadline, rq->curr->dl.deadline))
2797			resched_curr(rq);
2798	}
2799#else
2800	/*
	 * We don't know if p has an earlier or later deadline, so let's blindly
2802	 * set a (maybe not needed) rescheduling point.
2803	 */
2804	resched_curr(rq);
2805#endif
2806}
2807
2808#ifdef CONFIG_SCHED_CORE
2809static int task_is_throttled_dl(struct task_struct *p, int cpu)
2810{
2811	return p->dl.dl_throttled;
2812}
2813#endif
2814
2815DEFINE_SCHED_CLASS(dl) = {
2816
2817	.enqueue_task		= enqueue_task_dl,
2818	.dequeue_task		= dequeue_task_dl,
2819	.yield_task		= yield_task_dl,
2820
2821	.wakeup_preempt		= wakeup_preempt_dl,
2822
2823	.pick_next_task		= pick_next_task_dl,
2824	.put_prev_task		= put_prev_task_dl,
2825	.set_next_task		= set_next_task_dl,
2826
2827#ifdef CONFIG_SMP
2828	.balance		= balance_dl,
2829	.pick_task		= pick_task_dl,
2830	.select_task_rq		= select_task_rq_dl,
2831	.migrate_task_rq	= migrate_task_rq_dl,
2832	.set_cpus_allowed       = set_cpus_allowed_dl,
2833	.rq_online              = rq_online_dl,
2834	.rq_offline             = rq_offline_dl,
2835	.task_woken		= task_woken_dl,
2836	.find_lock_rq		= find_lock_later_rq,
2837#endif
2838
2839	.task_tick		= task_tick_dl,
2840	.task_fork              = task_fork_dl,
2841
2842	.prio_changed           = prio_changed_dl,
2843	.switched_from		= switched_from_dl,
2844	.switched_to		= switched_to_dl,
2845
2846	.update_curr		= update_curr_dl,
2847#ifdef CONFIG_SCHED_CORE
2848	.task_is_throttled	= task_is_throttled_dl,
2849#endif
2850};
2851
2852/* Used for dl_bw check and update, used under sched_rt_handler()::mutex */
2853static u64 dl_generation;
2854
2855int sched_dl_global_validate(void)
2856{
2857	u64 runtime = global_rt_runtime();
2858	u64 period = global_rt_period();
2859	u64 new_bw = to_ratio(period, runtime);
2860	u64 gen = ++dl_generation;
2861	struct dl_bw *dl_b;
2862	int cpu, cpus, ret = 0;
2863	unsigned long flags;
2864
	/*
	 * Here we want to check that the bandwidth is not being set to a
	 * value smaller than the bandwidth currently allocated in any of
	 * the root_domains.
	 */
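	/*
	 * For example, with the default sched_rt_runtime_us/sched_rt_period_us
	 * of 950000/1000000, new_bw is ~0.95 in BW_SHIFT fixed point; a
	 * root_domain spanning 4 CPUs is then refused (-EBUSY) if its
	 * total_bw already exceeds 4 * new_bw.
	 */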
2870	for_each_possible_cpu(cpu) {
2871		rcu_read_lock_sched();
2872
2873		if (dl_bw_visited(cpu, gen))
2874			goto next;
2875
2876		dl_b = dl_bw_of(cpu);
2877		cpus = dl_bw_cpus(cpu);
2878
2879		raw_spin_lock_irqsave(&dl_b->lock, flags);
2880		if (new_bw * cpus < dl_b->total_bw)
2881			ret = -EBUSY;
2882		raw_spin_unlock_irqrestore(&dl_b->lock, flags);
2883
2884next:
2885		rcu_read_unlock_sched();
2886
2887		if (ret)
2888			break;
2889	}
2890
2891	return ret;
2892}
2893
2894static void init_dl_rq_bw_ratio(struct dl_rq *dl_rq)
2895{
2896	if (global_rt_runtime() == RUNTIME_INF) {
2897		dl_rq->bw_ratio = 1 << RATIO_SHIFT;
2898		dl_rq->max_bw = dl_rq->extra_bw = 1 << BW_SHIFT;
2899	} else {
2900		dl_rq->bw_ratio = to_ratio(global_rt_runtime(),
2901			  global_rt_period()) >> (BW_SHIFT - RATIO_SHIFT);
2902		dl_rq->max_bw = dl_rq->extra_bw =
2903			to_ratio(global_rt_period(), global_rt_runtime());
2904	}
2905}
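/*
 * For example, with the default 950000us/1000000us rt bandwidth and
 * BW_SHIFT == 20 / RATIO_SHIFT == 8, max_bw = extra_bw ~= 0.95 in BW_SHIFT
 * fixed point while bw_ratio ~= 1/0.95 (~269) in RATIO_SHIFT fixed point;
 * the latter is what the GRUB reclaiming code uses to scale utilization
 * back up to the globally available deadline bandwidth.
 */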
2906
2907void sched_dl_do_global(void)
2908{
2909	u64 new_bw = -1;
2910	u64 gen = ++dl_generation;
2911	struct dl_bw *dl_b;
2912	int cpu;
2913	unsigned long flags;
2914
2915	if (global_rt_runtime() != RUNTIME_INF)
2916		new_bw = to_ratio(global_rt_period(), global_rt_runtime());
2917
2918	for_each_possible_cpu(cpu) {
2919		rcu_read_lock_sched();
2920
2921		if (dl_bw_visited(cpu, gen)) {
2922			rcu_read_unlock_sched();
2923			continue;
2924		}
2925
2926		dl_b = dl_bw_of(cpu);
2927
2928		raw_spin_lock_irqsave(&dl_b->lock, flags);
2929		dl_b->bw = new_bw;
2930		raw_spin_unlock_irqrestore(&dl_b->lock, flags);
2931
2932		rcu_read_unlock_sched();
2933		init_dl_rq_bw_ratio(&cpu_rq(cpu)->dl);
2934	}
2935}
2936
/*
 * We must be sure that accepting a new task (or allowing the parameters of
 * an existing one to change) is consistent with the bandwidth
 * constraints. If it is, this function also updates the currently
 * allocated bandwidth accordingly, to reflect the new situation.
 *
 * This function is called while holding p's rq->lock.
 */
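/*
 * Roughly (see __dl_overflow()), for a root_domain whose CPUs have total
 * capacity 'cap', a request is admitted when:
 *
 *   total_bw - old_bw + new_bw <= dl_b->bw * cap >> SCHED_CAPACITY_SHIFT
 *
 * with each bandwidth expressed as runtime/period in BW_SHIFT fixed point
 * (see to_ratio()).
 */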
2945int sched_dl_overflow(struct task_struct *p, int policy,
2946		      const struct sched_attr *attr)
2947{
2948	u64 period = attr->sched_period ?: attr->sched_deadline;
2949	u64 runtime = attr->sched_runtime;
2950	u64 new_bw = dl_policy(policy) ? to_ratio(period, runtime) : 0;
2951	int cpus, err = -1, cpu = task_cpu(p);
2952	struct dl_bw *dl_b = dl_bw_of(cpu);
2953	unsigned long cap;
2954
2955	if (attr->sched_flags & SCHED_FLAG_SUGOV)
2956		return 0;
2957
2958	/* !deadline task may carry old deadline bandwidth */
2959	if (new_bw == p->dl.dl_bw && task_has_dl_policy(p))
2960		return 0;
2961
	/*
	 * Whether a task enters, leaves, or stays -deadline but changes
	 * its parameters, we may need to update the total allocated
	 * bandwidth of the container accordingly.
	 */
2967	raw_spin_lock(&dl_b->lock);
2968	cpus = dl_bw_cpus(cpu);
2969	cap = dl_bw_capacity(cpu);
2970
2971	if (dl_policy(policy) && !task_has_dl_policy(p) &&
2972	    !__dl_overflow(dl_b, cap, 0, new_bw)) {
2973		if (hrtimer_active(&p->dl.inactive_timer))
2974			__dl_sub(dl_b, p->dl.dl_bw, cpus);
2975		__dl_add(dl_b, new_bw, cpus);
2976		err = 0;
2977	} else if (dl_policy(policy) && task_has_dl_policy(p) &&
2978		   !__dl_overflow(dl_b, cap, p->dl.dl_bw, new_bw)) {
2979		/*
2980		 * XXX this is slightly incorrect: when the task
2981		 * utilization decreases, we should delay the total
2982		 * utilization change until the task's 0-lag point.
2983		 * But this would require to set the task's "inactive
2984		 * timer" when the task is not inactive.
2985		 */
2986		__dl_sub(dl_b, p->dl.dl_bw, cpus);
2987		__dl_add(dl_b, new_bw, cpus);
2988		dl_change_utilization(p, new_bw);
2989		err = 0;
2990	} else if (!dl_policy(policy) && task_has_dl_policy(p)) {
2991		/*
2992		 * Do not decrease the total deadline utilization here,
2993		 * switched_from_dl() will take care to do it at the correct
2994		 * (0-lag) time.
2995		 */
2996		err = 0;
2997	}
2998	raw_spin_unlock(&dl_b->lock);
2999
3000	return err;
3001}
3002
/*
 * This function initializes the sched_dl_entity of a task that is becoming
 * SCHED_DEADLINE.
 *
 * Only the static values are considered here; the actual runtime and the
 * absolute deadline will be properly calculated when the task is enqueued
 * for the first time with its new policy.
 */
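/*
 * For example (hypothetical parameters): sched_runtime = 10ms,
 * sched_deadline = 30ms, sched_period = 100ms gives dl_bw ~= 0.1 and
 * dl_density ~= 0.33, both in BW_SHIFT fixed point (see to_ratio()).
 */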
3011void __setparam_dl(struct task_struct *p, const struct sched_attr *attr)
3012{
3013	struct sched_dl_entity *dl_se = &p->dl;
3014
3015	dl_se->dl_runtime = attr->sched_runtime;
3016	dl_se->dl_deadline = attr->sched_deadline;
3017	dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline;
3018	dl_se->flags = attr->sched_flags & SCHED_DL_FLAGS;
3019	dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime);
3020	dl_se->dl_density = to_ratio(dl_se->dl_deadline, dl_se->dl_runtime);
3021}
3022
3023void __getparam_dl(struct task_struct *p, struct sched_attr *attr)
3024{
3025	struct sched_dl_entity *dl_se = &p->dl;
3026
3027	attr->sched_priority = p->rt_priority;
3028	attr->sched_runtime = dl_se->dl_runtime;
3029	attr->sched_deadline = dl_se->dl_deadline;
3030	attr->sched_period = dl_se->dl_period;
3031	attr->sched_flags &= ~SCHED_DL_FLAGS;
3032	attr->sched_flags |= dl_se->flags;
3033}
3034
/*
 * This function validates the new parameters of a -deadline task.
 * We require the deadline to be non-zero and greater than or equal
 * to the runtime, and the period to be either zero or greater than
 * or equal to the deadline. Furthermore, we have to be sure that
 * user parameters are above the internal resolution of 1us (we
 * check sched_runtime only since it is always the smaller one) and
 * below 2^63 ns (we have to check both sched_deadline and
 * sched_period, as the latter can be zero).
 */
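/*
 * For example (hypothetical parameters): runtime = 5ms, deadline = 20ms,
 * period = 50ms is accepted; runtime = 20ms, deadline = 5ms is rejected
 * because runtime > deadline, and so is any period outside the
 * sched_deadline_period_{min,max}_us sysctl range.
 */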
3045bool __checkparam_dl(const struct sched_attr *attr)
3046{
3047	u64 period, max, min;
3048
3049	/* special dl tasks don't actually use any parameter */
3050	if (attr->sched_flags & SCHED_FLAG_SUGOV)
3051		return true;
3052
3053	/* deadline != 0 */
3054	if (attr->sched_deadline == 0)
3055		return false;
3056
3057	/*
3058	 * Since we truncate DL_SCALE bits, make sure we're at least
3059	 * that big.
3060	 */
3061	if (attr->sched_runtime < (1ULL << DL_SCALE))
3062		return false;
3063
3064	/*
3065	 * Since we use the MSB for wrap-around and sign issues, make
3066	 * sure it's not set (mind that period can be equal to zero).
3067	 */
3068	if (attr->sched_deadline & (1ULL << 63) ||
3069	    attr->sched_period & (1ULL << 63))
3070		return false;
3071
3072	period = attr->sched_period;
3073	if (!period)
3074		period = attr->sched_deadline;
3075
3076	/* runtime <= deadline <= period (if period != 0) */
3077	if (period < attr->sched_deadline ||
3078	    attr->sched_deadline < attr->sched_runtime)
3079		return false;
3080
3081	max = (u64)READ_ONCE(sysctl_sched_dl_period_max) * NSEC_PER_USEC;
3082	min = (u64)READ_ONCE(sysctl_sched_dl_period_min) * NSEC_PER_USEC;
3083
3084	if (period < min || period > max)
3085		return false;
3086
3087	return true;
3088}
3089
3090/*
3091 * This function clears the sched_dl_entity static params.
3092 */
3093static void __dl_clear_params(struct sched_dl_entity *dl_se)
3094{
3095	dl_se->dl_runtime		= 0;
3096	dl_se->dl_deadline		= 0;
3097	dl_se->dl_period		= 0;
3098	dl_se->flags			= 0;
3099	dl_se->dl_bw			= 0;
3100	dl_se->dl_density		= 0;
3101
3102	dl_se->dl_throttled		= 0;
3103	dl_se->dl_yielded		= 0;
3104	dl_se->dl_non_contending	= 0;
3105	dl_se->dl_overrun		= 0;
3106	dl_se->dl_server		= 0;
3107
3108#ifdef CONFIG_RT_MUTEXES
3109	dl_se->pi_se			= dl_se;
3110#endif
3111}
3112
3113void init_dl_entity(struct sched_dl_entity *dl_se)
3114{
3115	RB_CLEAR_NODE(&dl_se->rb_node);
3116	init_dl_task_timer(dl_se);
3117	init_dl_inactive_task_timer(dl_se);
3118	__dl_clear_params(dl_se);
3119}
3120
3121bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr)
3122{
3123	struct sched_dl_entity *dl_se = &p->dl;
3124
3125	if (dl_se->dl_runtime != attr->sched_runtime ||
3126	    dl_se->dl_deadline != attr->sched_deadline ||
3127	    dl_se->dl_period != attr->sched_period ||
3128	    dl_se->flags != (attr->sched_flags & SCHED_DL_FLAGS))
3129		return true;
3130
3131	return false;
3132}
3133
3134#ifdef CONFIG_SMP
3135int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur,
3136				 const struct cpumask *trial)
3137{
3138	unsigned long flags, cap;
3139	struct dl_bw *cur_dl_b;
3140	int ret = 1;
3141
3142	rcu_read_lock_sched();
3143	cur_dl_b = dl_bw_of(cpumask_any(cur));
3144	cap = __dl_bw_capacity(trial);
3145	raw_spin_lock_irqsave(&cur_dl_b->lock, flags);
3146	if (__dl_overflow(cur_dl_b, cap, 0, 0))
3147		ret = 0;
3148	raw_spin_unlock_irqrestore(&cur_dl_b->lock, flags);
3149	rcu_read_unlock_sched();
3150
3151	return ret;
3152}
3153
3154enum dl_bw_request {
3155	dl_bw_req_check_overflow = 0,
3156	dl_bw_req_alloc,
3157	dl_bw_req_free
3158};
3159
3160static int dl_bw_manage(enum dl_bw_request req, int cpu, u64 dl_bw)
3161{
3162	unsigned long flags;
3163	struct dl_bw *dl_b;
	bool overflow = false;
3165
3166	rcu_read_lock_sched();
3167	dl_b = dl_bw_of(cpu);
3168	raw_spin_lock_irqsave(&dl_b->lock, flags);
3169
3170	if (req == dl_bw_req_free) {
3171		__dl_sub(dl_b, dl_bw, dl_bw_cpus(cpu));
3172	} else {
3173		unsigned long cap = dl_bw_capacity(cpu);
3174
3175		overflow = __dl_overflow(dl_b, cap, 0, dl_bw);
3176
3177		if (req == dl_bw_req_alloc && !overflow) {
3178			/*
3179			 * We reserve space in the destination
3180			 * root_domain, as we can't fail after this point.
3181			 * We will free resources in the source root_domain
3182			 * later on (see set_cpus_allowed_dl()).
3183			 */
3184			__dl_add(dl_b, dl_bw, dl_bw_cpus(cpu));
3185		}
3186	}
3187
3188	raw_spin_unlock_irqrestore(&dl_b->lock, flags);
3189	rcu_read_unlock_sched();
3190
3191	return overflow ? -EBUSY : 0;
3192}
3193
3194int dl_bw_check_overflow(int cpu)
3195{
3196	return dl_bw_manage(dl_bw_req_check_overflow, cpu, 0);
3197}
3198
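/*
 * dl_bw_alloc() and dl_bw_free() are meant to be used in pairs by callers
 * (such as the cpuset attach path) that must reserve deadline bandwidth in
 * a destination root_domain before an operation that can no longer fail,
 * and give it back if that operation is aborted.
 */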
3199int dl_bw_alloc(int cpu, u64 dl_bw)
3200{
3201	return dl_bw_manage(dl_bw_req_alloc, cpu, dl_bw);
3202}
3203
3204void dl_bw_free(int cpu, u64 dl_bw)
3205{
3206	dl_bw_manage(dl_bw_req_free, cpu, dl_bw);
3207}
3208#endif
3209
3210#ifdef CONFIG_SCHED_DEBUG
3211void print_dl_stats(struct seq_file *m, int cpu)
3212{
3213	print_dl_rq(m, cpu, &cpu_rq(cpu)->dl);
3214}
3215#endif /* CONFIG_SCHED_DEBUG */
3216