/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Scheduler internal types and methods:
 */
#ifndef _KERNEL_SCHED_SCHED_H
#define _KERNEL_SCHED_SCHED_H

#include <linux/sched/affinity.h>
#include <linux/sched/autogroup.h>
#include <linux/sched/cpufreq.h>
#include <linux/sched/deadline.h>
#include <linux/sched.h>
#include <linux/sched/loadavg.h>
#include <linux/sched/mm.h>
#include <linux/sched/rseq_api.h>
#include <linux/sched/signal.h>
#include <linux/sched/smt.h>
#include <linux/sched/stat.h>
#include <linux/sched/sysctl.h>
#include <linux/sched/task_flags.h>
#include <linux/sched/task.h>
#include <linux/sched/topology.h>

#include <linux/atomic.h>
#include <linux/bitmap.h>
#include <linux/bug.h>
#include <linux/capability.h>
#include <linux/cgroup_api.h>
#include <linux/cgroup.h>
#include <linux/context_tracking.h>
#include <linux/cpufreq.h>
#include <linux/cpumask_api.h>
#include <linux/ctype.h>
#include <linux/file.h>
#include <linux/fs_api.h>
#include <linux/hrtimer_api.h>
#include <linux/interrupt.h>
#include <linux/irq_work.h>
#include <linux/jiffies.h>
#include <linux/kref_api.h>
#include <linux/kthread.h>
#include <linux/ktime_api.h>
#include <linux/lockdep_api.h>
#include <linux/lockdep.h>
#include <linux/minmax.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex_api.h>
#include <linux/plist.h>
#include <linux/poll.h>
#include <linux/proc_fs.h>
#include <linux/profile.h>
#include <linux/psi.h>
#include <linux/rcupdate.h>
#include <linux/seq_file.h>
#include <linux/seqlock.h>
#include <linux/softirq.h>
#include <linux/spinlock_api.h>
#include <linux/static_key.h>
#include <linux/stop_machine.h>
#include <linux/syscalls_api.h>
#include <linux/syscalls.h>
#include <linux/tick.h>
#include <linux/topology.h>
#include <linux/types.h>
#include <linux/u64_stats_sync_api.h>
#include <linux/uaccess.h>
#include <linux/wait_api.h>
#include <linux/wait_bit.h>
#include <linux/workqueue_api.h>

#include <trace/events/power.h>
#include <trace/events/sched.h>

#include "../workqueue_internal.h"

#ifdef CONFIG_PARAVIRT
# include <asm/paravirt.h>
# include <asm/paravirt_api_clock.h>
#endif

#include <asm/barrier.h>

#include "cpupri.h"
#include "cpudeadline.h"

#ifdef CONFIG_SCHED_DEBUG
# define SCHED_WARN_ON(x)      WARN_ONCE(x, #x)
#else
# define SCHED_WARN_ON(x)      ({ (void)(x), 0; })
#endif

struct rq;
struct cpuidle_state;

/* task_struct::on_rq states: */
#define TASK_ON_RQ_QUEUED	1
#define TASK_ON_RQ_MIGRATING	2

extern __read_mostly int scheduler_running;

extern unsigned long calc_load_update;
extern atomic_long_t calc_load_tasks;

extern void calc_global_load_tick(struct rq *this_rq);
extern long calc_load_fold_active(struct rq *this_rq, long adjust);

extern void call_trace_sched_update_nr_running(struct rq *rq, int count);

extern int sysctl_sched_rt_period;
extern int sysctl_sched_rt_runtime;
extern int sched_rr_timeslice;

/*
 * Helpers for converting nanosecond timing to jiffy resolution
 */
#define NS_TO_JIFFIES(TIME)	((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))
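
/*
 * Illustrative arithmetic (editorial sketch, not part of the kernel source):
 * with HZ == 250, one jiffy is NSEC_PER_SEC / HZ == 4,000,000 ns, so
 *
 *	NS_TO_JIFFIES(10 * NSEC_PER_SEC)	== 2500
 *	NS_TO_JIFFIES(3999999)			== 0	(truncating division)
 */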

/*
 * Increase resolution of nice-level calculations for 64-bit architectures.
 * The extra resolution improves shares distribution and load balancing of
 * low-weight task groups (e.g. nice +19 on an autogroup) and of deeper
 * taskgroup hierarchies, especially on larger systems. This is not a
 * user-visible change and does not change the user interface for setting
 * shares/weights.
 *
 * We increase resolution only if we have enough bits to allow this increased
 * resolution (i.e. 64-bit). The costs for increasing resolution on 32-bit
 * are pretty high and the returns do not justify the increased costs.
 *
 * Really only required when CONFIG_FAIR_GROUP_SCHED=y is also set, but to
 * increase coverage and consistency always enable it on 64-bit platforms.
 */
#ifdef CONFIG_64BIT
# define NICE_0_LOAD_SHIFT	(SCHED_FIXEDPOINT_SHIFT + SCHED_FIXEDPOINT_SHIFT)
# define scale_load(w)		((w) << SCHED_FIXEDPOINT_SHIFT)
# define scale_load_down(w) \
({ \
	unsigned long __w = (w); \
	if (__w) \
		__w = max(2UL, __w >> SCHED_FIXEDPOINT_SHIFT); \
	__w; \
})
#else
# define NICE_0_LOAD_SHIFT	(SCHED_FIXEDPOINT_SHIFT)
# define scale_load(w)		(w)
# define scale_load_down(w)	(w)
#endif

/*
 * Task weight (visible to users) and its load (invisible to users) have
 * independent resolution, but they should be well calibrated. We use
 * scale_load() and scale_load_down(w) to convert between them. The
 * following must be true:
 *
 *  scale_load(sched_prio_to_weight[NICE_TO_PRIO(0)-MAX_RT_PRIO]) == NICE_0_LOAD
 *
 */
#define NICE_0_LOAD		(1L << NICE_0_LOAD_SHIFT)
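
/*
 * Illustrative arithmetic (editorial sketch, assuming SCHED_FIXEDPOINT_SHIFT
 * == 10, i.e. a nice-0 weight of 1024): on 64-bit, NICE_0_LOAD_SHIFT is 20, so
 *
 *	scale_load(1024)	 == 1024 << 10 == 1048576 == NICE_0_LOAD
 *	scale_load_down(1048576) == max(2, 1048576 >> 10) == 1024
 *
 * i.e. scale_load_down(scale_load(w)) round-trips for any non-degenerate w.
 */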

/*
 * Single value that decides SCHED_DEADLINE internal math precision.
 * 10 -> just above 1us
 * 9  -> just above 0.5us
 */
#define DL_SCALE		10

/*
 * Single value that denotes runtime == period, i.e. unlimited time.
 */
#define RUNTIME_INF		((u64)~0ULL)

static inline int idle_policy(int policy)
{
	return policy == SCHED_IDLE;
}

static inline int fair_policy(int policy)
{
	return policy == SCHED_NORMAL || policy == SCHED_BATCH;
}

static inline int rt_policy(int policy)
{
	return policy == SCHED_FIFO || policy == SCHED_RR;
}

static inline int dl_policy(int policy)
{
	return policy == SCHED_DEADLINE;
}

static inline bool valid_policy(int policy)
{
	return idle_policy(policy) || fair_policy(policy) ||
		rt_policy(policy) || dl_policy(policy);
}

static inline int task_has_idle_policy(struct task_struct *p)
{
	return idle_policy(p->policy);
}

static inline int task_has_rt_policy(struct task_struct *p)
{
	return rt_policy(p->policy);
}

static inline int task_has_dl_policy(struct task_struct *p)
{
	return dl_policy(p->policy);
}

#define cap_scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT)

static inline void update_avg(u64 *avg, u64 sample)
{
	s64 diff = sample - *avg;
	*avg += diff / 8;
}
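
/*
 * Illustrative behaviour (editorial sketch, not part of the kernel source):
 * update_avg() is an exponentially weighted moving average with a 1/8 weight
 * for new samples. E.g. with *avg == 1000 and sample == 1800, diff == 800 and
 * the new *avg is 1000 + 800/8 == 1100; repeated identical samples converge
 * on the sample value.
 */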

/*
 * Shifting a value by an exponent greater *or equal* to the size of said value
 * is UB; cap at size-1.
 */
#define shr_bound(val, shift)							\
	(val >> min_t(typeof(shift), shift, BITS_PER_TYPE(typeof(val)) - 1))
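
/*
 * Illustrative use (editorial sketch with hypothetical values): for a u32
 * val, BITS_PER_TYPE() is 32, so shr_bound(val, 40) shifts by the capped 31
 * instead of invoking undefined behaviour, while shr_bound(val, 4) shifts by
 * 4 as usual.
 */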

/*
 * !! For sched_setattr_nocheck() (kernel) only !!
 *
 * This is actually gross. :(
 *
 * It is used to make schedutil kworker(s) higher priority than SCHED_DEADLINE
 * tasks, but still be able to sleep. We need this on platforms that cannot
 * atomically change clock frequency. Remove once fast switching is available
 * on such platforms.
 *
 * SUGOV stands for SchedUtil GOVernor.
 */
#define SCHED_FLAG_SUGOV	0x10000000

#define SCHED_DL_FLAGS (SCHED_FLAG_RECLAIM | SCHED_FLAG_DL_OVERRUN | SCHED_FLAG_SUGOV)

static inline bool dl_entity_is_special(const struct sched_dl_entity *dl_se)
{
#ifdef CONFIG_CPU_FREQ_GOV_SCHEDUTIL
	return unlikely(dl_se->flags & SCHED_FLAG_SUGOV);
#else
	return false;
#endif
}

/*
 * Tells if entity @a should preempt entity @b.
 */
static inline bool dl_entity_preempt(const struct sched_dl_entity *a,
				     const struct sched_dl_entity *b)
{
	return dl_entity_is_special(a) ||
	       dl_time_before(a->deadline, b->deadline);
}

/*
 * This is the priority-queue data structure of the RT scheduling class:
 */
struct rt_prio_array {
	DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */
	struct list_head queue[MAX_RT_PRIO];
};

struct rt_bandwidth {
	/* nests inside the rq lock: */
	raw_spinlock_t		rt_runtime_lock;
	ktime_t			rt_period;
	u64			rt_runtime;
	struct hrtimer		rt_period_timer;
	unsigned int		rt_period_active;
};

static inline int dl_bandwidth_enabled(void)
{
	return sysctl_sched_rt_runtime >= 0;
}

/*
 * To keep the bandwidth of -deadline tasks under control
 * we need some place where we can:
 *  - store the maximum -deadline bandwidth of each CPU;
 *  - cache the fraction of bandwidth that is currently allocated in
 *    each root domain;
 *
 * This is all done in the data structure below. It is similar to the
 * one used for RT-throttling (rt_bandwidth), with the main difference
 * that, since here we are only interested in admission control, we
 * do not decrease any runtime while the group "executes", nor do we
 * need a timer to replenish it.
 *
 * With respect to SMP, bandwidth is given on a per root domain basis,
 * meaning that:
 *  - bw (< 100%) is the deadline bandwidth of each CPU;
 *  - total_bw is the currently allocated bandwidth in each root domain;
 */
struct dl_bw {
	raw_spinlock_t		lock;
	u64			bw;
	u64			total_bw;
};

extern void init_dl_bw(struct dl_bw *dl_b);
extern int  sched_dl_global_validate(void);
extern void sched_dl_do_global(void);
extern int  sched_dl_overflow(struct task_struct *p, int policy, const struct sched_attr *attr);
extern void __setparam_dl(struct task_struct *p, const struct sched_attr *attr);
extern void __getparam_dl(struct task_struct *p, struct sched_attr *attr);
extern bool __checkparam_dl(const struct sched_attr *attr);
extern bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr);
extern int  dl_cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
extern int  dl_bw_check_overflow(int cpu);

/*
 * SCHED_DEADLINE supports servers (nested scheduling) with the following
 * interface:
 *
 *   dl_se::rq -- runqueue we belong to.
 *
 *   dl_se::server_has_tasks() -- used on bandwidth enforcement; we 'stop' the
 *                                server when it runs out of tasks to run.
 *
 *   dl_se::server_pick() -- nested pick_next_task(); we yield the period if this
 *                           returns NULL.
 *
 *   dl_server_update() -- called from update_curr_common(), propagates runtime
 *                         to the server.
 *
 *   dl_server_start()
 *   dl_server_stop()  -- start/stop the server when it has (no) tasks.
 *
 *   dl_server_init() -- initializes the server.
 */
extern void dl_server_update(struct sched_dl_entity *dl_se, s64 delta_exec);
extern void dl_server_start(struct sched_dl_entity *dl_se);
extern void dl_server_stop(struct sched_dl_entity *dl_se);
extern void dl_server_init(struct sched_dl_entity *dl_se, struct rq *rq,
		    dl_server_has_tasks_f has_tasks,
		    dl_server_pick_f pick);

#ifdef CONFIG_CGROUP_SCHED

struct cfs_rq;
struct rt_rq;

extern struct list_head task_groups;

struct cfs_bandwidth {
#ifdef CONFIG_CFS_BANDWIDTH
	raw_spinlock_t		lock;
	ktime_t			period;
	u64			quota;
	u64			runtime;
	u64			burst;
	u64			runtime_snap;
	s64			hierarchical_quota;

	u8			idle;
	u8			period_active;
	u8			slack_started;
	struct hrtimer		period_timer;
	struct hrtimer		slack_timer;
	struct list_head	throttled_cfs_rq;

	/* Statistics: */
	int			nr_periods;
	int			nr_throttled;
	int			nr_burst;
	u64			throttled_time;
	u64			burst_time;
#endif
};

/* Task group related information */
struct task_group {
	struct cgroup_subsys_state css;

#ifdef CONFIG_FAIR_GROUP_SCHED
	/* schedulable entities of this group on each CPU */
	struct sched_entity	**se;
	/* runqueue "owned" by this group on each CPU */
	struct cfs_rq		**cfs_rq;
	unsigned long		shares;

	/* A positive value indicates that this is a SCHED_IDLE group. */
	int			idle;

#ifdef CONFIG_SMP
	/*
	 * load_avg can be heavily contended at clock tick time, so put
	 * it in its own cacheline separated from the fields above which
	 * will also be accessed at each tick.
	 */
	atomic_long_t		load_avg ____cacheline_aligned;
#endif
#endif

#ifdef CONFIG_RT_GROUP_SCHED
	struct sched_rt_entity	**rt_se;
	struct rt_rq		**rt_rq;

	struct rt_bandwidth	rt_bandwidth;
#endif

	struct rcu_head		rcu;
	struct list_head	list;

	struct task_group	*parent;
	struct list_head	siblings;
	struct list_head	children;

#ifdef CONFIG_SCHED_AUTOGROUP
	struct autogroup	*autogroup;
#endif

	struct cfs_bandwidth	cfs_bandwidth;

#ifdef CONFIG_UCLAMP_TASK_GROUP
	/* The two decimal precision [%] value requested from user-space */
	unsigned int		uclamp_pct[UCLAMP_CNT];
	/* Clamp values requested for a task group */
	struct uclamp_se	uclamp_req[UCLAMP_CNT];
	/* Effective clamp values used for a task group */
	struct uclamp_se	uclamp[UCLAMP_CNT];
#endif
};

#ifdef CONFIG_FAIR_GROUP_SCHED
#define ROOT_TASK_GROUP_LOAD	NICE_0_LOAD

/*
 * A weight of 0 or 1 can cause arithmetic problems.
 * The weight of a cfs_rq is the sum of the weights of the entities
 * queued on it, so the weight of an entity should not be too large,
 * and neither should the shares value of a task group.
 * (The default weight is 1024 - so there's no practical
 *  limitation from this.)
 */
#define MIN_SHARES		(1UL <<  1)
#define MAX_SHARES		(1UL << 18)
#endif

typedef int (*tg_visitor)(struct task_group *, void *);

extern int walk_tg_tree_from(struct task_group *from,
			     tg_visitor down, tg_visitor up, void *data);

/*
 * Iterate the full tree, calling @down when first entering a node and @up when
 * leaving it for the final time.
 *
 * Caller must hold rcu_lock or sufficient equivalent.
 */
static inline int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
{
	return walk_tg_tree_from(&root_task_group, down, up, data);
}

extern int tg_nop(struct task_group *tg, void *data);

#ifdef CONFIG_FAIR_GROUP_SCHED
extern void free_fair_sched_group(struct task_group *tg);
extern int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent);
extern void online_fair_sched_group(struct task_group *tg);
extern void unregister_fair_sched_group(struct task_group *tg);
#else
static inline void free_fair_sched_group(struct task_group *tg) { }
static inline int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
{
	return 1;
}
static inline void online_fair_sched_group(struct task_group *tg) { }
static inline void unregister_fair_sched_group(struct task_group *tg) { }
#endif

extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
			struct sched_entity *se, int cpu,
			struct sched_entity *parent);
extern void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b, struct cfs_bandwidth *parent);

extern void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b);
extern void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq);
extern bool cfs_task_bw_constrained(struct task_struct *p);

extern void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
		struct sched_rt_entity *rt_se, int cpu,
		struct sched_rt_entity *parent);
extern int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us);
extern int sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us);
extern long sched_group_rt_runtime(struct task_group *tg);
extern long sched_group_rt_period(struct task_group *tg);
extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk);

extern struct task_group *sched_create_group(struct task_group *parent);
extern void sched_online_group(struct task_group *tg,
			       struct task_group *parent);
extern void sched_destroy_group(struct task_group *tg);
extern void sched_release_group(struct task_group *tg);

extern void sched_move_task(struct task_struct *tsk);

#ifdef CONFIG_FAIR_GROUP_SCHED
extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);

extern int sched_group_set_idle(struct task_group *tg, long idle);

#ifdef CONFIG_SMP
extern void set_task_rq_fair(struct sched_entity *se,
			     struct cfs_rq *prev, struct cfs_rq *next);
#else /* !CONFIG_SMP */
static inline void set_task_rq_fair(struct sched_entity *se,
			     struct cfs_rq *prev, struct cfs_rq *next) { }
#endif /* CONFIG_SMP */
#endif /* CONFIG_FAIR_GROUP_SCHED */

#else /* CONFIG_CGROUP_SCHED */

struct cfs_bandwidth { };
static inline bool cfs_task_bw_constrained(struct task_struct *p) { return false; }

#endif	/* CONFIG_CGROUP_SCHED */

extern void unregister_rt_sched_group(struct task_group *tg);
extern void free_rt_sched_group(struct task_group *tg);
extern int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent);

/*
 * u64_u32_load/u64_u32_store
 *
 * Use a copy of a u64 value to protect against data races. This is only
 * applicable to 32-bit architectures.
 */
#ifdef CONFIG_64BIT
# define u64_u32_load_copy(var, copy)       var
# define u64_u32_store_copy(var, copy, val) (var = val)
#else
# define u64_u32_load_copy(var, copy)					\
({									\
	u64 __val, __val_copy;						\
	do {								\
		__val_copy = copy;					\
		/*							\
		 * paired with u64_u32_store_copy(), ordering access	\
		 * to var and copy.					\
		 */							\
		smp_rmb();						\
		__val = var;						\
	} while (__val != __val_copy);					\
	__val;								\
})
# define u64_u32_store_copy(var, copy, val)				\
do {									\
	typeof(val) __val = (val);					\
	var = __val;							\
	/*								\
	 * paired with u64_u32_load_copy(), ordering access to var and	\
	 * copy.							\
	 */								\
	smp_wmb();							\
	copy = __val;							\
} while (0)
#endif
#define u64_u32_load(var)	u64_u32_load_copy(var, var##_copy)
#define u64_u32_store(var, val)	u64_u32_store_copy(var, var##_copy, val)
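
/*
 * Illustrative usage sketch (editorial, not part of the kernel source):
 * given a value and its shadow copy, e.g. a pair of fields such as
 *
 *	u64 last_update_time;
 *	u64 last_update_time_copy;	// 32-bit only
 *
 * a writer publishes with u64_u32_store(last_update_time, now) and a
 * lockless reader retries u64_u32_load(last_update_time) until value and
 * copy agree, so it never observes a torn 64-bit value on 32-bit
 * architectures. On 64-bit both helpers degenerate to plain accesses.
 */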

/* CFS-related fields in a runqueue */
struct cfs_rq {
	struct load_weight	load;
	unsigned int		nr_running;
	unsigned int		h_nr_running;      /* SCHED_{NORMAL,BATCH,IDLE} */
	unsigned int		idle_nr_running;   /* SCHED_IDLE */
	unsigned int		idle_h_nr_running; /* SCHED_IDLE */

	s64			avg_vruntime;
	u64			avg_load;

	u64			exec_clock;
	u64			min_vruntime;
#ifdef CONFIG_SCHED_CORE
	unsigned int		forceidle_seq;
	u64			min_vruntime_fi;
#endif

#ifndef CONFIG_64BIT
	u64			min_vruntime_copy;
#endif

	struct rb_root_cached	tasks_timeline;

	/*
	 * 'curr' points to the currently running entity on this cfs_rq.
	 * It is set to NULL otherwise (i.e. when none are currently running).
	 */
	struct sched_entity	*curr;
	struct sched_entity	*next;

#ifdef CONFIG_SCHED_DEBUG
	unsigned int		nr_spread_over;
#endif

#ifdef CONFIG_SMP
	/*
	 * CFS load tracking
	 */
	struct sched_avg	avg;
#ifndef CONFIG_64BIT
	u64			last_update_time_copy;
#endif
	struct {
		raw_spinlock_t	lock ____cacheline_aligned;
		int		nr;
		unsigned long	load_avg;
		unsigned long	util_avg;
		unsigned long	runnable_avg;
	} removed;

#ifdef CONFIG_FAIR_GROUP_SCHED
	u64			last_update_tg_load_avg;
	unsigned long		tg_load_avg_contrib;
	long			propagate;
	long			prop_runnable_sum;

	/*
	 *   h_load = weight * f(tg)
	 *
	 * Where f(tg) is the recursive weight fraction assigned to
	 * this group.
	 */
	unsigned long		h_load;
	u64			last_h_load_update;
	struct sched_entity	*h_load_next;
#endif /* CONFIG_FAIR_GROUP_SCHED */
#endif /* CONFIG_SMP */

#ifdef CONFIG_FAIR_GROUP_SCHED
	struct rq		*rq;	/* CPU runqueue to which this cfs_rq is attached */

	/*
	 * Leaf cfs_rqs are those that hold tasks (the lowest schedulable
	 * entities in a hierarchy). Non-leaf cfs_rqs hold other, higher
	 * schedulable entities (like users, containers etc.)
	 *
	 * leaf_cfs_rq_list ties together the list of leaf cfs_rqs in a CPU.
	 * This list is used during load balance.
	 */
	int			on_list;
	struct list_head	leaf_cfs_rq_list;
	struct task_group	*tg;	/* group that "owns" this runqueue */

	/* Locally cached copy of our task_group's idle value */
	int			idle;

#ifdef CONFIG_CFS_BANDWIDTH
	int			runtime_enabled;
	s64			runtime_remaining;

	u64			throttled_pelt_idle;
#ifndef CONFIG_64BIT
	u64			throttled_pelt_idle_copy;
#endif
	u64			throttled_clock;
	u64			throttled_clock_pelt;
	u64			throttled_clock_pelt_time;
	u64			throttled_clock_self;
	u64			throttled_clock_self_time;
	int			throttled;
	int			throttle_count;
	struct list_head	throttled_list;
	struct list_head	throttled_csd_list;
#endif /* CONFIG_CFS_BANDWIDTH */
#endif /* CONFIG_FAIR_GROUP_SCHED */
};

static inline int rt_bandwidth_enabled(void)
{
	return sysctl_sched_rt_runtime >= 0;
}

/* RT IPI pull logic requires IRQ_WORK */
#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_SMP)
# define HAVE_RT_PUSH_IPI
#endif

/* Real-Time classes' related field in a runqueue: */
struct rt_rq {
	struct rt_prio_array	active;
	unsigned int		rt_nr_running;
	unsigned int		rr_nr_running;
#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
	struct {
		int		curr; /* highest queued rt task prio */
#ifdef CONFIG_SMP
		int		next; /* next highest */
#endif
	} highest_prio;
#endif
#ifdef CONFIG_SMP
	int			overloaded;
	struct plist_head	pushable_tasks;
#endif /* CONFIG_SMP */
	int			rt_queued;

	int			rt_throttled;
	u64			rt_time;
	u64			rt_runtime;
	/* Nests inside the rq lock: */
	raw_spinlock_t		rt_runtime_lock;

#ifdef CONFIG_RT_GROUP_SCHED
	unsigned int		rt_nr_boosted;

	struct rq		*rq;
	struct task_group	*tg;
#endif
};

static inline bool rt_rq_is_runnable(struct rt_rq *rt_rq)
{
	return rt_rq->rt_queued && rt_rq->rt_nr_running;
}

/* Deadline class' related fields in a runqueue */
struct dl_rq {
	/* runqueue is an rbtree, ordered by deadline */
	struct rb_root_cached	root;

	unsigned int		dl_nr_running;

#ifdef CONFIG_SMP
	/*
	 * Deadline values of the currently executing and the
	 * earliest ready task on this rq. Caching these facilitates
	 * the decision whether or not a ready but not running task
	 * should migrate somewhere else.
	 */
	struct {
		u64		curr;
		u64		next;
	} earliest_dl;

	int			overloaded;

	/*
	 * Tasks on this rq that can be pushed away. They are kept in
	 * an rb-tree, ordered by tasks' deadlines, with caching
	 * of the leftmost (earliest deadline) element.
	 */
	struct rb_root_cached	pushable_dl_tasks_root;
#else
	struct dl_bw		dl_bw;
#endif
	/*
	 * "Active utilization" for this runqueue: increased when a
	 * task wakes up (becomes TASK_RUNNING) and decreased when a
	 * task blocks.
	 */
	u64			running_bw;

	/*
	 * Utilization of the tasks "assigned" to this runqueue (including
	 * the tasks that are in runqueue and the tasks that executed on this
	 * CPU and blocked). Increased when a task moves to this runqueue, and
	 * decreased when the task moves away (migrates, changes scheduling
	 * policy, or terminates).
	 * This is needed to compute the "inactive utilization" for the
	 * runqueue (inactive utilization = this_bw - running_bw).
	 */
	u64			this_bw;
	u64			extra_bw;

	/*
	 * Maximum available bandwidth for reclaiming by SCHED_FLAG_RECLAIM
	 * tasks of this rq. Used in the calculation of reclaimable
	 * bandwidth (GRUB).
	 */
	u64			max_bw;

	/*
	 * Inverse of the fraction of CPU utilization that can be reclaimed
	 * by the GRUB algorithm.
	 */
	u64			bw_ratio;
};

#ifdef CONFIG_FAIR_GROUP_SCHED
/* An entity is a task if it doesn't "own" a runqueue */
#define entity_is_task(se)	(!se->my_q)

static inline void se_update_runnable(struct sched_entity *se)
{
	if (!entity_is_task(se))
		se->runnable_weight = se->my_q->h_nr_running;
}

static inline long se_runnable(struct sched_entity *se)
{
	if (entity_is_task(se))
		return !!se->on_rq;
	else
		return se->runnable_weight;
}

#else
#define entity_is_task(se)	1

static inline void se_update_runnable(struct sched_entity *se) {}

static inline long se_runnable(struct sched_entity *se)
{
	return !!se->on_rq;
}
#endif

#ifdef CONFIG_SMP
/*
 * XXX we want to get rid of these helpers and use the full load resolution.
 */
static inline long se_weight(struct sched_entity *se)
{
	return scale_load_down(se->load.weight);
}

static inline bool sched_asym_prefer(int a, int b)
{
	return arch_asym_cpu_priority(a) > arch_asym_cpu_priority(b);
}

struct perf_domain {
	struct em_perf_domain *em_pd;
	struct perf_domain *next;
	struct rcu_head rcu;
};

/* Scheduling group status flags */
#define SG_OVERLOAD		0x1 /* More than one runnable task on a CPU. */
#define SG_OVERUTILIZED		0x2 /* One or more CPUs are over-utilized. */

/*
 * We add the notion of a root-domain which will be used to define per-domain
 * variables. Each exclusive cpuset essentially defines an island domain by
 * fully partitioning the member CPUs from any other cpuset. Whenever a new
 * exclusive cpuset is created, we also create and attach a new root-domain
 * object.
 */
struct root_domain {
	atomic_t		refcount;
	atomic_t		rto_count;
	struct rcu_head		rcu;
	cpumask_var_t		span;
	cpumask_var_t		online;

	/*
	 * Indicate pullable load on at least one CPU, e.g.:
	 * - More than one runnable task
	 * - Running task is misfit
	 */
	int			overload;

	/* Indicate one or more CPUs over-utilized (tipping point) */
	int			overutilized;

	/*
	 * The bit corresponding to a CPU gets set here if such CPU has more
	 * than one runnable -deadline task (as it is below for RT tasks).
	 */
	cpumask_var_t		dlo_mask;
	atomic_t		dlo_count;
	struct dl_bw		dl_bw;
	struct cpudl		cpudl;

	/*
	 * Indicates whether a root_domain's dl_bw has been checked or
	 * updated. It is a monotonically increasing value.
	 *
	 * Wrap-around is a potential corner case, but given that u64 is
	 * 'big enough', it shouldn't be a concern in practice.
	 */
	u64			visit_gen;

#ifdef HAVE_RT_PUSH_IPI
	/*
	 * For IPI pull requests, loop across the rto_mask.
	 */
	struct irq_work		rto_push_work;
	raw_spinlock_t		rto_lock;
	/* These are only updated and read within rto_lock */
	int			rto_loop;
	int			rto_cpu;
	/* These atomics are updated outside of a lock */
	atomic_t		rto_loop_next;
	atomic_t		rto_loop_start;
#endif
	/*
	 * The "RT overload" flag: it gets set if a CPU has more than
	 * one runnable RT task.
	 */
	cpumask_var_t		rto_mask;
	struct cpupri		cpupri;

	unsigned long		max_cpu_capacity;

	/*
	 * NULL-terminated list of performance domains intersecting with the
	 * CPUs of the rd. Protected by RCU.
	 */
	struct perf_domain __rcu *pd;
};

extern void init_defrootdomain(void);
extern int sched_init_domains(const struct cpumask *cpu_map);
extern void rq_attach_root(struct rq *rq, struct root_domain *rd);
extern void sched_get_rd(struct root_domain *rd);
extern void sched_put_rd(struct root_domain *rd);

#ifdef HAVE_RT_PUSH_IPI
extern void rto_push_irq_work_func(struct irq_work *work);
#endif
#endif /* CONFIG_SMP */

#ifdef CONFIG_UCLAMP_TASK
/*
 * struct uclamp_bucket - Utilization clamp bucket
 * @value: utilization clamp value for tasks on this clamp bucket
 * @tasks: number of RUNNABLE tasks on this clamp bucket
 *
 * Keep track of how many tasks are RUNNABLE for a given utilization
 * clamp value.
 */
struct uclamp_bucket {
	unsigned long value : bits_per(SCHED_CAPACITY_SCALE);
	unsigned long tasks : BITS_PER_LONG - bits_per(SCHED_CAPACITY_SCALE);
};

/*
 * struct uclamp_rq - rq's utilization clamp
 * @value: currently active clamp values for a rq
 * @bucket: utilization clamp buckets affecting a rq
 *
 * Keep track of RUNNABLE tasks on a rq to aggregate their clamp values.
 * A clamp value is affecting a rq when there is at least one task RUNNABLE
 * (or actually running) with that value.
 *
 * There are up to UCLAMP_CNT possible different clamp values, currently there
 * are only two: minimum utilization and maximum utilization.
 *
 * All utilization clamping values are MAX aggregated, since:
 * - for util_min: we want to run the CPU at least at the max of the minimum
 *   utilization required by its currently RUNNABLE tasks.
 * - for util_max: we want to allow the CPU to run up to the max of the
 *   maximum utilization allowed by its currently RUNNABLE tasks.
 *
 * Since on each system we expect only a limited number of different
 * utilization clamp values (UCLAMP_BUCKETS), use a simple array to track
 * the metrics required to compute all the per-rq utilization clamp values.
 */
struct uclamp_rq {
	unsigned int value;
	struct uclamp_bucket bucket[UCLAMP_BUCKETS];
};

DECLARE_STATIC_KEY_FALSE(sched_uclamp_used);
#endif /* CONFIG_UCLAMP_TASK */

struct rq;
struct balance_callback {
	struct balance_callback *next;
	void (*func)(struct rq *rq);
};

/*
 * This is the main, per-CPU runqueue data structure.
 *
 * Locking rule: places that want to lock multiple runqueues (such as
 * the load balancing or the thread migration code) must order their
 * lock acquisitions by ascending runqueue address.
 */
struct rq {
	/* runqueue lock: */
	raw_spinlock_t		__lock;

	unsigned int		nr_running;
#ifdef CONFIG_NUMA_BALANCING
	unsigned int		nr_numa_running;
	unsigned int		nr_preferred_running;
	unsigned int		numa_migrate_on;
#endif
#ifdef CONFIG_NO_HZ_COMMON
#ifdef CONFIG_SMP
	unsigned long		last_blocked_load_update_tick;
	unsigned int		has_blocked_load;
	call_single_data_t	nohz_csd;
#endif /* CONFIG_SMP */
	unsigned int		nohz_tick_stopped;
	atomic_t		nohz_flags;
#endif /* CONFIG_NO_HZ_COMMON */

#ifdef CONFIG_SMP
	unsigned int		ttwu_pending;
#endif
	u64			nr_switches;

#ifdef CONFIG_UCLAMP_TASK
	/* Utilization clamp values based on CPU's RUNNABLE tasks */
	struct uclamp_rq	uclamp[UCLAMP_CNT] ____cacheline_aligned;
	unsigned int		uclamp_flags;
#define UCLAMP_FLAG_IDLE 0x01
#endif

	struct cfs_rq		cfs;
	struct rt_rq		rt;
	struct dl_rq		dl;

#ifdef CONFIG_FAIR_GROUP_SCHED
	/* list of leaf cfs_rq on this CPU: */
	struct list_head	leaf_cfs_rq_list;
	struct list_head	*tmp_alone_branch;
#endif /* CONFIG_FAIR_GROUP_SCHED */

	/*
	 * This is part of a global counter where only the total sum
	 * over all CPUs matters. A task can increase this counter on
	 * one CPU and if it got migrated afterwards it may decrease
	 * it on another CPU. Always updated under the runqueue lock:
	 */
	unsigned int		nr_uninterruptible;

	struct task_struct __rcu	*curr;
	struct task_struct	*idle;
	struct task_struct	*stop;
	unsigned long		next_balance;
	struct mm_struct	*prev_mm;

	unsigned int		clock_update_flags;
	u64			clock;
	/* Ensure that all clocks are in the same cache line */
	u64			clock_task ____cacheline_aligned;
	u64			clock_pelt;
	unsigned long		lost_idle_time;
	u64			clock_pelt_idle;
	u64			clock_idle;
#ifndef CONFIG_64BIT
	u64			clock_pelt_idle_copy;
	u64			clock_idle_copy;
#endif

	atomic_t		nr_iowait;

#ifdef CONFIG_SCHED_DEBUG
	u64			last_seen_need_resched_ns;
	int			ticks_without_resched;
#endif

#ifdef CONFIG_MEMBARRIER
	int			membarrier_state;
#endif

#ifdef CONFIG_SMP
	struct root_domain		*rd;
	struct sched_domain __rcu	*sd;

	unsigned long		cpu_capacity;

	struct balance_callback *balance_callback;

	unsigned char		nohz_idle_balance;
	unsigned char		idle_balance;

	unsigned long		misfit_task_load;

	/* For active balancing */
	int			active_balance;
	int			push_cpu;
	struct cpu_stop_work	active_balance_work;

	/* CPU of this runqueue: */
	int			cpu;
	int			online;

	struct list_head	cfs_tasks;

	struct sched_avg	avg_rt;
	struct sched_avg	avg_dl;
#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
	struct sched_avg	avg_irq;
#endif
#ifdef CONFIG_SCHED_THERMAL_PRESSURE
	struct sched_avg	avg_thermal;
#endif
	u64			idle_stamp;
	u64			avg_idle;

	/* This is used to determine avg_idle's max value */
	u64			max_idle_balance_cost;

#ifdef CONFIG_HOTPLUG_CPU
	struct rcuwait		hotplug_wait;
#endif
#endif /* CONFIG_SMP */

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
	u64			prev_irq_time;
#endif
#ifdef CONFIG_PARAVIRT
	u64			prev_steal_time;
#endif
#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
	u64			prev_steal_time_rq;
#endif

	/* calc_load related fields */
	unsigned long		calc_load_update;
	long			calc_load_active;

#ifdef CONFIG_SCHED_HRTICK
#ifdef CONFIG_SMP
	call_single_data_t	hrtick_csd;
#endif
	struct hrtimer		hrtick_timer;
	ktime_t			hrtick_time;
#endif

#ifdef CONFIG_SCHEDSTATS
	/* latency stats */
	struct sched_info	rq_sched_info;
	unsigned long long	rq_cpu_time;
	/* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */

	/* sys_sched_yield() stats */
	unsigned int		yld_count;

	/* schedule() stats */
	unsigned int		sched_count;
	unsigned int		sched_goidle;

	/* try_to_wake_up() stats */
	unsigned int		ttwu_count;
	unsigned int		ttwu_local;
#endif

#ifdef CONFIG_CPU_IDLE
	/* Must be inspected within an RCU read-side critical section */
	struct cpuidle_state	*idle_state;
#endif

#ifdef CONFIG_SMP
	unsigned int		nr_pinned;
#endif
	unsigned int		push_busy;
	struct cpu_stop_work	push_work;

#ifdef CONFIG_SCHED_CORE
	/* per rq */
	struct rq		*core;
	struct task_struct	*core_pick;
	unsigned int		core_enabled;
	unsigned int		core_sched_seq;
	struct rb_root		core_tree;

	/* shared state -- careful with sched_core_cpu_deactivate() */
	unsigned int		core_task_seq;
	unsigned int		core_pick_seq;
	unsigned long		core_cookie;
	unsigned int		core_forceidle_count;
	unsigned int		core_forceidle_seq;
	unsigned int		core_forceidle_occupation;
	u64			core_forceidle_start;
#endif

	/* Scratch cpumask to be temporarily used under rq_lock */
	cpumask_var_t		scratch_mask;

#if defined(CONFIG_CFS_BANDWIDTH) && defined(CONFIG_SMP)
	call_single_data_t	cfsb_csd;
	struct list_head	cfsb_csd_list;
#endif
};

#ifdef CONFIG_FAIR_GROUP_SCHED

/* CPU runqueue to which this cfs_rq is attached */
static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
	return cfs_rq->rq;
}

#else

static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
	return container_of(cfs_rq, struct rq, cfs);
}
#endif

static inline int cpu_of(struct rq *rq)
{
#ifdef CONFIG_SMP
	return rq->cpu;
#else
	return 0;
#endif
}

#define MDF_PUSH	0x01

static inline bool is_migration_disabled(struct task_struct *p)
{
#ifdef CONFIG_SMP
	return p->migration_disabled;
#else
	return false;
#endif
}

DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);

#define cpu_rq(cpu)		(&per_cpu(runqueues, (cpu)))
#define this_rq()		this_cpu_ptr(&runqueues)
#define task_rq(p)		cpu_rq(task_cpu(p))
#define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
#define raw_rq()		raw_cpu_ptr(&runqueues)
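
/*
 * Illustrative usage sketch (editorial, hypothetical helper not in this
 * file):
 *
 *	static inline unsigned int nr_running_of(int cpu)
 *	{
 *		return cpu_rq(cpu)->nr_running;
 *	}
 *
 * task_rq(p) similarly resolves to the runqueue of the CPU that @p last ran
 * on; without holding the relevant locks the answer may of course be stale.
 */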

struct sched_group;
#ifdef CONFIG_SCHED_CORE
static inline struct cpumask *sched_group_span(struct sched_group *sg);

DECLARE_STATIC_KEY_FALSE(__sched_core_enabled);

static inline bool sched_core_enabled(struct rq *rq)
{
	return static_branch_unlikely(&__sched_core_enabled) && rq->core_enabled;
}

static inline bool sched_core_disabled(void)
{
	return !static_branch_unlikely(&__sched_core_enabled);
}

/*
 * Be careful with this function; not for general use. The return value isn't
 * stable unless you actually hold a relevant rq->__lock.
 */
static inline raw_spinlock_t *rq_lockp(struct rq *rq)
{
	if (sched_core_enabled(rq))
		return &rq->core->__lock;

	return &rq->__lock;
}

static inline raw_spinlock_t *__rq_lockp(struct rq *rq)
{
	if (rq->core_enabled)
		return &rq->core->__lock;

	return &rq->__lock;
}

bool cfs_prio_less(const struct task_struct *a, const struct task_struct *b,
			bool fi);
void task_vruntime_update(struct rq *rq, struct task_struct *p, bool in_fi);

/*
 * Helpers to check if the CPU's core cookie matches with the task's cookie
 * when core scheduling is enabled.
 * A special case is that the task's cookie always matches with CPU's core
 * cookie if the CPU is in an idle core.
 */
static inline bool sched_cpu_cookie_match(struct rq *rq, struct task_struct *p)
{
	/* Ignore cookie match if core scheduler is not enabled on the CPU. */
	if (!sched_core_enabled(rq))
		return true;

	return rq->core->core_cookie == p->core_cookie;
}

static inline bool sched_core_cookie_match(struct rq *rq, struct task_struct *p)
{
	bool idle_core = true;
	int cpu;

	/* Ignore cookie match if core scheduler is not enabled on the CPU. */
	if (!sched_core_enabled(rq))
		return true;

	for_each_cpu(cpu, cpu_smt_mask(cpu_of(rq))) {
		if (!available_idle_cpu(cpu)) {
			idle_core = false;
			break;
		}
	}

	/*
	 * A CPU in an idle core is always the best choice for tasks with
	 * cookies.
	 */
	return idle_core || rq->core->core_cookie == p->core_cookie;
}

static inline bool sched_group_cookie_match(struct rq *rq,
					    struct task_struct *p,
					    struct sched_group *group)
{
	int cpu;

	/* Ignore cookie match if core scheduler is not enabled on the CPU. */
	if (!sched_core_enabled(rq))
		return true;

	for_each_cpu_and(cpu, sched_group_span(group), p->cpus_ptr) {
		if (sched_core_cookie_match(cpu_rq(cpu), p))
			return true;
	}
	return false;
}

static inline bool sched_core_enqueued(struct task_struct *p)
{
	return !RB_EMPTY_NODE(&p->core_node);
}

extern void sched_core_enqueue(struct rq *rq, struct task_struct *p);
extern void sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags);

extern void sched_core_get(void);
extern void sched_core_put(void);

#else /* !CONFIG_SCHED_CORE */

static inline bool sched_core_enabled(struct rq *rq)
{
	return false;
}

static inline bool sched_core_disabled(void)
{
	return true;
}

static inline raw_spinlock_t *rq_lockp(struct rq *rq)
{
	return &rq->__lock;
}

static inline raw_spinlock_t *__rq_lockp(struct rq *rq)
{
	return &rq->__lock;
}

static inline bool sched_cpu_cookie_match(struct rq *rq, struct task_struct *p)
{
	return true;
}

static inline bool sched_core_cookie_match(struct rq *rq, struct task_struct *p)
{
	return true;
}

static inline bool sched_group_cookie_match(struct rq *rq,
					    struct task_struct *p,
					    struct sched_group *group)
{
	return true;
}
#endif /* CONFIG_SCHED_CORE */

static inline void lockdep_assert_rq_held(struct rq *rq)
{
	lockdep_assert_held(__rq_lockp(rq));
}

extern void raw_spin_rq_lock_nested(struct rq *rq, int subclass);
extern bool raw_spin_rq_trylock(struct rq *rq);
extern void raw_spin_rq_unlock(struct rq *rq);

static inline void raw_spin_rq_lock(struct rq *rq)
{
	raw_spin_rq_lock_nested(rq, 0);
}

static inline void raw_spin_rq_lock_irq(struct rq *rq)
{
	local_irq_disable();
	raw_spin_rq_lock(rq);
}

static inline void raw_spin_rq_unlock_irq(struct rq *rq)
{
	raw_spin_rq_unlock(rq);
	local_irq_enable();
}

static inline unsigned long _raw_spin_rq_lock_irqsave(struct rq *rq)
{
	unsigned long flags;

	local_irq_save(flags);
	raw_spin_rq_lock(rq);

	return flags;
}

static inline void raw_spin_rq_unlock_irqrestore(struct rq *rq, unsigned long flags)
{
	raw_spin_rq_unlock(rq);
	local_irq_restore(flags);
}

#define raw_spin_rq_lock_irqsave(rq, flags)	\
do {						\
	flags = _raw_spin_rq_lock_irqsave(rq);	\
} while (0)
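
/*
 * Illustrative usage sketch (editorial, hypothetical snippet not from this
 * file):
 *
 *	unsigned long flags;
 *
 *	raw_spin_rq_lock_irqsave(rq, flags);
 *	...				// rq locked, IRQs disabled
 *	raw_spin_rq_unlock_irqrestore(rq, flags);
 *
 * The macro wrapper exists so that flags can be assigned by value, matching
 * the calling convention of raw_spin_lock_irqsave().
 */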

#ifdef CONFIG_SCHED_SMT
extern void __update_idle_core(struct rq *rq);

static inline void update_idle_core(struct rq *rq)
{
	if (static_branch_unlikely(&sched_smt_present))
		__update_idle_core(rq);
}

#else
static inline void update_idle_core(struct rq *rq) { }
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
static inline struct task_struct *task_of(struct sched_entity *se)
{
	SCHED_WARN_ON(!entity_is_task(se));
	return container_of(se, struct task_struct, se);
}

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
	return p->se.cfs_rq;
}

/* runqueue on which this entity is (to be) queued */
static inline struct cfs_rq *cfs_rq_of(const struct sched_entity *se)
{
	return se->cfs_rq;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
	return grp->my_q;
}

#else

#define task_of(_se)	container_of(_se, struct task_struct, se)

static inline struct cfs_rq *task_cfs_rq(const struct task_struct *p)
{
	return &task_rq(p)->cfs;
}

static inline struct cfs_rq *cfs_rq_of(const struct sched_entity *se)
{
	const struct task_struct *p = task_of(se);
	struct rq *rq = task_rq(p);

	return &rq->cfs;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
	return NULL;
}
#endif

extern void update_rq_clock(struct rq *rq);

/*
 * rq::clock_update_flags bits
 *
 * %RQCF_REQ_SKIP - will request skipping of clock update on the next
 *  call to __schedule(). This is an optimisation to avoid
 *  neighbouring rq clock updates.
 *
 * %RQCF_ACT_SKIP - is set from inside of __schedule() when skipping is
 *  in effect and calls to update_rq_clock() are being ignored.
 *
 * %RQCF_UPDATED - is a debug flag that indicates whether a call has been
 *  made to update_rq_clock() since the last time rq::lock was pinned.
 *
 * If inside of __schedule(), clock_update_flags will have been
 * shifted left (a left shift is a cheap operation for the fast path
 * to promote %RQCF_REQ_SKIP to %RQCF_ACT_SKIP), so you must use,
 *
 *	if (rq->clock_update_flags >= RQCF_UPDATED)
 *
 * to check if %RQCF_UPDATED is set. It'll never be shifted more than
 * one position though, because the next rq_unpin_lock() will shift it
 * back.
 */
#define RQCF_REQ_SKIP		0x01
#define RQCF_ACT_SKIP		0x02
#define RQCF_UPDATED		0x04
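
/*
 * Illustrative bit arithmetic (editorial sketch, not part of the kernel
 * source): inside __schedule() the flags are shifted left once, so a pending
 * RQCF_REQ_SKIP (0x01) becomes RQCF_ACT_SKIP (0x02) and a stashed
 * RQCF_UPDATED (0x04) becomes 0x08. That is why '>= RQCF_UPDATED' is the
 * correct test there rather than '& RQCF_UPDATED'.
 */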

static inline void assert_clock_updated(struct rq *rq)
{
	/*
	 * The only reason for not seeing a clock update since the
	 * last rq_pin_lock() is if we're currently skipping updates.
	 */
	SCHED_WARN_ON(rq->clock_update_flags < RQCF_ACT_SKIP);
}

static inline u64 rq_clock(struct rq *rq)
{
	lockdep_assert_rq_held(rq);
	assert_clock_updated(rq);

	return rq->clock;
}

static inline u64 rq_clock_task(struct rq *rq)
{
	lockdep_assert_rq_held(rq);
	assert_clock_updated(rq);

	return rq->clock_task;
}

/*
 * By default the decay is the default PELT decay period.
 * The decay shift can change the decay period in
 * multiples of 32.
 *  Decay shift		Decay period(ms)
 *	0			32
 *	1			64
 *	2			128
 *	3			256
 *	4			512
 */
extern int sched_thermal_decay_shift;

static inline u64 rq_clock_thermal(struct rq *rq)
{
	return rq_clock_task(rq) >> sched_thermal_decay_shift;
}

static inline void rq_clock_skip_update(struct rq *rq)
{
	lockdep_assert_rq_held(rq);
	rq->clock_update_flags |= RQCF_REQ_SKIP;
}

/*
 * See rt task throttling, which is the only time a skip
 * request is canceled.
 */
static inline void rq_clock_cancel_skipupdate(struct rq *rq)
{
	lockdep_assert_rq_held(rq);
	rq->clock_update_flags &= ~RQCF_REQ_SKIP;
}

/*
 * During CPU offlining and rq-wide unthrottling we can trigger
 * an update_rq_clock() for several cfs and rt runqueues (typically
 * when using list_for_each_entry_*).
 * rq_clock_start_loop_update() can be called after updating the clock
 * once and before iterating over the list to prevent multiple updates.
 * After the iterative traversal, we need to call rq_clock_stop_loop_update()
 * to clear RQCF_ACT_SKIP of rq->clock_update_flags.
 */
static inline void rq_clock_start_loop_update(struct rq *rq)
{
	lockdep_assert_rq_held(rq);
	SCHED_WARN_ON(rq->clock_update_flags & RQCF_ACT_SKIP);
	rq->clock_update_flags |= RQCF_ACT_SKIP;
}

static inline void rq_clock_stop_loop_update(struct rq *rq)
{
	lockdep_assert_rq_held(rq);
	rq->clock_update_flags &= ~RQCF_ACT_SKIP;
}

struct rq_flags {
	unsigned long flags;
	struct pin_cookie cookie;
#ifdef CONFIG_SCHED_DEBUG
	/*
	 * A copy of (rq::clock_update_flags & RQCF_UPDATED) for the
	 * current pin context is stashed here in case it needs to be
	 * restored in rq_repin_lock().
	 */
	unsigned int clock_update_flags;
#endif
};

extern struct balance_callback balance_push_callback;

/*
 * Lockdep annotation that avoids accidental unlocks; it's like a
 * sticky/continuous lockdep_assert_held().
 *
 * This avoids code that has access to 'struct rq *rq' (basically everything in
 * the scheduler) from accidentally unlocking the rq if they do not also have a
 * copy of the (on-stack) 'struct rq_flags rf'.
 *
 * Also see Documentation/locking/lockdep-design.rst.
 */
static inline void rq_pin_lock(struct rq *rq, struct rq_flags *rf)
{
	rf->cookie = lockdep_pin_lock(__rq_lockp(rq));

#ifdef CONFIG_SCHED_DEBUG
	rq->clock_update_flags &= (RQCF_REQ_SKIP|RQCF_ACT_SKIP);
	rf->clock_update_flags = 0;
#ifdef CONFIG_SMP
	SCHED_WARN_ON(rq->balance_callback && rq->balance_callback != &balance_push_callback);
#endif
#endif
}

static inline void rq_unpin_lock(struct rq *rq, struct rq_flags *rf)
{
#ifdef CONFIG_SCHED_DEBUG
	if (rq->clock_update_flags > RQCF_ACT_SKIP)
		rf->clock_update_flags = RQCF_UPDATED;
#endif

	lockdep_unpin_lock(__rq_lockp(rq), rf->cookie);
}

static inline void rq_repin_lock(struct rq *rq, struct rq_flags *rf)
{
	lockdep_repin_lock(__rq_lockp(rq), rf->cookie);

#ifdef CONFIG_SCHED_DEBUG
	/*
	 * Restore the value we stashed in @rf for this pin context.
	 */
	rq->clock_update_flags |= rf->clock_update_flags;
#endif
}

struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
	__acquires(rq->lock);

struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
	__acquires(p->pi_lock)
	__acquires(rq->lock);

static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf)
	__releases(rq->lock)
{
	rq_unpin_lock(rq, rf);
	raw_spin_rq_unlock(rq);
}

static inline void
task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
	__releases(rq->lock)
	__releases(p->pi_lock)
{
	rq_unpin_lock(rq, rf);
	raw_spin_rq_unlock(rq);
	raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
}

DEFINE_LOCK_GUARD_1(task_rq_lock, struct task_struct,
		    _T->rq = task_rq_lock(_T->lock, &_T->rf),
		    task_rq_unlock(_T->rq, _T->lock, &_T->rf),
		    struct rq *rq; struct rq_flags rf)

static inline void
rq_lock_irqsave(struct rq *rq, struct rq_flags *rf)
	__acquires(rq->lock)
{
	raw_spin_rq_lock_irqsave(rq, rf->flags);
	rq_pin_lock(rq, rf);
}

static inline void
rq_lock_irq(struct rq *rq, struct rq_flags *rf)
	__acquires(rq->lock)
{
	raw_spin_rq_lock_irq(rq);
	rq_pin_lock(rq, rf);
}

static inline void
rq_lock(struct rq *rq, struct rq_flags *rf)
	__acquires(rq->lock)
{
	raw_spin_rq_lock(rq);
	rq_pin_lock(rq, rf);
}

static inline void
rq_unlock_irqrestore(struct rq *rq, struct rq_flags *rf)
	__releases(rq->lock)
{
	rq_unpin_lock(rq, rf);
	raw_spin_rq_unlock_irqrestore(rq, rf->flags);
}

static inline void
rq_unlock_irq(struct rq *rq, struct rq_flags *rf)
	__releases(rq->lock)
{
	rq_unpin_lock(rq, rf);
	raw_spin_rq_unlock_irq(rq);
}

static inline void
rq_unlock(struct rq *rq, struct rq_flags *rf)
	__releases(rq->lock)
{
	rq_unpin_lock(rq, rf);
	raw_spin_rq_unlock(rq);
}

DEFINE_LOCK_GUARD_1(rq_lock, struct rq,
		    rq_lock(_T->lock, &_T->rf),
		    rq_unlock(_T->lock, &_T->rf),
		    struct rq_flags rf)

DEFINE_LOCK_GUARD_1(rq_lock_irq, struct rq,
		    rq_lock_irq(_T->lock, &_T->rf),
		    rq_unlock_irq(_T->lock, &_T->rf),
		    struct rq_flags rf)

DEFINE_LOCK_GUARD_1(rq_lock_irqsave, struct rq,
		    rq_lock_irqsave(_T->lock, &_T->rf),
		    rq_unlock_irqrestore(_T->lock, &_T->rf),
		    struct rq_flags rf)
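
/*
 * Illustrative guard usage (editorial sketch, hypothetical snippet not from
 * this file): the guards above enable scope-based locking, e.g.
 *
 *	guard(rq_lock_irq)(rq);
 *	...			// rq locked and pinned, IRQs disabled;
 *				// unlocked automatically at end of scope
 *
 * which pairs rq_lock_irq()/rq_unlock_irq() without explicit unlock paths.
 */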

static inline struct rq *
this_rq_lock_irq(struct rq_flags *rf)
	__acquires(rq->lock)
{
	struct rq *rq;

	local_irq_disable();
	rq = this_rq();
	rq_lock(rq, rf);

	return rq;
}

#ifdef CONFIG_NUMA
enum numa_topology_type {
	NUMA_DIRECT,
	NUMA_GLUELESS_MESH,
	NUMA_BACKPLANE,
};
extern enum numa_topology_type sched_numa_topology_type;
extern int sched_max_numa_distance;
extern bool find_numa_distance(int distance);
extern void sched_init_numa(int offline_node);
extern void sched_update_numa(int cpu, bool online);
extern void sched_domains_numa_masks_set(unsigned int cpu);
extern void sched_domains_numa_masks_clear(unsigned int cpu);
extern int sched_numa_find_closest(const struct cpumask *cpus, int cpu);
#else
static inline void sched_init_numa(int offline_node) { }
static inline void sched_update_numa(int cpu, bool online) { }
static inline void sched_domains_numa_masks_set(unsigned int cpu) { }
static inline void sched_domains_numa_masks_clear(unsigned int cpu) { }
static inline int sched_numa_find_closest(const struct cpumask *cpus, int cpu)
{
	return nr_cpu_ids;
}
#endif

#ifdef CONFIG_NUMA_BALANCING
/* The regions in numa_faults array from task_struct */
enum numa_faults_stats {
	NUMA_MEM = 0,
	NUMA_CPU,
	NUMA_MEMBUF,
	NUMA_CPUBUF
};
extern void sched_setnuma(struct task_struct *p, int node);
extern int migrate_task_to(struct task_struct *p, int cpu);
extern int migrate_swap(struct task_struct *p, struct task_struct *t,
			int cpu, int scpu);
extern void init_numa_balancing(unsigned long clone_flags, struct task_struct *p);
#else
static inline void
init_numa_balancing(unsigned long clone_flags, struct task_struct *p)
{
}
#endif /* CONFIG_NUMA_BALANCING */

#ifdef CONFIG_SMP

static inline void
queue_balance_callback(struct rq *rq,
		       struct balance_callback *head,
		       void (*func)(struct rq *rq))
{
	lockdep_assert_rq_held(rq);

	/*
	 * Don't (re)queue an already queued item; nor queue anything when
	 * balance_push() is active, see the comment with
	 * balance_push_callback.
	 */
	if (unlikely(head->next || rq->balance_callback == &balance_push_callback))
		return;

	head->func = func;
	head->next = rq->balance_callback;
	rq->balance_callback = head;
}
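
/*
 * Illustrative usage sketch (editorial, hypothetical callback not from this
 * file):
 *
 *	static DEFINE_PER_CPU(struct balance_callback, my_cb_head);
 *
 *	static void my_balance_fn(struct rq *rq)
 *	{
 *		// invoked later by the scheduler core, with rq's lock held
 *	}
 *
 *	// with the rq lock held:
 *	queue_balance_callback(rq, &per_cpu(my_cb_head, rq->cpu),
 *			       my_balance_fn);
 */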

#define rcu_dereference_check_sched_domain(p) \
	rcu_dereference_check((p), \
			      lockdep_is_held(&sched_domains_mutex))

/*
 * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
 * See destroy_sched_domains: call_rcu for details.
 *
 * The domain tree of any CPU may only be accessed from within
 * preempt-disabled sections.
 */
#define for_each_domain(cpu, __sd) \
	for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); \
			__sd; __sd = __sd->parent)

/* A mask of all the SD flags that have the SDF_SHARED_CHILD metaflag */
#define SD_FLAG(name, mflags) (name * !!((mflags) & SDF_SHARED_CHILD)) |
static const unsigned int SD_SHARED_CHILD_MASK =
#include <linux/sched/sd_flags.h>
0;
#undef SD_FLAG

/**
 * highest_flag_domain - Return highest sched_domain containing flag.
 * @cpu:	The CPU whose highest level of sched domain is to
 *		be returned.
 * @flag:	The flag to check for the highest sched_domain
 *		for the given CPU.
 *
 * Returns the highest sched_domain of a CPU which contains @flag. If @flag has
 * the SDF_SHARED_CHILD metaflag, all the children domains also have @flag.
 */
static inline struct sched_domain *highest_flag_domain(int cpu, int flag)
{
	struct sched_domain *sd, *hsd = NULL;

	for_each_domain(cpu, sd) {
		if (sd->flags & flag) {
			hsd = sd;
			continue;
		}

		/*
		 * Stop the search if @flag is known to be shared at lower
		 * levels. It will not be found further up.
		 */
		if (flag & SD_SHARED_CHILD_MASK)
			break;
	}

	return hsd;
}
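
/*
 * Illustrative usage (editorial sketch): flags such as
 * SD_SHARE_PKG_RESOURCES carry SDF_SHARED_CHILD, so e.g.
 *
 *	sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES);
 *
 * returns the widest domain whose CPUs all share the last-level cache with
 * @cpu, and the loop above can stop early because such a flag cannot
 * reappear higher in the tree.
 */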

static inline struct sched_domain *lowest_flag_domain(int cpu, int flag)
{
	struct sched_domain *sd;

	for_each_domain(cpu, sd) {
		if (sd->flags & flag)
			break;
	}

	return sd;
}

DECLARE_PER_CPU(struct sched_domain __rcu *, sd_llc);
DECLARE_PER_CPU(int, sd_llc_size);
DECLARE_PER_CPU(int, sd_llc_id);
DECLARE_PER_CPU(int, sd_share_id);
DECLARE_PER_CPU(struct sched_domain_shared __rcu *, sd_llc_shared);
DECLARE_PER_CPU(struct sched_domain __rcu *, sd_numa);
DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_packing);
DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_cpucapacity);
extern struct static_key_false sched_asym_cpucapacity;
extern struct static_key_false sched_cluster_active;

static __always_inline bool sched_asym_cpucap_active(void)
{
	return static_branch_unlikely(&sched_asym_cpucapacity);
}

struct sched_group_capacity {
	atomic_t		ref;
	/*
	 * CPU capacity of this group, SCHED_CAPACITY_SCALE being max capacity
	 * for a single CPU.
	 */
	unsigned long		capacity;
	unsigned long		min_capacity;		/* Min per-CPU capacity in group */
	unsigned long		max_capacity;		/* Max per-CPU capacity in group */
	unsigned long		next_update;
	int			imbalance;		/* XXX unrelated to capacity but shared group state */

#ifdef CONFIG_SCHED_DEBUG
	int			id;
#endif

	unsigned long		cpumask[];		/* Balance mask */
};

struct sched_group {
	struct sched_group	*next;			/* Must be a circular list */
	atomic_t		ref;

	unsigned int		group_weight;
	unsigned int		cores;
	struct sched_group_capacity *sgc;
	int			asym_prefer_cpu;	/* CPU of highest priority in group */
	int			flags;

	/*
	 * The CPUs this group covers.
	 *
	 * NOTE: this field is variable length. (Allocated dynamically
	 * by attaching extra space to the end of the structure,
	 * depending on how many CPUs the kernel has booted up with)
	 */
	unsigned long		cpumask[];
};

static inline struct cpumask *sched_group_span(struct sched_group *sg)
{
	return to_cpumask(sg->cpumask);
}

/*
 * See build_balance_mask().
 */
static inline struct cpumask *group_balance_mask(struct sched_group *sg)
{
	return to_cpumask(sg->sgc->cpumask);
}

extern int group_balance_cpu(struct sched_group *sg);

#ifdef CONFIG_SCHED_DEBUG
void update_sched_domain_debugfs(void);
void dirty_sched_domain_sysctl(int cpu);
#else
static inline void update_sched_domain_debugfs(void)
{
}
static inline void dirty_sched_domain_sysctl(int cpu)
{
}
#endif

extern int sched_update_scaling(void);

static inline const struct cpumask *task_user_cpus(struct task_struct *p)
{
	if (!p->user_cpus_ptr)
		return cpu_possible_mask; /* &init_task.cpus_mask */
	return p->user_cpus_ptr;
}
#endif /* CONFIG_SMP */

#include "stats.h"

#if defined(CONFIG_SCHED_CORE) && defined(CONFIG_SCHEDSTATS)

extern void __sched_core_account_forceidle(struct rq *rq);

static inline void sched_core_account_forceidle(struct rq *rq)
{
	if (schedstat_enabled())
		__sched_core_account_forceidle(rq);
}

extern void __sched_core_tick(struct rq *rq);

static inline void sched_core_tick(struct rq *rq)
{
	if (sched_core_enabled(rq) && schedstat_enabled())
		__sched_core_tick(rq);
}

#else

static inline void sched_core_account_forceidle(struct rq *rq) {}

static inline void sched_core_tick(struct rq *rq) {}

#endif /* CONFIG_SCHED_CORE && CONFIG_SCHEDSTATS */

#ifdef CONFIG_CGROUP_SCHED

/*
 * Return the group to which this task belongs.
 *
 * We cannot use task_css() and friends because the cgroup subsystem
 * changes that value before the cgroup_subsys::attach() method is called,
 * therefore we cannot pin it and might observe the wrong value.
 *
 * The same is true for autogroup's p->signal->autogroup->tg: the autogroup
 * core changes this before calling sched_move_task().
 *
 * Instead we use a 'copy' which is updated from sched_move_task() while
 * holding both task_struct::pi_lock and rq::lock.
 */
static inline struct task_group *task_group(struct task_struct *p)
{
	return p->sched_task_group;
}

/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
{
#if defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_RT_GROUP_SCHED)
	struct task_group *tg = task_group(p);
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	set_task_rq_fair(&p->se, p->se.cfs_rq, tg->cfs_rq[cpu]);
	p->se.cfs_rq = tg->cfs_rq[cpu];
	p->se.parent = tg->se[cpu];
	p->se.depth = tg->se[cpu] ? tg->se[cpu]->depth + 1 : 0;
#endif

#ifdef CONFIG_RT_GROUP_SCHED
	p->rt.rt_rq  = tg->rt_rq[cpu];
	p->rt.parent = tg->rt_se[cpu];
#endif
}

#else /* CONFIG_CGROUP_SCHED */

static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
static inline struct task_group *task_group(struct task_struct *p)
{
	return NULL;
}

#endif /* CONFIG_CGROUP_SCHED */

static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
{
	set_task_rq(p, cpu);
#ifdef CONFIG_SMP
	/*
	 * After ->cpu is set to a new value, task_rq_lock(p, ...) can be
	 * successfully executed on another CPU. We must ensure that updates of
	 * per-task data have been completed by that moment.
	 */
	smp_wmb();
	WRITE_ONCE(task_thread_info(p)->cpu, cpu);
	p->wake_cpu = cpu;
#endif
}

/*
 * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
 */
#ifdef CONFIG_SCHED_DEBUG
# define const_debug __read_mostly
#else
# define const_debug const
#endif

#define SCHED_FEAT(name, enabled)	\
	__SCHED_FEAT_##name ,

enum {
#include "features.h"
	__SCHED_FEAT_NR,
};

#undef SCHED_FEAT

#ifdef CONFIG_SCHED_DEBUG

/*
 * To support run-time toggling of sched features, all the translation units
 * (except core.c) reference the sysctl_sched_features defined in core.c.
 */
extern const_debug unsigned int sysctl_sched_features;

#ifdef CONFIG_JUMP_LABEL
#define SCHED_FEAT(name, enabled)					\
static __always_inline bool static_branch_##name(struct static_key *key) \
{									\
	return static_key_##enabled(key);				\
}

#include "features.h"
#undef SCHED_FEAT

extern struct static_key sched_feat_keys[__SCHED_FEAT_NR];
#define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x]))

#else /* !CONFIG_JUMP_LABEL */

#define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))

#endif /* CONFIG_JUMP_LABEL */

#else /* !SCHED_DEBUG */

/*
 * Each translation unit has its own copy of sysctl_sched_features to allow
 * constant propagation at compile time and compiler optimization based on
 * the feature defaults.
 */
#define SCHED_FEAT(name, enabled)	\
	(1UL << __SCHED_FEAT_##name) * enabled |
static const_debug __maybe_unused unsigned int sysctl_sched_features =
#include "features.h"
	0;
#undef SCHED_FEAT

#define sched_feat(x) !!(sysctl_sched_features & (1UL << __SCHED_FEAT_##x))

#endif /* SCHED_DEBUG */
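
/*
 * Illustrative expansion (not literal preprocessor output): given a
 * features.h entry such as SCHED_FEAT(PLACE_LAG, true), the !SCHED_DEBUG
 * variant above folds to a compile-time constant:
 *
 *	sysctl_sched_features = (1UL << __SCHED_FEAT_PLACE_LAG) * true | ... | 0;
 *
 *	sched_feat(PLACE_LAG)
 *		=> !!(sysctl_sched_features & (1UL << __SCHED_FEAT_PLACE_LAG))
 *
 * so branches guarded by a disabled feature can be eliminated entirely.
 */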

extern struct static_key_false sched_numa_balancing;
extern struct static_key_false sched_schedstats;

static inline u64 global_rt_period(void)
{
	return (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
}

static inline u64 global_rt_runtime(void)
{
	if (sysctl_sched_rt_runtime < 0)
		return RUNTIME_INF;

	return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
}
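
/*
 * Worked example, assuming the default sysctl values
 * (sched_rt_period_us = 1000000, sched_rt_runtime_us = 950000):
 * global_rt_period() is 1e9 ns and global_rt_runtime() is 9.5e8 ns, i.e.
 * RT tasks may consume at most 95% of each second, leaving 5% for non-RT
 * work. Writing -1 to sched_rt_runtime_us yields RUNTIME_INF and disables
 * the throttling.
 */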

static inline int task_current(struct rq *rq, struct task_struct *p)
{
	return rq->curr == p;
}

static inline int task_on_cpu(struct rq *rq, struct task_struct *p)
{
#ifdef CONFIG_SMP
	return p->on_cpu;
#else
	return task_current(rq, p);
#endif
}

static inline int task_on_rq_queued(struct task_struct *p)
{
	return p->on_rq == TASK_ON_RQ_QUEUED;
}

static inline int task_on_rq_migrating(struct task_struct *p)
{
	return READ_ONCE(p->on_rq) == TASK_ON_RQ_MIGRATING;
}

/* Wake flags. The first three directly map to some SD flag value */
#define WF_EXEC         0x02 /* Wakeup after exec; maps to SD_BALANCE_EXEC */
#define WF_FORK         0x04 /* Wakeup after fork; maps to SD_BALANCE_FORK */
#define WF_TTWU         0x08 /* Wakeup;            maps to SD_BALANCE_WAKE */

#define WF_SYNC         0x10 /* Waker goes to sleep after wakeup */
#define WF_MIGRATED     0x20 /* Internal use, task got migrated */
#define WF_CURRENT_CPU  0x40 /* Prefer to move the wakee to the current CPU. */

#ifdef CONFIG_SMP
static_assert(WF_EXEC == SD_BALANCE_EXEC);
static_assert(WF_FORK == SD_BALANCE_FORK);
static_assert(WF_TTWU == SD_BALANCE_WAKE);
#endif

/*
 * To aid in avoiding the subversion of "niceness" due to uneven distribution
 * of tasks with abnormal "nice" values across CPUs, the contribution that
 * each task makes to its run queue's load is weighted according to its
 * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a
 * scaled version of the new time slice allocation that they receive on time
 * slice expiry, etc.
 */

#define WEIGHT_IDLEPRIO		3
#define WMULT_IDLEPRIO		1431655765

extern const int		sched_prio_to_weight[40];
extern const u32		sched_prio_to_wmult[40];
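
/*
 * Worked example: sched_prio_to_weight[] is indexed by (nice + 20); nice 0
 * maps to a weight of 1024 and each nice step scales by ~1.25, so nice -1
 * is 1277 and nice +1 is 820. A nice 0 task competing with a nice +5 task
 * (weight 335) thus gets about 1024 / (1024 + 335) ~= 75% of the CPU.
 * sched_prio_to_wmult[] caches 2^32 / weight so the delta_exec scaling can
 * use a multiply and shift instead of a divide.
 */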

/*
 * {de,en}queue flags:
 *
 * DEQUEUE_SLEEP  - task is no longer runnable
 * ENQUEUE_WAKEUP - task just became runnable
 *
 * SAVE/RESTORE - an otherwise spurious dequeue/enqueue, done to ensure tasks
 *                are in a known state which allows modification. Such pairs
 *                should preserve as much state as possible.
 *
 * MOVE - paired with SAVE/RESTORE, explicitly does not preserve the location
 *        in the runqueue.
 *
 * NOCLOCK - skip the update_rq_clock() (avoids double updates)
 *
 * MIGRATION - p->on_rq == TASK_ON_RQ_MIGRATING (used for DEADLINE)
 *
 * ENQUEUE_HEAD      - place at front of runqueue (tail if not specified)
 * ENQUEUE_REPLENISH - CBS (replenish runtime and postpone deadline)
 * ENQUEUE_MIGRATED  - the task was migrated during wakeup
 *
 */

#define DEQUEUE_SLEEP		0x01
#define DEQUEUE_SAVE		0x02 /* Matches ENQUEUE_RESTORE */
#define DEQUEUE_MOVE		0x04 /* Matches ENQUEUE_MOVE */
#define DEQUEUE_NOCLOCK		0x08 /* Matches ENQUEUE_NOCLOCK */
#define DEQUEUE_MIGRATING	0x100 /* Matches ENQUEUE_MIGRATING */

#define ENQUEUE_WAKEUP		0x01
#define ENQUEUE_RESTORE		0x02
#define ENQUEUE_MOVE		0x04
#define ENQUEUE_NOCLOCK		0x08

#define ENQUEUE_HEAD		0x10
#define ENQUEUE_REPLENISH	0x20
#ifdef CONFIG_SMP
#define ENQUEUE_MIGRATED	0x40
#else
#define ENQUEUE_MIGRATED	0x00
#endif
#define ENQUEUE_INITIAL		0x80
#define ENQUEUE_MIGRATING	0x100

#define RETRY_TASK		((void *)-1UL)

struct affinity_context {
	const struct cpumask *new_mask;
	struct cpumask *user_mask;
	unsigned int flags;
};

extern s64 update_curr_common(struct rq *rq);

struct sched_class {

#ifdef CONFIG_UCLAMP_TASK
	int uclamp_enabled;
#endif

	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
	void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
	void (*yield_task)   (struct rq *rq);
	bool (*yield_to_task)(struct rq *rq, struct task_struct *p);

	void (*wakeup_preempt)(struct rq *rq, struct task_struct *p, int flags);

	struct task_struct *(*pick_next_task)(struct rq *rq);

	void (*put_prev_task)(struct rq *rq, struct task_struct *p);
	void (*set_next_task)(struct rq *rq, struct task_struct *p, bool first);

#ifdef CONFIG_SMP
	int (*balance)(struct rq *rq, struct task_struct *prev, struct rq_flags *rf);
	int  (*select_task_rq)(struct task_struct *p, int task_cpu, int flags);

	struct task_struct * (*pick_task)(struct rq *rq);

	void (*migrate_task_rq)(struct task_struct *p, int new_cpu);

	void (*task_woken)(struct rq *this_rq, struct task_struct *task);

	void (*set_cpus_allowed)(struct task_struct *p, struct affinity_context *ctx);

	void (*rq_online)(struct rq *rq);
	void (*rq_offline)(struct rq *rq);

	struct rq *(*find_lock_rq)(struct task_struct *p, struct rq *rq);
#endif

	void (*task_tick)(struct rq *rq, struct task_struct *p, int queued);
	void (*task_fork)(struct task_struct *p);
	void (*task_dead)(struct task_struct *p);

	/*
	 * The switched_from() call is allowed to drop rq->lock, therefore we
	 * cannot assume the switched_from/switched_to pair is serialized by
	 * rq->lock. They are however serialized by p->pi_lock.
	 */
	void (*switched_from)(struct rq *this_rq, struct task_struct *task);
	void (*switched_to)  (struct rq *this_rq, struct task_struct *task);
	void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
			      int oldprio);

	unsigned int (*get_rr_interval)(struct rq *rq,
					struct task_struct *task);

	void (*update_curr)(struct rq *rq);

#ifdef CONFIG_FAIR_GROUP_SCHED
	void (*task_change_group)(struct task_struct *p);
#endif

#ifdef CONFIG_SCHED_CORE
	int (*task_is_throttled)(struct task_struct *p, int cpu);
#endif
};

static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
{
	WARN_ON_ONCE(rq->curr != prev);
	prev->sched_class->put_prev_task(rq, prev);
}

static inline void set_next_task(struct rq *rq, struct task_struct *next)
{
	next->sched_class->set_next_task(rq, next, false);
}

/*
 * Helper to define a sched_class instance; each one is placed in a separate
 * section which is ordered by the linker script:
 *
 *   include/asm-generic/vmlinux.lds.h
 *
 * *CAREFUL* they are laid out in *REVERSE* order!!!
 *
 * Also enforce alignment on the instance, not the type, to guarantee layout.
 */
#define DEFINE_SCHED_CLASS(name) \
const struct sched_class name##_sched_class \
	__aligned(__alignof__(struct sched_class)) \
	__section("__" #name "_sched_class")
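
/*
 * Usage sketch (mirroring fair.c and friends): a class fills in its methods
 * and lets the linker place the instance in its own section:
 *
 *	DEFINE_SCHED_CLASS(fair) = {
 *		.enqueue_task	= enqueue_task_fair,
 *		.dequeue_task	= dequeue_task_fair,
 *		...
 *	};
 *
 * which expands to "const struct sched_class fair_sched_class" placed in
 * the "__fair_sched_class" section.
 */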

/* Defined in include/asm-generic/vmlinux.lds.h */
extern struct sched_class __sched_class_highest[];
extern struct sched_class __sched_class_lowest[];

#define for_class_range(class, _from, _to) \
	for (class = (_from); class < (_to); class++)

#define for_each_class(class) \
	for_class_range(class, __sched_class_highest, __sched_class_lowest)
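
/*
 * Usage sketch: the core pick loop walks the classes from highest to lowest
 * priority and takes the first runnable task. A simplified, hypothetical
 * form:
 *
 *	const struct sched_class *class;
 *	struct task_struct *p;
 *
 *	for_each_class(class) {
 *		p = class->pick_next_task(rq);
 *		if (p)
 *			return p;
 *	}
 *	BUG();
 *
 * The BUG() is unreachable because the idle class always has a task to run.
 */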

#define sched_class_above(_a, _b)	((_a) < (_b))

extern const struct sched_class stop_sched_class;
extern const struct sched_class dl_sched_class;
extern const struct sched_class rt_sched_class;
extern const struct sched_class fair_sched_class;
extern const struct sched_class idle_sched_class;

static inline bool sched_stop_runnable(struct rq *rq)
{
	return rq->stop && task_on_rq_queued(rq->stop);
}

static inline bool sched_dl_runnable(struct rq *rq)
{
	return rq->dl.dl_nr_running > 0;
}

static inline bool sched_rt_runnable(struct rq *rq)
{
	return rq->rt.rt_queued > 0;
}

static inline bool sched_fair_runnable(struct rq *rq)
{
	return rq->cfs.nr_running > 0;
}

extern struct task_struct *pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf);
extern struct task_struct *pick_next_task_idle(struct rq *rq);

#define SCA_CHECK		0x01
#define SCA_MIGRATE_DISABLE	0x02
#define SCA_MIGRATE_ENABLE	0x04
#define SCA_USER		0x08

#ifdef CONFIG_SMP

extern void update_group_capacity(struct sched_domain *sd, int cpu);

extern void trigger_load_balance(struct rq *rq);

extern void set_cpus_allowed_common(struct task_struct *p, struct affinity_context *ctx);

static inline struct task_struct *get_push_task(struct rq *rq)
{
	struct task_struct *p = rq->curr;

	lockdep_assert_rq_held(rq);

	if (rq->push_busy)
		return NULL;

	if (p->nr_cpus_allowed == 1)
		return NULL;

	if (p->migration_disabled)
		return NULL;

	rq->push_busy = true;
	return get_task_struct(p);
}

extern int push_cpu_stop(void *arg);

#endif

#ifdef CONFIG_CPU_IDLE
static inline void idle_set_state(struct rq *rq,
				  struct cpuidle_state *idle_state)
{
	rq->idle_state = idle_state;
}

static inline struct cpuidle_state *idle_get_state(struct rq *rq)
{
	SCHED_WARN_ON(!rcu_read_lock_held());

	return rq->idle_state;
}
#else
static inline void idle_set_state(struct rq *rq,
				  struct cpuidle_state *idle_state)
{
}

static inline struct cpuidle_state *idle_get_state(struct rq *rq)
{
	return NULL;
}
#endif

extern void schedule_idle(void);
asmlinkage void schedule_user(void);

extern void sysrq_sched_debug_show(void);
extern void sched_init_granularity(void);
extern void update_max_interval(void);

extern void init_sched_dl_class(void);
extern void init_sched_rt_class(void);
extern void init_sched_fair_class(void);

extern void reweight_task(struct task_struct *p, int prio);

extern void resched_curr(struct rq *rq);
extern void resched_cpu(int cpu);

extern struct rt_bandwidth def_rt_bandwidth;
extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime);
extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);

extern void init_dl_entity(struct sched_dl_entity *dl_se);

#define BW_SHIFT		20
#define BW_UNIT			(1 << BW_SHIFT)
#define RATIO_SHIFT		8
#define MAX_BW_BITS		(64 - BW_SHIFT)
#define MAX_BW			((1ULL << MAX_BW_BITS) - 1)
unsigned long to_ratio(u64 period, u64 runtime);

extern void init_entity_runnable_average(struct sched_entity *se);
extern void post_init_entity_util_avg(struct task_struct *p);

#ifdef CONFIG_NO_HZ_FULL
extern bool sched_can_stop_tick(struct rq *rq);
extern int __init sched_tick_offload_init(void);

/*
 * The tick may be needed by tasks in the runqueue depending on their policy
 * and requirements. If the tick is needed, send the target CPU an IPI to
 * kick it out of nohz mode if necessary.
 */
static inline void sched_update_tick_dependency(struct rq *rq)
{
	int cpu = cpu_of(rq);

	if (!tick_nohz_full_cpu(cpu))
		return;

	if (sched_can_stop_tick(rq))
		tick_nohz_dep_clear_cpu(cpu, TICK_DEP_BIT_SCHED);
	else
		tick_nohz_dep_set_cpu(cpu, TICK_DEP_BIT_SCHED);
}
#else
static inline int sched_tick_offload_init(void) { return 0; }
static inline void sched_update_tick_dependency(struct rq *rq) { }
#endif

static inline void add_nr_running(struct rq *rq, unsigned count)
{
	unsigned prev_nr = rq->nr_running;

	rq->nr_running = prev_nr + count;
	if (trace_sched_update_nr_running_tp_enabled()) {
		call_trace_sched_update_nr_running(rq, count);
	}

#ifdef CONFIG_SMP
	if (prev_nr < 2 && rq->nr_running >= 2) {
		if (!READ_ONCE(rq->rd->overload))
			WRITE_ONCE(rq->rd->overload, 1);
	}
#endif

	sched_update_tick_dependency(rq);
}

static inline void sub_nr_running(struct rq *rq, unsigned count)
{
	rq->nr_running -= count;
	if (trace_sched_update_nr_running_tp_enabled()) {
		call_trace_sched_update_nr_running(rq, -count);
	}

	/* Check if we still need the tick */
	sched_update_tick_dependency(rq);
}

extern void activate_task(struct rq *rq, struct task_struct *p, int flags);
extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags);

extern void wakeup_preempt(struct rq *rq, struct task_struct *p, int flags);

#ifdef CONFIG_PREEMPT_RT
#define SCHED_NR_MIGRATE_BREAK 8
#else
#define SCHED_NR_MIGRATE_BREAK 32
#endif

extern const_debug unsigned int sysctl_sched_nr_migrate;
extern const_debug unsigned int sysctl_sched_migration_cost;

extern unsigned int sysctl_sched_base_slice;

#ifdef CONFIG_SCHED_DEBUG
extern int sysctl_resched_latency_warn_ms;
extern int sysctl_resched_latency_warn_once;

extern unsigned int sysctl_sched_tunable_scaling;

extern unsigned int sysctl_numa_balancing_scan_delay;
extern unsigned int sysctl_numa_balancing_scan_period_min;
extern unsigned int sysctl_numa_balancing_scan_period_max;
extern unsigned int sysctl_numa_balancing_scan_size;
extern unsigned int sysctl_numa_balancing_hot_threshold;
#endif

#ifdef CONFIG_SCHED_HRTICK

/*
 * Use hrtick when:
 *  - enabled by features
 *  - hrtimer is actually high res
 */
static inline int hrtick_enabled(struct rq *rq)
{
	if (!cpu_active(cpu_of(rq)))
		return 0;
	return hrtimer_is_hres_active(&rq->hrtick_timer);
}

static inline int hrtick_enabled_fair(struct rq *rq)
{
	if (!sched_feat(HRTICK))
		return 0;
	return hrtick_enabled(rq);
}

static inline int hrtick_enabled_dl(struct rq *rq)
{
	if (!sched_feat(HRTICK_DL))
		return 0;
	return hrtick_enabled(rq);
}

void hrtick_start(struct rq *rq, u64 delay);

#else

static inline int hrtick_enabled_fair(struct rq *rq)
{
	return 0;
}

static inline int hrtick_enabled_dl(struct rq *rq)
{
	return 0;
}

static inline int hrtick_enabled(struct rq *rq)
{
	return 0;
}

#endif /* CONFIG_SCHED_HRTICK */

#ifndef arch_scale_freq_tick
static __always_inline
void arch_scale_freq_tick(void)
{
}
#endif

#ifndef arch_scale_freq_capacity
/**
 * arch_scale_freq_capacity - get the frequency scale factor of a given CPU.
 * @cpu: the CPU in question.
 *
 * Return: the frequency scale factor normalized against SCHED_CAPACITY_SCALE, i.e.
 *
 *     f_curr
 *     ------ * SCHED_CAPACITY_SCALE
 *     f_max
 */
static __always_inline
unsigned long arch_scale_freq_capacity(int cpu)
{
	return SCHED_CAPACITY_SCALE;
}
#endif

#ifdef CONFIG_SCHED_DEBUG
/*
 * In double_lock_balance()/double_rq_lock(), we use raw_spin_rq_lock() to
 * acquire rq lock instead of rq_lock(). So at the end of these two functions
 * we need to call double_rq_clock_clear_update() to clear RQCF_UPDATED of
 * rq->clock_update_flags to avoid the WARN_DOUBLE_CLOCK warning.
 */
static inline void double_rq_clock_clear_update(struct rq *rq1, struct rq *rq2)
{
	rq1->clock_update_flags &= (RQCF_REQ_SKIP|RQCF_ACT_SKIP);
	/* rq1 == rq2 for !CONFIG_SMP, so just clear RQCF_UPDATED once. */
#ifdef CONFIG_SMP
	rq2->clock_update_flags &= (RQCF_REQ_SKIP|RQCF_ACT_SKIP);
#endif
}
#else
static inline void double_rq_clock_clear_update(struct rq *rq1, struct rq *rq2) {}
#endif

#define DEFINE_LOCK_GUARD_2(name, type, _lock, _unlock, ...)		\
__DEFINE_UNLOCK_GUARD(name, type, _unlock, type *lock2; __VA_ARGS__) \
static inline class_##name##_t class_##name##_constructor(type *lock, type *lock2) \
{ class_##name##_t _t = { .lock = lock, .lock2 = lock2 }, *_T = &_t;	\
  _lock; return _t; }

#ifdef CONFIG_SMP

static inline bool rq_order_less(struct rq *rq1, struct rq *rq2)
{
#ifdef CONFIG_SCHED_CORE
	/*
	 * In order to not have {0,2},{1,3} turn into an AB-BA,
	 * order by core-id first and cpu-id second.
	 *
	 * Notably:
	 *
	 *	double_rq_lock(0,3); will take core-0, core-1 lock
	 *	double_rq_lock(1,2); will take core-1, core-0 lock
	 *
	 * when only cpu-id is considered.
	 */
	if (rq1->core->cpu < rq2->core->cpu)
		return true;
	if (rq1->core->cpu > rq2->core->cpu)
		return false;

	/*
	 * __sched_core_flip() relies on SMT having cpu-id lock order.
	 */
#endif
	return rq1->cpu < rq2->cpu;
}

extern void double_rq_lock(struct rq *rq1, struct rq *rq2);

#ifdef CONFIG_PREEMPTION

/*
 * fair double_lock_balance: Safely acquires both rq->locks in a fair
 * way at the expense of forcing extra atomic operations in all
 * invocations.  This assures that the double_lock is acquired using the
 * same underlying policy as the spinlock_t on this architecture, which
 * reduces latency compared to the unfair variant below.  However, it
 * also adds more overhead and therefore may reduce throughput.
 */
static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
	__releases(this_rq->lock)
	__acquires(busiest->lock)
	__acquires(this_rq->lock)
{
	raw_spin_rq_unlock(this_rq);
	double_rq_lock(this_rq, busiest);

	return 1;
}

#else
/*
 * Unfair double_lock_balance: Optimizes throughput at the expense of
 * latency by eliminating extra atomic operations when the locks are
 * already in proper order on entry.  This favors lower CPU-ids and will
 * grant the double lock to lower CPUs over higher ids under contention,
 * regardless of entry order into the function.
 */
static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
	__releases(this_rq->lock)
	__acquires(busiest->lock)
	__acquires(this_rq->lock)
{
	if (__rq_lockp(this_rq) == __rq_lockp(busiest) ||
	    likely(raw_spin_rq_trylock(busiest))) {
		double_rq_clock_clear_update(this_rq, busiest);
		return 0;
	}

	if (rq_order_less(this_rq, busiest)) {
		raw_spin_rq_lock_nested(busiest, SINGLE_DEPTH_NESTING);
		double_rq_clock_clear_update(this_rq, busiest);
		return 0;
	}

	raw_spin_rq_unlock(this_rq);
	double_rq_lock(this_rq, busiest);

	return 1;
}

#endif /* CONFIG_PREEMPTION */

/*
 * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
 */
static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest)
{
	lockdep_assert_irqs_disabled();

	return _double_lock_balance(this_rq, busiest);
}

static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
	__releases(busiest->lock)
{
	if (__rq_lockp(this_rq) != __rq_lockp(busiest))
		raw_spin_rq_unlock(busiest);
	lock_set_subclass(&__rq_lockp(this_rq)->dep_map, 0, _RET_IP_);
}

static inline void double_lock(spinlock_t *l1, spinlock_t *l2)
{
	if (l1 > l2)
		swap(l1, l2);

	spin_lock(l1);
	spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
}

static inline void double_lock_irq(spinlock_t *l1, spinlock_t *l2)
{
	if (l1 > l2)
		swap(l1, l2);

	spin_lock_irq(l1);
	spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
}

static inline void double_raw_lock(raw_spinlock_t *l1, raw_spinlock_t *l2)
{
	if (l1 > l2)
		swap(l1, l2);

	raw_spin_lock(l1);
	raw_spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
}

static inline void double_raw_unlock(raw_spinlock_t *l1, raw_spinlock_t *l2)
{
	raw_spin_unlock(l1);
	raw_spin_unlock(l2);
}

DEFINE_LOCK_GUARD_2(double_raw_spinlock, raw_spinlock_t,
		    double_raw_lock(_T->lock, _T->lock2),
		    double_raw_unlock(_T->lock, _T->lock2))
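
/*
 * Usage sketch for the guard defined above (cleanup.h-style scoped locking):
 * both raw spinlocks are taken in address order and released automatically
 * at end of scope. my_transfer() is a hypothetical caller, not a function
 * in this file:
 *
 *	static void my_transfer(raw_spinlock_t *a, raw_spinlock_t *b)
 *	{
 *		guard(double_raw_spinlock)(a, b);
 *		...
 *	}
 */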

/*
 * double_rq_unlock - safely unlock two runqueues
 *
 * Note this does not restore interrupts like task_rq_unlock;
 * you need to do so manually after calling.
 */
static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
	__releases(rq1->lock)
	__releases(rq2->lock)
{
	if (__rq_lockp(rq1) != __rq_lockp(rq2))
		raw_spin_rq_unlock(rq2);
	else
		__release(rq2->lock);
	raw_spin_rq_unlock(rq1);
}

extern void set_rq_online (struct rq *rq);
extern void set_rq_offline(struct rq *rq);
extern bool sched_smp_initialized;

#else /* CONFIG_SMP */

/*
 * double_rq_lock - safely lock two runqueues
 *
 * Note this does not disable interrupts like task_rq_lock;
 * you need to do so manually before calling.
 */
static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
	__acquires(rq1->lock)
	__acquires(rq2->lock)
{
	WARN_ON_ONCE(!irqs_disabled());
	WARN_ON_ONCE(rq1 != rq2);
	raw_spin_rq_lock(rq1);
	__acquire(rq2->lock);	/* Fake it out ;) */
	double_rq_clock_clear_update(rq1, rq2);
}

/*
 * double_rq_unlock - safely unlock two runqueues
 *
 * Note this does not restore interrupts like task_rq_unlock;
 * you need to do so manually after calling.
 */
static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
	__releases(rq1->lock)
	__releases(rq2->lock)
{
	WARN_ON_ONCE(rq1 != rq2);
	raw_spin_rq_unlock(rq1);
	__release(rq2->lock);
}

#endif

DEFINE_LOCK_GUARD_2(double_rq_lock, struct rq,
		    double_rq_lock(_T->lock, _T->lock2),
		    double_rq_unlock(_T->lock, _T->lock2))

extern struct sched_entity *__pick_root_entity(struct cfs_rq *cfs_rq);
extern struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq);
extern struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq);

#ifdef	CONFIG_SCHED_DEBUG
extern bool sched_debug_verbose;

extern void print_cfs_stats(struct seq_file *m, int cpu);
extern void print_rt_stats(struct seq_file *m, int cpu);
extern void print_dl_stats(struct seq_file *m, int cpu);
extern void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);
extern void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq);

extern void resched_latency_warn(int cpu, u64 latency);
#ifdef CONFIG_NUMA_BALANCING
extern void
show_numa_stats(struct task_struct *p, struct seq_file *m);
extern void
print_numa_stats(struct seq_file *m, int node, unsigned long tsf,
	unsigned long tpf, unsigned long gsf, unsigned long gpf);
#endif /* CONFIG_NUMA_BALANCING */
#else
static inline void resched_latency_warn(int cpu, u64 latency) {}
#endif /* CONFIG_SCHED_DEBUG */

extern void init_cfs_rq(struct cfs_rq *cfs_rq);
extern void init_rt_rq(struct rt_rq *rt_rq);
extern void init_dl_rq(struct dl_rq *dl_rq);

extern void cfs_bandwidth_usage_inc(void);
extern void cfs_bandwidth_usage_dec(void);

#ifdef CONFIG_NO_HZ_COMMON
#define NOHZ_BALANCE_KICK_BIT	0
#define NOHZ_STATS_KICK_BIT	1
#define NOHZ_NEWILB_KICK_BIT	2
#define NOHZ_NEXT_KICK_BIT	3

/* Run rebalance_domains() */
#define NOHZ_BALANCE_KICK	BIT(NOHZ_BALANCE_KICK_BIT)
/* Update blocked load */
#define NOHZ_STATS_KICK		BIT(NOHZ_STATS_KICK_BIT)
/* Update blocked load when entering idle */
#define NOHZ_NEWILB_KICK	BIT(NOHZ_NEWILB_KICK_BIT)
/* Update nohz.next_balance */
#define NOHZ_NEXT_KICK		BIT(NOHZ_NEXT_KICK_BIT)

#define NOHZ_KICK_MASK	(NOHZ_BALANCE_KICK | NOHZ_STATS_KICK | NOHZ_NEXT_KICK)

#define nohz_flags(cpu)	(&cpu_rq(cpu)->nohz_flags)

extern void nohz_balance_exit_idle(struct rq *rq);
#else
static inline void nohz_balance_exit_idle(struct rq *rq) { }
#endif

#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
extern void nohz_run_idle_balance(int cpu);
#else
static inline void nohz_run_idle_balance(int cpu) { }
#endif

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
struct irqtime {
	u64			total;
	u64			tick_delta;
	u64			irq_start_time;
	struct u64_stats_sync	sync;
};

DECLARE_PER_CPU(struct irqtime, cpu_irqtime);

/*
 * Returns the irqtime minus the softirq time computed by ksoftirqd.
 * Otherwise ksoftirqd's sum_exec_runtime would have its own runtime
 * subtracted from it and would never move forward.
 */
static inline u64 irq_time_read(int cpu)
{
	struct irqtime *irqtime = &per_cpu(cpu_irqtime, cpu);
	unsigned int seq;
	u64 total;

	do {
		seq = __u64_stats_fetch_begin(&irqtime->sync);
		total = irqtime->total;
	} while (__u64_stats_fetch_retry(&irqtime->sync, seq));

	return total;
}
#endif /* CONFIG_IRQ_TIME_ACCOUNTING */

#ifdef CONFIG_CPU_FREQ
DECLARE_PER_CPU(struct update_util_data __rcu *, cpufreq_update_util_data);

/**
 * cpufreq_update_util - Take a note about CPU utilization changes.
 * @rq: Runqueue to carry out the update for.
 * @flags: Update reason flags.
 *
 * This function is called by the scheduler on the CPU whose utilization is
 * being updated.
 *
 * It can only be called from RCU-sched read-side critical sections.
 *
 * The way cpufreq is currently arranged requires it to evaluate the CPU
 * performance state (frequency/voltage) on a regular basis to prevent it from
 * being stuck in a completely inadequate performance level for too long.
 * That is not guaranteed to happen if the updates are only triggered from CFS
 * and DL, though, because they may stop coming in entirely when only RT tasks
 * are active.
 *
 * As a workaround for that issue, this function is called periodically by the
 * RT sched class to trigger extra cpufreq updates to prevent it from stalling,
 * but that really is a band-aid.  Going forward it should be replaced with
 * solutions targeted more specifically at RT tasks.
 */
static inline void cpufreq_update_util(struct rq *rq, unsigned int flags)
{
	struct update_util_data *data;

	data = rcu_dereference_sched(*per_cpu_ptr(&cpufreq_update_util_data,
						  cpu_of(rq)));
	if (data)
		data->func(data, rq_clock(rq), flags);
}
#else
static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) {}
#endif /* CONFIG_CPU_FREQ */

#ifdef arch_scale_freq_capacity
# ifndef arch_scale_freq_invariant
#  define arch_scale_freq_invariant()	true
# endif
#else
# define arch_scale_freq_invariant()	false
#endif

#ifdef CONFIG_SMP
unsigned long effective_cpu_util(int cpu, unsigned long util_cfs,
				 unsigned long *min,
				 unsigned long *max);

unsigned long sugov_effective_cpu_perf(int cpu, unsigned long actual,
				 unsigned long min,
				 unsigned long max);

/*
 * Verify the fitness of task @p to run on @cpu taking into account the
 * CPU's original capacity and the runtime/deadline ratio of the task.
 *
 * The function returns true if the original capacity of @cpu is
 * greater than or equal to the task's deadline density right-shifted by
 * (BW_SHIFT - SCHED_CAPACITY_SHIFT), and false otherwise.
 */
static inline bool dl_task_fits_capacity(struct task_struct *p, int cpu)
{
	unsigned long cap = arch_scale_cpu_capacity(cpu);

	return cap >= p->dl.dl_density >> (BW_SHIFT - SCHED_CAPACITY_SHIFT);
}
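
/*
 * Worked example: dl_density is (dl_runtime << BW_SHIFT) / dl_deadline, so
 * a task with runtime 3ms per 10ms deadline has a density of ~0.3 in BW_UNIT
 * fixed point. Shifting right by (BW_SHIFT - SCHED_CAPACITY_SHIFT) = 10
 * rescales that to the capacity range: ~0.3 * 1024 ~= 307. A CPU with
 * arch_scale_cpu_capacity() of 512 fits this task; one with 256 does not.
 */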

static inline unsigned long cpu_bw_dl(struct rq *rq)
{
	return (rq->dl.running_bw * SCHED_CAPACITY_SCALE) >> BW_SHIFT;
}

static inline unsigned long cpu_util_dl(struct rq *rq)
{
	return READ_ONCE(rq->avg_dl.util_avg);
}

extern unsigned long cpu_util_cfs(int cpu);
extern unsigned long cpu_util_cfs_boost(int cpu);

static inline unsigned long cpu_util_rt(struct rq *rq)
{
	return READ_ONCE(rq->avg_rt.util_avg);
}
#endif

#ifdef CONFIG_UCLAMP_TASK
unsigned long uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id);

static inline unsigned long uclamp_rq_get(struct rq *rq,
					  enum uclamp_id clamp_id)
{
	return READ_ONCE(rq->uclamp[clamp_id].value);
}

static inline void uclamp_rq_set(struct rq *rq, enum uclamp_id clamp_id,
				 unsigned int value)
{
	WRITE_ONCE(rq->uclamp[clamp_id].value, value);
}

static inline bool uclamp_rq_is_idle(struct rq *rq)
{
	return rq->uclamp_flags & UCLAMP_FLAG_IDLE;
}

/* Is the rq being capped/throttled by uclamp_max? */
static inline bool uclamp_rq_is_capped(struct rq *rq)
{
	unsigned long rq_util;
	unsigned long max_util;

	if (!static_branch_likely(&sched_uclamp_used))
		return false;

	rq_util = cpu_util_cfs(cpu_of(rq)) + cpu_util_rt(rq);
	max_util = READ_ONCE(rq->uclamp[UCLAMP_MAX].value);

	return max_util != SCHED_CAPACITY_SCALE && rq_util >= max_util;
}

/*
 * When uclamp is compiled in, the aggregation at rq level is 'turned off'
 * by default in the fast path and only gets turned on once userspace performs
 * an operation that requires it.
 *
 * Returns true if userspace has opted in to using uclamp, in which case the
 * rq-level aggregation is active.
 */
static inline bool uclamp_is_used(void)
{
	return static_branch_likely(&sched_uclamp_used);
}
#else /* CONFIG_UCLAMP_TASK */
static inline unsigned long uclamp_eff_value(struct task_struct *p,
					     enum uclamp_id clamp_id)
{
	if (clamp_id == UCLAMP_MIN)
		return 0;

	return SCHED_CAPACITY_SCALE;
}

static inline bool uclamp_rq_is_capped(struct rq *rq) { return false; }

static inline bool uclamp_is_used(void)
{
	return false;
}

static inline unsigned long uclamp_rq_get(struct rq *rq,
					  enum uclamp_id clamp_id)
{
	if (clamp_id == UCLAMP_MIN)
		return 0;

	return SCHED_CAPACITY_SCALE;
}

static inline void uclamp_rq_set(struct rq *rq, enum uclamp_id clamp_id,
				 unsigned int value)
{
}

static inline bool uclamp_rq_is_idle(struct rq *rq)
{
	return false;
}
#endif /* CONFIG_UCLAMP_TASK */

#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
static inline unsigned long cpu_util_irq(struct rq *rq)
{
	return READ_ONCE(rq->avg_irq.util_avg);
}

static inline
unsigned long scale_irq_capacity(unsigned long util, unsigned long irq, unsigned long max)
{
	util *= (max - irq);
	util /= max;

	return util;
}
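
/*
 * Worked example: with max = 1024 (SCHED_CAPACITY_SCALE) and irq = 256, a
 * task utilization of 600 scales to 600 * (1024 - 256) / 1024 = 450: the
 * quarter of the CPU time spent in IRQ context is unavailable to tasks, so
 * their measured utilization is deflated accordingly.
 */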
#else
static inline unsigned long cpu_util_irq(struct rq *rq)
{
	return 0;
}

static inline
unsigned long scale_irq_capacity(unsigned long util, unsigned long irq, unsigned long max)
{
	return util;
}
#endif

#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)

#define perf_domain_span(pd) (to_cpumask(((pd)->em_pd->cpus)))

DECLARE_STATIC_KEY_FALSE(sched_energy_present);

static inline bool sched_energy_enabled(void)
{
	return static_branch_unlikely(&sched_energy_present);
}

extern struct cpufreq_governor schedutil_gov;

#else /* ! (CONFIG_ENERGY_MODEL && CONFIG_CPU_FREQ_GOV_SCHEDUTIL) */

#define perf_domain_span(pd) NULL
static inline bool sched_energy_enabled(void) { return false; }

#endif /* CONFIG_ENERGY_MODEL && CONFIG_CPU_FREQ_GOV_SCHEDUTIL */

#ifdef CONFIG_MEMBARRIER
/*
 * The scheduler provides memory barriers required by membarrier between:
 * - prior user-space memory accesses and store to rq->membarrier_state,
 * - store to rq->membarrier_state and following user-space memory accesses.
 * In the same way it provides those guarantees around store to rq->curr.
 */
static inline void membarrier_switch_mm(struct rq *rq,
					struct mm_struct *prev_mm,
					struct mm_struct *next_mm)
{
	int membarrier_state;

	if (prev_mm == next_mm)
		return;

	membarrier_state = atomic_read(&next_mm->membarrier_state);
	if (READ_ONCE(rq->membarrier_state) == membarrier_state)
		return;

	WRITE_ONCE(rq->membarrier_state, membarrier_state);
}
#else
static inline void membarrier_switch_mm(struct rq *rq,
					struct mm_struct *prev_mm,
					struct mm_struct *next_mm)
{
}
#endif

#ifdef CONFIG_SMP
static inline bool is_per_cpu_kthread(struct task_struct *p)
{
	if (!(p->flags & PF_KTHREAD))
		return false;

	if (p->nr_cpus_allowed != 1)
		return false;

	return true;
}
#endif

extern void swake_up_all_locked(struct swait_queue_head *q);
extern void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait);

extern int try_to_wake_up(struct task_struct *tsk, unsigned int state, int wake_flags);

#ifdef CONFIG_PREEMPT_DYNAMIC
extern int preempt_dynamic_mode;
extern int sched_dynamic_mode(const char *str);
extern void sched_dynamic_update(int mode);
#endif

#ifdef CONFIG_SCHED_MM_CID

#define SCHED_MM_CID_PERIOD_NS	(100ULL * 1000000)	/* 100ms */
#define MM_CID_SCAN_DELAY	100			/* 100ms */

extern raw_spinlock_t cid_lock;
extern int use_cid_lock;

extern void sched_mm_cid_migrate_from(struct task_struct *t);
extern void sched_mm_cid_migrate_to(struct rq *dst_rq, struct task_struct *t);
extern void task_tick_mm_cid(struct rq *rq, struct task_struct *curr);
extern void init_sched_mm_cid(struct task_struct *t);

static inline void __mm_cid_put(struct mm_struct *mm, int cid)
{
	if (cid < 0)
		return;
	cpumask_clear_cpu(cid, mm_cidmask(mm));
}

/*
 * The per-mm/cpu cid can have the MM_CID_LAZY_PUT flag set or transition to
 * the MM_CID_UNSET state without holding the rq lock, but the rq lock needs to
 * be held to transition to other states.
 *
 * State transitions synchronized with cmpxchg or try_cmpxchg need to be
 * consistent across cpus, which prevents use of this_cpu_cmpxchg.
 */
static inline void mm_cid_put_lazy(struct task_struct *t)
{
	struct mm_struct *mm = t->mm;
	struct mm_cid __percpu *pcpu_cid = mm->pcpu_cid;
	int cid;

	lockdep_assert_irqs_disabled();
	cid = __this_cpu_read(pcpu_cid->cid);
	if (!mm_cid_is_lazy_put(cid) ||
	    !try_cmpxchg(&this_cpu_ptr(pcpu_cid)->cid, &cid, MM_CID_UNSET))
		return;
	__mm_cid_put(mm, mm_cid_clear_lazy_put(cid));
}

static inline int mm_cid_pcpu_unset(struct mm_struct *mm)
{
	struct mm_cid __percpu *pcpu_cid = mm->pcpu_cid;
	int cid, res;

	lockdep_assert_irqs_disabled();
	cid = __this_cpu_read(pcpu_cid->cid);
	for (;;) {
		if (mm_cid_is_unset(cid))
			return MM_CID_UNSET;
		/*
		 * Attempt transition from valid or lazy-put to unset.
		 */
		res = cmpxchg(&this_cpu_ptr(pcpu_cid)->cid, cid, MM_CID_UNSET);
		if (res == cid)
			break;
		cid = res;
	}
	return cid;
}

static inline void mm_cid_put(struct mm_struct *mm)
{
	int cid;

	lockdep_assert_irqs_disabled();
	cid = mm_cid_pcpu_unset(mm);
	if (cid == MM_CID_UNSET)
		return;
	__mm_cid_put(mm, mm_cid_clear_lazy_put(cid));
}

static inline int __mm_cid_try_get(struct mm_struct *mm)
{
	struct cpumask *cpumask;
	int cid;

	cpumask = mm_cidmask(mm);
	/*
	 * Retry finding the first zero bit if the mask is temporarily
	 * filled. This only happens during a concurrent remote-clear,
	 * which owns a cid without holding a rq lock.
	 */
	for (;;) {
		cid = cpumask_first_zero(cpumask);
		if (cid < nr_cpu_ids)
			break;
		cpu_relax();
	}
	if (cpumask_test_and_set_cpu(cid, cpumask))
		return -1;
	return cid;
}

/*
 * Save a snapshot of the current runqueue time of this cpu
 * with the per-cpu cid value, allowing us to estimate how recently it was used.
 */
static inline void mm_cid_snapshot_time(struct rq *rq, struct mm_struct *mm)
{
	struct mm_cid *pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu_of(rq));

	lockdep_assert_rq_held(rq);
	WRITE_ONCE(pcpu_cid->time, rq->clock);
}

static inline int __mm_cid_get(struct rq *rq, struct mm_struct *mm)
{
	int cid;

	/*
	 * All allocations (even those using the cid_lock) are lock-free. If
	 * use_cid_lock is set, hold the cid_lock to perform cid allocation to
	 * guarantee forward progress.
	 */
	if (!READ_ONCE(use_cid_lock)) {
		cid = __mm_cid_try_get(mm);
		if (cid >= 0)
			goto end;
		raw_spin_lock(&cid_lock);
	} else {
		raw_spin_lock(&cid_lock);
		cid = __mm_cid_try_get(mm);
		if (cid >= 0)
			goto unlock;
	}

	/*
	 * The cid was concurrently allocated. Retry while forcing subsequent
	 * allocations to use the cid_lock to ensure forward progress.
	 */
	WRITE_ONCE(use_cid_lock, 1);
	/*
	 * Set use_cid_lock before allocation. Only care about program order
	 * because this is only required for forward progress.
	 */
	barrier();
	/*
	 * Retry until it succeeds. It is guaranteed to eventually succeed once
	 * all newly arriving allocations observe the use_cid_lock flag set.
	 */
	do {
		cid = __mm_cid_try_get(mm);
		cpu_relax();
	} while (cid < 0);
	/*
	 * Allocate before clearing use_cid_lock. Only care about
	 * program order because this is for forward progress.
	 */
	barrier();
	WRITE_ONCE(use_cid_lock, 0);
unlock:
	raw_spin_unlock(&cid_lock);
end:
	mm_cid_snapshot_time(rq, mm);
	return cid;
}

static inline int mm_cid_get(struct rq *rq, struct mm_struct *mm)
{
	struct mm_cid __percpu *pcpu_cid = mm->pcpu_cid;
	struct cpumask *cpumask;
	int cid;

	lockdep_assert_rq_held(rq);
	cpumask = mm_cidmask(mm);
	cid = __this_cpu_read(pcpu_cid->cid);
	if (mm_cid_is_valid(cid)) {
		mm_cid_snapshot_time(rq, mm);
		return cid;
	}
	if (mm_cid_is_lazy_put(cid)) {
		if (try_cmpxchg(&this_cpu_ptr(pcpu_cid)->cid, &cid, MM_CID_UNSET))
			__mm_cid_put(mm, mm_cid_clear_lazy_put(cid));
	}
	cid = __mm_cid_get(rq, mm);
	__this_cpu_write(pcpu_cid->cid, cid);
	return cid;
}

static inline void switch_mm_cid(struct rq *rq,
				 struct task_struct *prev,
				 struct task_struct *next)
{
	/*
	 * Provide a memory barrier between rq->curr store and load of
	 * {prev,next}->mm->pcpu_cid[cpu] on rq->curr->mm transition.
	 *
	 * Should be adapted if context_switch() is modified.
	 */
	if (!next->mm) {                                // to kernel
		/*
		 * user -> kernel transition does not guarantee a barrier, but
		 * we can use the fact that it performs an atomic operation in
		 * mmgrab().
		 */
		if (prev->mm)                           // from user
			smp_mb__after_mmgrab();
		/*
		 * kernel -> kernel transition does not change rq->curr->mm
		 * state. It stays NULL.
		 */
	} else {                                        // to user
		/*
		 * kernel -> user transition does not provide a barrier
		 * between rq->curr store and load of {prev,next}->mm->pcpu_cid[cpu].
		 * Provide it here.
		 */
		if (!prev->mm) {                        // from kernel
			smp_mb();
		} else {				// from user
			/*
			 * user->user transition relies on an implicit
			 * memory barrier in switch_mm() when
			 * current->mm changes. If the architecture
			 * switch_mm() does not have an implicit memory
			 * barrier, it is emitted here.  If current->mm
			 * is unchanged, no barrier is needed.
			 */
			smp_mb__after_switch_mm();
		}
	}
	if (prev->mm_cid_active) {
		mm_cid_snapshot_time(rq, prev->mm);
		mm_cid_put_lazy(prev);
		prev->mm_cid = -1;
	}
	if (next->mm_cid_active)
		next->last_mm_cid = next->mm_cid = mm_cid_get(rq, next->mm);
}

#else
static inline void switch_mm_cid(struct rq *rq, struct task_struct *prev, struct task_struct *next) { }
static inline void sched_mm_cid_migrate_from(struct task_struct *t) { }
static inline void sched_mm_cid_migrate_to(struct rq *dst_rq, struct task_struct *t) { }
static inline void task_tick_mm_cid(struct rq *rq, struct task_struct *curr) { }
static inline void init_sched_mm_cid(struct task_struct *t) { }
#endif

extern u64 avg_vruntime(struct cfs_rq *cfs_rq);
extern int entity_eligible(struct cfs_rq *cfs_rq, struct sched_entity *se);

#endif /* _KERNEL_SCHED_SCHED_H */