// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic entry points for the idle threads and
 * implementation of the idle task scheduling class.
 *
 * (NOTE: these are not related to SCHED_IDLE batch scheduled
 *        tasks which are handled in sched/fair.c )
 */

/* Linker adds these: start and end of __cpuidle functions */
extern char __cpuidle_text_start[], __cpuidle_text_end[];

/**
 * sched_idle_set_state - Record idle state for the current CPU.
 * @idle_state: State to record.
 */
void sched_idle_set_state(struct cpuidle_state *idle_state)
{
	idle_set_state(this_rq(), idle_state);
}

static int __read_mostly cpu_idle_force_poll;

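/**
 * cpu_idle_poll_ctrl - Force or undo forced polling idle.
 * @enable: true to force the idle loop to poll, false to undo a prior call.
 *
 * Calls are counted, so every enable must be balanced by a disable; an
 * unbalanced disable trips the WARN_ON_ONCE() below.
 */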
void cpu_idle_poll_ctrl(bool enable)
{
	if (enable) {
		cpu_idle_force_poll++;
	} else {
		cpu_idle_force_poll--;
		WARN_ON_ONCE(cpu_idle_force_poll < 0);
	}
}

#ifdef CONFIG_GENERIC_IDLE_POLL_SETUP
static int __init cpu_idle_poll_setup(char *__unused)
{
	cpu_idle_force_poll = 1;

	return 1;
}
__setup("nohlt", cpu_idle_poll_setup);

static int __init cpu_idle_nopoll_setup(char *__unused)
{
	cpu_idle_force_poll = 0;

	return 1;
}
__setup("hlt", cpu_idle_nopoll_setup);
#endif

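/*
 * Polled idle: spin with interrupts enabled until a reschedule is needed,
 * rather than entering a low-power state.  Used when polling is forced
 * (the "nohlt" option or cpu_idle_poll_ctrl()) or when the tick broadcast
 * device has already expired for this CPU.
 */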
static noinline int __cpuidle cpu_idle_poll(void)
{
	instrumentation_begin();
	trace_cpu_idle(0, smp_processor_id());
	stop_critical_timings();
	ct_cpuidle_enter();

	raw_local_irq_enable();
	while (!tif_need_resched() &&
	       (cpu_idle_force_poll || tick_check_broadcast_expired()))
		cpu_relax();
	raw_local_irq_disable();

	ct_cpuidle_exit();
	start_critical_timings();
	trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
	local_irq_enable();
	instrumentation_end();

	return 1;
}

/* Weak implementations for optional arch specific functions */
void __weak arch_cpu_idle_prepare(void) { }
void __weak arch_cpu_idle_enter(void) { }
void __weak arch_cpu_idle_exit(void) { }
void __weak __noreturn arch_cpu_idle_dead(void) { while (1); }
void __weak arch_cpu_idle(void)
{
	cpu_idle_force_poll = 1;
}
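
/*
 * The default arch_cpu_idle() above cannot actually sleep the CPU, so it
 * switches the idle loop into forced-poll mode; subsequent iterations then
 * spin in cpu_idle_poll() instead.  Architectures are expected to override
 * it with a real low-power wait (e.g. "wfi").
 */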

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST_IDLE
DEFINE_STATIC_KEY_FALSE(arch_needs_tick_broadcast);

static inline void cond_tick_broadcast_enter(void)
{
	if (static_branch_unlikely(&arch_needs_tick_broadcast))
		tick_broadcast_enter();
}

static inline void cond_tick_broadcast_exit(void)
{
	if (static_branch_unlikely(&arch_needs_tick_broadcast))
		tick_broadcast_exit();
}
#else
static inline void cond_tick_broadcast_enter(void) { }
static inline void cond_tick_broadcast_exit(void) { }
#endif
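
/*
 * An architecture whose local timer stops in deep idle selects
 * CONFIG_GENERIC_CLOCKEVENTS_BROADCAST_IDLE and flips the static key from
 * its own setup code.  Illustrative sketch only (the condition below is
 * hypothetical, not part of this file):
 *
 *	if (local_timer_stops_in_deep_idle())
 *		static_branch_enable(&arch_needs_tick_broadcast);
 */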

/**
 * default_idle_call - Default CPU idle routine.
 *
 * Used when the cpuidle framework cannot be used.
 */
void __cpuidle default_idle_call(void)
{
	instrumentation_begin();
	if (!current_clr_polling_and_test()) {
		cond_tick_broadcast_enter();
		trace_cpu_idle(1, smp_processor_id());
		stop_critical_timings();

		ct_cpuidle_enter();
		arch_cpu_idle();
		ct_cpuidle_exit();

		start_critical_timings();
		trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
		cond_tick_broadcast_exit();
	}
	local_irq_enable();
	instrumentation_end();
}

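/*
 * Enter the suspend-to-idle state, unless a reschedule is already pending.
 * current_clr_polling_and_test() clears the polling bit first, so from here
 * on a remote wakeup has to use an IPI instead of relying on polling.
 */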
static int call_cpuidle_s2idle(struct cpuidle_driver *drv,
			       struct cpuidle_device *dev)
{
	if (current_clr_polling_and_test())
		return -EBUSY;

	return cpuidle_enter_s2idle(drv, dev);
}

static int call_cpuidle(struct cpuidle_driver *drv, struct cpuidle_device *dev,
			int next_state)
{
	/*
	 * The idle task needs to be rescheduled, so going idle is pointless:
	 * record a zero idle residency and return.
	 */
	if (current_clr_polling_and_test()) {
		dev->last_residency_ns = 0;
		local_irq_enable();
		return -EBUSY;
	}

	/*
	 * Enter the idle state previously returned by the governor decision.
	 * This function will block until an interrupt occurs and will take
	 * care of re-enabling the local interrupts.
	 */
	return cpuidle_enter(drv, dev, next_state);
}

/**
 * cpuidle_idle_call - the main idle function
 *
 * NOTE: no locks or semaphores should be used here
 *
 * On architectures that support TIF_POLLING_NRFLAG, this function is called
 * with polling set, and it returns with polling set.  If it ever stops
 * polling, it must clear the polling bit.
 */
static void cpuidle_idle_call(void)
{
	struct cpuidle_device *dev = cpuidle_get_device();
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
	int next_state, entered_state;

	/*
	 * Check if the idle task must be rescheduled. If it is the
	 * case, exit the function after re-enabling the local irq.
	 */
	if (need_resched()) {
		local_irq_enable();
		return;
	}

	/*
	 * The RCU framework needs to be told that we are entering an idle
	 * section, so no more RCU read-side critical sections are allowed
	 * from this point on, and the grace period can advance one more step.
	 */

	if (cpuidle_not_available(drv, dev)) {
		tick_nohz_idle_stop_tick();

		default_idle_call();
		goto exit_idle;
	}

	/*
	 * Suspend-to-idle ("s2idle") is a system state in which all user space
	 * has been frozen, all I/O devices have been suspended and the only
	 * activity happens here and in interrupts (if any). In that case bypass
	 * the cpuidle governor and go straight for the deepest idle state
	 * available.  Possibly also suspend the local tick and the entire
	 * timekeeping to prevent timer interrupts from kicking us out of idle
	 * until a proper wakeup interrupt happens.
	 */

	if (idle_should_enter_s2idle() || dev->forced_idle_latency_limit_ns) {
		u64 max_latency_ns;

		if (idle_should_enter_s2idle()) {

			entered_state = call_cpuidle_s2idle(drv, dev);
			if (entered_state > 0)
				goto exit_idle;

			max_latency_ns = U64_MAX;
		} else {
			max_latency_ns = dev->forced_idle_latency_limit_ns;
		}

		tick_nohz_idle_stop_tick();

		next_state = cpuidle_find_deepest_state(drv, dev, max_latency_ns);
		call_cpuidle(drv, dev, next_state);
	} else {
		bool stop_tick = true;

		/*
		 * Ask the cpuidle framework to choose a convenient idle state.
		 */
		next_state = cpuidle_select(drv, dev, &stop_tick);

		if (stop_tick || tick_nohz_tick_stopped())
			tick_nohz_idle_stop_tick();
		else
			tick_nohz_idle_retain_tick();

		entered_state = call_cpuidle(drv, dev, next_state);
		/*
		 * Give the governor an opportunity to reflect on the outcome
		 */
		cpuidle_reflect(dev, entered_state);
	}

exit_idle:
	__current_set_polling();

	/*
	 * It is up to the idle functions to re-enable local interrupts
	 */
	if (WARN_ON_ONCE(irqs_disabled()))
		local_irq_enable();
}

/*
 * Generic idle loop implementation
 *
 * Called with polling cleared.
 */
static void do_idle(void)
{
	int cpu = smp_processor_id();

	/*
	 * Check if we need to update blocked load
	 */
	nohz_run_idle_balance(cpu);

	/*
	 * If the arch has a polling bit, we maintain an invariant:
	 *
	 * Our polling bit is clear if we're not scheduled (i.e. if rq->curr !=
	 * rq->idle). This means that, if rq->idle has the polling bit set,
	 * then setting need_resched is guaranteed to cause the CPU to
	 * reschedule.
	 */

	__current_set_polling();
	tick_nohz_idle_enter();

	while (!need_resched()) {
		rmb();

		/*
		 * Interrupts must not be re-enabled from this point on until
		 * the CPU sleeping instruction is reached. Otherwise an interrupt
		 * may fire and queue a timer that would be ignored until the CPU
		 * wakes from the sleeping instruction, and testing need_resched()
		 * alone does not reveal such a pending timer reprogram.
		 *
		 * Several cases to consider:
		 *
		 * - SLEEP-UNTIL-PENDING-INTERRUPT based instructions such as
		 *   "wfi" or "mwait" are fine because they can be entered with
		 *   interrupts disabled.
		 *
		 * - The sti;mwait() couple is fine because interrupts are
		 *   re-enabled only upon the execution of mwait, leaving no gap
		 *   in-between.
		 *
		 * - ROLLBACK based idle handlers with the sleeping instruction
		 *   called with interrupts enabled are NOT fine. In this scheme,
		 *   when the interrupt detects it has interrupted an idle handler,
		 *   it rolls back to its beginning, which performs the
		 *   need_resched() check before re-executing the sleeping
		 *   instruction. This can leave a needed timer reprogram pending.
		 *   If such a scheme is really mandatory due to the lack of an
		 *   appropriate CPU sleeping instruction, then a FAST-FORWARD
		 *   must instead be applied: when the interrupt detects it has
		 *   interrupted an idle handler, it must resume to the end of
		 *   this idle handler so that the generic idle loop is iterated
		 *   again to reprogram the tick.
		 */
		local_irq_disable();

		if (cpu_is_offline(cpu)) {
			cpuhp_report_idle_dead();
			arch_cpu_idle_dead();
		}

		arch_cpu_idle_enter();
		rcu_nocb_flush_deferred_wakeup();

		/*
		 * In poll mode we reenable interrupts and spin. Also if we
		 * detected in the wakeup from idle path that the tick
		 * broadcast device expired for us, we don't want to go deep
		 * idle as we know that the IPI is going to arrive right away.
		 */
		if (cpu_idle_force_poll || tick_check_broadcast_expired()) {
			tick_nohz_idle_restart_tick();
			cpu_idle_poll();
		} else {
			cpuidle_idle_call();
		}
		arch_cpu_idle_exit();
	}

	/*
	 * Since we fell out of the loop above, we know TIF_NEED_RESCHED must
	 * be set, propagate it into PREEMPT_NEED_RESCHED.
	 *
	 * This is required because for polling idle loops we will not have had
	 * an IPI to fold the state for us.
	 */
	preempt_set_need_resched();
	tick_nohz_idle_exit();
	__current_clr_polling();

	/*
	 * We promise to call sched_ttwu_pending() and reschedule if
	 * need_resched() is set while polling is set. That means that clearing
	 * polling needs to be visible before doing these things.
	 */
	smp_mb__after_atomic();

	/*
	 * RCU relies on this call to be done outside of an RCU read-side
	 * critical section.
	 */
	flush_smp_call_function_queue();
	schedule_idle();

	if (unlikely(klp_patch_pending(current)))
		klp_update_patch_state(current);
}

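/*
 * cpu_in_idle - report whether @pc lies within the __cpuidle text section,
 * i.e. whether the sampled CPU was running low-level idle code.
 */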
bool cpu_in_idle(unsigned long pc)
{
	return pc >= (unsigned long)__cpuidle_text_start &&
		pc < (unsigned long)__cpuidle_text_end;
}

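/*
 * Per-call state for play_idle_precise(): a pinned hrtimer that ends the
 * forced-idle period and a completion flag polled by the injecting task.
 */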
struct idle_timer {
	struct hrtimer timer;
	int done;
};

static enum hrtimer_restart idle_inject_timer_fn(struct hrtimer *timer)
{
	struct idle_timer *it = container_of(timer, struct idle_timer, timer);

	WRITE_ONCE(it->done, 1);
	set_tsk_need_resched(current);

	return HRTIMER_NORESTART;
}

void play_idle_precise(u64 duration_ns, u64 latency_ns)
{
	struct idle_timer it;

	/*
	 * Only FIFO tasks can disable the tick since they don't need the forced
	 * preemption.
	 */
	WARN_ON_ONCE(current->policy != SCHED_FIFO);
	WARN_ON_ONCE(current->nr_cpus_allowed != 1);
	WARN_ON_ONCE(!(current->flags & PF_KTHREAD));
	WARN_ON_ONCE(!(current->flags & PF_NO_SETAFFINITY));
	WARN_ON_ONCE(!duration_ns);
	WARN_ON_ONCE(current->mm);

	rcu_sleep_check();
	preempt_disable();
	current->flags |= PF_IDLE;
	cpuidle_use_deepest_state(latency_ns);

	it.done = 0;
	hrtimer_init_on_stack(&it.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
	it.timer.function = idle_inject_timer_fn;
	hrtimer_start(&it.timer, ns_to_ktime(duration_ns),
		      HRTIMER_MODE_REL_PINNED_HARD);

	while (!READ_ONCE(it.done))
		do_idle();

	cpuidle_use_deepest_state(0);
	current->flags &= ~PF_IDLE;

	preempt_fold_need_resched();
	preempt_enable();
}
EXPORT_SYMBOL_GPL(play_idle_precise);
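
/*
 * Illustrative use only (real callers live elsewhere, e.g. the powercap
 * idle-injection framework): a per-CPU SCHED_FIFO kthread with
 * PF_NO_SETAFFINITY set could inject one millisecond of forced idle with
 * no latency constraint:
 *
 *	play_idle_precise(NSEC_PER_MSEC, U64_MAX);
 */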

void cpu_startup_entry(enum cpuhp_state state)
{
	current->flags |= PF_IDLE;
	arch_cpu_idle_prepare();
	cpuhp_online_idle(state);
	while (1)
		do_idle();
}

/*
 * idle-task scheduling class.
 */

#ifdef CONFIG_SMP
static int
select_task_rq_idle(struct task_struct *p, int cpu, int flags)
{
	return task_cpu(p); /* IDLE tasks are never migrated */
}

static int
balance_idle(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
{
	return WARN_ON_ONCE(1);
}
#endif

/*
 * Idle tasks are unconditionally rescheduled:
 */
static void wakeup_preempt_idle(struct rq *rq, struct task_struct *p, int flags)
{
	resched_curr(rq);
}

static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
{
}

static void set_next_task_idle(struct rq *rq, struct task_struct *next, bool first)
{
	update_idle_core(rq);
	schedstat_inc(rq->sched_goidle);
}

#ifdef CONFIG_SMP
static struct task_struct *pick_task_idle(struct rq *rq)
{
	return rq->idle;
}
#endif

struct task_struct *pick_next_task_idle(struct rq *rq)
{
	struct task_struct *next = rq->idle;

	set_next_task_idle(rq, next, true);

	return next;
}

/*
 * It is not legal to sleep in the idle task - print a warning
 * message if some code attempts to do it:
 */
static void
dequeue_task_idle(struct rq *rq, struct task_struct *p, int flags)
{
	raw_spin_rq_unlock_irq(rq);
	printk(KERN_ERR "bad: scheduling from the idle thread!\n");
	dump_stack();
	raw_spin_rq_lock_irq(rq);
}

/*
 * scheduler tick hitting a task of our scheduling class.
 *
 * NOTE: This function can be called remotely by the tick offload that
 * goes along with full dynticks. Therefore no local assumption can be
 * made and everything must be accessed through the @rq and @curr passed
 * in parameters.
 */
static void task_tick_idle(struct rq *rq, struct task_struct *curr, int queued)
{
}

static void switched_to_idle(struct rq *rq, struct task_struct *p)
{
	BUG();
}

static void
prio_changed_idle(struct rq *rq, struct task_struct *p, int oldprio)
{
	BUG();
}

static void update_curr_idle(struct rq *rq)
{
}

/*
 * Simple, special scheduling class for the per-CPU idle tasks:
 */
DEFINE_SCHED_CLASS(idle) = {

	/* no enqueue/yield_task for idle tasks */

	/* dequeue is not valid, we print a debug message there: */
	.dequeue_task		= dequeue_task_idle,

	.wakeup_preempt		= wakeup_preempt_idle,

	.pick_next_task		= pick_next_task_idle,
	.put_prev_task		= put_prev_task_idle,
	.set_next_task		= set_next_task_idle,

#ifdef CONFIG_SMP
	.balance		= balance_idle,
	.pick_task		= pick_task_idle,
	.select_task_rq		= select_task_rq_idle,
	.set_cpus_allowed	= set_cpus_allowed_common,
#endif

	.task_tick		= task_tick_idle,

	.prio_changed		= prio_changed_idle,
	.switched_to		= switched_to_idle,
	.update_curr		= update_curr_idle,
};