/*
 * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	sched_prim.h
 *	Author:	David Golub
 *
 *	Scheduling primitive definitions file
 *
 */

#ifndef	_KERN_SCHED_PRIM_H_
#define _KERN_SCHED_PRIM_H_

#include <mach/boolean.h>
#include <mach/machine/vm_types.h>
#include <mach/kern_return.h>
#include <kern/clock.h>
#include <kern/kern_types.h>
#include <kern/thread.h>
#include <sys/cdefs.h>

#ifdef	MACH_KERNEL_PRIVATE

/* Initialization */
extern void		sched_init(void);

extern void		sched_startup(void);

extern void		sched_timebase_init(void);

/* Force a preemption point for a thread and wait for it to stop running */
extern boolean_t	thread_stop(
						thread_t	thread,
						boolean_t	until_not_runnable);

/* Release a previous stop request */
extern void			thread_unstop(
						thread_t	thread);

/* Wait for a thread to stop running */
extern void			thread_wait(
						thread_t	thread,
						boolean_t	until_not_runnable);
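
/*
 * Illustrative sketch (not part of this interface): thread_stop() and
 * thread_unstop() pair up as a stop/inspect/resume protocol. The helper
 * below is hypothetical; only the call sequence is the point.
 *
 *	if (thread_stop(target, TRUE)) {
 *		// target is off-processor and, with until_not_runnable
 *		// == TRUE, not runnable; safe to inspect its state
 *		inspect_thread_state(target);	// hypothetical helper
 *		thread_unstop(target);		// release the stop request
 *	}
 */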

/* Unblock thread on wake up */
extern boolean_t	thread_unblock(
						thread_t		thread,
						wait_result_t	wresult);

/* Unblock and dispatch thread */
extern kern_return_t	thread_go(
							thread_t		thread,
							wait_result_t	wresult);

/* Handle threads at context switch */
extern void			thread_dispatch(
						thread_t		old_thread,
						thread_t		new_thread);

/* Switch directly to a particular thread */
extern int			thread_run(
						thread_t			self,
						thread_continue_t	continuation,
						void				*parameter,
						thread_t			new_thread);

/* Resume thread with new stack */
extern void			thread_continue(
						thread_t		old_thread);

/* Invoke continuation */
extern void		call_continuation(
					thread_continue_t	continuation,
					void				*parameter,
					wait_result_t		wresult);
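
/*
 * Illustrative sketch (not part of this interface): the continuation
 * model lets a thread block without keeping its kernel stack. The
 * continuation is invoked with the wait result when the thread resumes,
 * and it must not return. Names below are hypothetical.
 *
 *	static void my_wait_continue(void *param, wait_result_t wresult);
 *
 *	static void
 *	my_wait(void *obj)
 *	{
 *		assert_wait((event_t)obj, THREAD_UNINT);
 *		// Does not return past here on wakeup; execution
 *		// resumes in my_wait_continue on a fresh stack
 *		thread_block_parameter(my_wait_continue, obj);
 *		// NOTREACHED
 *	}
 */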

/* Set the current scheduled priority */
extern void		set_sched_pri(
					thread_t		thread,
					int				priority);

/* Set base priority of the specified thread */
extern void		sched_set_thread_base_priority(
					thread_t		thread,
					int				priority);

/* Set the thread to be categorized as 'background' */
extern void             sched_set_thread_throttled(thread_t thread,
                                                   boolean_t wants_throttle);

/* Set the thread's true scheduling mode */
extern void             sched_set_thread_mode(thread_t thread,
                                              sched_mode_t mode);
/* Demote the true scheduler mode */
extern void             sched_thread_mode_demote(thread_t thread,
                                                 uint32_t reason);
/* Un-demote the true scheduler mode */
extern void             sched_thread_mode_undemote(thread_t thread,
                                                   uint32_t reason);

/* Reset scheduled priority of thread */
extern void		compute_priority(
					thread_t		thread,
					boolean_t		override_depress);

/* Adjust scheduled priority of thread during execution */
extern void		compute_my_priority(
					thread_t		thread);

/* Periodic scheduler activity */
extern void		sched_init_thread(void (*)(void));

/* Perform sched_tick housekeeping activities */
extern boolean_t	can_update_priority(
					thread_t		thread);

extern void		update_priority(
					thread_t		thread);

extern void		lightweight_update_priority(
					thread_t		thread);

extern void		sched_traditional_quantum_expire(thread_t	thread);

/* Idle processor thread */
extern void		idle_thread(void);

extern kern_return_t	idle_thread_create(
							processor_t		processor);

/* Continuation return from syscall */
extern void     thread_syscall_return(
                        kern_return_t   ret);

/* Context switch */
extern wait_result_t	thread_block_reason(
							thread_continue_t	continuation,
							void				*parameter,
							ast_t				reason);

/* Reschedule thread for execution */
extern void		thread_setrun(
					thread_t	thread,
					integer_t	options);

#define SCHED_TAILQ		1
#define SCHED_HEADQ		2
#define SCHED_PREEMPT	4
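
/*
 * Illustrative sketch (not part of this interface): the options above
 * are OR-ed together when making a thread runnable, e.g. to place a
 * thread at the head of its runqueue and check for preemption:
 *
 *	thread_setrun(thread, SCHED_PREEMPT | SCHED_HEADQ);
 */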

extern processor_set_t	task_choose_pset(
							task_t			task);

/* Bind the current thread to a particular processor */
extern processor_t		thread_bind(
							processor_t		processor);

/* Choose the best processor to run a thread */
extern processor_t	choose_processor(
							processor_set_t		pset,
							processor_t			processor,
							thread_t			thread);

/* Choose a thread from a processor's priority-based runq */
extern thread_t choose_thread_from_runq(
							processor_t		processor,
							run_queue_t		runq,
							int				priority);

extern void thread_quantum_init(
							thread_t thread);

extern void		run_queue_init(
					run_queue_t		runq);

extern thread_t	run_queue_dequeue(
							run_queue_t		runq,
							integer_t		options);

extern boolean_t	run_queue_enqueue(
							run_queue_t		runq,
							thread_t		thread,
							integer_t		options);

extern void	run_queue_remove(
							run_queue_t		runq,
							thread_t		thread);

#if defined(CONFIG_SCHED_TIMESHARE_CORE)

extern boolean_t        thread_update_add_thread(
                                                 thread_t thread);
extern void             thread_update_process_threads(void);
extern boolean_t        runq_scan(
                                  run_queue_t runq);

void sched_traditional_timebase_init(void);
void sched_traditional_maintenance_continue(void);
boolean_t priority_is_urgent(
                             int priority);
uint32_t sched_traditional_initial_quantum_size(
                                                thread_t thread);
void sched_traditional_init(void);

#endif /* CONFIG_SCHED_TIMESHARE_CORE */

/* Remove thread from its run queue */
extern boolean_t	thread_run_queue_remove(
						thread_t	thread);

extern void		thread_timer_expire(
					void			*thread,
					void			*p1);

extern boolean_t	thread_eager_preemption(
						thread_t thread);

/* Fair Share routines */
#if defined(CONFIG_SCHED_FAIRSHARE_CORE)
void		sched_traditional_fairshare_init(void);

int			sched_traditional_fairshare_runq_count(void);

uint64_t	sched_traditional_fairshare_runq_stats_count_sum(void);

void		sched_traditional_fairshare_enqueue(thread_t thread);

thread_t	sched_traditional_fairshare_dequeue(void);

boolean_t	sched_traditional_fairshare_queue_remove(thread_t thread);
#endif /* CONFIG_SCHED_FAIRSHARE_CORE */

#if defined(CONFIG_SCHED_GRRR)
void		sched_grrr_fairshare_init(void);

int			sched_grrr_fairshare_runq_count(void);

uint64_t	sched_grrr_fairshare_runq_stats_count_sum(void);

void		sched_grrr_fairshare_enqueue(thread_t thread);

thread_t	sched_grrr_fairshare_dequeue(void);

boolean_t	sched_grrr_fairshare_queue_remove(thread_t thread);
#endif /* CONFIG_SCHED_GRRR */

extern boolean_t sched_generic_direct_dispatch_to_idle_processors;

/* Set the maximum interrupt level for the thread */
__private_extern__ wait_interrupt_t thread_interrupt_level(
						wait_interrupt_t interruptible);

__private_extern__ wait_result_t thread_mark_wait_locked(
						thread_t		 thread,
						wait_interrupt_t interruptible);

/* Wake up locked thread directly, passing result */
__private_extern__ kern_return_t clear_wait_internal(
						thread_t		thread,
						wait_result_t	result);

extern void sched_stats_handle_csw(
							processor_t processor,
							int reasons,
							int selfpri,
							int otherpri);

extern void sched_stats_handle_runq_change(
							struct runq_stats *stats,
							int old_count);

#define	SCHED_STATS_CSW(processor, reasons, selfpri, otherpri)	\
do {								\
	if (__builtin_expect(sched_stats_active, 0)) {		\
		sched_stats_handle_csw((processor),		\
				(reasons), (selfpri), (otherpri)); \
	}							\
} while (0)

#define SCHED_STATS_RUNQ_CHANGE(stats, old_count)		\
do {								\
	if (__builtin_expect(sched_stats_active, 0)) {		\
		sched_stats_handle_runq_change((stats),		\
				(old_count));			\
	}							\
} while (0)
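
/*
 * Illustrative sketch (not part of this interface): the stats macros
 * compile down to a branch on sched_stats_active so the common
 * (stats-off) path stays cheap. A context-switch call site might look
 * like (variables hypothetical):
 *
 *	SCHED_STATS_CSW(processor, reason,
 *			self->sched_pri, thread->sched_pri);
 */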

#define THREAD_URGENCY_NONE		0	/* indicates that there is no currently runnable thread */
#define THREAD_URGENCY_BACKGROUND	1	/* indicates that the thread is marked as a "background" thread */
#define THREAD_URGENCY_NORMAL		2	/* indicates that the thread is marked as a "normal" thread */
#define THREAD_URGENCY_REAL_TIME	3	/* indicates that the thread is marked as a "real-time" or urgent thread */
#define	THREAD_URGENCY_MAX		4	/* Marker */
/* Returns the "urgency" of a thread (provided by scheduler) */
extern int	thread_get_urgency(
					thread_t	thread,
					uint64_t	*rt_period,
					uint64_t	*rt_deadline);
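
/*
 * Illustrative sketch (not part of this interface): a caller might
 * query urgency before making a power or idle decision. The policy
 * shown is hypothetical; only the calling convention is the point.
 *
 *	uint64_t rt_period, rt_deadline;
 *	int urgency = thread_get_urgency(thread, &rt_period, &rt_deadline);
 *	if (urgency == THREAD_URGENCY_REAL_TIME) {
 *		// rt_deadline is an absolute time; prefer shallow idle
 *	}
 */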

/* Tells the "urgency" of the just-scheduled thread (provided by CPU PM) */
extern void	thread_tell_urgency(
					int		urgency,
					uint64_t	rt_period,
					uint64_t	rt_deadline,
					thread_t	nthread);

/* Tells whether there are "active" RT threads in the system (provided by CPU PM) */
extern void	active_rt_threads(
					boolean_t	active);

#endif /* MACH_KERNEL_PRIVATE */

__BEGIN_DECLS

#ifdef	XNU_KERNEL_PRIVATE

extern boolean_t		assert_wait_possible(void);

/* Toggles a global override to turn off CPU Throttling */
#define CPU_THROTTLE_DISABLE	0
#define CPU_THROTTLE_ENABLE	1
extern void	sys_override_cpu_throttle(int flag);
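
/*
 * Illustrative sketch (not part of this interface): the override is
 * global, so a caller would typically disable throttling for a
 * latency-sensitive window and restore it afterwards:
 *
 *	sys_override_cpu_throttle(CPU_THROTTLE_DISABLE);
 *	// ... latency-sensitive work ...
 *	sys_override_cpu_throttle(CPU_THROTTLE_ENABLE);
 */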

/*
 ****************** Only exported until BSD stops using ********************
 */

/* Wake up thread directly, passing result */
extern kern_return_t clear_wait(
						thread_t		thread,
						wait_result_t	result);

/* Start thread running */
extern void		thread_bootstrap_return(void);

/* Return from exception (BSD-visible interface) */
extern void		thread_exception_return(void) __dead2;

#endif	/* XNU_KERNEL_PRIVATE */

/* Context switch */
extern wait_result_t	thread_block(
							thread_continue_t	continuation);

extern wait_result_t	thread_block_parameter(
							thread_continue_t	continuation,
							void				*parameter);

/* Declare thread will wait on a particular event */
extern wait_result_t	assert_wait(
							event_t				event,
							wait_interrupt_t	interruptible);

/* Assert that the thread intends to wait with a timeout */
extern wait_result_t	assert_wait_timeout(
							event_t				event,
							wait_interrupt_t	interruptible,
							uint32_t			interval,
							uint32_t			scale_factor);
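
/*
 * Illustrative sketch (not part of this interface): interval is
 * expressed in units of scale_factor nanoseconds, so a 100 ms timeout
 * can be written as:
 *
 *	assert_wait_timeout(event, THREAD_UNINT, 100, NSEC_PER_MSEC);
 *
 * i.e. 100 units of one million nanoseconds each.
 */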

/* Assert that the thread intends to wait with an urgency, timeout and leeway */
extern wait_result_t	assert_wait_timeout_with_leeway(
							event_t				event,
							wait_interrupt_t	interruptible,
							wait_timeout_urgency_t	urgency,
							uint32_t			interval,
							uint32_t			leeway,
							uint32_t			scale_factor);

extern wait_result_t	assert_wait_deadline(
							event_t				event,
							wait_interrupt_t	interruptible,
							uint64_t			deadline);

/* Assert that the thread intends to wait with an urgency, deadline, and leeway */
extern wait_result_t	assert_wait_deadline_with_leeway(
							event_t				event,
							wait_interrupt_t	interruptible,
							wait_timeout_urgency_t	urgency,
							uint64_t			deadline,
							uint64_t			leeway);

/* Wake up thread (or threads) waiting on a particular event */
extern kern_return_t	thread_wakeup_prim(
							event_t				event,
							boolean_t			one_thread,
							wait_result_t		result);

extern kern_return_t	thread_wakeup_prim_internal(
							event_t				event,
							boolean_t			one_thread,
							wait_result_t		result,
							int					priority);

#define thread_wakeup(x)					\
			thread_wakeup_prim((x), FALSE, THREAD_AWAKENED)
#define thread_wakeup_with_result(x, z)		\
			thread_wakeup_prim((x), FALSE, (z))
#define thread_wakeup_one(x)				\
			thread_wakeup_prim((x), TRUE, THREAD_AWAKENED)
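
/*
 * Illustrative sketch (not part of this interface): the canonical
 * wait/wakeup pattern. The waiter asserts the wait and then blocks;
 * the waker wakes every thread waiting on the event. The object and
 * its field are hypothetical; any stable address serves as the event.
 *
 *	// Waiter
 *	wait_result_t wres = assert_wait((event_t)&object->state,
 *					 THREAD_UNINT);
 *	if (wres == THREAD_WAITING)
 *		wres = thread_block(THREAD_CONTINUE_NULL);
 *
 *	// Waker
 *	thread_wakeup((event_t)&object->state);
 */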

#ifdef MACH_KERNEL_PRIVATE
#define thread_wakeup_one_with_pri(x, pri)                              \
	                thread_wakeup_prim_internal((x), TRUE, THREAD_AWAKENED, pri)
#endif /* MACH_KERNEL_PRIVATE */

extern boolean_t		preemption_enabled(void);

#ifdef MACH_KERNEL_PRIVATE

/*
 * Scheduler algorithm indirection. If only one algorithm is
 * enabled at compile-time, a direct function call is used.
 * If more than one is enabled, calls are dispatched through
 * a function pointer table.
 */

#if   !defined(CONFIG_SCHED_TRADITIONAL) && !defined(CONFIG_SCHED_PROTO) && !defined(CONFIG_SCHED_GRRR) && !defined(CONFIG_SCHED_MULTIQ)
#error Enable at least one scheduler algorithm in osfmk/conf/MASTER.XXX
#endif

#define SCHED(f) (sched_current_dispatch->f)
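
/*
 * Illustrative sketch (not part of this interface): callers reach the
 * active algorithm through SCHED(), e.g. during initialization and
 * thread selection:
 *
 *	SCHED(init)();
 *	thread_t thread = SCHED(choose_thread)(processor, MINPRI, AST_NONE);
 */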

struct sched_dispatch_table {
	void	(*init)(void);				/* Init global state */
	void	(*timebase_init)(void);		/* Timebase-dependent initialization */
	void	(*processor_init)(processor_t processor);	/* Per-processor scheduler init */
	void	(*pset_init)(processor_set_t pset);	/* Per-processor set scheduler init */

	void	(*maintenance_continuation)(void);	/* Function called regularly */

	/*
	 * Choose a thread of greater or equal priority from the per-processor
	 * runqueue for timeshare/fixed threads
	 */
	thread_t	(*choose_thread)(
								  processor_t		processor,
								  int				priority,
								  ast_t				reason);

	/*
	 * Steal a thread from another processor in the pset so that it can run
	 * immediately
	 */
	thread_t	(*steal_thread)(
								processor_set_t		pset);

	/*
	 * Recalculate sched_pri based on base priority, past running time,
	 * and scheduling class.
	 */
	void		(*compute_priority)(
					 thread_t	thread,
					 boolean_t	override_depress);

	/*
	 * Pick the best processor for a thread (any kind of thread) to run on.
	 */
	processor_t	(*choose_processor)(
										 processor_set_t		pset,
										 processor_t			processor,
										 thread_t			thread);
	/*
	 * Enqueue a timeshare or fixed priority thread onto the per-processor
	 * runqueue
	 */
	boolean_t (*processor_enqueue)(
								 processor_t			processor,
								 thread_t			thread,
								 integer_t			options);

	/* Migrate threads away in preparation for processor shutdown */
	void (*processor_queue_shutdown)(
									 processor_t			processor);

	/* Remove the specific thread from the per-processor runqueue */
	boolean_t	(*processor_queue_remove)(
									processor_t		processor,
									thread_t		thread);

	/*
	 * Does the per-processor runqueue have any timeshare or fixed priority
	 * threads on it? Called without the pset lock held, so it should
	 * not assume immutability while executing.
	 */
	boolean_t	(*processor_queue_empty)(processor_t		processor);

	/*
	 * Would this priority trigger an urgent preemption if it's sitting
	 * on the per-processor runqueue?
	 */
	boolean_t	(*priority_is_urgent)(int priority);

	/*
	 * Does the per-processor runqueue contain runnable threads that
	 * should cause the currently-running thread to be preempted?
	 */
	ast_t		(*processor_csw_check)(processor_t processor);

	/*
	 * Does the per-processor runqueue contain a runnable thread
	 * of > or >= priority, as a preflight for choose_thread() or
	 * other thread selection?
	 */
	boolean_t	(*processor_queue_has_priority)(processor_t		processor,
												int				priority,
												boolean_t		gte);

	/* Quantum size for the specified non-realtime thread. */
	uint32_t	(*initial_quantum_size)(thread_t thread);

	/* Scheduler mode for a new thread */
	sched_mode_t	(*initial_thread_sched_mode)(task_t parent_task);

	/*
	 * Is it safe to call update_priority(), which may change a thread's
	 * runqueue or other state? This can be used to throttle changes
	 * to dynamic priority.
	 */
	boolean_t	(*can_update_priority)(thread_t thread);

	/*
	 * Update both scheduled priority and other persistent state.
	 * Side effects may include migration to another processor's runqueue.
	 */
	void		(*update_priority)(thread_t thread);

	/* Lower-overhead update to scheduled priority and state. */
	void		(*lightweight_update_priority)(thread_t thread);

	/* Callback for non-realtime threads when the quantum timer fires */
	void		(*quantum_expire)(thread_t thread);

	/*
	 * Even though we could continue executing on this processor, does the
	 * topology (SMT, for instance) indicate that a better processor could
	 * be chosen?
	 */
	boolean_t	(*should_current_thread_rechoose_processor)(processor_t			processor);

	/*
	 * Runnable threads on the per-processor runqueue. Should only
	 * be used for relative comparisons of load between processors.
	 */
	int			(*processor_runq_count)(processor_t	processor);

	/* Aggregate runcount statistics for the per-processor runqueue */
	uint64_t	(*processor_runq_stats_count_sum)(processor_t	processor);

	/* Initialize structures to track demoted fairshare threads */
	void		(*fairshare_init)(void);

	/* Number of runnable fairshare threads */
	int			(*fairshare_runq_count)(void);

	/* Aggregate runcount statistics for the fairshare runqueue */
	uint64_t	(*fairshare_runq_stats_count_sum)(void);

	void		(*fairshare_enqueue)(thread_t thread);

	thread_t	(*fairshare_dequeue)(void);

	boolean_t	(*fairshare_queue_remove)(thread_t thread);

	boolean_t	(*processor_bound_count)(processor_t processor);

	void		(*thread_update_scan)(void);

	/*
	 * Use processor->next_thread to pin a thread to an idle
	 * processor. If FALSE, threads are enqueued and can
	 * be stolen by other processors.
	 */
	boolean_t	direct_dispatch_to_idle_processors;
};
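
/*
 * Illustrative sketch (not part of this interface): each enabled
 * algorithm supplies one of these tables, as the externs below show.
 * The name and functions here are hypothetical; only the shape of the
 * definition is the point.
 *
 *	const struct sched_dispatch_table sched_example_dispatch = {
 *		.init		= sched_example_init,
 *		.timebase_init	= sched_example_timebase_init,
 *		.choose_thread	= sched_example_choose_thread,
 *		// ... remaining entries ...
 *		.direct_dispatch_to_idle_processors = TRUE,
 *	};
 */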

#if defined(CONFIG_SCHED_TRADITIONAL)
#define kSchedTraditionalString "traditional"
#define kSchedTraditionalWithPsetRunqueueString "traditional_with_pset_runqueue"
extern const struct sched_dispatch_table sched_traditional_dispatch;
extern const struct sched_dispatch_table sched_traditional_with_pset_runqueue_dispatch;
#endif

#if defined(CONFIG_SCHED_MULTIQ)
extern const struct sched_dispatch_table sched_multiq_dispatch;
#define kSchedMultiQString "multiq"
extern const struct sched_dispatch_table sched_dualq_dispatch;
#define kSchedDualQString "dualq"
#endif

#if defined(CONFIG_SCHED_PROTO)
#define kSchedProtoString "proto"
extern const struct sched_dispatch_table sched_proto_dispatch;
#endif

#if defined(CONFIG_SCHED_GRRR)
#define kSchedGRRRString "grrr"
extern const struct sched_dispatch_table sched_grrr_dispatch;
#endif

/*
 * It is an error to invoke any scheduler-related code
 * before this is set up.
 */
enum sched_enum {
	sched_enum_unknown = 0,
#if defined(CONFIG_SCHED_TRADITIONAL)
	sched_enum_traditional = 1,
	sched_enum_traditional_with_pset_runqueue = 2,
#endif
#if defined(CONFIG_SCHED_PROTO)
	sched_enum_proto = 3,
#endif
#if defined(CONFIG_SCHED_GRRR)
	sched_enum_grrr = 4,
#endif
#if defined(CONFIG_SCHED_MULTIQ)
	sched_enum_multiq = 5,
	sched_enum_dualq = 6,
#endif
	sched_enum_max = 7,
};

extern const struct sched_dispatch_table *sched_current_dispatch;

#endif	/* MACH_KERNEL_PRIVATE */

__END_DECLS

#endif	/* _KERN_SCHED_PRIM_H_ */