/*
 * Copyright (c) 2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>
#include <mach/machine.h>
#include <mach/policy.h>
#include <mach/sync_policy.h>
#include <mach/thread_act.h>

#include <machine/machine_routines.h>
#include <machine/sched_param.h>
#include <machine/machine_cpu.h>

#include <kern/kern_types.h>
#include <kern/clock.h>
#include <kern/counters.h>
#include <kern/cpu_number.h>
#include <kern/cpu_data.h>
#include <kern/debug.h>
#include <kern/lock.h>
#include <kern/macro_help.h>
#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/processor.h>
#include <kern/queue.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/syscall_subr.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/wait_queue.h>

#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>

#include <mach/sdt.h>

#include <sys/kdebug.h>

#include <pexpert/pexpert.h>	/* PE_parse_boot_argn() */

static void
sched_fixedpriority_init(void);

static void
sched_fixedpriority_with_pset_runqueue_init(void);

static void
sched_fixedpriority_timebase_init(void);

static void
sched_fixedpriority_processor_init(processor_t processor);

static void
sched_fixedpriority_pset_init(processor_set_t pset);

static void
sched_fixedpriority_maintenance_continuation(void);

static thread_t
sched_fixedpriority_choose_thread(processor_t		processor,
							 int				priority);

static thread_t
sched_fixedpriority_steal_thread(processor_set_t	pset);

static void
sched_fixedpriority_compute_priority(thread_t		thread,
							 boolean_t			override_depress);

static processor_t
sched_fixedpriority_choose_processor(processor_set_t	pset,
							 processor_t			processor,
							 thread_t			thread);

static boolean_t
sched_fixedpriority_processor_enqueue(
							 processor_t			processor,
							 thread_t			thread,
							 integer_t			options);

static void
sched_fixedpriority_processor_queue_shutdown(
							 processor_t			processor);

static boolean_t
sched_fixedpriority_processor_queue_remove(
							 processor_t			processor,
							 thread_t			thread);

static boolean_t
sched_fixedpriority_processor_queue_empty(processor_t	processor);

static boolean_t
sched_fixedpriority_processor_queue_has_priority(processor_t	processor,
							 int				priority,
							 boolean_t			gte);

static boolean_t
sched_fixedpriority_priority_is_urgent(int priority);

static ast_t
sched_fixedpriority_processor_csw_check(processor_t processor);

static uint32_t
sched_fixedpriority_initial_quantum_size(thread_t thread);

static sched_mode_t
sched_fixedpriority_initial_thread_sched_mode(task_t parent_task);

static boolean_t
sched_fixedpriority_supports_timeshare_mode(void);

static boolean_t
sched_fixedpriority_can_update_priority(thread_t	thread);

static void
sched_fixedpriority_update_priority(thread_t	thread);

static void
sched_fixedpriority_lightweight_update_priority(thread_t	thread);

static void
sched_fixedpriority_quantum_expire(thread_t	thread);

static boolean_t
sched_fixedpriority_should_current_thread_rechoose_processor(processor_t	processor);

static int
sched_fixedpriority_processor_runq_count(processor_t	processor);

static uint64_t
sched_fixedpriority_processor_runq_stats_count_sum(processor_t	processor);

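/*
 * Dispatch tables for the two variants of this policy. These are
 * positional initializers, so entry order must match the member order
 * of struct sched_dispatch_table. The two tables differ only in their
 * init routine and in whether runnable threads may be dispatched
 * directly to idle processors.
 */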
const struct sched_dispatch_table sched_fixedpriority_dispatch = {
	sched_fixedpriority_init,
	sched_fixedpriority_timebase_init,
	sched_fixedpriority_processor_init,
	sched_fixedpriority_pset_init,
	sched_fixedpriority_maintenance_continuation,
	sched_fixedpriority_choose_thread,
	sched_fixedpriority_steal_thread,
	sched_fixedpriority_compute_priority,
	sched_fixedpriority_choose_processor,
	sched_fixedpriority_processor_enqueue,
	sched_fixedpriority_processor_queue_shutdown,
	sched_fixedpriority_processor_queue_remove,
	sched_fixedpriority_processor_queue_empty,
	sched_fixedpriority_priority_is_urgent,
	sched_fixedpriority_processor_csw_check,
	sched_fixedpriority_processor_queue_has_priority,
	sched_fixedpriority_initial_quantum_size,
	sched_fixedpriority_initial_thread_sched_mode,
	sched_fixedpriority_supports_timeshare_mode,
	sched_fixedpriority_can_update_priority,
	sched_fixedpriority_update_priority,
	sched_fixedpriority_lightweight_update_priority,
	sched_fixedpriority_quantum_expire,
	sched_fixedpriority_should_current_thread_rechoose_processor,
	sched_fixedpriority_processor_runq_count,
	sched_fixedpriority_processor_runq_stats_count_sum,
	sched_traditional_fairshare_init,
	sched_traditional_fairshare_runq_count,
	sched_traditional_fairshare_runq_stats_count_sum,
	sched_traditional_fairshare_enqueue,
	sched_traditional_fairshare_dequeue,
	sched_traditional_fairshare_queue_remove,
	TRUE /* direct_dispatch_to_idle_processors */
};

const struct sched_dispatch_table sched_fixedpriority_with_pset_runqueue_dispatch = {
	sched_fixedpriority_with_pset_runqueue_init,
	sched_fixedpriority_timebase_init,
	sched_fixedpriority_processor_init,
	sched_fixedpriority_pset_init,
	sched_fixedpriority_maintenance_continuation,
	sched_fixedpriority_choose_thread,
	sched_fixedpriority_steal_thread,
	sched_fixedpriority_compute_priority,
	sched_fixedpriority_choose_processor,
	sched_fixedpriority_processor_enqueue,
	sched_fixedpriority_processor_queue_shutdown,
	sched_fixedpriority_processor_queue_remove,
	sched_fixedpriority_processor_queue_empty,
	sched_fixedpriority_priority_is_urgent,
	sched_fixedpriority_processor_csw_check,
	sched_fixedpriority_processor_queue_has_priority,
	sched_fixedpriority_initial_quantum_size,
	sched_fixedpriority_initial_thread_sched_mode,
	sched_fixedpriority_supports_timeshare_mode,
	sched_fixedpriority_can_update_priority,
	sched_fixedpriority_update_priority,
	sched_fixedpriority_lightweight_update_priority,
	sched_fixedpriority_quantum_expire,
	sched_fixedpriority_should_current_thread_rechoose_processor,
	sched_fixedpriority_processor_runq_count,
	sched_fixedpriority_processor_runq_stats_count_sum,
	sched_traditional_fairshare_init,
	sched_traditional_fairshare_runq_count,
	sched_traditional_fairshare_runq_stats_count_sum,
	sched_traditional_fairshare_enqueue,
	sched_traditional_fairshare_dequeue,
	sched_traditional_fairshare_queue_remove,
	FALSE /* direct_dispatch_to_idle_processors */
};

extern int	max_unsafe_quanta;

#define		SCHED_FIXEDPRIORITY_DEFAULT_QUANTUM		5		/* in ms */
static uint32_t sched_fixedpriority_quantum_ms = SCHED_FIXEDPRIORITY_DEFAULT_QUANTUM;
static uint32_t sched_fixedpriority_quantum;

#define SCHED_FIXEDPRIORITY_DEFAULT_FAIRSHARE_MINIMUM_BLOCK_TIME 100 /* ms */
static uint32_t fairshare_minimum_blocked_time_ms = SCHED_FIXEDPRIORITY_DEFAULT_FAIRSHARE_MINIMUM_BLOCK_TIME;
static uint32_t fairshare_minimum_blocked_time;

static uint32_t			sched_fixedpriority_tick;
static uint64_t			sched_fixedpriority_tick_deadline;
extern uint32_t			grrr_rescale_tick;

static boolean_t sched_fixedpriority_use_pset_runqueue = FALSE;

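/*
 * In the pset-runqueue variant, all processors in a processor set share
 * a single run queue; otherwise each processor manages its own.
 */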
__attribute__((always_inline))
static inline run_queue_t runq_for_processor(processor_t processor)
{
	if (sched_fixedpriority_use_pset_runqueue)
		return &processor->processor_set->pset_runq;
	else
		return &processor->runq;
}

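/*
 * Bookkeeping for threads bound to a particular processor: the counts
 * maintained below let sched_fixedpriority_processor_queue_empty() tell
 * how many entries in a shared pset run queue actually target a given
 * processor.
 */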
__attribute__((always_inline))
static inline void runq_consider_incr_bound_count(processor_t processor, thread_t thread)
{
	if (thread->bound_processor == PROCESSOR_NULL)
		return;

	assert(thread->bound_processor == processor);

	if (sched_fixedpriority_use_pset_runqueue)
		processor->processor_set->pset_runq_bound_count++;

	processor->runq_bound_count++;
}

__attribute__((always_inline))
static inline void runq_consider_decr_bound_count(processor_t processor, thread_t thread)
{
	if (thread->bound_processor == PROCESSOR_NULL)
		return;

	assert(thread->bound_processor == processor);

	if (sched_fixedpriority_use_pset_runqueue)
		processor->processor_set->pset_runq_bound_count--;

	processor->runq_bound_count--;
}

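/*
 * The timeslicing quantum defaults to 5 ms and may be overridden with
 * the "fixedpriority_quantum" boot-arg; values below 1 ms fall back to
 * the default.
 */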
static void
sched_fixedpriority_init(void)
{
	if (!PE_parse_boot_argn("fixedpriority_quantum", &sched_fixedpriority_quantum_ms, sizeof (sched_fixedpriority_quantum_ms))) {
		sched_fixedpriority_quantum_ms = SCHED_FIXEDPRIORITY_DEFAULT_QUANTUM;
	}

	if (sched_fixedpriority_quantum_ms < 1)
		sched_fixedpriority_quantum_ms = SCHED_FIXEDPRIORITY_DEFAULT_QUANTUM;

	printf("standard fixed priority timeslicing quantum is %u ms\n", sched_fixedpriority_quantum_ms);
}

static void
sched_fixedpriority_with_pset_runqueue_init(void)
{
	sched_fixedpriority_init();
	sched_fixedpriority_use_pset_runqueue = TRUE;
}

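/*
 * Once the timebase is available, convert the millisecond tunables to
 * absolute-time units and derive the depression, timeshare, and unsafe
 * (failsafe) intervals from the quantum.
 */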
static void
sched_fixedpriority_timebase_init(void)
{
	uint64_t	abstime;

	/* standard timeslicing quantum */
	clock_interval_to_absolutetime_interval(
		sched_fixedpriority_quantum_ms, NSEC_PER_MSEC, &abstime);
	assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
	sched_fixedpriority_quantum = (uint32_t)abstime;

	thread_depress_time = 1 * sched_fixedpriority_quantum;
	default_timeshare_computation = sched_fixedpriority_quantum / 2;
	default_timeshare_constraint = sched_fixedpriority_quantum;

	max_unsafe_computation = max_unsafe_quanta * sched_fixedpriority_quantum;
	sched_safe_duration = 2 * max_unsafe_quanta * sched_fixedpriority_quantum;

	if (!PE_parse_boot_argn("fairshare_minblockedtime", &fairshare_minimum_blocked_time_ms, sizeof (fairshare_minimum_blocked_time_ms))) {
		fairshare_minimum_blocked_time_ms = SCHED_FIXEDPRIORITY_DEFAULT_FAIRSHARE_MINIMUM_BLOCK_TIME;
	}

	clock_interval_to_absolutetime_interval(
		fairshare_minimum_blocked_time_ms, NSEC_PER_MSEC, &abstime);

	assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
	fairshare_minimum_blocked_time = (uint32_t)abstime;
}

static void
sched_fixedpriority_processor_init(processor_t processor)
{
	if (!sched_fixedpriority_use_pset_runqueue) {
		run_queue_init(&processor->runq);
	}
	processor->runq_bound_count = 0;
}

static void
sched_fixedpriority_pset_init(processor_set_t pset)
{
	if (sched_fixedpriority_use_pset_runqueue) {
		run_queue_init(&pset->pset_runq);
	}
	pset->pset_runq_bound_count = 0;
}

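/*
 * Periodic maintenance, rescheduled roughly every ten seconds via the
 * deadline computed below; its main job is updating global load averages.
 */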
static void
sched_fixedpriority_maintenance_continuation(void)
{
	uint64_t			abstime = mach_absolute_time();

	sched_fixedpriority_tick++;
	grrr_rescale_tick++;

	/*
	 *  Compute various averages.
	 */
	compute_averages();

	if (sched_fixedpriority_tick_deadline == 0)
		sched_fixedpriority_tick_deadline = abstime;

	clock_deadline_for_periodic_event(10*sched_one_second_interval, abstime,
						&sched_fixedpriority_tick_deadline);

	assert_wait_deadline((event_t)sched_fixedpriority_maintenance_continuation, THREAD_UNINT, sched_fixedpriority_tick_deadline);
	thread_block((thread_continue_t)sched_fixedpriority_maintenance_continuation);
	/*NOTREACHED*/
}

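/*
 * Dequeue the highest-priority runnable thread at or above 'priority',
 * adjusting the bound-thread count to match.
 */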
static thread_t
sched_fixedpriority_choose_thread(processor_t		processor,
						  int				priority)
{
	thread_t thread;

	thread = choose_thread(processor, runq_for_processor(processor), priority);
	if (thread != THREAD_NULL) {
		runq_consider_decr_bound_count(processor, thread);
	}

	return thread;
}

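/*
 * This policy does no work stealing; release the pset lock held by the
 * caller and report that no thread was stolen.
 */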
static thread_t
sched_fixedpriority_steal_thread(processor_set_t	pset)
{
	pset_unlock(pset);

	return (THREAD_NULL);
}

static void
sched_fixedpriority_compute_priority(thread_t		thread,
							 boolean_t			override_depress)
{
	/* Reset current priority to base priority */
	if (	!(thread->sched_flags & TH_SFLAG_PROMOTED)		&&
		(!(thread->sched_flags & TH_SFLAG_DEPRESSED_MASK)	||
		 override_depress)) {
		set_sched_pri(thread, thread->priority);
	}
}

static processor_t
sched_fixedpriority_choose_processor(processor_set_t	pset,
							 processor_t			processor,
							 thread_t			thread)
{
	return choose_processor(pset, processor, thread);
}

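/*
 * Place the thread on the processor's (or shared pset's) run queue and
 * record which processor it is queued on.
 */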
static boolean_t
sched_fixedpriority_processor_enqueue(
							 processor_t			processor,
							 thread_t			thread,
							 integer_t			options)
{
	run_queue_t		rq = runq_for_processor(processor);
	boolean_t		result;

	result = run_queue_enqueue(rq, thread, options);
	thread->runq = processor;
	runq_consider_incr_bound_count(processor, thread);

	return (result);
}

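/*
 * Drain a processor that is shutting down: bound threads are re-enqueued
 * locally, while unbound threads are handed back to thread_setrun() for
 * placement on the remaining processors.
 */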
static void
sched_fixedpriority_processor_queue_shutdown(
							 processor_t			processor)
{
	processor_set_t		pset = processor->processor_set;
	thread_t			thread;
	queue_head_t		tqueue, bqueue;

	queue_init(&tqueue);
	queue_init(&bqueue);

	while ((thread = sched_fixedpriority_choose_thread(processor, IDLEPRI)) != THREAD_NULL) {
		if (thread->bound_processor == PROCESSOR_NULL) {
			enqueue_tail(&tqueue, (queue_entry_t)thread);
		} else {
			enqueue_tail(&bqueue, (queue_entry_t)thread);
		}
	}

	while ((thread = (thread_t)dequeue_head(&bqueue)) != THREAD_NULL) {
		sched_fixedpriority_processor_enqueue(processor, thread, SCHED_TAILQ);
	}

	pset_unlock(pset);

	while ((thread = (thread_t)dequeue_head(&tqueue)) != THREAD_NULL) {
		thread_lock(thread);

		thread_setrun(thread, SCHED_TAILQ);

		thread_unlock(thread);
	}
}

static boolean_t
sched_fixedpriority_processor_queue_remove(
							 processor_t			processor,
							 thread_t			thread)
{
	void *			rqlock;
	run_queue_t		rq;

	rqlock = &processor->processor_set->sched_lock;
	rq = runq_for_processor(processor);

	simple_lock(rqlock);
	if (processor == thread->runq) {
		/*
		 *	Thread is on a run queue and we have a lock on
		 *	that run queue.
		 */
		runq_consider_decr_bound_count(processor, thread);
		run_queue_remove(rq, thread);
	}
	else {
		/*
		 *	The thread left the run queue before we could
		 *	lock the run queue.
		 */
		assert(thread->runq == PROCESSOR_NULL);
		processor = PROCESSOR_NULL;
	}

	simple_unlock(rqlock);

	return (processor != PROCESSOR_NULL);
}

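/*
 * With a shared pset run queue, the queue is "empty" for this processor
 * when it holds nothing runnable here: total count, minus all bound
 * threads in the pset, plus those bound to this processor.
 */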
static boolean_t
sched_fixedpriority_processor_queue_empty(processor_t	processor)
{
	/*
	 * See sched_traditional_with_pset_runqueue_processor_queue_empty
	 * for algorithm
	 */
	int count = runq_for_processor(processor)->count;

	if (sched_fixedpriority_use_pset_runqueue) {
		processor_set_t pset = processor->processor_set;

		count -= pset->pset_runq_bound_count;
		count += processor->runq_bound_count;
	}

	return count == 0;
}

static boolean_t
sched_fixedpriority_processor_queue_has_priority(processor_t	processor,
							 int				priority,
							 boolean_t			gte)
{
	if (gte)
		return runq_for_processor(processor)->highq >= priority;
	else
		return runq_for_processor(processor)->highq > priority;
}

/* Implement sched_preempt_pri in code */
static boolean_t
sched_fixedpriority_priority_is_urgent(int priority)
{
	if (priority <= BASEPRI_FOREGROUND)
		return FALSE;

	if (priority < MINPRI_KERNEL)
		return TRUE;

	if (priority >= BASEPRI_PREEMPT)
		return TRUE;

	return FALSE;
}

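/*
 * Check whether a context switch is warranted: a strictly higher-priority
 * queued thread always preempts, an equal-priority one preempts once the
 * current thread's first timeslice has expired, and a current thread that
 * has been demoted to fairshare yields urgently to waiting work.
 */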
static ast_t
sched_fixedpriority_processor_csw_check(processor_t processor)
{
	run_queue_t		runq;
	boolean_t		has_higher;

	runq = runq_for_processor(processor);
	if (first_timeslice(processor)) {
		has_higher = (runq->highq > processor->current_pri);
	} else {
		has_higher = (runq->highq >= processor->current_pri);
	}
	if (has_higher) {
		if (runq->urgency > 0)
			return (AST_PREEMPT | AST_URGENT);

		if (processor->active_thread && thread_eager_preemption(processor->active_thread))
			return (AST_PREEMPT | AST_URGENT);

		return AST_PREEMPT;
	} else if (processor->current_thmode == TH_MODE_FAIRSHARE) {
		if (!sched_fixedpriority_processor_queue_empty(processor)) {
			/* Allow queued threads to run if the current thread got demoted to fairshare */
			return (AST_PREEMPT | AST_URGENT);
		} else if ((!first_timeslice(processor)) && SCHED(fairshare_runq_count)() > 0) {
			/* Allow other fairshare threads to run */
			return (AST_PREEMPT | AST_URGENT);
		}
	}

	return AST_NONE;
}

static uint32_t
sched_fixedpriority_initial_quantum_size(thread_t thread __unused)
{
	return sched_fixedpriority_quantum;
}

static sched_mode_t
sched_fixedpriority_initial_thread_sched_mode(task_t parent_task)
{
	if (parent_task == kernel_task)
		return TH_MODE_FIXED;
	else
		return TH_MODE_TIMESHARE;
}

static boolean_t
sched_fixedpriority_supports_timeshare_mode(void)
{
	return TRUE;
}

static boolean_t
sched_fixedpriority_can_update_priority(thread_t	thread)
{
	return ((thread->sched_flags & TH_SFLAG_PRI_UPDATE) == 0);
}

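/*
 * Slow-path priority maintenance: release threads from the fairshare band
 * once they have been blocked long enough, push depressed threads into
 * fairshare, apply pending throttle transitions (embedded configs), and
 * undo the failsafe demotion when its release time arrives.
 */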
static void
sched_fixedpriority_update_priority(thread_t	thread)
{
	uint64_t current_time = mach_absolute_time();

	thread->sched_flags |= TH_SFLAG_PRI_UPDATE;

	if (thread->sched_flags & TH_SFLAG_FAIRSHARE_TRIPPED) {

		/*
		 * Make sure we've waited fairshare_minimum_blocked_time both from the time
		 * we were throttled into the fairshare band, and the last time
		 * we ran.
		 */
		if (current_time >= thread->last_run_time + fairshare_minimum_blocked_time) {
			boolean_t		removed = thread_run_queue_remove(thread);

			thread->sched_flags &= ~TH_SFLAG_FAIRSHARE_TRIPPED;
			thread->sched_mode = thread->saved_mode;
			thread->saved_mode = TH_MODE_NONE;

			if (removed)
				thread_setrun(thread, SCHED_TAILQ);

			KERNEL_DEBUG_CONSTANT1(
				MACHDBG_CODE(DBG_MACH_SCHED, MACH_FAIRSHARE_EXIT) | DBG_FUNC_NONE,
				(uint32_t)(thread->last_run_time & 0xFFFFFFFF),
				(uint32_t)(thread->last_run_time >> 32),
				(uint32_t)(current_time & 0xFFFFFFFF),
				(uint32_t)(current_time >> 32),
				thread_tid(thread));
		}
	} else if ((thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) && (thread->bound_processor == PROCESSOR_NULL)) {
		boolean_t		removed = thread_run_queue_remove(thread);

		thread->sched_flags |= TH_SFLAG_FAIRSHARE_TRIPPED;
		thread->saved_mode = thread->sched_mode;
		thread->sched_mode = TH_MODE_FAIRSHARE;

		thread->last_quantum_refill_time = thread->last_run_time - 2 * sched_fixedpriority_quantum - 1;

		if (removed)
			thread_setrun(thread, SCHED_TAILQ);

		KERNEL_DEBUG_CONSTANT(
			MACHDBG_CODE(DBG_MACH_SCHED, MACH_FAIRSHARE_ENTER) | DBG_FUNC_NONE,
			(uintptr_t)thread_tid(thread), 0xFFFFFFFF, 0, 0, 0);
	}

#if CONFIG_EMBEDDED
	/* Check for pending throttle transitions, and safely switch queues */
	if ((thread->sched_flags & TH_SFLAG_PENDING_THROTTLE_MASK) && (thread->bound_processor == PROCESSOR_NULL)) {
		boolean_t		removed = thread_run_queue_remove(thread);

		if (thread->sched_flags & TH_SFLAG_PENDING_THROTTLE_DEMOTION) {
			if (thread->sched_mode == TH_MODE_REALTIME) {
				thread->saved_mode = thread->sched_mode;
				thread->sched_mode = TH_MODE_TIMESHARE;

				if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN)
					sched_share_incr();
			} else {
				/*
				 * It's possible that this is a realtime thread that has
				 * already tripped the failsafe, in which case it should not
				 * degrade further.
				 */
				if (!(thread->sched_flags & TH_SFLAG_FAILSAFE)) {
					thread->saved_mode = thread->sched_mode;

					if (thread->sched_mode == TH_MODE_TIMESHARE) {
						thread->sched_mode = TH_MODE_FAIRSHARE;
					}
				}
			}
			thread->sched_flags |= TH_SFLAG_THROTTLED;

			KERNEL_DEBUG_CONSTANT(
				MACHDBG_CODE(DBG_MACH_SCHED, MACH_FAIRSHARE_ENTER) | DBG_FUNC_NONE,
				(uintptr_t)thread_tid(thread), 0xFFFFFFFF, 0, 0, 0);
		} else {
			if ((thread->sched_mode == TH_MODE_TIMESHARE)
				&& (thread->saved_mode == TH_MODE_REALTIME)) {
				if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN)
					sched_share_decr();
			}

			thread->sched_mode = thread->saved_mode;
			thread->saved_mode = TH_MODE_NONE;
			thread->sched_flags &= ~TH_SFLAG_THROTTLED;

			KERNEL_DEBUG_CONSTANT1(
				MACHDBG_CODE(DBG_MACH_SCHED, MACH_FAIRSHARE_EXIT) | DBG_FUNC_NONE,
				0, 0, 0, 0, thread_tid(thread));
		}

		thread->sched_flags &= ~(TH_SFLAG_PENDING_THROTTLE_MASK);

		if (removed)
			thread_setrun(thread, SCHED_TAILQ);
	}
#endif

	/*
	 *	Check for fail-safe release.
	 */
	if (	(thread->sched_flags & TH_SFLAG_FAILSAFE)	&&
		current_time >= thread->safe_release		) {
		thread->sched_flags &= ~TH_SFLAG_FAILSAFE;

		if (!(thread->sched_flags & TH_SFLAG_DEMOTED_MASK)) {
			/* Restore to previous */
			thread->sched_mode = thread->saved_mode;
			thread->saved_mode = TH_MODE_NONE;

			if (thread->sched_mode == TH_MODE_REALTIME) {
				thread->priority = BASEPRI_RTQUEUES;
			}

			if (!(thread->sched_flags & TH_SFLAG_DEPRESSED_MASK))
				set_sched_pri(thread, thread->priority);
		}
	}

	thread->sched_flags &= ~TH_SFLAG_PRI_UPDATE;
	return;
}

static void
sched_fixedpriority_lightweight_update_priority(thread_t	thread __unused)
{
	return;
}

static void
sched_fixedpriority_quantum_expire(
						  thread_t	thread)
{
	/* Put thread into fairshare class, core scheduler will manage runqueue */
	if ((thread->sched_mode == TH_MODE_TIMESHARE) && (thread->task != kernel_task) && !(thread->sched_flags & TH_SFLAG_DEMOTED_MASK)) {
		uint64_t elapsed = thread->last_run_time - thread->last_quantum_refill_time;

		/* If we managed to use our quantum in less than 2*quantum wall clock time,
		 * we are considered CPU bound and eligible for demotion. Since the quantum
		 * is reset when thread_unblock() is called, we are only really considering
		 * threads that elongate their execution time due to preemption.
		 */
		if ((elapsed < 2 * sched_fixedpriority_quantum) && (thread->bound_processor == PROCESSOR_NULL)) {
			thread->saved_mode = thread->sched_mode;
			thread->sched_mode = TH_MODE_FAIRSHARE;
			thread->sched_flags |= TH_SFLAG_FAIRSHARE_TRIPPED;
			KERNEL_DEBUG_CONSTANT(
				MACHDBG_CODE(DBG_MACH_SCHED, MACH_FAIRSHARE_ENTER) | DBG_FUNC_NONE,
				(uintptr_t)thread_tid(thread), (uint32_t)(elapsed & 0xFFFFFFFF),
				(uint32_t)(elapsed >> 32), 0, 0);
		}
	}
}

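/*
 * Always have the dispatcher re-evaluate processor choice for the
 * current thread.
 */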
static boolean_t
sched_fixedpriority_should_current_thread_rechoose_processor(processor_t	processor __unused)
{
	return (TRUE);
}

static int
sched_fixedpriority_processor_runq_count(processor_t	processor)
{
	return runq_for_processor(processor)->count;
}

static uint64_t
sched_fixedpriority_processor_runq_stats_count_sum(processor_t	processor)
{
	return runq_for_processor(processor)->runq_stats.count_sum;
}