/*
 * Copyright (c) 2013 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>
#include <mach/machine.h>

#include <machine/machine_routines.h>
#include <machine/sched_param.h>
#include <machine/machine_cpu.h>

#include <kern/kern_types.h>
#include <kern/debug.h>
#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/processor.h>
#include <kern/queue.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/task.h>
#include <kern/thread.h>

#include <sys/kdebug.h>

static void
sched_dualq_init(void);

static thread_t
sched_dualq_steal_thread(processor_set_t pset);

static void
sched_dualq_thread_update_scan(void);

static boolean_t
sched_dualq_processor_enqueue(processor_t processor, thread_t thread, integer_t options);

static boolean_t
sched_dualq_processor_queue_remove(processor_t processor, thread_t thread);

static ast_t
sched_dualq_processor_csw_check(processor_t processor);

static boolean_t
sched_dualq_processor_queue_has_priority(processor_t processor, int priority, boolean_t gte);

static int
sched_dualq_runq_count(processor_t processor);

static boolean_t
sched_dualq_processor_queue_empty(processor_t processor);

static uint64_t
sched_dualq_runq_stats_count_sum(processor_t processor);

static int
sched_dualq_processor_bound_count(processor_t processor);

static void
sched_dualq_pset_init(processor_set_t pset);

static void
sched_dualq_processor_init(processor_t processor);

static thread_t
sched_dualq_choose_thread(processor_t processor, int priority, ast_t reason);

static void
sched_dualq_processor_queue_shutdown(processor_t processor);

static sched_mode_t
sched_dualq_initial_thread_sched_mode(task_t parent_task);

static boolean_t
sched_dualq_should_current_thread_rechoose_processor(processor_t processor);

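/*
 * Dispatch table registering the dual-queue scheduler with the scheduler
 * framework.  It reuses the traditional scheduler's timebase, maintenance,
 * quantum, priority-update, and fairshare routines, and overrides the
 * run-queue operations to split runnable threads between a shared per-pset
 * run queue and a per-processor queue for bound threads.
 */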
const struct sched_dispatch_table sched_dualq_dispatch = {
	.init                                           = sched_dualq_init,
	.timebase_init                                  = sched_traditional_timebase_init,
	.processor_init                                 = sched_dualq_processor_init,
	.pset_init                                      = sched_dualq_pset_init,
	.maintenance_continuation                       = sched_traditional_maintenance_continue,
	.choose_thread                                  = sched_dualq_choose_thread,
	.steal_thread                                   = sched_dualq_steal_thread,
	.compute_priority                               = compute_priority,
	.choose_processor                               = choose_processor,
	.processor_enqueue                              = sched_dualq_processor_enqueue,
	.processor_queue_shutdown                       = sched_dualq_processor_queue_shutdown,
	.processor_queue_remove                         = sched_dualq_processor_queue_remove,
	.processor_queue_empty                          = sched_dualq_processor_queue_empty,
	.priority_is_urgent                             = priority_is_urgent,
	.processor_csw_check                            = sched_dualq_processor_csw_check,
	.processor_queue_has_priority                   = sched_dualq_processor_queue_has_priority,
	.initial_quantum_size                           = sched_traditional_initial_quantum_size,
	.initial_thread_sched_mode                      = sched_dualq_initial_thread_sched_mode,
	.can_update_priority                            = can_update_priority,
	.update_priority                                = update_priority,
	.lightweight_update_priority                    = lightweight_update_priority,
	.quantum_expire                                 = sched_traditional_quantum_expire,
	.should_current_thread_rechoose_processor       = sched_dualq_should_current_thread_rechoose_processor,
	.processor_runq_count                           = sched_dualq_runq_count,
	.processor_runq_stats_count_sum                 = sched_dualq_runq_stats_count_sum,
	.fairshare_init                                 = sched_traditional_fairshare_init,
	.fairshare_runq_count                           = sched_traditional_fairshare_runq_count,
	.fairshare_runq_stats_count_sum                 = sched_traditional_fairshare_runq_stats_count_sum,
	.fairshare_enqueue                              = sched_traditional_fairshare_enqueue,
	.fairshare_dequeue                              = sched_traditional_fairshare_dequeue,
	.fairshare_queue_remove                         = sched_traditional_fairshare_queue_remove,
	.processor_bound_count                          = sched_dualq_processor_bound_count,
	.thread_update_scan                             = sched_dualq_thread_update_scan,
	.direct_dispatch_to_idle_processors             = FALSE,
};

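/*
 * Run-queue selection helpers: unbound threads live on the shared per-pset
 * run queue, while threads bound to a specific processor live on that
 * processor's own run queue.
 */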
__attribute__((always_inline))
static inline run_queue_t dualq_main_runq(processor_t processor)
{
	return &processor->processor_set->pset_runq;
}

__attribute__((always_inline))
static inline run_queue_t dualq_bound_runq(processor_t processor)
{
	return &processor->runq;
}

__attribute__((always_inline))
static inline run_queue_t dualq_runq_for_thread(processor_t processor, thread_t thread)
{
	if (thread->bound_processor == PROCESSOR_NULL) {
		return dualq_main_runq(processor);
	} else {
		assert(thread->bound_processor == processor);
		return dualq_bound_runq(processor);
	}
}

static sched_mode_t
sched_dualq_initial_thread_sched_mode(task_t parent_task)
{
	if (parent_task == kernel_task)
		return TH_MODE_FIXED;
	else
		return TH_MODE_TIMESHARE;
}

static void
sched_dualq_processor_init(processor_t processor)
{
	run_queue_init(&processor->runq);
}

static void
sched_dualq_pset_init(processor_set_t pset)
{
	run_queue_init(&pset->pset_runq);
}

static void
sched_dualq_init(void)
{
	sched_traditional_init();
}

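/*
 * Pick the next thread for this processor: compare the highest priorities
 * of the bound and shared queues and dequeue from the head of whichever
 * queue wins (ties favor the bound queue).  Returns THREAD_NULL if neither
 * queue has a thread at or above the requested minimum priority.
 */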
static thread_t
sched_dualq_choose_thread(
                          processor_t      processor,
                          int              priority,
                 __unused ast_t            reason)
{
	run_queue_t main_runq  = dualq_main_runq(processor);
	run_queue_t bound_runq = dualq_bound_runq(processor);
	run_queue_t chosen_runq;

	if (bound_runq->highq < priority &&
	     main_runq->highq < priority)
		return THREAD_NULL;

	if (bound_runq->count && main_runq->count) {
		if (bound_runq->highq >= main_runq->highq) {
			chosen_runq = bound_runq;
		} else {
			chosen_runq = main_runq;
		}
	} else if (bound_runq->count) {
		chosen_runq = bound_runq;
	} else if (main_runq->count) {
		chosen_runq = main_runq;
	} else {
		return (THREAD_NULL);
	}

	return run_queue_dequeue(chosen_runq, SCHED_HEADQ);
}

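/*
 * Place a thread on the appropriate run queue (bound or shared) and record
 * the processor it was enqueued against in thread->runq.
 */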
static boolean_t
sched_dualq_processor_enqueue(
                              processor_t       processor,
                              thread_t          thread,
                              integer_t         options)
{
	run_queue_t     rq = dualq_runq_for_thread(processor, thread);
	boolean_t       result;

	result = run_queue_enqueue(rq, thread, options);
	thread->runq = processor;

	return (result);
}

static boolean_t
sched_dualq_processor_queue_empty(processor_t processor)
{
	return dualq_main_runq(processor)->count  == 0 &&
	       dualq_bound_runq(processor)->count == 0;
}

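/*
 * Decide whether the running thread should be preempted: compare the best
 * queued priority against the running thread's priority (strictly greater
 * while it is still in its first timeslice, greater-or-equal afterwards),
 * and escalate to an urgent AST when either queue holds urgent threads or
 * the running thread has eager preemption set.
 */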
static ast_t
sched_dualq_processor_csw_check(processor_t processor)
{
	boolean_t       has_higher;
	int             pri;

	run_queue_t main_runq  = dualq_main_runq(processor);
	run_queue_t bound_runq = dualq_bound_runq(processor);

	assert(processor->active_thread != NULL);

	pri = MAX(main_runq->highq, bound_runq->highq);

	if (first_timeslice(processor)) {
		has_higher = (pri > processor->current_pri);
	} else {
		has_higher = (pri >= processor->current_pri);
	}

	if (has_higher) {
		if (main_runq->urgency > 0)
			return (AST_PREEMPT | AST_URGENT);

		if (bound_runq->urgency > 0)
			return (AST_PREEMPT | AST_URGENT);

		if (processor->active_thread && thread_eager_preemption(processor->active_thread))
			return (AST_PREEMPT | AST_URGENT);

		return AST_PREEMPT;
	}

	return AST_NONE;
}

static boolean_t
sched_dualq_processor_queue_has_priority(processor_t    processor,
                                         int            priority,
                                         boolean_t      gte)
{
	int qpri = MAX(dualq_main_runq(processor)->highq, dualq_bound_runq(processor)->highq);

	if (gte)
		return qpri >= priority;
	else
		return qpri > priority;
}

static boolean_t
sched_dualq_should_current_thread_rechoose_processor(processor_t processor)
{
	return (processor->current_pri < BASEPRI_RTQUEUES && processor->processor_primary != processor);
}

static int
sched_dualq_runq_count(processor_t processor)
{
	return dualq_main_runq(processor)->count + dualq_bound_runq(processor)->count;
}

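/*
 * Sum run-queue statistics for this processor.  The shared pset queue's
 * counts are only added in for the pset's lowest-numbered CPU (cpu_set_low),
 * so they are not double-counted across processors.
 */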
static uint64_t
sched_dualq_runq_stats_count_sum(processor_t processor)
{
	uint64_t bound_sum = dualq_bound_runq(processor)->runq_stats.count_sum;

	if (processor->cpu_id == processor->processor_set->cpu_set_low)
		return bound_sum + dualq_main_runq(processor)->runq_stats.count_sum;
	else
		return bound_sum;
}

static int
sched_dualq_processor_bound_count(processor_t processor)
{
	return dualq_bound_runq(processor)->count;
}

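/*
 * Called with the pset locked when a processor is shut down.  Once no
 * processors in the pset remain online, drain the shared run queue into a
 * local queue, drop the pset lock, and redispatch each thread via
 * thread_setrun() so it can be placed elsewhere.
 */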
static void
sched_dualq_processor_queue_shutdown(processor_t processor)
{
	processor_set_t pset = processor->processor_set;
	run_queue_t     rq   = dualq_main_runq(processor);
	thread_t        thread;
	queue_head_t    tqueue;

	/* We only need to migrate threads if this is the last active processor in the pset */
	if (pset->online_processor_count > 0) {
		pset_unlock(pset);
		return;
	}

	queue_init(&tqueue);

	while (rq->count > 0) {
		thread = run_queue_dequeue(rq, SCHED_HEADQ);
		enqueue_tail(&tqueue, (queue_entry_t)thread);
	}

	pset_unlock(pset);

	while ((thread = (thread_t)(void*)dequeue_head(&tqueue)) != THREAD_NULL) {
		thread_lock(thread);

		thread_setrun(thread, SCHED_TAILQ);

		thread_unlock(thread);
	}
}

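/*
 * Remove a thread from its run queue, guarding against the race where the
 * thread was already dequeued before the pset lock was taken.  Returns TRUE
 * if the thread was found and removed.
 */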
static boolean_t
sched_dualq_processor_queue_remove(
                                   processor_t processor,
                                   thread_t    thread)
{
	run_queue_t             rq;
	processor_set_t         pset = processor->processor_set;

	pset_lock(pset);

	rq = dualq_runq_for_thread(processor, thread);

	if (processor == thread->runq) {
		/*
		 * Thread is on a run queue and we have a lock on
		 * that run queue.
		 */
		run_queue_remove(rq, thread);
	}
	else {
		/*
		 * The thread left the run queue before we could
		 * lock the run queue.
		 */
		assert(thread->runq == PROCESSOR_NULL);
		processor = PROCESSOR_NULL;
	}

	pset_unlock(pset);

	return (processor != PROCESSOR_NULL);
}

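/*
 * Search for work to steal: starting with the locked pset passed in, walk
 * the pset list and dequeue the head of the first non-empty shared run
 * queue found.  The locked pset is always unlocked before returning.
 */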
static thread_t
sched_dualq_steal_thread(processor_set_t pset)
{
	processor_set_t nset, cset = pset;
	thread_t        thread;

	do {
		if (cset->pset_runq.count > 0) {
			thread = run_queue_dequeue(&cset->pset_runq, SCHED_HEADQ);
			pset_unlock(cset);
			return (thread);
		}

		nset = next_pset(cset);

		if (nset != pset) {
			pset_unlock(cset);

			cset = nset;
			pset_lock(cset);
		}
	} while (nset != pset);

	pset_unlock(cset);

	return (THREAD_NULL);
}

static void
sched_dualq_thread_update_scan(void)
{
	boolean_t               restart_needed = FALSE;
	processor_t             processor = processor_list;
	processor_set_t         pset;
	thread_t                thread;
	spl_t                   s;

	/*
	 *  We update the threads associated with each processor (bound and idle threads)
	 *  and then update the threads in each pset runqueue.
	 */

	do {
		do {
			pset = processor->processor_set;

			s = splsched();
			pset_lock(pset);

			restart_needed = runq_scan(dualq_bound_runq(processor));

			pset_unlock(pset);
			splx(s);

			if (restart_needed)
				break;

			thread = processor->idle_thread;
			if (thread != THREAD_NULL && thread->sched_stamp != sched_tick) {
				if (thread_update_add_thread(thread) == FALSE) {
					restart_needed = TRUE;
					break;
				}
			}
		} while ((processor = processor->processor_list) != NULL);

		/* Ok, we now have a collection of candidates -- fix them. */
		thread_update_process_threads();

	} while (restart_needed);

	pset = &pset0;

	do {
		do {
			s = splsched();
			pset_lock(pset);

			restart_needed = runq_scan(&pset->pset_runq);

			pset_unlock(pset);
			splx(s);

			if (restart_needed)
				break;
		} while ((pset = pset->pset_list) != NULL);

		/* Ok, we now have a collection of candidates -- fix them. */
		thread_update_process_threads();

	} while (restart_needed);
}