1/*
2 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29#include <mach/mach_types.h>
30#include <mach/thread_act_server.h>
31
32#include <kern/kern_types.h>
33#include <kern/processor.h>
34#include <kern/thread.h>
35#include <kern/affinity.h>
36
37static void
38thread_recompute_priority(
39	thread_t		thread);
40
41#if CONFIG_EMBEDDED
42static void
43thread_throttle(
44	thread_t		thread,
45	integer_t		task_priority);
46
47extern int mach_do_background_thread(thread_t thread, int prio);
48#endif
49
50
51kern_return_t
52thread_policy_set(
53	thread_t				thread,
54	thread_policy_flavor_t	flavor,
55	thread_policy_t			policy_info,
56	mach_msg_type_number_t	count)
57{
58
59	if (thread == THREAD_NULL)
60		return (KERN_INVALID_ARGUMENT);
61
62	if (thread->static_param)
63		return (KERN_SUCCESS);
64
65	return (thread_policy_set_internal(thread, flavor, policy_info, count));
66}
67
/*
 *	thread_policy_set_internal:
 *
 *	Apply a scheduling policy to a thread, without the static_param
 *	short-circuit performed by thread_policy_set().
 *
 *	Locking: takes the thread mutex, and additionally the thread
 *	(sched) lock at splsched for flavors that modify scheduler state.
 *	The affinity flavor drops the mutex and returns directly, because
 *	thread_affinity_set() takes the task lock (lock ordering).
 *
 *	Returns KERN_TERMINATED if the thread is no longer active, and
 *	KERN_INVALID_ARGUMENT for bad flavors, short counts, or invalid
 *	realtime parameters.
 */
kern_return_t
thread_policy_set_internal(
	thread_t				thread,
	thread_policy_flavor_t	flavor,
	thread_policy_t			policy_info,
	mach_msg_type_number_t	count)
{
	kern_return_t			result = KERN_SUCCESS;
	spl_t					s;

	thread_mtx_lock(thread);
	if (!thread->active) {
		thread_mtx_unlock(thread);

		return (KERN_TERMINATED);
	}
	switch (flavor) {

	case THREAD_EXTENDED_POLICY:
	{
		boolean_t				timeshare = TRUE;

		/* A short count is accepted here: it selects the default (timeshare). */
		if (count >= THREAD_EXTENDED_POLICY_COUNT) {
			thread_extended_policy_t	info;

			info = (thread_extended_policy_t)policy_info;
			timeshare = info->timeshare;
		}

		if (!SCHED(supports_timeshare_mode)())
			timeshare = FALSE;

		s = splsched();
		thread_lock(thread);

		if (!(thread->sched_flags & TH_SFLAG_DEMOTED_MASK)) {
			integer_t	oldmode = (thread->sched_mode == TH_MODE_TIMESHARE);

			if (timeshare) {
				thread->sched_mode = TH_MODE_TIMESHARE;

				/* Keep the timeshare load count in step when a runnable
				   (but not idle) thread changes into timeshare mode. */
				if (!oldmode) {
					if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN)
						sched_share_incr();
				}
			}
			else {
				thread->sched_mode = TH_MODE_FIXED;

				if (oldmode) {
					if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN)
						sched_share_decr();
				}
			}

			thread_recompute_priority(thread);
		}
		else {
			/* Thread is demoted: record the requested mode so it takes
			   effect when the demotion ends (see thread_policy_reset()). */

			if (timeshare)
				thread->saved_mode = TH_MODE_TIMESHARE;
			else
				thread->saved_mode = TH_MODE_FIXED;
		}

		thread_unlock(thread);
		splx(s);

		break;
	}

	case THREAD_TIME_CONSTRAINT_POLICY:
	{
		thread_time_constraint_policy_t		info;

		if (count < THREAD_TIME_CONSTRAINT_POLICY_COUNT) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		info = (thread_time_constraint_policy_t)policy_info;
		/* Validate the realtime parameters: the computation must fit
		   inside the constraint and within the allowed quantum range. */
		if (	info->constraint < info->computation	||
				info->computation > max_rt_quantum		||
				info->computation < min_rt_quantum		) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		s = splsched();
		thread_lock(thread);

		thread->realtime.period = info->period;
		thread->realtime.computation = info->computation;
		thread->realtime.constraint = info->constraint;
		thread->realtime.preemptible = info->preemptible;

		if (thread->sched_flags & TH_SFLAG_DEMOTED_MASK) {
			/* Demoted: realtime mode takes effect once the demotion ends. */
			thread->saved_mode = TH_MODE_REALTIME;
		}
		else {
			/* Leaving timeshare mode: balance the share count for a
			   runnable (but not idle) thread. */
			if (thread->sched_mode == TH_MODE_TIMESHARE) {
				if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN)
					sched_share_decr();
			}
			thread->sched_mode = TH_MODE_REALTIME;
			thread_recompute_priority(thread);
		}

		thread_unlock(thread);
		splx(s);

		break;
	}

	case THREAD_PRECEDENCE_POLICY:
	{
		thread_precedence_policy_t		info;

		if (count < THREAD_PRECEDENCE_POLICY_COUNT) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}
		info = (thread_precedence_policy_t)policy_info;

		s = splsched();
		thread_lock(thread);

		thread->importance = info->importance;

		thread_recompute_priority(thread);

		thread_unlock(thread);
		splx(s);

		break;
	}

	case THREAD_AFFINITY_POLICY:
	{
		thread_affinity_policy_t	info;

		if (!thread_affinity_is_supported()) {
			result = KERN_NOT_SUPPORTED;
			break;
		}
		if (count < THREAD_AFFINITY_POLICY_COUNT) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		info = (thread_affinity_policy_t) policy_info;
		/*
		 * Unlock the thread mutex here and
		 * return directly after calling thread_affinity_set().
		 * This is necessary for correct lock ordering because
		 * thread_affinity_set() takes the task lock.
		 */
		thread_mtx_unlock(thread);
		return thread_affinity_set(thread, info->affinity_tag);
	}

#if 0
#if CONFIG_EMBEDDED
	case THREAD_BACKGROUND_POLICY:
	{
		thread_background_policy_t	info;

		info = (thread_background_policy_t) policy_info;

		thread_mtx_unlock(thread);
		return mach_do_background_thread(thread, info->priority);
	}
#endif /* CONFIG_EMBEDDED */
#endif

	default:
		result = KERN_INVALID_ARGUMENT;
		break;
	}

	thread_mtx_unlock(thread);
	return (result);
}
251
252static void
253thread_recompute_priority(
254	thread_t		thread)
255{
256	integer_t		priority;
257
258	if (thread->sched_mode == TH_MODE_REALTIME)
259		priority = BASEPRI_RTQUEUES;
260	else {
261		if (thread->importance > MAXPRI)
262			priority = MAXPRI;
263		else
264		if (thread->importance < -MAXPRI)
265			priority = -MAXPRI;
266		else
267			priority = thread->importance;
268
269		priority += thread->task_priority;
270
271		if (priority > thread->max_priority)
272			priority = thread->max_priority;
273		else
274		if (priority < MINPRI)
275			priority = MINPRI;
276#if CONFIG_EMBEDDED
277		/* No one can have a base priority less than MAXPRI_THROTTLE */
278		if (priority < MAXPRI_THROTTLE)
279			priority = MAXPRI_THROTTLE;
280#endif /* CONFIG_EMBEDDED */
281	}
282
283	set_priority(thread, priority);
284}
285
#if CONFIG_EMBEDDED
/*
 *	thread_throttle:
 *
 *	React to a new task priority by scheduling a throttle transition
 *	for the thread.  The transition is deferred: only the
 *	TH_SFLAG_PENDING_THROTTLE_{DEMOTION,PROMOTION} bits are set here,
 *	so the actual runqueue switch can happen safely later.
 *
 *	Called with the thread locked (see thread_task_priority()).
 */
static void
thread_throttle(
	thread_t		thread,
	integer_t		task_priority)
{
	/* Task dropping into the throttle band: arrange a demotion unless
	   the thread is already throttled with no promotion in flight. */
	if ((!(thread->sched_flags & TH_SFLAG_THROTTLED)
		 || (thread->sched_flags & TH_SFLAG_PENDING_THROTTLE_PROMOTION))
		 && (task_priority <= MAXPRI_THROTTLE)) {

		/* Kill a promotion if it was in flight */
		thread->sched_flags &= ~TH_SFLAG_PENDING_THROTTLE_PROMOTION;

		if (!(thread->sched_flags & TH_SFLAG_THROTTLED)) {
			/*
			 * Set the pending bit so that we can switch runqueues
			 * (potentially) at a later time safely
			 */
			thread->sched_flags |= TH_SFLAG_PENDING_THROTTLE_DEMOTION;
		}
	}
	/* Task rising above the throttle band: arrange a promotion for a
	   throttled (or about-to-be-throttled) thread. */
	else if (((thread->sched_flags & TH_SFLAG_THROTTLED)
			  || (thread->sched_flags & TH_SFLAG_PENDING_THROTTLE_DEMOTION))
			  && (task_priority > MAXPRI_THROTTLE)) {

		/* Kill a demotion if it was in flight */
		thread->sched_flags &= ~TH_SFLAG_PENDING_THROTTLE_DEMOTION;

		if (thread->sched_flags & TH_SFLAG_THROTTLED) {
			thread->sched_flags |= TH_SFLAG_PENDING_THROTTLE_PROMOTION;
		}
	}
}
#endif
320
321void
322thread_task_priority(
323	thread_t		thread,
324	integer_t		priority,
325	integer_t		max_priority)
326{
327	spl_t				s;
328
329	assert(thread != THREAD_NULL);
330
331	s = splsched();
332	thread_lock(thread);
333
334#if CONFIG_EMBEDDED
335	thread_throttle(thread, priority);
336#endif
337
338	thread->task_priority = priority;
339	thread->max_priority = max_priority;
340
341	thread_recompute_priority(thread);
342
343	thread_unlock(thread);
344	splx(s);
345}
346
/*
 *	thread_policy_reset:
 *
 *	Reset the thread's scheduling policy to the task default:
 *	restore the initial scheduling mode, undo any demotion,
 *	zero the importance, and recompute the base priority.
 *
 *	Takes the thread lock at splsched.
 */
void
thread_policy_reset(
	thread_t		thread)
{
	spl_t		s;

	s = splsched();
	thread_lock(thread);

	if (!(thread->sched_flags & TH_SFLAG_DEMOTED_MASK)) {
		sched_mode_t oldmode = thread->sched_mode;

		thread->sched_mode = SCHED(initial_thread_sched_mode)(thread->task);

		/* Keep the timeshare load count consistent if the mode just
		   transitioned into timeshare while runnable (but not idle). */
		if ((oldmode != TH_MODE_TIMESHARE) && (thread->sched_mode == TH_MODE_TIMESHARE)) {

			if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN)
				sched_share_incr();
		}
	}
	else {
		/* Undo the demotion: return to the mode saved when demoted. */
		thread->sched_mode = thread->saved_mode;
		thread->saved_mode = TH_MODE_NONE;
		thread->sched_flags &= ~TH_SFLAG_DEMOTED_MASK;
	}

	thread->importance = 0;

	thread_recompute_priority(thread);

	thread_unlock(thread);
	splx(s);
}
380
/*
 *	thread_policy_get:
 *
 *	Return the thread's current settings for the given policy flavor.
 *	If *get_default is TRUE on entry, or if the thread carries no
 *	explicit setting for the flavor (in which case *get_default is
 *	set to TRUE), default values are returned instead.
 *
 *	Locking: takes the thread mutex; the thread (sched) lock is taken
 *	at splsched only while reading live scheduler state.
 */
kern_return_t
thread_policy_get(
	thread_t				thread,
	thread_policy_flavor_t	flavor,
	thread_policy_t			policy_info,
	mach_msg_type_number_t	*count,
	boolean_t				*get_default)
{
	kern_return_t			result = KERN_SUCCESS;
	spl_t					s;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);
	if (!thread->active) {
		thread_mtx_unlock(thread);

		return (KERN_TERMINATED);
	}

	switch (flavor) {

	case THREAD_EXTENDED_POLICY:
	{
		boolean_t		timeshare = TRUE;

		if (!(*get_default)) {
			s = splsched();
			thread_lock(thread);

			/* A realtime thread has no extended-policy setting;
			   fall back to reporting the default. */
			if (	 (thread->sched_mode != TH_MODE_REALTIME)	&&
					 (thread->saved_mode != TH_MODE_REALTIME)			) {
				/* When demoted, saved_mode holds the requested mode. */
				if (!(thread->sched_flags & TH_SFLAG_DEMOTED_MASK))
					timeshare = (thread->sched_mode == TH_MODE_TIMESHARE) != 0;
				else
					timeshare = (thread->saved_mode == TH_MODE_TIMESHARE) != 0;
			}
			else
				*get_default = TRUE;

			thread_unlock(thread);
			splx(s);
		}

		if (*count >= THREAD_EXTENDED_POLICY_COUNT) {
			thread_extended_policy_t	info;

			info = (thread_extended_policy_t)policy_info;
			info->timeshare = timeshare;
		}

		break;
	}

	case THREAD_TIME_CONSTRAINT_POLICY:
	{
		thread_time_constraint_policy_t		info;

		if (*count < THREAD_TIME_CONSTRAINT_POLICY_COUNT) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		info = (thread_time_constraint_policy_t)policy_info;

		if (!(*get_default)) {
			s = splsched();
			thread_lock(thread);

			/* Realtime parameters are meaningful whether the thread is
			   currently realtime or demoted out of realtime mode. */
			if (	(thread->sched_mode == TH_MODE_REALTIME)	||
					(thread->saved_mode == TH_MODE_REALTIME)		) {
				info->period = thread->realtime.period;
				info->computation = thread->realtime.computation;
				info->constraint = thread->realtime.constraint;
				info->preemptible = thread->realtime.preemptible;
			}
			else
				*get_default = TRUE;

			thread_unlock(thread);
			splx(s);
		}

		if (*get_default) {
			info->period = 0;
			info->computation = default_timeshare_computation;
			info->constraint = default_timeshare_constraint;
			info->preemptible = TRUE;
		}

		break;
	}

	case THREAD_PRECEDENCE_POLICY:
	{
		thread_precedence_policy_t		info;

		if (*count < THREAD_PRECEDENCE_POLICY_COUNT) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		info = (thread_precedence_policy_t)policy_info;

		if (!(*get_default)) {
			s = splsched();
			thread_lock(thread);

			info->importance = thread->importance;

			thread_unlock(thread);
			splx(s);
		}
		else
			info->importance = 0;

		break;
	}

	case THREAD_AFFINITY_POLICY:
	{
		thread_affinity_policy_t		info;

		if (!thread_affinity_is_supported()) {
			result = KERN_NOT_SUPPORTED;
			break;
		}
		if (*count < THREAD_AFFINITY_POLICY_COUNT) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		info = (thread_affinity_policy_t)policy_info;

		if (!(*get_default))
			info->affinity_tag = thread_affinity_get(thread);
		else
			info->affinity_tag = THREAD_AFFINITY_TAG_NULL;

		break;
	}

	default:
		result = KERN_INVALID_ARGUMENT;
		break;
	}

	thread_mtx_unlock(thread);

	return (result);
}
533