1/*
2 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29#include <mach/mach_types.h>
30#include <mach/thread_act_server.h>
31
32#include <kern/kern_types.h>
33#include <kern/processor.h>
34#include <kern/thread.h>
35#include <kern/affinity.h>
36
/* Forward declaration: recompute a thread's scheduled priority from its
 * current mode, importance and task-derived limits (defined below). */
static void
thread_recompute_priority(
	thread_t		thread);


/* Implemented elsewhere; fills in a THREAD_POLICY_STATE info structure.
 * NOTE(review): takes the task lock — see the lock-ordering comment at
 * its call site in thread_policy_get(). */
extern void proc_get_thread_policy(thread_t thread, thread_policy_state_t info);
43
44kern_return_t
45thread_policy_set(
46	thread_t				thread,
47	thread_policy_flavor_t	flavor,
48	thread_policy_t			policy_info,
49	mach_msg_type_number_t	count)
50{
51
52	if (thread == THREAD_NULL)
53		return (KERN_INVALID_ARGUMENT);
54
55	if (thread->static_param)
56		return (KERN_SUCCESS);
57
58	return (thread_policy_set_internal(thread, flavor, policy_info, count));
59}
60
/*
 * thread_policy_set_internal:
 *
 * Apply the scheduling policy described by (flavor, policy_info, count)
 * to a thread.  Called with no locks held; takes the thread mutex, and
 * performs the per-flavor state updates under the thread's sched lock
 * at splsched.  Returns KERN_TERMINATED if the thread is no longer
 * active, KERN_INVALID_ARGUMENT for a bad flavor or malformed info.
 */
kern_return_t
thread_policy_set_internal(
	thread_t				thread,
	thread_policy_flavor_t	flavor,
	thread_policy_t			policy_info,
	mach_msg_type_number_t	count)
{
	kern_return_t			result = KERN_SUCCESS;
	spl_t					s;

	thread_mtx_lock(thread);
	if (!thread->active) {
		thread_mtx_unlock(thread);

		return (KERN_TERMINATED);
	}
	switch (flavor) {

	case THREAD_EXTENDED_POLICY:
	{
		/* Default to timeshare when no policy data is supplied. */
		boolean_t				timeshare = TRUE;

		if (count >= THREAD_EXTENDED_POLICY_COUNT) {
			thread_extended_policy_t	info;

			info = (thread_extended_policy_t)policy_info;
			timeshare = info->timeshare;
		}

		/* The configured scheduler may not implement timeshare mode. */
		if (!SCHED(supports_timeshare_mode)())
			timeshare = FALSE;

		s = splsched();
		thread_lock(thread);

		if (!(thread->sched_flags & TH_SFLAG_DEMOTED_MASK)) {
			integer_t	oldmode = (thread->sched_mode == TH_MODE_TIMESHARE);

			if (timeshare) {
				thread->sched_mode = TH_MODE_TIMESHARE;

				if (!oldmode) {
					/*
					 * Mode changed to timeshare: if the thread is
					 * currently runnable (TH_RUN and not idling),
					 * bump the global timeshare run count, and the
					 * background count if it is throttled.
					 */
					if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN) {
						sched_share_incr();

						if (thread->max_priority <= MAXPRI_THROTTLE)
							sched_background_incr();
					}
				}
			}
			else {
				thread->sched_mode = TH_MODE_FIXED;

				if (oldmode) {
					/* Mode changed away from timeshare: reverse the counts. */
					if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN) {
						if (thread->max_priority <= MAXPRI_THROTTLE)
							sched_background_decr();

						sched_share_decr();
					}
				}
			}

			thread_recompute_priority(thread);
		}
		else {
			/*
			 * Thread is currently demoted (TH_SFLAG_DEMOTED_MASK):
			 * only record the requested mode; thread_policy_reset()
			 * installs saved_mode when the demotion is cleared.
			 */
			if (timeshare)
				thread->saved_mode = TH_MODE_TIMESHARE;
			else
				thread->saved_mode = TH_MODE_FIXED;
		}

		thread_unlock(thread);
		splx(s);

		break;
	}

	case THREAD_TIME_CONSTRAINT_POLICY:
	{
		thread_time_constraint_policy_t		info;

		if (count < THREAD_TIME_CONSTRAINT_POLICY_COUNT) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		info = (thread_time_constraint_policy_t)policy_info;
		/*
		 * Validate the realtime parameters: the computation must fit
		 * inside the constraint and lie within the allowed realtime
		 * quantum range [min_rt_quantum, max_rt_quantum].
		 */
		if (	info->constraint < info->computation	||
				info->computation > max_rt_quantum		||
				info->computation < min_rt_quantum		) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		s = splsched();
		thread_lock(thread);

		thread->realtime.period = info->period;
		thread->realtime.computation = info->computation;
		thread->realtime.constraint = info->constraint;
		thread->realtime.preemptible = info->preemptible;

		if (thread->sched_flags & TH_SFLAG_DEMOTED_MASK) {
			/* Demoted: defer the mode switch (see extended policy above). */
			thread->saved_mode = TH_MODE_REALTIME;
		}
		else {
			/*
			 * Leaving timeshare for realtime: drop the runnable
			 * timeshare/background counts for this thread first.
			 */
			if (thread->sched_mode == TH_MODE_TIMESHARE) {
				if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN) {
					if (thread->max_priority <= MAXPRI_THROTTLE)
						sched_background_decr();

					sched_share_decr();
				}
			}
			thread->sched_mode = TH_MODE_REALTIME;
			thread_recompute_priority(thread);
		}

		thread_unlock(thread);
		splx(s);

		break;
	}

	case THREAD_PRECEDENCE_POLICY:
	{
		thread_precedence_policy_t		info;

		if (count < THREAD_PRECEDENCE_POLICY_COUNT) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}
		info = (thread_precedence_policy_t)policy_info;

		s = splsched();
		thread_lock(thread);

		/* Importance biases the priority computed from the task base. */
		thread->importance = info->importance;

		thread_recompute_priority(thread);

		thread_unlock(thread);
		splx(s);

		break;
	}

	case THREAD_AFFINITY_POLICY:
	{
		thread_affinity_policy_t	info;

		if (!thread_affinity_is_supported()) {
			result = KERN_NOT_SUPPORTED;
			break;
		}
		if (count < THREAD_AFFINITY_POLICY_COUNT) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		info = (thread_affinity_policy_t) policy_info;
		/*
		 * Unlock the thread mutex here and
		 * return directly after calling thread_affinity_set().
		 * This is necessary for correct lock ordering because
		 * thread_affinity_set() takes the task lock.
		 */
		thread_mtx_unlock(thread);
		return thread_affinity_set(thread, info->affinity_tag);
	}


	default:
		result = KERN_INVALID_ARGUMENT;
		break;
	}

	thread_mtx_unlock(thread);
	return (result);
}
243
244static void
245thread_recompute_priority(
246	thread_t		thread)
247{
248	integer_t		priority;
249
250	if (thread->sched_mode == TH_MODE_REALTIME)
251		priority = BASEPRI_RTQUEUES;
252	else {
253		if (thread->importance > MAXPRI)
254			priority = MAXPRI;
255		else
256		if (thread->importance < -MAXPRI)
257			priority = -MAXPRI;
258		else
259			priority = thread->importance;
260
261		priority += thread->task_priority;
262
263		if (priority > thread->max_priority)
264			priority = thread->max_priority;
265		else
266		if (priority < MINPRI)
267			priority = MINPRI;
268	}
269
270	set_priority(thread, priority);
271}
272
273
/*
 * thread_task_priority:
 *
 * Update the base priority and maximum priority a thread derives from
 * its task, then recompute its scheduled priority.  Takes splsched and
 * the thread lock itself.
 */
void
thread_task_priority(
	thread_t		thread,
	integer_t		priority,
	integer_t		max_priority)
{
	spl_t				s;

	assert(thread != THREAD_NULL);

	s = splsched();
	thread_lock(thread);

	/*
	 * If the new max_priority moves a runnable (non-idle) thread across
	 * the MAXPRI_THROTTLE boundary, keep the global background run
	 * count consistent — compare old value before the store below.
	 */
	if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN) {
		if ((thread->max_priority <= MAXPRI_THROTTLE) && (max_priority > MAXPRI_THROTTLE)) {
			sched_background_decr();
		} else if ((thread->max_priority > MAXPRI_THROTTLE) && (max_priority <= MAXPRI_THROTTLE)) {
			sched_background_incr();
		}
	}

	thread->task_priority = priority;
	thread->max_priority = max_priority;

	thread_recompute_priority(thread);

	thread_unlock(thread);
	splx(s);
}
305
/*
 * thread_policy_reset:
 *
 * Return a thread to its default scheduling policy: reinstall the
 * scheduler's initial mode for the task (or, if the thread is demoted,
 * restore the saved mode and clear the demotion), and zero importance.
 * Takes splsched and the thread lock itself.
 */
void
thread_policy_reset(
	thread_t		thread)
{
	spl_t		s;

	s = splsched();
	thread_lock(thread);

	if (!(thread->sched_flags & TH_SFLAG_DEMOTED_MASK)) {
		sched_mode_t oldmode = thread->sched_mode;

		thread->sched_mode = SCHED(initial_thread_sched_mode)(thread->task);

		if ((oldmode != TH_MODE_TIMESHARE) && (thread->sched_mode == TH_MODE_TIMESHARE)) {
			/*
			 * Transitioned into timeshare: if runnable (and not idle),
			 * bump the global timeshare/background run counts.
			 */
			if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN) {
				sched_share_incr();

				if (thread->max_priority <= MAXPRI_THROTTLE)
					sched_background_incr();
			}
		}
	}
	else {
		/* Demoted: reinstate the mode saved at demotion time. */
		thread->sched_mode = thread->saved_mode;
		thread->saved_mode = TH_MODE_NONE;
		thread->sched_flags &= ~TH_SFLAG_DEMOTED_MASK;
	}

	thread->importance = 0;

	thread_recompute_priority(thread);

	thread_unlock(thread);
	splx(s);
}
343
/*
 * thread_policy_get:
 *
 * Return a thread's current policy for the given flavor, or — when
 * *get_default is TRUE on entry, or the thread's state doesn't match
 * the requested flavor (in which case *get_default is set TRUE) — the
 * default policy values.  Called with no locks held; takes the thread
 * mutex, reading scheduler state under the thread lock at splsched.
 */
kern_return_t
thread_policy_get(
	thread_t				thread,
	thread_policy_flavor_t	flavor,
	thread_policy_t			policy_info,
	mach_msg_type_number_t	*count,
	boolean_t				*get_default)
{
	kern_return_t			result = KERN_SUCCESS;
	spl_t					s;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);
	if (!thread->active) {
		thread_mtx_unlock(thread);

		return (KERN_TERMINATED);
	}

	switch (flavor) {

	case THREAD_EXTENDED_POLICY:
	{
		boolean_t		timeshare = TRUE;

		if (!(*get_default)) {
			s = splsched();
			thread_lock(thread);

			/*
			 * A realtime thread (current or saved-while-demoted mode)
			 * has no extended policy; report the default instead.
			 */
			if (	 (thread->sched_mode != TH_MODE_REALTIME)	&&
					 (thread->saved_mode != TH_MODE_REALTIME)			) {
				/* For a demoted thread, the saved mode is authoritative. */
				if (!(thread->sched_flags & TH_SFLAG_DEMOTED_MASK))
					timeshare = (thread->sched_mode == TH_MODE_TIMESHARE) != 0;
				else
					timeshare = (thread->saved_mode == TH_MODE_TIMESHARE) != 0;
			}
			else
				*get_default = TRUE;

			thread_unlock(thread);
			splx(s);
		}

		if (*count >= THREAD_EXTENDED_POLICY_COUNT) {
			thread_extended_policy_t	info;

			info = (thread_extended_policy_t)policy_info;
			info->timeshare = timeshare;
		}

		break;
	}

	case THREAD_TIME_CONSTRAINT_POLICY:
	{
		thread_time_constraint_policy_t		info;

		if (*count < THREAD_TIME_CONSTRAINT_POLICY_COUNT) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		info = (thread_time_constraint_policy_t)policy_info;

		if (!(*get_default)) {
			s = splsched();
			thread_lock(thread);

			/* Realtime parameters apply whether active or saved-demoted. */
			if (	(thread->sched_mode == TH_MODE_REALTIME)	||
					(thread->saved_mode == TH_MODE_REALTIME)		) {
				info->period = thread->realtime.period;
				info->computation = thread->realtime.computation;
				info->constraint = thread->realtime.constraint;
				info->preemptible = thread->realtime.preemptible;
			}
			else
				*get_default = TRUE;

			thread_unlock(thread);
			splx(s);
		}

		/* Non-realtime thread (or default requested): report defaults. */
		if (*get_default) {
			info->period = 0;
			info->computation = default_timeshare_computation;
			info->constraint = default_timeshare_constraint;
			info->preemptible = TRUE;
		}

		break;
	}

	case THREAD_PRECEDENCE_POLICY:
	{
		thread_precedence_policy_t		info;

		if (*count < THREAD_PRECEDENCE_POLICY_COUNT) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		info = (thread_precedence_policy_t)policy_info;

		if (!(*get_default)) {
			s = splsched();
			thread_lock(thread);

			info->importance = thread->importance;

			thread_unlock(thread);
			splx(s);
		}
		else
			info->importance = 0;

		break;
	}

	case THREAD_AFFINITY_POLICY:
	{
		thread_affinity_policy_t		info;

		if (!thread_affinity_is_supported()) {
			result = KERN_NOT_SUPPORTED;
			break;
		}
		if (*count < THREAD_AFFINITY_POLICY_COUNT) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		info = (thread_affinity_policy_t)policy_info;

		if (!(*get_default))
			info->affinity_tag = thread_affinity_get(thread);
		else
			info->affinity_tag = THREAD_AFFINITY_TAG_NULL;

		break;
	}

	case THREAD_POLICY_STATE:
	{
		thread_policy_state_t		info;

		if (*count < THREAD_POLICY_STATE_COUNT) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		/* Only root can get this info */
		if (current_task()->sec_token.val[0] != 0) {
			result = KERN_PROTECTION_FAILURE;
			break;
		}

		info = (thread_policy_state_t)policy_info;

		if (!(*get_default)) {
			/*
			 * Unlock the thread mutex and directly return.
			 * This is necessary because proc_get_thread_policy()
			 * takes the task lock.
			 */
			thread_mtx_unlock(thread);
			proc_get_thread_policy(thread, info);
			return (result);
		} else {
			info->requested = 0;
			info->effective = 0;
			info->pending = 0;
		}

		break;
	}


	default:
		result = KERN_INVALID_ARGUMENT;
		break;
	}

	thread_mtx_unlock(thread);

	return (result);
}
532