/*
 * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	File:	kern/task.c
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young, David Golub,
 *		David Black
 *
 *	Task management primitives implementation.
 */
/*
 * Copyright (c) 1993 The University of Utah and
 * the Computer Systems Laboratory (CSL).  All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
 * IS" CONDITION.  THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * CSL requests users of this software to return to csl-dist@cs.utah.edu any
 * improvements that they make and grant CSL redistribution rights.
 *
 */
/*
 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 * Copyright (c) 2005 SPARTA, Inc.
 */

#include <mach_kdb.h>
#include <fast_tas.h>
#include <platforms.h>

#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/host_priv.h>
#include <mach/machine/vm_types.h>
#include <mach/vm_param.h>
#include <mach/semaphore.h>
#include <mach/task_info.h>
#include <mach/task_special_ports.h>

#include <ipc/ipc_types.h>
#include <ipc/ipc_space.h>
#include <ipc/ipc_entry.h>

#include <kern/kern_types.h>
#include <kern/mach_param.h>
#include <kern/misc_protos.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/zalloc.h>
#include <kern/kalloc.h>
#include <kern/processor.h>
#include <kern/sched_prim.h>	/* for thread_wakeup */
#include <kern/ipc_tt.h>
#include <kern/ledger.h>
#include <kern/host.h>
#include <kern/clock.h>
#include <kern/timer.h>
#include <kern/assert.h>
#include <kern/sync_lock.h>
#include <kern/affinity.h>

#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>		/* for kernel_map, ipc_kernel_map */
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>

#if	MACH_KDB
#include <ddb/db_sym.h>
#endif	/* MACH_KDB */

#ifdef __ppc__
#include <ppc/exception.h>
#include <ppc/hw_perfmon.h>
#endif

/*
 * Exported interfaces
 */

#include <mach/task_server.h>
#include <mach/mach_host_server.h>
#include <mach/host_security_server.h>
#include <mach/mach_port_server.h>
#include <mach/security_server.h>

#include <vm/vm_shared_region.h>

#if CONFIG_MACF_MACH
#include <security/mac_mach_internal.h>
#endif

task_t	kernel_task;
zone_t	task_zone;

/* Forwards */

void		task_hold_locked(
			task_t		task);
void		task_wait_locked(
			task_t		task);
void		task_release_locked(
			task_t		task);
void		task_free(
			task_t		task);
void		task_synchronizer_destroy_all(
			task_t		task);

kern_return_t	task_set_ledger(
			task_t		task,
			ledger_t	wired,
			ledger_t	paged);

void
task_backing_store_privileged(
			task_t task)
{
	task_lock(task);
	task->priv_flags |= VM_BACKING_STORE_PRIV;
	task_unlock(task);
	return;
}


void
task_set_64bit(
		task_t task,
		boolean_t is64bit)
{
#ifdef __i386__
	thread_t thread;
#endif /* __i386__ */
	int	vm_flags = 0;

	if (is64bit) {
		if (task_has_64BitAddr(task))
			return;

		task_set_64BitAddr(task);
	} else {
		if (!task_has_64BitAddr(task))
			return;

		/*
		 * Deallocate all memory previously allocated
		 * above the 32-bit address space, since it won't
		 * be accessible anymore.
		 */
		/* remove regular VM map entries & pmap mappings */
		(void) vm_map_remove(task->map,
				     (vm_map_offset_t) VM_MAX_ADDRESS,
				     MACH_VM_MAX_ADDRESS,
				     0);
#ifdef __ppc__
		/* LP64todo - make this clean */
		/*
		 * PPC51: ppc64 is limited to 51-bit addresses.
		 * Memory mapped above that limit is handled specially
		 * at the pmap level, so let pmap clean the commpage mapping
		 * explicitly...
		 */
		pmap_unmap_sharedpage(task->map->pmap);	/* Unmap commpage */
		/* ... and avoid regular pmap cleanup */
		vm_flags |= VM_MAP_REMOVE_NO_PMAP_CLEANUP;
#endif /* __ppc__ */
		/* remove the higher VM mappings */
		(void) vm_map_remove(task->map,
				     MACH_VM_MAX_ADDRESS,
				     0xFFFFFFFFFFFFF000ULL,
				     vm_flags);
		task_clear_64BitAddr(task);
	}
	/* FIXME: On x86, the thread save state flavor can diverge from the
	 * task's 64-bit feature flag due to the 32-bit/64-bit register save
	 * state dichotomy. Since we can be pre-empted in this interval,
	 * certain routines may observe the thread as being in an inconsistent
	 * state with respect to its task's 64-bitness.
	 */
#ifdef __i386__
	queue_iterate(&task->threads, thread, thread_t, task_threads) {
		machine_thread_switch_addrmode(thread);
	}
#endif /* __i386__ */
}

void
task_init(void)
{
	task_zone = zinit(
			sizeof(struct task),
			TASK_MAX * sizeof(struct task),
			TASK_CHUNK * sizeof(struct task),
			"tasks");

	/*
	 * Create the kernel task as the first task.
	 */
	if (task_create_internal(TASK_NULL, FALSE, FALSE, &kernel_task) != KERN_SUCCESS)
		panic("task_init\n");

	vm_map_deallocate(kernel_task->map);
	kernel_task->map = kernel_map;
}
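
/*
 * For illustration only: a minimal sketch of the zinit() pattern used in
 * task_init() above, applied to a hypothetical "widget" type.  The
 * argument meanings (element size, maximum zone memory, allocation chunk
 * size, zone name) mirror the task_zone call; the type, sizes and names
 * here are invented.
 */
#if 0	/* example sketch, not built */
struct widget {
	int	w_state;
};

static zone_t	widget_zone;

static void
widget_init(void)
{
	widget_zone = zinit(
			sizeof(struct widget),
			1024 * sizeof(struct widget),	/* max zone memory */
			64 * sizeof(struct widget),	/* allocation chunk */
			"widgets");
}
#endif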

/*
 * Create a task running in the kernel address space.  It may
 * have its own map of size map_size and may have ipc privileges.
 */
kern_return_t
kernel_task_create(
	__unused task_t		parent_task,
	__unused vm_offset_t		map_base,
	__unused vm_size_t		map_size,
	__unused task_t		*child_task)
{
	return (KERN_INVALID_ARGUMENT);
}

kern_return_t
task_create(
	task_t				parent_task,
	__unused ledger_port_array_t	ledger_ports,
	__unused mach_msg_type_number_t	num_ledger_ports,
	__unused boolean_t		inherit_memory,
	__unused task_t			*child_task)	/* OUT */
{
	if (parent_task == TASK_NULL)
		return(KERN_INVALID_ARGUMENT);

	/*
	 * No longer supported: too many calls assume that a task has a valid
	 * process attached.
	 */
	return(KERN_FAILURE);
}

kern_return_t
host_security_create_task_token(
	host_security_t			host_security,
	task_t				parent_task,
	__unused security_token_t	sec_token,
	__unused audit_token_t		audit_token,
	__unused host_priv_t		host_priv,
	__unused ledger_port_array_t	ledger_ports,
	__unused mach_msg_type_number_t	num_ledger_ports,
	__unused boolean_t		inherit_memory,
	__unused task_t			*child_task)	/* OUT */
{
	if (parent_task == TASK_NULL)
		return(KERN_INVALID_ARGUMENT);

	if (host_security == HOST_NULL)
		return(KERN_INVALID_SECURITY);

	/*
	 * No longer supported.
	 */
	return(KERN_FAILURE);
}

kern_return_t
task_create_internal(
	task_t		parent_task,
	boolean_t	inherit_memory,
	boolean_t	is_64bit,
	task_t		*child_task)		/* OUT */
{
	task_t			new_task;
	vm_shared_region_t	shared_region;

	new_task = (task_t) zalloc(task_zone);

	if (new_task == TASK_NULL)
		return(KERN_RESOURCE_SHORTAGE);

	/* one ref for just being alive; one for our caller */
	new_task->ref_count = 2;

	if (inherit_memory)
		new_task->map = vm_map_fork(parent_task->map);
	else
		new_task->map = vm_map_create(pmap_create(0, is_64bit),
					(vm_map_offset_t)(VM_MIN_ADDRESS),
					(vm_map_offset_t)(VM_MAX_ADDRESS), TRUE);

	/* Inherit memlock limit from parent */
	if (parent_task)
		vm_map_set_user_wire_limit(new_task->map, parent_task->map->user_wire_limit);

	mutex_init(&new_task->lock, 0);
	queue_init(&new_task->threads);
	new_task->suspend_count = 0;
	new_task->thread_count = 0;
	new_task->active_thread_count = 0;
	new_task->user_stop_count = 0;
	new_task->pset_hint = PROCESSOR_SET_NULL;
	new_task->role = TASK_UNSPECIFIED;
	new_task->active = TRUE;
	new_task->user_data = NULL;
	new_task->faults = 0;
	new_task->cow_faults = 0;
	new_task->pageins = 0;
	new_task->messages_sent = 0;
	new_task->messages_received = 0;
	new_task->syscalls_mach = 0;
	new_task->priv_flags = 0;
	new_task->syscalls_unix = 0;
	new_task->c_switch = new_task->p_switch = new_task->ps_switch = 0;
	new_task->taskFeatures[0] = 0;				/* Init task features */
	new_task->taskFeatures[1] = 0;				/* Init task features */

#ifdef MACH_BSD
	new_task->bsd_info = NULL;
#endif /* MACH_BSD */

#ifdef __i386__
	new_task->i386_ldt = 0;
#endif

#ifdef __ppc__
	if (BootProcInfo.pf.Available & pf64Bit)
		new_task->taskFeatures[0] |= tf64BitData;	/* If 64-bit machine, show we have 64-bit registers at least */
#endif

	queue_init(&new_task->semaphore_list);
	queue_init(&new_task->lock_set_list);
	new_task->semaphores_owned = 0;
	new_task->lock_sets_owned = 0;

#if CONFIG_MACF_MACH
	/*mutex_init(&new_task->labellock, ETAP_NO_TRACE);*/
	new_task->label = labelh_new(1);
	mac_task_label_init(&new_task->maclabel);
#endif

	ipc_task_init(new_task, parent_task);

	new_task->total_user_time = 0;
	new_task->total_system_time = 0;

	new_task->vtimers = 0;

	new_task->shared_region = NULL;

	new_task->affinity_space = NULL;

	if (parent_task != TASK_NULL) {
		new_task->sec_token = parent_task->sec_token;
		new_task->audit_token = parent_task->audit_token;

		/* inherit the parent's shared region */
		shared_region = vm_shared_region_get(parent_task);
		vm_shared_region_set(new_task, shared_region);

		new_task->wired_ledger_port = ledger_copy(
			convert_port_to_ledger(parent_task->wired_ledger_port));
		new_task->paged_ledger_port = ledger_copy(
			convert_port_to_ledger(parent_task->paged_ledger_port));
		if (task_has_64BitAddr(parent_task))
			task_set_64BitAddr(new_task);

#ifdef __i386__
		if (inherit_memory && parent_task->i386_ldt)
			new_task->i386_ldt = user_ldt_copy(parent_task->i386_ldt);
#endif
		if (inherit_memory && parent_task->affinity_space)
			task_affinity_create(parent_task, new_task);
	}
	else {
		new_task->sec_token = KERNEL_SECURITY_TOKEN;
		new_task->audit_token = KERNEL_AUDIT_TOKEN;
		new_task->wired_ledger_port = ledger_copy(root_wired_ledger);
		new_task->paged_ledger_port = ledger_copy(root_paged_ledger);
	}

	if (kernel_task == TASK_NULL) {
		new_task->priority = BASEPRI_KERNEL;
		new_task->max_priority = MAXPRI_KERNEL;
	}
	else {
		new_task->priority = BASEPRI_DEFAULT;
		new_task->max_priority = MAXPRI_USER;
	}

	mutex_lock(&tasks_threads_lock);
	queue_enter(&tasks, new_task, task_t, tasks);
	tasks_count++;
	mutex_unlock(&tasks_threads_lock);

	if (vm_backing_store_low && parent_task != TASK_NULL)
		new_task->priv_flags |= (parent_task->priv_flags & VM_BACKING_STORE_PRIV);

	ipc_task_enable(new_task);

	*child_task = new_task;
	return(KERN_SUCCESS);
}
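
/*
 * Illustrative only: how the creation path above might be driven by a
 * kernel-internal caller that wants to clone the current task.  A sketch
 * under the assumption that the caller disposes of the returned
 * reference; "example_fork_task" is not a real interface.
 */
#if 0	/* example sketch, not built */
static kern_return_t
example_fork_task(task_t *out_task)
{
	task_t	parent = current_task();

	/*
	 * inherit_memory == TRUE forks the parent's VM map via
	 * vm_map_fork(); the 64-bit flag follows the parent.
	 */
	return (task_create_internal(parent, TRUE,
			task_has_64BitAddr(parent) ? TRUE : FALSE,
			out_task));
}
#endif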

/*
 *	task_deallocate:
 *
 *	Drop a reference on a task.
 */
void
task_deallocate(
	task_t		task)
{
	if (task == TASK_NULL)
		return;

	if (task_deallocate_internal(task) > 0)
		return;

	ipc_task_terminate(task);

	if (task->affinity_space)
		task_affinity_deallocate(task);

	vm_map_deallocate(task->map);
	is_release(task->itk_space);

#if CONFIG_MACF_MACH
	labelh_release(task->label);
#endif
	zfree(task_zone, task);
}
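
/*
 * A minimal sketch of the reference discipline enforced above: each
 * reference taken (task_reference(), or the caller reference returned by
 * task_create_internal()) must be balanced by one task_deallocate();
 * dropping the last reference tears the task down.  The helper name is
 * hypothetical.
 */
#if 0	/* example sketch, not built */
static void
example_with_task_ref(task_t task)
{
	task_reference(task);		/* pin the task */
	/* ... safely use the task here ... */
	task_deallocate(task);		/* unpin; may free on last release */
}
#endif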

/*
 *	task_name_deallocate:
 *
 *	Drop a reference on a task name.
 */
void
task_name_deallocate(
	task_name_t		task_name)
{
	task_deallocate((task_t)task_name);
}


/*
 *	task_terminate:
 *
 *	Terminate the specified task.  See comments on thread_terminate
 *	(kern/thread.c) about problems with terminating the "current task."
 */

kern_return_t
task_terminate(
	task_t		task)
{
	if (task == TASK_NULL)
		return (KERN_INVALID_ARGUMENT);

	if (task->bsd_info)
		return (KERN_FAILURE);

	return (task_terminate_internal(task));
}

kern_return_t
task_terminate_internal(
	task_t			task)
{
	thread_t			thread, self;
	task_t				self_task;
	boolean_t			interrupt_save;

	assert(task != kernel_task);

	self = current_thread();
	self_task = self->task;

	/*
	 *	Get the task locked and make sure that we are not racing
	 *	with someone else trying to terminate us.
	 */
	if (task == self_task)
		task_lock(task);
	else if (task < self_task) {
		task_lock(task);
		task_lock(self_task);
	}
	else {
		task_lock(self_task);
		task_lock(task);
	}

	if (!task->active || !self->active) {
		/*
		 *	Task or current thread is already being terminated.
		 *	Just return an error.  If we are dying, this will
		 *	just get us to our AST special handler, which will
		 *	finalize the termination of ourselves.
		 */
		task_unlock(task);
		if (self_task != task)
			task_unlock(self_task);

		return (KERN_FAILURE);
	}

	if (self_task != task)
		task_unlock(self_task);

	/*
	 * Make sure the current thread does not get aborted out of
	 * the waits inside these operations.
	 */
	interrupt_save = thread_interrupt_level(THREAD_UNINT);

	/*
	 *	Indicate that we want all the threads to stop executing
	 *	at user space by holding the task (we would have held
	 *	each thread independently in thread_terminate_internal -
	 *	but this way we may be more likely to already find it
	 *	held there).  Mark the task inactive, and prevent
	 *	further task operations via the task port.
	 */
	task_hold_locked(task);
	task->active = FALSE;
	ipc_task_disable(task);

	/*
	 *	Terminate each thread in the task.
	 */
	queue_iterate(&task->threads, thread, thread_t, task_threads) {
		thread_terminate_internal(thread);
	}

	/*
	 *	Give the machine dependent code a chance
	 *	to perform cleanup before ripping apart
	 *	the task.
	 */
	if (self_task == task)
		machine_thread_terminate_self();

	task_unlock(task);

	/*
	 *	Destroy all synchronizers owned by the task.
	 */
	task_synchronizer_destroy_all(task);

	/*
	 *	Destroy the IPC space, leaving just a reference for it.
	 */
	ipc_space_destroy(task->itk_space);

#ifdef __ppc__
	/* LP64todo - make this clean */
	/*
	 * PPC51: ppc64 is limited to 51-bit addresses.
	 */
	pmap_unmap_sharedpage(task->map->pmap);		/* Unmap commpage */
#endif /* __ppc__ */

	if (vm_map_has_4GB_pagezero(task->map))
		vm_map_clear_4GB_pagezero(task->map);

	/*
	 * If the current thread is a member of the task
	 * being terminated, then the last reference to
	 * the task will not be dropped until the thread
	 * is finally reaped.  To avoid incurring the
	 * expense of removing the address space regions
	 * at reap time, we do it explicitly here.
	 */
	vm_map_remove(task->map,
		      task->map->min_offset,
		      task->map->max_offset,
		      VM_MAP_NO_FLAGS);

	/* release our shared region */
	vm_shared_region_set(task, NULL);

	mutex_lock(&tasks_threads_lock);
	queue_remove(&tasks, task, task_t, tasks);
	tasks_count--;
	mutex_unlock(&tasks_threads_lock);

	/*
	 * We no longer need to guard against being aborted, so restore
	 * the previous interruptible state.
	 */
	thread_interrupt_level(interrupt_save);

#if __ppc__
	perfmon_release_facility(task);		/* notify the perfmon facility */
#endif

	/*
	 * Get rid of the task active reference on itself.
	 */
	task_deallocate(task);

	return (KERN_SUCCESS);
}
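
/*
 * The lock ordering in task_terminate_internal() (lower-addressed task
 * locked first) is the classic deadlock-avoidance idiom for taking two
 * locks of the same class.  A generic sketch of the pattern, with a
 * hypothetical pair of tasks:
 */
#if 0	/* example sketch, not built */
static void
example_lock_task_pair(task_t t1, task_t t2)
{
	if (t1 == t2) {
		task_lock(t1);		/* same task: one lock only */
	} else if (t1 < t2) {
		task_lock(t1);		/* global order: lower address first */
		task_lock(t2);
	} else {
		task_lock(t2);
		task_lock(t1);
	}
}
#endif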

/*
 * task_halt:
 *
 * 	Shut the current task down (except for the current thread) in
 *	preparation for dramatic changes to the task (probably exec).
 *	We hold the task, terminate all other threads in the task and
 *	wait for them to terminate, clean up the port space, and when
 *	all done, let the current thread go.
 */
kern_return_t
task_halt(
	task_t		task)
{
	thread_t	thread, self;

	assert(task != kernel_task);

	self = current_thread();

	if (task != self->task)
		return (KERN_INVALID_ARGUMENT);

	task_lock(task);

	if (!task->active || !self->active) {
		/*
		 *	Task or current thread is already being terminated.
		 *	Hurry up and return out of the current kernel context
		 *	so that we run our AST special handler to terminate
		 *	ourselves.
		 */
		task_unlock(task);

		return (KERN_FAILURE);
	}

	if (task->thread_count > 1) {
		/*
		 * Mark all the threads to keep them from starting any more
		 * user-level execution.  The thread_terminate_internal code
		 * would do this on a thread by thread basis anyway, but this
		 * gives us a better chance of not having to wait there.
		 */
		task_hold_locked(task);

		/*
		 *	Terminate all the other threads in the task.
		 */
		queue_iterate(&task->threads, thread, thread_t, task_threads) {
			if (thread != self)
				thread_terminate_internal(thread);
		}

		task_release_locked(task);
	}

	/*
	 *	Give the machine dependent code a chance
	 *	to perform cleanup before ripping apart
	 *	the task.
	 */
	machine_thread_terminate_self();

	task_unlock(task);

	/*
	 *	Destroy all synchronizers owned by the task.
	 */
	task_synchronizer_destroy_all(task);

	/*
	 *	Destroy the contents of the IPC space, leaving just
	 *	a reference for it.
	 */
	ipc_space_clean(task->itk_space);

	/*
	 * Clean out the address space, as we are going to be
	 * getting a new one.
	 */
	vm_map_remove(task->map, task->map->min_offset,
		      task->map->max_offset, VM_MAP_NO_FLAGS);

	return (KERN_SUCCESS);
}

/*
 *	task_hold_locked:
 *
 *	Suspend execution of the specified task.
 *	This is a recursive-style suspension of the task; a count of
 *	suspends is maintained.
 *
 * 	CONDITIONS: the task is locked and active.
 */
void
task_hold_locked(
	register task_t		task)
{
	register thread_t	thread;

	assert(task->active);

	if (task->suspend_count++ > 0)
		return;

	/*
	 *	Iterate through all the threads and hold them.
	 */
	queue_iterate(&task->threads, thread, thread_t, task_threads) {
		thread_mtx_lock(thread);
		thread_hold(thread);
		thread_mtx_unlock(thread);
	}
}
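
/*
 * Example of the counting behaviour above: with suspend_count 0, the
 * first task_hold_locked() holds every thread; a nested call only bumps
 * the count, and each matching task_release_locked() undoes one level,
 * resuming the threads when the count returns to zero.
 */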

/*
 *	task_hold:
 *
 *	Same as the internal routine above, except that it must lock
 *	and verify that the task is active.  This differs from task_suspend
 *	in that it places a kernel hold on the task rather than just a
 *	user-level hold.  This keeps users from over-resuming and setting
 *	it running out from under the kernel.
 *
 * 	CONDITIONS: the caller holds a reference on the task
 */
kern_return_t
task_hold(
	register task_t		task)
{
	if (task == TASK_NULL)
		return (KERN_INVALID_ARGUMENT);

	task_lock(task);

	if (!task->active) {
		task_unlock(task);

		return (KERN_FAILURE);
	}

	task_hold_locked(task);
	task_unlock(task);

	return (KERN_SUCCESS);
}

/*
 *	task_wait_locked:
 *
 *	Wait for all threads in task to stop.
 *
 * Conditions:
 *	Called with task locked, active, and held.
 */
void
task_wait_locked(
	register task_t		task)
{
	register thread_t	thread, self;

	assert(task->active);
	assert(task->suspend_count > 0);

	self = current_thread();

	/*
	 *	Iterate through all the threads and wait for them to
	 *	stop.  Do not wait for the current thread if it is within
	 *	the task.
	 */
	queue_iterate(&task->threads, thread, thread_t, task_threads) {
		if (thread != self)
			thread_wait(thread);
	}
}

/*
 *	task_release_locked:
 *
 *	Release a kernel hold on a task.
 *
 * 	CONDITIONS: the task is locked and active
 */
void
task_release_locked(
	register task_t		task)
{
	register thread_t	thread;

	assert(task->active);
	assert(task->suspend_count > 0);

	if (--task->suspend_count > 0)
		return;

	queue_iterate(&task->threads, thread, thread_t, task_threads) {
		thread_mtx_lock(thread);
		thread_release(thread);
		thread_mtx_unlock(thread);
	}
}

/*
 *	task_release:
 *
 *	Same as the internal routine above, except that it must lock
 *	and verify that the task is active.
 *
 * 	CONDITIONS: The caller holds a reference to the task
 */
kern_return_t
task_release(
	task_t		task)
{
	if (task == TASK_NULL)
		return (KERN_INVALID_ARGUMENT);

	task_lock(task);

	if (!task->active) {
		task_unlock(task);

		return (KERN_FAILURE);
	}

	task_release_locked(task);
	task_unlock(task);

	return (KERN_SUCCESS);
}

kern_return_t
task_threads(
	task_t					task,
	thread_act_array_t		*threads_out,
	mach_msg_type_number_t	*count)
{
	mach_msg_type_number_t	actual;
	thread_t				*thread_list;
	thread_t				thread;
	vm_size_t				size, size_needed;
	void					*addr;
	unsigned int			i, j;

	if (task == TASK_NULL)
		return (KERN_INVALID_ARGUMENT);

	size = 0; addr = NULL;

	for (;;) {
		task_lock(task);
		if (!task->active) {
			task_unlock(task);

			if (size != 0)
				kfree(addr, size);

			return (KERN_FAILURE);
		}

		actual = task->thread_count;

		/* do we have the memory we need? */
		size_needed = actual * sizeof (mach_port_t);
		if (size_needed <= size)
			break;

		/* unlock the task and allocate more memory */
		task_unlock(task);

		if (size != 0)
			kfree(addr, size);

		assert(size_needed > 0);
		size = size_needed;

		addr = kalloc(size);
		if (addr == NULL)
			return (KERN_RESOURCE_SHORTAGE);
	}

	/* OK, have memory and the task is locked & active */
	thread_list = (thread_t *)addr;

	i = j = 0;

	for (thread = (thread_t)queue_first(&task->threads); i < actual;
				++i, thread = (thread_t)queue_next(&thread->task_threads)) {
		thread_reference_internal(thread);
		thread_list[j++] = thread;
	}

	assert(queue_end(&task->threads, (queue_entry_t)thread));

	actual = j;
	size_needed = actual * sizeof (mach_port_t);

	/* can unlock task now that we've got the thread refs */
	task_unlock(task);

	if (actual == 0) {
		/* no threads, so return null pointer and deallocate memory */

		*threads_out = NULL;
		*count = 0;

		if (size != 0)
			kfree(addr, size);
	}
	else {
		/* if we allocated too much, must copy */

		if (size_needed < size) {
			void *newaddr;

			newaddr = kalloc(size_needed);
			if (newaddr == NULL) {
				for (i = 0; i < actual; ++i)
					thread_deallocate(thread_list[i]);
				kfree(addr, size);
				return (KERN_RESOURCE_SHORTAGE);
			}

			bcopy(addr, newaddr, size_needed);
			kfree(addr, size);
			thread_list = (thread_t *)newaddr;
		}

		*threads_out = thread_list;
		*count = actual;

		/* do the conversion that MIG should handle */

		for (i = 0; i < actual; ++i)
			((ipc_port_t *) thread_list)[i] = convert_thread_to_port(thread_list[i]);
	}

	return (KERN_SUCCESS);
}
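
/*
 * User-space view of the interface above (a sketch; assumes <mach/mach.h>
 * and a send right for the target task).  Each returned thread port and
 * the out-of-line array holding them must be released by the caller.
 */
#if 0	/* user-space example, not kernel code */
#include <mach/mach.h>

static void
example_list_threads(task_t target)
{
	thread_act_array_t	threads;
	mach_msg_type_number_t	count, i;

	if (task_threads(target, &threads, &count) != KERN_SUCCESS)
		return;

	for (i = 0; i < count; i++)
		mach_port_deallocate(mach_task_self(), threads[i]);

	vm_deallocate(mach_task_self(), (vm_address_t)threads,
		      count * sizeof(*threads));
}
#endif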

/*
 *	task_suspend:
 *
 *	Implement a user-level suspension on a task.
 *
 * Conditions:
 * 	The caller holds a reference to the task
 */
kern_return_t
task_suspend(
	register task_t		task)
{
	if (task == TASK_NULL || task == kernel_task)
		return (KERN_INVALID_ARGUMENT);

	task_lock(task);

	if (!task->active) {
		task_unlock(task);

		return (KERN_FAILURE);
	}

	if (task->user_stop_count++ > 0) {
		/*
		 *	If the stop count was positive, the task is
		 *	already stopped and we can exit.
		 */
		task_unlock(task);

		return (KERN_SUCCESS);
	}

	/*
	 * Put a kernel-level hold on the threads in the task (all
	 * user-level task suspensions added together represent a
	 * single kernel-level hold).  We then wait for the threads
	 * to stop executing user code.
	 */
	task_hold_locked(task);
	task_wait_locked(task);

	task_unlock(task);

	return (KERN_SUCCESS);
}

/*
 *	task_resume:
 *		Decrement a task's user-level suspension count, releasing
 *		the kernel hold on the task when the count drops to zero.
 *
 * Conditions:
 *		The caller holds a reference to the task
 */
kern_return_t
task_resume(
	register task_t	task)
{
	register boolean_t	release = FALSE;

	if (task == TASK_NULL || task == kernel_task)
		return (KERN_INVALID_ARGUMENT);

	task_lock(task);

	if (!task->active) {
		task_unlock(task);

		return (KERN_FAILURE);
	}

	if (task->user_stop_count > 0) {
		if (--task->user_stop_count == 0)
			release = TRUE;
	}
	else {
		task_unlock(task);

		return (KERN_FAILURE);
	}

	/*
	 *	Release the task if necessary.
	 */
	if (release)
		task_release_locked(task);

	task_unlock(task);

	return (KERN_SUCCESS);
}
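
/*
 * Illustrative user-space usage of the counting semantics above; assumes
 * a send right for the target task (e.g. one obtained via task_for_pid()).
 * Sketch only.
 */
#if 0	/* user-space example, not kernel code */
#include <mach/mach.h>

static void
example_suspend_resume(task_t target)
{
	if (task_suspend(target) == KERN_SUCCESS) {	/* count 0 -> 1: stopped */
		task_suspend(target);			/* 1 -> 2: already stopped */
		task_resume(target);			/* 2 -> 1: still stopped */
		task_resume(target);			/* 1 -> 0: hold released */
	}
}
#endif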

kern_return_t
host_security_set_task_token(
	host_security_t		host_security,
	task_t			task,
	security_token_t	sec_token,
	audit_token_t		audit_token,
	host_priv_t		host_priv)
{
	ipc_port_t	host_port;
	kern_return_t	kr;

	if (task == TASK_NULL)
		return(KERN_INVALID_ARGUMENT);

	if (host_security == HOST_NULL)
		return(KERN_INVALID_SECURITY);

	task_lock(task);
	task->sec_token = sec_token;
	task->audit_token = audit_token;
	task_unlock(task);

	if (host_priv != HOST_PRIV_NULL) {
		kr = host_get_host_priv_port(host_priv, &host_port);
	} else {
		kr = host_get_host_port(host_priv_self(), &host_port);
	}
	assert(kr == KERN_SUCCESS);
	kr = task_set_special_port(task, TASK_HOST_PORT, host_port);
	return(kr);
}

/*
 * Utility routine to set a ledger
 */
kern_return_t
task_set_ledger(
	task_t		task,
	ledger_t	wired,
	ledger_t	paged)
{
	if (task == TASK_NULL)
		return(KERN_INVALID_ARGUMENT);

	task_lock(task);
	if (wired) {
		ipc_port_release_send(task->wired_ledger_port);
		task->wired_ledger_port = ledger_copy(wired);
	}
	if (paged) {
		ipc_port_release_send(task->paged_ledger_port);
		task->paged_ledger_port = ledger_copy(paged);
	}
	task_unlock(task);

	return(KERN_SUCCESS);
}

/*
 * This routine was added, pretty much exclusively, for registering the
 * RPC glue vector for in-kernel short-circuited tasks.  Rather than
 * removing it completely, I have only disabled that feature (which was
 * the only feature at the time).  It just appears that we are going to
 * want to add some user data to tasks in the future (e.g. BSD info,
 * task names, etc...), so I left it in the formal task interface.
 */
kern_return_t
task_set_info(
	task_t		task,
	task_flavor_t	flavor,
	__unused task_info_t	task_info_in,		/* pointer to IN array */
	__unused mach_msg_type_number_t	task_info_count)
{
	if (task == TASK_NULL)
		return(KERN_INVALID_ARGUMENT);

	switch (flavor) {
	    default:
		return (KERN_INVALID_ARGUMENT);
	}
	return (KERN_SUCCESS);
}

kern_return_t
task_info(
	task_t					task,
	task_flavor_t			flavor,
	task_info_t				task_info_out,
	mach_msg_type_number_t	*task_info_count)
{
	if (task == TASK_NULL)
		return (KERN_INVALID_ARGUMENT);

	switch (flavor) {

	case TASK_BASIC_INFO_32:
	case TASK_BASIC2_INFO_32:
	{
		task_basic_info_32_t	basic_info;
		vm_map_t			map;

		if (*task_info_count < TASK_BASIC_INFO_32_COUNT)
			return (KERN_INVALID_ARGUMENT);

		basic_info = (task_basic_info_32_t)task_info_out;

		map = (task == kernel_task) ? kernel_map : task->map;
		basic_info->virtual_size  = CAST_DOWN(vm_offset_t, map->size);
		if (flavor == TASK_BASIC2_INFO_32) {
			/*
			 * The "BASIC2" flavor gets the maximum resident
			 * size instead of the current resident size...
			 */
			basic_info->resident_size = pmap_resident_max(map->pmap);
		} else {
			basic_info->resident_size = pmap_resident_count(map->pmap);
		}
		basic_info->resident_size *= PAGE_SIZE;

		task_lock(task);
		basic_info->policy = ((task != kernel_task) ?
						POLICY_TIMESHARE : POLICY_RR);
		basic_info->suspend_count = task->user_stop_count;

		absolutetime_to_microtime(task->total_user_time,
					  (unsigned *)&basic_info->user_time.seconds,
					  (unsigned *)&basic_info->user_time.microseconds);
		absolutetime_to_microtime(task->total_system_time,
					  (unsigned *)&basic_info->system_time.seconds,
					  (unsigned *)&basic_info->system_time.microseconds);
		task_unlock(task);

		*task_info_count = TASK_BASIC_INFO_32_COUNT;
		break;
	}

	case TASK_BASIC_INFO_64:
	{
		task_basic_info_64_t	basic_info;
		vm_map_t			map;

		if (*task_info_count < TASK_BASIC_INFO_64_COUNT)
			return (KERN_INVALID_ARGUMENT);

		basic_info = (task_basic_info_64_t)task_info_out;

		map = (task == kernel_task) ? kernel_map : task->map;
		basic_info->virtual_size  = map->size;
		basic_info->resident_size =
			(mach_vm_size_t)(pmap_resident_count(map->pmap))
			* PAGE_SIZE_64;

		task_lock(task);
		basic_info->policy = ((task != kernel_task) ?
						POLICY_TIMESHARE : POLICY_RR);
		basic_info->suspend_count = task->user_stop_count;

		absolutetime_to_microtime(task->total_user_time,
					  (unsigned *)&basic_info->user_time.seconds,
					  (unsigned *)&basic_info->user_time.microseconds);
		absolutetime_to_microtime(task->total_system_time,
					  (unsigned *)&basic_info->system_time.seconds,
					  (unsigned *)&basic_info->system_time.microseconds);
		task_unlock(task);

		*task_info_count = TASK_BASIC_INFO_64_COUNT;
		break;
	}

	case TASK_THREAD_TIMES_INFO:
	{
		register task_thread_times_info_t	times_info;
		register thread_t					thread;

		if (*task_info_count < TASK_THREAD_TIMES_INFO_COUNT)
			return (KERN_INVALID_ARGUMENT);

		times_info = (task_thread_times_info_t) task_info_out;
		times_info->user_time.seconds = 0;
		times_info->user_time.microseconds = 0;
		times_info->system_time.seconds = 0;
		times_info->system_time.microseconds = 0;

		task_lock(task);

		queue_iterate(&task->threads, thread, thread_t, task_threads) {
			time_value_t	user_time, system_time;

			thread_read_times(thread, &user_time, &system_time);

			time_value_add(&times_info->user_time, &user_time);
			time_value_add(&times_info->system_time, &system_time);
		}

		task_unlock(task);

		*task_info_count = TASK_THREAD_TIMES_INFO_COUNT;
		break;
	}

	case TASK_ABSOLUTETIME_INFO:
	{
		task_absolutetime_info_t	info;
		register thread_t			thread;

		if (*task_info_count < TASK_ABSOLUTETIME_INFO_COUNT)
			return (KERN_INVALID_ARGUMENT);

		info = (task_absolutetime_info_t)task_info_out;
		info->threads_user = info->threads_system = 0;

		task_lock(task);

		info->total_user = task->total_user_time;
		info->total_system = task->total_system_time;

		queue_iterate(&task->threads, thread, thread_t, task_threads) {
			uint64_t	tval;

			tval = timer_grab(&thread->user_timer);
			info->threads_user += tval;
			info->total_user += tval;

			tval = timer_grab(&thread->system_timer);
			info->threads_system += tval;
			info->total_system += tval;
		}

		task_unlock(task);

		*task_info_count = TASK_ABSOLUTETIME_INFO_COUNT;
		break;
	}

	/* OBSOLETE */
	case TASK_SCHED_FIFO_INFO:
	{

		if (*task_info_count < POLICY_FIFO_BASE_COUNT)
			return (KERN_INVALID_ARGUMENT);

		return (KERN_INVALID_POLICY);
	}

	/* OBSOLETE */
	case TASK_SCHED_RR_INFO:
	{
		register policy_rr_base_t	rr_base;

		if (*task_info_count < POLICY_RR_BASE_COUNT)
			return (KERN_INVALID_ARGUMENT);

		rr_base = (policy_rr_base_t) task_info_out;

		task_lock(task);
		if (task != kernel_task) {
			task_unlock(task);
			return (KERN_INVALID_POLICY);
		}

		rr_base->base_priority = task->priority;
		task_unlock(task);

		rr_base->quantum = std_quantum_us / 1000;

		*task_info_count = POLICY_RR_BASE_COUNT;
		break;
	}

	/* OBSOLETE */
	case TASK_SCHED_TIMESHARE_INFO:
	{
		register policy_timeshare_base_t	ts_base;

		if (*task_info_count < POLICY_TIMESHARE_BASE_COUNT)
			return (KERN_INVALID_ARGUMENT);

		ts_base = (policy_timeshare_base_t) task_info_out;

		task_lock(task);
		if (task == kernel_task) {
			task_unlock(task);
			return (KERN_INVALID_POLICY);
		}

		ts_base->base_priority = task->priority;
		task_unlock(task);

		*task_info_count = POLICY_TIMESHARE_BASE_COUNT;
		break;
	}

	case TASK_SECURITY_TOKEN:
	{
		register security_token_t	*sec_token_p;

		if (*task_info_count < TASK_SECURITY_TOKEN_COUNT)
			return (KERN_INVALID_ARGUMENT);

		sec_token_p = (security_token_t *) task_info_out;

		task_lock(task);
		*sec_token_p = task->sec_token;
		task_unlock(task);

		*task_info_count = TASK_SECURITY_TOKEN_COUNT;
		break;
	}

	case TASK_AUDIT_TOKEN:
	{
		register audit_token_t	*audit_token_p;

		if (*task_info_count < TASK_AUDIT_TOKEN_COUNT)
			return (KERN_INVALID_ARGUMENT);

		audit_token_p = (audit_token_t *) task_info_out;

		task_lock(task);
		*audit_token_p = task->audit_token;
		task_unlock(task);

		*task_info_count = TASK_AUDIT_TOKEN_COUNT;
		break;
	}

	case TASK_SCHED_INFO:
		return (KERN_INVALID_ARGUMENT);

	case TASK_EVENTS_INFO:
	{
		register task_events_info_t	events_info;
		register thread_t			thread;

		if (*task_info_count < TASK_EVENTS_INFO_COUNT)
			return (KERN_INVALID_ARGUMENT);

		events_info = (task_events_info_t) task_info_out;

		task_lock(task);

		events_info->faults = task->faults;
		events_info->pageins = task->pageins;
		events_info->cow_faults = task->cow_faults;
		events_info->messages_sent = task->messages_sent;
		events_info->messages_received = task->messages_received;
		events_info->syscalls_mach = task->syscalls_mach;
		events_info->syscalls_unix = task->syscalls_unix;

		events_info->csw = task->c_switch;

		queue_iterate(&task->threads, thread, thread_t, task_threads) {
			events_info->csw += thread->c_switch;
		}

		task_unlock(task);

		*task_info_count = TASK_EVENTS_INFO_COUNT;
		break;
	}

	case TASK_AFFINITY_TAG_INFO:
	{
		if (*task_info_count < TASK_AFFINITY_TAG_INFO_COUNT)
			return (KERN_INVALID_ARGUMENT);

		return task_affinity_info(task, task_info_out, task_info_count);
	}

	default:
		return (KERN_INVALID_ARGUMENT);
	}

	return (KERN_SUCCESS);
}
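
/*
 * User-space view of the query above (a sketch; assumes <mach/mach.h>).
 * TASK_BASIC_INFO_64 and its count/struct are the public Mach types.
 */
#if 0	/* user-space example, not kernel code */
#include <mach/mach.h>
#include <stdio.h>

static void
example_basic_info(void)
{
	task_basic_info_64_data_t	info;
	mach_msg_type_number_t		count = TASK_BASIC_INFO_64_COUNT;

	if (task_info(mach_task_self(), TASK_BASIC_INFO_64,
		      (task_info_t)&info, &count) == KERN_SUCCESS) {
		printf("resident %llu virtual %llu\n",
		       (unsigned long long)info.resident_size,
		       (unsigned long long)info.virtual_size);
	}
}
#endif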

void
task_vtimer_set(
	task_t		task,
	integer_t	which)
{
	thread_t	thread;

	/* assert(task == current_task()); */ /* bogus assert 4803227 4807483 */

	task_lock(task);

	task->vtimers |= which;

	switch (which) {

	case TASK_VTIMER_USER:
		queue_iterate(&task->threads, thread, thread_t, task_threads) {
			thread->vtimer_user_save = timer_grab(&thread->user_timer);
		}
		break;

	case TASK_VTIMER_PROF:
		queue_iterate(&task->threads, thread, thread_t, task_threads) {
			thread->vtimer_prof_save = timer_grab(&thread->user_timer);
			thread->vtimer_prof_save += timer_grab(&thread->system_timer);
		}
		break;

	case TASK_VTIMER_RLIM:
		queue_iterate(&task->threads, thread, thread_t, task_threads) {
			thread->vtimer_rlim_save = timer_grab(&thread->user_timer);
			thread->vtimer_rlim_save += timer_grab(&thread->system_timer);
		}
		break;
	}

	task_unlock(task);
}

void
task_vtimer_clear(
	task_t		task,
	integer_t	which)
{
	assert(task == current_task());

	task_lock(task);

	task->vtimers &= ~which;

	task_unlock(task);
}

void
task_vtimer_update(
__unused
	task_t		task,
	integer_t	which,
	uint32_t	*microsecs)
{
	thread_t	thread = current_thread();
	uint32_t	tdelt, secs;
	uint64_t	tsum;

	assert(task == current_task());

	assert(task->vtimers & which);

	tdelt = secs = 0;

	switch (which) {

	case TASK_VTIMER_USER:
		tdelt = timer_delta(&thread->user_timer,
					&thread->vtimer_user_save);
		break;

	case TASK_VTIMER_PROF:
		tsum = timer_grab(&thread->user_timer);
		tsum += timer_grab(&thread->system_timer);
		tdelt = tsum - thread->vtimer_prof_save;
		thread->vtimer_prof_save = tsum;
		break;

	case TASK_VTIMER_RLIM:
		tsum = timer_grab(&thread->user_timer);
		tsum += timer_grab(&thread->system_timer);
		tdelt = tsum - thread->vtimer_rlim_save;
		thread->vtimer_rlim_save = tsum;
		break;
	}

	absolutetime_to_microtime(tdelt, &secs, microsecs);
}
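
/*
 * Sketch of the intended call sequence for the three routines above
 * (per the assumption, noted in the asserts, that task == current_task()):
 *
 *	task_vtimer_set(task, TASK_VTIMER_USER);	// snapshot per-thread timers
 *	...
 *	uint32_t usecs;
 *	task_vtimer_update(task, TASK_VTIMER_USER, &usecs);	// delta since snapshot
 *	task_vtimer_clear(task, TASK_VTIMER_USER);	// stop sampling
 */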

/*
 *	task_assign:
 *
 *	Change the assigned processor set for the task
 */
kern_return_t
task_assign(
	__unused task_t		task,
	__unused processor_set_t	new_pset,
	__unused boolean_t	assign_threads)
{
	return(KERN_FAILURE);
}

/*
 *	task_assign_default:
 *
 *	Version of task_assign to assign to default processor set.
 */
kern_return_t
task_assign_default(
	task_t		task,
	boolean_t	assign_threads)
{
	return (task_assign(task, &pset0, assign_threads));
}

/*
 *	task_get_assignment
 *
 *	Return name of processor set that task is assigned to.
 */
kern_return_t
task_get_assignment(
	task_t		task,
	processor_set_t	*pset)
{
	if (!task->active)
		return(KERN_FAILURE);

	*pset = &pset0;

	return (KERN_SUCCESS);
}


/*
 * 	task_policy
 *
 *	Set scheduling policy and parameters, both base and limit, for
 *	the given task. Policy must be a policy which is enabled for the
 *	processor set. Change contained threads if requested.
 */
kern_return_t
task_policy(
	__unused task_t			task,
	__unused policy_t			policy_id,
	__unused policy_base_t		base,
	__unused mach_msg_type_number_t	count,
	__unused boolean_t			set_limit,
	__unused boolean_t			change)
{
	return(KERN_FAILURE);
}

/*
 *	task_set_policy
 *
 *	Set scheduling policy and parameters, both base and limit, for
 *	the given task. Policy can be any policy implemented by the
 *	processor set, whether enabled or not. Change contained threads
 *	if requested.
 */
kern_return_t
task_set_policy(
	__unused task_t			task,
	__unused processor_set_t		pset,
	__unused policy_t			policy_id,
	__unused policy_base_t		base,
	__unused mach_msg_type_number_t	base_count,
	__unused policy_limit_t		limit,
	__unused mach_msg_type_number_t	limit_count,
	__unused boolean_t			change)
{
	return(KERN_FAILURE);
}

#if	FAST_TAS
kern_return_t
task_set_ras_pc(
	task_t		task,
	vm_offset_t	pc,
	vm_offset_t	endpc)
{
	extern int fast_tas_debug;

	if (fast_tas_debug) {
		printf("task 0x%x: setting fast_tas to [0x%x, 0x%x]\n",
		       task, pc, endpc);
	}
	task_lock(task);
	task->fast_tas_base = pc;
	task->fast_tas_end = endpc;
	task_unlock(task);
	return KERN_SUCCESS;
}
#else	/* FAST_TAS */
kern_return_t
task_set_ras_pc(
	__unused task_t	task,
	__unused vm_offset_t	pc,
	__unused vm_offset_t	endpc)
{
	return KERN_FAILURE;
}
#endif	/* FAST_TAS */

void
task_synchronizer_destroy_all(task_t task)
{
	semaphore_t	semaphore;
	lock_set_t	lock_set;

	/*
	 *  Destroy owned semaphores
	 */

	while (!queue_empty(&task->semaphore_list)) {
		semaphore = (semaphore_t) queue_first(&task->semaphore_list);
		(void) semaphore_destroy(task, semaphore);
	}

	/*
	 *  Destroy owned lock sets
	 */

	while (!queue_empty(&task->lock_set_list)) {
		lock_set = (lock_set_t) queue_first(&task->lock_set_list);
		(void) lock_set_destroy(task, lock_set);
	}
}

/*
 * We need to export some functions to other components that
 * are currently implemented in macros within the osfmk
 * component.  Just export them as functions of the same name.
 */
boolean_t
is_kerneltask(task_t t)
{
	if (t == kernel_task)
		return (TRUE);

	return (FALSE);
}

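/*
 * Note on the pattern below: the header normally defines current_task()
 * and task_reference() as macros for speed; #undef-ing them here lets us
 * also export real, linkable functions of the same names for components
 * that cannot use the macros.
 */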
#undef current_task
task_t current_task(void);
task_t current_task(void)
{
	return (current_task_fast());
}

#undef task_reference
void task_reference(task_t task);
void
task_reference(
	task_t		task)
{
	if (task != TASK_NULL)
		task_reference_internal(task);
}

#if CONFIG_MACF_MACH
/*
 * Protect 2 task labels against modification by adding a reference on
 * both label handles. The locks do not actually have to be held while
 * using the labels as only labels with one reference can be modified
 * in place.
 */

void
tasklabel_lock2(
	task_t a,
	task_t b)
{
	labelh_reference(a->label);
	labelh_reference(b->label);
}

void
tasklabel_unlock2(
	task_t a,
	task_t b)
{
	labelh_release(a->label);
	labelh_release(b->label);
}

void
mac_task_label_update_internal(
	struct label	*pl,
	struct task	*task)
{
	tasklabel_lock(task);
	task->label = labelh_modify(task->label);
	mac_task_label_update(pl, &task->maclabel);
	tasklabel_unlock(task);
	ip_lock(task->itk_self);
	mac_port_label_update_cred(pl, &task->itk_self->ip_label);
	ip_unlock(task->itk_self);
}

void
mac_task_label_modify(
	struct task	*task,
	void		*arg,
	void (*f)(struct label *l, void *arg))
{
	tasklabel_lock(task);
	task->label = labelh_modify(task->label);
	(*f)(&task->maclabel, arg);
	tasklabel_unlock(task);
}

struct label *
mac_task_get_label(struct task *task)
{
	return (&task->maclabel);
}
#endif