/*
 * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	File:	kern/task.c
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young, David Golub,
 *		David Black
 *
 *	Task management primitives implementation.
 */
/*
 * Copyright (c) 1993 The University of Utah and
 * the Computer Systems Laboratory (CSL).  All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
 * IS" CONDITION.  THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * CSL requests users of this software to return to csl-dist@cs.utah.edu any
 * improvements that they make and grant CSL redistribution rights.
 *
 */
/*
 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 * Copyright (c) 2005 SPARTA, Inc.
 */

#include <fast_tas.h>
#include <platforms.h>

#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/host_priv.h>
#include <mach/machine/vm_types.h>
#include <mach/vm_param.h>
#include <mach/semaphore.h>
#include <mach/task_info.h>
#include <mach/task_special_ports.h>

#include <ipc/ipc_types.h>
#include <ipc/ipc_space.h>
#include <ipc/ipc_entry.h>

#include <kern/kern_types.h>
#include <kern/mach_param.h>
#include <kern/misc_protos.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/zalloc.h>
#include <kern/kalloc.h>
#include <kern/processor.h>
#include <kern/sched_prim.h>	/* for thread_wakeup */
#include <kern/ipc_tt.h>
#include <kern/host.h>
#include <kern/clock.h>
#include <kern/timer.h>
#include <kern/assert.h>
#include <kern/sync_lock.h>
#include <kern/affinity.h>

#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>		/* for kernel_map, ipc_kernel_map */
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>

/*
 * Exported interfaces
 */

#include <mach/task_server.h>
#include <mach/mach_host_server.h>
#include <mach/host_security_server.h>
#include <mach/mach_port_server.h>
#include <mach/security_server.h>

#include <vm/vm_shared_region.h>

#if CONFIG_MACF_MACH
#include <security/mac_mach_internal.h>
#endif

#if CONFIG_COUNTERS
#include <pmc/pmc.h>
#endif /* CONFIG_COUNTERS */

task_t			kernel_task;
zone_t			task_zone;
lck_attr_t      task_lck_attr;
lck_grp_t       task_lck_grp;
lck_grp_attr_t  task_lck_grp_attr;
#if CONFIG_EMBEDDED
lck_mtx_t	task_watch_mtx;
#endif /* CONFIG_EMBEDDED */

zinfo_usage_store_t tasks_tkm_private;
zinfo_usage_store_t tasks_tkm_shared;

/* A container to accumulate statistics for expired tasks */
expired_task_statistics_t                dead_task_statistics;
lck_spin_t                dead_task_statistics_lock;

static ledger_template_t task_ledger_template = NULL;
struct _task_ledger_indices task_ledgers = {-1, -1, -1, -1, -1, -1, -1};
void init_task_ledgers(void);


int task_max = CONFIG_TASK_MAX; /* Max number of tasks */

/* externs for BSD kernel */
extern void proc_getexecutableuuid(void *, unsigned char *, unsigned long);

/* Forwards */

void		task_hold_locked(
			task_t		task);
void		task_wait_locked(
			task_t		task,
			boolean_t	until_not_runnable);
void		task_release_locked(
			task_t		task);
void		task_free(
			task_t		task);
void		task_synchronizer_destroy_all(
			task_t		task);

int check_for_tasksuspend(
			task_t task);

void
task_backing_store_privileged(
			task_t task)
{
	task_lock(task);
	task->priv_flags |= VM_BACKING_STORE_PRIV;
	task_unlock(task);
	return;
}


void
task_set_64bit(
		task_t task,
		boolean_t is64bit)
{
#if defined(__i386__) || defined(__x86_64__)
	thread_t thread;
#endif /* defined(__i386__) || defined(__x86_64__) */
	int	vm_flags = 0;

	if (is64bit) {
		if (task_has_64BitAddr(task))
			return;

		task_set_64BitAddr(task);
	} else {
		if (!task_has_64BitAddr(task))
			return;

		/*
		 * Deallocate all memory previously allocated
		 * above the 32-bit address space, since it won't
		 * be accessible anymore.
		 */
		/* remove regular VM map entries & pmap mappings */
		(void) vm_map_remove(task->map,
				     (vm_map_offset_t) VM_MAX_ADDRESS,
				     MACH_VM_MAX_ADDRESS,
				     0);
		/* remove the higher VM mappings */
		(void) vm_map_remove(task->map,
				     MACH_VM_MAX_ADDRESS,
				     0xFFFFFFFFFFFFF000ULL,
				     vm_flags);
		task_clear_64BitAddr(task);
	}
	/* FIXME: On x86, the thread save state flavor can diverge from the
	 * task's 64-bit feature flag due to the 32-bit/64-bit register save
	 * state dichotomy. Since we can be pre-empted in this interval,
	 * certain routines may observe the thread as being in an inconsistent
	 * state with respect to its task's 64-bitness.
	 */
#if defined(__i386__) || defined(__x86_64__)
	task_lock(task);
	queue_iterate(&task->threads, thread, thread_t, task_threads) {
		thread_mtx_lock(thread);
		machine_thread_switch_addrmode(thread);
		thread_mtx_unlock(thread);
	}
	task_unlock(task);
#endif /* defined(__i386__) || defined(__x86_64__) */
}


void
task_set_dyld_info(task_t task, mach_vm_address_t addr, mach_vm_size_t size)
{
	task_lock(task);
	task->all_image_info_addr = addr;
	task->all_image_info_size = size;
	task_unlock(task);
}

void
task_init(void)
{

	lck_grp_attr_setdefault(&task_lck_grp_attr);
	lck_grp_init(&task_lck_grp, "task", &task_lck_grp_attr);
	lck_attr_setdefault(&task_lck_attr);
	lck_mtx_init(&tasks_threads_lock, &task_lck_grp, &task_lck_attr);
#if CONFIG_EMBEDDED
	lck_mtx_init(&task_watch_mtx, &task_lck_grp, &task_lck_attr);
#endif /* CONFIG_EMBEDDED */

	task_zone = zinit(
			sizeof(struct task),
			task_max * sizeof(struct task),
			TASK_CHUNK * sizeof(struct task),
			"tasks");

	zone_change(task_zone, Z_NOENCRYPT, TRUE);

	init_task_ledgers();

	/*
	 * Create the kernel task as the first task.
	 */
#ifdef __LP64__
	if (task_create_internal(TASK_NULL, FALSE, TRUE, &kernel_task) != KERN_SUCCESS)
#else
	if (task_create_internal(TASK_NULL, FALSE, FALSE, &kernel_task) != KERN_SUCCESS)
#endif
		panic("task_init\n");

	vm_map_deallocate(kernel_task->map);
	kernel_task->map = kernel_map;
	lck_spin_init(&dead_task_statistics_lock, &task_lck_grp, &task_lck_attr);
}

/*
 * Create a task running in the kernel address space.  It may
 * have its own map of size mem_size and may have ipc privileges.
 */
kern_return_t
kernel_task_create(
	__unused task_t		parent_task,
	__unused vm_offset_t		map_base,
	__unused vm_size_t		map_size,
	__unused task_t		*child_task)
{
	return (KERN_INVALID_ARGUMENT);
}

kern_return_t
task_create(
	task_t				parent_task,
	__unused ledger_port_array_t	ledger_ports,
	__unused mach_msg_type_number_t	num_ledger_ports,
	__unused boolean_t		inherit_memory,
	__unused task_t			*child_task)	/* OUT */
{
	if (parent_task == TASK_NULL)
		return(KERN_INVALID_ARGUMENT);

	/*
	 * No longer supported: too many calls assume that a task has a valid
	 * process attached.
	 */
	return(KERN_FAILURE);
}

kern_return_t
host_security_create_task_token(
	host_security_t			host_security,
	task_t				parent_task,
	__unused security_token_t	sec_token,
	__unused audit_token_t		audit_token,
	__unused host_priv_t		host_priv,
	__unused ledger_port_array_t	ledger_ports,
	__unused mach_msg_type_number_t	num_ledger_ports,
	__unused boolean_t		inherit_memory,
	__unused task_t			*child_task)	/* OUT */
{
	if (parent_task == TASK_NULL)
		return(KERN_INVALID_ARGUMENT);

	if (host_security == HOST_NULL)
		return(KERN_INVALID_SECURITY);

	/*
	 * No longer supported.
	 */
	return(KERN_FAILURE);
}

void
init_task_ledgers(void)
{
	ledger_template_t t;

	assert(task_ledger_template == NULL);
	assert(kernel_task == TASK_NULL);

	if ((t = ledger_template_create("Per-task ledger")) == NULL)
		panic("couldn't create task ledger template");

	task_ledgers.cpu_time = ledger_entry_add(t, "cpu_time", "sched", "ns");
	task_ledgers.tkm_private = ledger_entry_add(t, "tkm_private",
	    "physmem", "bytes");
	task_ledgers.tkm_shared = ledger_entry_add(t, "tkm_shared", "physmem",
	    "bytes");
	task_ledgers.phys_mem = ledger_entry_add(t, "phys_mem", "physmem",
	    "bytes");
	task_ledgers.wired_mem = ledger_entry_add(t, "wired_mem", "physmem",
	    "bytes");
	task_ledgers.platform_idle_wakeups = ledger_entry_add(t, "platform_idle_wakeups", "power",
	    "count");
	task_ledgers.interrupt_wakeups = ledger_entry_add(t, "interrupt_wakeups", "power",
	    "count");

	if ((task_ledgers.cpu_time < 0) || (task_ledgers.tkm_private < 0) ||
	    (task_ledgers.tkm_shared < 0) || (task_ledgers.phys_mem < 0) ||
	    (task_ledgers.wired_mem < 0) || (task_ledgers.platform_idle_wakeups < 0) ||
	    (task_ledgers.interrupt_wakeups < 0)) {
		panic("couldn't create entries for task ledger template");
	}

	task_ledger_template = t;
}
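
/*
 * Example (illustrative sketch, not part of the original code): each
 * index stored in task_ledgers can later be passed to
 * ledger_get_entries() to read that entry's accumulated credit and
 * debit, as task_deallocate() does below for the wakeup entries:
 *
 *	ledger_amount_t credit, debit;
 *	ledger_get_entries(task->ledger, task_ledgers.cpu_time,
 *	                   &credit, &debit);
 *	// credit - debit = net amount currently charged to the task
 */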

kern_return_t
task_create_internal(
	task_t		parent_task,
	boolean_t	inherit_memory,
	boolean_t	is_64bit,
	task_t		*child_task)		/* OUT */
{
	task_t			new_task;
	vm_shared_region_t	shared_region;
	ledger_t		ledger = NULL;

	new_task = (task_t) zalloc(task_zone);

	if (new_task == TASK_NULL)
		return(KERN_RESOURCE_SHORTAGE);

	/* one ref for just being alive; one for our caller */
	new_task->ref_count = 2;

	/* allocate with active entries */
	assert(task_ledger_template != NULL);
	if ((ledger = ledger_instantiate(task_ledger_template,
			LEDGER_CREATE_ACTIVE_ENTRIES)) == NULL) {
		zfree(task_zone, new_task);
		return(KERN_RESOURCE_SHORTAGE);
	}
	new_task->ledger = ledger;

	/* if inherit_memory is true, parent_task MUST not be NULL */
	if (inherit_memory)
		new_task->map = vm_map_fork(ledger, parent_task->map);
	else
		new_task->map = vm_map_create(pmap_create(ledger, 0, is_64bit),
				(vm_map_offset_t)(VM_MIN_ADDRESS),
				(vm_map_offset_t)(VM_MAX_ADDRESS), TRUE);

	/* Inherit memlock limit from parent */
	if (parent_task)
		vm_map_set_user_wire_limit(new_task->map, (vm_size_t)parent_task->map->user_wire_limit);

	lck_mtx_init(&new_task->lock, &task_lck_grp, &task_lck_attr);
	queue_init(&new_task->threads);
	new_task->suspend_count = 0;
	new_task->thread_count = 0;
	new_task->active_thread_count = 0;
	new_task->user_stop_count = 0;
	new_task->role = TASK_UNSPECIFIED;
	new_task->active = TRUE;
	new_task->halting = FALSE;
	new_task->user_data = NULL;
	new_task->faults = 0;
	new_task->cow_faults = 0;
	new_task->pageins = 0;
	new_task->messages_sent = 0;
	new_task->messages_received = 0;
	new_task->syscalls_mach = 0;
	new_task->priv_flags = 0;
	new_task->syscalls_unix = 0;
	new_task->c_switch = new_task->p_switch = new_task->ps_switch = 0;
	new_task->taskFeatures[0] = 0;				/* Init task features */
	new_task->taskFeatures[1] = 0;				/* Init task features */

	zinfo_task_init(new_task);

#ifdef MACH_BSD
	new_task->bsd_info = NULL;
#endif /* MACH_BSD */

#if defined(__i386__) || defined(__x86_64__)
	new_task->i386_ldt = 0;
	new_task->task_debug = NULL;
#endif


	queue_init(&new_task->semaphore_list);
	queue_init(&new_task->lock_set_list);
	new_task->semaphores_owned = 0;
	new_task->lock_sets_owned = 0;

#if CONFIG_MACF_MACH
	new_task->label = labelh_new(1);
	mac_task_label_init(&new_task->maclabel);
#endif

	ipc_task_init(new_task, parent_task);

	new_task->total_user_time = 0;
	new_task->total_system_time = 0;

	new_task->vtimers = 0;

	new_task->shared_region = NULL;

	new_task->affinity_space = NULL;

#if CONFIG_COUNTERS
	new_task->t_chud = 0U;
#endif

	new_task->pidsuspended = FALSE;
	new_task->frozen = FALSE;
	new_task->rusage_cpu_flags = 0;
	new_task->rusage_cpu_percentage = 0;
	new_task->rusage_cpu_interval = 0;
	new_task->rusage_cpu_deadline = 0;
	new_task->rusage_cpu_callt = NULL;
	new_task->proc_terminate = 0;
#if CONFIG_EMBEDDED
	queue_init(&new_task->task_watchers);
	new_task->appstate = TASK_APPSTATE_ACTIVE;
	new_task->num_taskwatchers = 0;
	new_task->watchapplying = 0;
#endif /* CONFIG_EMBEDDED */

	new_task->uexc_range_start = new_task->uexc_range_size = new_task->uexc_handler = 0;

	if (parent_task != TASK_NULL) {
		new_task->sec_token = parent_task->sec_token;
		new_task->audit_token = parent_task->audit_token;

		/* inherit the parent's shared region */
		shared_region = vm_shared_region_get(parent_task);
		vm_shared_region_set(new_task, shared_region);

		if (task_has_64BitAddr(parent_task))
			task_set_64BitAddr(new_task);
		new_task->all_image_info_addr = parent_task->all_image_info_addr;
		new_task->all_image_info_size = parent_task->all_image_info_size;

#if defined(__i386__) || defined(__x86_64__)
		if (inherit_memory && parent_task->i386_ldt)
			new_task->i386_ldt = user_ldt_copy(parent_task->i386_ldt);
#endif
		if (inherit_memory && parent_task->affinity_space)
			task_affinity_create(parent_task, new_task);

		new_task->pset_hint = parent_task->pset_hint = task_choose_pset(parent_task);
		new_task->policystate = parent_task->policystate;
		/* inherit the self action state */
		new_task->appliedstate = parent_task->appliedstate;
		new_task->ext_policystate = parent_task->ext_policystate;
#if NOTYET
		/* till the child lifecycle is cleared do not inherit external action */
		new_task->ext_appliedstate = parent_task->ext_appliedstate;
#else
		new_task->ext_appliedstate = default_task_null_policy;
#endif
	}
	else {
		new_task->sec_token = KERNEL_SECURITY_TOKEN;
		new_task->audit_token = KERNEL_AUDIT_TOKEN;
#ifdef __LP64__
		if (is_64bit)
			task_set_64BitAddr(new_task);
#endif
		new_task->all_image_info_addr = (mach_vm_address_t)0;
		new_task->all_image_info_size = (mach_vm_size_t)0;

		new_task->pset_hint = PROCESSOR_SET_NULL;
		new_task->policystate = default_task_proc_policy;
		new_task->ext_policystate = default_task_proc_policy;
		new_task->appliedstate = default_task_null_policy;
		new_task->ext_appliedstate = default_task_null_policy;
	}

	if (kernel_task == TASK_NULL) {
		new_task->priority = BASEPRI_KERNEL;
		new_task->max_priority = MAXPRI_KERNEL;
	}
	else {
		new_task->priority = BASEPRI_DEFAULT;
		new_task->max_priority = MAXPRI_USER;
	}

	bzero(&new_task->extmod_statistics, sizeof(new_task->extmod_statistics));
	new_task->task_timer_wakeups_bin_1 = new_task->task_timer_wakeups_bin_2 = 0;

	lck_mtx_lock(&tasks_threads_lock);
	queue_enter(&tasks, new_task, task_t, tasks);
	tasks_count++;
	lck_mtx_unlock(&tasks_threads_lock);

	if (vm_backing_store_low && parent_task != NULL)
		new_task->priv_flags |= (parent_task->priv_flags & VM_BACKING_STORE_PRIV);

	ipc_task_enable(new_task);

	*child_task = new_task;
	return(KERN_SUCCESS);
}

/*
 *	task_deallocate:
 *
 *	Drop a reference on a task.
 */
void
task_deallocate(
	task_t		task)
{
	ledger_amount_t credit, debit, interrupt_wakeups, platform_idle_wakeups;

	if (task == TASK_NULL)
		return;

	if (task_deallocate_internal(task) > 0)
		return;

	lck_mtx_lock(&tasks_threads_lock);
	queue_remove(&terminated_tasks, task, task_t, tasks);
	lck_mtx_unlock(&tasks_threads_lock);

	/*
	 *	Give the machine dependent code a chance
	 *	to perform cleanup before ripping apart
	 *	the task.
	 */
	machine_task_terminate(task);

	ipc_task_terminate(task);

	if (task->affinity_space)
		task_affinity_deallocate(task);

	vm_map_deallocate(task->map);
	is_release(task->itk_space);

	ledger_get_entries(task->ledger, task_ledgers.interrupt_wakeups,
	                   &interrupt_wakeups, &debit);
	ledger_get_entries(task->ledger, task_ledgers.platform_idle_wakeups,
	                   &platform_idle_wakeups, &debit);

	/* Accumulate statistics for dead tasks */
	lck_spin_lock(&dead_task_statistics_lock);
	dead_task_statistics.total_user_time += task->total_user_time;
	dead_task_statistics.total_system_time += task->total_system_time;

	dead_task_statistics.task_interrupt_wakeups += interrupt_wakeups;
	dead_task_statistics.task_platform_idle_wakeups += platform_idle_wakeups;

	dead_task_statistics.task_timer_wakeups_bin_1 += task->task_timer_wakeups_bin_1;
	dead_task_statistics.task_timer_wakeups_bin_2 += task->task_timer_wakeups_bin_2;

	lck_spin_unlock(&dead_task_statistics_lock);
	lck_mtx_destroy(&task->lock, &task_lck_grp);

#if CONFIG_MACF_MACH
	labelh_release(task->label);
#endif

	if (!ledger_get_entries(task->ledger, task_ledgers.tkm_private, &credit,
	    &debit)) {
		OSAddAtomic64(credit, (int64_t *)&tasks_tkm_private.alloc);
		OSAddAtomic64(debit, (int64_t *)&tasks_tkm_private.free);
	}
	if (!ledger_get_entries(task->ledger, task_ledgers.tkm_shared, &credit,
	    &debit)) {
		OSAddAtomic64(credit, (int64_t *)&tasks_tkm_shared.alloc);
		OSAddAtomic64(debit, (int64_t *)&tasks_tkm_shared.free);
	}
	ledger_dereference(task->ledger);
	zinfo_task_free(task);
	zfree(task_zone, task);
}

/*
 *	task_name_deallocate:
 *
 *	Drop a reference on a task name.
 */
void
task_name_deallocate(
	task_name_t		task_name)
{
	return(task_deallocate((task_t)task_name));
}


/*
 *	task_terminate:
 *
 *	Terminate the specified task.  See comments on thread_terminate
 *	(kern/thread.c) about problems with terminating the "current task."
 */

kern_return_t
task_terminate(
	task_t		task)
{
	if (task == TASK_NULL)
		return (KERN_INVALID_ARGUMENT);

	if (task->bsd_info)
		return (KERN_FAILURE);

	return (task_terminate_internal(task));
}

kern_return_t
task_terminate_internal(
	task_t			task)
{
	thread_t			thread, self;
	task_t				self_task;
	boolean_t			interrupt_save;

	assert(task != kernel_task);

	self = current_thread();
	self_task = self->task;

	/*
	 *	Get the task locked and make sure that we are not racing
	 *	with someone else trying to terminate us.
	 */
	if (task == self_task)
		task_lock(task);
	else
	if (task < self_task) {
		task_lock(task);
		task_lock(self_task);
	}
	else {
		task_lock(self_task);
		task_lock(task);
	}

	if (!task->active) {
		/*
		 *	Task is already being terminated.
		 *	Just return an error. If we are dying, this will
		 *	just get us to our AST special handler and that
		 *	will get us to finalize the termination of ourselves.
		 */
		task_unlock(task);
		if (self_task != task)
			task_unlock(self_task);

		return (KERN_FAILURE);
	}

	if (self_task != task)
		task_unlock(self_task);

	/*
	 * Make sure the current thread does not get aborted out of
	 * the waits inside these operations.
	 */
	interrupt_save = thread_interrupt_level(THREAD_UNINT);

	/*
	 *	Indicate that we want all the threads to stop executing
	 *	at user space by holding the task (we would have held
	 *	each thread independently in thread_terminate_internal -
	 *	but this way we may be more likely to already find it
	 *	held there).  Mark the task inactive, and prevent
	 *	further task operations via the task port.
	 */
	task_hold_locked(task);
	task->active = FALSE;
	ipc_task_disable(task);

	/*
	 *	Terminate each thread in the task.
	 */
	queue_iterate(&task->threads, thread, thread_t, task_threads) {
		thread_terminate_internal(thread);
	}

	task_unlock(task);

#if CONFIG_EMBEDDED
	/*
	 * remove all task watchers
	 */
	task_removewatchers(task);
#endif /* CONFIG_EMBEDDED */

	/*
	 *	Destroy all synchronizers owned by the task.
	 */
	task_synchronizer_destroy_all(task);

	/*
	 *	Destroy the IPC space, leaving just a reference for it.
	 */
	ipc_space_terminate(task->itk_space);

	if (vm_map_has_4GB_pagezero(task->map))
		vm_map_clear_4GB_pagezero(task->map);

	/*
	 * If the current thread is a member of the task
	 * being terminated, then the last reference to
	 * the task will not be dropped until the thread
	 * is finally reaped.  To avoid incurring the
	 * expense of removing the address space regions
	 * at reap time, we do it explicitly here.
	 */
	vm_map_remove(task->map,
		      task->map->min_offset,
		      task->map->max_offset,
		      VM_MAP_NO_FLAGS);

	/* release our shared region */
	vm_shared_region_set(task, NULL);

	lck_mtx_lock(&tasks_threads_lock);
	queue_remove(&tasks, task, task_t, tasks);
	queue_enter(&terminated_tasks, task, task_t, tasks);
	tasks_count--;
	lck_mtx_unlock(&tasks_threads_lock);

	/*
	 * We no longer need to guard against being aborted, so restore
	 * the previous interruptible state.
	 */
	thread_interrupt_level(interrupt_save);

	/*
	 * Get rid of the task active reference on itself.
	 */
	task_deallocate(task);

	return (KERN_SUCCESS);
}

/*
 * task_start_halt:
 *
 * 	Shut the current task down (except for the current thread) in
 *	preparation for dramatic changes to the task (probably exec).
 *	We hold the task and mark all other threads in the task for
 *	termination.
 */
kern_return_t
task_start_halt(
	task_t		task)
{
	thread_t	thread, self;

	assert(task != kernel_task);

	self = current_thread();

	if (task != self->task)
		return (KERN_INVALID_ARGUMENT);

	task_lock(task);

	if (task->halting || !task->active || !self->active) {
		/*
		 *	Task or current thread is already being terminated.
		 *	Hurry up and return out of the current kernel context
		 *	so that we run our AST special handler to terminate
		 *	ourselves.
		 */
		task_unlock(task);

		return (KERN_FAILURE);
	}

	task->halting = TRUE;

	if (task->thread_count > 1) {

		/*
		 * Mark all the threads to keep them from starting any more
		 * user-level execution.  The thread_terminate_internal code
		 * would do this on a thread by thread basis anyway, but this
		 * gives us a better chance of not having to wait there.
		 */
		task_hold_locked(task);

		/*
		 *	Terminate all the other threads in the task.
		 */
		queue_iterate(&task->threads, thread, thread_t, task_threads) {
			if (thread != self)
				thread_terminate_internal(thread);
		}

		task_release_locked(task);
	}
	task_unlock(task);
	return KERN_SUCCESS;
}


/*
 * task_complete_halt:
 *
 *	Complete task halt by waiting for threads to terminate, then clean
 *	up task resources (VM, port namespace, etc...) and then let the
 *	current thread go in the (practically empty) task context.
 */
void
task_complete_halt(task_t task)
{
	task_lock(task);
	assert(task->halting);
	assert(task == current_task());

	/*
	 *	Wait for the other threads to get shut down.
	 *	When the last other thread is reaped, we'll be
	 *	woken up.
	 */
	if (task->thread_count > 1) {
		assert_wait((event_t)&task->halting, THREAD_UNINT);
		task_unlock(task);
		thread_block(THREAD_CONTINUE_NULL);
	} else {
		task_unlock(task);
	}

	/*
	 *	Give the machine dependent code a chance
	 *	to perform cleanup of task-level resources
	 *	associated with the current thread before
	 *	ripping apart the task.
	 */
	machine_task_terminate(task);

	/*
	 *	Destroy all synchronizers owned by the task.
	 */
	task_synchronizer_destroy_all(task);

	/*
	 *	Destroy the contents of the IPC space, leaving just
	 *	a reference for it.
	 */
	ipc_space_clean(task->itk_space);

	/*
	 * Clean out the address space, as we are going to be
	 * getting a new one.
	 */
	vm_map_remove(task->map, task->map->min_offset,
		      task->map->max_offset, VM_MAP_NO_FLAGS);

	task->halting = FALSE;
}
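
/*
 * Example (illustrative sketch, not part of the original code): the
 * two halves of a task halt as an exec-style path would drive them,
 * from a thread inside the task being reshaped:
 *
 *	if (task_start_halt(task) == KERN_SUCCESS) {
 *		task_complete_halt(task);
 *		// the task is now practically empty (threads reaped,
 *		// VM and IPC space cleaned) and can be rebuilt
 *	}
 */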

/*
 *	task_hold_locked:
 *
 *	Suspend execution of the specified task.
 *	This is a recursive-style suspension of the task; a count of
 *	suspends is maintained.
 *
 * 	CONDITIONS: the task is locked and active.
 */
void
task_hold_locked(
	register task_t		task)
{
	register thread_t	thread;

	assert(task->active);

	if (task->suspend_count++ > 0)
		return;

	/*
	 *	Iterate through all the threads and hold them.
	 */
	queue_iterate(&task->threads, thread, thread_t, task_threads) {
		thread_mtx_lock(thread);
		thread_hold(thread);
		thread_mtx_unlock(thread);
	}
}

/*
 *	task_hold:
 *
 *	Same as the internal routine above, except that it must lock
 *	and verify that the task is active.  This differs from task_suspend
 *	in that it places a kernel hold on the task rather than just a
 *	user-level hold.  This keeps users from over-resuming and setting
 *	it running out from under the kernel.
 *
 * 	CONDITIONS: the caller holds a reference on the task
 */
kern_return_t
task_hold(
	register task_t		task)
{
	if (task == TASK_NULL)
		return (KERN_INVALID_ARGUMENT);

	task_lock(task);

	if (!task->active) {
		task_unlock(task);

		return (KERN_FAILURE);
	}

	task_hold_locked(task);
	task_unlock(task);

	return (KERN_SUCCESS);
}

kern_return_t
task_wait(
		task_t		task,
		boolean_t	until_not_runnable)
{
	if (task == TASK_NULL)
		return (KERN_INVALID_ARGUMENT);

	task_lock(task);

	if (!task->active) {
		task_unlock(task);

		return (KERN_FAILURE);
	}

	task_wait_locked(task, until_not_runnable);
	task_unlock(task);

	return (KERN_SUCCESS);
}

/*
 *	task_wait_locked:
 *
 *	Wait for all threads in task to stop.
 *
 * Conditions:
 *	Called with task locked, active, and held.
 */
void
task_wait_locked(
	register task_t		task,
	boolean_t		until_not_runnable)
{
	register thread_t	thread, self;

	assert(task->active);
	assert(task->suspend_count > 0);

	self = current_thread();

	/*
	 *	Iterate through all the threads and wait for them to
	 *	stop.  Do not wait for the current thread if it is within
	 *	the task.
	 */
	queue_iterate(&task->threads, thread, thread_t, task_threads) {
		if (thread != self)
			thread_wait(thread, until_not_runnable);
	}
}

/*
 *	task_release_locked:
 *
 *	Release a kernel hold on a task.
 *
 * 	CONDITIONS: the task is locked and active
 */
void
task_release_locked(
	register task_t		task)
{
	register thread_t	thread;

	assert(task->active);
	assert(task->suspend_count > 0);

	if (--task->suspend_count > 0)
		return;

	queue_iterate(&task->threads, thread, thread_t, task_threads) {
		thread_mtx_lock(thread);
		thread_release(thread);
		thread_mtx_unlock(thread);
	}
}

/*
 *	task_release:
 *
 *	Same as the internal routine above, except that it must lock
 *	and verify that the task is active.
 *
 * 	CONDITIONS: The caller holds a reference to the task
 */
kern_return_t
task_release(
	task_t		task)
{
	if (task == TASK_NULL)
		return (KERN_INVALID_ARGUMENT);

	task_lock(task);

	if (!task->active) {
		task_unlock(task);

		return (KERN_FAILURE);
	}

	task_release_locked(task);
	task_unlock(task);

	return (KERN_SUCCESS);
}
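
/*
 * Example (illustrative sketch, not part of the original code): a
 * kernel-level hold is recursive and must be balanced, e.g. to quiesce
 * a task while inspecting it:
 *
 *	if (task_hold(task) == KERN_SUCCESS) {
 *		task_wait(task, TRUE);	// wait until threads stop running
 *		// ... examine the stopped task ...
 *		task_release(task);	// drop the kernel hold
 *	}
 */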

kern_return_t
task_threads(
	task_t					task,
	thread_act_array_t		*threads_out,
	mach_msg_type_number_t	*count)
{
	mach_msg_type_number_t	actual;
	thread_t				*thread_list;
	thread_t				thread;
	vm_size_t				size, size_needed;
	void					*addr;
	unsigned int			i, j;

	if (task == TASK_NULL)
		return (KERN_INVALID_ARGUMENT);

	size = 0; addr = NULL;

	for (;;) {
		task_lock(task);
		if (!task->active) {
			task_unlock(task);

			if (size != 0)
				kfree(addr, size);

			return (KERN_FAILURE);
		}

		actual = task->thread_count;

		/* do we have the memory we need? */
		size_needed = actual * sizeof (mach_port_t);
		if (size_needed <= size)
			break;

		/* unlock the task and allocate more memory */
		task_unlock(task);

		if (size != 0)
			kfree(addr, size);

		assert(size_needed > 0);
		size = size_needed;

		addr = kalloc(size);
		if (addr == 0)
			return (KERN_RESOURCE_SHORTAGE);
	}

	/* OK, have memory and the task is locked & active */
	thread_list = (thread_t *)addr;

	i = j = 0;

	for (thread = (thread_t)queue_first(&task->threads); i < actual;
				++i, thread = (thread_t)queue_next(&thread->task_threads)) {
		thread_reference_internal(thread);
		thread_list[j++] = thread;
	}

	assert(queue_end(&task->threads, (queue_entry_t)thread));

	actual = j;
	size_needed = actual * sizeof (mach_port_t);

	/* can unlock task now that we've got the thread refs */
	task_unlock(task);

	if (actual == 0) {
		/* no threads, so return null pointer and deallocate memory */

		*threads_out = NULL;
		*count = 0;

		if (size != 0)
			kfree(addr, size);
	}
	else {
		/* if we allocated too much, must copy */

		if (size_needed < size) {
			void *newaddr;

			newaddr = kalloc(size_needed);
			if (newaddr == 0) {
				for (i = 0; i < actual; ++i)
					thread_deallocate(thread_list[i]);
				kfree(addr, size);
				return (KERN_RESOURCE_SHORTAGE);
			}

			bcopy(addr, newaddr, size_needed);
			kfree(addr, size);
			thread_list = (thread_t *)newaddr;
		}

		*threads_out = thread_list;
		*count = actual;

		/* do the conversion that Mig should handle */

		for (i = 0; i < actual; ++i)
			((ipc_port_t *) thread_list)[i] = convert_thread_to_port(thread_list[i]);
	}

	return (KERN_SUCCESS);
}

static kern_return_t
place_task_hold(
	register task_t		task)
{
	if (!task->active) {
		return (KERN_FAILURE);
	}

	if (task->user_stop_count++ > 0) {
		/*
		 *	If the stop count was positive, the task is
		 *	already stopped and we can exit.
		 */
		return (KERN_SUCCESS);
	}

	/*
	 * Put a kernel-level hold on the threads in the task (all
	 * user-level task suspensions added together represent a
	 * single kernel-level hold).  We then wait for the threads
	 * to stop executing user code.
	 */
	task_hold_locked(task);
	task_wait_locked(task, TRUE);

	return (KERN_SUCCESS);
}

static kern_return_t
release_task_hold(
	register task_t		task,
	boolean_t		pidresume)
{
	register boolean_t release = FALSE;

	if (!task->active) {
		return (KERN_FAILURE);
	}

	if (pidresume) {
		if (task->pidsuspended == FALSE) {
			return (KERN_FAILURE);
		}
		task->pidsuspended = FALSE;
	}

	if (task->user_stop_count > (task->pidsuspended ? 1 : 0)) {
		if (--task->user_stop_count == 0) {
			release = TRUE;
		}
	}
	else {
		return (KERN_FAILURE);
	}

	/*
	 *	Release the task if necessary.
	 */
	if (release)
		task_release_locked(task);

	return (KERN_SUCCESS);
}

/*
 *	task_suspend:
 *
 *	Implement a user-level suspension on a task.
 *
 * Conditions:
 * 	The caller holds a reference to the task
 */
kern_return_t
task_suspend(
	register task_t		task)
{
	kern_return_t	 kr;

	if (task == TASK_NULL || task == kernel_task)
		return (KERN_INVALID_ARGUMENT);

	task_lock(task);

	kr = place_task_hold(task);

	task_unlock(task);

	return (kr);
}

/*
 *	task_resume:
 *		Release a user-level suspension on a task.
 *
 * Conditions:
 *		The caller holds a reference to the task
 */
kern_return_t
task_resume(
	register task_t	task)
{
	kern_return_t	 kr;

	if (task == TASK_NULL || task == kernel_task)
		return (KERN_INVALID_ARGUMENT);

	task_lock(task);

	kr = release_task_hold(task, FALSE);

	task_unlock(task);

	return (kr);
}
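
/*
 * Example (illustrative sketch, not part of the original code):
 * user-level suspensions nest via user_stop_count, so every
 * task_suspend() must be paired with a task_resume():
 *
 *	task_suspend(task);	// user_stop_count 0 -> 1, threads held
 *	task_suspend(task);	// 1 -> 2, already stopped, just counts
 *	task_resume(task);	// 2 -> 1, still stopped
 *	task_resume(task);	// 1 -> 0, kernel hold released
 */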

kern_return_t
task_pidsuspend_locked(task_t task)
{
	kern_return_t kr;

	if (task->pidsuspended) {
		kr = KERN_FAILURE;
		goto out;
	}

	task->pidsuspended = TRUE;

	kr = place_task_hold(task);
	if (kr != KERN_SUCCESS) {
		task->pidsuspended = FALSE;
	}
out:
	return(kr);
}


/*
 *	task_pidsuspend:
 *
 *	Suspends a task by placing a hold on its threads.
 *
 * Conditions:
 * 	The caller holds a reference to the task
 */
kern_return_t
task_pidsuspend(
	register task_t		task)
{
	kern_return_t	 kr;

	if (task == TASK_NULL || task == kernel_task)
		return (KERN_INVALID_ARGUMENT);

	task_lock(task);

	kr = task_pidsuspend_locked(task);

	task_unlock(task);

	return (kr);
}

/* If enabled, we bring all the frozen pages back in prior to resumption; otherwise, they're faulted back in on demand */
#define THAW_ON_RESUME 1

/*
 *	task_pidresume:
 *		Resumes a previously suspended task.
 *
 * Conditions:
 *		The caller holds a reference to the task
 */
kern_return_t
task_pidresume(
	register task_t	task)
{
	kern_return_t	 kr;
#if (CONFIG_FREEZE && THAW_ON_RESUME)
	boolean_t frozen;
#endif

	if (task == TASK_NULL || task == kernel_task)
		return (KERN_INVALID_ARGUMENT);

	task_lock(task);

#if (CONFIG_FREEZE && THAW_ON_RESUME)
	frozen = task->frozen;
	task->frozen = FALSE;
#endif

	kr = release_task_hold(task, TRUE);

	task_unlock(task);

#if (CONFIG_FREEZE && THAW_ON_RESUME)
	if ((kr == KERN_SUCCESS) && (frozen == TRUE)) {
		kr = vm_map_thaw(task->map);
	}
#endif

	return (kr);
}
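
/*
 * Example (illustrative sketch, not part of the original code): these
 * are expected to be driven as a pair, e.g. by the BSD pid_suspend()
 * and pid_resume() path (an assumption here, not shown in this file);
 * a second task_pidsuspend() on an already pidsuspended task fails
 * rather than nesting:
 *
 *	if (task_pidsuspend(task) == KERN_SUCCESS) {
 *		// ... task is stopped (and may be frozen meanwhile) ...
 *		task_pidresume(task);	// also thaws under THAW_ON_RESUME
 *	}
 */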

#if CONFIG_FREEZE

/*
 *	task_freeze:
 *
 *	Freeze a task.
 *
 * Conditions:
 * 	The caller holds a reference to the task
 */
kern_return_t
task_freeze(
	register task_t    task,
	uint32_t           *purgeable_count,
	uint32_t           *wired_count,
	uint32_t           *clean_count,
	uint32_t           *dirty_count,
	uint32_t           dirty_budget,
	boolean_t          *shared,
	boolean_t          walk_only)
{
	kern_return_t kr;

	if (task == TASK_NULL || task == kernel_task)
		return (KERN_INVALID_ARGUMENT);

	task_lock(task);

	if (task->frozen) {
		task_unlock(task);
		return (KERN_FAILURE);
	}

	if (walk_only == FALSE) {
		task->frozen = TRUE;
	}

	task_unlock(task);

	if (walk_only) {
		kr = vm_map_freeze_walk(task->map, purgeable_count, wired_count, clean_count, dirty_count, dirty_budget, shared);
	} else {
		kr = vm_map_freeze(task->map, purgeable_count, wired_count, clean_count, dirty_count, dirty_budget, shared);
	}

	return (kr);
}

/*
 *	task_thaw:
 *
 *	Thaw a currently frozen task.
 *
 * Conditions:
 * 	The caller holds a reference to the task
 */
kern_return_t
task_thaw(
	register task_t		task)
{
	kern_return_t kr;

	if (task == TASK_NULL || task == kernel_task)
		return (KERN_INVALID_ARGUMENT);

	task_lock(task);

	if (!task->frozen) {
		task_unlock(task);
		return (KERN_FAILURE);
	}

	task->frozen = FALSE;

	task_unlock(task);

	kr = vm_map_thaw(task->map);

	return (kr);
}
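
/*
 * Example (illustrative sketch, not part of the original code): a
 * freezer would typically stop the task first, then freeze its dirty
 * pages up to a caller-chosen budget (dirty_budget below is assumed
 * to be supplied by the caller):
 *
 *	uint32_t purgeable, wired, clean, dirty;
 *	boolean_t shared;
 *	if (task_pidsuspend(task) == KERN_SUCCESS) {
 *		task_freeze(task, &purgeable, &wired, &clean, &dirty,
 *		            dirty_budget, &shared, FALSE);	// FALSE: freeze for real
 *		// later: task_pidresume() thaws it when THAW_ON_RESUME is set,
 *		// or call task_thaw() directly
 *	}
 */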

#endif /* CONFIG_FREEZE */

kern_return_t
host_security_set_task_token(
	host_security_t		host_security,
	task_t			task,
	security_token_t	sec_token,
	audit_token_t		audit_token,
	host_priv_t		host_priv)
{
	ipc_port_t	 host_port;
	kern_return_t	 kr;

	if (task == TASK_NULL)
		return(KERN_INVALID_ARGUMENT);

	if (host_security == HOST_NULL)
		return(KERN_INVALID_SECURITY);

	task_lock(task);
	task->sec_token = sec_token;
	task->audit_token = audit_token;
	task_unlock(task);

	if (host_priv != HOST_PRIV_NULL) {
		kr = host_get_host_priv_port(host_priv, &host_port);
	} else {
		kr = host_get_host_port(host_priv_self(), &host_port);
	}
	assert(kr == KERN_SUCCESS);
	kr = task_set_special_port(task, TASK_HOST_PORT, host_port);
	return(kr);
}

/*
 * This routine was added, pretty much exclusively, for registering the
 * RPC glue vector for in-kernel short circuited tasks.  Rather than
 * removing it completely, I have only disabled that feature (which was
 * the only feature at the time).  It just appears that we are going to
 * want to add some user data to tasks in the future (i.e. bsd info,
 * task names, etc...), so I left it in the formal task interface.
 */
kern_return_t
task_set_info(
	task_t		task,
	task_flavor_t	flavor,
	__unused task_info_t	task_info_in,		/* pointer to IN array */
	__unused mach_msg_type_number_t	task_info_count)
{
	if (task == TASK_NULL)
		return(KERN_INVALID_ARGUMENT);

	switch (flavor) {
	    default:
		return (KERN_INVALID_ARGUMENT);
	}
	return (KERN_SUCCESS);
}

kern_return_t
task_info(
	task_t					task,
	task_flavor_t			flavor,
	task_info_t				task_info_out,
	mach_msg_type_number_t	*task_info_count)
{
	kern_return_t error = KERN_SUCCESS;

	if (task == TASK_NULL)
		return (KERN_INVALID_ARGUMENT);

	task_lock(task);

	if ((task != current_task()) && (!task->active)) {
		task_unlock(task);
		return (KERN_INVALID_ARGUMENT);
	}

	switch (flavor) {

	case TASK_BASIC_INFO_32:
	case TASK_BASIC2_INFO_32:
	{
		task_basic_info_32_t	basic_info;
		vm_map_t				map;
		clock_sec_t				secs;
		clock_usec_t			usecs;

		if (*task_info_count < TASK_BASIC_INFO_32_COUNT) {
			error = KERN_INVALID_ARGUMENT;
			break;
		}

		basic_info = (task_basic_info_32_t)task_info_out;

		map = (task == kernel_task)? kernel_map: task->map;
		basic_info->virtual_size = (typeof(basic_info->virtual_size))map->size;
		if (flavor == TASK_BASIC2_INFO_32) {
			/*
			 * The "BASIC2" flavor gets the maximum resident
			 * size instead of the current resident size...
			 */
			basic_info->resident_size = pmap_resident_max(map->pmap);
		} else {
			basic_info->resident_size = pmap_resident_count(map->pmap);
		}
		basic_info->resident_size *= PAGE_SIZE;

		basic_info->policy = ((task != kernel_task)?
										  POLICY_TIMESHARE: POLICY_RR);
		basic_info->suspend_count = task->user_stop_count;

		absolutetime_to_microtime(task->total_user_time, &secs, &usecs);
		basic_info->user_time.seconds =
			(typeof(basic_info->user_time.seconds))secs;
		basic_info->user_time.microseconds = usecs;

		absolutetime_to_microtime(task->total_system_time, &secs, &usecs);
		basic_info->system_time.seconds =
			(typeof(basic_info->system_time.seconds))secs;
		basic_info->system_time.microseconds = usecs;

		*task_info_count = TASK_BASIC_INFO_32_COUNT;
		break;
	}

	case TASK_BASIC_INFO_64:
	{
		task_basic_info_64_t	basic_info;
		vm_map_t				map;
		clock_sec_t				secs;
		clock_usec_t			usecs;

		if (*task_info_count < TASK_BASIC_INFO_64_COUNT) {
			error = KERN_INVALID_ARGUMENT;
			break;
		}

		basic_info = (task_basic_info_64_t)task_info_out;

		map = (task == kernel_task)? kernel_map: task->map;
		basic_info->virtual_size  = map->size;
		basic_info->resident_size =
			(mach_vm_size_t)(pmap_resident_count(map->pmap))
			* PAGE_SIZE_64;

		basic_info->policy = ((task != kernel_task)?
										  POLICY_TIMESHARE: POLICY_RR);
		basic_info->suspend_count = task->user_stop_count;

		absolutetime_to_microtime(task->total_user_time, &secs, &usecs);
		basic_info->user_time.seconds =
			(typeof(basic_info->user_time.seconds))secs;
		basic_info->user_time.microseconds = usecs;

		absolutetime_to_microtime(task->total_system_time, &secs, &usecs);
		basic_info->system_time.seconds =
			(typeof(basic_info->system_time.seconds))secs;
		basic_info->system_time.microseconds = usecs;

		*task_info_count = TASK_BASIC_INFO_64_COUNT;
		break;
	}

	case MACH_TASK_BASIC_INFO:
	{
		mach_task_basic_info_t  basic_info;
		vm_map_t                map;
		clock_sec_t             secs;
		clock_usec_t            usecs;

		if (*task_info_count < MACH_TASK_BASIC_INFO_COUNT) {
			error = KERN_INVALID_ARGUMENT;
			break;
		}

		basic_info = (mach_task_basic_info_t)task_info_out;

		map = (task == kernel_task) ? kernel_map : task->map;

		basic_info->virtual_size  = map->size;

		basic_info->resident_size =
		    (mach_vm_size_t)(pmap_resident_count(map->pmap));
		basic_info->resident_size *= PAGE_SIZE_64;

		basic_info->resident_size_max =
		    (mach_vm_size_t)(pmap_resident_max(map->pmap));
		basic_info->resident_size_max *= PAGE_SIZE_64;

		basic_info->policy = ((task != kernel_task) ?
		                      POLICY_TIMESHARE : POLICY_RR);

		basic_info->suspend_count = task->user_stop_count;

		absolutetime_to_microtime(task->total_user_time, &secs, &usecs);
		basic_info->user_time.seconds =
		    (typeof(basic_info->user_time.seconds))secs;
		basic_info->user_time.microseconds = usecs;

		absolutetime_to_microtime(task->total_system_time, &secs, &usecs);
		basic_info->system_time.seconds =
		    (typeof(basic_info->system_time.seconds))secs;
		basic_info->system_time.microseconds = usecs;

		*task_info_count = MACH_TASK_BASIC_INFO_COUNT;
		break;
	}

	case TASK_THREAD_TIMES_INFO:
	{
		register task_thread_times_info_t	times_info;
		register thread_t					thread;

		if (*task_info_count < TASK_THREAD_TIMES_INFO_COUNT) {
			error = KERN_INVALID_ARGUMENT;
			break;
		}

		times_info = (task_thread_times_info_t) task_info_out;
		times_info->user_time.seconds = 0;
		times_info->user_time.microseconds = 0;
		times_info->system_time.seconds = 0;
		times_info->system_time.microseconds = 0;


		queue_iterate(&task->threads, thread, thread_t, task_threads) {
			time_value_t	user_time, system_time;

			thread_read_times(thread, &user_time, &system_time);

			time_value_add(&times_info->user_time, &user_time);
			time_value_add(&times_info->system_time, &system_time);
		}


		*task_info_count = TASK_THREAD_TIMES_INFO_COUNT;
		break;
	}

	case TASK_ABSOLUTETIME_INFO:
	{
		task_absolutetime_info_t	info;
		register thread_t			thread;

		if (*task_info_count < TASK_ABSOLUTETIME_INFO_COUNT) {
			error = KERN_INVALID_ARGUMENT;
			break;
		}

		info = (task_absolutetime_info_t)task_info_out;
		info->threads_user = info->threads_system = 0;


		info->total_user = task->total_user_time;
		info->total_system = task->total_system_time;

		queue_iterate(&task->threads, thread, thread_t, task_threads) {
			uint64_t	tval;
			spl_t 		x;

			x = splsched();
			thread_lock(thread);

			tval = timer_grab(&thread->user_timer);
			info->threads_user += tval;
			info->total_user += tval;

			tval = timer_grab(&thread->system_timer);
			if (thread->precise_user_kernel_time) {
				info->threads_system += tval;
				info->total_system += tval;
			} else {
				/* system_timer may represent either sys or user */
				info->threads_user += tval;
				info->total_user += tval;
			}

			thread_unlock(thread);
			splx(x);
		}


		*task_info_count = TASK_ABSOLUTETIME_INFO_COUNT;
		break;
	}

	case TASK_DYLD_INFO:
	{
		task_dyld_info_t info;

		/*
		 * We added the format field to TASK_DYLD_INFO output.  For
		 * temporary backward compatibility, accept the fact that
		 * clients may ask for the old version - distinguished by the
1797		 * size of the expected result structure.
1798		 */
1799#define TASK_LEGACY_DYLD_INFO_COUNT \
1800		offsetof(struct task_dyld_info, all_image_info_format)/sizeof(natural_t)
1801
1802		if (*task_info_count < TASK_LEGACY_DYLD_INFO_COUNT) {
1803			error = KERN_INVALID_ARGUMENT;
1804			break;
1805		}
1806
1807		info = (task_dyld_info_t)task_info_out;
1808		info->all_image_info_addr = task->all_image_info_addr;
1809		info->all_image_info_size = task->all_image_info_size;
1810
1811		/* only set format on output for those expecting it */
1812		if (*task_info_count >= TASK_DYLD_INFO_COUNT) {
1813			info->all_image_info_format = task_has_64BitAddr(task) ?
1814				                 TASK_DYLD_ALL_IMAGE_INFO_64 :
1815				                 TASK_DYLD_ALL_IMAGE_INFO_32 ;
1816			*task_info_count = TASK_DYLD_INFO_COUNT;
1817		} else {
1818			*task_info_count = TASK_LEGACY_DYLD_INFO_COUNT;
1819		}
1820		break;
1821	}
1822
1823	case TASK_EXTMOD_INFO:
1824	{
1825		task_extmod_info_t info;
1826		void *p;
1827
1828		if (*task_info_count < TASK_EXTMOD_INFO_COUNT) {
1829			error = KERN_INVALID_ARGUMENT;
1830			break;
1831		}
1832
1833		info = (task_extmod_info_t)task_info_out;
1834
1835		p = get_bsdtask_info(task);
1836		if (p) {
1837			proc_getexecutableuuid(p, info->task_uuid, sizeof(info->task_uuid));
1838		} else {
1839			bzero(info->task_uuid, sizeof(info->task_uuid));
1840		}
1841		info->extmod_statistics = task->extmod_statistics;
1842		*task_info_count = TASK_EXTMOD_INFO_COUNT;
1843
1844		break;
1845	}
1846
1847	case TASK_KERNELMEMORY_INFO:
1848	{
1849		task_kernelmemory_info_t	tkm_info;
1850		ledger_amount_t			credit, debit;
1851
1852		if (*task_info_count < TASK_KERNELMEMORY_INFO_COUNT) {
1853		   error = KERN_INVALID_ARGUMENT;
1854		   break;
1855		}
1856
1857		tkm_info = (task_kernelmemory_info_t) task_info_out;
1858		tkm_info->total_palloc = 0;
1859		tkm_info->total_pfree = 0;
1860		tkm_info->total_salloc = 0;
1861		tkm_info->total_sfree = 0;
1862
1863		if (task == kernel_task) {
1864			/*
1865			 * All shared allocs/frees from other tasks count against
1866			 * the kernel private memory usage.  If we are looking up
1867			 * info for the kernel task, gather from everywhere.
1868			 */
1869			task_unlock(task);
1870
1871			/* start by accounting for all the terminated tasks against the kernel */
1872			tkm_info->total_palloc = tasks_tkm_private.alloc + tasks_tkm_shared.alloc;
1873			tkm_info->total_pfree = tasks_tkm_private.free + tasks_tkm_shared.free;
1874
1875			/* count all other task/thread shared alloc/free against the kernel */
1876			lck_mtx_lock(&tasks_threads_lock);
1877
1878			/* XXX this really shouldn't be using the function parameter 'task' as a local var! */
1879			queue_iterate(&tasks, task, task_t, tasks) {
1880				if (task == kernel_task) {
1881					if (ledger_get_entries(task->ledger,
1882					    task_ledgers.tkm_private, &credit,
1883					    &debit) == KERN_SUCCESS) {
1884						tkm_info->total_palloc += credit;
1885						tkm_info->total_pfree += debit;
1886					}
1887				}
1888				if (!ledger_get_entries(task->ledger,
1889				    task_ledgers.tkm_shared, &credit, &debit)) {
1890					tkm_info->total_palloc += credit;
1891					tkm_info->total_pfree += debit;
1892				}
1893			}
1894			lck_mtx_unlock(&tasks_threads_lock);
1895		} else {
1896			if (!ledger_get_entries(task->ledger,
1897			    task_ledgers.tkm_private, &credit, &debit)) {
1898				tkm_info->total_palloc = credit;
1899				tkm_info->total_pfree = debit;
1900			}
1901			if (!ledger_get_entries(task->ledger,
1902			    task_ledgers.tkm_shared, &credit, &debit)) {
1903				tkm_info->total_salloc = credit;
1904				tkm_info->total_sfree = debit;
1905			}
1906			task_unlock(task);
1907		}
1908
1909		*task_info_count = TASK_KERNELMEMORY_INFO_COUNT;
1910		return KERN_SUCCESS;
1911	}
1912
1913	/* OBSOLETE */
1914	case TASK_SCHED_FIFO_INFO:
1915	{
1916
1917		if (*task_info_count < POLICY_FIFO_BASE_COUNT) {
1918			error = KERN_INVALID_ARGUMENT;
1919			break;
1920		}
1921
1922		error = KERN_INVALID_POLICY;
1923		break;
1924	}
1925
1926	/* OBSOLETE */
1927	case TASK_SCHED_RR_INFO:
1928	{
1929		register policy_rr_base_t	rr_base;
1930		uint32_t quantum_time;
1931		uint64_t quantum_ns;
1932
1933		if (*task_info_count < POLICY_RR_BASE_COUNT) {
1934			error = KERN_INVALID_ARGUMENT;
1935			break;
1936		}
1937
1938		rr_base = (policy_rr_base_t) task_info_out;
1939
1940		if (task != kernel_task) {
1941			error = KERN_INVALID_POLICY;
1942			break;
1943		}
1944
1945		rr_base->base_priority = task->priority;
1946
1947		quantum_time = SCHED(initial_quantum_size)(THREAD_NULL);
1948		absolutetime_to_nanoseconds(quantum_time, &quantum_ns);
1949
1950		rr_base->quantum = (uint32_t)(quantum_ns / 1000 / 1000);
1951
1952		*task_info_count = POLICY_RR_BASE_COUNT;
1953		break;
1954	}
1955
1956	/* OBSOLETE */
1957	case TASK_SCHED_TIMESHARE_INFO:
1958	{
1959		register policy_timeshare_base_t	ts_base;
1960
1961		if (*task_info_count < POLICY_TIMESHARE_BASE_COUNT) {
1962			error = KERN_INVALID_ARGUMENT;
1963			break;
1964		}
1965
1966		ts_base = (policy_timeshare_base_t) task_info_out;
1967
1968		if (task == kernel_task) {
1969			error = KERN_INVALID_POLICY;
1970			break;
1971		}
1972
1973		ts_base->base_priority = task->priority;
1974
1975		*task_info_count = POLICY_TIMESHARE_BASE_COUNT;
1976		break;
1977	}
1978
1979	case TASK_SECURITY_TOKEN:
1980	{
1981		register security_token_t	*sec_token_p;
1982
1983		if (*task_info_count < TASK_SECURITY_TOKEN_COUNT) {
1984		    error = KERN_INVALID_ARGUMENT;
1985		    break;
1986		}
1987
1988		sec_token_p = (security_token_t *) task_info_out;
1989
1990		*sec_token_p = task->sec_token;
1991
1992		*task_info_count = TASK_SECURITY_TOKEN_COUNT;
1993		break;
1994	}
1995
1996	case TASK_AUDIT_TOKEN:
1997	{
1998		register audit_token_t	*audit_token_p;
1999
2000		if (*task_info_count < TASK_AUDIT_TOKEN_COUNT) {
2001		    error = KERN_INVALID_ARGUMENT;
2002		    break;
2003		}
2004
2005		audit_token_p = (audit_token_t *) task_info_out;
2006
2007		*audit_token_p = task->audit_token;
2008
2009		*task_info_count = TASK_AUDIT_TOKEN_COUNT;
2010		break;
2011	}
2012
2013	case TASK_SCHED_INFO:
2014		error = KERN_INVALID_ARGUMENT;
2015		break;
2016
2017	case TASK_EVENTS_INFO:
2018	{
2019		register task_events_info_t	events_info;
2020		register thread_t			thread;
2021
2022		if (*task_info_count < TASK_EVENTS_INFO_COUNT) {
2023		   error = KERN_INVALID_ARGUMENT;
2024		   break;
2025		}
2026
2027		events_info = (task_events_info_t) task_info_out;
2028
2029
2030		events_info->faults = task->faults;
2031		events_info->pageins = task->pageins;
2032		events_info->cow_faults = task->cow_faults;
2033		events_info->messages_sent = task->messages_sent;
2034		events_info->messages_received = task->messages_received;
2035		events_info->syscalls_mach = task->syscalls_mach;
2036		events_info->syscalls_unix = task->syscalls_unix;
2037
2038		events_info->csw = task->c_switch;
2039
		queue_iterate(&task->threads, thread, thread_t, task_threads) {
			events_info->csw	   += thread->c_switch;
			events_info->syscalls_mach += thread->syscalls_mach;
			events_info->syscalls_unix += thread->syscalls_unix;
		}

		*task_info_count = TASK_EVENTS_INFO_COUNT;
		break;
	}

	case TASK_AFFINITY_TAG_INFO:
	{
		if (*task_info_count < TASK_AFFINITY_TAG_INFO_COUNT) {
			error = KERN_INVALID_ARGUMENT;
			break;
		}

		error = task_affinity_info(task, task_info_out, task_info_count);
		break;
	}

	case TASK_POWER_INFO:
	{
		task_power_info_t	info;
		thread_t		thread;
		ledger_amount_t		tmp;

		if (*task_info_count < TASK_POWER_INFO_COUNT) {
			error = KERN_INVALID_ARGUMENT;
			break;
		}

		info = (task_power_info_t)task_info_out;

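		/* interrupt and platform-idle wakeup counts live in the task ledger */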
		ledger_get_entries(task->ledger, task_ledgers.interrupt_wakeups,
			(ledger_amount_t *)&info->task_interrupt_wakeups, &tmp);
		ledger_get_entries(task->ledger, task_ledgers.platform_idle_wakeups,
			(ledger_amount_t *)&info->task_platform_idle_wakeups, &tmp);

		info->task_timer_wakeups_bin_1 = task->task_timer_wakeups_bin_1;
		info->task_timer_wakeups_bin_2 = task->task_timer_wakeups_bin_2;

		info->total_user = task->total_user_time;
		info->total_system = task->total_system_time;

		queue_iterate(&task->threads, thread, thread_t, task_threads) {
			uint64_t	tval;
			spl_t		x;

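			/* don't charge the kernel's idle threads to the task */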
			if ((task == kernel_task) && (thread->priority == IDLEPRI) && (thread->sched_pri == IDLEPRI))
				continue;
			x = splsched();
			thread_lock(thread);

			info->task_timer_wakeups_bin_1 += thread->thread_timer_wakeups_bin_1;
			info->task_timer_wakeups_bin_2 += thread->thread_timer_wakeups_bin_2;

			tval = timer_grab(&thread->user_timer);
			info->total_user += tval;

			tval = timer_grab(&thread->system_timer);
			if (thread->precise_user_kernel_time) {
				info->total_system += tval;
			} else {
				/* system_timer may represent either sys or user */
				info->total_user += tval;
			}

			thread_unlock(thread);
			splx(x);
		}

		*task_info_count = TASK_POWER_INFO_COUNT;
		break;
	}

	default:
		error = KERN_INVALID_ARGUMENT;
	}

	task_unlock(task);
	return (error);
}

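/*
 *	task_vtimer_set:
 *
 *	Enable the specified virtual timer(s) for the task, recording
 *	each thread's current timer value as the baseline from which
 *	task_vtimer_update() later computes deltas.
 */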
void
task_vtimer_set(
	task_t		task,
	integer_t	which)
{
	thread_t	thread;
	spl_t		x;

	/* assert(task == current_task()); */ /* bogus assert 4803227 4807483 */

	task_lock(task);

	task->vtimers |= which;

	switch (which) {

	case TASK_VTIMER_USER:
		queue_iterate(&task->threads, thread, thread_t, task_threads) {
			x = splsched();
			thread_lock(thread);
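			/* absent precise accounting, user time accumulates in system_timer */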
			if (thread->precise_user_kernel_time)
				thread->vtimer_user_save = timer_grab(&thread->user_timer);
			else
				thread->vtimer_user_save = timer_grab(&thread->system_timer);
			thread_unlock(thread);
			splx(x);
		}
		break;

	case TASK_VTIMER_PROF:
		queue_iterate(&task->threads, thread, thread_t, task_threads) {
			x = splsched();
			thread_lock(thread);
			thread->vtimer_prof_save = timer_grab(&thread->user_timer);
			thread->vtimer_prof_save += timer_grab(&thread->system_timer);
			thread_unlock(thread);
			splx(x);
		}
		break;

	case TASK_VTIMER_RLIM:
		queue_iterate(&task->threads, thread, thread_t, task_threads) {
			x = splsched();
			thread_lock(thread);
			thread->vtimer_rlim_save = timer_grab(&thread->user_timer);
			thread->vtimer_rlim_save += timer_grab(&thread->system_timer);
			thread_unlock(thread);
			splx(x);
		}
		break;
	}

	task_unlock(task);
}

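/*
 *	task_vtimer_clear:
 *
 *	Disable the specified virtual timer(s) for the task.
 */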
void
task_vtimer_clear(
	task_t		task,
	integer_t	which)
{
	assert(task == current_task());

	task_lock(task);

	task->vtimers &= ~which;

	task_unlock(task);
}

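/*
 *	task_vtimer_update:
 *
 *	Return, in microseconds, the time the current thread has
 *	accumulated against the specified virtual timer since the
 *	last baseline, advancing the baseline as appropriate.
 */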
void
task_vtimer_update(
	__unused task_t	task,
	integer_t	which,
	uint32_t	*microsecs)
{
	thread_t	thread = current_thread();
	uint32_t	tdelt;
	clock_sec_t	secs;
	uint64_t	tsum;

	assert(task == current_task());

	assert(task->vtimers & which);

	secs = tdelt = 0;

	switch (which) {

	case TASK_VTIMER_USER:
		if (thread->precise_user_kernel_time) {
			tdelt = (uint32_t)timer_delta(&thread->user_timer,
						&thread->vtimer_user_save);
		} else {
			tdelt = (uint32_t)timer_delta(&thread->system_timer,
						&thread->vtimer_user_save);
		}
		absolutetime_to_microtime(tdelt, &secs, microsecs);
		break;

	case TASK_VTIMER_PROF:
		tsum = timer_grab(&thread->user_timer);
		tsum += timer_grab(&thread->system_timer);
		tdelt = (uint32_t)(tsum - thread->vtimer_prof_save);
		absolutetime_to_microtime(tdelt, &secs, microsecs);
		/* if the time delta is smaller than a usec, ignore */
		if (*microsecs != 0)
			thread->vtimer_prof_save = tsum;
		break;

	case TASK_VTIMER_RLIM:
		tsum = timer_grab(&thread->user_timer);
		tsum += timer_grab(&thread->system_timer);
		tdelt = (uint32_t)(tsum - thread->vtimer_rlim_save);
		thread->vtimer_rlim_save = tsum;
		absolutetime_to_microtime(tdelt, &secs, microsecs);
		break;
	}
}

/*
 *	task_assign:
 *
 *	Change the assigned processor set for the task.
 *	Not supported; always returns KERN_FAILURE.
 */
kern_return_t
task_assign(
	__unused task_t			task,
	__unused processor_set_t	new_pset,
	__unused boolean_t		assign_threads)
{
	return (KERN_FAILURE);
}

/*
 *	task_assign_default:
 *
 *	Version of task_assign that assigns to the default processor set.
 */
kern_return_t
task_assign_default(
	task_t		task,
	boolean_t	assign_threads)
{
	return (task_assign(task, &pset0, assign_threads));
}

/*
 *	task_get_assignment:
 *
 *	Return the name of the processor set to which the task is assigned.
 */
kern_return_t
task_get_assignment(
	task_t		task,
	processor_set_t	*pset)
{
	if (!task->active)
		return (KERN_FAILURE);

	*pset = &pset0;

	return (KERN_SUCCESS);
}

/*
 *	task_policy:
 *
 *	Set scheduling policy and parameters, both base and limit, for
 *	the given task. Policy must be a policy which is enabled for the
 *	processor set. Change contained threads if requested.
 *	Not supported; always returns KERN_FAILURE.
 */
kern_return_t
task_policy(
	__unused task_t				task,
	__unused policy_t			policy_id,
	__unused policy_base_t			base,
	__unused mach_msg_type_number_t		count,
	__unused boolean_t			set_limit,
	__unused boolean_t			change)
{
	return (KERN_FAILURE);
}

/*
 *	task_set_policy:
 *
 *	Set scheduling policy and parameters, both base and limit, for
 *	the given task. Policy can be any policy implemented by the
 *	processor set, whether enabled or not. Change contained threads
 *	if requested.
 *	Not supported; always returns KERN_FAILURE.
 */
kern_return_t
task_set_policy(
	__unused task_t				task,
	__unused processor_set_t		pset,
	__unused policy_t			policy_id,
	__unused policy_base_t			base,
	__unused mach_msg_type_number_t		base_count,
	__unused policy_limit_t			limit,
	__unused mach_msg_type_number_t		limit_count,
	__unused boolean_t			change)
{
	return (KERN_FAILURE);
}

#if	FAST_TAS
kern_return_t
task_set_ras_pc(
	task_t		task,
	vm_offset_t	pc,
	vm_offset_t	endpc)
{
	extern int fast_tas_debug;

	if (fast_tas_debug) {
		printf("task %p: setting fast_tas to [0x%lx, 0x%lx]\n",
		       task, (unsigned long)pc, (unsigned long)endpc);
	}
	task_lock(task);
	task->fast_tas_base = pc;
	task->fast_tas_end = endpc;
	task_unlock(task);
	return KERN_SUCCESS;
}
#else	/* FAST_TAS */
kern_return_t
task_set_ras_pc(
	__unused task_t		task,
	__unused vm_offset_t	pc,
	__unused vm_offset_t	endpc)
{
	return KERN_FAILURE;
}
#endif	/* FAST_TAS */

void
task_synchronizer_destroy_all(task_t task)
{
	semaphore_t	semaphore;
	lock_set_t	lock_set;

	/*
	 *  Destroy owned semaphores
	 */
	while (!queue_empty(&task->semaphore_list)) {
		semaphore = (semaphore_t) queue_first(&task->semaphore_list);
		(void) semaphore_destroy(task, semaphore);
	}

	/*
	 *  Destroy owned lock sets
	 */
	while (!queue_empty(&task->lock_set_list)) {
		lock_set = (lock_set_t) queue_first(&task->lock_set_list);
		(void) lock_set_destroy(task, lock_set);
	}
}

/*
 * Install the default (machine-dependent) initial thread state
 * on the task.  Subsequent thread creation will have this initial
 * state set on the thread by machine_thread_inherit_taskwide().
 * Flavors and structures are exactly the same as those passed to
 * thread_set_state().
 */
kern_return_t
task_set_state(
	task_t task,
	int flavor,
	thread_state_t state,
	mach_msg_type_number_t state_count)
{
	kern_return_t ret;

	if (task == TASK_NULL) {
		return (KERN_INVALID_ARGUMENT);
	}

	task_lock(task);

	if (!task->active) {
		task_unlock(task);
		return (KERN_FAILURE);
	}

	ret = machine_task_set_state(task, flavor, state, state_count);

	task_unlock(task);
	return ret;
}

/*
 * Examine the default (machine-dependent) initial thread state
 * on the task, as set by task_set_state().  Flavors and structures
 * are exactly the same as those passed to thread_get_state().
 */
kern_return_t
task_get_state(
	task_t	task,
	int	flavor,
	thread_state_t state,
	mach_msg_type_number_t *state_count)
{
	kern_return_t ret;

	if (task == TASK_NULL) {
		return (KERN_INVALID_ARGUMENT);
	}

	task_lock(task);

	if (!task->active) {
		task_unlock(task);
		return (KERN_FAILURE);
	}

	ret = machine_task_get_state(task, flavor, state, state_count);

	task_unlock(task);
	return ret;
}

/*
 * Export some functions that are currently implemented as macros
 * within the osfmk component to other components, as functions of
 * the same name.
 */
boolean_t
is_kerneltask(task_t t)
{
	if (t == kernel_task)
		return (TRUE);

	return (FALSE);
}

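/*
 * Returns non-zero if the task currently has a positive suspend count.
 */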
int
check_for_tasksuspend(task_t task)
{
	if (task == TASK_NULL)
		return (0);

	return (task->suspend_count > 0);
}

#undef current_task
task_t current_task(void);
task_t current_task(void)
{
	return (current_task_fast());
}

#undef task_reference
void task_reference(task_t task);
void
task_reference(
	task_t		task)
{
	if (task != TASK_NULL)
		task_reference_internal(task);
}

/*
 * This routine is always called with the task lock held.  It
 * returns a thread handle without taking a reference, since the
 * caller operates on the thread while the task lock remains held.
 */
thread_t
task_findtid(task_t task, uint64_t tid)
{
	thread_t	thread = THREAD_NULL;

	queue_iterate(&task->threads, thread, thread_t, task_threads) {
		if (thread->thread_id == tid)
			return (thread);
	}

	return (THREAD_NULL);
}

#if CONFIG_MACF_MACH
/*
 * Protect two task labels against modification by taking a reference
 * on both label handles.  The locks do not actually have to be held
 * while using the labels, since only labels with a single reference
 * can be modified in place.
 */
void
tasklabel_lock2(
	task_t a,
	task_t b)
{
	labelh_reference(a->label);
	labelh_reference(b->label);
}

void
tasklabel_unlock2(
	task_t a,
	task_t b)
{
	labelh_release(a->label);
	labelh_release(b->label);
}

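/*
 * Update the task's MAC label, and the label on its self port,
 * from the supplied label.
 */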
void
mac_task_label_update_internal(
	struct label	*pl,
	struct task	*task)
{
	tasklabel_lock(task);
	task->label = labelh_modify(task->label);
	mac_task_label_update(pl, &task->maclabel);
	tasklabel_unlock(task);

	ip_lock(task->itk_self);
	mac_port_label_update_cred(pl, &task->itk_self->ip_label);
	ip_unlock(task->itk_self);
}

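/*
 * Apply a caller-supplied modification function to the task's
 * MAC label.
 */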
void
mac_task_label_modify(
	struct task	*task,
	void		*arg,
	void (*f)	(struct label *l, void *arg))
{
	tasklabel_lock(task);
	task->label = labelh_modify(task->label);
	(*f)(&task->maclabel, arg);
	tasklabel_unlock(task);
}

struct label *
mac_task_get_label(struct task *task)
{
	return (&task->maclabel);
}
#endif	/* CONFIG_MACF_MACH */