1/*
2 * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28/*
29 * @OSF_COPYRIGHT@
30 */
31/*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
49 *  School of Computer Science
50 *  Carnegie Mellon University
51 *  Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56/*
57 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
58 * support for mandatory and extensible security protections.  This notice
59 * is included in support of clause 2.2 (b) of the Apple Public License,
60 * Version 2.0.
61 */
62/*
63 */
64
65/*
66 * File:	ipc_tt.c
67 * Purpose:
68 *	Task and thread related IPC functions.
69 */
70
71#include <mach/mach_types.h>
72#include <mach/boolean.h>
73#include <mach/kern_return.h>
74#include <mach/mach_param.h>
75#include <mach/task_special_ports.h>
76#include <mach/thread_special_ports.h>
77#include <mach/thread_status.h>
78#include <mach/exception_types.h>
79#include <mach/memory_object_types.h>
80#include <mach/mach_traps.h>
81#include <mach/task_server.h>
82#include <mach/thread_act_server.h>
83#include <mach/mach_host_server.h>
84#include <mach/host_priv_server.h>
85#include <mach/vm_map_server.h>
86
87#include <kern/kern_types.h>
88#include <kern/host.h>
89#include <kern/ipc_kobject.h>
90#include <kern/ipc_tt.h>
91#include <kern/kalloc.h>
92#include <kern/thread.h>
93#include <kern/misc_protos.h>
94
95#include <vm/vm_map.h>
96#include <vm/vm_pageout.h>
97#include <vm/vm_protos.h>
98
99#include <security/mac_mach_internal.h>
100
101/* forward declarations */
102task_t convert_port_to_locked_task(ipc_port_t port);
103
104
105/*
106 *	Routine:	ipc_task_init
107 *	Purpose:
108 *		Initialize a task's IPC state.
109 *
110 *		If non-null, some state will be inherited from the parent.
111 *		The parent must be appropriately initialized.
112 *	Conditions:
113 *		Nothing locked.
114 */
115
void
ipc_task_init(
	task_t		task,
	task_t		parent)
{
	ipc_space_t space;
	ipc_port_t kport;
	ipc_port_t nport;
	kern_return_t kr;
	int i;


	/* Give the task its own IPC space; allocation failure is fatal. */
	kr = ipc_space_create(&ipc_table_entries[0], &space);
	if (kr != KERN_SUCCESS)
		panic("ipc_task_init");

	space->is_task = task;

	/* Kernel (control) port representing the task. */
	kport = ipc_port_alloc_kernel();
	if (kport == IP_NULL)
		panic("ipc_task_init");

	/* Task name port; identifies the task but carries no control privilege. */
	nport = ipc_port_alloc_kernel();
	if (nport == IP_NULL)
		panic("ipc_task_init");

	itk_lock_init(task);
	task->itk_self = kport;
	task->itk_nself = nport;
	task->itk_resume = IP_NULL; /* Lazily allocated on-demand */
	/* The user-visible self port initially aliases the kernel port. */
	task->itk_sself = ipc_port_make_send(kport);
	task->itk_debug_control = IP_NULL;
	task->itk_space = space;

	if (parent == TASK_NULL) {
		/* No parent: exception and special ports start out null. */
		ipc_port_t port;

		for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
			task->exc_actions[i].port = IP_NULL;
		}/* for */

		kr = host_get_host_port(host_priv_self(), &port);
		assert(kr == KERN_SUCCESS);
		task->itk_host = port;

		task->itk_bootstrap = IP_NULL;
		task->itk_seatbelt = IP_NULL;
		task->itk_gssd = IP_NULL;
		task->itk_task_access = IP_NULL;

		for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
			task->itk_registered[i] = IP_NULL;
	} else {
		/* Copy inheritable rights while holding the parent's itk lock. */
		itk_lock(parent);
		assert(parent->itk_self != IP_NULL);

		/* inherit registered ports */

		for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
			task->itk_registered[i] =
				ipc_port_copy_send(parent->itk_registered[i]);

		/* inherit exception and bootstrap ports */

		for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
		    task->exc_actions[i].port =
		  		ipc_port_copy_send(parent->exc_actions[i].port);
		    task->exc_actions[i].flavor =
				parent->exc_actions[i].flavor;
		    task->exc_actions[i].behavior =
				parent->exc_actions[i].behavior;
		    task->exc_actions[i].privileged =
				parent->exc_actions[i].privileged;
		}/* for */
		task->itk_host =
			ipc_port_copy_send(parent->itk_host);

		task->itk_bootstrap =
			ipc_port_copy_send(parent->itk_bootstrap);

		task->itk_seatbelt =
			ipc_port_copy_send(parent->itk_seatbelt);

		task->itk_gssd =
			ipc_port_copy_send(parent->itk_gssd);

		task->itk_task_access =
			ipc_port_copy_send(parent->itk_task_access);

		itk_unlock(parent);
	}
}
208
209/*
210 *	Routine:	ipc_task_enable
211 *	Purpose:
212 *		Enable a task for IPC access.
213 *	Conditions:
214 *		Nothing locked.
215 */
216
217void
218ipc_task_enable(
219	task_t		task)
220{
221	ipc_port_t kport;
222	ipc_port_t nport;
223
224	itk_lock(task);
225	kport = task->itk_self;
226	if (kport != IP_NULL)
227		ipc_kobject_set(kport, (ipc_kobject_t) task, IKOT_TASK);
228	nport = task->itk_nself;
229	if (nport != IP_NULL)
230		ipc_kobject_set(nport, (ipc_kobject_t) task, IKOT_TASK_NAME);
231	itk_unlock(task);
232}
233
234/*
235 *	Routine:	ipc_task_disable
236 *	Purpose:
237 *		Disable IPC access to a task.
238 *	Conditions:
239 *		Nothing locked.
240 */
241
242void
243ipc_task_disable(
244	task_t		task)
245{
246	ipc_port_t kport;
247	ipc_port_t nport;
248	ipc_port_t rport;
249
250	itk_lock(task);
251	kport = task->itk_self;
252	if (kport != IP_NULL)
253		ipc_kobject_set(kport, IKO_NULL, IKOT_NONE);
254	nport = task->itk_nself;
255	if (nport != IP_NULL)
256		ipc_kobject_set(nport, IKO_NULL, IKOT_NONE);
257
258	rport = task->itk_resume;
259	if (rport != IP_NULL) {
260		/*
261		 * From this point onwards this task is no longer accepting
262		 * resumptions.
263		 *
264		 * There are still outstanding suspensions on this task,
265		 * even as it is being torn down. Disconnect the task
266		 * from the rport, thereby "orphaning" the rport. The rport
267		 * itself will go away only when the last suspension holder
268		 * destroys his SO right to it -- when he either
269		 * exits, or tries to actually use that last SO right to
270		 * resume this (now non-existent) task.
271		 */
272		ipc_kobject_set(rport, IKO_NULL, IKOT_NONE);
273	}
274	itk_unlock(task);
275}
276
277/*
278 *	Routine:	ipc_task_terminate
279 *	Purpose:
280 *		Clean up and destroy a task's IPC state.
281 *	Conditions:
282 *		Nothing locked.  The task must be suspended.
283 *		(Or the current thread must be in the task.)
284 */
285
void
ipc_task_terminate(
	task_t		task)
{
	ipc_port_t kport;
	ipc_port_t nport;
	ipc_port_t rport;
	int i;

	itk_lock(task);
	kport = task->itk_self;

	if (kport == IP_NULL) {
		/* the task is already terminated (can this happen?) */
		itk_unlock(task);
		return;
	}
	/* Null the kernel/name/resume ports under the lock so no new
	 * conversions can find them; the ports themselves are destroyed
	 * after the lock is dropped. */
	task->itk_self = IP_NULL;

	nport = task->itk_nself;
	assert(nport != IP_NULL);
	task->itk_nself = IP_NULL;

	rport = task->itk_resume;
	task->itk_resume = IP_NULL;

	itk_unlock(task);

	/* release the naked send rights */
	/* NOTE(review): the remaining itk_* fields are read without the
	 * lock here — presumably safe because the task is terminating and
	 * itk_self is already null; confirm no concurrent mutators. */

	if (IP_VALID(task->itk_sself))
		ipc_port_release_send(task->itk_sself);

	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
		if (IP_VALID(task->exc_actions[i].port)) {
			ipc_port_release_send(task->exc_actions[i].port);
		}
	}

	if (IP_VALID(task->itk_host))
		ipc_port_release_send(task->itk_host);

	if (IP_VALID(task->itk_bootstrap))
		ipc_port_release_send(task->itk_bootstrap);

	if (IP_VALID(task->itk_seatbelt))
		ipc_port_release_send(task->itk_seatbelt);

	if (IP_VALID(task->itk_gssd))
		ipc_port_release_send(task->itk_gssd);

	if (IP_VALID(task->itk_task_access))
		ipc_port_release_send(task->itk_task_access);

	if (IP_VALID(task->itk_debug_control))
		ipc_port_release_send(task->itk_debug_control);

	for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
		if (IP_VALID(task->itk_registered[i]))
			ipc_port_release_send(task->itk_registered[i]);

	/* destroy the kernel ports */
	ipc_port_dealloc_kernel(kport);
	ipc_port_dealloc_kernel(nport);
	if (rport != IP_NULL)
		ipc_port_dealloc_kernel(rport);

	itk_lock_destroy(task);
}
355
356/*
357 *	Routine:	ipc_task_reset
358 *	Purpose:
359 *		Reset a task's IPC state to protect it when
360 *		it enters an elevated security context. The
361 *		task name port can remain the same - since
362 *		it represents no specific privilege.
363 *	Conditions:
364 *		Nothing locked.  The task must be suspended.
365 *		(Or the current thread must be in the task.)
366 */
367
void
ipc_task_reset(
	task_t		task)
{
	ipc_port_t old_kport, new_kport;
	ipc_port_t old_sself;
	ipc_port_t old_exc_actions[EXC_TYPES_COUNT];
	int i;

	/* Allocate the replacement kernel port before taking the lock. */
	new_kport = ipc_port_alloc_kernel();
	if (new_kport == IP_NULL)
		panic("ipc_task_reset");

	itk_lock(task);

	old_kport = task->itk_self;

	if (old_kport == IP_NULL) {
		/* the task is already terminated (can this happen?) */
		itk_unlock(task);
		ipc_port_dealloc_kernel(new_kport);
		return;
	}

	/* Swap in the new kernel port and mint a fresh self right;
	 * detach the old port so outstanding rights to it lose access. */
	task->itk_self = new_kport;
	old_sself = task->itk_sself;
	task->itk_sself = ipc_port_make_send(new_kport);
	ipc_kobject_set(old_kport, IKO_NULL, IKOT_NONE);
	ipc_kobject_set(new_kport, (ipc_kobject_t) task, IKOT_TASK);

	/* Strip non-privileged exception ports; privileged ones survive. */
	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
		if (!task->exc_actions[i].privileged) {
			old_exc_actions[i] = task->exc_actions[i].port;
			task->exc_actions[i].port = IP_NULL;
		} else {
			old_exc_actions[i] = IP_NULL;
		}
	}/* for */

	if (IP_VALID(task->itk_debug_control)) {
		ipc_port_release_send(task->itk_debug_control);
	}
	task->itk_debug_control = IP_NULL;

	itk_unlock(task);

	/* release the naked send rights */

	if (IP_VALID(old_sself))
		ipc_port_release_send(old_sself);

	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
		if (IP_VALID(old_exc_actions[i])) {
			ipc_port_release_send(old_exc_actions[i]);
		}
	}/* for */

	/* destroy the kernel port */
	ipc_port_dealloc_kernel(old_kport);
}
428
429/*
430 *	Routine:	ipc_thread_init
431 *	Purpose:
432 *		Initialize a thread's IPC state.
433 *	Conditions:
434 *		Nothing locked.
435 */
436
437void
438ipc_thread_init(
439	thread_t	thread)
440{
441	ipc_port_t	kport;
442
443	kport = ipc_port_alloc_kernel();
444	if (kport == IP_NULL)
445		panic("ipc_thread_init");
446
447	thread->ith_self = kport;
448	thread->ith_sself = ipc_port_make_send(kport);
449	thread->exc_actions = NULL;
450
451	ipc_kobject_set(kport, (ipc_kobject_t)thread, IKOT_THREAD);
452
453#if IMPORTANCE_INHERITANCE
454	thread->ith_assertions = 0;
455#endif
456
457	ipc_kmsg_queue_init(&thread->ith_messages);
458
459	thread->ith_rpc_reply = IP_NULL;
460}
461
462void
463ipc_thread_init_exc_actions(
464	thread_t	thread)
465{
466	assert(thread->exc_actions == NULL);
467
468	thread->exc_actions = kalloc(sizeof(struct exception_action) * EXC_TYPES_COUNT);
469	bzero(thread->exc_actions, sizeof(struct exception_action) * EXC_TYPES_COUNT);
470}
471
472void
473ipc_thread_destroy_exc_actions(
474	thread_t	thread)
475{
476	if (thread->exc_actions != NULL) {
477		kfree(thread->exc_actions,
478		      sizeof(struct exception_action) * EXC_TYPES_COUNT);
479		thread->exc_actions = NULL;
480	}
481}
482
483void
484ipc_thread_disable(
485	thread_t	thread)
486{
487	ipc_port_t	kport = thread->ith_self;
488
489	if (kport != IP_NULL)
490		ipc_kobject_set(kport, IKO_NULL, IKOT_NONE);
491}
492
493/*
494 *	Routine:	ipc_thread_terminate
495 *	Purpose:
496 *		Clean up and destroy a thread's IPC state.
497 *	Conditions:
498 *		Nothing locked.
499 */
500
void
ipc_thread_terminate(
	thread_t	thread)
{
	ipc_port_t	kport = thread->ith_self;

	if (kport != IP_NULL) {
		int			i;

		/* release the user-visible self right */
		if (IP_VALID(thread->ith_sself))
			ipc_port_release_send(thread->ith_sself);

		thread->ith_sself = thread->ith_self = IP_NULL;

		/* drop any per-thread exception ports and free the table */
		if (thread->exc_actions != NULL) {
			for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
				if (IP_VALID(thread->exc_actions[i].port))
					ipc_port_release_send(thread->exc_actions[i].port);
			}
			ipc_thread_destroy_exc_actions(thread);
		}

		/* destroy the kernel port itself */
		ipc_port_dealloc_kernel(kport);
	}

#if IMPORTANCE_INHERITANCE
	assert(thread->ith_assertions == 0);
#endif

	/* no kernel messages may still be queued for this thread */
	assert(ipc_kmsg_queue_empty(&thread->ith_messages));

	if (thread->ith_rpc_reply != IP_NULL)
		ipc_port_dealloc_reply(thread->ith_rpc_reply);

	thread->ith_rpc_reply = IP_NULL;
}
537
538/*
539 *	Routine:	ipc_thread_reset
540 *	Purpose:
541 *		Reset the IPC state for a given Mach thread when
542 *		its task enters an elevated security context.
543 * 		Both the thread port and its exception ports have
544 *		to be reset.  Its RPC reply port cannot have any
545 *		rights outstanding, so it should be fine.
546 *	Conditions:
547 *		Nothing locked.
548 */
549
550void
551ipc_thread_reset(
552	thread_t	thread)
553{
554	ipc_port_t old_kport, new_kport;
555	ipc_port_t old_sself;
556	ipc_port_t old_exc_actions[EXC_TYPES_COUNT];
557	boolean_t  has_old_exc_actions = FALSE;
558	int		   i;
559
560	new_kport = ipc_port_alloc_kernel();
561	if (new_kport == IP_NULL)
562		panic("ipc_task_reset");
563
564	thread_mtx_lock(thread);
565
566	old_kport = thread->ith_self;
567
568	if (old_kport == IP_NULL) {
569		/* the  is already terminated (can this happen?) */
570		thread_mtx_unlock(thread);
571		ipc_port_dealloc_kernel(new_kport);
572		return;
573	}
574
575	thread->ith_self = new_kport;
576	old_sself = thread->ith_sself;
577	thread->ith_sself = ipc_port_make_send(new_kport);
578	ipc_kobject_set(old_kport, IKO_NULL, IKOT_NONE);
579	ipc_kobject_set(new_kport, (ipc_kobject_t) thread, IKOT_THREAD);
580
581	/*
582	 * Only ports that were set by root-owned processes
583	 * (privileged ports) should survive
584	 */
585	if (thread->exc_actions != NULL) {
586		has_old_exc_actions = TRUE;
587		for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
588			if (thread->exc_actions[i].privileged) {
589				old_exc_actions[i] = IP_NULL;
590			} else {
591				old_exc_actions[i] = thread->exc_actions[i].port;
592				thread->exc_actions[i].port = IP_NULL;
593			}
594		}
595	}
596
597	thread_mtx_unlock(thread);
598
599	/* release the naked send rights */
600
601	if (IP_VALID(old_sself))
602		ipc_port_release_send(old_sself);
603
604	if (has_old_exc_actions) {
605		for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
606			ipc_port_release_send(old_exc_actions[i]);
607		}
608	}
609
610	/* destroy the kernel port */
611	ipc_port_dealloc_kernel(old_kport);
612}
613
614/*
615 *	Routine:	retrieve_task_self_fast
616 *	Purpose:
617 *		Optimized version of retrieve_task_self,
618 *		that only works for the current task.
619 *
620 *		Return a send right (possibly null/dead)
621 *		for the task's user-visible self port.
622 *	Conditions:
623 *		Nothing locked.
624 */
625
626ipc_port_t
627retrieve_task_self_fast(
628	register task_t		task)
629{
630	register ipc_port_t port;
631
632	assert(task == current_task());
633
634	itk_lock(task);
635	assert(task->itk_self != IP_NULL);
636
637	if ((port = task->itk_sself) == task->itk_self) {
638		/* no interposing */
639
640		ip_lock(port);
641		assert(ip_active(port));
642		ip_reference(port);
643		port->ip_srights++;
644		ip_unlock(port);
645	} else
646		port = ipc_port_copy_send(port);
647	itk_unlock(task);
648
649	return port;
650}
651
652/*
653 *	Routine:	retrieve_thread_self_fast
654 *	Purpose:
655 *		Return a send right (possibly null/dead)
656 *		for the thread's user-visible self port.
657 *
658 *		Only works for the current thread.
659 *
660 *	Conditions:
661 *		Nothing locked.
662 */
663
664ipc_port_t
665retrieve_thread_self_fast(
666	thread_t		thread)
667{
668	register ipc_port_t port;
669
670	assert(thread == current_thread());
671
672	thread_mtx_lock(thread);
673
674	assert(thread->ith_self != IP_NULL);
675
676	if ((port = thread->ith_sself) == thread->ith_self) {
677		/* no interposing */
678
679		ip_lock(port);
680		assert(ip_active(port));
681		ip_reference(port);
682		port->ip_srights++;
683		ip_unlock(port);
684	}
685	else
686		port = ipc_port_copy_send(port);
687
688	thread_mtx_unlock(thread);
689
690	return port;
691}
692
693/*
694 *	Routine:	task_self_trap [mach trap]
695 *	Purpose:
696 *		Give the caller send rights for his own task port.
697 *	Conditions:
698 *		Nothing locked.
699 *	Returns:
700 *		MACH_PORT_NULL if there are any resource failures
701 *		or other errors.
702 */
703
704mach_port_name_t
705task_self_trap(
706	__unused struct task_self_trap_args *args)
707{
708	task_t task = current_task();
709	ipc_port_t sright;
710	mach_port_name_t name;
711
712	sright = retrieve_task_self_fast(task);
713	name = ipc_port_copyout_send(sright, task->itk_space);
714	return name;
715}
716
717/*
718 *	Routine:	thread_self_trap [mach trap]
719 *	Purpose:
720 *		Give the caller send rights for his own thread port.
721 *	Conditions:
722 *		Nothing locked.
723 *	Returns:
724 *		MACH_PORT_NULL if there are any resource failures
725 *		or other errors.
726 */
727
728mach_port_name_t
729thread_self_trap(
730	__unused struct thread_self_trap_args *args)
731{
732	thread_t  thread = current_thread();
733	task_t task = thread->task;
734	ipc_port_t sright;
735	mach_port_name_t name;
736
737	sright = retrieve_thread_self_fast(thread);
738	name = ipc_port_copyout_send(sright, task->itk_space);
739	return name;
740
741}
742
743/*
744 *	Routine:	mach_reply_port [mach trap]
745 *	Purpose:
746 *		Allocate a port for the caller.
747 *	Conditions:
748 *		Nothing locked.
749 *	Returns:
750 *		MACH_PORT_NULL if there are any resource failures
751 *		or other errors.
752 */
753
754mach_port_name_t
755mach_reply_port(
756	__unused struct mach_reply_port_args *args)
757{
758	ipc_port_t port;
759	mach_port_name_t name;
760	kern_return_t kr;
761
762	kr = ipc_port_alloc(current_task()->itk_space, &name, &port);
763	if (kr == KERN_SUCCESS)
764		ip_unlock(port);
765	else
766		name = MACH_PORT_NULL;
767	return name;
768}
769
770/*
771 *	Routine:	thread_get_special_port [kernel call]
772 *	Purpose:
773 *		Clones a send right for one of the thread's
774 *		special ports.
775 *	Conditions:
776 *		Nothing locked.
777 *	Returns:
778 *		KERN_SUCCESS		Extracted a send right.
779 *		KERN_INVALID_ARGUMENT	The thread is null.
780 *		KERN_FAILURE		The thread is dead.
781 *		KERN_INVALID_ARGUMENT	Invalid special port.
782 */
783
784kern_return_t
785thread_get_special_port(
786	thread_t		thread,
787	int				which,
788	ipc_port_t		*portp)
789{
790	kern_return_t	result = KERN_SUCCESS;
791	ipc_port_t		*whichp;
792
793	if (thread == THREAD_NULL)
794		return (KERN_INVALID_ARGUMENT);
795
796	switch (which) {
797
798	case THREAD_KERNEL_PORT:
799		whichp = &thread->ith_sself;
800		break;
801
802	default:
803		return (KERN_INVALID_ARGUMENT);
804	}
805
806 	thread_mtx_lock(thread);
807
808	if (thread->active)
809		*portp = ipc_port_copy_send(*whichp);
810	else
811		result = KERN_FAILURE;
812
813	thread_mtx_unlock(thread);
814
815	return (result);
816}
817
818/*
819 *	Routine:	thread_set_special_port [kernel call]
820 *	Purpose:
821 *		Changes one of the thread's special ports,
822 *		setting it to the supplied send right.
823 *	Conditions:
824 *		Nothing locked.  If successful, consumes
825 *		the supplied send right.
826 *	Returns:
827 *		KERN_SUCCESS		Changed the special port.
828 *		KERN_INVALID_ARGUMENT	The thread is null.
829 *		KERN_FAILURE		The thread is dead.
830 *		KERN_INVALID_ARGUMENT	Invalid special port.
831 */
832
833kern_return_t
834thread_set_special_port(
835	thread_t		thread,
836	int			which,
837	ipc_port_t	port)
838{
839	kern_return_t	result = KERN_SUCCESS;
840	ipc_port_t		*whichp, old = IP_NULL;
841
842	if (thread == THREAD_NULL)
843		return (KERN_INVALID_ARGUMENT);
844
845	switch (which) {
846
847	case THREAD_KERNEL_PORT:
848		whichp = &thread->ith_sself;
849		break;
850
851	default:
852		return (KERN_INVALID_ARGUMENT);
853	}
854
855	thread_mtx_lock(thread);
856
857	if (thread->active) {
858		old = *whichp;
859		*whichp = port;
860	}
861	else
862		result = KERN_FAILURE;
863
864	thread_mtx_unlock(thread);
865
866	if (IP_VALID(old))
867		ipc_port_release_send(old);
868
869	return (result);
870}
871
872/*
873 *	Routine:	task_get_special_port [kernel call]
874 *	Purpose:
875 *		Clones a send right for one of the task's
876 *		special ports.
877 *	Conditions:
878 *		Nothing locked.
879 *	Returns:
880 *		KERN_SUCCESS		Extracted a send right.
881 *		KERN_INVALID_ARGUMENT	The task is null.
882 *		KERN_FAILURE		The task/space is dead.
883 *		KERN_INVALID_ARGUMENT	Invalid special port.
884 */
885
886kern_return_t
887task_get_special_port(
888	task_t		task,
889	int		which,
890	ipc_port_t	*portp)
891{
892	ipc_port_t port;
893
894	if (task == TASK_NULL)
895		return KERN_INVALID_ARGUMENT;
896
897	itk_lock(task);
898	if (task->itk_self == IP_NULL) {
899		itk_unlock(task);
900		return KERN_FAILURE;
901	}
902
903	switch (which) {
904	    case TASK_KERNEL_PORT:
905		port = ipc_port_copy_send(task->itk_sself);
906		break;
907
908	    case TASK_NAME_PORT:
909		port = ipc_port_make_send(task->itk_nself);
910		break;
911
912	    case TASK_HOST_PORT:
913		port = ipc_port_copy_send(task->itk_host);
914		break;
915
916	    case TASK_BOOTSTRAP_PORT:
917		port = ipc_port_copy_send(task->itk_bootstrap);
918		break;
919
920	    case TASK_SEATBELT_PORT:
921		port = ipc_port_copy_send(task->itk_seatbelt);
922		break;
923
924	    case TASK_ACCESS_PORT:
925		port = ipc_port_copy_send(task->itk_task_access);
926		break;
927
928		case TASK_DEBUG_CONTROL_PORT:
929		port = ipc_port_copy_send(task->itk_debug_control);
930		break;
931
932	    default:
933               itk_unlock(task);
934		return KERN_INVALID_ARGUMENT;
935	}
936	itk_unlock(task);
937
938	*portp = port;
939	return KERN_SUCCESS;
940}
941
942/*
943 *	Routine:	task_set_special_port [kernel call]
944 *	Purpose:
945 *		Changes one of the task's special ports,
946 *		setting it to the supplied send right.
947 *	Conditions:
948 *		Nothing locked.  If successful, consumes
949 *		the supplied send right.
950 *	Returns:
951 *		KERN_SUCCESS		Changed the special port.
952 *		KERN_INVALID_ARGUMENT	The task is null.
953 *		KERN_FAILURE		The task/space is dead.
954 *		KERN_INVALID_ARGUMENT	Invalid special port.
955 * 		KERN_NO_ACCESS		Attempted overwrite of seatbelt port.
956 */
957
958kern_return_t
959task_set_special_port(
960	task_t		task,
961	int		which,
962	ipc_port_t	port)
963{
964	ipc_port_t *whichp;
965	ipc_port_t old;
966
967	if (task == TASK_NULL)
968		return KERN_INVALID_ARGUMENT;
969
970	switch (which) {
971	    case TASK_KERNEL_PORT:
972		whichp = &task->itk_sself;
973		break;
974
975	    case TASK_HOST_PORT:
976		whichp = &task->itk_host;
977		break;
978
979	    case TASK_BOOTSTRAP_PORT:
980		whichp = &task->itk_bootstrap;
981		break;
982
983	    case TASK_SEATBELT_PORT:
984		whichp = &task->itk_seatbelt;
985		break;
986
987	    case TASK_ACCESS_PORT:
988		whichp = &task->itk_task_access;
989		break;
990
991	    case TASK_DEBUG_CONTROL_PORT:
992		whichp = &task->itk_debug_control;
993		break;
994
995
996	    default:
997		return KERN_INVALID_ARGUMENT;
998	}/* switch */
999
1000	itk_lock(task);
1001	if (task->itk_self == IP_NULL) {
1002		itk_unlock(task);
1003		return KERN_FAILURE;
1004	}
1005
1006	/* do not allow overwrite of seatbelt or task access ports */
1007	if ((TASK_SEATBELT_PORT == which  || TASK_ACCESS_PORT == which)
1008		&& IP_VALID(*whichp)) {
1009			itk_unlock(task);
1010			return KERN_NO_ACCESS;
1011	}
1012
1013	old = *whichp;
1014	*whichp = port;
1015	itk_unlock(task);
1016
1017	if (IP_VALID(old))
1018		ipc_port_release_send(old);
1019	return KERN_SUCCESS;
1020}
1021
1022
1023/*
1024 *	Routine:	mach_ports_register [kernel call]
1025 *	Purpose:
1026 *		Stash a handful of port send rights in the task.
1027 *		Child tasks will inherit these rights, but they
1028 *		must use mach_ports_lookup to acquire them.
1029 *
1030 *		The rights are supplied in a (wired) kalloc'd segment.
1031 *		Rights which aren't supplied are assumed to be null.
1032 *	Conditions:
1033 *		Nothing locked.  If successful, consumes
1034 *		the supplied rights and memory.
1035 *	Returns:
1036 *		KERN_SUCCESS		Stashed the port rights.
1037 *		KERN_INVALID_ARGUMENT	The task is null.
1038 *		KERN_INVALID_ARGUMENT	The task is dead.
1039 *		KERN_INVALID_ARGUMENT	The memory param is null.
1040 *		KERN_INVALID_ARGUMENT	Too many port rights supplied.
1041 */
1042
1043kern_return_t
1044mach_ports_register(
1045	task_t			task,
1046	mach_port_array_t	memory,
1047	mach_msg_type_number_t	portsCnt)
1048{
1049	ipc_port_t ports[TASK_PORT_REGISTER_MAX];
1050	unsigned int i;
1051
1052	if ((task == TASK_NULL) ||
1053	    (portsCnt > TASK_PORT_REGISTER_MAX) ||
1054	    (portsCnt && memory == NULL))
1055		return KERN_INVALID_ARGUMENT;
1056
1057	/*
1058	 *	Pad the port rights with nulls.
1059	 */
1060
1061	for (i = 0; i < portsCnt; i++)
1062		ports[i] = memory[i];
1063	for (; i < TASK_PORT_REGISTER_MAX; i++)
1064		ports[i] = IP_NULL;
1065
1066	itk_lock(task);
1067	if (task->itk_self == IP_NULL) {
1068		itk_unlock(task);
1069		return KERN_INVALID_ARGUMENT;
1070	}
1071
1072	/*
1073	 *	Replace the old send rights with the new.
1074	 *	Release the old rights after unlocking.
1075	 */
1076
1077	for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
1078		ipc_port_t old;
1079
1080		old = task->itk_registered[i];
1081		task->itk_registered[i] = ports[i];
1082		ports[i] = old;
1083	}
1084
1085	itk_unlock(task);
1086
1087	for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
1088		if (IP_VALID(ports[i]))
1089			ipc_port_release_send(ports[i]);
1090
1091	/*
1092	 *	Now that the operation is known to be successful,
1093	 *	we can free the memory.
1094	 */
1095
1096	if (portsCnt != 0)
1097		kfree(memory,
1098		      (vm_size_t) (portsCnt * sizeof(mach_port_t)));
1099
1100	return KERN_SUCCESS;
1101}
1102
1103/*
1104 *	Routine:	mach_ports_lookup [kernel call]
1105 *	Purpose:
1106 *		Retrieves (clones) the stashed port send rights.
1107 *	Conditions:
1108 *		Nothing locked.  If successful, the caller gets
1109 *		rights and memory.
1110 *	Returns:
1111 *		KERN_SUCCESS		Retrieved the send rights.
1112 *		KERN_INVALID_ARGUMENT	The task is null.
1113 *		KERN_INVALID_ARGUMENT	The task is dead.
1114 *		KERN_RESOURCE_SHORTAGE	Couldn't allocate memory.
1115 */
1116
1117kern_return_t
1118mach_ports_lookup(
1119	task_t			task,
1120	mach_port_array_t	*portsp,
1121	mach_msg_type_number_t	*portsCnt)
1122{
1123	void  *memory;
1124	vm_size_t size;
1125	ipc_port_t *ports;
1126	int i;
1127
1128	if (task == TASK_NULL)
1129		return KERN_INVALID_ARGUMENT;
1130
1131	size = (vm_size_t) (TASK_PORT_REGISTER_MAX * sizeof(ipc_port_t));
1132
1133	memory = kalloc(size);
1134	if (memory == 0)
1135		return KERN_RESOURCE_SHORTAGE;
1136
1137	itk_lock(task);
1138	if (task->itk_self == IP_NULL) {
1139		itk_unlock(task);
1140
1141		kfree(memory, size);
1142		return KERN_INVALID_ARGUMENT;
1143	}
1144
1145	ports = (ipc_port_t *) memory;
1146
1147	/*
1148	 *	Clone port rights.  Because kalloc'd memory
1149	 *	is wired, we won't fault while holding the task lock.
1150	 */
1151
1152	for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
1153		ports[i] = ipc_port_copy_send(task->itk_registered[i]);
1154
1155	itk_unlock(task);
1156
1157	*portsp = (mach_port_array_t) ports;
1158	*portsCnt = TASK_PORT_REGISTER_MAX;
1159	return KERN_SUCCESS;
1160}
1161
1162/*
1163 *	Routine: convert_port_to_locked_task
1164 *	Purpose:
1165 *		Internal helper routine to convert from a port to a locked
1166 *		task.  Used by several routines that try to convert from a
1167 *		task port to a reference on some task related object.
1168 *	Conditions:
1169 *		Nothing locked, blocking OK.
1170 */
task_t
convert_port_to_locked_task(ipc_port_t port)
{
        int try_failed_count = 0;

	/* Loop: retry until the task lock is acquired or the port dies. */
	while (IP_VALID(port)) {
		task_t task;

		ip_lock(port);
		if (!ip_active(port) || (ip_kotype(port) != IKOT_TASK)) {
			ip_unlock(port);
			return TASK_NULL;
		}
		task = (task_t) port->ip_kobject;
		assert(task != TASK_NULL);

		/*
		 * Normal lock ordering puts task_lock() before ip_lock().
		 * Attempt out-of-order locking here.
		 */
		if (task_lock_try(task)) {
			/* got the task locked; drop the port lock and return */
			ip_unlock(port);
			return(task);
		}
		try_failed_count++;

		/* back off (pause scales with failures), then re-validate the
		 * port from scratch, since it may have died while unlocked */
		ip_unlock(port);
		mutex_pause(try_failed_count);
	}
	return TASK_NULL;
}
1202
1203/*
1204 *	Routine:	convert_port_to_task
1205 *	Purpose:
1206 *		Convert from a port to a task.
1207 *		Doesn't consume the port ref; produces a task ref,
1208 *		which may be null.
1209 *	Conditions:
1210 *		Nothing locked.
1211 */
1212task_t
1213convert_port_to_task(
1214	ipc_port_t		port)
1215{
1216	task_t		task = TASK_NULL;
1217
1218	if (IP_VALID(port)) {
1219		ip_lock(port);
1220
1221		if (	ip_active(port)					&&
1222				ip_kotype(port) == IKOT_TASK		) {
1223			task = (task_t)port->ip_kobject;
1224			assert(task != TASK_NULL);
1225
1226			task_reference_internal(task);
1227		}
1228
1229		ip_unlock(port);
1230	}
1231
1232	return (task);
1233}
1234
1235/*
1236 *	Routine:	convert_port_to_task_name
1237 *	Purpose:
1238 *		Convert from a port to a task name.
1239 *		Doesn't consume the port ref; produces a task name ref,
1240 *		which may be null.
1241 *	Conditions:
1242 *		Nothing locked.
1243 */
1244task_name_t
1245convert_port_to_task_name(
1246	ipc_port_t		port)
1247{
1248	task_name_t		task = TASK_NULL;
1249
1250	if (IP_VALID(port)) {
1251		ip_lock(port);
1252
1253		if (	ip_active(port)					&&
1254				(ip_kotype(port) == IKOT_TASK	||
1255				 ip_kotype(port) == IKOT_TASK_NAME)) {
1256			task = (task_name_t)port->ip_kobject;
1257			assert(task != TASK_NAME_NULL);
1258
1259			task_reference_internal(task);
1260		}
1261
1262		ip_unlock(port);
1263	}
1264
1265	return (task);
1266}
1267
1268/*
1269 *	Routine:	convert_port_to_task_suspension_token
1270 *	Purpose:
1271 *		Convert from a port to a task suspension token.
1272 *		Doesn't consume the port ref; produces a suspension token ref,
1273 *		which may be null.
1274 *	Conditions:
1275 *		Nothing locked.
1276 */
1277task_suspension_token_t
1278convert_port_to_task_suspension_token(
1279	ipc_port_t		port)
1280{
1281	task_suspension_token_t		task = TASK_NULL;
1282
1283	if (IP_VALID(port)) {
1284		ip_lock(port);
1285
1286		if (	ip_active(port)					&&
1287				ip_kotype(port) == IKOT_TASK_RESUME) {
1288			task = (task_suspension_token_t)port->ip_kobject;
1289			assert(task != TASK_NULL);
1290
1291			task_reference_internal(task);
1292		}
1293
1294		ip_unlock(port);
1295	}
1296
1297	return (task);
1298}
1299
1300/*
1301 *	Routine:	convert_port_to_space
1302 *	Purpose:
1303 *		Convert from a port to a space.
1304 *		Doesn't consume the port ref; produces a space ref,
1305 *		which may be null.
1306 *	Conditions:
1307 *		Nothing locked.
1308 */
1309ipc_space_t
1310convert_port_to_space(
1311	ipc_port_t	port)
1312{
1313	ipc_space_t space;
1314	task_t task;
1315
1316	task = convert_port_to_locked_task(port);
1317
1318	if (task == TASK_NULL)
1319		return IPC_SPACE_NULL;
1320
1321	if (!task->active) {
1322		task_unlock(task);
1323		return IPC_SPACE_NULL;
1324	}
1325
1326	space = task->itk_space;
1327	is_reference(space);
1328	task_unlock(task);
1329	return (space);
1330}
1331
1332/*
1333 *	Routine:	convert_port_to_map
1334 *	Purpose:
1335 *		Convert from a port to a map.
1336 *		Doesn't consume the port ref; produces a map ref,
1337 *		which may be null.
1338 *	Conditions:
1339 *		Nothing locked.
1340 */
1341
1342vm_map_t
1343convert_port_to_map(
1344	ipc_port_t	port)
1345{
1346	task_t task;
1347	vm_map_t map;
1348
1349	task = convert_port_to_locked_task(port);
1350
1351	if (task == TASK_NULL)
1352		return VM_MAP_NULL;
1353
1354	if (!task->active) {
1355		task_unlock(task);
1356		return VM_MAP_NULL;
1357	}
1358
1359	map = task->map;
1360	vm_map_reference_swap(map);
1361	task_unlock(task);
1362	return map;
1363}
1364
1365
1366/*
1367 *	Routine:	convert_port_to_thread
1368 *	Purpose:
1369 *		Convert from a port to a thread.
1370 *		Doesn't consume the port ref; produces an thread ref,
1371 *		which may be null.
1372 *	Conditions:
1373 *		Nothing locked.
1374 */
1375
1376thread_t
1377convert_port_to_thread(
1378	ipc_port_t		port)
1379{
1380	thread_t	thread = THREAD_NULL;
1381
1382	if (IP_VALID(port)) {
1383		ip_lock(port);
1384
1385		if (	ip_active(port)					&&
1386				ip_kotype(port) == IKOT_THREAD		) {
1387			thread = (thread_t)port->ip_kobject;
1388			assert(thread != THREAD_NULL);
1389
1390			thread_reference_internal(thread);
1391		}
1392
1393		ip_unlock(port);
1394	}
1395
1396	return (thread);
1397}
1398
1399/*
1400 *	Routine:	port_name_to_thread
1401 *	Purpose:
1402 *		Convert from a port name to an thread reference
1403 *		A name of MACH_PORT_NULL is valid for the null thread.
1404 *	Conditions:
1405 *		Nothing locked.
1406 */
1407thread_t
1408port_name_to_thread(
1409	mach_port_name_t	name)
1410{
1411	thread_t	thread = THREAD_NULL;
1412	ipc_port_t	kport;
1413
1414	if (MACH_PORT_VALID(name)) {
1415		if (ipc_object_copyin(current_space(), name,
1416					       MACH_MSG_TYPE_COPY_SEND,
1417							  (ipc_object_t *)&kport) != KERN_SUCCESS)
1418			return (THREAD_NULL);
1419
1420		thread = convert_port_to_thread(kport);
1421
1422		if (IP_VALID(kport))
1423			ipc_port_release_send(kport);
1424	}
1425
1426	return (thread);
1427}
1428
1429task_t
1430port_name_to_task(
1431	mach_port_name_t name)
1432{
1433	ipc_port_t kern_port;
1434	kern_return_t kr;
1435	task_t task = TASK_NULL;
1436
1437	if (MACH_PORT_VALID(name)) {
1438		kr = ipc_object_copyin(current_space(), name,
1439				       MACH_MSG_TYPE_COPY_SEND,
1440				       (ipc_object_t *) &kern_port);
1441		if (kr != KERN_SUCCESS)
1442			return TASK_NULL;
1443
1444		task = convert_port_to_task(kern_port);
1445
1446		if (IP_VALID(kern_port))
1447			ipc_port_release_send(kern_port);
1448	}
1449	return task;
1450}
1451
1452/*
1453 *	Routine:	convert_task_to_port
1454 *	Purpose:
1455 *		Convert from a task to a port.
1456 *		Consumes a task ref; produces a naked send right
1457 *		which may be invalid.
1458 *	Conditions:
1459 *		Nothing locked.
1460 */
1461
1462ipc_port_t
1463convert_task_to_port(
1464	task_t		task)
1465{
1466	ipc_port_t port;
1467
1468	itk_lock(task);
1469	if (task->itk_self != IP_NULL)
1470		port = ipc_port_make_send(task->itk_self);
1471	else
1472		port = IP_NULL;
1473	itk_unlock(task);
1474
1475	task_deallocate(task);
1476	return port;
1477}
1478
1479/*
1480 *	Routine:	convert_task_suspend_token_to_port
1481 *	Purpose:
1482 *		Convert from a task suspension token to a port.
1483 *		Consumes a task suspension token ref; produces a naked send-once right
1484 *		which may be invalid.
1485 *	Conditions:
1486 *		Nothing locked.
1487 */
1488ipc_port_t
1489convert_task_suspension_token_to_port(
1490	task_suspension_token_t		task)
1491{
1492	ipc_port_t port;
1493
1494	task_lock(task);
1495	if (task->active) {
1496		if (task->itk_resume == IP_NULL) {
1497			task->itk_resume = ipc_port_alloc_kernel();
1498			if (!IP_VALID(task->itk_resume)) {
1499				panic("failed to create resume port");
1500			}
1501
1502			ipc_kobject_set(task->itk_resume, (ipc_kobject_t) task, IKOT_TASK_RESUME);
1503		}
1504
1505		/*
1506		 * Create a send-once right for each instance of a direct user-called
1507		 * task_suspend2 call. Each time one of these send-once rights is abandoned,
1508		 * the notification handler will resume the target task.
1509		 */
1510		port = ipc_port_make_sonce(task->itk_resume);
1511		assert(IP_VALID(port));
1512	} else {
1513		port = IP_NULL;
1514	}
1515
1516	task_unlock(task);
1517	task_suspension_token_deallocate(task);
1518
1519	return port;
1520}
1521
1522
1523/*
1524 *	Routine:	convert_task_name_to_port
1525 *	Purpose:
1526 *		Convert from a task name ref to a port.
1527 *		Consumes a task name ref; produces a naked send right
1528 *		which may be invalid.
1529 *	Conditions:
1530 *		Nothing locked.
1531 */
1532
1533ipc_port_t
1534convert_task_name_to_port(
1535	task_name_t		task_name)
1536{
1537	ipc_port_t port;
1538
1539	itk_lock(task_name);
1540	if (task_name->itk_nself != IP_NULL)
1541		port = ipc_port_make_send(task_name->itk_nself);
1542	else
1543		port = IP_NULL;
1544	itk_unlock(task_name);
1545
1546	task_name_deallocate(task_name);
1547	return port;
1548}
1549
1550/*
1551 *	Routine:	convert_thread_to_port
1552 *	Purpose:
1553 *		Convert from a thread to a port.
1554 *		Consumes an thread ref; produces a naked send right
1555 *		which may be invalid.
1556 *	Conditions:
1557 *		Nothing locked.
1558 */
1559
1560ipc_port_t
1561convert_thread_to_port(
1562	thread_t		thread)
1563{
1564	ipc_port_t		port;
1565
1566	thread_mtx_lock(thread);
1567
1568	if (thread->ith_self != IP_NULL)
1569		port = ipc_port_make_send(thread->ith_self);
1570	else
1571		port = IP_NULL;
1572
1573	thread_mtx_unlock(thread);
1574
1575	thread_deallocate(thread);
1576
1577	return (port);
1578}
1579
1580/*
1581 *	Routine:	space_deallocate
1582 *	Purpose:
1583 *		Deallocate a space ref produced by convert_port_to_space.
1584 *	Conditions:
1585 *		Nothing locked.
1586 */
1587
1588void
1589space_deallocate(
1590	ipc_space_t	space)
1591{
1592	if (space != IS_NULL)
1593		is_release(space);
1594}
1595
1596/*
1597 *	Routine:	thread/task_set_exception_ports [kernel call]
1598 *	Purpose:
1599 *			Sets the thread/task exception port, flavor and
1600 *			behavior for the exception types specified by the mask.
1601 *			There will be one send right per exception per valid
1602 *			port.
1603 *	Conditions:
1604 *		Nothing locked.  If successful, consumes
1605 *		the supplied send right.
1606 *	Returns:
1607 *		KERN_SUCCESS		Changed the special port.
1608 *		KERN_INVALID_ARGUMENT	The thread is null,
1609 *					Illegal mask bit set.
1610 *					Illegal exception behavior
1611 *		KERN_FAILURE		The thread is dead.
1612 */
1613
1614kern_return_t
1615thread_set_exception_ports(
1616	thread_t		 		thread,
1617	exception_mask_t		exception_mask,
1618	ipc_port_t				new_port,
1619	exception_behavior_t	new_behavior,
1620	thread_state_flavor_t	new_flavor)
1621{
1622	ipc_port_t		old_port[EXC_TYPES_COUNT];
1623	boolean_t privileged = current_task()->sec_token.val[0] == 0;
1624	register int	i;
1625
1626	if (thread == THREAD_NULL)
1627		return (KERN_INVALID_ARGUMENT);
1628
1629	if (exception_mask & ~EXC_MASK_VALID)
1630		return (KERN_INVALID_ARGUMENT);
1631
1632	if (IP_VALID(new_port)) {
1633		switch (new_behavior & ~MACH_EXCEPTION_CODES) {
1634
1635		case EXCEPTION_DEFAULT:
1636		case EXCEPTION_STATE:
1637		case EXCEPTION_STATE_IDENTITY:
1638			break;
1639
1640		default:
1641			return (KERN_INVALID_ARGUMENT);
1642		}
1643	}
1644
1645	/*
1646	 * Check the validity of the thread_state_flavor by calling the
1647	 * VALID_THREAD_STATE_FLAVOR architecture dependent macro defined in
1648	 * osfmk/mach/ARCHITECTURE/thread_status.h
1649	 */
1650	if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor))
1651		return (KERN_INVALID_ARGUMENT);
1652
1653	thread_mtx_lock(thread);
1654
1655	if (!thread->active) {
1656		thread_mtx_unlock(thread);
1657
1658		return (KERN_FAILURE);
1659	}
1660
1661	if (thread->exc_actions == NULL) {
1662		ipc_thread_init_exc_actions(thread);
1663	}
1664	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
1665		if (exception_mask & (1 << i)) {
1666			old_port[i] = thread->exc_actions[i].port;
1667			thread->exc_actions[i].port = ipc_port_copy_send(new_port);
1668			thread->exc_actions[i].behavior = new_behavior;
1669			thread->exc_actions[i].flavor = new_flavor;
1670			thread->exc_actions[i].privileged = privileged;
1671		}
1672		else
1673			old_port[i] = IP_NULL;
1674	}
1675
1676	thread_mtx_unlock(thread);
1677
1678	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i)
1679		if (IP_VALID(old_port[i]))
1680			ipc_port_release_send(old_port[i]);
1681
1682	if (IP_VALID(new_port))		 /* consume send right */
1683		ipc_port_release_send(new_port);
1684
1685	return (KERN_SUCCESS);
1686}
1687
1688kern_return_t
1689task_set_exception_ports(
1690	task_t					task,
1691	exception_mask_t		exception_mask,
1692	ipc_port_t				new_port,
1693	exception_behavior_t	new_behavior,
1694	thread_state_flavor_t	new_flavor)
1695{
1696	ipc_port_t		old_port[EXC_TYPES_COUNT];
1697	boolean_t privileged = current_task()->sec_token.val[0] == 0;
1698	register int	i;
1699
1700	if (task == TASK_NULL)
1701		return (KERN_INVALID_ARGUMENT);
1702
1703	if (exception_mask & ~EXC_MASK_VALID)
1704		return (KERN_INVALID_ARGUMENT);
1705
1706	if (IP_VALID(new_port)) {
1707		switch (new_behavior & ~MACH_EXCEPTION_CODES) {
1708
1709		case EXCEPTION_DEFAULT:
1710		case EXCEPTION_STATE:
1711		case EXCEPTION_STATE_IDENTITY:
1712			break;
1713
1714		default:
1715			return (KERN_INVALID_ARGUMENT);
1716		}
1717	}
1718
1719	/*
1720	 * Check the validity of the thread_state_flavor by calling the
1721	 * VALID_THREAD_STATE_FLAVOR architecture dependent macro defined in
1722	 * osfmk/mach/ARCHITECTURE/thread_status.h
1723	 */
1724	if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor))
1725		return (KERN_INVALID_ARGUMENT);
1726
1727	itk_lock(task);
1728
1729	if (task->itk_self == IP_NULL) {
1730		itk_unlock(task);
1731
1732		return (KERN_FAILURE);
1733	}
1734
1735	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
1736		if (exception_mask & (1 << i)) {
1737			old_port[i] = task->exc_actions[i].port;
1738			task->exc_actions[i].port =
1739				ipc_port_copy_send(new_port);
1740			task->exc_actions[i].behavior = new_behavior;
1741			task->exc_actions[i].flavor = new_flavor;
1742			task->exc_actions[i].privileged = privileged;
1743		}
1744		else
1745			old_port[i] = IP_NULL;
1746	}
1747
1748	itk_unlock(task);
1749
1750	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i)
1751		if (IP_VALID(old_port[i]))
1752			ipc_port_release_send(old_port[i]);
1753
1754	if (IP_VALID(new_port))		 /* consume send right */
1755		ipc_port_release_send(new_port);
1756
1757	return (KERN_SUCCESS);
1758}
1759
1760/*
1761 *	Routine:	thread/task_swap_exception_ports [kernel call]
1762 *	Purpose:
1763 *			Sets the thread/task exception port, flavor and
1764 *			behavior for the exception types specified by the
1765 *			mask.
1766 *
1767 *			The old ports, behavior and flavors are returned
1768 *			Count specifies the array sizes on input and
1769 *			the number of returned ports etc. on output.  The
1770 *			arrays must be large enough to hold all the returned
1771 *			data, MIG returnes an error otherwise.  The masks
1772 *			array specifies the corresponding exception type(s).
1773 *
1774 *	Conditions:
1775 *		Nothing locked.  If successful, consumes
1776 *		the supplied send right.
1777 *
1778 *		Returns upto [in} CountCnt elements.
1779 *	Returns:
1780 *		KERN_SUCCESS		Changed the special port.
1781 *		KERN_INVALID_ARGUMENT	The thread is null,
1782 *					Illegal mask bit set.
1783 *					Illegal exception behavior
1784 *		KERN_FAILURE		The thread is dead.
1785 */
1786
1787kern_return_t
1788thread_swap_exception_ports(
1789	thread_t					thread,
1790	exception_mask_t			exception_mask,
1791	ipc_port_t					new_port,
1792	exception_behavior_t		new_behavior,
1793	thread_state_flavor_t		new_flavor,
1794	exception_mask_array_t		masks,
1795	mach_msg_type_number_t		*CountCnt,
1796	exception_port_array_t		ports,
1797	exception_behavior_array_t	behaviors,
1798	thread_state_flavor_array_t	flavors)
1799{
1800	ipc_port_t		old_port[EXC_TYPES_COUNT];
1801	boolean_t privileged = current_task()->sec_token.val[0] == 0;
1802	unsigned int	i, j, count;
1803
1804	if (thread == THREAD_NULL)
1805		return (KERN_INVALID_ARGUMENT);
1806
1807	if (exception_mask & ~EXC_MASK_VALID)
1808		return (KERN_INVALID_ARGUMENT);
1809
1810	if (IP_VALID(new_port)) {
1811		switch (new_behavior & ~MACH_EXCEPTION_CODES) {
1812
1813		case EXCEPTION_DEFAULT:
1814		case EXCEPTION_STATE:
1815		case EXCEPTION_STATE_IDENTITY:
1816			break;
1817
1818		default:
1819			return (KERN_INVALID_ARGUMENT);
1820		}
1821	}
1822
1823	if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor))
1824		return (KERN_INVALID_ARGUMENT);
1825
1826	thread_mtx_lock(thread);
1827
1828	if (!thread->active) {
1829		thread_mtx_unlock(thread);
1830
1831		return (KERN_FAILURE);
1832	}
1833
1834	if (thread->exc_actions == NULL) {
1835		ipc_thread_init_exc_actions(thread);
1836	}
1837
1838	assert(EXC_TYPES_COUNT > FIRST_EXCEPTION);
1839	for (count = 0, i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT && count < *CountCnt; ++i) {
1840		if (exception_mask & (1 << i)) {
1841			for (j = 0; j < count; ++j) {
1842				/*
1843				 * search for an identical entry, if found
1844				 * set corresponding mask for this exception.
1845				 */
1846				if (	thread->exc_actions[i].port == ports[j]				&&
1847						thread->exc_actions[i].behavior == behaviors[j]		&&
1848						thread->exc_actions[i].flavor == flavors[j]			) {
1849					masks[j] |= (1 << i);
1850					break;
1851				}
1852			}
1853
1854			if (j == count) {
1855				masks[j] = (1 << i);
1856				ports[j] = ipc_port_copy_send(thread->exc_actions[i].port);
1857
1858				behaviors[j] = thread->exc_actions[i].behavior;
1859				flavors[j] = thread->exc_actions[i].flavor;
1860				++count;
1861			}
1862
1863			old_port[i] = thread->exc_actions[i].port;
1864			thread->exc_actions[i].port = ipc_port_copy_send(new_port);
1865			thread->exc_actions[i].behavior = new_behavior;
1866			thread->exc_actions[i].flavor = new_flavor;
1867			thread->exc_actions[i].privileged = privileged;
1868		}
1869		else
1870			old_port[i] = IP_NULL;
1871	}
1872
1873	thread_mtx_unlock(thread);
1874
1875	while (--i >= FIRST_EXCEPTION) {
1876		if (IP_VALID(old_port[i]))
1877			ipc_port_release_send(old_port[i]);
1878	}
1879
1880	if (IP_VALID(new_port))		 /* consume send right */
1881		ipc_port_release_send(new_port);
1882
1883	*CountCnt = count;
1884
1885	return (KERN_SUCCESS);
1886}
1887
/*
 * Task counterpart of thread_swap_exception_ports: atomically returns the
 * old (deduplicated) exception actions and installs the new port/behavior/
 * flavor for every exception type selected by exception_mask.
 * Consumes the supplied send right on success; returns up to [in] *CountCnt
 * elements in the caller's arrays.
 */
kern_return_t
task_swap_exception_ports(
	task_t						task,
	exception_mask_t			exception_mask,
	ipc_port_t					new_port,
	exception_behavior_t		new_behavior,
	thread_state_flavor_t		new_flavor,
	exception_mask_array_t		masks,
	mach_msg_type_number_t		*CountCnt,
	exception_port_array_t		ports,
	exception_behavior_array_t	behaviors,
	thread_state_flavor_array_t	flavors)
{
	ipc_port_t		old_port[EXC_TYPES_COUNT];
	/* Caller is privileged iff its security token is the kernel's. */
	boolean_t privileged = current_task()->sec_token.val[0] == 0;
	unsigned int	i, j, count;

	if (task == TASK_NULL)
		return (KERN_INVALID_ARGUMENT);

	if (exception_mask & ~EXC_MASK_VALID)
		return (KERN_INVALID_ARGUMENT);

	/* Behavior is only meaningful when a real port is supplied. */
	if (IP_VALID(new_port)) {
		switch (new_behavior & ~MACH_EXCEPTION_CODES) {

		case EXCEPTION_DEFAULT:
		case EXCEPTION_STATE:
		case EXCEPTION_STATE_IDENTITY:
			break;

		default:
			return (KERN_INVALID_ARGUMENT);
		}
	}

	if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor))
		return (KERN_INVALID_ARGUMENT);

	itk_lock(task);

	/* A null itk_self means the task's IPC state is gone (task dead). */
	if (task->itk_self == IP_NULL) {
		itk_unlock(task);

		return (KERN_FAILURE);
	}

	/*
	 * Record the old actions (deduplicated into the caller's out arrays)
	 * and install the new ones in a single pass; stops early once 'count'
	 * reaches the caller's capacity *CountCnt.
	 */
	assert(EXC_TYPES_COUNT > FIRST_EXCEPTION);
	for (count = 0, i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT && count < *CountCnt; ++i) {
		if (exception_mask & (1 << i)) {
			for (j = 0; j < count; j++) {
				/*
				 * search for an identical entry, if found
				 * set corresponding mask for this exception.
				 */
				if (	task->exc_actions[i].port == ports[j]			&&
						task->exc_actions[i].behavior == behaviors[j]	&&
						task->exc_actions[i].flavor == flavors[j]		) {
					masks[j] |= (1 << i);
					break;
				}
			}

			/* No identical entry found: emit a new output slot. */
			if (j == count) {
				masks[j] = (1 << i);
				ports[j] = ipc_port_copy_send(task->exc_actions[i].port);
				behaviors[j] = task->exc_actions[i].behavior;
				flavors[j] = task->exc_actions[i].flavor;
				++count;
			}

			/* Stash the displaced right; release after dropping the lock. */
			old_port[i] = task->exc_actions[i].port;

			task->exc_actions[i].port =	ipc_port_copy_send(new_port);
			task->exc_actions[i].behavior = new_behavior;
			task->exc_actions[i].flavor = new_flavor;
			task->exc_actions[i].privileged = privileged;
		}
		else
			old_port[i] = IP_NULL;
	}

	itk_unlock(task);

	/*
	 * Release displaced rights for the indices the loop above visited.
	 * NOTE(review): i is unsigned, so this relies on FIRST_EXCEPTION >= 1
	 * (otherwise --i would wrap at zero) — confirm against exception_types.h.
	 */
	while (--i >= FIRST_EXCEPTION) {
		if (IP_VALID(old_port[i]))
			ipc_port_release_send(old_port[i]);
	}

	if (IP_VALID(new_port))		 /* consume send right */
		ipc_port_release_send(new_port);

	*CountCnt = count;

	return (KERN_SUCCESS);
}
1984
1985/*
1986 *	Routine:	thread/task_get_exception_ports [kernel call]
1987 *	Purpose:
1988 *		Clones a send right for each of the thread/task's exception
1989 *		ports specified in the mask and returns the behaviour
1990 *		and flavor of said port.
1991 *
1992 *		Returns upto [in} CountCnt elements.
1993 *
1994 *	Conditions:
1995 *		Nothing locked.
1996 *	Returns:
1997 *		KERN_SUCCESS		Extracted a send right.
1998 *		KERN_INVALID_ARGUMENT	The thread is null,
1999 *					Invalid special port,
2000 *					Illegal mask bit set.
2001 *		KERN_FAILURE		The thread is dead.
2002 */
2003
2004kern_return_t
2005thread_get_exception_ports(
2006	thread_t					thread,
2007	exception_mask_t			exception_mask,
2008	exception_mask_array_t		masks,
2009	mach_msg_type_number_t		*CountCnt,
2010	exception_port_array_t		ports,
2011	exception_behavior_array_t	behaviors,
2012	thread_state_flavor_array_t	flavors)
2013{
2014	unsigned int	i, j, count;
2015
2016	if (thread == THREAD_NULL)
2017		return (KERN_INVALID_ARGUMENT);
2018
2019	if (exception_mask & ~EXC_MASK_VALID)
2020		return (KERN_INVALID_ARGUMENT);
2021
2022	thread_mtx_lock(thread);
2023
2024	if (!thread->active) {
2025		thread_mtx_unlock(thread);
2026
2027		return (KERN_FAILURE);
2028	}
2029
2030	count = 0;
2031
2032	if (thread->exc_actions == NULL) {
2033		goto done;
2034	}
2035
2036	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
2037		if (exception_mask & (1 << i)) {
2038			for (j = 0; j < count; ++j) {
2039				/*
2040				 * search for an identical entry, if found
2041				 * set corresponding mask for this exception.
2042				 */
2043				if (	thread->exc_actions[i].port == ports[j]			&&
2044						thread->exc_actions[i].behavior ==behaviors[j]	&&
2045						thread->exc_actions[i].flavor == flavors[j]		) {
2046					masks[j] |= (1 << i);
2047					break;
2048				}
2049			}
2050
2051			if (j == count) {
2052				masks[j] = (1 << i);
2053				ports[j] = ipc_port_copy_send(thread->exc_actions[i].port);
2054				behaviors[j] = thread->exc_actions[i].behavior;
2055				flavors[j] = thread->exc_actions[i].flavor;
2056				++count;
2057				if (count >= *CountCnt)
2058					break;
2059			}
2060		}
2061	}
2062
2063done:
2064	thread_mtx_unlock(thread);
2065
2066	*CountCnt = count;
2067
2068	return (KERN_SUCCESS);
2069}
2070
2071kern_return_t
2072task_get_exception_ports(
2073	task_t						task,
2074	exception_mask_t			exception_mask,
2075	exception_mask_array_t		masks,
2076	mach_msg_type_number_t		*CountCnt,
2077	exception_port_array_t		ports,
2078	exception_behavior_array_t	behaviors,
2079	thread_state_flavor_array_t	flavors)
2080{
2081	unsigned int	i, j, count;
2082
2083	if (task == TASK_NULL)
2084		return (KERN_INVALID_ARGUMENT);
2085
2086	if (exception_mask & ~EXC_MASK_VALID)
2087		return (KERN_INVALID_ARGUMENT);
2088
2089	itk_lock(task);
2090
2091	if (task->itk_self == IP_NULL) {
2092		itk_unlock(task);
2093
2094		return (KERN_FAILURE);
2095	}
2096
2097	count = 0;
2098
2099	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
2100		if (exception_mask & (1 << i)) {
2101			for (j = 0; j < count; ++j) {
2102				/*
2103				 * search for an identical entry, if found
2104				 * set corresponding mask for this exception.
2105				 */
2106				if (	task->exc_actions[i].port == ports[j]			&&
2107						task->exc_actions[i].behavior == behaviors[j]	&&
2108						task->exc_actions[i].flavor == flavors[j]		) {
2109					masks[j] |= (1 << i);
2110					break;
2111				}
2112			}
2113
2114			if (j == count) {
2115				masks[j] = (1 << i);
2116				ports[j] = ipc_port_copy_send(task->exc_actions[i].port);
2117				behaviors[j] = task->exc_actions[i].behavior;
2118				flavors[j] = task->exc_actions[i].flavor;
2119				++count;
2120				if (count > *CountCnt)
2121					break;
2122			}
2123		}
2124	}
2125
2126	itk_unlock(task);
2127
2128	*CountCnt = count;
2129
2130	return (KERN_SUCCESS);
2131}
2132