/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Copyright (c) 1993 The University of Utah and
 * the Center for Software Science (CSS).  All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * THE UNIVERSITY OF UTAH AND CSS ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
 * IS" CONDITION.  THE UNIVERSITY OF UTAH AND CSS DISCLAIM ANY LIABILITY OF
 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * CSS requests users of this software to return to css-dist@cs.utah.edu any
 * improvements that they make and grant CSS redistribution rights.
 *
 *	Author:	Bryan Ford, University of Utah CSS
 *
 *	Thread management routines
 */
#include <mach/mach_types.h>
#include <mach/kern_return.h>
#include <mach/alert.h>
#include <mach/rpc.h>
#include <mach/thread_act_server.h>

#include <kern/kern_types.h>
#include <kern/ast.h>
#include <kern/mach_param.h>
#include <kern/zalloc.h>
#include <kern/thread.h>
#include <kern/task.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>
#include <kern/assert.h>
#include <kern/exception.h>
#include <kern/ipc_mig.h>
#include <kern/ipc_tt.h>
#include <kern/machine.h>
#include <kern/spl.h>
#include <kern/syscall_subr.h>
#include <kern/sync_lock.h>
#include <kern/processor.h>
#include <kern/timer.h>
#include <kern/affinity.h>

#include <mach/rpc.h>

#include <mach/sdt.h>

void			act_abort(thread_t);
void			install_special_handler_locked(thread_t);
void			special_handler_continue(void);

/*
 * Internal routine to mark a thread as started.
 * Always called with the thread locked.
 *
 * Note: function intentionally declared with the noinline attribute to
 * prevent multiple declarations of probe symbols in this file; we would
 * prefer "#pragma noinline", but gcc does not support it.
 */
void
thread_start_internal(
	thread_t			thread)
{
	clear_wait(thread, THREAD_AWAKENED);
	thread->started = TRUE;
	DTRACE_PROC1(lwp__start, thread_t, thread);
}

/*
 * Internal routine to terminate a thread.
 * Sometimes called with task already locked.
 */
kern_return_t
thread_terminate_internal(
	thread_t			thread)
{
	kern_return_t		result = KERN_SUCCESS;

	DTRACE_PROC(lwp__exit);

	thread_mtx_lock(thread);

	if (thread->active) {
		thread->active = FALSE;

		act_abort(thread);

		if (thread->started)
			clear_wait(thread, THREAD_INTERRUPTED);
		else {
			thread_start_internal(thread);
		}
	}
	else
		result = KERN_TERMINATED;

	if (thread->affinity_set != NULL)
		thread_affinity_terminate(thread);

	thread_mtx_unlock(thread);

	if (thread != current_thread() && result == KERN_SUCCESS)
		thread_wait(thread);

	return (result);
}
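/*
 * Note: thread_terminate_internal() only marks the thread inactive and
 * aborts it; the wakeup (or deferred start) above drives the target into
 * its special handler, which performs the actual teardown.  When the
 * caller is terminating a thread other than itself, thread_wait() keeps
 * it from returning until the target has stopped running.
 */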

/*
 * Terminate a thread.
 */
kern_return_t
thread_terminate(
	thread_t		thread)
{
	kern_return_t	result;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	if (	thread->task == kernel_task		&&
			thread != current_thread()			)
		return (KERN_FAILURE);

	result = thread_terminate_internal(thread);

	/*
	 * If a kernel thread is terminating itself, force an AST here.
	 * Kernel threads don't normally pass through the AST checking
	 * code - and all threads finish their own termination in the
	 * special handler APC.
	 */
	if (thread->task == kernel_task) {
		ml_set_interrupts_enabled(FALSE);
		ast_taken(AST_APC, TRUE);
		panic("thread_terminate");
	}

	return (result);
}

/*
 * Suspend execution of the specified thread.
 * This is a recursive-style suspension of the thread; a count of
 * suspends is maintained.
 *
 * Called with thread mutex held.
 */
void
thread_hold(
	register thread_t	thread)
{
	if (thread->suspend_count++ == 0) {
		install_special_handler(thread);
		if (thread->started)
			thread_wakeup_one(&thread->suspend_count);
	}
}

/*
 * Decrement internal suspension count, setting thread
 * runnable when count falls to zero.
 *
 * Called with thread mutex held.
 */
void
thread_release(
	register thread_t	thread)
{
	if (	thread->suspend_count > 0		&&
			--thread->suspend_count == 0	) {
		if (thread->started)
			thread_wakeup_one(&thread->suspend_count);
		else {
			thread_start_internal(thread);
		}
	}
}
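/*
 * Illustrative sketch only: in this file, thread_hold()/thread_release()
 * typically bracket an operation on a stopped target, as in
 * thread_get_state()/thread_set_state() below:
 *
 *	thread_hold(thread);
 *	thread_mtx_unlock(thread);
 *	if (thread_stop(thread)) {
 *		thread_mtx_lock(thread);
 *		... operate on the stopped target ...
 *		thread_unstop(thread);
 *	}
 *	else
 *		thread_mtx_lock(thread);
 *	thread_release(thread);
 */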

kern_return_t
thread_suspend(
	register thread_t	thread)
{
	thread_t			self = current_thread();
	kern_return_t		result = KERN_SUCCESS;

	if (thread == THREAD_NULL || thread->task == kernel_task)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		if (	thread->user_stop_count++ == 0		&&
				thread->suspend_count++ == 0		) {
			install_special_handler(thread);
			if (thread != self)
				thread_wakeup_one(&thread->suspend_count);
		}
	}
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(thread);

	if (thread != self && result == KERN_SUCCESS)
		thread_wait(thread);

	return (result);
}

kern_return_t
thread_resume(
	register thread_t	thread)
{
	kern_return_t		result = KERN_SUCCESS;

	if (thread == THREAD_NULL || thread->task == kernel_task)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		if (thread->user_stop_count > 0) {
			if (	--thread->user_stop_count == 0		&&
					--thread->suspend_count == 0		) {
				if (thread->started)
					thread_wakeup_one(&thread->suspend_count);
				else {
					thread_start_internal(thread);
				}
			}
		}
		else
			result = KERN_FAILURE;
	}
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(thread);

	return (result);
}
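/*
 * thread_suspend()/thread_resume() above are the externally visible
 * counterparts of thread_hold()/thread_release(): they maintain a separate
 * user_stop_count layered on suspend_count, reject kernel_task threads, and
 * thread_suspend() on another thread does not return until the target has
 * stopped running (thread_wait()).  A hypothetical caller would pair them:
 *
 *	if (thread_suspend(target) == KERN_SUCCESS) {
 *		... inspect the stopped target ...
 *		thread_resume(target);
 *	}
 */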

/*
 *	thread_depress_abort:
 *
 *	Prematurely abort priority depression if there is one.
 */
kern_return_t
thread_depress_abort(
	register thread_t	thread)
{
	kern_return_t		result;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active)
		result = thread_depress_abort_internal(thread);
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(thread);

	return (result);
}


/*
 * Indicate that the activation should run its
 * special handler to detect a condition.
 *
 * Called with thread mutex held.
 */
void
act_abort(
	thread_t	thread)
{
	spl_t		s = splsched();

	thread_lock(thread);

	if (!(thread->sched_mode & TH_MODE_ABORT)) {
		thread->sched_mode |= TH_MODE_ABORT;
		install_special_handler_locked(thread);
	}
	else
		thread->sched_mode &= ~TH_MODE_ABORTSAFELY;

	thread_unlock(thread);
	splx(s);
}

kern_return_t
thread_abort(
	register thread_t	thread)
{
	kern_return_t	result = KERN_SUCCESS;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		act_abort(thread);
		clear_wait(thread, THREAD_INTERRUPTED);
	}
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(thread);

	return (result);
}

kern_return_t
thread_abort_safely(
	thread_t		thread)
{
	kern_return_t	result = KERN_SUCCESS;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		spl_t		s = splsched();

		thread_lock(thread);
		if (!thread->at_safe_point ||
				clear_wait_internal(thread, THREAD_INTERRUPTED) != KERN_SUCCESS) {
			if (!(thread->sched_mode & TH_MODE_ABORT)) {
				thread->sched_mode |= TH_MODE_ISABORTED;
				install_special_handler_locked(thread);
			}
		}
		thread_unlock(thread);
		splx(s);
	}
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(thread);

	return (result);
}
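/*
 * Note: thread_abort() unconditionally aborts the target (act_abort() plus
 * clear_wait()), interrupting whatever wait it is in.  thread_abort_safely()
 * only interrupts a wait made at a safe point; otherwise it marks the thread
 * aborted and installs the special handler so the abort is noticed later.
 */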

/*** backward compatibility hacks ***/
#include <mach/thread_info.h>
#include <mach/thread_special_ports.h>
#include <ipc/ipc_port.h>

kern_return_t
thread_info(
	thread_t				thread,
	thread_flavor_t			flavor,
	thread_info_t			thread_info_out,
	mach_msg_type_number_t	*thread_info_count)
{
	kern_return_t			result;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active)
		result = thread_info_internal(
						thread, flavor, thread_info_out, thread_info_count);
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(thread);

	return (result);
}

kern_return_t
thread_get_state(
	register thread_t		thread,
	int						flavor,
	thread_state_t			state,			/* pointer to OUT array */
	mach_msg_type_number_t	*state_count)	/*IN/OUT*/
{
	kern_return_t		result = KERN_SUCCESS;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		if (thread != current_thread()) {
			thread_hold(thread);

			thread_mtx_unlock(thread);

			if (thread_stop(thread)) {
				thread_mtx_lock(thread);
				result = machine_thread_get_state(
										thread, flavor, state, state_count);
				thread_unstop(thread);
			}
			else {
				thread_mtx_lock(thread);
				result = KERN_ABORTED;
			}

			thread_release(thread);
		}
		else
			result = machine_thread_get_state(
									thread, flavor, state, state_count);
	}
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(thread);

	return (result);
}
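/*
 * Note: when the target is not the current thread, thread_get_state() holds
 * and stops it so machine_thread_get_state() sees a consistent register set;
 * KERN_ABORTED is returned if the stop is interrupted.  Illustrative call
 * (sketch only; the flavor, state layout and count are machine-dependent):
 *
 *	mach_msg_type_number_t	count = MACHINE_THREAD_STATE_COUNT;
 *	natural_t				state[MACHINE_THREAD_STATE_COUNT];
 *
 *	kr = thread_get_state(thread, MACHINE_THREAD_STATE,
 *							(thread_state_t)state, &count);
 */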

/*
 *	Change thread's machine-dependent state.  Called with nothing
 *	locked.  Returns same way.
 */
kern_return_t
thread_set_state(
	register thread_t		thread,
	int						flavor,
	thread_state_t			state,
	mach_msg_type_number_t	state_count)
{
	kern_return_t		result = KERN_SUCCESS;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		if (thread != current_thread()) {
			thread_hold(thread);

			thread_mtx_unlock(thread);

			if (thread_stop(thread)) {
				thread_mtx_lock(thread);
				result = machine_thread_set_state(
										thread, flavor, state, state_count);
				thread_unstop(thread);
			}
			else {
				thread_mtx_lock(thread);
				result = KERN_ABORTED;
			}

			thread_release(thread);
		}
		else
			result = machine_thread_set_state(
									thread, flavor, state, state_count);
	}
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(thread);

	return (result);
}


/*
 * Kernel-internal "thread" interfaces used outside this file:
 */

/* Initialize (or re-initialize) a thread state.  Called from execve
 * with nothing locked, returns same way.
 */
kern_return_t
thread_state_initialize(
	register thread_t		thread)
{
	kern_return_t		result = KERN_SUCCESS;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		if (thread != current_thread()) {
			thread_hold(thread);

			thread_mtx_unlock(thread);

			if (thread_stop(thread)) {
				thread_mtx_lock(thread);
				result = machine_thread_state_initialize( thread );
				thread_unstop(thread);
			}
			else {
				thread_mtx_lock(thread);
				result = KERN_ABORTED;
			}

			thread_release(thread);
		}
		else
			result = machine_thread_state_initialize( thread );
	}
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(thread);

	return (result);
}


kern_return_t
thread_dup(
	register thread_t	target)
{
	thread_t			self = current_thread();
	kern_return_t		result = KERN_SUCCESS;

	if (target == THREAD_NULL || target == self)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(target);

	if (target->active) {
		thread_hold(target);

		thread_mtx_unlock(target);

		if (thread_stop(target)) {
			thread_mtx_lock(target);
			result = machine_thread_dup(self, target);
			if (self->affinity_set != AFFINITY_SET_NULL)
				thread_affinity_dup(self, target);
			thread_unstop(target);
		}
		else {
			thread_mtx_lock(target);
			result = KERN_ABORTED;
		}

		thread_release(target);
	}
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(target);

	return (result);
}
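/*
 * Note: thread_dup() copies the current thread's machine-dependent state
 * (and its affinity set, if it has one) into target, holding and stopping
 * target for the duration.  A thread may not duplicate into itself.
 */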


/*
 *	thread_setstatus:
 *
 *	Set the status of the specified thread.
 *	Called with (and returns with) no locks held.
 */
kern_return_t
thread_setstatus(
	register thread_t		thread,
	int						flavor,
	thread_state_t			tstate,
	mach_msg_type_number_t	count)
{

	return (thread_set_state(thread, flavor, tstate, count));
}

/*
 *	thread_getstatus:
 *
 *	Get the status of the specified thread.
 */
kern_return_t
thread_getstatus(
	register thread_t		thread,
	int						flavor,
	thread_state_t			tstate,
	mach_msg_type_number_t	*count)
{
	return (thread_get_state(thread, flavor, tstate, count));
}

/*
 * install_special_handler:
 *
 *	Install the special returnhandler that handles suspension and
 *	termination, if it hasn't been installed already.
 *
 *	Called with the thread mutex held.
 */
void
install_special_handler(
	thread_t		thread)
{
	spl_t		s = splsched();

	thread_lock(thread);
	install_special_handler_locked(thread);
	thread_unlock(thread);
	splx(s);
}

/*
 * install_special_handler_locked:
 *
 *	Do the work of installing the special_handler.
 *
 *	Called with the thread mutex and scheduling lock held.
 */
void
install_special_handler_locked(
	thread_t				thread)
{
	ReturnHandler	**rh;

	/* The work handler must always be the last ReturnHandler on the list,
	   because it can do tricky things like detach the thr_act.  */
	for (rh = &thread->handlers; *rh; rh = &(*rh)->next)
		continue;

	if (rh != &thread->special_handler.next)
		*rh = &thread->special_handler;

	/*
	 * Temporarily undepress, so target has
	 * a chance to do locking required to
	 * block itself in special_handler().
	 */
	if (thread->sched_mode & TH_MODE_ISDEPRESSED)
		compute_priority(thread, TRUE);

	thread_ast_set(thread, AST_APC);

	if (thread == current_thread())
		ast_propagate(thread->ast);
	else {
		processor_t		processor = thread->last_processor;

		if (	processor != PROCESSOR_NULL					&&
				processor->state == PROCESSOR_RUNNING		&&
				processor->active_thread == thread			)
			cause_ast_check(processor);
	}
}
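/*
 * Rough sketch of the APC path, for orientation (every step except
 * ast_taken() is in this file):
 *
 *	install_special_handler()
 *	  -> install_special_handler_locked()	queue special_handler, set AST_APC
 *	ast_taken(AST_APC, ...)					when the target next checks ASTs
 *	  -> act_execute_returnhandlers()		pop and run queued ReturnHandlers
 *	       -> special_handler()				block while suspended, or
 *											thread_terminate_self() if inactive
 */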

/*
 * Activation control support routines internal to this file:
 */

void
act_execute_returnhandlers(void)
{
	thread_t	thread = current_thread();

	thread_ast_clear(thread, AST_APC);
	spllo();

	for (;;) {
		ReturnHandler	*rh;

		thread_mtx_lock(thread);

		(void)splsched();
		thread_lock(thread);

		rh = thread->handlers;
		if (rh != NULL) {
			thread->handlers = rh->next;

			thread_unlock(thread);
			spllo();

			thread_mtx_unlock(thread);

			/* Execute it */
			(*rh->handler)(rh, thread);
		}
		else
			break;
	}

	thread_unlock(thread);
	spllo();

	thread_mtx_unlock(thread);
}
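/*
 * Note: act_execute_returnhandlers() unlinks each queued ReturnHandler under
 * the thread mutex and scheduling lock, then invokes it with no locks held;
 * it exits (dropping the locks it re-took) once the handler list is empty.
 */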

/*
 * special_handler_continue
 *
 * Continuation routine used when the special handler blocks.  It checks
 * to see whether there have been any new suspensions.  If so, it
 * installs the special handler again.  Otherwise, it checks to see
 * if the current depression needs to be re-instated (it may have
 * been temporarily removed in order to get to this point in a hurry).
 */
void
special_handler_continue(void)
{
	thread_t		thread = current_thread();

	thread_mtx_lock(thread);

	if (thread->suspend_count > 0)
		install_special_handler(thread);
	else {
		spl_t			s = splsched();

		thread_lock(thread);
		if (thread->sched_mode & TH_MODE_ISDEPRESSED) {
			processor_t		myprocessor = thread->last_processor;

			thread->sched_pri = DEPRESSPRI;
			myprocessor->current_pri = thread->sched_pri;
		}
		thread_unlock(thread);
		splx(s);
	}

	thread_mtx_unlock(thread);

	thread_exception_return();
	/*NOTREACHED*/
}

/*
 * special_handler	- handles suspension, termination.  Called
 * with nothing locked.  Returns (if it returns) the same way.
 */
void
special_handler(
	__unused ReturnHandler	*rh,
	thread_t				thread)
{
	spl_t		s;

	thread_mtx_lock(thread);

	s = splsched();
	thread_lock(thread);
	thread->sched_mode &= ~TH_MODE_ISABORTED;
	thread_unlock(thread);
	splx(s);

	/*
	 * If we're suspended, go to sleep and wait for someone to wake us up.
	 */
	if (thread->active) {
		if (thread->suspend_count > 0) {
			if (thread->handlers == NULL) {
				assert_wait(&thread->suspend_count, THREAD_ABORTSAFE);
				thread_mtx_unlock(thread);
				thread_block((thread_continue_t)special_handler_continue);
				/*NOTREACHED*/
			}

			thread_mtx_unlock(thread);

			special_handler_continue();
			/*NOTREACHED*/
		}
	}
	else {
		thread_mtx_unlock(thread);

		thread_terminate_self();
		/*NOTREACHED*/
	}

	thread_mtx_unlock(thread);
}
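/*
 * Note: if the thread is no longer active, special_handler() never returns;
 * thread_terminate_self() completes the termination begun in
 * thread_terminate_internal().  While suspended, the thread sleeps
 * abort-safely on &thread->suspend_count and resumes through
 * special_handler_continue() when thread_release()/thread_resume() wake it.
 */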

kern_return_t
act_set_state(
	thread_t				thread,
	int						flavor,
	thread_state_t			state,
	mach_msg_type_number_t	count)
{
	if (thread == current_thread())
		return (KERN_INVALID_ARGUMENT);

	return (thread_set_state(thread, flavor, state, count));

}

kern_return_t
act_get_state(
	thread_t				thread,
	int						flavor,
	thread_state_t			state,
	mach_msg_type_number_t	*count)
{
	if (thread == current_thread())
		return (KERN_INVALID_ARGUMENT);

	return (thread_get_state(thread, flavor, state, count));
}
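/*
 * act_set_astbsd/act_set_apc:
 *
 *	Post AST_BSD / AST_APC against a thread.  For the current thread the
 *	AST is propagated directly; otherwise, if the target is currently
 *	running on a processor, that processor is signalled so it checks
 *	its ASTs.
 */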

void
act_set_astbsd(
	thread_t	thread)
{
	spl_t		s = splsched();

	if (thread == current_thread()) {
		thread_ast_set(thread, AST_BSD);
		ast_propagate(thread->ast);
	}
	else {
		processor_t		processor;

		thread_lock(thread);
		thread_ast_set(thread, AST_BSD);
		processor = thread->last_processor;
		if (	processor != PROCESSOR_NULL					&&
				processor->state == PROCESSOR_RUNNING		&&
				processor->active_thread == thread			)
			cause_ast_check(processor);
		thread_unlock(thread);
	}

	splx(s);
}

void
act_set_apc(
	thread_t	thread)
{
	spl_t		s = splsched();

	if (thread == current_thread()) {
		thread_ast_set(thread, AST_APC);
		ast_propagate(thread->ast);
	}
	else {
		processor_t		processor;

		thread_lock(thread);
		thread_ast_set(thread, AST_APC);
		processor = thread->last_processor;
		if (	processor != PROCESSOR_NULL					&&
				processor->state == PROCESSOR_RUNNING		&&
				processor->active_thread == thread			)
			cause_ast_check(processor);
		thread_unlock(thread);
	}

	splx(s);
}