/*-
 * Copyright (C) 2001 Julian Elischer <julian@freebsd.org>.
 *  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_thread.c 156705 2006-03-14 04:00:21Z davidxu $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/sched.h>
#include <sys/sleepqueue.h>
#include <sys/turnstile.h>
#include <sys/ktr.h>
#include <sys/umtx.h>

#include <security/audit/audit.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>

/*
 * KSEGRP related storage.
 */
static uma_zone_t ksegrp_zone;
static uma_zone_t thread_zone;

/* DEBUG ONLY */
SYSCTL_NODE(_kern, OID_AUTO, threads, CTLFLAG_RW, 0, "thread allocation");
static int thread_debug = 0;
SYSCTL_INT(_kern_threads, OID_AUTO, debug, CTLFLAG_RW,
	&thread_debug, 0, "thread debug");

int max_threads_per_proc = 1500;
SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_per_proc, CTLFLAG_RW,
	&max_threads_per_proc, 0, "Limit on threads per proc");

int max_groups_per_proc = 1500;
SYSCTL_INT(_kern_threads, OID_AUTO, max_groups_per_proc, CTLFLAG_RW,
	&max_groups_per_proc, 0, "Limit on thread groups per proc");

int max_threads_hits;
SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_hits, CTLFLAG_RD,
	&max_threads_hits, 0, "");

int virtual_cpu;

TAILQ_HEAD(, thread) zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads);
TAILQ_HEAD(, ksegrp) zombie_ksegrps = TAILQ_HEAD_INITIALIZER(zombie_ksegrps);
struct mtx kse_zombie_lock;
MTX_SYSINIT(kse_zombie_lock, &kse_zombie_lock, "kse zombie lock", MTX_SPIN);

static int
sysctl_kse_virtual_cpu(SYSCTL_HANDLER_ARGS)
{
	int error, new_val;
	int def_val;

	def_val = mp_ncpus;
	if (virtual_cpu == 0)
		new_val = def_val;
	else
		new_val = virtual_cpu;
	error = sysctl_handle_int(oidp, &new_val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (new_val < 0)
		return (EINVAL);
	virtual_cpu = new_val;
	return (0);
}
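
/*
 * Usage sketch (illustrative note, not part of the original file): the
 * handler above backs the kern.threads.virtual_cpu OID declared below, so
 * the debug knob can be inspected or tuned from userland:
 *
 *	# sysctl kern.threads.virtual_cpu
 *	# sysctl kern.threads.virtual_cpu=4
 *
 * Writing 0 reverts to the mp_ncpus default (a subsequent read then reports
 * the real CPU count); negative values are rejected with EINVAL.
 */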

/* DEBUG ONLY */
SYSCTL_PROC(_kern_threads, OID_AUTO, virtual_cpu, CTLTYPE_INT|CTLFLAG_RW,
	0, sizeof(virtual_cpu), sysctl_kse_virtual_cpu, "I",
	"debug virtual cpus");

struct mtx tid_lock;
static struct unrhdr *tid_unrhdr;

/*
 * Prepare a thread for use.
 */
static int
thread_ctor(void *mem, int size, void *arg, int flags)
{
	struct thread	*td;

	td = (struct thread *)mem;
	td->td_state = TDS_INACTIVE;
	td->td_oncpu = NOCPU;

	td->td_tid = alloc_unr(tid_unrhdr);

	/*
	 * Note that td_critnest begins life as 1 because the thread is not
	 * running and is thereby implicitly waiting to be on the receiving
	 * end of a context switch.  A context switch must occur inside a
	 * critical section, and in fact, includes hand-off of the sched_lock.
	 * After a context switch to a newly created thread, it will release
	 * sched_lock for the first time, and its td_critnest will hit 0 for
	 * the first time.  This happens on the far end of a context switch,
	 * and when it context switches away from itself, it will in fact go
	 * back into a critical section, and hand off the sched lock to the
	 * next thread.
	 */
	td->td_critnest = 1;

#ifdef AUDIT
	audit_thread_alloc(td);
#endif
	return (0);
}

/*
 * Reclaim a thread after use.
 */
static void
thread_dtor(void *mem, int size, void *arg)
{
	struct thread *td;

	td = (struct thread *)mem;

#ifdef INVARIANTS
	/* Verify that this thread is in a safe state to free. */
	switch (td->td_state) {
	case TDS_INHIBITED:
	case TDS_RUNNING:
	case TDS_CAN_RUN:
	case TDS_RUNQ:
		/*
		 * We must never unlink a thread that is in one of
		 * these states, because it is currently active.
		 */
		panic("bad state for thread unlinking");
		/* NOTREACHED */
	case TDS_INACTIVE:
		break;
	default:
		panic("bad thread state");
		/* NOTREACHED */
	}
#endif
#ifdef AUDIT
	audit_thread_free(td);
#endif
	free_unr(tid_unrhdr, td->td_tid);
	sched_newthread(td);
}

/*
 * Initialize type-stable parts of a thread (when newly created).
 */
static int
thread_init(void *mem, int size, int flags)
{
	struct thread *td;

	td = (struct thread *)mem;

	vm_thread_new(td, 0);
	cpu_thread_setup(td);
	td->td_sleepqueue = sleepq_alloc();
	td->td_turnstile = turnstile_alloc();
	td->td_umtxq = umtxq_alloc();
	td->td_sched = (struct td_sched *)&td[1];
	sched_newthread(td);
	return (0);
}

/*
 * Tear down type-stable parts of a thread (just before being discarded).
 */
static void
thread_fini(void *mem, int size)
{
	struct thread *td;

	td = (struct thread *)mem;
	turnstile_free(td->td_turnstile);
	sleepq_free(td->td_sleepqueue);
	umtxq_free(td->td_umtxq);
	vm_thread_dispose(td);
}

/*
 * Initialize type-stable parts of a ksegrp (when newly created).
 */
static int
ksegrp_ctor(void *mem, int size, void *arg, int flags)
{
	struct ksegrp	*kg;

	kg = (struct ksegrp *)mem;
	bzero(mem, size);
	kg->kg_sched = (struct kg_sched *)&kg[1];
	return (0);
}

void
ksegrp_link(struct ksegrp *kg, struct proc *p)
{

	TAILQ_INIT(&kg->kg_threads);
	TAILQ_INIT(&kg->kg_runq);	/* links with td_runq */
	TAILQ_INIT(&kg->kg_upcalls);	/* all upcall structures in ksegrp */
	kg->kg_proc = p;
	/*
	 * The following counters are in the -zero- section
	 * and may not need clearing.
	 */
	kg->kg_numthreads = 0;
	kg->kg_numupcalls = 0;
	/* Link it in now that it's consistent. */
	p->p_numksegrps++;
	TAILQ_INSERT_HEAD(&p->p_ksegrps, kg, kg_ksegrp);
}

/*
 * Called from:
 *   thread_exit()
 */
void
ksegrp_unlink(struct ksegrp *kg)
{
	struct proc *p;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((kg->kg_numthreads == 0), ("ksegrp_unlink: residual threads"));
	KASSERT((kg->kg_numupcalls == 0), ("ksegrp_unlink: residual upcalls"));

	p = kg->kg_proc;
	TAILQ_REMOVE(&p->p_ksegrps, kg, kg_ksegrp);
	p->p_numksegrps--;
	/*
	 * If this was the process-scope group, the process
	 * no longer has one.
	 */
	if (p->p_procscopegrp == kg)
		p->p_procscopegrp = NULL;
}

/*
 * For a newly created process,
 * link up all the structures and its initial threads etc.
 * Called from:
 * {arch}/{arch}/machdep.c   ia64_init(), init386() etc.
 * proc_dtor() (should go away)
 * proc_init()
 */
void
proc_linkup(struct proc *p, struct ksegrp *kg, struct thread *td)
{

	TAILQ_INIT(&p->p_ksegrps);	     /* all ksegrps in proc */
	TAILQ_INIT(&p->p_threads);	     /* all threads in proc */
	TAILQ_INIT(&p->p_suspended);	     /* Threads suspended */
	sigqueue_init(&p->p_sigqueue, p);
	p->p_ksi = ksiginfo_alloc(1);
	if (p->p_ksi != NULL) {
		/* XXX p_ksi may be null if ksiginfo zone is not ready */
		p->p_ksi->ksi_flags = KSI_EXT | KSI_INS;
	}
	LIST_INIT(&p->p_mqnotifier);
	p->p_numksegrps = 0;
	p->p_numthreads = 0;

	ksegrp_link(kg, p);
	thread_link(td, kg);
}

/*
 * Initialize global thread allocation resources.
 */
void
threadinit(void)
{

	mtx_init(&tid_lock, "TID lock", NULL, MTX_DEF);
	tid_unrhdr = new_unrhdr(PID_MAX + 1, INT_MAX, &tid_lock);

	thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(),
	    thread_ctor, thread_dtor, thread_init, thread_fini,
	    UMA_ALIGN_CACHE, 0);
	ksegrp_zone = uma_zcreate("KSEGRP", sched_sizeof_ksegrp(),
	    ksegrp_ctor, NULL, NULL, NULL,
	    UMA_ALIGN_CACHE, 0);
	kseinit();	/* set up kse specific stuff e.g. upcall zone */
}
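
/*
 * A note on the UMA hooks registered above (illustrative; this follows
 * general uma(9) semantics rather than anything specific to this file):
 * ctor/dtor run on every allocation and free from the zone, while init/fini
 * run only when an item first enters, or finally leaves, the zone's
 * type-stable cache.  Roughly:
 *
 *	td = uma_zalloc(thread_zone, M_WAITOK);
 *		thread_init(td)  - only if td is freshly allocated backing
 *		thread_ctor(td)  - every time
 *	uma_zfree(thread_zone, td);
 *		thread_dtor(td)  - every time
 *		thread_fini(td)  - only when the item is finally reclaimed
 *
 * This is why the kernel stack, sleep queue and turnstile are attached in
 * thread_init() and survive thread_free()/thread_alloc() cycles.
 */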

/*
 * Stash an embarrassingly extra thread into the zombie thread queue.
 */
void
thread_stash(struct thread *td)
{
	mtx_lock_spin(&kse_zombie_lock);
	TAILQ_INSERT_HEAD(&zombie_threads, td, td_runq);
	mtx_unlock_spin(&kse_zombie_lock);
}

/*
 * Stash an embarrassingly extra ksegrp into the zombie ksegrp queue.
 */
void
ksegrp_stash(struct ksegrp *kg)
{
	mtx_lock_spin(&kse_zombie_lock);
	TAILQ_INSERT_HEAD(&zombie_ksegrps, kg, kg_ksegrp);
	mtx_unlock_spin(&kse_zombie_lock);
}

/*
 * Reap zombie kse resources.
 */
void
thread_reap(void)
{
	struct thread *td_first, *td_next;
	struct ksegrp *kg_first, *kg_next;

	/*
	 * Don't even bother to lock if none at this instant;
	 * we really don't care about the next instant.
	 */
	if ((!TAILQ_EMPTY(&zombie_threads))
	    || (!TAILQ_EMPTY(&zombie_ksegrps))) {
		mtx_lock_spin(&kse_zombie_lock);
		td_first = TAILQ_FIRST(&zombie_threads);
		kg_first = TAILQ_FIRST(&zombie_ksegrps);
		if (td_first)
			TAILQ_INIT(&zombie_threads);
		if (kg_first)
			TAILQ_INIT(&zombie_ksegrps);
		mtx_unlock_spin(&kse_zombie_lock);
		while (td_first) {
			td_next = TAILQ_NEXT(td_first, td_runq);
			if (td_first->td_ucred)
				crfree(td_first->td_ucred);
			thread_free(td_first);
			td_first = td_next;
		}
		while (kg_first) {
			kg_next = TAILQ_NEXT(kg_first, kg_ksegrp);
			ksegrp_free(kg_first);
			kg_first = kg_next;
		}
		/*
		 * There will always be a thread on the list if one of these
		 * is there.
		 */
		kse_GC();
	}
}

/*
 * Allocate a ksegrp.
 */
struct ksegrp *
ksegrp_alloc(void)
{
	return (uma_zalloc(ksegrp_zone, M_WAITOK));
}

/*
 * Allocate a thread.
 */
struct thread *
thread_alloc(void)
{
	thread_reap(); /* check if any zombies to get */
	return (uma_zalloc(thread_zone, M_WAITOK));
}

/*
 * Deallocate a ksegrp.
 */
void
ksegrp_free(struct ksegrp *kg)
{
	uma_zfree(ksegrp_zone, kg);
}

/*
 * Deallocate a thread.
 */
void
thread_free(struct thread *td)
{

	cpu_thread_clean(td);
	uma_zfree(thread_zone, td);
}
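
/*
 * Illustrative lifecycle of these pairs (a sketch only; the real creators
 * live in kern_thr.c and kern_kse.c, and the exit path is below):
 *
 *	newtd = thread_alloc();          reaps zombies, then uma_zalloc()
 *	...set up trapframe/pcb, credentials...
 *	thread_link(newtd, kg);          attach to proc and ksegrp
 *	...
 *	thread_exit() -> thread_stash()  the thread becomes a zombie
 *	thread_reap() -> thread_free()   the zombie finally returns to the zone
 */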

/*
 * Discard the current thread and exit from its context.
 * Always called with scheduler locked.
 *
 * Because we can't free a thread while we're operating under its context,
 * push the current thread into our CPU's deadthread holder. This means
 * we needn't worry about someone else grabbing our context before we
 * do a cpu_throw().  This may not be needed now as we are under schedlock.
 * Maybe we can just do a thread_stash() as thr_exit1 does.
 */
/*  XXX
 * libthr expects its thread exit to return for the last
 * thread, meaning that the program is back to non-threaded
 * mode I guess. Because we do this (cpu_throw) unconditionally
 * here, they have their own version of it (thr_exit1())
 * that doesn't do it all if this was the last thread.
 * It is also called from thread_suspend_check().
 * Of course in the end, they end up coming here through exit1()
 * anyhow.  After fixing 'thr' to play by the rules we should be able
 * to merge these two functions together.
 *
 * Called from:
 * exit1()
 * kse_exit()
 * thr_exit()
 * thread_user_enter()
 * thread_userret()
 * thread_suspend_check()
 */
void
thread_exit(void)
{
	uint64_t new_switchtime;
	struct thread *td;
	struct proc *p;
	struct ksegrp	*kg;

	td = curthread;
	kg = td->td_ksegrp;
	p = td->td_proc;

	mtx_assert(&sched_lock, MA_OWNED);
	mtx_assert(&Giant, MA_NOTOWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT(p != NULL, ("thread exiting without a process"));
	KASSERT(kg != NULL, ("thread exiting without a kse group"));
	CTR3(KTR_PROC, "thread_exit: thread %p (pid %ld, %s)", td,
	    (long)p->p_pid, p->p_comm);
	KASSERT(TAILQ_EMPTY(&td->td_sigqueue.sq_list), ("signal pending"));

#ifdef AUDIT
	AUDIT_SYSCALL_EXIT(0, td);
#endif

	if (td->td_standin != NULL) {
		/*
		 * Note that we don't need to free the cred here as it
		 * is done in thread_reap().
		 */
		thread_stash(td->td_standin);
		td->td_standin = NULL;
	}

	/*
	 * Drop FPU & debug register state storage, or any other
	 * architecture specific resources that
	 * would not be on a new untouched process.
	 */
	cpu_thread_exit(td);	/* XXXSMP */

	/*
	 * The thread is exiting. The scheduler can release its stuff
	 * and collect stats etc.
	 * XXX this is not very right, since PROC_UNLOCK may still
	 * need scheduler stuff.
	 */
	sched_thread_exit(td);

	/* Do the same timestamp bookkeeping that mi_switch() would do. */
	new_switchtime = cpu_ticks();
	p->p_rux.rux_runtime += (new_switchtime - PCPU_GET(switchtime));
	p->p_rux.rux_uticks += td->td_uticks;
	p->p_rux.rux_sticks += td->td_sticks;
	p->p_rux.rux_iticks += td->td_iticks;
	PCPU_SET(switchtime, new_switchtime);
	PCPU_SET(switchticks, ticks);
	cnt.v_swtch++;

	/* Fold our children's accumulated usage into the process totals. */
	if (p->p_numthreads == 1)
		ruadd(p->p_ru, &p->p_rux, &p->p_stats->p_cru, &p->p_crux);

	/*
	 * The last thread is left attached to the process
	 * so that the whole bundle gets recycled. Skip
	 * all this stuff if we never had threads.
	 * EXIT clears all sign of other threads when
	 * it goes to single threading, so the last thread always
	 * takes the short path.
	 */
	if (p->p_flag & P_HADTHREADS) {
		if (p->p_numthreads > 1) {
			thread_unlink(td);

			/* XXX first arg not used in 4BSD or ULE */
			sched_exit_thread(FIRST_THREAD_IN_PROC(p), td);

			/*
			 * The test below is NOT true if we are the
			 * sole exiting thread. P_STOPPED_SINGLE is unset
			 * in exit1() after it is the only survivor.
			 */
			if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
				if (p->p_numthreads == p->p_suspcount) {
					thread_unsuspend_one(p->p_singlethread);
				}
			}

			/*
			 * Because each upcall structure has an owner thread,
			 * and the owner thread exits only when the process
			 * is in the exiting state, an upcall to userland is
			 * no longer needed and deleting the upcall structure
			 * is safe here.  So when all threads in a group have
			 * exited, all upcalls in the group should be
			 * automatically freed.
			 *  XXXKSE This is a KSE thing and should be exported
			 * there somehow.
			 */
			upcall_remove(td);

			/*
			 * If the thread we unlinked above was the last one,
			 * then this ksegrp should go away too.
			 */
			if (kg->kg_numthreads == 0) {
				/*
				 * Let the scheduler know about this in case
				 * it needs to recover stats or resources.
				 * Theoretically we could let
				 * sched_exit_ksegrp()  do the equivalent of
				 * setting the concurrency to 0
				 * but don't do it yet to avoid changing
				 * the existing scheduler code until we
				 * are ready.
				 * We supply a random other ksegrp
				 * as the recipient of any built up
				 * cpu usage etc. (If the scheduler wants it).
				 * XXXKSE
				 * This is probably not fair so think of
				 * a better answer.
				 */
				sched_exit_ksegrp(FIRST_KSEGRP_IN_PROC(p), td);
				sched_set_concurrency(kg, 0); /* XXX TEMP */
				ksegrp_unlink(kg);
				ksegrp_stash(kg);
			}
			PROC_UNLOCK(p);
			td->td_ksegrp = NULL;
			PCPU_SET(deadthread, td);
		} else {
			/*
			 * The last thread is exiting, but not through exit().
			 * What should we do?
			 * Theoretically this can't happen:
			 * exit1() - clears threading flags before coming here
			 * kse_exit() - treats last thread specially
			 * thr_exit() - treats last thread specially
			 * thread_user_enter() - only if more exist
			 * thread_userret() - only if more exist
			 * thread_suspend_check() - only if more exist
			 */
			panic("thread_exit: Last thread exiting on its own");
		}
	} else {
		/*
		 * A non-threaded process comes here.
		 * This includes an ex-threaded process that is coming
		 * here via exit1(). (exit1() dethreads the proc first).
		 */
		PROC_UNLOCK(p);
	}
	td->td_state = TDS_INACTIVE;
	CTR1(KTR_PROC, "thread_exit: cpu_throw() thread %p", td);
	cpu_throw(td, choosethread());
	panic("I'm a teapot!");
	/* NOTREACHED */
}

/*
 * Do any thread specific cleanups that may be needed in wait().
 * Called with Giant, proc and schedlock not held.
 */
void
thread_wait(struct proc *p)
{
	struct thread *td;

	mtx_assert(&Giant, MA_NOTOWNED);
	KASSERT((p->p_numthreads == 1), ("Multiple threads in wait1()"));
	KASSERT((p->p_numksegrps == 1), ("Multiple ksegrps in wait1()"));
	FOREACH_THREAD_IN_PROC(p, td) {
		if (td->td_standin != NULL) {
			if (td->td_standin->td_ucred != NULL) {
				crfree(td->td_standin->td_ucred);
				td->td_standin->td_ucred = NULL;
			}
			thread_free(td->td_standin);
			td->td_standin = NULL;
		}
		cpu_thread_clean(td);
		crfree(td->td_ucred);
	}
	thread_reap();	/* check for zombie threads etc. */
}

/*
 * Link a thread to a process.
 * Set up anything that needs to be initialized for it to
 * be used by the process.
 *
 * Note that we do not link to the proc's ucred here.
 * The thread is linked as if running but no KSE assigned.
 * Called from:
 *  proc_linkup()
 *  thread_schedule_upcall()
 *  thr_create()
 */
void
thread_link(struct thread *td, struct ksegrp *kg)
{
	struct proc *p;

	p = kg->kg_proc;
	td->td_state    = TDS_INACTIVE;
	td->td_proc     = p;
	td->td_ksegrp   = kg;
	td->td_flags    = 0;
	td->td_kflags   = 0;

	LIST_INIT(&td->td_contested);
	sigqueue_init(&td->td_sigqueue, p);
	callout_init(&td->td_slpcallout, CALLOUT_MPSAFE);
	TAILQ_INSERT_HEAD(&p->p_threads, td, td_plist);
	TAILQ_INSERT_HEAD(&kg->kg_threads, td, td_kglist);
	p->p_numthreads++;
	kg->kg_numthreads++;
}
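
/*
 * Rough creation sequence around thread_link() (a sketch of the
 * thr_create()-style pattern; consult kern_thr.c for the authoritative
 * version, as the lock ordering details matter):
 *
 *	newtd = thread_alloc();
 *	cpu_set_upcall(newtd, td);              inherit kernel-side state
 *	newtd->td_ucred = crhold(td->td_ucred);
 *	PROC_LOCK(p);
 *	p->p_flag |= P_HADTHREADS;
 *	mtx_lock_spin(&sched_lock);
 *	thread_link(newtd, kg);
 *	...make the new thread runnable...
 *	mtx_unlock_spin(&sched_lock);
 *	PROC_UNLOCK(p);
 */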

/*
 * Convert a process with one thread to an unthreaded process.
 * Called from:
 *  thread_single(exit)  (called from execve and exit)
 *  kse_exit()		XXX may need cleaning up wrt KSE stuff
 */
void
thread_unthread(struct thread *td)
{
	struct proc *p = td->td_proc;

	KASSERT((p->p_numthreads == 1), ("Unthreading with >1 threads"));
	upcall_remove(td);
	p->p_flag &= ~(P_SA|P_HADTHREADS);
	td->td_mailbox = NULL;
	td->td_pflags &= ~(TDP_SA | TDP_CAN_UNBIND);
	if (td->td_standin != NULL) {
		thread_stash(td->td_standin);
		td->td_standin = NULL;
	}
	sched_set_concurrency(td->td_ksegrp, 1);
}

/*
 * Called from:
 *  thread_exit()
 */
void
thread_unlink(struct thread *td)
{
	struct proc *p = td->td_proc;
	struct ksegrp *kg = td->td_ksegrp;

	mtx_assert(&sched_lock, MA_OWNED);
	TAILQ_REMOVE(&p->p_threads, td, td_plist);
	p->p_numthreads--;
	TAILQ_REMOVE(&kg->kg_threads, td, td_kglist);
	kg->kg_numthreads--;
	/* could clear a few other things here */
	/* Must NOT clear links to proc and ksegrp! */
}

/*
 * Enforce single-threading.
 *
 * Returns 1 if the caller must abort (another thread is waiting to
 * exit the process or similar). Process is locked!
 * Returns 0 when you are successfully the only thread running.
 * A process has successfully single threaded in the suspend mode when
 * there are no threads in user mode. Threads in the kernel must be
 * allowed to continue until they get to the user boundary. They may even
 * copy out their return values and data before suspending. They may
 * however be accelerated in reaching the user boundary as we will wake
 * up any sleeping threads that are interruptible (PCATCH).
 */
int
thread_single(int mode)
{
	struct thread *td;
	struct thread *td2;
	struct proc *p;
	int remaining;

	td = curthread;
	p = td->td_proc;
	mtx_assert(&Giant, MA_NOTOWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT((td != NULL), ("curthread is NULL"));

	if ((p->p_flag & P_HADTHREADS) == 0)
		return (0);

	/* Is someone already single threading? */
	if (p->p_singlethread != NULL && p->p_singlethread != td)
		return (1);

	if (mode == SINGLE_EXIT) {
		p->p_flag |= P_SINGLE_EXIT;
		p->p_flag &= ~P_SINGLE_BOUNDARY;
	} else {
		p->p_flag &= ~P_SINGLE_EXIT;
		if (mode == SINGLE_BOUNDARY)
			p->p_flag |= P_SINGLE_BOUNDARY;
		else
			p->p_flag &= ~P_SINGLE_BOUNDARY;
	}
	p->p_flag |= P_STOPPED_SINGLE;
	mtx_lock_spin(&sched_lock);
	p->p_singlethread = td;
	if (mode == SINGLE_EXIT)
		remaining = p->p_numthreads;
	else if (mode == SINGLE_BOUNDARY)
		remaining = p->p_numthreads - p->p_boundary_count;
	else
		remaining = p->p_numthreads - p->p_suspcount;
	while (remaining != 1) {
		FOREACH_THREAD_IN_PROC(p, td2) {
			if (td2 == td)
				continue;
			td2->td_flags |= TDF_ASTPENDING;
			if (TD_IS_INHIBITED(td2)) {
				switch (mode) {
				case SINGLE_EXIT:
					if (td2->td_flags & TDF_DBSUSPEND)
						td2->td_flags &= ~TDF_DBSUSPEND;
					if (TD_IS_SUSPENDED(td2))
						thread_unsuspend_one(td2);
					if (TD_ON_SLEEPQ(td2) &&
					    (td2->td_flags & TDF_SINTR))
						sleepq_abort(td2, EINTR);
					break;
				case SINGLE_BOUNDARY:
					if (TD_IS_SUSPENDED(td2) &&
					    !(td2->td_flags & TDF_BOUNDARY))
						thread_unsuspend_one(td2);
					if (TD_ON_SLEEPQ(td2) &&
					    (td2->td_flags & TDF_SINTR))
						sleepq_abort(td2, ERESTART);
					break;
				default:
					if (TD_IS_SUSPENDED(td2))
						continue;
					/*
					 * Maybe other inhibited states too?
					 */
					if ((td2->td_flags & TDF_SINTR) &&
					    (td2->td_inhibitors &
					    (TDI_SLEEPING | TDI_SWAPPED)))
						thread_suspend_one(td2);
					break;
				}
			}
#ifdef SMP
			else if (TD_IS_RUNNING(td2) && td != td2) {
				forward_signal(td2);
			}
#endif
		}
		if (mode == SINGLE_EXIT)
			remaining = p->p_numthreads;
		else if (mode == SINGLE_BOUNDARY)
			remaining = p->p_numthreads - p->p_boundary_count;
		else
			remaining = p->p_numthreads - p->p_suspcount;

		/*
		 * Maybe we suspended some threads; was it enough?
		 */
		if (remaining == 1)
			break;

		/*
		 * Wake us up when everyone else has suspended.
		 * In the meantime we suspend as well.
		 */
		thread_suspend_one(td);
		PROC_UNLOCK(p);
		mi_switch(SW_VOL, NULL);
		mtx_unlock_spin(&sched_lock);
		PROC_LOCK(p);
		mtx_lock_spin(&sched_lock);
		if (mode == SINGLE_EXIT)
			remaining = p->p_numthreads;
		else if (mode == SINGLE_BOUNDARY)
			remaining = p->p_numthreads - p->p_boundary_count;
		else
			remaining = p->p_numthreads - p->p_suspcount;
	}
	if (mode == SINGLE_EXIT) {
		/*
		 * We have gotten rid of all the other threads and we
		 * are about to either exit or exec. In either case,
		 * we try our utmost to revert to being a non-threaded
		 * process.
		 */
		p->p_singlethread = NULL;
		p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT);
		thread_unthread(td);
	}
	mtx_unlock_spin(&sched_lock);
	return (0);
}
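
/*
 * Typical use (a sketch of the execve()-style pattern; see exit1() and
 * kern_execve() for the authoritative sequences):
 *
 *	PROC_LOCK(p);
 *	if (p->p_flag & P_HADTHREADS) {
 *		if (thread_single(SINGLE_BOUNDARY)) {
 *			PROC_UNLOCK(p);
 *			return (ERESTART);	another thread won the race
 *		}
 *	}
 *	...do the work that required being the only thread...
 *	thread_single_end();			still under the proc lock
 *	PROC_UNLOCK(p);
 *
 * SINGLE_EXIT is the exit1() flavour: other threads are made to exit
 * rather than park at the boundary, and on success the process is
 * reverted to non-threaded via thread_unthread().
 */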

/*
 * Called in from locations that can safely check to see
 * whether we have to suspend or at least throttle for a
 * single-thread event (e.g. fork).
 *
 * Such locations include userret().
 * The 'return_instead' argument tells the function whether it may do a
 * thread_exit() or suspend here, or whether the caller must abort and
 * back out instead: callers passing a non-zero value must be able to
 * accept 0 (caller may continue) or an error (caller must abort) as a
 * result.
 *
 * If the thread that set the single_threading request has set the
 * P_SINGLE_EXIT bit in the process flags then this call will never return
 * if 'return_instead' is false, but will exit.
 *
 * P_SINGLE_EXIT | return_instead == 0| return_instead != 0
 *---------------+--------------------+---------------------
 *       0       | returns 0          |   returns 0 or 1
 *               | when ST ends       |   immediately
 *---------------+--------------------+---------------------
 *       1       | thread exits       |   returns 1
 *               |                    |   immediately
 * 0 = thread_exit() or suspension ok,
 * other = return error instead of stopping the thread.
 *
 * While a full suspension is under effect, even a single threading
 * thread would be suspended if it made this call (but it shouldn't).
 * This call should only be made from places where
 * thread_exit() would be safe as that may be the outcome unless
 * return_instead is set.
 */
int
thread_suspend_check(int return_instead)
{
	struct thread *td;
	struct proc *p;

	td = curthread;
	p = td->td_proc;
	mtx_assert(&Giant, MA_NOTOWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	while (P_SHOULDSTOP(p) ||
	      ((p->p_flag & P_TRACED) && (td->td_flags & TDF_DBSUSPEND))) {
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			KASSERT(p->p_singlethread != NULL,
			    ("singlethread not set"));
			/*
			 * The only suspension in action is a
			 * single-threading. The single threader need not
			 * stop.
			 * XXX Should be safe to access unlocked
			 * as it can only be set to be true by us.
			 */
			if (p->p_singlethread == td)
				return (0);	/* Exempt from stopping. */
		}
		if ((p->p_flag & P_SINGLE_EXIT) && return_instead)
			return (EINTR);

		/* Should we goto user boundary if we didn't come from there? */
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE &&
		    (p->p_flag & P_SINGLE_BOUNDARY) && return_instead)
			return (ERESTART);

		/* If thread will exit, flush its pending signals */
		if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td))
			sigqueue_flush(&td->td_sigqueue);

		mtx_lock_spin(&sched_lock);
		thread_stopped(p);
		/*
		 * If the process is waiting for us to exit,
		 * this thread should just suicide.
		 * Assumes that P_SINGLE_EXIT implies P_STOPPED_SINGLE.
		 */
		if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td))
			thread_exit();

		/*
		 * When a thread suspends, it just
		 * moves to the process's suspend queue
		 * and stays there.
		 */
		thread_suspend_one(td);
		if (return_instead == 0) {
			p->p_boundary_count++;
			td->td_flags |= TDF_BOUNDARY;
		}
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			if (p->p_numthreads == p->p_suspcount)
				thread_unsuspend_one(p->p_singlethread);
		}
		PROC_UNLOCK(p);
		mi_switch(SW_INVOL, NULL);
		if (return_instead == 0) {
			p->p_boundary_count--;
			td->td_flags &= ~TDF_BOUNDARY;
		}
		mtx_unlock_spin(&sched_lock);
		PROC_LOCK(p);
	}
	return (0);
}
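
/*
 * The canonical call site is the return-to-userland path (a sketch of the
 * userret()/ast() pattern; not copied verbatim from those functions):
 *
 *	PROC_LOCK(p);
 *	thread_suspend_check(0);	may park here, or call thread_exit()
 *	PROC_UNLOCK(p);
 *
 * Kernel code that cannot afford to park (e.g. while holding resources)
 * passes return_instead != 0 and propagates the EINTR/ERESTART it gets
 * back, so the syscall is aborted or retried at the user boundary.
 */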

void
thread_suspend_one(struct thread *td)
{
	struct proc *p = td->td_proc;

	mtx_assert(&sched_lock, MA_OWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
	p->p_suspcount++;
	TD_SET_SUSPENDED(td);
	TAILQ_INSERT_TAIL(&p->p_suspended, td, td_runq);
}

void
thread_unsuspend_one(struct thread *td)
{
	struct proc *p = td->td_proc;

	mtx_assert(&sched_lock, MA_OWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	TAILQ_REMOVE(&p->p_suspended, td, td_runq);
	TD_CLR_SUSPENDED(td);
	p->p_suspcount--;
	setrunnable(td);
}

/*
 * Allow all threads blocked by single threading to continue running.
 */
void
thread_unsuspend(struct proc *p)
{
	struct thread *td;

	mtx_assert(&sched_lock, MA_OWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	if (!P_SHOULDSTOP(p)) {
		while ((td = TAILQ_FIRST(&p->p_suspended))) {
			thread_unsuspend_one(td);
		}
	} else if ((P_SHOULDSTOP(p) == P_STOPPED_SINGLE) &&
	    (p->p_numthreads == p->p_suspcount)) {
		/*
		 * Stopping everything also did the job for the single
		 * threading request. Now we've downgraded to single-threaded,
		 * let it continue.
		 */
		thread_unsuspend_one(p->p_singlethread);
	}
}

/*
 * End the single threading mode.
 */
void
thread_single_end(void)
{
	struct thread *td;
	struct proc *p;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_SINGLE_BOUNDARY);
	mtx_lock_spin(&sched_lock);
	p->p_singlethread = NULL;
	p->p_procscopegrp = NULL;
	/*
	 * If there are other threads they may now run,
	 * unless of course there is a blanket 'stop order'
	 * on the process. The single threader must be allowed
	 * to continue however as this is a bad place to stop.
	 */
	if ((p->p_numthreads != 1) && (!P_SHOULDSTOP(p))) {
		while ((td = TAILQ_FIRST(&p->p_suspended))) {
			thread_unsuspend_one(td);
		}
	}
	mtx_unlock_spin(&sched_lock);
}

struct thread *
thread_find(struct proc *p, lwpid_t tid)
{
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	mtx_lock_spin(&sched_lock);
	FOREACH_THREAD_IN_PROC(p, td) {
		if (td->td_tid == tid)
			break;
	}
	mtx_unlock_spin(&sched_lock);
	return (td);
}
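
/*
 * Example consumer (a hypothetical sketch of the thr_kill()-style pattern;
 * holding the proc lock across the lookup keeps the returned thread from
 * being unlinked underneath us, since thread_exit() requires that lock):
 *
 *	PROC_LOCK(p);
 *	ttd = thread_find(p, tid);
 *	if (ttd == NULL) {
 *		PROC_UNLOCK(p);
 *		return (ESRCH);
 *	}
 *	...act on ttd...
 *	PROC_UNLOCK(p);
 */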
1057