kern_thread.c revision 122514
1/*
2 * Copyright (C) 2001 Julian Elischer <julian@freebsd.org>.
3 *  All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice(s), this list of conditions and the following disclaimer as
10 *    the first lines of this file unmodified other than the possible
11 *    addition of one or more copyright notices.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 *    notice(s), this list of conditions and the following disclaimer in the
14 *    documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
17 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
18 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
19 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
20 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
21 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
22 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
23 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
26 * DAMAGE.
27 */
28
29#include <sys/cdefs.h>
30__FBSDID("$FreeBSD: head/sys/kern/kern_thread.c 122514 2003-11-11 22:07:29Z jhb $");
31
32#include <sys/param.h>
33#include <sys/systm.h>
34#include <sys/kernel.h>
35#include <sys/lock.h>
36#include <sys/malloc.h>
37#include <sys/mutex.h>
38#include <sys/proc.h>
39#include <sys/smp.h>
40#include <sys/sysctl.h>
41#include <sys/sysproto.h>
42#include <sys/filedesc.h>
43#include <sys/sched.h>
44#include <sys/signalvar.h>
45#include <sys/sx.h>
46#include <sys/tty.h>
47#include <sys/turnstile.h>
48#include <sys/user.h>
49#include <sys/jail.h>
50#include <sys/kse.h>
51#include <sys/ktr.h>
52#include <sys/ucontext.h>
53
54#include <vm/vm.h>
55#include <vm/vm_extern.h>
56#include <vm/vm_object.h>
57#include <vm/pmap.h>
58#include <vm/uma.h>
59#include <vm/vm_map.h>
60
61#include <machine/frame.h>
62
63/*
64 * KSEGRP related storage.
65 */
66static uma_zone_t ksegrp_zone;
67static uma_zone_t kse_zone;
68static uma_zone_t thread_zone;
69static uma_zone_t upcall_zone;
70
71/* DEBUG ONLY */
72SYSCTL_NODE(_kern, OID_AUTO, threads, CTLFLAG_RW, 0, "thread allocation");
73static int thread_debug = 0;
74SYSCTL_INT(_kern_threads, OID_AUTO, debug, CTLFLAG_RW,
75	&thread_debug, 0, "thread debug");
76
77static int max_threads_per_proc = 150;
78SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_per_proc, CTLFLAG_RW,
79	&max_threads_per_proc, 0, "Limit on threads per proc");
80
81static int max_groups_per_proc = 50;
82SYSCTL_INT(_kern_threads, OID_AUTO, max_groups_per_proc, CTLFLAG_RW,
83	&max_groups_per_proc, 0, "Limit on thread groups per proc");
84
85static int max_threads_hits;
86SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_hits, CTLFLAG_RD,
87	&max_threads_hits, 0, "");
88
89static int virtual_cpu;
90
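/*
 * RANGEOF(type, start, end) yields the number of bytes between two members
 * of a structure.  It is used with bzero()/bcopy() below to clear or copy
 * the "startzero..endzero" and "startcopy..endcopy" sections of the thread,
 * kse and ksegrp structures.
 */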
91#define RANGEOF(type, start, end) (offsetof(type, end) - offsetof(type, start))
92
93TAILQ_HEAD(, thread) zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads);
94TAILQ_HEAD(, kse) zombie_kses = TAILQ_HEAD_INITIALIZER(zombie_kses);
95TAILQ_HEAD(, ksegrp) zombie_ksegrps = TAILQ_HEAD_INITIALIZER(zombie_ksegrps);
96TAILQ_HEAD(, kse_upcall) zombie_upcalls =
97	TAILQ_HEAD_INITIALIZER(zombie_upcalls);
98struct mtx kse_zombie_lock;
99MTX_SYSINIT(kse_zombie_lock, &kse_zombie_lock, "kse zombie lock", MTX_SPIN);
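
/*
 * Threads, KSEs, ksegrps and upcalls that are discarded while they may
 * still be in use (e.g. a thread freeing itself) are parked on the zombie
 * queues above, under kse_zombie_lock, and are finally freed later by
 * thread_reap().
 */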
100
101static void kse_purge(struct proc *p, struct thread *td);
102static void kse_purge_group(struct thread *td);
103static int thread_update_usr_ticks(struct thread *td, int user);
104static void thread_alloc_spare(struct thread *td, struct thread *spare);
105
106static int
107sysctl_kse_virtual_cpu(SYSCTL_HANDLER_ARGS)
108{
109	int error, new_val;
110	int def_val;
111
112#ifdef SMP
113	def_val = mp_ncpus;
114#else
115	def_val = 1;
116#endif
117	if (virtual_cpu == 0)
118		new_val = def_val;
119	else
120		new_val = virtual_cpu;
121	error = sysctl_handle_int(oidp, &new_val, 0, req);
122	if (error != 0 || req->newptr == NULL)
123		return (error);
124	if (new_val < 0)
125		return (EINVAL);
126	virtual_cpu = new_val;
127	return (0);
128}
129
130/* DEBUG ONLY */
131SYSCTL_PROC(_kern_threads, OID_AUTO, virtual_cpu, CTLTYPE_INT|CTLFLAG_RW,
132	0, sizeof(virtual_cpu), sysctl_kse_virtual_cpu, "I",
133	"debug virtual cpus");
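
/*
 * Illustrative usage: the handler above backs the kern.threads.virtual_cpu
 * OID, so e.g. "sysctl kern.threads.virtual_cpu=4" overrides the mp_ncpus
 * default that kse_create() uses when sizing a new KSE group.
 */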
134
135/*
136 * Prepare a thread for use.
137 */
138static void
139thread_ctor(void *mem, int size, void *arg)
140{
141	struct thread	*td;
142
143	td = (struct thread *)mem;
144	td->td_state = TDS_INACTIVE;
145	td->td_oncpu	= NOCPU;
146	td->td_critnest = 1;
147}
148
149/*
150 * Reclaim a thread after use.
151 */
152static void
153thread_dtor(void *mem, int size, void *arg)
154{
155	struct thread	*td;
156
157	td = (struct thread *)mem;
158
159#ifdef INVARIANTS
160	/* Verify that this thread is in a safe state to free. */
161	switch (td->td_state) {
162	case TDS_INHIBITED:
163	case TDS_RUNNING:
164	case TDS_CAN_RUN:
165	case TDS_RUNQ:
166		/*
167		 * We must never unlink a thread that is in one of
168		 * these states, because it is currently active.
169		 */
170		panic("bad state for thread unlinking");
171		/* NOTREACHED */
172	case TDS_INACTIVE:
173		break;
174	default:
175		panic("bad thread state");
176		/* NOTREACHED */
177	}
178#endif
179}
180
181/*
182 * Initialize type-stable parts of a thread (when newly created).
183 */
184static void
185thread_init(void *mem, int size)
186{
187	struct thread	*td;
188
189	td = (struct thread *)mem;
190	mtx_lock(&Giant);
191	vm_thread_new(td, 0);
192	mtx_unlock(&Giant);
193	cpu_thread_setup(td);
194	td->td_turnstile = turnstile_alloc();
195	td->td_sched = (struct td_sched *)&td[1];
196}
197
198/*
199 * Tear down type-stable parts of a thread (just before being discarded).
200 */
201static void
202thread_fini(void *mem, int size)
203{
204	struct thread	*td;
205
206	td = (struct thread *)mem;
207	turnstile_free(td->td_turnstile);
208	vm_thread_dispose(td);
209}
210
211/*
212 * Initialize type-stable parts of a kse (when newly created).
213 */
214static void
215kse_init(void *mem, int size)
216{
217	struct kse	*ke;
218
219	ke = (struct kse *)mem;
220	ke->ke_sched = (struct ke_sched *)&ke[1];
221}
222
223/*
224 * Initialize type-stable parts of a ksegrp (when newly created).
225 */
226static void
227ksegrp_init(void *mem, int size)
228{
229	struct ksegrp	*kg;
230
231	kg = (struct ksegrp *)mem;
232	kg->kg_sched = (struct kg_sched *)&kg[1];
233}
234
235/*
236 * Link a KSE into its ksegrp.
237 */
238void
239kse_link(struct kse *ke, struct ksegrp *kg)
240{
241	struct proc *p = kg->kg_proc;
242
243	TAILQ_INSERT_HEAD(&kg->kg_kseq, ke, ke_kglist);
244	kg->kg_kses++;
245	ke->ke_state	= KES_UNQUEUED;
246	ke->ke_proc	= p;
247	ke->ke_ksegrp	= kg;
248	ke->ke_thread	= NULL;
249	ke->ke_oncpu	= NOCPU;
250	ke->ke_flags	= 0;
251}
252
253void
254kse_unlink(struct kse *ke)
255{
256	struct ksegrp *kg;
257
258	mtx_assert(&sched_lock, MA_OWNED);
259	kg = ke->ke_ksegrp;
260	TAILQ_REMOVE(&kg->kg_kseq, ke, ke_kglist);
261	if (ke->ke_state == KES_IDLE) {
262		TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
263		kg->kg_idle_kses--;
264	}
265	--kg->kg_kses;
266	/*
267	 * Aggregate stats from the KSE
268	 */
269	kse_stash(ke);
270}
271
272void
273ksegrp_link(struct ksegrp *kg, struct proc *p)
274{
275
276	TAILQ_INIT(&kg->kg_threads);
277	TAILQ_INIT(&kg->kg_runq);	/* links with td_runq */
278	TAILQ_INIT(&kg->kg_slpq);	/* links with td_runq */
279	TAILQ_INIT(&kg->kg_kseq);	/* all kses in ksegrp */
280	TAILQ_INIT(&kg->kg_iq);		/* all idle kses in ksegrp */
281	TAILQ_INIT(&kg->kg_upcalls);	/* all upcall structure in ksegrp */
282	kg->kg_proc = p;
283	/*
284	 * the following counters are in the -zero- section
285	 * and may not need clearing
286	 */
287	kg->kg_numthreads = 0;
288	kg->kg_runnable   = 0;
289	kg->kg_kses       = 0;
290	kg->kg_runq_kses  = 0; /* XXXKSE change name */
291	kg->kg_idle_kses  = 0;
292	kg->kg_numupcalls = 0;
293	/* link it in now that it's consistent */
294	p->p_numksegrps++;
295	TAILQ_INSERT_HEAD(&p->p_ksegrps, kg, kg_ksegrp);
296}
297
298void
299ksegrp_unlink(struct ksegrp *kg)
300{
301	struct proc *p;
302
303	mtx_assert(&sched_lock, MA_OWNED);
304	KASSERT((kg->kg_numthreads == 0), ("ksegrp_unlink: residual threads"));
305	KASSERT((kg->kg_kses == 0), ("ksegrp_unlink: residual kses"));
306	KASSERT((kg->kg_numupcalls == 0), ("ksegrp_unlink: residual upcalls"));
307
308	p = kg->kg_proc;
309	TAILQ_REMOVE(&p->p_ksegrps, kg, kg_ksegrp);
310	p->p_numksegrps--;
311	/*
312	 * Aggregate stats from the KSE
313	 */
314	ksegrp_stash(kg);
315}
316
317struct kse_upcall *
318upcall_alloc(void)
319{
320	struct kse_upcall *ku;
321
322	ku = uma_zalloc(upcall_zone, M_WAITOK);
323	bzero(ku, sizeof(*ku));
324	return (ku);
325}
326
327void
328upcall_free(struct kse_upcall *ku)
329{
330
331	uma_zfree(upcall_zone, ku);
332}
333
334void
335upcall_link(struct kse_upcall *ku, struct ksegrp *kg)
336{
337
338	mtx_assert(&sched_lock, MA_OWNED);
339	TAILQ_INSERT_TAIL(&kg->kg_upcalls, ku, ku_link);
340	ku->ku_ksegrp = kg;
341	kg->kg_numupcalls++;
342}
343
344void
345upcall_unlink(struct kse_upcall *ku)
346{
347	struct ksegrp *kg = ku->ku_ksegrp;
348
349	mtx_assert(&sched_lock, MA_OWNED);
350	KASSERT(ku->ku_owner == NULL, ("%s: have owner", __func__));
351	TAILQ_REMOVE(&kg->kg_upcalls, ku, ku_link);
352	kg->kg_numupcalls--;
353	upcall_stash(ku);
354}
355
356void
357upcall_remove(struct thread *td)
358{
359
360	if (td->td_upcall) {
361		td->td_upcall->ku_owner = NULL;
362		upcall_unlink(td->td_upcall);
363		td->td_upcall = 0;
364	}
365}
366
367/*
368 * For a newly created process,
369 * link up all the structures and its initial threads etc.
370 */
371void
372proc_linkup(struct proc *p, struct ksegrp *kg,
373	    struct kse *ke, struct thread *td)
374{
375
376	TAILQ_INIT(&p->p_ksegrps);	     /* all ksegrps in proc */
377	TAILQ_INIT(&p->p_threads);	     /* all threads in proc */
378	TAILQ_INIT(&p->p_suspended);	     /* Threads suspended */
379	p->p_numksegrps = 0;
380	p->p_numthreads = 0;
381
382	ksegrp_link(kg, p);
383	kse_link(ke, kg);
384	thread_link(td, kg);
385}
386
387/*
388struct kse_thr_interrupt_args {
389	struct kse_thr_mailbox * tmbx;
390	int cmd;
391	long data;
392};
393*/
394int
395kse_thr_interrupt(struct thread *td, struct kse_thr_interrupt_args *uap)
396{
397	struct proc *p;
398	struct thread *td2;
399
400	p = td->td_proc;
401
402	if (!(p->p_flag & P_SA))
403		return (EINVAL);
404
405	switch (uap->cmd) {
406	case KSE_INTR_SENDSIG:
407		if (uap->data < 0 || uap->data > _SIG_MAXSIG)
408			return (EINVAL);
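		/* FALLTHROUGH: SENDSIG shares the thread lookup below. */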
409	case KSE_INTR_INTERRUPT:
410	case KSE_INTR_RESTART:
411		PROC_LOCK(p);
412		mtx_lock_spin(&sched_lock);
413		FOREACH_THREAD_IN_PROC(p, td2) {
414			if (td2->td_mailbox == uap->tmbx)
415				break;
416		}
417		if (td2 == NULL) {
418			mtx_unlock_spin(&sched_lock);
419			PROC_UNLOCK(p);
420			return (ESRCH);
421		}
422		if (uap->cmd == KSE_INTR_SENDSIG) {
423			if (uap->data > 0) {
424				td2->td_flags &= ~TDF_INTERRUPT;
425				mtx_unlock_spin(&sched_lock);
426				tdsignal(td2, (int)uap->data, SIGTARGET_TD);
427			} else {
428				mtx_unlock_spin(&sched_lock);
429			}
430		} else {
431			td2->td_flags |= TDF_INTERRUPT | TDF_ASTPENDING;
432			if (TD_CAN_UNBIND(td2))
433				td2->td_upcall->ku_flags |= KUF_DOUPCALL;
434			if (uap->cmd == KSE_INTR_INTERRUPT)
435				td2->td_intrval = EINTR;
436			else
437				td2->td_intrval = ERESTART;
438			if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR)) {
439				if (td2->td_flags & TDF_CVWAITQ)
440					cv_abort(td2);
441				else
442					abortsleep(td2);
443			}
444			mtx_unlock_spin(&sched_lock);
445		}
446		PROC_UNLOCK(p);
447		break;
448	case KSE_INTR_SIGEXIT:
449		if (uap->data < 1 || uap->data > _SIG_MAXSIG)
450			return (EINVAL);
451		PROC_LOCK(p);
452		sigexit(td, (int)uap->data);
453		break;
454	default:
455		return (EINVAL);
456	}
457	return (0);
458}
459
460/*
461struct kse_exit_args {
462	register_t dummy;
463};
464*/
465int
466kse_exit(struct thread *td, struct kse_exit_args *uap)
467{
468	struct proc *p;
469	struct ksegrp *kg;
470	struct kse *ke;
471	struct kse_upcall *ku, *ku2;
472	int    error, count;
473
474	p = td->td_proc;
475	if ((ku = td->td_upcall) == NULL || TD_CAN_UNBIND(td))
476		return (EINVAL);
477	kg = td->td_ksegrp;
478	count = 0;
479	PROC_LOCK(p);
480	mtx_lock_spin(&sched_lock);
481	FOREACH_UPCALL_IN_GROUP(kg, ku2) {
482		if (ku2->ku_flags & KUF_EXITING)
483			count++;
484	}
485	if ((kg->kg_numupcalls - count) == 1 &&
486	    (kg->kg_numthreads > 1)) {
487		mtx_unlock_spin(&sched_lock);
488		PROC_UNLOCK(p);
489		return (EDEADLK);
490	}
491	ku->ku_flags |= KUF_EXITING;
492	mtx_unlock_spin(&sched_lock);
493	PROC_UNLOCK(p);
494	error = suword(&ku->ku_mailbox->km_flags, ku->ku_mflags|KMF_DONE);
495	PROC_LOCK(p);
496	if (error)
497		psignal(p, SIGSEGV);
498	mtx_lock_spin(&sched_lock);
499	upcall_remove(td);
500	ke = td->td_kse;
501	if (p->p_numthreads == 1) {
502		kse_purge(p, td);
503		p->p_flag &= ~P_SA;
504		mtx_unlock_spin(&sched_lock);
505		PROC_UNLOCK(p);
506	} else {
507		if (kg->kg_numthreads == 1) { /* Shutdown a group */
508			kse_purge_group(td);
509			ke->ke_flags |= KEF_EXIT;
510		}
511		thread_stopped(p);
512		thread_exit();
513		/* NOTREACHED */
514	}
515	return (0);
516}
517
518/*
519 * Either becomes an upcall or waits for an awakening event and
520 * then becomes an upcall. Only error cases return.
521 */
522/*
523struct kse_release_args {
524	struct timespec *timeout;
525};
526*/
527int
528kse_release(struct thread *td, struct kse_release_args *uap)
529{
530	struct proc *p;
531	struct ksegrp *kg;
532	struct kse_upcall *ku;
533	struct timespec timeout;
534	struct timeval tv;
535	sigset_t sigset;
536	int error;
537
538	p = td->td_proc;
539	kg = td->td_ksegrp;
540	if ((ku = td->td_upcall) == NULL || TD_CAN_UNBIND(td))
541		return (EINVAL);
542	if (uap->timeout != NULL) {
543		if ((error = copyin(uap->timeout, &timeout, sizeof(timeout))))
544			return (error);
545		TIMESPEC_TO_TIMEVAL(&tv, &timeout);
546	}
547	if (td->td_flags & TDF_SA)
548		td->td_pflags |= TDP_UPCALLING;
549	else {
550		ku->ku_mflags = fuword(&ku->ku_mailbox->km_flags);
551		if (ku->ku_mflags == -1) {
552			PROC_LOCK(p);
553			sigexit(td, SIGSEGV);
554		}
555	}
556	PROC_LOCK(p);
557	if (ku->ku_mflags & KMF_WAITSIGEVENT) {
558		/* UTS wants to wait for signal event */
559		if (!(p->p_flag & P_SIGEVENT) && !(ku->ku_flags & KUF_DOUPCALL))
560			error = msleep(&p->p_siglist, &p->p_mtx, PPAUSE|PCATCH,
561			    "ksesigwait", (uap->timeout ? tvtohz(&tv) : 0));
562		p->p_flag &= ~P_SIGEVENT;
563		sigset = p->p_siglist;
564		PROC_UNLOCK(p);
565		error = copyout(&sigset, &ku->ku_mailbox->km_sigscaught,
566		    sizeof(sigset));
567	} else {
568		if (!kg->kg_completed && !(ku->ku_flags & KUF_DOUPCALL)) {
569			kg->kg_upsleeps++;
570			error = msleep(&kg->kg_completed, &p->p_mtx,
571				PPAUSE|PCATCH, "kserel",
572				(uap->timeout ? tvtohz(&tv) : 0));
573			kg->kg_upsleeps--;
574		}
575		PROC_UNLOCK(p);
576	}
577	if (ku->ku_flags & KUF_DOUPCALL) {
578		mtx_lock_spin(&sched_lock);
579		ku->ku_flags &= ~KUF_DOUPCALL;
580		mtx_unlock_spin(&sched_lock);
581	}
582	return (0);
583}
584
585/* struct kse_wakeup_args {
586	struct kse_mailbox *mbx;
587}; */
588int
589kse_wakeup(struct thread *td, struct kse_wakeup_args *uap)
590{
591	struct proc *p;
592	struct ksegrp *kg;
593	struct kse_upcall *ku;
594	struct thread *td2;
595
596	p = td->td_proc;
597	td2 = NULL;
598	ku = NULL;
599	/* KSE-enabled processes only, please. */
600	if (!(p->p_flag & P_SA))
601		return (EINVAL);
602	PROC_LOCK(p);
603	mtx_lock_spin(&sched_lock);
604	if (uap->mbx) {
605		FOREACH_KSEGRP_IN_PROC(p, kg) {
606			FOREACH_UPCALL_IN_GROUP(kg, ku) {
607				if (ku->ku_mailbox == uap->mbx)
608					break;
609			}
610			if (ku)
611				break;
612		}
613	} else {
614		kg = td->td_ksegrp;
615		if (kg->kg_upsleeps) {
616			wakeup_one(&kg->kg_completed);
617			mtx_unlock_spin(&sched_lock);
618			PROC_UNLOCK(p);
619			return (0);
620		}
621		ku = TAILQ_FIRST(&kg->kg_upcalls);
622	}
623	if (ku) {
624		if ((td2 = ku->ku_owner) == NULL) {
625			panic("%s: no owner", __func__);
626		} else if (TD_ON_SLEEPQ(td2) &&
627		           ((td2->td_wchan == &kg->kg_completed) ||
628			    (td2->td_wchan == &p->p_siglist &&
629			     (ku->ku_mflags & KMF_WAITSIGEVENT)))) {
630			abortsleep(td2);
631		} else {
632			ku->ku_flags |= KUF_DOUPCALL;
633		}
634		mtx_unlock_spin(&sched_lock);
635		PROC_UNLOCK(p);
636		return (0);
637	}
638	mtx_unlock_spin(&sched_lock);
639	PROC_UNLOCK(p);
640	return (ESRCH);
641}
642
643/*
644 * No new KSEG: on the first call, use the current KSE and don't schedule an upcall.
645 * In all other situations, allocate the maximum number of new KSEs and schedule an upcall.
646 */
647/* struct kse_create_args {
648	struct kse_mailbox *mbx;
649	int newgroup;
650}; */
651int
652kse_create(struct thread *td, struct kse_create_args *uap)
653{
654	struct kse *newke;
655	struct ksegrp *newkg;
656	struct ksegrp *kg;
657	struct proc *p;
658	struct kse_mailbox mbx;
659	struct kse_upcall *newku;
660	int err, ncpus, sa = 0, first = 0;
661	struct thread *newtd;
662
663	p = td->td_proc;
664	if ((err = copyin(uap->mbx, &mbx, sizeof(mbx))))
665		return (err);
666
667	/* Too bad; why doesn't the kernel always have a cpu counter!? */
668#ifdef SMP
669	ncpus = mp_ncpus;
670#else
671	ncpus = 1;
672#endif
673	if (virtual_cpu != 0)
674		ncpus = virtual_cpu;
675	if (!(mbx.km_flags & KMF_BOUND))
676		sa = TDF_SA;
677	else
678		ncpus = 1;
679	PROC_LOCK(p);
680	if (!(p->p_flag & P_SA)) {
681		first = 1;
682		p->p_flag |= P_SA;
683	}
684	PROC_UNLOCK(p);
685	if (!sa && !uap->newgroup && !first)
686		return (EINVAL);
687	kg = td->td_ksegrp;
688	if (uap->newgroup) {
689		/* Racy check, but it is rechecked under the locks below. */
690		if (p->p_numksegrps >= max_groups_per_proc)
691			return (EPROCLIM);
692		/*
693		 * If we want a new KSEGRP it doesn't matter whether
694		 * we have already fired up KSE mode before or not.
695		 * We put the process in KSE mode and create a new KSEGRP.
696		 */
697		newkg = ksegrp_alloc();
698		bzero(&newkg->kg_startzero, RANGEOF(struct ksegrp,
699		      kg_startzero, kg_endzero));
700		bcopy(&kg->kg_startcopy, &newkg->kg_startcopy,
701		      RANGEOF(struct ksegrp, kg_startcopy, kg_endcopy));
702		PROC_LOCK(p);
703		mtx_lock_spin(&sched_lock);
704		if (p->p_numksegrps >= max_groups_per_proc) {
705			mtx_unlock_spin(&sched_lock);
706			PROC_UNLOCK(p);
707			ksegrp_free(newkg);
708			return (EPROCLIM);
709		}
710		ksegrp_link(newkg, p);
711		sched_fork_ksegrp(kg, newkg);
712		mtx_unlock_spin(&sched_lock);
713		PROC_UNLOCK(p);
714	} else {
715		if (!first && ((td->td_flags & TDF_SA) ^ sa) != 0)
716			return (EINVAL);
717		newkg = kg;
718	}
719
720	/*
721	 * Creating more upcalls than the number of physical cpus does
722	 * not help performance.
723	 */
724	if (newkg->kg_numupcalls >= ncpus)
725		return (EPROCLIM);
726
727	if (newkg->kg_numupcalls == 0) {
728		/*
729		 * Initialize the KSE group.
730		 *
731		 * For a multiplexed group, create as many KSEs as there are
732		 * physical cpus. This increases concurrency even if userland
733		 * is not MP safe and can only run on a single CPU.
734		 * Ideally, every physical cpu should execute a thread.
735		 * If there are enough KSEs, threads in the kernel can be
736		 * executed in parallel on different cpus at full speed;
737		 * concurrency in the kernel shouldn't be restricted by the
738		 * number of upcalls userland provides. Adding more upcall
739		 * structures only increases concurrency in userland.
740		 *
741		 * For a bound thread group, because there is only one thread
742		 * in the group, we create only one KSE for the group. A thread
743		 * in this kind of group will never schedule an upcall when
744		 * blocked; this is intended to simulate a pthread system scope thread.
745		 */
746		while (newkg->kg_kses < ncpus) {
747			newke = kse_alloc();
748			bzero(&newke->ke_startzero, RANGEOF(struct kse,
749			      ke_startzero, ke_endzero));
750#if 0
751			mtx_lock_spin(&sched_lock);
752			bcopy(&ke->ke_startcopy, &newke->ke_startcopy,
753			      RANGEOF(struct kse, ke_startcopy, ke_endcopy));
754			mtx_unlock_spin(&sched_lock);
755#endif
756			mtx_lock_spin(&sched_lock);
757			kse_link(newke, newkg);
758			sched_fork_kse(td->td_kse, newke);
759			/* Add engine */
760			kse_reassign(newke);
761			mtx_unlock_spin(&sched_lock);
762		}
763	}
764	newku = upcall_alloc();
765	newku->ku_mailbox = uap->mbx;
766	newku->ku_func = mbx.km_func;
767	bcopy(&mbx.km_stack, &newku->ku_stack, sizeof(stack_t));
768
769	/* For the first call this may not have been set */
770	if (td->td_standin == NULL)
771		thread_alloc_spare(td, NULL);
772
773	PROC_LOCK(p);
774	if (newkg->kg_numupcalls >= ncpus) {
775		PROC_UNLOCK(p);
776		upcall_free(newku);
777		return (EPROCLIM);
778	}
779	if (first && sa) {
780		SIGSETOR(p->p_siglist, td->td_siglist);
781		SIGEMPTYSET(td->td_siglist);
782		SIGFILLSET(td->td_sigmask);
783		SIG_CANTMASK(td->td_sigmask);
784	}
785	mtx_lock_spin(&sched_lock);
786	PROC_UNLOCK(p);
787	upcall_link(newku, newkg);
788	if (mbx.km_quantum)
789		newkg->kg_upquantum = max(1, mbx.km_quantum/tick);
790
791	/*
792	 * Each upcall structure has an owner thread; find which
793	 * thread owns this one.
794	 */
795	if (uap->newgroup) {
796		/*
797		 * Because the new ksegrp has no thread,
798		 * create an initial upcall thread to own it.
799		 */
800		newtd = thread_schedule_upcall(td, newku);
801	} else {
802		/*
803		 * If the current thread doesn't own an upcall structure,
804		 * just assign the upcall to it.
805		 */
806		if (td->td_upcall == NULL) {
807			newku->ku_owner = td;
808			td->td_upcall = newku;
809			newtd = td;
810		} else {
811			/*
812			 * Create a new upcall thread to own it.
813			 */
814			newtd = thread_schedule_upcall(td, newku);
815		}
816	}
817	if (!sa) {
818		newtd->td_mailbox = mbx.km_curthread;
819		newtd->td_flags &= ~TDF_SA;
820		if (newtd != td) {
821			mtx_unlock_spin(&sched_lock);
822			cpu_set_upcall_kse(newtd, newku);
823			mtx_lock_spin(&sched_lock);
824		}
825	} else {
826		newtd->td_flags |= TDF_SA;
827	}
828	if (newtd != td)
829		setrunqueue(newtd);
830	mtx_unlock_spin(&sched_lock);
831	return (0);
832}
833
834/*
835 * Initialize global thread allocation resources.
836 */
837void
838threadinit(void)
839{
840
841	thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(),
842	    thread_ctor, thread_dtor, thread_init, thread_fini,
843	    UMA_ALIGN_CACHE, 0);
844	ksegrp_zone = uma_zcreate("KSEGRP", sched_sizeof_ksegrp(),
845	    NULL, NULL, ksegrp_init, NULL,
846	    UMA_ALIGN_CACHE, 0);
847	kse_zone = uma_zcreate("KSE", sched_sizeof_kse(),
848	    NULL, NULL, kse_init, NULL,
849	    UMA_ALIGN_CACHE, 0);
850	upcall_zone = uma_zcreate("UPCALL", sizeof(struct kse_upcall),
851	    NULL, NULL, NULL, NULL, UMA_ALIGN_CACHE, 0);
852}
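
/*
 * A note on the UMA hooks registered above: ctor/dtor run on every
 * allocation and free from a zone, while init/fini run only when an item
 * enters or leaves the zone's type-stable backing store.  That is why the
 * expensive setup (kernel stack, turnstile) lives in thread_init() and
 * thread_fini(), and only cheap per-use state is reset in thread_ctor().
 */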
853
854/*
855 * Stash an embarrassingly extra thread into the zombie thread queue.
856 */
857void
858thread_stash(struct thread *td)
859{
860	mtx_lock_spin(&kse_zombie_lock);
861	TAILQ_INSERT_HEAD(&zombie_threads, td, td_runq);
862	mtx_unlock_spin(&kse_zombie_lock);
863}
864
865/*
866 * Stash an embarrassingly extra kse into the zombie kse queue.
867 */
868void
869kse_stash(struct kse *ke)
870{
871	mtx_lock_spin(&kse_zombie_lock);
872	TAILQ_INSERT_HEAD(&zombie_kses, ke, ke_procq);
873	mtx_unlock_spin(&kse_zombie_lock);
874}
875
876/*
877 * Stash an embarrassingly extra upcall into the zombie upcall queue.
878 */
879
880void
881upcall_stash(struct kse_upcall *ku)
882{
883	mtx_lock_spin(&kse_zombie_lock);
884	TAILQ_INSERT_HEAD(&zombie_upcalls, ku, ku_link);
885	mtx_unlock_spin(&kse_zombie_lock);
886}
887
888/*
889 * Stash an embarrassingly extra ksegrp into the zombie ksegrp queue.
890 */
891void
892ksegrp_stash(struct ksegrp *kg)
893{
894	mtx_lock_spin(&kse_zombie_lock);
895	TAILQ_INSERT_HEAD(&zombie_ksegrps, kg, kg_ksegrp);
896	mtx_unlock_spin(&kse_zombie_lock);
897}
898
899/*
900 * Reap zombie thread, kse, ksegrp and upcall resources.
901 */
902void
903thread_reap(void)
904{
905	struct thread *td_first, *td_next;
906	struct kse *ke_first, *ke_next;
907	struct ksegrp *kg_first, *kg_next;
908	struct kse_upcall *ku_first, *ku_next;
909
910	/*
911	 * Don't even bother to lock if none at this instant,
912	 * we really don't care about the next instant..
913	 */
914	if ((!TAILQ_EMPTY(&zombie_threads))
915	    || (!TAILQ_EMPTY(&zombie_kses))
916	    || (!TAILQ_EMPTY(&zombie_ksegrps))
917	    || (!TAILQ_EMPTY(&zombie_upcalls))) {
918		mtx_lock_spin(&kse_zombie_lock);
919		td_first = TAILQ_FIRST(&zombie_threads);
920		ke_first = TAILQ_FIRST(&zombie_kses);
921		kg_first = TAILQ_FIRST(&zombie_ksegrps);
922		ku_first = TAILQ_FIRST(&zombie_upcalls);
923		if (td_first)
924			TAILQ_INIT(&zombie_threads);
925		if (ke_first)
926			TAILQ_INIT(&zombie_kses);
927		if (kg_first)
928			TAILQ_INIT(&zombie_ksegrps);
929		if (ku_first)
930			TAILQ_INIT(&zombie_upcalls);
931		mtx_unlock_spin(&kse_zombie_lock);
932		while (td_first) {
933			td_next = TAILQ_NEXT(td_first, td_runq);
934			if (td_first->td_ucred)
935				crfree(td_first->td_ucred);
936			thread_free(td_first);
937			td_first = td_next;
938		}
939		while (ke_first) {
940			ke_next = TAILQ_NEXT(ke_first, ke_procq);
941			kse_free(ke_first);
942			ke_first = ke_next;
943		}
944		while (kg_first) {
945			kg_next = TAILQ_NEXT(kg_first, kg_ksegrp);
946			ksegrp_free(kg_first);
947			kg_first = kg_next;
948		}
949		while (ku_first) {
950			ku_next = TAILQ_NEXT(ku_first, ku_link);
951			upcall_free(ku_first);
952			ku_first = ku_next;
953		}
954	}
955}
956
957/*
958 * Allocate a ksegrp.
959 */
960struct ksegrp *
961ksegrp_alloc(void)
962{
963	return (uma_zalloc(ksegrp_zone, M_WAITOK));
964}
965
966/*
967 * Allocate a kse.
968 */
969struct kse *
970kse_alloc(void)
971{
972	return (uma_zalloc(kse_zone, M_WAITOK));
973}
974
975/*
976 * Allocate a thread.
977 */
978struct thread *
979thread_alloc(void)
980{
981	thread_reap(); /* check if any zombies to get */
982	return (uma_zalloc(thread_zone, M_WAITOK));
983}
984
985/*
986 * Deallocate a ksegrp.
987 */
988void
989ksegrp_free(struct ksegrp *td)
990{
991	uma_zfree(ksegrp_zone, td);
992}
993
994/*
995 * Deallocate a kse.
996 */
997void
998kse_free(struct kse *td)
999{
1000	uma_zfree(kse_zone, td);
1001}
1002
1003/*
1004 * Deallocate a thread.
1005 */
1006void
1007thread_free(struct thread *td)
1008{
1009
1010	cpu_thread_clean(td);
1011	uma_zfree(thread_zone, td);
1012}
1013
1014/*
1015 * Store the thread context in the UTS's mailbox,
1016 * then add the mailbox at the head of a list we are building in user space.
1017 * The list is anchored in the ksegrp structure.
1018 */
1019int
1020thread_export_context(struct thread *td, int willexit)
1021{
1022	struct proc *p;
1023	struct ksegrp *kg;
1024	uintptr_t mbx;
1025	void *addr;
1026	int error = 0, temp, sig;
1027	mcontext_t mc;
1028
1029	p = td->td_proc;
1030	kg = td->td_ksegrp;
1031
1032	/* Export the user/machine context. */
1033	get_mcontext(td, &mc, 0);
1034	addr = (void *)(&td->td_mailbox->tm_context.uc_mcontext);
1035	error = copyout(&mc, addr, sizeof(mcontext_t));
1036	if (error)
1037		goto bad;
1038
1039	/* Export clock ticks spent in kernel mode. */
1040	addr = (caddr_t)(&td->td_mailbox->tm_sticks);
1041	temp = fuword32(addr) + td->td_usticks;
1042	if (suword32(addr, temp)) {
1043		error = EFAULT;
1044		goto bad;
1045	}
1046
1047	/*
1048	 * Post sync signal, or process SIGKILL and SIGSTOP.
1049	 * A sync signal can only be posted here when it is not
1050	 * caught by userland or the process is being debugged.
1051	 */
1052	PROC_LOCK(p);
1053	if (td->td_flags & TDF_NEEDSIGCHK) {
1054		mtx_lock_spin(&sched_lock);
1055		td->td_flags &= ~TDF_NEEDSIGCHK;
1056		mtx_unlock_spin(&sched_lock);
1057		mtx_lock(&p->p_sigacts->ps_mtx);
1058		while ((sig = cursig(td)) != 0)
1059			postsig(sig);
1060		mtx_unlock(&p->p_sigacts->ps_mtx);
1061	}
1062	if (willexit)
1063		SIGFILLSET(td->td_sigmask);
1064	PROC_UNLOCK(p);
1065
1066	/* Get address in latest mbox of list pointer */
1067	addr = (void *)(&td->td_mailbox->tm_next);
1068	/*
1069	 * Put the saved address of the previous first
1070	 * entry into this one
1071	 */
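	/*
	 * The proc lock cannot be held across suword() (it may sleep on a
	 * page fault), so the insertion is done optimistically: write the
	 * current list head into the user-space tm_next field, then re-check
	 * kg_completed under the proc lock and retry if another thread
	 * changed it in the meantime.
	 */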
1072	for (;;) {
1073		mbx = (uintptr_t)kg->kg_completed;
1074		if (suword(addr, mbx)) {
1075			error = EFAULT;
1076			goto bad;
1077		}
1078		PROC_LOCK(p);
1079		if (mbx == (uintptr_t)kg->kg_completed) {
1080			kg->kg_completed = td->td_mailbox;
1081			/*
1082			 * The thread context may be taken away by
1083			 * other upcall threads when we unlock
1084			 * the process lock; it is no longer valid to
1085			 * use it again anywhere else.
1086			 */
1087			td->td_mailbox = NULL;
1088			PROC_UNLOCK(p);
1089			break;
1090		}
1091		PROC_UNLOCK(p);
1092	}
1093	td->td_usticks = 0;
1094	return (0);
1095
1096bad:
1097	PROC_LOCK(p);
1098	sigexit(td, SIGILL);
1099	return (error);
1100}
1101
1102/*
1103 * Take the list of completed mailboxes for this KSEGRP and put them on this
1104 * upcall's mailbox as it's the next one going up.
1105 */
1106static int
1107thread_link_mboxes(struct ksegrp *kg, struct kse_upcall *ku)
1108{
1109	struct proc *p = kg->kg_proc;
1110	void *addr;
1111	uintptr_t mbx;
1112
1113	addr = (void *)(&ku->ku_mailbox->km_completed);
1114	for (;;) {
1115		mbx = (uintptr_t)kg->kg_completed;
1116		if (suword(addr, mbx)) {
1117			PROC_LOCK(p);
1118			psignal(p, SIGSEGV);
1119			PROC_UNLOCK(p);
1120			return (EFAULT);
1121		}
1122		PROC_LOCK(p);
1123		if (mbx == (uintptr_t)kg->kg_completed) {
1124			kg->kg_completed = NULL;
1125			PROC_UNLOCK(p);
1126			break;
1127		}
1128		PROC_UNLOCK(p);
1129	}
1130	return (0);
1131}
1132
1133/*
1134 * This function should be called at statclock interrupt time
1135 */
1136int
1137thread_statclock(int user)
1138{
1139	struct thread *td = curthread;
1140	struct ksegrp *kg = td->td_ksegrp;
1141
1142	if (kg->kg_numupcalls == 0 || !(td->td_flags & TDF_SA))
1143		return (0);
1144	if (user) {
1145		/* Currently always done via ast(). */
1146		mtx_lock_spin(&sched_lock);
1147		td->td_flags |= (TDF_USTATCLOCK|TDF_ASTPENDING);
1148		mtx_unlock_spin(&sched_lock);
1149		td->td_uuticks++;
1150	} else {
1151		if (td->td_mailbox != NULL)
1152			td->td_usticks++;
1153		else {
1154			/* XXXKSE
1155			 * We will call thread_user_enter() for every
1156			 * kernel entry in the future, so if the thread mailbox
1157			 * is NULL, it must be a UTS kernel thread; don't account
1158			 * clock ticks for it.
1159			 */
1160		}
1161	}
1162	return (0);
1163}
1164
1165/*
1166 * Export stat clock ticks for userland.
1167 */
1168static int
1169thread_update_usr_ticks(struct thread *td, int user)
1170{
1171	struct proc *p = td->td_proc;
1172	struct kse_thr_mailbox *tmbx;
1173	struct kse_upcall *ku;
1174	struct ksegrp *kg;
1175	caddr_t addr;
1176	u_int uticks;
1177
1178	if ((ku = td->td_upcall) == NULL)
1179		return (-1);
1180
1181	tmbx = (void *)fuword((void *)&ku->ku_mailbox->km_curthread);
1182	if ((tmbx == NULL) || (tmbx == (void *)-1))
1183		return (-1);
1184	if (user) {
1185		uticks = td->td_uuticks;
1186		td->td_uuticks = 0;
1187		addr = (caddr_t)&tmbx->tm_uticks;
1188	} else {
1189		uticks = td->td_usticks;
1190		td->td_usticks = 0;
1191		addr = (caddr_t)&tmbx->tm_sticks;
1192	}
1193	if (uticks) {
1194		if (suword32(addr, uticks+fuword32(addr))) {
1195			PROC_LOCK(p);
1196			psignal(p, SIGSEGV);
1197			PROC_UNLOCK(p);
1198			return (-2);
1199		}
1200	}
1201	kg = td->td_ksegrp;
1202	if (kg->kg_upquantum && ticks >= kg->kg_nextupcall) {
1203		mtx_lock_spin(&sched_lock);
1204		td->td_upcall->ku_flags |= KUF_DOUPCALL;
1205		mtx_unlock_spin(&sched_lock);
1206	}
1207	return (0);
1208}
1209
1210/*
1211 * Discard the current thread and exit from its context.
1212 *
1213 * Because we can't free a thread while we're operating under its context,
1214 * push the current thread into our CPU's deadthread holder. This means
1215 * we needn't worry about someone else grabbing our context before we
1216 * do a cpu_throw().
1217 */
1218void
1219thread_exit(void)
1220{
1221	struct thread *td;
1222	struct kse *ke;
1223	struct proc *p;
1224	struct ksegrp	*kg;
1225
1226	td = curthread;
1227	kg = td->td_ksegrp;
1228	p = td->td_proc;
1229	ke = td->td_kse;
1230
1231	mtx_assert(&sched_lock, MA_OWNED);
1232	KASSERT(p != NULL, ("thread exiting without a process"));
1233	KASSERT(ke != NULL, ("thread exiting without a kse"));
1234	KASSERT(kg != NULL, ("thread exiting without a kse group"));
1235	PROC_LOCK_ASSERT(p, MA_OWNED);
1236	CTR1(KTR_PROC, "thread_exit: thread %p", td);
1237	KASSERT(!mtx_owned(&Giant), ("dying thread owns giant"));
1238
1239	if (td->td_standin != NULL) {
1240		thread_stash(td->td_standin);
1241		td->td_standin = NULL;
1242	}
1243
1244	cpu_thread_exit(td);	/* XXXSMP */
1245
1246	/*
1247	 * The last thread is left attached to the process
1248	 * so that the whole bundle gets recycled. Skip
1249	 * all this stuff.
1250	 */
1251	if (p->p_numthreads > 1) {
1252		thread_unlink(td);
1253		if (p->p_maxthrwaits)
1254			wakeup(&p->p_numthreads);
1255		/*
1256		 * The test below is NOT true if we are the
1257	 * sole exiting thread. P_STOPPED_SINGLE is unset
1258		 * in exit1() after it is the only survivor.
1259		 */
1260		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
1261			if (p->p_numthreads == p->p_suspcount) {
1262				thread_unsuspend_one(p->p_singlethread);
1263			}
1264		}
1265
1266		/*
1267		 * Because each upcall structure has an owner thread,
1268		 * and the owner thread exits only when the process is
1269		 * exiting, an upcall to userland is no longer needed and
1270		 * deleting the upcall structure is safe here.
1271		 * So when all threads in a group have exited, all upcalls
1272		 * in the group are automatically freed.
1273		 */
1274		if (td->td_upcall)
1275			upcall_remove(td);
1276
1277		sched_exit_thread(FIRST_THREAD_IN_PROC(p), td);
1278		sched_exit_kse(FIRST_KSE_IN_PROC(p), ke);
1279		ke->ke_state = KES_UNQUEUED;
1280		ke->ke_thread = NULL;
1281		/*
1282		 * Decide what to do with the KSE attached to this thread.
1283		 */
1284		if (ke->ke_flags & KEF_EXIT) {
1285			kse_unlink(ke);
1286			if (kg->kg_kses == 0) {
1287				sched_exit_ksegrp(FIRST_KSEGRP_IN_PROC(p), kg);
1288				ksegrp_unlink(kg);
1289			}
1290		}
1291		else
1292			kse_reassign(ke);
1293		PROC_UNLOCK(p);
1294		td->td_kse	= NULL;
1295		td->td_state	= TDS_INACTIVE;
1296#if 0
1297		td->td_proc	= NULL;
1298#endif
1299		td->td_ksegrp	= NULL;
1300		td->td_last_kse	= NULL;
1301		PCPU_SET(deadthread, td);
1302	} else {
1303		PROC_UNLOCK(p);
1304	}
1305	/* XXX Shouldn't cpu_throw() here. */
1306	mtx_assert(&sched_lock, MA_OWNED);
1307	cpu_throw(td, choosethread());
1308	panic("I'm a teapot!");
1309	/* NOTREACHED */
1310}
1311
1312/*
1313 * Do any thread-specific cleanups that may be needed in wait().
1314 * Called with Giant held; the proc lock and sched_lock are not held.
1315 */
1316void
1317thread_wait(struct proc *p)
1318{
1319	struct thread *td;
1320
1321	KASSERT((p->p_numthreads == 1), ("Multiple threads in wait1()"));
1322	KASSERT((p->p_numksegrps == 1), ("Multiple ksegrps in wait1()"));
1323	FOREACH_THREAD_IN_PROC(p, td) {
1324		if (td->td_standin != NULL) {
1325			thread_free(td->td_standin);
1326			td->td_standin = NULL;
1327		}
1328		cpu_thread_clean(td);
1329	}
1330	thread_reap();	/* check for zombie threads etc. */
1331}
1332
1333/*
1334 * Link a thread to a process.
1335 * Set up anything that needs to be initialized for it to
1336 * be used by the process.
1337 *
1338 * Note that we do not link to the proc's ucred here.
1339 * The thread is linked as if running but no KSE assigned.
1340 */
1341void
1342thread_link(struct thread *td, struct ksegrp *kg)
1343{
1344	struct proc *p;
1345
1346	p = kg->kg_proc;
1347	td->td_state    = TDS_INACTIVE;
1348	td->td_proc     = p;
1349	td->td_ksegrp   = kg;
1350	td->td_last_kse = NULL;
1351	td->td_flags    = 0;
1352	td->td_kse      = NULL;
1353
1354	LIST_INIT(&td->td_contested);
1355	callout_init(&td->td_slpcallout, CALLOUT_MPSAFE);
1356	TAILQ_INSERT_HEAD(&p->p_threads, td, td_plist);
1357	TAILQ_INSERT_HEAD(&kg->kg_threads, td, td_kglist);
1358	p->p_numthreads++;
1359	kg->kg_numthreads++;
1360}
1361
1362void
1363thread_unlink(struct thread *td)
1364{
1365	struct proc *p = td->td_proc;
1366	struct ksegrp *kg = td->td_ksegrp;
1367
1368	mtx_assert(&sched_lock, MA_OWNED);
1369	TAILQ_REMOVE(&p->p_threads, td, td_plist);
1370	p->p_numthreads--;
1371	TAILQ_REMOVE(&kg->kg_threads, td, td_kglist);
1372	kg->kg_numthreads--;
1373	/* could clear a few other things here */
1374}
1375
1376/*
1377 * Purge a ksegrp's resources. When a ksegrp is preparing to
1378 * exit, it calls this function.
1379 */
1380static void
1381kse_purge_group(struct thread *td)
1382{
1383	struct ksegrp *kg;
1384	struct kse *ke;
1385
1386	kg = td->td_ksegrp;
1387 	KASSERT(kg->kg_numthreads == 1, ("%s: bad thread number", __func__));
1388	while ((ke = TAILQ_FIRST(&kg->kg_iq)) != NULL) {
1389		KASSERT(ke->ke_state == KES_IDLE,
1390			("%s: wrong idle KSE state", __func__));
1391		kse_unlink(ke);
1392	}
1393	KASSERT((kg->kg_kses == 1),
1394		("%s: ksegrp still has %d KSEs", __func__, kg->kg_kses));
1395	KASSERT((kg->kg_numupcalls == 0),
1396	        ("%s: ksegrp still has %d upcall structures",
1397		__func__, kg->kg_numupcalls));
1398}
1399
1400/*
1401 * Purge a process's KSE resource. When a process is preparing to
1402 * exit, it calls kse_purge to release any extra KSE resources in
1403 * the process.
1404 */
1405static void
1406kse_purge(struct proc *p, struct thread *td)
1407{
1408	struct ksegrp *kg;
1409	struct kse *ke;
1410
1411 	KASSERT(p->p_numthreads == 1, ("bad thread number"));
1412	while ((kg = TAILQ_FIRST(&p->p_ksegrps)) != NULL) {
1413		TAILQ_REMOVE(&p->p_ksegrps, kg, kg_ksegrp);
1414		p->p_numksegrps--;
1415		/*
1416		 * There is no ownership for a KSE; after all threads
1417		 * in the group have exited, it is possible that some KSEs
1418		 * were left on the idle queue, so gc them now.
1419		 */
1420		while ((ke = TAILQ_FIRST(&kg->kg_iq)) != NULL) {
1421			KASSERT(ke->ke_state == KES_IDLE,
1422			   ("%s: wrong idle KSE state", __func__));
1423			TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
1424			kg->kg_idle_kses--;
1425			TAILQ_REMOVE(&kg->kg_kseq, ke, ke_kglist);
1426			kg->kg_kses--;
1427			kse_stash(ke);
1428		}
1429		KASSERT(((kg->kg_kses == 0) && (kg != td->td_ksegrp)) ||
1430		        ((kg->kg_kses == 1) && (kg == td->td_ksegrp)),
1431		        ("ksegrp has wrong kg_kses: %d", kg->kg_kses));
1432		KASSERT((kg->kg_numupcalls == 0),
1433		        ("%s: ksegrp still has %d upcall structures",
1434			__func__, kg->kg_numupcalls));
1435
1436		if (kg != td->td_ksegrp)
1437			ksegrp_stash(kg);
1438	}
1439	TAILQ_INSERT_HEAD(&p->p_ksegrps, td->td_ksegrp, kg_ksegrp);
1440	p->p_numksegrps++;
1441}
1442
1443/*
1444 * This function is intended to initialize a spare thread for an
1445 * upcall. Initialize the thread's large data area outside sched_lock
1446 * for thread_schedule_upcall().
1447 */
1448void
1449thread_alloc_spare(struct thread *td, struct thread *spare)
1450{
1451	if (td->td_standin)
1452		return;
1453	if (spare == NULL)
1454		spare = thread_alloc();
1455	td->td_standin = spare;
1456	bzero(&spare->td_startzero,
1457	    (unsigned)RANGEOF(struct thread, td_startzero, td_endzero));
1458	spare->td_proc = td->td_proc;
1459	spare->td_ucred = crhold(td->td_ucred);
1460}
1461
1462/*
1463 * Create a thread and schedule it for upcall on the KSE given.
1464 * Use our thread's standin so that we don't have to allocate one.
1465 */
1466struct thread *
1467thread_schedule_upcall(struct thread *td, struct kse_upcall *ku)
1468{
1469	struct thread *td2;
1470
1471	mtx_assert(&sched_lock, MA_OWNED);
1472
1473	/*
1474	 * Schedule an upcall thread on the specified kse_upcall;
1475	 * the kse_upcall must be free.
1476	 * td must have a spare thread.
1477	 */
1478	KASSERT(ku->ku_owner == NULL, ("%s: upcall has owner", __func__));
1479	if ((td2 = td->td_standin) != NULL) {
1480		td->td_standin = NULL;
1481	} else {
1482		panic("no reserve thread when scheduling an upcall");
1483		return (NULL);
1484	}
1485	CTR3(KTR_PROC, "thread_schedule_upcall: thread %p (pid %d, %s)",
1486	     td2, td->td_proc->p_pid, td->td_proc->p_comm);
1487	bcopy(&td->td_startcopy, &td2->td_startcopy,
1488	    (unsigned) RANGEOF(struct thread, td_startcopy, td_endcopy));
1489	thread_link(td2, ku->ku_ksegrp);
1490	/* inherit blocked thread's context */
1491	cpu_set_upcall(td2, td);
1492	/* Let the new thread become owner of the upcall */
1493	ku->ku_owner   = td2;
1494	td2->td_upcall = ku;
1495	td2->td_flags  = TDF_SA;
1496	td2->td_pflags = TDP_UPCALLING;
1497	td2->td_kse    = NULL;
1498	td2->td_state  = TDS_CAN_RUN;
1499	td2->td_inhibitors = 0;
1500	SIGFILLSET(td2->td_sigmask);
1501	SIG_CANTMASK(td2->td_sigmask);
1502	sched_fork_thread(td, td2);
1503	return (td2);	/* bogus.. should be a void function */
1504}
1505
1506/*
1507 * This is only used when a thread has generated a trap and the process is being
1508 * debugged.
1509 */
1510void
1511thread_signal_add(struct thread *td, int sig)
1512{
1513	struct proc *p;
1514	siginfo_t siginfo;
1515	struct sigacts *ps;
1516	int error;
1517
1518	p = td->td_proc;
1519	PROC_LOCK_ASSERT(p, MA_OWNED);
1520	ps = p->p_sigacts;
1521	mtx_assert(&ps->ps_mtx, MA_OWNED);
1522
1523	cpu_thread_siginfo(sig, 0, &siginfo);
1524	mtx_unlock(&ps->ps_mtx);
1525	PROC_UNLOCK(p);
1526	error = copyout(&siginfo, &td->td_mailbox->tm_syncsig, sizeof(siginfo));
1527	if (error) {
1528		PROC_LOCK(p);
1529		sigexit(td, SIGILL);
1530	}
1531	PROC_LOCK(p);
1532	SIGADDSET(td->td_sigmask, sig);
1533	mtx_lock(&ps->ps_mtx);
1534}
1535
1536void
1537thread_switchout(struct thread *td)
1538{
1539	struct kse_upcall *ku;
1540	struct thread *td2;
1541
1542	mtx_assert(&sched_lock, MA_OWNED);
1543
1544	/*
1545	 * If the outgoing thread is in a threaded group and has never
1546	 * scheduled an upcall, decide whether this is a short
1547	 * or long term event and thus whether or not to schedule
1548	 * an upcall.
1549	 * If it is a short term event, just suspend it in
1550	 * a way that takes its KSE with it.
1551	 * Select the events for which we want to schedule upcalls.
1552	 * For now it's just sleep.
1553	 * XXXKSE eventually almost any inhibition could do.
1554	 */
1555	if (TD_CAN_UNBIND(td) && (td->td_standin) && TD_ON_SLEEPQ(td)) {
1556		/*
1557		 * Release ownership of the upcall, and schedule an upcall
1558		 * thread; this new upcall thread becomes the owner of
1559		 * the upcall structure.
1560		 */
1561		ku = td->td_upcall;
1562		ku->ku_owner = NULL;
1563		td->td_upcall = NULL;
1564		td->td_flags &= ~TDF_CAN_UNBIND;
1565		td2 = thread_schedule_upcall(td, ku);
1566		setrunqueue(td2);
1567	}
1568}
1569
1570/*
1571 * Setup done on the thread when it enters the kernel.
1572 * XXXKSE Presently only for syscalls but eventually all kernel entries.
1573 */
1574void
1575thread_user_enter(struct proc *p, struct thread *td)
1576{
1577	struct ksegrp *kg;
1578	struct kse_upcall *ku;
1579	struct kse_thr_mailbox *tmbx;
1580	uint32_t tflags;
1581
1582	kg = td->td_ksegrp;
1583
1584	/*
1585	 * First check that we shouldn't just abort.
1586	 * But check if we are the single thread first!
1587	 */
1588	if (p->p_flag & P_SINGLE_EXIT) {
1589		PROC_LOCK(p);
1590		mtx_lock_spin(&sched_lock);
1591		thread_stopped(p);
1592		thread_exit();
1593		/* NOTREACHED */
1594	}
1595
1596	/*
1597	 * If we are doing a syscall in a KSE environment,
1598	 * note where our mailbox is. There is always the
1599	 * possibility that we could do this lazily (in kse_reassign()),
1600	 * but for now do it every time.
1601	 */
1602	kg = td->td_ksegrp;
1603	if (td->td_flags & TDF_SA) {
1604		ku = td->td_upcall;
1605		KASSERT(ku, ("%s: no upcall owned", __func__));
1606		KASSERT((ku->ku_owner == td), ("%s: wrong owner", __func__));
1607		KASSERT(!TD_CAN_UNBIND(td), ("%s: can unbind", __func__));
1608		ku->ku_mflags = fuword32((void *)&ku->ku_mailbox->km_flags);
1609		tmbx = (void *)fuword((void *)&ku->ku_mailbox->km_curthread);
1610		if ((tmbx == NULL) || (tmbx == (void *)-1L) ||
1611		    (ku->ku_mflags & KMF_NOUPCALL)) {
1612			td->td_mailbox = NULL;
1613		} else {
1614			if (td->td_standin == NULL)
1615				thread_alloc_spare(td, NULL);
1616			tflags = fuword32(&tmbx->tm_flags);
1617			/*
1618			 * On some architectures, the TP register points to the thread
1619			 * mailbox but not to the kse mailbox, and userland
1620			 * cannot atomically clear km_curthread, but it can
1621			 * use the TP register and set TMF_NOUPCALL in the thread
1622			 * flags to indicate a critical region.
1623			 */
1624			if (tflags & TMF_NOUPCALL) {
1625				td->td_mailbox = NULL;
1626			} else {
1627				td->td_mailbox = tmbx;
1628				mtx_lock_spin(&sched_lock);
1629				td->td_flags |= TDF_CAN_UNBIND;
1630				mtx_unlock_spin(&sched_lock);
1631			}
1632		}
1633	}
1634}
1635
1636/*
1637 * The extra work we go through if we are a threaded process when we
1638 * return to userland.
1639 *
1640 * If we are a KSE process and returning to user mode, check for
1641 * extra work to do before we return (e.g. for more syscalls
1642 * to complete first).  If we were in a critical section, we should
1643 * just return to let it finish. Same if we were in the UTS (in
1644 * which case the mailbox's context's busy indicator will be set).
1645 * The only traps we support will have set the mailbox.
1646 * We will clear it here.
1647 */
1648int
1649thread_userret(struct thread *td, struct trapframe *frame)
1650{
1651	int error = 0, upcalls, uts_crit;
1652	struct kse_upcall *ku;
1653	struct ksegrp *kg, *kg2;
1654	struct proc *p;
1655	struct timespec ts;
1656
1657	p = td->td_proc;
1658	kg = td->td_ksegrp;
1659	ku = td->td_upcall;
1660
1661	/* Nothing to do with bound thread */
1662	if (!(td->td_flags & TDF_SA))
1663		return (0);
1664
1665	/*
1666	 * A stat clock interrupt hit while in userland and we
1667	 * are returning from the interrupt; charge the thread's
1668	 * userland time for the UTS.
1669	 */
1670	if (td->td_flags & TDF_USTATCLOCK) {
1671		thread_update_usr_ticks(td, 1);
1672		mtx_lock_spin(&sched_lock);
1673		td->td_flags &= ~TDF_USTATCLOCK;
1674		mtx_unlock_spin(&sched_lock);
1675		if (kg->kg_completed ||
1676		    (td->td_upcall->ku_flags & KUF_DOUPCALL))
1677			thread_user_enter(p, td);
1678	}
1679
1680	uts_crit = (td->td_mailbox == NULL);
1681	/*
1682	 * Optimisation:
1683	 * This thread has not started any upcall.
1684	 * If there is no work to report other than ourselves,
1685	 * then it can return directly to userland.
1686	 */
1687	if (TD_CAN_UNBIND(td)) {
1688		mtx_lock_spin(&sched_lock);
1689		td->td_flags &= ~TDF_CAN_UNBIND;
1690		if ((td->td_flags & TDF_NEEDSIGCHK) == 0 &&
1691		    (kg->kg_completed == NULL) &&
1692		    (ku->ku_flags & KUF_DOUPCALL) == 0 &&
1693		    (kg->kg_upquantum && ticks < kg->kg_nextupcall)) {
1694			mtx_unlock_spin(&sched_lock);
1695			thread_update_usr_ticks(td, 0);
1696			nanotime(&ts);
1697			error = copyout(&ts,
1698				(caddr_t)&ku->ku_mailbox->km_timeofday,
1699				sizeof(ts));
1700			td->td_mailbox = 0;
1701			ku->ku_mflags = 0;
1702			if (error)
1703				goto out;
1704			return (0);
1705		}
1706		mtx_unlock_spin(&sched_lock);
1707		thread_export_context(td, 0);
1708		/*
1709		 * There is something to report, and we own an upcall
1710		 * structure, so we can go to userland.
1711		 * Turn ourselves into an upcall thread.
1712		 */
1713		td->td_pflags |= TDP_UPCALLING;
1714	} else if (td->td_mailbox && (ku == NULL)) {
1715		thread_export_context(td, 1);
1716		PROC_LOCK(p);
1717		/*
1718		 * There are upcall threads waiting for
1719		 * work to do, wake one of them up.
1720		 * XXXKSE Maybe wake all of them up.
1721		 */
1722		if (kg->kg_upsleeps)
1723			wakeup_one(&kg->kg_completed);
1724		mtx_lock_spin(&sched_lock);
1725		thread_stopped(p);
1726		thread_exit();
1727		/* NOTREACHED */
1728	}
1729
1730	KASSERT(ku != NULL, ("upcall is NULL\n"));
1731	KASSERT(TD_CAN_UNBIND(td) == 0, ("can unbind"));
1732
1733	if (p->p_numthreads > max_threads_per_proc) {
1734		max_threads_hits++;
1735		PROC_LOCK(p);
1736		mtx_lock_spin(&sched_lock);
1737		p->p_maxthrwaits++;
1738		while (p->p_numthreads > max_threads_per_proc) {
1739			upcalls = 0;
1740			FOREACH_KSEGRP_IN_PROC(p, kg2) {
1741				if (kg2->kg_numupcalls == 0)
1742					upcalls++;
1743				else
1744					upcalls += kg2->kg_numupcalls;
1745			}
1746			if (upcalls >= max_threads_per_proc)
1747				break;
1748			mtx_unlock_spin(&sched_lock);
1749			if (msleep(&p->p_numthreads, &p->p_mtx, PPAUSE|PCATCH,
1750			    "maxthreads", NULL)) {
1751				mtx_lock_spin(&sched_lock);
1752				break;
1753			} else {
1754				mtx_lock_spin(&sched_lock);
1755			}
1756		}
1757		p->p_maxthrwaits--;
1758		mtx_unlock_spin(&sched_lock);
1759		PROC_UNLOCK(p);
1760	}
1761
1762	if (td->td_pflags & TDP_UPCALLING) {
1763		uts_crit = 0;
1764		kg->kg_nextupcall = ticks+kg->kg_upquantum;
1765		/*
1766		 * There is no more work to do and we are going to ride
1767		 * this thread up to userland as an upcall.
1768		 * Do the last parts of the setup needed for the upcall.
1769		 */
1770		CTR3(KTR_PROC, "userret: upcall thread %p (pid %d, %s)",
1771		    td, td->td_proc->p_pid, td->td_proc->p_comm);
1772
1773		td->td_pflags &= ~TDP_UPCALLING;
1774		if (ku->ku_flags & KUF_DOUPCALL) {
1775			mtx_lock_spin(&sched_lock);
1776			ku->ku_flags &= ~KUF_DOUPCALL;
1777			mtx_unlock_spin(&sched_lock);
1778		}
1779		/*
1780		 * Set user context to the UTS
1781		 */
1782		if (!(ku->ku_mflags & KMF_NOUPCALL)) {
1783			cpu_set_upcall_kse(td, ku);
1784			error = suword(&ku->ku_mailbox->km_curthread, 0);
1785			if (error)
1786				goto out;
1787		}
1788
1789		/*
1790		 * Unhook the list of completed threads.
1791		 * Anything that completes after this gets to
1792		 * come in next time.
1793		 * Put the list of completed thread mailboxes on
1794		 * this KSE's mailbox.
1795		 */
1796		if (!(ku->ku_mflags & KMF_NOCOMPLETED) &&
1797		    (error = thread_link_mboxes(kg, ku)) != 0)
1798			goto out;
1799	}
1800	if (!uts_crit) {
1801		nanotime(&ts);
1802		error = copyout(&ts, &ku->ku_mailbox->km_timeofday, sizeof(ts));
1803	}
1804
1805out:
1806	if (error) {
1807		/*
1808		 * Things are going to be so screwed we should just kill
1809		 * the process.
1810		 * How do we do that?
1811		 */
1812		PROC_LOCK(td->td_proc);
1813		psignal(td->td_proc, SIGSEGV);
1814		PROC_UNLOCK(td->td_proc);
1815	} else {
1816		/*
1817		 * Optimisation:
1818		 * Ensure that we have a spare thread available,
1819		 * for when we re-enter the kernel.
1820		 */
1821		if (td->td_standin == NULL)
1822			thread_alloc_spare(td, NULL);
1823	}
1824
1825	ku->ku_mflags = 0;
1826	/*
1827	 * Clear thread mailbox first, then clear system tick count.
1828	 * The order is important because thread_statclock() uses the
1829	 * mailbox pointer to see if it is a userland thread or
1830	 * a UTS kernel thread.
1831	 */
1832	td->td_mailbox = NULL;
1833	td->td_usticks = 0;
1834	return (error);	/* go sync */
1835}
1836
1837/*
1838 * Enforce single-threading.
1839 *
1840 * Returns 1 if the caller must abort (another thread is waiting to
1841 * exit the process or similar). Process is locked!
1842 * Returns 0 when you are successfully the only thread running.
1843 * A process has successfully single-threaded in suspend mode when
1844 * there are no threads in user mode. Threads in the kernel must be
1845 * allowed to continue until they get to the user boundary. They may even
1846 * copy out their return values and data before suspending. They may however be
1847 * accelerated in reaching the user boundary as we will wake up
1848 * any sleeping threads that are interruptible (PCATCH).
1849 */
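/*
 * Outline of the loop below: mark the process P_STOPPED_SINGLE, record
 * ourselves as p_singlethread, then repeatedly sweep the other threads --
 * posting ASTs and either aborting their interruptible sleeps (SINGLE_EXIT)
 * or suspending them -- and suspend ourselves until we are the only
 * unsuspended thread left.
 */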
1850int
1851thread_single(int force_exit)
1852{
1853	struct thread *td;
1854	struct thread *td2;
1855	struct proc *p;
1856
1857	td = curthread;
1858	p = td->td_proc;
1859	mtx_assert(&Giant, MA_OWNED);
1860	PROC_LOCK_ASSERT(p, MA_OWNED);
1861	KASSERT((td != NULL), ("curthread is NULL"));
1862
1863	if ((p->p_flag & P_SA) == 0 && p->p_numthreads == 1)
1864		return (0);
1865
1866	/* Is someone already single threading? */
1867	if (p->p_singlethread)
1868		return (1);
1869
1870	if (force_exit == SINGLE_EXIT) {
1871		p->p_flag |= P_SINGLE_EXIT;
1872	} else
1873		p->p_flag &= ~P_SINGLE_EXIT;
1874	p->p_flag |= P_STOPPED_SINGLE;
1875	mtx_lock_spin(&sched_lock);
1876	p->p_singlethread = td;
1877	while ((p->p_numthreads - p->p_suspcount) != 1) {
1878		FOREACH_THREAD_IN_PROC(p, td2) {
1879			if (td2 == td)
1880				continue;
1881			td2->td_flags |= TDF_ASTPENDING;
1882			if (TD_IS_INHIBITED(td2)) {
1883				if (force_exit == SINGLE_EXIT) {
1884					if (TD_IS_SUSPENDED(td2)) {
1885						thread_unsuspend_one(td2);
1886					}
1887					if (TD_ON_SLEEPQ(td2) &&
1888					    (td2->td_flags & TDF_SINTR)) {
1889						if (td2->td_flags & TDF_CVWAITQ)
1890							cv_abort(td2);
1891						else
1892							abortsleep(td2);
1893					}
1894				} else {
1895					if (TD_IS_SUSPENDED(td2))
1896						continue;
1897					/*
1898					 * maybe other inhibited states too?
1899					 * XXXKSE Is it totally safe to
1900					 * suspend a non-interruptible thread?
1901					 */
1902					if (td2->td_inhibitors &
1903					    (TDI_SLEEPING | TDI_SWAPPED))
1904						thread_suspend_one(td2);
1905				}
1906			}
1907		}
1908		/*
1909		 * Maybe we suspended some threads.. was it enough?
1910		 */
1911		if ((p->p_numthreads - p->p_suspcount) == 1)
1912			break;
1913
1914		/*
1915		 * Wake us up when everyone else has suspended.
1916		 * In the meantime we suspend as well.
1917		 */
1918		thread_suspend_one(td);
1919		DROP_GIANT();
1920		PROC_UNLOCK(p);
1921		p->p_stats->p_ru.ru_nvcsw++;
1922		mi_switch();
1923		mtx_unlock_spin(&sched_lock);
1924		PICKUP_GIANT();
1925		PROC_LOCK(p);
1926		mtx_lock_spin(&sched_lock);
1927	}
1928	if (force_exit == SINGLE_EXIT) {
1929		if (td->td_upcall)
1930			upcall_remove(td);
1931		kse_purge(p, td);
1932	}
1933	mtx_unlock_spin(&sched_lock);
1934	return (0);
1935}
1936
1937/*
1938 * Called in from locations that can safely check to see
1939 * whether we have to suspend or at least throttle for a
1940 * single-thread event (e.g. fork).
1941 *
1942 * Such locations include userret().
1943 * If the "return_instead" argument is non-zero, the thread must be able to
1944 * accept 0 (caller may continue), or 1 (caller must abort) as a result.
1945 *
1946 * The 'return_instead' argument tells the function if it may do a
1947 * thread_exit() or suspend, or whether the caller must abort and back
1948 * out instead.
1949 *
1950 * If the thread that set the single_threading request has set the
1951 * P_SINGLE_EXIT bit in the process flags then this call will never return
1952 * if 'return_instead' is false, but will exit.
1953 *
1954 * P_SINGLE_EXIT | return_instead == 0| return_instead != 0
1955 *---------------+--------------------+---------------------
1956 *       0       | returns 0          |   returns 0 or 1
1957 *               | when ST ends       |   immediately
1958 *---------------+--------------------+---------------------
1959 *       1       | thread exits       |   returns 1
1960 *               |                    |  immediately
1961 * 0 = thread_exit() or suspension ok,
1962 * other = return error instead of stopping the thread.
1963 *
1964 * While a full suspension is in effect, even a single-threading
1965 * thread would be suspended if it made this call (but it shouldn't).
1966 * This call should only be made from places where
1967 * thread_exit() would be safe as that may be the outcome unless
1968 * return_instead is set.
1969 */
1970int
1971thread_suspend_check(int return_instead)
1972{
1973	struct thread *td;
1974	struct proc *p;
1975
1976	td = curthread;
1977	p = td->td_proc;
1978	PROC_LOCK_ASSERT(p, MA_OWNED);
1979	while (P_SHOULDSTOP(p)) {
1980		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
1981			KASSERT(p->p_singlethread != NULL,
1982			    ("singlethread not set"));
1983			/*
1984			 * The only suspension in action is a
1985			 * single-threading. The single threader need not stop.
1986			 * XXX Should be safe to access unlocked
1987			 * as it can only be set to be true by us.
1988			 */
1989			if (p->p_singlethread == td)
1990				return (0);	/* Exempt from stopping. */
1991		}
1992		if (return_instead)
1993			return (1);
1994
1995		mtx_lock_spin(&sched_lock);
1996		thread_stopped(p);
1997		/*
1998		 * If the process is waiting for us to exit,
1999		 * this thread should just suicide.
2000		 * Assumes that P_SINGLE_EXIT implies P_STOPPED_SINGLE.
2001		 */
2002		if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) {
2003			while (mtx_owned(&Giant))
2004				mtx_unlock(&Giant);
2005			if (p->p_flag & P_SA)
2006				thread_exit();
2007			else
2008				thr_exit1();
2009		}
2010
2011		/*
2012		 * When a thread suspends, it just
2013		 * moves to the process's suspend queue
2014		 * and stays there.
2015		 */
2016		thread_suspend_one(td);
2017		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
2018			if (p->p_numthreads == p->p_suspcount) {
2019				thread_unsuspend_one(p->p_singlethread);
2020			}
2021		}
2022		DROP_GIANT();
2023		PROC_UNLOCK(p);
2024		p->p_stats->p_ru.ru_nivcsw++;
2025		mi_switch();
2026		mtx_unlock_spin(&sched_lock);
2027		PICKUP_GIANT();
2028		PROC_LOCK(p);
2029	}
2030	return (0);
2031}
2032
2033void
2034thread_suspend_one(struct thread *td)
2035{
2036	struct proc *p = td->td_proc;
2037
2038	mtx_assert(&sched_lock, MA_OWNED);
2039	PROC_LOCK_ASSERT(p, MA_OWNED);
2040	KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
2041	p->p_suspcount++;
2042	TD_SET_SUSPENDED(td);
2043	TAILQ_INSERT_TAIL(&p->p_suspended, td, td_runq);
2044	/*
2045	 * Hack: If we are suspending but are on the sleep queue
2046	 * then we are in msleep or the cv equivalent. We
2047	 * want to look like we have two inhibitors.
2048	 * May already be set.. doesn't matter.
2049	 */
2050	if (TD_ON_SLEEPQ(td))
2051		TD_SET_SLEEPING(td);
2052}
2053
2054void
2055thread_unsuspend_one(struct thread *td)
2056{
2057	struct proc *p = td->td_proc;
2058
2059	mtx_assert(&sched_lock, MA_OWNED);
2060	PROC_LOCK_ASSERT(p, MA_OWNED);
2061	TAILQ_REMOVE(&p->p_suspended, td, td_runq);
2062	TD_CLR_SUSPENDED(td);
2063	p->p_suspcount--;
2064	setrunnable(td);
2065}
2066
2067/*
2068 * Allow all threads blocked by single threading to continue running.
2069 */
2070void
2071thread_unsuspend(struct proc *p)
2072{
2073	struct thread *td;
2074
2075	mtx_assert(&sched_lock, MA_OWNED);
2076	PROC_LOCK_ASSERT(p, MA_OWNED);
2077	if (!P_SHOULDSTOP(p)) {
2078		while ((td = TAILQ_FIRST(&p->p_suspended))) {
2079			thread_unsuspend_one(td);
2080		}
2081	} else if ((P_SHOULDSTOP(p) == P_STOPPED_SINGLE) &&
2082	    (p->p_numthreads == p->p_suspcount)) {
2083		/*
2084		 * Stopping everything also did the job for the single
2085		 * threading request. Now we've downgraded to single-threaded,
2086		 * let it continue.
2087		 */
2088		thread_unsuspend_one(p->p_singlethread);
2089	}
2090}
2091
2092void
2093thread_single_end(void)
2094{
2095	struct thread *td;
2096	struct proc *p;
2097
2098	td = curthread;
2099	p = td->td_proc;
2100	PROC_LOCK_ASSERT(p, MA_OWNED);
2101	p->p_flag &= ~P_STOPPED_SINGLE;
2102	mtx_lock_spin(&sched_lock);
2103	p->p_singlethread = NULL;
2104	/*
2105	 * If there are other threads they may now run,
2106	 * unless of course there is a blanket 'stop order'
2107	 * on the process. The single threader must be allowed
2108	 * to continue however as this is a bad place to stop.
2109	 */
2110	if ((p->p_numthreads != 1) && (!P_SHOULDSTOP(p))) {
2111		while (( td = TAILQ_FIRST(&p->p_suspended))) {
2112		while ((td = TAILQ_FIRST(&p->p_suspended))) {
2113		}
2114	}
2115	mtx_unlock_spin(&sched_lock);
2116}
2117
2118
2119