1/*
2 * Copyright (C) 2001 Julian Elischer <julian@freebsd.org>.
3 *  All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice(s), this list of conditions and the following disclaimer as
10 *    the first lines of this file unmodified other than the possible
11 *    addition of one or more copyright notices.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 *    notice(s), this list of conditions and the following disclaimer in the
14 *    documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
17 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
18 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
19 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
20 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
21 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
22 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
23 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
26 * DAMAGE.
27 *
28 * $FreeBSD: head/sys/kern/kern_thread.c 106242 2002-10-31 08:00:51Z davidxu $
29 */
30
31#include <sys/param.h>
32#include <sys/systm.h>
33#include <sys/kernel.h>
34#include <sys/lock.h>
35#include <sys/malloc.h>
36#include <sys/mutex.h>
37#include <sys/proc.h>
38#include <sys/sysctl.h>
39#include <sys/sysproto.h>
40#include <sys/filedesc.h>
41#include <sys/tty.h>
42#include <sys/signalvar.h>
43#include <sys/sx.h>
44#include <sys/user.h>
45#include <sys/jail.h>
46#include <sys/kse.h>
47#include <sys/ktr.h>
48#include <sys/ucontext.h>
49
50#include <vm/vm.h>
51#include <vm/vm_object.h>
52#include <vm/pmap.h>
53#include <vm/uma.h>
54#include <vm/vm_map.h>
55
56#include <machine/frame.h>
57
58/*
59 * KSEGRP related storage.
60 */
61static uma_zone_t ksegrp_zone;
62static uma_zone_t kse_zone;
63static uma_zone_t thread_zone;
64
65/* DEBUG ONLY */
66SYSCTL_NODE(_kern, OID_AUTO, threads, CTLFLAG_RW, 0, "thread allocation");
67static int oiks_debug = 1;	/* 0 disable, 1 printf, 2 enter debugger */
68SYSCTL_INT(_kern_threads, OID_AUTO, oiks, CTLFLAG_RW,
69	&oiks_debug, 0, "OIKS thread debug");
70
71static int max_threads_per_proc = 10;
72SYSCTL_INT(_kern_threads, OID_AUTO, max_per_proc, CTLFLAG_RW,
73	&max_threads_per_proc, 0, "Limit on threads per proc");
74
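/*
 * Byte span between two members of a struct; used with bzero()/bcopy()
 * below to clear or copy the sections delimited by the *_startzero/_endzero
 * and *_startcopy/_endcopy marker fields.
 */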
75#define RANGEOF(type, start, end) (offsetof(type, end) - offsetof(type, start))
76
77struct threadqueue zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads);
78TAILQ_HEAD(, kse) zombie_kses = TAILQ_HEAD_INITIALIZER(zombie_kses);
79TAILQ_HEAD(, ksegrp) zombie_ksegrps = TAILQ_HEAD_INITIALIZER(zombie_ksegrps);
80struct mtx zombie_thread_lock;
81MTX_SYSINIT(zombie_thread_lock, &zombie_thread_lock,
82    "zombie_thread_lock", MTX_SPIN);
83
84
85
86void kse_purge(struct proc *p, struct thread *td);
87/*
88 * Prepare a thread for use.
89 */
90static void
91thread_ctor(void *mem, int size, void *arg)
92{
93	struct thread	*td;
94
95	KASSERT((size == sizeof(struct thread)),
96	    ("size mismatch: %d != %d\n", size, (int)sizeof(struct thread)));
97
98	td = (struct thread *)mem;
99	td->td_state = TDS_INACTIVE;
100	td->td_flags |= TDF_UNBOUND;
101}
102
103/*
104 * Reclaim a thread after use.
105 */
106static void
107thread_dtor(void *mem, int size, void *arg)
108{
109	struct thread	*td;
110
111	KASSERT((size == sizeof(struct thread)),
112	    ("size mismatch: %d != %d\n", size, (int)sizeof(struct thread)));
113
114	td = (struct thread *)mem;
115
116#ifdef INVARIANTS
117	/* Verify that this thread is in a safe state to free. */
118	switch (td->td_state) {
119	case TDS_INHIBITED:
120	case TDS_RUNNING:
121	case TDS_CAN_RUN:
122	case TDS_RUNQ:
123		/*
124		 * We must never unlink a thread that is in one of
125		 * these states, because it is currently active.
126		 */
127		panic("bad state for thread unlinking");
128		/* NOTREACHED */
129	case TDS_INACTIVE:
130		break;
131	default:
132		panic("bad thread state");
133		/* NOTREACHED */
134	}
135#endif
136}
137
138/*
139 * Initialize type-stable parts of a thread (when newly created).
140 */
141static void
142thread_init(void *mem, int size)
143{
144	struct thread	*td;
145
146	KASSERT((size == sizeof(struct thread)),
147	    ("size mismatch: %d != %d\n", size, (int)sizeof(struct thread)));
148
149	td = (struct thread *)mem;
150	mtx_lock(&Giant);
151	pmap_new_thread(td, 0);
152	mtx_unlock(&Giant);
153	cpu_thread_setup(td);
154}
155
156/*
157 * Tear down type-stable parts of a thread (just before being discarded).
158 */
159static void
160thread_fini(void *mem, int size)
161{
162	struct thread	*td;
163
164	KASSERT((size == sizeof(struct thread)),
165	    ("size mismatch: %d != %d\n", size, (int)sizeof(struct thread)));
166
167	td = (struct thread *)mem;
168	pmap_dispose_thread(td);
169}
170
171/*
172 * Link a KSE into its KSE group's list of KSEs.
173 */
174void
175kse_link(struct kse *ke, struct ksegrp *kg)
176{
177	struct proc *p = kg->kg_proc;
178
179	TAILQ_INSERT_HEAD(&kg->kg_kseq, ke, ke_kglist);
180	kg->kg_kses++;
181	ke->ke_state = KES_UNQUEUED;
182	ke->ke_proc	= p;
183	ke->ke_ksegrp	= kg;
184	ke->ke_thread	= NULL;
185	ke->ke_oncpu = NOCPU;
186}
187
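/*
 * Unlink a KSE from its KSE group, dissolving the group as well if this
 * was its last KSE, and stash the KSE for later reaping.
 */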
188void
189kse_unlink(struct kse *ke)
190{
191	struct ksegrp *kg;
192
193	mtx_assert(&sched_lock, MA_OWNED);
194	kg = ke->ke_ksegrp;
195	if (ke->ke_state == KES_IDLE) {
196		kg->kg_idle_kses--;
197		TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
198	}
199
200	TAILQ_REMOVE(&kg->kg_kseq, ke, ke_kglist);
201	if (--kg->kg_kses == 0) {
202			ksegrp_unlink(kg);
203	}
204	/*
205	 * Aggregate stats from the KSE
206	 */
207	kse_stash(ke);
208}
209
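/*
 * Initialize a KSE group's queues and counters and link it into a process.
 */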
210void
211ksegrp_link(struct ksegrp *kg, struct proc *p)
212{
213
214	TAILQ_INIT(&kg->kg_threads);
215	TAILQ_INIT(&kg->kg_runq);	/* links with td_runq */
216	TAILQ_INIT(&kg->kg_slpq);	/* links with td_runq */
217	TAILQ_INIT(&kg->kg_kseq);	/* all kses in ksegrp */
218	TAILQ_INIT(&kg->kg_iq);		/* idle kses in ksegrp */
219	TAILQ_INIT(&kg->kg_lq);		/* loan kses in ksegrp */
220	kg->kg_proc	= p;
221/* the following counters are in the -zero- section and may not need clearing */
222	kg->kg_numthreads = 0;
223	kg->kg_runnable = 0;
224	kg->kg_kses = 0;
225	kg->kg_idle_kses = 0;
226	kg->kg_loan_kses = 0;
227	kg->kg_runq_kses = 0; /* XXXKSE change name */
228/* link it in now that it's consistent */
229	p->p_numksegrps++;
230	TAILQ_INSERT_HEAD(&p->p_ksegrps, kg, kg_ksegrp);
231}
232
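/*
 * Unlink an empty KSE group from its process and stash it for later reaping.
 */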
233void
234ksegrp_unlink(struct ksegrp *kg)
235{
236	struct proc *p;
237
238	mtx_assert(&sched_lock, MA_OWNED);
239	p = kg->kg_proc;
240	KASSERT(((kg->kg_numthreads == 0) && (kg->kg_kses == 0)),
241	    ("kseg_unlink: residual threads or KSEs"));
242	TAILQ_REMOVE(&p->p_ksegrps, kg, kg_ksegrp);
243	p->p_numksegrps--;
244	/*
245	 * Aggregate stats from the KSE
246	 */
247	ksegrp_stash(kg);
248}
249
250/*
251 * For a newly created process,
252 * link up the structure and its initial threads etc.
253 */
254void
255proc_linkup(struct proc *p, struct ksegrp *kg,
256			struct kse *ke, struct thread *td)
257{
258
259	TAILQ_INIT(&p->p_ksegrps);	     /* all ksegrps in proc */
260	TAILQ_INIT(&p->p_threads);	     /* all threads in proc */
261	TAILQ_INIT(&p->p_suspended);	     /* Threads suspended */
262	p->p_numksegrps = 0;
263	p->p_numthreads = 0;
264
265	ksegrp_link(kg, p);
266	kse_link(ke, kg);
267	thread_link(td, kg);
268}
269
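/*
 * System call: interrupt the thread whose mailbox matches uap->tmbx,
 * aborting any interruptible sleep it is currently in.
 */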
270int
271kse_thr_interrupt(struct thread *td, struct kse_thr_interrupt_args *uap)
272{
273	struct proc *p;
274	struct thread *td2;
275
276	p = td->td_proc;
277	/* KSE-enabled processes only, please. */
278	if (!(p->p_flag & P_KSES))
279		return (EINVAL);
280	if (uap->tmbx == NULL)
281		return (EINVAL);
282	mtx_lock_spin(&sched_lock);
283	FOREACH_THREAD_IN_PROC(p, td2) {
284		if (td2->td_mailbox == uap->tmbx) {
285			td2->td_flags |= TDF_INTERRUPT;
286			if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR)) {
287				if (td2->td_flags & TDF_CVWAITQ)
288					cv_abort(td2);
289				else
290					abortsleep(td2);
291			}
292			mtx_unlock_spin(&sched_lock);
293			td->td_retval[0] = 0;
294			td->td_retval[1] = 0;
295			return (0);
296		}
297	}
298	mtx_unlock_spin(&sched_lock);
299	return (ESRCH);
300}
301
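/*
 * System call: exit a bound thread and its KSE.  Fails with EDEADLK if
 * that would strand other threads in the group; if this is the last
 * thread of the last group, the process simply leaves KSE mode.
 */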
302int
303kse_exit(struct thread *td, struct kse_exit_args *uap)
304{
305	struct proc *p;
306	struct ksegrp *kg;
307
308	p = td->td_proc;
309	/* KSE-enabled processes only, please. */
310	if (!(p->p_flag & P_KSES))
311		return (EINVAL);
312	/* must be a bound thread */
313	if (td->td_flags & TDF_UNBOUND)
314		return (EINVAL);
315	kg = td->td_ksegrp;
316	/* serialize killing kse */
317	PROC_LOCK(p);
318	mtx_lock_spin(&sched_lock);
319	if ((kg->kg_kses == 1) && (kg->kg_numthreads > 1)) {
320		mtx_unlock_spin(&sched_lock);
321		PROC_UNLOCK(p);
322		return (EDEADLK);
323	}
324	if ((p->p_numthreads == 1) && (p->p_numksegrps == 1)) {
325		p->p_flag &= ~P_KSES;
326		mtx_unlock_spin(&sched_lock);
327		PROC_UNLOCK(p);
328	} else {
329		while (mtx_owned(&Giant))
330			mtx_unlock(&Giant);
331		td->td_kse->ke_flags |= KEF_EXIT;
332		thread_exit();
333		/* NOTREACHED */
334	}
335	return (0);
336}
337
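/*
 * System call: in a KSE process the calling thread simply exits,
 * giving its KSE back for reuse; otherwise EINVAL.
 */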
338int
339kse_release(struct thread *td, struct kse_release_args *uap)
340{
341	struct proc *p;
342
343	p = td->td_proc;
344	/* KSE-enabled processes only, please. */
345	if (p->p_flag & P_KSES) {
346		PROC_LOCK(p);
347		mtx_lock_spin(&sched_lock);
348		thread_exit();
349		/* NOTREACHED */
350	}
351	return (EINVAL);
352}
353
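/*
 * System call: schedule an upcall on an idle KSE, found either by its
 * mailbox address or, if no mailbox is given, taken from the caller's
 * ksegrp's idle queue.
 */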
354/* struct kse_wakeup_args {
355	struct kse_mailbox *mbx;
356}; */
357int
358kse_wakeup(struct thread *td, struct kse_wakeup_args *uap)
359{
360	struct proc *p;
361	struct kse *ke, *ke2;
362	struct ksegrp *kg;
363
364	p = td->td_proc;
365	/* KSE-enabled processes only, please. */
366	if (!(p->p_flag & P_KSES))
367		return EINVAL;
368	if (td->td_standin == NULL)
369		td->td_standin = thread_alloc();
370	ke = NULL;
371	mtx_lock_spin(&sched_lock);
372	if (uap->mbx) {
373		FOREACH_KSEGRP_IN_PROC(p, kg) {
374			FOREACH_KSE_IN_GROUP(kg, ke2) {
375				if (ke2->ke_mailbox != uap->mbx)
376					continue;
377				if (ke2->ke_state == KES_IDLE) {
378					ke = ke2;
379					goto found;
380				} else {
381					mtx_unlock_spin(&sched_lock);
382					td->td_retval[0] = 0;
383					td->td_retval[1] = 0;
384					return (0);
385				}
386			}
387		}
388	} else {
389		kg = td->td_ksegrp;
390		ke = TAILQ_FIRST(&kg->kg_iq);
391	}
392	if (ke == NULL) {
393		mtx_unlock_spin(&sched_lock);
394		return (ESRCH);
395	}
396found:
397	thread_schedule_upcall(td, ke);
398	mtx_unlock_spin(&sched_lock);
399	td->td_retval[0] = 0;
400	td->td_retval[1] = 0;
401	return (0);
402}
403
404/*
405 * No new KSEG: first call: use current KSE, don't schedule an upcall.
406 * In all other situations, allocate a new KSE and schedule an upcall on it.
407 */
408/* struct kse_create_args {
409	struct kse_mailbox *mbx;
410	int newgroup;
411}; */
412int
413kse_create(struct thread *td, struct kse_create_args *uap)
414{
415	struct kse *newke;
416	struct kse *ke;
417	struct ksegrp *newkg;
418	struct ksegrp *kg;
419	struct proc *p;
420	struct kse_mailbox mbx;
421	int err;
422
423	p = td->td_proc;
424	if ((err = copyin(uap->mbx, &mbx, sizeof(mbx))))
425		return (err);
426
427	p->p_flag |= P_KSES; /* easier to just set it than to test and set */
428	kg = td->td_ksegrp;
429	if (uap->newgroup) {
430		/*
431		 * If we want a new KSEGRP it doesn't matter whether
432		 * we have already fired up KSE mode before or not.
433		 * We put the process in KSE mode and create a new KSEGRP
434		 * and KSE. If our KSE has not got a mailbox yet then
435		 * that doesn't matter, just leave it that way. It will
436		 * ensure that this thread stays BOUND. It's possible
437		 * that the call came from a threaded library and the main
438		 * program knows nothing of threads.
439		 */
440		newkg = ksegrp_alloc();
441		bzero(&newkg->kg_startzero, RANGEOF(struct ksegrp,
442		      kg_startzero, kg_endzero));
443		bcopy(&kg->kg_startcopy, &newkg->kg_startcopy,
444		      RANGEOF(struct ksegrp, kg_startcopy, kg_endcopy));
445		newke = kse_alloc();
446	} else {
447		/*
448		 * Otherwise, if we have already set this KSE
449		 * to have a mailbox, we want to make another KSE here,
450		 * but only if we are not already at the limit, which
451		 * is 1 per CPU max.
452		 *
453		 * If the current KSE doesn't have a mailbox we just use it
454		 * and give it one.
455		 *
456		 * Because we don't like to access
457		 * the KSE outside of schedlock if we are UNBOUND,
458		 * (because it can change if we are preempted by an interrupt)
459		 * we can deduce it as having a mailbox if we are UNBOUND,
460		 * and only need to actually look at it if we are BOUND,
461		 * which is safe.
462		 */
463		if ((td->td_flags & TDF_UNBOUND) || td->td_kse->ke_mailbox) {
464#if 0  /* while debugging */
465#ifdef SMP
466			if (kg->kg_kses > mp_ncpus)
467#endif
468				return (EPROCLIM);
469#endif
470			newke = kse_alloc();
471		} else {
472			newke = NULL;
473		}
474		newkg = NULL;
475	}
476	if (newke) {
477		bzero(&newke->ke_startzero, RANGEOF(struct kse,
478		      ke_startzero, ke_endzero));
479#if 0
480		bcopy(&ke->ke_startcopy, &newke->ke_startcopy,
481		      RANGEOF(struct kse, ke_startcopy, ke_endcopy));
482#endif
483		/* For the first call this may not have been set */
484		if (td->td_standin == NULL) {
485			td->td_standin = thread_alloc();
486		}
487		mtx_lock_spin(&sched_lock);
488		if (newkg)
489			ksegrp_link(newkg, p);
490		else
491			newkg = kg;
492		kse_link(newke, newkg);
493		if (p->p_sflag & PS_NEEDSIGCHK)
494			newke->ke_flags |= KEF_ASTPENDING;
495		newke->ke_mailbox = uap->mbx;
496		newke->ke_upcall = mbx.km_func;
497		bcopy(&mbx.km_stack, &newke->ke_stack, sizeof(stack_t));
498		thread_schedule_upcall(td, newke);
499		mtx_unlock_spin(&sched_lock);
500	} else {
501		/*
502		 * If we didn't allocate a new KSE then we are using
503		 * the existing (BOUND) kse.
504		 */
505		ke = td->td_kse;
506		ke->ke_mailbox = uap->mbx;
507		ke->ke_upcall = mbx.km_func;
508		bcopy(&mbx.km_stack, &ke->ke_stack, sizeof(stack_t));
509	}
510	/*
511	 * Fill out the KSE-mode specific fields of the new kse.
512	 */
513
514	td->td_retval[0] = 0;
515	td->td_retval[1] = 0;
516	return (0);
517}
518
519/*
520 * Fill a ucontext_t with a thread's context information.
521 *
522 * This is an analogue to getcontext(3).
523 */
524void
525thread_getcontext(struct thread *td, ucontext_t *uc)
526{
527
528/*
529 * XXX this is declared in a MD include file, i386/include/ucontext.h but
530 * is used in MI code.
531 */
532#ifdef __i386__
533	get_mcontext(td, &uc->uc_mcontext);
534#endif
535	uc->uc_sigmask = td->td_proc->p_sigmask;
536}
537
538/*
539 * Set a thread's context from a ucontext_t.
540 *
541 * This is an analogue to setcontext(3).
542 */
543int
544thread_setcontext(struct thread *td, ucontext_t *uc)
545{
546	int ret;
547
548/*
549 * XXX this is declared in a MD include file, i386/include/ucontext.h but
550 * is used in MI code.
551 */
552#ifdef __i386__
553	ret = set_mcontext(td, &uc->uc_mcontext);
554#else
555	ret = ENOSYS;
556#endif
557	if (ret == 0) {
558		SIG_CANTMASK(uc->uc_sigmask);
559		PROC_LOCK(td->td_proc);
560		td->td_proc->p_sigmask = uc->uc_sigmask;
561		PROC_UNLOCK(td->td_proc);
562	}
563	return (ret);
564}
565
566/*
567 * Initialize global thread allocation resources.
568 */
569void
570threadinit(void)
571{
572
573#ifndef __ia64__
574	thread_zone = uma_zcreate("THREAD", sizeof (struct thread),
575	    thread_ctor, thread_dtor, thread_init, thread_fini,
576	    UMA_ALIGN_CACHE, 0);
577#else
578	/*
579	 * XXX the ia64 kstack allocator is really lame and is at the mercy
580	 * of contigmalloc().  This hackery is to pre-construct a whole
581	 * pile of thread structures with associated kernel stacks early
582	 * in the system startup while contigmalloc() still works. Once we
583	 * have them, keep them.  Sigh.
584	 */
585	thread_zone = uma_zcreate("THREAD", sizeof (struct thread),
586	    thread_ctor, thread_dtor, thread_init, thread_fini,
587	    UMA_ALIGN_CACHE, UMA_ZONE_NOFREE);
588	uma_prealloc(thread_zone, 512);		/* XXX arbitrary */
589#endif
590	ksegrp_zone = uma_zcreate("KSEGRP", sizeof (struct ksegrp),
591	    NULL, NULL, NULL, NULL,
592	    UMA_ALIGN_CACHE, 0);
593	kse_zone = uma_zcreate("KSE", sizeof (struct kse),
594	    NULL, NULL, NULL, NULL,
595	    UMA_ALIGN_CACHE, 0);
596}
597
598/*
599 * Stash an embarrassingly extra thread into the zombie thread queue.
600 */
601void
602thread_stash(struct thread *td)
603{
604	mtx_lock_spin(&zombie_thread_lock);
605	TAILQ_INSERT_HEAD(&zombie_threads, td, td_runq);
606	mtx_unlock_spin(&zombie_thread_lock);
607}
608
609/*
610 * Stash an embarrassingly extra kse into the zombie kse queue.
611 */
612void
613kse_stash(struct kse *ke)
614{
615	mtx_lock_spin(&zombie_thread_lock);
616	TAILQ_INSERT_HEAD(&zombie_kses, ke, ke_procq);
617	mtx_unlock_spin(&zombie_thread_lock);
618}
619
620/*
621 * Stash an embarrassingly extra ksegrp into the zombie ksegrp queue.
622 */
623void
624ksegrp_stash(struct ksegrp *kg)
625{
626	mtx_lock_spin(&zombie_thread_lock);
627	TAILQ_INSERT_HEAD(&zombie_ksegrps, kg, kg_ksegrp);
628	mtx_unlock_spin(&zombie_thread_lock);
629}
630
631/*
632 * Reap zombie threads.
633 */
634void
635thread_reap(void)
636{
637	struct thread *td_first, *td_next;
638	struct kse *ke_first, *ke_next;
639	struct ksegrp *kg_first, * kg_next;
640
641	/*
642	 * Don't even bother to lock if none at this instant;
643	 * we really don't care about the next instant.
644	 */
645	if ((!TAILQ_EMPTY(&zombie_threads))
646	    || (!TAILQ_EMPTY(&zombie_kses))
647	    || (!TAILQ_EMPTY(&zombie_ksegrps))) {
648		mtx_lock_spin(&zombie_thread_lock);
649		td_first = TAILQ_FIRST(&zombie_threads);
650		ke_first = TAILQ_FIRST(&zombie_kses);
651		kg_first = TAILQ_FIRST(&zombie_ksegrps);
652		if (td_first)
653			TAILQ_INIT(&zombie_threads);
654		if (ke_first)
655			TAILQ_INIT(&zombie_kses);
656		if (kg_first)
657			TAILQ_INIT(&zombie_ksegrps);
658		mtx_unlock_spin(&zombie_thread_lock);
659		while (td_first) {
660			td_next = TAILQ_NEXT(td_first, td_runq);
661			thread_free(td_first);
662			td_first = td_next;
663		}
664		while (ke_first) {
665			ke_next = TAILQ_NEXT(ke_first, ke_procq);
666			kse_free(ke_first);
667			ke_first = ke_next;
668		}
669		while (kg_first) {
670			kg_next = TAILQ_NEXT(kg_first, kg_ksegrp);
671			ksegrp_free(kg_first);
672			kg_first = kg_next;
673		}
674	}
675}
676
677/*
678 * Allocate a ksegrp.
679 */
680struct ksegrp *
681ksegrp_alloc(void)
682{
683	return (uma_zalloc(ksegrp_zone, M_WAITOK));
684}
685
686/*
687 * Allocate a kse.
688 */
689struct kse *
690kse_alloc(void)
691{
692	return (uma_zalloc(kse_zone, M_WAITOK));
693}
694
695/*
696 * Allocate a thread.
697 */
698struct thread *
699thread_alloc(void)
700{
701	thread_reap(); /* check if any zombies to get */
702	return (uma_zalloc(thread_zone, M_WAITOK));
703}
704
705/*
706 * Deallocate a ksegrp.
707 */
708void
709ksegrp_free(struct ksegrp *kg)
710{
711	uma_zfree(ksegrp_zone, kg);
712}
713
714/*
715 * Deallocate a kse.
716 */
717void
718kse_free(struct kse *ke)
719{
720	uma_zfree(kse_zone, ke);
721}
722
723/*
724 * Deallocate a thread.
725 */
726void
727thread_free(struct thread *td)
728{
729	uma_zfree(thread_zone, td);
730}
731
732/*
733 * Store the thread context in the UTS's mailbox,
734 * then add the mailbox at the head of a list we are building in user space.
735 * The list is anchored in the ksegrp structure.
736 */
737int
738thread_export_context(struct thread *td)
739{
740	struct proc *p;
741	struct ksegrp *kg;
742	uintptr_t mbx;
743	void *addr;
744	int error;
745	ucontext_t uc;
746
747	p = td->td_proc;
748	kg = td->td_ksegrp;
749
750	/* Export the user/machine context. */
751#if 0
752	addr = (caddr_t)td->td_mailbox +
753	    offsetof(struct kse_thr_mailbox, tm_context);
754#else /* if user pointer arithmetic is valid in the kernel */
755		addr = (void *)(&td->td_mailbox->tm_context);
756#endif
757	error = copyin(addr, &uc, sizeof(ucontext_t));
758	if (error == 0) {
759		thread_getcontext(td, &uc);
760		error = copyout(&uc, addr, sizeof(ucontext_t));
761
762	}
763	if (error) {
764		PROC_LOCK(p);
765		psignal(p, SIGSEGV);
766		PROC_UNLOCK(p);
767		return (error);
768	}
769	/* Get the address of the list pointer in the latest mailbox. */
770#if 0
771	addr = (caddr_t)td->td_mailbox
772	    + offsetof(struct kse_thr_mailbox , tm_next);
773#else /* if user pointer arithmetic is valid in the kernel */
774	addr = (void *)(&td->td_mailbox->tm_next);
775#endif
776	/*
777	 * Put the saved address of the previous first
778	 * entry into this one
779	 */
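	/* XXXKSE as in thread_link_mboxes() below, an atomic CMPXCH could be used here. */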
780	for (;;) {
781		mbx = (uintptr_t)kg->kg_completed;
782		if (suword(addr, mbx)) {
783			PROC_LOCK(p);
784			psignal(p, SIGSEGV);
785			PROC_UNLOCK(p);
786			return (EFAULT);
787		}
788		PROC_LOCK(p);
789		if (mbx == (uintptr_t)kg->kg_completed) {
790			kg->kg_completed = td->td_mailbox;
791			PROC_UNLOCK(p);
792			break;
793		}
794		PROC_UNLOCK(p);
795	}
796	return (0);
797}
798
799/*
800 * Take the list of completed mailboxes for this KSEGRP and put them on this
801 * KSE's mailbox as it's the next one going up.
802 */
803static int
804thread_link_mboxes(struct ksegrp *kg, struct kse *ke)
805{
806	struct proc *p = kg->kg_proc;
807	void *addr;
808	uintptr_t mbx;
809
810#if 0
811	addr = (caddr_t)ke->ke_mailbox
812	    + offsetof(struct kse_mailbox, km_completed);
813#else /* if user pointer arithmetic is valid in the kernel */
814		addr = (void *)(&ke->ke_mailbox->km_completed);
815#endif
816	for (;;) {
817		mbx = (uintptr_t)kg->kg_completed;
818		if (suword(addr, mbx)) {
819			PROC_LOCK(p);
820			psignal(p, SIGSEGV);
821			PROC_UNLOCK(p);
822			return (EFAULT);
823		}
824		/* XXXKSE could use atomic CMPXCH here */
825		PROC_LOCK(p);
826		if (mbx == (uintptr_t)kg->kg_completed) {
827			kg->kg_completed = NULL;
828			PROC_UNLOCK(p);
829			break;
830		}
831		PROC_UNLOCK(p);
832	}
833	return (0);
834}
835
836/*
837 * Discard the current thread and exit from its context.
838 *
839 * Because we can't free a thread while we're operating under its context,
840 * push the current thread into our KSE's ke_tdspare slot, freeing the
841 * thread that might be there currently. Because we know that only this
842 * processor will run our KSE, we needn't worry about someone else grabbing
843 * our context before we do a cpu_throw.
844 */
845void
846thread_exit(void)
847{
848	struct thread *td;
849	struct kse *ke;
850	struct proc *p;
851	struct ksegrp	*kg;
852
853	td = curthread;
854	kg = td->td_ksegrp;
855	p = td->td_proc;
856	ke = td->td_kse;
857
858	mtx_assert(&sched_lock, MA_OWNED);
859	KASSERT(p != NULL, ("thread exiting without a process"));
860	KASSERT(ke != NULL, ("thread exiting without a kse"));
861	KASSERT(kg != NULL, ("thread exiting without a kse group"));
862	PROC_LOCK_ASSERT(p, MA_OWNED);
863	CTR1(KTR_PROC, "thread_exit: thread %p", td);
864	KASSERT(!mtx_owned(&Giant), ("dying thread owns giant"));
865
866	if (ke->ke_tdspare != NULL) {
867		thread_stash(ke->ke_tdspare);
868		ke->ke_tdspare = NULL;
869	}
870	if (td->td_standin != NULL) {
871		thread_stash(td->td_standin);
872		td->td_standin = NULL;
873	}
874
875	cpu_thread_exit(td);	/* XXXSMP */
876
877	/*
878	 * The last thread is left attached to the process
879	 * So that the whole bundle gets recycled. Skip
880	 * so that the whole bundle gets recycled. Skip
881	 */
882	if (p->p_numthreads > 1) {
883		/*
884		 * Unlink this thread from its proc and the kseg.
885		 * In keeping with the other structs we probably should
886		 * have a thread_unlink() that does some of this but it
887		 * would only be called from here (I think) so it would
888		 * be a waste. (might be useful for proc_fini() as well.)
889 		 */
890		TAILQ_REMOVE(&p->p_threads, td, td_plist);
891		p->p_numthreads--;
892		TAILQ_REMOVE(&kg->kg_threads, td, td_kglist);
893		kg->kg_numthreads--;
894		/*
895		 * The test below is NOT true if we are the
896		 * sole exiting thread. P_STOPPED_SINGLE is unset
897		 * in exit1() after it is the only survivor.
898		 */
899		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
900			if (p->p_numthreads == p->p_suspcount) {
901				thread_unsuspend_one(p->p_singlethread);
902			}
903		}
904
905		/* Reassign this thread's KSE. */
906		ke->ke_thread = NULL;
907		td->td_kse = NULL;
908		ke->ke_state = KES_UNQUEUED;
909		KASSERT((ke->ke_bound != td),
910		    ("thread_exit: entered with ke_bound set"));
911
912		/*
913		 * The reason for all this hoopla is
914		 * an attempt to stop our thread stack from being freed
915		 * until AFTER we have stopped running on it.
916		 * Since we are under schedlock, almost any method where
917		 * it is eventually freed by someone else is probably ok.
918		 * (Especially if they do it under schedlock). We could
919		 * almost free it here if we could be certain that
920		 * the uma code wouldn't pull it apart immediately,
921		 * but unfortunately we cannot guarantee that.
922		 *
923		 * For threads that are exiting and NOT killing their
924		 * KSEs we can just stash it in the KSE, however
925		 * in the case where the KSE is also being deallocated,
926		 * we need to store it somewhere else. It turns out that
927		 * we will never free the last KSE, so there is always one
928		 * other KSE available. We might as well just choose one
929		 * and stash it there. Being under schedlock should make that
930		 * safe.
931		 *
932		 * In borrower threads, we can stash it in the lender,
933		 * where it won't be needed until this thread is long gone.
934		 * Borrower threads can't kill their KSE anyhow, so even
935		 * the KSE would be a safe place for them. It is not
936		 * necessary to have a KSE (or KSEGRP) at all beyond this
937		 * point, while we are under the protection of schedlock.
938		 *
939		 * Either give the KSE to another thread to use (or make
940		 * it idle), or free it entirely, possibly along with its
941		 * ksegrp if it's the last one.
942		 */
943		if (ke->ke_flags & KEF_EXIT) {
944			kse_unlink(ke);
945			/*
946			 * Designate another KSE to hold our thread.
947			 * Safe as long as we abide by whatever lock
948			 * we control it with.. The other KSE will not
949			 * be able to run it until we release the schedlock,
950			 * but we need to be careful about it deciding to
951			 * write to the stack before then. Luckily
952			 * I believe that while another thread's
953			 * standin thread can be used in this way, the
954			 * spare thread for the KSE cannot be used without
955			 * holding schedlock at least once.
956			 */
957			ke =  FIRST_KSE_IN_PROC(p);
958		} else {
959			kse_reassign(ke);
960		}
961		if (ke->ke_bound) {
962			/*
963			 * WE are a borrower..
964			 * stash our thread with the owner.
965			 */
966			if (ke->ke_bound->td_standin) {
967				thread_stash(ke->ke_bound->td_standin);
968			}
969			ke->ke_bound->td_standin = td;
970		} else {
971			if (ke->ke_tdspare != NULL) {
972				thread_stash(ke->ke_tdspare);
973				ke->ke_tdspare = NULL;
974			}
975			ke->ke_tdspare = td;
976		}
977		PROC_UNLOCK(p);
978		td->td_state	= TDS_INACTIVE;
979		td->td_proc	= NULL;
980		td->td_ksegrp	= NULL;
981		td->td_last_kse	= NULL;
982	} else {
983		PROC_UNLOCK(p);
984	}
985
986	cpu_throw();
987	/* NOTREACHED */
988}
989
990/*
991 * Link a thread to a process.
992 * set up anything that needs to be initialized for it to
993 * be used by the process.
994 *
995 * Note that we do not link to the proc's ucred here.
996 * The thread is linked as if running but no KSE assigned.
997 */
998void
999thread_link(struct thread *td, struct ksegrp *kg)
1000{
1001	struct proc *p;
1002
1003	p = kg->kg_proc;
1004	td->td_state = TDS_INACTIVE;
1005	td->td_proc	= p;
1006	td->td_ksegrp	= kg;
1007	td->td_last_kse	= NULL;
1008
1009	LIST_INIT(&td->td_contested);
1010	callout_init(&td->td_slpcallout, 1);
1011	TAILQ_INSERT_HEAD(&p->p_threads, td, td_plist);
1012	TAILQ_INSERT_HEAD(&kg->kg_threads, td, td_kglist);
1013	p->p_numthreads++;
1014	kg->kg_numthreads++;
1015	if (oiks_debug && p->p_numthreads > max_threads_per_proc) {
1016		printf("OIKS %d\n", p->p_numthreads);
1017		if (oiks_debug > 1)
1018			Debugger("OIKS");
1019	}
1020	td->td_kse	= NULL;
1021}
1022
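/*
 * Purge all idle KSEs, and all ksegrps other than the caller's, from a
 * process that has been reduced to a single thread, stashing them on the
 * zombie queues for later reaping.
 */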
1023void
1024kse_purge(struct proc *p, struct thread *td)
1025{
1026	struct kse *ke;
1027	struct ksegrp *kg;
1028
1029 	KASSERT(p->p_numthreads == 1, ("bad thread number"));
1030	mtx_lock_spin(&sched_lock);
1031	while ((kg = TAILQ_FIRST(&p->p_ksegrps)) != NULL) {
1032		while ((ke = TAILQ_FIRST(&kg->kg_iq)) != NULL) {
1033			TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
1034			kg->kg_idle_kses--;
1035			TAILQ_REMOVE(&kg->kg_kseq, ke, ke_kglist);
1036			kg->kg_kses--;
1037			if (ke->ke_tdspare)
1038				thread_stash(ke->ke_tdspare);
1039   			kse_stash(ke);
1040		}
1041		TAILQ_REMOVE(&p->p_ksegrps, kg, kg_ksegrp);
1042		p->p_numksegrps--;
1043		KASSERT(((kg->kg_kses == 0) && (kg != td->td_ksegrp)) ||
1044		    ((kg->kg_kses == 1) && (kg == td->td_ksegrp)),
1045			("wrong kg_kses"));
1046		if (kg != td->td_ksegrp) {
1047			ksegrp_stash(kg);
1048		}
1049	}
1050	TAILQ_INSERT_HEAD(&p->p_ksegrps, td->td_ksegrp, kg_ksegrp);
1051	p->p_numksegrps++;
1052	mtx_unlock_spin(&sched_lock);
1053}
1054
1055
1056/*
1057 * Create a thread and schedule it for upcall on the KSE given.
1058 */
1059struct thread *
1060thread_schedule_upcall(struct thread *td, struct kse *ke)
1061{
1062	struct thread *td2;
1063	struct ksegrp *kg;
1064	int newkse;
1065
1066	mtx_assert(&sched_lock, MA_OWNED);
1067	newkse = (ke != td->td_kse);
1068
1069	/*
1070	 * If the kse is already owned by another thread then we can't
1071	 * schedule an upcall because the other thread must be BOUND
1072	 * which means it is not in a position to take an upcall.
1073	 * We must be borrowing the KSE to allow us to complete some in-kernel
1074	 * work. When we complete, the Bound thread will have the chance to
1075	 * complete. This thread will sleep as planned. Hopefully there will
1076	 * eventually be an unbound thread that can be converted to an
1077	 * upcall to report the completion of this thread.
1078	 */
1079	if (ke->ke_bound && ((ke->ke_bound->td_flags & TDF_UNBOUND) == 0)) {
1080		return (NULL);
1081	}
1082	KASSERT((ke->ke_bound == NULL), ("kse already bound"));
1083
1084	if (ke->ke_state == KES_IDLE) {
1085		kg = ke->ke_ksegrp;
1086		TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
1087		kg->kg_idle_kses--;
1088		ke->ke_state = KES_UNQUEUED;
1089	}
1090	if ((td2 = td->td_standin) != NULL) {
1091		td->td_standin = NULL;
1092	} else {
1093		if (newkse)
1094			panic("no reserve thread when called with a new kse");
1095		/*
1096		 * If called from (e.g.) sleep and we do not have
1097		 * a reserve thread, then we've used it, so do not
1098		 * create an upcall.
1099		 */
1100		return (NULL);
1101	}
1102	CTR3(KTR_PROC, "thread_schedule_upcall: thread %p (pid %d, %s)",
1103	     td2, td->td_proc->p_pid, td->td_proc->p_comm);
1104	bzero(&td2->td_startzero,
1105	    (unsigned)RANGEOF(struct thread, td_startzero, td_endzero));
1106	bcopy(&td->td_startcopy, &td2->td_startcopy,
1107	    (unsigned) RANGEOF(struct thread, td_startcopy, td_endcopy));
1108	thread_link(td2, ke->ke_ksegrp);
1109	cpu_set_upcall(td2, td->td_pcb);
1110
1111	/*
1112	 * XXXKSE do we really need this? (default values for the
1113	 * frame).
1114	 */
1115	bcopy(td->td_frame, td2->td_frame, sizeof(struct trapframe));
1116
1117	/*
1118	 * Bind the new thread to the KSE,
1119	 * and if it's our KSE, lend it back to ourself
1120	 * so we can continue running.
1121	 */
1122	td2->td_ucred = crhold(td->td_ucred);
1123	td2->td_flags = TDF_UPCALLING; /* note: BOUND */
1124	td2->td_kse = ke;
1125	td2->td_state = TDS_CAN_RUN;
1126	td2->td_inhibitors = 0;
1127	/*
1128	 * If called from msleep(), we are working on the current
1129	 * KSE so fake that we borrowed it. If called from
1130	 * kse_create(), don't, as we have a new kse too.
1131	 */
1132	if (!newkse) {
1133		/*
1134		 * This thread will be scheduled when the current thread
1135		 * blocks, exits or tries to enter userspace (whichever
1136		 * happens first). When that happens the KSE will "revert"
1137		 * to this thread in a BOUND manner. Since we are called
1138		 * from msleep() this is going to be "very soon" in nearly
1139		 * all cases.
1140		 */
1141		ke->ke_bound = td2;
1142		TD_SET_LOAN(td2);
1143	} else {
1144		ke->ke_bound = NULL;
1145		ke->ke_thread = td2;
1146		ke->ke_state = KES_THREAD;
1147		setrunqueue(td2);
1148	}
1149	return (td2);	/* bogus.. should be a void function */
1150}
1151
1152/*
1153 * Schedule an upcall to notify a KSE process that it received signals.
1154 *
1155 * XXX - Modifying a sigset_t like this is totally bogus.
1156 */
1157struct thread *
1158signal_upcall(struct proc *p, int sig)
1159{
1160	struct thread *td, *td2;
1161	struct kse *ke;
1162	sigset_t ss;
1163	int error;
1164
1165	PROC_LOCK_ASSERT(p, MA_OWNED);
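	/* XXXKSE the unconditional return below disables upcall signal delivery for now. */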
1166return (NULL);
1167
1168	td = FIRST_THREAD_IN_PROC(p);
1169	ke = td->td_kse;
1170	PROC_UNLOCK(p);
1171	error = copyin(&ke->ke_mailbox->km_sigscaught, &ss, sizeof(sigset_t));
1172	PROC_LOCK(p);
1173	if (error)
1174		return (NULL);
1175	SIGADDSET(ss, sig);
1176	PROC_UNLOCK(p);
1177	error = copyout(&ss, &ke->ke_mailbox->km_sigscaught, sizeof(sigset_t));
1178	PROC_LOCK(p);
1179	if (error)
1180		return (NULL);
1181	if (td->td_standin == NULL)
1182		td->td_standin = thread_alloc();
1183	mtx_lock_spin(&sched_lock);
1184	td2 = thread_schedule_upcall(td, ke); /* Bogus JRE */
1185	mtx_unlock_spin(&sched_lock);
1186	return (td2);
1187}
1188
1189/*
1190 * setup done on the thread when it enters the kernel.
1191 * XXXKSE Presently only for syscalls but eventually all kernel entries.
1192 */
1193void
1194thread_user_enter(struct proc *p, struct thread *td)
1195{
1196	struct kse *ke;
1197
1198	/*
1199	 * First check that we shouldn't just abort.
1200	 * But check if we are the single thread first!
1201	 * XXX p_singlethread not locked, but should be safe.
1202	 */
1203	if ((p->p_flag & P_WEXIT) && (p->p_singlethread != td)) {
1204		PROC_LOCK(p);
1205		mtx_lock_spin(&sched_lock);
1206		thread_exit();
1207		/* NOTREACHED */
1208	}
1209
1210	/*
1211	 * If we are doing a syscall in a KSE environment,
1212	 * note where our mailbox is. There is always the
1213	 * possibility that we could do this lazily (in sleep()),
1214	 * but for now do it every time.
1215	 */
1216	ke = td->td_kse;
1217	if (ke->ke_mailbox != NULL) {
1218#if 0
1219		td->td_mailbox = (void *)fuword((caddr_t)ke->ke_mailbox
1220		    + offsetof(struct kse_mailbox, km_curthread));
1221#else /* if user pointer arithmetic is ok in the kernel */
1222		td->td_mailbox =
1223		    (void *)fuword( (void *)&ke->ke_mailbox->km_curthread);
1224#endif
1225		if ((td->td_mailbox == NULL) ||
1226		    (td->td_mailbox == (void *)-1)) {
1227			td->td_mailbox = NULL;	/* single thread it.. */
1228			td->td_flags &= ~TDF_UNBOUND;
1229		} else {
1230			if (td->td_standin == NULL)
1231				td->td_standin = thread_alloc();
1232			td->td_flags |= TDF_UNBOUND;
1233		}
1234	}
1235}
1236
1237/*
1238 * The extra work we go through if we are a threaded process when we
1239 * return to userland.
1240 *
1241 * If we are a KSE process and returning to user mode, check for
1242 * extra work to do before we return (e.g. for more syscalls
1243 * to complete first).  If we were in a critical section, we should
1244 * just return to let it finish. Same if we were in the UTS (in
1245 * which case the mailbox's context's busy indicator will be set).
1246 * The only traps we support will have set the mailbox.
1247 * We will clear it here.
1248 */
1249int
1250thread_userret(struct thread *td, struct trapframe *frame)
1251{
1252	int error;
1253	int unbound;
1254	struct kse *ke;
1255	struct ksegrp *kg;
1256	struct thread *td2;
1257	struct proc *p;
1258
1259	error = 0;
1260
1261	unbound = td->td_flags & TDF_UNBOUND;
1262
1263	kg = td->td_ksegrp;
1264	p = td->td_proc;
1265
1266	/*
1267	 * Originally bound threads never upcall but they may
1268	 * loan out their KSE at this point.
1269	 * Upcalls imply bound.. They also may want to do some Philanthropy.
1270	 * Unbound threads on the other hand either yield to other work
1271	 * or transform into an upcall.
1272	 * (having saved their context to user space in both cases)
1273	 */
1274	if (unbound) {
1275		/*
1276		 * We are an unbound thread, looking to return to
1277		 * user space.
1278		 * There are several possibilities:
1279		 * 1) we are using a borrowed KSE. save state and exit.
1280		 *    kse_reassign() will recycle the kse as needed,
1281		 * 2) we are not.. save state, and then convert ourself
1282		 *    to be an upcall, bound to the KSE.
1283		 *    if there are others that need the kse,
1284		 *    give them a chance by doing an mi_switch().
1285		 *    Because we are bound, control will eventually return
1286		 *    to us here.
1287		 * ***
1288		 * Save the thread's context, and link it
1289		 * into the KSEGRP's list of completed threads.
1290		 */
1291		error = thread_export_context(td);
1292		td->td_mailbox = NULL;
1293		if (error) {
1294			/*
1295			 * If we are not running on a borrowed KSE, then
1296			 * failing to do the KSE operation just defaults
1297			 * back to synchronous operation, so just return from
1298			 * the syscall. If it IS borrowed, there is nothing
1299			 * we can do. We just lose that context. We
1300			 * probably should note this somewhere and send
1301			 * the process a signal.
1302			 */
1303			PROC_LOCK(td->td_proc);
1304			psignal(td->td_proc, SIGSEGV);
1305			mtx_lock_spin(&sched_lock);
1306			if (td->td_kse->ke_bound == NULL) {
1307				td->td_flags &= ~TDF_UNBOUND;
1308				PROC_UNLOCK(td->td_proc);
1309				mtx_unlock_spin(&sched_lock);
1310				return (error);	/* go sync */
1311			}
1312			thread_exit();
1313		}
1314
1315		/*
1316		 * if the KSE is owned and we are borrowing it,
1317		 * don't make an upcall, just exit so that the owner
1318		 * can get its KSE if it wants it.
1319		 * Our context is already safely stored for later
1320		 * use by the UTS.
1321		 */
1322		PROC_LOCK(p);
1323		mtx_lock_spin(&sched_lock);
1324		if (td->td_kse->ke_bound) {
1325			thread_exit();
1326		}
1327		PROC_UNLOCK(p);
1328
1329		/*
1330		 * Turn ourself into a bound upcall.
1331		 * We will rely on kse_reassign()
1332		 * to make us run at a later time.
1333		 * We should look just like a scheduled upcall
1334		 * from msleep() or cv_wait().
1335		 */
1336		td->td_flags &= ~TDF_UNBOUND;
1337		td->td_flags |= TDF_UPCALLING;
1338		/* Only get here if we have become an upcall */
1339
1340	} else {
1341		mtx_lock_spin(&sched_lock);
1342	}
1343	/*
1344	 * We ARE going back to userland with this KSE.
1345	 * Check for threads that need to borrow it.
1346	 * Optimisation: don't call mi_switch if no-one wants the KSE.
1347	 * Any other thread that comes ready after this missed the boat.
1348	 */
1349	ke = td->td_kse;
1350	if ((td2 = kg->kg_last_assigned))
1351		td2 = TAILQ_NEXT(td2, td_runq);
1352	else
1353		td2 = TAILQ_FIRST(&kg->kg_runq);
1354	if (td2)  {
1355		/*
1356		 * force a switch to more urgent 'in kernel'
1357		 * work. Control will return to this thread
1358		 * when there is no more work to do.
1359		 * kse_reassign() will do that for us.
1360		 */
1361		TD_SET_LOAN(td);
1362		ke->ke_bound = td;
1363		ke->ke_thread = NULL;
1364		mi_switch(); /* kse_reassign() will (re)find td2 */
1365	}
1366	mtx_unlock_spin(&sched_lock);
1367
1368	/*
1369	 * Optimisation:
1370	 * Ensure that we have a spare thread available,
1371	 * for when we re-enter the kernel.
1372	 */
1373	if (td->td_standin == NULL) {
1374		if (ke->ke_tdspare) {
1375			td->td_standin = ke->ke_tdspare;
1376			ke->ke_tdspare = NULL;
1377		} else {
1378			td->td_standin = thread_alloc();
1379		}
1380	}
1381
1382	/*
1383	 * To get here, we know there is no other need for our
1384	 * KSE so we can proceed. If not upcalling, go back to
1385	 * userspace. If we are, get the upcall set up.
1386	 */
1387	if ((td->td_flags & TDF_UPCALLING) == 0)
1388		return (0);
1389
1390	/*
1391	 * We must be an upcall to get this far.
1392	 * There is no more work to do and we are going to ride
1393	 * this thread/KSE up to userland as an upcall.
1394	 * Do the last parts of the setup needed for the upcall.
1395	 */
1396	CTR3(KTR_PROC, "userret: upcall thread %p (pid %d, %s)",
1397	    td, td->td_proc->p_pid, td->td_proc->p_comm);
1398
1399	/*
1400	 * Set user context to the UTS.
1401	 */
1402	cpu_set_upcall_kse(td, ke);
1403
1404	/*
1405	 * Put any completed mailboxes on this KSE's list.
1406	 */
1407	error = thread_link_mboxes(kg, ke);
1408	if (error)
1409		goto bad;
1410
1411	/*
1412	 * Set state and mailbox.
1413	 * From now on we are just a bound outgoing process.
1414	 * **Problem** userret is often called several times.
1415	 * It would be nice if this all happened only on the first time
1416	 * through (the scan for extra work etc.).
1417	 */
1418	mtx_lock_spin(&sched_lock);
1419	td->td_flags &= ~TDF_UPCALLING;
1420	mtx_unlock_spin(&sched_lock);
1421#if 0
1422	error = suword((caddr_t)ke->ke_mailbox +
1423	    offsetof(struct kse_mailbox, km_curthread), 0);
1424#else	/* if user pointer arithmetic is ok in the kernel */
1425	error = suword((caddr_t)&ke->ke_mailbox->km_curthread, 0);
1426#endif
1427	if (!error)
1428		return (0);
1429
1430bad:
1431	/*
1432	 * Things are going to be so screwed we should just kill the process.
1433 	 * How do we do that?
1434	 */
1435	PROC_LOCK(td->td_proc);
1436	psignal(td->td_proc, SIGSEGV);
1437	PROC_UNLOCK(td->td_proc);
1438	return (error);	/* go sync */
1439}
1440
1441/*
1442 * Enforce single-threading.
1443 *
1444 * Returns 1 if the caller must abort (another thread is waiting to
1445 * exit the process or similar). Process is locked!
1446 * Returns 0 when you are successfully the only thread running.
1447 * A process has successfully single threaded in the suspend mode when
1448 * there are no threads in user mode. Threads in the kernel must be
1449 * allowed to continue until they get to the user boundary. They may even
1450 * copy out their return values and data before suspending. They may however be
1451 * accelerated in reaching the user boundary as we will wake up
1452 * any sleeping threads that are interruptible (PCATCH).
1453 */
1454int
1455thread_single(int force_exit)
1456{
1457	struct thread *td;
1458	struct thread *td2;
1459	struct proc *p;
1460
1461	td = curthread;
1462	p = td->td_proc;
1463	PROC_LOCK_ASSERT(p, MA_OWNED);
1464	KASSERT((td != NULL), ("curthread is NULL"));
1465
1466	if ((p->p_flag & P_KSES) == 0)
1467		return (0);
1468
1469	/* Is someone already single threading? */
1470	if (p->p_singlethread)
1471		return (1);
1472
1473	if (force_exit == SINGLE_EXIT)
1474		p->p_flag |= P_SINGLE_EXIT;
1475	else
1476		p->p_flag &= ~P_SINGLE_EXIT;
1477	p->p_flag |= P_STOPPED_SINGLE;
1478	p->p_singlethread = td;
1479	/* XXXKSE Which lock protects the below values? */
1480	while ((p->p_numthreads - p->p_suspcount) != 1) {
1481		mtx_lock_spin(&sched_lock);
1482		FOREACH_THREAD_IN_PROC(p, td2) {
1483			if (td2 == td)
1484				continue;
1485			if (TD_IS_INHIBITED(td2)) {
1486				if (force_exit == SINGLE_EXIT) {
1487					if (TD_IS_SUSPENDED(td2)) {
1488						thread_unsuspend_one(td2);
1489					}
1490					if (TD_ON_SLEEPQ(td2) &&
1491					    (td2->td_flags & TDF_SINTR)) {
1492						if (td2->td_flags & TDF_CVWAITQ)
1493							cv_abort(td2);
1494						else
1495							abortsleep(td2);
1496					}
1497				} else {
1498					if (TD_IS_SUSPENDED(td2))
1499						continue;
1500					/* maybe other inhibited states too? */
1501					if (TD_IS_SLEEPING(td2))
1502						thread_suspend_one(td2);
1503				}
1504			}
1505		}
1506		/*
1507		 * Maybe we suspended some threads.. was it enough?
1508		 */
1509		if ((p->p_numthreads - p->p_suspcount) == 1) {
1510			mtx_unlock_spin(&sched_lock);
1511			break;
1512		}
1513
1514		/*
1515		 * Wake us up when everyone else has suspended.
1516		 * In the meantime we suspend as well.
1517		 */
1518		thread_suspend_one(td);
1519		mtx_unlock(&Giant);
1520		PROC_UNLOCK(p);
1521		mi_switch();
1522		mtx_unlock_spin(&sched_lock);
1523		mtx_lock(&Giant);
1524		PROC_LOCK(p);
1525	}
1526	if (force_exit == SINGLE_EXIT)
1527		kse_purge(p, td);
1528	return (0);
1529}
1530
1531/*
1532 * Called in from locations that can safely check to see
1533 * whether we have to suspend or at least throttle for a
1534 * single-thread event (e.g. fork).
1535 *
1536 * Such locations include userret().
1537 * If the "return_instead" argument is non-zero, the thread must be able to
1538 * accept 0 (caller may continue), or 1 (caller must abort) as a result.
1539 *
1540 * The 'return_instead' argument tells the function if it may do a
1541 * thread_exit() or suspend, or whether the caller must abort and back
1542 * out instead.
1543 *
1544 * If the thread that set the single_threading request has set the
1545 * P_SINGLE_EXIT bit in the process flags then this call will never return
1546 * if 'return_instead' is false, but will exit.
1547 *
1548 * P_SINGLE_EXIT | return_instead == 0| return_instead != 0
1549 *---------------+--------------------+---------------------
1550 *       0       | returns 0          |   returns 0 or 1
1551 *               | when ST ends       |   immediately
1552 *---------------+--------------------+---------------------
1553 *       1       | thread exits       |   returns 1
1554 *               |                    |  immediately
1555 * 0 = thread_exit() or suspension ok,
1556 * other = return error instead of stopping the thread.
1557 *
1558 * While a full suspension is under effect, even a single threading
1559 * thread would be suspended if it made this call (but it shouldn't).
1560 * This call should only be made from places where
1561 * thread_exit() would be safe as that may be the outcome unless
1562 * return_instead is set.
1563 */
1564int
1565thread_suspend_check(int return_instead)
1566{
1567	struct thread *td;
1568	struct proc *p;
1569	struct kse *ke;
1570	struct ksegrp *kg;
1571
1572	td = curthread;
1573	p = td->td_proc;
1574	kg = td->td_ksegrp;
1575	PROC_LOCK_ASSERT(p, MA_OWNED);
1576	while (P_SHOULDSTOP(p)) {
1577		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
1578			KASSERT(p->p_singlethread != NULL,
1579			    ("singlethread not set"));
1580			/*
1581			 * The only suspension in action is a
1582			 * single-threading. Single threader need not stop.
1583			 * XXX Should be safe to access unlocked
1584			 * as it can only be set to be true by us.
1585			 */
1586			if (p->p_singlethread == td)
1587				return (0);	/* Exempt from stopping. */
1588		}
1589		if (return_instead)
1590			return (1);
1591
1592		/*
1593		 * If the process is waiting for us to exit,
1594		 * this thread should just suicide.
1595		 * Assumes that P_SINGLE_EXIT implies P_STOPPED_SINGLE.
1596		 */
1597		if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) {
1598			mtx_lock_spin(&sched_lock);
1599			while (mtx_owned(&Giant))
1600				mtx_unlock(&Giant);
1601			/*
1602			 * Free extra kses and ksegrps.  We needn't worry about the
1603			 * case where the current thread is in the same ksegrp as
1604			 * p_singlethread and the last kse in the group could be
1605			 * killed: that case is protected by kg_numthreads, from
1606			 * which we deduce that kg_numthreads must be > 1 here.
1607			 */
1608			ke = td->td_kse;
1609			if (ke->ke_bound == NULL &&
1610			    ((kg->kg_kses != 1) || (kg->kg_numthreads == 1)))
1611				ke->ke_flags |= KEF_EXIT;
1612			thread_exit();
1613		}
1614
1615		/*
1616		 * When a thread suspends, it just
1617		 * moves to the process's suspend queue
1618		 * and stays there.
1619		 *
1620		 * XXXKSE if TDF_BOUND is true
1621		 * it will not release its KSE which might
1622		 * lead to deadlock if there are not enough KSEs
1623		 * to complete all waiting threads.
1624		 * Maybe we could 'lend' it out again.
1625		 * (lent KSEs cannot go back to userland?)
1626		 * and can only be lent in STOPPED state.
1627		 */
1628		mtx_lock_spin(&sched_lock);
1629		if ((p->p_flag & P_STOPPED_SIG) &&
1630		    (p->p_suspcount+1 == p->p_numthreads)) {
1631			mtx_unlock_spin(&sched_lock);
1632			PROC_LOCK(p->p_pptr);
1633			if ((p->p_pptr->p_procsig->ps_flag &
1634				PS_NOCLDSTOP) == 0) {
1635				psignal(p->p_pptr, SIGCHLD);
1636			}
1637			PROC_UNLOCK(p->p_pptr);
1638			mtx_lock_spin(&sched_lock);
1639		}
1640		mtx_assert(&Giant, MA_NOTOWNED);
1641		thread_suspend_one(td);
1642		PROC_UNLOCK(p);
1643		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
1644			if (p->p_numthreads == p->p_suspcount) {
1645				thread_unsuspend_one(p->p_singlethread);
1646			}
1647		}
1648		p->p_stats->p_ru.ru_nivcsw++;
1649		mi_switch();
1650		mtx_unlock_spin(&sched_lock);
1651		PROC_LOCK(p);
1652	}
1653	return (0);
1654}
1655
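/*
 * Mark a thread as suspended and park it on its process's suspend queue.
 */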
1656void
1657thread_suspend_one(struct thread *td)
1658{
1659	struct proc *p = td->td_proc;
1660
1661	mtx_assert(&sched_lock, MA_OWNED);
1662	p->p_suspcount++;
1663	TD_SET_SUSPENDED(td);
1664	TAILQ_INSERT_TAIL(&p->p_suspended, td, td_runq);
1665	/*
1666	 * Hack: If we are suspending but are on the sleep queue
1667	 * then we are in msleep or the cv equivalent. We
1668	 * want to look like we have two Inhibitors.
1669	 * May already be set.. doesn't matter.
1670	 */
1671	if (TD_ON_SLEEPQ(td))
1672		TD_SET_SLEEPING(td);
1673}
1674
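/*
 * Take a thread off its process's suspend queue and make it runnable again.
 */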
1675void
1676thread_unsuspend_one(struct thread *td)
1677{
1678	struct proc *p = td->td_proc;
1679
1680	mtx_assert(&sched_lock, MA_OWNED);
1681	TAILQ_REMOVE(&p->p_suspended, td, td_runq);
1682	TD_CLR_SUSPENDED(td);
1683	p->p_suspcount--;
1684	setrunnable(td);
1685}
1686
1687/*
1688 * Allow all threads blocked by single threading to continue running.
1689 */
1690void
1691thread_unsuspend(struct proc *p)
1692{
1693	struct thread *td;
1694
1695	mtx_assert(&sched_lock, MA_OWNED);
1696	PROC_LOCK_ASSERT(p, MA_OWNED);
1697	if (!P_SHOULDSTOP(p)) {
1698		while (( td = TAILQ_FIRST(&p->p_suspended))) {
1699			thread_unsuspend_one(td);
1700		}
1701	} else if ((P_SHOULDSTOP(p) == P_STOPPED_SINGLE) &&
1702	    (p->p_numthreads == p->p_suspcount)) {
1703		/*
1704		 * Stopping everything also did the job for the single
1705		 * threading request. Now we've downgraded to single-threaded,
1706		 * let it continue.
1707		 */
1708		thread_unsuspend_one(p->p_singlethread);
1709	}
1710}
1711
1712void
1713thread_single_end(void)
1714{
1715	struct thread *td;
1716	struct proc *p;
1717
1718	td = curthread;
1719	p = td->td_proc;
1720	PROC_LOCK_ASSERT(p, MA_OWNED);
1721	p->p_flag &= ~P_STOPPED_SINGLE;
1722	p->p_singlethread = NULL;
1723	/*
1724	 * If there are other threads they may now run,
1725	 * unless of course there is a blanket 'stop order'
1726	 * on the process. The single threader must be allowed
1727	 * to continue however as this is a bad place to stop.
1728	 */
1729	if ((p->p_numthreads != 1) && (!P_SHOULDSTOP(p))) {
1730		mtx_lock_spin(&sched_lock);
1731		while (( td = TAILQ_FIRST(&p->p_suspended))) {
1732			thread_unsuspend_one(td);
1733		}
1734		mtx_unlock_spin(&sched_lock);
1735	}
1736}
1737
1738
1739