kern_thread.c revision 109909
1/*
2 * Copyright (C) 2001 Julian Elischer <julian@freebsd.org>.
3 *  All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice(s), this list of conditions and the following disclaimer as
10 *    the first lines of this file unmodified other than the possible
11 *    addition of one or more copyright notices.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 *    notice(s), this list of conditions and the following disclaimer in the
14 *    documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
17 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
18 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
19 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
20 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
21 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
22 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
23 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
26 * DAMAGE.
27 *
28 * $FreeBSD: head/sys/kern/kern_thread.c 109909 2003-01-26 23:39:33Z davidxu $
29 */
30
31#include <sys/param.h>
32#include <sys/systm.h>
33#include <sys/kernel.h>
34#include <sys/lock.h>
35#include <sys/malloc.h>
36#include <sys/mutex.h>
37#include <sys/proc.h>
38#include <sys/smp.h>
39#include <sys/sysctl.h>
40#include <sys/sysproto.h>
41#include <sys/filedesc.h>
42#include <sys/sched.h>
43#include <sys/signalvar.h>
44#include <sys/sx.h>
45#include <sys/tty.h>
46#include <sys/user.h>
47#include <sys/jail.h>
48#include <sys/kse.h>
49#include <sys/ktr.h>
50#include <sys/ucontext.h>
51
52#include <vm/vm.h>
53#include <vm/vm_object.h>
54#include <vm/pmap.h>
55#include <vm/uma.h>
56#include <vm/vm_map.h>
57
58#include <machine/frame.h>
59
60/*
61 * KSEGRP related storage.
62 */
63static uma_zone_t ksegrp_zone;
64static uma_zone_t kse_zone;
65static uma_zone_t thread_zone;
66static uma_zone_t upcall_zone;
67
68/* DEBUG ONLY */
69SYSCTL_NODE(_kern, OID_AUTO, threads, CTLFLAG_RW, 0, "thread allocation");
70static int thread_debug = 0;
71SYSCTL_INT(_kern_threads, OID_AUTO, debug, CTLFLAG_RW,
72	&thread_debug, 0, "thread debug");
73
74static int max_threads_per_proc = 30;
75SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_per_proc, CTLFLAG_RW,
76	&max_threads_per_proc, 0, "Limit on threads per proc");
77
78static int max_groups_per_proc = 5;
79SYSCTL_INT(_kern_threads, OID_AUTO, max_groups_per_proc, CTLFLAG_RW,
80	&max_groups_per_proc, 0, "Limit on thread groups per proc");
81
82static int virtual_cpu;
83
84#define RANGEOF(type, start, end) (offsetof(type, end) - offsetof(type, start))
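/*
 * RANGEOF() gives the size in bytes of the span between two members of
 * a structure.  The callers below use it with bzero()/bcopy() to clear
 * the "startzero"/"endzero" section or copy the "startcopy"/"endcopy"
 * section of a thread, kse or ksegrp, e.g.
 * bzero(&kg->kg_startzero, RANGEOF(struct ksegrp, kg_startzero, kg_endzero)).
 */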
85
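/*
 * Threads, KSEs, ksegrps and upcalls that cannot be freed in the
 * context that releases them are parked on these zombie queues
 * (under kse_zombie_lock) and reclaimed later by thread_reap().
 */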
86TAILQ_HEAD(, thread) zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads);
87TAILQ_HEAD(, kse) zombie_kses = TAILQ_HEAD_INITIALIZER(zombie_kses);
88TAILQ_HEAD(, ksegrp) zombie_ksegrps = TAILQ_HEAD_INITIALIZER(zombie_ksegrps);
89TAILQ_HEAD(, kse_upcall) zombie_upcalls =
90	TAILQ_HEAD_INITIALIZER(zombie_upcalls);
91struct mtx kse_zombie_lock;
92MTX_SYSINIT(kse_zombie_lock, &kse_zombie_lock, "kse zombie lock", MTX_SPIN);
93
94static void kse_purge(struct proc *p, struct thread *td);
95static void kse_purge_group(struct thread *td);
96static int thread_update_usr_ticks(struct thread *td);
97static int thread_update_sys_ticks(struct thread *td);
98static void thread_alloc_spare(struct thread *td, struct thread *spare);
99
100static int
101sysctl_kse_virtual_cpu(SYSCTL_HANDLER_ARGS)
102{
103	int error, new_val;
104	int def_val;
105
106#ifdef SMP
107	def_val = mp_ncpus;
108#else
109	def_val = 1;
110#endif
111	if (virtual_cpu == 0)
112		new_val = def_val;
113	else
114		new_val = virtual_cpu;
115	error = sysctl_handle_int(oidp, &new_val, 0, req);
116	if (error != 0 || req->newptr == NULL)
117		return (error);
118	if (new_val < 0)
119		return (EINVAL);
120	virtual_cpu = new_val;
121	return (0);
122}
123
124/* DEBUG ONLY */
125SYSCTL_PROC(_kern_threads, OID_AUTO, virtual_cpu, CTLTYPE_INT|CTLFLAG_RW,
126	0, sizeof(virtual_cpu), sysctl_kse_virtual_cpu, "I",
127	"debug virtual cpus");
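/*
 * Reads of kern.threads.virtual_cpu report mp_ncpus (1 on UP kernels)
 * until the value has been overridden, e.g. with
 * "sysctl kern.threads.virtual_cpu=4".  Zero restores the default and
 * a negative value is rejected with EINVAL.
 */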
128
129/*
130 * Prepare a thread for use.
131 */
132static void
133thread_ctor(void *mem, int size, void *arg)
134{
135	struct thread	*td;
136
137	td = (struct thread *)mem;
138	td->td_state = TDS_INACTIVE;
139}
140
141/*
142 * Reclaim a thread after use.
143 */
144static void
145thread_dtor(void *mem, int size, void *arg)
146{
147	struct thread	*td;
148
149	td = (struct thread *)mem;
150
151#ifdef INVARIANTS
152	/* Verify that this thread is in a safe state to free. */
153	switch (td->td_state) {
154	case TDS_INHIBITED:
155	case TDS_RUNNING:
156	case TDS_CAN_RUN:
157	case TDS_RUNQ:
158		/*
159		 * We must never unlink a thread that is in one of
160		 * these states, because it is currently active.
161		 */
162		panic("bad state for thread unlinking");
163		/* NOTREACHED */
164	case TDS_INACTIVE:
165		break;
166	default:
167		panic("bad thread state");
168		/* NOTREACHED */
169	}
170#endif
171}
172
173/*
174 * Initialize type-stable parts of a thread (when newly created).
175 */
176static void
177thread_init(void *mem, int size)
178{
179	struct thread	*td;
180
181	td = (struct thread *)mem;
182	mtx_lock(&Giant);
183	pmap_new_thread(td, 0);
184	mtx_unlock(&Giant);
185	cpu_thread_setup(td);
186	td->td_sched = (struct td_sched *)&td[1];
187}
188
189/*
190 * Tear down type-stable parts of a thread (just before being discarded).
191 */
192static void
193thread_fini(void *mem, int size)
194{
195	struct thread	*td;
196
197	td = (struct thread *)mem;
198	pmap_dispose_thread(td);
199}
200
201/*
202 * Initialize type-stable parts of a kse (when newly created).
203 */
204static void
205kse_init(void *mem, int size)
206{
207	struct kse	*ke;
208
209	ke = (struct kse *)mem;
210	ke->ke_sched = (struct ke_sched *)&ke[1];
211}
212
213/*
214 * Initialize type-stable parts of a ksegrp (when newly created).
215 */
216static void
217ksegrp_init(void *mem, int size)
218{
219	struct ksegrp	*kg;
220
221	kg = (struct ksegrp *)mem;
222	kg->kg_sched = (struct kg_sched *)&kg[1];
223}
224
225/*
226 * KSE is linked into kse group.
227 */
228void
229kse_link(struct kse *ke, struct ksegrp *kg)
230{
231	struct proc *p = kg->kg_proc;
232
233	TAILQ_INSERT_HEAD(&kg->kg_kseq, ke, ke_kglist);
234	kg->kg_kses++;
235	ke->ke_state	= KES_UNQUEUED;
236	ke->ke_proc	= p;
237	ke->ke_ksegrp	= kg;
238	ke->ke_thread	= NULL;
239	ke->ke_oncpu	= NOCPU;
240	ke->ke_flags	= 0;
241}
242
243void
244kse_unlink(struct kse *ke)
245{
246	struct ksegrp *kg;
247
248	mtx_assert(&sched_lock, MA_OWNED);
249	kg = ke->ke_ksegrp;
250	TAILQ_REMOVE(&kg->kg_kseq, ke, ke_kglist);
251	if (ke->ke_state == KES_IDLE) {
252		TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
253		kg->kg_idle_kses--;
254	}
255	if (--kg->kg_kses == 0)
256		ksegrp_unlink(kg);
257	/*
258	 * Aggregate stats from the KSE
259	 */
260	kse_stash(ke);
261}
262
263void
264ksegrp_link(struct ksegrp *kg, struct proc *p)
265{
266
267	TAILQ_INIT(&kg->kg_threads);
268	TAILQ_INIT(&kg->kg_runq);	/* links with td_runq */
269	TAILQ_INIT(&kg->kg_slpq);	/* links with td_runq */
270	TAILQ_INIT(&kg->kg_kseq);	/* all kses in ksegrp */
271	TAILQ_INIT(&kg->kg_iq);		/* all idle kses in ksegrp */
272	TAILQ_INIT(&kg->kg_upcalls);	/* all upcall structure in ksegrp */
273	kg->kg_proc = p;
274	/*
275	 * the following counters are in the -zero- section
276	 * and may not need clearing
277	 */
278	kg->kg_numthreads = 0;
279	kg->kg_runnable   = 0;
280	kg->kg_kses       = 0;
281	kg->kg_runq_kses  = 0; /* XXXKSE change name */
282	kg->kg_idle_kses  = 0;
283	kg->kg_numupcalls = 0;
284	/* link it in now that it's consistent */
285	p->p_numksegrps++;
286	TAILQ_INSERT_HEAD(&p->p_ksegrps, kg, kg_ksegrp);
287}
288
289void
290ksegrp_unlink(struct ksegrp *kg)
291{
292	struct proc *p;
293
294	mtx_assert(&sched_lock, MA_OWNED);
295	KASSERT((kg->kg_numthreads == 0), ("ksegrp_unlink: residual threads"));
296	KASSERT((kg->kg_kses == 0), ("ksegrp_unlink: residual kses"));
297	KASSERT((kg->kg_numupcalls == 0), ("ksegrp_unlink: residual upcalls"));
298
299	p = kg->kg_proc;
300	TAILQ_REMOVE(&p->p_ksegrps, kg, kg_ksegrp);
301	p->p_numksegrps--;
302	/*
303	 * Aggregate stats from the KSE
304	 */
305	ksegrp_stash(kg);
306}
307
308struct kse_upcall *
309upcall_alloc(void)
310{
311	struct kse_upcall *ku;
312
313	ku = uma_zalloc(upcall_zone, 0);
314	bzero(ku, sizeof(*ku));
315	return (ku);
316}
317
318void
319upcall_free(struct kse_upcall *ku)
320{
321
322	uma_zfree(upcall_zone, ku);
323}
324
325void
326upcall_link(struct kse_upcall *ku, struct ksegrp *kg)
327{
328
329	mtx_assert(&sched_lock, MA_OWNED);
330	TAILQ_INSERT_TAIL(&kg->kg_upcalls, ku, ku_link);
331	ku->ku_ksegrp = kg;
332	kg->kg_numupcalls++;
333}
334
335void
336upcall_unlink(struct kse_upcall *ku)
337{
338	struct ksegrp *kg = ku->ku_ksegrp;
339
340	mtx_assert(&sched_lock, MA_OWNED);
341	KASSERT(ku->ku_owner == NULL, ("%s: have owner", __func__));
342	TAILQ_REMOVE(&kg->kg_upcalls, ku, ku_link);
343	kg->kg_numupcalls--;
344	upcall_stash(ku);
345}
346
347void
348upcall_remove(struct thread *td)
349{
350
351	if (td->td_upcall) {
352		td->td_upcall->ku_owner = NULL;
353		upcall_unlink(td->td_upcall);
354		td->td_upcall = 0;
355	}
356}
357
358/*
359 * For a newly created process,
360 * link up all the structures and its initial threads etc.
361 */
362void
363proc_linkup(struct proc *p, struct ksegrp *kg,
364	    struct kse *ke, struct thread *td)
365{
366
367	TAILQ_INIT(&p->p_ksegrps);	     /* all ksegrps in proc */
368	TAILQ_INIT(&p->p_threads);	     /* all threads in proc */
369	TAILQ_INIT(&p->p_suspended);	     /* Threads suspended */
370	p->p_numksegrps = 0;
371	p->p_numthreads = 0;
372
373	ksegrp_link(kg, p);
374	kse_link(ke, kg);
375	thread_link(td, kg);
376}
377
378/*
379struct kse_thr_interrupt_args {
380	struct kse_thr_mailbox * tmbx;
381};
382*/
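/*
 * Interrupt the thread whose mailbox is uap->tmbx: mark it with
 * TDF_INTERRUPT and, if it is in an interruptible sleep, abort the
 * sleep so it returns to userland and notices the interrupt.
 * Returns ESRCH if no thread in the process owns that mailbox.
 */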
383int
384kse_thr_interrupt(struct thread *td, struct kse_thr_interrupt_args *uap)
385{
386	struct proc *p;
387	struct thread *td2;
388
389	p = td->td_proc;
390	if (!(p->p_flag & P_KSES) || (uap->tmbx == NULL))
391		return (EINVAL);
392	mtx_lock_spin(&sched_lock);
393	FOREACH_THREAD_IN_PROC(p, td2) {
394		if (td2->td_mailbox == uap->tmbx) {
395			td2->td_flags |= TDF_INTERRUPT;
396			if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR)) {
397				if (td2->td_flags & TDF_CVWAITQ)
398					cv_abort(td2);
399				else
400					abortsleep(td2);
401			}
402			mtx_unlock_spin(&sched_lock);
403			return (0);
404		}
405	}
406	mtx_unlock_spin(&sched_lock);
407	return (ESRCH);
408}
409
410/*
411struct kse_exit_args {
412	register_t dummy;
413};
414*/
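/*
 * The UTS calls this when it is finished with an upcall/KSE.  If this
 * is the last thread in the process we drop back to non-threaded mode;
 * if it is merely the last thread in its group, the group's KSEs are
 * purged and the thread exits.  EDEADLK is returned if other threads
 * in the group still depend on the sole remaining upcall.
 */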
415int
416kse_exit(struct thread *td, struct kse_exit_args *uap)
417{
418	struct proc *p;
419	struct ksegrp *kg;
420	struct kse *ke;
421
422	p = td->td_proc;
423	/*
424	 * Only the UTS can call this syscall, and the current group
425	 * must be a threaded group.
426	 */
427	if ((td->td_mailbox != NULL) || (td->td_ksegrp->kg_numupcalls == 0))
428		return (EINVAL);
429	KASSERT((td->td_upcall != NULL), ("%s: not own an upcall", __func__));
430
431	kg = td->td_ksegrp;
432	/* Serialize removing upcall */
433	PROC_LOCK(p);
434	mtx_lock_spin(&sched_lock);
435	if ((kg->kg_numupcalls == 1) && (kg->kg_numthreads > 1)) {
436		mtx_unlock_spin(&sched_lock);
437		PROC_UNLOCK(p);
438		return (EDEADLK);
439	}
440	ke = td->td_kse;
441	upcall_remove(td);
442	if (p->p_numthreads == 1) {
443		kse_purge(p, td);
444		p->p_flag &= ~P_KSES;
445		mtx_unlock_spin(&sched_lock);
446		PROC_UNLOCK(p);
447	} else {
448		if (kg->kg_numthreads == 1) { /* Shutdown a group */
449			kse_purge_group(td);
450			ke->ke_flags |= KEF_EXIT;
451		}
452		thread_exit();
453		/* NOTREACHED */
454	}
455	return (0);
456}
457
458/*
459 * Either becomes an upcall or waits for an awakening event and
460 * then becomes an upcall. Only error cases return.
461 */
462/*
463struct kse_release_args {
464	register_t dummy;
465};
466*/
467int
468kse_release(struct thread *td, struct kse_release_args *uap)
469{
470	struct proc *p;
471	struct ksegrp *kg;
472
473	p = td->td_proc;
474	kg = td->td_ksegrp;
475	/*
476	 * Only the UTS can call this syscall, and the current group
477	 * must be a threaded group.
478	 */
479	if ((td->td_mailbox != NULL) || (td->td_ksegrp->kg_numupcalls == 0))
480		return (EINVAL);
481	KASSERT((td->td_upcall != NULL), ("%s: not own an upcall", __func__));
482
483	PROC_LOCK(p);
484	mtx_lock_spin(&sched_lock);
485	/* Change OURSELF to become an upcall. */
486	td->td_flags = TDF_UPCALLING;
487	if ((td->td_upcall->ku_flags & KUF_DOUPCALL) == 0 &&
488	    (kg->kg_completed == NULL)) {
489		kg->kg_upsleeps++;
490		mtx_unlock_spin(&sched_lock);
491		msleep(&kg->kg_completed, &p->p_mtx, PPAUSE|PCATCH, "ksepause",
492		       NULL);
493		kg->kg_upsleeps--;
494		PROC_UNLOCK(p);
495	} else {
496		mtx_unlock_spin(&sched_lock);
497		PROC_UNLOCK(p);
498	}
499	return (0);
500}
501
502/* struct kse_wakeup_args {
503	struct kse_mailbox *mbx;
504}; */
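/*
 * Wake an upcall so the UTS gets to run again: if a mailbox is given,
 * find the upcall that owns it; otherwise wake any upcall sleeping in
 * kse_release() within the caller's group.  If the owner is not
 * sleeping, KUF_DOUPCALL is set so the wakeup is noticed on the next
 * return to userland.
 */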
505int
506kse_wakeup(struct thread *td, struct kse_wakeup_args *uap)
507{
508	struct proc *p;
509	struct ksegrp *kg;
510	struct kse_upcall *ku;
511	struct thread *td2;
512
513	p = td->td_proc;
514	td2 = NULL;
515	ku = NULL;
516	/* KSE-enabled processes only, please. */
517	if (!(p->p_flag & P_KSES))
518		return (EINVAL);
519
520	PROC_LOCK(p);
521	mtx_lock_spin(&sched_lock);
522	if (uap->mbx) {
523		FOREACH_KSEGRP_IN_PROC(p, kg) {
524			FOREACH_UPCALL_IN_GROUP(kg, ku) {
525				if (ku->ku_mailbox == uap->mbx)
526					break;
527			}
528			if (ku)
529				break;
530		}
531	} else {
532		kg = td->td_ksegrp;
533		if (kg->kg_upsleeps) {
534			wakeup_one(&kg->kg_completed);
535			mtx_unlock_spin(&sched_lock);
536			PROC_UNLOCK(p);
537			return (0);
538		}
539		ku = TAILQ_FIRST(&kg->kg_upcalls);
540	}
541	if (ku) {
542		if ((td2 = ku->ku_owner) == NULL) {
543			panic("%s: no owner", __func__);
544		} else if (TD_ON_SLEEPQ(td2) &&
545		           (td2->td_wchan == &kg->kg_completed)) {
546			abortsleep(td2);
547		} else {
548			ku->ku_flags |= KUF_DOUPCALL;
549		}
550		mtx_unlock_spin(&sched_lock);
551		PROC_UNLOCK(p);
552		return (0);
553	}
554	mtx_unlock_spin(&sched_lock);
555	PROC_UNLOCK(p);
556	return (ESRCH);
557}
558
559/*
560 * No new KSEG: first call: use the current KSE and don't schedule an upcall.
561 * In all other situations, allocate the maximum number of new KSEs and schedule an upcall.
562 */
563/* struct kse_create_args {
564	struct kse_mailbox *mbx;
565	int newgroup;
566}; */
567int
568kse_create(struct thread *td, struct kse_create_args *uap)
569{
570	struct kse *newke;
571	struct ksegrp *newkg;
572	struct ksegrp *kg;
573	struct proc *p;
574	struct kse_mailbox mbx;
575	struct kse_upcall *newku;
576	int err, ncpus;
577
578	p = td->td_proc;
579	if ((err = copyin(uap->mbx, &mbx, sizeof(mbx))))
580		return (err);
581
582	/* Too bad the kernel doesn't always have a cpu counter available. */
583#ifdef SMP
584	ncpus = mp_ncpus;
585#else
586	ncpus = 1;
587#endif
588	if (thread_debug && virtual_cpu != 0)
589		ncpus = virtual_cpu;
590
591	/* Easier to just set it than to test and set */
592	p->p_flag |= P_KSES;
593	kg = td->td_ksegrp;
594	if (uap->newgroup) {
595		/* There is a race condition here, but it is cheap. */
596		if (p->p_numksegrps >= max_groups_per_proc)
597			return (EPROCLIM);
598		/*
599		 * If we want a new KSEGRP it doesn't matter whether
600		 * we have already fired up KSE mode before or not.
601		 * We put the process in KSE mode and create a new KSEGRP.
602		 */
603		newkg = ksegrp_alloc();
604		bzero(&newkg->kg_startzero, RANGEOF(struct ksegrp,
605		      kg_startzero, kg_endzero));
606		bcopy(&kg->kg_startcopy, &newkg->kg_startcopy,
607		      RANGEOF(struct ksegrp, kg_startcopy, kg_endcopy));
608		mtx_lock_spin(&sched_lock);
609		ksegrp_link(newkg, p);
610		if (p->p_numksegrps >= max_groups_per_proc) {
611			ksegrp_unlink(newkg);
612			mtx_unlock_spin(&sched_lock);
613			return (EPROCLIM);
614		}
615		mtx_unlock_spin(&sched_lock);
616	} else {
617		newkg = kg;
618	}
619
620	/*
621	 * Creating more upcalls than the number of physical cpus does
622	 * not help performance.
623	 */
624	if (newkg->kg_numupcalls >= ncpus)
625		return (EPROCLIM);
626
627	if (newkg->kg_numupcalls == 0) {
628		/*
629		 * Initialize the KSE group, optimized for MP.
630		 * Create as many KSEs as there are physical cpus; this increases
631		 * concurrency even if userland is not MP safe and can only run
632		 * on a single CPU (true for early versions of libpthread).
633		 * In an ideal world, every physical cpu would execute a thread.
634		 * If there are enough KSEs, threads in the kernel can be
635		 * executed in parallel on different cpus at full speed;
636		 * concurrency in the kernel shouldn't be restricted by the
637		 * number of upcalls userland provides.
638		 * Adding more upcall structures only increases concurrency
639		 * in userland.
640		 * The highest performance configuration is:
641		 * N kses = N upcalls = N physical cpus
642		 */
643		while (newkg->kg_kses < ncpus) {
644			newke = kse_alloc();
645			bzero(&newke->ke_startzero, RANGEOF(struct kse,
646			      ke_startzero, ke_endzero));
647#if 0
648			mtx_lock_spin(&sched_lock);
649			bcopy(&ke->ke_startcopy, &newke->ke_startcopy,
650			      RANGEOF(struct kse, ke_startcopy, ke_endcopy));
651			mtx_unlock_spin(&sched_lock);
652#endif
653			mtx_lock_spin(&sched_lock);
654			kse_link(newke, newkg);
655			if (p->p_sflag & PS_NEEDSIGCHK)
656				newke->ke_flags |= KEF_ASTPENDING;
657			/* Add engine */
658			kse_reassign(newke);
659			mtx_unlock_spin(&sched_lock);
660		}
661	}
662	newku = upcall_alloc();
663	newku->ku_mailbox = uap->mbx;
664	newku->ku_func = mbx.km_func;
665	bcopy(&mbx.km_stack, &newku->ku_stack, sizeof(stack_t));
666
667	/* For the first call this may not have been set */
668	if (td->td_standin == NULL)
669		thread_alloc_spare(td, NULL);
670
671	mtx_lock_spin(&sched_lock);
672	if (newkg->kg_numupcalls >= ncpus) {
673		upcall_free(newku);
674		mtx_unlock_spin(&sched_lock);
675		return (EPROCLIM);
676	}
677	upcall_link(newku, newkg);
678
679	/*
680	 * Each upcall structure has an owner thread, find which
681	 * one owns it.
682	 */
683	if (uap->newgroup) {
684		/*
685		 * Because the new ksegrp has no thread,
686		 * create an initial upcall thread to own it.
687		 */
688		thread_schedule_upcall(td, newku);
689	} else {
690		/*
691		 * If the current thread doesn't own an upcall structure,
692		 * just assign the upcall to it.
693		 */
694		if (td->td_upcall == NULL) {
695			newku->ku_owner = td;
696			td->td_upcall = newku;
697		} else {
698			/*
699			 * Create a new upcall thread to own it.
700			 */
701			thread_schedule_upcall(td, newku);
702		}
703	}
704	mtx_unlock_spin(&sched_lock);
705	return (0);
706}
707
708/*
709 * Fill a ucontext_t with a thread's context information.
710 *
711 * This is an analogue to getcontext(3).
712 */
713void
714thread_getcontext(struct thread *td, ucontext_t *uc)
715{
716
717/*
718 * XXX this is declared in a MD include file, i386/include/ucontext.h but
719 * is used in MI code.
720 */
721#ifdef __i386__
722	get_mcontext(td, &uc->uc_mcontext);
723#endif
724	uc->uc_sigmask = td->td_proc->p_sigmask;
725}
726
727/*
728 * Set a thread's context from a ucontext_t.
729 *
730 * This is an analogue to setcontext(3).
731 */
732int
733thread_setcontext(struct thread *td, ucontext_t *uc)
734{
735	int ret;
736
737/*
738 * XXX this is declared in a MD include file, i386/include/ucontext.h but
739 * is used in MI code.
740 */
741#ifdef __i386__
742	ret = set_mcontext(td, &uc->uc_mcontext);
743#else
744	ret = ENOSYS;
745#endif
746	if (ret == 0) {
747		SIG_CANTMASK(uc->uc_sigmask);
748		PROC_LOCK(td->td_proc);
749		td->td_proc->p_sigmask = uc->uc_sigmask;
750		PROC_UNLOCK(td->td_proc);
751	}
752	return (ret);
753}
754
755/*
756 * Initialize global thread allocation resources.
757 */
758void
759threadinit(void)
760{
761
762#ifndef __ia64__
763	thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(),
764	    thread_ctor, thread_dtor, thread_init, thread_fini,
765	    UMA_ALIGN_CACHE, 0);
766#else
767	/*
768	 * XXX the ia64 kstack allocator is really lame and is at the mercy
769	 * of contigmalloc().  This hackery is to pre-construct a whole
770	 * pile of thread structures with associated kernel stacks early
771	 * in the system startup while contigmalloc() still works. Once we
772	 * have them, keep them.  Sigh.
773	 */
774	thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(),
775	    thread_ctor, thread_dtor, thread_init, thread_fini,
776	    UMA_ALIGN_CACHE, UMA_ZONE_NOFREE);
777	uma_prealloc(thread_zone, 512);		/* XXX arbitrary */
778#endif
779	ksegrp_zone = uma_zcreate("KSEGRP", sched_sizeof_ksegrp(),
780	    NULL, NULL, ksegrp_init, NULL,
781	    UMA_ALIGN_CACHE, 0);
782	kse_zone = uma_zcreate("KSE", sched_sizeof_kse(),
783	    NULL, NULL, kse_init, NULL,
784	    UMA_ALIGN_CACHE, 0);
785	upcall_zone = uma_zcreate("UPCALL", sizeof(struct kse_upcall),
786	    NULL, NULL, NULL, NULL, UMA_ALIGN_CACHE, 0);
787}
788
789/*
790 * Stash an embarrassingly extra thread into the zombie thread queue.
791 */
792void
793thread_stash(struct thread *td)
794{
795	mtx_lock_spin(&kse_zombie_lock);
796	TAILQ_INSERT_HEAD(&zombie_threads, td, td_runq);
797	mtx_unlock_spin(&kse_zombie_lock);
798}
799
800/*
801 * Stash an embarrassingly extra kse into the zombie kse queue.
802 */
803void
804kse_stash(struct kse *ke)
805{
806	mtx_lock_spin(&kse_zombie_lock);
807	TAILQ_INSERT_HEAD(&zombie_kses, ke, ke_procq);
808	mtx_unlock_spin(&kse_zombie_lock);
809}
810
811/*
812 * Stash an embarrassingly extra upcall into the zombie upcall queue.
813 */
814
815void
816upcall_stash(struct kse_upcall *ku)
817{
818	mtx_lock_spin(&kse_zombie_lock);
819	TAILQ_INSERT_HEAD(&zombie_upcalls, ku, ku_link);
820	mtx_unlock_spin(&kse_zombie_lock);
821}
822
823/*
824 * Stash an embarrassingly extra ksegrp into the zombie ksegrp queue.
825 */
826void
827ksegrp_stash(struct ksegrp *kg)
828{
829	mtx_lock_spin(&kse_zombie_lock);
830	TAILQ_INSERT_HEAD(&zombie_ksegrps, kg, kg_ksegrp);
831	mtx_unlock_spin(&kse_zombie_lock);
832}
833
834/*
835 * Reap zombie thread/kse/ksegrp/upcall resources.
836 */
837void
838thread_reap(void)
839{
840	struct thread *td_first, *td_next;
841	struct kse *ke_first, *ke_next;
842	struct ksegrp *kg_first, *kg_next;
843	struct kse_upcall *ku_first, *ku_next;
844
845	/*
846	 * Don't even bother to lock if none at this instant,
847	 * we really don't care about the next instant..
848	 */
849	if ((!TAILQ_EMPTY(&zombie_threads))
850	    || (!TAILQ_EMPTY(&zombie_kses))
851	    || (!TAILQ_EMPTY(&zombie_ksegrps))
852	    || (!TAILQ_EMPTY(&zombie_upcalls))) {
853		mtx_lock_spin(&kse_zombie_lock);
854		td_first = TAILQ_FIRST(&zombie_threads);
855		ke_first = TAILQ_FIRST(&zombie_kses);
856		kg_first = TAILQ_FIRST(&zombie_ksegrps);
857		ku_first = TAILQ_FIRST(&zombie_upcalls);
858		if (td_first)
859			TAILQ_INIT(&zombie_threads);
860		if (ke_first)
861			TAILQ_INIT(&zombie_kses);
862		if (kg_first)
863			TAILQ_INIT(&zombie_ksegrps);
864		if (ku_first)
865			TAILQ_INIT(&zombie_upcalls);
866		mtx_unlock_spin(&kse_zombie_lock);
867		while (td_first) {
868			td_next = TAILQ_NEXT(td_first, td_runq);
869			if (td_first->td_ucred)
870				crfree(td_first->td_ucred);
871			thread_free(td_first);
872			td_first = td_next;
873		}
874		while (ke_first) {
875			ke_next = TAILQ_NEXT(ke_first, ke_procq);
876			kse_free(ke_first);
877			ke_first = ke_next;
878		}
879		while (kg_first) {
880			kg_next = TAILQ_NEXT(kg_first, kg_ksegrp);
881			ksegrp_free(kg_first);
882			kg_first = kg_next;
883		}
884		while (ku_first) {
885			ku_next = TAILQ_NEXT(ku_first, ku_link);
886			upcall_free(ku_first);
887			ku_first = ku_next;
888		}
889	}
890}
891
892/*
893 * Allocate a ksegrp.
894 */
895struct ksegrp *
896ksegrp_alloc(void)
897{
898	return (uma_zalloc(ksegrp_zone, 0));
899}
900
901/*
902 * Allocate a kse.
903 */
904struct kse *
905kse_alloc(void)
906{
907	return (uma_zalloc(kse_zone, 0));
908}
909
910/*
911 * Allocate a thread.
912 */
913struct thread *
914thread_alloc(void)
915{
916	thread_reap(); /* check if any zombies to get */
917	return (uma_zalloc(thread_zone, 0));
918}
919
920/*
921 * Deallocate a ksegrp.
922 */
923void
924ksegrp_free(struct ksegrp *td)
925{
926	uma_zfree(ksegrp_zone, td);
927}
928
929/*
930 * Deallocate a kse.
931 */
932void
933kse_free(struct kse *td)
934{
935	uma_zfree(kse_zone, td);
936}
937
938/*
939 * Deallocate a thread.
940 */
941void
942thread_free(struct thread *td)
943{
944
945	cpu_thread_clean(td);
946	uma_zfree(thread_zone, td);
947}
948
949/*
950 * Store the thread context in the UTS's mailbox,
951 * then add the mailbox at the head of a list we are building in user space.
952 * The list is anchored in the ksegrp structure.
953 */
954int
955thread_export_context(struct thread *td)
956{
957	struct proc *p;
958	struct ksegrp *kg;
959	uintptr_t mbx;
960	void *addr;
961	int error, temp;
962	ucontext_t uc;
963
964	p = td->td_proc;
965	kg = td->td_ksegrp;
966
967	/* Export the user/machine context. */
968	addr = (void *)(&td->td_mailbox->tm_context);
969	error = copyin(addr, &uc, sizeof(ucontext_t));
970	if (error)
971		goto bad;
972
973	thread_getcontext(td, &uc);
974	error = copyout(&uc, addr, sizeof(ucontext_t));
975	if (error)
976		goto bad;
977
978	/* Export the clock ticks spent in kernel mode. */
979	addr = (caddr_t)(&td->td_mailbox->tm_sticks);
980	temp = fuword(addr) + td->td_usticks;
981	if (suword(addr, temp))
982		goto bad;
983
984	/* Get address in latest mbox of list pointer */
985	addr = (void *)(&td->td_mailbox->tm_next);
986	/*
987	 * Put the saved address of the previous first
988	 * entry into this one
989	 */
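	/*
	 * This loop is effectively an atomic push onto the user-visible
	 * kg_completed list: store the current head into the mailbox's
	 * tm_next, then re-check under the proc lock that the head has
	 * not changed before committing the new head, retrying if
	 * another thread raced with us.
	 */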
990	for (;;) {
991		mbx = (uintptr_t)kg->kg_completed;
992		if (suword(addr, mbx)) {
993			error = EFAULT;
994			goto bad;
995		}
996		PROC_LOCK(p);
997		if (mbx == (uintptr_t)kg->kg_completed) {
998			kg->kg_completed = td->td_mailbox;
999			/*
1000			 * The thread context may be taken away by
1001			 * other upcall threads when we unlock the
1002			 * process lock. It is no longer valid to
1003			 * use it again anywhere else.
1004			 */
1005			td->td_mailbox = NULL;
1006			PROC_UNLOCK(p);
1007			break;
1008		}
1009		PROC_UNLOCK(p);
1010	}
1011	td->td_usticks = 0;
1012	return (0);
1013
1014bad:
1015	PROC_LOCK(p);
1016	psignal(p, SIGSEGV);
1017	PROC_UNLOCK(p);
1018	/* The mailbox is bad, don't use it */
1019	td->td_mailbox = NULL;
1020	td->td_usticks = 0;
1021	return (error);
1022}
1023
1024/*
1025 * Take the list of completed mailboxes for this KSEGRP and put them on this
1026 * upcall's mailbox as it's the next one going up.
1027 */
1028static int
1029thread_link_mboxes(struct ksegrp *kg, struct kse_upcall *ku)
1030{
1031	struct proc *p = kg->kg_proc;
1032	void *addr;
1033	uintptr_t mbx;
1034
1035	addr = (void *)(&ku->ku_mailbox->km_completed);
1036	for (;;) {
1037		mbx = (uintptr_t)kg->kg_completed;
1038		if (suword(addr, mbx)) {
1039			PROC_LOCK(p);
1040			psignal(p, SIGSEGV);
1041			PROC_UNLOCK(p);
1042			return (EFAULT);
1043		}
1044		/* XXXKSE could use atomic CMPXCH here */
1045		PROC_LOCK(p);
1046		if (mbx == (uintptr_t)kg->kg_completed) {
1047			kg->kg_completed = NULL;
1048			PROC_UNLOCK(p);
1049			break;
1050		}
1051		PROC_UNLOCK(p);
1052	}
1053	return (0);
1054}
1055
1056/*
1057 * This function should be called at statclock interrupt time
1058 */
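/*
 * User-mode ticks accumulate in td_uuticks and are pushed out to the
 * thread mailbox when the thread next returns to userland (see
 * thread_update_usr_ticks()); kernel-mode ticks accumulate in
 * td_usticks and are exported by thread_update_sys_ticks() or
 * thread_export_context().
 */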
1059int
1060thread_statclock(int user)
1061{
1062	struct thread *td = curthread;
1063
1064	if (td->td_ksegrp->kg_numupcalls == 0)
1065		return (-1);
1066	if (user) {
1067		/* Currently this is always done via ast(). */
1068		td->td_flags |= (TDF_ASTPENDING|TDF_USTATCLOCK);
1069		td->td_uuticks += ticks;
1070	} else {
1071		if (td->td_mailbox != NULL)
1072			td->td_usticks += ticks;
1073		else {
1074			/* XXXKSE
1075			 * In the future we will call thread_user_enter() for
1076			 * every kernel entry, so if the thread mailbox
1077			 * is NULL it must be the UTS kernel; don't account
1078			 * clock ticks for it.
1079			 */
1080		}
1081	}
1082	return (0);
1083}
1084
1085/*
1086 * Export user mode state clock ticks
1087 */
1088static int
1089thread_update_usr_ticks(struct thread *td)
1090{
1091	struct proc *p = td->td_proc;
1092	struct kse_thr_mailbox *tmbx;
1093	struct kse_upcall *ku;
1094	caddr_t addr;
1095	uint uticks;
1096
1097	if ((ku = td->td_upcall) == NULL)
1098		return (-1);
1099
1100	tmbx = (void *)fuword((void *)&ku->ku_mailbox->km_curthread);
1101	if ((tmbx == NULL) || (tmbx == (void *)-1))
1102		return (-1);
1103	uticks = td->td_uuticks;
1104	td->td_uuticks = 0;
1105	if (uticks) {
1106		addr = (caddr_t)&tmbx->tm_uticks;
1107		uticks += fuword(addr);
1108		if (suword(addr, uticks)) {
1109			PROC_LOCK(p);
1110			psignal(p, SIGSEGV);
1111			PROC_UNLOCK(p);
1112			return (-2);
1113		}
1114	}
1115	return (0);
1116}
1117
1118/*
1119 * Export kernel mode state clock ticks
1120 */
1121
1122static int
1123thread_update_sys_ticks(struct thread *td)
1124{
1125	struct proc *p = td->td_proc;
1126	caddr_t addr;
1127	int sticks;
1128
1129	if (td->td_mailbox == NULL)
1130		return (-1);
1131	if (td->td_usticks == 0)
1132		return (0);
1133	addr = (caddr_t)&td->td_mailbox->tm_sticks;
1134	sticks = fuword(addr);
1135	/* XXXKSE use XCHG instead */
1136	sticks += td->td_usticks;
1137	td->td_usticks = 0;
1138	if (suword(addr, sticks)) {
1139		PROC_LOCK(p);
1140		psignal(p, SIGSEGV);
1141		PROC_UNLOCK(p);
1142		return (-2);
1143	}
1144	return (0);
1145}
1146
1147/*
1148 * Discard the current thread and exit from its context.
1149 *
1150 * Because we can't free a thread while we're operating under its context,
1151 * push the current thread into our CPU's deadthread holder. This means
1152 * we needn't worry about someone else grabbing our context before we
1153 * do a cpu_throw().
1154 */
1155void
1156thread_exit(void)
1157{
1158	struct thread *td;
1159	struct kse *ke;
1160	struct proc *p;
1161	struct ksegrp	*kg;
1162
1163	td = curthread;
1164	kg = td->td_ksegrp;
1165	p = td->td_proc;
1166	ke = td->td_kse;
1167
1168	mtx_assert(&sched_lock, MA_OWNED);
1169	KASSERT(p != NULL, ("thread exiting without a process"));
1170	KASSERT(ke != NULL, ("thread exiting without a kse"));
1171	KASSERT(kg != NULL, ("thread exiting without a kse group"));
1172	PROC_LOCK_ASSERT(p, MA_OWNED);
1173	CTR1(KTR_PROC, "thread_exit: thread %p", td);
1174	KASSERT(!mtx_owned(&Giant), ("dying thread owns giant"));
1175
1176	if (td->td_standin != NULL) {
1177		thread_stash(td->td_standin);
1178		td->td_standin = NULL;
1179	}
1180
1181	cpu_thread_exit(td);	/* XXXSMP */
1182
1183	/*
1184	 * The last thread is left attached to the process
1185	 * so that the whole bundle gets recycled. Skip
1186	 * all this stuff.
1187	 */
1188	if (p->p_numthreads > 1) {
1189		/*
1190		 * Unlink this thread from its proc and the kseg.
1191		 * In keeping with the other structs we probably should
1192		 * have a thread_unlink() that does some of this but it
1193		 * would only be called from here (I think) so it would
1194		 * be a waste. (might be useful for proc_fini() as well.)
1195 		 */
1196		TAILQ_REMOVE(&p->p_threads, td, td_plist);
1197		p->p_numthreads--;
1198		TAILQ_REMOVE(&kg->kg_threads, td, td_kglist);
1199		kg->kg_numthreads--;
1200
1201		/*
1202		 * The test below is NOT true if we are the
1203		 * sole exiting thread. P_STOPPED_SINGLE is unset
1204		 * in exit1() after it is the only survivor.
1205		 */
1206		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
1207			if (p->p_numthreads == p->p_suspcount) {
1208				thread_unsuspend_one(p->p_singlethread);
1209			}
1210		}
1211
1212		/*
1213		 * Because each upcall structure has an owner thread and
1214		 * the owner thread exits only when the process is
1215		 * exiting, the upcall to userland is no longer needed
1216		 * and deleting the upcall structure is safe here.
1217		 * Thus when all threads in a group have exited, all
1218		 * upcalls in the group are automatically freed.
1219		 */
1220		if (td->td_upcall)
1221			upcall_remove(td);
1222
1223		ke->ke_state = KES_UNQUEUED;
1224		ke->ke_thread = NULL;
1225		/*
1226		 * Decide what to do with the KSE attached to this thread.
1227		 */
1228		if (ke->ke_flags & KEF_EXIT)
1229			kse_unlink(ke);
1230		else
1231			kse_reassign(ke);
1232		PROC_UNLOCK(p);
1233		td->td_kse	= NULL;
1234		td->td_state	= TDS_INACTIVE;
1235		td->td_proc	= NULL;
1236		td->td_ksegrp	= NULL;
1237		td->td_last_kse	= NULL;
1238		PCPU_SET(deadthread, td);
1239	} else {
1240		PROC_UNLOCK(p);
1241	}
1242	cpu_throw();
1243	/* NOTREACHED */
1244}
1245
1246/*
1247 * Do any thread-specific cleanups that may be needed in wait().
1248 * Called with Giant held; the proc lock and sched_lock are not held.
1249 */
1250void
1251thread_wait(struct proc *p)
1252{
1253	struct thread *td;
1254
1255	KASSERT((p->p_numthreads == 1), ("Multiple threads in wait1()"));
1256	KASSERT((p->p_numksegrps == 1), ("Multiple ksegrps in wait1()"));
1257	FOREACH_THREAD_IN_PROC(p, td) {
1258		if (td->td_standin != NULL) {
1259			thread_free(td->td_standin);
1260			td->td_standin = NULL;
1261		}
1262		cpu_thread_clean(td);
1263	}
1264	thread_reap();	/* check for zombie threads etc. */
1265}
1266
1267/*
1268 * Link a thread to a process.
1269 * Set up anything that needs to be initialized for it to
1270 * be used by the process.
1271 *
1272 * Note that we do not link to the proc's ucred here.
1273 * The thread is linked as if running but no KSE assigned.
1274 */
1275void
1276thread_link(struct thread *td, struct ksegrp *kg)
1277{
1278	struct proc *p;
1279
1280	p = kg->kg_proc;
1281	td->td_state    = TDS_INACTIVE;
1282	td->td_proc     = p;
1283	td->td_ksegrp   = kg;
1284	td->td_last_kse = NULL;
1285	td->td_flags    = 0;
1286	td->td_kse      = NULL;
1287
1288	LIST_INIT(&td->td_contested);
1289	callout_init(&td->td_slpcallout, 1);
1290	TAILQ_INSERT_HEAD(&p->p_threads, td, td_plist);
1291	TAILQ_INSERT_HEAD(&kg->kg_threads, td, td_kglist);
1292	p->p_numthreads++;
1293	kg->kg_numthreads++;
1294}
1295
1296/*
1297 * Purge a ksegrp resource. When a ksegrp is preparing to
1298 * exit, it calls this function.
1299 */
1300void
1301kse_purge_group(struct thread *td)
1302{
1303	struct ksegrp *kg;
1304	struct kse *ke;
1305
1306	kg = td->td_ksegrp;
1307 	KASSERT(kg->kg_numthreads == 1, ("%s: bad thread number", __func__));
1308	while ((ke = TAILQ_FIRST(&kg->kg_iq)) != NULL) {
1309		KASSERT(ke->ke_state == KES_IDLE,
1310			("%s: wrong idle KSE state", __func__));
1311		kse_unlink(ke);
1312	}
1313	KASSERT((kg->kg_kses == 1),
1314		("%s: ksegrp still has %d KSEs", __func__, kg->kg_kses));
1315	KASSERT((kg->kg_numupcalls == 0),
1316	        ("%s: ksegrp still has %d upcall structures",
1317		__func__, kg->kg_numupcalls));
1318}
1319
1320/*
1321 * Purge a process's KSE resource. When a process is preparing to
1322 * exit, it calls kse_purge to release any extra KSE resources in
1323 * the process.
1324 */
1325void
1326kse_purge(struct proc *p, struct thread *td)
1327{
1328	struct ksegrp *kg;
1329	struct kse *ke;
1330
1331 	KASSERT(p->p_numthreads == 1, ("bad thread number"));
1332	mtx_lock_spin(&sched_lock);
1333	while ((kg = TAILQ_FIRST(&p->p_ksegrps)) != NULL) {
1334		TAILQ_REMOVE(&p->p_ksegrps, kg, kg_ksegrp);
1335		p->p_numksegrps--;
1336		/*
1337		 * There is no ownership for KSEs; after all threads
1338		 * in the group have exited, it is possible that some KSEs
1339		 * were left on the idle queue, so gc them now.
1340		 */
1341		while ((ke = TAILQ_FIRST(&kg->kg_iq)) != NULL) {
1342			KASSERT(ke->ke_state == KES_IDLE,
1343			   ("%s: wrong idle KSE state", __func__));
1344			TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
1345			kg->kg_idle_kses--;
1346			TAILQ_REMOVE(&kg->kg_kseq, ke, ke_kglist);
1347			kg->kg_kses--;
1348			kse_stash(ke);
1349		}
1350		KASSERT(((kg->kg_kses == 0) && (kg != td->td_ksegrp)) ||
1351		        ((kg->kg_kses == 1) && (kg == td->td_ksegrp)),
1352		        ("ksegrp has wrong kg_kses: %d", kg->kg_kses));
1353		KASSERT((kg->kg_numupcalls == 0),
1354		        ("%s: ksegrp still has %d upcall structures",
1355			__func__, kg->kg_numupcalls));
1356
1357		if (kg != td->td_ksegrp)
1358			ksegrp_stash(kg);
1359	}
1360	TAILQ_INSERT_HEAD(&p->p_ksegrps, td->td_ksegrp, kg_ksegrp);
1361	p->p_numksegrps++;
1362	mtx_unlock_spin(&sched_lock);
1363}
1364
1365/*
1366 * This function is intended to be used to initialize a spare thread
1367 * for upcalls. Initialize the thread's large data area outside sched_lock
1368 * for thread_schedule_upcall().
1369 */
1370void
1371thread_alloc_spare(struct thread *td, struct thread *spare)
1372{
1373	if (td->td_standin)
1374		return;
1375	if (spare == NULL)
1376		spare = thread_alloc();
1377	td->td_standin = spare;
1378	bzero(&spare->td_startzero,
1379	    (unsigned)RANGEOF(struct thread, td_startzero, td_endzero));
1380	spare->td_proc = td->td_proc;
1381	/* Setup PCB and fork address */
1382	cpu_set_upcall(spare, td->td_pcb);
1383	/*
1384	 * XXXKSE do we really need this? (default values for the
1385	 * frame).
1386	 */
1387	bcopy(td->td_frame, spare->td_frame, sizeof(struct trapframe));
1388	spare->td_ucred = crhold(td->td_ucred);
1389}
1390
1391/*
1392 * Create a thread and schedule it for upcall on the KSE given.
1393 * Use our thread's standin so that we don't have to allocate one.
1394 */
1395struct thread *
1396thread_schedule_upcall(struct thread *td, struct kse_upcall *ku)
1397{
1398	struct thread *td2;
1399
1400	mtx_assert(&sched_lock, MA_OWNED);
1401
1402	/*
1403	 * Schedule an upcall thread on the specified kse_upcall;
1404	 * the kse_upcall must be free.
1405	 * td must have a spare thread.
1406	 */
1407	KASSERT(ku->ku_owner == NULL, ("%s: upcall has owner", __func__));
1408	if ((td2 = td->td_standin) != NULL) {
1409		td->td_standin = NULL;
1410	} else {
1411		panic("no reserve thread when scheduling an upcall");
1412		return (NULL);
1413	}
1414	CTR3(KTR_PROC, "thread_schedule_upcall: thread %p (pid %d, %s)",
1415	     td2, td->td_proc->p_pid, td->td_proc->p_comm);
1416	bcopy(&td->td_startcopy, &td2->td_startcopy,
1417	    (unsigned) RANGEOF(struct thread, td_startcopy, td_endcopy));
1418	thread_link(td2, ku->ku_ksegrp);
1419	/* Let the new thread become owner of the upcall */
1420	ku->ku_owner   = td2;
1421	td2->td_upcall = ku;
1422	td2->td_flags  = TDF_UPCALLING;
1423	td2->td_kse    = NULL;
1424	td2->td_state  = TDS_CAN_RUN;
1425	td2->td_inhibitors = 0;
1426	setrunqueue(td2);
1427	return (td2);	/* bogus.. should be a void function */
1428}
1429
1430/*
1431 * Schedule an upcall to notify a KSE process that it has received signals.
1432 *
1433 * XXX - Modifying a sigset_t like this is totally bogus.
1434 */
1435struct thread *
1436signal_upcall(struct proc *p, int sig)
1437{
1438#if 0
1439	struct thread *td, *td2;
1440	struct kse *ke;
1441	sigset_t ss;
1442	int error;
1443
1444#endif
1445	PROC_LOCK_ASSERT(p, MA_OWNED);
1446	return (NULL);
1447#if 0
1448	td = FIRST_THREAD_IN_PROC(p);
1449	ke = td->td_kse;
1450	PROC_UNLOCK(p);
1451	error = copyin(&ke->ke_mailbox->km_sigscaught, &ss, sizeof(sigset_t));
1452	PROC_LOCK(p);
1453	if (error)
1454		return (NULL);
1455	SIGADDSET(ss, sig);
1456	PROC_UNLOCK(p);
1457	error = copyout(&ss, &ke->ke_mailbox->km_sigscaught, sizeof(sigset_t));
1458	PROC_LOCK(p);
1459	if (error)
1460		return (NULL);
1461	if (td->td_standin == NULL)
1462		thread_alloc_spare(td, NULL);
1463	mtx_lock_spin(&sched_lock);
1464	td2 = thread_schedule_upcall(td, ke); /* Bogus JRE */
1465	mtx_unlock_spin(&sched_lock);
1466	return (td2);
1467#endif
1468}
1469
1470/*
1471 * Setup done on the thread when it enters the kernel.
1472 * XXXKSE Presently only for syscalls but eventually all kernel entries.
1473 */
1474void
1475thread_user_enter(struct proc *p, struct thread *td)
1476{
1477	struct ksegrp *kg;
1478	struct kse_upcall *ku;
1479
1480	kg = td->td_ksegrp;
1481	/*
1482	 * First check that we shouldn't just abort.
1483	 * But check if we are the single thread first!
1484	 * XXX p_singlethread not locked, but should be safe.
1485	 */
1486	if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) {
1487		PROC_LOCK(p);
1488		mtx_lock_spin(&sched_lock);
1489		thread_exit();
1490		/* NOTREACHED */
1491	}
1492
1493	/*
1494	 * If we are doing a syscall in a KSE environment,
1495	 * note where our mailbox is. There is always the
1496	 * possibility that we could do this lazily (in kse_reassign()),
1497	 * but for now do it every time.
1498	 */
1499	kg = td->td_ksegrp;
1500	if (kg->kg_numupcalls) {
1501		ku = td->td_upcall;
1502		KASSERT(ku, ("%s: no upcall owned", __func__));
1503		KASSERT((ku->ku_owner == td), ("%s: wrong owner", __func__));
1504		td->td_mailbox =
1505		    (void *)fuword((void *)&ku->ku_mailbox->km_curthread);
1506		if ((td->td_mailbox == NULL) ||
1507		    (td->td_mailbox == (void *)-1)) {
1508		    	/* Don't schedule upcall when blocked */
1509			td->td_mailbox = NULL;
1510			mtx_lock_spin(&sched_lock);
1511			td->td_flags &= ~TDF_CAN_UNBIND;
1512			mtx_unlock_spin(&sched_lock);
1513		} else {
1514			if (p->p_numthreads > max_threads_per_proc) {
1515				/*
1516				 * Since the kernel thread limit has been reached,
1517				 * don't schedule an upcall anymore.
1518				 * XXXKSE In fact this code shouldn't be needed.
1519				 */
1520				mtx_lock_spin(&sched_lock);
1521				td->td_flags &= ~TDF_CAN_UNBIND;
1522				mtx_unlock_spin(&sched_lock);
1523			} else {
1524				if (td->td_standin == NULL)
1525					thread_alloc_spare(td, NULL);
1526				mtx_lock_spin(&sched_lock);
1527				td->td_flags |= TDF_CAN_UNBIND;
1528				mtx_unlock_spin(&sched_lock);
1529			}
1530		}
1531	}
1532}
1533
1534/*
1535 * The extra work we go through if we are a threaded process when we
1536 * return to userland.
1537 *
1538 * If we are a KSE process and returning to user mode, check for
1539 * extra work to do before we return (e.g. for more syscalls
1540 * to complete first).  If we were in a critical section, we should
1541 * just return to let it finish. Same if we were in the UTS (in
1542 * which case the mailbox's context's busy indicator will be set).
1543 * The only traps we support will have set the mailbox.
1544 * We will clear it here.
1545 */
1546int
1547thread_userret(struct thread *td, struct trapframe *frame)
1548{
1549	int error;
1550	struct kse_upcall *ku;
1551	struct ksegrp *kg;
1552	struct proc *p;
1553	struct timespec ts;
1554
1555	p = td->td_proc;
1556	kg = td->td_ksegrp;
1557
1558	/* Nothing to do with non-threaded group/process */
1559	if (td->td_ksegrp->kg_numupcalls == 0)
1560		return (0);
1561
1562	/*
1563	 * State clock interrupt hit in userland, it
1564	 * is returning from interrupt, charge thread's
1565	 * userland time for UTS.
1566	 */
1567	if (td->td_flags & TDF_USTATCLOCK) {
1568		thread_update_usr_ticks(td);
1569		mtx_lock_spin(&sched_lock);
1570		td->td_flags &= ~TDF_USTATCLOCK;
1571		mtx_unlock_spin(&sched_lock);
1572	}
1573
1574	/*
1575	 * Optimisation:
1576	 * This thread has not started any upcall.
1577	 * If there is no work to report other than ourselves,
1578	 * then it can return directly to userland.
1579	 */
1580	if (TD_CAN_UNBIND(td)) {
1581		mtx_lock_spin(&sched_lock);
1582		td->td_flags &= ~TDF_CAN_UNBIND;
1583		mtx_unlock_spin(&sched_lock);
1584		if ((kg->kg_completed == NULL) &&
1585		    (td->td_upcall->ku_flags & KUF_DOUPCALL) == 0) {
1586			thread_update_sys_ticks(td);
1587			td->td_mailbox = NULL;
1588			return (0);
1589		}
1590		error = thread_export_context(td);
1591		if (error) {
1592			/*
1593			 * Failing to do the KSE operation just defaults
1594			 * back to synchronous operation, so just return from
1595			 * the syscall.
1596			 */
1597			return (0);
1598		}
1599		/*
1600		 * There is something to report, and we own an upcall
1601		 * structure, so we can go to userland.
1602		 * Turn ourselves into an upcall thread.
1603		 */
1604		mtx_lock_spin(&sched_lock);
1605		td->td_flags |= TDF_UPCALLING;
1606		mtx_unlock_spin(&sched_lock);
1607	} else if (td->td_mailbox) {
1608		error = thread_export_context(td);
1609		if (error) {
1610			PROC_LOCK(td->td_proc);
1611			mtx_lock_spin(&sched_lock);
1612			/* possibly upcall with error? */
1613		} else {
1614			PROC_LOCK(td->td_proc);
1615			mtx_lock_spin(&sched_lock);
1616			/*
1617			 * There are upcall threads waiting for
1618			 * work to do, wake one of them up.
1619			 * XXXKSE Maybe wake all of them up.
1620			 */
1621			if (kg->kg_upsleeps)
1622				wakeup_one(&kg->kg_completed);
1623		}
1624		thread_exit();
1625		/* NOTREACHED */
1626	}
1627
1628	if (td->td_flags & TDF_UPCALLING) {
1629		KASSERT(TD_CAN_UNBIND(td) == 0, ("upcall thread can unbind"));
1630		ku = td->td_upcall;
1631		/*
1632		 * There is no more work to do and we are going to ride
1633		 * this thread up to userland as an upcall.
1634		 * Do the last parts of the setup needed for the upcall.
1635		 */
1636		CTR3(KTR_PROC, "userret: upcall thread %p (pid %d, %s)",
1637		    td, td->td_proc->p_pid, td->td_proc->p_comm);
1638
1639		/*
1640		 * Set user context to the UTS.
1641		 * Will use Giant in cpu_thread_clean() because it uses
1642		 * kmem_free(kernel_map, ...)
1643		 */
1644		cpu_set_upcall_kse(td, ku);
1645
1646		/*
1647		 * Clear TDF_UPCALLING after setting the upcall context;
1648		 * the profiling code checks TDF_UPCALLING to avoid
1649		 * accounting a wrong user %EIP.
1650		 */
1651		mtx_lock_spin(&sched_lock);
1652		td->td_flags &= ~TDF_UPCALLING;
1653		if (ku->ku_flags & KUF_DOUPCALL)
1654			ku->ku_flags &= ~KUF_DOUPCALL;
1655		mtx_unlock_spin(&sched_lock);
1656
1657		/*
1658		 * Unhook the list of completed threads.
1659		 * Anything that completes after this gets to
1660		 * come in next time.
1661		 * Put the list of completed thread mailboxes on
1662		 * this KSE's mailbox.
1663		 */
1664		error = thread_link_mboxes(kg, ku);
1665		if (error)
1666			goto bad;
1667
1668		/*
1669		 * Set state and clear the thread mailbox pointer.
1670		 * From now on we are just a bound outgoing process.
1671		 * **Problem** userret is often called several times.
1672		 * It would be nice if this all happened only on the first
1673		 * time through (the scan for extra work etc.).
1674		 */
1675		error = suword((caddr_t)&ku->ku_mailbox->km_curthread, 0);
1676		if (error)
1677			goto bad;
1678
1679		/* Export current system time */
1680		nanotime(&ts);
1681		if (copyout(&ts,
1682		    (caddr_t)&ku->ku_mailbox->km_timeofday, sizeof(ts))) {
1683			goto bad;
1684		}
1685	}
1686	/*
1687	 * Optimisation:
1688	 * Ensure that we have a spare thread available,
1689	 * for when we re-enter the kernel.
1690	 */
1691	if (td->td_standin == NULL)
1692		thread_alloc_spare(td, NULL);
1693
1694	/*
1695	 * Clear thread mailbox first, then clear system tick count.
1696	 * The order is important because thread_statclock() uses the
1697	 * mailbox pointer to see if it is a userland thread or
1698	 * a UTS kernel thread.
1699	 */
1700	td->td_mailbox = NULL;
1701	td->td_usticks = 0;
1702	return (0);
1703
1704bad:
1705	/*
1706	 * Things are going to be so screwed we should just kill the process.
1707	 * How do we do that?
1708	 */
1709	PROC_LOCK(td->td_proc);
1710	psignal(td->td_proc, SIGSEGV);
1711	PROC_UNLOCK(td->td_proc);
1712	td->td_mailbox = NULL;
1713	td->td_usticks = 0;
1714	return (error);	/* go sync */
1715}
1716
1717/*
1718 * Enforce single-threading.
1719 *
1720 * Returns 1 if the caller must abort (another thread is waiting to
1721 * exit the process or similar). Process is locked!
1722 * Returns 0 when you are successfully the only thread running.
1723 * A process has successfully single threaded in suspend mode when
1724 * there are no threads in user mode. Threads in the kernel must be
1725 * allowed to continue until they get to the user boundary. They may even
1726 * copy out their return values and data before suspending. They may however be
1727 * accelerated in reaching the user boundary as we will wake up
1728 * any sleeping threads that are interruptible (PCATCH).
1729 */
1730int
1731thread_single(int force_exit)
1732{
1733	struct thread *td;
1734	struct thread *td2;
1735	struct proc *p;
1736
1737	td = curthread;
1738	p = td->td_proc;
1739	mtx_assert(&Giant, MA_OWNED);
1740	PROC_LOCK_ASSERT(p, MA_OWNED);
1741	KASSERT((td != NULL), ("curthread is NULL"));
1742
1743	if ((p->p_flag & P_KSES) == 0)
1744		return (0);
1745
1746	/* Is someone already single threading? */
1747	if (p->p_singlethread)
1748		return (1);
1749
1750	if (force_exit == SINGLE_EXIT) {
1751		p->p_flag |= P_SINGLE_EXIT;
1752	} else
1753		p->p_flag &= ~P_SINGLE_EXIT;
1754	p->p_flag |= P_STOPPED_SINGLE;
1755	p->p_singlethread = td;
1756	/* XXXKSE Which lock protects the below values? */
1757	while ((p->p_numthreads - p->p_suspcount) != 1) {
1758		mtx_lock_spin(&sched_lock);
1759		FOREACH_THREAD_IN_PROC(p, td2) {
1760			if (td2 == td)
1761				continue;
1762			if (TD_IS_INHIBITED(td2)) {
1763				if (force_exit == SINGLE_EXIT) {
1764					if (TD_IS_SUSPENDED(td2)) {
1765						thread_unsuspend_one(td2);
1766					}
1767					if (TD_ON_SLEEPQ(td2) &&
1768					    (td2->td_flags & TDF_SINTR)) {
1769						if (td2->td_flags & TDF_CVWAITQ)
1770							cv_abort(td2);
1771						else
1772							abortsleep(td2);
1773					}
1774				} else {
1775					if (TD_IS_SUSPENDED(td2))
1776						continue;
1777					/*
1778					 * Maybe other inhibited states too?
1779					 * XXXKSE Is it totally safe to
1780					 * suspend a non-interruptible thread?
1781					 */
1782					if (td2->td_inhibitors &
1783					    (TDI_SLEEPING | TDI_SWAPPED))
1784						thread_suspend_one(td2);
1785				}
1786			}
1787		}
1788		/*
1789		 * Maybe we suspended some threads.. was it enough?
1790		 */
1791		if ((p->p_numthreads - p->p_suspcount) == 1) {
1792			mtx_unlock_spin(&sched_lock);
1793			break;
1794		}
1795
1796		/*
1797		 * Wake us up when everyone else has suspended.
1798		 * In the mean time we suspend as well.
1799		 */
1800		thread_suspend_one(td);
1801		mtx_unlock(&Giant);
1802		PROC_UNLOCK(p);
1803		p->p_stats->p_ru.ru_nvcsw++;
1804		mi_switch();
1805		mtx_unlock_spin(&sched_lock);
1806		mtx_lock(&Giant);
1807		PROC_LOCK(p);
1808	}
1809	if (force_exit == SINGLE_EXIT) {
1810		if (td->td_upcall) {
1811			mtx_lock_spin(&sched_lock);
1812			upcall_remove(td);
1813			mtx_unlock_spin(&sched_lock);
1814		}
1815		kse_purge(p, td);
1816	}
1817	return (0);
1818}
1819
1820/*
1821 * Called in from locations that can safely check to see
1822 * whether we have to suspend or at least throttle for a
1823 * single-thread event (e.g. fork).
1824 *
1825 * Such locations include userret().
1826 * If the "return_instead" argument is non zero, the thread must be able to
1827 * accept 0 (caller may continue), or 1 (caller must abort) as a result.
1828 *
1829 * The 'return_instead' argument tells the function if it may do a
1830 * thread_exit() or suspend, or whether the caller must abort and back
1831 * out instead.
1832 *
1833 * If the thread that set the single_threading request has set the
1834 * P_SINGLE_EXIT bit in the process flags then this call will never return
1835 * if 'return_instead' is false, but will exit.
1836 *
1837 * P_SINGLE_EXIT | return_instead == 0| return_instead != 0
1838 *---------------+--------------------+---------------------
1839 *       0       | returns 0          |   returns 0 or 1
1840 *               | when ST ends       |   immediately
1841 *---------------+--------------------+---------------------
1842 *       1       | thread exits       |   returns 1
1843 *               |                    |  immediately
1844 * 0 = thread_exit() or suspension ok,
1845 * other = return error instead of stopping the thread.
1846 *
1847 * While a full suspension is under effect, even a single threading
1848 * thread would be suspended if it made this call (but it shouldn't).
1849 * This call should only be made from places where
1850 * thread_exit() would be safe as that may be the outcome unless
1851 * return_instead is set.
1852 */
1853int
1854thread_suspend_check(int return_instead)
1855{
1856	struct thread *td;
1857	struct proc *p;
1858	struct ksegrp *kg;
1859
1860	td = curthread;
1861	p = td->td_proc;
1862	kg = td->td_ksegrp;
1863	PROC_LOCK_ASSERT(p, MA_OWNED);
1864	while (P_SHOULDSTOP(p)) {
1865		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
1866			KASSERT(p->p_singlethread != NULL,
1867			    ("singlethread not set"));
1868			/*
1869			 * The only suspension in action is a
1870			 * single-threading. Single threader need not stop.
1871			 * XXX Should be safe to access unlocked
1872			 * as it can only be set to be true by us.
1873			 */
1874			if (p->p_singlethread == td)
1875				return (0);	/* Exempt from stopping. */
1876		}
1877		if (return_instead)
1878			return (1);
1879
1880		/*
1881		 * If the process is waiting for us to exit,
1882		 * this thread should just suicide.
1883		 * Assumes that P_SINGLE_EXIT implies P_STOPPED_SINGLE.
1884		 */
1885		if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) {
1886			mtx_lock_spin(&sched_lock);
1887			while (mtx_owned(&Giant))
1888				mtx_unlock(&Giant);
1889			thread_exit();
1890		}
1891
1892		/*
1893		 * When a thread suspends, it just
1894		 * moves to the process's suspend queue
1895		 * and stays there.
1896		 */
1897		mtx_lock_spin(&sched_lock);
1898		if ((p->p_flag & P_STOPPED_SIG) &&
1899		    (p->p_suspcount+1 == p->p_numthreads)) {
1900			mtx_unlock_spin(&sched_lock);
1901			PROC_LOCK(p->p_pptr);
1902			if ((p->p_pptr->p_procsig->ps_flag &
1903				PS_NOCLDSTOP) == 0) {
1904				psignal(p->p_pptr, SIGCHLD);
1905			}
1906			PROC_UNLOCK(p->p_pptr);
1907			mtx_lock_spin(&sched_lock);
1908		}
1909		mtx_assert(&Giant, MA_NOTOWNED);
1910		thread_suspend_one(td);
1911		PROC_UNLOCK(p);
1912		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
1913			if (p->p_numthreads == p->p_suspcount) {
1914				thread_unsuspend_one(p->p_singlethread);
1915			}
1916		}
1917		p->p_stats->p_ru.ru_nivcsw++;
1918		mi_switch();
1919		mtx_unlock_spin(&sched_lock);
1920		PROC_LOCK(p);
1921	}
1922	return (0);
1923}
1924
1925void
1926thread_suspend_one(struct thread *td)
1927{
1928	struct proc *p = td->td_proc;
1929
1930	mtx_assert(&sched_lock, MA_OWNED);
1931	p->p_suspcount++;
1932	TD_SET_SUSPENDED(td);
1933	TAILQ_INSERT_TAIL(&p->p_suspended, td, td_runq);
1934	/*
1935	 * Hack: If we are suspending but are on the sleep queue
1936	 * then we are in msleep or the cv equivalent. We
1937	 * want to look like we have two Inhibitors.
1938	 * May already be set.. doesn't matter.
1939	 */
1940	if (TD_ON_SLEEPQ(td))
1941		TD_SET_SLEEPING(td);
1942}
1943
1944void
1945thread_unsuspend_one(struct thread *td)
1946{
1947	struct proc *p = td->td_proc;
1948
1949	mtx_assert(&sched_lock, MA_OWNED);
1950	TAILQ_REMOVE(&p->p_suspended, td, td_runq);
1951	TD_CLR_SUSPENDED(td);
1952	p->p_suspcount--;
1953	setrunnable(td);
1954}
1955
1956/*
1957 * Allow all threads blocked by single threading to continue running.
1958 */
1959void
1960thread_unsuspend(struct proc *p)
1961{
1962	struct thread *td;
1963
1964	mtx_assert(&sched_lock, MA_OWNED);
1965	PROC_LOCK_ASSERT(p, MA_OWNED);
1966	if (!P_SHOULDSTOP(p)) {
1967		while (( td = TAILQ_FIRST(&p->p_suspended))) {
1968			thread_unsuspend_one(td);
1969		}
1970	} else if ((P_SHOULDSTOP(p) == P_STOPPED_SINGLE) &&
1971	    (p->p_numthreads == p->p_suspcount)) {
1972		/*
1973		 * Stopping everything also did the job for the single
1974		 * threading request. Now we've downgraded to single-threaded,
1975		 * let it continue.
1976		 */
1977		thread_unsuspend_one(p->p_singlethread);
1978	}
1979}
1980
1981void
1982thread_single_end(void)
1983{
1984	struct thread *td;
1985	struct proc *p;
1986
1987	td = curthread;
1988	p = td->td_proc;
1989	PROC_LOCK_ASSERT(p, MA_OWNED);
1990	p->p_flag &= ~P_STOPPED_SINGLE;
1991	p->p_singlethread = NULL;
1992	/*
1993	 * If there are other threads they may now run,
1994	 * unless of course there is a blanket 'stop order'
1995	 * on the process. The single threader must be allowed
1996	 * to continue however as this is a bad place to stop.
1997	 */
1998	if ((p->p_numthreads != 1) && (!P_SHOULDSTOP(p))) {
1999		mtx_lock_spin(&sched_lock);
2000		while (( td = TAILQ_FIRST(&p->p_suspended))) {
2001			thread_unsuspend_one(td);
2002		}
2003		mtx_unlock_spin(&sched_lock);
2004	}
2005}
2006
2007
2008