/*
 * Copyright (C) 2001 Julian Elischer <julian@freebsd.org>.
 *  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 * $FreeBSD: head/sys/kern/kern_thread.c 108338 2002-12-28 01:23:07Z julian $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/filedesc.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/sx.h>
#include <sys/tty.h>
#include <sys/user.h>
#include <sys/jail.h>
#include <sys/kse.h>
#include <sys/ktr.h>
#include <sys/ucontext.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/pmap.h>
#include <vm/uma.h>
#include <vm/vm_map.h>

#include <machine/frame.h>

/*
 * KSEGRP related storage.
 */
static uma_zone_t ksegrp_zone;
static uma_zone_t kse_zone;
static uma_zone_t thread_zone;

/* DEBUG ONLY */
SYSCTL_NODE(_kern, OID_AUTO, threads, CTLFLAG_RW, 0, "thread allocation");
static int thread_debug = 0;
SYSCTL_INT(_kern_threads, OID_AUTO, debug, CTLFLAG_RW,
	&thread_debug, 0, "thread debug");

static int max_threads_per_proc = 30;
SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_per_proc, CTLFLAG_RW,
	&max_threads_per_proc, 0, "Limit on threads per proc");

static int max_groups_per_proc = 5;
SYSCTL_INT(_kern_threads, OID_AUTO, max_groups_per_proc, CTLFLAG_RW,
	&max_groups_per_proc, 0, "Limit on thread groups per proc");

#define RANGEOF(type, start, end) (offsetof(type, end) - offsetof(type, start))

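/*
 * For illustration: thread_schedule_upcall() below uses this macro to
 * clear only the "zero section" of a thread, i.e. the byte span between
 * two marker fields:
 *
 *	bzero(&td2->td_startzero,
 *	    (unsigned)RANGEOF(struct thread, td_startzero, td_endzero));
 */
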
struct threadqueue zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads);
TAILQ_HEAD(, kse) zombie_kses = TAILQ_HEAD_INITIALIZER(zombie_kses);
TAILQ_HEAD(, ksegrp) zombie_ksegrps = TAILQ_HEAD_INITIALIZER(zombie_ksegrps);
struct mtx zombie_thread_lock;
MTX_SYSINIT(zombie_thread_lock, &zombie_thread_lock,
    "zombie_thread_lock", MTX_SPIN);
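
/*
 * Note: this is a spin mutex (MTX_SPIN) because the stash routines
 * below may be reached while sched_lock (itself a spin lock) is held,
 * e.g. from thread_exit(), where a sleep mutex must not be taken.
 */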

static void kse_purge(struct proc *p, struct thread *td);

/*
 * Prepare a thread for use.
 */
static void
thread_ctor(void *mem, int size, void *arg)
{
	struct thread	*td;

	td = (struct thread *)mem;
	td->td_state = TDS_INACTIVE;
	td->td_flags |= TDF_UNBOUND;
}

/*
 * Reclaim a thread after use.
 */
static void
thread_dtor(void *mem, int size, void *arg)
{
	struct thread	*td;

	td = (struct thread *)mem;

#ifdef INVARIANTS
	/* Verify that this thread is in a safe state to free. */
	switch (td->td_state) {
	case TDS_INHIBITED:
	case TDS_RUNNING:
	case TDS_CAN_RUN:
	case TDS_RUNQ:
		/*
		 * We must never unlink a thread that is in one of
		 * these states, because it is currently active.
		 */
		panic("bad state for thread unlinking");
		/* NOTREACHED */
	case TDS_INACTIVE:
		break;
	default:
		panic("bad thread state");
		/* NOTREACHED */
	}
#endif
}

/*
 * Initialize type-stable parts of a thread (when newly created).
 */
static void
thread_init(void *mem, int size)
{
	struct thread	*td;

	td = (struct thread *)mem;
	mtx_lock(&Giant);
	pmap_new_thread(td, 0);
	mtx_unlock(&Giant);
	cpu_thread_setup(td);
	td->td_sched = (struct td_sched *)&td[1];
}

/*
 * Tear down type-stable parts of a thread (just before being discarded).
 */
static void
thread_fini(void *mem, int size)
{
	struct thread	*td;

	td = (struct thread *)mem;
	pmap_dispose_thread(td);
}

/*
 * Initialize type-stable parts of a kse (when newly created).
 */
static void
kse_init(void *mem, int size)
{
	struct kse	*ke;

	ke = (struct kse *)mem;
	ke->ke_sched = (struct ke_sched *)&ke[1];
}

/*
 * Initialize type-stable parts of a ksegrp (when newly created).
 */
static void
ksegrp_init(void *mem, int size)
{
	struct ksegrp	*kg;

	kg = (struct ksegrp *)mem;
	kg->kg_sched = (struct kg_sched *)&kg[1];
}

/*
 * KSE is linked onto the idle queue.
 */
void
kse_link(struct kse *ke, struct ksegrp *kg)
{
	struct proc *p = kg->kg_proc;

	TAILQ_INSERT_HEAD(&kg->kg_kseq, ke, ke_kglist);
	kg->kg_kses++;
	ke->ke_state	= KES_UNQUEUED;
	ke->ke_proc	= p;
	ke->ke_ksegrp	= kg;
	ke->ke_owner	= NULL;
	ke->ke_thread	= NULL;
	ke->ke_oncpu	= NOCPU;
}

void
kse_unlink(struct kse *ke)
{
	struct ksegrp *kg;

	mtx_assert(&sched_lock, MA_OWNED);
	kg = ke->ke_ksegrp;

	TAILQ_REMOVE(&kg->kg_kseq, ke, ke_kglist);
	if (--kg->kg_kses == 0) {
		ksegrp_unlink(kg);
	}
	/*
	 * Aggregate stats from the KSE
	 */
	kse_stash(ke);
}

void
ksegrp_link(struct ksegrp *kg, struct proc *p)
{

	TAILQ_INIT(&kg->kg_threads);
	TAILQ_INIT(&kg->kg_runq);	/* links with td_runq */
	TAILQ_INIT(&kg->kg_slpq);	/* links with td_runq */
	TAILQ_INIT(&kg->kg_kseq);	/* all kses in ksegrp */
	TAILQ_INIT(&kg->kg_lq);		/* loan kses in ksegrp */
	kg->kg_proc	= p;
	/*
	 * The following counters are in the -zero- section
	 * and may not need clearing.
	 */
	kg->kg_numthreads = 0;
	kg->kg_runnable = 0;
	kg->kg_kses = 0;
	kg->kg_loan_kses = 0;
	kg->kg_runq_kses = 0; /* XXXKSE change name */
	/* Link it in now that it's consistent. */
	p->p_numksegrps++;
	TAILQ_INSERT_HEAD(&p->p_ksegrps, kg, kg_ksegrp);
}

void
ksegrp_unlink(struct ksegrp *kg)
{
	struct proc *p;

	mtx_assert(&sched_lock, MA_OWNED);
	p = kg->kg_proc;
	KASSERT(((kg->kg_numthreads == 0) && (kg->kg_kses == 0)),
	    ("ksegrp_unlink: residual threads or KSEs"));
	TAILQ_REMOVE(&p->p_ksegrps, kg, kg_ksegrp);
	p->p_numksegrps--;
	/*
	 * Aggregate stats from the KSE
	 */
	ksegrp_stash(kg);
}

/*
 * For a newly created process,
 * link up the structures and its initial threads etc.
 */
void
proc_linkup(struct proc *p, struct ksegrp *kg,
	    struct kse *ke, struct thread *td)
{

	TAILQ_INIT(&p->p_ksegrps);	     /* all ksegrps in proc */
	TAILQ_INIT(&p->p_threads);	     /* all threads in proc */
	TAILQ_INIT(&p->p_suspended);	     /* Threads suspended */
	p->p_numksegrps = 0;
	p->p_numthreads = 0;

	ksegrp_link(kg, p);
	kse_link(ke, kg);
	thread_link(td, kg);
}

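/*
 * A process therefore starts life with exactly one ksegrp, one KSE and
 * one thread linked together; additional KSEs and groups only appear
 * later via kse_create() below.
 */
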
int
kse_thr_interrupt(struct thread *td, struct kse_thr_interrupt_args *uap)
{
	struct proc *p;
	struct thread *td2;

	p = td->td_proc;
	/* KSE-enabled processes only, please. */
	if (!(p->p_flag & P_KSES))
		return (EINVAL);
	if (uap->tmbx == NULL)
		return (EINVAL);
	mtx_lock_spin(&sched_lock);
	FOREACH_THREAD_IN_PROC(p, td2) {
		if (td2->td_mailbox == uap->tmbx) {
			td2->td_flags |= TDF_INTERRUPT;
			if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR)) {
				if (td2->td_flags & TDF_CVWAITQ)
					cv_abort(td2);
				else
					abortsleep(td2);
			}
			mtx_unlock_spin(&sched_lock);
			td->td_retval[0] = 0;
			td->td_retval[1] = 0;
			return (0);
		}
	}
	mtx_unlock_spin(&sched_lock);
	return (ESRCH);
}

int
kse_exit(struct thread *td, struct kse_exit_args *uap)
{
	struct proc *p;
	struct ksegrp *kg;

	p = td->td_proc;
	/* KSE-enabled processes only, please. */
	if (!(p->p_flag & P_KSES))
		return (EINVAL);
	/* Must be a bound thread. */
	if (td->td_flags & TDF_UNBOUND)
		return (EINVAL);
	kg = td->td_ksegrp;
	/* Serialize killing a KSE. */
	PROC_LOCK(p);
	mtx_lock_spin(&sched_lock);
	if ((kg->kg_kses == 1) && (kg->kg_numthreads > 1)) {
		mtx_unlock_spin(&sched_lock);
		PROC_UNLOCK(p);
		return (EDEADLK);
	}
	if ((p->p_numthreads == 1) && (p->p_numksegrps == 1)) {
		/* XXXKSE what if >1 KSE? check.... */
		p->p_flag &= ~P_KSES;
		mtx_unlock_spin(&sched_lock);
		PROC_UNLOCK(p);
	} else {
		td->td_kse->ke_flags |= KEF_EXIT;
		thread_exit();
		/* NOTREACHED */
	}
	return (0);
}

/*
 * Either becomes an upcall, or waits for an awakening event and
 * THEN becomes an upcall. Only error cases return.
 */
int
kse_release(struct thread *td, struct kse_release_args *uap)
{
	struct proc *p;
	struct ksegrp *kg;

	p = td->td_proc;
	kg = td->td_ksegrp;
	/*
	 * Must be a bound thread, and the KSE must have a mailbox ready;
	 * without one the KSE cannot generate an upcall.
	 */
	if (!(p->p_flag & P_KSES) ||
	    (td->td_flags & TDF_UNBOUND) ||
	    (td->td_kse->ke_mailbox == NULL))
		return (EINVAL);

	PROC_LOCK(p);
	/* Change OURSELF to become an upcall. */
	td->td_flags = TDF_UPCALLING; /* BOUND */
	if (kg->kg_completed == NULL) {
		/* XXXKSE also look for waiting signals etc. */
		/*
		 * The KSE will however be lendable.
		 */
		mtx_lock_spin(&sched_lock);
		TD_SET_IDLE(td);
		PROC_UNLOCK(p);
		p->p_stats->p_ru.ru_nvcsw++;
		mi_switch();
		mtx_unlock_spin(&sched_lock);
	} else {
		PROC_UNLOCK(p);
	}
	return (0);
}

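/*
 * Sketch of the protocol above: an upcall thread parked here by
 * TD_SET_IDLE() is resumed either by kse_wakeup() below, or when an
 * unbound thread completes in thread_userret() and clears the idle
 * state so the owner can report the new completion.
 */
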
/* struct kse_wakeup_args {
	struct kse_mailbox *mbx;
}; */
int
kse_wakeup(struct thread *td, struct kse_wakeup_args *uap)
{
	struct proc *p;
	struct kse *ke;
	struct ksegrp *kg;
	struct thread *td2;

	p = td->td_proc;
	td2 = NULL;
	/* KSE-enabled processes only, please. */
	if (!(p->p_flag & P_KSES))
		return (EINVAL);
	PROC_LOCK(p);
	mtx_lock_spin(&sched_lock);
	if (uap->mbx) {
		FOREACH_KSEGRP_IN_PROC(p, kg) {
			FOREACH_KSE_IN_GROUP(kg, ke) {
				if (ke->ke_mailbox != uap->mbx)
					continue;
				td2 = ke->ke_owner;
				KASSERT((td2 != NULL), ("KSE with no owner"));
				if (!TD_IS_IDLE(td2)) {
					/* Return silently if no longer idle. */
					mtx_unlock_spin(&sched_lock);
					PROC_UNLOCK(p);
					td->td_retval[0] = 0;
					td->td_retval[1] = 0;
					return (0);
				}
				break;
			}
			if (td2) {
				break;
			}
		}
	} else {
		/*
		 * Look for any idle KSE to resurrect.
		 */
		kg = td->td_ksegrp;
		FOREACH_KSE_IN_GROUP(kg, ke) {
			td2 = ke->ke_owner;
			KASSERT((td2 != NULL), ("KSE with no owner2"));
			if (TD_IS_IDLE(td2))
				break;
			td2 = NULL;
		}
	}
	if (td2) {
		TD_CLR_IDLE(td2);
		setrunnable(td2);
		mtx_unlock_spin(&sched_lock);
		PROC_UNLOCK(p);
		td->td_retval[0] = 0;
		td->td_retval[1] = 0;
		return (0);
	}
	mtx_unlock_spin(&sched_lock);
	PROC_UNLOCK(p);
	return (ESRCH);
}

/*
 * First call with no new KSEG: use the current KSE, don't schedule an upcall.
 * In all other situations, allocate a new KSE and schedule an upcall on it.
 */
/* struct kse_create_args {
	struct kse_mailbox *mbx;
	int newgroup;
}; */
int
kse_create(struct thread *td, struct kse_create_args *uap)
{
	struct kse *newke;
	struct kse *ke;
	struct ksegrp *newkg;
	struct ksegrp *kg;
	struct proc *p;
	struct kse_mailbox mbx;
	int err;

	p = td->td_proc;
	if ((err = copyin(uap->mbx, &mbx, sizeof(mbx))))
		return (err);

	p->p_flag |= P_KSES; /* easier to just set it than to test and set */
	kg = td->td_ksegrp;
	if (uap->newgroup) {
		if (p->p_numksegrps >= max_groups_per_proc)
			return (EPROCLIM);
		/*
		 * If we want a new KSEGRP it doesn't matter whether
		 * we have already fired up KSE mode before or not.
		 * We put the process in KSE mode and create a new KSEGRP
		 * and KSE. If our KSE has not got a mailbox yet then
		 * that doesn't matter, just leave it that way. It will
		 * ensure that this thread stays BOUND. It's possible
		 * that the call came from a threaded library and the main
		 * program knows nothing of threads.
		 */
		newkg = ksegrp_alloc();
		bzero(&newkg->kg_startzero, RANGEOF(struct ksegrp,
		      kg_startzero, kg_endzero));
		bcopy(&kg->kg_startcopy, &newkg->kg_startcopy,
		      RANGEOF(struct ksegrp, kg_startcopy, kg_endcopy));
		newke = kse_alloc();
	} else {
		/*
		 * Otherwise, if we have already set this KSE
		 * to have a mailbox, we want to make another KSE here,
		 * but only if there are not already the limit, which
		 * is 1 per CPU max.
		 *
		 * If the current KSE doesn't have a mailbox we just use it
		 * and give it one.
		 *
		 * Because we don't like to access
		 * the KSE outside of schedlock if we are UNBOUND,
		 * (because it can change if we are preempted by an interrupt)
		 * we can deduce it as having a mailbox if we are UNBOUND,
		 * and only need to actually look at it if we are BOUND,
		 * which is safe.
		 */
		if ((td->td_flags & TDF_UNBOUND) || td->td_kse->ke_mailbox) {
			if (thread_debug == 0) { /* if debugging, allow more */
#ifdef SMP
			if (kg->kg_kses > mp_ncpus)
#endif
				return (EPROCLIM);
			}
			newke = kse_alloc();
		} else {
			newke = NULL;
		}
		newkg = NULL;
	}
	if (newke) {
		bzero(&newke->ke_startzero, RANGEOF(struct kse,
		      ke_startzero, ke_endzero));
#if 0
		bcopy(&ke->ke_startcopy, &newke->ke_startcopy,
		      RANGEOF(struct kse, ke_startcopy, ke_endcopy));
#endif
		/* For the first call this may not have been set. */
		if (td->td_standin == NULL) {
			td->td_standin = thread_alloc();
		}
		mtx_lock_spin(&sched_lock);
		if (newkg) {
			if (p->p_numksegrps >= max_groups_per_proc) {
				mtx_unlock_spin(&sched_lock);
				ksegrp_free(newkg);
				kse_free(newke);
				return (EPROCLIM);
			}
			ksegrp_link(newkg, p);
		} else
			newkg = kg;
		kse_link(newke, newkg);
		if (p->p_sflag & PS_NEEDSIGCHK)
			newke->ke_flags |= KEF_ASTPENDING;
		newke->ke_mailbox = uap->mbx;
		newke->ke_upcall = mbx.km_func;
		bcopy(&mbx.km_stack, &newke->ke_stack, sizeof(stack_t));
		thread_schedule_upcall(td, newke);
		mtx_unlock_spin(&sched_lock);
	} else {
		/*
		 * If we didn't allocate a new KSE then we are using
		 * the existing (BOUND) KSE.
		 */
		ke = td->td_kse;
		ke->ke_mailbox = uap->mbx;
		ke->ke_upcall = mbx.km_func;
		bcopy(&mbx.km_stack, &ke->ke_stack, sizeof(stack_t));
	}
	/*
	 * Fill out the KSE-mode specific fields of the new kse.
	 */

	td->td_retval[0] = 0;
	td->td_retval[1] = 0;
	return (0);
}

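/*
 * Illustrative userland sketch (hypothetical names, not part of this
 * file): a threading library might start a fresh KSE group with
 * something like
 *
 *	struct kse_mailbox mbx;
 *
 *	bzero(&mbx, sizeof(mbx));
 *	mbx.km_func = uts_entry;	(upcall entry point)
 *	mbx.km_stack.ss_sp = uts_stack;
 *	mbx.km_stack.ss_size = sizeof(uts_stack);
 *	kse_create(&mbx, 1);		(nonzero newgroup => new KSEGRP)
 *
 * km_func and km_stack are the fields consumed above; uts_entry and
 * uts_stack are whatever the library supplies.
 */
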
/*
 * Fill a ucontext_t with a thread's context information.
 *
 * This is an analogue to getcontext(3).
 */
void
thread_getcontext(struct thread *td, ucontext_t *uc)
{

/*
 * XXX this is declared in a MD include file, i386/include/ucontext.h but
 * is used in MI code.
 */
#ifdef __i386__
	get_mcontext(td, &uc->uc_mcontext);
#endif
	uc->uc_sigmask = td->td_proc->p_sigmask;
}

/*
 * Set a thread's context from a ucontext_t.
 *
 * This is an analogue to setcontext(3).
 */
int
thread_setcontext(struct thread *td, ucontext_t *uc)
{
	int ret;

/*
 * XXX this is declared in a MD include file, i386/include/ucontext.h but
 * is used in MI code.
 */
#ifdef __i386__
	ret = set_mcontext(td, &uc->uc_mcontext);
#else
	ret = ENOSYS;
#endif
	if (ret == 0) {
		SIG_CANTMASK(uc->uc_sigmask);
		PROC_LOCK(td->td_proc);
		td->td_proc->p_sigmask = uc->uc_sigmask;
		PROC_UNLOCK(td->td_proc);
	}
	return (ret);
}

/*
 * Initialize global thread allocation resources.
 */
void
threadinit(void)
{

#ifndef __ia64__
	thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(),
	    thread_ctor, thread_dtor, thread_init, thread_fini,
	    UMA_ALIGN_CACHE, 0);
#else
	/*
	 * XXX the ia64 kstack allocator is really lame and is at the mercy
	 * of contigmalloc().  This hackery is to pre-construct a whole
	 * pile of thread structures with associated kernel stacks early
	 * in the system startup while contigmalloc() still works. Once we
	 * have them, keep them.  Sigh.
	 */
	thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(),
	    thread_ctor, thread_dtor, thread_init, thread_fini,
	    UMA_ALIGN_CACHE, UMA_ZONE_NOFREE);
	uma_prealloc(thread_zone, 512);		/* XXX arbitrary */
#endif
	ksegrp_zone = uma_zcreate("KSEGRP", sched_sizeof_ksegrp(),
	    NULL, NULL, ksegrp_init, NULL,
	    UMA_ALIGN_CACHE, 0);
	kse_zone = uma_zcreate("KSE", sched_sizeof_kse(),
	    NULL, NULL, kse_init, NULL,
	    UMA_ALIGN_CACHE, 0);
}

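/*
 * The zone callbacks registered above follow the usual UMA lifecycle:
 * init/fini run once per backing item (type-stable state such as the
 * kernel stack and the scheduler extension pointer), while ctor/dtor
 * run on every uma_zalloc()/uma_zfree() of that item.
 */
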
/*
 * Stash an embarrassingly extra thread into the zombie thread queue.
 */
void
thread_stash(struct thread *td)
{
	mtx_lock_spin(&zombie_thread_lock);
	TAILQ_INSERT_HEAD(&zombie_threads, td, td_runq);
	mtx_unlock_spin(&zombie_thread_lock);
}

/*
 * Stash an embarrassingly extra kse into the zombie kse queue.
 */
void
kse_stash(struct kse *ke)
{
	mtx_lock_spin(&zombie_thread_lock);
	TAILQ_INSERT_HEAD(&zombie_kses, ke, ke_procq);
	mtx_unlock_spin(&zombie_thread_lock);
}

/*
 * Stash an embarrassingly extra ksegrp into the zombie ksegrp queue.
 */
void
ksegrp_stash(struct ksegrp *kg)
{
	mtx_lock_spin(&zombie_thread_lock);
	TAILQ_INSERT_HEAD(&zombie_ksegrps, kg, kg_ksegrp);
	mtx_unlock_spin(&zombie_thread_lock);
}

/*
 * Reap zombie threads.
 */
void
thread_reap(void)
{
	struct thread *td_first, *td_next;
	struct kse *ke_first, *ke_next;
	struct ksegrp *kg_first, *kg_next;

	/*
	 * Don't even bother to lock if none at this instant;
	 * we really don't care about the next instant.
	 */
	if ((!TAILQ_EMPTY(&zombie_threads))
	    || (!TAILQ_EMPTY(&zombie_kses))
	    || (!TAILQ_EMPTY(&zombie_ksegrps))) {
		mtx_lock_spin(&zombie_thread_lock);
		td_first = TAILQ_FIRST(&zombie_threads);
		ke_first = TAILQ_FIRST(&zombie_kses);
		kg_first = TAILQ_FIRST(&zombie_ksegrps);
		if (td_first)
			TAILQ_INIT(&zombie_threads);
		if (ke_first)
			TAILQ_INIT(&zombie_kses);
		if (kg_first)
			TAILQ_INIT(&zombie_ksegrps);
		mtx_unlock_spin(&zombie_thread_lock);
		while (td_first) {
			td_next = TAILQ_NEXT(td_first, td_runq);
			thread_free(td_first);
			td_first = td_next;
		}
		while (ke_first) {
			ke_next = TAILQ_NEXT(ke_first, ke_procq);
			kse_free(ke_first);
			ke_first = ke_next;
		}
		while (kg_first) {
			kg_next = TAILQ_NEXT(kg_first, kg_ksegrp);
			ksegrp_free(kg_first);
			kg_first = kg_next;
		}
	}
}

/*
 * Allocate a ksegrp.
 */
struct ksegrp *
ksegrp_alloc(void)
{
	return (uma_zalloc(ksegrp_zone, M_WAITOK));
}

/*
 * Allocate a kse.
 */
struct kse *
kse_alloc(void)
{
	return (uma_zalloc(kse_zone, M_WAITOK));
}

/*
 * Allocate a thread.
 */
struct thread *
thread_alloc(void)
{
	thread_reap(); /* check if any zombies to get */
	return (uma_zalloc(thread_zone, M_WAITOK));
}

/*
 * Deallocate a ksegrp.
 */
void
ksegrp_free(struct ksegrp *kg)
{
	uma_zfree(ksegrp_zone, kg);
}

/*
 * Deallocate a kse.
 */
void
kse_free(struct kse *ke)
{
	uma_zfree(kse_zone, ke);
}

/*
 * Deallocate a thread.
 */
void
thread_free(struct thread *td)
{

	cpu_thread_clean(td);
	uma_zfree(thread_zone, td);
}

/*
 * Store the thread context in the UTS's mailbox,
 * then add the mailbox at the head of a list we are building in user space.
 * The list is anchored in the ksegrp structure.
 */
int
thread_export_context(struct thread *td)
{
	struct proc *p;
	struct ksegrp *kg;
	uintptr_t mbx;
	void *addr;
	int error;
	ucontext_t uc;
	uint temp;

	p = td->td_proc;
	kg = td->td_ksegrp;

	/* Export the user/machine context. */
#if 0
	addr = (caddr_t)td->td_mailbox +
	    offsetof(struct kse_thr_mailbox, tm_context);
#else /* if user pointer arithmetic is valid in the kernel */
	addr = (void *)(&td->td_mailbox->tm_context);
#endif
	error = copyin(addr, &uc, sizeof(ucontext_t));
	if (error)
		goto bad;

	thread_getcontext(td, &uc);
	error = copyout(&uc, addr, sizeof(ucontext_t));
	if (error)
		goto bad;

	/* Get the address of the list pointer in the latest mailbox. */
#if 0
	addr = (caddr_t)td->td_mailbox
	    + offsetof(struct kse_thr_mailbox, tm_next);
#else /* if user pointer arithmetic is valid in the kernel */
	addr = (void *)(&td->td_mailbox->tm_next);
#endif
	/*
	 * Put the saved address of the previous first
	 * entry into this one.
	 */
	for (;;) {
		mbx = (uintptr_t)kg->kg_completed;
		if (suword(addr, mbx)) {
			error = EFAULT;
			goto bad;
		}
		PROC_LOCK(p);
		if (mbx == (uintptr_t)kg->kg_completed) {
			kg->kg_completed = td->td_mailbox;
			PROC_UNLOCK(p);
			break;
		}
		PROC_UNLOCK(p);
	}
	addr = (caddr_t)td->td_mailbox
	    + offsetof(struct kse_thr_mailbox, tm_sticks);
	temp = fuword(addr) + td->td_usticks;
	if (suword(addr, temp)) {
		error = EFAULT;
		goto bad;
	}
	return (0);

bad:
	PROC_LOCK(p);
	psignal(p, SIGSEGV);
	PROC_UNLOCK(p);
	return (error);
}

/*
 * Take the list of completed mailboxes for this KSEGRP and put them on this
 * KSE's mailbox as it's the next one going up.
 */
static int
thread_link_mboxes(struct ksegrp *kg, struct kse *ke)
{
	struct proc *p = kg->kg_proc;
	void *addr;
	uintptr_t mbx;

#if 0
	addr = (caddr_t)ke->ke_mailbox
	    + offsetof(struct kse_mailbox, km_completed);
#else /* if user pointer arithmetic is valid in the kernel */
	addr = (void *)(&ke->ke_mailbox->km_completed);
#endif
	for (;;) {
		mbx = (uintptr_t)kg->kg_completed;
		if (suword(addr, mbx)) {
			PROC_LOCK(p);
			psignal(p, SIGSEGV);
			PROC_UNLOCK(p);
			return (EFAULT);
		}
		/* XXXKSE could use atomic CMPXCH here */
		PROC_LOCK(p);
		if (mbx == (uintptr_t)kg->kg_completed) {
			kg->kg_completed = NULL;
			PROC_UNLOCK(p);
			break;
		}
		PROC_UNLOCK(p);
	}
	return (0);
}

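/*
 * The suword()/re-check loop above (and the matching one in
 * thread_export_context()) emulates an atomic compare-and-swap on the
 * user-visible list head: the link is written to user space first, then
 * kg_completed is re-read under PROC_LOCK and the store is retried if it
 * changed, as the XXXKSE comment notes.
 */
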
/*
 * This function should be called at statclock interrupt time.
 */
int
thread_add_ticks_intr(int user, uint ticks)
{
	struct thread *td = curthread;
	struct kse *ke = td->td_kse;

	if (ke->ke_mailbox == NULL)
		return (-1);
	if (user) {
		/* Currently, this is always done via ast(). */
		ke->ke_flags |= KEF_ASTPENDING;
		ke->ke_uuticks += ticks;
	} else {
		if (td->td_mailbox != NULL)
			td->td_usticks += ticks;
		else
			ke->ke_usticks += ticks;
	}
	return (0);
}

static int
thread_update_uticks(void)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct kse *ke = td->td_kse;
	struct kse_thr_mailbox *tmbx;
	caddr_t addr;
	uint uticks, sticks;

	if (ke->ke_mailbox == NULL)
		return (0);

	uticks = ke->ke_uuticks;
	ke->ke_uuticks = 0;
	sticks = ke->ke_usticks;
	ke->ke_usticks = 0;
#if 0
	tmbx = (void *)fuword((caddr_t)ke->ke_mailbox
	    + offsetof(struct kse_mailbox, km_curthread));
#else /* if user pointer arithmetic is ok in the kernel */
	tmbx = (void *)fuword((void *)&ke->ke_mailbox->km_curthread);
#endif
	if ((tmbx == NULL) || (tmbx == (void *)-1))
		return (0);
	if (uticks) {
		addr = (caddr_t)tmbx + offsetof(struct kse_thr_mailbox, tm_uticks);
		uticks += fuword(addr);
		if (suword(addr, uticks))
			goto bad;
	}
	if (sticks) {
		addr = (caddr_t)tmbx + offsetof(struct kse_thr_mailbox, tm_sticks);
		sticks += fuword(addr);
		if (suword(addr, sticks))
			goto bad;
	}
	return (0);
bad:
	PROC_LOCK(p);
	psignal(p, SIGSEGV);
	PROC_UNLOCK(p);
	return (-1);
}

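/*
 * Both tick counts above are accumulated in the kernel and exported to
 * the tm_uticks/tm_sticks fields of the current thread's user mailbox,
 * so the UTS can account time to individual user threads.
 */
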
/*
 * Discard the current thread and exit from its context.
 *
 * Because we can't free a thread while we're operating under its context,
 * push the current thread into our CPU's deadthread holder. This means
 * we needn't worry about someone else grabbing our context before we
 * do a cpu_throw().
 */
void
thread_exit(void)
{
	struct thread *td;
	struct kse *ke;
	struct proc *p;
	struct ksegrp	*kg;

	td = curthread;
	kg = td->td_ksegrp;
	p = td->td_proc;
	ke = td->td_kse;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT(p != NULL, ("thread exiting without a process"));
	KASSERT(ke != NULL, ("thread exiting without a kse"));
	KASSERT(kg != NULL, ("thread exiting without a kse group"));
	PROC_LOCK_ASSERT(p, MA_OWNED);
	CTR1(KTR_PROC, "thread_exit: thread %p", td);
	KASSERT(!mtx_owned(&Giant), ("dying thread owns giant"));

	if (td->td_standin != NULL) {
		thread_stash(td->td_standin);
		td->td_standin = NULL;
	}

	cpu_thread_exit(td);	/* XXXSMP */

	/*
	 * The last thread is left attached to the process
	 * so that the whole bundle gets recycled. Skip
	 * all this stuff.
	 */
	if (p->p_numthreads > 1) {
		/*
		 * Unlink this thread from its proc and the kseg.
		 * In keeping with the other structs we probably should
		 * have a thread_unlink() that does some of this but it
		 * would only be called from here (I think) so it would
		 * be a waste. (might be useful for proc_fini() as well.)
		 */
		TAILQ_REMOVE(&p->p_threads, td, td_plist);
		p->p_numthreads--;
		TAILQ_REMOVE(&kg->kg_threads, td, td_kglist);
		kg->kg_numthreads--;
		/*
		 * The test below is NOT true if we are the
		 * sole exiting thread. P_STOPPED_SINGLE is unset
		 * in exit1() after it is the only survivor.
		 */
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			if (p->p_numthreads == p->p_suspcount) {
				thread_unsuspend_one(p->p_singlethread);
			}
		}

		/* Reassign this thread's KSE. */
		ke->ke_state = KES_UNQUEUED;

		/*
		 * Decide what to do with the KSE attached to this thread.
		 * XXX Possibly kse_reassign should do both cases as it already
		 * does some of this.
		 */
		if (ke->ke_flags & KEF_EXIT) {
			KASSERT((ke->ke_owner == td),
			    ("thread_exit: KSE exiting with non-owner thread"));
			ke->ke_thread = NULL;
			td->td_kse = NULL;
			kse_unlink(ke);
		} else {
			TD_SET_EXITING(td);	/* definitely not runnable */
			kse_reassign(ke);
		}
		PROC_UNLOCK(p);
		td->td_state	= TDS_INACTIVE;
		td->td_proc	= NULL;
		td->td_ksegrp	= NULL;
		td->td_last_kse	= NULL;
		PCPU_SET(deadthread, td);
	} else {
		PROC_UNLOCK(p);
	}
	cpu_throw();
	/* NOTREACHED */
}

/*
 * Do any thread-specific cleanups that may be needed in wait().
 * Called with Giant held; proc and schedlock not held.
 */
void
thread_wait(struct proc *p)
{
	struct thread *td;

	KASSERT((p->p_numthreads == 1), ("Multiple threads in wait1()"));
	KASSERT((p->p_numksegrps == 1), ("Multiple ksegrps in wait1()"));
	FOREACH_THREAD_IN_PROC(p, td) {
		if (td->td_standin != NULL) {
			thread_free(td->td_standin);
			td->td_standin = NULL;
		}
		cpu_thread_clean(td);
	}
	thread_reap();	/* check for zombie threads etc. */
}

/*
 * Link a thread to a process.
 * Set up anything that needs to be initialized for it to
 * be used by the process.
 *
 * Note that we do not link to the proc's ucred here.
 * The thread is linked as if running but no KSE assigned.
 */
void
thread_link(struct thread *td, struct ksegrp *kg)
{
	struct proc *p;

	p = kg->kg_proc;
	td->td_state = TDS_INACTIVE;
	td->td_proc	= p;
	td->td_ksegrp	= kg;
	td->td_last_kse	= NULL;

	LIST_INIT(&td->td_contested);
	callout_init(&td->td_slpcallout, 1);
	TAILQ_INSERT_HEAD(&p->p_threads, td, td_plist);
	TAILQ_INSERT_HEAD(&kg->kg_threads, td, td_kglist);
	p->p_numthreads++;
	kg->kg_numthreads++;
	td->td_kse	= NULL;
}

void
kse_purge(struct proc *p, struct thread *td)
{
	/*
	 * XXXKSE think about this..
	 * may need to wake up threads on the loan queue.
	 */
	struct ksegrp *kg;

	KASSERT(p->p_numthreads == 1, ("bad thread number"));
	mtx_lock_spin(&sched_lock);
	while ((kg = TAILQ_FIRST(&p->p_ksegrps)) != NULL) {
		TAILQ_REMOVE(&p->p_ksegrps, kg, kg_ksegrp);
		p->p_numksegrps--;
		KASSERT(((kg->kg_kses == 0) && (kg != td->td_ksegrp)) ||
		    ((kg->kg_kses == 1) && (kg == td->td_ksegrp)),
		    ("wrong kg_kses"));
		if (kg != td->td_ksegrp) {
			ksegrp_stash(kg);
		}
	}
	TAILQ_INSERT_HEAD(&p->p_ksegrps, td->td_ksegrp, kg_ksegrp);
	p->p_numksegrps++;
	mtx_unlock_spin(&sched_lock);
}

/*
 * Create a thread and schedule it for upcall on the KSE given.
 * Use our thread's standin so that we don't have to allocate one.
 */
struct thread *
thread_schedule_upcall(struct thread *td, struct kse *ke)
{
	struct thread *td2;
	int newkse;

	mtx_assert(&sched_lock, MA_OWNED);
	newkse = (ke != td->td_kse);

	/*
	 * If the owner and kse are BOUND then that thread is planning to
	 * go to userland and upcalls are not expected. So don't make one.
	 * If it is not bound then make it so with the spare thread
	 * and then borrow back the KSE to allow us to complete some
	 * in-kernel work. When we complete, the Bound thread will have
	 * the chance to complete. This thread will sleep as planned.
	 * Hopefully there will eventually be an unbound thread that can
	 * be converted to an upcall to report the completion of this thread.
	 */

	if ((td2 = td->td_standin) != NULL) {
		td->td_standin = NULL;
	} else {
		if (newkse)
			panic("no reserve thread when called with a new kse");
		/*
		 * If called from (e.g.) sleep and we do not have
		 * a reserve thread, then we've used it, so do not
		 * create an upcall.
		 */
		return (NULL);
	}
	CTR3(KTR_PROC, "thread_schedule_upcall: thread %p (pid %d, %s)",
	     td2, td->td_proc->p_pid, td->td_proc->p_comm);
	bzero(&td2->td_startzero,
	    (unsigned)RANGEOF(struct thread, td_startzero, td_endzero));
	bcopy(&td->td_startcopy, &td2->td_startcopy,
	    (unsigned) RANGEOF(struct thread, td_startcopy, td_endcopy));
	thread_link(td2, ke->ke_ksegrp);
	cpu_set_upcall(td2, td->td_pcb);

	/*
	 * XXXKSE do we really need this? (default values for the
	 * frame).
	 */
	bcopy(td->td_frame, td2->td_frame, sizeof(struct trapframe));

	/*
	 * Bind the new thread to the KSE,
	 * and if it's our KSE, lend it back to ourself
	 * so we can continue running.
	 */
	td2->td_ucred = crhold(td->td_ucred);
	td2->td_flags = TDF_UPCALLING; /* note: BOUND */
	td2->td_kse = ke;
	td2->td_state = TDS_CAN_RUN;
	td2->td_inhibitors = 0;
	ke->ke_owner = td2;
	/*
	 * If called from kse_reassign(), we are working on the current
	 * KSE so fake that we borrowed it. If called from
	 * kse_create(), don't, as we have a new kse too.
	 */
	if (!newkse) {
		/*
		 * This thread will be scheduled when the current thread
		 * blocks, exits or tries to enter userspace, (which ever
		 * happens first). When that happens the KSE will "revert"
		 * to this thread in a BOUND manner. Since we are called
		 * from msleep() this is going to be "very soon" in nearly
		 * all cases.
		 */
		TD_SET_LOAN(td2);
	} else {
		ke->ke_thread = td2;
		ke->ke_state = KES_THREAD;
		setrunqueue(td2);
	}
	return (td2);	/* bogus.. should be a void function */
}

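/*
 * The td_standin spare consumed above is pre-allocated (see kse_create()
 * and thread_user_enter()) precisely so that an upcall can be set up
 * from places like msleep(), where allocating a thread at that point
 * would be unsafe or impossible.
 */
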
/*
 * Schedule an upcall to notify a KSE process that it received signals.
 *
 * XXX - Modifying a sigset_t like this is totally bogus.
 */
struct thread *
signal_upcall(struct proc *p, int sig)
{
	struct thread *td, *td2;
	struct kse *ke;
	sigset_t ss;
	int error;

	PROC_LOCK_ASSERT(p, MA_OWNED);
return (NULL);
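	/*
	 * XXX: the early return above leaves the remainder of this
	 * function disabled; it is kept for reference only.
	 */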

	td = FIRST_THREAD_IN_PROC(p);
	ke = td->td_kse;
	PROC_UNLOCK(p);
	error = copyin(&ke->ke_mailbox->km_sigscaught, &ss, sizeof(sigset_t));
	PROC_LOCK(p);
	if (error)
		return (NULL);
	SIGADDSET(ss, sig);
	PROC_UNLOCK(p);
	error = copyout(&ss, &ke->ke_mailbox->km_sigscaught, sizeof(sigset_t));
	PROC_LOCK(p);
	if (error)
		return (NULL);
	if (td->td_standin == NULL)
		td->td_standin = thread_alloc();
	mtx_lock_spin(&sched_lock);
	td2 = thread_schedule_upcall(td, ke); /* Bogus JRE */
	mtx_unlock_spin(&sched_lock);
	return (td2);
}

/*
 * Setup done on the thread when it enters the kernel.
 * XXXKSE Presently only for syscalls but eventually all kernel entries.
 */
void
thread_user_enter(struct proc *p, struct thread *td)
{
	struct kse *ke;

	/*
	 * First check that we shouldn't just abort.
	 * But check if we are the single thread first!
	 * XXX p_singlethread not locked, but should be safe.
	 */
	if ((p->p_flag & P_WEXIT) && (p->p_singlethread != td)) {
		PROC_LOCK(p);
		mtx_lock_spin(&sched_lock);
		thread_exit();
		/* NOTREACHED */
	}

	/*
	 * If we are doing a syscall in a KSE environment,
	 * note where our mailbox is. There is always the
	 * possibility that we could do this lazily (in kse_reassign()),
	 * but for now do it every time.
	 */
	ke = td->td_kse;
	td->td_flags &= ~TDF_UNBOUND;
	if (ke->ke_mailbox != NULL) {
#if 0
		td->td_mailbox = (void *)fuword((caddr_t)ke->ke_mailbox
		    + offsetof(struct kse_mailbox, km_curthread));
#else /* if user pointer arithmetic is ok in the kernel */
		td->td_mailbox =
		    (void *)fuword((void *)&ke->ke_mailbox->km_curthread);
#endif
		if ((td->td_mailbox == NULL) ||
		    (td->td_mailbox == (void *)-1)) {
			td->td_mailbox = NULL;	/* single thread it.. */
			mtx_lock_spin(&sched_lock);
			td->td_flags &= ~(TDF_UNBOUND|TDF_CAN_UNBIND);
			mtx_unlock_spin(&sched_lock);
		} else {
			/*
			 * When the thread limit is reached, act as if the
			 * thread has already done an upcall.
			 */
			if (p->p_numthreads > max_threads_per_proc) {
				if (td->td_standin != NULL)
					thread_stash(td->td_standin);
				td->td_standin = NULL;
			} else {
				if (td->td_standin == NULL)
					td->td_standin = thread_alloc();
			}
			mtx_lock_spin(&sched_lock);
			td->td_flags |= TDF_CAN_UNBIND;
			mtx_unlock_spin(&sched_lock);
			KASSERT((ke->ke_owner == td),
			    ("thread_user_enter: no starting owner"));
			ke->ke_owner = td;
			td->td_usticks = 0;
		}
	}
}

1331105900Sjulian/*
1332103410Smini * The extra work we go through if we are a threaded process when we
1333103410Smini * return to userland.
1334103410Smini *
133599026Sjulian * If we are a KSE process and returning to user mode, check for
133699026Sjulian * extra work to do before we return (e.g. for more syscalls
133799026Sjulian * to complete first).  If we were in a critical section, we should
133899026Sjulian * just return to let it finish. Same if we were in the UTS (in
1339103410Smini * which case the mailbox's context's busy indicator will be set).
1340103410Smini * The only traps we suport will have set the mailbox.
1341103410Smini * We will clear it here.
134299026Sjulian */
134399026Sjulianint
1344103838Sjulianthread_userret(struct thread *td, struct trapframe *frame)
134599026Sjulian{
1346103410Smini	int error;
1347104031Sjulian	int unbound;
1348104031Sjulian	struct kse *ke;
1349104695Sjulian	struct ksegrp *kg;
1350108338Sjulian	struct thread *worktodo;
1351104695Sjulian	struct proc *p;
1352107060Sdavidxu	struct timespec ts;
135399026Sjulian
1354108338Sjulian	KASSERT((td->td_kse && td->td_kse->ke_thread && td->td_kse->ke_owner),
1355108338Sjulian	    ("thread_userret: bad thread/kse pointers"));
1356108338Sjulian	KASSERT((td == curthread),
1357108338Sjulian	    ("thread_userret: bad thread argument"));
1358104157Sjulian
1359104695Sjulian
1360104695Sjulian	kg = td->td_ksegrp;
1361104695Sjulian	p = td->td_proc;
1362108338Sjulian	error = 0;
1363108338Sjulian	unbound = TD_IS_UNBOUND(td);
1364104695Sjulian
1365108338Sjulian	mtx_lock_spin(&sched_lock);
1366108338Sjulian       	if ((worktodo = kg->kg_last_assigned))
1367108338Sjulian       		worktodo = TAILQ_NEXT(worktodo, td_runq);
1368108338Sjulian       	else
1369108338Sjulian       		worktodo = TAILQ_FIRST(&kg->kg_runq);
1370108338Sjulian
1371103410Smini	/*
1372108338Sjulian	 * Permanently bound threads never upcall but they may
1373104695Sjulian	 * loan out their KSE at this point.
1374104695Sjulian	 * Upcalls imply bound.. They also may want to do some Philantropy.
1375108338Sjulian	 * Temporarily bound threads on the other hand either yield
1376108338Sjulian	 * to other work and transform into an upcall, or proceed back to
1377108338Sjulian	 * userland.
1378103410Smini	 */
1379108338Sjulian
1380108338Sjulian	if (TD_CAN_UNBIND(td)) {
1381108338Sjulian		td->td_flags &= ~(TDF_UNBOUND|TDF_CAN_UNBIND);
1382108338Sjulian		if (!worktodo && (kg->kg_completed == NULL)) {
1383108338Sjulian			/*
1384108338Sjulian			 * This thread has not started any upcall.
1385108338Sjulian			 * If there is no work to report other than
1386108338Sjulian			 * ourself, then it can return direct to userland.
1387108338Sjulian			 */
1388108338Sjulianjustreturn:
1389108338Sjulian			mtx_unlock_spin(&sched_lock);
1390108338Sjulian			thread_update_uticks();
1391108338Sjulian			td->td_mailbox = NULL;
1392108338Sjulian			return (0);
1393108338Sjulian		}
1394108338Sjulian		mtx_unlock_spin(&sched_lock);
1395104695Sjulian		error = thread_export_context(td);
1396107034Sdavidxu		td->td_usticks = 0;
1397104695Sjulian		if (error) {
1398104695Sjulian			/*
1399108338Sjulian			 * As we are not running on a borrowed KSE,
1400104695Sjulian			 * failing to do the KSE operation just defaults
1401104695Sjulian			 * back to synchronous operation, so just return from
1402108338Sjulian			 * the syscall.
1403104695Sjulian			 */
1404108338Sjulian			goto justreturn;
1405104695Sjulian		}
1406104695Sjulian		mtx_lock_spin(&sched_lock);
1407104695Sjulian		/*
1408104695Sjulian		 * Turn ourself into a bound upcall.
1409104695Sjulian		 * We will rely on kse_reassign()
1410104695Sjulian		 * to make us run at a later time.
1411104695Sjulian		 */
1412104695Sjulian		td->td_flags |= TDF_UPCALLING;
1413104695Sjulian
1414108338Sjulian		/* There may be more work since we re-locked schedlock. */
1415108338Sjulian		if ((worktodo = kg->kg_last_assigned))
1416108338Sjulian			worktodo = TAILQ_NEXT(worktodo, td_runq);
1417108338Sjulian		else
1418108338Sjulian			worktodo = TAILQ_FIRST(&kg->kg_runq);
1419108338Sjulian	} else if (unbound) {
1420108338Sjulian		/*
1421108338Sjulian		 * We are an unbound thread, looking to
1422108338Sjulian		 * return to user space.  There must be another
1423108338Sjulian		 * owner of this KSE, since we are using a borrowed
1424108338Sjulian		 * one, so save our state and exit.
1425108338Sjulian		 * kse_reassign() will recycle the KSE as needed.
1426108338Sjulian		 */
1427108338Sjulian		mtx_unlock_spin(&sched_lock);
1428108338Sjulian		error = thread_export_context(td);
1429108338Sjulian		td->td_usticks = 0;
1430108338Sjulian		if (error) {
1431108338Sjulian			/*
1432108338Sjulian			 * There is nothing we can do;
1433108338Sjulian			 * we just lose that context.  Note the
1434108338Sjulian			 * failure by sending the process a SIGSEGV.
1436108338Sjulian			 */
1437108338Sjulian			PROC_LOCK(td->td_proc);
1438108338Sjulian			psignal(td->td_proc, SIGSEGV);
1439108338Sjulian			mtx_lock_spin(&sched_lock);
1440108338Sjulian			ke = td->td_kse;
1441108338Sjulian			/* possibly upcall with error? */
1442108338Sjulian		} else {
1443108338Sjulian			/*
1444108338Sjulian			 * Don't make an upcall, just exit so that the owner
1445108338Sjulian			 * can get its KSE if it wants it.
1446108338Sjulian			 * Our context is already safely stored for later
1447108338Sjulian			 * use by the UTS.
1448108338Sjulian			 */
1449108338Sjulian			PROC_LOCK(p);
1450108338Sjulian			mtx_lock_spin(&sched_lock);
1451108338Sjulian			ke = td->td_kse;
1452108338Sjulian		}
1453108338Sjulian		/*
1454108338Sjulian		 * If the owner is idling, we now have something for it
1455108338Sjulian		 * to report, so make it runnable.
1456108338Sjulian		 * If the owner is not an upcall, make an attempt to
1457108338Sjulian		 * ensure that at least one of any IDLED upcalls can
1458108338Sjulian		 * wake up.
1459108338Sjulian		 */
1460108338Sjulian		if (ke->ke_owner->td_flags & TDF_UPCALLING) {
1461108338Sjulian			TD_CLR_IDLE(ke->ke_owner);
1462108338Sjulian		} else {
1463108338Sjulian			FOREACH_KSE_IN_GROUP(kg, ke) {
1464108338Sjulian				if (TD_IS_IDLE(ke->ke_owner)) {
1465108338Sjulian					TD_CLR_IDLE(ke->ke_owner);
1466108338Sjulian				}
1467108338Sjulian			}
1468108338Sjulian		}
1469108338Sjulian		thread_exit();
1470104695Sjulian	}
1471104695Sjulian	/*
1472104695Sjulian	 * We ARE going back to userland with this KSE.
1473108338Sjulian	 * We are permanently bound. We may be an upcall.
1474108338Sjulian	 * If an upcall, check for threads that need to borrow the KSE.
1475104695Sjulian	 * Any other thread that comes ready after this has missed the boat.
1476104695Sjulian	 */
1477104031Sjulian	ke = td->td_kse;
1478104695Sjulian
1479103410Smini	/*
1480108338Sjulian	 * If not upcalling, go back to userspace.
1481108338Sjulian	 * If we are, get the upcall set up.
1482103410Smini	 */
1483108338Sjulian	if (td->td_flags & TDF_UPCALLING) {
1484108338Sjulian		if (worktodo)  {
1485108338Sjulian			/*
1486108338Sjulian			 * Force a switch to more urgent 'in kernel'
1487108338Sjulian			 * work. Control will return to this thread
1488108338Sjulian			 * when there is no more work to do.
1489108338Sjulian			 * kse_reassign() will do that for us.
1490108338Sjulian			 */
1491108338Sjulian			TD_SET_LOAN(td);  /* XXXKSE may not be needed */
1492108338Sjulian			p->p_stats->p_ru.ru_nvcsw++;
1493108338Sjulian			mi_switch(); /* kse_reassign() will (re)find worktodo */
1494108338Sjulian		}
1495108338Sjulian		td->td_flags &= ~TDF_UPCALLING;
1496108338Sjulian		mtx_unlock_spin(&sched_lock);
1497104695Sjulian
1498108338Sjulian		/*
1499108338Sjulian		 * There is no more work to do and we are going to ride
1500108338Sjulian		 * this thread/KSE up to userland as an upcall.
1501108338Sjulian		 * Do the last parts of the setup needed for the upcall.
1502108338Sjulian		 */
1503108338Sjulian		CTR3(KTR_PROC, "userret: upcall thread %p (pid %d, %s)",
1504108338Sjulian		    td, td->td_proc->p_pid, td->td_proc->p_comm);
1505104695Sjulian
1506108338Sjulian		/*
1507108338Sjulian		 * Set user context to the UTS.
1508108338Sjulian		 * Will use Giant in cpu_thread_clean() because it uses
1509108338Sjulian		 * kmem_free(kernel_map, ...)
1510108338Sjulian		 */
1511108338Sjulian		cpu_set_upcall_kse(td, ke);
1512104695Sjulian
1513108338Sjulian		/*
1514108338Sjulian		 * Unhook the list of completed threads.
1515108338Sjulian		 * Anything that completes after this gets to
1516108338Sjulian		 * come in next time.
1517108338Sjulian		 * Put the list of completed thread mailboxes on
1518108338Sjulian		 * this KSE's mailbox.
1519108338Sjulian		 */
1520108338Sjulian		error = thread_link_mboxes(kg, ke);
1521108338Sjulian		if (error)
1522108338Sjulian			goto bad;
152399026Sjulian
1524108338Sjulian		/*
1525108338Sjulian		 * Set state and clear the thread mailbox pointer.
1526108338Sjulian		 * From now on we are just a bound outgoing process.
1527108338Sjulian		 * **Problem** userret is often called several times;
1528108338Sjulian		 * it would be nice if this all happened only on the
1529108338Sjulian		 * first time through (the scan for extra work, etc.).
1530108338Sjulian		 */
1531104031Sjulian#if 0
1532108338Sjulian		error = suword((caddr_t)ke->ke_mailbox +
1533108338Sjulian		    offsetof(struct kse_mailbox, km_curthread), 0);
1534104031Sjulian#else	/* if user pointer arithmetic is ok in the kernel */
1535108338Sjulian		error = suword((caddr_t)&ke->ke_mailbox->km_curthread, 0);
1536107060Sdavidxu#endif
1537108338Sjulian		ke->ke_uuticks = ke->ke_usticks = 0;
1538108338Sjulian		if (error)
1539108338Sjulian			goto bad;
1540107060Sdavidxu		nanotime(&ts);
1541108338Sjulian		if (copyout(&ts,
1542108338Sjulian		    (caddr_t)&ke->ke_mailbox->km_timeofday, sizeof(ts))) {
1543107060Sdavidxu			goto bad;
1544107060Sdavidxu		}
1545108338Sjulian	} else {
1546108338Sjulian		mtx_unlock_spin(&sched_lock);
1547107060Sdavidxu	}
1548108338Sjulian	/*
1549108338Sjulian	 * Optimisation:
1550108338Sjulian	 * Ensure that we have a spare thread available
1551108338Sjulian	 * for when we re-enter the kernel.
1552108338Sjulian	 */
1553108338Sjulian	if (td->td_standin == NULL) {
1554108338Sjulian		td->td_standin = thread_alloc();
1555108338Sjulian	}
1556108338Sjulian
1557108338Sjulian	thread_update_uticks();
1558108338Sjulian	td->td_mailbox = NULL;
1559107060Sdavidxu	return (0);
1560104695Sjulian
1561104031Sjulianbad:
1562104031Sjulian	/*
1563104031Sjulian	 * Things are going to be so screwed we should just kill the process.
1564104031Sjulian	 * How do we do that?
1565104031Sjulian	 */
1566104695Sjulian	PROC_LOCK(td->td_proc);
1567104695Sjulian	psignal(td->td_proc, SIGSEGV);
1568104695Sjulian	PROC_UNLOCK(td->td_proc);
1569108338Sjulian	td->td_mailbox = NULL;
1570104695Sjulian	return (error);	/* go sync */
157199026Sjulian}
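
/*
 * Illustrative sketch, not compiled: the rough shape of a
 * machine-dependent return-to-userland path that would call
 * thread_userret() above.  The function name and structure here are
 * hypothetical; only thread_userret(), curthread, P_KSES and the
 * trapframe argument come from this file.
 */
#if 0
static void
userret_sketch(struct trapframe *frame)
{
	struct thread *td = curthread;

	if (td->td_proc->p_flag & P_KSES) {
		/*
		 * May export context, upcall, or call thread_exit()
		 * and never return.  A non-zero return means the
		 * mailbox operations failed and a SIGSEGV has
		 * already been posted.
		 */
		(void) thread_userret(td, frame);
	}
}
#endif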
157299026Sjulian
157399026Sjulian/*
157499026Sjulian * Enforce single-threading.
157599026Sjulian *
157699026Sjulian * Returns 1 if the caller must abort (another thread is waiting to
157799026Sjulian * exit the process or similar). Process is locked!
157899026Sjulian * Returns 0 when you are successfully the only thread running.
157999026Sjulian * A process has successfully single-threaded in the suspend mode when
158099026Sjulian * there are no threads in user mode. Threads in the kernel must be
158199026Sjulian * allowed to continue until they get to the user boundary. They may even
158299026Sjulian * copy out their return values and data before suspending. They may,
158399026Sjulian * however, be accelerated in reaching the user boundary, as we will
158499026Sjulian * wake up any sleeping threads that are interruptible (PCATCH).
158599026Sjulian */
158699026Sjulianint
158799026Sjulianthread_single(int force_exit)
158899026Sjulian{
158999026Sjulian	struct thread *td;
159099026Sjulian	struct thread *td2;
159199026Sjulian	struct proc *p;
159299026Sjulian
159399026Sjulian	td = curthread;
159499026Sjulian	p = td->td_proc;
1595107719Sjulian	mtx_assert(&Giant, MA_OWNED);
159699026Sjulian	PROC_LOCK_ASSERT(p, MA_OWNED);
159799026Sjulian	KASSERT((td != NULL), ("curthread is NULL"));
159899026Sjulian
159999026Sjulian	if ((p->p_flag & P_KSES) == 0)
160099026Sjulian		return (0);
160199026Sjulian
1602100648Sjulian	/* Is someone already single threading? */
1603100648Sjulian	if (p->p_singlethread)
160499026Sjulian		return (1);
160599026Sjulian
1606108338Sjulian	if (force_exit == SINGLE_EXIT) {
160799026Sjulian		p->p_flag |= P_SINGLE_EXIT;
1608108338Sjulian		td->td_flags &= ~TDF_UNBOUND;
1609108338Sjulian	} else
161099026Sjulian		p->p_flag &= ~P_SINGLE_EXIT;
1611102950Sdavidxu	p->p_flag |= P_STOPPED_SINGLE;
161299026Sjulian	p->p_singlethread = td;
1613105911Sjulian	/* XXXKSE Which lock protects the values below? */
161499026Sjulian	while ((p->p_numthreads - p->p_suspcount) != 1) {
1615103216Sjulian		mtx_lock_spin(&sched_lock);
161699026Sjulian		FOREACH_THREAD_IN_PROC(p, td2) {
161799026Sjulian			if (td2 == td)
161899026Sjulian				continue;
1619103216Sjulian			if (TD_IS_INHIBITED(td2)) {
1620105911Sjulian				if (force_exit == SINGLE_EXIT) {
1621105911Sjulian					if (TD_IS_SUSPENDED(td2)) {
1622103216Sjulian						thread_unsuspend_one(td2);
1623105911Sjulian					}
1624105911Sjulian					if (TD_ON_SLEEPQ(td2) &&
1625105911Sjulian					    (td2->td_flags & TDF_SINTR)) {
1626105911Sjulian						if (td2->td_flags & TDF_CVWAITQ)
1627105911Sjulian							cv_abort(td2);
1628105911Sjulian						else
1629105911Sjulian							abortsleep(td2);
1630105911Sjulian					}
1631108338Sjulian					if (TD_IS_IDLE(td2)) {
1632108338Sjulian						TD_CLR_IDLE(td2);
1633108338Sjulian					}
1634105911Sjulian				} else {
1635105911Sjulian					if (TD_IS_SUSPENDED(td2))
1636105874Sdavidxu						continue;
1637105911Sjulian					/* maybe other inhibited states too? */
1638108338Sjulian					if (td2->td_inhibitors &
1639108338Sjulian					    (TDI_SLEEPING | TDI_SWAPPED |
1640108338Sjulian					    TDI_LOAN | TDI_IDLE |
1641108338Sjulian					    TDI_EXITING))
1642105911Sjulian						thread_suspend_one(td2);
164399026Sjulian				}
164499026Sjulian			}
164599026Sjulian		}
1646105911Sjulian		/*
1647105911Sjulian		 * Maybe we suspended some threads; was it enough?
1648105911Sjulian		 */
1649105911Sjulian		if ((p->p_numthreads - p->p_suspcount) == 1) {
1650105911Sjulian			mtx_unlock_spin(&sched_lock);
1651105911Sjulian			break;
1652105911Sjulian		}
1653105911Sjulian
165499026Sjulian		/*
165599026Sjulian		 * Wake us up when everyone else has suspended.
1656100648Sjulian		 * In the meantime we suspend as well.
165799026Sjulian		 */
1658103216Sjulian		thread_suspend_one(td);
165999026Sjulian		mtx_unlock(&Giant);
166099026Sjulian		PROC_UNLOCK(p);
1661107719Sjulian		p->p_stats->p_ru.ru_nvcsw++;
166299026Sjulian		mi_switch();
166399026Sjulian		mtx_unlock_spin(&sched_lock);
166499026Sjulian		mtx_lock(&Giant);
166599026Sjulian		PROC_LOCK(p);
166699026Sjulian	}
1667105854Sjulian	if (force_exit == SINGLE_EXIT)
1668105854Sjulian		kse_purge(p, td);
166999026Sjulian	return (0);
167099026Sjulian}
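
/*
 * Illustrative sketch, not compiled: how an exit path might use
 * thread_single() above.  The function is hypothetical (p must be
 * curthread's own process); the Giant and process-lock preconditions
 * are the ones thread_single() itself asserts.
 */
#if 0
static void
exit_single_sketch(struct proc *p)
{
	mtx_lock(&Giant);
	PROC_LOCK(p);
	if (p->p_flag & P_KSES) {
		/*
		 * SINGLE_EXIT: every other thread is driven to
		 * thread_exit(), and kse_purge() is done for us
		 * on success (see above).
		 */
		if (thread_single(SINGLE_EXIT))
			panic("single threading for exit fouled up");
	}
	/* ... tear the process down, now single-threaded ... */
	PROC_UNLOCK(p);
	mtx_unlock(&Giant);
}
#endif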
167199026Sjulian
167299026Sjulian/*
167399026Sjulian * Called from locations that can safely check to see
167499026Sjulian * whether we have to suspend or at least throttle for a
167599026Sjulian * single-thread event (e.g. fork).
167699026Sjulian *
167799026Sjulian * Such locations include userret().
167899026Sjulian * If the "return_instead" argument is non-zero, the thread must be able to
167999026Sjulian * accept 0 (caller may continue), or 1 (caller must abort) as a result.
168099026Sjulian *
168199026Sjulian * The 'return_instead' argument tells the function if it may do a
168299026Sjulian * thread_exit() or suspend, or whether the caller must abort and back
168399026Sjulian * out instead.
168499026Sjulian *
168599026Sjulian * If the thread that set the single_threading request has set the
168699026Sjulian * P_SINGLE_EXIT bit in the process flags then this call will never return
168799026Sjulian * if 'return_instead' is false, but will exit.
168899026Sjulian *
168999026Sjulian * P_SINGLE_EXIT | return_instead == 0 | return_instead != 0
169099026Sjulian *---------------+---------------------+---------------------
169199026Sjulian *       0       | returns 0           |   returns 0 or 1
169299026Sjulian *               | when ST ends        |   immediately
169399026Sjulian *---------------+---------------------+---------------------
169499026Sjulian *       1       | thread exits        |   returns 1
169599026Sjulian *               |                     |   immediately
169699026Sjulian * 0 = thread_exit() or suspension ok,
169799026Sjulian * other = return error instead of stopping the thread.
169899026Sjulian *
169999026Sjulian * While a full suspension is in effect, even a single threading
170099026Sjulian * thread would be suspended if it made this call (but it shouldn't).
170199026Sjulian * This call should only be made from places where
170299026Sjulian * thread_exit() would be safe as that may be the outcome unless
170399026Sjulian * return_instead is set.
170499026Sjulian */
170599026Sjulianint
170699026Sjulianthread_suspend_check(int return_instead)
170799026Sjulian{
1708104502Sjmallett	struct thread *td;
1709104502Sjmallett	struct proc *p;
1710105854Sjulian	struct kse *ke;
1711105854Sjulian	struct ksegrp *kg;
171299026Sjulian
171399026Sjulian	td = curthread;
171499026Sjulian	p = td->td_proc;
1715105854Sjulian	kg = td->td_ksegrp;
171699026Sjulian	PROC_LOCK_ASSERT(p, MA_OWNED);
171799026Sjulian	while (P_SHOULDSTOP(p)) {
1718102950Sdavidxu		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
171999026Sjulian			KASSERT(p->p_singlethread != NULL,
172099026Sjulian			    ("singlethread not set"));
172199026Sjulian			/*
1722100648Sjulian			 * The only suspension in action is a
1723100648Sjulian			 * single-threading. The single threader need not stop.
1724100646Sjulian			 * XXX Should be safe to access unlocked
1725100646Sjulian			 * as it can only be set to be true by us.
172699026Sjulian			 */
1727100648Sjulian			if (p->p_singlethread == td)
172899026Sjulian				return (0);	/* Exempt from stopping. */
172999026Sjulian		}
1730100648Sjulian		if (return_instead)
173199026Sjulian			return (1);
173299026Sjulian
173399026Sjulian		/*
173499026Sjulian		 * If the process is waiting for us to exit,
173599026Sjulian		 * this thread should just suicide.
1736102950Sdavidxu		 * Assumes that P_SINGLE_EXIT implies P_STOPPED_SINGLE.
173799026Sjulian		 */
173899026Sjulian		if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) {
173999026Sjulian			mtx_lock_spin(&sched_lock);
174099026Sjulian			while (mtx_owned(&Giant))
174199026Sjulian				mtx_unlock(&Giant);
1742105854Sjulian			/*
1743108338Sjulian			 * All threads should be exiting
1744108338Sjulian			 * unless they are the active "singlethread".
1745108338Sjulian			 * Destroy unneeded KSEs as we go.
1746108338Sjulian			 * KSEGRPs may implode too as #kses -> 0.
1747105854Sjulian			 */
1748105854Sjulian			ke = td->td_kse;
1749108338Sjulian			if (ke->ke_owner == td &&
1750108338Sjulian			    (kg->kg_kses >= kg->kg_numthreads ))
1751105854Sjulian				ke->ke_flags |= KEF_EXIT;
175299026Sjulian			thread_exit();
175399026Sjulian		}
175499026Sjulian
175599026Sjulian		/*
175699026Sjulian		 * When a thread suspends, it just
175799026Sjulian		 * moves to the process's suspend queue
175899026Sjulian		 * and stays there.
175999026Sjulian		 *
176099026Sjulian		 * XXXKSE if TDF_BOUND is true
176199026Sjulian		 * it will not release its KSE, which might
176299026Sjulian		 * lead to deadlock if there are not enough KSEs
176399026Sjulian		 * to complete all waiting threads.
176499026Sjulian		 * Maybe we could 'lend' it out again
176599026Sjulian		 * (lent KSEs cannot go back to userland?),
176699026Sjulian		 * and it can only be lent in the STOPPED state.
176799026Sjulian		 */
1768102238Sjulian		mtx_lock_spin(&sched_lock);
1769102950Sdavidxu		if ((p->p_flag & P_STOPPED_SIG) &&
1770102238Sjulian		    (p->p_suspcount+1 == p->p_numthreads)) {
1771102238Sjulian			mtx_unlock_spin(&sched_lock);
1772102238Sjulian			PROC_LOCK(p->p_pptr);
1773102238Sjulian			if ((p->p_pptr->p_procsig->ps_flag &
1774102238Sjulian				PS_NOCLDSTOP) == 0) {
1775102238Sjulian				psignal(p->p_pptr, SIGCHLD);
1776102238Sjulian			}
1777102238Sjulian			PROC_UNLOCK(p->p_pptr);
1778103055Sjulian			mtx_lock_spin(&sched_lock);
1779102238Sjulian		}
178099026Sjulian		mtx_assert(&Giant, MA_NOTOWNED);
1781103216Sjulian		thread_suspend_one(td);
178299026Sjulian		PROC_UNLOCK(p);
1783102950Sdavidxu		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
1784100632Sjulian			if (p->p_numthreads == p->p_suspcount) {
1785103216Sjulian				thread_unsuspend_one(p->p_singlethread);
1786100632Sjulian			}
1787100632Sjulian		}
1788100594Sjulian		p->p_stats->p_ru.ru_nivcsw++;
178999026Sjulian		mi_switch();
179099026Sjulian		mtx_unlock_spin(&sched_lock);
179199026Sjulian		PROC_LOCK(p);
179299026Sjulian	}
179399026Sjulian	return (0);
179499026Sjulian}
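
/*
 * Illustrative sketch, not compiled: both sides of the
 * 'return_instead' contract tabulated above.  The function and its
 * ERESTART handling are hypothetical; thread_suspend_check() and
 * the process-lock rule are from this file.
 */
#if 0
static int
suspend_check_sketch(void)
{
	struct proc *p = curthread->td_proc;

	/* A caller that cannot stop here (e.g. a fork-like path): */
	PROC_LOCK(p);
	if (thread_suspend_check(1)) {
		PROC_UNLOCK(p);
		return (ERESTART);	/* hypothetical abort handling */
	}
	PROC_UNLOCK(p);

	/*
	 * A userret()-style caller passes 0 instead: it suspends
	 * (or exits, if P_SINGLE_EXIT is set) inside the call and
	 * only returns once the single-threading episode is over.
	 */
	PROC_LOCK(p);
	(void) thread_suspend_check(0);
	PROC_UNLOCK(p);
	return (0);
}
#endif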
179599026Sjulian
1796102898Sdavidxuvoid
1797102898Sdavidxuthread_suspend_one(struct thread *td)
1798102898Sdavidxu{
1799102898Sdavidxu	struct proc *p = td->td_proc;
1800102898Sdavidxu
1801102898Sdavidxu	mtx_assert(&sched_lock, MA_OWNED);
1802102898Sdavidxu	p->p_suspcount++;
1803103216Sjulian	TD_SET_SUSPENDED(td);
1804102898Sdavidxu	TAILQ_INSERT_TAIL(&p->p_suspended, td, td_runq);
1805103216Sjulian	/*
1806103216Sjulian	 * Hack: If we are suspending but are on the sleep queue
1807103216Sjulian	 * then we are in msleep or the cv equivalent. We
1808103216Sjulian	 * want to look like we have two inhibitors.
1809105911Sjulian	 * It may already be set; that doesn't matter.
1810103216Sjulian	 */
1811103216Sjulian	if (TD_ON_SLEEPQ(td))
1812103216Sjulian		TD_SET_SLEEPING(td);
1813102898Sdavidxu}
1814102898Sdavidxu
1815102898Sdavidxuvoid
1816102898Sdavidxuthread_unsuspend_one(struct thread *td)
1817102898Sdavidxu{
1818102898Sdavidxu	struct proc *p = td->td_proc;
1819102898Sdavidxu
1820102898Sdavidxu	mtx_assert(&sched_lock, MA_OWNED);
1821102898Sdavidxu	TAILQ_REMOVE(&p->p_suspended, td, td_runq);
1822103216Sjulian	TD_CLR_SUSPENDED(td);
1823102898Sdavidxu	p->p_suspcount--;
1824103216Sjulian	setrunnable(td);
1825102898Sdavidxu}
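
/*
 * Illustrative sketch, not compiled: the invariant the two helpers
 * above maintain.  The demo function is hypothetical; the sched_lock
 * rule and the p_suspcount accounting come from the helpers
 * themselves.
 */
#if 0
static void
suspcount_demo(struct thread *td2)
{
	struct proc *p = td2->td_proc;
	int before;

	mtx_lock_spin(&sched_lock);	/* both helpers assert this */
	before = p->p_suspcount;
	thread_suspend_one(td2);	/* queues td2, p_suspcount++ */
	KASSERT(p->p_suspcount == before + 1, ("suspcount skewed"));
	thread_unsuspend_one(td2);	/* dequeues td2, setrunnable() */
	KASSERT(p->p_suspcount == before, ("suspcount skewed"));
	mtx_unlock_spin(&sched_lock);
}
#endif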
1826102898Sdavidxu
182799026Sjulian/*
182899026Sjulian * Allow all threads blocked by single threading to continue running.
182999026Sjulian */
183099026Sjulianvoid
183199026Sjulianthread_unsuspend(struct proc *p)
183299026Sjulian{
183399026Sjulian	struct thread *td;
183499026Sjulian
1835100646Sjulian	mtx_assert(&sched_lock, MA_OWNED);
183699026Sjulian	PROC_LOCK_ASSERT(p, MA_OWNED);
183799026Sjulian	if (!P_SHOULDSTOP(p)) {
183899026Sjulian		while ((td = TAILQ_FIRST(&p->p_suspended))) {
1839102898Sdavidxu			thread_unsuspend_one(td);
184099026Sjulian		}
1841102950Sdavidxu	} else if ((P_SHOULDSTOP(p) == P_STOPPED_SINGLE) &&
184299026Sjulian	    (p->p_numthreads == p->p_suspcount)) {
184399026Sjulian		/*
184499026Sjulian		 * Stopping everything also did the job for the single
184599026Sjulian		 * threading request. Now we've downgraded to single-threaded,
184699026Sjulian		 * let it continue.
184799026Sjulian		 */
1848102898Sdavidxu		thread_unsuspend_one(p->p_singlethread);
184999026Sjulian	}
185099026Sjulian}
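
/*
 * Illustrative sketch, not compiled: thread_unsuspend() wants both
 * locks it asserts held; a continue-delivery path might look like
 * this.  The function and the P_STOPPED_SIG manipulation here are
 * hypothetical.
 */
#if 0
static void
continue_sketch(struct proc *p)
{
	PROC_LOCK(p);
	mtx_lock_spin(&sched_lock);
	p->p_flag &= ~P_STOPPED_SIG;	/* the stop has been lifted */
	thread_unsuspend(p);		/* lets the suspendees run */
	mtx_unlock_spin(&sched_lock);
	PROC_UNLOCK(p);
}
#endif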
185199026Sjulian
185299026Sjulianvoid
185399026Sjulianthread_single_end(void)
185499026Sjulian{
185599026Sjulian	struct thread *td;
185699026Sjulian	struct proc *p;
185799026Sjulian
185899026Sjulian	td = curthread;
185999026Sjulian	p = td->td_proc;
186099026Sjulian	PROC_LOCK_ASSERT(p, MA_OWNED);
1861102950Sdavidxu	p->p_flag &= ~P_STOPPED_SINGLE;
186299026Sjulian	p->p_singlethread = NULL;
1863102292Sjulian	/*
1864102292Sjulian	 * If there are other threads they may now run,
1865102292Sjulian	 * unless of course there is a blanket 'stop order'
1866102292Sjulian	 * on the process. The single threader must be allowed
1867102292Sjulian	 * to continue, however, as this is a bad place to stop.
1868102292Sjulian	 */
1869102292Sjulian	if ((p->p_numthreads != 1) && (!P_SHOULDSTOP(p))) {
1870102292Sjulian		mtx_lock_spin(&sched_lock);
1871102292Sjulian		while ((td = TAILQ_FIRST(&p->p_suspended))) {
1872103216Sjulian			thread_unsuspend_one(td);
1873102292Sjulian		}
1874102292Sjulian		mtx_unlock_spin(&sched_lock);
1875102292Sjulian	}
187699026Sjulian}
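
/*
 * Illustrative sketch, not compiled: a complete suspend-mode
 * single-threading episode, bracketed by thread_single() and
 * thread_single_end().  SINGLE_NO_EXIT is assumed to be the
 * non-exiting counterpart of SINGLE_EXIT, and do_quiescent_work()
 * is hypothetical.
 */
#if 0
static int
quiesce_sketch(struct proc *p)
{
	mtx_lock(&Giant);
	PROC_LOCK(p);
	if (thread_single(SINGLE_NO_EXIT)) {
		/* Someone else is single threading; back out. */
		PROC_UNLOCK(p);
		mtx_unlock(&Giant);
		return (1);
	}
	/* Every other thread is suspended at the user boundary. */
	do_quiescent_work(p);
	thread_single_end();		/* let them run again */
	PROC_UNLOCK(p);
	mtx_unlock(&Giant);
	return (0);
}
#endif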
187799026Sjulian
1878102292Sjulian