/* kern_thread.c revision 130199 */
/*
 * Copyright (C) 2001 Julian Elischer <julian@freebsd.org>.
 *  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */
281541Srgrimes
291541Srgrimes#include <sys/cdefs.h>
301541Srgrimes__FBSDID("$FreeBSD: head/sys/kern/kern_thread.c 130199 2004-06-07 19:00:57Z julian $");
311541Srgrimes
321541Srgrimes#include <sys/param.h>
331541Srgrimes#include <sys/systm.h>
341541Srgrimes#include <sys/kernel.h>
351541Srgrimes#include <sys/lock.h>
361541Srgrimes#include <sys/mutex.h>
371541Srgrimes#include <sys/proc.h>
381541Srgrimes#include <sys/sysctl.h>
391541Srgrimes#include <sys/sched.h>
401549Srgrimes#include <sys/sleepqueue.h>
411541Srgrimes#include <sys/turnstile.h>
421541Srgrimes#include <sys/ktr.h>
431541Srgrimes
441541Srgrimes#include <vm/vm.h>
451541Srgrimes#include <vm/vm_extern.h>
461541Srgrimes#include <vm/uma.h>
471549Srgrimes
481549Srgrimes/*
499507Sdg * KSEGRP related storage.
507695Sdg */
511549Srgrimesstatic uma_zone_t ksegrp_zone;
521549Srgrimesstatic uma_zone_t kse_zone;
53116226Sobrienstatic uma_zone_t thread_zone;
54116226Sobrien
55116226Sobrien/* DEBUG ONLY */
561541SrgrimesSYSCTL_NODE(_kern, OID_AUTO, threads, CTLFLAG_RW, 0, "thread allocation");
571541Srgrimesstatic int thread_debug = 0;
581541SrgrimesSYSCTL_INT(_kern_threads, OID_AUTO, debug, CTLFLAG_RW,
591541Srgrimes	&thread_debug, 0, "thread debug");
601541Srgrimes
6160041Sphkint max_threads_per_proc = 1500;
629507SdgSYSCTL_INT(_kern_threads, OID_AUTO, max_threads_per_proc, CTLFLAG_RW,
6312662Sdg	&max_threads_per_proc, 0, "Limit on threads per proc");
64140767Sphk
6551340Sdillonint max_groups_per_proc = 500;
66127926SalcSYSCTL_INT(_kern_threads, OID_AUTO, max_groups_per_proc, CTLFLAG_RW,
671541Srgrimes	&max_groups_per_proc, 0, "Limit on thread groups per proc");
68148875Sssouhlal
69148875Sssouhlalint max_threads_hits;
701541SrgrimesSYSCTL_INT(_kern_threads, OID_AUTO, max_threads_hits, CTLFLAG_RD,
7112662Sdg	&max_threads_hits, 0, "");
721541Srgrimes
739507Sdg
7431853SdysonTAILQ_HEAD(, thread) zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads);
751541SrgrimesTAILQ_HEAD(, kse) zombie_kses = TAILQ_HEAD_INITIALIZER(zombie_kses);
7612662SdgTAILQ_HEAD(, ksegrp) zombie_ksegrps = TAILQ_HEAD_INITIALIZER(zombie_ksegrps);
771541Srgrimesstruct mtx kse_zombie_lock;
78163359SalcMTX_SYSINIT(kse_zombie_lock, &kse_zombie_lock, "kse zombie lock", MTX_SPIN);
79163359Salc
8092727Salfredvoid kse_purge(struct proc *p, struct thread *td);
8192727Salfredvoid kse_purge_group(struct thread *td);
8292727Salfred
8392727Salfred/* move to proc.h */
8492727Salfredextern void	kseinit(void);
8592727Salfredextern void	kse_GC(void);
86194766Skib
87194766Skib
8811943Sbde
891541Srgrimes/*
90118466Sphk * Thread ID allocator. The allocator keeps track of assigned IDs by
91118466Sphk * using a bitmap. The bitmap is created in parts. The parts are linked
92118466Sphk * together.
93118466Sphk */
94118466Sphktypedef u_long tid_bitmap_word;
951541Srgrimes
961541Srgrimes#define	TID_IDS_PER_PART	1024
9779127Sjhb#define	TID_IDS_PER_IDX		(sizeof(tid_bitmap_word) << 3)
9810556Sdyson#define	TID_BITMAP_SIZE		(TID_IDS_PER_PART / TID_IDS_PER_IDX)
99140767Sphk#define	TID_MIN			(PID_MAX + 1)
100140767Sphk
101155177Syarstruct tid_bitmap_part {
102140767Sphk	STAILQ_ENTRY(tid_bitmap_part) bmp_next;
103140767Sphk	tid_bitmap_word	bmp_bitmap[TID_BITMAP_SIZE];
104140767Sphk	int		bmp_base;
105140767Sphk	int		bmp_free;
106140767Sphk};
107140767Sphk
108140767Sphkstatic STAILQ_HEAD(, tid_bitmap_part) tid_bitmap =
109140767Sphk    STAILQ_HEAD_INITIALIZER(tid_bitmap);
110140767Sphkstatic uma_zone_t tid_zone;
111140767Sphk
112140767Sphkstruct mtx tid_lock;
113140767SphkMTX_SYSINIT(tid_lock, &tid_lock, "TID lock", MTX_DEF);
114140767Sphk
115140767Sphk/*
116175294Sattilio * Prepare a thread for use.
117140767Sphk */
118140767Sphkstatic void
119175202Sattiliothread_ctor(void *mem, int size, void *arg)
120140767Sphk{
121140767Sphk	struct thread	*td;
122140767Sphk
123140767Sphk	td = (struct thread *)mem;
124140767Sphk	td->td_tid = 0;
125140767Sphk	td->td_state = TDS_INACTIVE;
126182371Sattilio	td->td_oncpu	= NOCPU;
127140767Sphk	td->td_critnest = 1;
128140767Sphk}
129140767Sphk
130140767Sphk/*
131140767Sphk * Reclaim a thread after use.
132194766Skib */
133140767Sphkstatic void
134140767Sphkthread_dtor(void *mem, int size, void *arg)
135140767Sphk{
136140767Sphk	struct thread *td;
137140767Sphk	struct tid_bitmap_part *bmp;
138140767Sphk	int bit, idx, tid;
139140767Sphk
140140767Sphk	td = (struct thread *)mem;
141140767Sphk
142140767Sphk	if (td->td_tid > PID_MAX) {
143140767Sphk		STAILQ_FOREACH(bmp, &tid_bitmap, bmp_next) {
144140767Sphk			if (td->td_tid >= bmp->bmp_base &&
145140767Sphk			    td->td_tid < bmp->bmp_base + TID_IDS_PER_PART)
146140767Sphk				break;
147140929Sphk		}
148140929Sphk		KASSERT(bmp != NULL, ("No TID bitmap?"));
149140929Sphk		mtx_lock(&tid_lock);
150140929Sphk		tid = td->td_tid - bmp->bmp_base;
151140929Sphk		idx = tid / TID_IDS_PER_IDX;
152140929Sphk		bit = 1UL << (tid % TID_IDS_PER_IDX);
153140929Sphk		bmp->bmp_bitmap[idx] |= bit;
154140929Sphk		bmp->bmp_free++;
155171599Spjd		mtx_unlock(&tid_lock);
156140929Sphk	}
157140929Sphk
158140929Sphk#ifdef INVARIANTS
159140929Sphk	/* Verify that this thread is in a safe state to free. */
160140929Sphk	switch (td->td_state) {
161140929Sphk	case TDS_INHIBITED:
162140929Sphk	case TDS_RUNNING:
163140929Sphk	case TDS_CAN_RUN:
164140929Sphk	case TDS_RUNQ:
165140929Sphk		/*
166140929Sphk		 * We must never unlink a thread that is in one of
167140929Sphk		 * these states, because it is currently active.
168140929Sphk		 */
169140929Sphk		panic("bad state for thread unlinking");
170140929Sphk		/* NOTREACHED */
171140929Sphk	case TDS_INACTIVE:
172140929Sphk		break;
173140929Sphk	default:
174140929Sphk		panic("bad thread state");
175140929Sphk		/* NOTREACHED */
176140929Sphk	}
177144610Sjeff#endif
178140929Sphk}
179140929Sphk
180140929Sphk/*
1811541Srgrimes * Initialize type-stable parts of a thread (when newly created).
1821541Srgrimes */
1831541Srgrimesstatic void
18498604Salcthread_init(void *mem, int size)
18598604Salc{
1861541Srgrimes	struct thread	*td;
1879507Sdg
18840286Sdg	td = (struct thread *)mem;
189194766Skib	vm_thread_new(td, 0);
1901541Srgrimes	cpu_thread_setup(td);
1919456Sdg	td->td_sleepqueue = sleepq_alloc();
1921541Srgrimes	td->td_turnstile = turnstile_alloc();
1931541Srgrimes	td->td_sched = (struct td_sched *)&td[1];
1941541Srgrimes}
1951541Srgrimes
1961541Srgrimes/*
1971541Srgrimes * Tear down type-stable parts of a thread (just before being discarded).
1981827Sdg */
1991541Srgrimesstatic void
2009411Sdgthread_fini(void *mem, int size)
2019411Sdg{
2021541Srgrimes	struct thread	*td;
2039411Sdg
2049411Sdg	td = (struct thread *)mem;
2059411Sdg	turnstile_free(td->td_turnstile);
206179159Sups	sleepq_free(td->td_sleepqueue);
207114074Salc	vm_thread_dispose(td);
208114074Salc}
209181020Sjhb
210114074Salc/*
211137297Salc * Initialize type-stable parts of a kse (when newly created).
212114074Salc */
2139507Sdgstatic void
2145455Sdgkse_init(void *mem, int size)
21532071Sdyson{
21632071Sdyson	struct kse	*ke;
21732071Sdyson
2189507Sdg	ke = (struct kse *)mem;
2191541Srgrimes	ke->ke_sched = (struct ke_sched *)&ke[1];
220179159Sups}
2211541Srgrimes
22240286Sdg/*
2231827Sdg * Initialize type-stable parts of a ksegrp (when newly created).
22440286Sdg */
2251549Srgrimesstatic void
2269507Sdgksegrp_init(void *mem, int size)
227179765Sups{
228179765Sups	struct ksegrp	*kg;
229179159Sups
230179159Sups	kg = (struct ksegrp *)mem;
231179159Sups	kg->kg_sched = (struct kg_sched *)&kg[1];
232179765Sups}
233179159Sups
234179159Sups/*
235179159Sups * KSE is linked into kse group.
2369507Sdg */
237179765Supsvoid
238179765Supskse_link(struct kse *ke, struct ksegrp *kg)
23932286Sdyson{
240179765Sups	struct proc *p = kg->kg_proc;
241179765Sups
242143559Sjeff	TAILQ_INSERT_HEAD(&kg->kg_kseq, ke, ke_kglist);
2439507Sdg	kg->kg_kses++;
2441541Srgrimes	ke->ke_state	= KES_UNQUEUED;
2451541Srgrimes	ke->ke_proc	= p;
246114774Salc	ke->ke_ksegrp	= kg;
247114774Salc	ke->ke_thread	= NULL;
248114774Salc	ke->ke_oncpu	= NOCPU;
24912820Sphk	ke->ke_flags	= 0;
2509507Sdg}
2519507Sdg
2521541Srgrimesvoid
253202529Skibkse_unlink(struct kse *ke)
254202529Skib{
2551541Srgrimes	struct ksegrp *kg;
256202529Skib
2579507Sdg	mtx_assert(&sched_lock, MA_OWNED);
2589507Sdg	kg = ke->ke_ksegrp;
2599507Sdg	TAILQ_REMOVE(&kg->kg_kseq, ke, ke_kglist);
260114774Salc	if (ke->ke_state == KES_IDLE) {
26133817Sdyson		TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
262202529Skib		kg->kg_idle_kses--;
2631541Srgrimes	}
2649507Sdg	--kg->kg_kses;
26533109Sdyson	/*
266137297Salc	 * Aggregate stats from the KSE
267137297Salc	 */
268137297Salc	kse_stash(ke);
269137297Salc}
270171599Spjd
2719507Sdgvoid
272140734Sphkksegrp_link(struct ksegrp *kg, struct proc *p)
273202529Skib{
274202529Skib
2751549Srgrimes	TAILQ_INIT(&kg->kg_threads);
2761541Srgrimes	TAILQ_INIT(&kg->kg_runq);	/* links with td_runq */
27712820Sphk	TAILQ_INIT(&kg->kg_slpq);	/* links with td_runq */
27812767Sdyson	TAILQ_INIT(&kg->kg_kseq);	/* all kses in ksegrp */
2799507Sdg	TAILQ_INIT(&kg->kg_iq);		/* all idle kses in ksegrp */
28012767Sdyson	TAILQ_INIT(&kg->kg_upcalls);	/* all upcall structure in ksegrp */
2819507Sdg	kg->kg_proc = p;
2829507Sdg	/*
2831541Srgrimes	 * the following counters are in the -zero- section
2849507Sdg	 * and may not need clearing
28596572Sphk	 */
28612423Sphk	kg->kg_numthreads = 0;
28710556Sdyson	kg->kg_runnable   = 0;
28811701Sdyson	kg->kg_kses       = 0;
28911701Sdyson	kg->kg_runq_kses  = 0; /* XXXKSE change name */
29012914Sdyson	kg->kg_idle_kses  = 0;
291140723Sjeff	kg->kg_numupcalls = 0;
2921541Srgrimes	/* link it in now that it's consistent */
293116695Salc	p->p_numksegrps++;
29451340Sdillon	TAILQ_INSERT_HEAD(&p->p_ksegrps, kg, kg_ksegrp);
29551340Sdillon}
29651340Sdillon
29751340Sdillonvoid
298155384Sjeffksegrp_unlink(struct ksegrp *kg)
29932585Sdyson{
3001541Srgrimes	struct proc *p;
301155384Sjeff
3025455Sdg	mtx_assert(&sched_lock, MA_OWNED);
3031541Srgrimes	KASSERT((kg->kg_numthreads == 0), ("ksegrp_unlink: residual threads"));
304155384Sjeff	KASSERT((kg->kg_kses == 0), ("ksegrp_unlink: residual kses"));
3054797Sdg	KASSERT((kg->kg_numupcalls == 0), ("ksegrp_unlink: residual upcalls"));
3061541Srgrimes
30711576Sdg	p = kg->kg_proc;
30810556Sdyson	TAILQ_REMOVE(&p->p_ksegrps, kg, kg_ksegrp);
30912914Sdyson	p->p_numksegrps--;
31012914Sdyson	/*
31112914Sdyson	 * Aggregate stats from the KSE
31212914Sdyson	 */
31312914Sdyson	ksegrp_stash(kg);
31412914Sdyson}
31512914Sdyson
316116695Salc/*
317140723Sjeff * For a newly created process,
318119045Sphk * link up all the structures and its initial threads etc.
319140723Sjeff */
320116695Salcvoid
3218876Srgrimesproc_linkup(struct proc *p, struct ksegrp *kg,
3229507Sdg	    struct kse *ke, struct thread *td)
32392029Seivind{
32410576Sdyson
32512914Sdyson	TAILQ_INIT(&p->p_ksegrps);	     /* all ksegrps in proc */
32612914Sdyson	TAILQ_INIT(&p->p_threads);	     /* all threads in proc */
32712914Sdyson	TAILQ_INIT(&p->p_suspended);	     /* Threads suspended */
32812914Sdyson	p->p_numksegrps = 0;
32912914Sdyson	p->p_numthreads = 0;
33010669Sdyson
33112914Sdyson	ksegrp_link(kg, p);
33212914Sdyson	kse_link(ke, kg);
33312914Sdyson	thread_link(td, kg);
33412914Sdyson}
33599211Srobert
33699211Srobert/*
33799211Srobert * Initialize global thread allocation resources.
33899211Srobert */
33999211Srobertvoid
34012914Sdysonthreadinit(void)
34112914Sdyson{
34212914Sdyson
34312914Sdyson	thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(),
34412914Sdyson	    thread_ctor, thread_dtor, thread_init, thread_fini,
34512914Sdyson	    UMA_ALIGN_CACHE, 0);
34612914Sdyson	tid_zone = uma_zcreate("TID", sizeof(struct tid_bitmap_part),
34712914Sdyson	    NULL, NULL, NULL, NULL, UMA_ALIGN_CACHE, 0);
34812914Sdyson	ksegrp_zone = uma_zcreate("KSEGRP", sched_sizeof_ksegrp(),
34912914Sdyson	    NULL, NULL, ksegrp_init, NULL,
35012914Sdyson	    UMA_ALIGN_CACHE, 0);
35110556Sdyson	kse_zone = uma_zcreate("KSE", sched_sizeof_kse(),
35210576Sdyson	    NULL, NULL, kse_init, NULL,
3531541Srgrimes	    UMA_ALIGN_CACHE, 0);
3541541Srgrimes	kseinit();
3551541Srgrimes}
3561541Srgrimes
3579507Sdg/*
3581541Srgrimes * Stash an embarasingly extra thread into the zombie thread queue.
3591541Srgrimes */
3601541Srgrimesvoid
3611541Srgrimesthread_stash(struct thread *td)
3621541Srgrimes{
3631541Srgrimes	mtx_lock_spin(&kse_zombie_lock);
3641541Srgrimes	TAILQ_INSERT_HEAD(&zombie_threads, td, td_runq);
3651541Srgrimes	mtx_unlock_spin(&kse_zombie_lock);
36612767Sdyson}
3671541Srgrimes
368116167Salc/*
369116167Salc * Stash an embarasingly extra kse into the zombie kse queue.
37038542Sluoqi */
3711541Srgrimesvoid
372116167Salckse_stash(struct kse *ke)
3731541Srgrimes{
374188386Skib	mtx_lock_spin(&kse_zombie_lock);
375116167Salc	TAILQ_INSERT_HEAD(&zombie_kses, ke, ke_procq);
376116167Salc	mtx_unlock_spin(&kse_zombie_lock);
377116167Salc}
378116167Salc
379116167Salc/*
380116167Salc * Stash an embarasingly extra ksegrp into the zombie ksegrp queue.
3813374Sdg */
382116167Salcvoid
38338542Sluoqiksegrp_stash(struct ksegrp *kg)
3849507Sdg{
385116167Salc	mtx_lock_spin(&kse_zombie_lock);
386116167Salc	TAILQ_INSERT_HEAD(&zombie_ksegrps, kg, kg_ksegrp);
387116167Salc	mtx_unlock_spin(&kse_zombie_lock);
388116167Salc}
38938542Sluoqi
390223677Salc/*
3911827Sdg * Reap zombie kse resource.
3921827Sdg */
39387834Sdillonvoid
39487834Sdillonthread_reap(void)
39587834Sdillon{
39687834Sdillon	struct thread *td_first, *td_next;
39787834Sdillon	struct kse *ke_first, *ke_next;
3981827Sdg	struct ksegrp *kg_first, * kg_next;
399116167Salc
400121230Salc	/*
401121230Salc	 * Don't even bother to lock if none at this instant,
402121230Salc	 * we really don't care about the next instant..
403121230Salc	 */
40470374Sdillon	if ((!TAILQ_EMPTY(&zombie_threads))
405121230Salc	    || (!TAILQ_EMPTY(&zombie_kses))
406121230Salc	    || (!TAILQ_EMPTY(&zombie_ksegrps))) {
407121230Salc		mtx_lock_spin(&kse_zombie_lock);
408121230Salc		td_first = TAILQ_FIRST(&zombie_threads);
409121230Salc		ke_first = TAILQ_FIRST(&zombie_kses);
41070374Sdillon		kg_first = TAILQ_FIRST(&zombie_ksegrps);
411121230Salc		if (td_first)
412193303Salc			TAILQ_INIT(&zombie_threads);
413193303Salc		if (ke_first)
414193303Salc			TAILQ_INIT(&zombie_kses);
415193303Salc		if (kg_first)
416228156Skib			TAILQ_INIT(&zombie_ksegrps);
417193303Salc		mtx_unlock_spin(&kse_zombie_lock);
418193303Salc		while (td_first) {
419193303Salc			td_next = TAILQ_NEXT(td_first, td_runq);
420193303Salc			if (td_first->td_ucred)
421193303Salc				crfree(td_first->td_ucred);
422193303Salc			thread_free(td_first);
423193303Salc			td_first = td_next;
424193303Salc		}
425193303Salc		while (ke_first) {
426193303Salc			ke_next = TAILQ_NEXT(ke_first, ke_procq);
427121230Salc			kse_free(ke_first);
428121230Salc			ke_first = ke_next;
429121230Salc		}
430121230Salc		while (kg_first) {
431121230Salc			kg_next = TAILQ_NEXT(kg_first, kg_ksegrp);
432193303Salc			ksegrp_free(kg_first);
433172875Salc			kg_first = kg_next;
434172875Salc		}
435172875Salc	}
436172875Salc	kse_GC();
4371827Sdg}
4381541Srgrimes
43912767Sdyson/*
44038542Sluoqi * Allocate a ksegrp.
441116167Salc */
4421541Srgrimesstruct ksegrp *
4431541Srgrimesksegrp_alloc(void)
4441549Srgrimes{
4451549Srgrimes	return (uma_zalloc(ksegrp_zone, M_WAITOK));
4461549Srgrimes}
4471549Srgrimes
448163359Salc/*
449163359Salc * Allocate a kse.
450163359Salc */
4511549Srgrimesstruct kse *
4525455Sdgkse_alloc(void)
4535455Sdg{
45412767Sdyson	return (uma_zalloc(kse_zone, M_WAITOK));
455146340Sbz}
4561549Srgrimes
457138531Salc/*
4585455Sdg * Allocate a thread.
4595455Sdg */
460155384Sjeffstruct thread *
46111701Sdysonthread_alloc(void)
46211701Sdyson{
4631549Srgrimes	thread_reap(); /* check if any zombies to get */
4641549Srgrimes	return (uma_zalloc(thread_zone, M_WAITOK));
4651549Srgrimes}
4661549Srgrimes
467163359Salc/*
468163359Salc * Deallocate a ksegrp.
469163359Salc */
470163359Salcvoid
47192029Seivindksegrp_free(struct ksegrp *td)
4726151Sdg{
4736151Sdg	uma_zfree(ksegrp_zone, td);
4746151Sdg}
4756151Sdg
4766151Sdg/*
4771549Srgrimes * Deallocate a kse.
478163359Salc */
4791549Srgrimesvoid
4801549Srgrimeskse_free(struct kse *td)
4811549Srgrimes{
48296755Strhodes	uma_zfree(kse_zone, td);
4831549Srgrimes}
48412820Sphk
4859507Sdg/*
4869507Sdg * Deallocate a thread.
4871549Srgrimes */
4881549Srgrimesvoid
489137726Sphkthread_free(struct thread *td)
490137726Sphk{
4911549Srgrimes
492127926Salc	cpu_thread_clean(td);
493146340Sbz	uma_zfree(thread_zone, td);
4941549Srgrimes}
495227102Skib
496227102Skib/*
4971549Srgrimes * Assign a thread ID.
498227102Skib */
4999507Sdgint
500155384Sjeffthread_new_tid(void)
50111701Sdyson{
50211701Sdyson	struct tid_bitmap_part *bmp, *new;
5031549Srgrimes	int bit, idx, tid;
5041549Srgrimes
505137726Sphk	mtx_lock(&tid_lock);
5061549Srgrimes	STAILQ_FOREACH(bmp, &tid_bitmap, bmp_next) {
507127926Salc		if (bmp->bmp_free)
5081549Srgrimes			break;
5091827Sdg	}
51086092Sdillon	/* Create a new bitmap if we run out of free bits. */
5111827Sdg	if (bmp == NULL) {
512191935Salc		mtx_unlock(&tid_lock);
513191935Salc		new = uma_zalloc(tid_zone, M_WAITOK);
5145455Sdg		mtx_lock(&tid_lock);
5151549Srgrimes		bmp = STAILQ_LAST(&tid_bitmap, tid_bitmap_part, bmp_next);
51686092Sdillon		if (bmp == NULL || bmp->bmp_free < TID_IDS_PER_PART/2) {
51786092Sdillon			/* 1=free, 0=assigned. This way we can use ffsl(). */
51886092Sdillon			memset(new->bmp_bitmap, ~0U, sizeof(new->bmp_bitmap));
51986092Sdillon			new->bmp_base = (bmp == NULL) ? TID_MIN :
520163359Salc			    bmp->bmp_base + TID_IDS_PER_PART;
521163359Salc			new->bmp_free = TID_IDS_PER_PART;
522163359Salc			STAILQ_INSERT_TAIL(&tid_bitmap, new, bmp_next);
52386092Sdillon			bmp = new;
5241827Sdg			new = NULL;
52542957Sdillon		}
5261549Srgrimes	} else
5271827Sdg		new = NULL;
52858345Sphk	/* We have a bitmap with available IDs. */
529119092Sphk	idx = 0;
53084827Sjhb	while (idx < TID_BITMAP_SIZE && bmp->bmp_bitmap[idx] == 0UL)
53184827Sjhb		idx++;
53291406Sjhb	bit = ffsl(bmp->bmp_bitmap[idx]) - 1;
53391406Sjhb	tid = bmp->bmp_base + idx * TID_IDS_PER_IDX + bit;
534127926Salc	bmp->bmp_bitmap[idx] &= ~(1UL << bit);
5356626Sdg	bmp->bmp_free--;
536137726Sphk	mtx_unlock(&tid_lock);
5371549Srgrimes
5381549Srgrimes	if (new != NULL)
53970374Sdillon		uma_zfree(tid_zone, new);
540189595Sjhb	return (tid);
5411827Sdg}
5421827Sdg
543121205Sphk
544136927Sphk/*
5451549Srgrimes * Discard the current thread and exit from its context.
546119092Sphk *
547119092Sphk * Because we can't free a thread while we're operating under its context,
54858934Sphk * push the current thread into our CPU's deadthread holder. This means
5491549Srgrimes * we needn't worry about someone else grabbing our context before we
5501549Srgrimes * do a cpu_throw().
5511827Sdg */
5521827Sdgvoid
5531827Sdgthread_exit(void)
554137726Sphk{
55542957Sdillon	struct thread *td;
5561827Sdg	struct kse *ke;
5571549Srgrimes	struct proc *p;
558191935Salc	struct ksegrp	*kg;
559127926Salc
560191935Salc	td = curthread;
561191935Salc	kg = td->td_ksegrp;
562191935Salc	p = td->td_proc;
563191935Salc	ke = td->td_kse;
564191935Salc
5651549Srgrimes	mtx_assert(&sched_lock, MA_OWNED);
566127926Salc	KASSERT(p != NULL, ("thread exiting without a process"));
5671827Sdg	KASSERT(ke != NULL, ("thread exiting without a kse"));
5684207Sdg	KASSERT(kg != NULL, ("thread exiting without a kse group"));
5691549Srgrimes	PROC_LOCK_ASSERT(p, MA_OWNED);
5701549Srgrimes	CTR1(KTR_PROC, "thread_exit: thread %p", td);
5711549Srgrimes	mtx_assert(&Giant, MA_NOTOWNED);
5721549Srgrimes
5731549Srgrimes	if (td->td_standin != NULL) {
574139296Sphk		thread_stash(td->td_standin);
5751549Srgrimes		td->td_standin = NULL;
57612820Sphk	}
5779507Sdg
5789507Sdg	cpu_thread_exit(td);	/* XXXSMP */
5791549Srgrimes
5801549Srgrimes	/*
5811541Srgrimes	 * The last thread is left attached to the process
5821541Srgrimes	 * So that the whole bundle gets recycled. Skip
5835455Sdg	 * all this stuff.
5845455Sdg	 */
585127926Salc	if (p->p_numthreads > 1) {
58677398Sjhb		thread_unlink(td);
5871549Srgrimes		if (p->p_maxthrwaits)
588121495Salc			wakeup(&p->p_numthreads);
5891549Srgrimes		/*
5901827Sdg		 * The test below is NOT true if we are the
5911549Srgrimes		 * sole exiting thread. P_STOPPED_SNGL is unset
5921549Srgrimes		 * in exit1() after it is the only survivor.
5931549Srgrimes		 */
59412767Sdyson		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
5951549Srgrimes			if (p->p_numthreads == p->p_suspcount) {
5961549Srgrimes				thread_unsuspend_one(p->p_singlethread);
5971549Srgrimes			}
59812767Sdyson		}
59912767Sdyson
600121495Salc		/*
601121495Salc		 * Because each upcall structure has an owner thread,
6027178Sdg		 * owner thread exits only when process is in exiting
6035455Sdg		 * state, so upcall to userland is no longer needed,
6045455Sdg		 * deleting upcall structure is safe here.
6055455Sdg		 * So when all threads in a group is exited, all upcalls
6065455Sdg		 * in the group should be automatically freed.
607127926Salc		 */
6087178Sdg		if (td->td_upcall)
609127926Salc			upcall_remove(td);
6101549Srgrimes
6111549Srgrimes		sched_exit_thread(FIRST_THREAD_IN_PROC(p), td);
6121549Srgrimes		sched_exit_kse(FIRST_KSE_IN_PROC(p), ke);
61312767Sdyson		ke->ke_state = KES_UNQUEUED;
6141549Srgrimes		ke->ke_thread = NULL;
6151549Srgrimes		/*
6161549Srgrimes		 * Decide what to do with the KSE attached to this thread.
61783366Sjulian		 */
6181549Srgrimes		if (ke->ke_flags & KEF_EXIT) {
61991406Sjhb			kse_unlink(ke);
6201549Srgrimes			if (kg->kg_kses == 0) {
62179242Sdillon				sched_exit_ksegrp(FIRST_KSEGRP_IN_PROC(p), kg);
6221549Srgrimes				ksegrp_unlink(kg);
6231549Srgrimes			}
6241549Srgrimes		}
6251549Srgrimes		else
626127926Salc			kse_reassign(ke);
627127926Salc		PROC_UNLOCK(p);
6281549Srgrimes		td->td_kse	= NULL;
629127926Salc		td->td_state	= TDS_INACTIVE;
630121230Salc#if 0
631121230Salc		td->td_proc	= NULL;
6321549Srgrimes#endif
633191935Salc		td->td_ksegrp	= NULL;
63439739Srvb		td->td_last_kse	= NULL;
63539739Srvb		PCPU_SET(deadthread, td);
6364207Sdg	} else {
6371549Srgrimes		PROC_UNLOCK(p);
6381549Srgrimes	}
6391549Srgrimes	/* XXX Shouldn't cpu_throw() here. */
6401549Srgrimes	mtx_assert(&sched_lock, MA_OWNED);
6411549Srgrimes	cpu_throw(td, choosethread());
64210556Sdyson	panic("I'm a teapot!");
64333847Smsmith	/* NOTREACHED */
64476827Salfred}
64599211Srobert
64699211Srobert/*
64733847Smsmith * Do any thread specific cleanups that may be needed in wait()
64833847Smsmith * called with Giant, proc and schedlock not held.
64933847Smsmith */
65033847Smsmithvoid
65112820Sphkthread_wait(struct proc *p)
6529507Sdg{
6539507Sdg	struct thread *td;
6541549Srgrimes
6559507Sdg	mtx_assert(&Giant, MA_NOTOWNED);
6569507Sdg	KASSERT((p->p_numthreads == 1), ("Multiple threads in wait1()"));
6571549Srgrimes	KASSERT((p->p_numksegrps == 1), ("Multiple ksegrps in wait1()"));
65810556Sdyson	FOREACH_THREAD_IN_PROC(p, td) {
65910556Sdyson		if (td->td_standin != NULL) {
66034403Smsmith			thread_free(td->td_standin);
661140723Sjeff			td->td_standin = NULL;
66232286Sdyson		}
66310556Sdyson		cpu_thread_clean(td);
664116279Salc	}
665140723Sjeff	thread_reap();	/* check for zombie threads etc. */
66634403Smsmith}
66776827Salfred
66876827Salfred/*
669140723Sjeff * Link a thread to a process.
670116279Salc * set up anything that needs to be initialized for it to
67133847Smsmith * be used by the process.
67210556Sdyson *
67310556Sdyson * Note that we do not link to the proc's ucred here.
67433847Smsmith * The thread is linked as if running but no KSE assigned.
67533847Smsmith */
67633847Smsmithvoid
67733847Smsmiththread_link(struct thread *td, struct ksegrp *kg)
67833847Smsmith{
67933847Smsmith	struct proc *p;
68033847Smsmith
68110556Sdyson	p = kg->kg_proc;
68233847Smsmith	td->td_state    = TDS_INACTIVE;
68310556Sdyson	td->td_proc     = p;
68410556Sdyson	td->td_ksegrp   = kg;
68533847Smsmith	td->td_last_kse = NULL;
68612767Sdyson	td->td_flags    = 0;
68734206Sdyson	td->td_kflags	= 0;
688146340Sbz	td->td_kse      = NULL;
689163140Salc
690137726Sphk	LIST_INIT(&td->td_contested);
6916151Sdg	callout_init(&td->td_slpcallout, CALLOUT_MPSAFE);
6926151Sdg	TAILQ_INSERT_HEAD(&p->p_threads, td, td_plist);
6937178Sdg	TAILQ_INSERT_HEAD(&kg->kg_threads, td, td_kglist);
69433847Smsmith	p->p_numthreads++;
695163210Salc	kg->kg_numthreads++;
6961549Srgrimes}
69733847Smsmith
69833847Smsmithvoid
69933847Smsmiththread_unlink(struct thread *td)
700137726Sphk{
701137726Sphk	struct proc *p = td->td_proc;
702155384Sjeff	struct ksegrp *kg = td->td_ksegrp;
70311701Sdyson
70411701Sdyson	mtx_assert(&sched_lock, MA_OWNED);
7051549Srgrimes	TAILQ_REMOVE(&p->p_threads, td, td_plist);
7061549Srgrimes	p->p_numthreads--;
7071549Srgrimes	TAILQ_REMOVE(&kg->kg_threads, td, td_kglist);
7081827Sdg	kg->kg_numthreads--;
7091549Srgrimes	/* could clear a few other things here */
7101827Sdg}
7111827Sdg
7121549Srgrimes/*
71312767Sdyson * Purge a ksegrp resource. When a ksegrp is preparing to
7141827Sdg * exit, it calls this function.
7151549Srgrimes */
7161887Sdgvoid
7171549Srgrimeskse_purge_group(struct thread *td)
718163210Salc{
719163210Salc	struct ksegrp *kg;
720116512Salc	struct kse *ke;
721207410Skmacy
722100832Salc	kg = td->td_ksegrp;
723207410Skmacy 	KASSERT(kg->kg_numthreads == 1, ("%s: bad thread number", __func__));
724207410Skmacy	while ((ke = TAILQ_FIRST(&kg->kg_iq)) != NULL) {
72575692Salfred		KASSERT(ke->ke_state == KES_IDLE,
726207410Skmacy			("%s: wrong idle KSE state", __func__));
727207410Skmacy		kse_unlink(ke);
728170292Sattilio	}
729170292Sattilio	KASSERT((kg->kg_kses == 1),
730121495Salc		("%s: ksegrp still has %d KSEs", __func__, kg->kg_kses));
731121495Salc	KASSERT((kg->kg_numupcalls == 0),
732121495Salc	        ("%s: ksegrp still has %d upcall datas",
733163210Salc		__func__, kg->kg_numupcalls));
734163210Salc}
735163210Salc
736207410Skmacy/*
737207410Skmacy * Purge a process's KSE resource. When a process is preparing to
738163210Salc * exit, it calls kse_purge to release any extra KSE resources in
739207410Skmacy * the process.
740207410Skmacy */
741163210Salcvoid
742163210Salckse_purge(struct proc *p, struct thread *td)
7431549Srgrimes{
7441827Sdg	struct ksegrp *kg;
7451827Sdg	struct kse *ke;
7461827Sdg
7471827Sdg 	KASSERT(p->p_numthreads == 1, ("bad thread number"));
7481827Sdg	while ((kg = TAILQ_FIRST(&p->p_ksegrps)) != NULL) {
7491827Sdg		TAILQ_REMOVE(&p->p_ksegrps, kg, kg_ksegrp);
75038866Sbde		p->p_numksegrps--;
751116512Salc		/*
752100832Salc		 * There is no ownership for KSE, after all threads
753207410Skmacy		 * in the group exited, it is possible that some KSEs
754207410Skmacy		 * were left in idle queue, gc them now.
75575692Salfred		 */
756207410Skmacy		while ((ke = TAILQ_FIRST(&kg->kg_iq)) != NULL) {
757207410Skmacy			KASSERT(ke->ke_state == KES_IDLE,
758116512Salc			   ("%s: wrong idle KSE state", __func__));
759170292Sattilio			TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
760170292Sattilio			kg->kg_idle_kses--;
7619507Sdg			TAILQ_REMOVE(&kg->kg_kseq, ke, ke_kglist);
7621549Srgrimes			kg->kg_kses--;
76345347Sjulian			kse_stash(ke);
7641549Srgrimes		}
76545347Sjulian		KASSERT(((kg->kg_kses == 0) && (kg != td->td_ksegrp)) ||
76645347Sjulian		        ((kg->kg_kses == 1) && (kg == td->td_ksegrp)),
76745347Sjulian		        ("ksegrp has wrong kg_kses: %d", kg->kg_kses));
7681549Srgrimes		KASSERT((kg->kg_numupcalls == 0),
769121227Salc		        ("%s: ksegrp still has %d upcall datas",
77045347Sjulian			__func__, kg->kg_numupcalls));
771100832Salc
772207410Skmacy		if (kg != td->td_ksegrp)
773207410Skmacy			ksegrp_stash(kg);
77475692Salfred	}
775207410Skmacy	TAILQ_INSERT_HEAD(&p->p_ksegrps, td->td_ksegrp, kg_ksegrp);
776207410Skmacy	p->p_numksegrps++;
777116512Salc}
7785455Sdg
779163140Salc/*
780163140Salc * Enforce single-threading.
781192010Salc *
782192010Salc * Returns 1 if the caller must abort (another thread is waiting to
783163140Salc * exit the process or similar). Process is locked!
784163140Salc * Returns 0 when you are successfully the only thread running.
785207410Skmacy * A process has successfully single threaded in the suspend mode when
786207410Skmacy * There are no threads in user mode. Threads in the kernel must be
787163140Salc * allowed to continue until they get to the user boundary. They may even
788207410Skmacy * copy out their return values and data before suspending. They may however be
789207410Skmacy * accellerated in reaching the user boundary as we will wake up
790163140Salc * any sleeping threads that are interruptable. (PCATCH).
791163140Salc */
int
thread_single(int force_exit)
{
	struct thread *td;
	struct thread *td2;
	struct proc *p;

	td = curthread;
	p = td->td_proc;
	mtx_assert(&Giant, MA_NOTOWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT((td != NULL), ("curthread is NULL"));

	/* Shortcut: a non-KSE process with a single thread is trivially single. */
	if ((p->p_flag & P_SA) == 0 && p->p_numthreads == 1)
		return (0);

	/* Is someone already single threading? */
	if (p->p_singlethread)
		return (1);

	/*
	 * Record the request mode: SINGLE_EXIT means the other threads
	 * must go away entirely, otherwise they only need to suspend.
	 */
	if (force_exit == SINGLE_EXIT) {
		p->p_flag |= P_SINGLE_EXIT;
	} else
		p->p_flag &= ~P_SINGLE_EXIT;
	p->p_flag |= P_STOPPED_SINGLE;
	mtx_lock_spin(&sched_lock);
	p->p_singlethread = td;
	/* Loop until we are the only thread in the process not suspended. */
	while ((p->p_numthreads - p->p_suspcount) != 1) {
		FOREACH_THREAD_IN_PROC(p, td2) {
			if (td2 == td)
				continue;
			/* Make sure td2 notices the request at the user boundary. */
			td2->td_flags |= TDF_ASTPENDING;
			if (TD_IS_INHIBITED(td2)) {
				if (force_exit == SINGLE_EXIT) {
					/*
					 * Exiting: release suspended threads
					 * and abort interruptible sleeps so
					 * everyone can reach the exit path.
					 */
					if (TD_IS_SUSPENDED(td2)) {
						thread_unsuspend_one(td2);
					}
					if (TD_ON_SLEEPQ(td2) &&
					    (td2->td_flags & TDF_SINTR)) {
						sleepq_abort(td2);
					}
				} else {
					if (TD_IS_SUSPENDED(td2))
						continue;
					/*
					 * maybe other inhibited states too?
					 * XXXKSE Is it totally safe to
					 * suspend a non-interruptible thread?
					 */
					if (td2->td_inhibitors &
					    (TDI_SLEEPING | TDI_SWAPPED))
						thread_suspend_one(td2);
				}
			}
		}
		/*
		 * Maybe we suspended some threads.. was it enough?
		 */
		if ((p->p_numthreads - p->p_suspcount) == 1)
			break;

		/*
		 * Wake us up when everyone else has suspended.
		 * In the mean time we suspend as well.
		 */
		thread_suspend_one(td);
		/* Drop the proc lock (not sched_lock) across the switch. */
		PROC_UNLOCK(p);
		mi_switch(SW_VOL);
		mtx_unlock_spin(&sched_lock);
		/* Retake both locks and re-evaluate the thread counts. */
		PROC_LOCK(p);
		mtx_lock_spin(&sched_lock);
	}
	if (force_exit == SINGLE_EXIT) {
		/* We are alone now; discard our upcall and stale KSE state. */
		if (td->td_upcall)
			upcall_remove(td);
		kse_purge(p, td);
	}
	mtx_unlock_spin(&sched_lock);
	return (0);
}
8721549Srgrimes
87312767Sdyson/*
8741827Sdg * Called in from locations that can safely check to see
8751549Srgrimes * whether we have to suspend or at least throttle for a
8761549Srgrimes * single-thread event (e.g. fork).
8771549Srgrimes *
8781549Srgrimes * Such locations include userret().
879134892Sphk * If the "return_instead" argument is non zero, the thread must be able to
8809507Sdg * accept 0 (caller may continue), or 1 (caller must abort) as a result.
8819507Sdg *
882134892Sphk * The 'return_instead' argument tells the function if it may do a
8831549Srgrimes * thread_exit() or suspend, or whether the caller must abort and back
8841549Srgrimes * out instead.
88551340Sdillon *
8861549Srgrimes * If the thread that set the single_threading request has set the
887137726Sphk * P_SINGLE_EXIT bit in the process flags then this call will never return
888137726Sphk * if 'return_instead' is false, but will exit.
889136977Sphk *
890136977Sphk * P_SINGLE_EXIT | return_instead == 0| return_instead != 0
891136977Sphk *---------------+--------------------+---------------------
89251340Sdillon *       0       | returns 0          |   returns 0 or 1
 *               | when ST ends       |   immediately
8941549Srgrimes *---------------+--------------------+---------------------
89542957Sdillon *       1       | thread exits       |   returns 1
 *               |                    |  immediately
8971887Sdg * 0 = thread_exit() or suspension ok,
8981549Srgrimes * other = return error instead of stopping the thread.
8991549Srgrimes *
9001549Srgrimes * While a full suspension is under effect, even a single threading
9011887Sdg * thread would be suspended if it made this call (but it shouldn't).
9021549Srgrimes * This call should only be made from places where
9031549Srgrimes * thread_exit() would be safe as that may be the outcome unless
90458345Sphk * return_instead is set.
905119092Sphk */
int
thread_suspend_check(int return_instead)
{
	struct thread *td;
	struct proc *p;

	td = curthread;
	p = td->td_proc;
	mtx_assert(&Giant, MA_NOTOWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	while (P_SHOULDSTOP(p)) {
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			KASSERT(p->p_singlethread != NULL,
			    ("singlethread not set"));
			/*
			 * The only suspension in action is a
			 * single-threading. Single threader need not stop.
			 * XXX Should be safe to access unlocked
			 * as it can only be set to be true by us.
			 */
			if (p->p_singlethread == td)
				return (0);	/* Exempt from stopping. */
		}
		/* Caller asked to be told rather than be blocked here. */
		if (return_instead)
			return (1);

		mtx_lock_spin(&sched_lock);
		/* Report the stop to anyone waiting on it (e.g. wait/ptrace). */
		thread_stopped(p);
		/*
		 * If the process is waiting for us to exit,
		 * this thread should just suicide.
		 * Assumes that P_SINGLE_EXIT implies P_STOPPED_SINGLE.
		 */
		if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) {
			if (p->p_flag & P_SA)
				thread_exit();
			else
				thr_exit1();
		}

		/*
		 * When a thread suspends, it just
		 * moves to the process's suspend queue
		 * and stays there.
		 */
		thread_suspend_one(td);
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			/* We were the last one; wake the single-threader. */
			if (p->p_numthreads == p->p_suspcount) {
				thread_unsuspend_one(p->p_singlethread);
			}
		}
		PROC_UNLOCK(p);
		mi_switch(SW_INVOL);
		mtx_unlock_spin(&sched_lock);
		PROC_LOCK(p);
	}
	return (0);
}
96445347Sjulian
96545347Sjulianvoid
96645347Sjulianthread_suspend_one(struct thread *td)
967228156Skib{
96847239Sdt	struct proc *p = td->td_proc;
969192134Salc
970192134Salc	mtx_assert(&sched_lock, MA_OWNED);
971192134Salc	PROC_LOCK_ASSERT(p, MA_OWNED);
972192134Salc	KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
97334206Sdyson	p->p_suspcount++;
97434206Sdyson	TD_SET_SUSPENDED(td);
9751549Srgrimes	TAILQ_INSERT_TAIL(&p->p_suspended, td, td_runq);
9761827Sdg	/*
9771549Srgrimes	 * Hack: If we are suspending but are on the sleep queue
9781827Sdg	 * then we are in msleep or the cv equivalent. We
9791827Sdg	 * want to look like we have two Inhibitors.
9801827Sdg	 * May already be set.. doesn't matter.
98158634Scharnier	 */
9821827Sdg	if (TD_ON_SLEEPQ(td))
9831549Srgrimes		TD_SET_SLEEPING(td);
9841827Sdg}
9851549Srgrimes
9861827Sdgvoid
9871827Sdgthread_unsuspend_one(struct thread *td)
9881549Srgrimes{
9891549Srgrimes	struct proc *p = td->td_proc;
990207746Salc
991207746Salc	mtx_assert(&sched_lock, MA_OWNED);
99234206Sdyson	PROC_LOCK_ASSERT(p, MA_OWNED);
993207746Salc	TAILQ_REMOVE(&p->p_suspended, td, td_runq);
994207746Salc	TD_CLR_SUSPENDED(td);
995207746Salc	p->p_suspcount--;
99634206Sdyson	setrunnable(td);
997207746Salc}
998207746Salc
99938799Sdfr/*
10001549Srgrimes * Allow all threads blocked by single threading to continue running.
1001207746Salc */
100275692Salfredvoid
1003207746Salcthread_unsuspend(struct proc *p)
10041549Srgrimes{
10051549Srgrimes	struct thread *td;
10061549Srgrimes
1007116512Salc	mtx_assert(&sched_lock, MA_OWNED);
10081549Srgrimes	PROC_LOCK_ASSERT(p, MA_OWNED);
10099507Sdg	if (!P_SHOULDSTOP(p)) {
10101549Srgrimes		while (( td = TAILQ_FIRST(&p->p_suspended))) {
10114207Sdg			thread_unsuspend_one(td);
10121549Srgrimes		}
10131549Srgrimes	} else if ((P_SHOULDSTOP(p) == P_STOPPED_SINGLE) &&
101433847Smsmith	    (p->p_numthreads == p->p_suspcount)) {
101533847Smsmith		/*
101633847Smsmith		 * Stopping everything also did the job for the single
101733847Smsmith		 * threading request. Now we've downgraded to single-threaded,
101833847Smsmith		 * let it continue.
101933847Smsmith		 */
102033847Smsmith		thread_unsuspend_one(p->p_singlethread);
102133847Smsmith	}
102243129Sdillon}
102310556Sdyson
102410556Sdysonvoid
102510556Sdysonthread_single_end(void)
102610556Sdyson{
102710556Sdyson	struct thread *td;
102810556Sdyson	struct proc *p;
102910556Sdyson
103010556Sdyson	td = curthread;
103110556Sdyson	p = td->td_proc;
103234403Smsmith	PROC_LOCK_ASSERT(p, MA_OWNED);
103318973Sdyson	p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT);
103444321Salc	mtx_lock_spin(&sched_lock);
103544321Salc	p->p_singlethread = NULL;
103644321Salc	/*
103744321Salc	 * If there are other threads they mey now run,
103844321Salc	 * unless of course there is a blanket 'stop order'
103944321Salc	 * on the process. The single threader must be allowed
104044321Salc	 * to continue however as this is a bad place to stop.
104144321Salc	 */
104244321Salc	if ((p->p_numthreads != 1) && (!P_SHOULDSTOP(p))) {
104344321Salc		while (( td = TAILQ_FIRST(&p->p_suspended))) {
104444321Salc			thread_unsuspend_one(td);
104544321Salc		}
1046170170Sattilio	}
104744321Salc	mtx_unlock_spin(&sched_lock);
104844321Salc}
104944321Salc
105044321Salc