kern_thread.c revision 275617
1139804Simp/*-
299026Sjulian * Copyright (C) 2001 Julian Elischer <julian@freebsd.org>.
399026Sjulian *  All rights reserved.
499026Sjulian *
599026Sjulian * Redistribution and use in source and binary forms, with or without
699026Sjulian * modification, are permitted provided that the following conditions
799026Sjulian * are met:
899026Sjulian * 1. Redistributions of source code must retain the above copyright
999026Sjulian *    notice(s), this list of conditions and the following disclaimer as
10124350Sschweikh *    the first lines of this file unmodified other than the possible
1199026Sjulian *    addition of one or more copyright notices.
1299026Sjulian * 2. Redistributions in binary form must reproduce the above copyright
1399026Sjulian *    notice(s), this list of conditions and the following disclaimer in the
1499026Sjulian *    documentation and/or other materials provided with the distribution.
1599026Sjulian *
1699026Sjulian * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
1799026Sjulian * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
1899026Sjulian * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
1999026Sjulian * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
2099026Sjulian * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
2199026Sjulian * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
2299026Sjulian * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
2399026Sjulian * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
2499026Sjulian * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
2599026Sjulian * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
2699026Sjulian * DAMAGE.
2799026Sjulian */
2899026Sjulian
29181695Sattilio#include "opt_witness.h"
30198464Sjkoshy#include "opt_hwpmc_hooks.h"
31181695Sattilio
32116182Sobrien#include <sys/cdefs.h>
33116182Sobrien__FBSDID("$FreeBSD: head/sys/kern/kern_thread.c 275617 2014-12-08 16:27:43Z kib $");
34116182Sobrien
3599026Sjulian#include <sys/param.h>
3699026Sjulian#include <sys/systm.h>
3799026Sjulian#include <sys/kernel.h>
3899026Sjulian#include <sys/lock.h>
3999026Sjulian#include <sys/mutex.h>
4099026Sjulian#include <sys/proc.h>
41236317Skib#include <sys/rangelock.h>
42156705Sdavidxu#include <sys/resourcevar.h>
43235459Srstone#include <sys/sdt.h>
44130355Sjulian#include <sys/smp.h>
45107126Sjeff#include <sys/sched.h>
46126326Sjhb#include <sys/sleepqueue.h>
47174647Sjeff#include <sys/selinfo.h>
48122514Sjhb#include <sys/turnstile.h>
4999026Sjulian#include <sys/ktr.h>
50213642Sdavidxu#include <sys/rwlock.h>
51143149Sdavidxu#include <sys/umtx.h>
52176730Sjeff#include <sys/cpuset.h>
53198464Sjkoshy#ifdef	HWPMC_HOOKS
54198464Sjkoshy#include <sys/pmckern.h>
55198464Sjkoshy#endif
5699026Sjulian
57155195Srwatson#include <security/audit/audit.h>
58155195Srwatson
5999026Sjulian#include <vm/vm.h>
60116355Salc#include <vm/vm_extern.h>
6199026Sjulian#include <vm/uma.h>
62173631Srrs#include <sys/eventhandler.h>
6399026Sjulian
64235459SrstoneSDT_PROVIDER_DECLARE(proc);
65258622SavgSDT_PROBE_DEFINE(proc, , , lwp__exit);
66235459Srstone
6799026Sjulian/*
68163709Sjb * thread related storage.
69163709Sjb */
7099026Sjulianstatic uma_zone_t thread_zone;
7199026Sjulian
72111028SjeffTAILQ_HEAD(, thread) zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads);
73172256Sattiliostatic struct mtx zombie_lock;
74170296SjeffMTX_SYSINIT(zombie_lock, &zombie_lock, "zombie lock", MTX_SPIN);
7599026Sjulian
76170598Sjeffstatic void thread_zombie(struct thread *);
77170598Sjeff
78216314Sdavidxu#define TID_BUFFER_SIZE	1024
79216314Sdavidxu
80127794Smarcelstruct mtx tid_lock;
81143802Sphkstatic struct unrhdr *tid_unrhdr;
82216314Sdavidxustatic lwpid_t tid_buffer[TID_BUFFER_SIZE];
83216314Sdavidxustatic int tid_head, tid_tail;
84213642Sdavidxustatic MALLOC_DEFINE(M_TIDHASH, "tidhash", "thread hash");
85213642Sdavidxu
86213642Sdavidxustruct	tidhashhead *tidhashtbl;
87213642Sdavidxuu_long	tidhash;
88213642Sdavidxustruct	rwlock tidhash_lock;
89213642Sdavidxu
/*
 * Allocate a thread ID (lwpid).  Try the unr(9) allocator first; if it
 * is exhausted, fall back to the ring buffer of deferred-free TIDs
 * maintained by tid_free().  Returns -1 if no ID is available.
 */
static lwpid_t
tid_alloc(void)
{
	lwpid_t	tid;

	tid = alloc_unr(tid_unrhdr);
	if (tid != -1)
		return (tid);
	mtx_lock(&tid_lock);
	if (tid_head == tid_tail) {
		/* The fallback buffer is empty as well. */
		mtx_unlock(&tid_lock);
		return (-1);
	}
	tid = tid_buffer[tid_head];
	tid_head = (tid_head + 1) % TID_BUFFER_SIZE;
	mtx_unlock(&tid_lock);
	return (tid);
}
108216314Sdavidxu
/*
 * Release a thread ID.  The ID is first parked in a ring buffer rather
 * than returned to unr(9) immediately, which delays its reuse.  When
 * the buffer is full the oldest entry is evicted and handed back to
 * the unr(9) allocator, outside of tid_lock.
 */
static void
tid_free(lwpid_t tid)
{
	lwpid_t tmp_tid = -1;

	mtx_lock(&tid_lock);
	if ((tid_tail + 1) % TID_BUFFER_SIZE == tid_head) {
		/* Buffer full: evict the oldest entry. */
		tmp_tid = tid_buffer[tid_head];
		tid_head = (tid_head + 1) % TID_BUFFER_SIZE;
	}
	tid_buffer[tid_tail] = tid;
	tid_tail = (tid_tail + 1) % TID_BUFFER_SIZE;
	mtx_unlock(&tid_lock);
	/* Return the evicted ID to unr(9) after dropping the lock. */
	if (tmp_tid != -1)
		free_unr(tid_unrhdr, tmp_tid);
}
125216314Sdavidxu
/*
 * Prepare a thread for use.
 *
 * UMA constructor: runs each time a thread structure is handed out
 * from the zone, including reuse of a cached item.
 */
static int
thread_ctor(void *mem, int size, void *arg, int flags)
{
	struct thread	*td;

	td = (struct thread *)mem;
	td->td_state = TDS_INACTIVE;
	td->td_oncpu = NOCPU;

	/* NOTE(review): tid_alloc() can return -1 on exhaustion; the
	 * result is not checked here — confirm this is acceptable. */
	td->td_tid = tid_alloc();

	/*
	 * Note that td_critnest begins life as 1 because the thread is not
	 * running and is thereby implicitly waiting to be on the receiving
	 * end of a context switch.
	 */
	td->td_critnest = 1;
	td->td_lend_user_pri = PRI_MAX;
	EVENTHANDLER_INVOKE(thread_ctor, td);
#ifdef AUDIT
	audit_thread_alloc(td);
#endif
	umtx_thread_alloc(td);
	return (0);
}
15499026Sjulian
/*
 * Reclaim a thread after use.
 *
 * UMA destructor: runs when a thread structure goes back into the
 * zone cache; the thread must be completely inactive by then.
 */
static void
thread_dtor(void *mem, int size, void *arg)
{
	struct thread *td;

	td = (struct thread *)mem;

#ifdef INVARIANTS
	/* Verify that this thread is in a safe state to free. */
	switch (td->td_state) {
	case TDS_INHIBITED:
	case TDS_RUNNING:
	case TDS_CAN_RUN:
	case TDS_RUNQ:
		/*
		 * We must never unlink a thread that is in one of
		 * these states, because it is currently active.
		 */
		panic("bad state for thread unlinking");
		/* NOTREACHED */
	case TDS_INACTIVE:
		break;
	default:
		panic("bad thread state");
		/* NOTREACHED */
	}
#endif
#ifdef AUDIT
	audit_thread_free(td);
#endif
	/* Free all OSD associated to this thread. */
	osd_thread_exit(td);

	EVENTHANDLER_INVOKE(thread_dtor, td);
	tid_free(td->td_tid);
}
19499026Sjulian
/*
 * Initialize type-stable parts of a thread (when newly created).
 *
 * UMA init: unlike thread_ctor(), this runs only when a fresh item is
 * allocated from the backing store, not on every reuse from the cache.
 */
static int
thread_init(void *mem, int size, int flags)
{
	struct thread *td;

	td = (struct thread *)mem;

	td->td_sleepqueue = sleepq_alloc();
	td->td_turnstile = turnstile_alloc();
	td->td_rlqe = NULL;
	EVENTHANDLER_INVOKE(thread_init, td);
	/* Scheduler-private data is placed directly after the thread. */
	td->td_sched = (struct td_sched *)&td[1];
	umtx_thread_init(td);
	td->td_kstack = 0;
	return (0);
}
21499026Sjulian
/*
 * Tear down type-stable parts of a thread (just before being discarded).
 *
 * UMA fini: pairs with thread_init(); runs only when the item leaves
 * the zone for good.
 */
static void
thread_fini(void *mem, int size)
{
	struct thread *td;

	td = (struct thread *)mem;
	EVENTHANDLER_INVOKE(thread_fini, td);
	rlqentry_free(td->td_rlqe);
	turnstile_free(td->td_turnstile);
	sleepq_free(td->td_sleepqueue);
	umtx_thread_fini(td);
	seltdfini(td);
}
231111028Sjeff
/*
 * For a newly created process,
 * link up all the structures and its initial threads etc.
 * called from:
 * {arch}/{arch}/machdep.c   {arch}_init(), init386() etc.
 * proc_dtor() (should go away)
 * proc_init()
 */
void
proc_linkup0(struct proc *p, struct thread *td)
{
	/* Start from an empty thread list, then do the common linkup. */
	TAILQ_INIT(&p->p_threads);	     /* all threads in proc */
	proc_linkup(p, td);
}
246173361Skib
/*
 * Link the initial thread into an otherwise set-up process: initialize
 * the signal queue, pre-allocate the process ksiginfo, set up the
 * mqueue notifier list, and attach td via thread_link().
 */
void
proc_linkup(struct proc *p, struct thread *td)
{

	sigqueue_init(&p->p_sigqueue, p);
	p->p_ksi = ksiginfo_alloc(1);
	if (p->p_ksi != NULL) {
		/* XXX p_ksi may be null if ksiginfo zone is not ready */
		p->p_ksi->ksi_flags = KSI_EXT | KSI_INS;
	}
	LIST_INIT(&p->p_mqnotifier);
	p->p_numthreads = 0;
	thread_link(td, p);
}
261105854Sjulian
/*
 * Initialize global thread allocation resources.
 */
void
threadinit(void)
{

	mtx_init(&tid_lock, "TID lock", NULL, MTX_DEF);

	/*
	 * Thread IDs are allocated above the PID range so they cannot
	 * collide with process IDs; PID_MAX + 1 is left for thread0.
	 */
	tid_unrhdr = new_unrhdr(PID_MAX + 2, INT_MAX, &tid_lock);

	thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(),
	    thread_ctor, thread_dtor, thread_init, thread_fini,
	    16 - 1, 0);
	tidhashtbl = hashinit(maxproc / 2, M_TIDHASH, &tidhash);
	rw_init(&tidhash_lock, "tidhash");
}
28399026Sjulian
/*
 * Place an unused thread on the zombie list, to be freed later by
 * thread_reap().
 * Use the slpq as that must be unused by now.
 */
void
thread_zombie(struct thread *td)
{
	mtx_lock_spin(&zombie_lock);
	TAILQ_INSERT_HEAD(&zombie_threads, td, td_slpq);
	mtx_unlock_spin(&zombie_lock);
}
29599026Sjulian
/*
 * Release a thread that has exited after cpu_throw().
 * Drops the owning process's count of exiting threads and queues the
 * thread for reaping.
 */
void
thread_stash(struct thread *td)
{
	atomic_subtract_rel_int(&td->td_proc->p_exitthreads, 1);
	thread_zombie(td);
}
305170598Sjeff
/*
 * Reap zombie resources.
 */
void
thread_reap(void)
{
	struct thread *td_first, *td_next;

	/*
	 * Don't even bother to lock if none at this instant,
	 * we really don't care about the next instant..
	 */
	if (!TAILQ_EMPTY(&zombie_threads)) {
		mtx_lock_spin(&zombie_lock);
		/* Detach the whole list under the lock, free it after. */
		td_first = TAILQ_FIRST(&zombie_threads);
		if (td_first)
			TAILQ_INIT(&zombie_threads);
		mtx_unlock_spin(&zombie_lock);
		while (td_first) {
			td_next = TAILQ_NEXT(td_first, td_slpq);
			if (td_first->td_ucred)
				crfree(td_first->td_ucred);
			thread_free(td_first);
			td_first = td_next;
		}
	}
}
33399026Sjulian
/*
 * Allocate a thread with a kernel stack of 'pages' pages.
 * Returns NULL if the stack cannot be allocated.
 */
struct thread *
thread_alloc(int pages)
{
	struct thread *td;

	thread_reap(); /* check if any zombies to get */

	td = (struct thread *)uma_zalloc(thread_zone, M_WAITOK);
	KASSERT(td->td_kstack == 0, ("thread_alloc got thread with kstack"));
	if (!vm_thread_new(td, pages)) {
		/* Stack allocation failed; give the thread back. */
		uma_zfree(thread_zone, td);
		return (NULL);
	}
	cpu_thread_alloc(td);
	return (td);
}
35399026Sjulian
/*
 * Allocate a kernel stack of 'pages' pages for a thread that does not
 * have one yet.  Returns 1 on success, 0 on failure.
 */
int
thread_alloc_stack(struct thread *td, int pages)
{

	KASSERT(td->td_kstack == 0,
	    ("thread_alloc_stack called on a thread with kstack"));
	if (!vm_thread_new(td, pages))
		return (0);
	cpu_thread_alloc(td);
	return (1);
}
365196730Skib
/*
 * Deallocate a thread: drop its cpuset reference, machine-dependent
 * state, and kernel stack, then return it to the thread zone.
 */
void
thread_free(struct thread *td)
{

	lock_profile_thread_exit(td);
	if (td->td_cpuset)
		cpuset_rel(td->td_cpuset);
	td->td_cpuset = NULL;
	cpu_thread_free(td);
	if (td->td_kstack != 0)
		vm_thread_dispose(td);
	uma_zfree(thread_zone, td);
}
38299026Sjulian
/*
 * Discard the current thread and exit from its context.
 * Always called with scheduler locked.
 *
 * Because we can't free a thread while we're operating under its context,
 * push the current thread into our CPU's deadthread holder. This means
 * we needn't worry about someone else grabbing our context before we
 * do a cpu_throw().
 */
void
thread_exit(void)
{
	uint64_t runtime, new_switchtime;
	struct thread *td;
	struct thread *td2;
	struct proc *p;
	int wakeup_swapper;

	td = curthread;
	p = td->td_proc;

	/* Caller must hold both the process lock and spinlock. */
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	mtx_assert(&Giant, MA_NOTOWNED);

	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT(p != NULL, ("thread exiting without a process"));
	CTR3(KTR_PROC, "thread_exit: thread %p (pid %ld, %s)", td,
	    (long)p->p_pid, td->td_name);
	KASSERT(TAILQ_EMPTY(&td->td_sigqueue.sq_list), ("signal pending"));

#ifdef AUDIT
	AUDIT_SYSCALL_EXIT(0, td);
#endif
	umtx_thread_exit(td);
	/*
	 * drop FPU & debug register state storage, or any other
	 * architecture specific resources that
	 * would not be on a new untouched process.
	 */
	cpu_thread_exit(td);	/* XXXSMP */

	/*
	 * The last thread is left attached to the process
	 * So that the whole bundle gets recycled. Skip
	 * all this stuff if we never had threads.
	 * EXIT clears all sign of other threads when
	 * it goes to single threading, so the last thread always
	 * takes the short path.
	 */
	if (p->p_flag & P_HADTHREADS) {
		if (p->p_numthreads > 1) {
			atomic_add_int(&td->td_proc->p_exitthreads, 1);
			thread_unlink(td);
			td2 = FIRST_THREAD_IN_PROC(p);
			sched_exit_thread(td2, td);

			/*
			 * The test below is NOT true if we are the
			 * sole exiting thread. P_STOPPED_SINGLE is unset
			 * in exit1() after it is the only survivor.
			 */
			if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
				if (p->p_numthreads == p->p_suspcount) {
					/*
					 * We are the last obstacle; wake
					 * the single-threading thread.
					 */
					thread_lock(p->p_singlethread);
					wakeup_swapper = thread_unsuspend_one(
						p->p_singlethread);
					thread_unlock(p->p_singlethread);
					if (wakeup_swapper)
						kick_proc0();
				}
			}

			PCPU_SET(deadthread, td);
		} else {
			/*
			 * The last thread is exiting.. but not through exit()
			 */
			panic ("thread_exit: Last thread exiting on its own");
		}
	}
#ifdef	HWPMC_HOOKS
	/*
	 * If this thread is part of a process that is being tracked by hwpmc(4),
	 * inform the module of the thread's impending exit.
	 */
	if (PMC_PROC_IS_USING_PMCS(td->td_proc))
		PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT);
#endif
	PROC_UNLOCK(p);
	/* Lock hand-off: take statlock and thread lock, drop spinlock. */
	PROC_STATLOCK(p);
	thread_lock(td);
	PROC_SUNLOCK(p);

	/* Do the same timestamp bookkeeping that mi_switch() would do. */
	new_switchtime = cpu_ticks();
	runtime = new_switchtime - PCPU_GET(switchtime);
	td->td_runtime += runtime;
	td->td_incruntime += runtime;
	PCPU_SET(switchtime, new_switchtime);
	PCPU_SET(switchticks, ticks);
	PCPU_INC(cnt.v_swtch);

	/* Save our resource usage in our process. */
	td->td_ru.ru_nvcsw++;
	ruxagg(p, td);
	rucollect(&p->p_ru, &td->td_ru);
	PROC_STATUNLOCK(p);

	td->td_state = TDS_INACTIVE;
#ifdef WITNESS
	witness_thread_exit(td);
#endif
	CTR1(KTR_PROC, "thread_exit: cpu_throw() thread %p", td);
	sched_throw(td);
	panic("I'm a teapot!");
	/* NOTREACHED */
}
50099026Sjulian
/*
 * Do any thread specific cleanups that may be needed in wait()
 * called with Giant, proc and schedlock not held.
 * The process must be down to exactly one thread, with no exiting
 * threads still in flight (asserted below).
 */
void
thread_wait(struct proc *p)
{
	struct thread *td;

	mtx_assert(&Giant, MA_NOTOWNED);
	KASSERT(p->p_numthreads == 1, ("multiple threads in thread_wait()"));
	KASSERT(p->p_exitthreads == 0, ("p_exitthreads leaking"));
	td = FIRST_THREAD_IN_PROC(p);
	/* Lock the last thread so we spin until it exits cpu_throw(). */
	thread_lock(td);
	thread_unlock(td);
	lock_profile_thread_exit(td);
	cpuset_rel(td->td_cpuset);
	td->td_cpuset = NULL;
	cpu_thread_clean(td);
	crfree(td->td_ucred);
	thread_reap();	/* check for zombie threads etc. */
}
524107719Sjulian
/*
 * Link a thread to a process.
 * set up anything that needs to be initialized for it to
 * be used by the process.
 */
void
thread_link(struct thread *td, struct proc *p)
{

	/*
	 * XXX This can't be enabled because it's called for proc0 before
	 * its lock has been created.
	 * PROC_LOCK_ASSERT(p, MA_OWNED);
	 */
	td->td_state    = TDS_INACTIVE;
	td->td_proc     = p;
	td->td_flags    = TDF_INMEM;

	LIST_INIT(&td->td_contested);
	LIST_INIT(&td->td_lprof[0]);
	LIST_INIT(&td->td_lprof[1]);
	sigqueue_init(&td->td_sigqueue, p);
	callout_init(&td->td_slpcallout, CALLOUT_MPSAFE);
	TAILQ_INSERT_TAIL(&p->p_threads, td, td_plist);
	p->p_numthreads++;
}
55199026Sjulian
/*
 * Remove a thread from its process's thread list.
 * Called from:
 *  thread_exit()
 */
void
thread_unlink(struct thread *td)
{
	struct proc *p = td->td_proc;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	TAILQ_REMOVE(&p->p_threads, td, td_plist);
	p->p_numthreads--;
	/* could clear a few other things here */
	/* Must  NOT clear links to proc! */
}
567113641Sjulian
568195701Skibstatic int
569195701Skibcalc_remaining(struct proc *p, int mode)
570195701Skib{
571195701Skib	int remaining;
572195701Skib
573227657Skib	PROC_LOCK_ASSERT(p, MA_OWNED);
574227657Skib	PROC_SLOCK_ASSERT(p, MA_OWNED);
575195701Skib	if (mode == SINGLE_EXIT)
576195701Skib		remaining = p->p_numthreads;
577195701Skib	else if (mode == SINGLE_BOUNDARY)
578195701Skib		remaining = p->p_numthreads - p->p_boundary_count;
579195701Skib	else if (mode == SINGLE_NO_EXIT)
580195701Skib		remaining = p->p_numthreads - p->p_suspcount;
581195701Skib	else
582195701Skib		panic("calc_remaining: wrong mode %d", mode);
583195701Skib	return (remaining);
584195701Skib}
585195701Skib
/*
 * Target value of calc_remaining() at which single-threading is done:
 * only the requesting thread itself may remain, for every mode.
 */
static int
remain_for_mode(int mode)
{

	return (1);
}
592275617Skib
/*
 * Knock an inhibited thread (suspended, or on a sleep queue) loose so
 * it can make progress towards the single-threading request.  Called
 * with the process lock, process spinlock, and td2's thread lock held.
 * Returns non-zero if the caller must kick the swapper (proc0) after
 * releasing the locks.
 */
static int
weed_inhib(int mode, struct thread *td2, struct proc *p)
{
	int wakeup_swapper;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	THREAD_LOCK_ASSERT(td2, MA_OWNED);

	wakeup_swapper = 0;
	switch (mode) {
	case SINGLE_EXIT:
		/* Everyone must exit: abort interruptible sleeps. */
		if (TD_IS_SUSPENDED(td2))
			wakeup_swapper |= thread_unsuspend_one(td2);
		if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR) != 0)
			wakeup_swapper |= sleepq_abort(td2, EINTR);
		break;
	case SINGLE_BOUNDARY:
		/* Threads already stopped at the boundary stay put. */
		if (TD_IS_SUSPENDED(td2) && (td2->td_flags & TDF_BOUNDARY) == 0)
			wakeup_swapper |= thread_unsuspend_one(td2);
		if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR) != 0)
			wakeup_swapper |= sleepq_abort(td2, ERESTART);
		break;
	case SINGLE_NO_EXIT:
		if (TD_IS_SUSPENDED(td2) && (td2->td_flags & TDF_BOUNDARY) == 0)
			wakeup_swapper |= thread_unsuspend_one(td2);
		if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR) != 0)
			wakeup_swapper |= sleepq_abort(td2, ERESTART);
		break;
	}
	return (wakeup_swapper);
}
625275617Skib
/*
 * Enforce single-threading.
 *
 * Returns 1 if the caller must abort (another thread is waiting to
 * exit the process or similar). Process is locked!
 * Returns 0 when you are successfully the only thread running.
 * A process has successfully single threaded in the suspend mode when
 * There are no threads in user mode. Threads in the kernel must be
 * allowed to continue until they get to the user boundary. They may even
 * copy out their return values and data before suspending. They may however be
 * accelerated in reaching the user boundary as we will wake up
 * any sleeping threads that are interruptable. (PCATCH).
 */
int
thread_single(int mode)
{
	struct thread *td;
	struct thread *td2;
	struct proc *p;
	int remaining, wakeup_swapper;

	td = curthread;
	p = td->td_proc;
	mtx_assert(&Giant, MA_NOTOWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);

	/* Nothing to do for a process that never had threads. */
	if ((p->p_flag & P_HADTHREADS) == 0)
		return (0);

	/* Is someone already single threading? */
	if (p->p_singlethread != NULL && p->p_singlethread != td)
		return (1);

	/* Encode the requested mode in the process flags. */
	if (mode == SINGLE_EXIT) {
		p->p_flag |= P_SINGLE_EXIT;
		p->p_flag &= ~P_SINGLE_BOUNDARY;
	} else {
		p->p_flag &= ~P_SINGLE_EXIT;
		if (mode == SINGLE_BOUNDARY)
			p->p_flag |= P_SINGLE_BOUNDARY;
		else
			p->p_flag &= ~P_SINGLE_BOUNDARY;
	}
	p->p_flag |= P_STOPPED_SINGLE;
	PROC_SLOCK(p);
	p->p_singlethread = td;
	remaining = calc_remaining(p, mode);
	while (remaining != remain_for_mode(mode)) {
		if (P_SHOULDSTOP(p) != P_STOPPED_SINGLE)
			goto stopme;
		wakeup_swapper = 0;
		/* Nudge every other thread towards the target state. */
		FOREACH_THREAD_IN_PROC(p, td2) {
			if (td2 == td)
				continue;
			thread_lock(td2);
			td2->td_flags |= TDF_ASTPENDING | TDF_NEEDSUSPCHK;
			if (TD_IS_INHIBITED(td2))
				wakeup_swapper |= weed_inhib(mode, td2, p);
#ifdef SMP
			else if (TD_IS_RUNNING(td2) && td != td2) {
				forward_signal(td2);
			}
#endif
			thread_unlock(td2);
		}
		if (wakeup_swapper)
			kick_proc0();
		remaining = calc_remaining(p, mode);

		/*
		 * Maybe we suspended some threads.. was it enough?
		 */
		if (remaining == remain_for_mode(mode))
			break;

stopme:
		/*
		 * Wake us up when everyone else has suspended.
		 * In the mean time we suspend as well.
		 */
		thread_suspend_switch(td);
		remaining = calc_remaining(p, mode);
	}
	if (mode == SINGLE_EXIT) {
		/*
		 * Convert the process to an unthreaded process.  The
		 * SINGLE_EXIT is called by exit1() or execve(), in
		 * both cases other threads must be retired.
		 */
		KASSERT(p->p_numthreads == 1, ("Unthreading with >1 threads"));
		p->p_singlethread = NULL;
		p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_HADTHREADS);

		/*
		 * Wait for any remaining threads to exit cpu_throw().
		 */
		while (p->p_exitthreads != 0) {
			PROC_SUNLOCK(p);
			PROC_UNLOCK(p);
			sched_relinquish(td);
			PROC_LOCK(p);
			PROC_SLOCK(p);
		}
	}
	PROC_SUNLOCK(p);
	return (0);
}
73399026Sjulian
734275616Skibbool
735275616Skibthread_suspend_check_needed(void)
736275616Skib{
737275616Skib	struct proc *p;
738275616Skib	struct thread *td;
739275616Skib
740275616Skib	td = curthread;
741275616Skib	p = td->td_proc;
742275616Skib	PROC_LOCK_ASSERT(p, MA_OWNED);
743275616Skib	return (P_SHOULDSTOP(p) || ((p->p_flag & P_TRACED) != 0 &&
744275616Skib	    (td->td_dbgflags & TDB_SUSPEND) != 0));
745275616Skib}
746275616Skib
74799026Sjulian/*
74899026Sjulian * Called in from locations that can safely check to see
74999026Sjulian * whether we have to suspend or at least throttle for a
75099026Sjulian * single-thread event (e.g. fork).
75199026Sjulian *
75299026Sjulian * Such locations include userret().
75399026Sjulian * If the "return_instead" argument is non zero, the thread must be able to
75499026Sjulian * accept 0 (caller may continue), or 1 (caller must abort) as a result.
75599026Sjulian *
75699026Sjulian * The 'return_instead' argument tells the function if it may do a
75799026Sjulian * thread_exit() or suspend, or whether the caller must abort and back
75899026Sjulian * out instead.
75999026Sjulian *
76099026Sjulian * If the thread that set the single_threading request has set the
76199026Sjulian * P_SINGLE_EXIT bit in the process flags then this call will never return
76299026Sjulian * if 'return_instead' is false, but will exit.
76399026Sjulian *
76499026Sjulian * P_SINGLE_EXIT | return_instead == 0| return_instead != 0
76599026Sjulian *---------------+--------------------+---------------------
76699026Sjulian *       0       | returns 0          |   returns 0 or 1
767246996Sjhb *               | when ST ends       |   immediately
76899026Sjulian *---------------+--------------------+---------------------
76999026Sjulian *       1       | thread exits       |   returns 1
770246996Sjhb *               |                    |  immediately
77199026Sjulian * 0 = thread_exit() or suspension ok,
77299026Sjulian * other = return error instead of stopping the thread.
77399026Sjulian *
77499026Sjulian * While a full suspension is under effect, even a single threading
77599026Sjulian * thread would be suspended if it made this call (but it shouldn't).
77699026Sjulian * This call should only be made from places where
777124350Sschweikh * thread_exit() would be safe as that may be the outcome unless
77899026Sjulian * return_instead is set.
77999026Sjulian */
78099026Sjulianint
78199026Sjulianthread_suspend_check(int return_instead)
78299026Sjulian{
783104502Sjmallett	struct thread *td;
784104502Sjmallett	struct proc *p;
785182011Sjhb	int wakeup_swapper;
78699026Sjulian
78799026Sjulian	td = curthread;
78899026Sjulian	p = td->td_proc;
789126932Speter	mtx_assert(&Giant, MA_NOTOWNED);
79099026Sjulian	PROC_LOCK_ASSERT(p, MA_OWNED);
	/*
	 * Loop until no stop/suspend condition remains for this thread;
	 * the condition is re-evaluated after every return from the
	 * mi_switch() below.
	 */
791275616Skib	while (thread_suspend_check_needed()) {
792102950Sdavidxu		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
79399026Sjulian			KASSERT(p->p_singlethread != NULL,
79499026Sjulian			    ("singlethread not set"));
79599026Sjulian			/*
796100648Sjulian			 * The only suspension in action is a
797100648Sjulian			 * single-threading. Single threader need not stop.
798124350Sschweikh			 * XXX Should be safe to access unlocked
799100646Sjulian			 * as it can only be set to be true by us.
80099026Sjulian			 */
801100648Sjulian			if (p->p_singlethread == td)
80299026Sjulian				return (0);	/* Exempt from stopping. */
803124350Sschweikh		}
		/* Caller wants out rather than an exit: report interruption. */
804134498Sdavidxu		if ((p->p_flag & P_SINGLE_EXIT) && return_instead)
805155741Sdavidxu			return (EINTR);
80699026Sjulian
807136177Sdavidxu		/* Should we goto user boundary if we didn't come from there? */
808136177Sdavidxu		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE &&
809136177Sdavidxu		    (p->p_flag & P_SINGLE_BOUNDARY) && return_instead)
810155741Sdavidxu			return (ERESTART);
811136177Sdavidxu
81299026Sjulian		/*
813248584Sjhb		 * Ignore suspend requests for stop signals if they
814248584Sjhb		 * are deferred.
815248584Sjhb		 */
816248584Sjhb		if (P_SHOULDSTOP(p) == P_STOPPED_SIG &&
817248584Sjhb		    td->td_flags & TDF_SBDRY) {
818248584Sjhb			KASSERT(return_instead,
819248584Sjhb			    ("TDF_SBDRY set for unsafe thread_suspend_check"));
820248584Sjhb			return (0);
821248584Sjhb		}
822248584Sjhb
823248584Sjhb		/*
82499026Sjulian		 * If the process is waiting for us to exit,
82599026Sjulian		 * this thread should just suicide.
826102950Sdavidxu		 * Assumes that P_SINGLE_EXIT implies P_STOPPED_SINGLE.
82799026Sjulian		 */
828213642Sdavidxu		if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) {
829213642Sdavidxu			PROC_UNLOCK(p);
830213642Sdavidxu			tidhash_remove(td);
831213642Sdavidxu			PROC_LOCK(p);
832213950Sdavidxu			tdsigcleanup(td);
833213642Sdavidxu			PROC_SLOCK(p);
834213950Sdavidxu			thread_stopped(p);
835134791Sjulian			thread_exit();
836213642Sdavidxu		}
837213950Sdavidxu
838213950Sdavidxu		PROC_SLOCK(p);
839213950Sdavidxu		thread_stopped(p);
		/*
		 * If we are the last thread to suspend for a single-threading
		 * request, wake up the single threader that is waiting on us.
		 */
840170296Sjeff		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
841170296Sjeff			if (p->p_numthreads == p->p_suspcount + 1) {
842170296Sjeff				thread_lock(p->p_singlethread);
843182011Sjhb				wakeup_swapper =
844182011Sjhb				    thread_unsuspend_one(p->p_singlethread);
845170296Sjeff				thread_unlock(p->p_singlethread);
846182011Sjhb				if (wakeup_swapper)
847182011Sjhb					kick_proc0();
848170296Sjeff			}
849170296Sjeff		}
850184667Sdavidxu		PROC_UNLOCK(p);
851184199Sdavidxu		thread_lock(td);
85299026Sjulian		/*
85399026Sjulian		 * When a thread suspends, it just
854164936Sjulian		 * gets taken off all queues.
85599026Sjulian		 */
856103216Sjulian		thread_suspend_one(td);
		/*
		 * Count this thread as stopped at a boundary; the count is
		 * dropped again once we resume below.
		 */
857136177Sdavidxu		if (return_instead == 0) {
858136177Sdavidxu			p->p_boundary_count++;
859136177Sdavidxu			td->td_flags |= TDF_BOUNDARY;
860136177Sdavidxu		}
861184667Sdavidxu		PROC_SUNLOCK(p);
		/* Switch away; we stay off the run queues until unsuspended. */
862178272Sjeff		mi_switch(SW_INVOL | SWT_SUSPEND, NULL);
		/* Resumed: undo the boundary accounting done above. */
863170296Sjeff		if (return_instead == 0)
864136177Sdavidxu			td->td_flags &= ~TDF_BOUNDARY;
865170296Sjeff		thread_unlock(td);
86699026Sjulian		PROC_LOCK(p);
867227657Skib		if (return_instead == 0) {
868227657Skib			PROC_SLOCK(p);
869170296Sjeff			p->p_boundary_count--;
870227657Skib			PROC_SUNLOCK(p);
871227657Skib		}
87299026Sjulian	}
87399026Sjulian	return (0);
87499026Sjulian}
87599026Sjulian
/*
 * Suspend the calling thread (td is expected to be curthread, since the
 * mi_switch() below switches us out), performing the stop/suspend
 * accounting in stages so the proc lock is never dropped while the
 * thread lock is held.  Entered and exited with both the proc lock and
 * the proc slock held.
 */
876102898Sdavidxuvoid
877170296Sjeffthread_suspend_switch(struct thread *td)
878170296Sjeff{
879170296Sjeff	struct proc *p;
880170296Sjeff
881170296Sjeff	p = td->td_proc;
882170296Sjeff	KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
883170296Sjeff	PROC_LOCK_ASSERT(p, MA_OWNED);
884184667Sdavidxu	PROC_SLOCK_ASSERT(p, MA_OWNED);
885170296Sjeff	/*
886170296Sjeff	 * We implement thread_suspend_one in stages here to avoid
887170296Sjeff	 * dropping the proc lock while the thread lock is owned.
888170296Sjeff	 */
889170296Sjeff	thread_stopped(p);
890170296Sjeff	p->p_suspcount++;
891184667Sdavidxu	PROC_UNLOCK(p);
892184199Sdavidxu	thread_lock(td);
893177471Sjeff	td->td_flags &= ~TDF_NEEDSUSPCHK;
894170296Sjeff	TD_SET_SUSPENDED(td);
895177085Sjeff	sched_sleep(td, 0);
896184667Sdavidxu	PROC_SUNLOCK(p);
897170296Sjeff	DROP_GIANT();
898178272Sjeff	mi_switch(SW_VOL | SWT_SUSPEND, NULL);
899170296Sjeff	thread_unlock(td);
900170296Sjeff	PICKUP_GIANT();
	/* We have been resumed; reacquire the locks we entered with. */
901170296Sjeff	PROC_LOCK(p);
902184667Sdavidxu	PROC_SLOCK(p);
903170296Sjeff}
904170296Sjeff
/*
 * Mark td as suspended and account for it in its process's p_suspcount.
 * The caller holds the proc slock and td's thread lock; the actual
 * context switch away from td is left to the caller.
 */
905170296Sjeffvoid
906102898Sdavidxuthread_suspend_one(struct thread *td)
907102898Sdavidxu{
908102898Sdavidxu	struct proc *p = td->td_proc;
909102898Sdavidxu
910184667Sdavidxu	PROC_SLOCK_ASSERT(p, MA_OWNED);
911170296Sjeff	THREAD_LOCK_ASSERT(td, MA_OWNED);
912112071Sdavidxu	KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
913102898Sdavidxu	p->p_suspcount++;
914177471Sjeff	td->td_flags &= ~TDF_NEEDSUSPCHK;
915103216Sjulian	TD_SET_SUSPENDED(td);
916177085Sjeff	sched_sleep(td, 0);
917102898Sdavidxu}
918102898Sdavidxu
/*
 * Clear td's suspended state and make it runnable again.  Returns the
 * value of setrunnable(): non-zero when the caller must kick_proc0()
 * (after dropping the thread lock) to wake the swapper.
 */
919182011Sjhbint
920102898Sdavidxuthread_unsuspend_one(struct thread *td)
921102898Sdavidxu{
922102898Sdavidxu	struct proc *p = td->td_proc;
923102898Sdavidxu
924184667Sdavidxu	PROC_SLOCK_ASSERT(p, MA_OWNED);
925170296Sjeff	THREAD_LOCK_ASSERT(td, MA_OWNED);
926164936Sjulian	KASSERT(TD_IS_SUSPENDED(td), ("Thread not suspended"));
927103216Sjulian	TD_CLR_SUSPENDED(td);
928102898Sdavidxu	p->p_suspcount--;
929182011Sjhb	return (setrunnable(td));
930102898Sdavidxu}
931102898Sdavidxu
93299026Sjulian/*
93399026Sjulian * Allow all threads blocked by single threading to continue running.
93499026Sjulian */
93599026Sjulianvoid
93699026Sjulianthread_unsuspend(struct proc *p)
93799026Sjulian{
93899026Sjulian	struct thread *td;
939182011Sjhb	int wakeup_swapper;
94099026Sjulian
94199026Sjulian	PROC_LOCK_ASSERT(p, MA_OWNED);
942184667Sdavidxu	PROC_SLOCK_ASSERT(p, MA_OWNED);
943182011Sjhb	wakeup_swapper = 0;
	/* No stop request pending: let every suspended thread run again. */
94499026Sjulian	if (!P_SHOULDSTOP(p)) {
945164936Sjulian                FOREACH_THREAD_IN_PROC(p, td) {
946170296Sjeff			thread_lock(td);
947164936Sjulian			if (TD_IS_SUSPENDED(td)) {
948182011Sjhb				wakeup_swapper |= thread_unsuspend_one(td);
949164936Sjulian			}
950170296Sjeff			thread_unlock(td);
95199026Sjulian		}
952102950Sdavidxu	} else if ((P_SHOULDSTOP(p) == P_STOPPED_SINGLE) &&
95399026Sjulian	    (p->p_numthreads == p->p_suspcount)) {
95499026Sjulian		/*
95599026Sjulian		 * Stopping everything also did the job for the single
95699026Sjulian		 * threading request. Now we've downgraded to single-threaded,
95799026Sjulian		 * let it continue.
95899026Sjulian		 */
959170296Sjeff		thread_lock(p->p_singlethread);
960182011Sjhb		wakeup_swapper = thread_unsuspend_one(p->p_singlethread);
961170296Sjeff		thread_unlock(p->p_singlethread);
96299026Sjulian	}
	/* setrunnable() asked us to wake the swapper (proc0). */
963182011Sjhb	if (wakeup_swapper)
964182011Sjhb		kick_proc0();
96599026Sjulian}
96699026Sjulian
967134791Sjulian/*
968134791Sjulian * End the single threading mode.
969134791Sjulian */
97099026Sjulianvoid
97199026Sjulianthread_single_end(void)
97299026Sjulian{
97399026Sjulian	struct thread *td;
97499026Sjulian	struct proc *p;
975182011Sjhb	int wakeup_swapper;
97699026Sjulian
977275617Skib	p = curproc;
97899026Sjulian	PROC_LOCK_ASSERT(p, MA_OWNED);
	/* Clear all single-threading state before releasing anyone. */
979136177Sdavidxu	p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_SINGLE_BOUNDARY);
980184667Sdavidxu	PROC_SLOCK(p);
98199026Sjulian	p->p_singlethread = NULL;
982182011Sjhb	wakeup_swapper = 0;
983102292Sjulian	/*
984182011Sjhb	 * If there are other threads they may now run,
985102292Sjulian	 * unless of course there is a blanket 'stop order'
986102292Sjulian	 * on the process. The single threader must be allowed
987102292Sjulian	 * to continue however as this is a bad place to stop.
988102292Sjulian	 */
989275617Skib	if (p->p_numthreads != remain_for_mode(SINGLE_EXIT) &&
990275617Skib	    !P_SHOULDSTOP(p)) {
991164936Sjulian                FOREACH_THREAD_IN_PROC(p, td) {
992170296Sjeff			thread_lock(td);
993164936Sjulian			if (TD_IS_SUSPENDED(td)) {
994182011Sjhb				wakeup_swapper |= thread_unsuspend_one(td);
995164936Sjulian			}
996170296Sjeff			thread_unlock(td);
997102292Sjulian		}
998102292Sjulian	}
999184667Sdavidxu	PROC_SUNLOCK(p);
	/* Wake proc0 if any released thread needs to be swapped in. */
1000182011Sjhb	if (wakeup_swapper)
1001182011Sjhb		kick_proc0();
100299026Sjulian}
1003128721Sdeischen
1004151990Sdavidxustruct thread *
1005151990Sdavidxuthread_find(struct proc *p, lwpid_t tid)
1006151990Sdavidxu{
1007151990Sdavidxu	struct thread *td;
1008151990Sdavidxu
1009151990Sdavidxu	PROC_LOCK_ASSERT(p, MA_OWNED);
1010151990Sdavidxu	FOREACH_THREAD_IN_PROC(p, td) {
1011151990Sdavidxu		if (td->td_tid == tid)
1012151990Sdavidxu			break;
1013151990Sdavidxu	}
1014151990Sdavidxu	return (td);
1015151990Sdavidxu}
1016213642Sdavidxu
1017213642Sdavidxu/* Locate a thread by number; return with proc lock held, NULL if not found. */
1018213642Sdavidxustruct thread *
1019213642Sdavidxutdfind(lwpid_t tid, pid_t pid)
1020213642Sdavidxu{
1021213642Sdavidxu#define RUN_THRESH	16
1022213642Sdavidxu	struct thread *td;
1023213642Sdavidxu	int run = 0;
1024213642Sdavidxu
1025213642Sdavidxu	rw_rlock(&tidhash_lock);
1026213642Sdavidxu	LIST_FOREACH(td, TIDHASH(tid), td_hash) {
1027213642Sdavidxu		if (td->td_tid == tid) {
			/* pid == -1 means match a thread in any process. */
1028213642Sdavidxu			if (pid != -1 && td->td_proc->p_pid != pid) {
1029213642Sdavidxu				td = NULL;
1030213642Sdavidxu				break;
1031213642Sdavidxu			}
1032219968Sjhb			PROC_LOCK(td->td_proc);
			/* Process not fully constructed yet; treat as absent. */
1033213642Sdavidxu			if (td->td_proc->p_state == PRS_NEW) {
1034219968Sjhb				PROC_UNLOCK(td->td_proc);
1035213642Sdavidxu				td = NULL;
1036213642Sdavidxu				break;
1037213642Sdavidxu			}
			/*
			 * If the lookup walked a long hash chain, try to move
			 * the entry to the front so later lookups are faster.
			 * This needs the write lock; if the upgrade fails we
			 * simply skip the optimization.
			 */
1038213642Sdavidxu			if (run > RUN_THRESH) {
1039213642Sdavidxu				if (rw_try_upgrade(&tidhash_lock)) {
1040213642Sdavidxu					LIST_REMOVE(td, td_hash);
1041213642Sdavidxu					LIST_INSERT_HEAD(TIDHASH(td->td_tid),
1042213642Sdavidxu						td, td_hash);
1043213642Sdavidxu					rw_wunlock(&tidhash_lock);
1044213642Sdavidxu					return (td);
1045213642Sdavidxu				}
1046213642Sdavidxu			}
1047213642Sdavidxu			break;
1048213642Sdavidxu		}
1049213642Sdavidxu		run++;
1050213642Sdavidxu	}
1051213642Sdavidxu	rw_runlock(&tidhash_lock);
1052213642Sdavidxu	return (td);
1053213642Sdavidxu}
1054213642Sdavidxu
/*
 * Insert td into the global tid hash so tdfind() can locate it.
 */
1055213642Sdavidxuvoid
1056213642Sdavidxutidhash_add(struct thread *td)
1057213642Sdavidxu{
1058213642Sdavidxu	rw_wlock(&tidhash_lock);
1059213950Sdavidxu	LIST_INSERT_HEAD(TIDHASH(td->td_tid), td, td_hash);
1060213642Sdavidxu	rw_wunlock(&tidhash_lock);
1061213642Sdavidxu}
1062213642Sdavidxu
/*
 * Remove td from the global tid hash; after this tdfind() can no longer
 * return it.
 */
1063213642Sdavidxuvoid
1064213642Sdavidxutidhash_remove(struct thread *td)
1065213642Sdavidxu{
1066213642Sdavidxu	rw_wlock(&tidhash_lock);
1067213950Sdavidxu	LIST_REMOVE(td, td_hash);
1068213642Sdavidxu	rw_wunlock(&tidhash_lock);
1069213642Sdavidxu}
1070