/*-
 * Copyright (C) 2001 Julian Elischer <julian@freebsd.org>.
 *  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

#include "opt_witness.h"
#include "opt_hwpmc_hooks.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_thread.c 283320 2015-05-23 19:09:04Z kib $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rangelock.h>
#include <sys/resourcevar.h>
#include <sys/sdt.h>
#include <sys/smp.h>
#include <sys/sched.h>
#include <sys/sleepqueue.h>
#include <sys/selinfo.h>
#include <sys/turnstile.h>
#include <sys/ktr.h>
#include <sys/rwlock.h>
#include <sys/umtx.h>
#include <sys/cpuset.h>
#ifdef	HWPMC_HOOKS
#include <sys/pmckern.h>
#endif

#include <security/audit/audit.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>
#include <sys/eventhandler.h>

SDT_PROVIDER_DECLARE(proc);
SDT_PROBE_DEFINE(proc, , , lwp__exit);

/*
 * Thread-related storage.
 */
static uma_zone_t thread_zone;

TAILQ_HEAD(, thread) zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads);
static struct mtx zombie_lock;
MTX_SYSINIT(zombie_lock, &zombie_lock, "zombie lock", MTX_SPIN);

static void thread_zombie(struct thread *);
static int thread_unsuspend_one(struct thread *td, struct proc *p,
    bool boundary);

#define TID_BUFFER_SIZE	1024

struct mtx tid_lock;
static struct unrhdr *tid_unrhdr;
static lwpid_t tid_buffer[TID_BUFFER_SIZE];
static int tid_head, tid_tail;
static MALLOC_DEFINE(M_TIDHASH, "tidhash", "thread hash");

struct	tidhashhead *tidhashtbl;
u_long	tidhash;
struct	rwlock tidhash_lock;

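/*
 * Freed TIDs are not handed straight back to the unr(9) allocator.
 * Instead they are parked in tid_buffer, a small FIFO ring, and only
 * recycled through free_unr() once the ring overflows.  This appears
 * intended to delay the reuse of recently freed thread IDs.
 */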
static lwpid_t
tid_alloc(void)
{
	lwpid_t	tid;

	tid = alloc_unr(tid_unrhdr);
	if (tid != -1)
		return (tid);
	mtx_lock(&tid_lock);
	if (tid_head == tid_tail) {
		mtx_unlock(&tid_lock);
		return (-1);
	}
	tid = tid_buffer[tid_head];
	tid_head = (tid_head + 1) % TID_BUFFER_SIZE;
	mtx_unlock(&tid_lock);
	return (tid);
}

static void
tid_free(lwpid_t tid)
{
	lwpid_t tmp_tid = -1;

	mtx_lock(&tid_lock);
	if ((tid_tail + 1) % TID_BUFFER_SIZE == tid_head) {
		tmp_tid = tid_buffer[tid_head];
		tid_head = (tid_head + 1) % TID_BUFFER_SIZE;
	}
	tid_buffer[tid_tail] = tid;
	tid_tail = (tid_tail + 1) % TID_BUFFER_SIZE;
	mtx_unlock(&tid_lock);
	if (tmp_tid != -1)
		free_unr(tid_unrhdr, tmp_tid);
}

/*
 * Prepare a thread for use.
 */
static int
thread_ctor(void *mem, int size, void *arg, int flags)
{
	struct thread	*td;

	td = (struct thread *)mem;
	td->td_state = TDS_INACTIVE;
	td->td_oncpu = NOCPU;

	td->td_tid = tid_alloc();

	/*
	 * Note that td_critnest begins life as 1 because the thread is not
	 * running and is thereby implicitly waiting to be on the receiving
	 * end of a context switch.
	 */
	td->td_critnest = 1;
	td->td_lend_user_pri = PRI_MAX;
	EVENTHANDLER_INVOKE(thread_ctor, td);
#ifdef AUDIT
	audit_thread_alloc(td);
#endif
	umtx_thread_alloc(td);
	return (0);
}

/*
 * Reclaim a thread after use.
 */
static void
thread_dtor(void *mem, int size, void *arg)
{
	struct thread *td;

	td = (struct thread *)mem;

#ifdef INVARIANTS
	/* Verify that this thread is in a safe state to free. */
	switch (td->td_state) {
	case TDS_INHIBITED:
	case TDS_RUNNING:
	case TDS_CAN_RUN:
	case TDS_RUNQ:
		/*
		 * We must never unlink a thread that is in one of
		 * these states, because it is currently active.
		 */
		panic("bad state for thread unlinking");
		/* NOTREACHED */
	case TDS_INACTIVE:
		break;
	default:
		panic("bad thread state");
		/* NOTREACHED */
	}
#endif
#ifdef AUDIT
	audit_thread_free(td);
#endif
	/* Free all OSD associated with this thread. */
	osd_thread_exit(td);

	EVENTHANDLER_INVOKE(thread_dtor, td);
	tid_free(td->td_tid);
}

/*
 * Initialize type-stable parts of a thread (when newly created).
 */
static int
thread_init(void *mem, int size, int flags)
{
	struct thread *td;

	td = (struct thread *)mem;

	td->td_sleepqueue = sleepq_alloc();
	td->td_turnstile = turnstile_alloc();
	td->td_rlqe = NULL;
	EVENTHANDLER_INVOKE(thread_init, td);
	td->td_sched = (struct td_sched *)&td[1];
	umtx_thread_init(td);
	td->td_kstack = 0;
	td->td_sel = NULL;
	return (0);
}

/*
 * Tear down type-stable parts of a thread (just before being discarded).
 */
static void
thread_fini(void *mem, int size)
{
	struct thread *td;

	td = (struct thread *)mem;
	EVENTHANDLER_INVOKE(thread_fini, td);
	rlqentry_free(td->td_rlqe);
	turnstile_free(td->td_turnstile);
	sleepq_free(td->td_sleepqueue);
	umtx_thread_fini(td);
	seltdfini(td);
}

/*
 * For a newly created process, link up all the structures and its
 * initial thread, etc.
 * Called from:
 * {arch}/{arch}/machdep.c   {arch}_init(), init386() etc.
 * proc_dtor() (should go away)
 * proc_init()
 */
void
proc_linkup0(struct proc *p, struct thread *td)
{
	TAILQ_INIT(&p->p_threads);	     /* all threads in proc */
	proc_linkup(p, td);
}

void
proc_linkup(struct proc *p, struct thread *td)
{

	sigqueue_init(&p->p_sigqueue, p);
	p->p_ksi = ksiginfo_alloc(1);
	if (p->p_ksi != NULL) {
		/* XXX p_ksi may be null if ksiginfo zone is not ready */
		p->p_ksi->ksi_flags = KSI_EXT | KSI_INS;
	}
	LIST_INIT(&p->p_mqnotifier);
	p->p_numthreads = 0;
	thread_link(td, p);
}

/*
 * Initialize global thread allocation resources.
 */
void
threadinit(void)
{

	mtx_init(&tid_lock, "TID lock", NULL, MTX_DEF);

	/*
	 * pid_max cannot be greater than PID_MAX.
	 * Leave one number for thread0.
	 */
	tid_unrhdr = new_unrhdr(PID_MAX + 2, INT_MAX, &tid_lock);

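	/*
	 * uma_zcreate() takes its alignment argument as a mask, so
	 * "16 - 1" below requests 16-byte alignment of thread objects.
	 */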
	thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(),
	    thread_ctor, thread_dtor, thread_init, thread_fini,
	    16 - 1, 0);
	tidhashtbl = hashinit(maxproc / 2, M_TIDHASH, &tidhash);
	rw_init(&tidhash_lock, "tidhash");
}

/*
 * Place an unused thread on the zombie list.
 * Use the slpq as that must be unused by now.
 */
void
thread_zombie(struct thread *td)
{
	mtx_lock_spin(&zombie_lock);
	TAILQ_INSERT_HEAD(&zombie_threads, td, td_slpq);
	mtx_unlock_spin(&zombie_lock);
}

/*
 * Release a thread that has exited after cpu_throw().
 */
void
thread_stash(struct thread *td)
{
	atomic_subtract_rel_int(&td->td_proc->p_exitthreads, 1);
	thread_zombie(td);
}

/*
 * Reap zombie resources.
 */
void
thread_reap(void)
{
	struct thread *td_first, *td_next;

	/*
	 * Don't even bother to lock if the list is empty at this instant;
	 * we really don't care about the next instant.
	 */
	if (!TAILQ_EMPTY(&zombie_threads)) {
		mtx_lock_spin(&zombie_lock);
		td_first = TAILQ_FIRST(&zombie_threads);
		if (td_first)
			TAILQ_INIT(&zombie_threads);
		mtx_unlock_spin(&zombie_lock);
		while (td_first) {
			td_next = TAILQ_NEXT(td_first, td_slpq);
			if (td_first->td_ucred)
				crfree(td_first->td_ucred);
			thread_free(td_first);
			td_first = td_next;
		}
	}
}

/*
 * Allocate a thread.
 */
struct thread *
thread_alloc(int pages)
{
	struct thread *td;

	thread_reap(); /* check if any zombies to get */

	td = (struct thread *)uma_zalloc(thread_zone, M_WAITOK);
	KASSERT(td->td_kstack == 0, ("thread_alloc got thread with kstack"));
	if (!vm_thread_new(td, pages)) {
		uma_zfree(thread_zone, td);
		return (NULL);
	}
	cpu_thread_alloc(td);
	return (td);
}

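/*
 * Allocate a kernel stack for a thread that was created without one.
 * Returns 1 on success and 0 if the stack could not be allocated.
 */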
int
thread_alloc_stack(struct thread *td, int pages)
{

	KASSERT(td->td_kstack == 0,
	    ("thread_alloc_stack called on a thread with kstack"));
	if (!vm_thread_new(td, pages))
		return (0);
	cpu_thread_alloc(td);
	return (1);
}

/*
 * Deallocate a thread.
 */
void
thread_free(struct thread *td)
{

	lock_profile_thread_exit(td);
	if (td->td_cpuset)
		cpuset_rel(td->td_cpuset);
	td->td_cpuset = NULL;
	cpu_thread_free(td);
	if (td->td_kstack != 0)
		vm_thread_dispose(td);
	uma_zfree(thread_zone, td);
}

/*
 * Discard the current thread and exit from its context.
 * Always called with scheduler locked.
 *
 * Because we can't free a thread while we're operating under its context,
 * push the current thread into our CPU's deadthread holder. This means
 * we needn't worry about someone else grabbing our context before we
 * do a cpu_throw().
 */
void
thread_exit(void)
{
	uint64_t runtime, new_switchtime;
	struct thread *td;
	struct thread *td2;
	struct proc *p;
	int wakeup_swapper;

	td = curthread;
	p = td->td_proc;

	PROC_SLOCK_ASSERT(p, MA_OWNED);
	mtx_assert(&Giant, MA_NOTOWNED);

	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT(p != NULL, ("thread exiting without a process"));
	CTR3(KTR_PROC, "thread_exit: thread %p (pid %ld, %s)", td,
	    (long)p->p_pid, td->td_name);
	KASSERT(TAILQ_EMPTY(&td->td_sigqueue.sq_list), ("signal pending"));

#ifdef AUDIT
	AUDIT_SYSCALL_EXIT(0, td);
#endif
	/*
	 * Drop FPU & debug register state storage, or any other
	 * architecture-specific resources that would not be on a
	 * new untouched process.
	 */
	cpu_thread_exit(td);	/* XXXSMP */

	/*
	 * The last thread is left attached to the process so that the
	 * whole bundle gets recycled. Skip all this stuff if we never
	 * had threads. EXIT clears all signs of other threads when it
	 * goes to single threading, so the last thread always takes
	 * the short path.
	 */
	if (p->p_flag & P_HADTHREADS) {
		if (p->p_numthreads > 1) {
			atomic_add_int(&td->td_proc->p_exitthreads, 1);
			thread_unlink(td);
			td2 = FIRST_THREAD_IN_PROC(p);
			sched_exit_thread(td2, td);

			/*
			 * The test below is NOT true if we are the
			 * sole exiting thread. P_STOPPED_SINGLE is unset
			 * in exit1() after it is the only survivor.
			 */
			if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
				if (p->p_numthreads == p->p_suspcount) {
					thread_lock(p->p_singlethread);
					wakeup_swapper = thread_unsuspend_one(
					    p->p_singlethread, p, false);
					thread_unlock(p->p_singlethread);
					if (wakeup_swapper)
						kick_proc0();
				}
			}

			PCPU_SET(deadthread, td);
		} else {
			/*
			 * The last thread is exiting... but not through exit().
			 */
			panic("thread_exit: Last thread exiting on its own");
		}
	}
#ifdef	HWPMC_HOOKS
	/*
	 * If this thread is part of a process that is being tracked by
	 * hwpmc(4), inform the module of the thread's impending exit.
	 */
	if (PMC_PROC_IS_USING_PMCS(td->td_proc))
		PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT);
#endif
	PROC_UNLOCK(p);
	PROC_STATLOCK(p);
	thread_lock(td);
	PROC_SUNLOCK(p);

	/* Do the same timestamp bookkeeping that mi_switch() would do. */
	new_switchtime = cpu_ticks();
	runtime = new_switchtime - PCPU_GET(switchtime);
	td->td_runtime += runtime;
	td->td_incruntime += runtime;
	PCPU_SET(switchtime, new_switchtime);
	PCPU_SET(switchticks, ticks);
	PCPU_INC(cnt.v_swtch);

	/* Save our resource usage in our process. */
	td->td_ru.ru_nvcsw++;
	ruxagg(p, td);
	rucollect(&p->p_ru, &td->td_ru);
	PROC_STATUNLOCK(p);

	td->td_state = TDS_INACTIVE;
#ifdef WITNESS
	witness_thread_exit(td);
#endif
	CTR1(KTR_PROC, "thread_exit: cpu_throw() thread %p", td);
	sched_throw(td);
	panic("I'm a teapot!");
	/* NOTREACHED */
}

/*
 * Do any thread-specific cleanups that may be needed in wait().
 * Called with Giant, proc and schedlock not held.
 */
void
thread_wait(struct proc *p)
{
	struct thread *td;

	mtx_assert(&Giant, MA_NOTOWNED);
	KASSERT(p->p_numthreads == 1, ("multiple threads in thread_wait()"));
	KASSERT(p->p_exitthreads == 0, ("p_exitthreads leaking"));
	td = FIRST_THREAD_IN_PROC(p);
	/* Lock the last thread so we spin until it exits cpu_throw(). */
	thread_lock(td);
	thread_unlock(td);
	lock_profile_thread_exit(td);
	cpuset_rel(td->td_cpuset);
	td->td_cpuset = NULL;
	cpu_thread_clean(td);
	crfree(td->td_ucred);
	thread_reap();	/* check for zombie threads etc. */
}

/*
 * Link a thread to a process.
 * Set up anything that needs to be initialized for it to
 * be used by the process.
 */
void
thread_link(struct thread *td, struct proc *p)
{

	/*
	 * XXX This can't be enabled because it's called for proc0 before
	 * its lock has been created.
	 * PROC_LOCK_ASSERT(p, MA_OWNED);
	 */
	td->td_state    = TDS_INACTIVE;
	td->td_proc     = p;
	td->td_flags    = TDF_INMEM;

	LIST_INIT(&td->td_contested);
	LIST_INIT(&td->td_lprof[0]);
	LIST_INIT(&td->td_lprof[1]);
	sigqueue_init(&td->td_sigqueue, p);
	callout_init(&td->td_slpcallout, 1);
	TAILQ_INSERT_TAIL(&p->p_threads, td, td_plist);
	p->p_numthreads++;
}

/*
 * Called from:
 *  thread_exit()
 */
void
thread_unlink(struct thread *td)
{
	struct proc *p = td->td_proc;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	TAILQ_REMOVE(&p->p_threads, td, td_plist);
	p->p_numthreads--;
	/* could clear a few other things here */
	/* Must NOT clear links to proc! */
}

static int
calc_remaining(struct proc *p, int mode)
{
	int remaining;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	if (mode == SINGLE_EXIT)
		remaining = p->p_numthreads;
	else if (mode == SINGLE_BOUNDARY)
		remaining = p->p_numthreads - p->p_boundary_count;
	else if (mode == SINGLE_NO_EXIT || mode == SINGLE_ALLPROC)
		remaining = p->p_numthreads - p->p_suspcount;
	else
		panic("calc_remaining: wrong mode %d", mode);
	return (remaining);
}

static int
remain_for_mode(int mode)
{

	return (mode == SINGLE_ALLPROC ? 0 : 1);
}

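/*
 * Weed out an inhibited thread on behalf of a single-threading request:
 * unsuspend it or abort its sleep as appropriate for the given mode.
 * Returns non-zero if the swapper needs to be kicked.
 */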
static int
weed_inhib(int mode, struct thread *td2, struct proc *p)
{
	int wakeup_swapper;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	THREAD_LOCK_ASSERT(td2, MA_OWNED);

	wakeup_swapper = 0;
	switch (mode) {
	case SINGLE_EXIT:
		if (TD_IS_SUSPENDED(td2))
			wakeup_swapper |= thread_unsuspend_one(td2, p, true);
		if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR) != 0)
			wakeup_swapper |= sleepq_abort(td2, EINTR);
		break;
	case SINGLE_BOUNDARY:
		if (TD_IS_SUSPENDED(td2) && (td2->td_flags & TDF_BOUNDARY) == 0)
			wakeup_swapper |= thread_unsuspend_one(td2, p, false);
		if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR) != 0)
			wakeup_swapper |= sleepq_abort(td2, ERESTART);
		break;
	case SINGLE_NO_EXIT:
		if (TD_IS_SUSPENDED(td2) && (td2->td_flags & TDF_BOUNDARY) == 0)
			wakeup_swapper |= thread_unsuspend_one(td2, p, false);
		if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR) != 0)
			wakeup_swapper |= sleepq_abort(td2, ERESTART);
		break;
	case SINGLE_ALLPROC:
		/*
		 * ALLPROC suspend tries to avoid spurious EINTR for
		 * threads sleeping interruptibly, by suspending the
		 * thread directly, similarly to sig_suspend_threads().
		 * Since such sleep is not performed at the user
		 * boundary, TDF_BOUNDARY flag is not set, and TDF_ALLPROCSUSP
		 * is used to avoid immediate un-suspend.
		 */
		if (TD_IS_SUSPENDED(td2) && (td2->td_flags & (TDF_BOUNDARY |
		    TDF_ALLPROCSUSP)) == 0)
			wakeup_swapper |= thread_unsuspend_one(td2, p, false);
		if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR) != 0) {
			if ((td2->td_flags & TDF_SBDRY) == 0) {
				thread_suspend_one(td2);
				td2->td_flags |= TDF_ALLPROCSUSP;
			} else {
				wakeup_swapper |= sleepq_abort(td2, ERESTART);
			}
		}
		break;
	}
	return (wakeup_swapper);
}

/*
 * Enforce single-threading.
 *
 * Returns 1 if the caller must abort (another thread is waiting to
 * exit the process or similar). Process is locked!
 * Returns 0 when you are successfully the only thread running.
 * A process has successfully single threaded in the suspend mode when
 * there are no threads in user mode. Threads in the kernel must be
 * allowed to continue until they get to the user boundary. They may even
 * copy out their return values and data before suspending. They may
 * however be accelerated in reaching the user boundary as we will wake
 * up any sleeping threads that are interruptible (PCATCH).
 */
int
thread_single(struct proc *p, int mode)
{
	struct thread *td;
	struct thread *td2;
	int remaining, wakeup_swapper;

	td = curthread;
	KASSERT(mode == SINGLE_EXIT || mode == SINGLE_BOUNDARY ||
	    mode == SINGLE_ALLPROC || mode == SINGLE_NO_EXIT,
	    ("invalid mode %d", mode));
	/*
	 * If allowing non-ALLPROC singlethreading for non-curproc
	 * callers, calc_remaining() and remain_for_mode() should be
	 * adjusted to also account for td->td_proc != p.  For now
	 * this is not implemented because it is not used.
	 */
	KASSERT((mode == SINGLE_ALLPROC && td->td_proc != p) ||
	    (mode != SINGLE_ALLPROC && td->td_proc == p),
	    ("mode %d proc %p curproc %p", mode, p, td->td_proc));
	mtx_assert(&Giant, MA_NOTOWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);

	if ((p->p_flag & P_HADTHREADS) == 0 && mode != SINGLE_ALLPROC)
		return (0);

	/* Is someone already single threading? */
	if (p->p_singlethread != NULL && p->p_singlethread != td)
		return (1);

	if (mode == SINGLE_EXIT) {
		p->p_flag |= P_SINGLE_EXIT;
		p->p_flag &= ~P_SINGLE_BOUNDARY;
	} else {
		p->p_flag &= ~P_SINGLE_EXIT;
		if (mode == SINGLE_BOUNDARY)
			p->p_flag |= P_SINGLE_BOUNDARY;
		else
			p->p_flag &= ~P_SINGLE_BOUNDARY;
	}
	if (mode == SINGLE_ALLPROC)
		p->p_flag |= P_TOTAL_STOP;
	p->p_flag |= P_STOPPED_SINGLE;
	PROC_SLOCK(p);
	p->p_singlethread = td;
	remaining = calc_remaining(p, mode);
	while (remaining != remain_for_mode(mode)) {
		if (P_SHOULDSTOP(p) != P_STOPPED_SINGLE)
			goto stopme;
		wakeup_swapper = 0;
		FOREACH_THREAD_IN_PROC(p, td2) {
			if (td2 == td)
				continue;
			thread_lock(td2);
			td2->td_flags |= TDF_ASTPENDING | TDF_NEEDSUSPCHK;
			if (TD_IS_INHIBITED(td2)) {
				wakeup_swapper |= weed_inhib(mode, td2, p);
#ifdef SMP
			} else if (TD_IS_RUNNING(td2) && td != td2) {
				forward_signal(td2);
#endif
			}
			thread_unlock(td2);
		}
		if (wakeup_swapper)
			kick_proc0();
		remaining = calc_remaining(p, mode);

		/*
		 * Maybe we suspended some threads... was it enough?
		 */
		if (remaining == remain_for_mode(mode))
			break;

stopme:
		/*
		 * Wake us up when everyone else has suspended.
		 * In the meantime we suspend as well.
		 */
		thread_suspend_switch(td, p);
		remaining = calc_remaining(p, mode);
	}
	if (mode == SINGLE_EXIT) {
		/*
		 * Convert the process to an unthreaded process.  SINGLE_EXIT
		 * is used by exit1() and execve(); in both cases the other
		 * threads must be retired.
		 */
		KASSERT(p->p_numthreads == 1, ("Unthreading with >1 threads"));
		p->p_singlethread = NULL;
		p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_HADTHREADS);

		/*
		 * Wait for any remaining threads to exit cpu_throw().
		 */
		while (p->p_exitthreads != 0) {
			PROC_SUNLOCK(p);
			PROC_UNLOCK(p);
			sched_relinquish(td);
			PROC_LOCK(p);
			PROC_SLOCK(p);
		}
	} else if (mode == SINGLE_BOUNDARY) {
		/*
		 * Wait until all suspended threads are removed from
		 * the processors.  The thread_suspend_check()
		 * increments p_boundary_count while it is still
		 * running, which makes it possible for the execve()
		 * to destroy vmspace while our other threads are
		 * still using the address space.
		 *
		 * We lock the thread, which is only allowed to
		 * succeed after context switch code finished using
		 * the address space.
		 */
		FOREACH_THREAD_IN_PROC(p, td2) {
			if (td2 == td)
				continue;
			thread_lock(td2);
			KASSERT((td2->td_flags & TDF_BOUNDARY) != 0,
			    ("td %p not on boundary", td2));
			KASSERT(TD_IS_SUSPENDED(td2),
			    ("td %p is not suspended", td2));
			thread_unlock(td2);
		}
	}
	PROC_SUNLOCK(p);
	return (0);
}

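/*
 * Report whether the current thread must call thread_suspend_check():
 * true if the process is stopping, or if the thread is traced and has
 * a debugger-requested suspension pending.
 */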
bool
thread_suspend_check_needed(void)
{
	struct proc *p;
	struct thread *td;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	return (P_SHOULDSTOP(p) || ((p->p_flag & P_TRACED) != 0 &&
	    (td->td_dbgflags & TDB_SUSPEND) != 0));
}

/*
 * Called from locations that can safely check to see
 * whether we have to suspend or at least throttle for a
 * single-thread event (e.g. fork).
 *
 * Such locations include userret().
 * If the "return_instead" argument is non-zero, the thread must be able to
 * accept 0 (caller may continue), or 1 (caller must abort) as a result.
 *
 * The 'return_instead' argument tells the function if it may do a
 * thread_exit() or suspend, or whether the caller must abort and back
 * out instead.
 *
 * If the thread that set the single_threading request has set the
 * P_SINGLE_EXIT bit in the process flags then this call will never return
 * if 'return_instead' is false, but will exit.
 *
 * P_SINGLE_EXIT | return_instead == 0| return_instead != 0
 *---------------+--------------------+---------------------
 *       0       | returns 0          |   returns 0 or 1
 *               | when ST ends       |   immediately
 *---------------+--------------------+---------------------
 *       1       | thread exits       |   returns 1
 *               |                    |   immediately
 * 0 = thread_exit() or suspension ok,
 * other = return error instead of stopping the thread.
 *
 * While a full suspension is in effect, even a single-threading
 * thread would be suspended if it made this call (but it shouldn't).
 * This call should only be made from places where
 * thread_exit() would be safe as that may be the outcome unless
 * return_instead is set.
 */
int
thread_suspend_check(int return_instead)
{
	struct thread *td;
	struct proc *p;
	int wakeup_swapper;

	td = curthread;
	p = td->td_proc;
	mtx_assert(&Giant, MA_NOTOWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	while (thread_suspend_check_needed()) {
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			KASSERT(p->p_singlethread != NULL,
			    ("singlethread not set"));
			/*
			 * The only suspension in action is
			 * single-threading. The single threader need not
			 * stop. XXX Should be safe to access unlocked
			 * as it can only be set to true by us.
			 */
			if (p->p_singlethread == td)
				return (0);	/* Exempt from stopping. */
		}
		if ((p->p_flag & P_SINGLE_EXIT) && return_instead)
			return (EINTR);

		/* Should we go to the user boundary if we didn't come from there? */
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE &&
		    (p->p_flag & P_SINGLE_BOUNDARY) && return_instead)
			return (ERESTART);

		/*
		 * Ignore suspend requests if they are deferred.
		 */
		if ((td->td_flags & TDF_SBDRY) != 0) {
			KASSERT(return_instead,
			    ("TDF_SBDRY set for unsafe thread_suspend_check"));
			return (0);
		}

		/*
		 * If the process is waiting for us to exit,
		 * this thread should just suicide.
		 * Assumes that P_SINGLE_EXIT implies P_STOPPED_SINGLE.
		 */
		if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) {
			PROC_UNLOCK(p);
			tidhash_remove(td);
			PROC_LOCK(p);
			tdsigcleanup(td);
			umtx_thread_exit(td);
			PROC_SLOCK(p);
			thread_stopped(p);
			thread_exit();
		}

		PROC_SLOCK(p);
		thread_stopped(p);
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			if (p->p_numthreads == p->p_suspcount + 1) {
				thread_lock(p->p_singlethread);
				wakeup_swapper = thread_unsuspend_one(
				    p->p_singlethread, p, false);
				thread_unlock(p->p_singlethread);
				if (wakeup_swapper)
					kick_proc0();
			}
		}
		PROC_UNLOCK(p);
		thread_lock(td);
		/*
		 * When a thread suspends, it just
		 * gets taken off all queues.
		 */
		thread_suspend_one(td);
		if (return_instead == 0) {
			p->p_boundary_count++;
			td->td_flags |= TDF_BOUNDARY;
		}
		PROC_SUNLOCK(p);
		mi_switch(SW_INVOL | SWT_SUSPEND, NULL);
		thread_unlock(td);
		PROC_LOCK(p);
	}
	return (0);
}

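/*
 * Suspend the current thread ("td") and switch away.  Under
 * SINGLE_ALLPROC the process "p" being single-threaded may differ from
 * td's own process, hence the p == td->td_proc check below.
 */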
void
thread_suspend_switch(struct thread *td, struct proc *p)
{

	KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	/*
	 * We implement thread_suspend_one in stages here to avoid
	 * dropping the proc lock while the thread lock is owned.
	 */
	if (p == td->td_proc) {
		thread_stopped(p);
		p->p_suspcount++;
	}
	PROC_UNLOCK(p);
	thread_lock(td);
	td->td_flags &= ~TDF_NEEDSUSPCHK;
	TD_SET_SUSPENDED(td);
	sched_sleep(td, 0);
	PROC_SUNLOCK(p);
	DROP_GIANT();
	mi_switch(SW_VOL | SWT_SUSPEND, NULL);
	thread_unlock(td);
	PICKUP_GIANT();
	PROC_LOCK(p);
	PROC_SLOCK(p);
}

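/*
 * Mark a thread as suspended and account for it in its process.
 * Both the thread lock and the process slock must be held.
 */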
void
thread_suspend_one(struct thread *td)
{
	struct proc *p;

	p = td->td_proc;
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
	p->p_suspcount++;
	td->td_flags &= ~TDF_NEEDSUSPCHK;
	TD_SET_SUSPENDED(td);
	sched_sleep(td, 0);
}

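/*
 * Clear a thread's suspension and make it runnable.  "boundary" requests
 * that a TDF_BOUNDARY suspension be released as well.  Returns non-zero
 * if setrunnable() wants the swapper kicked.
 */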
static int
thread_unsuspend_one(struct thread *td, struct proc *p, bool boundary)
{

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT(TD_IS_SUSPENDED(td), ("Thread not suspended"));
	TD_CLR_SUSPENDED(td);
	td->td_flags &= ~TDF_ALLPROCSUSP;
	if (td->td_proc == p) {
		PROC_SLOCK_ASSERT(p, MA_OWNED);
		p->p_suspcount--;
		if (boundary && (td->td_flags & TDF_BOUNDARY) != 0) {
			td->td_flags &= ~TDF_BOUNDARY;
			p->p_boundary_count--;
		}
	}
	return (setrunnable(td));
}

/*
 * Allow all threads blocked by single threading to continue running.
 */
void
thread_unsuspend(struct proc *p)
{
	struct thread *td;
	int wakeup_swapper;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	wakeup_swapper = 0;
	if (!P_SHOULDSTOP(p)) {
		FOREACH_THREAD_IN_PROC(p, td) {
			thread_lock(td);
			if (TD_IS_SUSPENDED(td)) {
				wakeup_swapper |= thread_unsuspend_one(td, p,
				    true);
			}
			thread_unlock(td);
		}
	} else if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE &&
	    p->p_numthreads == p->p_suspcount) {
		/*
		 * Stopping everything also did the job for the single
		 * threading request. Now we've downgraded to single-threaded,
		 * let it continue.
		 */
		if (p->p_singlethread->td_proc == p) {
			thread_lock(p->p_singlethread);
			wakeup_swapper = thread_unsuspend_one(
			    p->p_singlethread, p, false);
			thread_unlock(p->p_singlethread);
		}
	}
	if (wakeup_swapper)
		kick_proc0();
}

/*
 * End the single-threading mode.
 */
void
thread_single_end(struct proc *p, int mode)
{
	struct thread *td;
	int wakeup_swapper;

	KASSERT(mode == SINGLE_EXIT || mode == SINGLE_BOUNDARY ||
	    mode == SINGLE_ALLPROC || mode == SINGLE_NO_EXIT,
	    ("invalid mode %d", mode));
	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT((mode == SINGLE_ALLPROC && (p->p_flag & P_TOTAL_STOP) != 0) ||
	    (mode != SINGLE_ALLPROC && (p->p_flag & P_TOTAL_STOP) == 0),
	    ("mode %d does not match P_TOTAL_STOP", mode));
	KASSERT(mode == SINGLE_ALLPROC || p->p_singlethread == curthread,
	    ("thread_single_end from other thread %p %p",
	    curthread, p->p_singlethread));
	KASSERT(mode != SINGLE_BOUNDARY ||
	    (p->p_flag & P_SINGLE_BOUNDARY) != 0,
	    ("mis-matched SINGLE_BOUNDARY flags %x", p->p_flag));
	p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_SINGLE_BOUNDARY |
	    P_TOTAL_STOP);
	PROC_SLOCK(p);
	p->p_singlethread = NULL;
	wakeup_swapper = 0;
	/*
	 * If there are other threads, they may now run,
	 * unless of course there is a blanket 'stop order'
	 * on the process. The single threader must be allowed
	 * to continue, however, as this is a bad place to stop.
	 */
	if (p->p_numthreads != remain_for_mode(mode) && !P_SHOULDSTOP(p)) {
		FOREACH_THREAD_IN_PROC(p, td) {
			thread_lock(td);
			if (TD_IS_SUSPENDED(td)) {
				wakeup_swapper |= thread_unsuspend_one(td, p,
				    mode == SINGLE_BOUNDARY);
			}
			thread_unlock(td);
		}
	}
	KASSERT(mode != SINGLE_BOUNDARY || p->p_boundary_count == 0,
	    ("inconsistent boundary count %d", p->p_boundary_count));
	PROC_SUNLOCK(p);
	if (wakeup_swapper)
		kick_proc0();
}

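/*
 * Locate a thread in a process by its thread ID; the proc lock must
 * be held.  Returns NULL if no such thread exists.
 */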
struct thread *
thread_find(struct proc *p, lwpid_t tid)
{
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	FOREACH_THREAD_IN_PROC(p, td) {
		if (td->td_tid == tid)
			break;
	}
	return (td);
}

/* Locate a thread by number; return with proc lock held. */
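/*
 * Illustrative (hypothetical) usage, remembering that a non-NULL
 * result is returned with its process locked:
 *
 *	td = tdfind(tid, pid);
 *	if (td != NULL) {
 *		...use td...
 *		PROC_UNLOCK(td->td_proc);
 *	}
 */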
struct thread *
tdfind(lwpid_t tid, pid_t pid)
{
#define RUN_THRESH	16
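	/*
	 * If a lookup walks more than RUN_THRESH hash entries, move the
	 * found thread to the head of its chain to speed up future lookups.
	 */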
	struct thread *td;
	int run = 0;

	rw_rlock(&tidhash_lock);
	LIST_FOREACH(td, TIDHASH(tid), td_hash) {
		if (td->td_tid == tid) {
			if (pid != -1 && td->td_proc->p_pid != pid) {
				td = NULL;
				break;
			}
			PROC_LOCK(td->td_proc);
			if (td->td_proc->p_state == PRS_NEW) {
				PROC_UNLOCK(td->td_proc);
				td = NULL;
				break;
			}
			if (run > RUN_THRESH) {
				if (rw_try_upgrade(&tidhash_lock)) {
					LIST_REMOVE(td, td_hash);
					LIST_INSERT_HEAD(TIDHASH(td->td_tid),
					    td, td_hash);
					rw_wunlock(&tidhash_lock);
					return (td);
				}
			}
			break;
		}
		run++;
	}
	rw_runlock(&tidhash_lock);
	return (td);
}

void
tidhash_add(struct thread *td)
{
	rw_wlock(&tidhash_lock);
	LIST_INSERT_HEAD(TIDHASH(td->td_tid), td, td_hash);
	rw_wunlock(&tidhash_lock);
}

void
tidhash_remove(struct thread *td)
{
	rw_wlock(&tidhash_lock);
	LIST_REMOVE(td, td_hash);
	rw_wunlock(&tidhash_lock);
}