/*-
 * Copyright (C) 2001 Julian Elischer <julian@freebsd.org>.
 *  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

#include "opt_witness.h"
#include "opt_hwpmc_hooks.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/kern/kern_thread.c 318743 2017-05-23 12:40:50Z badger $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rangelock.h>
#include <sys/resourcevar.h>
#include <sys/sdt.h>
#include <sys/smp.h>
#include <sys/sched.h>
#include <sys/sleepqueue.h>
#include <sys/selinfo.h>
#include <sys/syscallsubr.h>
#include <sys/sysent.h>
#include <sys/turnstile.h>
#include <sys/ktr.h>
#include <sys/rwlock.h>
#include <sys/umtx.h>
#include <sys/cpuset.h>
#ifdef	HWPMC_HOOKS
#include <sys/pmckern.h>
#endif

#include <security/audit/audit.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>
#include <vm/vm_domain.h>
#include <sys/eventhandler.h>
/*
 * The asserts below verify the stability of the struct thread and
 * struct proc layouts, as exposed by the KBI to modules.  On head the
 * KBI is allowed to drift, but any change to the structures must be
 * accompanied by a matching update of the asserts.
 *
 * On stable branches after the KBI freeze, these conditions must not
 * be violated.  Typically new fields are added at the end of the
 * structures.
 */
#ifdef __amd64__
_Static_assert(offsetof(struct thread, td_flags) == 0xe4,
    "struct thread KBI td_flags");
_Static_assert(offsetof(struct thread, td_pflags) == 0xec,
    "struct thread KBI td_pflags");
_Static_assert(offsetof(struct thread, td_frame) == 0x418,
    "struct thread KBI td_frame");
_Static_assert(offsetof(struct thread, td_emuldata) == 0x4c0,
    "struct thread KBI td_emuldata");
_Static_assert(offsetof(struct proc, p_flag) == 0xb0,
    "struct proc KBI p_flag");
_Static_assert(offsetof(struct proc, p_pid) == 0xbc,
    "struct proc KBI p_pid");
_Static_assert(offsetof(struct proc, p_filemon) == 0x3c0,
    "struct proc KBI p_filemon");
_Static_assert(offsetof(struct proc, p_comm) == 0x3d0,
    "struct proc KBI p_comm");
_Static_assert(offsetof(struct proc, p_emuldata) == 0x4a0,
    "struct proc KBI p_emuldata");
#endif
#ifdef __i386__
_Static_assert(offsetof(struct thread, td_flags) == 0x8c,
    "struct thread KBI td_flags");
_Static_assert(offsetof(struct thread, td_pflags) == 0x94,
    "struct thread KBI td_pflags");
_Static_assert(offsetof(struct thread, td_frame) == 0x2c0,
    "struct thread KBI td_frame");
_Static_assert(offsetof(struct thread, td_emuldata) == 0x30c,
    "struct thread KBI td_emuldata");
_Static_assert(offsetof(struct proc, p_flag) == 0x68,
    "struct proc KBI p_flag");
_Static_assert(offsetof(struct proc, p_pid) == 0x74,
    "struct proc KBI p_pid");
_Static_assert(offsetof(struct proc, p_filemon) == 0x268,
    "struct proc KBI p_filemon");
_Static_assert(offsetof(struct proc, p_comm) == 0x274,
    "struct proc KBI p_comm");
_Static_assert(offsetof(struct proc, p_emuldata) == 0x2f4,
    "struct proc KBI p_emuldata");
#endif

SDT_PROVIDER_DECLARE(proc);
SDT_PROBE_DEFINE(proc, , , lwp__exit);

/*
 * Thread related storage.
 */
static uma_zone_t thread_zone;

TAILQ_HEAD(, thread) zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads);
static struct mtx zombie_lock;
MTX_SYSINIT(zombie_lock, &zombie_lock, "zombie lock", MTX_SPIN);

static void thread_zombie(struct thread *);
static int thread_unsuspend_one(struct thread *td, struct proc *p,
    bool boundary);

#define TID_BUFFER_SIZE	1024

struct mtx tid_lock;
static struct unrhdr *tid_unrhdr;
static lwpid_t tid_buffer[TID_BUFFER_SIZE];
static int tid_head, tid_tail;
static MALLOC_DEFINE(M_TIDHASH, "tidhash", "thread hash");

struct	tidhashhead *tidhashtbl;
u_long	tidhash;
struct	rwlock tidhash_lock;

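/*
 * A note on TID recycling (derived from the code below): tid_free()
 * parks a released TID in tid_buffer, a small ring indexed by
 * tid_head/tid_tail, and only the oldest entry, evicted once the
 * ring is full, is returned to the unr(9) allocator.  This delays
 * the reuse of a TID by up to TID_BUFFER_SIZE frees.  Conversely,
 * tid_alloc() falls back to the ring when the allocator runs dry.
 */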
static lwpid_t
tid_alloc(void)
{
	lwpid_t	tid;

	tid = alloc_unr(tid_unrhdr);
	if (tid != -1)
		return (tid);
	mtx_lock(&tid_lock);
	if (tid_head == tid_tail) {
		mtx_unlock(&tid_lock);
		return (-1);
	}
	tid = tid_buffer[tid_head];
	tid_head = (tid_head + 1) % TID_BUFFER_SIZE;
	mtx_unlock(&tid_lock);
	return (tid);
}

static void
tid_free(lwpid_t tid)
{
	lwpid_t tmp_tid = -1;

	mtx_lock(&tid_lock);
	if ((tid_tail + 1) % TID_BUFFER_SIZE == tid_head) {
		tmp_tid = tid_buffer[tid_head];
		tid_head = (tid_head + 1) % TID_BUFFER_SIZE;
	}
	tid_buffer[tid_tail] = tid;
	tid_tail = (tid_tail + 1) % TID_BUFFER_SIZE;
	mtx_unlock(&tid_lock);
	if (tmp_tid != -1)
		free_unr(tid_unrhdr, tmp_tid);
}

/*
 * Prepare a thread for use.
 */
static int
thread_ctor(void *mem, int size, void *arg, int flags)
{
	struct thread	*td;

	td = (struct thread *)mem;
	td->td_state = TDS_INACTIVE;
	td->td_oncpu = NOCPU;

	td->td_tid = tid_alloc();

	/*
	 * Note that td_critnest begins life as 1 because the thread is not
	 * running and is thereby implicitly waiting to be on the receiving
	 * end of a context switch.
	 */
	td->td_critnest = 1;
	td->td_lend_user_pri = PRI_MAX;
	EVENTHANDLER_INVOKE(thread_ctor, td);
#ifdef AUDIT
	audit_thread_alloc(td);
#endif
	umtx_thread_alloc(td);
	return (0);
}

/*
 * Reclaim a thread after use.
 */
static void
thread_dtor(void *mem, int size, void *arg)
{
	struct thread *td;

	td = (struct thread *)mem;

#ifdef INVARIANTS
	/* Verify that this thread is in a safe state to free. */
	switch (td->td_state) {
	case TDS_INHIBITED:
	case TDS_RUNNING:
	case TDS_CAN_RUN:
	case TDS_RUNQ:
		/*
		 * We must never unlink a thread that is in one of
		 * these states, because it is currently active.
		 */
		panic("bad state for thread unlinking");
		/* NOTREACHED */
	case TDS_INACTIVE:
		break;
	default:
		panic("bad thread state");
		/* NOTREACHED */
	}
#endif
#ifdef AUDIT
	audit_thread_free(td);
#endif
	/* Free all OSD associated with this thread. */
	osd_thread_exit(td);
	td_softdep_cleanup(td);
	MPASS(td->td_su == NULL);

	EVENTHANDLER_INVOKE(thread_dtor, td);
	tid_free(td->td_tid);
}

/*
 * Initialize type-stable parts of a thread (when newly created).
 */
static int
thread_init(void *mem, int size, int flags)
{
	struct thread *td;

	td = (struct thread *)mem;

	td->td_sleepqueue = sleepq_alloc();
	td->td_turnstile = turnstile_alloc();
	td->td_rlqe = NULL;
	EVENTHANDLER_INVOKE(thread_init, td);
	umtx_thread_init(td);
	td->td_kstack = 0;
	td->td_sel = NULL;
	return (0);
}

/*
 * Tear down type-stable parts of a thread (just before being discarded).
 */
static void
thread_fini(void *mem, int size)
{
	struct thread *td;

	td = (struct thread *)mem;
	EVENTHANDLER_INVOKE(thread_fini, td);
	rlqentry_free(td->td_rlqe);
	turnstile_free(td->td_turnstile);
	sleepq_free(td->td_sleepqueue);
	umtx_thread_fini(td);
	seltdfini(td);
}
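
/*
 * A note on the UMA callbacks above (standard uma(9) semantics):
 * thread_ctor()/thread_dtor() run on every allocation and free,
 * while thread_init()/thread_fini() run only when an item enters or
 * leaves the zone's cache, so the sleepqueue, turnstile and umtx
 * state set up in thread_init() survive thread_free()/thread_alloc()
 * cycles; this is what "type-stable" means here.
 */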

/*
 * For a newly created process, link up all the structures and its
 * initial thread, etc.
 * Called from:
 * {arch}/{arch}/machdep.c   {arch}_init(), init386() etc.
 * proc_dtor() (should go away)
 * proc_init()
 */
void
proc_linkup0(struct proc *p, struct thread *td)
{
	TAILQ_INIT(&p->p_threads);	     /* all threads in proc */
	proc_linkup(p, td);
}

void
proc_linkup(struct proc *p, struct thread *td)
{

	sigqueue_init(&p->p_sigqueue, p);
	p->p_ksi = ksiginfo_alloc(1);
	if (p->p_ksi != NULL) {
		/* XXX p_ksi may be null if ksiginfo zone is not ready */
		p->p_ksi->ksi_flags = KSI_EXT | KSI_INS;
	}
	LIST_INIT(&p->p_mqnotifier);
	p->p_numthreads = 0;
	thread_link(td, p);
}

/*
 * Initialize global thread allocation resources.
 */
void
threadinit(void)
{

	mtx_init(&tid_lock, "TID lock", NULL, MTX_DEF);

	/*
	 * Thread IDs are allocated above the PID range, so they cannot
	 * collide with PIDs: pid_max cannot be greater than PID_MAX,
	 * and one number (PID_MAX + 1) is left for thread0.
	 */
	tid_unrhdr = new_unrhdr(PID_MAX + 2, INT_MAX, &tid_lock);

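	/*
	 * The "32 - 1" passed to uma_zcreate() below is an alignment
	 * mask, i.e. thread structures are allocated on 32-byte
	 * boundaries.
	 */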
	thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(),
	    thread_ctor, thread_dtor, thread_init, thread_fini,
	    32 - 1, UMA_ZONE_NOFREE);
	tidhashtbl = hashinit(maxproc / 2, M_TIDHASH, &tidhash);
	rw_init(&tidhash_lock, "tidhash");
}

/*
 * Place an unused thread on the zombie list.
 * Use the slpq as that must be unused by now.
 */
void
thread_zombie(struct thread *td)
{
	mtx_lock_spin(&zombie_lock);
	TAILQ_INSERT_HEAD(&zombie_threads, td, td_slpq);
	mtx_unlock_spin(&zombie_lock);
}

/*
 * Release a thread that has exited after cpu_throw().
 */
void
thread_stash(struct thread *td)
{
	atomic_subtract_rel_int(&td->td_proc->p_exitthreads, 1);
	thread_zombie(td);
}

/*
 * Reap zombie resources.
 */
void
thread_reap(void)
{
	struct thread *td_first, *td_next;

	/*
	 * Don't even bother to lock if none at this instant,
	 * we really don't care about the next instant.
	 */
	if (!TAILQ_EMPTY(&zombie_threads)) {
		mtx_lock_spin(&zombie_lock);
		td_first = TAILQ_FIRST(&zombie_threads);
		if (td_first)
			TAILQ_INIT(&zombie_threads);
		mtx_unlock_spin(&zombie_lock);
		while (td_first) {
			td_next = TAILQ_NEXT(td_first, td_slpq);
			thread_cow_free(td_first);
			thread_free(td_first);
			td_first = td_next;
		}
	}
}

/*
 * Allocate a thread.
 */
struct thread *
thread_alloc(int pages)
{
	struct thread *td;

	thread_reap(); /* check if any zombies to get */

	td = (struct thread *)uma_zalloc(thread_zone, M_WAITOK);
	KASSERT(td->td_kstack == 0, ("thread_alloc got thread with kstack"));
	if (!vm_thread_new(td, pages)) {
		uma_zfree(thread_zone, td);
		return (NULL);
	}
	cpu_thread_alloc(td);
	vm_domain_policy_init(&td->td_vm_dom_policy);
	return (td);
}

int
thread_alloc_stack(struct thread *td, int pages)
{

	KASSERT(td->td_kstack == 0,
	    ("thread_alloc_stack called on a thread with kstack"));
	if (!vm_thread_new(td, pages))
		return (0);
	cpu_thread_alloc(td);
	return (1);
}

/*
 * Deallocate a thread.
 */
void
thread_free(struct thread *td)
{

	lock_profile_thread_exit(td);
	if (td->td_cpuset)
		cpuset_rel(td->td_cpuset);
	td->td_cpuset = NULL;
	cpu_thread_free(td);
	if (td->td_kstack != 0)
		vm_thread_dispose(td);
	vm_domain_policy_cleanup(&td->td_vm_dom_policy);
	callout_drain(&td->td_slpcallout);
	uma_zfree(thread_zone, td);
}

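/*
 * Per-thread copy-on-write snapshots of process-wide data: each
 * thread holds its own references to the process credentials and
 * resource limits, stamped with p_cowgen.  Code that replaces
 * p_ucred or p_limit bumps p_cowgen, and threads resynchronize
 * lazily through thread_cow_update(), typically once they notice a
 * stale td_cowgen on the way back to user mode.
 */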
void
thread_cow_get_proc(struct thread *newtd, struct proc *p)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	newtd->td_ucred = crhold(p->p_ucred);
	newtd->td_limit = lim_hold(p->p_limit);
	newtd->td_cowgen = p->p_cowgen;
}

void
thread_cow_get(struct thread *newtd, struct thread *td)
{

	newtd->td_ucred = crhold(td->td_ucred);
	newtd->td_limit = lim_hold(td->td_limit);
	newtd->td_cowgen = td->td_cowgen;
}

void
thread_cow_free(struct thread *td)
{

	if (td->td_ucred != NULL)
		crfree(td->td_ucred);
	if (td->td_limit != NULL)
		lim_free(td->td_limit);
}

void
thread_cow_update(struct thread *td)
{
	struct proc *p;
	struct ucred *oldcred;
	struct plimit *oldlimit;

	p = td->td_proc;
	oldcred = NULL;
	oldlimit = NULL;
	PROC_LOCK(p);
	if (td->td_ucred != p->p_ucred) {
		oldcred = td->td_ucred;
		td->td_ucred = crhold(p->p_ucred);
	}
	if (td->td_limit != p->p_limit) {
		oldlimit = td->td_limit;
		td->td_limit = lim_hold(p->p_limit);
	}
	td->td_cowgen = p->p_cowgen;
	PROC_UNLOCK(p);
	if (oldcred != NULL)
		crfree(oldcred);
	if (oldlimit != NULL)
		lim_free(oldlimit);
}

/*
 * Discard the current thread and exit from its context.
 * Always called with scheduler locked.
 *
 * Because we can't free a thread while we're operating under its context,
 * push the current thread into our CPU's deadthread holder. This means
 * we needn't worry about someone else grabbing our context before we
 * do a cpu_throw().
 */
void
thread_exit(void)
{
	uint64_t runtime, new_switchtime;
	struct thread *td;
	struct thread *td2;
	struct proc *p;
	int wakeup_swapper;

	td = curthread;
	p = td->td_proc;

	PROC_SLOCK_ASSERT(p, MA_OWNED);
	mtx_assert(&Giant, MA_NOTOWNED);

	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT(p != NULL, ("thread exiting without a process"));
	CTR3(KTR_PROC, "thread_exit: thread %p (pid %ld, %s)", td,
	    (long)p->p_pid, td->td_name);
	SDT_PROBE0(proc, , , lwp__exit);
	KASSERT(TAILQ_EMPTY(&td->td_sigqueue.sq_list), ("signal pending"));

#ifdef AUDIT
	AUDIT_SYSCALL_EXIT(0, td);
#endif
	/*
	 * Drop the FPU and debug register state storage, and any other
	 * architecture-specific resources that would not be present in
	 * a new, untouched process.
	 */
	cpu_thread_exit(td);

	/*
	 * The last thread is left attached to the process so that the
	 * whole bundle gets recycled.  Skip all of this if we never had
	 * threads.  exit1() clears all signs of other threads when it
	 * goes to single threading, so the last thread always takes
	 * the short path.
	 */
	if (p->p_flag & P_HADTHREADS) {
		if (p->p_numthreads > 1) {
			atomic_add_int(&td->td_proc->p_exitthreads, 1);
			thread_unlink(td);
			td2 = FIRST_THREAD_IN_PROC(p);
			sched_exit_thread(td2, td);

			/*
			 * The test below is NOT true if we are the
			 * sole exiting thread.  P_STOPPED_SINGLE is
			 * unset in exit1() once it is the only
			 * survivor.
			 */
			if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
				if (p->p_numthreads == p->p_suspcount) {
					thread_lock(p->p_singlethread);
					wakeup_swapper = thread_unsuspend_one(
						p->p_singlethread, p, false);
					thread_unlock(p->p_singlethread);
					if (wakeup_swapper)
						kick_proc0();
				}
			}

			PCPU_SET(deadthread, td);
		} else {
			/*
			 * The last thread is exiting, but not through exit().
			 */
			panic("thread_exit: Last thread exiting on its own");
		}
	}
#ifdef	HWPMC_HOOKS
	/*
	 * If this thread is part of a process that is being tracked
	 * by hwpmc(4), inform the module of the thread's impending
	 * exit.
	 */
	if (PMC_PROC_IS_USING_PMCS(td->td_proc))
		PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT);
#endif
	PROC_UNLOCK(p);
	PROC_STATLOCK(p);
	thread_lock(td);
	PROC_SUNLOCK(p);

	/* Do the same timestamp bookkeeping that mi_switch() would do. */
	new_switchtime = cpu_ticks();
	runtime = new_switchtime - PCPU_GET(switchtime);
	td->td_runtime += runtime;
	td->td_incruntime += runtime;
	PCPU_SET(switchtime, new_switchtime);
	PCPU_SET(switchticks, ticks);
	PCPU_INC(cnt.v_swtch);

	/* Save our resource usage in our process. */
	td->td_ru.ru_nvcsw++;
	ruxagg(p, td);
	rucollect(&p->p_ru, &td->td_ru);
	PROC_STATUNLOCK(p);

	td->td_state = TDS_INACTIVE;
#ifdef WITNESS
	witness_thread_exit(td);
#endif
	CTR1(KTR_PROC, "thread_exit: cpu_throw() thread %p", td);
	sched_throw(td);
	panic("I'm a teapot!");
	/* NOTREACHED */
}

/*
 * Do any thread-specific cleanups that may be needed in wait().
 * Called with Giant, proc and schedlock not held.
 */
void
thread_wait(struct proc *p)
{
	struct thread *td;

	mtx_assert(&Giant, MA_NOTOWNED);
	KASSERT(p->p_numthreads == 1, ("multiple threads in thread_wait()"));
	KASSERT(p->p_exitthreads == 0, ("p_exitthreads leaking"));
	td = FIRST_THREAD_IN_PROC(p);
	/* Lock the last thread so we spin until it exits cpu_throw(). */
	thread_lock(td);
	thread_unlock(td);
	lock_profile_thread_exit(td);
	cpuset_rel(td->td_cpuset);
	td->td_cpuset = NULL;
	cpu_thread_clean(td);
	thread_cow_free(td);
	callout_drain(&td->td_slpcallout);
	thread_reap();	/* check for zombie threads etc. */
}

/*
 * Link a thread to a process.
 * Set up anything that needs to be initialized for it to
 * be used by the process.
 */
void
thread_link(struct thread *td, struct proc *p)
{

	/*
	 * XXX This can't be enabled because it's called for proc0 before
	 * its lock has been created.
	 * PROC_LOCK_ASSERT(p, MA_OWNED);
	 */
	td->td_state    = TDS_INACTIVE;
	td->td_proc     = p;
	td->td_flags    = TDF_INMEM;

	LIST_INIT(&td->td_contested);
	LIST_INIT(&td->td_lprof[0]);
	LIST_INIT(&td->td_lprof[1]);
	sigqueue_init(&td->td_sigqueue, p);
	callout_init(&td->td_slpcallout, 1);
	TAILQ_INSERT_TAIL(&p->p_threads, td, td_plist);
	p->p_numthreads++;
}

/*
 * Called from:
 *  thread_exit()
 */
void
thread_unlink(struct thread *td)
{
	struct proc *p = td->td_proc;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	TAILQ_REMOVE(&p->p_threads, td, td_plist);
	p->p_numthreads--;
	/* Could clear a few other things here... */
	/* ...but must NOT clear the links to the proc! */
}

static int
calc_remaining(struct proc *p, int mode)
{
	int remaining;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	if (mode == SINGLE_EXIT)
		remaining = p->p_numthreads;
	else if (mode == SINGLE_BOUNDARY)
		remaining = p->p_numthreads - p->p_boundary_count;
	else if (mode == SINGLE_NO_EXIT || mode == SINGLE_ALLPROC)
		remaining = p->p_numthreads - p->p_suspcount;
	else
		panic("calc_remaining: wrong mode %d", mode);
	return (remaining);
}

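/*
 * Number of threads expected to remain runnable when single
 * threading succeeds (derived from the assertions in
 * thread_single()): for SINGLE_ALLPROC the requesting thread belongs
 * to another process, so none of p's threads may remain; in all
 * other modes the caller itself keeps running.
 */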
static int
remain_for_mode(int mode)
{

	return (mode == SINGLE_ALLPROC ? 0 : 1);
}

static int
weed_inhib(int mode, struct thread *td2, struct proc *p)
{
	int wakeup_swapper;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	THREAD_LOCK_ASSERT(td2, MA_OWNED);

	wakeup_swapper = 0;
	switch (mode) {
	case SINGLE_EXIT:
		if (TD_IS_SUSPENDED(td2))
			wakeup_swapper |= thread_unsuspend_one(td2, p, true);
		if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR) != 0)
			wakeup_swapper |= sleepq_abort(td2, EINTR);
		break;
	case SINGLE_BOUNDARY:
	case SINGLE_NO_EXIT:
		if (TD_IS_SUSPENDED(td2) && (td2->td_flags & TDF_BOUNDARY) == 0)
			wakeup_swapper |= thread_unsuspend_one(td2, p, false);
		if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR) != 0)
			wakeup_swapper |= sleepq_abort(td2, ERESTART);
		break;
	case SINGLE_ALLPROC:
		/*
		 * ALLPROC suspend tries to avoid spurious EINTR for
		 * threads sleeping interruptibly, by suspending the
		 * thread directly, similarly to sig_suspend_threads().
		 * Since such a sleep is not performed at the user
		 * boundary, the TDF_BOUNDARY flag is not set, and
		 * TDF_ALLPROCSUSP is used to avoid an immediate
		 * un-suspend.
		 */
		if (TD_IS_SUSPENDED(td2) && (td2->td_flags & (TDF_BOUNDARY |
		    TDF_ALLPROCSUSP)) == 0)
			wakeup_swapper |= thread_unsuspend_one(td2, p, false);
		if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR) != 0) {
			if ((td2->td_flags & TDF_SBDRY) == 0) {
				thread_suspend_one(td2);
				td2->td_flags |= TDF_ALLPROCSUSP;
			} else {
				wakeup_swapper |= sleepq_abort(td2, ERESTART);
			}
		}
		break;
	}
	return (wakeup_swapper);
}

/*
 * Enforce single-threading.
 *
 * Returns 1 if the caller must abort (another thread is waiting to
 * exit the process or similar), 0 when the caller is successfully the
 * only thread running.  The process is locked on entry and exit.
 *
 * A process has successfully single-threaded in the suspend mode when
 * there are no threads in user mode.  Threads in the kernel must be
 * allowed to continue until they get to the user boundary.  They may
 * even copy out their return values and data before suspending.  They
 * may, however, be accelerated in reaching the user boundary, as we
 * will wake up any sleeping threads that are interruptible (PCATCH).
 */
int
thread_single(struct proc *p, int mode)
{
	struct thread *td;
	struct thread *td2;
	int remaining, wakeup_swapper;

	td = curthread;
	KASSERT(mode == SINGLE_EXIT || mode == SINGLE_BOUNDARY ||
	    mode == SINGLE_ALLPROC || mode == SINGLE_NO_EXIT,
	    ("invalid mode %d", mode));
	/*
	 * If allowing non-ALLPROC singlethreading for non-curproc
	 * callers, calc_remaining() and remain_for_mode() should be
	 * adjusted to also account for td->td_proc != p.  For now
	 * this is not implemented because it is not used.
	 */
	KASSERT((mode == SINGLE_ALLPROC && td->td_proc != p) ||
	    (mode != SINGLE_ALLPROC && td->td_proc == p),
	    ("mode %d proc %p curproc %p", mode, p, td->td_proc));
	mtx_assert(&Giant, MA_NOTOWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);

	if ((p->p_flag & P_HADTHREADS) == 0 && mode != SINGLE_ALLPROC)
		return (0);

	/* Is someone already single threading? */
	if (p->p_singlethread != NULL && p->p_singlethread != td)
		return (1);

	if (mode == SINGLE_EXIT) {
		p->p_flag |= P_SINGLE_EXIT;
		p->p_flag &= ~P_SINGLE_BOUNDARY;
	} else {
		p->p_flag &= ~P_SINGLE_EXIT;
		if (mode == SINGLE_BOUNDARY)
			p->p_flag |= P_SINGLE_BOUNDARY;
		else
			p->p_flag &= ~P_SINGLE_BOUNDARY;
	}
	if (mode == SINGLE_ALLPROC)
		p->p_flag |= P_TOTAL_STOP;
	p->p_flag |= P_STOPPED_SINGLE;
	PROC_SLOCK(p);
	p->p_singlethread = td;
	remaining = calc_remaining(p, mode);
	while (remaining != remain_for_mode(mode)) {
		if (P_SHOULDSTOP(p) != P_STOPPED_SINGLE)
			goto stopme;
		wakeup_swapper = 0;
		FOREACH_THREAD_IN_PROC(p, td2) {
			if (td2 == td)
				continue;
			thread_lock(td2);
			td2->td_flags |= TDF_ASTPENDING | TDF_NEEDSUSPCHK;
			if (TD_IS_INHIBITED(td2)) {
				wakeup_swapper |= weed_inhib(mode, td2, p);
#ifdef SMP
			} else if (TD_IS_RUNNING(td2) && td != td2) {
				forward_signal(td2);
#endif
			}
			thread_unlock(td2);
		}
		if (wakeup_swapper)
			kick_proc0();
		remaining = calc_remaining(p, mode);

		/*
		 * Maybe we suspended some threads... was it enough?
		 */
		if (remaining == remain_for_mode(mode))
			break;

stopme:
		/*
		 * Wake us up when everyone else has suspended.
		 * In the meantime we suspend as well.
		 */
		thread_suspend_switch(td, p);
		remaining = calc_remaining(p, mode);
	}
	if (mode == SINGLE_EXIT) {
		/*
		 * Convert the process to an unthreaded process.
		 * SINGLE_EXIT is requested by exit1() or execve(); in
		 * both cases the other threads must be retired.
		 */
		KASSERT(p->p_numthreads == 1, ("Unthreading with >1 threads"));
		p->p_singlethread = NULL;
		p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_HADTHREADS);

		/*
		 * Wait for any remaining threads to exit cpu_throw().
		 */
		while (p->p_exitthreads != 0) {
			PROC_SUNLOCK(p);
			PROC_UNLOCK(p);
			sched_relinquish(td);
			PROC_LOCK(p);
			PROC_SLOCK(p);
		}
	} else if (mode == SINGLE_BOUNDARY) {
		/*
		 * Wait until all suspended threads are removed from
		 * the processors.  thread_suspend_check() increments
		 * p_boundary_count while it is still running, so it
		 * would otherwise be possible for execve() to destroy
		 * the vmspace while our other threads are still using
		 * the address space.
		 *
		 * Locking the thread below is only allowed to succeed
		 * after the context switch code has finished using the
		 * address space.
		 */
		FOREACH_THREAD_IN_PROC(p, td2) {
			if (td2 == td)
				continue;
			thread_lock(td2);
			KASSERT((td2->td_flags & TDF_BOUNDARY) != 0,
			    ("td %p not on boundary", td2));
			KASSERT(TD_IS_SUSPENDED(td2),
			    ("td %p is not suspended", td2));
			thread_unlock(td2);
		}
	}
	PROC_SUNLOCK(p);
	return (0);
}
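
/*
 * Typical usage (sketch): exit1() single-threads with
 * thread_single(p, SINGLE_EXIT), while execve() uses SINGLE_BOUNDARY
 * so that the other threads park at the user boundary while the
 * vmspace is replaced; non-exit callers later release the other
 * threads with thread_single_end(), passing the same mode.
 */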

bool
thread_suspend_check_needed(void)
{
	struct proc *p;
	struct thread *td;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	return (P_SHOULDSTOP(p) || ((p->p_flag & P_TRACED) != 0 &&
	    (td->td_dbgflags & TDB_SUSPEND) != 0));
}

/*
 * Called from locations that can safely check to see
 * whether we have to suspend or at least throttle for a
 * single-thread event (e.g. fork).
 *
 * Such locations include userret().
 * If the "return_instead" argument is non-zero, the thread must be able to
 * accept 0 (caller may continue), or 1 (caller must abort) as a result.
 *
 * The 'return_instead' argument tells the function if it may do a
 * thread_exit() or suspend, or whether the caller must abort and back
 * out instead.
 *
 * If the thread that set the single_threading request has set the
 * P_SINGLE_EXIT bit in the process flags, then this call will never
 * return if 'return_instead' is false, but will exit.
 *
 * P_SINGLE_EXIT | return_instead == 0| return_instead != 0
 *---------------+--------------------+---------------------
 *       0       | returns 0          |   returns 0 or 1
 *               | when ST ends       |   immediately
 *---------------+--------------------+---------------------
 *       1       | thread exits       |   returns 1
 *               |                    |  immediately
 * 0 = thread_exit() or suspension ok,
 * other = return error instead of stopping the thread.
 *
 * While a full suspension is in effect, even a single-threading
 * thread would be suspended if it made this call (but it shouldn't).
 * This call should only be made from places where
 * thread_exit() would be safe as that may be the outcome unless
 * return_instead is set.
 */
int
thread_suspend_check(int return_instead)
{
	struct thread *td;
	struct proc *p;
	int wakeup_swapper;

	td = curthread;
	p = td->td_proc;
	mtx_assert(&Giant, MA_NOTOWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	while (thread_suspend_check_needed()) {
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			KASSERT(p->p_singlethread != NULL,
			    ("singlethread not set"));
			/*
			 * The only suspension in action is a
			 * single-threading.  The single threader need
			 * not stop.  It is safe to access
			 * p->p_singlethread unlocked because it can
			 * only be set to our address by us.
			 */
			if (p->p_singlethread == td)
				return (0);	/* Exempt from stopping. */
		}
		if ((p->p_flag & P_SINGLE_EXIT) && return_instead)
			return (EINTR);

		/* Should we goto user boundary if we didn't come from there? */
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE &&
		    (p->p_flag & P_SINGLE_BOUNDARY) && return_instead)
			return (ERESTART);

		/*
		 * Ignore suspend requests if they are deferred.
		 */
		if ((td->td_flags & TDF_SBDRY) != 0) {
			KASSERT(return_instead,
			    ("TDF_SBDRY set for unsafe thread_suspend_check"));
			KASSERT((td->td_flags & (TDF_SEINTR | TDF_SERESTART)) !=
			    (TDF_SEINTR | TDF_SERESTART),
			    ("both TDF_SEINTR and TDF_SERESTART"));
			return (TD_SBDRY_INTR(td) ? TD_SBDRY_ERRNO(td) : 0);
		}

		/*
		 * If the process is waiting for us to exit,
		 * this thread should just suicide.
		 * Assumes that P_SINGLE_EXIT implies P_STOPPED_SINGLE.
		 */
		if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) {
			PROC_UNLOCK(p);

			/*
			 * Allow the Linux emulation layer to do some work
			 * before thread suicide.
			 */
			if (__predict_false(p->p_sysent->sv_thread_detach != NULL))
				(p->p_sysent->sv_thread_detach)(td);
			umtx_thread_exit(td);
			kern_thr_exit(td);
			panic("stopped thread did not exit");
		}

		PROC_SLOCK(p);
		thread_stopped(p);
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			if (p->p_numthreads == p->p_suspcount + 1) {
				thread_lock(p->p_singlethread);
				wakeup_swapper = thread_unsuspend_one(
				    p->p_singlethread, p, false);
				thread_unlock(p->p_singlethread);
				if (wakeup_swapper)
					kick_proc0();
			}
		}
		PROC_UNLOCK(p);
		thread_lock(td);
		/*
		 * When a thread suspends, it just
		 * gets taken off all queues.
		 */
		thread_suspend_one(td);
		if (return_instead == 0) {
			p->p_boundary_count++;
			td->td_flags |= TDF_BOUNDARY;
		}
		PROC_SUNLOCK(p);
		mi_switch(SW_INVOL | SWT_SUSPEND, NULL);
		thread_unlock(td);
		PROC_LOCK(p);
	}
	return (0);
}
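
/*
 * Example call sites (sketch): ast() reacts to TDF_NEEDSUSPCHK by
 * taking the proc lock and calling thread_suspend_check(0), so a
 * suspended thread parks here on its way back to user mode; code
 * that cannot afford to stop passes return_instead != 0 and backs
 * out when a non-zero error is returned.
 */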

void
thread_suspend_switch(struct thread *td, struct proc *p)
{

	KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	/*
	 * We implement thread_suspend_one in stages here to avoid
	 * dropping the proc lock while the thread lock is owned.
	 */
	if (p == td->td_proc) {
		thread_stopped(p);
		p->p_suspcount++;
	}
	PROC_UNLOCK(p);
	thread_lock(td);
	td->td_flags &= ~TDF_NEEDSUSPCHK;
	TD_SET_SUSPENDED(td);
	sched_sleep(td, 0);
	PROC_SUNLOCK(p);
	DROP_GIANT();
	mi_switch(SW_VOL | SWT_SUSPEND, NULL);
	thread_unlock(td);
	PICKUP_GIANT();
	PROC_LOCK(p);
	PROC_SLOCK(p);
}

void
thread_suspend_one(struct thread *td)
{
	struct proc *p;

	p = td->td_proc;
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
	p->p_suspcount++;
	td->td_flags &= ~TDF_NEEDSUSPCHK;
	TD_SET_SUSPENDED(td);
	sched_sleep(td, 0);
}

static int
thread_unsuspend_one(struct thread *td, struct proc *p, bool boundary)
{

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT(TD_IS_SUSPENDED(td), ("Thread not suspended"));
	TD_CLR_SUSPENDED(td);
	td->td_flags &= ~TDF_ALLPROCSUSP;
	if (td->td_proc == p) {
		PROC_SLOCK_ASSERT(p, MA_OWNED);
		p->p_suspcount--;
		if (boundary && (td->td_flags & TDF_BOUNDARY) != 0) {
			td->td_flags &= ~TDF_BOUNDARY;
			p->p_boundary_count--;
		}
	}
	return (setrunnable(td));
}

/*
 * Allow all threads blocked by single threading to continue running.
 */
void
thread_unsuspend(struct proc *p)
{
	struct thread *td;
	int wakeup_swapper;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	wakeup_swapper = 0;
	if (!P_SHOULDSTOP(p)) {
		FOREACH_THREAD_IN_PROC(p, td) {
			thread_lock(td);
			if (TD_IS_SUSPENDED(td)) {
				wakeup_swapper |= thread_unsuspend_one(td, p,
				    true);
			}
			thread_unlock(td);
		}
	} else if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE &&
	    p->p_numthreads == p->p_suspcount) {
		/*
		 * Stopping everything also did the job for the single
		 * threading request.  Now that we have downgraded to
		 * single-threaded, let it continue.
		 */
		if (p->p_singlethread->td_proc == p) {
			thread_lock(p->p_singlethread);
			wakeup_swapper = thread_unsuspend_one(
			    p->p_singlethread, p, false);
			thread_unlock(p->p_singlethread);
		}
	}
	if (wakeup_swapper)
		kick_proc0();
}

/*
 * End the single-threading mode.
 */
void
thread_single_end(struct proc *p, int mode)
{
	struct thread *td;
	int wakeup_swapper;

	KASSERT(mode == SINGLE_EXIT || mode == SINGLE_BOUNDARY ||
	    mode == SINGLE_ALLPROC || mode == SINGLE_NO_EXIT,
	    ("invalid mode %d", mode));
	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT((mode == SINGLE_ALLPROC && (p->p_flag & P_TOTAL_STOP) != 0) ||
	    (mode != SINGLE_ALLPROC && (p->p_flag & P_TOTAL_STOP) == 0),
	    ("mode %d does not match P_TOTAL_STOP", mode));
	KASSERT(mode == SINGLE_ALLPROC || p->p_singlethread == curthread,
	    ("thread_single_end from other thread %p %p",
	    curthread, p->p_singlethread));
	KASSERT(mode != SINGLE_BOUNDARY ||
	    (p->p_flag & P_SINGLE_BOUNDARY) != 0,
	    ("mis-matched SINGLE_BOUNDARY flags %x", p->p_flag));
	p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_SINGLE_BOUNDARY |
	    P_TOTAL_STOP);
	PROC_SLOCK(p);
	p->p_singlethread = NULL;
	wakeup_swapper = 0;
	/*
	 * If there are other threads, they may now run, unless of
	 * course there is a blanket 'stop order' on the process.
	 * The single threader must, however, be allowed to continue,
	 * as this is a bad place to stop.
	 */
	if (p->p_numthreads != remain_for_mode(mode) && !P_SHOULDSTOP(p)) {
		FOREACH_THREAD_IN_PROC(p, td) {
			thread_lock(td);
			if (TD_IS_SUSPENDED(td)) {
				wakeup_swapper |= thread_unsuspend_one(td, p,
				    mode == SINGLE_BOUNDARY);
			}
			thread_unlock(td);
		}
	}
	KASSERT(mode != SINGLE_BOUNDARY || p->p_boundary_count == 0,
	    ("inconsistent boundary count %d", p->p_boundary_count));
	PROC_SUNLOCK(p);
	if (wakeup_swapper)
		kick_proc0();
}

struct thread *
thread_find(struct proc *p, lwpid_t tid)
{
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	FOREACH_THREAD_IN_PROC(p, td) {
		if (td->td_tid == tid)
			break;
	}
	return (td);
}

/* Locate a thread by number; return with proc lock held. */
struct thread *
tdfind(lwpid_t tid, pid_t pid)
{
#define RUN_THRESH	16
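	/*
	 * Move-to-front heuristic: if the lookup below walks more than
	 * RUN_THRESH hash-chain entries before finding the thread, try
	 * to upgrade to a write lock and move the entry to the head of
	 * its chain to speed up future lookups.
	 */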
	struct thread *td;
	int run = 0;

	rw_rlock(&tidhash_lock);
	LIST_FOREACH(td, TIDHASH(tid), td_hash) {
		if (td->td_tid == tid) {
			if (pid != -1 && td->td_proc->p_pid != pid) {
				td = NULL;
				break;
			}
			PROC_LOCK(td->td_proc);
			if (td->td_proc->p_state == PRS_NEW) {
				PROC_UNLOCK(td->td_proc);
				td = NULL;
				break;
			}
			if (run > RUN_THRESH) {
				if (rw_try_upgrade(&tidhash_lock)) {
					LIST_REMOVE(td, td_hash);
					LIST_INSERT_HEAD(TIDHASH(td->td_tid),
						td, td_hash);
					rw_wunlock(&tidhash_lock);
					return (td);
				}
			}
			break;
		}
		run++;
	}
	rw_runlock(&tidhash_lock);
	return (td);
}

void
tidhash_add(struct thread *td)
{
	rw_wlock(&tidhash_lock);
	LIST_INSERT_HEAD(TIDHASH(td->td_tid), td, td_hash);
	rw_wunlock(&tidhash_lock);
}

void
tidhash_remove(struct thread *td)
{
	rw_wlock(&tidhash_lock);
	LIST_REMOVE(td, td_hash);
	rw_wunlock(&tidhash_lock);
}
1260