/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (C) 2001 Julian Elischer <julian@freebsd.org>.
 *  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

#include "opt_witness.h"
#include "opt_hwpmc_hooks.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rangelock.h>
#include <sys/resourcevar.h>
#include <sys/sdt.h>
#include <sys/smp.h>
#include <sys/sched.h>
#include <sys/sleepqueue.h>
#include <sys/selinfo.h>
#include <sys/syscallsubr.h>
#include <sys/sysent.h>
#include <sys/turnstile.h>
#include <sys/ktr.h>
#include <sys/rwlock.h>
#include <sys/umtx.h>
#include <sys/vmmeter.h>
#include <sys/cpuset.h>
#ifdef	HWPMC_HOOKS
#include <sys/pmckern.h>
#endif

#include <security/audit/audit.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>
#include <sys/eventhandler.h>

/*
 * Asserts below verify the stability of struct thread and struct proc
 * layout, as exposed by KBI to modules.  On head, the KBI is allowed
 * to drift; changes to the structures must be accompanied by updates
 * to the asserts.
 *
 * On stable branches after the KBI freeze, the conditions must not be
 * violated.  Typically new fields are moved to the end of the
 * structures.
 */
#ifdef __amd64__
_Static_assert(offsetof(struct thread, td_flags) == 0xfc,
    "struct thread KBI td_flags");
_Static_assert(offsetof(struct thread, td_pflags) == 0x104,
    "struct thread KBI td_pflags");
_Static_assert(offsetof(struct thread, td_frame) == 0x470,
    "struct thread KBI td_frame");
_Static_assert(offsetof(struct thread, td_emuldata) == 0x528,
    "struct thread KBI td_emuldata");
_Static_assert(offsetof(struct proc, p_flag) == 0xb0,
    "struct proc KBI p_flag");
_Static_assert(offsetof(struct proc, p_pid) == 0xbc,
    "struct proc KBI p_pid");
_Static_assert(offsetof(struct proc, p_filemon) == 0x3d0,
    "struct proc KBI p_filemon");
_Static_assert(offsetof(struct proc, p_comm) == 0x3e4,
    "struct proc KBI p_comm");
_Static_assert(offsetof(struct proc, p_emuldata) == 0x4b8,
    "struct proc KBI p_emuldata");
#endif
#ifdef __i386__
_Static_assert(offsetof(struct thread, td_flags) == 0x98,
    "struct thread KBI td_flags");
_Static_assert(offsetof(struct thread, td_pflags) == 0xa0,
    "struct thread KBI td_pflags");
_Static_assert(offsetof(struct thread, td_frame) == 0x2e8,
    "struct thread KBI td_frame");
_Static_assert(offsetof(struct thread, td_emuldata) == 0x334,
    "struct thread KBI td_emuldata");
_Static_assert(offsetof(struct proc, p_flag) == 0x68,
    "struct proc KBI p_flag");
_Static_assert(offsetof(struct proc, p_pid) == 0x74,
    "struct proc KBI p_pid");
_Static_assert(offsetof(struct proc, p_filemon) == 0x27c,
    "struct proc KBI p_filemon");
_Static_assert(offsetof(struct proc, p_comm) == 0x28c,
    "struct proc KBI p_comm");
_Static_assert(offsetof(struct proc, p_emuldata) == 0x318,
    "struct proc KBI p_emuldata");
#endif

SDT_PROVIDER_DECLARE(proc);
SDT_PROBE_DEFINE(proc, , , lwp__exit);

/*
 * Thread-related storage.
 */
static uma_zone_t thread_zone;

TAILQ_HEAD(, thread) zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads);
static struct mtx zombie_lock;
MTX_SYSINIT(zombie_lock, &zombie_lock, "zombie lock", MTX_SPIN);

static void thread_zombie(struct thread *);
static int thread_unsuspend_one(struct thread *td, struct proc *p,
    bool boundary);

#define TID_BUFFER_SIZE	1024

struct mtx tid_lock;
static struct unrhdr *tid_unrhdr;
static lwpid_t tid_buffer[TID_BUFFER_SIZE];
static int tid_head, tid_tail;
static MALLOC_DEFINE(M_TIDHASH, "tidhash", "thread hash");

struct	tidhashhead *tidhashtbl;
u_long	tidhash;
struct	rwlock tidhash_lock;

EVENTHANDLER_LIST_DEFINE(thread_ctor);
EVENTHANDLER_LIST_DEFINE(thread_dtor);
EVENTHANDLER_LIST_DEFINE(thread_init);
EVENTHANDLER_LIST_DEFINE(thread_fini);

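/*
 * TID allocation works in two levels: the unr(9) allocator hands out
 * fresh IDs, while tid_buffer ring-buffers up to TID_BUFFER_SIZE
 * recently freed IDs before returning them to the allocator.  This
 * delays the reuse of a TID, making it less likely that a stale ID is
 * confused with a new thread.
 */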
static lwpid_t
tid_alloc(void)
{
	lwpid_t	tid;

	tid = alloc_unr(tid_unrhdr);
	if (tid != -1)
		return (tid);
	mtx_lock(&tid_lock);
	if (tid_head == tid_tail) {
		mtx_unlock(&tid_lock);
		return (-1);
	}
	tid = tid_buffer[tid_head];
	tid_head = (tid_head + 1) % TID_BUFFER_SIZE;
	mtx_unlock(&tid_lock);
	return (tid);
}

static void
tid_free(lwpid_t tid)
{
	lwpid_t tmp_tid = -1;

	mtx_lock(&tid_lock);
	if ((tid_tail + 1) % TID_BUFFER_SIZE == tid_head) {
		tmp_tid = tid_buffer[tid_head];
		tid_head = (tid_head + 1) % TID_BUFFER_SIZE;
	}
	tid_buffer[tid_tail] = tid;
	tid_tail = (tid_tail + 1) % TID_BUFFER_SIZE;
	mtx_unlock(&tid_lock);
	if (tmp_tid != -1)
		free_unr(tid_unrhdr, tmp_tid);
}

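/*
 * UMA lifecycle note: thread_init()/thread_fini() run once per item
 * as it enters or leaves the type-stable zone, while thread_ctor()/
 * thread_dtor() run on every allocation and free, so a recycled
 * thread skips the init step.
 */
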
/*
 * Prepare a thread for use.
 */
static int
thread_ctor(void *mem, int size, void *arg, int flags)
{
	struct thread	*td;

	td = (struct thread *)mem;
	td->td_state = TDS_INACTIVE;
	td->td_lastcpu = td->td_oncpu = NOCPU;

	td->td_tid = tid_alloc();

	/*
	 * Note that td_critnest begins life as 1 because the thread is not
	 * running and is thereby implicitly waiting to be on the receiving
	 * end of a context switch.
	 */
	td->td_critnest = 1;
	td->td_lend_user_pri = PRI_MAX;
	EVENTHANDLER_DIRECT_INVOKE(thread_ctor, td);
#ifdef AUDIT
	audit_thread_alloc(td);
#endif
	umtx_thread_alloc(td);
	return (0);
}

/*
 * Reclaim a thread after use.
 */
static void
thread_dtor(void *mem, int size, void *arg)
{
	struct thread *td;

	td = (struct thread *)mem;

#ifdef INVARIANTS
	/* Verify that this thread is in a safe state to free. */
	switch (td->td_state) {
	case TDS_INHIBITED:
	case TDS_RUNNING:
	case TDS_CAN_RUN:
	case TDS_RUNQ:
		/*
		 * We must never unlink a thread that is in one of
		 * these states, because it is currently active.
		 */
		panic("bad state for thread unlinking");
		/* NOTREACHED */
	case TDS_INACTIVE:
		break;
	default:
		panic("bad thread state");
		/* NOTREACHED */
	}
#endif
#ifdef AUDIT
	audit_thread_free(td);
#endif
	/* Free all OSD associated with this thread. */
	osd_thread_exit(td);
	td_softdep_cleanup(td);
	MPASS(td->td_su == NULL);

	EVENTHANDLER_DIRECT_INVOKE(thread_dtor, td);
	tid_free(td->td_tid);
}

/*
 * Initialize type-stable parts of a thread (when newly created).
 */
static int
thread_init(void *mem, int size, int flags)
{
	struct thread *td;

	td = (struct thread *)mem;

	td->td_sleepqueue = sleepq_alloc();
	td->td_turnstile = turnstile_alloc();
	td->td_rlqe = NULL;
	EVENTHANDLER_DIRECT_INVOKE(thread_init, td);
	umtx_thread_init(td);
	td->td_kstack = 0;
	td->td_sel = NULL;
	return (0);
}

/*
 * Tear down type-stable parts of a thread (just before being discarded).
 */
static void
thread_fini(void *mem, int size)
{
	struct thread *td;

	td = (struct thread *)mem;
	EVENTHANDLER_DIRECT_INVOKE(thread_fini, td);
	rlqentry_free(td->td_rlqe);
	turnstile_free(td->td_turnstile);
	sleepq_free(td->td_sleepqueue);
	umtx_thread_fini(td);
	seltdfini(td);
}

/*
 * For a newly created process, link up all the structures and its
 * initial thread, etc.  Called from:
 * {arch}/{arch}/machdep.c   {arch}_init(), init386() etc.
 * proc_dtor() (should go away)
 * proc_init()
 */
void
proc_linkup0(struct proc *p, struct thread *td)
{
	TAILQ_INIT(&p->p_threads);	     /* all threads in proc */
	proc_linkup(p, td);
}

void
proc_linkup(struct proc *p, struct thread *td)
{

	sigqueue_init(&p->p_sigqueue, p);
	p->p_ksi = ksiginfo_alloc(1);
	if (p->p_ksi != NULL) {
		/* XXX p_ksi may be null if ksiginfo zone is not ready */
		p->p_ksi->ksi_flags = KSI_EXT | KSI_INS;
	}
	LIST_INIT(&p->p_mqnotifier);
	p->p_numthreads = 0;
	thread_link(td, p);
}

/*
 * Initialize global thread allocation resources.
 */
void
threadinit(void)
{

	mtx_init(&tid_lock, "TID lock", NULL, MTX_DEF);

	/*
	 * Thread IDs start above the PID range (pid_max cannot be
	 * greater than PID_MAX); PID_MAX + 1 is left for thread0.
	 */
	tid_unrhdr = new_unrhdr(PID_MAX + 2, INT_MAX, &tid_lock);

	thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(),
	    thread_ctor, thread_dtor, thread_init, thread_fini,
	    32 - 1, UMA_ZONE_NOFREE);
	tidhashtbl = hashinit(maxproc / 2, M_TIDHASH, &tidhash);
	rw_init(&tidhash_lock, "tidhash");
}
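
/*
 * tidhash is the mask returned by hashinit(9); the TIDHASH() macro
 * (defined in sys/proc.h) is assumed to select a chain roughly as
 *
 *	&tidhashtbl[(tid) & tidhash]
 */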

/*
 * Place an unused thread on the zombie list.
 * Use the slpq as that must be unused by now.
 */
void
thread_zombie(struct thread *td)
{
	mtx_lock_spin(&zombie_lock);
	TAILQ_INSERT_HEAD(&zombie_threads, td, td_slpq);
	mtx_unlock_spin(&zombie_lock);
}

/*
 * Release a thread that has exited after cpu_throw().
 */
void
thread_stash(struct thread *td)
{
	atomic_subtract_rel_int(&td->td_proc->p_exitthreads, 1);
	thread_zombie(td);
}

/*
 * Reap zombie resources.
 */
void
thread_reap(void)
{
	struct thread *td_first, *td_next;

	/*
	 * Don't even bother to lock if none at this instant,
	 * we really don't care about the next instant.
	 */
	if (!TAILQ_EMPTY(&zombie_threads)) {
		mtx_lock_spin(&zombie_lock);
		td_first = TAILQ_FIRST(&zombie_threads);
		if (td_first)
			TAILQ_INIT(&zombie_threads);
		mtx_unlock_spin(&zombie_lock);
		while (td_first) {
			td_next = TAILQ_NEXT(td_first, td_slpq);
			thread_cow_free(td_first);
			thread_free(td_first);
			td_first = td_next;
		}
	}
}

/*
 * Allocate a thread.
 */
struct thread *
thread_alloc(int pages)
{
	struct thread *td;

	thread_reap(); /* check if any zombies to get */

	td = (struct thread *)uma_zalloc(thread_zone, M_WAITOK);
	KASSERT(td->td_kstack == 0, ("thread_alloc got thread with kstack"));
	if (!vm_thread_new(td, pages)) {
		uma_zfree(thread_zone, td);
		return (NULL);
	}
	cpu_thread_alloc(td);
	return (td);
}

int
thread_alloc_stack(struct thread *td, int pages)
{

	KASSERT(td->td_kstack == 0,
	    ("thread_alloc_stack called on a thread with kstack"));
	if (!vm_thread_new(td, pages))
		return (0);
	cpu_thread_alloc(td);
	return (1);
}

/*
 * Deallocate a thread.
 */
void
thread_free(struct thread *td)
{

	lock_profile_thread_exit(td);
	if (td->td_cpuset)
		cpuset_rel(td->td_cpuset);
	td->td_cpuset = NULL;
	cpu_thread_free(td);
	if (td->td_kstack != 0)
		vm_thread_dispose(td);
	callout_drain(&td->td_slpcallout);
	uma_zfree(thread_zone, td);
}

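/*
 * Per-thread copy-on-write snapshots of process-wide data.
 *
 * Each thread holds its own references to the process credentials and
 * resource limits, tagged with the generation number p_cowgen taken
 * at snapshot time.  A hedged sketch of the consumer side (the real
 * check is expected to sit on the AST/userret path):
 *
 *	if (td->td_cowgen != p->p_cowgen)
 *		thread_cow_update(td);
 */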
void
thread_cow_get_proc(struct thread *newtd, struct proc *p)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	newtd->td_ucred = crhold(p->p_ucred);
	newtd->td_limit = lim_hold(p->p_limit);
	newtd->td_cowgen = p->p_cowgen;
}

void
thread_cow_get(struct thread *newtd, struct thread *td)
{

	newtd->td_ucred = crhold(td->td_ucred);
	newtd->td_limit = lim_hold(td->td_limit);
	newtd->td_cowgen = td->td_cowgen;
}

void
thread_cow_free(struct thread *td)
{

	if (td->td_ucred != NULL)
		crfree(td->td_ucred);
	if (td->td_limit != NULL)
		lim_free(td->td_limit);
}

void
thread_cow_update(struct thread *td)
{
	struct proc *p;
	struct ucred *oldcred;
	struct plimit *oldlimit;

	p = td->td_proc;
	oldcred = NULL;
	oldlimit = NULL;
	PROC_LOCK(p);
	if (td->td_ucred != p->p_ucred) {
		oldcred = td->td_ucred;
		td->td_ucred = crhold(p->p_ucred);
	}
	if (td->td_limit != p->p_limit) {
		oldlimit = td->td_limit;
		td->td_limit = lim_hold(p->p_limit);
	}
	td->td_cowgen = p->p_cowgen;
	PROC_UNLOCK(p);
	if (oldcred != NULL)
		crfree(oldcred);
	if (oldlimit != NULL)
		lim_free(oldlimit);
}

/*
 * Discard the current thread and exit from its context.
 * Always called with scheduler locked.
 *
 * Because we can't free a thread while we're operating under its context,
 * push the current thread into our CPU's deadthread holder. This means
 * we needn't worry about someone else grabbing our context before we
 * do a cpu_throw().
 */
void
thread_exit(void)
{
	uint64_t runtime, new_switchtime;
	struct thread *td;
	struct thread *td2;
	struct proc *p;
	int wakeup_swapper;

	td = curthread;
	p = td->td_proc;

	PROC_SLOCK_ASSERT(p, MA_OWNED);
	mtx_assert(&Giant, MA_NOTOWNED);

	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT(p != NULL, ("thread exiting without a process"));
	CTR3(KTR_PROC, "thread_exit: thread %p (pid %ld, %s)", td,
	    (long)p->p_pid, td->td_name);
	SDT_PROBE0(proc, , , lwp__exit);
	KASSERT(TAILQ_EMPTY(&td->td_sigqueue.sq_list), ("signal pending"));

	/*
	 * Drop FPU and debug register state storage, and any other
	 * architecture-specific resources that would not be present
	 * in a newly created process.
	 */
	cpu_thread_exit(td);

	/*
	 * The last thread is left attached to the process so that the
	 * whole bundle gets recycled.  Skip all this if we never had
	 * threads.  The exit path clears all signs of other threads
	 * when it goes to single threading, so the last thread always
	 * takes the short path.
	 */
	if (p->p_flag & P_HADTHREADS) {
		if (p->p_numthreads > 1) {
			atomic_add_int(&td->td_proc->p_exitthreads, 1);
			thread_unlink(td);
			td2 = FIRST_THREAD_IN_PROC(p);
			sched_exit_thread(td2, td);

			/*
			 * The test below is NOT true if we are the
			 * sole exiting thread.  P_STOPPED_SINGLE is
			 * unset in exit1() once only one thread
			 * survives.
			 */
			if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
				if (p->p_numthreads == p->p_suspcount) {
					thread_lock(p->p_singlethread);
					wakeup_swapper = thread_unsuspend_one(
						p->p_singlethread, p, false);
					thread_unlock(p->p_singlethread);
					if (wakeup_swapper)
						kick_proc0();
				}
			}

			PCPU_SET(deadthread, td);
		} else {
			/*
			 * The last thread is exiting, but not through
			 * exit().
			 */
			panic("thread_exit: Last thread exiting on its own");
		}
	}
#ifdef	HWPMC_HOOKS
	/*
	 * If this thread is part of a process that is being tracked
	 * by hwpmc(4), inform the module of the thread's impending
	 * exit.
	 */
	if (PMC_PROC_IS_USING_PMCS(td->td_proc)) {
		PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT);
		PMC_CALL_HOOK_UNLOCKED(td, PMC_FN_THR_EXIT, NULL);
	} else if (PMC_SYSTEM_SAMPLING_ACTIVE())
		PMC_CALL_HOOK_UNLOCKED(td, PMC_FN_THR_EXIT_LOG, NULL);
#endif
	PROC_UNLOCK(p);
	PROC_STATLOCK(p);
	thread_lock(td);
	PROC_SUNLOCK(p);

	/* Do the same timestamp bookkeeping that mi_switch() would do. */
	new_switchtime = cpu_ticks();
	runtime = new_switchtime - PCPU_GET(switchtime);
	td->td_runtime += runtime;
	td->td_incruntime += runtime;
	PCPU_SET(switchtime, new_switchtime);
	PCPU_SET(switchticks, ticks);
	VM_CNT_INC(v_swtch);

	/* Save our resource usage in our process. */
	td->td_ru.ru_nvcsw++;
	ruxagg(p, td);
	rucollect(&p->p_ru, &td->td_ru);
	PROC_STATUNLOCK(p);

	td->td_state = TDS_INACTIVE;
#ifdef WITNESS
	witness_thread_exit(td);
#endif
	CTR1(KTR_PROC, "thread_exit: cpu_throw() thread %p", td);
	sched_throw(td);
	panic("I'm a teapot!");
	/* NOTREACHED */
}

/*
 * Perform any thread-specific cleanups that may be needed in wait().
 * Called with Giant, proc lock, and schedlock not held.
 */
void
thread_wait(struct proc *p)
{
	struct thread *td;

	mtx_assert(&Giant, MA_NOTOWNED);
	KASSERT(p->p_numthreads == 1, ("multiple threads in thread_wait()"));
	KASSERT(p->p_exitthreads == 0, ("p_exitthreads leaking"));
	td = FIRST_THREAD_IN_PROC(p);
	/* Lock the last thread so we spin until it exits cpu_throw(). */
	thread_lock(td);
	thread_unlock(td);
	lock_profile_thread_exit(td);
	cpuset_rel(td->td_cpuset);
	td->td_cpuset = NULL;
	cpu_thread_clean(td);
	thread_cow_free(td);
	callout_drain(&td->td_slpcallout);
	thread_reap();	/* check for zombie threads etc. */
}

/*
 * Link a thread to a process.
 * Set up anything that needs to be initialized for it to
 * be used by the process.
 */
void
thread_link(struct thread *td, struct proc *p)
{

	/*
	 * XXX This can't be enabled because it's called for proc0 before
	 * its lock has been created.
	 * PROC_LOCK_ASSERT(p, MA_OWNED);
	 */
	td->td_state    = TDS_INACTIVE;
	td->td_proc     = p;
	td->td_flags    = TDF_INMEM;

	LIST_INIT(&td->td_contested);
	LIST_INIT(&td->td_lprof[0]);
	LIST_INIT(&td->td_lprof[1]);
	sigqueue_init(&td->td_sigqueue, p);
	callout_init(&td->td_slpcallout, 1);
	TAILQ_INSERT_TAIL(&p->p_threads, td, td_plist);
	p->p_numthreads++;
}

/*
 * Called from:
 *  thread_exit()
 */
void
thread_unlink(struct thread *td)
{
	struct proc *p = td->td_proc;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	TAILQ_REMOVE(&p->p_threads, td, td_plist);
	p->p_numthreads--;
	/* could clear a few other things here */
	/* Must NOT clear links to proc! */
}

static int
calc_remaining(struct proc *p, int mode)
{
	int remaining;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	if (mode == SINGLE_EXIT)
		remaining = p->p_numthreads;
	else if (mode == SINGLE_BOUNDARY)
		remaining = p->p_numthreads - p->p_boundary_count;
	else if (mode == SINGLE_NO_EXIT || mode == SINGLE_ALLPROC)
		remaining = p->p_numthreads - p->p_suspcount;
	else
		panic("calc_remaining: wrong mode %d", mode);
	return (remaining);
}

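/*
 * In SINGLE_ALLPROC mode the request comes from a thread in another
 * process, so no thread of p may remain running; in all other modes
 * the calling thread itself is the one thread allowed to remain.
 */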
static int
remain_for_mode(int mode)
{

	return (mode == SINGLE_ALLPROC ? 0 : 1);
}
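
/*
 * weed_inhib() pushes one inhibited thread toward the single-threading
 * goal: suspended threads are resumed so they can reach the user
 * boundary (or exit), and interruptible sleeps are aborted with EINTR
 * or ERESTART as the mode requires; SINGLE_ALLPROC instead suspends
 * interruptibly sleeping threads in place to avoid spurious EINTR.
 */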
static int
weed_inhib(int mode, struct thread *td2, struct proc *p)
{
	int wakeup_swapper;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	THREAD_LOCK_ASSERT(td2, MA_OWNED);

	wakeup_swapper = 0;
	switch (mode) {
	case SINGLE_EXIT:
		if (TD_IS_SUSPENDED(td2))
			wakeup_swapper |= thread_unsuspend_one(td2, p, true);
		if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR) != 0)
			wakeup_swapper |= sleepq_abort(td2, EINTR);
		break;
	case SINGLE_BOUNDARY:
	case SINGLE_NO_EXIT:
		if (TD_IS_SUSPENDED(td2) && (td2->td_flags & TDF_BOUNDARY) == 0)
			wakeup_swapper |= thread_unsuspend_one(td2, p, false);
		if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR) != 0)
			wakeup_swapper |= sleepq_abort(td2, ERESTART);
		break;
	case SINGLE_ALLPROC:
		/*
		 * ALLPROC suspend tries to avoid spurious EINTR for
		 * threads sleeping interruptibly by suspending the
		 * thread directly, similarly to sig_suspend_threads().
		 * Since such a sleep is not performed at the user
		 * boundary, TDF_BOUNDARY is not set, and TDF_ALLPROCSUSP
		 * is used to avoid an immediate un-suspend.
		 */
		if (TD_IS_SUSPENDED(td2) && (td2->td_flags & (TDF_BOUNDARY |
		    TDF_ALLPROCSUSP)) == 0)
			wakeup_swapper |= thread_unsuspend_one(td2, p, false);
		if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR) != 0) {
			if ((td2->td_flags & TDF_SBDRY) == 0) {
				thread_suspend_one(td2);
				td2->td_flags |= TDF_ALLPROCSUSP;
			} else {
				wakeup_swapper |= sleepq_abort(td2, ERESTART);
			}
		}
		break;
	}
	return (wakeup_swapper);
}

/*
 * Enforce single-threading.
 *
 * Returns 1 if the caller must abort (another thread is waiting to
 * exit the process or similar); the process remains locked.
 * Returns 0 when the caller has successfully become the only running
 * thread.  A process has successfully single-threaded in the suspend
 * mode when there are no threads in user mode.  Threads in the kernel
 * must be allowed to continue until they get to the user boundary;
 * they may even copy out their return values and data before
 * suspending.  They may, however, be accelerated in reaching the user
 * boundary, since we will wake up any sleeping threads that are
 * interruptible (PCATCH).
 */
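/*
 * Hedged usage sketch (illustrative only, not part of the build):
 * a single-threading caller such as exit1() or execve() proceeds
 * roughly as
 *
 *	PROC_LOCK(p);
 *	if (thread_single(p, SINGLE_EXIT) != 0)
 *		handle the conflicting single-threader;
 *	...	now the only thread in user mode
 *	PROC_UNLOCK(p);
 */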
int
thread_single(struct proc *p, int mode)
{
	struct thread *td;
	struct thread *td2;
	int remaining, wakeup_swapper;

	td = curthread;
	KASSERT(mode == SINGLE_EXIT || mode == SINGLE_BOUNDARY ||
	    mode == SINGLE_ALLPROC || mode == SINGLE_NO_EXIT,
	    ("invalid mode %d", mode));
	/*
	 * If allowing non-ALLPROC singlethreading for non-curproc
	 * callers, calc_remaining() and remain_for_mode() should be
	 * adjusted to also account for td->td_proc != p.  For now
	 * this is not implemented because it is not used.
	 */
	KASSERT((mode == SINGLE_ALLPROC && td->td_proc != p) ||
	    (mode != SINGLE_ALLPROC && td->td_proc == p),
	    ("mode %d proc %p curproc %p", mode, p, td->td_proc));
	mtx_assert(&Giant, MA_NOTOWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);

	if ((p->p_flag & P_HADTHREADS) == 0 && mode != SINGLE_ALLPROC)
		return (0);

	/* Is someone already single threading? */
	if (p->p_singlethread != NULL && p->p_singlethread != td)
		return (1);

	if (mode == SINGLE_EXIT) {
		p->p_flag |= P_SINGLE_EXIT;
		p->p_flag &= ~P_SINGLE_BOUNDARY;
	} else {
		p->p_flag &= ~P_SINGLE_EXIT;
		if (mode == SINGLE_BOUNDARY)
			p->p_flag |= P_SINGLE_BOUNDARY;
		else
			p->p_flag &= ~P_SINGLE_BOUNDARY;
	}
	if (mode == SINGLE_ALLPROC)
		p->p_flag |= P_TOTAL_STOP;
	p->p_flag |= P_STOPPED_SINGLE;
	PROC_SLOCK(p);
	p->p_singlethread = td;
	remaining = calc_remaining(p, mode);
	while (remaining != remain_for_mode(mode)) {
		if (P_SHOULDSTOP(p) != P_STOPPED_SINGLE)
			goto stopme;
		wakeup_swapper = 0;
		FOREACH_THREAD_IN_PROC(p, td2) {
			if (td2 == td)
				continue;
			thread_lock(td2);
			td2->td_flags |= TDF_ASTPENDING | TDF_NEEDSUSPCHK;
			if (TD_IS_INHIBITED(td2)) {
				wakeup_swapper |= weed_inhib(mode, td2, p);
#ifdef SMP
			} else if (TD_IS_RUNNING(td2) && td != td2) {
				forward_signal(td2);
#endif
			}
			thread_unlock(td2);
		}
		if (wakeup_swapper)
			kick_proc0();
		remaining = calc_remaining(p, mode);

		/*
		 * Maybe we suspended some threads; was it enough?
		 */
		if (remaining == remain_for_mode(mode))
			break;

stopme:
		/*
		 * Wake us up when everyone else has suspended.
		 * In the meantime we suspend as well.
		 */
		thread_suspend_switch(td, p);
		remaining = calc_remaining(p, mode);
	}
	if (mode == SINGLE_EXIT) {
		/*
		 * Convert the process to an unthreaded process.
		 * SINGLE_EXIT is requested by exit1() and execve();
		 * in both cases the other threads must be retired.
		 */
		KASSERT(p->p_numthreads == 1, ("Unthreading with >1 threads"));
		p->p_singlethread = NULL;
		p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_HADTHREADS);

		/*
		 * Wait for any remaining threads to exit cpu_throw().
		 */
		while (p->p_exitthreads != 0) {
			PROC_SUNLOCK(p);
			PROC_UNLOCK(p);
			sched_relinquish(td);
			PROC_LOCK(p);
			PROC_SLOCK(p);
		}
	} else if (mode == SINGLE_BOUNDARY) {
		/*
		 * Wait until all suspended threads are removed from
		 * the processors.  thread_suspend_check() increments
		 * p_boundary_count while it is still running, which
		 * would otherwise make it possible for execve() to
		 * destroy the vmspace while our other threads are
		 * still using the address space.
		 *
		 * We lock each thread, which is only allowed to
		 * succeed after the context switch code has finished
		 * using the address space.
		 */
		FOREACH_THREAD_IN_PROC(p, td2) {
			if (td2 == td)
				continue;
			thread_lock(td2);
			KASSERT((td2->td_flags & TDF_BOUNDARY) != 0,
			    ("td %p not on boundary", td2));
			KASSERT(TD_IS_SUSPENDED(td2),
			    ("td %p is not suspended", td2));
			thread_unlock(td2);
		}
	}
	PROC_SUNLOCK(p);
	return (0);
}

bool
thread_suspend_check_needed(void)
{
	struct proc *p;
	struct thread *td;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	return (P_SHOULDSTOP(p) || ((p->p_flag & P_TRACED) != 0 &&
	    (td->td_dbgflags & TDB_SUSPEND) != 0));
}

/*
 * Called from locations where we can safely check to see
 * whether we have to suspend or at least throttle for a
 * single-thread event (e.g. fork).
 *
 * Such locations include userret().
 *
 * The 'return_instead' argument tells the function whether it may do a
 * thread_exit() or suspend, or whether the caller must abort and back
 * out instead.  If it is nonzero, the caller must be able to accept
 * 0 (continue) or 1 (abort) as the result.
 *
 * If the thread that set the single_threading request has set the
 * P_SINGLE_EXIT bit in the process flags then this call will never return
 * if 'return_instead' is false, but will exit.
 *
 * P_SINGLE_EXIT | return_instead == 0| return_instead != 0
 *---------------+--------------------+---------------------
 *       0       | returns 0          |   returns 0 or 1
 *               | when ST ends       |   immediately
 *---------------+--------------------+---------------------
 *       1       | thread exits       |   returns 1
 *               |                    |   immediately
 * 0 = thread_exit() or suspension ok,
 * other = return error instead of stopping the thread.
 *
 * While a full suspension is in effect, even a single-threading
 * thread would be suspended if it made this call (but it shouldn't).
 * This call should only be made from places where
 * thread_exit() would be safe, as that may be the outcome unless
 * return_instead is set.
 */
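/*
 * Hedged illustration (sketch, not part of the build): the AST path
 * is expected to invoke this roughly as
 *
 *	if (td->td_flags & TDF_NEEDSUSPCHK) {
 *		PROC_LOCK(p);
 *		thread_suspend_check(0);
 *		PROC_UNLOCK(p);
 *	}
 *
 * while code holding kernel resources passes return_instead != 0 and
 * backs out on a non-zero return.
 */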
int
thread_suspend_check(int return_instead)
{
	struct thread *td;
	struct proc *p;
	int wakeup_swapper;

	td = curthread;
	p = td->td_proc;
	mtx_assert(&Giant, MA_NOTOWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	while (thread_suspend_check_needed()) {
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			KASSERT(p->p_singlethread != NULL,
			    ("singlethread not set"));
			/*
			 * The only suspension in action is
			 * single-threading.  The single threader need
			 * not stop.  It is safe to access
			 * p->p_singlethread unlocked because it can
			 * only be set to our address by us.
			 */
			if (p->p_singlethread == td)
				return (0);	/* Exempt from stopping. */
		}
		if ((p->p_flag & P_SINGLE_EXIT) && return_instead)
			return (EINTR);

		/* Should we goto user boundary if we didn't come from there? */
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE &&
		    (p->p_flag & P_SINGLE_BOUNDARY) && return_instead)
			return (ERESTART);

		/*
		 * Ignore suspend requests if they are deferred.
		 */
		if ((td->td_flags & TDF_SBDRY) != 0) {
			KASSERT(return_instead,
			    ("TDF_SBDRY set for unsafe thread_suspend_check"));
			KASSERT((td->td_flags & (TDF_SEINTR | TDF_SERESTART)) !=
			    (TDF_SEINTR | TDF_SERESTART),
			    ("both TDF_SEINTR and TDF_SERESTART"));
			return (TD_SBDRY_INTR(td) ? TD_SBDRY_ERRNO(td) : 0);
		}

		/*
		 * If the process is waiting for us to exit,
		 * this thread should just suicide.
		 * Assumes that P_SINGLE_EXIT implies P_STOPPED_SINGLE.
		 */
		if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) {
			PROC_UNLOCK(p);

			/*
			 * Allow Linux emulation layer to do some work
			 * before thread suicide.
			 */
			if (__predict_false(p->p_sysent->sv_thread_detach !=
			    NULL))
				(p->p_sysent->sv_thread_detach)(td);
			umtx_thread_exit(td);
			kern_thr_exit(td);
			panic("stopped thread did not exit");
		}

		PROC_SLOCK(p);
		thread_stopped(p);
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			if (p->p_numthreads == p->p_suspcount + 1) {
				thread_lock(p->p_singlethread);
				wakeup_swapper = thread_unsuspend_one(
				    p->p_singlethread, p, false);
				thread_unlock(p->p_singlethread);
				if (wakeup_swapper)
					kick_proc0();
			}
		}
		PROC_UNLOCK(p);
		thread_lock(td);
		/*
		 * When a thread suspends, it just
		 * gets taken off all queues.
		 */
		thread_suspend_one(td);
		if (return_instead == 0) {
			p->p_boundary_count++;
			td->td_flags |= TDF_BOUNDARY;
		}
		PROC_SUNLOCK(p);
		mi_switch(SW_INVOL | SWT_SUSPEND, NULL);
		thread_unlock(td);
		PROC_LOCK(p);
	}
	return (0);
}

/*
 * Check for possible stops and suspensions while executing a
 * casueword or similar transiently failing operation.
 *
 * The sleep argument controls whether the function can handle a stop
 * request itself or whether it should return ERESTART and let the
 * request be processed at the kernel/user boundary in ast.
 *
 * Typically, when retrying due to casueword(9) failure (rv == 1), we
 * should handle the stop requests there, except in cases when the
 * thread owns a kernel resource, for instance has busied the umtx
 * key, or when functions return immediately if thread_check_susp()
 * returned non-zero.  On the other hand, when retrying the whole lock
 * operation, it is better not to stop there but to delegate the
 * handling to ast.
 *
 * If the request is for thread termination, P_SINGLE_EXIT, we cannot
 * handle it at all, and simply return EINTR.
 */
int
thread_check_susp(struct thread *td, bool sleep)
{
	struct proc *p;
	int error;

	/*
	 * The check for TDF_NEEDSUSPCHK is racy, but it is enough to
	 * eventually break the lockstep loop.
	 */
	if ((td->td_flags & TDF_NEEDSUSPCHK) == 0)
		return (0);
	error = 0;
	p = td->td_proc;
	PROC_LOCK(p);
	if (p->p_flag & P_SINGLE_EXIT)
		error = EINTR;
	else if (P_SHOULDSTOP(p) ||
	    ((p->p_flag & P_TRACED) && (td->td_dbgflags & TDB_SUSPEND)))
		error = sleep ? thread_suspend_check(0) : ERESTART;
	PROC_UNLOCK(p);
	return (error);
}
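
/*
 * Hedged usage sketch (illustrative only): a casueword(9)-style retry
 * loop in a umtx consumer might look roughly like
 *
 *	for (;;) {
 *		rv = casueword32(uaddr, old, &old, new);
 *		if (rv == -1)
 *			return (EFAULT);
 *		if (rv == 0)
 *			break;
 *		error = thread_check_susp(td, true);
 *		if (error != 0)
 *			return (error);
 *	}
 */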

void
thread_suspend_switch(struct thread *td, struct proc *p)
{

	KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	/*
	 * We implement thread_suspend_one in stages here to avoid
	 * dropping the proc lock while the thread lock is owned.
	 */
	if (p == td->td_proc) {
		thread_stopped(p);
		p->p_suspcount++;
	}
	PROC_UNLOCK(p);
	thread_lock(td);
	td->td_flags &= ~TDF_NEEDSUSPCHK;
	TD_SET_SUSPENDED(td);
	sched_sleep(td, 0);
	PROC_SUNLOCK(p);
	DROP_GIANT();
	mi_switch(SW_VOL | SWT_SUSPEND, NULL);
	thread_unlock(td);
	PICKUP_GIANT();
	PROC_LOCK(p);
	PROC_SLOCK(p);
}

void
thread_suspend_one(struct thread *td)
{
	struct proc *p;

	p = td->td_proc;
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
	p->p_suspcount++;
	td->td_flags &= ~TDF_NEEDSUSPCHK;
	TD_SET_SUSPENDED(td);
	sched_sleep(td, 0);
}

static int
thread_unsuspend_one(struct thread *td, struct proc *p, bool boundary)
{

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT(TD_IS_SUSPENDED(td), ("Thread not suspended"));
	TD_CLR_SUSPENDED(td);
	td->td_flags &= ~TDF_ALLPROCSUSP;
	if (td->td_proc == p) {
		PROC_SLOCK_ASSERT(p, MA_OWNED);
		p->p_suspcount--;
		if (boundary && (td->td_flags & TDF_BOUNDARY) != 0) {
			td->td_flags &= ~TDF_BOUNDARY;
			p->p_boundary_count--;
		}
	}
	return (setrunnable(td));
}

/*
 * Allow all threads blocked by single threading to continue running.
 */
void
thread_unsuspend(struct proc *p)
{
	struct thread *td;
	int wakeup_swapper;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	wakeup_swapper = 0;
	if (!P_SHOULDSTOP(p)) {
		FOREACH_THREAD_IN_PROC(p, td) {
			thread_lock(td);
			if (TD_IS_SUSPENDED(td)) {
				wakeup_swapper |= thread_unsuspend_one(td, p,
				    true);
			}
			thread_unlock(td);
		}
	} else if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE &&
	    p->p_numthreads == p->p_suspcount) {
		/*
		 * Stopping everything also did the job for the single
		 * threading request.  Now that we have downgraded to
		 * single-threaded, let it continue.
		 */
		if (p->p_singlethread->td_proc == p) {
			thread_lock(p->p_singlethread);
			wakeup_swapper = thread_unsuspend_one(
			    p->p_singlethread, p, false);
			thread_unlock(p->p_singlethread);
		}
	}
	if (wakeup_swapper)
		kick_proc0();
}

/*
 * End single-threading mode.
 */
void
thread_single_end(struct proc *p, int mode)
{
	struct thread *td;
	int wakeup_swapper;

	KASSERT(mode == SINGLE_EXIT || mode == SINGLE_BOUNDARY ||
	    mode == SINGLE_ALLPROC || mode == SINGLE_NO_EXIT,
	    ("invalid mode %d", mode));
	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT((mode == SINGLE_ALLPROC && (p->p_flag & P_TOTAL_STOP) != 0) ||
	    (mode != SINGLE_ALLPROC && (p->p_flag & P_TOTAL_STOP) == 0),
	    ("mode %d does not match P_TOTAL_STOP", mode));
	KASSERT(mode == SINGLE_ALLPROC || p->p_singlethread == curthread,
	    ("thread_single_end from other thread %p %p",
	    curthread, p->p_singlethread));
	KASSERT(mode != SINGLE_BOUNDARY ||
	    (p->p_flag & P_SINGLE_BOUNDARY) != 0,
	    ("mis-matched SINGLE_BOUNDARY flags %x", p->p_flag));
	p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_SINGLE_BOUNDARY |
	    P_TOTAL_STOP);
	PROC_SLOCK(p);
	p->p_singlethread = NULL;
	wakeup_swapper = 0;
	/*
	 * If there are other threads, they may now run, unless of
	 * course there is a blanket 'stop order' on the process.  The
	 * single threader must be allowed to continue, however, as
	 * this is a bad place to stop.
	 */
	if (p->p_numthreads != remain_for_mode(mode) && !P_SHOULDSTOP(p)) {
		FOREACH_THREAD_IN_PROC(p, td) {
			thread_lock(td);
			if (TD_IS_SUSPENDED(td)) {
				wakeup_swapper |= thread_unsuspend_one(td, p,
				    mode == SINGLE_BOUNDARY);
			}
			thread_unlock(td);
		}
	}
	KASSERT(mode != SINGLE_BOUNDARY || p->p_boundary_count == 0,
	    ("inconsistent boundary count %d", p->p_boundary_count));
	PROC_SUNLOCK(p);
	if (wakeup_swapper)
		kick_proc0();
}

struct thread *
thread_find(struct proc *p, lwpid_t tid)
{
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	FOREACH_THREAD_IN_PROC(p, td) {
		if (td->td_tid == tid)
			break;
	}
	return (td);
}

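/*
 * A note on the RUN_THRESH heuristic below: when a lookup walks more
 * than RUN_THRESH hash links before finding its thread, the entry is
 * moved to the head of its chain (if the read lock can be upgraded)
 * so that repeated lookups of a hot TID stay cheap.
 */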
/* Locate a thread by number; return with proc lock held. */
struct thread *
tdfind(lwpid_t tid, pid_t pid)
{
#define RUN_THRESH	16
	struct thread *td;
	int run = 0;

	td = curthread;
	if (td->td_tid == tid) {
		if (pid != -1 && td->td_proc->p_pid != pid)
			return (NULL);
		PROC_LOCK(td->td_proc);
		return (td);
	}

	rw_rlock(&tidhash_lock);
	LIST_FOREACH(td, TIDHASH(tid), td_hash) {
		if (td->td_tid == tid) {
			if (pid != -1 && td->td_proc->p_pid != pid) {
				td = NULL;
				break;
			}
			PROC_LOCK(td->td_proc);
			if (td->td_proc->p_state == PRS_NEW) {
				PROC_UNLOCK(td->td_proc);
				td = NULL;
				break;
			}
			if (run > RUN_THRESH) {
				if (rw_try_upgrade(&tidhash_lock)) {
					LIST_REMOVE(td, td_hash);
					LIST_INSERT_HEAD(TIDHASH(td->td_tid),
						td, td_hash);
					rw_wunlock(&tidhash_lock);
					return (td);
				}
			}
			break;
		}
		run++;
	}
	rw_runlock(&tidhash_lock);
	return (td);
}

void
tidhash_add(struct thread *td)
{
	rw_wlock(&tidhash_lock);
	LIST_INSERT_HEAD(TIDHASH(td->td_tid), td, td_hash);
	rw_wunlock(&tidhash_lock);
}

void
tidhash_remove(struct thread *td)
{
	rw_wlock(&tidhash_lock);
	LIST_REMOVE(td, td_hash);
	rw_wunlock(&tidhash_lock);
}
