/*	$NetBSD: kern_kthread.c,v 1.48 2023/07/17 10:55:27 riastradh Exp $	*/

/*-
 * Copyright (c) 1998, 1999, 2007, 2009, 2019 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_kthread.c,v 1.48 2023/07/17 10:55:27 riastradh Exp $");

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/mutex.h>
#include <sys/sched.h>
#include <sys/kmem.h>
#include <sys/msan.h>

#include <uvm/uvm_extern.h>

static kmutex_t		kthread_lock;
static kcondvar_t	kthread_cv;

void
kthread_sysinit(void)
{

	mutex_init(&kthread_lock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&kthread_cv, "kthrwait");
}

/*
 * kthread_create: create a kernel thread, that is, a system-only LWP.
 */
int
kthread_create(pri_t pri, int flag, struct cpu_info *ci,
    void (*func)(void *), void *arg, lwp_t **lp, const char *fmt, ...)
{
	lwp_t *l;
	vaddr_t uaddr;
	int error, lc;
	va_list ap;

	KASSERT((flag & KTHREAD_INTR) == 0 || (flag & KTHREAD_MPSAFE) != 0);

	uaddr = uvm_uarea_system_alloc(
	   (flag & (KTHREAD_INTR|KTHREAD_IDLE)) == KTHREAD_IDLE ? ci : NULL);
	if (uaddr == 0) {
		return ENOMEM;
	}
	kmsan_orig((void *)uaddr, USPACE, KMSAN_TYPE_POOL, __RET_ADDR);
	if ((flag & KTHREAD_TS) != 0) {
		lc = SCHED_OTHER;
	} else {
		lc = SCHED_RR;
	}

	error = lwp_create(&lwp0, &proc0, uaddr, LWP_DETACHED, NULL,
	    0, func, arg, &l, lc, &lwp0.l_sigmask, &lwp0.l_sigstk);
	if (error) {
		uvm_uarea_system_free(uaddr);
		return error;
	}
	if (fmt != NULL) {
		l->l_name = kmem_alloc(MAXCOMLEN, KM_SLEEP);
		va_start(ap, fmt);
		vsnprintf(l->l_name, MAXCOMLEN, fmt, ap);
		va_end(ap);
	}

	/*
	 * Set parameters.
	 */
	if (pri == PRI_NONE) {
		if ((flag & KTHREAD_TS) != 0) {
			/* Maximum user priority level. */
			pri = MAXPRI_USER;
		} else {
			/* Minimum kernel priority level. */
			pri = PRI_KTHREAD;
		}
	}
	mutex_enter(proc0.p_lock);
	lwp_lock(l);
	lwp_changepri(l, pri);
	if (ci != NULL) {
		if (ci != l->l_cpu) {
			lwp_unlock_to(l, ci->ci_schedstate.spc_lwplock);
			lwp_lock(l);
		}
		l->l_pflag |= LP_BOUND;
		l->l_cpu = ci;
	}

	if ((flag & KTHREAD_MUSTJOIN) != 0) {
		KASSERT(lp != NULL);
		l->l_pflag |= LP_MUSTJOIN;
	}
	if ((flag & KTHREAD_INTR) != 0) {
		l->l_pflag |= LP_INTR;
	}
	if ((flag & KTHREAD_MPSAFE) == 0) {
		l->l_pflag &= ~LP_MPSAFE;
	}

	/*
	 * Set the new LWP running, unless the caller has requested
	 * otherwise.
	 */
	KASSERT(l->l_stat == LSIDL);
	if ((flag & KTHREAD_IDLE) == 0) {
		setrunnable(l);
		/* LWP now unlocked */
	} else {
		lwp_unlock(l);
	}
	mutex_exit(proc0.p_lock);

	/* All done! */
	if (lp != NULL) {
		*lp = l;
	}
	return 0;
}
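
/*
 * Usage sketch (illustrative, not part of the original file): creating
 * a detached, MP-safe worker at the default kthread priority.  The
 * softc `sc' and entry point `my_worker' are hypothetical names.
 *
 *	error = kthread_create(PRI_NONE, KTHREAD_MPSAFE, NULL,
 *	    my_worker, sc, NULL, "myworker");
 *	if (error)
 *		aprint_error("failed to create worker thread: %d\n", error);
 *
 * Passing a cpu_info pointer for `ci' instead of NULL binds the new
 * thread to that CPU via LP_BOUND, as done above.
 */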

/*
 * Cause a kernel thread to exit.  Must be called from the exiting
 * thread's own context.
 */
void
kthread_exit(int ecode)
{
	const char *name;
	lwp_t *l = curlwp;

	/* We can't do much with the exit code, so just report it. */
	if (ecode != 0) {
		if ((name = l->l_name) == NULL)
			name = "unnamed";
		printf("WARNING: kthread `%s' (%d) exits with status %d\n",
		    name, l->l_lid, ecode);
	}

	/* Barrier for joining. */
	if (l->l_pflag & LP_MUSTJOIN) {
		bool *exitedp;

		mutex_enter(&kthread_lock);
		while ((exitedp = l->l_private) == NULL) {
			cv_wait(&kthread_cv, &kthread_lock);
		}
		KASSERT(!*exitedp);
		*exitedp = true;
		cv_broadcast(&kthread_cv);
		mutex_exit(&kthread_lock);
	}

	/* If the kernel lock is held, we need to drop it now. */
	if ((l->l_pflag & LP_MPSAFE) == 0) {
		KERNEL_UNLOCK_LAST(l);
	}

	/* And exit. */
	lwp_exit(l);
	panic("kthread_exit");
}
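
/*
 * Usage sketch (illustrative, not part of the original file): a thread
 * entry point normally finishes by calling kthread_exit() rather than
 * returning.  `my_softc', `sc_dying' and my_work() are hypothetical.
 *
 *	static void
 *	my_worker(void *arg)
 *	{
 *		struct my_softc *sc = arg;
 *
 *		while (!sc->sc_dying)
 *			my_work(sc);
 *		kthread_exit(0);
 *	}
 */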

/*
 * Wait for a kthread to exit, like pthread_join().
 */
int
kthread_join(lwp_t *l)
{
	bool exited = false;

	KASSERT((l->l_flag & LW_SYSTEM) != 0);
	KASSERT((l->l_pflag & LP_MUSTJOIN) != 0);

	/*
	 * - Ask the kthread to write to `exited'.
	 * - After this, touching l is forbidden -- it may be freed.
	 * - Wait until the kthread has written to `exited'.
	 */
	mutex_enter(&kthread_lock);
	KASSERT(l->l_private == NULL);
	l->l_private = &exited;
	cv_broadcast(&kthread_cv);
	while (!exited) {
		cv_wait(&kthread_cv, &kthread_lock);
	}
	mutex_exit(&kthread_lock);

	return 0;
}
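
/*
 * Usage sketch (illustrative, not part of the original file): pairing
 * KTHREAD_MUSTJOIN with kthread_join() for an orderly shutdown.  The
 * softc fields are hypothetical.
 *
 *	error = kthread_create(PRI_NONE, KTHREAD_MPSAFE | KTHREAD_MUSTJOIN,
 *	    NULL, my_worker, sc, &sc->sc_lwp, "myworker");
 *	...
 *	sc->sc_dying = true;		-- ask the worker to stop
 *	(void)kthread_join(sc->sc_lwp);	-- wait for its kthread_exit()
 */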

/*
 * kthread_fpu_enter()
 *
 *	Allow the current lwp, which must be a kthread, to use the FPU.
 *	Return a cookie that must be passed to kthread_fpu_exit when
 *	done.  Must be used only in thread context.  Recursive -- you
 *	can call kthread_fpu_enter several times in a row as long as
 *	you pass the cookies in reverse order to kthread_fpu_exit.
 */
int
kthread_fpu_enter(void)
{
	struct lwp *l = curlwp;
	int s;

	KASSERTMSG(!cpu_intr_p(),
	    "%s is not allowed in interrupt context", __func__);
	KASSERTMSG(!cpu_softintr_p(),
	    "%s is not allowed in interrupt context", __func__);

	/*
	 * Remember whether this thread already had FPU access, and
	 * mark this thread as having FPU access.
	 */
	lwp_lock(l);
	KASSERTMSG(l->l_flag & LW_SYSTEM,
	    "%s is allowed only in kthreads", __func__);
	s = l->l_flag & LW_SYSTEM_FPU;
	l->l_flag |= LW_SYSTEM_FPU;
	lwp_unlock(l);

	/* Take MD steps to enable the FPU if necessary.  */
	if (s == 0)
		kthread_fpu_enter_md();

	return s;
}

/*
 * kthread_fpu_exit(s)
 *
 *	Restore the current lwp's FPU access to what it was before the
 *	matching call to kthread_fpu_enter() that returned s.  Must be
 *	used only in thread context.
 */
void
kthread_fpu_exit(int s)
{
	struct lwp *l = curlwp;

	KASSERT(s == (s & LW_SYSTEM_FPU));
	KASSERTMSG(!cpu_intr_p(),
	    "%s is not allowed in interrupt context", __func__);
	KASSERTMSG(!cpu_softintr_p(),
	    "%s is not allowed in interrupt context", __func__);

	lwp_lock(l);
	KASSERTMSG(l->l_flag & LW_SYSTEM,
	    "%s is allowed only in kthreads", __func__);
	KASSERT(l->l_flag & LW_SYSTEM_FPU);
	/* Restore the previous state: clear LW_SYSTEM_FPU iff s == 0. */
	l->l_flag ^= s ^ LW_SYSTEM_FPU;
	lwp_unlock(l);

	/* Take MD steps to zero and disable the FPU if necessary.  */
	if (s == 0)
		kthread_fpu_exit_md();
}

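/*
 * Usage sketch (illustrative, not part of the original file):
 * bracketing FPU use in a kthread.  Calls may nest, provided the
 * cookies are passed back in reverse order; fpu_using_work() is a
 * hypothetical SIMD routine.
 *
 *	int s;
 *
 *	s = kthread_fpu_enter();
 *	fpu_using_work(buf, len);	-- may touch FPU/SIMD registers
 *	kthread_fpu_exit(s);
 */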