/*	$NetBSD: kern_kthread.c,v 1.46 2020/08/01 02:04:55 riastradh Exp $	*/

/*-
 * Copyright (c) 1998, 1999, 2007, 2009, 2019 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_kthread.c,v 1.46 2020/08/01 02:04:55 riastradh Exp $");

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/mutex.h>
#include <sys/sched.h>
#include <sys/kmem.h>

#include <uvm/uvm_extern.h>

static lwp_t *		kthread_jtarget;
static kmutex_t		kthread_lock;
static kcondvar_t	kthread_cv;

void
kthread_sysinit(void)
{

	mutex_init(&kthread_lock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&kthread_cv, "kthrwait");
	kthread_jtarget = NULL;
}

/*
 * kthread_create: create a kernel thread, that is, a system-only LWP.
 */
int
kthread_create(pri_t pri, int flag, struct cpu_info *ci,
    void (*func)(void *), void *arg, lwp_t **lp, const char *fmt, ...)
{
	lwp_t *l;
	vaddr_t uaddr;
	int error, lc;
	va_list ap;

	KASSERT((flag & KTHREAD_INTR) == 0 || (flag & KTHREAD_MPSAFE) != 0);

	uaddr = uvm_uarea_system_alloc(
	   (flag & (KTHREAD_INTR|KTHREAD_IDLE)) == KTHREAD_IDLE ? ci : NULL);
	if (uaddr == 0) {
		return ENOMEM;
	}
	if ((flag & KTHREAD_TS) != 0) {
		lc = SCHED_OTHER;
	} else {
		lc = SCHED_RR;
	}

	error = lwp_create(&lwp0, &proc0, uaddr, LWP_DETACHED, NULL,
	    0, func, arg, &l, lc, &lwp0.l_sigmask, &lwp0.l_sigstk);
	if (error) {
		uvm_uarea_system_free(uaddr);
		return error;
	}
	if (fmt != NULL) {
		l->l_name = kmem_alloc(MAXCOMLEN, KM_SLEEP);
		va_start(ap, fmt);
		vsnprintf(l->l_name, MAXCOMLEN, fmt, ap);
		va_end(ap);
	}

	/*
	 * Set parameters.
	 */
	if (pri == PRI_NONE) {
		if ((flag & KTHREAD_TS) != 0) {
			/* Maximum user priority level. */
			pri = MAXPRI_USER;
		} else {
			/* Minimum kernel priority level. */
			pri = PRI_KTHREAD;
		}
	}
	mutex_enter(proc0.p_lock);
	lwp_lock(l);
	lwp_changepri(l, pri);
	if (ci != NULL) {
		if (ci != l->l_cpu) {
			lwp_unlock_to(l, ci->ci_schedstate.spc_lwplock);
			lwp_lock(l);
		}
		l->l_pflag |= LP_BOUND;
		l->l_cpu = ci;
	}

	if ((flag & KTHREAD_MUSTJOIN) != 0) {
		KASSERT(lp != NULL);
		l->l_pflag |= LP_MUSTJOIN;
	}
	if ((flag & KTHREAD_INTR) != 0) {
		l->l_pflag |= LP_INTR;
	}
	if ((flag & KTHREAD_MPSAFE) == 0) {
		l->l_pflag &= ~LP_MPSAFE;
	}

	/*
	 * Set the new LWP running, unless the caller has requested
	 * otherwise.
	 */
	KASSERT(l->l_stat == LSIDL);
	if ((flag & KTHREAD_IDLE) == 0) {
		setrunnable(l);
		/* LWP now unlocked */
	} else {
		lwp_unlock(l);
	}
	mutex_exit(proc0.p_lock);

	/* All done! */
	if (lp != NULL) {
		*lp = l;
	}
	return 0;
}
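
/*
 * Example (illustrative sketch, not part of this file): creating an
 * MP-safe, joinable worker thread, e.g. from a driver attach routine.
 * The names example_worker, sc, and sc->sc_lwp are hypothetical.
 *
 *	error = kthread_create(PRI_NONE,
 *	    KTHREAD_MPSAFE | KTHREAD_MUSTJOIN, NULL,
 *	    example_worker, sc, &sc->sc_lwp, "examplewk");
 *	if (error != 0)
 *		return error;
 *
 * KTHREAD_MUSTJOIN requires a non-NULL lp so the caller holds an lwp_t
 * pointer to pass to kthread_join() later.
 */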

/*
 * Cause a kernel thread to exit.  Assumes the exiting thread is the
 * current context.
 */
void
kthread_exit(int ecode)
{
	const char *name;
	lwp_t *l = curlwp;

	/* We can't do much with the exit code, so just report it. */
	if (ecode != 0) {
		if ((name = l->l_name) == NULL)
			name = "unnamed";
		printf("WARNING: kthread `%s' (%d) exits with status %d\n",
		    name, l->l_lid, ecode);
	}

	/* Barrier for joining. */
	if (l->l_pflag & LP_MUSTJOIN) {
		mutex_enter(&kthread_lock);
		while (kthread_jtarget != l) {
			cv_wait(&kthread_cv, &kthread_lock);
		}
		kthread_jtarget = NULL;
		cv_broadcast(&kthread_cv);
		mutex_exit(&kthread_lock);
	}

	/* If the kernel lock is held, we need to drop it now. */
	if ((l->l_pflag & LP_MPSAFE) == 0) {
		KERNEL_UNLOCK_LAST(l);
	}

	/* And exit. */
	lwp_exit(l);
	panic("kthread_exit");
}
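
/*
 * Example (illustrative sketch): the body of the hypothetical worker
 * created above; sc, sc->sc_lock, sc->sc_cv, and sc->sc_dying are
 * assumed driver state.  Because the thread was created with
 * KTHREAD_MUSTJOIN, kthread_exit() blocks in the join barrier above
 * until the creator calls kthread_join().
 *
 *	static void
 *	example_worker(void *arg)
 *	{
 *		struct example_softc *sc = arg;
 *
 *		mutex_enter(&sc->sc_lock);
 *		while (!sc->sc_dying) {
 *			// do work, then cv_wait(&sc->sc_cv, &sc->sc_lock)
 *		}
 *		mutex_exit(&sc->sc_lock);
 *		kthread_exit(0);
 *	}
 */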

/*
 * Wait for a kthread to exit, like pthread_join().
 */
int
kthread_join(lwp_t *l)
{

	KASSERT((l->l_flag & LW_SYSTEM) != 0);
	KASSERT((l->l_pflag & LP_MUSTJOIN) != 0);

	/*
	 * - Wait if some other thread has occupied the target.
	 * - Specify our kthread as a target and notify it.
	 * - Wait for the target kthread to notify us.
	 */
	mutex_enter(&kthread_lock);
	while (kthread_jtarget) {
		cv_wait(&kthread_cv, &kthread_lock);
	}
	kthread_jtarget = l;
	cv_broadcast(&kthread_cv);
	while (kthread_jtarget == l) {
		cv_wait(&kthread_cv, &kthread_lock);
	}
	mutex_exit(&kthread_lock);

	return 0;
}
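
/*
 * Example (illustrative sketch): tearing down the hypothetical worker,
 * e.g. from a detach routine.  The creator signals the worker to stop,
 * then joins it; only after kthread_join() returns has the LWP passed
 * the exit barrier.
 *
 *	mutex_enter(&sc->sc_lock);
 *	sc->sc_dying = true;
 *	cv_broadcast(&sc->sc_cv);
 *	mutex_exit(&sc->sc_lock);
 *	(void)kthread_join(sc->sc_lwp);
 */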

/*
 * kthread_fpu_enter()
 *
 *	Allow the current lwp, which must be a kthread, to use the FPU.
 *	Return a cookie that must be passed to kthread_fpu_exit when
 *	done.  Must be used only in thread context.  Recursive -- you
 *	can call kthread_fpu_enter several times in a row as long as
 *	you pass the cookies in reverse order to kthread_fpu_exit.
 */
int
kthread_fpu_enter(void)
{
	struct lwp *l = curlwp;
	int s;

	KASSERTMSG(!cpu_intr_p(),
	    "%s is not allowed in interrupt context", __func__);
	KASSERTMSG(!cpu_softintr_p(),
	    "%s is not allowed in interrupt context", __func__);

	/*
	 * Remember whether this thread already had FPU access, and
	 * mark this thread as having FPU access.
	 */
	lwp_lock(l);
	KASSERTMSG(l->l_flag & LW_SYSTEM,
	    "%s is allowed only in kthreads", __func__);
	s = l->l_flag & LW_SYSTEM_FPU;
	l->l_flag |= LW_SYSTEM_FPU;
	lwp_unlock(l);

	/* Take MD steps to enable the FPU if necessary.  */
	if (s == 0)
		kthread_fpu_enter_md();

	return s;
}

/*
 * kthread_fpu_exit(s)
 *
 *	Restore the current lwp's FPU access to what it was before the
 *	matching call to kthread_fpu_enter() that returned s.  Must be
 *	used only in thread context.
 */
void
kthread_fpu_exit(int s)
{
	struct lwp *l = curlwp;

	KASSERT(s == (s & LW_SYSTEM_FPU));
	KASSERTMSG(!cpu_intr_p(),
	    "%s is not allowed in interrupt context", __func__);
	KASSERTMSG(!cpu_softintr_p(),
	    "%s is not allowed in interrupt context", __func__);

	lwp_lock(l);
	KASSERTMSG(l->l_flag & LW_SYSTEM,
	    "%s is allowed only in kthreads", __func__);
	KASSERT(l->l_flag & LW_SYSTEM_FPU);
	l->l_flag ^= s ^ LW_SYSTEM_FPU;
	lwp_unlock(l);

	/* Take MD steps to zero and disable the FPU if necessary.  */
	if (s == 0)
		kthread_fpu_exit_md();
}

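/*
 * Example (illustrative sketch): bracketing FPU use in a hypothetical
 * kthread, e.g. one computing checksums with SIMD instructions.  Calls
 * may nest, as long as the cookies are passed back in reverse order.
 *
 *	int s;
 *
 *	s = kthread_fpu_enter();
 *	// use FPU/SIMD registers here
 *	kthread_fpu_exit(s);
 */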