Deleted Added
full compact
kern_condvar.c (73925) kern_condvar.c (74912)
1/*-
2 * Copyright (c) 2000 Jake Burkholder <jake@freebsd.org>.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 *
1/*-
2 * Copyright (c) 2000 Jake Burkholder <jake@freebsd.org>.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 *
26 * $FreeBSD: head/sys/kern/kern_condvar.c 73925 2001-03-07 03:26:39Z jhb $
26 * $FreeBSD: head/sys/kern/kern_condvar.c 74912 2001-03-28 09:03:24Z jhb $
27 */
28
29#include "opt_ktrace.h"
30
31#include <sys/param.h>
32#include <sys/systm.h>
33#include <sys/proc.h>
34#include <sys/kernel.h>
35#include <sys/ktr.h>
36#include <sys/condvar.h>
37#include <sys/mutex.h>
38#include <sys/signalvar.h>
39#include <sys/resourcevar.h>
40#ifdef KTRACE
41#include <sys/uio.h>
42#include <sys/ktrace.h>
43#endif
44
/*
 * Common sanity checks for cv_wait* functions: the caller must be a valid,
 * runnable process and must hold (without recursion) the mutex associated
 * with the condition variable.
 */
#define	CV_ASSERT(cvp, mp, p) do {					\
	KASSERT((p) != NULL, ("%s: curproc NULL", __FUNCTION__));	\
	KASSERT((p)->p_stat == SRUN, ("%s: not SRUN", __FUNCTION__));	\
	KASSERT((cvp) != NULL, ("%s: cvp NULL", __FUNCTION__));		\
	KASSERT((mp) != NULL, ("%s: mp NULL", __FUNCTION__));		\
	mtx_assert((mp), MA_OWNED | MA_NOTRECURSED);			\
} while (0)

#ifdef CV_DEBUG
#define	CV_WAIT_VALIDATE(cvp, mp) do {					\
	if (TAILQ_EMPTY(&(cvp)->cv_waitq)) {				\
		/* Only waiter. */					\
		(cvp)->cv_mtx = (mp);					\
	} else {							\
		/*							\
		 * Other waiter; assert that we're using the		\
		 * same mutex.						\
		 */							\
		KASSERT((cvp)->cv_mtx == (mp),				\
		    ("%s: Multiple mutexes", __FUNCTION__));		\
	}								\
} while (0)
#define	CV_SIGNAL_VALIDATE(cvp) do {					\
	if (!TAILQ_EMPTY(&(cvp)->cv_waitq)) {				\
		KASSERT(mtx_owned((cvp)->cv_mtx),			\
		    ("%s: Mutex not owned", __FUNCTION__));		\
	}								\
} while (0)
#else
#define	CV_WAIT_VALIDATE(cvp, mp)
#define	CV_SIGNAL_VALIDATE(cvp)
#endif

static void	cv_timedwait_end(void *arg);
82
83/*
84 * Initialize a condition variable. Must be called before use.
85 */
86void
87cv_init(struct cv *cvp, const char *desc)
88{
89
90 TAILQ_INIT(&cvp->cv_waitq);
91 cvp->cv_mtx = NULL;
92 cvp->cv_description = desc;
93}
94
/*
 * Destroy a condition variable.  The condition variable must be re-initialized
 * in order to be re-used.
 */
void
cv_destroy(struct cv *cvp)
{

	/* No waiters may remain; there is no queue teardown beyond this check. */
	KASSERT(cv_waitq_empty(cvp), ("%s: cv_waitq non-empty", __FUNCTION__));
}
105
106/*
107 * Common code for cv_wait* functions. All require sched_lock.
108 */
109
110/*
111 * Switch context.
112 */
113static __inline void
114cv_switch(struct proc *p)
115{
116
117 p->p_stat = SSLEEP;
118 p->p_stats->p_ru.ru_nvcsw++;
119 mi_switch();
120 CTR3(KTR_PROC, "cv_switch: resume proc %p (pid %d, %s)", p, p->p_pid,
121 p->p_comm);
122}
123
124/*
125 * Switch context, catching signals.
126 */
127static __inline int
128cv_switch_catch(struct proc *p)
129{
130 int sig;
131
132 /*
133 * We put ourselves on the sleep queue and start our timeout before
134 * calling CURSIG, as we could stop there, and a wakeup or a SIGCONT (or
135 * both) could occur while we were stopped. A SIGCONT would cause us to
136 * be marked as SSLEEP without resuming us, thus we must be ready for
137 * sleep when CURSIG is called. If the wakeup happens while we're
138 * stopped, p->p_wchan will be 0 upon return from CURSIG.
139 */
140 p->p_sflag |= PS_SINTR;
141 mtx_unlock_spin(&sched_lock);
142 sig = CURSIG(p);
143 mtx_lock_spin(&sched_lock);
144 if (sig != 0) {
145 if (p->p_wchan != NULL)
146 cv_waitq_remove(p);
147 p->p_stat = SRUN;
148 } else if (p->p_wchan != NULL) {
149 cv_switch(p);
150 }
151 p->p_sflag &= ~PS_SINTR;
152
153 return sig;
154}
155
156/*
157 * Add a process to the wait queue of a condition variable.
158 */
159static __inline void
160cv_waitq_add(struct cv *cvp, struct proc *p)
161{
162
163 /*
164 * Process may be sitting on a slpque if asleep() was called, remove it
165 * before re-adding.
166 */
167 if (p->p_wchan != NULL)
168 unsleep(p);
169
170 p->p_sflag |= PS_CVWAITQ;
171 p->p_wchan = cvp;
172 p->p_wmesg = cvp->cv_description;
173 p->p_slptime = 0;
174 p->p_pri.pri_native = p->p_pri.pri_level;
175 CTR3(KTR_PROC, "cv_waitq_add: proc %p (pid %d, %s)", p, p->p_pid,
176 p->p_comm);
177 TAILQ_INSERT_TAIL(&cvp->cv_waitq, p, p_slpq);
178}
179
180/*
181 * Wait on a condition variable. The current process is placed on the condition
182 * variable's wait queue and suspended. A cv_signal or cv_broadcast on the same
183 * condition variable will resume the process. The mutex is released before
184 * sleeping and will be held on return. It is recommended that the mutex be
185 * held when cv_signal or cv_broadcast are called.
186 */
187void
188cv_wait(struct cv *cvp, struct mtx *mp)
189{
190 struct proc *p;
191 WITNESS_SAVE_DECL(mp);
192
193 p = CURPROC;
194#ifdef KTRACE
195 if (p && KTRPOINT(p, KTR_CSW))
196 ktrcsw(p->p_tracep, 1, 0);
197#endif
198 CV_ASSERT(cvp, mp, p);
199 WITNESS_SLEEP(0, mp);
27 */
28
29#include "opt_ktrace.h"
30
31#include <sys/param.h>
32#include <sys/systm.h>
33#include <sys/proc.h>
34#include <sys/kernel.h>
35#include <sys/ktr.h>
36#include <sys/condvar.h>
37#include <sys/mutex.h>
38#include <sys/signalvar.h>
39#include <sys/resourcevar.h>
40#ifdef KTRACE
41#include <sys/uio.h>
42#include <sys/ktrace.h>
43#endif
44
/*
 * Common sanity checks for cv_wait* functions: the caller must be a valid,
 * runnable process and must hold (without recursion) the mutex associated
 * with the condition variable.
 */
#define	CV_ASSERT(cvp, mp, p) do {					\
	KASSERT((p) != NULL, ("%s: curproc NULL", __FUNCTION__));	\
	KASSERT((p)->p_stat == SRUN, ("%s: not SRUN", __FUNCTION__));	\
	KASSERT((cvp) != NULL, ("%s: cvp NULL", __FUNCTION__));		\
	KASSERT((mp) != NULL, ("%s: mp NULL", __FUNCTION__));		\
	mtx_assert((mp), MA_OWNED | MA_NOTRECURSED);			\
} while (0)

#ifdef CV_DEBUG
#define	CV_WAIT_VALIDATE(cvp, mp) do {					\
	if (TAILQ_EMPTY(&(cvp)->cv_waitq)) {				\
		/* Only waiter. */					\
		(cvp)->cv_mtx = (mp);					\
	} else {							\
		/*							\
		 * Other waiter; assert that we're using the		\
		 * same mutex.						\
		 */							\
		KASSERT((cvp)->cv_mtx == (mp),				\
		    ("%s: Multiple mutexes", __FUNCTION__));		\
	}								\
} while (0)
#define	CV_SIGNAL_VALIDATE(cvp) do {					\
	if (!TAILQ_EMPTY(&(cvp)->cv_waitq)) {				\
		KASSERT(mtx_owned((cvp)->cv_mtx),			\
		    ("%s: Mutex not owned", __FUNCTION__));		\
	}								\
} while (0)
#else
#define	CV_WAIT_VALIDATE(cvp, mp)
#define	CV_SIGNAL_VALIDATE(cvp)
#endif

static void	cv_timedwait_end(void *arg);
82
83/*
84 * Initialize a condition variable. Must be called before use.
85 */
86void
87cv_init(struct cv *cvp, const char *desc)
88{
89
90 TAILQ_INIT(&cvp->cv_waitq);
91 cvp->cv_mtx = NULL;
92 cvp->cv_description = desc;
93}
94
/*
 * Destroy a condition variable.  The condition variable must be re-initialized
 * in order to be re-used.
 */
void
cv_destroy(struct cv *cvp)
{

	/* No waiters may remain; there is no queue teardown beyond this check. */
	KASSERT(cv_waitq_empty(cvp), ("%s: cv_waitq non-empty", __FUNCTION__));
}
105
106/*
107 * Common code for cv_wait* functions. All require sched_lock.
108 */
109
110/*
111 * Switch context.
112 */
113static __inline void
114cv_switch(struct proc *p)
115{
116
117 p->p_stat = SSLEEP;
118 p->p_stats->p_ru.ru_nvcsw++;
119 mi_switch();
120 CTR3(KTR_PROC, "cv_switch: resume proc %p (pid %d, %s)", p, p->p_pid,
121 p->p_comm);
122}
123
124/*
125 * Switch context, catching signals.
126 */
127static __inline int
128cv_switch_catch(struct proc *p)
129{
130 int sig;
131
132 /*
133 * We put ourselves on the sleep queue and start our timeout before
134 * calling CURSIG, as we could stop there, and a wakeup or a SIGCONT (or
135 * both) could occur while we were stopped. A SIGCONT would cause us to
136 * be marked as SSLEEP without resuming us, thus we must be ready for
137 * sleep when CURSIG is called. If the wakeup happens while we're
138 * stopped, p->p_wchan will be 0 upon return from CURSIG.
139 */
140 p->p_sflag |= PS_SINTR;
141 mtx_unlock_spin(&sched_lock);
142 sig = CURSIG(p);
143 mtx_lock_spin(&sched_lock);
144 if (sig != 0) {
145 if (p->p_wchan != NULL)
146 cv_waitq_remove(p);
147 p->p_stat = SRUN;
148 } else if (p->p_wchan != NULL) {
149 cv_switch(p);
150 }
151 p->p_sflag &= ~PS_SINTR;
152
153 return sig;
154}
155
156/*
157 * Add a process to the wait queue of a condition variable.
158 */
159static __inline void
160cv_waitq_add(struct cv *cvp, struct proc *p)
161{
162
163 /*
164 * Process may be sitting on a slpque if asleep() was called, remove it
165 * before re-adding.
166 */
167 if (p->p_wchan != NULL)
168 unsleep(p);
169
170 p->p_sflag |= PS_CVWAITQ;
171 p->p_wchan = cvp;
172 p->p_wmesg = cvp->cv_description;
173 p->p_slptime = 0;
174 p->p_pri.pri_native = p->p_pri.pri_level;
175 CTR3(KTR_PROC, "cv_waitq_add: proc %p (pid %d, %s)", p, p->p_pid,
176 p->p_comm);
177 TAILQ_INSERT_TAIL(&cvp->cv_waitq, p, p_slpq);
178}
179
180/*
181 * Wait on a condition variable. The current process is placed on the condition
182 * variable's wait queue and suspended. A cv_signal or cv_broadcast on the same
183 * condition variable will resume the process. The mutex is released before
184 * sleeping and will be held on return. It is recommended that the mutex be
185 * held when cv_signal or cv_broadcast are called.
186 */
187void
188cv_wait(struct cv *cvp, struct mtx *mp)
189{
190 struct proc *p;
191 WITNESS_SAVE_DECL(mp);
192
193 p = CURPROC;
194#ifdef KTRACE
195 if (p && KTRPOINT(p, KTR_CSW))
196 ktrcsw(p->p_tracep, 1, 0);
197#endif
198 CV_ASSERT(cvp, mp, p);
199 WITNESS_SLEEP(0, mp);
200 WITNESS_SAVE(mp, mp);
200 WITNESS_SAVE(&mp->mtx_object, mp);
201
202 mtx_lock_spin(&sched_lock);
203 if (cold || panicstr) {
204 /*
205 * After a panic, or during autoconfiguration, just give
206 * interrupts a chance, then just return; don't run any other
207 * procs or panic below, in case this is the idle process and
208 * already asleep.
209 */
210 mtx_unlock_spin(&sched_lock);
211 return;
212 }
213 CV_WAIT_VALIDATE(cvp, mp);
214
215 DROP_GIANT_NOSWITCH();
216 mtx_unlock_flags(mp, MTX_NOSWITCH);
217
218 cv_waitq_add(cvp, p);
219 cv_switch(p);
220
221 mtx_unlock_spin(&sched_lock);
222#ifdef KTRACE
223 if (KTRPOINT(p, KTR_CSW))
224 ktrcsw(p->p_tracep, 0, 0);
225#endif
226 PICKUP_GIANT();
227 mtx_lock(mp);
201
202 mtx_lock_spin(&sched_lock);
203 if (cold || panicstr) {
204 /*
205 * After a panic, or during autoconfiguration, just give
206 * interrupts a chance, then just return; don't run any other
207 * procs or panic below, in case this is the idle process and
208 * already asleep.
209 */
210 mtx_unlock_spin(&sched_lock);
211 return;
212 }
213 CV_WAIT_VALIDATE(cvp, mp);
214
215 DROP_GIANT_NOSWITCH();
216 mtx_unlock_flags(mp, MTX_NOSWITCH);
217
218 cv_waitq_add(cvp, p);
219 cv_switch(p);
220
221 mtx_unlock_spin(&sched_lock);
222#ifdef KTRACE
223 if (KTRPOINT(p, KTR_CSW))
224 ktrcsw(p->p_tracep, 0, 0);
225#endif
226 PICKUP_GIANT();
227 mtx_lock(mp);
228 WITNESS_RESTORE(mp, mp);
228 WITNESS_RESTORE(&mp->mtx_object, mp);
229}
230
231/*
232 * Wait on a condition variable, allowing interruption by signals. Return 0 if
233 * the process was resumed with cv_signal or cv_broadcast, EINTR or ERESTART if
234 * a signal was caught. If ERESTART is returned the system call should be
235 * restarted if possible.
236 */
237int
238cv_wait_sig(struct cv *cvp, struct mtx *mp)
239{
240 struct proc *p;
241 int rval;
242 int sig;
243 WITNESS_SAVE_DECL(mp);
244
245 p = CURPROC;
246 rval = 0;
247#ifdef KTRACE
248 if (p && KTRPOINT(p, KTR_CSW))
249 ktrcsw(p->p_tracep, 1, 0);
250#endif
251 CV_ASSERT(cvp, mp, p);
252 WITNESS_SLEEP(0, mp);
229}
230
231/*
232 * Wait on a condition variable, allowing interruption by signals. Return 0 if
233 * the process was resumed with cv_signal or cv_broadcast, EINTR or ERESTART if
234 * a signal was caught. If ERESTART is returned the system call should be
235 * restarted if possible.
236 */
237int
238cv_wait_sig(struct cv *cvp, struct mtx *mp)
239{
240 struct proc *p;
241 int rval;
242 int sig;
243 WITNESS_SAVE_DECL(mp);
244
245 p = CURPROC;
246 rval = 0;
247#ifdef KTRACE
248 if (p && KTRPOINT(p, KTR_CSW))
249 ktrcsw(p->p_tracep, 1, 0);
250#endif
251 CV_ASSERT(cvp, mp, p);
252 WITNESS_SLEEP(0, mp);
253 WITNESS_SAVE(mp, mp);
253 WITNESS_SAVE(&mp->mtx_object, mp);
254
255 mtx_lock_spin(&sched_lock);
256 if (cold || panicstr) {
257 /*
258 * After a panic, or during autoconfiguration, just give
259 * interrupts a chance, then just return; don't run any other
260 * procs or panic below, in case this is the idle process and
261 * already asleep.
262 */
263 mtx_unlock_spin(&sched_lock);
264 return 0;
265 }
266 CV_WAIT_VALIDATE(cvp, mp);
267
268 DROP_GIANT_NOSWITCH();
269 mtx_unlock_flags(mp, MTX_NOSWITCH);
270
271 cv_waitq_add(cvp, p);
272 sig = cv_switch_catch(p);
273
274 mtx_unlock_spin(&sched_lock);
275 PICKUP_GIANT();
276
277 if (sig == 0)
278 sig = CURSIG(p);
279 if (sig != 0) {
280 PROC_LOCK(p);
281 if (SIGISMEMBER(p->p_sigacts->ps_sigintr, sig))
282 rval = EINTR;
283 else
284 rval = ERESTART;
285 PROC_UNLOCK(p);
286 }
287
288#ifdef KTRACE
289 if (KTRPOINT(p, KTR_CSW))
290 ktrcsw(p->p_tracep, 0, 0);
291#endif
292 mtx_lock(mp);
254
255 mtx_lock_spin(&sched_lock);
256 if (cold || panicstr) {
257 /*
258 * After a panic, or during autoconfiguration, just give
259 * interrupts a chance, then just return; don't run any other
260 * procs or panic below, in case this is the idle process and
261 * already asleep.
262 */
263 mtx_unlock_spin(&sched_lock);
264 return 0;
265 }
266 CV_WAIT_VALIDATE(cvp, mp);
267
268 DROP_GIANT_NOSWITCH();
269 mtx_unlock_flags(mp, MTX_NOSWITCH);
270
271 cv_waitq_add(cvp, p);
272 sig = cv_switch_catch(p);
273
274 mtx_unlock_spin(&sched_lock);
275 PICKUP_GIANT();
276
277 if (sig == 0)
278 sig = CURSIG(p);
279 if (sig != 0) {
280 PROC_LOCK(p);
281 if (SIGISMEMBER(p->p_sigacts->ps_sigintr, sig))
282 rval = EINTR;
283 else
284 rval = ERESTART;
285 PROC_UNLOCK(p);
286 }
287
288#ifdef KTRACE
289 if (KTRPOINT(p, KTR_CSW))
290 ktrcsw(p->p_tracep, 0, 0);
291#endif
292 mtx_lock(mp);
293 WITNESS_RESTORE(mp, mp);
293 WITNESS_RESTORE(&mp->mtx_object, mp);
294
295 return (rval);
296}
297
298/*
299 * Wait on a condition variable for at most timo/hz seconds. Returns 0 if the
300 * process was resumed by cv_signal or cv_broadcast, EWOULDBLOCK if the timeout
301 * expires.
302 */
303int
304cv_timedwait(struct cv *cvp, struct mtx *mp, int timo)
305{
306 struct proc *p;
307 int rval;
308 WITNESS_SAVE_DECL(mp);
309
310 p = CURPROC;
311 rval = 0;
312#ifdef KTRACE
313 if (p && KTRPOINT(p, KTR_CSW))
314 ktrcsw(p->p_tracep, 1, 0);
315#endif
316 CV_ASSERT(cvp, mp, p);
317 WITNESS_SLEEP(0, mp);
294
295 return (rval);
296}
297
298/*
299 * Wait on a condition variable for at most timo/hz seconds. Returns 0 if the
300 * process was resumed by cv_signal or cv_broadcast, EWOULDBLOCK if the timeout
301 * expires.
302 */
303int
304cv_timedwait(struct cv *cvp, struct mtx *mp, int timo)
305{
306 struct proc *p;
307 int rval;
308 WITNESS_SAVE_DECL(mp);
309
310 p = CURPROC;
311 rval = 0;
312#ifdef KTRACE
313 if (p && KTRPOINT(p, KTR_CSW))
314 ktrcsw(p->p_tracep, 1, 0);
315#endif
316 CV_ASSERT(cvp, mp, p);
317 WITNESS_SLEEP(0, mp);
318 WITNESS_SAVE(mp, mp);
318 WITNESS_SAVE(&mp->mtx_object, mp);
319
320 mtx_lock_spin(&sched_lock);
321 if (cold || panicstr) {
322 /*
323 * After a panic, or during autoconfiguration, just give
324 * interrupts a chance, then just return; don't run any other
325 * procs or panic below, in case this is the idle process and
326 * already asleep.
327 */
328 mtx_unlock_spin(&sched_lock);
329 return 0;
330 }
331 CV_WAIT_VALIDATE(cvp, mp);
332
333 DROP_GIANT_NOSWITCH();
334 mtx_unlock_flags(mp, MTX_NOSWITCH);
335
336 cv_waitq_add(cvp, p);
337 callout_reset(&p->p_slpcallout, timo, cv_timedwait_end, p);
338 cv_switch(p);
339
340 if (p->p_sflag & PS_TIMEOUT) {
341 p->p_sflag &= ~PS_TIMEOUT;
342 rval = EWOULDBLOCK;
343 } else
344 callout_stop(&p->p_slpcallout);
345
346 mtx_unlock_spin(&sched_lock);
347#ifdef KTRACE
348 if (KTRPOINT(p, KTR_CSW))
349 ktrcsw(p->p_tracep, 0, 0);
350#endif
351 PICKUP_GIANT();
352 mtx_lock(mp);
319
320 mtx_lock_spin(&sched_lock);
321 if (cold || panicstr) {
322 /*
323 * After a panic, or during autoconfiguration, just give
324 * interrupts a chance, then just return; don't run any other
325 * procs or panic below, in case this is the idle process and
326 * already asleep.
327 */
328 mtx_unlock_spin(&sched_lock);
329 return 0;
330 }
331 CV_WAIT_VALIDATE(cvp, mp);
332
333 DROP_GIANT_NOSWITCH();
334 mtx_unlock_flags(mp, MTX_NOSWITCH);
335
336 cv_waitq_add(cvp, p);
337 callout_reset(&p->p_slpcallout, timo, cv_timedwait_end, p);
338 cv_switch(p);
339
340 if (p->p_sflag & PS_TIMEOUT) {
341 p->p_sflag &= ~PS_TIMEOUT;
342 rval = EWOULDBLOCK;
343 } else
344 callout_stop(&p->p_slpcallout);
345
346 mtx_unlock_spin(&sched_lock);
347#ifdef KTRACE
348 if (KTRPOINT(p, KTR_CSW))
349 ktrcsw(p->p_tracep, 0, 0);
350#endif
351 PICKUP_GIANT();
352 mtx_lock(mp);
353 WITNESS_RESTORE(mp, mp);
353 WITNESS_RESTORE(&mp->mtx_object, mp);
354
355 return (rval);
356}
357
358/*
359 * Wait on a condition variable for at most timo/hz seconds, allowing
360 * interruption by signals. Returns 0 if the process was resumed by cv_signal
361 * or cv_broadcast, EWOULDBLOCK if the timeout expires, and EINTR or ERESTART if
362 * a signal was caught.
363 */
364int
365cv_timedwait_sig(struct cv *cvp, struct mtx *mp, int timo)
366{
367 struct proc *p;
368 int rval;
369 int sig;
370 WITNESS_SAVE_DECL(mp);
371
372 p = CURPROC;
373 rval = 0;
374#ifdef KTRACE
375 if (p && KTRPOINT(p, KTR_CSW))
376 ktrcsw(p->p_tracep, 1, 0);
377#endif
378 CV_ASSERT(cvp, mp, p);
379 WITNESS_SLEEP(0, mp);
354
355 return (rval);
356}
357
358/*
359 * Wait on a condition variable for at most timo/hz seconds, allowing
360 * interruption by signals. Returns 0 if the process was resumed by cv_signal
361 * or cv_broadcast, EWOULDBLOCK if the timeout expires, and EINTR or ERESTART if
362 * a signal was caught.
363 */
364int
365cv_timedwait_sig(struct cv *cvp, struct mtx *mp, int timo)
366{
367 struct proc *p;
368 int rval;
369 int sig;
370 WITNESS_SAVE_DECL(mp);
371
372 p = CURPROC;
373 rval = 0;
374#ifdef KTRACE
375 if (p && KTRPOINT(p, KTR_CSW))
376 ktrcsw(p->p_tracep, 1, 0);
377#endif
378 CV_ASSERT(cvp, mp, p);
379 WITNESS_SLEEP(0, mp);
380 WITNESS_SAVE(mp, mp);
380 WITNESS_SAVE(&mp->mtx_object, mp);
381
382 mtx_lock_spin(&sched_lock);
383 if (cold || panicstr) {
384 /*
385 * After a panic, or during autoconfiguration, just give
386 * interrupts a chance, then just return; don't run any other
387 * procs or panic below, in case this is the idle process and
388 * already asleep.
389 */
390 mtx_unlock_spin(&sched_lock);
391 return 0;
392 }
393 CV_WAIT_VALIDATE(cvp, mp);
394
395 DROP_GIANT_NOSWITCH();
396 mtx_unlock_flags(mp, MTX_NOSWITCH);
397
398 cv_waitq_add(cvp, p);
399 callout_reset(&p->p_slpcallout, timo, cv_timedwait_end, p);
400 sig = cv_switch_catch(p);
401
402 if (p->p_sflag & PS_TIMEOUT) {
403 p->p_sflag &= ~PS_TIMEOUT;
404 rval = EWOULDBLOCK;
405 } else
406 callout_stop(&p->p_slpcallout);
407
408 mtx_unlock_spin(&sched_lock);
409 PICKUP_GIANT();
410
411 if (sig == 0)
412 sig = CURSIG(p);
413 if (sig != 0) {
414 PROC_LOCK(p);
415 if (SIGISMEMBER(p->p_sigacts->ps_sigintr, sig))
416 rval = EINTR;
417 else
418 rval = ERESTART;
419 PROC_UNLOCK(p);
420 }
421
422#ifdef KTRACE
423 if (KTRPOINT(p, KTR_CSW))
424 ktrcsw(p->p_tracep, 0, 0);
425#endif
426 mtx_lock(mp);
381
382 mtx_lock_spin(&sched_lock);
383 if (cold || panicstr) {
384 /*
385 * After a panic, or during autoconfiguration, just give
386 * interrupts a chance, then just return; don't run any other
387 * procs or panic below, in case this is the idle process and
388 * already asleep.
389 */
390 mtx_unlock_spin(&sched_lock);
391 return 0;
392 }
393 CV_WAIT_VALIDATE(cvp, mp);
394
395 DROP_GIANT_NOSWITCH();
396 mtx_unlock_flags(mp, MTX_NOSWITCH);
397
398 cv_waitq_add(cvp, p);
399 callout_reset(&p->p_slpcallout, timo, cv_timedwait_end, p);
400 sig = cv_switch_catch(p);
401
402 if (p->p_sflag & PS_TIMEOUT) {
403 p->p_sflag &= ~PS_TIMEOUT;
404 rval = EWOULDBLOCK;
405 } else
406 callout_stop(&p->p_slpcallout);
407
408 mtx_unlock_spin(&sched_lock);
409 PICKUP_GIANT();
410
411 if (sig == 0)
412 sig = CURSIG(p);
413 if (sig != 0) {
414 PROC_LOCK(p);
415 if (SIGISMEMBER(p->p_sigacts->ps_sigintr, sig))
416 rval = EINTR;
417 else
418 rval = ERESTART;
419 PROC_UNLOCK(p);
420 }
421
422#ifdef KTRACE
423 if (KTRPOINT(p, KTR_CSW))
424 ktrcsw(p->p_tracep, 0, 0);
425#endif
426 mtx_lock(mp);
427 WITNESS_RESTORE(mp, mp);
427 WITNESS_RESTORE(&mp->mtx_object, mp);
428
429 return (rval);
430}
431
432/*
433 * Common code for signal and broadcast. Assumes waitq is not empty. Must be
434 * called with sched_lock held.
435 */
436static __inline void
437cv_wakeup(struct cv *cvp)
438{
439 struct proc *p;
440
441 mtx_assert(&sched_lock, MA_OWNED);
442 p = TAILQ_FIRST(&cvp->cv_waitq);
443 KASSERT(p->p_wchan == cvp, ("%s: bogus wchan", __FUNCTION__));
444 KASSERT(p->p_sflag & PS_CVWAITQ, ("%s: not on waitq", __FUNCTION__));
445 TAILQ_REMOVE(&cvp->cv_waitq, p, p_slpq);
446 p->p_sflag &= ~PS_CVWAITQ;
447 p->p_wchan = 0;
448 if (p->p_stat == SSLEEP) {
449 /* OPTIMIZED EXPANSION OF setrunnable(p); */
450 CTR3(KTR_PROC, "cv_signal: proc %p (pid %d, %s)",
451 p, p->p_pid, p->p_comm);
452 if (p->p_slptime > 1)
453 updatepri(p);
454 p->p_slptime = 0;
455 p->p_stat = SRUN;
456 if (p->p_sflag & PS_INMEM) {
457 setrunqueue(p);
458 maybe_resched(p);
459 } else {
460 p->p_sflag |= PS_SWAPINREQ;
461 wakeup(&proc0);
462 }
463 /* END INLINE EXPANSION */
464 }
465}
466
467/*
468 * Signal a condition variable, wakes up one waiting process. Will also wakeup
469 * the swapper if the process is not in memory, so that it can bring the
470 * sleeping process in. Note that this may also result in additional processes
471 * being made runnable. Should be called with the same mutex as was passed to
472 * cv_wait held.
473 */
474void
475cv_signal(struct cv *cvp)
476{
477
478 KASSERT(cvp != NULL, ("%s: cvp NULL", __FUNCTION__));
479 mtx_lock_spin(&sched_lock);
480 if (!TAILQ_EMPTY(&cvp->cv_waitq)) {
481 CV_SIGNAL_VALIDATE(cvp);
482 cv_wakeup(cvp);
483 }
484 mtx_unlock_spin(&sched_lock);
485}
486
487/*
488 * Broadcast a signal to a condition variable. Wakes up all waiting processes.
489 * Should be called with the same mutex as was passed to cv_wait held.
490 */
491void
492cv_broadcast(struct cv *cvp)
493{
494
495 KASSERT(cvp != NULL, ("%s: cvp NULL", __FUNCTION__));
496 mtx_lock_spin(&sched_lock);
497 CV_SIGNAL_VALIDATE(cvp);
498 while (!TAILQ_EMPTY(&cvp->cv_waitq))
499 cv_wakeup(cvp);
500 mtx_unlock_spin(&sched_lock);
501}
502
503/*
504 * Remove a process from the wait queue of its condition variable. This may be
505 * called externally.
506 */
507void
508cv_waitq_remove(struct proc *p)
509{
510 struct cv *cvp;
511
512 mtx_lock_spin(&sched_lock);
513 if ((cvp = p->p_wchan) != NULL && p->p_sflag & PS_CVWAITQ) {
514 TAILQ_REMOVE(&cvp->cv_waitq, p, p_slpq);
515 p->p_sflag &= ~PS_CVWAITQ;
516 p->p_wchan = NULL;
517 }
518 mtx_unlock_spin(&sched_lock);
519}
520
521/*
522 * Timeout function for cv_timedwait. Put the process on the runqueue and set
523 * its timeout flag.
524 */
525static void
526cv_timedwait_end(void *arg)
527{
528 struct proc *p;
529
530 p = arg;
531 CTR3(KTR_PROC, "cv_timedwait_end: proc %p (pid %d, %s)", p, p->p_pid,
532 p->p_comm);
533 mtx_lock_spin(&sched_lock);
534 if (p->p_wchan != NULL) {
535 if (p->p_stat == SSLEEP)
536 setrunnable(p);
537 else
538 cv_waitq_remove(p);
539 p->p_sflag |= PS_TIMEOUT;
540 }
541 mtx_unlock_spin(&sched_lock);
542}
428
429 return (rval);
430}
431
432/*
433 * Common code for signal and broadcast. Assumes waitq is not empty. Must be
434 * called with sched_lock held.
435 */
436static __inline void
437cv_wakeup(struct cv *cvp)
438{
439 struct proc *p;
440
441 mtx_assert(&sched_lock, MA_OWNED);
442 p = TAILQ_FIRST(&cvp->cv_waitq);
443 KASSERT(p->p_wchan == cvp, ("%s: bogus wchan", __FUNCTION__));
444 KASSERT(p->p_sflag & PS_CVWAITQ, ("%s: not on waitq", __FUNCTION__));
445 TAILQ_REMOVE(&cvp->cv_waitq, p, p_slpq);
446 p->p_sflag &= ~PS_CVWAITQ;
447 p->p_wchan = 0;
448 if (p->p_stat == SSLEEP) {
449 /* OPTIMIZED EXPANSION OF setrunnable(p); */
450 CTR3(KTR_PROC, "cv_signal: proc %p (pid %d, %s)",
451 p, p->p_pid, p->p_comm);
452 if (p->p_slptime > 1)
453 updatepri(p);
454 p->p_slptime = 0;
455 p->p_stat = SRUN;
456 if (p->p_sflag & PS_INMEM) {
457 setrunqueue(p);
458 maybe_resched(p);
459 } else {
460 p->p_sflag |= PS_SWAPINREQ;
461 wakeup(&proc0);
462 }
463 /* END INLINE EXPANSION */
464 }
465}
466
467/*
468 * Signal a condition variable, wakes up one waiting process. Will also wakeup
469 * the swapper if the process is not in memory, so that it can bring the
470 * sleeping process in. Note that this may also result in additional processes
471 * being made runnable. Should be called with the same mutex as was passed to
472 * cv_wait held.
473 */
474void
475cv_signal(struct cv *cvp)
476{
477
478 KASSERT(cvp != NULL, ("%s: cvp NULL", __FUNCTION__));
479 mtx_lock_spin(&sched_lock);
480 if (!TAILQ_EMPTY(&cvp->cv_waitq)) {
481 CV_SIGNAL_VALIDATE(cvp);
482 cv_wakeup(cvp);
483 }
484 mtx_unlock_spin(&sched_lock);
485}
486
487/*
488 * Broadcast a signal to a condition variable. Wakes up all waiting processes.
489 * Should be called with the same mutex as was passed to cv_wait held.
490 */
491void
492cv_broadcast(struct cv *cvp)
493{
494
495 KASSERT(cvp != NULL, ("%s: cvp NULL", __FUNCTION__));
496 mtx_lock_spin(&sched_lock);
497 CV_SIGNAL_VALIDATE(cvp);
498 while (!TAILQ_EMPTY(&cvp->cv_waitq))
499 cv_wakeup(cvp);
500 mtx_unlock_spin(&sched_lock);
501}
502
503/*
504 * Remove a process from the wait queue of its condition variable. This may be
505 * called externally.
506 */
507void
508cv_waitq_remove(struct proc *p)
509{
510 struct cv *cvp;
511
512 mtx_lock_spin(&sched_lock);
513 if ((cvp = p->p_wchan) != NULL && p->p_sflag & PS_CVWAITQ) {
514 TAILQ_REMOVE(&cvp->cv_waitq, p, p_slpq);
515 p->p_sflag &= ~PS_CVWAITQ;
516 p->p_wchan = NULL;
517 }
518 mtx_unlock_spin(&sched_lock);
519}
520
521/*
522 * Timeout function for cv_timedwait. Put the process on the runqueue and set
523 * its timeout flag.
524 */
525static void
526cv_timedwait_end(void *arg)
527{
528 struct proc *p;
529
530 p = arg;
531 CTR3(KTR_PROC, "cv_timedwait_end: proc %p (pid %d, %s)", p, p->p_pid,
532 p->p_comm);
533 mtx_lock_spin(&sched_lock);
534 if (p->p_wchan != NULL) {
535 if (p->p_stat == SSLEEP)
536 setrunnable(p);
537 else
538 cv_waitq_remove(p);
539 p->p_sflag |= PS_TIMEOUT;
540 }
541 mtx_unlock_spin(&sched_lock);
542}