/*-
 * Copyright (c) 2000 Jake Burkholder <jake@freebsd.org>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_condvar.c 116182 2003-06-11 00:56:59Z obrien $");

#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/condvar.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#ifdef KTRACE
#include <sys/uio.h>
#include <sys/ktrace.h>
#endif

/*
 * Common sanity checks for cv_wait* functions.
 */
#define	CV_ASSERT(cvp, mp, td) do {					\
	KASSERT((td) != NULL, ("%s: curthread NULL", __func__));	\
	KASSERT(TD_IS_RUNNING(td), ("%s: not TDS_RUNNING", __func__));	\
	KASSERT((cvp) != NULL, ("%s: cvp NULL", __func__));		\
	KASSERT((mp) != NULL, ("%s: mp NULL", __func__));		\
	mtx_assert((mp), MA_OWNED | MA_NOTRECURSED);			\
} while (0)

#ifdef INVARIANTS
#define	CV_WAIT_VALIDATE(cvp, mp) do {					\
	if (TAILQ_EMPTY(&(cvp)->cv_waitq)) {				\
		/* Only waiter. */					\
		(cvp)->cv_mtx = (mp);					\
	} else {							\
		/*							\
		 * Other waiter; assert that we're using the		\
		 * same mutex.						\
		 */							\
		KASSERT((cvp)->cv_mtx == (mp),				\
		    ("%s: Multiple mutexes", __func__));		\
	}								\
} while (0)

#define	CV_SIGNAL_VALIDATE(cvp) do {					\
	if (!TAILQ_EMPTY(&(cvp)->cv_waitq)) {				\
		KASSERT(mtx_owned((cvp)->cv_mtx),			\
		    ("%s: Mutex not owned", __func__));			\
	}								\
} while (0)

#else
#define	CV_WAIT_VALIDATE(cvp, mp)
#define	CV_SIGNAL_VALIDATE(cvp)
#endif

static void cv_timedwait_end(void *arg);

/*
 * Initialize a condition variable.  Must be called before use.
 */
void
cv_init(struct cv *cvp, const char *desc)
{

	TAILQ_INIT(&cvp->cv_waitq);
	cvp->cv_mtx = NULL;
	cvp->cv_description = desc;
}

/*
 * Destroy a condition variable.  The condition variable must be re-initialized
 * in order to be re-used.
 */
void
cv_destroy(struct cv *cvp)
{

	KASSERT(cv_waitq_empty(cvp), ("%s: cv_waitq non-empty", __func__));
}
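
/*
 * Illustrative lifetime sketch (hypothetical "foo" names, not part of this
 * file): a condition variable is initialized once before use and destroyed
 * only after no thread can still be sleeping on it.
 *
 *	static struct mtx foo_mtx;
 *	static struct cv foo_cv;
 *
 *	static void
 *	foo_attach(void)
 *	{
 *		mtx_init(&foo_mtx, "foo", NULL, MTX_DEF);
 *		cv_init(&foo_cv, "foowait");
 *	}
 *
 *	static void
 *	foo_detach(void)
 *	{
 *		cv_destroy(&foo_cv);
 *		mtx_destroy(&foo_mtx);
 *	}
 */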

/*
 * Common code for cv_wait* functions.  All require sched_lock.
 */

/*
 * Switch context.
 */
static __inline void
cv_switch(struct thread *td)
{
	TD_SET_SLEEPING(td);
	td->td_proc->p_stats->p_ru.ru_nvcsw++;
	mi_switch();
	CTR3(KTR_PROC, "cv_switch: resume thread %p (pid %d, %s)", td,
	    td->td_proc->p_pid, td->td_proc->p_comm);
}

/*
 * Switch context, catching signals.
 */
static __inline int
cv_switch_catch(struct thread *td)
{
	struct proc *p;
	int sig;

	/*
	 * We put ourselves on the sleep queue and start our timeout before
	 * calling cursig, as we could stop there, and a wakeup or a SIGCONT (or
	 * both) could occur while we were stopped.  A SIGCONT would cause us to
	 * be marked as TDS_SLP without resuming us, thus we must be ready for
	 * sleep when cursig is called.  If the wakeup happens while we're
	 * stopped, td->td_wchan will be 0 upon return from cursig,
	 * and TD_ON_SLEEPQ() will return false.
	 */
	td->td_flags |= TDF_SINTR;
	mtx_unlock_spin(&sched_lock);
	p = td->td_proc;
	PROC_LOCK(p);
	mtx_lock(&p->p_sigacts->ps_mtx);
	sig = cursig(td);
	mtx_unlock(&p->p_sigacts->ps_mtx);
	if (thread_suspend_check(1))
		sig = SIGSTOP;
	mtx_lock_spin(&sched_lock);
	PROC_UNLOCK(p);
	if (sig != 0) {
		if (TD_ON_SLEEPQ(td))
			cv_waitq_remove(td);
		TD_SET_RUNNING(td);
	} else if (TD_ON_SLEEPQ(td)) {
		cv_switch(td);
	}
	td->td_flags &= ~TDF_SINTR;

	return sig;
}

/*
 * Add a thread to the wait queue of a condition variable.
 */
static __inline void
cv_waitq_add(struct cv *cvp, struct thread *td)
{

	td->td_flags |= TDF_CVWAITQ;
	TD_SET_ON_SLEEPQ(td);
	td->td_wchan = cvp;
	td->td_wmesg = cvp->cv_description;
	CTR3(KTR_PROC, "cv_waitq_add: thread %p (pid %d, %s)", td,
	    td->td_proc->p_pid, td->td_proc->p_comm);
	TAILQ_INSERT_TAIL(&cvp->cv_waitq, td, td_slpq);
	sched_sleep(td, td->td_priority);
}

/*
 * Wait on a condition variable.  The current thread is placed on the condition
 * variable's wait queue and suspended.  A cv_signal or cv_broadcast on the same
 * condition variable will resume the thread.  The mutex is released before
 * sleeping and will be held on return.  It is recommended that the mutex be
 * held when cv_signal or cv_broadcast are called.
 */
void
cv_wait(struct cv *cvp, struct mtx *mp)
{
	struct thread *td;
	WITNESS_SAVE_DECL(mp);

	td = curthread;
#ifdef KTRACE
	if (KTRPOINT(td, KTR_CSW))
		ktrcsw(1, 0);
#endif
	CV_ASSERT(cvp, mp, td);
	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, &mp->mtx_object,
	    "Waiting on \"%s\"", cvp->cv_description);
	WITNESS_SAVE(&mp->mtx_object, mp);

	if (cold || panicstr) {
		/*
		 * After a panic, or during autoconfiguration, just give
		 * interrupts a chance, then just return; don't run any other
		 * thread or panic below, in case this is the idle process and
		 * already asleep.
		 */
		return;
	}

	mtx_lock_spin(&sched_lock);

	CV_WAIT_VALIDATE(cvp, mp);

	DROP_GIANT();
	mtx_unlock(mp);

	cv_waitq_add(cvp, td);
	cv_switch(td);

	mtx_unlock_spin(&sched_lock);
#ifdef KTRACE
	if (KTRPOINT(td, KTR_CSW))
		ktrcsw(0, 0);
#endif
	PICKUP_GIANT();
	mtx_lock(mp);
	WITNESS_RESTORE(&mp->mtx_object, mp);
}
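
/*
 * Minimal usage sketch (hypothetical "foo" names): because cv_wait() drops
 * the mutex while sleeping and reacquires it before returning, the waited-for
 * condition must be re-checked in a loop after every wakeup.
 *
 *	mtx_lock(&foo_mtx);
 *	while (!foo_ready)
 *		cv_wait(&foo_cv, &foo_mtx);
 *	... consume the condition; foo_mtx is still held here ...
 *	mtx_unlock(&foo_mtx);
 */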

/*
 * Wait on a condition variable, allowing interruption by signals.  Return 0 if
 * the thread was resumed with cv_signal or cv_broadcast, EINTR or ERESTART if
 * a signal was caught.  If ERESTART is returned the system call should be
 * restarted if possible.
 */
int
cv_wait_sig(struct cv *cvp, struct mtx *mp)
{
	struct thread *td;
	struct proc *p;
	int rval;
	int sig;
	WITNESS_SAVE_DECL(mp);

	td = curthread;
	p = td->td_proc;
	rval = 0;
#ifdef KTRACE
	if (KTRPOINT(td, KTR_CSW))
		ktrcsw(1, 0);
#endif
	CV_ASSERT(cvp, mp, td);
	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, &mp->mtx_object,
	    "Waiting on \"%s\"", cvp->cv_description);
	WITNESS_SAVE(&mp->mtx_object, mp);

	if (cold || panicstr) {
		/*
		 * After a panic, or during autoconfiguration, just give
		 * interrupts a chance, then just return; don't run any other
		 * procs or panic below, in case this is the idle process and
		 * already asleep.
		 */
		return 0;
	}

	mtx_lock_spin(&sched_lock);

	CV_WAIT_VALIDATE(cvp, mp);

	DROP_GIANT();
	mtx_unlock(mp);

	cv_waitq_add(cvp, td);
	sig = cv_switch_catch(td);

	mtx_unlock_spin(&sched_lock);

	PROC_LOCK(p);
	mtx_lock(&p->p_sigacts->ps_mtx);
	if (sig == 0)
		sig = cursig(td);	/* XXXKSE */
	if (sig != 0) {
		if (SIGISMEMBER(p->p_sigacts->ps_sigintr, sig))
			rval = EINTR;
		else
			rval = ERESTART;
	}
	mtx_unlock(&p->p_sigacts->ps_mtx);
	if (p->p_flag & P_WEXIT)
		rval = EINTR;
	PROC_UNLOCK(p);

#ifdef KTRACE
	if (KTRPOINT(td, KTR_CSW))
		ktrcsw(0, 0);
#endif
	PICKUP_GIANT();
	mtx_lock(mp);
	WITNESS_RESTORE(&mp->mtx_object, mp);

	return (rval);
}
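
/*
 * Sketch of an interruptible wait (hypothetical "foo" names): cv_wait_sig()
 * returns 0 on a normal wakeup and EINTR or ERESTART when a signal is caught,
 * so callers typically stop looping on any error and propagate it out of the
 * system call.
 *
 *	int error;
 *
 *	mtx_lock(&foo_mtx);
 *	error = 0;
 *	while (!foo_ready && error == 0)
 *		error = cv_wait_sig(&foo_cv, &foo_mtx);
 *	mtx_unlock(&foo_mtx);
 *	return (error);
 */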

/*
 * Wait on a condition variable for at most timo/hz seconds.  Returns 0 if the
 * process was resumed by cv_signal or cv_broadcast, EWOULDBLOCK if the timeout
 * expires.
 */
int
cv_timedwait(struct cv *cvp, struct mtx *mp, int timo)
{
	struct thread *td;
	int rval;
	WITNESS_SAVE_DECL(mp);

	td = curthread;
	rval = 0;
#ifdef KTRACE
	if (KTRPOINT(td, KTR_CSW))
		ktrcsw(1, 0);
#endif
	CV_ASSERT(cvp, mp, td);
	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, &mp->mtx_object,
	    "Waiting on \"%s\"", cvp->cv_description);
	WITNESS_SAVE(&mp->mtx_object, mp);

	if (cold || panicstr) {
		/*
		 * After a panic, or during autoconfiguration, just give
		 * interrupts a chance, then just return; don't run any other
		 * thread or panic below, in case this is the idle process and
		 * already asleep.
		 */
		return 0;
	}

	mtx_lock_spin(&sched_lock);

	CV_WAIT_VALIDATE(cvp, mp);

	DROP_GIANT();
	mtx_unlock(mp);

	cv_waitq_add(cvp, td);
	callout_reset(&td->td_slpcallout, timo, cv_timedwait_end, td);
	cv_switch(td);

	if (td->td_flags & TDF_TIMEOUT) {
		td->td_flags &= ~TDF_TIMEOUT;
		rval = EWOULDBLOCK;
	} else if (td->td_flags & TDF_TIMOFAIL)
		td->td_flags &= ~TDF_TIMOFAIL;
	else if (callout_stop(&td->td_slpcallout) == 0) {
		/*
		 * Work around race with cv_timedwait_end similar to that
		 * between msleep and endtsleep.
		 * Go back to sleep.
		 */
		TD_SET_SLEEPING(td);
		td->td_proc->p_stats->p_ru.ru_nivcsw++;
		mi_switch();
		td->td_flags &= ~TDF_TIMOFAIL;
	}

	mtx_unlock_spin(&sched_lock);
#ifdef KTRACE
	if (KTRPOINT(td, KTR_CSW))
		ktrcsw(0, 0);
#endif
	PICKUP_GIANT();
	mtx_lock(mp);
	WITNESS_RESTORE(&mp->mtx_object, mp);

	return (rval);
}
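
/*
 * Timeout sketch (hypothetical "foo" names): the timeout is expressed in
 * ticks, so a caller waiting roughly one second passes hz.  EWOULDBLOCK
 * indicates the timeout expired before a wakeup arrived.
 *
 *	int error;
 *
 *	mtx_lock(&foo_mtx);
 *	error = 0;
 *	while (!foo_ready && error != EWOULDBLOCK)
 *		error = cv_timedwait(&foo_cv, &foo_mtx, hz);
 *	mtx_unlock(&foo_mtx);
 */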

/*
 * Wait on a condition variable for at most timo/hz seconds, allowing
 * interruption by signals.  Returns 0 if the thread was resumed by cv_signal
 * or cv_broadcast, EWOULDBLOCK if the timeout expires, and EINTR or ERESTART if
 * a signal was caught.
 */
int
cv_timedwait_sig(struct cv *cvp, struct mtx *mp, int timo)
{
	struct thread *td;
	struct proc *p;
	int rval;
	int sig;
	WITNESS_SAVE_DECL(mp);

	td = curthread;
	p = td->td_proc;
	rval = 0;
#ifdef KTRACE
	if (KTRPOINT(td, KTR_CSW))
		ktrcsw(1, 0);
#endif
	CV_ASSERT(cvp, mp, td);
	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, &mp->mtx_object,
	    "Waiting on \"%s\"", cvp->cv_description);
	WITNESS_SAVE(&mp->mtx_object, mp);

	if (cold || panicstr) {
		/*
		 * After a panic, or during autoconfiguration, just give
		 * interrupts a chance, then just return; don't run any other
		 * thread or panic below, in case this is the idle process and
		 * already asleep.
		 */
		return 0;
	}

	mtx_lock_spin(&sched_lock);

	CV_WAIT_VALIDATE(cvp, mp);

	DROP_GIANT();
	mtx_unlock(mp);

	cv_waitq_add(cvp, td);
	callout_reset(&td->td_slpcallout, timo, cv_timedwait_end, td);
	sig = cv_switch_catch(td);

	if (td->td_flags & TDF_TIMEOUT) {
		td->td_flags &= ~TDF_TIMEOUT;
		rval = EWOULDBLOCK;
	} else if (td->td_flags & TDF_TIMOFAIL)
		td->td_flags &= ~TDF_TIMOFAIL;
	else if (callout_stop(&td->td_slpcallout) == 0) {
		/*
		 * Work around race with cv_timedwait_end similar to that
		 * between msleep and endtsleep.
		 * Go back to sleep.
		 */
		TD_SET_SLEEPING(td);
		td->td_proc->p_stats->p_ru.ru_nivcsw++;
		mi_switch();
		td->td_flags &= ~TDF_TIMOFAIL;
	}
	mtx_unlock_spin(&sched_lock);

	PROC_LOCK(p);
	mtx_lock(&p->p_sigacts->ps_mtx);
	if (sig == 0)
		sig = cursig(td);
	if (sig != 0) {
		if (SIGISMEMBER(p->p_sigacts->ps_sigintr, sig))
			rval = EINTR;
		else
			rval = ERESTART;
	}
	mtx_unlock(&p->p_sigacts->ps_mtx);
	if (p->p_flag & P_WEXIT)
		rval = EINTR;
	PROC_UNLOCK(p);

#ifdef KTRACE
	if (KTRPOINT(td, KTR_CSW))
		ktrcsw(0, 0);
#endif
	PICKUP_GIANT();
	mtx_lock(mp);
	WITNESS_RESTORE(&mp->mtx_object, mp);

	return (rval);
}
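
/*
 * Combined sketch (hypothetical "foo" names): cv_timedwait_sig() can return
 * 0, EWOULDBLOCK, EINTR, or ERESTART, so the wait loop stops on any non-zero
 * value and lets the caller decide how to handle it.
 *
 *	int error;
 *
 *	mtx_lock(&foo_mtx);
 *	error = 0;
 *	while (!foo_ready && error == 0)
 *		error = cv_timedwait_sig(&foo_cv, &foo_mtx, 5 * hz);
 *	mtx_unlock(&foo_mtx);
 *	return (error);
 */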

/*
 * Common code for signal and broadcast.  Assumes waitq is not empty.  Must be
 * called with sched_lock held.
 */
static __inline void
cv_wakeup(struct cv *cvp)
{
	struct thread *td;

	mtx_assert(&sched_lock, MA_OWNED);
	td = TAILQ_FIRST(&cvp->cv_waitq);
	KASSERT(td->td_wchan == cvp, ("%s: bogus wchan", __func__));
	KASSERT(td->td_flags & TDF_CVWAITQ, ("%s: not on waitq", __func__));
	cv_waitq_remove(td);
	TD_CLR_SLEEPING(td);
	setrunnable(td);
}

/*
 * Signal a condition variable; wakes up one waiting thread.  Will also wake
 * up the swapper if the process is not in memory, so that it can bring the
 * sleeping process in.  Note that this may also result in additional threads
 * being made runnable.  Should be called with the same mutex as was passed to
 * cv_wait held.
 */
void
cv_signal(struct cv *cvp)
{

	KASSERT(cvp != NULL, ("%s: cvp NULL", __func__));
	mtx_lock_spin(&sched_lock);
	if (!TAILQ_EMPTY(&cvp->cv_waitq)) {
		CV_SIGNAL_VALIDATE(cvp);
		cv_wakeup(cvp);
	}
	mtx_unlock_spin(&sched_lock);
}

/*
 * Broadcast a signal to a condition variable.  Wakes up all waiting threads.
 * Should be called with the same mutex as was passed to cv_wait held.
 */
void
cv_broadcast(struct cv *cvp)
{

	KASSERT(cvp != NULL, ("%s: cvp NULL", __func__));
	mtx_lock_spin(&sched_lock);
	CV_SIGNAL_VALIDATE(cvp);
	while (!TAILQ_EMPTY(&cvp->cv_waitq))
		cv_wakeup(cvp);
	mtx_unlock_spin(&sched_lock);
}
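
/*
 * Producer-side sketch (hypothetical "foo" names): the state change and the
 * wakeup are performed under the same mutex that the waiters pass to
 * cv_wait(), so a waiter cannot miss the transition.  cv_signal() wakes one
 * waiter; cv_broadcast() wakes them all.
 *
 *	mtx_lock(&foo_mtx);
 *	foo_ready = 1;
 *	cv_broadcast(&foo_cv);		(or cv_signal(&foo_cv))
 *	mtx_unlock(&foo_mtx);
 */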

/*
 * Remove a thread from the wait queue of its condition variable.  This may be
 * called externally.
 */
void
cv_waitq_remove(struct thread *td)
{
	struct cv *cvp;

	mtx_assert(&sched_lock, MA_OWNED);
	if ((cvp = td->td_wchan) != NULL && td->td_flags & TDF_CVWAITQ) {
		TAILQ_REMOVE(&cvp->cv_waitq, td, td_slpq);
		td->td_flags &= ~TDF_CVWAITQ;
		td->td_wmesg = NULL;
		TD_CLR_ON_SLEEPQ(td);
	}
}

/*
 * Timeout function for cv_timedwait.  Put the thread on the runqueue and set
 * its timeout flag.
 */
static void
cv_timedwait_end(void *arg)
{
	struct thread *td;

	td = arg;
	CTR3(KTR_PROC, "cv_timedwait_end: thread %p (pid %d, %s)",
	    td, td->td_proc->p_pid, td->td_proc->p_comm);
	mtx_lock_spin(&sched_lock);
	if (TD_ON_SLEEPQ(td)) {
		cv_waitq_remove(td);
		td->td_flags |= TDF_TIMEOUT;
	} else {
		td->td_flags |= TDF_TIMOFAIL;
	}
	TD_CLR_SLEEPING(td);
	setrunnable(td);
	mtx_unlock_spin(&sched_lock);
}

/*
 * For now, only abort interruptible waits.
 * The others will have to either complete on their own or have a timeout.
 */
void
cv_abort(struct thread *td)
{

	CTR3(KTR_PROC, "cv_abort: thread %p (pid %d, %s)", td,
	    td->td_proc->p_pid, td->td_proc->p_comm);
	mtx_lock_spin(&sched_lock);
	if ((td->td_flags & (TDF_SINTR|TDF_TIMEOUT)) == TDF_SINTR) {
		if (TD_ON_SLEEPQ(td)) {
			cv_waitq_remove(td);
		}
		TD_CLR_SLEEPING(td);
		setrunnable(td);
	}
	mtx_unlock_spin(&sched_lock);
}