/* thr_cond.c revision 139023 */
1/*
2 * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 *    must display the following acknowledgement:
15 *	This product includes software developed by John Birrell.
16 * 4. Neither the name of the author nor the names of any co-contributors
17 *    may be used to endorse or promote products derived from this software
18 *    without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * SUCH DAMAGE.
31 *
32 * $FreeBSD: head/lib/libkse/thread/thr_cond.c 139023 2004-12-18 18:07:37Z deischen $
33 */
34#include <stdlib.h>
35#include <errno.h>
36#include <string.h>
37#include <pthread.h>
38#include "thr_private.h"
39
40#define	THR_IN_CONDQ(thr)	(((thr)->sflags & THR_FLAGS_IN_SYNCQ) != 0)
41#define	THR_CONDQ_SET(thr)	(thr)->sflags |= THR_FLAGS_IN_SYNCQ
42#define	THR_CONDQ_CLEAR(thr)	(thr)->sflags &= ~THR_FLAGS_IN_SYNCQ
43
44/*
45 * Prototypes
46 */
47static inline struct pthread	*cond_queue_deq(pthread_cond_t);
48static inline void		cond_queue_remove(pthread_cond_t, pthread_t);
49static inline void		cond_queue_enq(pthread_cond_t, pthread_t);
50static void			cond_wait_backout(void *);
51static inline void		check_continuation(struct pthread *,
52				    struct pthread_cond *, pthread_mutex_t *);
53
54/*
55 * Double underscore versions are cancellation points.  Single underscore
56 * versions are not and are provided for libc internal usage (which
57 * shouldn't introduce cancellation points).
58 */
59__weak_reference(__pthread_cond_wait, pthread_cond_wait);
60__weak_reference(__pthread_cond_timedwait, pthread_cond_timedwait);
61
62__weak_reference(_pthread_cond_init, pthread_cond_init);
63__weak_reference(_pthread_cond_destroy, pthread_cond_destroy);
64__weak_reference(_pthread_cond_signal, pthread_cond_signal);
65__weak_reference(_pthread_cond_broadcast, pthread_cond_broadcast);
66
67
68int
69_pthread_cond_init(pthread_cond_t *cond, const pthread_condattr_t *cond_attr)
70{
71	enum pthread_cond_type type;
72	pthread_cond_t	pcond;
73	int		flags;
74	int             rval = 0;
75
76	if (cond == NULL)
77		rval = EINVAL;
78	else {
79		/*
80		 * Check if a pointer to a condition variable attribute
81		 * structure was passed by the caller:
82		 */
83		if (cond_attr != NULL && *cond_attr != NULL) {
84			/* Default to a fast condition variable: */
85			type = (*cond_attr)->c_type;
86			flags = (*cond_attr)->c_flags;
87		} else {
88			/* Default to a fast condition variable: */
89			type = COND_TYPE_FAST;
90			flags = 0;
91		}
92
93		/* Process according to condition variable type: */
94		switch (type) {
95		/* Fast condition variable: */
96		case COND_TYPE_FAST:
97			/* Nothing to do here. */
98			break;
99
100		/* Trap invalid condition variable types: */
101		default:
102			/* Return an invalid argument error: */
103			rval = EINVAL;
104			break;
105		}
106
107		/* Check for no errors: */
108		if (rval == 0) {
109			if ((pcond = (pthread_cond_t)
110			    malloc(sizeof(struct pthread_cond))) == NULL) {
111				rval = ENOMEM;
112			} else if (_lock_init(&pcond->c_lock, LCK_ADAPTIVE,
113			    _thr_lock_wait, _thr_lock_wakeup) != 0) {
114				free(pcond);
115				rval = ENOMEM;
116			} else {
117				/*
118				 * Initialise the condition variable
119				 * structure:
120				 */
121				TAILQ_INIT(&pcond->c_queue);
122				pcond->c_flags = COND_FLAGS_INITED;
123				pcond->c_type = type;
124				pcond->c_mutex = NULL;
125				pcond->c_seqno = 0;
126				*cond = pcond;
127			}
128		}
129	}
130	/* Return the completion status: */
131	return (rval);
132}
133
/*
 * Destroy a condition variable and release its storage.
 *
 * Returns 0 on success, or EINVAL if cond (or the CV it points to) is
 * NULL.
 *
 * NOTE(review): no check is made for threads still queued on the CV
 * (POSIX allows EBUSY here); destroying a CV with waiters is caller
 * error.  Also, the lock is released before _lock_destroy() — this
 * relies on no other thread racing to acquire it — confirm callers
 * guarantee exclusive access at destroy time.
 */
int
_pthread_cond_destroy(pthread_cond_t *cond)
{
	struct pthread_cond	*cv;
	struct pthread		*curthread = _get_curthread();
	int			rval = 0;

	if (cond == NULL || *cond == NULL)
		rval = EINVAL;
	else {
		/* Lock the condition variable structure: */
		THR_LOCK_ACQUIRE(curthread, &(*cond)->c_lock);

		/*
		 * NULL the caller's pointer now that the condition
		 * variable has been destroyed:
		 */
		cv = *cond;
		*cond = NULL;

		/* Unlock the condition variable structure: */
		THR_LOCK_RELEASE(curthread, &cv->c_lock);

		/* Free the cond lock structure: */
		_lock_destroy(&cv->c_lock);

		/*
		 * Free the memory allocated for the condition
		 * variable structure:
		 */
		free(cv);

	}
	/* Return the completion status: */
	return (rval);
}
170
/*
 * Wait (with no timeout) on a condition variable.
 *
 * cond  - the condition variable; statically-initialized CVs are
 *         lazily initialized here.
 * mutex - the associated mutex; must be held by the caller on entry
 *         and is re-held on return.
 *
 * Returns 0 when awakened by a signal/broadcast, or an error code
 * (e.g. EINVAL for a NULL/invalid CV or mismatched mutex).
 *
 * The CV sequence number (c_seqno) is sampled before queueing; a
 * change in it after waking indicates a signal/broadcast occurred.
 */
int
_pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
{
	struct pthread	*curthread = _get_curthread();
	int	rval = 0;
	int	done = 0;
	/* Tracks whether the caller's mutex is still held by this thread: */
	int	mutex_locked = 1;
	int	seqno;

	if (cond == NULL)
		return (EINVAL);

	/*
	 * If the condition variable is statically initialized,
	 * perform the dynamic initialization:
	 */
	if (*cond == NULL &&
	    (rval = pthread_cond_init(cond, NULL)) != 0)
		return (rval);

	if (!_kse_isthreaded())
		_kse_setthreaded(1);

	/*
	 * Enter a loop waiting for a condition signal or broadcast
	 * to wake up this thread.  A loop is needed in case the waiting
	 * thread is interrupted by a signal to execute a signal handler.
	 * It is not (currently) possible to remain in the waiting queue
	 * while running a handler.  Instead, the thread is interrupted
	 * and backed out of the waiting queue prior to executing the
	 * signal handler.
	 */

	/* Lock the condition variable structure: */
	THR_LOCK_ACQUIRE(curthread, &(*cond)->c_lock);
	seqno = (*cond)->c_seqno;
	do {
		/*
		 * If the condvar was statically allocated, properly
		 * initialize the tail queue.
		 */
		if (((*cond)->c_flags & COND_FLAGS_INITED) == 0) {
			TAILQ_INIT(&(*cond)->c_queue);
			(*cond)->c_flags |= COND_FLAGS_INITED;
		}

		/* Process according to condition variable type: */
		switch ((*cond)->c_type) {
		/* Fast condition variable: */
		case COND_TYPE_FAST:
			if ((mutex == NULL) || (((*cond)->c_mutex != NULL) &&
			    ((*cond)->c_mutex != *mutex))) {
				/* Return invalid argument error: */
				rval = EINVAL;
			} else {
				/* Reset the timeout and interrupted flags: */
				curthread->timeout = 0;
				curthread->interrupted = 0;

				/*
				 * Queue the running thread for the condition
				 * variable:
				 */
				cond_queue_enq(*cond, curthread);

				/* Wait forever: */
				curthread->wakeup_time.tv_sec = -1;

				/* Unlock the mutex: */
				if (mutex_locked &&
				    ((rval = _mutex_cv_unlock(mutex)) != 0)) {
					/*
					 * Cannot unlock the mutex, so remove
					 * the running thread from the condition
					 * variable queue:
					 */
					cond_queue_remove(*cond, curthread);
				}
				else {
					/* Remember the mutex: */
					(*cond)->c_mutex = *mutex;

					/*
					 * Don't unlock the mutex the next
					 * time through the loop (if the
					 * thread has to be requeued after
					 * handling a signal).
					 */
					mutex_locked = 0;

					/*
					 * This thread is active and is in a
					 * critical region (holding the cv
					 * lock); we should be able to safely
					 * set the state.
					 */
					THR_SCHED_LOCK(curthread, curthread);
					THR_SET_STATE(curthread, PS_COND_WAIT);

					/* Remember the CV: */
					curthread->data.cond = *cond;
					curthread->sigbackout = cond_wait_backout;
					THR_SCHED_UNLOCK(curthread, curthread);

					/* Unlock the CV structure: */
					THR_LOCK_RELEASE(curthread,
					    &(*cond)->c_lock);

					/* Schedule the next thread: */
					_thr_sched_switch(curthread);

					/*
					 * XXX - This really isn't a good check
					 * since there can be more than one
					 * thread waiting on the CV.  Signals
					 * sent to threads waiting on mutexes
					 * or CVs should really be deferred
					 * until the threads are no longer
					 * waiting, but POSIX says that signals
					 * should be sent "as soon as possible".
					 */
					done = (seqno != (*cond)->c_seqno);
					if (done && !THR_IN_CONDQ(curthread)) {
						/*
						 * The thread is dequeued, so
						 * it is safe to clear these.
						 */
						curthread->data.cond = NULL;
						curthread->sigbackout = NULL;
						check_continuation(curthread,
						    NULL, mutex);
						/*
						 * Fast path: signaled and
						 * dequeued; reacquire the
						 * mutex and return.
						 */
						return (_mutex_cv_lock(mutex));
					}

					/* Relock the CV structure: */
					THR_LOCK_ACQUIRE(curthread,
					    &(*cond)->c_lock);

					/*
					 * Clear these after taking the lock to
					 * prevent a race condition where a
					 * signal can arrive before dequeueing
					 * the thread.
					 */
					curthread->data.cond = NULL;
					curthread->sigbackout = NULL;
					done = (seqno != (*cond)->c_seqno);

					if (THR_IN_CONDQ(curthread)) {
						cond_queue_remove(*cond,
						    curthread);

						/* Check for no more waiters: */
						if (TAILQ_FIRST(&(*cond)->c_queue) == NULL)
							(*cond)->c_mutex = NULL;
					}
				}
			}
			break;

		/* Trap invalid condition variable types: */
		default:
			/* Return an invalid argument error: */
			rval = EINVAL;
			break;
		}

		/*
		 * Run any pending continuation (e.g. cancellation);
		 * does not return if one is run.
		 */
		check_continuation(curthread, *cond,
		    mutex_locked ? NULL : mutex);
	} while ((done == 0) && (rval == 0));

	/* Unlock the condition variable structure: */
	THR_LOCK_RELEASE(curthread, &(*cond)->c_lock);

	/* Reacquire the caller's mutex if we released it above: */
	if (mutex_locked == 0)
		_mutex_cv_lock(mutex);

	/* Return the completion status: */
	return (rval);
}
351
352__strong_reference(_pthread_cond_wait, _thr_cond_wait);
353
/*
 * Cancellation-point wrapper around _pthread_cond_wait(); this is the
 * version exported as pthread_cond_wait().
 */
int
__pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
{
	struct pthread	*self = _get_curthread();
	int		rval;

	_thr_cancel_enter(self);
	rval = _pthread_cond_wait(cond, mutex);
	_thr_cancel_leave(self, 1);
	return (rval);
}
365
366int
367_pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex,
368		       const struct timespec * abstime)
369{
370	struct pthread	*curthread = _get_curthread();
371	int	rval = 0;
372	int	done = 0;
373	int	mutex_locked = 1;
374	int	seqno;
375
376	THR_ASSERT(curthread->locklevel == 0,
377	    "cv_timedwait: locklevel is not zero!");
378
379	if (abstime == NULL || abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
380	    abstime->tv_nsec >= 1000000000)
381		return (EINVAL);
382	/*
383	 * If the condition variable is statically initialized, perform dynamic
384	 * initialization.
385	 */
386	if (*cond == NULL && (rval = pthread_cond_init(cond, NULL)) != 0)
387		return (rval);
388
389	if (!_kse_isthreaded())
390		_kse_setthreaded(1);
391
392	/*
393	 * Enter a loop waiting for a condition signal or broadcast
394	 * to wake up this thread.  A loop is needed in case the waiting
395	 * thread is interrupted by a signal to execute a signal handler.
396	 * It is not (currently) possible to remain in the waiting queue
397	 * while running a handler.  Instead, the thread is interrupted
398	 * and backed out of the waiting queue prior to executing the
399	 * signal handler.
400	 */
401
402	/* Lock the condition variable structure: */
403	THR_LOCK_ACQUIRE(curthread, &(*cond)->c_lock);
404	seqno = (*cond)->c_seqno;
405	do {
406		/*
407		 * If the condvar was statically allocated, properly
408		 * initialize the tail queue.
409		 */
410		if (((*cond)->c_flags & COND_FLAGS_INITED) == 0) {
411			TAILQ_INIT(&(*cond)->c_queue);
412			(*cond)->c_flags |= COND_FLAGS_INITED;
413		}
414
415		/* Process according to condition variable type: */
416		switch ((*cond)->c_type) {
417		/* Fast condition variable: */
418		case COND_TYPE_FAST:
419			if ((mutex == NULL) || (((*cond)->c_mutex != NULL) &&
420			    ((*cond)->c_mutex != *mutex))) {
421				/* Return invalid argument error: */
422				rval = EINVAL;
423			} else {
424				/* Set the wakeup time: */
425				curthread->wakeup_time.tv_sec = abstime->tv_sec;
426				curthread->wakeup_time.tv_nsec =
427				    abstime->tv_nsec;
428
429				/* Reset the timeout and interrupted flags: */
430				curthread->timeout = 0;
431				curthread->interrupted = 0;
432
433				/*
434				 * Queue the running thread for the condition
435				 * variable:
436				 */
437				cond_queue_enq(*cond, curthread);
438
439				/* Unlock the mutex: */
440				if (mutex_locked &&
441				   ((rval = _mutex_cv_unlock(mutex)) != 0)) {
442					/*
443					 * Cannot unlock the mutex; remove the
444					 * running thread from the condition
445					 * variable queue:
446					 */
447					cond_queue_remove(*cond, curthread);
448				} else {
449					/* Remember the mutex: */
450					(*cond)->c_mutex = *mutex;
451
452					/*
453					 * Don't unlock the mutex the next
454					 * time through the loop (if the
455					 * thread has to be requeued after
456					 * handling a signal).
457					 */
458					mutex_locked = 0;
459
460					/*
461					 * This thread is active and is in a
462					 * critical region (holding the cv
463					 * lock); we should be able to safely
464					 * set the state.
465					 */
466					THR_SCHED_LOCK(curthread, curthread);
467					THR_SET_STATE(curthread, PS_COND_WAIT);
468
469					/* Remember the CV: */
470					curthread->data.cond = *cond;
471					curthread->sigbackout = cond_wait_backout;
472					THR_SCHED_UNLOCK(curthread, curthread);
473
474					/* Unlock the CV structure: */
475					THR_LOCK_RELEASE(curthread,
476					    &(*cond)->c_lock);
477
478					/* Schedule the next thread: */
479					_thr_sched_switch(curthread);
480
481					/*
482					 * XXX - This really isn't a good check
483					 * since there can be more than one
484					 * thread waiting on the CV.  Signals
485					 * sent to threads waiting on mutexes
486					 * or CVs should really be deferred
487					 * until the threads are no longer
488					 * waiting, but POSIX says that signals
489					 * should be sent "as soon as possible".
490					 */
491					done = (seqno != (*cond)->c_seqno);
492					if (done && !THR_IN_CONDQ(curthread)) {
493						/*
494						 * The thread is dequeued, so
495						 * it is safe to clear these.
496						 */
497						curthread->data.cond = NULL;
498						curthread->sigbackout = NULL;
499						check_continuation(curthread,
500						    NULL, mutex);
501						return (_mutex_cv_lock(mutex));
502					}
503
504					/* Relock the CV structure: */
505					THR_LOCK_ACQUIRE(curthread,
506					    &(*cond)->c_lock);
507
508					/*
509					 * Clear these after taking the lock to
510					 * prevent a race condition where a
511					 * signal can arrive before dequeueing
512					 * the thread.
513					 */
514					curthread->data.cond = NULL;
515					curthread->sigbackout = NULL;
516
517					done = (seqno != (*cond)->c_seqno);
518
519					if (THR_IN_CONDQ(curthread)) {
520						cond_queue_remove(*cond,
521						    curthread);
522
523						/* Check for no more waiters: */
524						if (TAILQ_FIRST(&(*cond)->c_queue) == NULL)
525							(*cond)->c_mutex = NULL;
526					}
527
528					if (curthread->timeout != 0) {
529						/* The wait timedout. */
530						rval = ETIMEDOUT;
531					}
532				}
533			}
534			break;
535
536		/* Trap invalid condition variable types: */
537		default:
538			/* Return an invalid argument error: */
539			rval = EINVAL;
540			break;
541		}
542
543		check_continuation(curthread, *cond,
544		    mutex_locked ? NULL : mutex);
545	} while ((done == 0) && (rval == 0));
546
547	/* Unlock the condition variable structure: */
548	THR_LOCK_RELEASE(curthread, &(*cond)->c_lock);
549
550	if (mutex_locked == 0)
551		_mutex_cv_lock(mutex);
552
553	/* Return the completion status: */
554	return (rval);
555}
556
/*
 * Cancellation-point wrapper around _pthread_cond_timedwait(); this is
 * the version exported as pthread_cond_timedwait().
 */
int
__pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
		       const struct timespec *abstime)
{
	struct pthread	*self = _get_curthread();
	int		rval;

	_thr_cancel_enter(self);
	rval = _pthread_cond_timedwait(cond, mutex, abstime);
	_thr_cancel_leave(self, 1);
	return (rval);
}
569
570
571int
572_pthread_cond_signal(pthread_cond_t * cond)
573{
574	struct pthread	*curthread = _get_curthread();
575	struct pthread	*pthread;
576	struct kse_mailbox *kmbx;
577	int		rval = 0;
578
579	THR_ASSERT(curthread->locklevel == 0,
580	    "cv_timedwait: locklevel is not zero!");
581	if (cond == NULL)
582		rval = EINVAL;
583       /*
584        * If the condition variable is statically initialized, perform dynamic
585        * initialization.
586        */
587	else if (*cond != NULL || (rval = pthread_cond_init(cond, NULL)) == 0) {
588		/* Lock the condition variable structure: */
589		THR_LOCK_ACQUIRE(curthread, &(*cond)->c_lock);
590
591		/* Process according to condition variable type: */
592		switch ((*cond)->c_type) {
593		/* Fast condition variable: */
594		case COND_TYPE_FAST:
595			/* Increment the sequence number: */
596			(*cond)->c_seqno++;
597
598			/*
599			 * Wakeups have to be done with the CV lock held;
600			 * otherwise there is a race condition where the
601			 * thread can timeout, run on another KSE, and enter
602			 * another blocking state (including blocking on a CV).
603			 */
604			if ((pthread = TAILQ_FIRST(&(*cond)->c_queue))
605			    != NULL) {
606				THR_SCHED_LOCK(curthread, pthread);
607				cond_queue_remove(*cond, pthread);
608				pthread->sigbackout = NULL;
609				if ((pthread->kseg == curthread->kseg) &&
610				    (pthread->active_priority >
611				    curthread->active_priority))
612					curthread->critical_yield = 1;
613				kmbx = _thr_setrunnable_unlocked(pthread);
614				THR_SCHED_UNLOCK(curthread, pthread);
615				if (kmbx != NULL)
616					kse_wakeup(kmbx);
617			}
618			/* Check for no more waiters: */
619			if (TAILQ_FIRST(&(*cond)->c_queue) == NULL)
620				(*cond)->c_mutex = NULL;
621			break;
622
623		/* Trap invalid condition variable types: */
624		default:
625			/* Return an invalid argument error: */
626			rval = EINVAL;
627			break;
628		}
629
630		/* Unlock the condition variable structure: */
631		THR_LOCK_RELEASE(curthread, &(*cond)->c_lock);
632	}
633
634	/* Return the completion status: */
635	return (rval);
636}
637
638__strong_reference(_pthread_cond_signal, _thr_cond_signal);
639
640int
641_pthread_cond_broadcast(pthread_cond_t * cond)
642{
643	struct pthread	*curthread = _get_curthread();
644	struct pthread	*pthread;
645	struct kse_mailbox *kmbx;
646	int		rval = 0;
647
648	THR_ASSERT(curthread->locklevel == 0,
649	    "cv_timedwait: locklevel is not zero!");
650	if (cond == NULL)
651		rval = EINVAL;
652       /*
653        * If the condition variable is statically initialized, perform dynamic
654        * initialization.
655        */
656	else if (*cond != NULL || (rval = pthread_cond_init(cond, NULL)) == 0) {
657		/* Lock the condition variable structure: */
658		THR_LOCK_ACQUIRE(curthread, &(*cond)->c_lock);
659
660		/* Process according to condition variable type: */
661		switch ((*cond)->c_type) {
662		/* Fast condition variable: */
663		case COND_TYPE_FAST:
664			/* Increment the sequence number: */
665			(*cond)->c_seqno++;
666
667			/*
668			 * Enter a loop to bring all threads off the
669			 * condition queue:
670			 */
671			while ((pthread = TAILQ_FIRST(&(*cond)->c_queue))
672			    != NULL) {
673				THR_SCHED_LOCK(curthread, pthread);
674				cond_queue_remove(*cond, pthread);
675				pthread->sigbackout = NULL;
676				if ((pthread->kseg == curthread->kseg) &&
677				    (pthread->active_priority >
678				    curthread->active_priority))
679					curthread->critical_yield = 1;
680				kmbx = _thr_setrunnable_unlocked(pthread);
681				THR_SCHED_UNLOCK(curthread, pthread);
682				if (kmbx != NULL)
683					kse_wakeup(kmbx);
684			}
685
686			/* There are no more waiting threads: */
687			(*cond)->c_mutex = NULL;
688			break;
689
690		/* Trap invalid condition variable types: */
691		default:
692			/* Return an invalid argument error: */
693			rval = EINVAL;
694			break;
695		}
696
697		/* Unlock the condition variable structure: */
698		THR_LOCK_RELEASE(curthread, &(*cond)->c_lock);
699	}
700
701	/* Return the completion status: */
702	return (rval);
703}
704
705__strong_reference(_pthread_cond_broadcast, _thr_cond_broadcast);
706
707static inline void
708check_continuation(struct pthread *curthread, struct pthread_cond *cond,
709    pthread_mutex_t *mutex)
710{
711	if ((curthread->interrupted != 0) &&
712	    (curthread->continuation != NULL)) {
713		if (cond != NULL)
714			/* Unlock the condition variable structure: */
715			THR_LOCK_RELEASE(curthread, &cond->c_lock);
716		/*
717		 * Note that even though this thread may have been
718		 * canceled, POSIX requires that the mutex be
719		 * reaquired prior to cancellation.
720		 */
721		if (mutex != NULL)
722			_mutex_cv_lock(mutex);
723		curthread->continuation((void *) curthread);
724		PANIC("continuation returned in pthread_cond_wait.\n");
725	}
726}
727
728static void
729cond_wait_backout(void *arg)
730{
731	struct pthread *curthread = (struct pthread *)arg;
732	pthread_cond_t	cond;
733
734	cond = curthread->data.cond;
735	if (cond != NULL) {
736		/* Lock the condition variable structure: */
737		THR_LOCK_ACQUIRE(curthread, &cond->c_lock);
738
739		/* Process according to condition variable type: */
740		switch (cond->c_type) {
741		/* Fast condition variable: */
742		case COND_TYPE_FAST:
743			cond_queue_remove(cond, curthread);
744
745			/* Check for no more waiters: */
746			if (TAILQ_FIRST(&cond->c_queue) == NULL)
747				cond->c_mutex = NULL;
748			break;
749
750		default:
751			break;
752		}
753
754		/* Unlock the condition variable structure: */
755		THR_LOCK_RELEASE(curthread, &cond->c_lock);
756	}
757	/* No need to call this again. */
758	curthread->sigbackout = NULL;
759}
760
761/*
762 * Dequeue a waiting thread from the head of a condition queue in
763 * descending priority order.
764 */
765static inline struct pthread *
766cond_queue_deq(pthread_cond_t cond)
767{
768	struct pthread	*pthread;
769
770	while ((pthread = TAILQ_FIRST(&cond->c_queue)) != NULL) {
771		TAILQ_REMOVE(&cond->c_queue, pthread, sqe);
772		THR_CONDQ_CLEAR(pthread);
773		if ((pthread->timeout == 0) && (pthread->interrupted == 0))
774			/*
775			 * Only exit the loop when we find a thread
776			 * that hasn't timed out or been canceled;
777			 * those threads are already running and don't
778			 * need their run state changed.
779			 */
780			break;
781	}
782
783	return (pthread);
784}
785
786/*
787 * Remove a waiting thread from a condition queue in descending priority
788 * order.
789 */
790static inline void
791cond_queue_remove(pthread_cond_t cond, struct pthread *pthread)
792{
793	/*
794	 * Because pthread_cond_timedwait() can timeout as well
795	 * as be signaled by another thread, it is necessary to
796	 * guard against removing the thread from the queue if
797	 * it isn't in the queue.
798	 */
799	if (THR_IN_CONDQ(pthread)) {
800		TAILQ_REMOVE(&cond->c_queue, pthread, sqe);
801		THR_CONDQ_CLEAR(pthread);
802	}
803}
804
805/*
806 * Enqueue a waiting thread to a condition queue in descending priority
807 * order.
808 */
/*
 * Enqueue a waiting thread to a condition queue in descending priority
 * order, and record the CV in the thread's data.cond.
 */
static inline void
cond_queue_enq(pthread_cond_t cond, struct pthread *pthread)
{
	struct pthread *tid = TAILQ_LAST(&cond->c_queue, cond_head);

	THR_ASSERT(!THR_IN_SYNCQ(pthread),
	    "cond_queue_enq: thread already queued!");

	/*
	 * For the common case of all threads having equal priority,
	 * we perform a quick check against the priority of the thread
	 * at the tail of the queue.
	 */
	if ((tid == NULL) || (pthread->active_priority <= tid->active_priority))
		TAILQ_INSERT_TAIL(&cond->c_queue, pthread, sqe);
	else {
		/*
		 * The tail has strictly lower priority than pthread, so
		 * this scan is guaranteed to stop before running off the
		 * end of the queue.
		 */
		tid = TAILQ_FIRST(&cond->c_queue);
		while (pthread->active_priority <= tid->active_priority)
			tid = TAILQ_NEXT(tid, sqe);
		TAILQ_INSERT_BEFORE(tid, pthread, sqe);
	}
	THR_CONDQ_SET(pthread);
	pthread->data.cond = cond;
}
833