/*
 * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by John Birrell.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/lib/libkse/thread/thr_cond.c 114524 2003-05-02 11:39:00Z davidxu $
 */
#include <stdlib.h>
#include <errno.h>
#include <string.h>
#include <pthread.h>
#include "thr_private.h"

#define	THR_IN_CONDQ(thr)	(((thr)->sflags & THR_FLAGS_IN_SYNCQ) != 0)
#define	THR_CONDQ_SET(thr)	(thr)->sflags |= THR_FLAGS_IN_SYNCQ
#define	THR_CONDQ_CLEAR(thr)	(thr)->sflags &= ~THR_FLAGS_IN_SYNCQ

/*
 * Prototypes
 */
static inline struct pthread	*cond_queue_deq(pthread_cond_t);
static inline void		cond_queue_remove(pthread_cond_t, pthread_t);
static inline void		cond_queue_enq(pthread_cond_t, pthread_t);

__weak_reference(_pthread_cond_init, pthread_cond_init);
__weak_reference(_pthread_cond_destroy, pthread_cond_destroy);
__weak_reference(_pthread_cond_wait, pthread_cond_wait);
__weak_reference(_pthread_cond_timedwait, pthread_cond_timedwait);
__weak_reference(_pthread_cond_signal, pthread_cond_signal);
__weak_reference(_pthread_cond_broadcast, pthread_cond_broadcast);


int
_pthread_cond_init(pthread_cond_t *cond, const pthread_condattr_t *cond_attr)
{
	enum pthread_cond_type type;
	pthread_cond_t	pcond;
	int		flags;
	int             rval = 0;

	if (cond == NULL)
		rval = EINVAL;
	else {
		/*
		 * Check if a pointer to a condition variable attribute
		 * structure was passed by the caller:
		 */
		if (cond_attr != NULL && *cond_attr != NULL) {
			/* Use the caller-supplied type and flags: */
			type = (*cond_attr)->c_type;
			flags = (*cond_attr)->c_flags;
		} else {
			/* Default to a fast condition variable: */
			type = COND_TYPE_FAST;
			flags = 0;
		}

		/* Process according to condition variable type: */
		switch (type) {
		/* Fast condition variable: */
		case COND_TYPE_FAST:
			/* Nothing to do here. */
			break;

		/* Trap invalid condition variable types: */
		default:
			/* Return an invalid argument error: */
			rval = EINVAL;
			break;
		}

		/* Check for no errors: */
		if (rval == 0) {
			if ((pcond = (pthread_cond_t)
			    malloc(sizeof(struct pthread_cond))) == NULL) {
				rval = ENOMEM;
			} else if (_lock_init(&pcond->c_lock, LCK_ADAPTIVE,
			    _thr_lock_wait, _thr_lock_wakeup) != 0) {
				free(pcond);
				rval = ENOMEM;
			} else {
				/*
				 * Initialise the condition variable
				 * structure:
				 */
				TAILQ_INIT(&pcond->c_queue);
				pcond->c_flags = COND_FLAGS_INITED;
				pcond->c_type = type;
				pcond->c_mutex = NULL;
				pcond->c_seqno = 0;
				*cond = pcond;
			}
		}
	}
	/* Return the completion status: */
	return (rval);
}
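
/*
 * Illustrative caller-side sketch (not part of this library); the names
 * used are hypothetical.  A condition variable can be initialized either
 * dynamically with pthread_cond_init() or statically with
 * PTHREAD_COND_INITIALIZER; in the static case the wait, signal and
 * broadcast paths below perform the initialization lazily when they find
 * *cond == NULL.
 *
 *	#include <pthread.h>
 *
 *	static pthread_cond_t static_cv = PTHREAD_COND_INITIALIZER;
 *	static pthread_cond_t dynamic_cv;
 *
 *	int
 *	example_setup(void)
 *	{
 *		return (pthread_cond_init(&dynamic_cv, NULL));
 *	}
 */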

int
_pthread_cond_destroy(pthread_cond_t *cond)
{
	struct pthread_cond	*cv;
	struct pthread		*curthread = _get_curthread();
	int			rval = 0;

	if (cond == NULL || *cond == NULL)
		rval = EINVAL;
	else {
		/* Lock the condition variable structure: */
		THR_LOCK_ACQUIRE(curthread, &(*cond)->c_lock);

		/*
		 * NULL the caller's pointer now that the condition
		 * variable has been destroyed:
		 */
		cv = *cond;
		*cond = NULL;

		/* Unlock the condition variable structure: */
		THR_LOCK_RELEASE(curthread, &cv->c_lock);

		/*
		 * Free the memory allocated for the condition
		 * variable structure:
		 */
		free(cv);
	}
	/* Return the completion status: */
	return (rval);
}
int
_pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
{
	struct pthread	*curthread = _get_curthread();
	int	rval = 0;
	int	done = 0;
	int	interrupted = 0;
	int	unlock_mutex = 1;
	int	seqno;

	_thr_enter_cancellation_point(curthread);

	if (cond == NULL) {
		_thr_leave_cancellation_point(curthread);
		return (EINVAL);
	}

	/*
	 * If the condition variable is statically initialized,
	 * perform the dynamic initialization:
	 */
	if (*cond == NULL &&
	    (rval = pthread_cond_init(cond, NULL)) != 0) {
		_thr_leave_cancellation_point(curthread);
		return (rval);
	}

	/*
	 * Enter a loop waiting for a condition signal or broadcast
	 * to wake up this thread.  A loop is needed in case the waiting
	 * thread is interrupted by a signal to execute a signal handler.
	 * It is not (currently) possible to remain in the waiting queue
	 * while running a handler.  Instead, the thread is interrupted
	 * and backed out of the waiting queue prior to executing the
	 * signal handler.
	 */
	do {
		/* Lock the condition variable structure: */
		THR_LOCK_ACQUIRE(curthread, &(*cond)->c_lock);

		/*
		 * If the condvar was statically allocated, properly
		 * initialize the tail queue.
		 */
		if (((*cond)->c_flags & COND_FLAGS_INITED) == 0) {
			TAILQ_INIT(&(*cond)->c_queue);
			(*cond)->c_flags |= COND_FLAGS_INITED;
		}

		/* Process according to condition variable type: */
		switch ((*cond)->c_type) {
		/* Fast condition variable: */
		case COND_TYPE_FAST:
			if ((mutex == NULL) || (((*cond)->c_mutex != NULL) &&
			    ((*cond)->c_mutex != *mutex))) {
				/* Unlock the condition variable structure: */
				THR_LOCK_RELEASE(curthread, &(*cond)->c_lock);

				/* Return invalid argument error: */
				rval = EINVAL;
			} else {
				/* Reset the timeout and interrupted flags: */
				curthread->timeout = 0;
				curthread->interrupted = 0;

				/*
				 * Queue the running thread for the condition
				 * variable:
				 */
				cond_queue_enq(*cond, curthread);

				/* Remember the mutex and sequence number: */
				(*cond)->c_mutex = *mutex;
				seqno = (*cond)->c_seqno;

				/* Wait forever: */
				curthread->wakeup_time.tv_sec = -1;

				/* Unlock the mutex: */
				if ((unlock_mutex != 0) &&
				    ((rval = _mutex_cv_unlock(mutex)) != 0)) {
					/*
					 * Cannot unlock the mutex, so remove
					 * the running thread from the condition
					 * variable queue:
					 */
					cond_queue_remove(*cond, curthread);

					/* Check for no more waiters: */
					if (TAILQ_FIRST(&(*cond)->c_queue) == NULL)
						(*cond)->c_mutex = NULL;

					/* Unlock the condition variable structure: */
					THR_LOCK_RELEASE(curthread, &(*cond)->c_lock);
				} else {
					/*
					 * Don't unlock the mutex the next
					 * time through the loop (if the
					 * thread has to be requeued after
					 * handling a signal).
					 */
					unlock_mutex = 0;

					/*
					 * This thread is active and is in a
					 * critical region (holding the cv
					 * lock); we should be able to safely
					 * set the state.
					 */
					THR_LOCK_SWITCH(curthread);
					THR_SET_STATE(curthread, PS_COND_WAIT);

					/* Remember the CV: */
					curthread->data.cond = *cond;

					/* Unlock the CV structure: */
					THR_LOCK_RELEASE(curthread,
					    &(*cond)->c_lock);

					/* Schedule the next thread: */
					_thr_sched_switch(curthread);

					curthread->data.cond = NULL;
					THR_UNLOCK_SWITCH(curthread);

					/*
					 * XXX - This really isn't a good check
					 * since there can be more than one
					 * thread waiting on the CV.  Signals
					 * sent to threads waiting on mutexes
					 * or CVs should really be deferred
					 * until the threads are no longer
					 * waiting, but POSIX says that signals
					 * should be sent "as soon as possible".
					 */
					done = (seqno != (*cond)->c_seqno);

					if (THR_IN_SYNCQ(curthread)) {
						/*
						 * Lock the condition variable
						 * while removing the thread.
						 */
						THR_LOCK_ACQUIRE(curthread,
						    &(*cond)->c_lock);

						cond_queue_remove(*cond,
						    curthread);

						/* Check for no more waiters: */
						if (TAILQ_FIRST(&(*cond)->c_queue) == NULL)
							(*cond)->c_mutex = NULL;

						THR_LOCK_RELEASE(curthread,
						    &(*cond)->c_lock);
					}

					/*
					 * Save the interrupted flag; locking
					 * the mutex may destroy it.
					 */
					interrupted = curthread->interrupted;

					/*
					 * Note that even though this thread may
					 * have been canceled, POSIX requires
					 * that the mutex be reacquired prior to
					 * cancellation.
					 */
					if (done || interrupted) {
						rval = _mutex_cv_lock(mutex);
						unlock_mutex = 1;
					}
				}
			}
			break;

		/* Trap invalid condition variable types: */
		default:
			/* Unlock the condition variable structure: */
			THR_LOCK_RELEASE(curthread, &(*cond)->c_lock);

			/* Return an invalid argument error: */
			rval = EINVAL;
			break;
		}

		if ((interrupted != 0) && (curthread->continuation != NULL))
			curthread->continuation((void *) curthread);
	} while ((done == 0) && (rval == 0));

	_thr_leave_cancellation_point(curthread);

	/* Return the completion status: */
	return (rval);
}
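
/*
 * Illustrative caller-side sketch (not not part of this library's source
 * proper); the names used are hypothetical.  A signal only guarantees that
 * the sequence number changed: another thread may already have consumed
 * the state being waited for, and POSIX also permits spurious wakeups, so
 * callers are expected to re-test their predicate in a loop around
 * pthread_cond_wait() with the mutex held.
 *
 *	#include <pthread.h>
 *
 *	static pthread_mutex_t	lock = PTHREAD_MUTEX_INITIALIZER;
 *	static pthread_cond_t	ready_cv = PTHREAD_COND_INITIALIZER;
 *	static int		ready = 0;
 *
 *	void
 *	example_consumer(void)
 *	{
 *		pthread_mutex_lock(&lock);
 *		while (ready == 0)
 *			pthread_cond_wait(&ready_cv, &lock);
 *		pthread_mutex_unlock(&lock);
 *	}
 *
 *	void
 *	example_producer(void)
 *	{
 *		pthread_mutex_lock(&lock);
 *		ready = 1;
 *		pthread_cond_signal(&ready_cv);
 *		pthread_mutex_unlock(&lock);
 *	}
 */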

int
__pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
{
	struct pthread *curthread = _get_curthread();
	int ret;

	_thr_enter_cancellation_point(curthread);
	ret = _pthread_cond_wait(cond, mutex);
	_thr_leave_cancellation_point(curthread);
	return (ret);
}

int
_pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex,
		       const struct timespec * abstime)
{
	struct pthread	*curthread = _get_curthread();
	int	rval = 0;
	int	done = 0;
	int	interrupted = 0;
	int	unlock_mutex = 1;
	int	seqno;

	THR_ASSERT(curthread->locklevel == 0,
	    "cv_timedwait: locklevel is not zero!");
	_thr_enter_cancellation_point(curthread);

	if (cond == NULL || abstime == NULL || abstime->tv_sec < 0 ||
	    abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000) {
		_thr_leave_cancellation_point(curthread);
		return (EINVAL);
	}
	/*
	 * If the condition variable is statically initialized, perform dynamic
	 * initialization.
	 */
	if (*cond == NULL && (rval = pthread_cond_init(cond, NULL)) != 0) {
		_thr_leave_cancellation_point(curthread);
		return (rval);
	}

	/*
	 * Enter a loop waiting for a condition signal or broadcast
	 * to wake up this thread.  A loop is needed in case the waiting
	 * thread is interrupted by a signal to execute a signal handler.
	 * It is not (currently) possible to remain in the waiting queue
	 * while running a handler.  Instead, the thread is interrupted
	 * and backed out of the waiting queue prior to executing the
	 * signal handler.
	 */
	do {
		/* Lock the condition variable structure: */
		THR_LOCK_ACQUIRE(curthread, &(*cond)->c_lock);

		/*
		 * If the condvar was statically allocated, properly
		 * initialize the tail queue.
		 */
		if (((*cond)->c_flags & COND_FLAGS_INITED) == 0) {
			TAILQ_INIT(&(*cond)->c_queue);
			(*cond)->c_flags |= COND_FLAGS_INITED;
		}

		/* Process according to condition variable type: */
		switch ((*cond)->c_type) {
		/* Fast condition variable: */
		case COND_TYPE_FAST:
			if ((mutex == NULL) || (((*cond)->c_mutex != NULL) &&
			    ((*cond)->c_mutex != *mutex))) {
				/* Return invalid argument error: */
				rval = EINVAL;

				/* Unlock the condition variable structure: */
				THR_LOCK_RELEASE(curthread, &(*cond)->c_lock);
			} else {
				/* Set the wakeup time: */
				curthread->wakeup_time.tv_sec = abstime->tv_sec;
				curthread->wakeup_time.tv_nsec =
				    abstime->tv_nsec;

				/* Reset the timeout and interrupted flags: */
				curthread->timeout = 0;
				curthread->interrupted = 0;

				/*
				 * Queue the running thread for the condition
				 * variable:
				 */
				cond_queue_enq(*cond, curthread);

				/* Remember the mutex and sequence number: */
				(*cond)->c_mutex = *mutex;
				seqno = (*cond)->c_seqno;

				/* Unlock the mutex: */
				if ((unlock_mutex != 0) &&
				    ((rval = _mutex_cv_unlock(mutex)) != 0)) {
					/*
					 * Cannot unlock the mutex; remove the
					 * running thread from the condition
					 * variable queue:
					 */
					cond_queue_remove(*cond, curthread);

					/* Check for no more waiters: */
					if (TAILQ_FIRST(&(*cond)->c_queue) == NULL)
						(*cond)->c_mutex = NULL;

					/* Unlock the condition variable structure: */
					THR_LOCK_RELEASE(curthread, &(*cond)->c_lock);
				} else {
					/*
					 * Don't unlock the mutex the next
					 * time through the loop (if the
					 * thread has to be requeued after
					 * handling a signal).
					 */
					unlock_mutex = 0;

					/*
					 * This thread is active and is in a
					 * critical region (holding the cv
					 * lock); we should be able to safely
					 * set the state.
					 */
					THR_LOCK_SWITCH(curthread);
					THR_SET_STATE(curthread, PS_COND_WAIT);

					/* Remember the CV: */
					curthread->data.cond = *cond;

					/* Unlock the CV structure: */
					THR_LOCK_RELEASE(curthread,
					    &(*cond)->c_lock);

					/* Schedule the next thread: */
					_thr_sched_switch(curthread);

					curthread->data.cond = NULL;
					THR_UNLOCK_SWITCH(curthread);

					/*
					 * XXX - This really isn't a good check
					 * since there can be more than one
					 * thread waiting on the CV.  Signals
					 * sent to threads waiting on mutexes
					 * or CVs should really be deferred
					 * until the threads are no longer
					 * waiting, but POSIX says that signals
					 * should be sent "as soon as possible".
					 */
					done = (seqno != (*cond)->c_seqno);

					if (THR_IN_CONDQ(curthread)) {
						/*
						 * Lock the condition variable
						 * while removing the thread.
						 */
						THR_LOCK_ACQUIRE(curthread,
						    &(*cond)->c_lock);

						cond_queue_remove(*cond,
						    curthread);

						/* Check for no more waiters: */
						if (TAILQ_FIRST(&(*cond)->c_queue) == NULL)
							(*cond)->c_mutex = NULL;

						THR_LOCK_RELEASE(curthread,
						    &(*cond)->c_lock);
					}

					/*
					 * Save the interrupted flag; locking
					 * the mutex may destroy it.
					 */
					interrupted = curthread->interrupted;
					if (curthread->timeout != 0) {
						/* The wait timed out. */
						rval = ETIMEDOUT;
						(void)_mutex_cv_lock(mutex);
					} else if ((interrupted == 0) ||
					    (done != 0))
						rval = _mutex_cv_lock(mutex);
				}
			}
			break;

		/* Trap invalid condition variable types: */
		default:
			/* Unlock the condition variable structure: */
			THR_LOCK_RELEASE(curthread, &(*cond)->c_lock);

			/* Return an invalid argument error: */
			rval = EINVAL;
			break;
		}

		if ((interrupted != 0) && (curthread->continuation != NULL))
			curthread->continuation((void *)curthread);
	} while ((done == 0) && (rval == 0));

	_thr_leave_cancellation_point(curthread);

	/* Return the completion status: */
	return (rval);
}

int
__pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
		       const struct timespec *abstime)
{
	struct pthread *curthread = _get_curthread();
	int ret;

	_thr_enter_cancellation_point(curthread);
	ret = _pthread_cond_timedwait(cond, mutex, abstime);
	_thr_leave_cancellation_point(curthread);
	return (ret);
}
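
/*
 * Illustrative caller-side sketch (not part of this library); the names
 * used are hypothetical.  The abstime argument validated above is an
 * absolute time (against the realtime clock by default), not a relative
 * interval, so a deadline is typically built once from clock_gettime()
 * and reused unchanged across the retest loop; ETIMEDOUT is returned once
 * the deadline has passed.
 *
 *	#include <pthread.h>
 *	#include <time.h>
 *
 *	int
 *	example_timed_wait(pthread_cond_t *cvp, pthread_mutex_t *mp,
 *	    int *flagp)
 *	{
 *		struct timespec deadline;
 *		int error = 0;
 *
 *		clock_gettime(CLOCK_REALTIME, &deadline);
 *		deadline.tv_sec += 2;
 *
 *		pthread_mutex_lock(mp);
 *		while (*flagp == 0 && error == 0)
 *			error = pthread_cond_timedwait(cvp, mp, &deadline);
 *		pthread_mutex_unlock(mp);
 *		return (error);
 *	}
 */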


int
_pthread_cond_signal(pthread_cond_t * cond)
{
	struct pthread	*curthread = _get_curthread();
	struct pthread	*pthread;
	int		rval = 0;

	THR_ASSERT(curthread->locklevel == 0,
	    "cond_signal: locklevel is not zero!");
	if (cond == NULL)
		rval = EINVAL;
	/*
	 * If the condition variable is statically initialized, perform dynamic
	 * initialization.
	 */
	else if (*cond != NULL || (rval = pthread_cond_init(cond, NULL)) == 0) {
		/* Lock the condition variable structure: */
		THR_LOCK_ACQUIRE(curthread, &(*cond)->c_lock);

		/* Process according to condition variable type: */
		switch ((*cond)->c_type) {
		/* Fast condition variable: */
		case COND_TYPE_FAST:
			/* Increment the sequence number: */
			(*cond)->c_seqno++;

			/*
			 * Wakeups have to be done with the CV lock held;
			 * otherwise there is a race condition where the
			 * thread can time out, run on another KSE, and enter
			 * another blocking state (including blocking on a CV).
			 */
			if ((pthread = TAILQ_FIRST(&(*cond)->c_queue))
			    != NULL) {
				THR_SCHED_LOCK(curthread, pthread);
				cond_queue_remove(*cond, pthread);
				_thr_setrunnable_unlocked(pthread);
				THR_SCHED_UNLOCK(curthread, pthread);
			}
			/* Check for no more waiters: */
			if (TAILQ_FIRST(&(*cond)->c_queue) == NULL)
				(*cond)->c_mutex = NULL;
			break;

		/* Trap invalid condition variable types: */
		default:
			/* Return an invalid argument error: */
			rval = EINVAL;
			break;
		}

		/* Unlock the condition variable structure: */
		THR_LOCK_RELEASE(curthread, &(*cond)->c_lock);
	}

	/* Return the completion status: */
	return (rval);
}

int
_pthread_cond_broadcast(pthread_cond_t * cond)
{
	struct pthread	*curthread = _get_curthread();
	struct pthread	*pthread;
	int		rval = 0;

	THR_ASSERT(curthread->locklevel == 0,
	    "cond_broadcast: locklevel is not zero!");
	if (cond == NULL)
		rval = EINVAL;
	/*
	 * If the condition variable is statically initialized, perform dynamic
	 * initialization.
	 */
	else if (*cond != NULL || (rval = pthread_cond_init(cond, NULL)) == 0) {
		/* Lock the condition variable structure: */
		THR_LOCK_ACQUIRE(curthread, &(*cond)->c_lock);

		/* Process according to condition variable type: */
		switch ((*cond)->c_type) {
		/* Fast condition variable: */
		case COND_TYPE_FAST:
			/* Increment the sequence number: */
			(*cond)->c_seqno++;

			/*
			 * Enter a loop to bring all threads off the
			 * condition queue:
			 */
			while ((pthread = TAILQ_FIRST(&(*cond)->c_queue))
			    != NULL) {
				THR_SCHED_LOCK(curthread, pthread);
				cond_queue_remove(*cond, pthread);
				_thr_setrunnable_unlocked(pthread);
				THR_SCHED_UNLOCK(curthread, pthread);
			}

			/* There are no more waiting threads: */
			(*cond)->c_mutex = NULL;
			break;

		/* Trap invalid condition variable types: */
		default:
			/* Return an invalid argument error: */
			rval = EINVAL;
			break;
		}

		/* Unlock the condition variable structure: */
		THR_LOCK_RELEASE(curthread, &(*cond)->c_lock);
	}

	/* Return the completion status: */
	return (rval);
}
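
/*
 * Illustrative caller-side sketch (not part of this library); the names
 * used are hypothetical.  pthread_cond_broadcast() releases every queued
 * waiter, which suits one-shot state changes that all waiters must
 * observe, whereas pthread_cond_signal() above wakes only the thread at
 * the head of the priority-ordered queue.
 *
 *	#include <pthread.h>
 *
 *	static pthread_mutex_t	gate_lock = PTHREAD_MUTEX_INITIALIZER;
 *	static pthread_cond_t	gate_cv = PTHREAD_COND_INITIALIZER;
 *	static int		gate_open = 0;
 *
 *	void
 *	example_open_gate(void)
 *	{
 *		pthread_mutex_lock(&gate_lock);
 *		gate_open = 1;
 *		pthread_cond_broadcast(&gate_cv);
 *		pthread_mutex_unlock(&gate_lock);
 *	}
 *
 *	void
 *	example_wait_for_gate(void)
 *	{
 *		pthread_mutex_lock(&gate_lock);
 *		while (gate_open == 0)
 *			pthread_cond_wait(&gate_cv, &gate_lock);
 *		pthread_mutex_unlock(&gate_lock);
 *	}
 */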

void
_cond_wait_backout(struct pthread *curthread)
{
	pthread_cond_t	cond;

	cond = curthread->data.cond;
	if (cond != NULL) {
		/* Lock the condition variable structure: */
		THR_LOCK_ACQUIRE(curthread, &cond->c_lock);

		/* Process according to condition variable type: */
		switch (cond->c_type) {
		/* Fast condition variable: */
		case COND_TYPE_FAST:
			cond_queue_remove(cond, curthread);

			/* Check for no more waiters: */
			if (TAILQ_FIRST(&cond->c_queue) == NULL)
				cond->c_mutex = NULL;
			break;

		default:
			break;
		}

		/* Unlock the condition variable structure: */
		THR_LOCK_RELEASE(curthread, &cond->c_lock);
	}
}

/*
 * Dequeue a waiting thread from the head of a condition queue in
 * descending priority order.
 */
static inline struct pthread *
cond_queue_deq(pthread_cond_t cond)
{
	struct pthread	*pthread;

	while ((pthread = TAILQ_FIRST(&cond->c_queue)) != NULL) {
		TAILQ_REMOVE(&cond->c_queue, pthread, sqe);
		THR_CONDQ_CLEAR(pthread);
		if ((pthread->timeout == 0) && (pthread->interrupted == 0))
			/*
			 * Only exit the loop when we find a thread
			 * that hasn't timed out or been canceled;
			 * those threads are already running and don't
			 * need their run state changed.
			 */
			break;
	}

	return (pthread);
}

/*
 * Remove a waiting thread from a condition queue in descending priority
 * order.
 */
static inline void
cond_queue_remove(pthread_cond_t cond, struct pthread *pthread)
{
	/*
	 * Because pthread_cond_timedwait() can time out as well
	 * as be signaled by another thread, it is necessary to
	 * guard against removing the thread from the queue if
	 * it isn't in the queue.
	 */
	if (THR_IN_CONDQ(pthread)) {
		TAILQ_REMOVE(&cond->c_queue, pthread, sqe);
		THR_CONDQ_CLEAR(pthread);
	}
}

/*
 * Enqueue a waiting thread to a condition queue in descending priority
 * order.
 */
static inline void
cond_queue_enq(pthread_cond_t cond, struct pthread *pthread)
{
	struct pthread *tid = TAILQ_LAST(&cond->c_queue, cond_head);

	THR_ASSERT(!THR_IN_SYNCQ(pthread),
	    "cond_queue_enq: thread already queued!");

	/*
	 * For the common case of all threads having equal priority,
	 * we perform a quick check against the priority of the thread
	 * at the tail of the queue.
	 */
	if ((tid == NULL) || (pthread->active_priority <= tid->active_priority))
		TAILQ_INSERT_TAIL(&cond->c_queue, pthread, sqe);
	else {
		tid = TAILQ_FIRST(&cond->c_queue);
		while (pthread->active_priority <= tid->active_priority)
			tid = TAILQ_NEXT(tid, sqe);
		TAILQ_INSERT_BEFORE(tid, pthread, sqe);
	}
	THR_CONDQ_SET(pthread);
	pthread->data.cond = cond;
}
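
/*
 * Worked example of the ordering maintained above (illustrative only; the
 * priorities are hypothetical): with waiters of active priority 30, 20 and
 * 10 already queued in that order, a new waiter of priority 20 fails the
 * tail check (20 > 10), walks from the head past the existing 30 and 20,
 * and is inserted before the 10, giving 30, 20, 20, 10.  Threads of equal
 * priority therefore queue in FIFO order, and the signal path always wakes
 * the head of the queue: the longest-waiting thread among those with the
 * highest active priority.
 */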