1112918Sjeff/*
2144518Sdavidxu * Copyright (c) 2005 David Xu <davidxu@freebsd.org>
3112918Sjeff * All rights reserved.
4112918Sjeff *
5112918Sjeff * Redistribution and use in source and binary forms, with or without
6112918Sjeff * modification, are permitted provided that the following conditions
7112918Sjeff * are met:
8112918Sjeff * 1. Redistributions of source code must retain the above copyright
9144518Sdavidxu *    notice unmodified, this list of conditions, and the following
10144518Sdavidxu *    disclaimer.
11112918Sjeff * 2. Redistributions in binary form must reproduce the above copyright
12112918Sjeff *    notice, this list of conditions and the following disclaimer in the
13112918Sjeff *    documentation and/or other materials provided with the distribution.
14112918Sjeff *
15144518Sdavidxu * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16144518Sdavidxu * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17144518Sdavidxu * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18144518Sdavidxu * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19144518Sdavidxu * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20144518Sdavidxu * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21144518Sdavidxu * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22144518Sdavidxu * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23144518Sdavidxu * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24144518Sdavidxu * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25112918Sjeff *
26112918Sjeff * $FreeBSD$
27112918Sjeff */
28144518Sdavidxu
29157457Sdavidxu#include "namespace.h"
30112918Sjeff#include <stdlib.h>
31112918Sjeff#include <errno.h>
32112918Sjeff#include <string.h>
33112918Sjeff#include <pthread.h>
34144518Sdavidxu#include <limits.h>
35157457Sdavidxu#include "un-namespace.h"
36144518Sdavidxu
37112918Sjeff#include "thr_private.h"
38112918Sjeff
/*
 * Prototypes
 */
int	__pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex);
int	__pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
		       const struct timespec * abstime);
static int cond_init(pthread_cond_t *cond, const pthread_condattr_t *attr);
static int cond_wait_common(pthread_cond_t *cond, pthread_mutex_t *mutex,
		    const struct timespec *abstime, int cancel);
static int cond_signal_common(pthread_cond_t *cond);
static int cond_broadcast_common(pthread_cond_t *cond);

/*
 * Double underscore versions are cancellation points.  Single underscore
 * versions are not and are provided for libc internal usage (which
 * shouldn't introduce cancellation points).
 */
__weak_reference(__pthread_cond_wait, pthread_cond_wait);
__weak_reference(__pthread_cond_timedwait, pthread_cond_timedwait);

__weak_reference(_pthread_cond_init, pthread_cond_init);
__weak_reference(_pthread_cond_destroy, pthread_cond_destroy);
__weak_reference(_pthread_cond_signal, pthread_cond_signal);
__weak_reference(_pthread_cond_broadcast, pthread_cond_broadcast);

/* True when the condition variable is shared between processes. */
#define CV_PSHARED(cvp)	(((cvp)->__flags & USYNC_PROCESS_SHARED) != 0)
65216641Sdavidxu
66144518Sdavidxustatic int
67144518Sdavidxucond_init(pthread_cond_t *cond, const pthread_condattr_t *cond_attr)
68144518Sdavidxu{
69216641Sdavidxu	struct pthread_cond	*cvp;
70216641Sdavidxu	int	error = 0;
71112918Sjeff
72216641Sdavidxu	if ((cvp = (pthread_cond_t)
73164877Sdavidxu	    calloc(1, sizeof(struct pthread_cond))) == NULL) {
74216641Sdavidxu		error = ENOMEM;
75144518Sdavidxu	} else {
76144518Sdavidxu		/*
77144518Sdavidxu		 * Initialise the condition variable structure:
78144518Sdavidxu		 */
79144518Sdavidxu		if (cond_attr == NULL || *cond_attr == NULL) {
80216641Sdavidxu			cvp->__clock_id = CLOCK_REALTIME;
81144518Sdavidxu		} else {
82216641Sdavidxu			if ((*cond_attr)->c_pshared)
83216641Sdavidxu				cvp->__flags |= USYNC_PROCESS_SHARED;
84216641Sdavidxu			cvp->__clock_id = (*cond_attr)->c_clockid;
85144518Sdavidxu		}
86216641Sdavidxu		*cond = cvp;
87144518Sdavidxu	}
88216641Sdavidxu	return (error);
89144518Sdavidxu}
90112918Sjeff
91144518Sdavidxustatic int
92144518Sdavidxuinit_static(struct pthread *thread, pthread_cond_t *cond)
93112918Sjeff{
94144518Sdavidxu	int ret;
95112918Sjeff
96144518Sdavidxu	THR_LOCK_ACQUIRE(thread, &_cond_static_lock);
97144518Sdavidxu
98112918Sjeff	if (*cond == NULL)
99144518Sdavidxu		ret = cond_init(cond, NULL);
100144518Sdavidxu	else
101144518Sdavidxu		ret = 0;
102112918Sjeff
103144518Sdavidxu	THR_LOCK_RELEASE(thread, &_cond_static_lock);
104112918Sjeff
105144518Sdavidxu	return (ret);
106112918Sjeff}
107112918Sjeff
/*
 * Validate the caller's cv handle and bind the local variable `cvp'.
 * Handles the two sentinel values: a static initializer triggers lazy
 * initialization via init_static(), and a destroyed cv yields EINVAL.
 * Expands in a scope where `cond' (pthread_cond_t *) and `cvp'
 * (struct pthread_cond *) are in scope; may return from the caller.
 */
#define CHECK_AND_INIT_COND							\
	if (__predict_false((cvp = (*cond)) <= THR_COND_DESTROYED)) {		\
		if (cvp == THR_COND_INITIALIZER) {				\
			int ret;						\
			ret = init_static(_get_curthread(), cond);		\
			if (ret)						\
				return (ret);					\
		} else if (cvp == THR_COND_DESTROYED) {				\
			return (EINVAL);					\
		}								\
		cvp = *cond;							\
	}
120213241Sdavidxu
121112918Sjeffint
122112918Sjeff_pthread_cond_init(pthread_cond_t *cond, const pthread_condattr_t *cond_attr)
123112918Sjeff{
124112918Sjeff
125144518Sdavidxu	*cond = NULL;
126144518Sdavidxu	return (cond_init(cond, cond_attr));
127112918Sjeff}
128112918Sjeff
129112918Sjeffint
130112918Sjeff_pthread_cond_destroy(pthread_cond_t *cond)
131112918Sjeff{
132216641Sdavidxu	struct pthread_cond	*cvp;
133216641Sdavidxu	int			error = 0;
134112918Sjeff
135216641Sdavidxu	if ((cvp = *cond) == THR_COND_INITIALIZER)
136216641Sdavidxu		error = 0;
137216641Sdavidxu	else if (cvp == THR_COND_DESTROYED)
138216641Sdavidxu		error = EINVAL;
139144518Sdavidxu	else {
140216641Sdavidxu		cvp = *cond;
141213241Sdavidxu		*cond = THR_COND_DESTROYED;
142112918Sjeff
143144518Sdavidxu		/*
144144518Sdavidxu		 * Free the memory allocated for the condition
145144518Sdavidxu		 * variable structure:
146144518Sdavidxu		 */
147216641Sdavidxu		free(cvp);
148144518Sdavidxu	}
149216641Sdavidxu	return (error);
150112918Sjeff}
151112918Sjeff
/*
 * Cancellation behavior:
 *   The thread may be canceled at the start of the wait.  If the thread
 *   is canceled, it means it did not get a wakeup from
 *   pthread_cond_signal(); otherwise, it is not canceled.
 *   Thread cancellation never causes a wakeup from pthread_cond_signal()
 *   to be lost.
 */
/*
 * Wait on the kernel-side ucond.  Used when the userland sleep queue
 * cannot be (see cond_wait_common() for the selection criteria).  The
 * mutex is detached and handed to the kernel, which releases it and
 * blocks the thread atomically; on return the mutex is reacquired.
 */
static int
cond_wait_kernel(struct pthread_cond *cvp, struct pthread_mutex *mp,
	const struct timespec *abstime, int cancel)
{
	struct pthread	*curthread = _get_curthread();
	int		recurse;
	int		error, error2 = 0;

	/* Detach the mutex from this thread, saving its recursion count. */
	error = _mutex_cv_detach(mp, &recurse);
	if (error != 0)
		return (error);

	if (cancel) {
		/* Make the sleep a cancellation point. */
		_thr_cancel_enter2(curthread, 0);
		error = _thr_ucond_wait((struct ucond *)&cvp->__has_kern_waiters,
			(struct umutex *)&mp->m_lock, abstime,
			CVWAIT_ABSTIME|CVWAIT_CLOCKID);
		_thr_cancel_leave(curthread, 0);
	} else {
		error = _thr_ucond_wait((struct ucond *)&cvp->__has_kern_waiters,
			(struct umutex *)&mp->m_lock, abstime,
			CVWAIT_ABSTIME|CVWAIT_CLOCKID);
	}

	/*
	 * Note that PP mutex and ROBUST mutex may return
	 * interesting error codes.
	 */
	if (error == 0) {
		error2 = _mutex_cv_lock(mp, recurse);
	} else if (error == EINTR || error == ETIMEDOUT) {
		error2 = _mutex_cv_lock(mp, recurse);
		/* A pending cancel may fire once the mutex is held again. */
		if (error2 == 0 && cancel)
			_thr_testcancel(curthread);
		/* EINTR is a spurious wakeup, not an error to the caller. */
		if (error == EINTR)
			error = 0;
	} else {
		/* We know that it didn't unlock the mutex. */
		error2 = _mutex_cv_attach(mp, recurse);
		if (error2 == 0 && cancel)
			_thr_testcancel(curthread);
	}
	/* A relock/attach failure takes precedence over the wait status. */
	return (error2 != 0 ? error2 : error);
}
204164877Sdavidxu
/*
 * The thread waits in a userland queue whenever possible.  When the
 * thread is signaled or broadcast to, it is removed from the queue and
 * its wake address is saved in curthread's defer_waiters[] buffer, but
 * it won't be woken up until the mutex is unlocked.
 */
211112918Sjeff
/*
 * Wait on the userland sleep queue attached to the cv.  The thread
 * parks itself on the queue, releases the mutex, and sleeps until it
 * is dequeued by signal/broadcast, canceled, or times out; the mutex
 * is reacquired before returning.
 */
static int
cond_wait_user(struct pthread_cond *cvp, struct pthread_mutex *mp,
	const struct timespec *abstime, int cancel)
{
	struct pthread	*curthread = _get_curthread();
	struct sleepqueue *sq;
	int	recurse;	/* saved mutex recursion count */
	int	error;
	int	defered;	/* mutex unlock deferred a contender wakeup */

	if (curthread->wchan != NULL)
		PANIC("thread was already on queue.");

	/* Honor a cancel already pending before we queue ourselves. */
	if (cancel)
		_thr_testcancel(curthread);

	_sleepq_lock(cvp);
	/*
	 * set __has_user_waiters before unlocking mutex, this allows
	 * us to check it without locking in pthread_cond_signal().
	 */
	cvp->__has_user_waiters = 1;
	defered = 0;
	(void)_mutex_cv_unlock(mp, &recurse, &defered);
	curthread->mutex_obj = mp;
	_sleepq_add(cvp, curthread);
	for(;;) {
		_thr_clear_wake(curthread);
		_sleepq_unlock(cvp);
		/* Wake a mutex contender whose wakeup was deferred above. */
		if (defered) {
			defered = 0;
			if ((mp->m_lock.m_owner & UMUTEX_CONTESTED) == 0)
				(void)_umtx_op_err(&mp->m_lock, UMTX_OP_MUTEX_WAKE2,
					 mp->m_lock.m_flags, 0, 0);
		}
		/* Flush wakeups this thread had deferred while holding mp. */
		if (curthread->nwaiter_defer > 0) {
			_thr_wake_all(curthread->defer_waiters,
				curthread->nwaiter_defer);
			curthread->nwaiter_defer = 0;
		}

		if (cancel) {
			/* Sleep is a cancellation point in this mode. */
			_thr_cancel_enter2(curthread, 0);
			error = _thr_sleep(curthread, cvp->__clock_id, abstime);
			_thr_cancel_leave(curthread, 0);
		} else {
			error = _thr_sleep(curthread, cvp->__clock_id, abstime);
		}

		_sleepq_lock(cvp);
		if (curthread->wchan == NULL) {
			/* Dequeued by signal/broadcast: a real wakeup. */
			error = 0;
			break;
		} else if (cancel && SHOULD_CANCEL(curthread)) {
			/* Canceled while still queued: dequeue, relock, exit. */
			sq = _sleepq_lookup(cvp);
			cvp->__has_user_waiters =
				_sleepq_remove(sq, curthread);
			_sleepq_unlock(cvp);
			curthread->mutex_obj = NULL;
			_mutex_cv_lock(mp, recurse);
			if (!THR_IN_CRITICAL(curthread))
				_pthread_exit(PTHREAD_CANCELED);
			else /* this should not happen */
				return (0);
		} else if (error == ETIMEDOUT) {
			/* Timed out while still queued: remove ourselves. */
			sq = _sleepq_lookup(cvp);
			cvp->__has_user_waiters =
				_sleepq_remove(sq, curthread);
			break;
		}
		/* Otherwise: spurious wakeup; loop and sleep again. */
	}
	_sleepq_unlock(cvp);
	curthread->mutex_obj = NULL;
	_mutex_cv_lock(mp, recurse);
	return (error);
}
288112918Sjeff
/*
 * Common body of pthread_cond_wait() and pthread_cond_timedwait():
 * validate the cv and mutex, then dispatch to the kernel or userland
 * wait path.  `cancel' selects whether the wait is a cancellation point.
 */
static int
cond_wait_common(pthread_cond_t *cond, pthread_mutex_t *mutex,
	const struct timespec *abstime, int cancel)
{
	struct pthread	*curthread = _get_curthread();
	struct pthread_cond *cvp;
	struct pthread_mutex *mp;
	int	error;

	CHECK_AND_INIT_COND

	mp = *mutex;

	/* The caller must hold the mutex. */
	if ((error = _mutex_owned(curthread, mp)) != 0)
		return (error);

	/*
	 * The userland sleep queue is usable only for SCHED_OTHER threads
	 * on plain private mutexes with a private cv; any priority
	 * protocol or process-shared object forces the kernel path.
	 */
	if (curthread->attr.sched_policy != SCHED_OTHER ||
	    (mp->m_lock.m_flags & (UMUTEX_PRIO_PROTECT|UMUTEX_PRIO_INHERIT|
		USYNC_PROCESS_SHARED)) != 0 ||
	    (cvp->__flags & USYNC_PROCESS_SHARED) != 0)
		return cond_wait_kernel(cvp, mp, abstime, cancel);
	else
		return cond_wait_user(cvp, mp, abstime, cancel);
}
313216641Sdavidxu
/* Non-cancellation-point variant for libc internal use. */
int
_pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
{

	return (cond_wait_common(cond, mutex, NULL, 0));
}
320112918Sjeff
/* Public pthread_cond_wait(); this wait is a cancellation point. */
int
__pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
{

	return (cond_wait_common(cond, mutex, NULL, 1));
}
327115277Smtm
/*
 * Non-cancellation-point timed wait for libc internal use.  Rejects a
 * null or malformed timestamp with EINVAL before entering the wait.
 */
int
_pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
		       const struct timespec * abstime)
{

	if (abstime == NULL)
		return (EINVAL);
	if (abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
	    abstime->tv_nsec >= 1000000000)
		return (EINVAL);

	return (cond_wait_common(cond, mutex, abstime, 0));
}
339112918Sjeff
/*
 * Public pthread_cond_timedwait(); a cancellation point.  Rejects a
 * null or malformed timestamp with EINVAL before entering the wait.
 */
int
__pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
		       const struct timespec *abstime)
{

	if (abstime == NULL)
		return (EINVAL);
	if (abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
	    abstime->tv_nsec >= 1000000000)
		return (EINVAL);

	return (cond_wait_common(cond, mutex, abstime, 1));
}
351112918Sjeff
352144518Sdavidxustatic int
353216641Sdavidxucond_signal_common(pthread_cond_t *cond)
354112918Sjeff{
355144518Sdavidxu	struct pthread	*curthread = _get_curthread();
356216641Sdavidxu	struct pthread *td;
357216641Sdavidxu	struct pthread_cond *cvp;
358216641Sdavidxu	struct pthread_mutex *mp;
359216641Sdavidxu	struct sleepqueue *sq;
360216641Sdavidxu	int	*waddr;
361216641Sdavidxu	int	pshared;
362112918Sjeff
363112918Sjeff	/*
364144518Sdavidxu	 * If the condition variable is statically initialized, perform dynamic
365144518Sdavidxu	 * initialization.
366112918Sjeff	 */
367213241Sdavidxu	CHECK_AND_INIT_COND
368144518Sdavidxu
369216641Sdavidxu	pshared = CV_PSHARED(cvp);
370216641Sdavidxu
371216641Sdavidxu	_thr_ucond_signal((struct ucond *)&cvp->__has_kern_waiters);
372216641Sdavidxu
373216641Sdavidxu	if (pshared || cvp->__has_user_waiters == 0)
374216641Sdavidxu		return (0);
375216641Sdavidxu
376216641Sdavidxu	curthread = _get_curthread();
377216641Sdavidxu	waddr = NULL;
378216641Sdavidxu	_sleepq_lock(cvp);
379216641Sdavidxu	sq = _sleepq_lookup(cvp);
380216641Sdavidxu	if (sq == NULL) {
381216641Sdavidxu		_sleepq_unlock(cvp);
382216641Sdavidxu		return (0);
383216641Sdavidxu	}
384216641Sdavidxu
385216641Sdavidxu	td = _sleepq_first(sq);
386216641Sdavidxu	mp = td->mutex_obj;
387216641Sdavidxu	cvp->__has_user_waiters = _sleepq_remove(sq, td);
388216641Sdavidxu	if (mp->m_owner == curthread) {
389216641Sdavidxu		if (curthread->nwaiter_defer >= MAX_DEFER_WAITERS) {
390216641Sdavidxu			_thr_wake_all(curthread->defer_waiters,
391216641Sdavidxu					curthread->nwaiter_defer);
392216641Sdavidxu			curthread->nwaiter_defer = 0;
393216641Sdavidxu		}
394216641Sdavidxu		curthread->defer_waiters[curthread->nwaiter_defer++] =
395216641Sdavidxu			&td->wake_addr->value;
396216641Sdavidxu		mp->m_flags |= PMUTEX_FLAG_DEFERED;
397216641Sdavidxu	} else {
398216641Sdavidxu		waddr = &td->wake_addr->value;
399216641Sdavidxu	}
400216641Sdavidxu	_sleepq_unlock(cvp);
401216641Sdavidxu	if (waddr != NULL)
402216641Sdavidxu		_thr_set_wake(waddr);
403216641Sdavidxu	return (0);
404112918Sjeff}
405112918Sjeff
/*
 * Per-broadcast state handed to drop_cb(): immediate wake addresses are
 * batched in waddrs[] and flushed MAX_DEFER_WAITERS entries at a time.
 */
struct broadcast_arg {
	struct pthread *curthread;	/* the broadcasting thread */
	unsigned int *waddrs[MAX_DEFER_WAITERS];	/* pending wake addresses */
	int count;			/* valid entries in waddrs[] */
};
411216641Sdavidxu
/*
 * Broadcast callback, invoked once per dequeued waiter: defer the
 * wakeup when the broadcaster owns the waiter's mutex (it would block
 * on the mutex right away anyway), otherwise batch the wake address
 * into ba->waddrs[], flushing the batch whenever it fills.
 */
static void
drop_cb(struct pthread *td, void *arg)
{
	struct broadcast_arg *ba = arg;
	struct pthread_mutex *mp;
	struct pthread *curthread = ba->curthread;

	mp = td->mutex_obj;
	if (mp->m_owner == curthread) {
		/* Same deferral scheme as cond_signal_common(). */
		if (curthread->nwaiter_defer >= MAX_DEFER_WAITERS) {
			_thr_wake_all(curthread->defer_waiters,
				curthread->nwaiter_defer);
			curthread->nwaiter_defer = 0;
		}
		curthread->defer_waiters[curthread->nwaiter_defer++] =
			&td->wake_addr->value;
		mp->m_flags |= PMUTEX_FLAG_DEFERED;
	} else {
		if (ba->count >= MAX_DEFER_WAITERS) {
			_thr_wake_all(ba->waddrs, ba->count);
			ba->count = 0;
		}
		ba->waddrs[ba->count++] = &td->wake_addr->value;
	}
}
437216641Sdavidxu
/*
 * Wake all waiters.  Kernel-side waiters are woken via the ucond; all
 * userland waiters are dropped from the sleep queue under its lock,
 * with wakeups issued (or deferred) through drop_cb().
 */
static int
cond_broadcast_common(pthread_cond_t *cond)
{
	int    pshared;
	struct pthread_cond *cvp;
	struct sleepqueue *sq;
	struct broadcast_arg ba;

	/*
	 * If the condition variable is statically initialized, perform dynamic
	 * initialization.
	 */
	CHECK_AND_INIT_COND

	pshared = CV_PSHARED(cvp);

	/* Wake every waiter sleeping in the kernel on this cv. */
	_thr_ucond_broadcast((struct ucond *)&cvp->__has_kern_waiters);

	/* Process-shared cvs never use the userland sleep queue. */
	if (pshared || cvp->__has_user_waiters == 0)
		return (0);

	ba.curthread = _get_curthread();
	ba.count = 0;

	_sleepq_lock(cvp);
	sq = _sleepq_lookup(cvp);
	if (sq == NULL) {
		_sleepq_unlock(cvp);
		return (0);
	}
	/* Dequeue every waiter, collecting/deferring wakeups in ba. */
	_sleepq_drop(sq, drop_cb, &ba);
	cvp->__has_user_waiters = 0;
	_sleepq_unlock(cvp);
	/* Flush the remaining batched wakeups outside the queue lock. */
	if (ba.count > 0)
		_thr_wake_all(ba.waddrs, ba.count);
	return (0);
}
475216641Sdavidxu
/* Public pthread_cond_signal(): wake one waiter. */
int
_pthread_cond_signal(pthread_cond_t * cond)
{

	return (cond_signal_common(cond));
}
482115389Smtm
/* Public pthread_cond_broadcast(): wake all waiters. */
int
_pthread_cond_broadcast(pthread_cond_t * cond)
{

	return (cond_broadcast_common(cond));
}
489