thr_cond.c revision 296162
1112918Sjeff/*
2144518Sdavidxu * Copyright (c) 2005 David Xu <davidxu@freebsd.org>
3296162Skib * Copyright (c) 2015 The FreeBSD Foundation
4112918Sjeff * All rights reserved.
5112918Sjeff *
6296162Skib * Portions of this software were developed by Konstantin Belousov
7296162Skib * under sponsorship from the FreeBSD Foundation.
8296162Skib *
9112918Sjeff * Redistribution and use in source and binary forms, with or without
10112918Sjeff * modification, are permitted provided that the following conditions
11112918Sjeff * are met:
12112918Sjeff * 1. Redistributions of source code must retain the above copyright
13144518Sdavidxu *    notice unmodified, this list of conditions, and the following
14144518Sdavidxu *    disclaimer.
15112918Sjeff * 2. Redistributions in binary form must reproduce the above copyright
16112918Sjeff *    notice, this list of conditions and the following disclaimer in the
17112918Sjeff *    documentation and/or other materials provided with the distribution.
18112918Sjeff *
19144518Sdavidxu * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
20144518Sdavidxu * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
21144518Sdavidxu * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
22144518Sdavidxu * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
23144518Sdavidxu * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
24144518Sdavidxu * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25144518Sdavidxu * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26144518Sdavidxu * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27144518Sdavidxu * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
28144518Sdavidxu * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29112918Sjeff *
30112918Sjeff * $FreeBSD: head/lib/libthr/thread/thr_cond.c 296162 2016-02-28 17:52:33Z kib $
31112918Sjeff */
32144518Sdavidxu
33157457Sdavidxu#include "namespace.h"
34112918Sjeff#include <stdlib.h>
35112918Sjeff#include <errno.h>
36112918Sjeff#include <string.h>
37112918Sjeff#include <pthread.h>
38144518Sdavidxu#include <limits.h>
39157457Sdavidxu#include "un-namespace.h"
40144518Sdavidxu
41112918Sjeff#include "thr_private.h"
42112918Sjeff
43112918Sjeff/*
44144518Sdavidxu * Prototypes
45115389Smtm */
46157457Sdavidxuint	__pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex);
47157457Sdavidxuint	__pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
48157457Sdavidxu		       const struct timespec * abstime);
49144518Sdavidxustatic int cond_init(pthread_cond_t *cond, const pthread_condattr_t *attr);
50144518Sdavidxustatic int cond_wait_common(pthread_cond_t *cond, pthread_mutex_t *mutex,
51144518Sdavidxu		    const struct timespec *abstime, int cancel);
52216641Sdavidxustatic int cond_signal_common(pthread_cond_t *cond);
53216641Sdavidxustatic int cond_broadcast_common(pthread_cond_t *cond);
54115389Smtm
55115389Smtm/*
56144518Sdavidxu * Double underscore versions are cancellation points.  Single underscore
57144518Sdavidxu * versions are not and are provided for libc internal usage (which
58144518Sdavidxu * shouldn't introduce cancellation points).
59112918Sjeff */
60144518Sdavidxu__weak_reference(__pthread_cond_wait, pthread_cond_wait);
61144518Sdavidxu__weak_reference(__pthread_cond_timedwait, pthread_cond_timedwait);
62112918Sjeff
63112918Sjeff__weak_reference(_pthread_cond_init, pthread_cond_init);
64112918Sjeff__weak_reference(_pthread_cond_destroy, pthread_cond_destroy);
65112918Sjeff__weak_reference(_pthread_cond_signal, pthread_cond_signal);
66112918Sjeff__weak_reference(_pthread_cond_broadcast, pthread_cond_broadcast);
67112918Sjeff
/* True when the condvar was initialized with PTHREAD_PROCESS_SHARED. */
#define CV_PSHARED(cvp)	(((cvp)->__flags & USYNC_PROCESS_SHARED) != 0)
69216641Sdavidxu
70296162Skibstatic void
71296162Skibcond_init_body(struct pthread_cond *cvp, const struct pthread_cond_attr *cattr)
72296162Skib{
73296162Skib
74296162Skib	if (cattr == NULL) {
75296162Skib		cvp->__clock_id = CLOCK_REALTIME;
76296162Skib	} else {
77296162Skib		if (cattr->c_pshared)
78296162Skib			cvp->__flags |= USYNC_PROCESS_SHARED;
79296162Skib		cvp->__clock_id = cattr->c_clockid;
80296162Skib	}
81296162Skib}
82296162Skib
83144518Sdavidxustatic int
84144518Sdavidxucond_init(pthread_cond_t *cond, const pthread_condattr_t *cond_attr)
85144518Sdavidxu{
86296162Skib	struct pthread_cond *cvp;
87296162Skib	const struct pthread_cond_attr *cattr;
88296162Skib	int pshared;
89112918Sjeff
90296162Skib	cattr = cond_attr != NULL ? *cond_attr : NULL;
91296162Skib	if (cattr == NULL || cattr->c_pshared == PTHREAD_PROCESS_PRIVATE) {
92296162Skib		pshared = 0;
93296162Skib		cvp = calloc(1, sizeof(struct pthread_cond));
94296162Skib		if (cvp == NULL)
95296162Skib			return (ENOMEM);
96144518Sdavidxu	} else {
97296162Skib		pshared = 1;
98296162Skib		cvp = __thr_pshared_offpage(cond, 1);
99296162Skib		if (cvp == NULL)
100296162Skib			return (EFAULT);
101144518Sdavidxu	}
102296162Skib
103296162Skib	/*
104296162Skib	 * Initialise the condition variable structure:
105296162Skib	 */
106296162Skib	cond_init_body(cvp, cattr);
107296162Skib	*cond = pshared ? THR_PSHARED_PTR : cvp;
108296162Skib	return (0);
109144518Sdavidxu}
110112918Sjeff
111144518Sdavidxustatic int
112144518Sdavidxuinit_static(struct pthread *thread, pthread_cond_t *cond)
113112918Sjeff{
114144518Sdavidxu	int ret;
115112918Sjeff
116144518Sdavidxu	THR_LOCK_ACQUIRE(thread, &_cond_static_lock);
117144518Sdavidxu
118112918Sjeff	if (*cond == NULL)
119144518Sdavidxu		ret = cond_init(cond, NULL);
120144518Sdavidxu	else
121144518Sdavidxu		ret = 0;
122112918Sjeff
123144518Sdavidxu	THR_LOCK_RELEASE(thread, &_cond_static_lock);
124112918Sjeff
125144518Sdavidxu	return (ret);
126112918Sjeff}
127112918Sjeff
/*
 * Resolve the user-visible handle "cond" into the backing structure,
 * leaving the result in "cvp" (which must be in scope).  Performs
 * on-demand initialization of statically initialized condvars and
 * returns EINVAL from the enclosing function for destroyed or broken
 * shared handles.
 */
#define CHECK_AND_INIT_COND							\
	if (*cond == THR_PSHARED_PTR) {						\
		cvp = __thr_pshared_offpage(cond, 0);				\
		if (cvp == NULL)						\
			return (EINVAL);					\
	} else if (__predict_false((cvp = (*cond)) <= THR_COND_DESTROYED)) {	\
		if (cvp == THR_COND_INITIALIZER) {				\
			int ret = init_static(_get_curthread(), cond);		\
			if (ret)						\
				return (ret);					\
		} else if (cvp == THR_COND_DESTROYED) {				\
			return (EINVAL);					\
		}								\
		cvp = *cond;							\
	}
144213241Sdavidxu
145112918Sjeffint
146112918Sjeff_pthread_cond_init(pthread_cond_t *cond, const pthread_condattr_t *cond_attr)
147112918Sjeff{
148112918Sjeff
149144518Sdavidxu	*cond = NULL;
150144518Sdavidxu	return (cond_init(cond, cond_attr));
151112918Sjeff}
152112918Sjeff
153112918Sjeffint
154112918Sjeff_pthread_cond_destroy(pthread_cond_t *cond)
155112918Sjeff{
156296162Skib	struct pthread_cond *cvp;
157296162Skib	int error;
158112918Sjeff
159296162Skib	error = 0;
160296162Skib	if (*cond == THR_PSHARED_PTR) {
161296162Skib		cvp = __thr_pshared_offpage(cond, 0);
162296162Skib		if (cvp != NULL)
163296162Skib			__thr_pshared_destroy(cond);
164296162Skib		*cond = THR_COND_DESTROYED;
165296162Skib	} else if ((cvp = *cond) == THR_COND_INITIALIZER) {
166296162Skib		/* nothing */
167296162Skib	} else if (cvp == THR_COND_DESTROYED) {
168216641Sdavidxu		error = EINVAL;
169296162Skib	} else {
170216641Sdavidxu		cvp = *cond;
171213241Sdavidxu		*cond = THR_COND_DESTROYED;
172216641Sdavidxu		free(cvp);
173144518Sdavidxu	}
174216641Sdavidxu	return (error);
175112918Sjeff}
176112918Sjeff
177211524Sdavidxu/*
178270972Srpaulo * Cancellation behavior:
179211524Sdavidxu *   Thread may be canceled at start, if thread is canceled, it means it
180211524Sdavidxu *   did not get a wakeup from pthread_cond_signal(), otherwise, it is
181211524Sdavidxu *   not canceled.
182211524Sdavidxu *   Thread cancellation never cause wakeup from pthread_cond_signal()
183211524Sdavidxu *   to be lost.
184211524Sdavidxu */
185115035Smtmstatic int
186216641Sdavidxucond_wait_kernel(struct pthread_cond *cvp, struct pthread_mutex *mp,
187144518Sdavidxu	const struct timespec *abstime, int cancel)
188115035Smtm{
189144518Sdavidxu	struct pthread	*curthread = _get_curthread();
190216641Sdavidxu	int		recurse;
191216641Sdavidxu	int		error, error2 = 0;
192112918Sjeff
193216641Sdavidxu	error = _mutex_cv_detach(mp, &recurse);
194216641Sdavidxu	if (error != 0)
195216641Sdavidxu		return (error);
196216641Sdavidxu
197216641Sdavidxu	if (cancel) {
198216641Sdavidxu		_thr_cancel_enter2(curthread, 0);
199216641Sdavidxu		error = _thr_ucond_wait((struct ucond *)&cvp->__has_kern_waiters,
200216641Sdavidxu			(struct umutex *)&mp->m_lock, abstime,
201216641Sdavidxu			CVWAIT_ABSTIME|CVWAIT_CLOCKID);
202216641Sdavidxu		_thr_cancel_leave(curthread, 0);
203216641Sdavidxu	} else {
204216641Sdavidxu		error = _thr_ucond_wait((struct ucond *)&cvp->__has_kern_waiters,
205216641Sdavidxu			(struct umutex *)&mp->m_lock, abstime,
206216641Sdavidxu			CVWAIT_ABSTIME|CVWAIT_CLOCKID);
207216641Sdavidxu	}
208216641Sdavidxu
209112918Sjeff	/*
210216641Sdavidxu	 * Note that PP mutex and ROBUST mutex may return
211216641Sdavidxu	 * interesting error codes.
212112918Sjeff	 */
213216641Sdavidxu	if (error == 0) {
214216641Sdavidxu		error2 = _mutex_cv_lock(mp, recurse);
215216641Sdavidxu	} else if (error == EINTR || error == ETIMEDOUT) {
216216641Sdavidxu		error2 = _mutex_cv_lock(mp, recurse);
217216641Sdavidxu		if (error2 == 0 && cancel)
218216641Sdavidxu			_thr_testcancel(curthread);
219216641Sdavidxu		if (error == EINTR)
220216641Sdavidxu			error = 0;
221216641Sdavidxu	} else {
222216641Sdavidxu		/* We know that it didn't unlock the mutex. */
223216641Sdavidxu		error2 = _mutex_cv_attach(mp, recurse);
224216641Sdavidxu		if (error2 == 0 && cancel)
225216641Sdavidxu			_thr_testcancel(curthread);
226112918Sjeff	}
227216641Sdavidxu	return (error2 != 0 ? error2 : error);
228216641Sdavidxu}
229164877Sdavidxu
230216641Sdavidxu/*
231216641Sdavidxu * Thread waits in userland queue whenever possible, when thread
232216641Sdavidxu * is signaled or broadcasted, it is removed from the queue, and
233216641Sdavidxu * is saved in curthread's defer_waiters[] buffer, but won't be
234216641Sdavidxu * woken up until mutex is unlocked.
235216641Sdavidxu */
236112918Sjeff
237216641Sdavidxustatic int
238216641Sdavidxucond_wait_user(struct pthread_cond *cvp, struct pthread_mutex *mp,
239216641Sdavidxu	const struct timespec *abstime, int cancel)
240216641Sdavidxu{
241216641Sdavidxu	struct pthread	*curthread = _get_curthread();
242216641Sdavidxu	struct sleepqueue *sq;
243216641Sdavidxu	int	recurse;
244216641Sdavidxu	int	error;
245239200Sdavidxu	int	defered;
246112918Sjeff
247216641Sdavidxu	if (curthread->wchan != NULL)
248216641Sdavidxu		PANIC("thread was already on queue.");
249216641Sdavidxu
250216641Sdavidxu	if (cancel)
251216641Sdavidxu		_thr_testcancel(curthread);
252216641Sdavidxu
253216641Sdavidxu	_sleepq_lock(cvp);
254216641Sdavidxu	/*
255216641Sdavidxu	 * set __has_user_waiters before unlocking mutex, this allows
256216641Sdavidxu	 * us to check it without locking in pthread_cond_signal().
257216641Sdavidxu	 */
258216641Sdavidxu	cvp->__has_user_waiters = 1;
259239200Sdavidxu	defered = 0;
260239200Sdavidxu	(void)_mutex_cv_unlock(mp, &recurse, &defered);
261216641Sdavidxu	curthread->mutex_obj = mp;
262216641Sdavidxu	_sleepq_add(cvp, curthread);
263216641Sdavidxu	for(;;) {
264216641Sdavidxu		_thr_clear_wake(curthread);
265216641Sdavidxu		_sleepq_unlock(cvp);
266239200Sdavidxu		if (defered) {
267239206Sdavidxu			defered = 0;
268239200Sdavidxu			if ((mp->m_lock.m_owner & UMUTEX_CONTESTED) == 0)
269239200Sdavidxu				(void)_umtx_op_err(&mp->m_lock, UMTX_OP_MUTEX_WAKE2,
270239200Sdavidxu					 mp->m_lock.m_flags, 0, 0);
271239200Sdavidxu		}
272239200Sdavidxu		if (curthread->nwaiter_defer > 0) {
273239200Sdavidxu			_thr_wake_all(curthread->defer_waiters,
274239200Sdavidxu				curthread->nwaiter_defer);
275239200Sdavidxu			curthread->nwaiter_defer = 0;
276239200Sdavidxu		}
277216641Sdavidxu
278216641Sdavidxu		if (cancel) {
279216641Sdavidxu			_thr_cancel_enter2(curthread, 0);
280216641Sdavidxu			error = _thr_sleep(curthread, cvp->__clock_id, abstime);
281216641Sdavidxu			_thr_cancel_leave(curthread, 0);
282216641Sdavidxu		} else {
283216641Sdavidxu			error = _thr_sleep(curthread, cvp->__clock_id, abstime);
284216641Sdavidxu		}
285216641Sdavidxu
286216641Sdavidxu		_sleepq_lock(cvp);
287216641Sdavidxu		if (curthread->wchan == NULL) {
288216641Sdavidxu			error = 0;
289216641Sdavidxu			break;
290216641Sdavidxu		} else if (cancel && SHOULD_CANCEL(curthread)) {
291216641Sdavidxu			sq = _sleepq_lookup(cvp);
292216641Sdavidxu			cvp->__has_user_waiters =
293216641Sdavidxu				_sleepq_remove(sq, curthread);
294216641Sdavidxu			_sleepq_unlock(cvp);
295216641Sdavidxu			curthread->mutex_obj = NULL;
296216641Sdavidxu			_mutex_cv_lock(mp, recurse);
297216641Sdavidxu			if (!THR_IN_CRITICAL(curthread))
298216641Sdavidxu				_pthread_exit(PTHREAD_CANCELED);
299216641Sdavidxu			else /* this should not happen */
300216641Sdavidxu				return (0);
301216641Sdavidxu		} else if (error == ETIMEDOUT) {
302216641Sdavidxu			sq = _sleepq_lookup(cvp);
303216641Sdavidxu			cvp->__has_user_waiters =
304216641Sdavidxu				_sleepq_remove(sq, curthread);
305216641Sdavidxu			break;
306216641Sdavidxu		}
307112918Sjeff	}
308216641Sdavidxu	_sleepq_unlock(cvp);
309216641Sdavidxu	curthread->mutex_obj = NULL;
310216641Sdavidxu	_mutex_cv_lock(mp, recurse);
311216641Sdavidxu	return (error);
312112918Sjeff}
313112918Sjeff
314216641Sdavidxustatic int
315216641Sdavidxucond_wait_common(pthread_cond_t *cond, pthread_mutex_t *mutex,
316216641Sdavidxu	const struct timespec *abstime, int cancel)
317216641Sdavidxu{
318216641Sdavidxu	struct pthread	*curthread = _get_curthread();
319216641Sdavidxu	struct pthread_cond *cvp;
320216641Sdavidxu	struct pthread_mutex *mp;
321216641Sdavidxu	int	error;
322216641Sdavidxu
323216641Sdavidxu	CHECK_AND_INIT_COND
324216641Sdavidxu
325296162Skib	if (*mutex == THR_PSHARED_PTR) {
326296162Skib		mp = __thr_pshared_offpage(mutex, 0);
327296162Skib		if (mp == NULL)
328296162Skib			return (EINVAL);
329296162Skib	} else {
330296162Skib		mp = *mutex;
331296162Skib	}
332216641Sdavidxu
333216641Sdavidxu	if ((error = _mutex_owned(curthread, mp)) != 0)
334216641Sdavidxu		return (error);
335216641Sdavidxu
336216641Sdavidxu	if (curthread->attr.sched_policy != SCHED_OTHER ||
337216641Sdavidxu	    (mp->m_lock.m_flags & (UMUTEX_PRIO_PROTECT|UMUTEX_PRIO_INHERIT|
338216641Sdavidxu		USYNC_PROCESS_SHARED)) != 0 ||
339216641Sdavidxu	    (cvp->__flags & USYNC_PROCESS_SHARED) != 0)
340216641Sdavidxu		return cond_wait_kernel(cvp, mp, abstime, cancel);
341216641Sdavidxu	else
342216641Sdavidxu		return cond_wait_user(cvp, mp, abstime, cancel);
343216641Sdavidxu}
344216641Sdavidxu
/* Non-cancellation-point variant, for libc internal use. */
int
_pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
{

	return (cond_wait_common(cond, mutex, NULL, 0));
}
351112918Sjeff
/* Cancellation-point variant exported as pthread_cond_wait(). */
int
__pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
{

	return (cond_wait_common(cond, mutex, NULL, 1));
}
358115277Smtm
/* Non-cancellation-point timed wait, for libc internal use. */
int
_pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
		       const struct timespec * abstime)
{

	/* Reject a missing or malformed absolute timeout up front. */
	if (abstime == NULL || abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
	    abstime->tv_nsec >= 1000000000)
		return (EINVAL);

	return (cond_wait_common(cond, mutex, abstime, 0));
}
370112918Sjeff
/* Cancellation-point variant exported as pthread_cond_timedwait(). */
int
__pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
		       const struct timespec *abstime)
{

	/* Reject a missing or malformed absolute timeout up front. */
	if (abstime == NULL || abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
	    abstime->tv_nsec >= 1000000000)
		return (EINVAL);

	return (cond_wait_common(cond, mutex, abstime, 1));
}
382112918Sjeff
383144518Sdavidxustatic int
384216641Sdavidxucond_signal_common(pthread_cond_t *cond)
385112918Sjeff{
386144518Sdavidxu	struct pthread	*curthread = _get_curthread();
387216641Sdavidxu	struct pthread *td;
388216641Sdavidxu	struct pthread_cond *cvp;
389216641Sdavidxu	struct pthread_mutex *mp;
390216641Sdavidxu	struct sleepqueue *sq;
391216641Sdavidxu	int	*waddr;
392216641Sdavidxu	int	pshared;
393112918Sjeff
394112918Sjeff	/*
395144518Sdavidxu	 * If the condition variable is statically initialized, perform dynamic
396144518Sdavidxu	 * initialization.
397112918Sjeff	 */
398213241Sdavidxu	CHECK_AND_INIT_COND
399144518Sdavidxu
400216641Sdavidxu	pshared = CV_PSHARED(cvp);
401216641Sdavidxu
402216641Sdavidxu	_thr_ucond_signal((struct ucond *)&cvp->__has_kern_waiters);
403216641Sdavidxu
404216641Sdavidxu	if (pshared || cvp->__has_user_waiters == 0)
405216641Sdavidxu		return (0);
406216641Sdavidxu
407216641Sdavidxu	curthread = _get_curthread();
408216641Sdavidxu	waddr = NULL;
409216641Sdavidxu	_sleepq_lock(cvp);
410216641Sdavidxu	sq = _sleepq_lookup(cvp);
411216641Sdavidxu	if (sq == NULL) {
412216641Sdavidxu		_sleepq_unlock(cvp);
413216641Sdavidxu		return (0);
414216641Sdavidxu	}
415216641Sdavidxu
416216641Sdavidxu	td = _sleepq_first(sq);
417216641Sdavidxu	mp = td->mutex_obj;
418216641Sdavidxu	cvp->__has_user_waiters = _sleepq_remove(sq, td);
419296162Skib	if (mp->m_owner == TID(curthread)) {
420216641Sdavidxu		if (curthread->nwaiter_defer >= MAX_DEFER_WAITERS) {
421216641Sdavidxu			_thr_wake_all(curthread->defer_waiters,
422216641Sdavidxu					curthread->nwaiter_defer);
423216641Sdavidxu			curthread->nwaiter_defer = 0;
424216641Sdavidxu		}
425216641Sdavidxu		curthread->defer_waiters[curthread->nwaiter_defer++] =
426216641Sdavidxu			&td->wake_addr->value;
427216641Sdavidxu		mp->m_flags |= PMUTEX_FLAG_DEFERED;
428216641Sdavidxu	} else {
429216641Sdavidxu		waddr = &td->wake_addr->value;
430216641Sdavidxu	}
431216641Sdavidxu	_sleepq_unlock(cvp);
432216641Sdavidxu	if (waddr != NULL)
433216641Sdavidxu		_thr_set_wake(waddr);
434216641Sdavidxu	return (0);
435112918Sjeff}
436112918Sjeff
437216641Sdavidxustruct broadcast_arg {
438216641Sdavidxu	struct pthread *curthread;
439216641Sdavidxu	unsigned int *waddrs[MAX_DEFER_WAITERS];
440216641Sdavidxu	int count;
441216641Sdavidxu};
442216641Sdavidxu
443216641Sdavidxustatic void
444216641Sdavidxudrop_cb(struct pthread *td, void *arg)
445216641Sdavidxu{
446216641Sdavidxu	struct broadcast_arg *ba = arg;
447216641Sdavidxu	struct pthread_mutex *mp;
448216641Sdavidxu	struct pthread *curthread = ba->curthread;
449216641Sdavidxu
450216641Sdavidxu	mp = td->mutex_obj;
451296162Skib	if (mp->m_owner == TID(curthread)) {
452216641Sdavidxu		if (curthread->nwaiter_defer >= MAX_DEFER_WAITERS) {
453216641Sdavidxu			_thr_wake_all(curthread->defer_waiters,
454216641Sdavidxu				curthread->nwaiter_defer);
455216641Sdavidxu			curthread->nwaiter_defer = 0;
456216641Sdavidxu		}
457216641Sdavidxu		curthread->defer_waiters[curthread->nwaiter_defer++] =
458216641Sdavidxu			&td->wake_addr->value;
459216641Sdavidxu		mp->m_flags |= PMUTEX_FLAG_DEFERED;
460216641Sdavidxu	} else {
461216641Sdavidxu		if (ba->count >= MAX_DEFER_WAITERS) {
462216641Sdavidxu			_thr_wake_all(ba->waddrs, ba->count);
463216641Sdavidxu			ba->count = 0;
464216641Sdavidxu		}
465216641Sdavidxu		ba->waddrs[ba->count++] = &td->wake_addr->value;
466216641Sdavidxu	}
467216641Sdavidxu}
468216641Sdavidxu
469216641Sdavidxustatic int
470216641Sdavidxucond_broadcast_common(pthread_cond_t *cond)
471216641Sdavidxu{
472216641Sdavidxu	int    pshared;
473216641Sdavidxu	struct pthread_cond *cvp;
474216641Sdavidxu	struct sleepqueue *sq;
475216641Sdavidxu	struct broadcast_arg ba;
476216641Sdavidxu
477216641Sdavidxu	/*
478216641Sdavidxu	 * If the condition variable is statically initialized, perform dynamic
479216641Sdavidxu	 * initialization.
480216641Sdavidxu	 */
481216641Sdavidxu	CHECK_AND_INIT_COND
482216641Sdavidxu
483216641Sdavidxu	pshared = CV_PSHARED(cvp);
484216641Sdavidxu
485216641Sdavidxu	_thr_ucond_broadcast((struct ucond *)&cvp->__has_kern_waiters);
486216641Sdavidxu
487216641Sdavidxu	if (pshared || cvp->__has_user_waiters == 0)
488216641Sdavidxu		return (0);
489216641Sdavidxu
490216641Sdavidxu	ba.curthread = _get_curthread();
491216641Sdavidxu	ba.count = 0;
492216641Sdavidxu
493216641Sdavidxu	_sleepq_lock(cvp);
494216641Sdavidxu	sq = _sleepq_lookup(cvp);
495216641Sdavidxu	if (sq == NULL) {
496216641Sdavidxu		_sleepq_unlock(cvp);
497216641Sdavidxu		return (0);
498216641Sdavidxu	}
499216641Sdavidxu	_sleepq_drop(sq, drop_cb, &ba);
500216641Sdavidxu	cvp->__has_user_waiters = 0;
501216641Sdavidxu	_sleepq_unlock(cvp);
502216641Sdavidxu	if (ba.count > 0)
503216641Sdavidxu		_thr_wake_all(ba.waddrs, ba.count);
504216641Sdavidxu	return (0);
505216641Sdavidxu}
506216641Sdavidxu
/* Exported pthread_cond_signal() entry point. */
int
_pthread_cond_signal(pthread_cond_t * cond)
{

	return (cond_signal_common(cond));
}
513115389Smtm
/* Exported pthread_cond_broadcast() entry point. */
int
_pthread_cond_broadcast(pthread_cond_t * cond)
{

	return (cond_broadcast_common(cond));
}
520