thr_cond.c revision 331722
/*
 * Copyright (c) 2005 David Xu <davidxu@freebsd.org>
 * Copyright (c) 2015 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/lib/libthr/thread/thr_cond.c 331722 2018-03-29 02:50:57Z eadler $");

#include "namespace.h"
#include <stdlib.h>
#include <errno.h>
#include <string.h>
#include <pthread.h>
#include <limits.h>
#include "un-namespace.h"

#include "thr_private.h"

_Static_assert(sizeof(struct pthread_cond) <= PAGE_SIZE,
    "pthread_cond too large");

/*
 * Prototypes
 */
int	__pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex);
int	__pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
	    const struct timespec *abstime);
static int cond_init(pthread_cond_t *cond, const pthread_condattr_t *attr);
static int cond_wait_common(pthread_cond_t *cond, pthread_mutex_t *mutex,
	    const struct timespec *abstime, int cancel);
static int cond_signal_common(pthread_cond_t *cond);
static int cond_broadcast_common(pthread_cond_t *cond);

/*
 * Double underscore versions are cancellation points.  Single underscore
 * versions are not and are provided for libc internal usage (which
 * shouldn't introduce cancellation points).
 */
__weak_reference(__pthread_cond_wait, pthread_cond_wait);
__weak_reference(__pthread_cond_timedwait, pthread_cond_timedwait);

__weak_reference(_pthread_cond_init, pthread_cond_init);
__weak_reference(_pthread_cond_destroy, pthread_cond_destroy);
__weak_reference(_pthread_cond_signal, pthread_cond_signal);
__weak_reference(_pthread_cond_broadcast, pthread_cond_broadcast);
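/*
 * Illustrative sketch (not part of the library): callers of either
 * entry point are expected to wait in a predicate loop, because a wait
 * may end without the predicate being true (e.g. on a spurious or
 * stolen wakeup):
 *
 *	pthread_mutex_lock(&m);
 *	while (!ready)
 *		pthread_cond_wait(&cv, &m);
 *	... use the protected state ...
 *	pthread_mutex_unlock(&m);
 */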
#define CV_PSHARED(cvp)	(((cvp)->kcond.c_flags & USYNC_PROCESS_SHARED) != 0)

static void
cond_init_body(struct pthread_cond *cvp, const struct pthread_cond_attr *cattr)
{

	if (cattr == NULL) {
		cvp->kcond.c_clockid = CLOCK_REALTIME;
	} else {
		if (cattr->c_pshared)
			cvp->kcond.c_flags |= USYNC_PROCESS_SHARED;
		cvp->kcond.c_clockid = cattr->c_clockid;
	}
}

static int
cond_init(pthread_cond_t *cond, const pthread_condattr_t *cond_attr)
{
	struct pthread_cond *cvp;
	const struct pthread_cond_attr *cattr;
	int pshared;

	cattr = cond_attr != NULL ? *cond_attr : NULL;
	if (cattr == NULL || cattr->c_pshared == PTHREAD_PROCESS_PRIVATE) {
		pshared = 0;
		cvp = calloc(1, sizeof(struct pthread_cond));
		if (cvp == NULL)
			return (ENOMEM);
	} else {
		pshared = 1;
		cvp = __thr_pshared_offpage(cond, 1);
		if (cvp == NULL)
			return (EFAULT);
	}

	/*
	 * Initialise the condition variable structure:
	 */
	cond_init_body(cvp, cattr);
	*cond = pshared ? THR_PSHARED_PTR : cvp;
	return (0);
}
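/*
 * A process-shared condvar is only created when the attribute asks for
 * it; a minimal sketch of how an application reaches the
 * __thr_pshared_offpage() path above (standard POSIX calls, with cv
 * assumed to live in a MAP_SHARED region):
 *
 *	pthread_condattr_t attr;
 *
 *	pthread_condattr_init(&attr);
 *	pthread_condattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
 *	pthread_cond_init(cv, &attr);
 *	pthread_condattr_destroy(&attr);
 */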
static int
init_static(struct pthread *thread, pthread_cond_t *cond)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_cond_static_lock);

	if (*cond == NULL)
		ret = cond_init(cond, NULL);
	else
		ret = 0;

	THR_LOCK_RELEASE(thread, &_cond_static_lock);

	return (ret);
}

#define CHECK_AND_INIT_COND						\
	if (*cond == THR_PSHARED_PTR) {					\
		cvp = __thr_pshared_offpage(cond, 0);			\
		if (cvp == NULL)					\
			return (EINVAL);				\
	} else if (__predict_false((cvp = (*cond)) <= THR_COND_DESTROYED)) { \
		if (cvp == THR_COND_INITIALIZER) {			\
			int ret;					\
			ret = init_static(_get_curthread(), cond);	\
			if (ret)					\
				return (ret);				\
		} else if (cvp == THR_COND_DESTROYED) {			\
			return (EINVAL);				\
		}							\
		cvp = *cond;						\
	}

int
_pthread_cond_init(pthread_cond_t *cond, const pthread_condattr_t *cond_attr)
{

	*cond = NULL;
	return (cond_init(cond, cond_attr));
}

int
_pthread_cond_destroy(pthread_cond_t *cond)
{
	struct pthread_cond *cvp;
	int error;

	error = 0;
	if (*cond == THR_PSHARED_PTR) {
		cvp = __thr_pshared_offpage(cond, 0);
		if (cvp != NULL)
			__thr_pshared_destroy(cond);
		*cond = THR_COND_DESTROYED;
	} else if ((cvp = *cond) == THR_COND_INITIALIZER) {
		/* nothing */
	} else if (cvp == THR_COND_DESTROYED) {
		error = EINVAL;
	} else {
		cvp = *cond;
		*cond = THR_COND_DESTROYED;
		free(cvp);
	}
	return (error);
}
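/*
 * Sketch of the values the pthread_cond_t word can hold, as implied by
 * CHECK_AND_INIT_COND and _pthread_cond_destroy() above:
 *
 *	THR_COND_INITIALIZER	statically initialized; the first use
 *				allocates the real object
 *	THR_COND_DESTROYED	destroyed; further use returns EINVAL
 *	THR_PSHARED_PTR		real object lives in the pshared offpage
 *	other			pointer to a private struct pthread_cond
 */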
/*
 * Cancellation behavior:
 *   A thread may be canceled at the start of the wait; if the thread is
 *   canceled, it means it did not get a wakeup from
 *   pthread_cond_signal(); otherwise, it is not canceled.
 *   Thread cancellation never causes a wakeup from pthread_cond_signal()
 *   to be lost.
 */
static int
cond_wait_kernel(struct pthread_cond *cvp, struct pthread_mutex *mp,
    const struct timespec *abstime, int cancel)
{
	struct pthread *curthread;
	int error, error2, recurse, robust;

	curthread = _get_curthread();
	robust = _mutex_enter_robust(curthread, mp);

	error = _mutex_cv_detach(mp, &recurse);
	if (error != 0) {
		if (robust)
			_mutex_leave_robust(curthread, mp);
		return (error);
	}

	if (cancel)
		_thr_cancel_enter2(curthread, 0);
	error = _thr_ucond_wait(&cvp->kcond, &mp->m_lock, abstime,
	    CVWAIT_ABSTIME | CVWAIT_CLOCKID);
	if (cancel)
		_thr_cancel_leave(curthread, 0);

	/*
	 * Note that PP mutex and ROBUST mutex may return
	 * interesting error codes.
	 */
	if (error == 0) {
		error2 = _mutex_cv_lock(mp, recurse, true);
	} else if (error == EINTR || error == ETIMEDOUT) {
		error2 = _mutex_cv_lock(mp, recurse, true);
		/*
		 * Do not do cancellation on EOWNERDEAD there.  The
		 * cancellation cleanup handler will use the protected
		 * state and unlock the mutex without making the state
		 * consistent, and the state will be unrecoverable.
		 */
		if (error2 == 0 && cancel) {
			if (robust) {
				_mutex_leave_robust(curthread, mp);
				robust = false;
			}
			_thr_testcancel(curthread);
		}

		if (error == EINTR)
			error = 0;
	} else {
		/* We know that it didn't unlock the mutex. */
		_mutex_cv_attach(mp, recurse);
		if (cancel) {
			if (robust) {
				_mutex_leave_robust(curthread, mp);
				robust = false;
			}
			_thr_testcancel(curthread);
		}
		error2 = 0;
	}
	if (robust)
		_mutex_leave_robust(curthread, mp);
	return (error2 != 0 ? error2 : error);
}
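/*
 * Note on the EINTR case above: POSIX does not permit
 * pthread_cond_wait() to fail with EINTR, so the error is consumed and
 * the caller simply observes an apparently spurious wakeup, re-checking
 * its predicate under the reacquired mutex.
 */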
/*
 * The thread waits in a userland queue whenever possible.  When the
 * thread is signaled or broadcast, it is removed from the queue and
 * saved in the signaler's defer_waiters[] buffer, but it is not woken
 * up until the mutex is unlocked.
 */

static int
cond_wait_user(struct pthread_cond *cvp, struct pthread_mutex *mp,
    const struct timespec *abstime, int cancel)
{
	struct pthread *curthread;
	struct sleepqueue *sq;
	int deferred, error, error2, recurse;

	curthread = _get_curthread();
	if (curthread->wchan != NULL)
		PANIC("thread %p was already on queue.", curthread);

	if (cancel)
		_thr_testcancel(curthread);

	_sleepq_lock(cvp);
	/*
	 * Set __has_user_waiters before unlocking the mutex; this allows
	 * us to check it without locking in pthread_cond_signal().
	 */
	cvp->__has_user_waiters = 1;
	deferred = 0;
	(void)_mutex_cv_unlock(mp, &recurse, &deferred);
	curthread->mutex_obj = mp;
	_sleepq_add(cvp, curthread);
	for (;;) {
		_thr_clear_wake(curthread);
		_sleepq_unlock(cvp);
		if (deferred) {
			deferred = 0;
			if ((mp->m_lock.m_owner & UMUTEX_CONTESTED) == 0)
				(void)_umtx_op_err(&mp->m_lock,
				    UMTX_OP_MUTEX_WAKE2, mp->m_lock.m_flags,
				    0, 0);
		}
		if (curthread->nwaiter_defer > 0) {
			_thr_wake_all(curthread->defer_waiters,
			    curthread->nwaiter_defer);
			curthread->nwaiter_defer = 0;
		}

		if (cancel)
			_thr_cancel_enter2(curthread, 0);
		error = _thr_sleep(curthread, cvp->kcond.c_clockid, abstime);
		if (cancel)
			_thr_cancel_leave(curthread, 0);

		_sleepq_lock(cvp);
		if (curthread->wchan == NULL) {
			error = 0;
			break;
		} else if (cancel && SHOULD_CANCEL(curthread)) {
			sq = _sleepq_lookup(cvp);
			cvp->__has_user_waiters = _sleepq_remove(sq, curthread);
			_sleepq_unlock(cvp);
			curthread->mutex_obj = NULL;
			error2 = _mutex_cv_lock(mp, recurse, false);
			if (!THR_IN_CRITICAL(curthread))
				_pthread_exit(PTHREAD_CANCELED);
			else /* this should not happen */
				return (error2);
		} else if (error == ETIMEDOUT) {
			sq = _sleepq_lookup(cvp);
			cvp->__has_user_waiters =
			    _sleepq_remove(sq, curthread);
			break;
		}
	}
	_sleepq_unlock(cvp);
	curthread->mutex_obj = NULL;
	error2 = _mutex_cv_lock(mp, recurse, false);
	if (error == 0)
		error = error2;
	return (error);
}
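/*
 * Common entry point for the wait functions below.  The kernel path is
 * taken whenever the userland sleep queue cannot be used safely: a
 * non-SCHED_OTHER scheduling policy, a priority-protect,
 * priority-inherit, or process-shared mutex, or a process-shared
 * condvar.
 */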
static int
cond_wait_common(pthread_cond_t *cond, pthread_mutex_t *mutex,
    const struct timespec *abstime, int cancel)
{
	struct pthread *curthread = _get_curthread();
	struct pthread_cond *cvp;
	struct pthread_mutex *mp;
	int error;

	CHECK_AND_INIT_COND

	if (*mutex == THR_PSHARED_PTR) {
		mp = __thr_pshared_offpage(mutex, 0);
		if (mp == NULL)
			return (EINVAL);
	} else {
		mp = *mutex;
	}

	if ((error = _mutex_owned(curthread, mp)) != 0)
		return (error);

	if (curthread->attr.sched_policy != SCHED_OTHER ||
	    (mp->m_lock.m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT |
	    USYNC_PROCESS_SHARED)) != 0 || CV_PSHARED(cvp))
		return (cond_wait_kernel(cvp, mp, abstime, cancel));
	else
		return (cond_wait_user(cvp, mp, abstime, cancel));
}

int
_pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
{

	return (cond_wait_common(cond, mutex, NULL, 0));
}

int
__pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
{

	return (cond_wait_common(cond, mutex, NULL, 1));
}

int
_pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
    const struct timespec *abstime)
{

	if (abstime == NULL || abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
	    abstime->tv_nsec >= 1000000000)
		return (EINVAL);

	return (cond_wait_common(cond, mutex, abstime, 0));
}

int
__pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
    const struct timespec *abstime)
{

	if (abstime == NULL || abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
	    abstime->tv_nsec >= 1000000000)
		return (EINVAL);

	return (cond_wait_common(cond, mutex, abstime, 1));
}
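/*
 * Illustrative only: abstime is an absolute deadline measured on the
 * condvar's clock (CLOCK_REALTIME unless changed with
 * pthread_condattr_setclock()), e.g. for a two-second timeout:
 *
 *	struct timespec ts;
 *
 *	clock_gettime(CLOCK_REALTIME, &ts);
 *	ts.tv_sec += 2;
 *	while (!ready) {
 *		error = pthread_cond_timedwait(&cv, &m, &ts);
 *		if (error == ETIMEDOUT)
 *			break;
 *	}
 */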
static int
cond_signal_common(pthread_cond_t *cond)
{
	struct pthread *curthread = _get_curthread();
	struct pthread *td;
	struct pthread_cond *cvp;
	struct pthread_mutex *mp;
	struct sleepqueue *sq;
	int *waddr;
	int pshared;

	/*
	 * If the condition variable is statically initialized, perform dynamic
	 * initialization.
	 */
	CHECK_AND_INIT_COND

	pshared = CV_PSHARED(cvp);

	_thr_ucond_signal(&cvp->kcond);

	if (pshared || cvp->__has_user_waiters == 0)
		return (0);

	curthread = _get_curthread();
	waddr = NULL;
	_sleepq_lock(cvp);
	sq = _sleepq_lookup(cvp);
	if (sq == NULL) {
		_sleepq_unlock(cvp);
		return (0);
	}

	td = _sleepq_first(sq);
	mp = td->mutex_obj;
	cvp->__has_user_waiters = _sleepq_remove(sq, td);
	if (PMUTEX_OWNER_ID(mp) == TID(curthread)) {
		if (curthread->nwaiter_defer >= MAX_DEFER_WAITERS) {
			_thr_wake_all(curthread->defer_waiters,
			    curthread->nwaiter_defer);
			curthread->nwaiter_defer = 0;
		}
		curthread->defer_waiters[curthread->nwaiter_defer++] =
		    &td->wake_addr->value;
		mp->m_flags |= PMUTEX_FLAG_DEFERRED;
	} else {
		waddr = &td->wake_addr->value;
	}
	_sleepq_unlock(cvp);
	if (waddr != NULL)
		_thr_set_wake(waddr);
	return (0);
}

struct broadcast_arg {
	struct pthread *curthread;
	unsigned int *waddrs[MAX_DEFER_WAITERS];
	int count;
};

static void
drop_cb(struct pthread *td, void *arg)
{
	struct broadcast_arg *ba = arg;
	struct pthread_mutex *mp;
	struct pthread *curthread = ba->curthread;

	mp = td->mutex_obj;
	if (PMUTEX_OWNER_ID(mp) == TID(curthread)) {
		if (curthread->nwaiter_defer >= MAX_DEFER_WAITERS) {
			_thr_wake_all(curthread->defer_waiters,
			    curthread->nwaiter_defer);
			curthread->nwaiter_defer = 0;
		}
		curthread->defer_waiters[curthread->nwaiter_defer++] =
		    &td->wake_addr->value;
		mp->m_flags |= PMUTEX_FLAG_DEFERRED;
	} else {
		if (ba->count >= MAX_DEFER_WAITERS) {
			_thr_wake_all(ba->waddrs, ba->count);
			ba->count = 0;
		}
		ba->waddrs[ba->count++] = &td->wake_addr->value;
	}
}
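/*
 * drop_cb() batches the wakeups: waiters whose mutex is currently held
 * by the broadcasting thread are deferred until that mutex is unlocked
 * (PMUTEX_FLAG_DEFERRED); the rest are woken in batches of at most
 * MAX_DEFER_WAITERS.
 */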
static int
cond_broadcast_common(pthread_cond_t *cond)
{
	int pshared;
	struct pthread_cond *cvp;
	struct sleepqueue *sq;
	struct broadcast_arg ba;

	/*
	 * If the condition variable is statically initialized, perform dynamic
	 * initialization.
	 */
	CHECK_AND_INIT_COND

	pshared = CV_PSHARED(cvp);

	_thr_ucond_broadcast(&cvp->kcond);

	if (pshared || cvp->__has_user_waiters == 0)
		return (0);

	ba.curthread = _get_curthread();
	ba.count = 0;

	_sleepq_lock(cvp);
	sq = _sleepq_lookup(cvp);
	if (sq == NULL) {
		_sleepq_unlock(cvp);
		return (0);
	}
	_sleepq_drop(sq, drop_cb, &ba);
	cvp->__has_user_waiters = 0;
	_sleepq_unlock(cvp);
	if (ba.count > 0)
		_thr_wake_all(ba.waddrs, ba.count);
	return (0);
}

int
_pthread_cond_signal(pthread_cond_t *cond)
{

	return (cond_signal_common(cond));
}

int
_pthread_cond_broadcast(pthread_cond_t *cond)
{

	return (cond_broadcast_common(cond));
}