thr_cond.c: r213241 (deleted) vs. r216641 (added)
1/*
2 * Copyright (c) 2005 David Xu <davidxu@freebsd.org>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright

--- 9 unchanged lines hidden (view full) ---

18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 *
26 * $FreeBSD: head/lib/libthr/thread/thr_cond.c 213241 2010-09-28 04:57:56Z davidxu $
26 * $FreeBSD: head/lib/libthr/thread/thr_cond.c 216641 2010-12-22 05:01:52Z davidxu $
27 */
28
29#include "namespace.h"
30#include <stdlib.h>
31#include <errno.h>
32#include <string.h>
33#include <pthread.h>
34#include <limits.h>

--- 5 unchanged lines hidden (view full) ---

40 * Prototypes
41 */
42int __pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex);
43int __pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
44 const struct timespec * abstime);
45static int cond_init(pthread_cond_t *cond, const pthread_condattr_t *attr);
46static int cond_wait_common(pthread_cond_t *cond, pthread_mutex_t *mutex,
47 const struct timespec *abstime, int cancel);
48static int cond_signal_common(pthread_cond_t *cond, int broadcast);
48static int cond_signal_common(pthread_cond_t *cond);
49static int cond_broadcast_common(pthread_cond_t *cond);
49
50/*
51 * Double underscore versions are cancellation points. Single underscore
52 * versions are not and are provided for libc internal usage (which
53 * shouldn't introduce cancellation points).
54 */
55__weak_reference(__pthread_cond_wait, pthread_cond_wait);
56__weak_reference(__pthread_cond_timedwait, pthread_cond_timedwait);
57
58__weak_reference(_pthread_cond_init, pthread_cond_init);
59__weak_reference(_pthread_cond_destroy, pthread_cond_destroy);
60__weak_reference(_pthread_cond_signal, pthread_cond_signal);
61__weak_reference(_pthread_cond_broadcast, pthread_cond_broadcast);
62
50
51/*
52 * Double underscore versions are cancellation points. Single underscore
53 * versions are not and are provided for libc internal usage (which
54 * shouldn't introduce cancellation points).
55 */
56__weak_reference(__pthread_cond_wait, pthread_cond_wait);
57__weak_reference(__pthread_cond_timedwait, pthread_cond_timedwait);
58
59__weak_reference(_pthread_cond_init, pthread_cond_init);
60__weak_reference(_pthread_cond_destroy, pthread_cond_destroy);
61__weak_reference(_pthread_cond_signal, pthread_cond_signal);
62__weak_reference(_pthread_cond_broadcast, pthread_cond_broadcast);
63
64#define CV_PSHARED(cvp) (((cvp)->__flags & USYNC_PROCESS_SHARED) != 0)
65
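The new CV_PSHARED() test keys off USYNC_PROCESS_SHARED, which cond_init() below sets when the caller's condattr requests a process-shared condvar. As a hedged illustration (not part of the diff), this is the caller-side attribute path that leads to that flag; whether this libthr revision actually accepts PTHREAD_PROCESS_SHARED depends on pthread_condattr_setpshared(), and the helper name init_shared_cv() is invented for the example.

#include <pthread.h>

/*
 * Illustration only: requesting a process-shared condvar from the
 * application side.  If the attribute is accepted, cond_init() sets
 * USYNC_PROCESS_SHARED in cvp->__flags, which CV_PSHARED() tests.
 * In real use the condvar itself would live in shared memory.
 */
static int
init_shared_cv(pthread_cond_t *cvp)
{
	pthread_condattr_t attr;
	int error;

	if ((error = pthread_condattr_init(&attr)) != 0)
		return (error);
	error = pthread_condattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
	if (error == 0)
		error = pthread_cond_init(cvp, &attr);
	pthread_condattr_destroy(&attr);
	return (error);
}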
63static int
64cond_init(pthread_cond_t *cond, const pthread_condattr_t *cond_attr)
65{
66static int
67cond_init(pthread_cond_t *cond, const pthread_condattr_t *cond_attr)
68{
66 pthread_cond_t pcond;
67 int rval = 0;
69 struct pthread_cond *cvp;
70 int error = 0;
68
71
69 if ((pcond = (pthread_cond_t)
72 if ((cvp = (pthread_cond_t)
70 calloc(1, sizeof(struct pthread_cond))) == NULL) {
73 calloc(1, sizeof(struct pthread_cond))) == NULL) {
71 rval = ENOMEM;
74 error = ENOMEM;
72 } else {
73 /*
74 * Initialise the condition variable structure:
75 */
76 if (cond_attr == NULL || *cond_attr == NULL) {
75 } else {
76 /*
77 * Initialise the condition variable structure:
78 */
79 if (cond_attr == NULL || *cond_attr == NULL) {
77 pcond->c_pshared = 0;
78 pcond->c_clockid = CLOCK_REALTIME;
80 cvp->__clock_id = CLOCK_REALTIME;
79 } else {
81 } else {
80 pcond->c_pshared = (*cond_attr)->c_pshared;
81 pcond->c_clockid = (*cond_attr)->c_clockid;
82 if ((*cond_attr)->c_pshared)
83 cvp->__flags |= USYNC_PROCESS_SHARED;
84 cvp->__clock_id = (*cond_attr)->c_clockid;
82 }
85 }
83 _thr_umutex_init(&pcond->c_lock);
84 *cond = pcond;
86 *cond = cvp;
85 }
87 }
86 /* Return the completion status: */
87 return (rval);
88 return (error);
88}
89
90static int
91init_static(struct pthread *thread, pthread_cond_t *cond)
92{
93 int ret;
94
95 THR_LOCK_ACQUIRE(thread, &_cond_static_lock);

--- 4 unchanged lines hidden (view full) ---

100 ret = 0;
101
102 THR_LOCK_RELEASE(thread, &_cond_static_lock);
103
104 return (ret);
105}
106
107#define CHECK_AND_INIT_COND \
89}
90
91static int
92init_static(struct pthread *thread, pthread_cond_t *cond)
93{
94 int ret;
95
96 THR_LOCK_ACQUIRE(thread, &_cond_static_lock);

--- 4 unchanged lines hidden (view full) ---

101 ret = 0;
102
103 THR_LOCK_RELEASE(thread, &_cond_static_lock);
104
105 return (ret);
106}
107
108#define CHECK_AND_INIT_COND \
108 if (__predict_false((cv = (*cond)) <= THR_COND_DESTROYED)) { \
109 if (cv == THR_COND_INITIALIZER) { \
109 if (__predict_false((cvp = (*cond)) <= THR_COND_DESTROYED)) { \
110 if (cvp == THR_COND_INITIALIZER) { \
110 int ret; \
111 ret = init_static(_get_curthread(), cond); \
112 if (ret) \
113 return (ret); \
111 int ret; \
112 ret = init_static(_get_curthread(), cond); \
113 if (ret) \
114 return (ret); \
114 } else if (cv == THR_COND_DESTROYED) { \
115 } else if (cvp == THR_COND_DESTROYED) { \
115 return (EINVAL); \
116 } \
116 return (EINVAL); \
117 } \
117 cv = *cond; \
118 cvp = *cond; \
118 }
119
120int
121_pthread_cond_init(pthread_cond_t *cond, const pthread_condattr_t *cond_attr)
122{
123
124 *cond = NULL;
125 return (cond_init(cond, cond_attr));
126}
127
128int
129_pthread_cond_destroy(pthread_cond_t *cond)
130{
119 }
120
121int
122_pthread_cond_init(pthread_cond_t *cond, const pthread_condattr_t *cond_attr)
123{
124
125 *cond = NULL;
126 return (cond_init(cond, cond_attr));
127}
128
129int
130_pthread_cond_destroy(pthread_cond_t *cond)
131{
131 struct pthread *curthread = _get_curthread();
132 struct pthread_cond *cv;
133 int rval = 0;
132 struct pthread_cond *cvp;
133 int error = 0;
134
134
135 if ((cv = *cond) == THR_COND_INITIALIZER)
136 rval = 0;
137 else if (cv == THR_COND_DESTROYED)
138 rval = EINVAL;
135 if ((cvp = *cond) == THR_COND_INITIALIZER)
136 error = 0;
137 else if (cvp == THR_COND_DESTROYED)
138 error = EINVAL;
139 else {
139 else {
140 cv = *cond;
141 THR_UMUTEX_LOCK(curthread, &cv->c_lock);
140 cvp = *cond;
142 *cond = THR_COND_DESTROYED;
141 *cond = THR_COND_DESTROYED;
143 THR_UMUTEX_UNLOCK(curthread, &cv->c_lock);
144
145 /*
146 * Free the memory allocated for the condition
147 * variable structure:
148 */
142
143 /*
144 * Free the memory allocated for the condition
145 * variable structure:
146 */
149 free(cv);
147 free(cvp);
150 }
148 }
151 return (rval);
149 return (error);
152}
153
150}
151
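For context on the THR_COND_INITIALIZER and THR_COND_DESTROYED values handled above: a statically initialized condvar never goes through _pthread_cond_init(); the real structure is allocated lazily by init_static() via CHECK_AND_INIT_COND on first use. A minimal sketch of that usage pattern, assuming nothing beyond standard pthreads (the names m, c, ready and wait_for_ready() are invented for the example):

#include <pthread.h>

static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  c = PTHREAD_COND_INITIALIZER;	/* no pthread_cond_init() call */
static int ready;

void
wait_for_ready(void)
{
	pthread_mutex_lock(&m);
	while (!ready)
		pthread_cond_wait(&c, &m);	/* first use takes the init_static() path */
	pthread_mutex_unlock(&m);
}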
154struct cond_cancel_info
155{
156 pthread_mutex_t *mutex;
157 pthread_cond_t *cond;
158 int count;
159};
160
161static void
162cond_cancel_handler(void *arg)
163{
164 struct pthread *curthread = _get_curthread();
165 struct cond_cancel_info *info = (struct cond_cancel_info *)arg;
166 pthread_cond_t cv;
167
168 if (info->cond != NULL) {
169 cv = *(info->cond);
170 THR_UMUTEX_UNLOCK(curthread, &cv->c_lock);
171 }
172 _mutex_cv_lock(info->mutex, info->count);
173}
174
175/*
176 * Cancellation behavior:
177 * A thread may be canceled at the start; if it is canceled, that means
178 * it did not get a wakeup from pthread_cond_signal(); otherwise, it is
179 * not canceled.
180 * Thread cancellation never causes a wakeup from pthread_cond_signal()
181 * to be lost.
182 */
183static int
152/*
153 * Cancellation behavior:
154 * A thread may be canceled at the start; if it is canceled, that means
155 * it did not get a wakeup from pthread_cond_signal(); otherwise, it is
156 * not canceled.
157 * Thread cancellation never causes a wakeup from pthread_cond_signal()
158 * to be lost.
159 */
160static int
184cond_wait_common(pthread_cond_t *cond, pthread_mutex_t *mutex,
161cond_wait_kernel(struct pthread_cond *cvp, struct pthread_mutex *mp,
185 const struct timespec *abstime, int cancel)
186{
187 struct pthread *curthread = _get_curthread();
162 const struct timespec *abstime, int cancel)
163{
164 struct pthread *curthread = _get_curthread();
188 struct timespec ts, ts2, *tsp;
189 struct cond_cancel_info info;
190 pthread_cond_t cv;
191 int ret;
165 int recurse;
166 int error, error2 = 0;
192
167
168 error = _mutex_cv_detach(mp, &recurse);
169 if (error != 0)
170 return (error);
171
172 if (cancel) {
173 _thr_cancel_enter2(curthread, 0);
174 error = _thr_ucond_wait((struct ucond *)&cvp->__has_kern_waiters,
175 (struct umutex *)&mp->m_lock, abstime,
176 CVWAIT_ABSTIME|CVWAIT_CLOCKID);
177 _thr_cancel_leave(curthread, 0);
178 } else {
179 error = _thr_ucond_wait((struct ucond *)&cvp->__has_kern_waiters,
180 (struct umutex *)&mp->m_lock, abstime,
181 CVWAIT_ABSTIME|CVWAIT_CLOCKID);
182 }
183
193 /*
184 /*
194 * If the condition variable is statically initialized,
195 * perform the dynamic initialization:
185 * Note that PP mutex and ROBUST mutex may return
186 * interesting error codes.
196 */
187 */
197 CHECK_AND_INIT_COND
198
199 cv = *cond;
200 THR_UMUTEX_LOCK(curthread, &cv->c_lock);
201 ret = _mutex_cv_unlock(mutex, &info.count);
202 if (__predict_false(ret != 0)) {
203 THR_UMUTEX_UNLOCK(curthread, &cv->c_lock);
204 return (ret);
188 if (error == 0) {
189 error2 = _mutex_cv_lock(mp, recurse);
190 } else if (error == EINTR || error == ETIMEDOUT) {
191 error2 = _mutex_cv_lock(mp, recurse);
192 if (error2 == 0 && cancel)
193 _thr_testcancel(curthread);
194 if (error == EINTR)
195 error = 0;
196 } else {
197 /* We know that it didn't unlock the mutex. */
198 error2 = _mutex_cv_attach(mp, recurse);
199 if (error2 == 0 && cancel)
200 _thr_testcancel(curthread);
205 }
201 }
202 return (error2 != 0 ? error2 : error);
203}
206
204
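The cancellation contract described in the comment above (a canceled waiter has not consumed a signal, and a consumed signal is never lost) is visible in cond_wait_kernel(): the mutex is re-acquired via _mutex_cv_lock() before _thr_testcancel() may act. From the application side this means cancellation cleanup handlers run with the mutex held and must unlock it. A hedged sketch, assuming standard pthreads only (the queue names and consumer() are invented):

#include <pthread.h>

static pthread_mutex_t qlock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  qcond = PTHREAD_COND_INITIALIZER;
static int qlen;

static void
unlock_cleanup(void *arg)
{
	/* Runs with the mutex held if the thread is canceled in the wait. */
	pthread_mutex_unlock(arg);
}

void *
consumer(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&qlock);
	pthread_cleanup_push(unlock_cleanup, &qlock);
	while (qlen == 0)
		pthread_cond_wait(&qcond, &qlock);	/* cancellation point */
	qlen--;
	pthread_cleanup_pop(1);				/* unlocks qlock */
	return (NULL);
}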
207 info.mutex = mutex;
208 info.cond = cond;
205/*
206 * The thread waits in a userland queue whenever possible.  When the
207 * thread is signaled or broadcast, it is removed from the queue and
208 * saved in curthread's defer_waiters[] buffer, but it is not woken
209 * up until the mutex is unlocked.
210 */
209
211
210 if (abstime != NULL) {
211 clock_gettime(cv->c_clockid, &ts);
212 TIMESPEC_SUB(&ts2, abstime, &ts);
213 tsp = &ts2;
214 } else
215 tsp = NULL;
212static int
213cond_wait_user(struct pthread_cond *cvp, struct pthread_mutex *mp,
214 const struct timespec *abstime, int cancel)
215{
216 struct pthread *curthread = _get_curthread();
217 struct sleepqueue *sq;
218 int recurse;
219 int error;
216
220
217 if (cancel) {
218 THR_CLEANUP_PUSH(curthread, cond_cancel_handler, &info);
219 _thr_cancel_enter2(curthread, 0);
220 ret = _thr_ucond_wait(&cv->c_kerncv, &cv->c_lock, tsp, 1);
221 info.cond = NULL;
222 _thr_cancel_leave(curthread, (ret != 0));
223 THR_CLEANUP_POP(curthread, 0);
224 } else {
225 ret = _thr_ucond_wait(&cv->c_kerncv, &cv->c_lock, tsp, 0);
221 if (curthread->wchan != NULL)
222 PANIC("thread was already on queue.");
223
224 if (cancel)
225 _thr_testcancel(curthread);
226
227 _sleepq_lock(cvp);
228 /*
229 * Set __has_user_waiters before unlocking the mutex; this allows
230 * us to check it without locking in pthread_cond_signal().
231 */
232 cvp->__has_user_waiters = 1;
233 curthread->will_sleep = 1;
234 (void)_mutex_cv_unlock(mp, &recurse);
235 curthread->mutex_obj = mp;
236 _sleepq_add(cvp, curthread);
237 for(;;) {
238 _thr_clear_wake(curthread);
239 _sleepq_unlock(cvp);
240
241 if (cancel) {
242 _thr_cancel_enter2(curthread, 0);
243 error = _thr_sleep(curthread, cvp->__clock_id, abstime);
244 _thr_cancel_leave(curthread, 0);
245 } else {
246 error = _thr_sleep(curthread, cvp->__clock_id, abstime);
247 }
248
249 if (curthread->wchan == NULL) {
250 error = 0;
251 goto out;
252 }
253
254 _sleepq_lock(cvp);
255 if (curthread->wchan == NULL) {
256 error = 0;
257 break;
258 } else if (cancel && SHOULD_CANCEL(curthread)) {
259 sq = _sleepq_lookup(cvp);
260 cvp->__has_user_waiters =
261 _sleepq_remove(sq, curthread);
262 _sleepq_unlock(cvp);
263 curthread->mutex_obj = NULL;
264 _mutex_cv_lock(mp, recurse);
265 if (!THR_IN_CRITICAL(curthread))
266 _pthread_exit(PTHREAD_CANCELED);
267 else /* this should not happen */
268 return (0);
269 } else if (error == ETIMEDOUT) {
270 sq = _sleepq_lookup(cvp);
271 cvp->__has_user_waiters =
272 _sleepq_remove(sq, curthread);
273 break;
274 }
226 }
275 }
227 if (ret == EINTR)
228 ret = 0;
229 _mutex_cv_lock(mutex, info.count);
230 return (ret);
276 _sleepq_unlock(cvp);
277out:
278 curthread->mutex_obj = NULL;
279 _mutex_cv_lock(mp, recurse);
280 return (error);
231}
232
281}
282
283static int
284cond_wait_common(pthread_cond_t *cond, pthread_mutex_t *mutex,
285 const struct timespec *abstime, int cancel)
286{
287 struct pthread *curthread = _get_curthread();
288 struct pthread_cond *cvp;
289 struct pthread_mutex *mp;
290 int error;
291
292 CHECK_AND_INIT_COND
293
294 mp = *mutex;
295
296 if ((error = _mutex_owned(curthread, mp)) != 0)
297 return (error);
298
299 if (curthread->attr.sched_policy != SCHED_OTHER ||
300 (mp->m_lock.m_flags & (UMUTEX_PRIO_PROTECT|UMUTEX_PRIO_INHERIT|
301 USYNC_PROCESS_SHARED)) != 0 ||
302 (cvp->__flags & USYNC_PROCESS_SHARED) != 0)
303 return cond_wait_kernel(cvp, mp, abstime, cancel);
304 else
305 return cond_wait_user(cvp, mp, abstime, cancel);
306}
307
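cond_wait_common() above sends a waiter to cond_wait_kernel() whenever the thread is not SCHED_OTHER, the mutex is priority-protected, priority-inheriting, or process-shared, or the condvar itself is process-shared; everything else takes the new userland sleepqueue path. As an illustration of one way a caller lands on the kernel path (not part of the diff; the helper name and priority value are arbitrary, and SCHED_FIFO normally requires privilege):

#include <pthread.h>
#include <sched.h>

/* Create a SCHED_FIFO thread; its condvar waits use cond_wait_kernel(). */
static int
start_rt_waiter(pthread_t *tdp, void *(*fn)(void *), void *arg)
{
	pthread_attr_t attr;
	struct sched_param sp = { .sched_priority = 10 };
	int error;

	pthread_attr_init(&attr);
	pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
	pthread_attr_setschedpolicy(&attr, SCHED_FIFO);	/* not SCHED_OTHER */
	pthread_attr_setschedparam(&attr, &sp);
	error = pthread_create(tdp, &attr, fn, arg);
	pthread_attr_destroy(&attr);
	return (error);
}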
233int
234_pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
235{
236
237 return (cond_wait_common(cond, mutex, NULL, 0));
238}
239
240int
241__pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
242{
243
244 return (cond_wait_common(cond, mutex, NULL, 1));
245}
246
247int
308int
309_pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
310{
311
312 return (cond_wait_common(cond, mutex, NULL, 0));
313}
314
315int
316__pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
317{
318
319 return (cond_wait_common(cond, mutex, NULL, 1));
320}
321
322int
248_pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex,
323_pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
249 const struct timespec * abstime)
250{
251
252 if (abstime == NULL || abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
253 abstime->tv_nsec >= 1000000000)
254 return (EINVAL);
255
256 return (cond_wait_common(cond, mutex, abstime, 0));

--- 7 unchanged lines hidden (view full) ---

264 if (abstime == NULL || abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
265 abstime->tv_nsec >= 1000000000)
266 return (EINVAL);
267
268 return (cond_wait_common(cond, mutex, abstime, 1));
269}
270
271static int
324 const struct timespec * abstime)
325{
326
327 if (abstime == NULL || abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
328 abstime->tv_nsec >= 1000000000)
329 return (EINVAL);
330
331 return (cond_wait_common(cond, mutex, abstime, 0));

--- 7 unchanged lines hidden (view full) ---

339 if (abstime == NULL || abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
340 abstime->tv_nsec >= 1000000000)
341 return (EINVAL);
342
343 return (cond_wait_common(cond, mutex, abstime, 1));
344}
345
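Both timedwait wrappers reject a NULL abstime or an out-of-range tv_nsec before calling cond_wait_common(). The timeout is absolute and measured on the clock stored in __clock_id, which is CLOCK_REALTIME unless pthread_condattr_setclock() chose otherwise. A hedged usage sketch (helper name and parameters invented; assumes the condvar uses the default clock):

#include <pthread.h>
#include <time.h>

/* Wait up to 'seconds' for *flagp to become nonzero; returns 0 or an errno. */
static int
wait_ready_for(pthread_cond_t *cv, pthread_mutex_t *mp, int *flagp,
    time_t seconds)
{
	struct timespec abstime;
	int error = 0;

	clock_gettime(CLOCK_REALTIME, &abstime);	/* default condvar clock */
	abstime.tv_sec += seconds;

	pthread_mutex_lock(mp);
	while (*flagp == 0 && error == 0)
		error = pthread_cond_timedwait(cv, mp, &abstime);
	pthread_mutex_unlock(mp);
	return (*flagp != 0 ? 0 : error);
}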
346static int
272cond_signal_common(pthread_cond_t *cond, int broadcast)
347cond_signal_common(pthread_cond_t *cond)
273{
274 struct pthread *curthread = _get_curthread();
348{
349 struct pthread *curthread = _get_curthread();
275 pthread_cond_t cv;
276 int ret = 0;
350 struct pthread *td;
351 struct pthread_cond *cvp;
352 struct pthread_mutex *mp;
353 struct sleepqueue *sq;
354 int *waddr;
355 int pshared;
277
278 /*
279 * If the condition variable is statically initialized, perform dynamic
280 * initialization.
281 */
282 CHECK_AND_INIT_COND
283
356
357 /*
358 * If the condition variable is statically initialized, perform dynamic
359 * initialization.
360 */
361 CHECK_AND_INIT_COND
362
284 THR_UMUTEX_LOCK(curthread, &cv->c_lock);
285 if (!broadcast)
286 ret = _thr_ucond_signal(&cv->c_kerncv);
287 else
288 ret = _thr_ucond_broadcast(&cv->c_kerncv);
289 THR_UMUTEX_UNLOCK(curthread, &cv->c_lock);
290 return (ret);
363 pshared = CV_PSHARED(cvp);
364
365 _thr_ucond_signal((struct ucond *)&cvp->__has_kern_waiters);
366
367 if (pshared || cvp->__has_user_waiters == 0)
368 return (0);
369
370 curthread = _get_curthread();
371 waddr = NULL;
372 _sleepq_lock(cvp);
373 sq = _sleepq_lookup(cvp);
374 if (sq == NULL) {
375 _sleepq_unlock(cvp);
376 return (0);
377 }
378
379 td = _sleepq_first(sq);
380 mp = td->mutex_obj;
381 cvp->__has_user_waiters = _sleepq_remove(sq, td);
382 if (mp->m_owner == curthread) {
383 if (curthread->nwaiter_defer >= MAX_DEFER_WAITERS) {
384 _thr_wake_all(curthread->defer_waiters,
385 curthread->nwaiter_defer);
386 curthread->nwaiter_defer = 0;
387 }
388 curthread->defer_waiters[curthread->nwaiter_defer++] =
389 &td->wake_addr->value;
390 mp->m_flags |= PMUTEX_FLAG_DEFERED;
391 } else {
392 waddr = &td->wake_addr->value;
393 }
394 _sleepq_unlock(cvp);
395 if (waddr != NULL)
396 _thr_set_wake(waddr);
397 return (0);
291}
292
398}
399
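The mp->m_owner == curthread branch above is what makes "signal while holding the mutex" cheap: instead of waking a thread that would immediately block on the still-held mutex, the wake address is parked in defer_waiters[] and PMUTEX_FLAG_DEFERED tells the unlock path to issue the wakeups. A sketch of the calling pattern being optimized (illustration only; the names are invented):

#include <pthread.h>

static pthread_mutex_t plock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  pcond = PTHREAD_COND_INITIALIZER;
static int nitems;

void
produce_one(void)
{
	pthread_mutex_lock(&plock);
	nitems++;
	pthread_cond_signal(&pcond);	/* mutex still held: wakeup is deferred */
	pthread_mutex_unlock(&plock);	/* deferred waiters are actually woken here */
}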
400struct broadcast_arg {
401 struct pthread *curthread;
402 unsigned int *waddrs[MAX_DEFER_WAITERS];
403 int count;
404};
405
406static void
407drop_cb(struct pthread *td, void *arg)
408{
409 struct broadcast_arg *ba = arg;
410 struct pthread_mutex *mp;
411 struct pthread *curthread = ba->curthread;
412
413 mp = td->mutex_obj;
414 if (mp->m_owner == curthread) {
415 if (curthread->nwaiter_defer >= MAX_DEFER_WAITERS) {
416 _thr_wake_all(curthread->defer_waiters,
417 curthread->nwaiter_defer);
418 curthread->nwaiter_defer = 0;
419 }
420 curthread->defer_waiters[curthread->nwaiter_defer++] =
421 &td->wake_addr->value;
422 mp->m_flags |= PMUTEX_FLAG_DEFERED;
423 } else {
424 if (ba->count >= MAX_DEFER_WAITERS) {
425 _thr_wake_all(ba->waddrs, ba->count);
426 ba->count = 0;
427 }
428 ba->waddrs[ba->count++] = &td->wake_addr->value;
429 }
430}
431
432static int
433cond_broadcast_common(pthread_cond_t *cond)
434{
435 int pshared;
436 struct pthread_cond *cvp;
437 struct sleepqueue *sq;
438 struct broadcast_arg ba;
439
440 /*
441 * If the condition variable is statically initialized, perform dynamic
442 * initialization.
443 */
444 CHECK_AND_INIT_COND
445
446 pshared = CV_PSHARED(cvp);
447
448 _thr_ucond_broadcast((struct ucond *)&cvp->__has_kern_waiters);
449
450 if (pshared || cvp->__has_user_waiters == 0)
451 return (0);
452
453 ba.curthread = _get_curthread();
454 ba.count = 0;
455
456 _sleepq_lock(cvp);
457 sq = _sleepq_lookup(cvp);
458 if (sq == NULL) {
459 _sleepq_unlock(cvp);
460 return (0);
461 }
462 _sleepq_drop(sq, drop_cb, &ba);
463 cvp->__has_user_waiters = 0;
464 _sleepq_unlock(cvp);
465 if (ba.count > 0)
466 _thr_wake_all(ba.waddrs, ba.count);
467 return (0);
468}
469
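cond_broadcast_common() drains the whole sleepqueue through drop_cb(), again deferring wakeups for waiters whose mutex the broadcaster holds and batching the rest through _thr_wake_all(). The caller-side pattern that produces many simultaneous waiters looks like this (illustration only; the flag and function names are invented):

#include <pthread.h>

static pthread_mutex_t slock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  scond = PTHREAD_COND_INITIALIZER;
static int shutting_down;

void
request_shutdown(void)
{
	pthread_mutex_lock(&slock);
	shutting_down = 1;
	pthread_cond_broadcast(&scond);	/* every waiter re-checks the flag */
	pthread_mutex_unlock(&slock);
}

void
wait_until_shutdown(void)
{
	pthread_mutex_lock(&slock);
	while (!shutting_down)
		pthread_cond_wait(&scond, &slock);
	pthread_mutex_unlock(&slock);
}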
293int
294_pthread_cond_signal(pthread_cond_t * cond)
295{
296
470int
471_pthread_cond_signal(pthread_cond_t * cond)
472{
473
297 return (cond_signal_common(cond, 0));
474 return (cond_signal_common(cond));
298}
299
300int
301_pthread_cond_broadcast(pthread_cond_t * cond)
302{
303
475}
476
477int
478_pthread_cond_broadcast(pthread_cond_t * cond)
479{
480
304 return (cond_signal_common(cond, 1));
481 return (cond_broadcast_common(cond));
305}
482}