--- thr_mutex.c	(revision 212077)
+++ thr_mutex.c	(revision 213241)
 /*
  * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
  * Copyright (c) 2006 David Xu <davidxu@freebsd.org>.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:

--- 16 unchanged lines hidden ---

  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- * $FreeBSD: head/lib/libthr/thread/thr_mutex.c 212077 2010-09-01 03:11:21Z davidxu $
+ * $FreeBSD: head/lib/libthr/thread/thr_mutex.c 213241 2010-09-28 04:57:56Z davidxu $
  */

 #include "namespace.h"
 #include <stdlib.h>
 #include <errno.h>
 #include <string.h>
 #include <sys/param.h>
 #include <sys/queue.h>

--- 77 unchanged lines hidden ---

 __weak_reference(__pthread_mutex_setyieldloops_np, pthread_mutex_setyieldloops_np);
 __strong_reference(__pthread_mutex_setyieldloops_np, _pthread_mutex_setyieldloops_np);
 __weak_reference(_pthread_mutex_getyieldloops_np, pthread_mutex_getyieldloops_np);
 __weak_reference(_pthread_mutex_isowned_np, pthread_mutex_isowned_np);

 static int
 mutex_init(pthread_mutex_t *mutex,
-    const pthread_mutexattr_t *mutex_attr,
+    const struct pthread_mutex_attr *mutex_attr,
     void *(calloc_cb)(size_t, size_t))
 {
 	const struct pthread_mutex_attr *attr;
 	struct pthread_mutex *pmutex;

 	if (mutex_attr == NULL) {
 		attr = &_pthread_mutexattr_default;
 	} else {
-		attr = *mutex_attr;
+		attr = mutex_attr;
 		if (attr->m_type < PTHREAD_MUTEX_ERRORCHECK ||
 		    attr->m_type >= PTHREAD_MUTEX_TYPE_MAX)
 			return (EINVAL);
 		if (attr->m_protocol < PTHREAD_PRIO_NONE ||
 		    attr->m_protocol > PTHREAD_PRIO_PROTECT)
 			return (EINVAL);
 	}
 	if ((pmutex = (pthread_mutex_t)
 	    calloc_cb(1, sizeof(struct pthread_mutex))) == NULL)
 		return (ENOMEM);

 	pmutex->m_type = attr->m_type;
 	pmutex->m_owner = NULL;
 	pmutex->m_count = 0;
 	pmutex->m_refcount = 0;
 	pmutex->m_spinloops = 0;
 	pmutex->m_yieldloops = 0;
 	MUTEX_INIT_LINK(pmutex);
 	switch(attr->m_protocol) {
+	case PTHREAD_PRIO_NONE:
+		pmutex->m_lock.m_owner = UMUTEX_UNOWNED;
+		pmutex->m_lock.m_flags = 0;
+		break;
 	case PTHREAD_PRIO_INHERIT:
 		pmutex->m_lock.m_owner = UMUTEX_UNOWNED;
 		pmutex->m_lock.m_flags = UMUTEX_PRIO_INHERIT;
 		break;
 	case PTHREAD_PRIO_PROTECT:
 		pmutex->m_lock.m_owner = UMUTEX_CONTESTED;
 		pmutex->m_lock.m_flags = UMUTEX_PRIO_PROTECT;
 		pmutex->m_lock.m_ceilings[0] = attr->m_ceiling;
 		break;
-	case PTHREAD_PRIO_NONE:
-		pmutex->m_lock.m_owner = UMUTEX_UNOWNED;
-		pmutex->m_lock.m_flags = 0;
 	}

 	if (pmutex->m_type == PTHREAD_MUTEX_ADAPTIVE_NP) {
 		pmutex->m_spinloops =
 		    _thr_spinloops ? _thr_spinloops: MUTEX_ADAPTIVE_SPINS;
 		pmutex->m_yieldloops = _thr_yieldloops;
 	}

 	*mutex = pmutex;
 	return (0);
 }

 static int
 init_static(struct pthread *thread, pthread_mutex_t *mutex)
 {
 	int ret;

 	THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);

-	if (*mutex == NULL)
-		ret = mutex_init(mutex, NULL, calloc);
+	if (*mutex == THR_MUTEX_INITIALIZER)
+		ret = mutex_init(mutex, &_pthread_mutexattr_default, calloc);
+	else if (*mutex == THR_ADAPTIVE_MUTEX_INITIALIZER)
+		ret = mutex_init(mutex, &_pthread_mutexattr_adaptive_default, calloc);
 	else
 		ret = 0;

 	THR_LOCK_RELEASE(thread, &_mutex_static_lock);

 	return (ret);
 }

 static void
 set_inherited_priority(struct pthread *curthread, struct pthread_mutex *m)
 {

--- 5 unchanged lines hidden ---

 	else
 		m->m_lock.m_ceilings[1] = -1;
 }

 int
 __pthread_mutex_init(pthread_mutex_t *mutex,
     const pthread_mutexattr_t *mutex_attr)
 {
-	return mutex_init(mutex, mutex_attr, calloc);
+	return mutex_init(mutex, mutex_attr ? *mutex_attr : NULL, calloc);
 }

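The attribute checks in mutex_init() above are reachable from ordinary application code through __pthread_mutex_init(), which now unwraps the public pthread_mutexattr_t before delegating. A minimal caller-side sketch using only the standard pthread API (not part of this diff):

    #include <pthread.h>
    #include <stdio.h>

    int main(void) {
        pthread_mutexattr_t attr;
        pthread_mutex_t m;
        int r;

        pthread_mutexattr_init(&attr);
        /* Both of these values are range-checked by mutex_init(). */
        pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
        pthread_mutexattr_setprotocol(&attr, PTHREAD_PRIO_INHERIT);

        r = pthread_mutex_init(&m, &attr);
        if (r != 0)
            fprintf(stderr, "pthread_mutex_init: %d\n", r);

        pthread_mutexattr_destroy(&attr);
        pthread_mutex_destroy(&m);
        return 0;
    }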
 /* This function is used internally by malloc. */
 int
 _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
     void *(calloc_cb)(size_t, size_t))
 {
 	static const struct pthread_mutex_attr attr = {
 		.m_type = PTHREAD_MUTEX_NORMAL,
 		.m_protocol = PTHREAD_PRIO_NONE,
 		.m_ceiling = 0
 	};
-	static const struct pthread_mutex_attr *pattr = &attr;
 	int ret;

-	ret = mutex_init(mutex, (pthread_mutexattr_t *)&pattr, calloc_cb);
+	ret = mutex_init(mutex, &attr, calloc_cb);
 	if (ret == 0)
 		(*mutex)->m_private = 1;
 	return (ret);
 }

 void
 _mutex_fork(struct pthread *curthread)
 {

--- 18 unchanged lines hidden ---

 int
 _pthread_mutex_destroy(pthread_mutex_t *mutex)
 {
 	struct pthread *curthread = _get_curthread();
 	pthread_mutex_t m;
 	uint32_t id;
 	int ret = 0;

-	if (__predict_false(*mutex == NULL))
-		ret = EINVAL;
-	else {
+	m = *mutex;
+	if (m < THR_MUTEX_DESTROYED) {
+		ret = 0;
+	} else if (m == THR_MUTEX_DESTROYED) {
+		ret = EINVAL;
+	} else {
 		id = TID(curthread);

 		/*
 		 * Try to lock the mutex structure; we only need to
 		 * try once. If that fails, the mutex is in use.
 		 */
-		ret = _thr_umutex_trylock(&(*mutex)->m_lock, id);
+		ret = _thr_umutex_trylock(&m->m_lock, id);
 		if (ret)
 			return (ret);
-		m = *mutex;
 		/*
 		 * Check the mutex's other fields to see if it is still in
 		 * use; this mostly matters for priority-protocol mutex
 		 * types and for mutexes referenced by condition variables.
 		 */
 		if (m->m_owner != NULL || m->m_refcount != 0) {
 			if (m->m_lock.m_flags & UMUTEX_PRIO_PROTECT)
 				set_inherited_priority(curthread, m);
 			_thr_umutex_unlock(&m->m_lock, id);
 			ret = EBUSY;
 		} else {
-			/*
-			 * Save a pointer to the mutex so it can be free'd
-			 * and set the caller's pointer to NULL.
-			 */
-			*mutex = NULL;
+			*mutex = THR_MUTEX_DESTROYED;

 			if (m->m_lock.m_flags & UMUTEX_PRIO_PROTECT)
 				set_inherited_priority(curthread, m);
 			_thr_umutex_unlock(&m->m_lock, id);

 			MUTEX_ASSERT_NOT_OWNED(m);
 			free(m);
 		}
--- 8 unchanged lines hidden ---

 		/* Add to the list of owned mutexes: */			\
 		MUTEX_ASSERT_NOT_OWNED((m));				\
 		if (((m)->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)	\
 			TAILQ_INSERT_TAIL(&curthread->mutexq, (m), m_qe);\
 		else							\
 			TAILQ_INSERT_TAIL(&curthread->pp_mutexq, (m), m_qe);\
 	} while (0)

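With _pthread_mutex_destroy() (in the hunk above) now parking the caller's handle on THR_MUTEX_DESTROYED instead of NULL, later operations on a destroyed mutex fail deterministically rather than looking like a static initializer. A small sketch of the observable behavior under this revision; POSIX leaves several of these cases undefined, so the exact error values are implementation details:

    #include <pthread.h>
    #include <errno.h>
    #include <stdio.h>

    int main(void) {
        pthread_mutex_t m;

        pthread_mutex_init(&m, NULL);
        pthread_mutex_lock(&m);
        /* An owned or referenced mutex cannot be destroyed. */
        printf("destroy while locked: %d (EBUSY=%d)\n",
            pthread_mutex_destroy(&m), EBUSY);
        pthread_mutex_unlock(&m);

        printf("destroy: %d\n", pthread_mutex_destroy(&m));   /* 0 */
        /* The handle now holds THR_MUTEX_DESTROYED, not NULL. */
        printf("destroy again: %d (EINVAL=%d)\n",
            pthread_mutex_destroy(&m), EINVAL);
        printf("unlock destroyed: %d (EINVAL=%d)\n",
            pthread_mutex_unlock(&m), EINVAL);
        return 0;
    }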
+#define CHECK_AND_INIT_MUTEX						\
+	if (__predict_false((m = *mutex) <= THR_MUTEX_DESTROYED)) {	\
+		if (m == THR_MUTEX_DESTROYED)				\
+			return (EINVAL);				\
+		int ret;						\
+		ret = init_static(_get_curthread(), mutex);		\
+		if (ret)						\
+			return (ret);					\
+		m = *mutex;						\
+	}
+
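CHECK_AND_INIT_MUTEX collapses the static-initializer and destroyed cases into one pointer comparison. That works because the sentinels are tiny integer-valued pointers; the definitions below mirror what thr_private.h is assumed to contain at this revision and are reproduced here only to illustrate the ordering trick, not as part of the diff:

    #include <stdio.h>

    /* Assumed sentinel values (thr_private.h): */
    struct pthread_mutex;
    #define THR_MUTEX_INITIALIZER          ((struct pthread_mutex *)0)
    #define THR_ADAPTIVE_MUTEX_INITIALIZER ((struct pthread_mutex *)1)
    #define THR_MUTEX_DESTROYED            ((struct pthread_mutex *)2)

    int main(void) {
        struct pthread_mutex *m = THR_MUTEX_INITIALIZER;

        /* Anything at or below THR_MUTEX_DESTROYED is a sentinel: either
         * a static initializer awaiting allocation, or an already
         * destroyed mutex that must fail with EINVAL. */
        printf("is sentinel:  %d\n", m <= THR_MUTEX_DESTROYED);
        printf("is destroyed: %d\n", m == THR_MUTEX_DESTROYED);
        return 0;
    }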
 static int
-mutex_trylock_common(struct pthread *curthread, pthread_mutex_t *mutex)
+mutex_trylock_common(pthread_mutex_t *mutex)
 {
-	struct pthread_mutex *m;
+	struct pthread *curthread = _get_curthread();
+	struct pthread_mutex *m = *mutex;
 	uint32_t id;
 	int ret;

 	id = TID(curthread);
-	m = *mutex;
 	if (m->m_private)
 		THR_CRITICAL_ENTER(curthread);
 	ret = _thr_umutex_trylock(&m->m_lock, id);
-	if (ret == 0) {
+	if (__predict_true(ret == 0)) {
 		ENQUEUE_MUTEX(curthread, m);
 	} else if (m->m_owner == curthread) {
 		ret = mutex_self_trylock(m);
 	} /* else {} */
 	if (ret && m->m_private)
 		THR_CRITICAL_LEAVE(curthread);
 	return (ret);
 }

 int
 __pthread_mutex_trylock(pthread_mutex_t *mutex)
 {
-	struct pthread *curthread = _get_curthread();
-	int ret;
+	struct pthread_mutex *m;

-	/*
-	 * If the mutex is statically initialized, perform the dynamic
-	 * initialization:
-	 */
-	if (__predict_false(*mutex == NULL)) {
-		ret = init_static(curthread, mutex);
-		if (__predict_false(ret))
-			return (ret);
-	}
-	return (mutex_trylock_common(curthread, mutex));
+	CHECK_AND_INIT_MUTEX
+
+	return (mutex_trylock_common(mutex));
 }

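After the rewrite, __pthread_mutex_trylock() initializes a statically-initialized mutex on first use via CHECK_AND_INIT_MUTEX and then delegates to mutex_trylock_common(). The caller-visible contract is unchanged; a minimal sketch:

    #include <pthread.h>
    #include <errno.h>
    #include <stdio.h>

    /* The first trylock on a static initializer triggers init_static(). */
    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    int main(void) {
        if (pthread_mutex_trylock(&lock) == 0) {
            /* A second trylock by the owner of a non-recursive mutex
             * takes the mutex_self_trylock() path and fails. */
            int r = pthread_mutex_trylock(&lock);
            printf("second trylock: %d (EBUSY=%d)\n", r, EBUSY);
            pthread_mutex_unlock(&lock);
        }
        return 0;
    }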
 static int
 mutex_lock_sleep(struct pthread *curthread, struct pthread_mutex *m,
     const struct timespec *abstime)
 {
 	uint32_t id, owner;
 	int count;

--- 4 unchanged lines hidden ---

 	id = TID(curthread);
 	/*
 	 * For adaptive mutexes, spin for a bit in the expectation
 	 * that if the application requests this mutex type then
 	 * the lock is likely to be released quickly and it is
 	 * faster than entering the kernel
 	 */
-	if (m->m_lock.m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT))
-		goto sleep_in_kernel;
+	if (__predict_false(
+	    (m->m_lock.m_flags &
+	    (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) != 0))
+		goto sleep_in_kernel;

 	if (!_thr_is_smp)
 		goto yield_loop;

 	count = m->m_spinloops;
 	while (count--) {
 		owner = m->m_lock.m_owner;
 		if ((owner & ~UMUTEX_CONTESTED) == 0) {

--- 31 unchanged lines hidden ---

 done:
 	if (ret == 0)
 		ENQUEUE_MUTEX(curthread, m);

 	return (ret);
 }

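The spin-then-yield sequence in mutex_lock_sleep() above is what the adaptive-mutex tuning knobs feed. A minimal sketch of tuning them from application code, using the FreeBSD-specific accessors declared in <pthread_np.h> (the same accessors this diff reworks further down):

    #include <pthread.h>
    #include <pthread_np.h>
    #include <stdio.h>

    int main(void) {
        pthread_mutexattr_t attr;
        pthread_mutex_t m;
        int spins;

        pthread_mutexattr_init(&attr);
        pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ADAPTIVE_NP);
        pthread_mutex_init(&m, &attr);
        pthread_mutexattr_destroy(&attr);

        /* m_spinloops defaults to MUTEX_ADAPTIVE_SPINS unless overridden. */
        pthread_mutex_getspinloops_np(&m, &spins);
        printf("spin loops: %d\n", spins);

        pthread_mutex_setspinloops_np(&m, 2000); /* busy-wait iterations */
        pthread_mutex_setyieldloops_np(&m, 10);  /* yield rounds before sleeping */

        pthread_mutex_destroy(&m);
        return 0;
    }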
 static inline int
-mutex_lock_common(struct pthread *curthread, struct pthread_mutex *m,
+mutex_lock_common(struct pthread_mutex *m,
     const struct timespec *abstime)
 {
+	struct pthread *curthread = _get_curthread();
 	int ret;

 	if (m->m_private)
 		THR_CRITICAL_ENTER(curthread);
 	if (_thr_umutex_trylock2(&m->m_lock, TID(curthread)) == 0) {
 		ENQUEUE_MUTEX(curthread, m);
 		ret = 0;
 	} else {
 		ret = mutex_lock_sleep(curthread, m, abstime);
 	}
 	if (ret && m->m_private)
 		THR_CRITICAL_LEAVE(curthread);
 	return (ret);
 }

 int
 __pthread_mutex_lock(pthread_mutex_t *mutex)
 {
-	struct pthread *curthread;
 	struct pthread_mutex *m;
-	int ret;

 	_thr_check_init();

-	curthread = _get_curthread();
-
-	/*
-	 * If the mutex is statically initialized, perform the dynamic
-	 * initialization:
-	 */
-	if (__predict_false((m = *mutex) == NULL)) {
-		ret = init_static(curthread, mutex);
-		if (__predict_false(ret))
-			return (ret);
-		m = *mutex;
-	}
-
-	return (mutex_lock_common(curthread, m, NULL));
+	CHECK_AND_INIT_MUTEX
+
+	return (mutex_lock_common(m, NULL));
 }

 int
 __pthread_mutex_timedlock(pthread_mutex_t *mutex, const struct timespec *abstime)
 {
-	struct pthread *curthread;
 	struct pthread_mutex *m;
-	int ret;

 	_thr_check_init();

-	curthread = _get_curthread();
-
-	/*
-	 * If the mutex is statically initialized, perform the dynamic
-	 * initialization:
-	 */
-	if (__predict_false((m = *mutex) == NULL)) {
-		ret = init_static(curthread, mutex);
-		if (__predict_false(ret))
-			return (ret);
-		m = *mutex;
-	}
-	return (mutex_lock_common(curthread, m, abstime));
+	CHECK_AND_INIT_MUTEX
+
+	return (mutex_lock_common(m, abstime));
 }

 int
 _pthread_mutex_unlock(pthread_mutex_t *m)
 {
 	return (mutex_unlock_common(m));
 }

 int
-_mutex_cv_lock(pthread_mutex_t *m, int count)
+_mutex_cv_lock(pthread_mutex_t *mutex, int count)
 {
+	struct pthread_mutex *m;
 	int ret;

-	ret = mutex_lock_common(_get_curthread(), *m, NULL);
+	m = *mutex;
+	ret = mutex_lock_common(m, NULL);
 	if (ret == 0) {
-		(*m)->m_refcount--;
-		(*m)->m_count += count;
+		m->m_refcount--;
+		m->m_count += count;
 	}
 	return (ret);
 }

 static int
-mutex_self_trylock(pthread_mutex_t m)
+mutex_self_trylock(struct pthread_mutex *m)
 {
 	int ret;

 	switch (m->m_type) {
 	case PTHREAD_MUTEX_ERRORCHECK:
 	case PTHREAD_MUTEX_NORMAL:
 		ret = EBUSY;
 		break;

--- 11 unchanged lines hidden ---

 		/* Trap invalid mutex types; */
 		ret = EINVAL;
 	}

 	return (ret);
 }

 static int
-mutex_self_lock(pthread_mutex_t m, const struct timespec *abstime)
+mutex_self_lock(struct pthread_mutex *m, const struct timespec *abstime)
 {
 	struct timespec ts1, ts2;
 	int ret;

 	switch (m->m_type) {
 	case PTHREAD_MUTEX_ERRORCHECK:
 	case PTHREAD_MUTEX_ADAPTIVE_NP:
 		if (abstime) {

--- 58 unchanged lines hidden ---

 static int
 mutex_unlock_common(pthread_mutex_t *mutex)
 {
 	struct pthread *curthread = _get_curthread();
 	struct pthread_mutex *m;
 	uint32_t id;

-	if (__predict_false((m = *mutex) == NULL))
-		return (EINVAL);
+	m = *mutex;
+	if (__predict_false(m <= THR_MUTEX_DESTROYED)) {
+		if (m == THR_MUTEX_DESTROYED)
+			return (EINVAL);
+		return (EPERM);
+	}

 	/*
 	 * Check if the running thread is not the owner of the mutex.
 	 */
 	if (__predict_false(m->m_owner != curthread))
 		return (EPERM);

 	id = TID(curthread);
 	if (__predict_false(
 	    m->m_type == PTHREAD_MUTEX_RECURSIVE &&
 	    m->m_count > 0)) {
 		m->m_count--;
 	} else {
 		m->m_owner = NULL;
 		/* Remove the mutex from the thread's queue. */
 		MUTEX_ASSERT_IS_OWNED(m);
-		if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
+		if (__predict_true((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0))
 			TAILQ_REMOVE(&curthread->mutexq, m, m_qe);
 		else {
 			TAILQ_REMOVE(&curthread->pp_mutexq, m, m_qe);
 			set_inherited_priority(curthread, m);
 		}
 		MUTEX_INIT_LINK(m);
 		_thr_umutex_unlock(&m->m_lock, id);
 	}
 	if (m->m_private)
 		THR_CRITICAL_LEAVE(curthread);
 	return (0);
 }

 int
 _mutex_cv_unlock(pthread_mutex_t *mutex, int *count)
 {
 	struct pthread *curthread = _get_curthread();
 	struct pthread_mutex *m;

-	if (__predict_false((m = *mutex) == NULL))
-		return (EINVAL);
-
+	m = *mutex;
 	/*
 	 * Check if the running thread is not the owner of the mutex.
 	 */
 	if (__predict_false(m->m_owner != curthread))
 		return (EPERM);

 	/*
 	 * Clear the count in case this is a recursive mutex.
 	 */
 	*count = m->m_count;
 	m->m_refcount++;
 	m->m_count = 0;
 	m->m_owner = NULL;
 	/* Remove the mutex from the thread's queue. */
 	MUTEX_ASSERT_IS_OWNED(m);
-	if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
+	if (__predict_true((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0))
 		TAILQ_REMOVE(&curthread->mutexq, m, m_qe);
 	else {
 		TAILQ_REMOVE(&curthread->pp_mutexq, m, m_qe);
 		set_inherited_priority(curthread, m);
 	}
 	MUTEX_INIT_LINK(m);
 	_thr_umutex_unlock(&m->m_lock, TID(curthread));

 	if (m->m_private)
 		THR_CRITICAL_LEAVE(curthread);
 	return (0);
 }

 int
 _pthread_mutex_getprioceiling(pthread_mutex_t *mutex,
     int *prioceiling)
 {
+	struct pthread_mutex *m;
 	int ret;

-	if (*mutex == NULL)
-		ret = EINVAL;
-	else if (((*mutex)->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
+	m = *mutex;
+	if ((m <= THR_MUTEX_DESTROYED) ||
+	    (m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
 		ret = EINVAL;
 	else {
-		*prioceiling = (*mutex)->m_lock.m_ceilings[0];
+		*prioceiling = m->m_lock.m_ceilings[0];
 		ret = 0;
 	}

-	return(ret);
+	return (ret);
 }

 int
 _pthread_mutex_setprioceiling(pthread_mutex_t *mutex,
     int ceiling, int *old_ceiling)
 {
 	struct pthread *curthread = _get_curthread();
 	struct pthread_mutex *m, *m1, *m2;
 	int ret;

 	m = *mutex;
-	if (m == NULL || (m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
+	if ((m <= THR_MUTEX_DESTROYED) ||
+	    (m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
 		return (EINVAL);

 	ret = __thr_umutex_set_ceiling(&m->m_lock, ceiling, old_ceiling);
 	if (ret != 0)
 		return (ret);

 	if (m->m_owner == curthread) {
 		MUTEX_ASSERT_IS_OWNED(m);

--- 12 unchanged lines hidden ---

 		}
 	}
 	return (0);
 }

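Both ceiling functions now reject sentinel handles in addition to non-PRIO_PROTECT mutexes. For reference, a minimal sketch of the standard ceiling API these functions implement (hypothetical ceiling values; they must fall within the scheduling-priority range):

    #include <pthread.h>
    #include <stdio.h>

    int main(void) {
        pthread_mutexattr_t attr;
        pthread_mutex_t m;
        int ceil, old;

        pthread_mutexattr_init(&attr);
        pthread_mutexattr_setprotocol(&attr, PTHREAD_PRIO_PROTECT);
        pthread_mutexattr_setprioceiling(&attr, 10);
        pthread_mutex_init(&m, &attr);
        pthread_mutexattr_destroy(&attr);

        pthread_mutex_getprioceiling(&m, &ceil);    /* reads m_ceilings[0] */
        printf("ceiling: %d\n", ceil);
        pthread_mutex_setprioceiling(&m, 20, &old); /* via __thr_umutex_set_ceiling() */
        printf("old ceiling: %d\n", old);

        pthread_mutex_destroy(&m);
        return 0;
    }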
 int
 _pthread_mutex_getspinloops_np(pthread_mutex_t *mutex, int *count)
 {
-	if (*mutex == NULL)
-		return (EINVAL);
-	*count = (*mutex)->m_spinloops;
+	struct pthread_mutex *m;
+
+	CHECK_AND_INIT_MUTEX
+
+	*count = m->m_spinloops;
 	return (0);
 }

 int
 __pthread_mutex_setspinloops_np(pthread_mutex_t *mutex, int count)
 {
-	struct pthread *curthread = _get_curthread();
-	int ret;
+	struct pthread_mutex *m;

-	if (__predict_false(*mutex == NULL)) {
-		ret = init_static(curthread, mutex);
-		if (__predict_false(ret))
-			return (ret);
-	}
-	(*mutex)->m_spinloops = count;
+	CHECK_AND_INIT_MUTEX
+
+	m->m_spinloops = count;
 	return (0);
 }

 int
 _pthread_mutex_getyieldloops_np(pthread_mutex_t *mutex, int *count)
 {
-	if (*mutex == NULL)
-		return (EINVAL);
-	*count = (*mutex)->m_yieldloops;
+	struct pthread_mutex *m;
+
+	CHECK_AND_INIT_MUTEX
+
+	*count = m->m_yieldloops;
 	return (0);
 }

 int
 __pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count)
 {
-	struct pthread *curthread = _get_curthread();
-	int ret;
+	struct pthread_mutex *m;

-	if (__predict_false(*mutex == NULL)) {
-		ret = init_static(curthread, mutex);
-		if (__predict_false(ret))
-			return (ret);
-	}
-	(*mutex)->m_yieldloops = count;
+	CHECK_AND_INIT_MUTEX
+
+	m->m_yieldloops = count;
 	return (0);
 }

 int
 _pthread_mutex_isowned_np(pthread_mutex_t *mutex)
 {
-	struct pthread *curthread = _get_curthread();
-	int ret;
+	struct pthread_mutex *m;

-	if (__predict_false(*mutex == NULL)) {
-		ret = init_static(curthread, mutex);
-		if (__predict_false(ret))
-			return (ret);
-	}
-	return ((*mutex)->m_owner == curthread);
+	m = *mutex;
+	if (m <= THR_MUTEX_DESTROYED)
+		return (0);
+	return (m->m_owner == _get_curthread());
 }
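The new _pthread_mutex_isowned_np() simply reports 0 for sentinel handles instead of forcing initialization, which keeps it cheap and safe to call from assertions. A minimal sketch:

    #include <pthread.h>
    #include <pthread_np.h>
    #include <assert.h>

    static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;

    static void requires_m(void) {
        /* Returns 0 (not an error) while m is still a static
         * initializer or after it has been destroyed. */
        assert(pthread_mutex_isowned_np(&m));
    }

    int main(void) {
        pthread_mutex_lock(&m);
        requires_m();
        pthread_mutex_unlock(&m);
        return 0;
    }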