/*
 * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
 * Copyright (c) 2006 David Xu <davidxu@freebsd.org>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:

--- 16 unchanged lines hidden ---

 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/lib/libthr/thread/thr_mutex.c 213241 2010-09-28 04:57:56Z davidxu $
 */

#include "namespace.h"
#include <stdlib.h>
#include <errno.h>
#include <string.h>
#include <sys/param.h>
#include <sys/queue.h>

--- 77 unchanged lines hidden ---

__weak_reference(__pthread_mutex_setyieldloops_np, pthread_mutex_setyieldloops_np);
__strong_reference(__pthread_mutex_setyieldloops_np, _pthread_mutex_setyieldloops_np);
__weak_reference(_pthread_mutex_getyieldloops_np, pthread_mutex_getyieldloops_np);
__weak_reference(_pthread_mutex_isowned_np, pthread_mutex_isowned_np);

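/*
 * Common initialization: validate the supplied attributes, allocate the
 * mutex with the caller-provided allocator, and set up the underlying
 * umutex according to the requested locking protocol.
 */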
static int
mutex_init(pthread_mutex_t *mutex,
    const struct pthread_mutex_attr *mutex_attr,
    void *(calloc_cb)(size_t, size_t))
{
	const struct pthread_mutex_attr *attr;
	struct pthread_mutex *pmutex;

	if (mutex_attr == NULL) {
		attr = &_pthread_mutexattr_default;
	} else {
		attr = mutex_attr;
		if (attr->m_type < PTHREAD_MUTEX_ERRORCHECK ||
		    attr->m_type >= PTHREAD_MUTEX_TYPE_MAX)
			return (EINVAL);
		if (attr->m_protocol < PTHREAD_PRIO_NONE ||
		    attr->m_protocol > PTHREAD_PRIO_PROTECT)
			return (EINVAL);
	}
	if ((pmutex = (pthread_mutex_t)
	    calloc_cb(1, sizeof(struct pthread_mutex))) == NULL)
		return (ENOMEM);

	pmutex->m_type = attr->m_type;
	pmutex->m_owner = NULL;
	pmutex->m_count = 0;
	pmutex->m_refcount = 0;
	pmutex->m_spinloops = 0;
	pmutex->m_yieldloops = 0;
	MUTEX_INIT_LINK(pmutex);
	switch (attr->m_protocol) {
	case PTHREAD_PRIO_NONE:
		pmutex->m_lock.m_owner = UMUTEX_UNOWNED;
		pmutex->m_lock.m_flags = 0;
		break;
	case PTHREAD_PRIO_INHERIT:
		pmutex->m_lock.m_owner = UMUTEX_UNOWNED;
		pmutex->m_lock.m_flags = UMUTEX_PRIO_INHERIT;
		break;
	case PTHREAD_PRIO_PROTECT:
		pmutex->m_lock.m_owner = UMUTEX_CONTESTED;
		pmutex->m_lock.m_flags = UMUTEX_PRIO_PROTECT;
		pmutex->m_lock.m_ceilings[0] = attr->m_ceiling;
		break;
	}

	if (pmutex->m_type == PTHREAD_MUTEX_ADAPTIVE_NP) {
		pmutex->m_spinloops =
		    _thr_spinloops ? _thr_spinloops : MUTEX_ADAPTIVE_SPINS;
		pmutex->m_yieldloops = _thr_yieldloops;
	}

	*mutex = pmutex;
	return (0);
}

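/*
 * Lazily initialize a statically-initialized mutex on first use; the
 * global _mutex_static_lock serializes racing initializers.
 */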
static int
init_static(struct pthread *thread, pthread_mutex_t *mutex)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);

	if (*mutex == THR_MUTEX_INITIALIZER)
		ret = mutex_init(mutex, &_pthread_mutexattr_default, calloc);
	else if (*mutex == THR_ADAPTIVE_MUTEX_INITIALIZER)
		ret = mutex_init(mutex, &_pthread_mutexattr_adaptive_default, calloc);
	else
		ret = 0;
	THR_LOCK_RELEASE(thread, &_mutex_static_lock);

	return (ret);
}

static void
set_inherited_priority(struct pthread *curthread, struct pthread_mutex *m)
{

--- 5 unchanged lines hidden ---

	else
		m->m_lock.m_ceilings[1] = -1;
}

int
__pthread_mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr)
{
	return mutex_init(mutex, mutex_attr ? *mutex_attr : NULL, calloc);
}

/* This function is used internally by malloc. */
int
_pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
    void *(calloc_cb)(size_t, size_t))
{
	static const struct pthread_mutex_attr attr = {
		.m_type = PTHREAD_MUTEX_NORMAL,
		.m_protocol = PTHREAD_PRIO_NONE,
		.m_ceiling = 0
	};
	int ret;

	ret = mutex_init(mutex, &attr, calloc_cb);
	if (ret == 0)
		(*mutex)->m_private = 1;
	return (ret);
}
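
/*
 * Illustrative usage sketch (not part of this file; bootstrap_calloc
 * and malloc_lock are hypothetical names): a malloc implementation that
 * cannot call the regular calloc() while bootstrapping its own locks
 * can hand in a private allocator instead, e.g.:
 *
 *	static void *bootstrap_calloc(size_t number, size_t size);
 *	...
 *	_pthread_mutex_init_calloc_cb(&malloc_lock, bootstrap_calloc);
 */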

void
_mutex_fork(struct pthread *curthread)
{

--- 18 unchanged lines hidden ---

int
_pthread_mutex_destroy(pthread_mutex_t *mutex)
{
	struct pthread *curthread = _get_curthread();
	pthread_mutex_t m;
	uint32_t id;
	int ret = 0;

	m = *mutex;
	if (m < THR_MUTEX_DESTROYED) {
		ret = 0;
	} else if (m == THR_MUTEX_DESTROYED) {
		ret = EINVAL;
	} else {
		id = TID(curthread);

		/*
		 * Try to lock the mutex structure; we only need to
		 * try once.  If the attempt fails, the mutex is in use.
		 */
		ret = _thr_umutex_trylock(&m->m_lock, id);
		if (ret)
			return (ret);
		/*
		 * Check the mutex's other fields to see if it is still
		 * in use, mostly for priority-protocol mutex types or
		 * when condition variables are referencing it.
		 */
		if (m->m_owner != NULL || m->m_refcount != 0) {
			if (m->m_lock.m_flags & UMUTEX_PRIO_PROTECT)
				set_inherited_priority(curthread, m);
			_thr_umutex_unlock(&m->m_lock, id);
			ret = EBUSY;
		} else {
			*mutex = THR_MUTEX_DESTROYED;

			if (m->m_lock.m_flags & UMUTEX_PRIO_PROTECT)
				set_inherited_priority(curthread, m);
			_thr_umutex_unlock(&m->m_lock, id);

			MUTEX_ASSERT_NOT_OWNED(m);
			free(m);
		}

--- 8 unchanged lines hidden ---

		/* Add to the list of owned mutexes: */			\
		MUTEX_ASSERT_NOT_OWNED((m));				\
		if (((m)->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)	\
			TAILQ_INSERT_TAIL(&curthread->mutexq, (m), m_qe);\
		else							\
			TAILQ_INSERT_TAIL(&curthread->pp_mutexq, (m), m_qe);\
	} while (0)

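/*
 * Dereference the caller's mutex pointer into the local 'm' and, if it
 * still holds a static initializer value, initialize the mutex in place
 * before use.
 */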
#define CHECK_AND_INIT_MUTEX						\
	if (__predict_false((m = *mutex) <= THR_MUTEX_DESTROYED)) {	\
		if (m == THR_MUTEX_DESTROYED)				\
			return (EINVAL);				\
		int ret;						\
		ret = init_static(_get_curthread(), mutex);		\
		if (ret)						\
			return (ret);					\
		m = *mutex;						\
	}

static int
mutex_trylock_common(pthread_mutex_t *mutex)
{
	struct pthread *curthread = _get_curthread();
	struct pthread_mutex *m = *mutex;
	uint32_t id;
	int ret;

	id = TID(curthread);
	if (m->m_private)
		THR_CRITICAL_ENTER(curthread);
	ret = _thr_umutex_trylock(&m->m_lock, id);
	if (__predict_true(ret == 0)) {
		ENQUEUE_MUTEX(curthread, m);
	} else if (m->m_owner == curthread) {
		ret = mutex_self_trylock(m);
	} /* else {} */
	if (ret && m->m_private)
		THR_CRITICAL_LEAVE(curthread);
	return (ret);
}

int
__pthread_mutex_trylock(pthread_mutex_t *mutex)
{
	struct pthread_mutex *m;

	CHECK_AND_INIT_MUTEX

	return (mutex_trylock_common(mutex));
}

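/*
 * Slow path for acquiring a mutex: plain mutexes may spin (on SMP) and
 * then yield for a while before finally sleeping in the kernel, while
 * priority-protocol mutexes go straight to the kernel.
 */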
static int
mutex_lock_sleep(struct pthread *curthread, struct pthread_mutex *m,
    const struct timespec *abstime)
{
	uint32_t id, owner;
	int count;

--- 4 unchanged lines hidden ---

	id = TID(curthread);
	/*
	 * For adaptive mutexes, spin for a bit in the expectation
	 * that if the application requests this mutex type then
	 * the lock is likely to be released quickly, in which case
	 * spinning is faster than entering the kernel.
	 */
	if (__predict_false(
	    (m->m_lock.m_flags &
	    (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) != 0))
		goto sleep_in_kernel;

	if (!_thr_is_smp)
		goto yield_loop;

	count = m->m_spinloops;
	while (count--) {
		owner = m->m_lock.m_owner;
		if ((owner & ~UMUTEX_CONTESTED) == 0) {

--- 31 unchanged lines hidden ---

done:
	if (ret == 0)
		ENQUEUE_MUTEX(curthread, m);

	return (ret);
}

static inline int
mutex_lock_common(struct pthread_mutex *m,
    const struct timespec *abstime)
{
	struct pthread *curthread = _get_curthread();
	int ret;

	if (m->m_private)
		THR_CRITICAL_ENTER(curthread);
	if (_thr_umutex_trylock2(&m->m_lock, TID(curthread)) == 0) {
		ENQUEUE_MUTEX(curthread, m);
		ret = 0;
	} else {
		ret = mutex_lock_sleep(curthread, m, abstime);
	}
	if (ret && m->m_private)
		THR_CRITICAL_LEAVE(curthread);
	return (ret);
}

int
__pthread_mutex_lock(pthread_mutex_t *mutex)
{
	struct pthread_mutex *m;

	_thr_check_init();

	CHECK_AND_INIT_MUTEX

	return (mutex_lock_common(m, NULL));
}

int
__pthread_mutex_timedlock(pthread_mutex_t *mutex, const struct timespec *abstime)
{
	struct pthread_mutex *m;

	_thr_check_init();

	CHECK_AND_INIT_MUTEX

	return (mutex_lock_common(m, abstime));
}

int
_pthread_mutex_unlock(pthread_mutex_t *m)
{
	return (mutex_unlock_common(m));
}

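/*
 * Reacquire a mutex after a condition-variable wait: drop the reference
 * taken by _mutex_cv_unlock() and restore the saved recursion count.
 */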
int
_mutex_cv_lock(pthread_mutex_t *mutex, int count)
{
	struct pthread_mutex *m;
	int ret;

	m = *mutex;
	ret = mutex_lock_common(m, NULL);
	if (ret == 0) {
		m->m_refcount--;
		m->m_count += count;
	}
	return (ret);
}

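/*
 * Handle a thread trying to take a mutex it already owns.  The outcome
 * depends on the mutex type: error-checking and normal mutexes refuse a
 * trylock with EBUSY; the remaining types are handled below.
 */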
static int
mutex_self_trylock(struct pthread_mutex *m)
{
	int ret;

	switch (m->m_type) {
	case PTHREAD_MUTEX_ERRORCHECK:
	case PTHREAD_MUTEX_NORMAL:
		ret = EBUSY;
		break;

--- 11 unchanged lines hidden ---

		/* Trap invalid mutex types. */
		ret = EINVAL;
	}

	return (ret);
}

static int
mutex_self_lock(struct pthread_mutex *m, const struct timespec *abstime)
{
	struct timespec ts1, ts2;
	int ret;

	switch (m->m_type) {
	case PTHREAD_MUTEX_ERRORCHECK:
	case PTHREAD_MUTEX_ADAPTIVE_NP:
		if (abstime) {

--- 58 unchanged lines hidden ---

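/*
 * Common unlock path: verify that the caller owns the mutex, handle the
 * recursion count, then dequeue the mutex and release the underlying
 * umutex.
 */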
static int
mutex_unlock_common(pthread_mutex_t *mutex)
{
	struct pthread *curthread = _get_curthread();
	struct pthread_mutex *m;
	uint32_t id;

	m = *mutex;
	if (__predict_false(m <= THR_MUTEX_DESTROYED)) {
		if (m == THR_MUTEX_DESTROYED)
			return (EINVAL);
		return (EPERM);
	}

	/*
	 * Error out if the running thread is not the owner of the mutex.
	 */
	if (__predict_false(m->m_owner != curthread))
		return (EPERM);

	id = TID(curthread);
	if (__predict_false(
	    m->m_type == PTHREAD_MUTEX_RECURSIVE &&
	    m->m_count > 0)) {
		m->m_count--;
	} else {
		m->m_owner = NULL;
		/* Remove the mutex from the thread's queue. */
		MUTEX_ASSERT_IS_OWNED(m);
		if (__predict_true((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0))
			TAILQ_REMOVE(&curthread->mutexq, m, m_qe);
		else {
			TAILQ_REMOVE(&curthread->pp_mutexq, m, m_qe);
			set_inherited_priority(curthread, m);
		}
		MUTEX_INIT_LINK(m);
		_thr_umutex_unlock(&m->m_lock, id);
	}
	if (m->m_private)
		THR_CRITICAL_LEAVE(curthread);
	return (0);
}

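/*
 * Release a mutex on behalf of a condition-variable wait: save and clear
 * the recursion count, and take a reference so the mutex cannot be
 * destroyed while the thread is blocked on the condition variable.
 */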
int
_mutex_cv_unlock(pthread_mutex_t *mutex, int *count)
{
	struct pthread *curthread = _get_curthread();
	struct pthread_mutex *m;

	m = *mutex;
	/*
	 * Error out if the running thread is not the owner of the mutex.
	 */
	if (__predict_false(m->m_owner != curthread))
		return (EPERM);

	/*
	 * Clear the count in case this is a recursive mutex.
	 */
	*count = m->m_count;
	m->m_refcount++;
	m->m_count = 0;
	m->m_owner = NULL;
	/* Remove the mutex from the thread's queue. */
	MUTEX_ASSERT_IS_OWNED(m);
	if (__predict_true((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0))
		TAILQ_REMOVE(&curthread->mutexq, m, m_qe);
	else {
		TAILQ_REMOVE(&curthread->pp_mutexq, m, m_qe);
		set_inherited_priority(curthread, m);
	}
	MUTEX_INIT_LINK(m);
	_thr_umutex_unlock(&m->m_lock, TID(curthread));

	if (m->m_private)
		THR_CRITICAL_LEAVE(curthread);
	return (0);
}

int
_pthread_mutex_getprioceiling(pthread_mutex_t *mutex,
    int *prioceiling)
{
	struct pthread_mutex *m;
	int ret;

	m = *mutex;
	if ((m <= THR_MUTEX_DESTROYED) ||
	    (m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
		ret = EINVAL;
	else {
		*prioceiling = m->m_lock.m_ceilings[0];
		ret = 0;
	}

	return (ret);
}

int
_pthread_mutex_setprioceiling(pthread_mutex_t *mutex,
    int ceiling, int *old_ceiling)
{
	struct pthread *curthread = _get_curthread();
	struct pthread_mutex *m, *m1, *m2;
	int ret;

	m = *mutex;
	if ((m <= THR_MUTEX_DESTROYED) ||
	    (m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
		return (EINVAL);

	ret = __thr_umutex_set_ceiling(&m->m_lock, ceiling, old_ceiling);
	if (ret != 0)
		return (ret);

	if (m->m_owner == curthread) {
		MUTEX_ASSERT_IS_OWNED(m);

--- 12 unchanged lines hidden ---

		}
	}
	return (0);
}

int
_pthread_mutex_getspinloops_np(pthread_mutex_t *mutex, int *count)
{
	struct pthread_mutex *m;

	CHECK_AND_INIT_MUTEX

	*count = m->m_spinloops;
	return (0);
}

int
__pthread_mutex_setspinloops_np(pthread_mutex_t *mutex, int count)
{
	struct pthread_mutex *m;

	CHECK_AND_INIT_MUTEX

	m->m_spinloops = count;
	return (0);
}

int
_pthread_mutex_getyieldloops_np(pthread_mutex_t *mutex, int *count)
{
	struct pthread_mutex *m;

	CHECK_AND_INIT_MUTEX

	*count = m->m_yieldloops;
	return (0);
}

int
__pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count)
{
	struct pthread_mutex *m;

	CHECK_AND_INIT_MUTEX

	m->m_yieldloops = count;
	return (0);
}

int
_pthread_mutex_isowned_np(pthread_mutex_t *mutex)
{
	struct pthread_mutex *m;

	m = *mutex;
	if (m <= THR_MUTEX_DESTROYED)
		return (0);
	return (m->m_owner == _get_curthread());
}