/*
 * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
 * Copyright (c) 2006 David Xu <davidxu@freebsd.org>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by John Birrell.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "namespace.h"
#include <stdlib.h>
#include <errno.h>
#include <string.h>
#include <sys/param.h>
#include <sys/queue.h>
#include <pthread.h>
#include <pthread_np.h>
#include "un-namespace.h"

#include "thr_private.h"

#if defined(_PTHREADS_INVARIANTS)
#define MUTEX_INIT_LINK(m)		do {		\
	(m)->m_qe.tqe_prev = NULL;			\
	(m)->m_qe.tqe_next = NULL;			\
} while (0)
#define MUTEX_ASSERT_IS_OWNED(m)	do {		\
	if (__predict_false((m)->m_qe.tqe_prev == NULL))\
		PANIC("mutex is not on list");		\
} while (0)
#define MUTEX_ASSERT_NOT_OWNED(m)	do {		\
	if (__predict_false((m)->m_qe.tqe_prev != NULL ||	\
	    (m)->m_qe.tqe_next != NULL))		\
		PANIC("mutex is on list");		\
} while (0)
#else
#define MUTEX_INIT_LINK(m)
#define MUTEX_ASSERT_IS_OWNED(m)
#define MUTEX_ASSERT_NOT_OWNED(m)
#endif

/*
 * For adaptive mutexes, the number of times to spin retrying the
 * lock word before entering the kernel to block.
 */
#define MUTEX_ADAPTIVE_SPINS	2000
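
/*
 * Illustrative sketch (not part of the original file): an application
 * opts into adaptive spinning through the standard attribute
 * interface; mutex_init() below then seeds m_spinloops/m_yieldloops:
 *
 *	pthread_mutexattr_t attr;
 *	pthread_mutex_t m;
 *
 *	pthread_mutexattr_init(&attr);
 *	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ADAPTIVE_NP);
 *	pthread_mutex_init(&m, &attr);
 */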

/*
 * Prototypes
 */
int	__pthread_mutex_init(pthread_mutex_t *mutex,
		const pthread_mutexattr_t *mutex_attr);
int	__pthread_mutex_trylock(pthread_mutex_t *mutex);
int	__pthread_mutex_lock(pthread_mutex_t *mutex);
int	__pthread_mutex_timedlock(pthread_mutex_t *mutex,
		const struct timespec *abstime);
int	_pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
		void *(calloc_cb)(size_t, size_t));
int	_pthread_mutex_getspinloops_np(pthread_mutex_t *mutex, int *count);
int	_pthread_mutex_setspinloops_np(pthread_mutex_t *mutex, int count);
int	__pthread_mutex_setspinloops_np(pthread_mutex_t *mutex, int count);
int	_pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count);
int	_pthread_mutex_getyieldloops_np(pthread_mutex_t *mutex, int *count);
int	__pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count);

static int	mutex_self_trylock(pthread_mutex_t);
static int	mutex_self_lock(pthread_mutex_t,
			const struct timespec *abstime);
static int	mutex_unlock_common(struct pthread_mutex *, int, int *);
static int	mutex_lock_sleep(struct pthread *, pthread_mutex_t,
			const struct timespec *);
__weak_reference(__pthread_mutex_init, pthread_mutex_init);
__strong_reference(__pthread_mutex_init, _pthread_mutex_init);
__weak_reference(__pthread_mutex_lock, pthread_mutex_lock);
__strong_reference(__pthread_mutex_lock, _pthread_mutex_lock);
__weak_reference(__pthread_mutex_timedlock, pthread_mutex_timedlock);
__strong_reference(__pthread_mutex_timedlock, _pthread_mutex_timedlock);
__weak_reference(__pthread_mutex_trylock, pthread_mutex_trylock);
__strong_reference(__pthread_mutex_trylock, _pthread_mutex_trylock);

/* Single underscore versions provided for libc internal usage: */
/* No difference between libc and application usage of these: */
__weak_reference(_pthread_mutex_destroy, pthread_mutex_destroy);
__weak_reference(_pthread_mutex_unlock, pthread_mutex_unlock);

__weak_reference(_pthread_mutex_getprioceiling, pthread_mutex_getprioceiling);
__weak_reference(_pthread_mutex_setprioceiling, pthread_mutex_setprioceiling);

__weak_reference(__pthread_mutex_setspinloops_np, pthread_mutex_setspinloops_np);
__strong_reference(__pthread_mutex_setspinloops_np, _pthread_mutex_setspinloops_np);
__weak_reference(_pthread_mutex_getspinloops_np, pthread_mutex_getspinloops_np);

__weak_reference(__pthread_mutex_setyieldloops_np, pthread_mutex_setyieldloops_np);
__strong_reference(__pthread_mutex_setyieldloops_np, _pthread_mutex_setyieldloops_np);
__weak_reference(_pthread_mutex_getyieldloops_np, pthread_mutex_getyieldloops_np);
__weak_reference(_pthread_mutex_isowned_np, pthread_mutex_isowned_np);

static int
mutex_init(pthread_mutex_t *mutex,
    const struct pthread_mutex_attr *mutex_attr,
    void *(calloc_cb)(size_t, size_t))
{
	const struct pthread_mutex_attr *attr;
	struct pthread_mutex *pmutex;

	if (mutex_attr == NULL) {
		attr = &_pthread_mutexattr_default;
	} else {
		attr = mutex_attr;
		if (attr->m_type < PTHREAD_MUTEX_ERRORCHECK ||
		    attr->m_type >= PTHREAD_MUTEX_TYPE_MAX)
			return (EINVAL);
		if (attr->m_protocol < PTHREAD_PRIO_NONE ||
		    attr->m_protocol > PTHREAD_PRIO_PROTECT)
			return (EINVAL);
	}
	if ((pmutex = (pthread_mutex_t)
	    calloc_cb(1, sizeof(struct pthread_mutex))) == NULL)
		return (ENOMEM);

	pmutex->m_flags = attr->m_type;
	pmutex->m_owner = NULL;
	pmutex->m_count = 0;
	pmutex->m_spinloops = 0;
	pmutex->m_yieldloops = 0;
	MUTEX_INIT_LINK(pmutex);
	switch (attr->m_protocol) {
	case PTHREAD_PRIO_NONE:
		pmutex->m_lock.m_owner = UMUTEX_UNOWNED;
		pmutex->m_lock.m_flags = 0;
		break;
	case PTHREAD_PRIO_INHERIT:
		pmutex->m_lock.m_owner = UMUTEX_UNOWNED;
		pmutex->m_lock.m_flags = UMUTEX_PRIO_INHERIT;
		break;
	case PTHREAD_PRIO_PROTECT:
		pmutex->m_lock.m_owner = UMUTEX_CONTESTED;
		pmutex->m_lock.m_flags = UMUTEX_PRIO_PROTECT;
		pmutex->m_lock.m_ceilings[0] = attr->m_ceiling;
		break;
	}

	if (PMUTEX_TYPE(pmutex->m_flags) == PTHREAD_MUTEX_ADAPTIVE_NP) {
		pmutex->m_spinloops =
		    _thr_spinloops ? _thr_spinloops : MUTEX_ADAPTIVE_SPINS;
		pmutex->m_yieldloops = _thr_yieldloops;
	}

	*mutex = pmutex;
	return (0);
}

static int
init_static(struct pthread *thread, pthread_mutex_t *mutex)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);

	if (*mutex == THR_MUTEX_INITIALIZER)
		ret = mutex_init(mutex, &_pthread_mutexattr_default, calloc);
	else if (*mutex == THR_ADAPTIVE_MUTEX_INITIALIZER)
		ret = mutex_init(mutex, &_pthread_mutexattr_adaptive_default,
		    calloc);
	else
		ret = 0;
	THR_LOCK_RELEASE(thread, &_mutex_static_lock);

	return (ret);
}

static void
set_inherited_priority(struct pthread *curthread, struct pthread_mutex *m)
{
	struct pthread_mutex *m2;

	m2 = TAILQ_LAST(&curthread->pp_mutexq, mutex_queue);
	if (m2 != NULL)
		m->m_lock.m_ceilings[1] = m2->m_lock.m_ceilings[0];
	else
		m->m_lock.m_ceilings[1] = -1;
}

int
__pthread_mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr)
{
	return (mutex_init(mutex, mutex_attr ? *mutex_attr : NULL, calloc));
}

/* This function is used internally by malloc. */
int
_pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
    void *(calloc_cb)(size_t, size_t))
{
	static const struct pthread_mutex_attr attr = {
		.m_type = PTHREAD_MUTEX_NORMAL,
		.m_protocol = PTHREAD_PRIO_NONE,
		.m_ceiling = 0
	};
	int ret;

	ret = mutex_init(mutex, &attr, calloc_cb);
	if (ret == 0)
		(*mutex)->m_flags |= PMUTEX_FLAG_PRIVATE;
	return (ret);
}
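
/*
 * Illustrative sketch of the intended caller: malloc cannot use the
 * normal initializer (which itself allocates with calloc()), so the
 * allocator supplies a bootstrap allocator of its own.  The names
 * base_calloc and malloc_lock below are hypothetical:
 *
 *	static void *base_calloc(size_t number, size_t size);
 *		(returns zeroed memory carved from a static arena)
 *
 *	_pthread_mutex_init_calloc_cb(&malloc_lock, base_calloc);
 */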

void
_mutex_fork(struct pthread *curthread)
{
	struct pthread_mutex *m;

	/*
	 * Fix mutex ownership for the child process.  Only the forking
	 * thread survives in the child, so re-tag each owned lock with
	 * its TID.  Process-shared mutexes would instead have to be
	 * removed from the owned-mutex list, since their owner is the
	 * forking thread in the parent; process-shared mutexes are not
	 * currently supported, so that case does not arise yet.
	 */

	TAILQ_FOREACH(m, &curthread->mutexq, m_qe)
		m->m_lock.m_owner = TID(curthread);
	TAILQ_FOREACH(m, &curthread->pp_mutexq, m_qe)
		m->m_lock.m_owner = TID(curthread) | UMUTEX_CONTESTED;
}
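
/*
 * Illustrative example of why the re-tagging above matters: a mutex
 * held across fork() stays locked in the child, where only the
 * forking thread survives (under a new TID).  Without the fixup the
 * child could never release the lock:
 *
 *	pthread_mutex_lock(&m);
 *	if (fork() == 0) {
 *		pthread_mutex_unlock(&m);
 *		_exit(0);
 *	}
 */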

int
_pthread_mutex_destroy(pthread_mutex_t *mutex)
{
	pthread_mutex_t m;
	int ret;

	m = *mutex;
	if (m < THR_MUTEX_DESTROYED) {
		ret = 0;
	} else if (m == THR_MUTEX_DESTROYED) {
		ret = EINVAL;
	} else {
		if (m->m_owner != NULL) {
			ret = EBUSY;
		} else {
			*mutex = THR_MUTEX_DESTROYED;
			MUTEX_ASSERT_NOT_OWNED(m);
			free(m);
			ret = 0;
		}
	}

	return (ret);
}

#define ENQUEUE_MUTEX(curthread, m)					\
	do {								\
		(m)->m_owner = curthread;				\
		/* Add to the list of owned mutexes: */			\
		MUTEX_ASSERT_NOT_OWNED((m));				\
		if (((m)->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)	\
			TAILQ_INSERT_TAIL(&curthread->mutexq, (m), m_qe);\
		else							\
			TAILQ_INSERT_TAIL(&curthread->pp_mutexq, (m), m_qe);\
	} while (0)

#define DEQUEUE_MUTEX(curthread, m)					\
	do {								\
		(m)->m_owner = NULL;					\
		MUTEX_ASSERT_IS_OWNED(m);				\
		if (__predict_true(((m)->m_lock.m_flags &		\
		    UMUTEX_PRIO_PROTECT) == 0))				\
			TAILQ_REMOVE(&curthread->mutexq, (m), m_qe);	\
		else {							\
			TAILQ_REMOVE(&curthread->pp_mutexq, (m), m_qe);	\
			set_inherited_priority(curthread, m);		\
		}							\
		MUTEX_INIT_LINK(m);					\
	} while (0)

#define CHECK_AND_INIT_MUTEX						\
	if (__predict_false((m = *mutex) <= THR_MUTEX_DESTROYED)) {	\
		if (m == THR_MUTEX_DESTROYED)				\
			return (EINVAL);				\
		int ret;						\
		ret = init_static(_get_curthread(), mutex);		\
		if (ret)						\
			return (ret);					\
		m = *mutex;						\
	}
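
/*
 * Illustrative note: CHECK_AND_INIT_MUTEX is what makes the classic
 * static initializer lazy.  A mutex declared as
 *
 *	static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
 *
 * holds a sentinel value until the first lock attempt, at which point
 * init_static() above replaces it with a fully initialized mutex
 * under _mutex_static_lock.
 */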

static int
mutex_trylock_common(pthread_mutex_t *mutex)
{
	struct pthread *curthread = _get_curthread();
	struct pthread_mutex *m = *mutex;
	uint32_t id;
	int ret;

	id = TID(curthread);
	if (m->m_flags & PMUTEX_FLAG_PRIVATE)
		THR_CRITICAL_ENTER(curthread);
	ret = _thr_umutex_trylock(&m->m_lock, id);
	if (__predict_true(ret == 0)) {
		ENQUEUE_MUTEX(curthread, m);
	} else if (m->m_owner == curthread) {
		ret = mutex_self_trylock(m);
	}
	if (ret && (m->m_flags & PMUTEX_FLAG_PRIVATE))
		THR_CRITICAL_LEAVE(curthread);
	return (ret);
}

int
__pthread_mutex_trylock(pthread_mutex_t *mutex)
{
	struct pthread_mutex *m;

	CHECK_AND_INIT_MUTEX

	return (mutex_trylock_common(mutex));
}

static int
mutex_lock_sleep(struct pthread *curthread, struct pthread_mutex *m,
    const struct timespec *abstime)
{
	uint32_t id, owner;
	int count;
	int ret;

	if (m->m_owner == curthread)
		return (mutex_self_lock(m, abstime));

	id = TID(curthread);
	/*
	 * For adaptive mutexes, spin for a while in the expectation
	 * that the owner will release the lock soon: an application
	 * that asks for this mutex type expects hold times short
	 * enough that spinning is cheaper than sleeping in the kernel.
	 */
	if (__predict_false(
	    (m->m_lock.m_flags &
	    (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) != 0))
		goto sleep_in_kernel;

	if (!_thr_is_smp)
		goto yield_loop;

	count = m->m_spinloops;
	while (count--) {
		owner = m->m_lock.m_owner;
		if ((owner & ~UMUTEX_CONTESTED) == 0) {
			if (atomic_cmpset_acq_32(&m->m_lock.m_owner,
			    owner, id | owner)) {
				ret = 0;
				goto done;
			}
		}
		CPU_SPINWAIT;
	}

yield_loop:
	count = m->m_yieldloops;
	while (count--) {
		_sched_yield();
		owner = m->m_lock.m_owner;
		if ((owner & ~UMUTEX_CONTESTED) == 0) {
			if (atomic_cmpset_acq_32(&m->m_lock.m_owner,
			    owner, id | owner)) {
				ret = 0;
				goto done;
			}
		}
	}

sleep_in_kernel:
	if (abstime == NULL) {
		ret = __thr_umutex_lock(&m->m_lock, id);
	} else if (__predict_false(
	    abstime->tv_nsec < 0 ||
	    abstime->tv_nsec >= 1000000000)) {
		ret = EINVAL;
	} else {
		ret = __thr_umutex_timedlock(&m->m_lock, id, abstime);
	}
done:
	if (ret == 0)
		ENQUEUE_MUTEX(curthread, m);

	return (ret);
}

static inline int
mutex_lock_common(struct pthread_mutex *m,
    const struct timespec *abstime, int cvattach)
{
	struct pthread *curthread = _get_curthread();
	int ret;

	if (!cvattach && (m->m_flags & PMUTEX_FLAG_PRIVATE))
		THR_CRITICAL_ENTER(curthread);
	if (_thr_umutex_trylock2(&m->m_lock, TID(curthread)) == 0) {
		ENQUEUE_MUTEX(curthread, m);
		ret = 0;
	} else {
		ret = mutex_lock_sleep(curthread, m, abstime);
	}
	if (ret && (m->m_flags & PMUTEX_FLAG_PRIVATE) && !cvattach)
		THR_CRITICAL_LEAVE(curthread);
	return (ret);
}

int
__pthread_mutex_lock(pthread_mutex_t *mutex)
{
	struct pthread_mutex *m;

	_thr_check_init();

	CHECK_AND_INIT_MUTEX

	return (mutex_lock_common(m, NULL, 0));
}

int
__pthread_mutex_timedlock(pthread_mutex_t *mutex,
    const struct timespec *abstime)
{
	struct pthread_mutex *m;

	_thr_check_init();

	CHECK_AND_INIT_MUTEX

	return (mutex_lock_common(m, abstime, 0));
}

int
_pthread_mutex_unlock(pthread_mutex_t *mutex)
{
	struct pthread_mutex *mp;

	mp = *mutex;
	return (mutex_unlock_common(mp, 0, NULL));
}

int
_mutex_cv_lock(struct pthread_mutex *m, int count)
{
	int error;

	error = mutex_lock_common(m, NULL, 1);
	if (error == 0)
		m->m_count = count;
	return (error);
}

int
_mutex_cv_unlock(struct pthread_mutex *m, int *count, int *defer)
{

	/*
	 * Save the recursion count and clear it so the mutex can be
	 * fully released; it is restored when the mutex is reacquired
	 * after the wait.
	 */
	*count = m->m_count;
	m->m_count = 0;
	(void)mutex_unlock_common(m, 1, defer);
	return (0);
}

int
_mutex_cv_attach(struct pthread_mutex *m, int count)
{
	struct pthread *curthread = _get_curthread();

	ENQUEUE_MUTEX(curthread, m);
	m->m_count = count;
	return (0);
}

int
_mutex_cv_detach(struct pthread_mutex *mp, int *recurse)
{
	struct pthread *curthread = _get_curthread();
	int defered;
	int error;

	if ((error = _mutex_owned(curthread, mp)) != 0)
		return (error);

	/*
	 * Save the recursion count and clear it; _mutex_cv_attach()
	 * restores it when the mutex is reacquired.
	 */
	*recurse = mp->m_count;
	mp->m_count = 0;
	DEQUEUE_MUTEX(curthread, mp);

	/* A deferred wakeup is unlikely here in practice, but handle it. */
	if ((mp->m_flags & PMUTEX_FLAG_DEFERED) != 0) {
		defered = 1;
		mp->m_flags &= ~PMUTEX_FLAG_DEFERED;
	} else
		defered = 0;

	if (defered) {
		_thr_wake_all(curthread->defer_waiters,
		    curthread->nwaiter_defer);
		curthread->nwaiter_defer = 0;
	}
	return (0);
}

static int
mutex_self_trylock(struct pthread_mutex *m)
{
	int ret;

	switch (PMUTEX_TYPE(m->m_flags)) {
	case PTHREAD_MUTEX_ERRORCHECK:
	case PTHREAD_MUTEX_NORMAL:
	case PTHREAD_MUTEX_ADAPTIVE_NP:
		ret = EBUSY;
		break;

	case PTHREAD_MUTEX_RECURSIVE:
		/* Increment the lock count: */
		if (m->m_count + 1 > 0) {
			m->m_count++;
			ret = 0;
		} else
			ret = EAGAIN;
		break;

	default:
		/* Trap invalid mutex types. */
		ret = EINVAL;
	}

	return (ret);
}

static int
mutex_self_lock(struct pthread_mutex *m, const struct timespec *abstime)
{
	struct timespec ts1, ts2;
	int ret;

	switch (PMUTEX_TYPE(m->m_flags)) {
	case PTHREAD_MUTEX_ERRORCHECK:
	case PTHREAD_MUTEX_ADAPTIVE_NP:
		if (abstime) {
			if (abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
			    abstime->tv_nsec >= 1000000000) {
				ret = EINVAL;
			} else {
				clock_gettime(CLOCK_REALTIME, &ts1);
				TIMESPEC_SUB(&ts2, abstime, &ts1);
				__sys_nanosleep(&ts2, NULL);
				ret = ETIMEDOUT;
			}
		} else {
			/*
			 * POSIX specifies that error-checking mutexes
			 * return EDEADLK when a recursive lock attempt
			 * is detected.
			 */
			ret = EDEADLK;
		}
		break;

	case PTHREAD_MUTEX_NORMAL:
		/*
		 * This is what SUSv2 defines as a 'normal' mutex:
		 * intentionally deadlock on attempts to acquire a
		 * lock the thread already owns.
		 */
		ret = 0;
		if (abstime) {
			if (abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
			    abstime->tv_nsec >= 1000000000) {
				ret = EINVAL;
			} else {
				clock_gettime(CLOCK_REALTIME, &ts1);
				TIMESPEC_SUB(&ts2, abstime, &ts1);
				__sys_nanosleep(&ts2, NULL);
				ret = ETIMEDOUT;
			}
		} else {
			ts1.tv_sec = 30;
			ts1.tv_nsec = 0;
			for (;;)
				__sys_nanosleep(&ts1, NULL);
		}
		break;

	case PTHREAD_MUTEX_RECURSIVE:
		/* Increment the lock count: */
		if (m->m_count + 1 > 0) {
			m->m_count++;
			ret = 0;
		} else
			ret = EAGAIN;
		break;

	default:
		/* Trap invalid mutex types. */
		ret = EINVAL;
	}

	return (ret);
}
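
/*
 * Illustrative summary of the self-lock behavior implemented above,
 * for a second lock attempt by the owning thread with no timeout:
 *
 *	pthread_mutex_lock(&m);
 *	ret = pthread_mutex_lock(&m);
 *
 *	ERRORCHECK/ADAPTIVE_NP: ret == EDEADLK
 *	NORMAL:                 never returns (deliberate deadlock)
 *	RECURSIVE:              ret == 0, m_count incremented
 */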

static int
mutex_unlock_common(struct pthread_mutex *m, int cv, int *mtx_defer)
{
	struct pthread *curthread = _get_curthread();
	uint32_t id;
	int defered, error;

	if (__predict_false(m <= THR_MUTEX_DESTROYED)) {
		if (m == THR_MUTEX_DESTROYED)
			return (EINVAL);
		return (EPERM);
	}

	/*
	 * Only the owning thread may unlock the mutex.
	 */
	if (__predict_false(m->m_owner != curthread))
		return (EPERM);

	error = 0;
	id = TID(curthread);
	if (__predict_false(
	    PMUTEX_TYPE(m->m_flags) == PTHREAD_MUTEX_RECURSIVE &&
	    m->m_count > 0)) {
		m->m_count--;
	} else {
		if ((m->m_flags & PMUTEX_FLAG_DEFERED) != 0) {
			defered = 1;
			m->m_flags &= ~PMUTEX_FLAG_DEFERED;
		} else
			defered = 0;

		DEQUEUE_MUTEX(curthread, m);
		error = _thr_umutex_unlock2(&m->m_lock, id, mtx_defer);

		if (mtx_defer == NULL && defered) {
			_thr_wake_all(curthread->defer_waiters,
			    curthread->nwaiter_defer);
			curthread->nwaiter_defer = 0;
		}
	}
	if (!cv && (m->m_flags & PMUTEX_FLAG_PRIVATE))
		THR_CRITICAL_LEAVE(curthread);
	return (error);
}

int
_pthread_mutex_getprioceiling(pthread_mutex_t *mutex,
    int *prioceiling)
{
	struct pthread_mutex *m;
	int ret;

	m = *mutex;
	if ((m <= THR_MUTEX_DESTROYED) ||
	    (m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
		ret = EINVAL;
	else {
		*prioceiling = m->m_lock.m_ceilings[0];
		ret = 0;
	}

	return (ret);
}

int
_pthread_mutex_setprioceiling(pthread_mutex_t *mutex,
    int ceiling, int *old_ceiling)
{
	struct pthread *curthread = _get_curthread();
	struct pthread_mutex *m, *m1, *m2;
	int ret;

	m = *mutex;
	if ((m <= THR_MUTEX_DESTROYED) ||
	    (m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
		return (EINVAL);

	ret = __thr_umutex_set_ceiling(&m->m_lock, ceiling, old_ceiling);
	if (ret != 0)
		return (ret);

	if (m->m_owner == curthread) {
		MUTEX_ASSERT_IS_OWNED(m);
		m1 = TAILQ_PREV(m, mutex_queue, m_qe);
		m2 = TAILQ_NEXT(m, m_qe);
		if ((m1 != NULL && m1->m_lock.m_ceilings[0] > (u_int)ceiling) ||
		    (m2 != NULL && m2->m_lock.m_ceilings[0] < (u_int)ceiling)) {
			/* Re-insert m to keep pp_mutexq sorted by ceiling. */
			TAILQ_REMOVE(&curthread->pp_mutexq, m, m_qe);
			TAILQ_FOREACH(m2, &curthread->pp_mutexq, m_qe) {
				if (m2->m_lock.m_ceilings[0] > (u_int)ceiling) {
					TAILQ_INSERT_BEFORE(m2, m, m_qe);
					return (0);
				}
			}
			TAILQ_INSERT_TAIL(&curthread->pp_mutexq, m, m_qe);
		}
	}
	return (0);
}
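
/*
 * Illustrative sketch: a priority-protected mutex is created through
 * the standard attribute calls; the ceiling should be at least the
 * highest priority of any thread that will take the lock:
 *
 *	pthread_mutexattr_t attr;
 *
 *	pthread_mutexattr_init(&attr);
 *	pthread_mutexattr_setprotocol(&attr, PTHREAD_PRIO_PROTECT);
 *	pthread_mutexattr_setprioceiling(&attr, ceiling);
 *	pthread_mutex_init(&m, &attr);
 *
 * _pthread_mutex_setprioceiling() above adjusts the ceiling later,
 * keeping the owner's pp_mutexq sorted by ceiling.
 */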

int
_pthread_mutex_getspinloops_np(pthread_mutex_t *mutex, int *count)
{
	struct pthread_mutex *m;

	CHECK_AND_INIT_MUTEX

	*count = m->m_spinloops;
	return (0);
}

int
__pthread_mutex_setspinloops_np(pthread_mutex_t *mutex, int count)
{
	struct pthread_mutex *m;

	CHECK_AND_INIT_MUTEX

	m->m_spinloops = count;
	return (0);
}

int
_pthread_mutex_getyieldloops_np(pthread_mutex_t *mutex, int *count)
{
	struct pthread_mutex *m;

	CHECK_AND_INIT_MUTEX

	*count = m->m_yieldloops;
	return (0);
}

int
__pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count)
{
	struct pthread_mutex *m;

	CHECK_AND_INIT_MUTEX

	m->m_yieldloops = count;
	return (0);
}
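
/*
 * Illustrative usage of the non-portable tuning knobs above (the
 * values are arbitrary): spin longer before sleeping on a lock whose
 * hold times are known to be very short.
 *
 *	pthread_mutex_setspinloops_np(&m, 4000);
 *	pthread_mutex_setyieldloops_np(&m, 2);
 */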

int
_pthread_mutex_isowned_np(pthread_mutex_t *mutex)
{
	struct pthread_mutex *m;

	m = *mutex;
	if (m <= THR_MUTEX_DESTROYED)
		return (0);
	return (m->m_owner == _get_curthread());
}

int
_mutex_owned(struct pthread *curthread, const struct pthread_mutex *mp)
{
	if (__predict_false(mp <= THR_MUTEX_DESTROYED)) {
		if (mp == THR_MUTEX_DESTROYED)
			return (EINVAL);
		return (EPERM);
	}
	if (mp->m_owner != curthread)
		return (EPERM);
	return (0);
}