/*
 * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
 * Copyright (c) 2006 David Xu <davidxu@freebsd.org>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by John Birrell.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "namespace.h"
#include <stdlib.h>
#include <errno.h>
#include <string.h>
#include <sys/param.h>
#include <sys/queue.h>
#include <pthread.h>
#include <pthread_np.h>
#include "un-namespace.h"

#include "thr_private.h"

#if defined(_PTHREADS_INVARIANTS)
#define MUTEX_INIT_LINK(m)		do {		\
	(m)->m_qe.tqe_prev = NULL;			\
	(m)->m_qe.tqe_next = NULL;			\
} while (0)
#define MUTEX_ASSERT_IS_OWNED(m)	do {		\
	if (__predict_false((m)->m_qe.tqe_prev == NULL))\
		PANIC("mutex is not on list");		\
} while (0)
#define MUTEX_ASSERT_NOT_OWNED(m)	do {		\
	if (__predict_false((m)->m_qe.tqe_prev != NULL ||	\
	    (m)->m_qe.tqe_next != NULL))		\
		PANIC("mutex is on list");		\
} while (0)
#else
#define MUTEX_INIT_LINK(m)
#define MUTEX_ASSERT_IS_OWNED(m)
#define MUTEX_ASSERT_NOT_OWNED(m)
#endif

/*
 * For adaptive mutexes, how many times to spin doing trylock2
 * before entering the kernel to block.
 */
#define MUTEX_ADAPTIVE_SPINS	2000

/*
 * Prototypes
 */
int	__pthread_mutex_init(pthread_mutex_t *mutex,
		const pthread_mutexattr_t *mutex_attr);
int	__pthread_mutex_trylock(pthread_mutex_t *mutex);
int	__pthread_mutex_lock(pthread_mutex_t *mutex);
int	__pthread_mutex_timedlock(pthread_mutex_t *mutex,
		const struct timespec *abstime);
int	_pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
		void *(calloc_cb)(size_t, size_t));
int	_pthread_mutex_getspinloops_np(pthread_mutex_t *mutex, int *count);
int	_pthread_mutex_setspinloops_np(pthread_mutex_t *mutex, int count);
int	__pthread_mutex_setspinloops_np(pthread_mutex_t *mutex, int count);
int	_pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count);
int	_pthread_mutex_getyieldloops_np(pthread_mutex_t *mutex, int *count);
int	__pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count);

static int	mutex_self_trylock(pthread_mutex_t);
static int	mutex_self_lock(pthread_mutex_t,
				const struct timespec *abstime);
static int	mutex_unlock_common(struct pthread_mutex *, int);
static int	mutex_lock_sleep(struct pthread *, pthread_mutex_t,
				const struct timespec *);

__weak_reference(__pthread_mutex_init, pthread_mutex_init);
__strong_reference(__pthread_mutex_init, _pthread_mutex_init);
__weak_reference(__pthread_mutex_lock, pthread_mutex_lock);
__strong_reference(__pthread_mutex_lock, _pthread_mutex_lock);
__weak_reference(__pthread_mutex_timedlock, pthread_mutex_timedlock);
__strong_reference(__pthread_mutex_timedlock, _pthread_mutex_timedlock);
__weak_reference(__pthread_mutex_trylock, pthread_mutex_trylock);
__strong_reference(__pthread_mutex_trylock, _pthread_mutex_trylock);

/* Single underscore versions provided for libc internal usage: */
/* No difference between libc and application usage of these: */
__weak_reference(_pthread_mutex_destroy, pthread_mutex_destroy);
__weak_reference(_pthread_mutex_unlock, pthread_mutex_unlock);

__weak_reference(_pthread_mutex_getprioceiling, pthread_mutex_getprioceiling);
__weak_reference(_pthread_mutex_setprioceiling, pthread_mutex_setprioceiling);

__weak_reference(__pthread_mutex_setspinloops_np, pthread_mutex_setspinloops_np);
__strong_reference(__pthread_mutex_setspinloops_np, _pthread_mutex_setspinloops_np);
__weak_reference(_pthread_mutex_getspinloops_np, pthread_mutex_getspinloops_np);

__weak_reference(__pthread_mutex_setyieldloops_np, pthread_mutex_setyieldloops_np);
__strong_reference(__pthread_mutex_setyieldloops_np, _pthread_mutex_setyieldloops_np);
__weak_reference(_pthread_mutex_getyieldloops_np, pthread_mutex_getyieldloops_np);
__weak_reference(_pthread_mutex_isowned_np, pthread_mutex_isowned_np);

static int
mutex_init(pthread_mutex_t *mutex,
    const struct pthread_mutex_attr *mutex_attr,
    void *(calloc_cb)(size_t, size_t))
{
	const struct pthread_mutex_attr *attr;
	struct pthread_mutex *pmutex;

	if (mutex_attr == NULL) {
		attr = &_pthread_mutexattr_default;
	} else {
		attr = mutex_attr;
		if (attr->m_type < PTHREAD_MUTEX_ERRORCHECK ||
		    attr->m_type >= PTHREAD_MUTEX_TYPE_MAX)
			return (EINVAL);
		if (attr->m_protocol < PTHREAD_PRIO_NONE ||
		    attr->m_protocol > PTHREAD_PRIO_PROTECT)
			return (EINVAL);
	}
	if ((pmutex = (pthread_mutex_t)
		calloc_cb(1, sizeof(struct pthread_mutex))) == NULL)
		return (ENOMEM);

	pmutex->m_flags = attr->m_type;
	pmutex->m_owner = NULL;
	pmutex->m_count = 0;
	pmutex->m_spinloops = 0;
	pmutex->m_yieldloops = 0;
	MUTEX_INIT_LINK(pmutex);
	switch (attr->m_protocol) {
	case PTHREAD_PRIO_NONE:
		pmutex->m_lock.m_owner = UMUTEX_UNOWNED;
		pmutex->m_lock.m_flags = 0;
		break;
	case PTHREAD_PRIO_INHERIT:
		pmutex->m_lock.m_owner = UMUTEX_UNOWNED;
		pmutex->m_lock.m_flags = UMUTEX_PRIO_INHERIT;
		break;
	case PTHREAD_PRIO_PROTECT:
		pmutex->m_lock.m_owner = UMUTEX_CONTESTED;
		pmutex->m_lock.m_flags = UMUTEX_PRIO_PROTECT;
		pmutex->m_lock.m_ceilings[0] = attr->m_ceiling;
		break;
	}

	if (PMUTEX_TYPE(pmutex->m_flags) == PTHREAD_MUTEX_ADAPTIVE_NP) {
		pmutex->m_spinloops =
		    _thr_spinloops ? _thr_spinloops : MUTEX_ADAPTIVE_SPINS;
		pmutex->m_yieldloops = _thr_yieldloops;
	}

	*mutex = pmutex;
	return (0);
}
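
/*
 * Example (illustrative only, not part of this library): requesting
 * an adaptive mutex from application code, which makes the
 * PTHREAD_MUTEX_ADAPTIVE_NP branch above install the default spin
 * and yield loop counts.
 *
 *	pthread_mutexattr_t attr;
 *	pthread_mutex_t m;
 *
 *	pthread_mutexattr_init(&attr);
 *	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ADAPTIVE_NP);
 *	pthread_mutex_init(&m, &attr);
 *	pthread_mutexattr_destroy(&attr);
 */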

static int
init_static(struct pthread *thread, pthread_mutex_t *mutex)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);

	if (*mutex == THR_MUTEX_INITIALIZER)
		ret = mutex_init(mutex, &_pthread_mutexattr_default, calloc);
	else if (*mutex == THR_ADAPTIVE_MUTEX_INITIALIZER)
		ret = mutex_init(mutex, &_pthread_mutexattr_adaptive_default, calloc);
	else
		ret = 0;
	THR_LOCK_RELEASE(thread, &_mutex_static_lock);

	return (ret);
}
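
/*
 * Example (illustrative only): statically initialized mutexes are
 * only allocated on first use, via init_static() above.  The second
 * line assumes the non-portable adaptive initializer provided by
 * FreeBSD's pthread headers.
 *
 *	static pthread_mutex_t a = PTHREAD_MUTEX_INITIALIZER;
 *	static pthread_mutex_t b = PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP;
 */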

static void
set_inherited_priority(struct pthread *curthread, struct pthread_mutex *m)
{
	struct pthread_mutex *m2;

	m2 = TAILQ_LAST(&curthread->pp_mutexq, mutex_queue);
	if (m2 != NULL)
		m->m_lock.m_ceilings[1] = m2->m_lock.m_ceilings[0];
	else
		m->m_lock.m_ceilings[1] = -1;
}

int
__pthread_mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr)
{
	return (mutex_init(mutex, mutex_attr ? *mutex_attr : NULL, calloc));
}

/* This function is used internally by malloc. */
int
_pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
    void *(calloc_cb)(size_t, size_t))
{
	static const struct pthread_mutex_attr attr = {
		.m_type = PTHREAD_MUTEX_NORMAL,
		.m_protocol = PTHREAD_PRIO_NONE,
		.m_ceiling = 0
	};
	int ret;

	ret = mutex_init(mutex, &attr, calloc_cb);
	if (ret == 0)
		(*mutex)->m_flags |= PMUTEX_FLAG_PRIVATE;
	return (ret);
}
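
/*
 * Example (illustrative sketch): a malloc implementation can hand in
 * its own zeroing allocator so its lock can be created before the
 * regular calloc() works.  "early_calloc" and its static arena are
 * hypothetical names, not part of this library; the sketch omits
 * bounds and overflow checks.
 *
 *	static char arena[4096];	(static storage is already zeroed)
 *	static size_t arena_off;
 *
 *	static void *
 *	early_calloc(size_t number, size_t size)
 *	{
 *		void *p = &arena[arena_off];
 *
 *		arena_off += number * size;
 *		return (p);
 *	}
 *
 *	pthread_mutex_t malloc_lock;
 *	_pthread_mutex_init_calloc_cb(&malloc_lock, early_calloc);
 */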

void
_mutex_fork(struct pthread *curthread)
{
	struct pthread_mutex *m;

	/*
	 * Fix mutex ownership for the child process.  Note that
	 * process-shared mutexes should not be inherited, because
	 * their owner is the forking thread, which lives in the
	 * parent process; they would have to be removed from the
	 * owned mutex lists.  Process-shared mutexes are currently
	 * not supported, so this is not a concern yet.
	 */

	TAILQ_FOREACH(m, &curthread->mutexq, m_qe)
		m->m_lock.m_owner = TID(curthread);
	TAILQ_FOREACH(m, &curthread->pp_mutexq, m_qe)
		m->m_lock.m_owner = TID(curthread) | UMUTEX_CONTESTED;
}

int
_pthread_mutex_destroy(pthread_mutex_t *mutex)
{
	pthread_mutex_t m;
	int ret;

	m = *mutex;
	if (m < THR_MUTEX_DESTROYED) {
		ret = 0;
	} else if (m == THR_MUTEX_DESTROYED) {
		ret = EINVAL;
	} else {
		if (m->m_owner != NULL) {
			ret = EBUSY;
		} else {
			*mutex = THR_MUTEX_DESTROYED;
			MUTEX_ASSERT_NOT_OWNED(m);
			free(m);
			ret = 0;
		}
	}

	return (ret);
}

#define ENQUEUE_MUTEX(curthread, m)					\
	do {								\
		(m)->m_owner = curthread;				\
		/* Add to the list of owned mutexes: */			\
		MUTEX_ASSERT_NOT_OWNED((m));				\
		if (((m)->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)	\
			TAILQ_INSERT_TAIL(&curthread->mutexq, (m), m_qe);\
		else							\
			TAILQ_INSERT_TAIL(&curthread->pp_mutexq, (m), m_qe);\
	} while (0)

#define DEQUEUE_MUTEX(curthread, m)					\
	do {								\
		(m)->m_owner = NULL;					\
		MUTEX_ASSERT_IS_OWNED(m);				\
		if (__predict_true(((m)->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)) \
			TAILQ_REMOVE(&curthread->mutexq, (m), m_qe);	\
		else {							\
			TAILQ_REMOVE(&curthread->pp_mutexq, (m), m_qe);	\
			set_inherited_priority(curthread, m);		\
		}							\
		MUTEX_INIT_LINK(m);					\
	} while (0)

#define CHECK_AND_INIT_MUTEX						\
	if (__predict_false((m = *mutex) <= THR_MUTEX_DESTROYED)) {	\
		if (m == THR_MUTEX_DESTROYED)				\
			return (EINVAL);				\
		int ret;						\
		ret = init_static(_get_curthread(), mutex);		\
		if (ret)						\
			return (ret);					\
		m = *mutex;						\
	}

static int
mutex_trylock_common(pthread_mutex_t *mutex)
{
	struct pthread *curthread = _get_curthread();
	struct pthread_mutex *m = *mutex;
	uint32_t id;
	int ret;

	id = TID(curthread);
	if (m->m_flags & PMUTEX_FLAG_PRIVATE)
		THR_CRITICAL_ENTER(curthread);
	ret = _thr_umutex_trylock(&m->m_lock, id);
	if (__predict_true(ret == 0)) {
		ENQUEUE_MUTEX(curthread, m);
	} else if (m->m_owner == curthread) {
		ret = mutex_self_trylock(m);
	} /* else {} */
	if (ret && (m->m_flags & PMUTEX_FLAG_PRIVATE))
		THR_CRITICAL_LEAVE(curthread);
	return (ret);
}

int
__pthread_mutex_trylock(pthread_mutex_t *mutex)
{
	struct pthread_mutex *m;

	CHECK_AND_INIT_MUTEX

	return (mutex_trylock_common(mutex));
}
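
/*
 * Example (illustrative only): the usual pthread_mutex_trylock()
 * pattern; EBUSY means the lock is already held (including by the
 * calling thread, for non-recursive types).  The helper functions
 * are hypothetical.
 *
 *	if (pthread_mutex_trylock(&m) == 0) {
 *		update_shared_state();
 *		pthread_mutex_unlock(&m);
 *	} else {
 *		do_other_work();
 *	}
 */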

static int
mutex_lock_sleep(struct pthread *curthread, struct pthread_mutex *m,
	const struct timespec *abstime)
{
	uint32_t	id, owner;
	int	count;
	int	ret;

	if (m->m_owner == curthread)
		return (mutex_self_lock(m, abstime));

	id = TID(curthread);
	/*
	 * For adaptive mutexes, spin for a bit in the expectation
	 * that if the application requests this mutex type then
	 * the lock is likely to be released quickly and it is
	 * faster than entering the kernel.
	 */
	if (__predict_false(
		(m->m_lock.m_flags &
		 (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) != 0))
			goto sleep_in_kernel;

	if (!_thr_is_smp)
		goto yield_loop;

	count = m->m_spinloops;
	while (count--) {
		owner = m->m_lock.m_owner;
		if ((owner & ~UMUTEX_CONTESTED) == 0) {
			if (atomic_cmpset_acq_32(&m->m_lock.m_owner, owner, id|owner)) {
				ret = 0;
				goto done;
			}
		}
		CPU_SPINWAIT;
	}

yield_loop:
	count = m->m_yieldloops;
	while (count--) {
		_sched_yield();
		owner = m->m_lock.m_owner;
		if ((owner & ~UMUTEX_CONTESTED) == 0) {
			if (atomic_cmpset_acq_32(&m->m_lock.m_owner, owner, id|owner)) {
				ret = 0;
				goto done;
			}
		}
	}

sleep_in_kernel:
	if (abstime == NULL) {
		ret = __thr_umutex_lock(&m->m_lock, id);
	} else if (__predict_false(
		   abstime->tv_nsec < 0 ||
		   abstime->tv_nsec >= 1000000000)) {
		ret = EINVAL;
	} else {
		ret = __thr_umutex_timedlock(&m->m_lock, id, abstime);
	}
done:
	if (ret == 0)
		ENQUEUE_MUTEX(curthread, m);

	return (ret);
}

static inline int
mutex_lock_common(struct pthread_mutex *m,
	const struct timespec *abstime, int cvattach)
{
	struct pthread *curthread = _get_curthread();
	int ret;

	if (!cvattach && m->m_flags & PMUTEX_FLAG_PRIVATE)
		THR_CRITICAL_ENTER(curthread);
	if (_thr_umutex_trylock2(&m->m_lock, TID(curthread)) == 0) {
		ENQUEUE_MUTEX(curthread, m);
		ret = 0;
	} else {
		ret = mutex_lock_sleep(curthread, m, abstime);
	}
	if (ret && (m->m_flags & PMUTEX_FLAG_PRIVATE) && !cvattach)
		THR_CRITICAL_LEAVE(curthread);
	return (ret);
}

int
__pthread_mutex_lock(pthread_mutex_t *mutex)
{
	struct pthread_mutex	*m;

	_thr_check_init();

	CHECK_AND_INIT_MUTEX

	return (mutex_lock_common(m, NULL, 0));
}

int
__pthread_mutex_timedlock(pthread_mutex_t *mutex, const struct timespec *abstime)
{
	struct pthread_mutex	*m;

	_thr_check_init();

	CHECK_AND_INIT_MUTEX

	return (mutex_lock_common(m, abstime, 0));
}
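
/*
 * Example (illustrative only): pthread_mutex_timedlock() takes an
 * absolute CLOCK_REALTIME deadline, not a relative timeout.
 *
 *	struct timespec abstime;
 *
 *	clock_gettime(CLOCK_REALTIME, &abstime);
 *	abstime.tv_sec += 5;
 *	if (pthread_mutex_timedlock(&m, &abstime) == ETIMEDOUT)
 *		warnx("lock not acquired within ~5 seconds");
 */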

int
_pthread_mutex_unlock(pthread_mutex_t *mutex)
{
	struct pthread_mutex *mp;

	mp = *mutex;
	return (mutex_unlock_common(mp, 0));
}

int
_mutex_cv_lock(struct pthread_mutex *m, int count)
{
	int	error;

	error = mutex_lock_common(m, NULL, 1);
	if (error == 0)
		m->m_count = count;
	return (error);
}

int
_mutex_cv_unlock(struct pthread_mutex *m, int *count)
{

	/*
	 * Clear the count in case this is a recursive mutex.
	 */
	*count = m->m_count;
	m->m_count = 0;
	(void)mutex_unlock_common(m, 1);
	return (0);
}

int
_mutex_cv_attach(struct pthread_mutex *m, int count)
{
	struct pthread *curthread = _get_curthread();

	ENQUEUE_MUTEX(curthread, m);
	m->m_count = count;
	return (0);
}

int
_mutex_cv_detach(struct pthread_mutex *mp, int *recurse)
{
	struct pthread *curthread = _get_curthread();
	int	defered;
	int	error;

	if ((error = _mutex_owned(curthread, mp)) != 0)
		return (error);

	/*
	 * Clear the count in case this is a recursive mutex.
	 */
	*recurse = mp->m_count;
	mp->m_count = 0;
	DEQUEUE_MUTEX(curthread, mp);

	/* Does this happen in the real world? */
	if ((mp->m_flags & PMUTEX_FLAG_DEFERED) != 0) {
		defered = 1;
		mp->m_flags &= ~PMUTEX_FLAG_DEFERED;
	} else
		defered = 0;

	if (defered) {
		_thr_wake_all(curthread->defer_waiters,
				curthread->nwaiter_defer);
		curthread->nwaiter_defer = 0;
	}
	return (0);
}

static int
mutex_self_trylock(struct pthread_mutex *m)
{
	int	ret;

	switch (PMUTEX_TYPE(m->m_flags)) {
	case PTHREAD_MUTEX_ERRORCHECK:
	case PTHREAD_MUTEX_NORMAL:
	case PTHREAD_MUTEX_ADAPTIVE_NP:
		ret = EBUSY;
		break;

	case PTHREAD_MUTEX_RECURSIVE:
		/* Increment the lock count: */
		if (m->m_count + 1 > 0) {
			m->m_count++;
			ret = 0;
		} else
			ret = EAGAIN;
		break;

	default:
		/* Trap invalid mutex types: */
		ret = EINVAL;
	}

	return (ret);
}

static int
mutex_self_lock(struct pthread_mutex *m, const struct timespec *abstime)
{
	struct timespec	ts1, ts2;
	int	ret;

	switch (PMUTEX_TYPE(m->m_flags)) {
	case PTHREAD_MUTEX_ERRORCHECK:
	case PTHREAD_MUTEX_ADAPTIVE_NP:
		if (abstime) {
			if (abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
			    abstime->tv_nsec >= 1000000000) {
				ret = EINVAL;
			} else {
				clock_gettime(CLOCK_REALTIME, &ts1);
				TIMESPEC_SUB(&ts2, abstime, &ts1);
				__sys_nanosleep(&ts2, NULL);
				ret = ETIMEDOUT;
			}
		} else {
			/*
			 * POSIX specifies that mutexes should return
			 * EDEADLK if a recursive lock is detected.
			 */
			ret = EDEADLK;
		}
		break;

	case PTHREAD_MUTEX_NORMAL:
		/*
		 * What SS2 defines as a 'normal' mutex.  Intentionally
		 * deadlock on attempts to get a lock you already own.
		 */
		ret = 0;
		if (abstime) {
			if (abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
			    abstime->tv_nsec >= 1000000000) {
				ret = EINVAL;
			} else {
				clock_gettime(CLOCK_REALTIME, &ts1);
				TIMESPEC_SUB(&ts2, abstime, &ts1);
				__sys_nanosleep(&ts2, NULL);
				ret = ETIMEDOUT;
			}
		} else {
			ts1.tv_sec = 30;
			ts1.tv_nsec = 0;
			for (;;)
				__sys_nanosleep(&ts1, NULL);
		}
		break;

	case PTHREAD_MUTEX_RECURSIVE:
		/* Increment the lock count: */
		if (m->m_count + 1 > 0) {
			m->m_count++;
			ret = 0;
		} else
			ret = EAGAIN;
		break;

	default:
		/* Trap invalid mutex types: */
		ret = EINVAL;
	}

	return (ret);
}
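
/*
 * Example (illustrative only): an error-checking mutex reports
 * self-deadlock instead of hanging, matching the EDEADLK case
 * handled above.
 *
 *	pthread_mutexattr_t attr;
 *	pthread_mutex_t m;
 *
 *	pthread_mutexattr_init(&attr);
 *	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
 *	pthread_mutex_init(&m, &attr);
 *	pthread_mutex_lock(&m);
 *	assert(pthread_mutex_lock(&m) == EDEADLK);
 */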

static int
mutex_unlock_common(struct pthread_mutex *m, int cv)
{
	struct pthread *curthread = _get_curthread();
	uint32_t id;
	int defered;

	if (__predict_false(m <= THR_MUTEX_DESTROYED)) {
		if (m == THR_MUTEX_DESTROYED)
			return (EINVAL);
		return (EPERM);
	}

	/*
	 * Check if the running thread is not the owner of the mutex.
	 */
	if (__predict_false(m->m_owner != curthread))
		return (EPERM);

	id = TID(curthread);
	if (__predict_false(
		PMUTEX_TYPE(m->m_flags) == PTHREAD_MUTEX_RECURSIVE &&
		m->m_count > 0)) {
		m->m_count--;
	} else {
		if ((m->m_flags & PMUTEX_FLAG_DEFERED) != 0) {
			defered = 1;
			m->m_flags &= ~PMUTEX_FLAG_DEFERED;
		} else
			defered = 0;

		DEQUEUE_MUTEX(curthread, m);
		_thr_umutex_unlock(&m->m_lock, id);

		if (curthread->will_sleep == 0 && defered) {
			_thr_wake_all(curthread->defer_waiters,
				curthread->nwaiter_defer);
			curthread->nwaiter_defer = 0;
		}
	}
	if (!cv && m->m_flags & PMUTEX_FLAG_PRIVATE)
		THR_CRITICAL_LEAVE(curthread);
	return (0);
}

int
_pthread_mutex_getprioceiling(pthread_mutex_t *mutex,
			      int *prioceiling)
{
	struct pthread_mutex *m;
	int ret;

	m = *mutex;
	if ((m <= THR_MUTEX_DESTROYED) ||
	    (m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
		ret = EINVAL;
	else {
		*prioceiling = m->m_lock.m_ceilings[0];
		ret = 0;
	}

	return (ret);
}

int
_pthread_mutex_setprioceiling(pthread_mutex_t *mutex,
			      int ceiling, int *old_ceiling)
{
	struct pthread *curthread = _get_curthread();
	struct pthread_mutex *m, *m1, *m2;
	int ret;

	m = *mutex;
	if ((m <= THR_MUTEX_DESTROYED) ||
	    (m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
		return (EINVAL);

	ret = __thr_umutex_set_ceiling(&m->m_lock, ceiling, old_ceiling);
	if (ret != 0)
		return (ret);

	if (m->m_owner == curthread) {
		MUTEX_ASSERT_IS_OWNED(m);
		m1 = TAILQ_PREV(m, mutex_queue, m_qe);
		m2 = TAILQ_NEXT(m, m_qe);
		if ((m1 != NULL && m1->m_lock.m_ceilings[0] > (u_int)ceiling) ||
		    (m2 != NULL && m2->m_lock.m_ceilings[0] < (u_int)ceiling)) {
			TAILQ_REMOVE(&curthread->pp_mutexq, m, m_qe);
			TAILQ_FOREACH(m2, &curthread->pp_mutexq, m_qe) {
				if (m2->m_lock.m_ceilings[0] > (u_int)ceiling) {
					TAILQ_INSERT_BEFORE(m2, m, m_qe);
					return (0);
				}
			}
			TAILQ_INSERT_TAIL(&curthread->pp_mutexq, m, m_qe);
		}
	}
	return (0);
}
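
/*
 * Example (illustrative only): creating a priority-protected mutex
 * whose ceiling is later raised through the function above.
 *
 *	pthread_mutexattr_t attr;
 *	pthread_mutex_t m;
 *	int old;
 *
 *	pthread_mutexattr_init(&attr);
 *	pthread_mutexattr_setprotocol(&attr, PTHREAD_PRIO_PROTECT);
 *	pthread_mutexattr_setprioceiling(&attr, 20);
 *	pthread_mutex_init(&m, &attr);
 *	pthread_mutex_setprioceiling(&m, 25, &old);
 */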

int
_pthread_mutex_getspinloops_np(pthread_mutex_t *mutex, int *count)
{
	struct pthread_mutex	*m;

	CHECK_AND_INIT_MUTEX

	*count = m->m_spinloops;
	return (0);
}

int
__pthread_mutex_setspinloops_np(pthread_mutex_t *mutex, int count)
{
	struct pthread_mutex	*m;

	CHECK_AND_INIT_MUTEX

	m->m_spinloops = count;
	return (0);
}

int
_pthread_mutex_getyieldloops_np(pthread_mutex_t *mutex, int *count)
{
	struct pthread_mutex	*m;

	CHECK_AND_INIT_MUTEX

	*count = m->m_yieldloops;
	return (0);
}

int
__pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count)
{
	struct pthread_mutex	*m;

	CHECK_AND_INIT_MUTEX

	m->m_yieldloops = count;
	return (0);
}
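
/*
 * Example (illustrative only): tuning the adaptive spin and yield
 * counts of one mutex through the non-portable interfaces above,
 * declared in <pthread_np.h>.
 *
 *	int spins;
 *
 *	pthread_mutex_getspinloops_np(&m, &spins);
 *	pthread_mutex_setspinloops_np(&m, spins * 2);
 *	pthread_mutex_setyieldloops_np(&m, 10);
 */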

int
_pthread_mutex_isowned_np(pthread_mutex_t *mutex)
{
	struct pthread_mutex	*m;

	m = *mutex;
	if (m <= THR_MUTEX_DESTROYED)
		return (0);
	return (m->m_owner == _get_curthread());
}
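
/*
 * Example (illustrative only): pthread_mutex_isowned_np() suits
 * debug assertions that a lock is held on entry to a function;
 * "qlock" and "flush_one" are hypothetical names.
 *
 *	static void
 *	flush_queue(void)
 *	{
 *		assert(pthread_mutex_isowned_np(&qlock));
 *		while (!TAILQ_EMPTY(&queue))
 *			flush_one();
 *	}
 */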

int
_mutex_owned(struct pthread *curthread, const struct pthread_mutex *mp)
{
	if (__predict_false(mp <= THR_MUTEX_DESTROYED)) {
		if (mp == THR_MUTEX_DESTROYED)
			return (EINVAL);
		return (EPERM);
	}
	if (mp->m_owner != curthread)
		return (EPERM);
	return (0);
}