/* thr_mutex.c, FreeBSD libkse, revision 113658 */
1/*
2 * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 *    must display the following acknowledgement:
15 *	This product includes software developed by John Birrell.
16 * 4. Neither the name of the author nor the names of any co-contributors
17 *    may be used to endorse or promote products derived from this software
18 *    without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * SUCH DAMAGE.
31 *
32 * $FreeBSD: head/lib/libkse/thread/thr_mutex.c 113658 2003-04-18 05:04:16Z deischen $
33 */
34#include <stdlib.h>
35#include <errno.h>
36#include <string.h>
37#include <sys/param.h>
38#include <sys/queue.h>
39#include <pthread.h>
40#include "thr_private.h"
41
/*
 * Debugging invariants: when _PTHREADS_INVARIANTS is defined, the
 * macros below verify that a mutex is (or is not) on a thread's
 * owned-mutex queue and that a thread is not unexpectedly in a
 * synchronization queue.  They compile away to nothing otherwise.
 *
 * Note: each macro is a single do/while(0) statement and must NOT
 * carry a trailing semicolon in its definition (the caller supplies
 * it); a trailing semicolon would break use inside if/else bodies.
 */
#if defined(_PTHREADS_INVARIANTS)
#define MUTEX_INIT_LINK(m) 		do {		\
	(m)->m_qe.tqe_prev = NULL;			\
	(m)->m_qe.tqe_next = NULL;			\
} while (0)
#define MUTEX_ASSERT_IS_OWNED(m)	do {		\
	if ((m)->m_qe.tqe_prev == NULL)			\
		PANIC("mutex is not on list");		\
} while (0)
#define MUTEX_ASSERT_NOT_OWNED(m)	do {		\
	if (((m)->m_qe.tqe_prev != NULL) ||		\
	    ((m)->m_qe.tqe_next != NULL))		\
		PANIC("mutex is on list");		\
} while (0)
#define	THR_ASSERT_NOT_IN_SYNCQ(thr)	do {		\
	THR_ASSERT(((thr)->sflags & THR_FLAGS_IN_SYNCQ) == 0, \
	    "thread in syncq when it shouldn't be.");	\
} while (0)
#else
#define MUTEX_INIT_LINK(m)
#define MUTEX_ASSERT_IS_OWNED(m)
#define MUTEX_ASSERT_NOT_OWNED(m)
#define	THR_ASSERT_NOT_IN_SYNCQ(thr)
#endif
66
67/*
68 * Prototypes
69 */
70static void		mutex_handoff(struct pthread *, struct pthread_mutex *);
71static inline int	mutex_self_trylock(struct pthread *, pthread_mutex_t);
72static inline int	mutex_self_lock(struct pthread *, pthread_mutex_t);
73static int		mutex_unlock_common(pthread_mutex_t *, int);
74static void		mutex_priority_adjust(struct pthread *, pthread_mutex_t);
75static void		mutex_rescan_owned (struct pthread *, struct pthread *,
76			    struct pthread_mutex *);
77static inline pthread_t	mutex_queue_deq(pthread_mutex_t);
78static inline void	mutex_queue_remove(pthread_mutex_t, pthread_t);
79static inline void	mutex_queue_enq(pthread_mutex_t, pthread_t);
80
81
/*
 * Attributes used when a statically initialized mutex is given its
 * dynamic initialization on first use (see init_static_private()).
 */
static struct pthread_mutex_attr	static_mutex_attr =
    PTHREAD_MUTEXATTR_STATIC_INITIALIZER;
static pthread_mutexattr_t		static_mattr = &static_mutex_attr;

/*
 * Double-underscore versions are the libc-visible entry points; the
 * plain POSIX names are provided as weak aliases of them.
 */
__weak_reference(__pthread_mutex_lock, pthread_mutex_lock);
__weak_reference(__pthread_mutex_trylock, pthread_mutex_trylock);

/* No difference between libc and application usage of these: */
__weak_reference(_pthread_mutex_init, pthread_mutex_init);
__weak_reference(_pthread_mutex_destroy, pthread_mutex_destroy);
__weak_reference(_pthread_mutex_unlock, pthread_mutex_unlock);
94
95
96
97int
98_pthread_mutex_init(pthread_mutex_t *mutex,
99    const pthread_mutexattr_t *mutex_attr)
100{
101	struct pthread_mutex *pmutex;
102	enum pthread_mutextype type;
103	int		protocol;
104	int		ceiling;
105	int		flags;
106	int		ret = 0;
107
108	if (mutex == NULL)
109		ret = EINVAL;
110
111	/* Check if default mutex attributes: */
112	else if (mutex_attr == NULL || *mutex_attr == NULL) {
113		/* Default to a (error checking) POSIX mutex: */
114		type = PTHREAD_MUTEX_ERRORCHECK;
115		protocol = PTHREAD_PRIO_NONE;
116		ceiling = THR_MAX_PRIORITY;
117		flags = 0;
118	}
119
120	/* Check mutex type: */
121	else if (((*mutex_attr)->m_type < PTHREAD_MUTEX_ERRORCHECK) ||
122	    ((*mutex_attr)->m_type >= MUTEX_TYPE_MAX))
123		/* Return an invalid argument error: */
124		ret = EINVAL;
125
126	/* Check mutex protocol: */
127	else if (((*mutex_attr)->m_protocol < PTHREAD_PRIO_NONE) ||
128	    ((*mutex_attr)->m_protocol > PTHREAD_MUTEX_RECURSIVE))
129		/* Return an invalid argument error: */
130		ret = EINVAL;
131
132	else {
133		/* Use the requested mutex type and protocol: */
134		type = (*mutex_attr)->m_type;
135		protocol = (*mutex_attr)->m_protocol;
136		ceiling = (*mutex_attr)->m_ceiling;
137		flags = (*mutex_attr)->m_flags;
138	}
139
140	/* Check no errors so far: */
141	if (ret == 0) {
142		if ((pmutex = (pthread_mutex_t)
143		    malloc(sizeof(struct pthread_mutex))) == NULL)
144			ret = ENOMEM;
145		else if (_lock_init(&pmutex->m_lock, LCK_ADAPTIVE,
146		    _thr_lock_wait, _thr_lock_wakeup) != 0) {
147			free(pmutex);
148			*mutex = NULL;
149			ret = ENOMEM;
150		} else {
151			/* Set the mutex flags: */
152			pmutex->m_flags = flags;
153
154			/* Process according to mutex type: */
155			switch (type) {
156			/* case PTHREAD_MUTEX_DEFAULT: */
157			case PTHREAD_MUTEX_ERRORCHECK:
158			case PTHREAD_MUTEX_NORMAL:
159				/* Nothing to do here. */
160				break;
161
162			/* Single UNIX Spec 2 recursive mutex: */
163			case PTHREAD_MUTEX_RECURSIVE:
164				/* Reset the mutex count: */
165				pmutex->m_count = 0;
166				break;
167
168			/* Trap invalid mutex types: */
169			default:
170				/* Return an invalid argument error: */
171				ret = EINVAL;
172				break;
173			}
174			if (ret == 0) {
175				/* Initialise the rest of the mutex: */
176				TAILQ_INIT(&pmutex->m_queue);
177				pmutex->m_flags |= MUTEX_FLAGS_INITED;
178				pmutex->m_owner = NULL;
179				pmutex->m_type = type;
180				pmutex->m_protocol = protocol;
181				pmutex->m_refcount = 0;
182				if (protocol == PTHREAD_PRIO_PROTECT)
183					pmutex->m_prio = ceiling;
184				else
185					pmutex->m_prio = -1;
186				pmutex->m_saved_prio = 0;
187				MUTEX_INIT_LINK(pmutex);
188				*mutex = pmutex;
189			} else {
190				free(pmutex);
191				*mutex = NULL;
192			}
193		}
194	}
195	/* Return the completion status: */
196	return (ret);
197}
198
199int
200_pthread_mutex_destroy(pthread_mutex_t *mutex)
201{
202	struct pthread	*curthread = _get_curthread();
203	pthread_mutex_t m;
204	int ret = 0;
205
206	if (mutex == NULL || *mutex == NULL)
207		ret = EINVAL;
208	else {
209		/* Lock the mutex structure: */
210		THR_LOCK_ACQUIRE(curthread, &(*mutex)->m_lock);
211
212		/*
213		 * Check to see if this mutex is in use:
214		 */
215		if (((*mutex)->m_owner != NULL) ||
216		    (TAILQ_FIRST(&(*mutex)->m_queue) != NULL) ||
217		    ((*mutex)->m_refcount != 0)) {
218			ret = EBUSY;
219
220			/* Unlock the mutex structure: */
221			THR_LOCK_RELEASE(curthread, &(*mutex)->m_lock);
222		} else {
223			/*
224			 * Save a pointer to the mutex so it can be free'd
225			 * and set the caller's pointer to NULL:
226			 */
227			m = *mutex;
228			*mutex = NULL;
229
230			/* Unlock the mutex structure: */
231			THR_LOCK_RELEASE(curthread, &m->m_lock);
232
233			/*
234			 * Free the memory allocated for the mutex
235			 * structure:
236			 */
237			MUTEX_ASSERT_NOT_OWNED(m);
238			free(m);
239		}
240	}
241
242	/* Return the completion status: */
243	return (ret);
244}
245
246static int
247init_static(struct pthread *thread, pthread_mutex_t *mutex)
248{
249	int ret;
250
251	THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);
252
253	if (*mutex == NULL)
254		ret = pthread_mutex_init(mutex, NULL);
255	else
256		ret = 0;
257
258	THR_LOCK_RELEASE(thread, &_mutex_static_lock);
259
260	return (ret);
261}
262
263static int
264init_static_private(struct pthread *thread, pthread_mutex_t *mutex)
265{
266	int ret;
267
268	THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);
269
270	if (*mutex == NULL)
271		ret = pthread_mutex_init(mutex, &static_mattr);
272	else
273		ret = 0;
274
275	THR_LOCK_RELEASE(thread, &_mutex_static_lock);
276
277	return (ret);
278}
279
/*
 * Common nonblocking acquire path shared by the trylock entry points.
 * Expects *mutex to be initialized; holds the mutex structure lock
 * for the whole operation.
 *
 * Returns 0 on success, EBUSY when another thread owns the mutex,
 * whatever mutex_self_trylock() decides when the caller already owns
 * it, and EINVAL for a ceiling violation or unknown protocol.
 */
static int
mutex_trylock_common(struct pthread *curthread, pthread_mutex_t *mutex)
{
	int ret = 0;

	THR_ASSERT((mutex != NULL) && (*mutex != NULL),
	    "Uninitialized mutex in pthread_mutex_trylock_basic");

	/* Lock the mutex structure: */
	THR_LOCK_ACQUIRE(curthread, &(*mutex)->m_lock);

	/*
	 * If the mutex was statically allocated, properly
	 * initialize the tail queue.
	 */
	if (((*mutex)->m_flags & MUTEX_FLAGS_INITED) == 0) {
		TAILQ_INIT(&(*mutex)->m_queue);
		MUTEX_INIT_LINK(*mutex);
		(*mutex)->m_flags |= MUTEX_FLAGS_INITED;
	}

	/* Process according to mutex type: */
	switch ((*mutex)->m_protocol) {
	/* Default POSIX mutex: */
	case PTHREAD_PRIO_NONE:
		/* Check if this mutex is not locked: */
		if ((*mutex)->m_owner == NULL) {
			/* Lock the mutex for the running thread: */
			(*mutex)->m_owner = curthread;

			/* Add to the list of owned mutexes: */
			MUTEX_ASSERT_NOT_OWNED(*mutex);
			TAILQ_INSERT_TAIL(&curthread->mutexq,
			    (*mutex), m_qe);
		} else if ((*mutex)->m_owner == curthread)
			ret = mutex_self_trylock(curthread, *mutex);
		else
			/* Return a busy error: */
			ret = EBUSY;
		break;

	/* POSIX priority inheritence mutex: */
	case PTHREAD_PRIO_INHERIT:
		/* Check if this mutex is not locked: */
		if ((*mutex)->m_owner == NULL) {
			/* Lock the mutex for the running thread: */
			(*mutex)->m_owner = curthread;

			/* Track number of priority mutexes owned: */
			curthread->priority_mutex_count++;

			/*
			 * The mutex takes on the attributes of the
			 * running thread when there are no waiters.
			 */
			(*mutex)->m_prio = curthread->active_priority;
			(*mutex)->m_saved_prio =
			    curthread->inherited_priority;

			/* Add to the list of owned mutexes: */
			MUTEX_ASSERT_NOT_OWNED(*mutex);
			TAILQ_INSERT_TAIL(&curthread->mutexq,
			    (*mutex), m_qe);
		} else if ((*mutex)->m_owner == curthread)
			ret = mutex_self_trylock(curthread, *mutex);
		else
			/* Return a busy error: */
			ret = EBUSY;
		break;

	/* POSIX priority protection mutex: */
	case PTHREAD_PRIO_PROTECT:
		/* Check for a priority ceiling violation: */
		if (curthread->active_priority > (*mutex)->m_prio)
			ret = EINVAL;

		/* Check if this mutex is not locked: */
		else if ((*mutex)->m_owner == NULL) {
			/* Lock the mutex for the running thread: */
			(*mutex)->m_owner = curthread;

			/* Track number of priority mutexes owned: */
			curthread->priority_mutex_count++;

			/*
			 * The running thread inherits the ceiling
			 * priority of the mutex and executes at that
			 * priority.
			 */
			curthread->active_priority = (*mutex)->m_prio;
			(*mutex)->m_saved_prio =
			    curthread->inherited_priority;
			curthread->inherited_priority =
			    (*mutex)->m_prio;

			/* Add to the list of owned mutexes: */
			MUTEX_ASSERT_NOT_OWNED(*mutex);
			TAILQ_INSERT_TAIL(&curthread->mutexq,
			    (*mutex), m_qe);
		} else if ((*mutex)->m_owner == curthread)
			ret = mutex_self_trylock(curthread, *mutex);
		else
			/* Return a busy error: */
			ret = EBUSY;
		break;

	/* Trap invalid mutex types: */
	default:
		/* Return an invalid argument error: */
		ret = EINVAL;
		break;
	}

	/* Unlock the mutex structure: */
	THR_LOCK_RELEASE(curthread, &(*mutex)->m_lock);

	/* Return the completion status: */
	return (ret);
}
399
400int
401__pthread_mutex_trylock(pthread_mutex_t *mutex)
402{
403	struct pthread *curthread = _get_curthread();
404	int ret = 0;
405
406	if (mutex == NULL)
407		ret = EINVAL;
408
409	/*
410	 * If the mutex is statically initialized, perform the dynamic
411	 * initialization:
412	 */
413	else if ((*mutex != NULL) ||
414	    ((ret = init_static(curthread, mutex)) == 0))
415		ret = mutex_trylock_common(curthread, mutex);
416
417	return (ret);
418}
419
420int
421_pthread_mutex_trylock(pthread_mutex_t *mutex)
422{
423	struct pthread	*curthread = _get_curthread();
424	int	ret = 0;
425
426	if (mutex == NULL)
427		ret = EINVAL;
428
429	/*
430	 * If the mutex is statically initialized, perform the dynamic
431	 * initialization marking the mutex private (delete safe):
432	 */
433	else if ((*mutex != NULL) ||
434	    ((ret = init_static_private(curthread, mutex)) == 0))
435		ret = mutex_trylock_common(curthread, mutex);
436
437	return (ret);
438}
439
440static int
441mutex_lock_common(struct pthread *curthread, pthread_mutex_t *m)
442{
443	int	ret = 0;
444
445	THR_ASSERT((m != NULL) && (*m != NULL),
446	    "Uninitialized mutex in pthread_mutex_trylock_basic");
447
448	/* Reset the interrupted flag: */
449	curthread->interrupted = 0;
450
451	/*
452	 * Enter a loop waiting to become the mutex owner.  We need a
453	 * loop in case the waiting thread is interrupted by a signal
454	 * to execute a signal handler.  It is not (currently) possible
455	 * to remain in the waiting queue while running a handler.
456	 * Instead, the thread is interrupted and backed out of the
457	 * waiting queue prior to executing the signal handler.
458	 */
459	do {
460		/* Lock the mutex structure: */
461		THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
462
463		/*
464		 * If the mutex was statically allocated, properly
465		 * initialize the tail queue.
466		 */
467		if (((*m)->m_flags & MUTEX_FLAGS_INITED) == 0) {
468			TAILQ_INIT(&(*m)->m_queue);
469			(*m)->m_flags |= MUTEX_FLAGS_INITED;
470			MUTEX_INIT_LINK(*m);
471		}
472
473		/* Process according to mutex type: */
474		switch ((*m)->m_protocol) {
475		/* Default POSIX mutex: */
476		case PTHREAD_PRIO_NONE:
477			if ((*m)->m_owner == NULL) {
478				/* Lock the mutex for this thread: */
479				(*m)->m_owner = curthread;
480
481				/* Add to the list of owned mutexes: */
482				MUTEX_ASSERT_NOT_OWNED(*m);
483				TAILQ_INSERT_TAIL(&curthread->mutexq,
484				    (*m), m_qe);
485
486				/* Unlock the mutex structure: */
487				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
488			} else if ((*m)->m_owner == curthread) {
489				ret = mutex_self_lock(curthread, *m);
490
491				/* Unlock the mutex structure: */
492				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
493			} else {
494				/*
495				 * Join the queue of threads waiting to lock
496				 * the mutex and save a pointer to the mutex.
497				 */
498				mutex_queue_enq(*m, curthread);
499				curthread->data.mutex = *m;
500
501				/*
502				 * This thread is active and is in a critical
503				 * region (holding the mutex lock); we should
504				 * be able to safely set the state.
505				 */
506				THR_SET_STATE(curthread, PS_MUTEX_WAIT);
507
508				/* Unlock the mutex structure: */
509				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
510
511				/* Schedule the next thread: */
512				_thr_sched_switch(curthread);
513			}
514			break;
515
516		/* POSIX priority inheritence mutex: */
517		case PTHREAD_PRIO_INHERIT:
518			/* Check if this mutex is not locked: */
519			if ((*m)->m_owner == NULL) {
520				/* Lock the mutex for this thread: */
521				(*m)->m_owner = curthread;
522
523				/* Track number of priority mutexes owned: */
524				curthread->priority_mutex_count++;
525
526				/*
527				 * The mutex takes on attributes of the
528				 * running thread when there are no waiters.
529				 * Make sure the thread's scheduling lock is
530				 * held while priorities are adjusted.
531				 */
532				THR_SCHED_LOCK(curthread, curthread);
533				(*m)->m_prio = curthread->active_priority;
534				(*m)->m_saved_prio =
535				    curthread->inherited_priority;
536				curthread->inherited_priority = (*m)->m_prio;
537				THR_SCHED_UNLOCK(curthread, curthread);
538
539				/* Add to the list of owned mutexes: */
540				MUTEX_ASSERT_NOT_OWNED(*m);
541				TAILQ_INSERT_TAIL(&curthread->mutexq,
542				    (*m), m_qe);
543
544				/* Unlock the mutex structure: */
545				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
546			} else if ((*m)->m_owner == curthread) {
547				ret = mutex_self_lock(curthread, *m);
548
549				/* Unlock the mutex structure: */
550				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
551			} else {
552				/*
553				 * Join the queue of threads waiting to lock
554				 * the mutex and save a pointer to the mutex.
555				 */
556				mutex_queue_enq(*m, curthread);
557				curthread->data.mutex = *m;
558
559				/*
560				 * This thread is active and is in a critical
561				 * region (holding the mutex lock); we should
562				 * be able to safely set the state.
563				 */
564				THR_SET_STATE(curthread, PS_MUTEX_WAIT);
565
566				if (curthread->active_priority > (*m)->m_prio)
567					/* Adjust priorities: */
568					mutex_priority_adjust(curthread, *m);
569
570				/* Unlock the mutex structure: */
571				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
572
573				/* Schedule the next thread: */
574				_thr_sched_switch(curthread);
575			}
576			break;
577
578		/* POSIX priority protection mutex: */
579		case PTHREAD_PRIO_PROTECT:
580			/* Check for a priority ceiling violation: */
581			if (curthread->active_priority > (*m)->m_prio) {
582				/* Unlock the mutex structure: */
583				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
584				ret = EINVAL;
585			}
586			/* Check if this mutex is not locked: */
587			else if ((*m)->m_owner == NULL) {
588				/*
589				 * Lock the mutex for the running
590				 * thread:
591				 */
592				(*m)->m_owner = curthread;
593
594				/* Track number of priority mutexes owned: */
595				curthread->priority_mutex_count++;
596
597				/*
598				 * The running thread inherits the ceiling
599				 * priority of the mutex and executes at that
600				 * priority.  Make sure the thread's
601				 * scheduling lock is held while priorities
602				 * are adjusted.
603				 */
604				THR_SCHED_LOCK(curthread, curthread);
605				curthread->active_priority = (*m)->m_prio;
606				(*m)->m_saved_prio =
607				    curthread->inherited_priority;
608				curthread->inherited_priority = (*m)->m_prio;
609				THR_SCHED_UNLOCK(curthread, curthread);
610
611				/* Add to the list of owned mutexes: */
612				MUTEX_ASSERT_NOT_OWNED(*m);
613				TAILQ_INSERT_TAIL(&curthread->mutexq,
614				    (*m), m_qe);
615
616				/* Unlock the mutex structure: */
617				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
618			} else if ((*m)->m_owner == curthread) {
619				ret = mutex_self_lock(curthread, *m);
620
621				/* Unlock the mutex structure: */
622				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
623			} else {
624				/*
625				 * Join the queue of threads waiting to lock
626				 * the mutex and save a pointer to the mutex.
627				 */
628				mutex_queue_enq(*m, curthread);
629				curthread->data.mutex = *m;
630
631				/* Clear any previous error: */
632				curthread->error = 0;
633
634				/*
635				 * This thread is active and is in a critical
636				 * region (holding the mutex lock); we should
637				 * be able to safely set the state.
638				 */
639				THR_SET_STATE(curthread, PS_MUTEX_WAIT);
640
641				/* Unlock the mutex structure: */
642				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
643
644				/* Schedule the next thread: */
645				_thr_sched_switch(curthread);
646
647				/*
648				 * The threads priority may have changed while
649				 * waiting for the mutex causing a ceiling
650				 * violation.
651				 */
652				ret = curthread->error;
653				curthread->error = 0;
654			}
655			break;
656
657		/* Trap invalid mutex types: */
658		default:
659			/* Unlock the mutex structure: */
660			THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
661
662			/* Return an invalid argument error: */
663			ret = EINVAL;
664			break;
665		}
666
667	} while (((*m)->m_owner != curthread) && (ret == 0) &&
668	    (curthread->interrupted == 0));
669
670	/*
671	 * Check to see if this thread was interrupted and
672	 * is still in the mutex queue of waiting threads:
673	 */
674	if (curthread->interrupted != 0) {
675		/* Remove this thread from the mutex queue. */
676		THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
677		if (THR_IN_SYNCQ(curthread))
678			mutex_queue_remove(*m, curthread);
679		THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
680
681		/* Check for asynchronous cancellation. */
682		if (curthread->continuation != NULL)
683			curthread->continuation((void *) curthread);
684	}
685
686	/* Return the completion status: */
687	return (ret);
688}
689
690int
691__pthread_mutex_lock(pthread_mutex_t *m)
692{
693	struct pthread *curthread;
694	int	ret = 0;
695
696	if (_thr_initial == NULL)
697		_libpthread_init(NULL);
698
699	curthread = _get_curthread();
700	if (m == NULL)
701		ret = EINVAL;
702
703	/*
704	 * If the mutex is statically initialized, perform the dynamic
705	 * initialization:
706	 */
707	else if ((*m != NULL) || ((ret = init_static(curthread, m)) == 0))
708		ret = mutex_lock_common(curthread, m);
709
710	return (ret);
711}
712
713int
714_pthread_mutex_lock(pthread_mutex_t *m)
715{
716	struct pthread *curthread;
717	int	ret = 0;
718
719	if (_thr_initial == NULL)
720		_libpthread_init(NULL);
721	curthread = _get_curthread();
722
723	if (m == NULL)
724		ret = EINVAL;
725
726	/*
727	 * If the mutex is statically initialized, perform the dynamic
728	 * initialization marking it private (delete safe):
729	 */
730	else if ((*m != NULL) ||
731	    ((ret = init_static_private(curthread, m)) == 0))
732		ret = mutex_lock_common(curthread, m);
733
734	return (ret);
735}
736
737int
738_pthread_mutex_unlock(pthread_mutex_t *m)
739{
740	return (mutex_unlock_common(m, /* add reference */ 0));
741}
742
743int
744_mutex_cv_unlock(pthread_mutex_t *m)
745{
746	return (mutex_unlock_common(m, /* add reference */ 1));
747}
748
749int
750_mutex_cv_lock(pthread_mutex_t *m)
751{
752	int	ret;
753	if ((ret = _pthread_mutex_lock(m)) == 0)
754		(*m)->m_refcount--;
755	return (ret);
756}
757
758static inline int
759mutex_self_trylock(struct pthread *curthread, pthread_mutex_t m)
760{
761	int	ret = 0;
762
763	switch (m->m_type) {
764	/* case PTHREAD_MUTEX_DEFAULT: */
765	case PTHREAD_MUTEX_ERRORCHECK:
766	case PTHREAD_MUTEX_NORMAL:
767		/*
768		 * POSIX specifies that mutexes should return EDEADLK if a
769		 * recursive lock is detected.
770		 */
771		if (m->m_owner == curthread)
772			ret = EDEADLK;
773		else
774			ret = EBUSY;
775		break;
776
777	case PTHREAD_MUTEX_RECURSIVE:
778		/* Increment the lock count: */
779		m->m_count++;
780		break;
781
782	default:
783		/* Trap invalid mutex types; */
784		ret = EINVAL;
785	}
786
787	return (ret);
788}
789
/*
 * Handle a blocking lock attempt on a mutex the calling thread
 * already owns.  Entered with the mutex structure lock held.  Note
 * the PTHREAD_MUTEX_NORMAL case releases that lock itself before
 * deliberately deadlocking the thread (PS_DEADLOCK + reschedule).
 */
static inline int
mutex_self_lock(struct pthread *curthread, pthread_mutex_t m)
{
	int ret = 0;

	switch (m->m_type) {
	/* case PTHREAD_MUTEX_DEFAULT: */
	case PTHREAD_MUTEX_ERRORCHECK:
		/*
		 * POSIX specifies that mutexes should return EDEADLK if a
		 * recursive lock is detected.
		 */
		ret = EDEADLK;
		break;

	case PTHREAD_MUTEX_NORMAL:
		/*
		 * What SS2 define as a 'normal' mutex.  Intentionally
		 * deadlock on attempts to get a lock you already own.
		 */
		THR_SET_STATE(curthread, PS_DEADLOCK);

		/* Unlock the mutex structure: */
		THR_LOCK_RELEASE(curthread, &m->m_lock);

		/* Schedule the next thread: */
		_thr_sched_switch(curthread);
		break;

	case PTHREAD_MUTEX_RECURSIVE:
		/* Increment the lock count: */
		m->m_count++;
		break;

	default:
		/* Trap invalid mutex types; */
		ret = EINVAL;
	}

	return (ret);
}
831
/*
 * Common unlock path for pthread_mutex_unlock() and condition
 * variable waits.  When add_reference is nonzero the reference count
 * is bumped after a successful unlock so the mutex cannot be
 * destroyed while a condition wait still refers to it.
 *
 * Returns 0 on success, EINVAL for a NULL/unowned mutex or unknown
 * protocol, and EPERM when the caller is not the owner.
 */
static int
mutex_unlock_common(pthread_mutex_t *m, int add_reference)
{
	struct pthread	*curthread = _get_curthread();
	int	ret = 0;

	if (m == NULL || *m == NULL)
		ret = EINVAL;
	else {
		/* Lock the mutex structure: */
		THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);

		/* Process according to mutex type: */
		switch ((*m)->m_protocol) {
		/* Default POSIX mutex: */
		case PTHREAD_PRIO_NONE:
			/*
			 * Check if the running thread is not the owner of the
			 * mutex:
			 */
			if ((*m)->m_owner != curthread)
				/*
				 * Return an invalid argument error for no
				 * owner and a permission error otherwise:
				 */
				ret = (*m)->m_owner == NULL ? EINVAL : EPERM;

			else if (((*m)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
			    ((*m)->m_count > 0))
				/* Decrement the count: */
				(*m)->m_count--;
			else {
				/*
				 * Clear the count in case this is a recursive
				 * mutex.
				 */
				(*m)->m_count = 0;

				/* Remove the mutex from the threads queue. */
				MUTEX_ASSERT_IS_OWNED(*m);
				TAILQ_REMOVE(&(*m)->m_owner->mutexq,
				    (*m), m_qe);
				MUTEX_INIT_LINK(*m);

				/*
				 * Hand off the mutex to the next waiting
				 * thread:
				 */
				mutex_handoff(curthread, *m);
			}
			break;

		/* POSIX priority inheritence mutex: */
		case PTHREAD_PRIO_INHERIT:
			/*
			 * Check if the running thread is not the owner of the
			 * mutex:
			 */
			if ((*m)->m_owner != curthread)
				/*
				 * Return an invalid argument error for no
				 * owner and a permission error otherwise:
				 */
				ret = (*m)->m_owner == NULL ? EINVAL : EPERM;

			else if (((*m)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
			    ((*m)->m_count > 0))
				/* Decrement the count: */
				(*m)->m_count--;
			else {
				/*
				 * Clear the count in case this is recursive
				 * mutex.
				 */
				(*m)->m_count = 0;

				/*
				 * Restore the threads inherited priority and
				 * recompute the active priority (being careful
				 * not to override changes in the threads base
				 * priority subsequent to locking the mutex).
				 */
				THR_SCHED_LOCK(curthread, curthread);
				curthread->inherited_priority =
					(*m)->m_saved_prio;
				curthread->active_priority =
				    MAX(curthread->inherited_priority,
				    curthread->base_priority);
				THR_SCHED_UNLOCK(curthread, curthread);

				/*
				 * This thread now owns one less priority mutex.
				 */
				curthread->priority_mutex_count--;

				/* Remove the mutex from the threads queue. */
				MUTEX_ASSERT_IS_OWNED(*m);
				TAILQ_REMOVE(&(*m)->m_owner->mutexq,
				    (*m), m_qe);
				MUTEX_INIT_LINK(*m);

				/*
				 * Hand off the mutex to the next waiting
				 * thread:
				 */
				mutex_handoff(curthread, *m);
			}
			break;

		/* POSIX priority ceiling mutex: */
		case PTHREAD_PRIO_PROTECT:
			/*
			 * Check if the running thread is not the owner of the
			 * mutex:
			 */
			if ((*m)->m_owner != curthread)
				/*
				 * Return an invalid argument error for no
				 * owner and a permission error otherwise:
				 */
				ret = (*m)->m_owner == NULL ? EINVAL : EPERM;

			else if (((*m)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
			    ((*m)->m_count > 0))
				/* Decrement the count: */
				(*m)->m_count--;
			else {
				/*
				 * Clear the count in case this is a recursive
				 * mutex.
				 */
				(*m)->m_count = 0;

				/*
				 * Restore the threads inherited priority and
				 * recompute the active priority (being careful
				 * not to override changes in the threads base
				 * priority subsequent to locking the mutex).
				 */
				THR_SCHED_LOCK(curthread, curthread);
				curthread->inherited_priority =
				    (*m)->m_saved_prio;
				curthread->active_priority =
				    MAX(curthread->inherited_priority,
				    curthread->base_priority);
				THR_SCHED_UNLOCK(curthread, curthread);

				/*
				 * This thread now owns one less priority mutex.
				 */
				curthread->priority_mutex_count--;

				/* Remove the mutex from the threads queue. */
				MUTEX_ASSERT_IS_OWNED(*m);
				TAILQ_REMOVE(&(*m)->m_owner->mutexq,
				    (*m), m_qe);
				MUTEX_INIT_LINK(*m);

				/*
				 * Hand off the mutex to the next waiting
				 * thread:
				 */
				mutex_handoff(curthread, *m);
			}
			break;

		/* Trap invalid mutex types: */
		default:
			/* Return an invalid argument error: */
			ret = EINVAL;
			break;
		}

		if ((ret == 0) && (add_reference != 0))
			/* Increment the reference count: */
			(*m)->m_refcount++;

		/* Unlock the mutex structure: */
		THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
	}

	/* Return the completion status: */
	return (ret);
}
1016
1017
1018/*
1019 * This function is called when a change in base priority occurs for
1020 * a thread that is holding or waiting for a priority protection or
1021 * inheritence mutex.  A change in a threads base priority can effect
1022 * changes to active priorities of other threads and to the ordering
1023 * of mutex locking by waiting threads.
1024 *
1025 * This must be called without the target thread's scheduling lock held.
1026 */
void
_mutex_notify_priochange(struct pthread *curthread, struct pthread *pthread,
    int propagate_prio)
{
	struct pthread_mutex *m;

	/* Adjust the priorites of any owned priority mutexes: */
	if (pthread->priority_mutex_count > 0) {
		/*
		 * Rescan the mutexes owned by this thread and correct
		 * their priorities to account for this threads change
		 * in priority.  This has the side effect of changing
		 * the threads active priority.
		 *
		 * Be sure to lock the first mutex in the list of owned
		 * mutexes.  This acts as a barrier against another
		 * simultaneous call to change the threads priority
		 * and from the owning thread releasing the mutex.
		 */
		m = TAILQ_FIRST(&pthread->mutexq);
		if (m != NULL) {
			THR_LOCK_ACQUIRE(curthread, &m->m_lock);
			/*
			 * Make sure the thread still owns the lock (the
			 * queue head may have changed while we waited to
			 * acquire the mutex structure lock).
			 */
			if (m == TAILQ_FIRST(&pthread->mutexq))
				mutex_rescan_owned(curthread, pthread,
				    /* rescan all owned */ NULL);
			THR_LOCK_RELEASE(curthread, &m->m_lock);
		}
	}

	/*
	 * If this thread is waiting on a priority inheritence mutex,
	 * check for priority adjustments.  A change in priority can
	 * also cause a ceiling violation(*) for a thread waiting on
	 * a priority protection mutex; we don't perform the check here
	 * as it is done in pthread_mutex_unlock.
	 *
	 * (*) It should be noted that a priority change to a thread
	 *     _after_ taking and owning a priority ceiling mutex
	 *     does not affect ownership of that mutex; the ceiling
	 *     priority is only checked before mutex ownership occurs.
	 */
	if (propagate_prio != 0) {
		/*
		 * Lock the thread's scheduling queue.  This is a bit
		 * convoluted; the "in synchronization queue flag" can
		 * only be cleared with both the thread's scheduling and
		 * mutex locks held.  The thread's pointer to the wanted
		 * mutex is guaranteed to be valid during this time.
		 */
		THR_SCHED_LOCK(curthread, pthread);

		/* Nothing to do if the thread is not waiting on a mutex. */
		if (((pthread->sflags & THR_FLAGS_IN_SYNCQ) == 0) ||
		    ((m = pthread->data.mutex) == NULL))
			THR_SCHED_UNLOCK(curthread, pthread);
		else {
			/*
			 * This thread is currently waiting on a mutex; unlock
			 * the scheduling queue lock and lock the mutex.  We
			 * can't hold both at the same time because the locking
			 * order could cause a deadlock.
			 */
			THR_SCHED_UNLOCK(curthread, pthread);
			THR_LOCK_ACQUIRE(curthread, &m->m_lock);

			/*
			 * Check to make sure this thread is still in the
			 * same state (the lock above can yield the CPU to
			 * another thread or the thread may be running on
			 * another CPU).
			 */
			if (((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) &&
			    (pthread->data.mutex == m)) {
				/*
				 * Remove and reinsert this thread into
				 * the list of waiting threads to preserve
				 * decreasing priority order.
				 */
				mutex_queue_remove(m, pthread);
				mutex_queue_enq(m, pthread);

				if (m->m_protocol == PTHREAD_PRIO_INHERIT)
					/* Adjust priorities: */
					mutex_priority_adjust(curthread, m);
			}

			/* Unlock the mutex structure: */
			THR_LOCK_RELEASE(curthread, &m->m_lock);
		}
	}
}
1120
1121/*
1122 * Called when a new thread is added to the mutex waiting queue or
1123 * when a threads priority changes that is already in the mutex
1124 * waiting queue.
1125 *
1126 * This must be called with the mutex locked by the current thread.
1127 */
static void
mutex_priority_adjust(struct pthread *curthread, pthread_mutex_t mutex)
{
	pthread_mutex_t	m = mutex;
	struct pthread	*pthread_next, *pthread = mutex->m_owner;
	int		done, temp_prio;

	/*
	 * Calculate the mutex priority as the maximum of the highest
	 * active priority of any waiting threads and the owning thread's
	 * active priority(*).
	 *
	 * (*) Because the owning thread's current active priority may
	 *     reflect priority inherited from this mutex (and the mutex
	 *     priority may have changed) we must recalculate the active
	 *     priority based on the thread's saved inherited priority
	 *     and its base priority.
	 */
	pthread_next = TAILQ_FIRST(&m->m_queue);  /* should never be NULL */
	temp_prio = MAX(pthread_next->active_priority,
	    MAX(m->m_saved_prio, pthread->base_priority));

	/* See if this mutex really needs adjusting: */
	if (temp_prio == m->m_prio)
		/* No need to propagate the priority: */
		return;

	/* Set new priority of the mutex: */
	m->m_prio = temp_prio;

	/*
	 * Don't unlock the mutex passed in as an argument.  It is
	 * expected to be locked and unlocked by the caller.
	 *
	 * "done" is cleared only when the new priority must be
	 * propagated to another mutex (one that this owner is itself
	 * waiting on) further up the ownership chain; it also tells
	 * the top of the loop whether the current mutex, locked by
	 * the previous iteration, still needs to be unlocked.
	 */
	done = 1;
	do {
		/*
		 * Save the thread's priority before rescanning the
		 * owned mutexes:
		 */
		temp_prio = pthread->active_priority;

		/*
		 * Fix the priorities for all mutexes held by the owning
		 * thread since taking this mutex.  This also has a
		 * potential side-effect of changing the thread's priority.
		 *
		 * At this point the mutex is locked by the current thread.
		 * The owning thread can't release the mutex until it is
		 * unlocked, so we should be able to safely walk its list
		 * of owned mutexes.
		 */
		mutex_rescan_owned(curthread, pthread, m);

		/*
		 * If this isn't the first time through the loop,
		 * the current mutex needs to be unlocked.
		 */
		if (done == 0)
			THR_LOCK_RELEASE(curthread, &m->m_lock);

		/* Assume we're done unless told otherwise: */
		done = 1;

		/*
		 * If the thread is currently waiting on a mutex, check
		 * to see if the thread's new priority has affected the
		 * priority of the mutex.
		 */
		if ((temp_prio != pthread->active_priority) &&
		    ((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) &&
		    ((m = pthread->data.mutex) != NULL) &&
		    (m->m_protocol == PTHREAD_PRIO_INHERIT)) {
			/* Lock the mutex structure: */
			THR_LOCK_ACQUIRE(curthread, &m->m_lock);

			/*
			 * Make sure the thread is still waiting on the
			 * mutex:
			 */
			if (((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) &&
			    (m == pthread->data.mutex)) {
				/*
				 * The priority for this thread has changed.
				 * Remove and reinsert this thread into the
				 * list of waiting threads to preserve
				 * decreasing priority order.
				 */
				mutex_queue_remove(m, pthread);
				mutex_queue_enq(m, pthread);

				/*
				 * Grab the waiting thread with highest
				 * priority:
				 */
				pthread_next = TAILQ_FIRST(&m->m_queue);

				/*
				 * Calculate the mutex priority as the maximum
				 * of the highest active priority of any
				 * waiting threads and the owning thread's
				 * active priority.
				 */
				temp_prio = MAX(pthread_next->active_priority,
				    MAX(m->m_saved_prio,
				    m->m_owner->base_priority));

				if (temp_prio != m->m_prio) {
					/*
					 * The priority needs to be propagated
					 * to the mutex this thread is waiting
					 * on and up to the owner of that mutex.
					 */
					m->m_prio = temp_prio;
					pthread = m->m_owner;

					/* We're not done yet: */
					done = 0;
				}
			}
			/* Only release the mutex if we're done: */
			if (done != 0)
				THR_LOCK_RELEASE(curthread, &m->m_lock);
		}
	} while (done == 0);
}
1254
/*
 * Recompute the priorities of the priority inheritance mutexes owned
 * by "pthread", then fix the thread's inherited and active priorities
 * to match.  When "mutex" is non-NULL the walk starts just after it
 * (its own priority is assumed already correct); when NULL, the whole
 * owned list is rescanned.  If the thread's active priority changes,
 * the thread is requeued in its run queue as required.
 */
static void
mutex_rescan_owned(struct pthread *curthread, struct pthread *pthread,
    struct pthread_mutex *mutex)
{
	struct pthread_mutex	*m;
	struct pthread		*pthread_next;
	int			active_prio, inherited_prio;

	/*
	 * Start walking the mutexes the thread has taken since
	 * taking this mutex.
	 */
	if (mutex == NULL) {
		/*
		 * A null mutex means start at the beginning of the owned
		 * mutex list.
		 */
		m = TAILQ_FIRST(&pthread->mutexq);

		/* There is no inherited priority yet. */
		inherited_prio = 0;
	} else {
		/*
		 * The caller wants to start after a specific mutex.  It
		 * is assumed that this mutex is a priority inheritance
		 * mutex and that its priority has been correctly
		 * calculated.
		 */
		m = TAILQ_NEXT(mutex, m_qe);

		/* Start inheriting priority from the specified mutex. */
		inherited_prio = mutex->m_prio;
	}
	active_prio = MAX(inherited_prio, pthread->base_priority);

	for (; m != NULL; m = TAILQ_NEXT(m, m_qe)) {
		/*
		 * We only want to deal with priority inheritance
		 * mutexes.  This might be optimized by only placing
		 * priority inheritance mutexes into the owned mutex
		 * list, but it may prove to be useful having all
		 * owned mutexes in this list.  Consider a thread
		 * exiting while holding mutexes...
		 */
		if (m->m_protocol == PTHREAD_PRIO_INHERIT) {
			/*
			 * Fix the owner's saved (inherited) priority to
			 * reflect the priority of the previous mutex.
			 */
			m->m_saved_prio = inherited_prio;

			if ((pthread_next = TAILQ_FIRST(&m->m_queue)) != NULL)
				/* Recalculate the priority of the mutex: */
				m->m_prio = MAX(active_prio,
				     pthread_next->active_priority);
			else
				/* No waiters; use the priority so far: */
				m->m_prio = active_prio;

			/* Recalculate new inherited and active priorities: */
			inherited_prio = m->m_prio;
			active_prio = MAX(m->m_prio, pthread->base_priority);
		}
	}

	/*
	 * Fix the thread's inherited priority and recalculate its
	 * active priority.
	 */
	pthread->inherited_priority = inherited_prio;
	active_prio = MAX(inherited_prio, pthread->base_priority);

	if (active_prio != pthread->active_priority) {
		/* Lock the thread's scheduling queue: */
		THR_SCHED_LOCK(curthread, pthread);

		if ((pthread->flags & THR_FLAGS_IN_RUNQ) == 0) {
			/*
			 * This thread is not in a run queue.  Just set
			 * its active priority.
			 */
			pthread->active_priority = active_prio;
		}
		else {
			/*
			 * This thread is in a run queue.  Remove it from
			 * the queue before changing its priority:
			 */
			THR_RUNQ_REMOVE(pthread);

			/*
			 * POSIX states that if the priority is being
			 * lowered, the thread must be inserted at the
			 * head of the queue for its priority if it owns
			 * any priority protection or inheritance mutexes.
			 */
			if ((active_prio < pthread->active_priority) &&
			    (pthread->priority_mutex_count > 0)) {
				/* Set the new active priority. */
				pthread->active_priority = active_prio;

				/* Reinsert at the head of the run queue: */
				THR_RUNQ_INSERT_HEAD(pthread);
			} else {
				/* Set the new active priority. */
				pthread->active_priority = active_prio;

				/* Reinsert at the tail of the run queue: */
				THR_RUNQ_INSERT_TAIL(pthread);
			}
		}
		THR_SCHED_UNLOCK(curthread, pthread);
	}
}
1366
1367void
1368_mutex_unlock_private(pthread_t pthread)
1369{
1370	struct pthread_mutex	*m, *m_next;
1371
1372	for (m = TAILQ_FIRST(&pthread->mutexq); m != NULL; m = m_next) {
1373		m_next = TAILQ_NEXT(m, m_qe);
1374		if ((m->m_flags & MUTEX_FLAGS_PRIVATE) != 0)
1375			pthread_mutex_unlock(&m);
1376	}
1377}
1378
1379/*
1380 * This is called by the current thread when it wants to back out of a
1381 * mutex_lock in order to run a signal handler.
1382 */
void
_mutex_lock_backout(struct pthread *curthread)
{
	struct pthread_mutex *m;

	/* Nothing to back out of unless we are queued on a mutex: */
	if ((curthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) {
		/*
		 * Any other thread may clear the "in sync queue flag",
		 * but only the current thread can clear the pointer
		 * to the mutex.  So if the flag is set, we can
		 * guarantee that the pointer to the mutex is valid.
		 * The only problem may be if the mutex is destroyed
		 * out from under us, but that should be considered
		 * an application bug.
		 */
		m = curthread->data.mutex;

		/* Lock the mutex structure: */
		THR_LOCK_ACQUIRE(curthread, &m->m_lock);


		/*
		 * Check to make sure this thread doesn't already own
		 * the mutex.  Since mutexes are unlocked with direct
		 * handoffs, it is possible the previous owner gave it
		 * to us after we checked the sync queue flag and before
		 * we locked the mutex structure.
		 */
		if (m->m_owner == curthread) {
			/* We were handed the mutex; give it back. */
			THR_LOCK_RELEASE(curthread, &m->m_lock);
			mutex_unlock_common(&m, /* add_reference */ 0);
		} else {
			/*
			 * Remove ourselves from the mutex queue and
			 * clear the pointer to the mutex.  We may no
			 * longer be in the mutex queue, but the removal
			 * function will DTRT.
			 */
			mutex_queue_remove(m, curthread);
			curthread->data.mutex = NULL;
			THR_LOCK_RELEASE(curthread, &m->m_lock);
		}
	}
}
1427
1428/*
1429 * Dequeue a waiting thread from the head of a mutex queue in descending
1430 * priority order.
1431 *
1432 * In order to properly dequeue a thread from the mutex queue and
1433 * make it runnable without the possibility of errant wakeups, it
1434 * is necessary to lock the thread's scheduling queue while also
1435 * holding the mutex lock.
1436 */
static void
mutex_handoff(struct pthread *curthread, struct pthread_mutex *mutex)
{
	struct pthread	*pthread;

	/* Keep dequeueing until we find a valid thread: */
	mutex->m_owner = NULL;
	pthread = TAILQ_FIRST(&mutex->m_queue);
	while (pthread != NULL) {
		/* Take the thread's scheduling lock: */
		THR_SCHED_LOCK(curthread, pthread);

		/* Remove the thread from the mutex queue: */
		TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
		pthread->sflags &= ~THR_FLAGS_IN_SYNCQ;

		/* This thread is no longer waiting for this mutex. */
		pthread->data.mutex = NULL;

		/*
		 * Whether this thread actually becomes the new owner
		 * depends on the mutex protocol; a ceiling violation
		 * can disqualify a PTHREAD_PRIO_PROTECT waiter (see
		 * below), in which case the loop moves on to the next
		 * waiter.
		 */
		switch (mutex->m_protocol) {
		case PTHREAD_PRIO_NONE:
			/*
			 * Assign the new owner and add the mutex to the
			 * thread's list of owned mutexes.
			 */
			mutex->m_owner = pthread;
			TAILQ_INSERT_TAIL(&pthread->mutexq, mutex, m_qe);
			break;

		case PTHREAD_PRIO_INHERIT:
			/*
			 * Assign the new owner and add the mutex to the
			 * thread's list of owned mutexes.
			 */
			mutex->m_owner = pthread;
			TAILQ_INSERT_TAIL(&pthread->mutexq, mutex, m_qe);

			/* Track number of priority mutexes owned: */
			pthread->priority_mutex_count++;

			/*
			 * Set the priority of the mutex.  Since our waiting
			 * threads are in descending priority order, the
			 * priority of the mutex becomes the active priority
			 * of the thread we just dequeued.
			 */
			mutex->m_prio = pthread->active_priority;

			/* Save the owning thread's inherited priority: */
			mutex->m_saved_prio = pthread->inherited_priority;

			/*
			 * The owning thread's inherited priority now becomes
			 * its active priority (the priority of the mutex).
			 */
			pthread->inherited_priority = mutex->m_prio;
			break;

		case PTHREAD_PRIO_PROTECT:
			if (pthread->active_priority > mutex->m_prio) {
				/*
				 * Either the mutex ceiling priority has
				 * been lowered and/or this thread's priority
				 * has been raised subsequent to the thread
				 * being queued on the waiting list.  The
				 * waiter is rejected with EINVAL; note that
				 * it is still made runnable below so it can
				 * observe the error.
				 */
				pthread->error = EINVAL;
			}
			else {
				/*
				 * Assign the new owner and add the mutex
				 * to the thread's list of owned mutexes.
				 */
				mutex->m_owner = pthread;
				TAILQ_INSERT_TAIL(&pthread->mutexq,
				    mutex, m_qe);

				/* Track number of priority mutexes owned: */
				pthread->priority_mutex_count++;

				/*
				 * Save the owning thread's inherited
				 * priority:
				 */
				mutex->m_saved_prio =
				    pthread->inherited_priority;

				/*
				 * The owning thread inherits the ceiling
				 * priority of the mutex and executes at
				 * that priority:
				 */
				pthread->inherited_priority = mutex->m_prio;
				pthread->active_priority = mutex->m_prio;

			}
			break;
		}

		/* Make the thread runnable and unlock the scheduling queue: */
		_thr_setrunnable_unlocked(pthread);
		THR_SCHED_UNLOCK(curthread, pthread);

		if (mutex->m_owner == pthread)
			/* We're done; a valid owner was found. */
			break;
		else
			/* Get the next thread from the waiting queue: */
			pthread = TAILQ_NEXT(pthread, sqe);
	}

	if ((pthread == NULL) && (mutex->m_protocol == PTHREAD_PRIO_INHERIT))
		/* This mutex has no priority: */
		mutex->m_prio = 0;
}
1556
1557/*
1558 * Dequeue a waiting thread from the head of a mutex queue in descending
1559 * priority order.
1560 */
1561static inline pthread_t
1562mutex_queue_deq(struct pthread_mutex *mutex)
1563{
1564	pthread_t pthread;
1565
1566	while ((pthread = TAILQ_FIRST(&mutex->m_queue)) != NULL) {
1567		TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
1568		pthread->sflags &= ~THR_FLAGS_IN_SYNCQ;
1569
1570		/*
1571		 * Only exit the loop if the thread hasn't been
1572		 * cancelled.
1573		 */
1574		if (pthread->interrupted == 0)
1575			break;
1576	}
1577
1578	return (pthread);
1579}
1580
1581/*
1582 * Remove a waiting thread from a mutex queue in descending priority order.
1583 */
1584static inline void
1585mutex_queue_remove(pthread_mutex_t mutex, pthread_t pthread)
1586{
1587	if ((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) {
1588		TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
1589		pthread->sflags &= ~THR_FLAGS_IN_SYNCQ;
1590	}
1591}
1592
1593/*
1594 * Enqueue a waiting thread to a queue in descending priority order.
1595 */
1596static inline void
1597mutex_queue_enq(pthread_mutex_t mutex, pthread_t pthread)
1598{
1599	pthread_t tid = TAILQ_LAST(&mutex->m_queue, mutex_head);
1600
1601	THR_ASSERT_NOT_IN_SYNCQ(pthread);
1602	/*
1603	 * For the common case of all threads having equal priority,
1604	 * we perform a quick check against the priority of the thread
1605	 * at the tail of the queue.
1606	 */
1607	if ((tid == NULL) || (pthread->active_priority <= tid->active_priority))
1608		TAILQ_INSERT_TAIL(&mutex->m_queue, pthread, sqe);
1609	else {
1610		tid = TAILQ_FIRST(&mutex->m_queue);
1611		while (pthread->active_priority <= tid->active_priority)
1612			tid = TAILQ_NEXT(tid, sqe);
1613		TAILQ_INSERT_BEFORE(tid, pthread, sqe);
1614	}
1615	pthread->sflags |= THR_FLAGS_IN_SYNCQ;
1616}
1617