/*
 * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by John Birrell.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/lib/libkse/thread/thr_mutex.c 123310 2003-12-09 00:52:28Z davidxu $
 */
#include <stdlib.h>
#include <errno.h>
#include <string.h>
#include <sys/param.h>
#include <sys/queue.h>
#include <pthread.h>
#include "thr_private.h"

#if defined(_PTHREADS_INVARIANTS)
#define MUTEX_INIT_LINK(m) 		do {		\
	(m)->m_qe.tqe_prev = NULL;			\
	(m)->m_qe.tqe_next = NULL;			\
} while (0)
#define MUTEX_ASSERT_IS_OWNED(m)	do {		\
	if ((m)->m_qe.tqe_prev == NULL)			\
		PANIC("mutex is not on list");		\
} while (0)
#define MUTEX_ASSERT_NOT_OWNED(m)	do {		\
	if (((m)->m_qe.tqe_prev != NULL) ||		\
	    ((m)->m_qe.tqe_next != NULL))		\
		PANIC("mutex is on list");		\
} while (0)
#define	THR_ASSERT_NOT_IN_SYNCQ(thr)	do {		\
	THR_ASSERT(((thr)->sflags & THR_FLAGS_IN_SYNCQ) == 0, \
	    "thread in syncq when it shouldn't be.");	\
} while (0)
#else
#define MUTEX_INIT_LINK(m)
#define MUTEX_ASSERT_IS_OWNED(m)
#define MUTEX_ASSERT_NOT_OWNED(m)
#define	THR_ASSERT_NOT_IN_SYNCQ(thr)
#endif

#define THR_IN_MUTEXQ(thr)	(((thr)->sflags & THR_FLAGS_IN_SYNCQ) != 0)
#define	MUTEX_DESTROY(m) do {		\
	_lock_destroy(&(m)->m_lock);	\
	free(m);			\
} while (0)


/*
 * Prototypes
 */
static struct kse_mailbox *mutex_handoff(struct pthread *,
			    struct pthread_mutex *);
static inline int	mutex_self_trylock(struct pthread *, pthread_mutex_t);
static inline int	mutex_self_lock(struct pthread *, pthread_mutex_t);
static int		mutex_unlock_common(pthread_mutex_t *, int);
static void		mutex_priority_adjust(struct pthread *, pthread_mutex_t);
static void		mutex_rescan_owned (struct pthread *, struct pthread *,
			    struct pthread_mutex *);
static inline pthread_t	mutex_queue_deq(pthread_mutex_t);
static inline void	mutex_queue_remove(pthread_mutex_t, pthread_t);
static inline void	mutex_queue_enq(pthread_mutex_t, pthread_t);


static struct pthread_mutex_attr	static_mutex_attr =
    PTHREAD_MUTEXATTR_STATIC_INITIALIZER;
static pthread_mutexattr_t		static_mattr = &static_mutex_attr;

/* Single underscore versions provided for libc internal usage: */
__weak_reference(__pthread_mutex_lock, pthread_mutex_lock);
__weak_reference(__pthread_mutex_timedlock, pthread_mutex_timedlock);
__weak_reference(__pthread_mutex_trylock, pthread_mutex_trylock);

/* No difference between libc and application usage of these: */
__weak_reference(_pthread_mutex_init, pthread_mutex_init);
__weak_reference(_pthread_mutex_destroy, pthread_mutex_destroy);
__weak_reference(_pthread_mutex_unlock, pthread_mutex_unlock);


int
_pthread_mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr)
{
	struct pthread_mutex *pmutex;
	enum pthread_mutextype type;
	int		protocol;
	int		ceiling;
	int		flags;
	int		ret = 0;

	if (mutex == NULL)
		ret = EINVAL;

	/* Check if default mutex attributes: */
	else if (mutex_attr == NULL || *mutex_attr == NULL) {
		/* Default to an error-checking POSIX mutex: */
		type = PTHREAD_MUTEX_ERRORCHECK;
		protocol = PTHREAD_PRIO_NONE;
		ceiling = THR_MAX_PRIORITY;
		flags = 0;
	}

	/* Check mutex type: */
	else if (((*mutex_attr)->m_type < PTHREAD_MUTEX_ERRORCHECK) ||
	    ((*mutex_attr)->m_type >= MUTEX_TYPE_MAX))
		/* Return an invalid argument error: */
		ret = EINVAL;

	/* Check mutex protocol: */
	else if (((*mutex_attr)->m_protocol < PTHREAD_PRIO_NONE) ||
	    ((*mutex_attr)->m_protocol > PTHREAD_PRIO_PROTECT))
		/* Return an invalid argument error: */
		ret = EINVAL;

	else {
		/* Use the requested mutex type and protocol: */
		type = (*mutex_attr)->m_type;
		protocol = (*mutex_attr)->m_protocol;
		ceiling = (*mutex_attr)->m_ceiling;
		flags = (*mutex_attr)->m_flags;
	}

	/* Check no errors so far: */
	if (ret == 0) {
		if ((pmutex = (pthread_mutex_t)
		    malloc(sizeof(struct pthread_mutex))) == NULL)
			ret = ENOMEM;
		else if (_lock_init(&pmutex->m_lock, LCK_ADAPTIVE,
		    _thr_lock_wait, _thr_lock_wakeup) != 0) {
			free(pmutex);
			*mutex = NULL;
			ret = ENOMEM;
		} else {
			/* Set the mutex flags: */
			pmutex->m_flags = flags;

			/* Process according to mutex type: */
			switch (type) {
			/* case PTHREAD_MUTEX_DEFAULT: */
			case PTHREAD_MUTEX_ERRORCHECK:
			case PTHREAD_MUTEX_NORMAL:
				/* Nothing to do here. */
				break;

			/* Single UNIX Spec 2 recursive mutex: */
			case PTHREAD_MUTEX_RECURSIVE:
				/* Reset the mutex count: */
				pmutex->m_count = 0;
				break;

			/* Trap invalid mutex types: */
			default:
				/* Return an invalid argument error: */
				ret = EINVAL;
				break;
			}
			if (ret == 0) {
				/* Initialise the rest of the mutex: */
				TAILQ_INIT(&pmutex->m_queue);
				pmutex->m_flags |= MUTEX_FLAGS_INITED;
				pmutex->m_owner = NULL;
				pmutex->m_type = type;
				pmutex->m_protocol = protocol;
				pmutex->m_refcount = 0;
				if (protocol == PTHREAD_PRIO_PROTECT)
					pmutex->m_prio = ceiling;
				else
					pmutex->m_prio = -1;
				pmutex->m_saved_prio = 0;
				MUTEX_INIT_LINK(pmutex);
				*mutex = pmutex;
			} else {
				/* Free the mutex lock structure: */
				MUTEX_DESTROY(pmutex);
				*mutex = NULL;
			}
		}
	}
	/* Return the completion status: */
	return (ret);
}
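
/*
 * Illustrative sketch (not part of this library): how a caller would
 * request a recursive, priority-inheritance mutex through the standard
 * pthread attribute interface that feeds _pthread_mutex_init() above.
 * Guarded out of the build; compile it standalone to experiment.
 */
#if 0
#include <pthread.h>

int
example_init(void)
{
	pthread_mutexattr_t attr;
	pthread_mutex_t m;

	pthread_mutexattr_init(&attr);
	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
	pthread_mutexattr_setprotocol(&attr, PTHREAD_PRIO_INHERIT);
	if (pthread_mutex_init(&m, &attr) != 0)
		return (-1);
	pthread_mutexattr_destroy(&attr);

	/* A recursive mutex may be relocked by its owner. */
	pthread_mutex_lock(&m);
	pthread_mutex_lock(&m);
	pthread_mutex_unlock(&m);
	pthread_mutex_unlock(&m);
	return (pthread_mutex_destroy(&m));
}
#endif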

void
_thr_mutex_reinit(pthread_mutex_t *mutex)
{
	_lock_reinit(&(*mutex)->m_lock, LCK_ADAPTIVE,
	    _thr_lock_wait, _thr_lock_wakeup);
	TAILQ_INIT(&(*mutex)->m_queue);
	(*mutex)->m_owner = NULL;
	(*mutex)->m_count = 0;
	(*mutex)->m_refcount = 0;
	(*mutex)->m_prio = 0;
	(*mutex)->m_saved_prio = 0;
}

int
_pthread_mutex_destroy(pthread_mutex_t *mutex)
{
	struct pthread	*curthread = _get_curthread();
	pthread_mutex_t m;
	int ret = 0;

	if (mutex == NULL || *mutex == NULL)
		ret = EINVAL;
	else {
		/* Lock the mutex structure: */
		THR_LOCK_ACQUIRE(curthread, &(*mutex)->m_lock);

		/*
		 * Check to see if this mutex is in use:
		 */
		if (((*mutex)->m_owner != NULL) ||
		    (TAILQ_FIRST(&(*mutex)->m_queue) != NULL) ||
		    ((*mutex)->m_refcount != 0)) {
			ret = EBUSY;

			/* Unlock the mutex structure: */
			THR_LOCK_RELEASE(curthread, &(*mutex)->m_lock);
		} else {
			/*
			 * Save a pointer to the mutex so it can be freed
			 * and set the caller's pointer to NULL:
			 */
			m = *mutex;
			*mutex = NULL;

			/* Unlock the mutex structure: */
			THR_LOCK_RELEASE(curthread, &m->m_lock);

			/*
			 * Free the memory allocated for the mutex
			 * structure:
			 */
			MUTEX_ASSERT_NOT_OWNED(m);
			MUTEX_DESTROY(m);
		}
	}

	/* Return the completion status: */
	return (ret);
}

static int
init_static(struct pthread *thread, pthread_mutex_t *mutex)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);

	if (*mutex == NULL)
		ret = pthread_mutex_init(mutex, NULL);
	else
		ret = 0;

	THR_LOCK_RELEASE(thread, &_mutex_static_lock);

	return (ret);
}

static int
init_static_private(struct pthread *thread, pthread_mutex_t *mutex)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);

	if (*mutex == NULL)
		ret = pthread_mutex_init(mutex, &static_mattr);
	else
		ret = 0;

	THR_LOCK_RELEASE(thread, &_mutex_static_lock);

	return (ret);
}
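
/*
 * Illustrative sketch (not part of this library): a statically
 * initialized mutex starts out as a NULL pointer, so the first
 * lock/trylock call funnels through init_static() above before the
 * mutex is actually usable.  Guarded out of the build.
 */
#if 0
#include <pthread.h>

static pthread_mutex_t example_lock = PTHREAD_MUTEX_INITIALIZER;

void
example_static(void)
{
	/* The first lock performs the deferred dynamic initialization. */
	pthread_mutex_lock(&example_lock);
	pthread_mutex_unlock(&example_lock);
}
#endif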

static int
mutex_trylock_common(struct pthread *curthread, pthread_mutex_t *mutex)
{
	int ret = 0;

	THR_ASSERT((mutex != NULL) && (*mutex != NULL),
	    "Uninitialized mutex in pthread_mutex_trylock_basic");

	/* Lock the mutex structure: */
	THR_LOCK_ACQUIRE(curthread, &(*mutex)->m_lock);

	/*
	 * If the mutex was statically allocated, properly
	 * initialize the tail queue.
	 */
	if (((*mutex)->m_flags & MUTEX_FLAGS_INITED) == 0) {
		TAILQ_INIT(&(*mutex)->m_queue);
		MUTEX_INIT_LINK(*mutex);
		(*mutex)->m_flags |= MUTEX_FLAGS_INITED;
	}

	/* Process according to mutex type: */
	switch ((*mutex)->m_protocol) {
	/* Default POSIX mutex: */
	case PTHREAD_PRIO_NONE:
		/* Check if this mutex is not locked: */
		if ((*mutex)->m_owner == NULL) {
			/* Lock the mutex for the running thread: */
			(*mutex)->m_owner = curthread;

			/* Add to the list of owned mutexes: */
			MUTEX_ASSERT_NOT_OWNED(*mutex);
			TAILQ_INSERT_TAIL(&curthread->mutexq,
			    (*mutex), m_qe);
		} else if ((*mutex)->m_owner == curthread)
			ret = mutex_self_trylock(curthread, *mutex);
		else
			/* Return a busy error: */
			ret = EBUSY;
		break;

	/* POSIX priority inheritance mutex: */
	case PTHREAD_PRIO_INHERIT:
		/* Check if this mutex is not locked: */
		if ((*mutex)->m_owner == NULL) {
			/* Lock the mutex for the running thread: */
			(*mutex)->m_owner = curthread;

			THR_SCHED_LOCK(curthread, curthread);
			/* Track number of priority mutexes owned: */
			curthread->priority_mutex_count++;

			/*
			 * The mutex takes on the attributes of the
			 * running thread when there are no waiters.
			 */
			(*mutex)->m_prio = curthread->active_priority;
			(*mutex)->m_saved_prio =
			    curthread->inherited_priority;
			curthread->inherited_priority = (*mutex)->m_prio;
			THR_SCHED_UNLOCK(curthread, curthread);

			/* Add to the list of owned mutexes: */
			MUTEX_ASSERT_NOT_OWNED(*mutex);
			TAILQ_INSERT_TAIL(&curthread->mutexq,
			    (*mutex), m_qe);
		} else if ((*mutex)->m_owner == curthread)
			ret = mutex_self_trylock(curthread, *mutex);
		else
			/* Return a busy error: */
			ret = EBUSY;
		break;

	/* POSIX priority protection mutex: */
	case PTHREAD_PRIO_PROTECT:
		/* Check for a priority ceiling violation: */
		if (curthread->active_priority > (*mutex)->m_prio)
			ret = EINVAL;

		/* Check if this mutex is not locked: */
		else if ((*mutex)->m_owner == NULL) {
			/* Lock the mutex for the running thread: */
			(*mutex)->m_owner = curthread;

			THR_SCHED_LOCK(curthread, curthread);
			/* Track number of priority mutexes owned: */
			curthread->priority_mutex_count++;

			/*
			 * The running thread inherits the ceiling
			 * priority of the mutex and executes at that
			 * priority.
			 */
			curthread->active_priority = (*mutex)->m_prio;
			(*mutex)->m_saved_prio =
			    curthread->inherited_priority;
			curthread->inherited_priority =
			    (*mutex)->m_prio;
			THR_SCHED_UNLOCK(curthread, curthread);
			/* Add to the list of owned mutexes: */
			MUTEX_ASSERT_NOT_OWNED(*mutex);
			TAILQ_INSERT_TAIL(&curthread->mutexq,
			    (*mutex), m_qe);
		} else if ((*mutex)->m_owner == curthread)
			ret = mutex_self_trylock(curthread, *mutex);
		else
			/* Return a busy error: */
			ret = EBUSY;
		break;

	/* Trap invalid mutex types: */
	default:
		/* Return an invalid argument error: */
		ret = EINVAL;
		break;
	}

	/* Unlock the mutex structure: */
	THR_LOCK_RELEASE(curthread, &(*mutex)->m_lock);

	/* Return the completion status: */
	return (ret);
}

int
__pthread_mutex_trylock(pthread_mutex_t *mutex)
{
	struct pthread *curthread = _get_curthread();
	int ret = 0;

	if (mutex == NULL)
		ret = EINVAL;

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	else if ((*mutex != NULL) ||
	    ((ret = init_static(curthread, mutex)) == 0))
		ret = mutex_trylock_common(curthread, mutex);

	return (ret);
}

int
_pthread_mutex_trylock(pthread_mutex_t *mutex)
{
	struct pthread	*curthread = _get_curthread();
	int	ret = 0;

	if (mutex == NULL)
		ret = EINVAL;

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization marking the mutex private (delete safe):
	 */
	else if ((*mutex != NULL) ||
	    ((ret = init_static_private(curthread, mutex)) == 0))
		ret = mutex_trylock_common(curthread, mutex);

	return (ret);
}
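
/*
 * Illustrative sketch (not part of this library): typical caller-side
 * use of trylock, which returns EBUSY instead of blocking when another
 * thread holds the mutex.  Guarded out of the build.
 */
#if 0
#include <errno.h>
#include <pthread.h>

int
example_trylock(pthread_mutex_t *mp)
{
	int err;

	err = pthread_mutex_trylock(mp);
	if (err == 0) {
		/* ... critical section ... */
		pthread_mutex_unlock(mp);
		return (1);		/* got the lock */
	}
	if (err == EBUSY)
		return (0);		/* contended; do something else */
	return (-1);			/* EINVAL, EDEADLK, ... */
}
#endif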

static int
mutex_lock_common(struct pthread *curthread, pthread_mutex_t *m,
	const struct timespec * abstime)
{
	int	ret = 0;

	THR_ASSERT((m != NULL) && (*m != NULL),
	    "Uninitialized mutex in pthread_mutex_lock_basic");

	if (abstime != NULL && (abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
	    abstime->tv_nsec >= 1000000000))
		return (EINVAL);

	/* Reset the interrupted flag: */
	curthread->interrupted = 0;
	curthread->timeout = 0;
	curthread->wakeup_time.tv_sec = -1;

	/*
	 * Enter a loop waiting to become the mutex owner.  We need a
	 * loop in case the waiting thread is interrupted by a signal
	 * to execute a signal handler.  It is not (currently) possible
	 * to remain in the waiting queue while running a handler.
	 * Instead, the thread is interrupted and backed out of the
	 * waiting queue prior to executing the signal handler.
	 */
	do {
		/* Lock the mutex structure: */
		THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);

		/*
		 * If the mutex was statically allocated, properly
		 * initialize the tail queue.
		 */
		if (((*m)->m_flags & MUTEX_FLAGS_INITED) == 0) {
			TAILQ_INIT(&(*m)->m_queue);
			(*m)->m_flags |= MUTEX_FLAGS_INITED;
			MUTEX_INIT_LINK(*m);
		}

		/* Process according to mutex type: */
		switch ((*m)->m_protocol) {
		/* Default POSIX mutex: */
		case PTHREAD_PRIO_NONE:
			if ((*m)->m_owner == NULL) {
				/* Lock the mutex for this thread: */
				(*m)->m_owner = curthread;

				/* Add to the list of owned mutexes: */
				MUTEX_ASSERT_NOT_OWNED(*m);
				TAILQ_INSERT_TAIL(&curthread->mutexq,
				    (*m), m_qe);

				/* Unlock the mutex structure: */
				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
			} else if ((*m)->m_owner == curthread) {
				ret = mutex_self_lock(curthread, *m);

				/* Unlock the mutex structure: */
				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
			} else {
				/* Set the wakeup time: */
				if (abstime) {
					curthread->wakeup_time.tv_sec =
						abstime->tv_sec;
					curthread->wakeup_time.tv_nsec =
						abstime->tv_nsec;
				}

				/*
				 * Join the queue of threads waiting to lock
				 * the mutex and save a pointer to the mutex.
				 */
				mutex_queue_enq(*m, curthread);
				curthread->data.mutex = *m;
				/*
				 * This thread is active and is in a critical
				 * region (holding the mutex lock); we should
				 * be able to safely set the state.
				 */
				THR_SCHED_LOCK(curthread, curthread);
				THR_SET_STATE(curthread, PS_MUTEX_WAIT);
				THR_SCHED_UNLOCK(curthread, curthread);

				/* Unlock the mutex structure: */
				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);

				/* Schedule the next thread: */
				_thr_sched_switch(curthread);

				curthread->data.mutex = NULL;
				if (THR_IN_MUTEXQ(curthread)) {
					THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
					mutex_queue_remove(*m, curthread);
					THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
				}
			}
			break;

		/* POSIX priority inheritance mutex: */
		case PTHREAD_PRIO_INHERIT:
			/* Check if this mutex is not locked: */
			if ((*m)->m_owner == NULL) {
				/* Lock the mutex for this thread: */
				(*m)->m_owner = curthread;

				THR_SCHED_LOCK(curthread, curthread);
				/* Track number of priority mutexes owned: */
				curthread->priority_mutex_count++;

				/*
				 * The mutex takes on attributes of the
				 * running thread when there are no waiters.
				 * Make sure the thread's scheduling lock is
				 * held while priorities are adjusted.
				 */
				(*m)->m_prio = curthread->active_priority;
				(*m)->m_saved_prio =
				    curthread->inherited_priority;
				curthread->inherited_priority = (*m)->m_prio;
				THR_SCHED_UNLOCK(curthread, curthread);

				/* Add to the list of owned mutexes: */
				MUTEX_ASSERT_NOT_OWNED(*m);
				TAILQ_INSERT_TAIL(&curthread->mutexq,
				    (*m), m_qe);

				/* Unlock the mutex structure: */
				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
			} else if ((*m)->m_owner == curthread) {
				ret = mutex_self_lock(curthread, *m);

				/* Unlock the mutex structure: */
				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
			} else {
				/* Set the wakeup time: */
				if (abstime) {
					curthread->wakeup_time.tv_sec =
						abstime->tv_sec;
					curthread->wakeup_time.tv_nsec =
						abstime->tv_nsec;
				}

				/*
				 * Join the queue of threads waiting to lock
				 * the mutex and save a pointer to the mutex.
				 */
				mutex_queue_enq(*m, curthread);
				curthread->data.mutex = *m;

				/*
				 * This thread is active and is in a critical
				 * region (holding the mutex lock); we should
				 * be able to safely set the state.
				 */
				if (curthread->active_priority > (*m)->m_prio)
					/* Adjust priorities: */
					mutex_priority_adjust(curthread, *m);

				THR_SCHED_LOCK(curthread, curthread);
				THR_SET_STATE(curthread, PS_MUTEX_WAIT);
				THR_SCHED_UNLOCK(curthread, curthread);

				/* Unlock the mutex structure: */
				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);

				/* Schedule the next thread: */
				_thr_sched_switch(curthread);

				curthread->data.mutex = NULL;
				if (THR_IN_MUTEXQ(curthread)) {
					THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
					mutex_queue_remove(*m, curthread);
					THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
				}
			}
			break;

		/* POSIX priority protection mutex: */
		case PTHREAD_PRIO_PROTECT:
			/* Check for a priority ceiling violation: */
			if (curthread->active_priority > (*m)->m_prio) {
				/* Unlock the mutex structure: */
				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
				ret = EINVAL;
			}
			/* Check if this mutex is not locked: */
			else if ((*m)->m_owner == NULL) {
				/*
				 * Lock the mutex for the running
				 * thread:
				 */
				(*m)->m_owner = curthread;

				THR_SCHED_LOCK(curthread, curthread);
				/* Track number of priority mutexes owned: */
				curthread->priority_mutex_count++;

				/*
				 * The running thread inherits the ceiling
				 * priority of the mutex and executes at that
				 * priority.  Make sure the thread's
				 * scheduling lock is held while priorities
				 * are adjusted.
				 */
				curthread->active_priority = (*m)->m_prio;
				(*m)->m_saved_prio =
				    curthread->inherited_priority;
				curthread->inherited_priority = (*m)->m_prio;
				THR_SCHED_UNLOCK(curthread, curthread);

				/* Add to the list of owned mutexes: */
				MUTEX_ASSERT_NOT_OWNED(*m);
				TAILQ_INSERT_TAIL(&curthread->mutexq,
				    (*m), m_qe);

				/* Unlock the mutex structure: */
				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
			} else if ((*m)->m_owner == curthread) {
				ret = mutex_self_lock(curthread, *m);

				/* Unlock the mutex structure: */
				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
			} else {
				/* Set the wakeup time: */
				if (abstime) {
					curthread->wakeup_time.tv_sec =
						abstime->tv_sec;
					curthread->wakeup_time.tv_nsec =
						abstime->tv_nsec;
				}

				/*
				 * Join the queue of threads waiting to lock
				 * the mutex and save a pointer to the mutex.
				 */
				mutex_queue_enq(*m, curthread);
				curthread->data.mutex = *m;

				/* Clear any previous error: */
				curthread->error = 0;

				/*
				 * This thread is active and is in a critical
				 * region (holding the mutex lock); we should
				 * be able to safely set the state.
				 */
				THR_SCHED_LOCK(curthread, curthread);
				THR_SET_STATE(curthread, PS_MUTEX_WAIT);
				THR_SCHED_UNLOCK(curthread, curthread);

				/* Unlock the mutex structure: */
				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);

				/* Schedule the next thread: */
				_thr_sched_switch(curthread);

				curthread->data.mutex = NULL;
				if (THR_IN_MUTEXQ(curthread)) {
					THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
					mutex_queue_remove(*m, curthread);
					THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
				}

				/*
				 * The thread's priority may have changed while
				 * waiting for the mutex, causing a ceiling
				 * violation.
				 */
				ret = curthread->error;
				curthread->error = 0;
			}
			break;

		/* Trap invalid mutex types: */
		default:
			/* Unlock the mutex structure: */
			THR_LOCK_RELEASE(curthread, &(*m)->m_lock);

			/* Return an invalid argument error: */
			ret = EINVAL;
			break;
		}

	} while (((*m)->m_owner != curthread) && (ret == 0) &&
	    (curthread->interrupted == 0) && (curthread->timeout == 0));

	if (ret == 0 && (*m)->m_owner != curthread && curthread->timeout)
		ret = ETIMEDOUT;

	/*
	 * Check to see if this thread was interrupted and
	 * is still in the mutex queue of waiting threads:
	 */
	if (curthread->interrupted != 0) {
		/* Remove this thread from the mutex queue. */
		THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
		if (THR_IN_SYNCQ(curthread))
			mutex_queue_remove(*m, curthread);
		THR_LOCK_RELEASE(curthread, &(*m)->m_lock);

		/* Check for asynchronous cancellation. */
		if (curthread->continuation != NULL)
			curthread->continuation((void *) curthread);
	}

	/* Return the completion status: */
	return (ret);
}

int
__pthread_mutex_lock(pthread_mutex_t *m)
{
	struct pthread *curthread;
	int	ret = 0;

	if (_thr_initial == NULL)
		_libpthread_init(NULL);

	curthread = _get_curthread();
	if (m == NULL)
		ret = EINVAL;

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	else if ((*m != NULL) || ((ret = init_static(curthread, m)) == 0))
		ret = mutex_lock_common(curthread, m, NULL);

	return (ret);
}

__strong_reference(__pthread_mutex_lock, _thr_mutex_lock);

int
_pthread_mutex_lock(pthread_mutex_t *m)
{
	struct pthread *curthread;
	int	ret = 0;

	if (_thr_initial == NULL)
		_libpthread_init(NULL);
	curthread = _get_curthread();

	if (m == NULL)
		ret = EINVAL;

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization marking it private (delete safe):
	 */
	else if ((*m != NULL) ||
	    ((ret = init_static_private(curthread, m)) == 0))
		ret = mutex_lock_common(curthread, m, NULL);

	return (ret);
}

int
__pthread_mutex_timedlock(pthread_mutex_t *m,
	const struct timespec *abs_timeout)
{
	struct pthread *curthread;
	int	ret = 0;

	if (_thr_initial == NULL)
		_libpthread_init(NULL);

	curthread = _get_curthread();
	if (m == NULL)
		ret = EINVAL;

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	else if ((*m != NULL) || ((ret = init_static(curthread, m)) == 0))
		ret = mutex_lock_common(curthread, m, abs_timeout);

	return (ret);
}

int
_pthread_mutex_timedlock(pthread_mutex_t *m,
	const struct timespec *abs_timeout)
{
	struct pthread *curthread;
	int	ret = 0;

	if (_thr_initial == NULL)
		_libpthread_init(NULL);
	curthread = _get_curthread();

	if (m == NULL)
		ret = EINVAL;

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization marking it private (delete safe):
	 */
	else if ((*m != NULL) ||
	    ((ret = init_static_private(curthread, m)) == 0))
		ret = mutex_lock_common(curthread, m, abs_timeout);

	return (ret);
}
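
/*
 * Illustrative sketch (not part of this library): pthread_mutex_timedlock()
 * takes an absolute CLOCK_REALTIME deadline, so callers typically add a
 * relative timeout to the current time.  Guarded out of the build.
 */
#if 0
#include <errno.h>
#include <pthread.h>
#include <time.h>

int
example_timedlock(pthread_mutex_t *mp)
{
	struct timespec abstime;

	clock_gettime(CLOCK_REALTIME, &abstime);
	abstime.tv_sec += 2;	/* give up after two seconds */

	switch (pthread_mutex_timedlock(mp, &abstime)) {
	case 0:
		/* ... critical section ... */
		pthread_mutex_unlock(mp);
		return (0);
	case ETIMEDOUT:
		return (1);	/* deadline passed while waiting */
	default:
		return (-1);
	}
}
#endif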

int
_pthread_mutex_unlock(pthread_mutex_t *m)
{
	return (mutex_unlock_common(m, /* add reference */ 0));
}

__strong_reference(_pthread_mutex_unlock, _thr_mutex_unlock);

int
_mutex_cv_unlock(pthread_mutex_t *m)
{
	return (mutex_unlock_common(m, /* add reference */ 1));
}

int
_mutex_cv_lock(pthread_mutex_t *m)
{
	struct  pthread *curthread;
	int	ret;

	curthread = _get_curthread();
	if ((ret = _pthread_mutex_lock(m)) == 0) {
		THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
		(*m)->m_refcount--;
		THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
	}
	return (ret);
}
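
/*
 * Illustrative sketch (not part of this library): _mutex_cv_unlock() and
 * _mutex_cv_lock() are the condition-variable entry points; the refcount
 * they maintain keeps a mutex from being destroyed while a waiter has it
 * temporarily released inside pthread_cond_wait().  Caller-side view,
 * guarded out of the build.
 */
#if 0
#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int ready;

void
example_wait(void)
{
	pthread_mutex_lock(&lock);
	while (!ready)
		/* Releases 'lock' via _mutex_cv_unlock() and reacquires
		 * it via _mutex_cv_lock() before returning. */
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);
}
#endif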

static inline int
mutex_self_trylock(struct pthread *curthread, pthread_mutex_t m)
{
	int	ret = 0;

	switch (m->m_type) {
	/* case PTHREAD_MUTEX_DEFAULT: */
	case PTHREAD_MUTEX_ERRORCHECK:
	case PTHREAD_MUTEX_NORMAL:
		/*
		 * POSIX specifies that mutexes should return EDEADLK if a
		 * recursive lock is detected.
		 */
		if (m->m_owner == curthread)
			ret = EDEADLK;
		else
			ret = EBUSY;
		break;

	case PTHREAD_MUTEX_RECURSIVE:
		/* Increment the lock count: */
		m->m_count++;
		break;

	default:
		/* Trap invalid mutex types: */
		ret = EINVAL;
	}

	return (ret);
}

static inline int
mutex_self_lock(struct pthread *curthread, pthread_mutex_t m)
{
	int ret = 0;

	switch (m->m_type) {
	/* case PTHREAD_MUTEX_DEFAULT: */
	case PTHREAD_MUTEX_ERRORCHECK:
		/*
		 * POSIX specifies that mutexes should return EDEADLK if a
		 * recursive lock is detected.
		 */
		ret = EDEADLK;
		break;

	case PTHREAD_MUTEX_NORMAL:
		/*
		 * What SS2 defines as a 'normal' mutex.  Intentionally
		 * deadlock on attempts to get a lock you already own.
		 */
		THR_SCHED_LOCK(curthread, curthread);
		THR_SET_STATE(curthread, PS_DEADLOCK);
		THR_SCHED_UNLOCK(curthread, curthread);

		/* Unlock the mutex structure: */
		THR_LOCK_RELEASE(curthread, &m->m_lock);

		/* Schedule the next thread: */
		_thr_sched_switch(curthread);
		break;

	case PTHREAD_MUTEX_RECURSIVE:
		/* Increment the lock count: */
		m->m_count++;
		break;

	default:
		/* Trap invalid mutex types: */
		ret = EINVAL;
	}

	return (ret);
}
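
/*
 * Illustrative sketch (not part of this library): how the mutex types
 * handled above behave when the owner locks twice.  ERRORCHECK reports
 * EDEADLK, RECURSIVE counts, and NORMAL deadlocks by design (so it is
 * left out here).  Guarded out of the build.
 */
#if 0
#include <assert.h>
#include <errno.h>
#include <pthread.h>

void
example_relock(void)
{
	pthread_mutexattr_t attr;
	pthread_mutex_t m;

	pthread_mutexattr_init(&attr);
	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
	pthread_mutex_init(&m, &attr);

	pthread_mutex_lock(&m);
	assert(pthread_mutex_lock(&m) == EDEADLK);	/* relock detected */
	pthread_mutex_unlock(&m);
	pthread_mutex_destroy(&m);
	pthread_mutexattr_destroy(&attr);
}
#endif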

static int
mutex_unlock_common(pthread_mutex_t *m, int add_reference)
{
	struct pthread *curthread = _get_curthread();
	struct kse_mailbox *kmbx = NULL;
	int ret = 0;

	if (m == NULL || *m == NULL)
		ret = EINVAL;
	else {
		/* Lock the mutex structure: */
		THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);

		/* Process according to mutex type: */
		switch ((*m)->m_protocol) {
		/* Default POSIX mutex: */
		case PTHREAD_PRIO_NONE:
			/*
			 * Check if the running thread is not the owner of the
			 * mutex:
			 */
			if ((*m)->m_owner != curthread)
				/*
				 * Return an invalid argument error for no
				 * owner and a permission error otherwise:
				 */
				ret = (*m)->m_owner == NULL ? EINVAL : EPERM;

			else if (((*m)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
			    ((*m)->m_count > 0))
				/* Decrement the count: */
				(*m)->m_count--;
			else {
				/*
				 * Clear the count in case this is a recursive
				 * mutex.
				 */
				(*m)->m_count = 0;

				/* Remove the mutex from the thread's queue. */
				MUTEX_ASSERT_IS_OWNED(*m);
				TAILQ_REMOVE(&(*m)->m_owner->mutexq,
				    (*m), m_qe);
				MUTEX_INIT_LINK(*m);

				/*
				 * Hand off the mutex to the next waiting
				 * thread:
				 */
				kmbx = mutex_handoff(curthread, *m);
			}
			break;

		/* POSIX priority inheritance mutex: */
		case PTHREAD_PRIO_INHERIT:
			/*
			 * Check if the running thread is not the owner of the
			 * mutex:
			 */
			if ((*m)->m_owner != curthread)
				/*
				 * Return an invalid argument error for no
				 * owner and a permission error otherwise:
				 */
				ret = (*m)->m_owner == NULL ? EINVAL : EPERM;

			else if (((*m)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
			    ((*m)->m_count > 0))
				/* Decrement the count: */
				(*m)->m_count--;
			else {
				/*
				 * Clear the count in case this is a recursive
				 * mutex.
				 */
				(*m)->m_count = 0;

				/*
				 * Restore the thread's inherited priority and
				 * recompute the active priority (being careful
				 * not to override changes in the thread's base
				 * priority subsequent to locking the mutex).
				 */
				THR_SCHED_LOCK(curthread, curthread);
				curthread->inherited_priority =
					(*m)->m_saved_prio;
				curthread->active_priority =
				    MAX(curthread->inherited_priority,
				    curthread->base_priority);

				/*
				 * This thread now owns one less priority mutex.
				 */
				curthread->priority_mutex_count--;
				THR_SCHED_UNLOCK(curthread, curthread);

				/* Remove the mutex from the thread's queue. */
				MUTEX_ASSERT_IS_OWNED(*m);
				TAILQ_REMOVE(&(*m)->m_owner->mutexq,
				    (*m), m_qe);
				MUTEX_INIT_LINK(*m);

				/*
				 * Hand off the mutex to the next waiting
				 * thread:
				 */
				kmbx = mutex_handoff(curthread, *m);
			}
			break;

		/* POSIX priority ceiling mutex: */
		case PTHREAD_PRIO_PROTECT:
			/*
			 * Check if the running thread is not the owner of the
			 * mutex:
			 */
			if ((*m)->m_owner != curthread)
				/*
				 * Return an invalid argument error for no
				 * owner and a permission error otherwise:
				 */
				ret = (*m)->m_owner == NULL ? EINVAL : EPERM;

			else if (((*m)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
			    ((*m)->m_count > 0))
				/* Decrement the count: */
				(*m)->m_count--;
			else {
				/*
				 * Clear the count in case this is a recursive
				 * mutex.
				 */
				(*m)->m_count = 0;

				/*
				 * Restore the thread's inherited priority and
				 * recompute the active priority (being careful
				 * not to override changes in the thread's base
				 * priority subsequent to locking the mutex).
				 */
				THR_SCHED_LOCK(curthread, curthread);
				curthread->inherited_priority =
					(*m)->m_saved_prio;
				curthread->active_priority =
				    MAX(curthread->inherited_priority,
				    curthread->base_priority);

				/*
				 * This thread now owns one less priority mutex.
				 */
				curthread->priority_mutex_count--;
				THR_SCHED_UNLOCK(curthread, curthread);

				/* Remove the mutex from the thread's queue. */
				MUTEX_ASSERT_IS_OWNED(*m);
				TAILQ_REMOVE(&(*m)->m_owner->mutexq,
				    (*m), m_qe);
				MUTEX_INIT_LINK(*m);

				/*
				 * Hand off the mutex to the next waiting
				 * thread:
				 */
				kmbx = mutex_handoff(curthread, *m);
			}
			break;

		/* Trap invalid mutex types: */
		default:
			/* Return an invalid argument error: */
			ret = EINVAL;
			break;
		}

		if ((ret == 0) && (add_reference != 0))
			/* Increment the reference count: */
			(*m)->m_refcount++;

		/* Unlock the mutex structure: */
		THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
		if (kmbx != NULL)
			kse_wakeup(kmbx);
	}

	/* Return the completion status: */
	return (ret);
}
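
/*
 * Illustrative sketch (not part of this library): the unlock path above
 * distinguishes "nobody owns it" (EINVAL) from "someone else owns it"
 * (EPERM); other implementations may report these cases differently.
 * Guarded out of the build.
 */
#if 0
#include <assert.h>
#include <errno.h>
#include <pthread.h>

void
example_unlock_errors(void)
{
	pthread_mutex_t m;

	pthread_mutex_init(&m, NULL);
	/* Unlocking a mutex that has no owner: */
	assert(pthread_mutex_unlock(&m) == EINVAL);
	pthread_mutex_destroy(&m);
}
#endif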


/*
 * This function is called when a change in base priority occurs for
 * a thread that is holding or waiting for a priority protection or
 * inheritance mutex.  A change in a thread's base priority can effect
 * changes to active priorities of other threads and to the ordering
 * of mutex locking by waiting threads.
 *
 * This must be called without the target thread's scheduling lock held.
 */
void
_mutex_notify_priochange(struct pthread *curthread, struct pthread *pthread,
    int propagate_prio)
{
	struct pthread_mutex *m;

	/* Adjust the priorities of any owned priority mutexes: */
	if (pthread->priority_mutex_count > 0) {
		/*
		 * Rescan the mutexes owned by this thread and correct
		 * their priorities to account for this thread's change
		 * in priority.  This has the side effect of changing
		 * the thread's active priority.
		 *
		 * Be sure to lock the first mutex in the list of owned
		 * mutexes.  This acts as a barrier against another
		 * simultaneous call to change the thread's priority
		 * and from the owning thread releasing the mutex.
		 */
		m = TAILQ_FIRST(&pthread->mutexq);
		if (m != NULL) {
			THR_LOCK_ACQUIRE(curthread, &m->m_lock);
			/*
			 * Make sure the thread still owns the lock.
			 */
			if (m == TAILQ_FIRST(&pthread->mutexq))
				mutex_rescan_owned(curthread, pthread,
				    /* rescan all owned */ NULL);
			THR_LOCK_RELEASE(curthread, &m->m_lock);
		}
	}

	/*
	 * If this thread is waiting on a priority inheritance mutex,
	 * check for priority adjustments.  A change in priority can
	 * also cause a ceiling violation(*) for a thread waiting on
	 * a priority protection mutex; we don't perform the check here
	 * as it is done in pthread_mutex_unlock.
	 *
	 * (*) It should be noted that a priority change to a thread
	 *     _after_ taking and owning a priority ceiling mutex
	 *     does not affect ownership of that mutex; the ceiling
	 *     priority is only checked before mutex ownership occurs.
	 */
	if (propagate_prio != 0) {
		/*
		 * Lock the thread's scheduling queue.  This is a bit
		 * convoluted; the "in synchronization queue flag" can
		 * only be cleared with both the thread's scheduling and
		 * mutex locks held.  The thread's pointer to the wanted
		 * mutex is guaranteed to be valid during this time.
		 */
		THR_SCHED_LOCK(curthread, pthread);

		if (((pthread->sflags & THR_FLAGS_IN_SYNCQ) == 0) ||
		    ((m = pthread->data.mutex) == NULL))
			THR_SCHED_UNLOCK(curthread, pthread);
		else {
			/*
			 * This thread is currently waiting on a mutex; unlock
			 * the scheduling queue lock and lock the mutex.  We
			 * can't hold both at the same time because the locking
			 * order could cause a deadlock.
			 */
			THR_SCHED_UNLOCK(curthread, pthread);
			THR_LOCK_ACQUIRE(curthread, &m->m_lock);

			/*
			 * Check to make sure this thread is still in the
			 * same state (the lock above can yield the CPU to
			 * another thread or the thread may be running on
			 * another CPU).
			 */
			if (((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) &&
			    (pthread->data.mutex == m)) {
				/*
				 * Remove and reinsert this thread into
				 * the list of waiting threads to preserve
				 * decreasing priority order.
				 */
				mutex_queue_remove(m, pthread);
				mutex_queue_enq(m, pthread);

				if (m->m_protocol == PTHREAD_PRIO_INHERIT)
					/* Adjust priorities: */
					mutex_priority_adjust(curthread, m);
			}

			/* Unlock the mutex structure: */
			THR_LOCK_RELEASE(curthread, &m->m_lock);
		}
	}
}
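
/*
 * Illustrative sketch (not part of this library): the notification above
 * fires when a thread's base priority changes, e.g. through the standard
 * pthread_setschedparam() interface.  Guarded out of the build.
 */
#if 0
#include <pthread.h>
#include <sched.h>

int
example_reprioritize(pthread_t tid)
{
	struct sched_param param;

	param.sched_priority = 20;	/* new base priority */
	/* If 'tid' owns or waits on priority mutexes, the library must
	 * re-propagate priorities, which is what
	 * _mutex_notify_priochange() implements. */
	return (pthread_setschedparam(tid, SCHED_RR, &param));
}
#endif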

/*
 * Called when a new thread is added to the mutex waiting queue or
 * when the priority of a thread already in the mutex waiting queue
 * changes.
 *
 * This must be called with the mutex locked by the current thread.
 */
static void
mutex_priority_adjust(struct pthread *curthread, pthread_mutex_t mutex)
{
	pthread_mutex_t	m = mutex;
	struct pthread	*pthread_next, *pthread = mutex->m_owner;
	int		done, temp_prio;

	/*
	 * Calculate the mutex priority as the maximum of the highest
	 * active priority of any waiting threads and the owning thread's
	 * active priority(*).
	 *
	 * (*) Because the owning thread's current active priority may
	 *     reflect priority inherited from this mutex (and the mutex
	 *     priority may have changed) we must recalculate the active
	 *     priority based on the thread's saved inherited priority
	 *     and its base priority.
	 */
	pthread_next = TAILQ_FIRST(&m->m_queue);  /* should never be NULL */
	temp_prio = MAX(pthread_next->active_priority,
	    MAX(m->m_saved_prio, pthread->base_priority));

	/* See if this mutex really needs adjusting: */
	if (temp_prio == m->m_prio)
		/* No need to propagate the priority: */
		return;

	/* Set new priority of the mutex: */
	m->m_prio = temp_prio;

	/*
	 * Don't unlock the mutex passed in as an argument.  It is
	 * expected to be locked and unlocked by the caller.
	 */
	done = 1;
	do {
		/*
		 * Save the thread's priority before rescanning the
		 * owned mutexes:
		 */
		temp_prio = pthread->active_priority;

		/*
		 * Fix the priorities for all mutexes held by the owning
		 * thread since taking this mutex.  This also has a
		 * potential side-effect of changing the thread's priority.
		 *
		 * At this point the mutex is locked by the current thread.
		 * The owning thread can't release the mutex until it is
		 * unlocked, so we should be able to safely walk its list
		 * of owned mutexes.
		 */
		mutex_rescan_owned(curthread, pthread, m);

		/*
		 * If this isn't the first time through the loop,
		 * the current mutex needs to be unlocked.
		 */
		if (done == 0)
			THR_LOCK_RELEASE(curthread, &m->m_lock);

		/* Assume we're done unless told otherwise: */
		done = 1;

		/*
		 * If the thread is currently waiting on a mutex, check
		 * to see if the thread's new priority has affected the
		 * priority of the mutex.
		 */
		if ((temp_prio != pthread->active_priority) &&
		    ((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) &&
		    ((m = pthread->data.mutex) != NULL) &&
		    (m->m_protocol == PTHREAD_PRIO_INHERIT)) {
			/* Lock the mutex structure: */
			THR_LOCK_ACQUIRE(curthread, &m->m_lock);

			/*
			 * Make sure the thread is still waiting on the
			 * mutex:
			 */
			if (((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) &&
			    (m == pthread->data.mutex)) {
				/*
				 * The priority for this thread has changed.
				 * Remove and reinsert this thread into the
				 * list of waiting threads to preserve
				 * decreasing priority order.
				 */
				mutex_queue_remove(m, pthread);
				mutex_queue_enq(m, pthread);

				/*
				 * Grab the waiting thread with highest
				 * priority:
				 */
				pthread_next = TAILQ_FIRST(&m->m_queue);

				/*
				 * Calculate the mutex priority as the maximum
				 * of the highest active priority of any
				 * waiting threads and the owning thread's
				 * active priority.
				 */
				temp_prio = MAX(pthread_next->active_priority,
				    MAX(m->m_saved_prio,
				    m->m_owner->base_priority));

				if (temp_prio != m->m_prio) {
					/*
					 * The priority needs to be propagated
					 * to the mutex this thread is waiting
					 * on and up to the owner of that mutex.
					 */
					m->m_prio = temp_prio;
					pthread = m->m_owner;

					/* We're not done yet: */
					done = 0;
				}
			}
			/* Only release the mutex if we're done: */
			if (done != 0)
				THR_LOCK_RELEASE(curthread, &m->m_lock);
		}
	} while (done == 0);
}

static void
mutex_rescan_owned(struct pthread *curthread, struct pthread *pthread,
    struct pthread_mutex *mutex)
{
	struct pthread_mutex	*m;
	struct pthread		*pthread_next;
	int			active_prio, inherited_prio;

	/*
	 * Start walking the mutexes the thread has taken since
	 * taking this mutex.
	 */
	if (mutex == NULL) {
		/*
		 * A null mutex means start at the beginning of the owned
		 * mutex list.
		 */
		m = TAILQ_FIRST(&pthread->mutexq);

		/* There is no inherited priority yet. */
		inherited_prio = 0;
	} else {
		/*
		 * The caller wants to start after a specific mutex.  It
		 * is assumed that this mutex is a priority inheritance
		 * mutex and that its priority has been correctly
		 * calculated.
		 */
		m = TAILQ_NEXT(mutex, m_qe);

		/* Start inheriting priority from the specified mutex. */
		inherited_prio = mutex->m_prio;
	}
	active_prio = MAX(inherited_prio, pthread->base_priority);

	for (; m != NULL; m = TAILQ_NEXT(m, m_qe)) {
		/*
		 * We only want to deal with priority inheritance
		 * mutexes.  This might be optimized by only placing
		 * priority inheritance mutexes into the owned mutex
		 * list, but it may prove to be useful having all
		 * owned mutexes in this list.  Consider a thread
		 * exiting while holding mutexes...
		 */
		if (m->m_protocol == PTHREAD_PRIO_INHERIT) {
			/*
			 * Fix the owner's saved (inherited) priority to
			 * reflect the priority of the previous mutex.
			 */
			m->m_saved_prio = inherited_prio;

			if ((pthread_next = TAILQ_FIRST(&m->m_queue)) != NULL)
				/* Recalculate the priority of the mutex: */
				m->m_prio = MAX(active_prio,
				     pthread_next->active_priority);
			else
				m->m_prio = active_prio;

			/* Recalculate new inherited and active priorities: */
			inherited_prio = m->m_prio;
			active_prio = MAX(m->m_prio, pthread->base_priority);
		}
	}

	/*
	 * Fix the thread's inherited priority and recalculate its
	 * active priority.
	 */
	pthread->inherited_priority = inherited_prio;
	active_prio = MAX(inherited_prio, pthread->base_priority);

	if (active_prio != pthread->active_priority) {
		/* Lock the thread's scheduling queue: */
		THR_SCHED_LOCK(curthread, pthread);

		if ((pthread->flags & THR_FLAGS_IN_RUNQ) == 0) {
			/*
			 * This thread is not in a run queue.  Just set
			 * its active priority.
			 */
			pthread->active_priority = active_prio;
		}
		else {
			/*
			 * This thread is in a run queue.  Remove it from
			 * the queue before changing its priority:
			 */
			THR_RUNQ_REMOVE(pthread);

			/*
			 * POSIX states that if the priority is being
			 * lowered, the thread must be inserted at the
			 * head of the queue for its priority if it owns
			 * any priority protection or inheritance mutexes.
			 */
			if ((active_prio < pthread->active_priority) &&
			    (pthread->priority_mutex_count > 0)) {
				/* Set the new active priority. */
				pthread->active_priority = active_prio;

				THR_RUNQ_INSERT_HEAD(pthread);
			} else {
				/* Set the new active priority. */
				pthread->active_priority = active_prio;

				THR_RUNQ_INSERT_TAIL(pthread);
			}
		}
		THR_SCHED_UNLOCK(curthread, pthread);
	}
}

void
_mutex_unlock_private(pthread_t pthread)
{
	struct pthread_mutex	*m, *m_next;

	for (m = TAILQ_FIRST(&pthread->mutexq); m != NULL; m = m_next) {
		m_next = TAILQ_NEXT(m, m_qe);
		if ((m->m_flags & MUTEX_FLAGS_PRIVATE) != 0)
			pthread_mutex_unlock(&m);
	}
}

/*
 * This is called by the current thread when it wants to back out of a
 * mutex_lock in order to run a signal handler.
 */
void
_mutex_lock_backout(struct pthread *curthread)
{
	struct pthread_mutex *m;

	if ((curthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) {
		/*
		 * Any other thread may clear the "in sync queue flag",
		 * but only the current thread can clear the pointer
		 * to the mutex.  So if the flag is set, we can
		 * guarantee that the pointer to the mutex is valid.
		 * The only problem may be if the mutex is destroyed
		 * out from under us, but that should be considered
		 * an application bug.
		 */
		m = curthread->data.mutex;

		/* Lock the mutex structure: */
		THR_LOCK_ACQUIRE(curthread, &m->m_lock);

		/*
		 * Check to make sure this thread doesn't already own
		 * the mutex.  Since mutexes are unlocked with direct
		 * handoffs, it is possible the previous owner gave it
		 * to us after we checked the sync queue flag and before
		 * we locked the mutex structure.
		 */
		if (m->m_owner == curthread) {
			THR_LOCK_RELEASE(curthread, &m->m_lock);
			mutex_unlock_common(&m, /* add_reference */ 0);
		} else {
			/*
			 * Remove ourselves from the mutex queue and
			 * clear the pointer to the mutex.  We may no
			 * longer be in the mutex queue, but the removal
			 * function will DTRT.
			 */
			mutex_queue_remove(m, curthread);
			curthread->data.mutex = NULL;
			THR_LOCK_RELEASE(curthread, &m->m_lock);
		}
	}
}

/*
 * Dequeue a waiting thread from the head of a mutex queue in descending
 * priority order.
 *
 * In order to properly dequeue a thread from the mutex queue and
 * make it runnable without the possibility of errant wakeups, it
 * is necessary to lock the thread's scheduling queue while also
 * holding the mutex lock.
 */
static struct kse_mailbox *
mutex_handoff(struct pthread *curthread, struct pthread_mutex *mutex)
{
	struct kse_mailbox *kmbx = NULL;
	struct pthread *pthread;

	/* Keep dequeueing until we find a valid thread: */
	mutex->m_owner = NULL;
	pthread = TAILQ_FIRST(&mutex->m_queue);
	while (pthread != NULL) {
		/* Take the thread's scheduling lock: */
		THR_SCHED_LOCK(curthread, pthread);

		/* Remove the thread from the mutex queue: */
		TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
		pthread->sflags &= ~THR_FLAGS_IN_SYNCQ;

		/*
		 * Only exit the loop if the thread hasn't been
		 * cancelled.
		 */
		switch (mutex->m_protocol) {
		case PTHREAD_PRIO_NONE:
			/*
			 * Assign the new owner and add the mutex to the
			 * thread's list of owned mutexes.
			 */
			mutex->m_owner = pthread;
			TAILQ_INSERT_TAIL(&pthread->mutexq, mutex, m_qe);
			break;

		case PTHREAD_PRIO_INHERIT:
			/*
			 * Assign the new owner and add the mutex to the
			 * thread's list of owned mutexes.
			 */
			mutex->m_owner = pthread;
			TAILQ_INSERT_TAIL(&pthread->mutexq, mutex, m_qe);

			/* Track number of priority mutexes owned: */
			pthread->priority_mutex_count++;

			/*
			 * Set the priority of the mutex.  Since our waiting
			 * threads are in descending priority order, the
			 * priority of the mutex becomes the active priority
			 * of the thread we just dequeued.
			 */
			mutex->m_prio = pthread->active_priority;

			/* Save the owning thread's inherited priority: */
			mutex->m_saved_prio = pthread->inherited_priority;

			/*
			 * The owning thread's inherited priority now becomes
			 * its active priority (the priority of the mutex).
			 */
			pthread->inherited_priority = mutex->m_prio;
			break;

		case PTHREAD_PRIO_PROTECT:
			if (pthread->active_priority > mutex->m_prio) {
				/*
				 * Either the mutex ceiling priority has
				 * been lowered and/or this thread's priority
				 * has been raised subsequent to the thread
				 * being queued on the waiting list.
				 */
				pthread->error = EINVAL;
			}
			else {
				/*
				 * Assign the new owner and add the mutex
				 * to the thread's list of owned mutexes.
				 */
				mutex->m_owner = pthread;
				TAILQ_INSERT_TAIL(&pthread->mutexq,
				    mutex, m_qe);

				/* Track number of priority mutexes owned: */
				pthread->priority_mutex_count++;

				/*
				 * Save the owning thread's inherited
				 * priority:
				 */
				mutex->m_saved_prio =
				    pthread->inherited_priority;

				/*
				 * The owning thread inherits the ceiling
				 * priority of the mutex and executes at
				 * that priority:
				 */
				pthread->inherited_priority = mutex->m_prio;
				pthread->active_priority = mutex->m_prio;
			}
			break;
		}

		/* Make the thread runnable and unlock the scheduling queue: */
		kmbx = _thr_setrunnable_unlocked(pthread);

		/* Add a preemption point. */
		if ((curthread->kseg == pthread->kseg) &&
		    (pthread->active_priority > curthread->active_priority))
			curthread->critical_yield = 1;

		THR_SCHED_UNLOCK(curthread, pthread);
		if (mutex->m_owner == pthread)
			/* We're done; a valid owner was found. */
			break;
		else
			/* Get the next thread from the waiting queue: */
			pthread = TAILQ_NEXT(pthread, sqe);
	}

	if ((pthread == NULL) && (mutex->m_protocol == PTHREAD_PRIO_INHERIT))
		/* This mutex has no priority: */
		mutex->m_prio = 0;
	return (kmbx);
}

/*
 * Dequeue a waiting thread from the head of a mutex queue in descending
 * priority order.
 */
static inline pthread_t
mutex_queue_deq(struct pthread_mutex *mutex)
{
	pthread_t pthread;

	while ((pthread = TAILQ_FIRST(&mutex->m_queue)) != NULL) {
		TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
		pthread->sflags &= ~THR_FLAGS_IN_SYNCQ;

		/*
		 * Only exit the loop if the thread hasn't been
		 * cancelled.
		 */
		if (pthread->interrupted == 0)
			break;
	}

	return (pthread);
}

/*
 * Remove a waiting thread from a mutex queue in descending priority order.
 */
static inline void
mutex_queue_remove(pthread_mutex_t mutex, pthread_t pthread)
{
	if ((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) {
		TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
		pthread->sflags &= ~THR_FLAGS_IN_SYNCQ;
	}
}

/*
 * Enqueue a waiting thread to a queue in descending priority order.
 */
static inline void
mutex_queue_enq(pthread_mutex_t mutex, pthread_t pthread)
{
	pthread_t tid = TAILQ_LAST(&mutex->m_queue, mutex_head);

	THR_ASSERT_NOT_IN_SYNCQ(pthread);
	/*
	 * For the common case of all threads having equal priority,
	 * we perform a quick check against the priority of the thread
	 * at the tail of the queue.
	 */
	if ((tid == NULL) || (pthread->active_priority <= tid->active_priority))
		TAILQ_INSERT_TAIL(&mutex->m_queue, pthread, sqe);
	else {
		tid = TAILQ_FIRST(&mutex->m_queue);
		while (pthread->active_priority <= tid->active_priority)
			tid = TAILQ_NEXT(tid, sqe);
		TAILQ_INSERT_BEFORE(tid, pthread, sqe);
	}
	pthread->sflags |= THR_FLAGS_IN_SYNCQ;
}

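/*
 * Illustrative sketch (not part of this library): the descending-priority
 * insertion used by mutex_queue_enq() above, reduced to a standalone
 * <sys/queue.h> program with a toy node type.  Guarded out of the build.
 */
#if 0
#include <stdio.h>
#include <sys/queue.h>

struct node {
	int prio;
	TAILQ_ENTRY(node) link;
};
TAILQ_HEAD(node_head, node);

/*
 * Insert so the list stays in descending priority order, FIFO within a
 * priority level; the tail comparison is the same fast path as above.
 */
static void
prio_enq(struct node_head *head, struct node *n)
{
	struct node *t = TAILQ_LAST(head, node_head);

	if (t == NULL || n->prio <= t->prio)
		TAILQ_INSERT_TAIL(head, n, link);
	else {
		t = TAILQ_FIRST(head);
		while (n->prio <= t->prio)
			t = TAILQ_NEXT(t, link);
		TAILQ_INSERT_BEFORE(t, n, link);
	}
}

int
main(void)
{
	struct node_head head = TAILQ_HEAD_INITIALIZER(head);
	struct node a = { 5 }, b = { 9 }, c = { 5 }, *n;

	prio_enq(&head, &a);
	prio_enq(&head, &b);
	prio_enq(&head, &c);
	TAILQ_FOREACH(n, &head, link)
		printf("%d\n", n->prio);	/* prints 9 5 5 */
	return (0);
}
#endif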