/* thr_mutex.c revision 120403 */
1/*
2 * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 *    must display the following acknowledgement:
15 *	This product includes software developed by John Birrell.
16 * 4. Neither the name of the author nor the names of any co-contributors
17 *    may be used to endorse or promote products derived from this software
18 *    without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * SUCH DAMAGE.
31 *
32 * $FreeBSD: head/lib/libkse/thread/thr_mutex.c 120403 2003-09-24 12:52:57Z davidxu $
33 */
34#include <stdlib.h>
35#include <errno.h>
36#include <string.h>
37#include <sys/param.h>
38#include <sys/queue.h>
39#include <pthread.h>
40#include "thr_private.h"
41
#if defined(_PTHREADS_INVARIANTS)
/*
 * Invariant-checking helpers: panic when a mutex's ownership-queue
 * linkage is not in the expected state.  All expand to statements,
 * so each uses the do { } while (0) idiom WITHOUT a trailing
 * semicolon -- a trailing semicolon would make
 * "if (x) THR_ASSERT_NOT_IN_SYNCQ(t); else ..." a syntax error.
 */
#define MUTEX_INIT_LINK(m) 		do {		\
	(m)->m_qe.tqe_prev = NULL;			\
	(m)->m_qe.tqe_next = NULL;			\
} while (0)
#define MUTEX_ASSERT_IS_OWNED(m)	do {		\
	if ((m)->m_qe.tqe_prev == NULL)			\
		PANIC("mutex is not on list");		\
} while (0)
#define MUTEX_ASSERT_NOT_OWNED(m)	do {		\
	if (((m)->m_qe.tqe_prev != NULL) ||		\
	    ((m)->m_qe.tqe_next != NULL))		\
		PANIC("mutex is on list");		\
} while (0)
#define	THR_ASSERT_NOT_IN_SYNCQ(thr)	do {		\
	THR_ASSERT(((thr)->sflags & THR_FLAGS_IN_SYNCQ) == 0, \
	    "thread in syncq when it shouldn't be.");	\
} while (0)
#else
#define MUTEX_INIT_LINK(m)
#define MUTEX_ASSERT_IS_OWNED(m)
#define MUTEX_ASSERT_NOT_OWNED(m)
#define	THR_ASSERT_NOT_IN_SYNCQ(thr)
#endif
66
/* True when the thread is linked on a mutex's wait queue. */
#define THR_IN_MUTEXQ(thr)	(((thr)->sflags & THR_FLAGS_IN_SYNCQ) != 0)
68
69/*
70 * Prototypes
71 */
72static struct kse_mailbox *mutex_handoff(struct pthread *,
73			    struct pthread_mutex *);
74static inline int	mutex_self_trylock(struct pthread *, pthread_mutex_t);
75static inline int	mutex_self_lock(struct pthread *, pthread_mutex_t);
76static int		mutex_unlock_common(pthread_mutex_t *, int);
77static void		mutex_priority_adjust(struct pthread *, pthread_mutex_t);
78static void		mutex_rescan_owned (struct pthread *, struct pthread *,
79			    struct pthread_mutex *);
80static inline pthread_t	mutex_queue_deq(pthread_mutex_t);
81static inline void	mutex_queue_remove(pthread_mutex_t, pthread_t);
82static inline void	mutex_queue_enq(pthread_mutex_t, pthread_t);
83
84
/*
 * Attribute object used when dynamically initializing statically
 * allocated mutexes on behalf of libc (see init_static_private();
 * presumably marks them private/delete-safe -- see caller comments).
 */
static struct pthread_mutex_attr	static_mutex_attr =
    PTHREAD_MUTEXATTR_STATIC_INITIALIZER;
static pthread_mutexattr_t		static_mattr = &static_mutex_attr;
88
89/* Single underscore versions provided for libc internal usage: */
90__weak_reference(__pthread_mutex_lock, pthread_mutex_lock);
91__weak_reference(__pthread_mutex_timedlock, pthread_mutex_timedlock);
92__weak_reference(__pthread_mutex_trylock, pthread_mutex_trylock);
93
94/* No difference between libc and application usage of these: */
95__weak_reference(_pthread_mutex_init, pthread_mutex_init);
96__weak_reference(_pthread_mutex_destroy, pthread_mutex_destroy);
97__weak_reference(_pthread_mutex_unlock, pthread_mutex_unlock);
98
99
100
101int
102_pthread_mutex_init(pthread_mutex_t *mutex,
103    const pthread_mutexattr_t *mutex_attr)
104{
105	struct pthread_mutex *pmutex;
106	enum pthread_mutextype type;
107	int		protocol;
108	int		ceiling;
109	int		flags;
110	int		ret = 0;
111
112	if (mutex == NULL)
113		ret = EINVAL;
114
115	/* Check if default mutex attributes: */
116	else if (mutex_attr == NULL || *mutex_attr == NULL) {
117		/* Default to a (error checking) POSIX mutex: */
118		type = PTHREAD_MUTEX_ERRORCHECK;
119		protocol = PTHREAD_PRIO_NONE;
120		ceiling = THR_MAX_PRIORITY;
121		flags = 0;
122	}
123
124	/* Check mutex type: */
125	else if (((*mutex_attr)->m_type < PTHREAD_MUTEX_ERRORCHECK) ||
126	    ((*mutex_attr)->m_type >= MUTEX_TYPE_MAX))
127		/* Return an invalid argument error: */
128		ret = EINVAL;
129
130	/* Check mutex protocol: */
131	else if (((*mutex_attr)->m_protocol < PTHREAD_PRIO_NONE) ||
132	    ((*mutex_attr)->m_protocol > PTHREAD_MUTEX_RECURSIVE))
133		/* Return an invalid argument error: */
134		ret = EINVAL;
135
136	else {
137		/* Use the requested mutex type and protocol: */
138		type = (*mutex_attr)->m_type;
139		protocol = (*mutex_attr)->m_protocol;
140		ceiling = (*mutex_attr)->m_ceiling;
141		flags = (*mutex_attr)->m_flags;
142	}
143
144	/* Check no errors so far: */
145	if (ret == 0) {
146		if ((pmutex = (pthread_mutex_t)
147		    malloc(sizeof(struct pthread_mutex))) == NULL)
148			ret = ENOMEM;
149		else if (_lock_init(&pmutex->m_lock, LCK_ADAPTIVE,
150		    _thr_lock_wait, _thr_lock_wakeup) != 0) {
151			free(pmutex);
152			*mutex = NULL;
153			ret = ENOMEM;
154		} else {
155			/* Set the mutex flags: */
156			pmutex->m_flags = flags;
157
158			/* Process according to mutex type: */
159			switch (type) {
160			/* case PTHREAD_MUTEX_DEFAULT: */
161			case PTHREAD_MUTEX_ERRORCHECK:
162			case PTHREAD_MUTEX_NORMAL:
163				/* Nothing to do here. */
164				break;
165
166			/* Single UNIX Spec 2 recursive mutex: */
167			case PTHREAD_MUTEX_RECURSIVE:
168				/* Reset the mutex count: */
169				pmutex->m_count = 0;
170				break;
171
172			/* Trap invalid mutex types: */
173			default:
174				/* Return an invalid argument error: */
175				ret = EINVAL;
176				break;
177			}
178			if (ret == 0) {
179				/* Initialise the rest of the mutex: */
180				TAILQ_INIT(&pmutex->m_queue);
181				pmutex->m_flags |= MUTEX_FLAGS_INITED;
182				pmutex->m_owner = NULL;
183				pmutex->m_type = type;
184				pmutex->m_protocol = protocol;
185				pmutex->m_refcount = 0;
186				if (protocol == PTHREAD_PRIO_PROTECT)
187					pmutex->m_prio = ceiling;
188				else
189					pmutex->m_prio = -1;
190				pmutex->m_saved_prio = 0;
191				MUTEX_INIT_LINK(pmutex);
192				*mutex = pmutex;
193			} else {
194				/* Free the mutex lock structure: */
195				_lock_destroy(&pmutex->m_lock);
196				free(pmutex);
197				*mutex = NULL;
198			}
199		}
200	}
201	/* Return the completion status: */
202	return (ret);
203}
204
205int
206_pthread_mutex_destroy(pthread_mutex_t *mutex)
207{
208	struct pthread	*curthread = _get_curthread();
209	pthread_mutex_t m;
210	int ret = 0;
211
212	if (mutex == NULL || *mutex == NULL)
213		ret = EINVAL;
214	else {
215		/* Lock the mutex structure: */
216		THR_LOCK_ACQUIRE(curthread, &(*mutex)->m_lock);
217
218		/*
219		 * Check to see if this mutex is in use:
220		 */
221		if (((*mutex)->m_owner != NULL) ||
222		    (TAILQ_FIRST(&(*mutex)->m_queue) != NULL) ||
223		    ((*mutex)->m_refcount != 0)) {
224			ret = EBUSY;
225
226			/* Unlock the mutex structure: */
227			THR_LOCK_RELEASE(curthread, &(*mutex)->m_lock);
228		} else {
229			/*
230			 * Save a pointer to the mutex so it can be free'd
231			 * and set the caller's pointer to NULL:
232			 */
233			m = *mutex;
234			*mutex = NULL;
235
236			/* Unlock the mutex structure: */
237			THR_LOCK_RELEASE(curthread, &m->m_lock);
238
239			/*
240			 * Free the memory allocated for the mutex
241			 * structure:
242			 */
243			MUTEX_ASSERT_NOT_OWNED(m);
244
245			/* Free the mutex lock structure: */
246			_lock_destroy(&m->m_lock);
247
248			free(m);
249		}
250	}
251
252	/* Return the completion status: */
253	return (ret);
254}
255
256static int
257init_static(struct pthread *thread, pthread_mutex_t *mutex)
258{
259	int ret;
260
261	THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);
262
263	if (*mutex == NULL)
264		ret = pthread_mutex_init(mutex, NULL);
265	else
266		ret = 0;
267
268	THR_LOCK_RELEASE(thread, &_mutex_static_lock);
269
270	return (ret);
271}
272
273static int
274init_static_private(struct pthread *thread, pthread_mutex_t *mutex)
275{
276	int ret;
277
278	THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);
279
280	if (*mutex == NULL)
281		ret = pthread_mutex_init(mutex, &static_mattr);
282	else
283		ret = 0;
284
285	THR_LOCK_RELEASE(thread, &_mutex_static_lock);
286
287	return (ret);
288}
289
/*
 * Try to acquire *mutex for curthread without blocking.
 *
 * Returns 0 on success; EBUSY when another thread owns the mutex;
 * the per-type self-lock result (EDEADLK or EBUSY, or 0 for a
 * recursive mutex) when curthread already owns it; and EINVAL for an
 * unknown protocol or a PTHREAD_PRIO_PROTECT ceiling violation.
 *
 * On success the mutex is appended to curthread->mutexq, and for the
 * priority protocols the thread's inherited/active priorities are
 * updated under the thread's scheduling lock.
 */
static int
mutex_trylock_common(struct pthread *curthread, pthread_mutex_t *mutex)
{
	int ret = 0;

	THR_ASSERT((mutex != NULL) && (*mutex != NULL),
	    "Uninitialized mutex in pthread_mutex_trylock_basic");

	/* Lock the mutex structure: */
	THR_LOCK_ACQUIRE(curthread, &(*mutex)->m_lock);

	/*
	 * If the mutex was statically allocated, properly
	 * initialize the tail queue.
	 */
	if (((*mutex)->m_flags & MUTEX_FLAGS_INITED) == 0) {
		TAILQ_INIT(&(*mutex)->m_queue);
		MUTEX_INIT_LINK(*mutex);
		(*mutex)->m_flags |= MUTEX_FLAGS_INITED;
	}

	/* Process according to mutex type: */
	switch ((*mutex)->m_protocol) {
	/* Default POSIX mutex: */
	case PTHREAD_PRIO_NONE:
		/* Check if this mutex is not locked: */
		if ((*mutex)->m_owner == NULL) {
			/* Lock the mutex for the running thread: */
			(*mutex)->m_owner = curthread;

			/* Add to the list of owned mutexes: */
			MUTEX_ASSERT_NOT_OWNED(*mutex);
			TAILQ_INSERT_TAIL(&curthread->mutexq,
			    (*mutex), m_qe);
		} else if ((*mutex)->m_owner == curthread)
			ret = mutex_self_trylock(curthread, *mutex);
		else
			/* Return a busy error: */
			ret = EBUSY;
		break;

	/* POSIX priority inheritence mutex: */
	case PTHREAD_PRIO_INHERIT:
		/* Check if this mutex is not locked: */
		if ((*mutex)->m_owner == NULL) {
			/* Lock the mutex for the running thread: */
			(*mutex)->m_owner = curthread;

			THR_SCHED_LOCK(curthread, curthread);
			/* Track number of priority mutexes owned: */
			curthread->priority_mutex_count++;

			/*
			 * The mutex takes on the attributes of the
			 * running thread when there are no waiters.
			 */
			(*mutex)->m_prio = curthread->active_priority;
			(*mutex)->m_saved_prio =
			    curthread->inherited_priority;
			curthread->inherited_priority = (*mutex)->m_prio;
			THR_SCHED_UNLOCK(curthread, curthread);

			/* Add to the list of owned mutexes: */
			MUTEX_ASSERT_NOT_OWNED(*mutex);
			TAILQ_INSERT_TAIL(&curthread->mutexq,
			    (*mutex), m_qe);
		} else if ((*mutex)->m_owner == curthread)
			ret = mutex_self_trylock(curthread, *mutex);
		else
			/* Return a busy error: */
			ret = EBUSY;
		break;

	/* POSIX priority protection mutex: */
	case PTHREAD_PRIO_PROTECT:
		/* Check for a priority ceiling violation: */
		if (curthread->active_priority > (*mutex)->m_prio)
			ret = EINVAL;

		/* Check if this mutex is not locked: */
		else if ((*mutex)->m_owner == NULL) {
			/* Lock the mutex for the running thread: */
			(*mutex)->m_owner = curthread;

			THR_SCHED_LOCK(curthread, curthread);
			/* Track number of priority mutexes owned: */
			curthread->priority_mutex_count++;

			/*
			 * The running thread inherits the ceiling
			 * priority of the mutex and executes at that
			 * priority.
			 */
			curthread->active_priority = (*mutex)->m_prio;
			(*mutex)->m_saved_prio =
			    curthread->inherited_priority;
			curthread->inherited_priority =
			    (*mutex)->m_prio;
			THR_SCHED_UNLOCK(curthread, curthread);
			/* Add to the list of owned mutexes: */
			MUTEX_ASSERT_NOT_OWNED(*mutex);
			TAILQ_INSERT_TAIL(&curthread->mutexq,
			    (*mutex), m_qe);
		} else if ((*mutex)->m_owner == curthread)
			ret = mutex_self_trylock(curthread, *mutex);
		else
			/* Return a busy error: */
			ret = EBUSY;
		break;

	/* Trap invalid mutex types: */
	default:
		/* Return an invalid argument error: */
		ret = EINVAL;
		break;
	}

	/* Unlock the mutex structure: */
	THR_LOCK_RELEASE(curthread, &(*mutex)->m_lock);

	/* Return the completion status: */
	return (ret);
}
413
414int
415__pthread_mutex_trylock(pthread_mutex_t *mutex)
416{
417	struct pthread *curthread = _get_curthread();
418	int ret = 0;
419
420	if (mutex == NULL)
421		ret = EINVAL;
422
423	/*
424	 * If the mutex is statically initialized, perform the dynamic
425	 * initialization:
426	 */
427	else if ((*mutex != NULL) ||
428	    ((ret = init_static(curthread, mutex)) == 0))
429		ret = mutex_trylock_common(curthread, mutex);
430
431	return (ret);
432}
433
434int
435_pthread_mutex_trylock(pthread_mutex_t *mutex)
436{
437	struct pthread	*curthread = _get_curthread();
438	int	ret = 0;
439
440	if (mutex == NULL)
441		ret = EINVAL;
442
443	/*
444	 * If the mutex is statically initialized, perform the dynamic
445	 * initialization marking the mutex private (delete safe):
446	 */
447	else if ((*mutex != NULL) ||
448	    ((ret = init_static_private(curthread, mutex)) == 0))
449		ret = mutex_trylock_common(curthread, mutex);
450
451	return (ret);
452}
453
/*
 * Acquire *m for curthread, blocking until the mutex is available
 * or, when abstime is non-NULL, until the absolute timeout expires.
 *
 * Returns 0 on success; EINVAL for a malformed timeout, an unknown
 * protocol, or a PTHREAD_PRIO_PROTECT ceiling violation (including
 * one detected while waiting, reported via curthread->error);
 * ETIMEDOUT on timeout; or the per-type self-lock result when the
 * thread already owns the mutex.
 */
static int
mutex_lock_common(struct pthread *curthread, pthread_mutex_t *m,
	const struct timespec * abstime)
{
	int	ret = 0;

	/* NOTE(review): message copied from the trylock path. */
	THR_ASSERT((m != NULL) && (*m != NULL),
	    "Uninitialized mutex in pthread_mutex_trylock_basic");

	if (abstime != NULL && (abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
	    abstime->tv_nsec >= 1000000000))
		return (EINVAL);

	/* Reset the interrupted flag: */
	curthread->interrupted = 0;
	curthread->timeout = 0;
	curthread->wakeup_time.tv_sec = -1;

	/*
	 * Enter a loop waiting to become the mutex owner.  We need a
	 * loop in case the waiting thread is interrupted by a signal
	 * to execute a signal handler.  It is not (currently) possible
	 * to remain in the waiting queue while running a handler.
	 * Instead, the thread is interrupted and backed out of the
	 * waiting queue prior to executing the signal handler.
	 */
	do {
		/* Lock the mutex structure: */
		THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);

		/*
		 * If the mutex was statically allocated, properly
		 * initialize the tail queue.
		 */
		if (((*m)->m_flags & MUTEX_FLAGS_INITED) == 0) {
			TAILQ_INIT(&(*m)->m_queue);
			(*m)->m_flags |= MUTEX_FLAGS_INITED;
			MUTEX_INIT_LINK(*m);
		}

		/* Process according to mutex type: */
		switch ((*m)->m_protocol) {
		/* Default POSIX mutex: */
		case PTHREAD_PRIO_NONE:
			if ((*m)->m_owner == NULL) {
				/* Lock the mutex for this thread: */
				(*m)->m_owner = curthread;

				/* Add to the list of owned mutexes: */
				MUTEX_ASSERT_NOT_OWNED(*m);
				TAILQ_INSERT_TAIL(&curthread->mutexq,
				    (*m), m_qe);

				/* Unlock the mutex structure: */
				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
			} else if ((*m)->m_owner == curthread) {
				ret = mutex_self_lock(curthread, *m);

				/* Unlock the mutex structure: */
				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
			} else {
				/* Set the wakeup time: */
				if (abstime) {
					curthread->wakeup_time.tv_sec =
						abstime->tv_sec;
					curthread->wakeup_time.tv_nsec =
						abstime->tv_nsec;
				}

				/*
				 * Join the queue of threads waiting to lock
				 * the mutex and save a pointer to the mutex.
				 */
				mutex_queue_enq(*m, curthread);
				curthread->data.mutex = *m;
				/*
				 * This thread is active and is in a critical
				 * region (holding the mutex lock); we should
				 * be able to safely set the state.
				 */
				THR_SCHED_LOCK(curthread, curthread);
				THR_SET_STATE(curthread, PS_MUTEX_WAIT);
				THR_SCHED_UNLOCK(curthread, curthread);

				/* Unlock the mutex structure: */
				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);

				/* Schedule the next thread: */
				_thr_sched_switch(curthread);

				curthread->data.mutex = NULL;
				if (THR_IN_MUTEXQ(curthread)) {
					THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
					mutex_queue_remove(*m, curthread);
					THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
				}
			}
			break;

		/* POSIX priority inheritence mutex: */
		case PTHREAD_PRIO_INHERIT:
			/* Check if this mutex is not locked: */
			if ((*m)->m_owner == NULL) {
				/* Lock the mutex for this thread: */
				(*m)->m_owner = curthread;

				THR_SCHED_LOCK(curthread, curthread);
				/* Track number of priority mutexes owned: */
				curthread->priority_mutex_count++;

				/*
				 * The mutex takes on attributes of the
				 * running thread when there are no waiters.
				 * Make sure the thread's scheduling lock is
				 * held while priorities are adjusted.
				 */
				(*m)->m_prio = curthread->active_priority;
				(*m)->m_saved_prio =
				    curthread->inherited_priority;
				curthread->inherited_priority = (*m)->m_prio;
				THR_SCHED_UNLOCK(curthread, curthread);

				/* Add to the list of owned mutexes: */
				MUTEX_ASSERT_NOT_OWNED(*m);
				TAILQ_INSERT_TAIL(&curthread->mutexq,
				    (*m), m_qe);

				/* Unlock the mutex structure: */
				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
			} else if ((*m)->m_owner == curthread) {
				ret = mutex_self_lock(curthread, *m);

				/* Unlock the mutex structure: */
				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
			} else {
				/* Set the wakeup time: */
				if (abstime) {
					curthread->wakeup_time.tv_sec =
						abstime->tv_sec;
					curthread->wakeup_time.tv_nsec =
						abstime->tv_nsec;
				}

				/*
				 * Join the queue of threads waiting to lock
				 * the mutex and save a pointer to the mutex.
				 */
				mutex_queue_enq(*m, curthread);
				curthread->data.mutex = *m;

				/*
				 * This thread is active and is in a critical
				 * region (holding the mutex lock); we should
				 * be able to safely set the state.
				 */
				if (curthread->active_priority > (*m)->m_prio)
					/* Adjust priorities: */
					mutex_priority_adjust(curthread, *m);

				THR_SCHED_LOCK(curthread, curthread);
				THR_SET_STATE(curthread, PS_MUTEX_WAIT);
				THR_SCHED_UNLOCK(curthread, curthread);

				/* Unlock the mutex structure: */
				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);

				/* Schedule the next thread: */
				_thr_sched_switch(curthread);

				curthread->data.mutex = NULL;
				if (THR_IN_MUTEXQ(curthread)) {
					THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
					mutex_queue_remove(*m, curthread);
					THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
				}
			}
			break;

		/* POSIX priority protection mutex: */
		case PTHREAD_PRIO_PROTECT:
			/* Check for a priority ceiling violation: */
			if (curthread->active_priority > (*m)->m_prio) {
				/* Unlock the mutex structure: */
				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
				ret = EINVAL;
			}
			/* Check if this mutex is not locked: */
			else if ((*m)->m_owner == NULL) {
				/*
				 * Lock the mutex for the running
				 * thread:
				 */
				(*m)->m_owner = curthread;

				THR_SCHED_LOCK(curthread, curthread);
				/* Track number of priority mutexes owned: */
				curthread->priority_mutex_count++;

				/*
				 * The running thread inherits the ceiling
				 * priority of the mutex and executes at that
				 * priority.  Make sure the thread's
				 * scheduling lock is held while priorities
				 * are adjusted.
				 */
				curthread->active_priority = (*m)->m_prio;
				(*m)->m_saved_prio =
				    curthread->inherited_priority;
				curthread->inherited_priority = (*m)->m_prio;
				THR_SCHED_UNLOCK(curthread, curthread);

				/* Add to the list of owned mutexes: */
				MUTEX_ASSERT_NOT_OWNED(*m);
				TAILQ_INSERT_TAIL(&curthread->mutexq,
				    (*m), m_qe);

				/* Unlock the mutex structure: */
				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
			} else if ((*m)->m_owner == curthread) {
				ret = mutex_self_lock(curthread, *m);

				/* Unlock the mutex structure: */
				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
			} else {
				/* Set the wakeup time: */
				if (abstime) {
					curthread->wakeup_time.tv_sec =
						abstime->tv_sec;
					curthread->wakeup_time.tv_nsec =
						abstime->tv_nsec;
				}

				/*
				 * Join the queue of threads waiting to lock
				 * the mutex and save a pointer to the mutex.
				 */
				mutex_queue_enq(*m, curthread);
				curthread->data.mutex = *m;

				/* Clear any previous error: */
				curthread->error = 0;

				/*
				 * This thread is active and is in a critical
				 * region (holding the mutex lock); we should
				 * be able to safely set the state.
				 */

				THR_SCHED_LOCK(curthread, curthread);
				THR_SET_STATE(curthread, PS_MUTEX_WAIT);
				THR_SCHED_UNLOCK(curthread, curthread);

				/* Unlock the mutex structure: */
				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);

				/* Schedule the next thread: */
				_thr_sched_switch(curthread);

				curthread->data.mutex = NULL;
				if (THR_IN_MUTEXQ(curthread)) {
					THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
					mutex_queue_remove(*m, curthread);
					THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
				}

				/*
				 * The threads priority may have changed while
				 * waiting for the mutex causing a ceiling
				 * violation.
				 */
				ret = curthread->error;
				curthread->error = 0;
			}
			break;

		/* Trap invalid mutex types: */
		default:
			/* Unlock the mutex structure: */
			THR_LOCK_RELEASE(curthread, &(*m)->m_lock);

			/* Return an invalid argument error: */
			ret = EINVAL;
			break;
		}

	} while (((*m)->m_owner != curthread) && (ret == 0) &&
	    (curthread->interrupted == 0) && (curthread->timeout == 0));

	if (ret == 0 && curthread->timeout)
		ret = ETIMEDOUT;

	/*
	 * Check to see if this thread was interrupted and
	 * is still in the mutex queue of waiting threads:
	 */
	if (curthread->interrupted != 0) {
		/* Remove this thread from the mutex queue. */
		THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
		if (THR_IN_SYNCQ(curthread))
			mutex_queue_remove(*m, curthread);
		THR_LOCK_RELEASE(curthread, &(*m)->m_lock);

		/* Check for asynchronous cancellation. */
		if (curthread->continuation != NULL)
			curthread->continuation((void *) curthread);
	}

	/* Return the completion status: */
	return (ret);
}
764
765int
766__pthread_mutex_lock(pthread_mutex_t *m)
767{
768	struct pthread *curthread;
769	int	ret = 0;
770
771	if (_thr_initial == NULL)
772		_libpthread_init(NULL);
773
774	curthread = _get_curthread();
775	if (m == NULL)
776		ret = EINVAL;
777
778	/*
779	 * If the mutex is statically initialized, perform the dynamic
780	 * initialization:
781	 */
782	else if ((*m != NULL) || ((ret = init_static(curthread, m)) == 0))
783		ret = mutex_lock_common(curthread, m, NULL);
784
785	return (ret);
786}
787
788__strong_reference(__pthread_mutex_lock, _thr_mutex_lock);
789
790int
791_pthread_mutex_lock(pthread_mutex_t *m)
792{
793	struct pthread *curthread;
794	int	ret = 0;
795
796	if (_thr_initial == NULL)
797		_libpthread_init(NULL);
798	curthread = _get_curthread();
799
800	if (m == NULL)
801		ret = EINVAL;
802
803	/*
804	 * If the mutex is statically initialized, perform the dynamic
805	 * initialization marking it private (delete safe):
806	 */
807	else if ((*m != NULL) ||
808	    ((ret = init_static_private(curthread, m)) == 0))
809		ret = mutex_lock_common(curthread, m, NULL);
810
811	return (ret);
812}
813
814int
815__pthread_mutex_timedlock(pthread_mutex_t *m,
816	const struct timespec *abs_timeout)
817{
818	struct pthread *curthread;
819	int	ret = 0;
820
821	if (_thr_initial == NULL)
822		_libpthread_init(NULL);
823
824	curthread = _get_curthread();
825	if (m == NULL)
826		ret = EINVAL;
827
828	/*
829	 * If the mutex is statically initialized, perform the dynamic
830	 * initialization:
831	 */
832	else if ((*m != NULL) || ((ret = init_static(curthread, m)) == 0))
833		ret = mutex_lock_common(curthread, m, abs_timeout);
834
835	return (ret);
836}
837
838int
839_pthread_mutex_timedlock(pthread_mutex_t *m,
840	const struct timespec *abs_timeout)
841{
842	struct pthread *curthread;
843	int	ret = 0;
844
845	if (_thr_initial == NULL)
846		_libpthread_init(NULL);
847	curthread = _get_curthread();
848
849	if (m == NULL)
850		ret = EINVAL;
851
852	/*
853	 * If the mutex is statically initialized, perform the dynamic
854	 * initialization marking it private (delete safe):
855	 */
856	else if ((*m != NULL) ||
857	    ((ret = init_static_private(curthread, m)) == 0))
858		ret = mutex_lock_common(curthread, m, abs_timeout);
859
860	return (ret);
861}
862
/*
 * Release a mutex held by the calling thread.  Handoff to the next
 * waiter (if any) happens inside mutex_unlock_common().
 */
int
_pthread_mutex_unlock(pthread_mutex_t *m)
{
	return (mutex_unlock_common(m, /* add reference */ 0));
}
868
869__strong_reference(_pthread_mutex_unlock, _thr_mutex_unlock);
870
/*
 * Unlock a mutex on behalf of a condition-variable wait, bumping
 * m_refcount so the mutex cannot be destroyed while the waiter is
 * blocked (the reference is dropped again in _mutex_cv_lock()).
 */
int
_mutex_cv_unlock(pthread_mutex_t *m)
{
	return (mutex_unlock_common(m, /* add reference */ 1));
}
876
877int
878_mutex_cv_lock(pthread_mutex_t *m)
879{
880	struct  pthread *curthread;
881	int	ret;
882
883	curthread = _get_curthread();
884	if ((ret = _pthread_mutex_lock(m)) == 0) {
885		THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
886		(*m)->m_refcount--;
887		THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
888	}
889	return (ret);
890}
891
892static inline int
893mutex_self_trylock(struct pthread *curthread, pthread_mutex_t m)
894{
895	int	ret = 0;
896
897	switch (m->m_type) {
898	/* case PTHREAD_MUTEX_DEFAULT: */
899	case PTHREAD_MUTEX_ERRORCHECK:
900	case PTHREAD_MUTEX_NORMAL:
901		/*
902		 * POSIX specifies that mutexes should return EDEADLK if a
903		 * recursive lock is detected.
904		 */
905		if (m->m_owner == curthread)
906			ret = EDEADLK;
907		else
908			ret = EBUSY;
909		break;
910
911	case PTHREAD_MUTEX_RECURSIVE:
912		/* Increment the lock count: */
913		m->m_count++;
914		break;
915
916	default:
917		/* Trap invalid mutex types; */
918		ret = EINVAL;
919	}
920
921	return (ret);
922}
923
/*
 * Handle a blocking lock on a mutex the calling thread already owns.
 * Called with the mutex structure lock held; every path except the
 * PTHREAD_MUTEX_NORMAL case returns with that lock still held (the
 * caller releases it).
 */
static inline int
mutex_self_lock(struct pthread *curthread, pthread_mutex_t m)
{
	int ret = 0;

	switch (m->m_type) {
	/* case PTHREAD_MUTEX_DEFAULT: */
	case PTHREAD_MUTEX_ERRORCHECK:
		/*
		 * POSIX specifies that mutexes should return EDEADLK if a
		 * recursive lock is detected.
		 */
		ret = EDEADLK;
		break;

	case PTHREAD_MUTEX_NORMAL:
		/*
		 * What SS2 define as a 'normal' mutex.  Intentionally
		 * deadlock on attempts to get a lock you already own.
		 */

		THR_SCHED_LOCK(curthread, curthread);
		THR_SET_STATE(curthread, PS_DEADLOCK);
		THR_SCHED_UNLOCK(curthread, curthread);

		/* Unlock the mutex structure: */
		THR_LOCK_RELEASE(curthread, &m->m_lock);

		/*
		 * Schedule the next thread.  NOTE(review): nothing
		 * visible here ever wakes a PS_DEADLOCK thread, so
		 * this call appears not to return -- confirm against
		 * the scheduler.
		 */
		_thr_sched_switch(curthread);
		break;

	case PTHREAD_MUTEX_RECURSIVE:
		/* Increment the lock count: */
		m->m_count++;
		break;

	default:
		/* Trap invalid mutex types; */
		ret = EINVAL;
	}

	return (ret);
}
968
/*
 * Common unlock path for all mutex protocols.
 *
 * Returns 0 on success, EINVAL when the mutex is NULL/uninitialized
 * or unowned, and EPERM when the caller is not the owner.  For
 * recursive mutexes a nested unlock merely decrements the count.
 * On the final release the mutex is removed from the owner's queue,
 * the owner's inherited/active priorities are restored (priority
 * protocols only), and the mutex is handed directly to the next
 * waiter; the waiter's KSE mailbox, if any, is woken after the
 * structure lock is dropped.
 *
 * When add_reference is non-zero (the condvar path), m_refcount is
 * bumped before returning so the mutex cannot be destroyed.
 */
static int
mutex_unlock_common(pthread_mutex_t *m, int add_reference)
{
	struct pthread *curthread = _get_curthread();
	struct kse_mailbox *kmbx = NULL;
	int ret = 0;

	if (m == NULL || *m == NULL)
		ret = EINVAL;
	else {
		/* Lock the mutex structure: */
		THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);

		/* Process according to mutex type: */
		switch ((*m)->m_protocol) {
		/* Default POSIX mutex: */
		case PTHREAD_PRIO_NONE:
			/*
			 * Check if the running thread is not the owner of the
			 * mutex:
			 */
			if ((*m)->m_owner != curthread)
				/*
				 * Return an invalid argument error for no
				 * owner and a permission error otherwise:
				 */
				ret = (*m)->m_owner == NULL ? EINVAL : EPERM;

			else if (((*m)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
			    ((*m)->m_count > 0))
				/* Decrement the count: */
				(*m)->m_count--;
			else {
				/*
				 * Clear the count in case this is a recursive
				 * mutex.
				 */
				(*m)->m_count = 0;

				/* Remove the mutex from the threads queue. */
				MUTEX_ASSERT_IS_OWNED(*m);
				TAILQ_REMOVE(&(*m)->m_owner->mutexq,
				    (*m), m_qe);
				MUTEX_INIT_LINK(*m);

				/*
				 * Hand off the mutex to the next waiting
				 * thread:
				 */
				kmbx = mutex_handoff(curthread, *m);
			}
			break;

		/* POSIX priority inheritence mutex: */
		case PTHREAD_PRIO_INHERIT:
			/*
			 * Check if the running thread is not the owner of the
			 * mutex:
			 */
			if ((*m)->m_owner != curthread)
				/*
				 * Return an invalid argument error for no
				 * owner and a permission error otherwise:
				 */
				ret = (*m)->m_owner == NULL ? EINVAL : EPERM;

			else if (((*m)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
			    ((*m)->m_count > 0))
				/* Decrement the count: */
				(*m)->m_count--;
			else {
				/*
				 * Clear the count in case this is recursive
				 * mutex.
				 */
				(*m)->m_count = 0;

				/*
				 * Restore the threads inherited priority and
				 * recompute the active priority (being careful
				 * not to override changes in the threads base
				 * priority subsequent to locking the mutex).
				 */
				THR_SCHED_LOCK(curthread, curthread);
				curthread->inherited_priority =
					(*m)->m_saved_prio;
				curthread->active_priority =
				    MAX(curthread->inherited_priority,
				    curthread->base_priority);

				/*
				 * This thread now owns one less priority mutex.
				 */
				curthread->priority_mutex_count--;
				THR_SCHED_UNLOCK(curthread, curthread);

				/* Remove the mutex from the threads queue. */
				MUTEX_ASSERT_IS_OWNED(*m);
				TAILQ_REMOVE(&(*m)->m_owner->mutexq,
				    (*m), m_qe);
				MUTEX_INIT_LINK(*m);

				/*
				 * Hand off the mutex to the next waiting
				 * thread:
				 */
				kmbx = mutex_handoff(curthread, *m);
			}
			break;

		/* POSIX priority ceiling mutex: */
		case PTHREAD_PRIO_PROTECT:
			/*
			 * Check if the running thread is not the owner of the
			 * mutex:
			 */
			if ((*m)->m_owner != curthread)
				/*
				 * Return an invalid argument error for no
				 * owner and a permission error otherwise:
				 */
				ret = (*m)->m_owner == NULL ? EINVAL : EPERM;

			else if (((*m)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
			    ((*m)->m_count > 0))
				/* Decrement the count: */
				(*m)->m_count--;
			else {
				/*
				 * Clear the count in case this is a recursive
				 * mutex.
				 */
				(*m)->m_count = 0;

				/*
				 * Restore the threads inherited priority and
				 * recompute the active priority (being careful
				 * not to override changes in the threads base
				 * priority subsequent to locking the mutex).
				 */
				THR_SCHED_LOCK(curthread, curthread);
				curthread->inherited_priority =
					(*m)->m_saved_prio;
				curthread->active_priority =
				    MAX(curthread->inherited_priority,
				    curthread->base_priority);

				/*
				 * This thread now owns one less priority mutex.
				 */
				curthread->priority_mutex_count--;
				THR_SCHED_UNLOCK(curthread, curthread);

				/* Remove the mutex from the threads queue. */
				MUTEX_ASSERT_IS_OWNED(*m);
				TAILQ_REMOVE(&(*m)->m_owner->mutexq,
				    (*m), m_qe);
				MUTEX_INIT_LINK(*m);

				/*
				 * Hand off the mutex to the next waiting
				 * thread:
				 */
				kmbx = mutex_handoff(curthread, *m);
			}
			break;

		/* Trap invalid mutex types: */
		default:
			/* Return an invalid argument error: */
			ret = EINVAL;
			break;
		}

		if ((ret == 0) && (add_reference != 0))
			/* Increment the reference count: */
			(*m)->m_refcount++;

		/* Unlock the mutex structure: */
		THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
		/* Wake the new owner's KSE outside the structure lock. */
		if (kmbx != NULL)
			kse_wakeup(kmbx);
	}

	/* Return the completion status: */
	return (ret);
}
1156
1157
/*
 * This function is called when a change in base priority occurs for
 * a thread that is holding or waiting for a priority protection or
 * inheritance mutex.  A change in a thread's base priority can effect
 * changes to the active priorities of other threads and to the ordering
 * of mutex locking by waiting threads.
 *
 * This must be called without the target thread's scheduling lock held.
 */
void
_mutex_notify_priochange(struct pthread *curthread, struct pthread *pthread,
    int propagate_prio)
{
	struct pthread_mutex *m;

	/* Adjust the priorities of any owned priority mutexes: */
	if (pthread->priority_mutex_count > 0) {
		/*
		 * Rescan the mutexes owned by this thread and correct
		 * their priorities to account for this thread's change
		 * in priority.  This has the side effect of changing
		 * the thread's active priority.
		 *
		 * Be sure to lock the first mutex in the list of owned
		 * mutexes.  This acts as a barrier against another
		 * simultaneous call to change the thread's priority
		 * and from the owning thread releasing the mutex.
		 */
		m = TAILQ_FIRST(&pthread->mutexq);
		if (m != NULL) {
			THR_LOCK_ACQUIRE(curthread, &m->m_lock);
			/*
			 * Make sure the thread still owns the lock.
			 * (The list head may have changed while we were
			 * acquiring the mutex lock above.)
			 */
			if (m == TAILQ_FIRST(&pthread->mutexq))
				mutex_rescan_owned(curthread, pthread,
				    /* rescan all owned */ NULL);
			THR_LOCK_RELEASE(curthread, &m->m_lock);
		}
	}

	/*
	 * If this thread is waiting on a priority inheritance mutex,
	 * check for priority adjustments.  A change in priority can
	 * also cause a ceiling violation(*) for a thread waiting on
	 * a priority protection mutex; we don't perform the check here
	 * as it is done in pthread_mutex_unlock.
	 *
	 * (*) It should be noted that a priority change to a thread
	 *     _after_ taking and owning a priority ceiling mutex
	 *     does not affect ownership of that mutex; the ceiling
	 *     priority is only checked before mutex ownership occurs.
	 */
	if (propagate_prio != 0) {
		/*
		 * Lock the thread's scheduling queue.  This is a bit
		 * convoluted; the "in synchronization queue flag" can
		 * only be cleared with both the thread's scheduling and
		 * mutex locks held.  The thread's pointer to the wanted
		 * mutex is guaranteed to be valid during this time.
		 */
		THR_SCHED_LOCK(curthread, pthread);

		if (((pthread->sflags & THR_FLAGS_IN_SYNCQ) == 0) ||
		    ((m = pthread->data.mutex) == NULL))
			/* Not blocked on a mutex; nothing to propagate. */
			THR_SCHED_UNLOCK(curthread, pthread);
		else {
			/*
			 * This thread is currently waiting on a mutex; unlock
			 * the scheduling queue lock and lock the mutex.  We
			 * can't hold both at the same time because the locking
			 * order could cause a deadlock.
			 */
			THR_SCHED_UNLOCK(curthread, pthread);
			THR_LOCK_ACQUIRE(curthread, &m->m_lock);

			/*
			 * Check to make sure this thread is still in the
			 * same state (the lock above can yield the CPU to
			 * another thread or the thread may be running on
			 * another CPU).
			 */
			if (((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) &&
			    (pthread->data.mutex == m)) {
				/*
				 * Remove and reinsert this thread into
				 * the list of waiting threads to preserve
				 * decreasing priority order.
				 */
				mutex_queue_remove(m, pthread);
				mutex_queue_enq(m, pthread);

				if (m->m_protocol == PTHREAD_PRIO_INHERIT)
					/* Adjust priorities: */
					mutex_priority_adjust(curthread, m);
			}

			/* Unlock the mutex structure: */
			THR_LOCK_RELEASE(curthread, &m->m_lock);
		}
	}
}
1260
/*
 * Called when a new thread is added to the mutex waiting queue, or
 * when the priority of a thread already in the mutex waiting queue
 * changes.
 *
 * This must be called with the mutex locked by the current thread.
 */
static void
mutex_priority_adjust(struct pthread *curthread, pthread_mutex_t mutex)
{
	pthread_mutex_t	m = mutex;
	struct pthread	*pthread_next, *pthread = mutex->m_owner;
	int		done, temp_prio;

	/*
	 * Calculate the mutex priority as the maximum of the highest
	 * active priority of any waiting threads and the owning thread's
	 * active priority(*).
	 *
	 * (*) Because the owning thread's current active priority may
	 *     reflect priority inherited from this mutex (and the mutex
	 *     priority may have changed) we must recalculate the active
	 *     priority based on the thread's saved inherited priority
	 *     and its base priority.
	 */
	pthread_next = TAILQ_FIRST(&m->m_queue);  /* should never be NULL */
	temp_prio = MAX(pthread_next->active_priority,
	    MAX(m->m_saved_prio, pthread->base_priority));

	/* See if this mutex really needs adjusting: */
	if (temp_prio == m->m_prio)
		/* No need to propagate the priority: */
		return;

	/* Set new priority of the mutex: */
	m->m_prio = temp_prio;

	/*
	 * Don't unlock the mutex passed in as an argument.  It is
	 * expected to be locked and unlocked by the caller.
	 *
	 * The loop below walks the chain: owner of this mutex may
	 * itself be blocked on another priority inheritance mutex,
	 * whose priority may change in turn, and so on up the chain.
	 */
	done = 1;
	do {
		/*
		 * Save the thread's priority before rescanning the
		 * owned mutexes:
		 */
		temp_prio = pthread->active_priority;

		/*
		 * Fix the priorities for all mutexes held by the owning
		 * thread since taking this mutex.  This also has a
		 * potential side-effect of changing the thread's priority.
		 *
		 * At this point the mutex is locked by the current thread.
		 * The owning thread can't release the mutex until it is
		 * unlocked, so we should be able to safely walk its list
		 * of owned mutexes.
		 */
		mutex_rescan_owned(curthread, pthread, m);

		/*
		 * If this isn't the first time through the loop,
		 * the current mutex needs to be unlocked.
		 */
		if (done == 0)
			THR_LOCK_RELEASE(curthread, &m->m_lock);

		/* Assume we're done unless told otherwise: */
		done = 1;

		/*
		 * If the thread is currently waiting on a mutex, check
		 * to see if the thread's new priority has affected the
		 * priority of the mutex.
		 */
		if ((temp_prio != pthread->active_priority) &&
		    ((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) &&
		    ((m = pthread->data.mutex) != NULL) &&
		    (m->m_protocol == PTHREAD_PRIO_INHERIT)) {
			/* Lock the mutex structure: */
			THR_LOCK_ACQUIRE(curthread, &m->m_lock);

			/*
			 * Make sure the thread is still waiting on the
			 * mutex (it may have been handed off while we
			 * were acquiring the mutex lock above):
			 */
			if (((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) &&
			    (m == pthread->data.mutex)) {
				/*
				 * The priority for this thread has changed.
				 * Remove and reinsert this thread into the
				 * list of waiting threads to preserve
				 * decreasing priority order.
				 */
				mutex_queue_remove(m, pthread);
				mutex_queue_enq(m, pthread);

				/*
				 * Grab the waiting thread with highest
				 * priority:
				 */
				pthread_next = TAILQ_FIRST(&m->m_queue);

				/*
				 * Calculate the mutex priority as the maximum
				 * of the highest active priority of any
				 * waiting threads and the owning thread's
				 * active priority.
				 */
				temp_prio = MAX(pthread_next->active_priority,
				    MAX(m->m_saved_prio,
				    m->m_owner->base_priority));

				if (temp_prio != m->m_prio) {
					/*
					 * The priority needs to be propagated
					 * to the mutex this thread is waiting
					 * on and up to the owner of that mutex.
					 */
					m->m_prio = temp_prio;
					pthread = m->m_owner;

					/* We're not done yet: */
					done = 0;
				}
			}
			/* Only release the mutex if we're done: */
			if (done != 0)
				THR_LOCK_RELEASE(curthread, &m->m_lock);
		}
	} while (done == 0);
}
1394
/*
 * Walk the list of mutexes owned by "pthread", starting after "mutex"
 * (or from the head of the owned list when mutex is NULL), recomputing
 * the priority of each priority inheritance mutex and accumulating the
 * priority the thread inherits from them.  Finally the thread's
 * inherited and active priorities are updated, requeueing the thread
 * in its run queue if it is there and its priority changed.
 */
static void
mutex_rescan_owned(struct pthread *curthread, struct pthread *pthread,
    struct pthread_mutex *mutex)
{
	struct pthread_mutex	*m;
	struct pthread		*pthread_next;
	int			active_prio, inherited_prio;

	/*
	 * Start walking the mutexes the thread has taken since
	 * taking this mutex.
	 */
	if (mutex == NULL) {
		/*
		 * A null mutex means start at the beginning of the owned
		 * mutex list.
		 */
		m = TAILQ_FIRST(&pthread->mutexq);

		/* There is no inherited priority yet. */
		inherited_prio = 0;
	} else {
		/*
		 * The caller wants to start after a specific mutex.  It
		 * is assumed that this mutex is a priority inheritance
		 * mutex and that its priority has been correctly
		 * calculated.
		 */
		m = TAILQ_NEXT(mutex, m_qe);

		/* Start inheriting priority from the specified mutex. */
		inherited_prio = mutex->m_prio;
	}
	active_prio = MAX(inherited_prio, pthread->base_priority);

	for (; m != NULL; m = TAILQ_NEXT(m, m_qe)) {
		/*
		 * We only want to deal with priority inheritance
		 * mutexes.  This might be optimized by only placing
		 * priority inheritance mutexes into the owned mutex
		 * list, but it may prove to be useful having all
		 * owned mutexes in this list.  Consider a thread
		 * exiting while holding mutexes...
		 */
		if (m->m_protocol == PTHREAD_PRIO_INHERIT) {
			/*
			 * Fix the owner's saved (inherited) priority to
			 * reflect the priority of the previous mutex.
			 */
			m->m_saved_prio = inherited_prio;

			if ((pthread_next = TAILQ_FIRST(&m->m_queue)) != NULL)
				/* Recalculate the priority of the mutex: */
				m->m_prio = MAX(active_prio,
				     pthread_next->active_priority);
			else
				m->m_prio = active_prio;

			/* Recalculate new inherited and active priorities: */
			inherited_prio = m->m_prio;
			active_prio = MAX(m->m_prio, pthread->base_priority);
		}
	}

	/*
	 * Fix the thread's inherited priority and recalculate its
	 * active priority.
	 */
	pthread->inherited_priority = inherited_prio;
	active_prio = MAX(inherited_prio, pthread->base_priority);

	if (active_prio != pthread->active_priority) {
		/* Lock the thread's scheduling queue: */
		THR_SCHED_LOCK(curthread, pthread);

		if ((pthread->flags & THR_FLAGS_IN_RUNQ) == 0) {
			/*
			 * This thread is not in a run queue.  Just set
			 * its active priority.
			 */
			pthread->active_priority = active_prio;
		}
		else {
			/*
			 * This thread is in a run queue.  Remove it from
			 * the queue before changing its priority:
			 */
			THR_RUNQ_REMOVE(pthread);

			/*
			 * POSIX states that if the priority is being
			 * lowered, the thread must be inserted at the
			 * head of the queue for its priority if it owns
			 * any priority protection or inheritance mutexes.
			 */
			if ((active_prio < pthread->active_priority) &&
			    (pthread->priority_mutex_count > 0)) {
				/* Set the new active priority. */
				pthread->active_priority = active_prio;

				THR_RUNQ_INSERT_HEAD(pthread);
			} else {
				/* Set the new active priority. */
				pthread->active_priority = active_prio;

				THR_RUNQ_INSERT_TAIL(pthread);
			}
		}
		THR_SCHED_UNLOCK(curthread, pthread);
	}
}
1506
1507void
1508_mutex_unlock_private(pthread_t pthread)
1509{
1510	struct pthread_mutex	*m, *m_next;
1511
1512	for (m = TAILQ_FIRST(&pthread->mutexq); m != NULL; m = m_next) {
1513		m_next = TAILQ_NEXT(m, m_qe);
1514		if ((m->m_flags & MUTEX_FLAGS_PRIVATE) != 0)
1515			pthread_mutex_unlock(&m);
1516	}
1517}
1518
1519/*
1520 * This is called by the current thread when it wants to back out of a
1521 * mutex_lock in order to run a signal handler.
1522 */
void
_mutex_lock_backout(struct pthread *curthread)
{
	struct pthread_mutex *m;

	if ((curthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) {
		/*
		 * Any other thread may clear the "in sync queue flag",
		 * but only the current thread can clear the pointer
		 * to the mutex.  So if the flag is set, we can
		 * guarantee that the pointer to the mutex is valid.
		 * The only problem may be if the mutex is destroyed
		 * out from under us, but that should be considered
		 * an application bug.
		 */
		m = curthread->data.mutex;

		/* Lock the mutex structure: */
		THR_LOCK_ACQUIRE(curthread, &m->m_lock);


		/*
		 * Check to make sure this thread doesn't already own
		 * the mutex.  Since mutexes are unlocked with direct
		 * handoffs, it is possible the previous owner gave it
		 * to us after we checked the sync queue flag and before
		 * we locked the mutex structure.
		 */
		if (m->m_owner == curthread) {
			/* We lost the race: we now own the mutex but no
			 * longer want it, so do a full unlock. */
			THR_LOCK_RELEASE(curthread, &m->m_lock);
			mutex_unlock_common(&m, /* add_reference */ 0);
		} else {
			/*
			 * Remove ourselves from the mutex queue and
			 * clear the pointer to the mutex.  We may no
			 * longer be in the mutex queue, but the removal
			 * function will DTRT.
			 */
			mutex_queue_remove(m, curthread);
			curthread->data.mutex = NULL;
			THR_LOCK_RELEASE(curthread, &m->m_lock);
		}
	}
}
1567
1568/*
1569 * Dequeue a waiting thread from the head of a mutex queue in descending
1570 * priority order.
1571 *
1572 * In order to properly dequeue a thread from the mutex queue and
1573 * make it runnable without the possibility of errant wakeups, it
1574 * is necessary to lock the thread's scheduling queue while also
1575 * holding the mutex lock.
1576 */
static struct kse_mailbox *
mutex_handoff(struct pthread *curthread, struct pthread_mutex *mutex)
{
	struct kse_mailbox *kmbx = NULL;
	struct pthread *pthread;

	/* Keep dequeueing until we find a valid thread: */
	mutex->m_owner = NULL;
	pthread = TAILQ_FIRST(&mutex->m_queue);
	while (pthread != NULL) {
		/* Take the thread's scheduling lock: */
		THR_SCHED_LOCK(curthread, pthread);

		/* Remove the thread from the mutex queue: */
		TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
		pthread->sflags &= ~THR_FLAGS_IN_SYNCQ;

		/*
		 * Give ownership to the dequeued thread according to
		 * the mutex protocol:
		 */
		switch (mutex->m_protocol) {
		case PTHREAD_PRIO_NONE:
			/*
			 * Assign the new owner and add the mutex to the
			 * thread's list of owned mutexes.
			 */
			mutex->m_owner = pthread;
			TAILQ_INSERT_TAIL(&pthread->mutexq, mutex, m_qe);
			break;

		case PTHREAD_PRIO_INHERIT:
			/*
			 * Assign the new owner and add the mutex to the
			 * thread's list of owned mutexes.
			 */
			mutex->m_owner = pthread;
			TAILQ_INSERT_TAIL(&pthread->mutexq, mutex, m_qe);

			/* Track number of priority mutexes owned: */
			pthread->priority_mutex_count++;

			/*
			 * Set the priority of the mutex.  Since our waiting
			 * threads are in descending priority order, the
			 * priority of the mutex becomes the active priority
			 * of the thread we just dequeued.
			 */
			mutex->m_prio = pthread->active_priority;

			/* Save the owning thread's inherited priority: */
			mutex->m_saved_prio = pthread->inherited_priority;

			/*
			 * The owning thread's inherited priority now becomes
			 * its active priority (the priority of the mutex).
			 */
			pthread->inherited_priority = mutex->m_prio;
			break;

		case PTHREAD_PRIO_PROTECT:
			if (pthread->active_priority > mutex->m_prio) {
				/*
				 * Either the mutex ceiling priority has
				 * been lowered and/or this thread's priority
				 * has been raised subsequent to the thread
				 * being queued on the waiting list.
				 */
				pthread->error = EINVAL;
			}
			else {
				/*
				 * Assign the new owner and add the mutex
				 * to the thread's list of owned mutexes.
				 */
				mutex->m_owner = pthread;
				TAILQ_INSERT_TAIL(&pthread->mutexq,
				    mutex, m_qe);

				/* Track number of priority mutexes owned: */
				pthread->priority_mutex_count++;

				/*
				 * Save the owning thread's inherited
				 * priority:
				 */
				mutex->m_saved_prio =
				    pthread->inherited_priority;

				/*
				 * The owning thread inherits the ceiling
				 * priority of the mutex and executes at
				 * that priority:
				 */
				pthread->inherited_priority = mutex->m_prio;
				pthread->active_priority = mutex->m_prio;

			}
			break;
		}

		/* Make the thread runnable and unlock the scheduling queue: */
		kmbx = _thr_setrunnable_unlocked(pthread);

		/* Add a preemption point. */
		if ((curthread->kseg == pthread->kseg) &&
		    (pthread->active_priority > curthread->active_priority))
			curthread->critical_yield = 1;

		THR_SCHED_UNLOCK(curthread, pthread);
		if (mutex->m_owner == pthread)
			/* We're done; a valid owner was found. */
			break;
		else
			/* Get the next thread from the waiting queue: */
			pthread = TAILQ_NEXT(pthread, sqe);
	}

	if ((pthread == NULL) && (mutex->m_protocol == PTHREAD_PRIO_INHERIT))
		/* This mutex has no priority: */
		mutex->m_prio = 0;
	return (kmbx);
}
1700
1701/*
1702 * Dequeue a waiting thread from the head of a mutex queue in descending
1703 * priority order.
1704 */
1705static inline pthread_t
1706mutex_queue_deq(struct pthread_mutex *mutex)
1707{
1708	pthread_t pthread;
1709
1710	while ((pthread = TAILQ_FIRST(&mutex->m_queue)) != NULL) {
1711		TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
1712		pthread->sflags &= ~THR_FLAGS_IN_SYNCQ;
1713
1714		/*
1715		 * Only exit the loop if the thread hasn't been
1716		 * cancelled.
1717		 */
1718		if (pthread->interrupted == 0)
1719			break;
1720	}
1721
1722	return (pthread);
1723}
1724
1725/*
1726 * Remove a waiting thread from a mutex queue in descending priority order.
1727 */
1728static inline void
1729mutex_queue_remove(pthread_mutex_t mutex, pthread_t pthread)
1730{
1731	if ((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) {
1732		TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
1733		pthread->sflags &= ~THR_FLAGS_IN_SYNCQ;
1734	}
1735}
1736
1737/*
1738 * Enqueue a waiting thread to a queue in descending priority order.
1739 */
1740static inline void
1741mutex_queue_enq(pthread_mutex_t mutex, pthread_t pthread)
1742{
1743	pthread_t tid = TAILQ_LAST(&mutex->m_queue, mutex_head);
1744
1745	THR_ASSERT_NOT_IN_SYNCQ(pthread);
1746	/*
1747	 * For the common case of all threads having equal priority,
1748	 * we perform a quick check against the priority of the thread
1749	 * at the tail of the queue.
1750	 */
1751	if ((tid == NULL) || (pthread->active_priority <= tid->active_priority))
1752		TAILQ_INSERT_TAIL(&mutex->m_queue, pthread, sqe);
1753	else {
1754		tid = TAILQ_FIRST(&mutex->m_queue);
1755		while (pthread->active_priority <= tid->active_priority)
1756			tid = TAILQ_NEXT(tid, sqe);
1757		TAILQ_INSERT_BEFORE(tid, pthread, sqe);
1758	}
1759	pthread->sflags |= THR_FLAGS_IN_SYNCQ;
1760}
1761