thr_mutex.c revision 157700
1/*
2 * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 *    must display the following acknowledgement:
15 *	This product includes software developed by John Birrell.
16 * 4. Neither the name of the author nor the names of any co-contributors
17 *    may be used to endorse or promote products derived from this software
18 *    without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * SUCH DAMAGE.
31 *
32 * $FreeBSD: head/lib/libkse/thread/thr_mutex.c 157700 2006-04-13 03:09:34Z delphij $
33 */
34#include <stdlib.h>
35#include <errno.h>
36#include <string.h>
37#include <sys/param.h>
38#include <sys/queue.h>
39#include <pthread.h>
40#include "thr_private.h"
41
#if defined(_PTHREADS_INVARIANTS)
/*
 * Debugging macros, active only when the library is built with
 * _PTHREADS_INVARIANTS.  They validate a mutex's linkage on the owning
 * thread's mutex queue and PANIC on inconsistency.
 *
 * Note: THR_ASSERT_NOT_IN_SYNCQ previously ended with "} while (0);".
 * The trailing semicolon defeats the do/while(0) idiom and breaks use
 * in an unbraced if/else; it has been removed for consistency with the
 * other macros.
 */
#define MUTEX_INIT_LINK(m) 		do {		\
	(m)->m_qe.tqe_prev = NULL;			\
	(m)->m_qe.tqe_next = NULL;			\
} while (0)
#define MUTEX_ASSERT_IS_OWNED(m)	do {		\
	if ((m)->m_qe.tqe_prev == NULL)			\
		PANIC("mutex is not on list");		\
} while (0)
#define MUTEX_ASSERT_NOT_OWNED(m)	do {		\
	if (((m)->m_qe.tqe_prev != NULL) ||		\
	    ((m)->m_qe.tqe_next != NULL))		\
		PANIC("mutex is on list");		\
} while (0)
#define	THR_ASSERT_NOT_IN_SYNCQ(thr)	do {		\
	THR_ASSERT(((thr)->sflags & THR_FLAGS_IN_SYNCQ) == 0, \
	    "thread in syncq when it shouldn't be.");	\
} while (0)
#else
#define MUTEX_INIT_LINK(m)
#define MUTEX_ASSERT_IS_OWNED(m)
#define MUTEX_ASSERT_NOT_OWNED(m)
#define	THR_ASSERT_NOT_IN_SYNCQ(thr)
#endif

/* Nonzero if the thread is linked on a mutex/condvar wait queue. */
#define THR_IN_MUTEXQ(thr)	(((thr)->sflags & THR_FLAGS_IN_SYNCQ) != 0)
/* Destroy the low-level lock and release the mutex storage. */
#define	MUTEX_DESTROY(m) do {		\
	_lock_destroy(&(m)->m_lock);	\
	free(m);			\
} while (0)
72
73
/*
 * Prototypes for the static helpers defined later in this file.
 */
/* Transfer mutex ownership to the highest-priority waiter, if any. */
static struct kse_mailbox *mutex_handoff(struct pthread *,
			    struct pthread_mutex *);
/* Handle (try)lock attempts on a mutex the caller already owns. */
static inline int	mutex_self_trylock(struct pthread *, pthread_mutex_t);
static inline int	mutex_self_lock(struct pthread *, pthread_mutex_t);
static int		mutex_unlock_common(pthread_mutex_t *, int);
/* Priority-protocol bookkeeping for PRIO_INHERIT/PRIO_PROTECT mutexes. */
static void		mutex_priority_adjust(struct pthread *, pthread_mutex_t);
static void		mutex_rescan_owned (struct pthread *, struct pthread *,
			    struct pthread_mutex *);
/* Waiter-queue primitives (dequeue head, remove, priority enqueue). */
static inline pthread_t	mutex_queue_deq(pthread_mutex_t);
static inline void	mutex_queue_remove(pthread_mutex_t, pthread_t);
static inline void	mutex_queue_enq(pthread_mutex_t, pthread_t);
/* Signal-handler backout hook used while blocked on a mutex. */
static void		mutex_lock_backout(void *arg);
89
/*
 * Attribute object used when lazily initializing statically-allocated
 * mutexes for library-internal use.  Presumably the initializer marks
 * the mutex private/delete-safe — see PTHREAD_MUTEXATTR_STATIC_INITIALIZER
 * in thr_private.h to confirm.
 */
static struct pthread_mutex_attr	static_mutex_attr =
    PTHREAD_MUTEXATTR_STATIC_INITIALIZER;
static pthread_mutexattr_t		static_mattr = &static_mutex_attr;
93
/*
 * libpthread 1.0 symbol-versioning compatibility: the underscored
 * variants are version-private, the plain names are the default
 * (publicly versioned) symbols.
 */
LT10_COMPAT_PRIVATE(__pthread_mutex_init);
LT10_COMPAT_PRIVATE(_pthread_mutex_init);
LT10_COMPAT_DEFAULT(pthread_mutex_init);
LT10_COMPAT_PRIVATE(__pthread_mutex_lock);
LT10_COMPAT_PRIVATE(_pthread_mutex_lock);
LT10_COMPAT_DEFAULT(pthread_mutex_lock);
LT10_COMPAT_PRIVATE(__pthread_mutex_timedlock);
LT10_COMPAT_PRIVATE(_pthread_mutex_timedlock);
LT10_COMPAT_DEFAULT(pthread_mutex_timedlock);
LT10_COMPAT_PRIVATE(__pthread_mutex_trylock);
LT10_COMPAT_PRIVATE(_pthread_mutex_trylock);
LT10_COMPAT_DEFAULT(pthread_mutex_trylock);
LT10_COMPAT_PRIVATE(_pthread_mutex_destroy);
LT10_COMPAT_DEFAULT(pthread_mutex_destroy);
LT10_COMPAT_PRIVATE(_pthread_mutex_unlock);
LT10_COMPAT_DEFAULT(pthread_mutex_unlock);
110
/*
 * Single underscore versions provided for libc internal usage; the
 * double-underscore entry points become the public symbols via weak
 * references.
 */
__weak_reference(__pthread_mutex_init, pthread_mutex_init);
__weak_reference(__pthread_mutex_lock, pthread_mutex_lock);
__weak_reference(__pthread_mutex_timedlock, pthread_mutex_timedlock);
__weak_reference(__pthread_mutex_trylock, pthread_mutex_trylock);

/* No difference between libc and application usage of these: */
__weak_reference(_pthread_mutex_destroy, pthread_mutex_destroy);
__weak_reference(_pthread_mutex_unlock, pthread_mutex_unlock);
120
121
122
123int
124__pthread_mutex_init(pthread_mutex_t *mutex,
125    const pthread_mutexattr_t *mutex_attr)
126{
127	struct pthread_mutex *pmutex;
128	enum pthread_mutextype type;
129	int		protocol;
130	int		ceiling;
131	int		flags;
132	int		ret = 0;
133
134	if (mutex == NULL)
135		ret = EINVAL;
136
137	/* Check if default mutex attributes: */
138	else if (mutex_attr == NULL || *mutex_attr == NULL) {
139		/* Default to a (error checking) POSIX mutex: */
140		type = PTHREAD_MUTEX_ERRORCHECK;
141		protocol = PTHREAD_PRIO_NONE;
142		ceiling = THR_MAX_PRIORITY;
143		flags = 0;
144	}
145
146	/* Check mutex type: */
147	else if (((*mutex_attr)->m_type < PTHREAD_MUTEX_ERRORCHECK) ||
148	    ((*mutex_attr)->m_type >= PTHREAD_MUTEX_TYPE_MAX))
149		/* Return an invalid argument error: */
150		ret = EINVAL;
151
152	/* Check mutex protocol: */
153	else if (((*mutex_attr)->m_protocol < PTHREAD_PRIO_NONE) ||
154	    ((*mutex_attr)->m_protocol > PTHREAD_MUTEX_RECURSIVE))
155		/* Return an invalid argument error: */
156		ret = EINVAL;
157
158	else {
159		/* Use the requested mutex type and protocol: */
160		type = (*mutex_attr)->m_type;
161		protocol = (*mutex_attr)->m_protocol;
162		ceiling = (*mutex_attr)->m_ceiling;
163		flags = (*mutex_attr)->m_flags;
164	}
165
166	/* Check no errors so far: */
167	if (ret == 0) {
168		if ((pmutex = (pthread_mutex_t)
169		    malloc(sizeof(struct pthread_mutex))) == NULL)
170			ret = ENOMEM;
171		else if (_lock_init(&pmutex->m_lock, LCK_ADAPTIVE,
172		    _thr_lock_wait, _thr_lock_wakeup) != 0) {
173			free(pmutex);
174			*mutex = NULL;
175			ret = ENOMEM;
176		} else {
177			/* Set the mutex flags: */
178			pmutex->m_flags = flags;
179
180			/* Process according to mutex type: */
181			switch (type) {
182			/* case PTHREAD_MUTEX_DEFAULT: */
183			case PTHREAD_MUTEX_ERRORCHECK:
184			case PTHREAD_MUTEX_NORMAL:
185				/* Nothing to do here. */
186				break;
187
188			/* Single UNIX Spec 2 recursive mutex: */
189			case PTHREAD_MUTEX_RECURSIVE:
190				/* Reset the mutex count: */
191				pmutex->m_count = 0;
192				break;
193
194			/* Trap invalid mutex types: */
195			default:
196				/* Return an invalid argument error: */
197				ret = EINVAL;
198				break;
199			}
200			if (ret == 0) {
201				/* Initialise the rest of the mutex: */
202				TAILQ_INIT(&pmutex->m_queue);
203				pmutex->m_flags |= MUTEX_FLAGS_INITED;
204				pmutex->m_owner = NULL;
205				pmutex->m_type = type;
206				pmutex->m_protocol = protocol;
207				pmutex->m_refcount = 0;
208				if (protocol == PTHREAD_PRIO_PROTECT)
209					pmutex->m_prio = ceiling;
210				else
211					pmutex->m_prio = -1;
212				pmutex->m_saved_prio = 0;
213				MUTEX_INIT_LINK(pmutex);
214				*mutex = pmutex;
215			} else {
216				/* Free the mutex lock structure: */
217				MUTEX_DESTROY(pmutex);
218				*mutex = NULL;
219			}
220		}
221	}
222	/* Return the completion status: */
223	return (ret);
224}
225
226int
227_pthread_mutex_init(pthread_mutex_t *mutex,
228    const pthread_mutexattr_t *mutex_attr)
229{
230	struct pthread_mutex_attr mattr, *mattrp;
231
232	if ((mutex_attr == NULL) || (*mutex_attr == NULL))
233		return (__pthread_mutex_init(mutex, &static_mattr));
234	else {
235		mattr = **mutex_attr;
236		mattr.m_flags |= MUTEX_FLAGS_PRIVATE;
237		mattrp = &mattr;
238		return (__pthread_mutex_init(mutex, &mattrp));
239	}
240}
241
242void
243_thr_mutex_reinit(pthread_mutex_t *mutex)
244{
245	_lock_reinit(&(*mutex)->m_lock, LCK_ADAPTIVE,
246	    _thr_lock_wait, _thr_lock_wakeup);
247	TAILQ_INIT(&(*mutex)->m_queue);
248	(*mutex)->m_owner = NULL;
249	(*mutex)->m_count = 0;
250	(*mutex)->m_refcount = 0;
251	(*mutex)->m_prio = 0;
252	(*mutex)->m_saved_prio = 0;
253}
254
255int
256_pthread_mutex_destroy(pthread_mutex_t *mutex)
257{
258	struct pthread	*curthread = _get_curthread();
259	pthread_mutex_t m;
260	int ret = 0;
261
262	if (mutex == NULL || *mutex == NULL)
263		ret = EINVAL;
264	else {
265		/* Lock the mutex structure: */
266		THR_LOCK_ACQUIRE(curthread, &(*mutex)->m_lock);
267
268		/*
269		 * Check to see if this mutex is in use:
270		 */
271		if (((*mutex)->m_owner != NULL) ||
272		    (!TAILQ_EMPTY(&(*mutex)->m_queue)) ||
273		    ((*mutex)->m_refcount != 0)) {
274			ret = EBUSY;
275
276			/* Unlock the mutex structure: */
277			THR_LOCK_RELEASE(curthread, &(*mutex)->m_lock);
278		} else {
279			/*
280			 * Save a pointer to the mutex so it can be free'd
281			 * and set the caller's pointer to NULL:
282			 */
283			m = *mutex;
284			*mutex = NULL;
285
286			/* Unlock the mutex structure: */
287			THR_LOCK_RELEASE(curthread, &m->m_lock);
288
289			/*
290			 * Free the memory allocated for the mutex
291			 * structure:
292			 */
293			MUTEX_ASSERT_NOT_OWNED(m);
294			MUTEX_DESTROY(m);
295		}
296	}
297
298	/* Return the completion status: */
299	return (ret);
300}
301
302static int
303init_static(struct pthread *thread, pthread_mutex_t *mutex)
304{
305	int ret;
306
307	THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);
308
309	if (*mutex == NULL)
310		ret = pthread_mutex_init(mutex, NULL);
311	else
312		ret = 0;
313
314	THR_LOCK_RELEASE(thread, &_mutex_static_lock);
315
316	return (ret);
317}
318
319static int
320init_static_private(struct pthread *thread, pthread_mutex_t *mutex)
321{
322	int ret;
323
324	THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);
325
326	if (*mutex == NULL)
327		ret = pthread_mutex_init(mutex, &static_mattr);
328	else
329		ret = 0;
330
331	THR_LOCK_RELEASE(thread, &_mutex_static_lock);
332
333	return (ret);
334}
335
/*
 * Common back end for the pthread_mutex_trylock() entry points:
 * attempt to acquire *mutex without blocking.
 *
 * Returns 0 on success, EBUSY when another thread owns the mutex (or
 * on a non-recursive self-lock, via mutex_self_trylock()), EINVAL for
 * an unknown protocol or a priority-ceiling violation.
 */
static int
mutex_trylock_common(struct pthread *curthread, pthread_mutex_t *mutex)
{
	int private;
	int ret = 0;

	THR_ASSERT((mutex != NULL) && (*mutex != NULL),
	    "Uninitialized mutex in pthread_mutex_trylock_basic");

	/* Lock the mutex structure: */
	THR_LOCK_ACQUIRE(curthread, &(*mutex)->m_lock);
	/* Remember whether this is a library-internal (private) mutex. */
	private = (*mutex)->m_flags & MUTEX_FLAGS_PRIVATE;

	/*
	 * If the mutex was statically allocated, properly
	 * initialize the tail queue.
	 */
	if (((*mutex)->m_flags & MUTEX_FLAGS_INITED) == 0) {
		TAILQ_INIT(&(*mutex)->m_queue);
		MUTEX_INIT_LINK(*mutex);
		(*mutex)->m_flags |= MUTEX_FLAGS_INITED;
	}

	/* Process according to mutex type: */
	switch ((*mutex)->m_protocol) {
	/* Default POSIX mutex: */
	case PTHREAD_PRIO_NONE:
		/* Check if this mutex is not locked: */
		if ((*mutex)->m_owner == NULL) {
			/* Lock the mutex for the running thread: */
			(*mutex)->m_owner = curthread;

			/* Add to the list of owned mutexes: */
			MUTEX_ASSERT_NOT_OWNED(*mutex);
			TAILQ_INSERT_TAIL(&curthread->mutexq,
			    (*mutex), m_qe);
		} else if ((*mutex)->m_owner == curthread)
			ret = mutex_self_trylock(curthread, *mutex);
		else
			/* Return a busy error: */
			ret = EBUSY;
		break;

	/* POSIX priority inheritence mutex: */
	case PTHREAD_PRIO_INHERIT:
		/* Check if this mutex is not locked: */
		if ((*mutex)->m_owner == NULL) {
			/* Lock the mutex for the running thread: */
			(*mutex)->m_owner = curthread;

			THR_SCHED_LOCK(curthread, curthread);
			/* Track number of priority mutexes owned: */
			curthread->priority_mutex_count++;

			/*
			 * The mutex takes on the attributes of the
			 * running thread when there are no waiters.
			 */
			(*mutex)->m_prio = curthread->active_priority;
			(*mutex)->m_saved_prio =
			    curthread->inherited_priority;
			curthread->inherited_priority = (*mutex)->m_prio;
			THR_SCHED_UNLOCK(curthread, curthread);

			/* Add to the list of owned mutexes: */
			MUTEX_ASSERT_NOT_OWNED(*mutex);
			TAILQ_INSERT_TAIL(&curthread->mutexq,
			    (*mutex), m_qe);
		} else if ((*mutex)->m_owner == curthread)
			ret = mutex_self_trylock(curthread, *mutex);
		else
			/* Return a busy error: */
			ret = EBUSY;
		break;

	/* POSIX priority protection mutex: */
	case PTHREAD_PRIO_PROTECT:
		/* Check for a priority ceiling violation: */
		if (curthread->active_priority > (*mutex)->m_prio)
			ret = EINVAL;

		/* Check if this mutex is not locked: */
		else if ((*mutex)->m_owner == NULL) {
			/* Lock the mutex for the running thread: */
			(*mutex)->m_owner = curthread;

			THR_SCHED_LOCK(curthread, curthread);
			/* Track number of priority mutexes owned: */
			curthread->priority_mutex_count++;

			/*
			 * The running thread inherits the ceiling
			 * priority of the mutex and executes at that
			 * priority.
			 */
			curthread->active_priority = (*mutex)->m_prio;
			(*mutex)->m_saved_prio =
			    curthread->inherited_priority;
			curthread->inherited_priority =
			    (*mutex)->m_prio;
			THR_SCHED_UNLOCK(curthread, curthread);
			/* Add to the list of owned mutexes: */
			MUTEX_ASSERT_NOT_OWNED(*mutex);
			TAILQ_INSERT_TAIL(&curthread->mutexq,
			    (*mutex), m_qe);
		} else if ((*mutex)->m_owner == curthread)
			ret = mutex_self_trylock(curthread, *mutex);
		else
			/* Return a busy error: */
			ret = EBUSY;
		break;

	/* Trap invalid mutex types: */
	default:
		/* Return an invalid argument error: */
		ret = EINVAL;
		break;
	}

	/*
	 * On success of a private mutex, enter a critical region.
	 * NOTE(review): THR_CRITICAL_ENTER presumably defers signal
	 * handling/cancellation while the internal mutex is held —
	 * confirm against thr_private.h.
	 */
	if (ret == 0 && private)
		THR_CRITICAL_ENTER(curthread);

	/* Unlock the mutex structure: */
	THR_LOCK_RELEASE(curthread, &(*mutex)->m_lock);

	/* Return the completion status: */
	return (ret);
}
464
465int
466__pthread_mutex_trylock(pthread_mutex_t *mutex)
467{
468	struct pthread *curthread = _get_curthread();
469	int ret = 0;
470
471	if (mutex == NULL)
472		ret = EINVAL;
473
474	/*
475	 * If the mutex is statically initialized, perform the dynamic
476	 * initialization:
477	 */
478	else if ((*mutex != NULL) ||
479	    ((ret = init_static(curthread, mutex)) == 0))
480		ret = mutex_trylock_common(curthread, mutex);
481
482	return (ret);
483}
484
485int
486_pthread_mutex_trylock(pthread_mutex_t *mutex)
487{
488	struct pthread	*curthread = _get_curthread();
489	int	ret = 0;
490
491	if (mutex == NULL)
492		ret = EINVAL;
493
494	/*
495	 * If the mutex is statically initialized, perform the dynamic
496	 * initialization marking the mutex private (delete safe):
497	 */
498	else if ((*mutex != NULL) ||
499	    ((ret = init_static_private(curthread, mutex)) == 0))
500		ret = mutex_trylock_common(curthread, mutex);
501
502	return (ret);
503}
504
505static int
506mutex_lock_common(struct pthread *curthread, pthread_mutex_t *m,
507	const struct timespec * abstime)
508{
509	int	private;
510	int	ret = 0;
511
512	THR_ASSERT((m != NULL) && (*m != NULL),
513	    "Uninitialized mutex in pthread_mutex_trylock_basic");
514
515	if (abstime != NULL && (abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
516	    abstime->tv_nsec >= 1000000000))
517		return (EINVAL);
518
519	/* Reset the interrupted flag: */
520	curthread->interrupted = 0;
521	curthread->timeout = 0;
522	curthread->wakeup_time.tv_sec = -1;
523
524	private = (*m)->m_flags & MUTEX_FLAGS_PRIVATE;
525
526	/*
527	 * Enter a loop waiting to become the mutex owner.  We need a
528	 * loop in case the waiting thread is interrupted by a signal
529	 * to execute a signal handler.  It is not (currently) possible
530	 * to remain in the waiting queue while running a handler.
531	 * Instead, the thread is interrupted and backed out of the
532	 * waiting queue prior to executing the signal handler.
533	 */
534	do {
535		/* Lock the mutex structure: */
536		THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
537
538		/*
539		 * If the mutex was statically allocated, properly
540		 * initialize the tail queue.
541		 */
542		if (((*m)->m_flags & MUTEX_FLAGS_INITED) == 0) {
543			TAILQ_INIT(&(*m)->m_queue);
544			(*m)->m_flags |= MUTEX_FLAGS_INITED;
545			MUTEX_INIT_LINK(*m);
546		}
547
548		/* Process according to mutex type: */
549		switch ((*m)->m_protocol) {
550		/* Default POSIX mutex: */
551		case PTHREAD_PRIO_NONE:
552			if ((*m)->m_owner == NULL) {
553				/* Lock the mutex for this thread: */
554				(*m)->m_owner = curthread;
555
556				/* Add to the list of owned mutexes: */
557				MUTEX_ASSERT_NOT_OWNED(*m);
558				TAILQ_INSERT_TAIL(&curthread->mutexq,
559				    (*m), m_qe);
560				if (private)
561					THR_CRITICAL_ENTER(curthread);
562
563				/* Unlock the mutex structure: */
564				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
565			} else if ((*m)->m_owner == curthread) {
566				ret = mutex_self_lock(curthread, *m);
567
568				/* Unlock the mutex structure: */
569				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
570			} else {
571				/*
572				 * Join the queue of threads waiting to lock
573				 * the mutex and save a pointer to the mutex.
574				 */
575				mutex_queue_enq(*m, curthread);
576				curthread->data.mutex = *m;
577				curthread->sigbackout = mutex_lock_backout;
578				/*
579				 * This thread is active and is in a critical
580				 * region (holding the mutex lock); we should
581				 * be able to safely set the state.
582				 */
583				THR_SCHED_LOCK(curthread, curthread);
584				/* Set the wakeup time: */
585				if (abstime) {
586					curthread->wakeup_time.tv_sec =
587						abstime->tv_sec;
588					curthread->wakeup_time.tv_nsec =
589						abstime->tv_nsec;
590				}
591
592				THR_SET_STATE(curthread, PS_MUTEX_WAIT);
593				THR_SCHED_UNLOCK(curthread, curthread);
594
595				/* Unlock the mutex structure: */
596				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
597
598				/* Schedule the next thread: */
599				_thr_sched_switch(curthread);
600
601				if (THR_IN_MUTEXQ(curthread)) {
602					THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
603					mutex_queue_remove(*m, curthread);
604					THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
605				}
606				/*
607				 * Only clear these after assuring the
608				 * thread is dequeued.
609				 */
610				curthread->data.mutex = NULL;
611				curthread->sigbackout = NULL;
612			}
613			break;
614
615		/* POSIX priority inheritence mutex: */
616		case PTHREAD_PRIO_INHERIT:
617			/* Check if this mutex is not locked: */
618			if ((*m)->m_owner == NULL) {
619				/* Lock the mutex for this thread: */
620				(*m)->m_owner = curthread;
621
622				THR_SCHED_LOCK(curthread, curthread);
623				/* Track number of priority mutexes owned: */
624				curthread->priority_mutex_count++;
625
626				/*
627				 * The mutex takes on attributes of the
628				 * running thread when there are no waiters.
629				 * Make sure the thread's scheduling lock is
630				 * held while priorities are adjusted.
631				 */
632				(*m)->m_prio = curthread->active_priority;
633				(*m)->m_saved_prio =
634				    curthread->inherited_priority;
635				curthread->inherited_priority = (*m)->m_prio;
636				THR_SCHED_UNLOCK(curthread, curthread);
637
638				/* Add to the list of owned mutexes: */
639				MUTEX_ASSERT_NOT_OWNED(*m);
640				TAILQ_INSERT_TAIL(&curthread->mutexq,
641				    (*m), m_qe);
642				if (private)
643					THR_CRITICAL_ENTER(curthread);
644
645				/* Unlock the mutex structure: */
646				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
647			} else if ((*m)->m_owner == curthread) {
648				ret = mutex_self_lock(curthread, *m);
649
650				/* Unlock the mutex structure: */
651				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
652			} else {
653				/*
654				 * Join the queue of threads waiting to lock
655				 * the mutex and save a pointer to the mutex.
656				 */
657				mutex_queue_enq(*m, curthread);
658				curthread->data.mutex = *m;
659				curthread->sigbackout = mutex_lock_backout;
660
661				/*
662				 * This thread is active and is in a critical
663				 * region (holding the mutex lock); we should
664				 * be able to safely set the state.
665				 */
666				if (curthread->active_priority > (*m)->m_prio)
667					/* Adjust priorities: */
668					mutex_priority_adjust(curthread, *m);
669
670				THR_SCHED_LOCK(curthread, curthread);
671				/* Set the wakeup time: */
672				if (abstime) {
673					curthread->wakeup_time.tv_sec =
674						abstime->tv_sec;
675					curthread->wakeup_time.tv_nsec =
676						abstime->tv_nsec;
677				}
678				THR_SET_STATE(curthread, PS_MUTEX_WAIT);
679				THR_SCHED_UNLOCK(curthread, curthread);
680
681				/* Unlock the mutex structure: */
682				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
683
684				/* Schedule the next thread: */
685				_thr_sched_switch(curthread);
686
687				if (THR_IN_MUTEXQ(curthread)) {
688					THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
689					mutex_queue_remove(*m, curthread);
690					THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
691				}
692				/*
693				 * Only clear these after assuring the
694				 * thread is dequeued.
695				 */
696				curthread->data.mutex = NULL;
697				curthread->sigbackout = NULL;
698			}
699			break;
700
701		/* POSIX priority protection mutex: */
702		case PTHREAD_PRIO_PROTECT:
703			/* Check for a priority ceiling violation: */
704			if (curthread->active_priority > (*m)->m_prio) {
705				/* Unlock the mutex structure: */
706				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
707				ret = EINVAL;
708			}
709			/* Check if this mutex is not locked: */
710			else if ((*m)->m_owner == NULL) {
711				/*
712				 * Lock the mutex for the running
713				 * thread:
714				 */
715				(*m)->m_owner = curthread;
716
717				THR_SCHED_LOCK(curthread, curthread);
718				/* Track number of priority mutexes owned: */
719				curthread->priority_mutex_count++;
720
721				/*
722				 * The running thread inherits the ceiling
723				 * priority of the mutex and executes at that
724				 * priority.  Make sure the thread's
725				 * scheduling lock is held while priorities
726				 * are adjusted.
727				 */
728				curthread->active_priority = (*m)->m_prio;
729				(*m)->m_saved_prio =
730				    curthread->inherited_priority;
731				curthread->inherited_priority = (*m)->m_prio;
732				THR_SCHED_UNLOCK(curthread, curthread);
733
734				/* Add to the list of owned mutexes: */
735				MUTEX_ASSERT_NOT_OWNED(*m);
736				TAILQ_INSERT_TAIL(&curthread->mutexq,
737				    (*m), m_qe);
738				if (private)
739					THR_CRITICAL_ENTER(curthread);
740
741				/* Unlock the mutex structure: */
742				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
743			} else if ((*m)->m_owner == curthread) {
744				ret = mutex_self_lock(curthread, *m);
745
746				/* Unlock the mutex structure: */
747				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
748			} else {
749				/*
750				 * Join the queue of threads waiting to lock
751				 * the mutex and save a pointer to the mutex.
752				 */
753				mutex_queue_enq(*m, curthread);
754				curthread->data.mutex = *m;
755				curthread->sigbackout = mutex_lock_backout;
756
757				/* Clear any previous error: */
758				curthread->error = 0;
759
760				/*
761				 * This thread is active and is in a critical
762				 * region (holding the mutex lock); we should
763				 * be able to safely set the state.
764				 */
765
766				THR_SCHED_LOCK(curthread, curthread);
767				/* Set the wakeup time: */
768				if (abstime) {
769					curthread->wakeup_time.tv_sec =
770						abstime->tv_sec;
771					curthread->wakeup_time.tv_nsec =
772						abstime->tv_nsec;
773				}
774				THR_SET_STATE(curthread, PS_MUTEX_WAIT);
775				THR_SCHED_UNLOCK(curthread, curthread);
776
777				/* Unlock the mutex structure: */
778				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
779
780				/* Schedule the next thread: */
781				_thr_sched_switch(curthread);
782
783				if (THR_IN_MUTEXQ(curthread)) {
784					THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
785					mutex_queue_remove(*m, curthread);
786					THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
787				}
788				/*
789				 * Only clear these after assuring the
790				 * thread is dequeued.
791				 */
792				curthread->data.mutex = NULL;
793				curthread->sigbackout = NULL;
794
795				/*
796				 * The threads priority may have changed while
797				 * waiting for the mutex causing a ceiling
798				 * violation.
799				 */
800				ret = curthread->error;
801				curthread->error = 0;
802			}
803			break;
804
805		/* Trap invalid mutex types: */
806		default:
807			/* Unlock the mutex structure: */
808			THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
809
810			/* Return an invalid argument error: */
811			ret = EINVAL;
812			break;
813		}
814
815	} while (((*m)->m_owner != curthread) && (ret == 0) &&
816	    (curthread->interrupted == 0) && (curthread->timeout == 0));
817
818	if (ret == 0 && (*m)->m_owner != curthread && curthread->timeout)
819		ret = ETIMEDOUT;
820
821	/*
822	 * Check to see if this thread was interrupted and
823	 * is still in the mutex queue of waiting threads:
824	 */
825	if (curthread->interrupted != 0) {
826		/* Remove this thread from the mutex queue. */
827		THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
828		if (THR_IN_SYNCQ(curthread))
829			mutex_queue_remove(*m, curthread);
830		THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
831
832		/* Check for asynchronous cancellation. */
833		if (curthread->continuation != NULL)
834			curthread->continuation((void *) curthread);
835	}
836
837	/* Return the completion status: */
838	return (ret);
839}
840
841int
842__pthread_mutex_lock(pthread_mutex_t *m)
843{
844	struct pthread *curthread;
845	int	ret = 0;
846
847	if (_thr_initial == NULL)
848		_libpthread_init(NULL);
849
850	curthread = _get_curthread();
851	if (m == NULL)
852		ret = EINVAL;
853
854	/*
855	 * If the mutex is statically initialized, perform the dynamic
856	 * initialization:
857	 */
858	else if ((*m != NULL) || ((ret = init_static(curthread, m)) == 0))
859		ret = mutex_lock_common(curthread, m, NULL);
860
861	return (ret);
862}
863
/* Library-internal alias for the public lock entry point. */
__strong_reference(__pthread_mutex_lock, _thr_mutex_lock);
865
866int
867_pthread_mutex_lock(pthread_mutex_t *m)
868{
869	struct pthread *curthread;
870	int	ret = 0;
871
872	if (_thr_initial == NULL)
873		_libpthread_init(NULL);
874	curthread = _get_curthread();
875
876	if (m == NULL)
877		ret = EINVAL;
878
879	/*
880	 * If the mutex is statically initialized, perform the dynamic
881	 * initialization marking it private (delete safe):
882	 */
883	else if ((*m != NULL) ||
884	    ((ret = init_static_private(curthread, m)) == 0))
885		ret = mutex_lock_common(curthread, m, NULL);
886
887	return (ret);
888}
889
890int
891__pthread_mutex_timedlock(pthread_mutex_t *m,
892	const struct timespec *abs_timeout)
893{
894	struct pthread *curthread;
895	int	ret = 0;
896
897	if (_thr_initial == NULL)
898		_libpthread_init(NULL);
899
900	curthread = _get_curthread();
901	if (m == NULL)
902		ret = EINVAL;
903
904	/*
905	 * If the mutex is statically initialized, perform the dynamic
906	 * initialization:
907	 */
908	else if ((*m != NULL) || ((ret = init_static(curthread, m)) == 0))
909		ret = mutex_lock_common(curthread, m, abs_timeout);
910
911	return (ret);
912}
913
914int
915_pthread_mutex_timedlock(pthread_mutex_t *m,
916	const struct timespec *abs_timeout)
917{
918	struct pthread *curthread;
919	int	ret = 0;
920
921	if (_thr_initial == NULL)
922		_libpthread_init(NULL);
923	curthread = _get_curthread();
924
925	if (m == NULL)
926		ret = EINVAL;
927
928	/*
929	 * If the mutex is statically initialized, perform the dynamic
930	 * initialization marking it private (delete safe):
931	 */
932	else if ((*m != NULL) ||
933	    ((ret = init_static_private(curthread, m)) == 0))
934		ret = mutex_lock_common(curthread, m, abs_timeout);
935
936	return (ret);
937}
938
/*
 * pthread_mutex_unlock(): plain unlock, no condvar reference is left
 * on the mutex.
 */
int
_pthread_mutex_unlock(pthread_mutex_t *m)
{
	return (mutex_unlock_common(m, 0));
}
944
/* Library-internal alias for the public unlock entry point. */
__strong_reference(_pthread_mutex_unlock, _thr_mutex_unlock);
946
/*
 * Unlock on behalf of a condition-variable wait: leaves a reference on
 * the mutex so it cannot be destroyed while the waiter sleeps.
 */
int
_mutex_cv_unlock(pthread_mutex_t *m)
{
	return (mutex_unlock_common(m, 1));
}
952
953int
954_mutex_cv_lock(pthread_mutex_t *m)
955{
956	struct  pthread *curthread;
957	int	ret;
958
959	curthread = _get_curthread();
960	if ((ret = _pthread_mutex_lock(m)) == 0) {
961		THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
962		(*m)->m_refcount--;
963		THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
964	}
965	return (ret);
966}
967
968static inline int
969mutex_self_trylock(struct pthread *curthread, pthread_mutex_t m)
970{
971	int	ret = 0;
972
973	switch (m->m_type) {
974	/* case PTHREAD_MUTEX_DEFAULT: */
975	case PTHREAD_MUTEX_ERRORCHECK:
976	case PTHREAD_MUTEX_NORMAL:
977		ret = EBUSY;
978		break;
979
980	case PTHREAD_MUTEX_RECURSIVE:
981		/* Increment the lock count: */
982		m->m_count++;
983		break;
984
985	default:
986		/* Trap invalid mutex types; */
987		ret = EINVAL;
988	}
989
990	return (ret);
991}
992
/*
 * Handle a blocking lock attempt on a mutex the calling thread already
 * owns.  Called with the mutex's m_lock held; on the
 * PTHREAD_MUTEX_NORMAL path this function releases m_lock itself
 * before putting the thread into a deliberate deadlock.
 */
static inline int
mutex_self_lock(struct pthread *curthread, pthread_mutex_t m)
{
	int ret = 0;

	/*
	 * Don't allow evil recursive mutexes for private use
	 * in libc and libpthread.
	 */
	if (m->m_flags & MUTEX_FLAGS_PRIVATE)
		PANIC("Recurse on a private mutex.");

	switch (m->m_type) {
	/* case PTHREAD_MUTEX_DEFAULT: */
	case PTHREAD_MUTEX_ERRORCHECK:
		/*
		 * POSIX specifies that mutexes should return EDEADLK if a
		 * recursive lock is detected.
		 */
		ret = EDEADLK;
		break;

	case PTHREAD_MUTEX_NORMAL:
		/*
		 * What SS2 define as a 'normal' mutex.  Intentionally
		 * deadlock on attempts to get a lock you already own.
		 */

		/* Park the thread in PS_DEADLOCK; it is not expected to
		 * resume from this state. */
		THR_SCHED_LOCK(curthread, curthread);
		THR_SET_STATE(curthread, PS_DEADLOCK);
		THR_SCHED_UNLOCK(curthread, curthread);

		/* Unlock the mutex structure: */
		THR_LOCK_RELEASE(curthread, &m->m_lock);

		/* Schedule the next thread: */
		_thr_sched_switch(curthread);
		break;

	case PTHREAD_MUTEX_RECURSIVE:
		/* Increment the lock count: */
		m->m_count++;
		break;

	default:
		/* Trap invalid mutex types; */
		ret = EINVAL;
	}

	return (ret);
}
1044
/*
 * Common unlock path for the pthread_mutex_unlock family.
 *
 * Verifies the calling thread owns the mutex, handles the recursion
 * count for recursive mutexes, restores any priority the owner
 * inherited through the mutex (PRIO_INHERIT/PRIO_PROTECT), and hands
 * the mutex directly to the next waiting thread via mutex_handoff().
 *
 * If add_reference is non-zero and the unlock succeeds, m_refcount is
 * incremented before the mutex structure lock is released.
 *
 * Returns 0 on success, EINVAL for a NULL mutex or unknown protocol,
 * or EPERM when the caller does not own the mutex.
 */
static int
mutex_unlock_common(pthread_mutex_t *m, int add_reference)
{
	struct pthread *curthread = _get_curthread();
	struct kse_mailbox *kmbx = NULL;
	int ret = 0;

	if (m == NULL || *m == NULL)
		ret = EINVAL;
	else {
		/* Lock the mutex structure: */
		THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);

		/* Process according to mutex type: */
		switch ((*m)->m_protocol) {
		/* Default POSIX mutex: */
		case PTHREAD_PRIO_NONE:
			/*
			 * Check if the running thread is not the owner of the
			 * mutex:
			 */
			if ((*m)->m_owner != curthread)
				ret = EPERM;
			else if (((*m)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
			    ((*m)->m_count > 0))
				/* Decrement the count: */
				(*m)->m_count--;
			else {
				/*
				 * Clear the count in case this is a recursive
				 * mutex.
				 */
				(*m)->m_count = 0;

				/* Remove the mutex from the threads queue. */
				MUTEX_ASSERT_IS_OWNED(*m);
				TAILQ_REMOVE(&(*m)->m_owner->mutexq,
				    (*m), m_qe);
				MUTEX_INIT_LINK(*m);

				/*
				 * Hand off the mutex to the next waiting
				 * thread:
				 */
				kmbx = mutex_handoff(curthread, *m);
			}
			break;

		/* POSIX priority inheritence mutex: */
		case PTHREAD_PRIO_INHERIT:
			/*
			 * Check if the running thread is not the owner of the
			 * mutex:
			 */
			if ((*m)->m_owner != curthread)
				ret = EPERM;
			else if (((*m)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
			    ((*m)->m_count > 0))
				/* Decrement the count: */
				(*m)->m_count--;
			else {
				/*
				 * Clear the count in case this is recursive
				 * mutex.
				 */
				(*m)->m_count = 0;

				/*
				 * Restore the threads inherited priority and
				 * recompute the active priority (being careful
				 * not to override changes in the threads base
				 * priority subsequent to locking the mutex).
				 */
				THR_SCHED_LOCK(curthread, curthread);
				curthread->inherited_priority =
					(*m)->m_saved_prio;
				curthread->active_priority =
				    MAX(curthread->inherited_priority,
				    curthread->base_priority);

				/*
				 * This thread now owns one less priority mutex.
				 */
				curthread->priority_mutex_count--;
				THR_SCHED_UNLOCK(curthread, curthread);

				/* Remove the mutex from the threads queue. */
				MUTEX_ASSERT_IS_OWNED(*m);
				TAILQ_REMOVE(&(*m)->m_owner->mutexq,
				    (*m), m_qe);
				MUTEX_INIT_LINK(*m);

				/*
				 * Hand off the mutex to the next waiting
				 * thread:
				 */
				kmbx = mutex_handoff(curthread, *m);
			}
			break;

		/* POSIX priority ceiling mutex: */
		case PTHREAD_PRIO_PROTECT:
			/*
			 * Check if the running thread is not the owner of the
			 * mutex:
			 */
			if ((*m)->m_owner != curthread)
				ret = EPERM;
			else if (((*m)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
			    ((*m)->m_count > 0))
				/* Decrement the count: */
				(*m)->m_count--;
			else {
				/*
				 * Clear the count in case this is a recursive
				 * mutex.
				 */
				(*m)->m_count = 0;

				/*
				 * Restore the threads inherited priority and
				 * recompute the active priority (being careful
				 * not to override changes in the threads base
				 * priority subsequent to locking the mutex).
				 */
				THR_SCHED_LOCK(curthread, curthread);
				curthread->inherited_priority =
					(*m)->m_saved_prio;
				curthread->active_priority =
				    MAX(curthread->inherited_priority,
				    curthread->base_priority);

				/*
				 * This thread now owns one less priority mutex.
				 */
				curthread->priority_mutex_count--;
				THR_SCHED_UNLOCK(curthread, curthread);

				/* Remove the mutex from the threads queue. */
				MUTEX_ASSERT_IS_OWNED(*m);
				TAILQ_REMOVE(&(*m)->m_owner->mutexq,
				    (*m), m_qe);
				MUTEX_INIT_LINK(*m);

				/*
				 * Hand off the mutex to the next waiting
				 * thread:
				 */
				kmbx = mutex_handoff(curthread, *m);
			}
			break;

		/* Trap invalid mutex types: */
		default:
			/* Return an invalid argument error: */
			ret = EINVAL;
			break;
		}

		if ((ret == 0) && (add_reference != 0))
			/* Increment the reference count: */
			(*m)->m_refcount++;

		/* Leave the critical region if this is a private mutex. */
		if ((ret == 0) && ((*m)->m_flags & MUTEX_FLAGS_PRIVATE))
			THR_CRITICAL_LEAVE(curthread);

		/* Unlock the mutex structure: */
		THR_LOCK_RELEASE(curthread, &(*m)->m_lock);

		/*
		 * Wake up the thread the mutex was handed off to, if any;
		 * deliberately done only after the mutex lock is dropped.
		 */
		if (kmbx != NULL)
			kse_wakeup(kmbx);
	}

	/* Return the completion status: */
	return (ret);
}
1222
1223
1224/*
1225 * This function is called when a change in base priority occurs for
1226 * a thread that is holding or waiting for a priority protection or
1227 * inheritence mutex.  A change in a threads base priority can effect
1228 * changes to active priorities of other threads and to the ordering
1229 * of mutex locking by waiting threads.
1230 *
1231 * This must be called without the target thread's scheduling lock held.
1232 */
1233void
1234_mutex_notify_priochange(struct pthread *curthread, struct pthread *pthread,
1235    int propagate_prio)
1236{
1237	struct pthread_mutex *m;
1238
1239	/* Adjust the priorites of any owned priority mutexes: */
1240	if (pthread->priority_mutex_count > 0) {
1241		/*
1242		 * Rescan the mutexes owned by this thread and correct
1243		 * their priorities to account for this threads change
1244		 * in priority.  This has the side effect of changing
1245		 * the threads active priority.
1246		 *
1247		 * Be sure to lock the first mutex in the list of owned
1248		 * mutexes.  This acts as a barrier against another
1249		 * simultaneous call to change the threads priority
1250		 * and from the owning thread releasing the mutex.
1251		 */
1252		m = TAILQ_FIRST(&pthread->mutexq);
1253		if (m != NULL) {
1254			THR_LOCK_ACQUIRE(curthread, &m->m_lock);
1255			/*
1256			 * Make sure the thread still owns the lock.
1257			 */
1258			if (m == TAILQ_FIRST(&pthread->mutexq))
1259				mutex_rescan_owned(curthread, pthread,
1260				    /* rescan all owned */ NULL);
1261			THR_LOCK_RELEASE(curthread, &m->m_lock);
1262		}
1263	}
1264
1265	/*
1266	 * If this thread is waiting on a priority inheritence mutex,
1267	 * check for priority adjustments.  A change in priority can
1268	 * also cause a ceiling violation(*) for a thread waiting on
1269	 * a priority protection mutex; we don't perform the check here
1270	 * as it is done in pthread_mutex_unlock.
1271	 *
1272	 * (*) It should be noted that a priority change to a thread
1273	 *     _after_ taking and owning a priority ceiling mutex
1274	 *     does not affect ownership of that mutex; the ceiling
1275	 *     priority is only checked before mutex ownership occurs.
1276	 */
1277	if (propagate_prio != 0) {
1278		/*
1279		 * Lock the thread's scheduling queue.  This is a bit
1280		 * convoluted; the "in synchronization queue flag" can
1281		 * only be cleared with both the thread's scheduling and
1282		 * mutex locks held.  The thread's pointer to the wanted
1283		 * mutex is guaranteed to be valid during this time.
1284		 */
1285		THR_SCHED_LOCK(curthread, pthread);
1286
1287		if (((pthread->sflags & THR_FLAGS_IN_SYNCQ) == 0) ||
1288		    ((m = pthread->data.mutex) == NULL))
1289			THR_SCHED_UNLOCK(curthread, pthread);
1290		else {
1291			/*
1292			 * This thread is currently waiting on a mutex; unlock
1293			 * the scheduling queue lock and lock the mutex.  We
1294			 * can't hold both at the same time because the locking
1295			 * order could cause a deadlock.
1296			 */
1297			THR_SCHED_UNLOCK(curthread, pthread);
1298			THR_LOCK_ACQUIRE(curthread, &m->m_lock);
1299
1300			/*
1301			 * Check to make sure this thread is still in the
1302			 * same state (the lock above can yield the CPU to
1303			 * another thread or the thread may be running on
1304			 * another CPU).
1305			 */
1306			if (((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) &&
1307			    (pthread->data.mutex == m)) {
1308				/*
1309				 * Remove and reinsert this thread into
1310				 * the list of waiting threads to preserve
1311				 * decreasing priority order.
1312				 */
1313				mutex_queue_remove(m, pthread);
1314				mutex_queue_enq(m, pthread);
1315
1316				if (m->m_protocol == PTHREAD_PRIO_INHERIT)
1317					/* Adjust priorities: */
1318					mutex_priority_adjust(curthread, m);
1319			}
1320
1321			/* Unlock the mutex structure: */
1322			THR_LOCK_RELEASE(curthread, &m->m_lock);
1323		}
1324	}
1325}
1326
1327/*
1328 * Called when a new thread is added to the mutex waiting queue or
1329 * when a threads priority changes that is already in the mutex
1330 * waiting queue.
1331 *
1332 * This must be called with the mutex locked by the current thread.
1333 */
1334static void
1335mutex_priority_adjust(struct pthread *curthread, pthread_mutex_t mutex)
1336{
1337	pthread_mutex_t	m = mutex;
1338	struct pthread	*pthread_next, *pthread = mutex->m_owner;
1339	int		done, temp_prio;
1340
1341	/*
1342	 * Calculate the mutex priority as the maximum of the highest
1343	 * active priority of any waiting threads and the owning threads
1344	 * active priority(*).
1345	 *
1346	 * (*) Because the owning threads current active priority may
1347	 *     reflect priority inherited from this mutex (and the mutex
1348	 *     priority may have changed) we must recalculate the active
1349	 *     priority based on the threads saved inherited priority
1350	 *     and its base priority.
1351	 */
1352	pthread_next = TAILQ_FIRST(&m->m_queue);  /* should never be NULL */
1353	temp_prio = MAX(pthread_next->active_priority,
1354	    MAX(m->m_saved_prio, pthread->base_priority));
1355
1356	/* See if this mutex really needs adjusting: */
1357	if (temp_prio == m->m_prio)
1358		/* No need to propagate the priority: */
1359		return;
1360
1361	/* Set new priority of the mutex: */
1362	m->m_prio = temp_prio;
1363
1364	/*
1365	 * Don't unlock the mutex passed in as an argument.  It is
1366	 * expected to be locked and unlocked by the caller.
1367	 */
1368	done = 1;
1369	do {
1370		/*
1371		 * Save the threads priority before rescanning the
1372		 * owned mutexes:
1373		 */
1374		temp_prio = pthread->active_priority;
1375
1376		/*
1377		 * Fix the priorities for all mutexes held by the owning
1378		 * thread since taking this mutex.  This also has a
1379		 * potential side-effect of changing the threads priority.
1380		 *
1381		 * At this point the mutex is locked by the current thread.
1382		 * The owning thread can't release the mutex until it is
1383		 * unlocked, so we should be able to safely walk its list
1384		 * of owned mutexes.
1385		 */
1386		mutex_rescan_owned(curthread, pthread, m);
1387
1388		/*
1389		 * If this isn't the first time through the loop,
1390		 * the current mutex needs to be unlocked.
1391		 */
1392		if (done == 0)
1393			THR_LOCK_RELEASE(curthread, &m->m_lock);
1394
1395		/* Assume we're done unless told otherwise: */
1396		done = 1;
1397
1398		/*
1399		 * If the thread is currently waiting on a mutex, check
1400		 * to see if the threads new priority has affected the
1401		 * priority of the mutex.
1402		 */
1403		if ((temp_prio != pthread->active_priority) &&
1404		    ((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) &&
1405		    ((m = pthread->data.mutex) != NULL) &&
1406		    (m->m_protocol == PTHREAD_PRIO_INHERIT)) {
1407			/* Lock the mutex structure: */
1408			THR_LOCK_ACQUIRE(curthread, &m->m_lock);
1409
1410			/*
1411			 * Make sure the thread is still waiting on the
1412			 * mutex:
1413			 */
1414			if (((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) &&
1415			    (m == pthread->data.mutex)) {
1416				/*
1417				 * The priority for this thread has changed.
1418				 * Remove and reinsert this thread into the
1419				 * list of waiting threads to preserve
1420				 * decreasing priority order.
1421				 */
1422				mutex_queue_remove(m, pthread);
1423				mutex_queue_enq(m, pthread);
1424
1425				/*
1426				 * Grab the waiting thread with highest
1427				 * priority:
1428				 */
1429				pthread_next = TAILQ_FIRST(&m->m_queue);
1430
1431				/*
1432				 * Calculate the mutex priority as the maximum
1433				 * of the highest active priority of any
1434				 * waiting threads and the owning threads
1435				 * active priority.
1436				 */
1437				temp_prio = MAX(pthread_next->active_priority,
1438				    MAX(m->m_saved_prio,
1439				    m->m_owner->base_priority));
1440
1441				if (temp_prio != m->m_prio) {
1442					/*
1443					 * The priority needs to be propagated
1444					 * to the mutex this thread is waiting
1445					 * on and up to the owner of that mutex.
1446					 */
1447					m->m_prio = temp_prio;
1448					pthread = m->m_owner;
1449
1450					/* We're not done yet: */
1451					done = 0;
1452				}
1453			}
1454			/* Only release the mutex if we're done: */
1455			if (done != 0)
1456				THR_LOCK_RELEASE(curthread, &m->m_lock);
1457		}
1458	} while (done == 0);
1459}
1460
/*
 * Walk the list of mutexes owned by `pthread', starting after `mutex'
 * (or from the head of the owned list when mutex is NULL), and
 * recompute the priority contributed by each PRIO_INHERIT mutex.
 * Finally, fix the thread's inherited priority and, if its active
 * priority changed, requeue the thread on its run queue at the new
 * priority.
 *
 * Callers hold a mutex-structure lock while calling this (see
 * _mutex_notify_priochange and mutex_priority_adjust).
 */
static void
mutex_rescan_owned(struct pthread *curthread, struct pthread *pthread,
    struct pthread_mutex *mutex)
{
	struct pthread_mutex	*m;
	struct pthread		*pthread_next;
	int			active_prio, inherited_prio;

	/*
	 * Start walking the mutexes the thread has taken since
	 * taking this mutex.
	 */
	if (mutex == NULL) {
		/*
		 * A null mutex means start at the beginning of the owned
		 * mutex list.
		 */
		m = TAILQ_FIRST(&pthread->mutexq);

		/* There is no inherited priority yet. */
		inherited_prio = 0;
	} else {
		/*
		 * The caller wants to start after a specific mutex.  It
		 * is assumed that this mutex is a priority inheritence
		 * mutex and that its priority has been correctly
		 * calculated.
		 */
		m = TAILQ_NEXT(mutex, m_qe);

		/* Start inheriting priority from the specified mutex. */
		inherited_prio = mutex->m_prio;
	}
	active_prio = MAX(inherited_prio, pthread->base_priority);

	for (; m != NULL; m = TAILQ_NEXT(m, m_qe)) {
		/*
		 * We only want to deal with priority inheritence
		 * mutexes.  This might be optimized by only placing
		 * priority inheritence mutexes into the owned mutex
		 * list, but it may prove to be useful having all
		 * owned mutexes in this list.  Consider a thread
		 * exiting while holding mutexes...
		 */
		if (m->m_protocol == PTHREAD_PRIO_INHERIT) {
			/*
			 * Fix the owners saved (inherited) priority to
			 * reflect the priority of the previous mutex.
			 */
			m->m_saved_prio = inherited_prio;

			if ((pthread_next = TAILQ_FIRST(&m->m_queue)) != NULL)
				/* Recalculate the priority of the mutex: */
				m->m_prio = MAX(active_prio,
				     pthread_next->active_priority);
			else
				m->m_prio = active_prio;

			/* Recalculate new inherited and active priorities: */
			inherited_prio = m->m_prio;
			active_prio = MAX(m->m_prio, pthread->base_priority);
		}
	}

	/*
	 * Fix the threads inherited priority and recalculate its
	 * active priority.
	 */
	pthread->inherited_priority = inherited_prio;
	active_prio = MAX(inherited_prio, pthread->base_priority);

	if (active_prio != pthread->active_priority) {
		/* Lock the thread's scheduling queue: */
		THR_SCHED_LOCK(curthread, pthread);

		if ((pthread->flags & THR_FLAGS_IN_RUNQ) == 0) {
			/*
			 * This thread is not in a run queue.  Just set
			 * its active priority.
			 */
			pthread->active_priority = active_prio;
		}
		else {
			/*
			 * This thread is in a run queue.  Remove it from
			 * the queue before changing its priority:
			 */
			THR_RUNQ_REMOVE(pthread);

			/*
			 * POSIX states that if the priority is being
			 * lowered, the thread must be inserted at the
			 * head of the queue for its priority if it owns
			 * any priority protection or inheritence mutexes.
			 */
			if ((active_prio < pthread->active_priority) &&
			    (pthread->priority_mutex_count > 0)) {
				/* Set the new active priority. */
				pthread->active_priority = active_prio;

				THR_RUNQ_INSERT_HEAD(pthread);
			} else {
				/* Set the new active priority. */
				pthread->active_priority = active_prio;

				THR_RUNQ_INSERT_TAIL(pthread);
			}
		}
		THR_SCHED_UNLOCK(curthread, pthread);
	}
}
1572
1573void
1574_mutex_unlock_private(pthread_t pthread)
1575{
1576	struct pthread_mutex	*m, *m_next;
1577
1578	for (m = TAILQ_FIRST(&pthread->mutexq); m != NULL; m = m_next) {
1579		m_next = TAILQ_NEXT(m, m_qe);
1580		if ((m->m_flags & MUTEX_FLAGS_PRIVATE) != 0)
1581			pthread_mutex_unlock(&m);
1582	}
1583}
1584
1585/*
1586 * This is called by the current thread when it wants to back out of a
1587 * mutex_lock in order to run a signal handler.
1588 */
1589static void
1590mutex_lock_backout(void *arg)
1591{
1592	struct pthread *curthread = (struct pthread *)arg;
1593	struct pthread_mutex *m;
1594
1595	if ((curthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) {
1596		/*
1597		 * Any other thread may clear the "in sync queue flag",
1598		 * but only the current thread can clear the pointer
1599		 * to the mutex.  So if the flag is set, we can
1600		 * guarantee that the pointer to the mutex is valid.
1601		 * The only problem may be if the mutex is destroyed
1602		 * out from under us, but that should be considered
1603		 * an application bug.
1604		 */
1605		m = curthread->data.mutex;
1606
1607		/* Lock the mutex structure: */
1608		THR_LOCK_ACQUIRE(curthread, &m->m_lock);
1609
1610
1611		/*
1612		 * Check to make sure this thread doesn't already own
1613		 * the mutex.  Since mutexes are unlocked with direct
1614		 * handoffs, it is possible the previous owner gave it
1615		 * to us after we checked the sync queue flag and before
1616		 * we locked the mutex structure.
1617		 */
1618		if (m->m_owner == curthread) {
1619			THR_LOCK_RELEASE(curthread, &m->m_lock);
1620			mutex_unlock_common(&m, /* add_reference */ 0);
1621		} else {
1622			/*
1623			 * Remove ourselves from the mutex queue and
1624			 * clear the pointer to the mutex.  We may no
1625			 * longer be in the mutex queue, but the removal
1626			 * function will DTRT.
1627			 */
1628			mutex_queue_remove(m, curthread);
1629			curthread->data.mutex = NULL;
1630			THR_LOCK_RELEASE(curthread, &m->m_lock);
1631		}
1632	}
1633	/* No need to call this again. */
1634	curthread->sigbackout = NULL;
1635}
1636
1637/*
1638 * Dequeue a waiting thread from the head of a mutex queue in descending
1639 * priority order.
1640 *
1641 * In order to properly dequeue a thread from the mutex queue and
1642 * make it runnable without the possibility of errant wakeups, it
1643 * is necessary to lock the thread's scheduling queue while also
1644 * holding the mutex lock.
1645 */
1646static struct kse_mailbox *
1647mutex_handoff(struct pthread *curthread, struct pthread_mutex *mutex)
1648{
1649	struct kse_mailbox *kmbx = NULL;
1650	struct pthread *pthread;
1651
1652	/* Keep dequeueing until we find a valid thread: */
1653	mutex->m_owner = NULL;
1654	pthread = TAILQ_FIRST(&mutex->m_queue);
1655	while (pthread != NULL) {
1656		/* Take the thread's scheduling lock: */
1657		THR_SCHED_LOCK(curthread, pthread);
1658
1659		/* Remove the thread from the mutex queue: */
1660		TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
1661		pthread->sflags &= ~THR_FLAGS_IN_SYNCQ;
1662
1663		/*
1664		 * Only exit the loop if the thread hasn't been
1665		 * cancelled.
1666		 */
1667		switch (mutex->m_protocol) {
1668		case PTHREAD_PRIO_NONE:
1669			/*
1670			 * Assign the new owner and add the mutex to the
1671			 * thread's list of owned mutexes.
1672			 */
1673			mutex->m_owner = pthread;
1674			TAILQ_INSERT_TAIL(&pthread->mutexq, mutex, m_qe);
1675			break;
1676
1677		case PTHREAD_PRIO_INHERIT:
1678			/*
1679			 * Assign the new owner and add the mutex to the
1680			 * thread's list of owned mutexes.
1681			 */
1682			mutex->m_owner = pthread;
1683			TAILQ_INSERT_TAIL(&pthread->mutexq, mutex, m_qe);
1684
1685			/* Track number of priority mutexes owned: */
1686			pthread->priority_mutex_count++;
1687
1688			/*
1689			 * Set the priority of the mutex.  Since our waiting
1690			 * threads are in descending priority order, the
1691			 * priority of the mutex becomes the active priority
1692			 * of the thread we just dequeued.
1693			 */
1694			mutex->m_prio = pthread->active_priority;
1695
1696			/* Save the owning threads inherited priority: */
1697			mutex->m_saved_prio = pthread->inherited_priority;
1698
1699			/*
1700			 * The owning threads inherited priority now becomes
1701			 * his active priority (the priority of the mutex).
1702			 */
1703			pthread->inherited_priority = mutex->m_prio;
1704			break;
1705
1706		case PTHREAD_PRIO_PROTECT:
1707			if (pthread->active_priority > mutex->m_prio) {
1708				/*
1709				 * Either the mutex ceiling priority has
1710				 * been lowered and/or this threads priority
1711			 	 * has been raised subsequent to the thread
1712				 * being queued on the waiting list.
1713				 */
1714				pthread->error = EINVAL;
1715			}
1716			else {
1717				/*
1718				 * Assign the new owner and add the mutex
1719				 * to the thread's list of owned mutexes.
1720				 */
1721				mutex->m_owner = pthread;
1722				TAILQ_INSERT_TAIL(&pthread->mutexq,
1723				    mutex, m_qe);
1724
1725				/* Track number of priority mutexes owned: */
1726				pthread->priority_mutex_count++;
1727
1728				/*
1729				 * Save the owning threads inherited
1730				 * priority:
1731				 */
1732				mutex->m_saved_prio =
1733				    pthread->inherited_priority;
1734
1735				/*
1736				 * The owning thread inherits the ceiling
1737				 * priority of the mutex and executes at
1738				 * that priority:
1739				 */
1740				pthread->inherited_priority = mutex->m_prio;
1741				pthread->active_priority = mutex->m_prio;
1742
1743			}
1744			break;
1745		}
1746
1747		/* Make the thread runnable and unlock the scheduling queue: */
1748		kmbx = _thr_setrunnable_unlocked(pthread);
1749
1750		/* Add a preemption point. */
1751		if ((curthread->kseg == pthread->kseg) &&
1752		    (pthread->active_priority > curthread->active_priority))
1753			curthread->critical_yield = 1;
1754
1755		if (mutex->m_owner == pthread) {
1756			/* We're done; a valid owner was found. */
1757			if (mutex->m_flags & MUTEX_FLAGS_PRIVATE)
1758				THR_CRITICAL_ENTER(pthread);
1759			THR_SCHED_UNLOCK(curthread, pthread);
1760			break;
1761		}
1762		THR_SCHED_UNLOCK(curthread, pthread);
1763		/* Get the next thread from the waiting queue: */
1764		pthread = TAILQ_NEXT(pthread, sqe);
1765	}
1766
1767	if ((pthread == NULL) && (mutex->m_protocol == PTHREAD_PRIO_INHERIT))
1768		/* This mutex has no priority: */
1769		mutex->m_prio = 0;
1770	return (kmbx);
1771}
1772
1773/*
1774 * Dequeue a waiting thread from the head of a mutex queue in descending
1775 * priority order.
1776 */
1777static inline pthread_t
1778mutex_queue_deq(struct pthread_mutex *mutex)
1779{
1780	pthread_t pthread;
1781
1782	while ((pthread = TAILQ_FIRST(&mutex->m_queue)) != NULL) {
1783		TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
1784		pthread->sflags &= ~THR_FLAGS_IN_SYNCQ;
1785
1786		/*
1787		 * Only exit the loop if the thread hasn't been
1788		 * cancelled.
1789		 */
1790		if (pthread->interrupted == 0)
1791			break;
1792	}
1793
1794	return (pthread);
1795}
1796
1797/*
1798 * Remove a waiting thread from a mutex queue in descending priority order.
1799 */
1800static inline void
1801mutex_queue_remove(pthread_mutex_t mutex, pthread_t pthread)
1802{
1803	if ((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) {
1804		TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
1805		pthread->sflags &= ~THR_FLAGS_IN_SYNCQ;
1806	}
1807}
1808
1809/*
1810 * Enqueue a waiting thread to a queue in descending priority order.
1811 */
1812static inline void
1813mutex_queue_enq(pthread_mutex_t mutex, pthread_t pthread)
1814{
1815	pthread_t tid = TAILQ_LAST(&mutex->m_queue, mutex_head);
1816
1817	THR_ASSERT_NOT_IN_SYNCQ(pthread);
1818	/*
1819	 * For the common case of all threads having equal priority,
1820	 * we perform a quick check against the priority of the thread
1821	 * at the tail of the queue.
1822	 */
1823	if ((tid == NULL) || (pthread->active_priority <= tid->active_priority))
1824		TAILQ_INSERT_TAIL(&mutex->m_queue, pthread, sqe);
1825	else {
1826		tid = TAILQ_FIRST(&mutex->m_queue);
1827		while (pthread->active_priority <= tid->active_priority)
1828			tid = TAILQ_NEXT(tid, sqe);
1829		TAILQ_INSERT_BEFORE(tid, pthread, sqe);
1830	}
1831	pthread->sflags |= THR_FLAGS_IN_SYNCQ;
1832}
1833