/* thr_mutex.c revision 139023 */
1/*
2 * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 *    must display the following acknowledgement:
15 *	This product includes software developed by John Birrell.
16 * 4. Neither the name of the author nor the names of any co-contributors
17 *    may be used to endorse or promote products derived from this software
18 *    without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * SUCH DAMAGE.
31 *
32 * $FreeBSD: head/lib/libkse/thread/thr_mutex.c 139023 2004-12-18 18:07:37Z deischen $
33 */
34#include <stdlib.h>
35#include <errno.h>
36#include <string.h>
37#include <sys/param.h>
38#include <sys/queue.h>
39#include <pthread.h>
40#include "thr_private.h"
41
#if defined(_PTHREADS_INVARIANTS)
/*
 * Debug-build invariant checks.  These all use the do { } while (0)
 * idiom WITHOUT a trailing semicolon so that the invocation supplies
 * it and the macros remain safe inside if/else statements.
 */
#define MUTEX_INIT_LINK(m) 		do {		\
	(m)->m_qe.tqe_prev = NULL;			\
	(m)->m_qe.tqe_next = NULL;			\
} while (0)
#define MUTEX_ASSERT_IS_OWNED(m)	do {		\
	if ((m)->m_qe.tqe_prev == NULL)			\
		PANIC("mutex is not on list");		\
} while (0)
#define MUTEX_ASSERT_NOT_OWNED(m)	do {		\
	if (((m)->m_qe.tqe_prev != NULL) ||		\
	    ((m)->m_qe.tqe_next != NULL))		\
		PANIC("mutex is on list");		\
} while (0)
#define	THR_ASSERT_NOT_IN_SYNCQ(thr)	do {		\
	THR_ASSERT(((thr)->sflags & THR_FLAGS_IN_SYNCQ) == 0, \
	    "thread in syncq when it shouldn't be.");	\
} while (0)
#else
#define MUTEX_INIT_LINK(m)
#define MUTEX_ASSERT_IS_OWNED(m)
#define MUTEX_ASSERT_NOT_OWNED(m)
#define	THR_ASSERT_NOT_IN_SYNCQ(thr)
#endif

/* Non-debug helpers used on both build flavors. */
#define THR_IN_MUTEXQ(thr)	(((thr)->sflags & THR_FLAGS_IN_SYNCQ) != 0)
#define	MUTEX_DESTROY(m) do {		\
	_lock_destroy(&(m)->m_lock);	\
	free(m);			\
} while (0)
72
73
74/*
75 * Prototypes
76 */
77static struct kse_mailbox *mutex_handoff(struct pthread *,
78			    struct pthread_mutex *);
79static inline int	mutex_self_trylock(struct pthread *, pthread_mutex_t);
80static inline int	mutex_self_lock(struct pthread *, pthread_mutex_t);
81static int		mutex_unlock_common(pthread_mutex_t *, int);
82static void		mutex_priority_adjust(struct pthread *, pthread_mutex_t);
83static void		mutex_rescan_owned (struct pthread *, struct pthread *,
84			    struct pthread_mutex *);
85static inline pthread_t	mutex_queue_deq(pthread_mutex_t);
86static inline void	mutex_queue_remove(pthread_mutex_t, pthread_t);
87static inline void	mutex_queue_enq(pthread_mutex_t, pthread_t);
88static void		mutex_lock_backout(void *arg);
89
90static struct pthread_mutex_attr	static_mutex_attr =
91    PTHREAD_MUTEXATTR_STATIC_INITIALIZER;
92static pthread_mutexattr_t		static_mattr = &static_mutex_attr;
93
94/* Single underscore versions provided for libc internal usage: */
95__weak_reference(__pthread_mutex_init, pthread_mutex_init);
96__weak_reference(__pthread_mutex_lock, pthread_mutex_lock);
97__weak_reference(__pthread_mutex_timedlock, pthread_mutex_timedlock);
98__weak_reference(__pthread_mutex_trylock, pthread_mutex_trylock);
99
100/* No difference between libc and application usage of these: */
101__weak_reference(_pthread_mutex_destroy, pthread_mutex_destroy);
102__weak_reference(_pthread_mutex_unlock, pthread_mutex_unlock);
103
104
105
106int
107__pthread_mutex_init(pthread_mutex_t *mutex,
108    const pthread_mutexattr_t *mutex_attr)
109{
110	struct pthread_mutex *pmutex;
111	enum pthread_mutextype type;
112	int		protocol;
113	int		ceiling;
114	int		flags;
115	int		ret = 0;
116
117	if (mutex == NULL)
118		ret = EINVAL;
119
120	/* Check if default mutex attributes: */
121	else if (mutex_attr == NULL || *mutex_attr == NULL) {
122		/* Default to a (error checking) POSIX mutex: */
123		type = PTHREAD_MUTEX_ERRORCHECK;
124		protocol = PTHREAD_PRIO_NONE;
125		ceiling = THR_MAX_PRIORITY;
126		flags = 0;
127	}
128
129	/* Check mutex type: */
130	else if (((*mutex_attr)->m_type < PTHREAD_MUTEX_ERRORCHECK) ||
131	    ((*mutex_attr)->m_type >= MUTEX_TYPE_MAX))
132		/* Return an invalid argument error: */
133		ret = EINVAL;
134
135	/* Check mutex protocol: */
136	else if (((*mutex_attr)->m_protocol < PTHREAD_PRIO_NONE) ||
137	    ((*mutex_attr)->m_protocol > PTHREAD_MUTEX_RECURSIVE))
138		/* Return an invalid argument error: */
139		ret = EINVAL;
140
141	else {
142		/* Use the requested mutex type and protocol: */
143		type = (*mutex_attr)->m_type;
144		protocol = (*mutex_attr)->m_protocol;
145		ceiling = (*mutex_attr)->m_ceiling;
146		flags = (*mutex_attr)->m_flags;
147	}
148
149	/* Check no errors so far: */
150	if (ret == 0) {
151		if ((pmutex = (pthread_mutex_t)
152		    malloc(sizeof(struct pthread_mutex))) == NULL)
153			ret = ENOMEM;
154		else if (_lock_init(&pmutex->m_lock, LCK_ADAPTIVE,
155		    _thr_lock_wait, _thr_lock_wakeup) != 0) {
156			free(pmutex);
157			*mutex = NULL;
158			ret = ENOMEM;
159		} else {
160			/* Set the mutex flags: */
161			pmutex->m_flags = flags;
162
163			/* Process according to mutex type: */
164			switch (type) {
165			/* case PTHREAD_MUTEX_DEFAULT: */
166			case PTHREAD_MUTEX_ERRORCHECK:
167			case PTHREAD_MUTEX_NORMAL:
168				/* Nothing to do here. */
169				break;
170
171			/* Single UNIX Spec 2 recursive mutex: */
172			case PTHREAD_MUTEX_RECURSIVE:
173				/* Reset the mutex count: */
174				pmutex->m_count = 0;
175				break;
176
177			/* Trap invalid mutex types: */
178			default:
179				/* Return an invalid argument error: */
180				ret = EINVAL;
181				break;
182			}
183			if (ret == 0) {
184				/* Initialise the rest of the mutex: */
185				TAILQ_INIT(&pmutex->m_queue);
186				pmutex->m_flags |= MUTEX_FLAGS_INITED;
187				pmutex->m_owner = NULL;
188				pmutex->m_type = type;
189				pmutex->m_protocol = protocol;
190				pmutex->m_refcount = 0;
191				if (protocol == PTHREAD_PRIO_PROTECT)
192					pmutex->m_prio = ceiling;
193				else
194					pmutex->m_prio = -1;
195				pmutex->m_saved_prio = 0;
196				MUTEX_INIT_LINK(pmutex);
197				*mutex = pmutex;
198			} else {
199				/* Free the mutex lock structure: */
200				MUTEX_DESTROY(pmutex);
201				*mutex = NULL;
202			}
203		}
204	}
205	/* Return the completion status: */
206	return (ret);
207}
208
209int
210_pthread_mutex_init(pthread_mutex_t *mutex,
211    const pthread_mutexattr_t *mutex_attr)
212{
213	struct pthread_mutex_attr mattr, *mattrp;
214
215	if ((mutex_attr == NULL) || (*mutex_attr == NULL))
216		return (__pthread_mutex_init(mutex, &static_mattr));
217	else {
218		mattr = **mutex_attr;
219		mattr.m_flags |= MUTEX_FLAGS_PRIVATE;
220		mattrp = &mattr;
221		return (__pthread_mutex_init(mutex, &mattrp));
222	}
223}
224
225void
226_thr_mutex_reinit(pthread_mutex_t *mutex)
227{
228	_lock_reinit(&(*mutex)->m_lock, LCK_ADAPTIVE,
229	    _thr_lock_wait, _thr_lock_wakeup);
230	TAILQ_INIT(&(*mutex)->m_queue);
231	(*mutex)->m_owner = NULL;
232	(*mutex)->m_count = 0;
233	(*mutex)->m_refcount = 0;
234	(*mutex)->m_prio = 0;
235	(*mutex)->m_saved_prio = 0;
236}
237
/*
 * Destroy a mutex and free its storage.
 *
 * Returns EINVAL for a NULL or uninitialized mutex, EBUSY if the
 * mutex is currently owned, has queued waiters, or is referenced
 * (e.g. by a condition-variable wait), and 0 on success.  On success
 * the caller's pointer is set to NULL before the storage is freed.
 */
int
_pthread_mutex_destroy(pthread_mutex_t *mutex)
{
	struct pthread	*curthread = _get_curthread();
	pthread_mutex_t m;
	int ret = 0;

	if (mutex == NULL || *mutex == NULL)
		ret = EINVAL;
	else {
		/* Lock the mutex structure: */
		THR_LOCK_ACQUIRE(curthread, &(*mutex)->m_lock);

		/*
		 * Check to see if this mutex is in use:
		 */
		if (((*mutex)->m_owner != NULL) ||
		    (TAILQ_FIRST(&(*mutex)->m_queue) != NULL) ||
		    ((*mutex)->m_refcount != 0)) {
			ret = EBUSY;

			/* Unlock the mutex structure: */
			THR_LOCK_RELEASE(curthread, &(*mutex)->m_lock);
		} else {
			/*
			 * Save a pointer to the mutex so it can be free'd
			 * and set the caller's pointer to NULL:
			 */
			m = *mutex;
			*mutex = NULL;

			/* Unlock the mutex structure: */
			THR_LOCK_RELEASE(curthread, &m->m_lock);

			/*
			 * Free the memory allocated for the mutex
			 * structure:
			 */
			MUTEX_ASSERT_NOT_OWNED(m);
			MUTEX_DESTROY(m);
		}
	}

	/* Return the completion status: */
	return (ret);
}
284
285static int
286init_static(struct pthread *thread, pthread_mutex_t *mutex)
287{
288	int ret;
289
290	THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);
291
292	if (*mutex == NULL)
293		ret = pthread_mutex_init(mutex, NULL);
294	else
295		ret = 0;
296
297	THR_LOCK_RELEASE(thread, &_mutex_static_lock);
298
299	return (ret);
300}
301
302static int
303init_static_private(struct pthread *thread, pthread_mutex_t *mutex)
304{
305	int ret;
306
307	THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);
308
309	if (*mutex == NULL)
310		ret = pthread_mutex_init(mutex, &static_mattr);
311	else
312		ret = 0;
313
314	THR_LOCK_RELEASE(thread, &_mutex_static_lock);
315
316	return (ret);
317}
318
/*
 * Common non-blocking lock path shared by the trylock entry points.
 *
 * Acquires the mutex structure lock and attempts to take ownership
 * according to the mutex protocol (none / priority inheritance /
 * priority ceiling).  Returns 0 on success, EBUSY if another thread
 * owns the mutex, the mutex_self_trylock() result when the caller
 * already owns it, and EINVAL on a ceiling violation or an unknown
 * protocol.  On success for a private mutex the thread also enters a
 * critical region (defer-safe for deletion).
 */
static int
mutex_trylock_common(struct pthread *curthread, pthread_mutex_t *mutex)
{
	int private;
	int ret = 0;

	THR_ASSERT((mutex != NULL) && (*mutex != NULL),
	    "Uninitialized mutex in pthread_mutex_trylock_basic");

	/* Lock the mutex structure: */
	THR_LOCK_ACQUIRE(curthread, &(*mutex)->m_lock);
	private = (*mutex)->m_flags & MUTEX_FLAGS_PRIVATE;

	/*
	 * If the mutex was statically allocated, properly
	 * initialize the tail queue.
	 */
	if (((*mutex)->m_flags & MUTEX_FLAGS_INITED) == 0) {
		TAILQ_INIT(&(*mutex)->m_queue);
		MUTEX_INIT_LINK(*mutex);
		(*mutex)->m_flags |= MUTEX_FLAGS_INITED;
	}

	/* Process according to mutex type: */
	switch ((*mutex)->m_protocol) {
	/* Default POSIX mutex: */
	case PTHREAD_PRIO_NONE:
		/* Check if this mutex is not locked: */
		if ((*mutex)->m_owner == NULL) {
			/* Lock the mutex for the running thread: */
			(*mutex)->m_owner = curthread;

			/* Add to the list of owned mutexes: */
			MUTEX_ASSERT_NOT_OWNED(*mutex);
			TAILQ_INSERT_TAIL(&curthread->mutexq,
			    (*mutex), m_qe);
		} else if ((*mutex)->m_owner == curthread)
			ret = mutex_self_trylock(curthread, *mutex);
		else
			/* Return a busy error: */
			ret = EBUSY;
		break;

	/* POSIX priority inheritence mutex: */
	case PTHREAD_PRIO_INHERIT:
		/* Check if this mutex is not locked: */
		if ((*mutex)->m_owner == NULL) {
			/* Lock the mutex for the running thread: */
			(*mutex)->m_owner = curthread;

			THR_SCHED_LOCK(curthread, curthread);
			/* Track number of priority mutexes owned: */
			curthread->priority_mutex_count++;

			/*
			 * The mutex takes on the attributes of the
			 * running thread when there are no waiters.
			 */
			(*mutex)->m_prio = curthread->active_priority;
			(*mutex)->m_saved_prio =
			    curthread->inherited_priority;
			curthread->inherited_priority = (*mutex)->m_prio;
			THR_SCHED_UNLOCK(curthread, curthread);

			/* Add to the list of owned mutexes: */
			MUTEX_ASSERT_NOT_OWNED(*mutex);
			TAILQ_INSERT_TAIL(&curthread->mutexq,
			    (*mutex), m_qe);
		} else if ((*mutex)->m_owner == curthread)
			ret = mutex_self_trylock(curthread, *mutex);
		else
			/* Return a busy error: */
			ret = EBUSY;
		break;

	/* POSIX priority protection mutex: */
	case PTHREAD_PRIO_PROTECT:
		/* Check for a priority ceiling violation: */
		if (curthread->active_priority > (*mutex)->m_prio)
			ret = EINVAL;

		/* Check if this mutex is not locked: */
		else if ((*mutex)->m_owner == NULL) {
			/* Lock the mutex for the running thread: */
			(*mutex)->m_owner = curthread;

			THR_SCHED_LOCK(curthread, curthread);
			/* Track number of priority mutexes owned: */
			curthread->priority_mutex_count++;

			/*
			 * The running thread inherits the ceiling
			 * priority of the mutex and executes at that
			 * priority.
			 */
			curthread->active_priority = (*mutex)->m_prio;
			(*mutex)->m_saved_prio =
			    curthread->inherited_priority;
			curthread->inherited_priority =
			    (*mutex)->m_prio;
			THR_SCHED_UNLOCK(curthread, curthread);
			/* Add to the list of owned mutexes: */
			MUTEX_ASSERT_NOT_OWNED(*mutex);
			TAILQ_INSERT_TAIL(&curthread->mutexq,
			    (*mutex), m_qe);
		} else if ((*mutex)->m_owner == curthread)
			ret = mutex_self_trylock(curthread, *mutex);
		else
			/* Return a busy error: */
			ret = EBUSY;
		break;

	/* Trap invalid mutex types: */
	default:
		/* Return an invalid argument error: */
		ret = EINVAL;
		break;
	}

	/* Private mutexes make the owner defer-safe while held. */
	if (ret == 0 && private)
		THR_CRITICAL_ENTER(curthread);

	/* Unlock the mutex structure: */
	THR_LOCK_RELEASE(curthread, &(*mutex)->m_lock);

	/* Return the completion status: */
	return (ret);
}
447
448int
449__pthread_mutex_trylock(pthread_mutex_t *mutex)
450{
451	struct pthread *curthread = _get_curthread();
452	int ret = 0;
453
454	if (mutex == NULL)
455		ret = EINVAL;
456
457	/*
458	 * If the mutex is statically initialized, perform the dynamic
459	 * initialization:
460	 */
461	else if ((*mutex != NULL) ||
462	    ((ret = init_static(curthread, mutex)) == 0))
463		ret = mutex_trylock_common(curthread, mutex);
464
465	return (ret);
466}
467
468int
469_pthread_mutex_trylock(pthread_mutex_t *mutex)
470{
471	struct pthread	*curthread = _get_curthread();
472	int	ret = 0;
473
474	if (mutex == NULL)
475		ret = EINVAL;
476
477	/*
478	 * If the mutex is statically initialized, perform the dynamic
479	 * initialization marking the mutex private (delete safe):
480	 */
481	else if ((*mutex != NULL) ||
482	    ((ret = init_static_private(curthread, mutex)) == 0))
483		ret = mutex_trylock_common(curthread, mutex);
484
485	return (ret);
486}
487
488static int
489mutex_lock_common(struct pthread *curthread, pthread_mutex_t *m,
490	const struct timespec * abstime)
491{
492	int	private;
493	int	ret = 0;
494
495	THR_ASSERT((m != NULL) && (*m != NULL),
496	    "Uninitialized mutex in pthread_mutex_trylock_basic");
497
498	if (abstime != NULL && (abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
499	    abstime->tv_nsec >= 1000000000))
500		return (EINVAL);
501
502	/* Reset the interrupted flag: */
503	curthread->interrupted = 0;
504	curthread->timeout = 0;
505	curthread->wakeup_time.tv_sec = -1;
506
507	private = (*m)->m_flags & MUTEX_FLAGS_PRIVATE;
508
509	/*
510	 * Enter a loop waiting to become the mutex owner.  We need a
511	 * loop in case the waiting thread is interrupted by a signal
512	 * to execute a signal handler.  It is not (currently) possible
513	 * to remain in the waiting queue while running a handler.
514	 * Instead, the thread is interrupted and backed out of the
515	 * waiting queue prior to executing the signal handler.
516	 */
517	do {
518		/* Lock the mutex structure: */
519		THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
520
521		/*
522		 * If the mutex was statically allocated, properly
523		 * initialize the tail queue.
524		 */
525		if (((*m)->m_flags & MUTEX_FLAGS_INITED) == 0) {
526			TAILQ_INIT(&(*m)->m_queue);
527			(*m)->m_flags |= MUTEX_FLAGS_INITED;
528			MUTEX_INIT_LINK(*m);
529		}
530
531		/* Process according to mutex type: */
532		switch ((*m)->m_protocol) {
533		/* Default POSIX mutex: */
534		case PTHREAD_PRIO_NONE:
535			if ((*m)->m_owner == NULL) {
536				/* Lock the mutex for this thread: */
537				(*m)->m_owner = curthread;
538
539				/* Add to the list of owned mutexes: */
540				MUTEX_ASSERT_NOT_OWNED(*m);
541				TAILQ_INSERT_TAIL(&curthread->mutexq,
542				    (*m), m_qe);
543				if (private)
544					THR_CRITICAL_ENTER(curthread);
545
546				/* Unlock the mutex structure: */
547				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
548			} else if ((*m)->m_owner == curthread) {
549				ret = mutex_self_lock(curthread, *m);
550
551				/* Unlock the mutex structure: */
552				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
553			} else {
554				/* Set the wakeup time: */
555				if (abstime) {
556					curthread->wakeup_time.tv_sec =
557						abstime->tv_sec;
558					curthread->wakeup_time.tv_nsec =
559						abstime->tv_nsec;
560				}
561
562				/*
563				 * Join the queue of threads waiting to lock
564				 * the mutex and save a pointer to the mutex.
565				 */
566				mutex_queue_enq(*m, curthread);
567				curthread->data.mutex = *m;
568				curthread->sigbackout = mutex_lock_backout;
569				/*
570				 * This thread is active and is in a critical
571				 * region (holding the mutex lock); we should
572				 * be able to safely set the state.
573				 */
574				THR_SCHED_LOCK(curthread, curthread);
575				THR_SET_STATE(curthread, PS_MUTEX_WAIT);
576				THR_SCHED_UNLOCK(curthread, curthread);
577
578				/* Unlock the mutex structure: */
579				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
580
581				/* Schedule the next thread: */
582				_thr_sched_switch(curthread);
583
584				if (THR_IN_MUTEXQ(curthread)) {
585					THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
586					mutex_queue_remove(*m, curthread);
587					THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
588				}
589				/*
590				 * Only clear these after assuring the
591				 * thread is dequeued.
592				 */
593				curthread->data.mutex = NULL;
594				curthread->sigbackout = NULL;
595			}
596			break;
597
598		/* POSIX priority inheritence mutex: */
599		case PTHREAD_PRIO_INHERIT:
600			/* Check if this mutex is not locked: */
601			if ((*m)->m_owner == NULL) {
602				/* Lock the mutex for this thread: */
603				(*m)->m_owner = curthread;
604
605				THR_SCHED_LOCK(curthread, curthread);
606				/* Track number of priority mutexes owned: */
607				curthread->priority_mutex_count++;
608
609				/*
610				 * The mutex takes on attributes of the
611				 * running thread when there are no waiters.
612				 * Make sure the thread's scheduling lock is
613				 * held while priorities are adjusted.
614				 */
615				(*m)->m_prio = curthread->active_priority;
616				(*m)->m_saved_prio =
617				    curthread->inherited_priority;
618				curthread->inherited_priority = (*m)->m_prio;
619				THR_SCHED_UNLOCK(curthread, curthread);
620
621				/* Add to the list of owned mutexes: */
622				MUTEX_ASSERT_NOT_OWNED(*m);
623				TAILQ_INSERT_TAIL(&curthread->mutexq,
624				    (*m), m_qe);
625				if (private)
626					THR_CRITICAL_ENTER(curthread);
627
628				/* Unlock the mutex structure: */
629				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
630			} else if ((*m)->m_owner == curthread) {
631				ret = mutex_self_lock(curthread, *m);
632
633				/* Unlock the mutex structure: */
634				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
635			} else {
636				/* Set the wakeup time: */
637				if (abstime) {
638					curthread->wakeup_time.tv_sec =
639						abstime->tv_sec;
640					curthread->wakeup_time.tv_nsec =
641						abstime->tv_nsec;
642				}
643
644				/*
645				 * Join the queue of threads waiting to lock
646				 * the mutex and save a pointer to the mutex.
647				 */
648				mutex_queue_enq(*m, curthread);
649				curthread->data.mutex = *m;
650				curthread->sigbackout = mutex_lock_backout;
651
652				/*
653				 * This thread is active and is in a critical
654				 * region (holding the mutex lock); we should
655				 * be able to safely set the state.
656				 */
657				if (curthread->active_priority > (*m)->m_prio)
658					/* Adjust priorities: */
659					mutex_priority_adjust(curthread, *m);
660
661				THR_SCHED_LOCK(curthread, curthread);
662				THR_SET_STATE(curthread, PS_MUTEX_WAIT);
663				THR_SCHED_UNLOCK(curthread, curthread);
664
665				/* Unlock the mutex structure: */
666				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
667
668				/* Schedule the next thread: */
669				_thr_sched_switch(curthread);
670
671				if (THR_IN_MUTEXQ(curthread)) {
672					THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
673					mutex_queue_remove(*m, curthread);
674					THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
675				}
676				/*
677				 * Only clear these after assuring the
678				 * thread is dequeued.
679				 */
680				curthread->data.mutex = NULL;
681				curthread->sigbackout = NULL;
682			}
683			break;
684
685		/* POSIX priority protection mutex: */
686		case PTHREAD_PRIO_PROTECT:
687			/* Check for a priority ceiling violation: */
688			if (curthread->active_priority > (*m)->m_prio) {
689				/* Unlock the mutex structure: */
690				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
691				ret = EINVAL;
692			}
693			/* Check if this mutex is not locked: */
694			else if ((*m)->m_owner == NULL) {
695				/*
696				 * Lock the mutex for the running
697				 * thread:
698				 */
699				(*m)->m_owner = curthread;
700
701				THR_SCHED_LOCK(curthread, curthread);
702				/* Track number of priority mutexes owned: */
703				curthread->priority_mutex_count++;
704
705				/*
706				 * The running thread inherits the ceiling
707				 * priority of the mutex and executes at that
708				 * priority.  Make sure the thread's
709				 * scheduling lock is held while priorities
710				 * are adjusted.
711				 */
712				curthread->active_priority = (*m)->m_prio;
713				(*m)->m_saved_prio =
714				    curthread->inherited_priority;
715				curthread->inherited_priority = (*m)->m_prio;
716				THR_SCHED_UNLOCK(curthread, curthread);
717
718				/* Add to the list of owned mutexes: */
719				MUTEX_ASSERT_NOT_OWNED(*m);
720				TAILQ_INSERT_TAIL(&curthread->mutexq,
721				    (*m), m_qe);
722				if (private)
723					THR_CRITICAL_ENTER(curthread);
724
725				/* Unlock the mutex structure: */
726				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
727			} else if ((*m)->m_owner == curthread) {
728				ret = mutex_self_lock(curthread, *m);
729
730				/* Unlock the mutex structure: */
731				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
732			} else {
733				/* Set the wakeup time: */
734				if (abstime) {
735					curthread->wakeup_time.tv_sec =
736						abstime->tv_sec;
737					curthread->wakeup_time.tv_nsec =
738						abstime->tv_nsec;
739				}
740
741				/*
742				 * Join the queue of threads waiting to lock
743				 * the mutex and save a pointer to the mutex.
744				 */
745				mutex_queue_enq(*m, curthread);
746				curthread->data.mutex = *m;
747				curthread->sigbackout = mutex_lock_backout;
748
749				/* Clear any previous error: */
750				curthread->error = 0;
751
752				/*
753				 * This thread is active and is in a critical
754				 * region (holding the mutex lock); we should
755				 * be able to safely set the state.
756				 */
757
758				THR_SCHED_LOCK(curthread, curthread);
759				THR_SET_STATE(curthread, PS_MUTEX_WAIT);
760				THR_SCHED_UNLOCK(curthread, curthread);
761
762				/* Unlock the mutex structure: */
763				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
764
765				/* Schedule the next thread: */
766				_thr_sched_switch(curthread);
767
768				if (THR_IN_MUTEXQ(curthread)) {
769					THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
770					mutex_queue_remove(*m, curthread);
771					THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
772				}
773				/*
774				 * Only clear these after assuring the
775				 * thread is dequeued.
776				 */
777				curthread->data.mutex = NULL;
778				curthread->sigbackout = NULL;
779
780				/*
781				 * The threads priority may have changed while
782				 * waiting for the mutex causing a ceiling
783				 * violation.
784				 */
785				ret = curthread->error;
786				curthread->error = 0;
787			}
788			break;
789
790		/* Trap invalid mutex types: */
791		default:
792			/* Unlock the mutex structure: */
793			THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
794
795			/* Return an invalid argument error: */
796			ret = EINVAL;
797			break;
798		}
799
800	} while (((*m)->m_owner != curthread) && (ret == 0) &&
801	    (curthread->interrupted == 0) && (curthread->timeout == 0));
802
803	if (ret == 0 && (*m)->m_owner != curthread && curthread->timeout)
804		ret = ETIMEDOUT;
805
806	/*
807	 * Check to see if this thread was interrupted and
808	 * is still in the mutex queue of waiting threads:
809	 */
810	if (curthread->interrupted != 0) {
811		/* Remove this thread from the mutex queue. */
812		THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
813		if (THR_IN_SYNCQ(curthread))
814			mutex_queue_remove(*m, curthread);
815		THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
816
817		/* Check for asynchronous cancellation. */
818		if (curthread->continuation != NULL)
819			curthread->continuation((void *) curthread);
820	}
821
822	/* Return the completion status: */
823	return (ret);
824}
825
826int
827__pthread_mutex_lock(pthread_mutex_t *m)
828{
829	struct pthread *curthread;
830	int	ret = 0;
831
832	if (_thr_initial == NULL)
833		_libpthread_init(NULL);
834
835	curthread = _get_curthread();
836	if (m == NULL)
837		ret = EINVAL;
838
839	/*
840	 * If the mutex is statically initialized, perform the dynamic
841	 * initialization:
842	 */
843	else if ((*m != NULL) || ((ret = init_static(curthread, m)) == 0))
844		ret = mutex_lock_common(curthread, m, NULL);
845
846	return (ret);
847}
848
849__strong_reference(__pthread_mutex_lock, _thr_mutex_lock);
850
851int
852_pthread_mutex_lock(pthread_mutex_t *m)
853{
854	struct pthread *curthread;
855	int	ret = 0;
856
857	if (_thr_initial == NULL)
858		_libpthread_init(NULL);
859	curthread = _get_curthread();
860
861	if (m == NULL)
862		ret = EINVAL;
863
864	/*
865	 * If the mutex is statically initialized, perform the dynamic
866	 * initialization marking it private (delete safe):
867	 */
868	else if ((*m != NULL) ||
869	    ((ret = init_static_private(curthread, m)) == 0))
870		ret = mutex_lock_common(curthread, m, NULL);
871
872	return (ret);
873}
874
875int
876__pthread_mutex_timedlock(pthread_mutex_t *m,
877	const struct timespec *abs_timeout)
878{
879	struct pthread *curthread;
880	int	ret = 0;
881
882	if (_thr_initial == NULL)
883		_libpthread_init(NULL);
884
885	curthread = _get_curthread();
886	if (m == NULL)
887		ret = EINVAL;
888
889	/*
890	 * If the mutex is statically initialized, perform the dynamic
891	 * initialization:
892	 */
893	else if ((*m != NULL) || ((ret = init_static(curthread, m)) == 0))
894		ret = mutex_lock_common(curthread, m, abs_timeout);
895
896	return (ret);
897}
898
899int
900_pthread_mutex_timedlock(pthread_mutex_t *m,
901	const struct timespec *abs_timeout)
902{
903	struct pthread *curthread;
904	int	ret = 0;
905
906	if (_thr_initial == NULL)
907		_libpthread_init(NULL);
908	curthread = _get_curthread();
909
910	if (m == NULL)
911		ret = EINVAL;
912
913	/*
914	 * If the mutex is statically initialized, perform the dynamic
915	 * initialization marking it private (delete safe):
916	 */
917	else if ((*m != NULL) ||
918	    ((ret = init_static_private(curthread, m)) == 0))
919		ret = mutex_lock_common(curthread, m, abs_timeout);
920
921	return (ret);
922}
923
924int
925_pthread_mutex_unlock(pthread_mutex_t *m)
926{
927	return (mutex_unlock_common(m, /* add reference */ 0));
928}
929
930__strong_reference(_pthread_mutex_unlock, _thr_mutex_unlock);
931
932int
933_mutex_cv_unlock(pthread_mutex_t *m)
934{
935	return (mutex_unlock_common(m, /* add reference */ 1));
936}
937
938int
939_mutex_cv_lock(pthread_mutex_t *m)
940{
941	struct  pthread *curthread;
942	int	ret;
943
944	curthread = _get_curthread();
945	if ((ret = _pthread_mutex_lock(m)) == 0) {
946		THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
947		(*m)->m_refcount--;
948		THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
949	}
950	return (ret);
951}
952
953static inline int
954mutex_self_trylock(struct pthread *curthread, pthread_mutex_t m)
955{
956	int	ret = 0;
957
958	switch (m->m_type) {
959	/* case PTHREAD_MUTEX_DEFAULT: */
960	case PTHREAD_MUTEX_ERRORCHECK:
961	case PTHREAD_MUTEX_NORMAL:
962		ret = EBUSY;
963		break;
964
965	case PTHREAD_MUTEX_RECURSIVE:
966		/* Increment the lock count: */
967		m->m_count++;
968		break;
969
970	default:
971		/* Trap invalid mutex types; */
972		ret = EINVAL;
973	}
974
975	return (ret);
976}
977
/*
 * Blocking lock attempt on a mutex the calling thread already owns.
 * Called with the mutex structure lock held.  Error-checking mutexes
 * return EDEADLK; normal mutexes intentionally deadlock the thread
 * (releasing the structure lock first); recursive mutexes bump the
 * count.  Panics on recursion into a private (libc-internal) mutex.
 */
static inline int
mutex_self_lock(struct pthread *curthread, pthread_mutex_t m)
{
	int ret = 0;

	/*
	 * Don't allow evil recursive mutexes for private use
	 * in libc and libpthread.
	 */
	if (m->m_flags & MUTEX_FLAGS_PRIVATE)
		PANIC("Recurse on a private mutex.");

	switch (m->m_type) {
	/* case PTHREAD_MUTEX_DEFAULT: */
	case PTHREAD_MUTEX_ERRORCHECK:
		/*
		 * POSIX specifies that mutexes should return EDEADLK if a
		 * recursive lock is detected.
		 */
		ret = EDEADLK;
		break;

	case PTHREAD_MUTEX_NORMAL:
		/*
		 * What SS2 define as a 'normal' mutex.  Intentionally
		 * deadlock on attempts to get a lock you already own.
		 */

		THR_SCHED_LOCK(curthread, curthread);
		THR_SET_STATE(curthread, PS_DEADLOCK);
		THR_SCHED_UNLOCK(curthread, curthread);

		/* Unlock the mutex structure: */
		THR_LOCK_RELEASE(curthread, &m->m_lock);

		/* Schedule the next thread: */
		_thr_sched_switch(curthread);
		break;

	case PTHREAD_MUTEX_RECURSIVE:
		/* Increment the lock count: */
		m->m_count++;
		break;

	default:
		/* Trap invalid mutex types; */
		ret = EINVAL;
	}

	return (ret);
}
1029
/*
 * Common unlock path shared by pthread_mutex_unlock() and the condition
 * variable code.  When add_reference is non-zero, the mutex reference
 * count is incremented after a successful unlock (used to keep the mutex
 * alive while a thread waits to reacquire it).
 *
 * Returns 0 on success, EINVAL for a NULL mutex pointer or invalid
 * protocol, or EPERM when the calling thread does not own the mutex.
 */
static int
mutex_unlock_common(pthread_mutex_t *m, int add_reference)
{
	struct pthread *curthread = _get_curthread();
	/* Mailbox of the KSE to wake after handing the mutex off, if any. */
	struct kse_mailbox *kmbx = NULL;
	int ret = 0;

	if (m == NULL || *m == NULL)
		ret = EINVAL;
	else {
		/* Lock the mutex structure: */
		THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);

		/* Process according to mutex protocol: */
		switch ((*m)->m_protocol) {
		/* Default POSIX mutex: */
		case PTHREAD_PRIO_NONE:
			/*
			 * Check if the running thread is not the owner of the
			 * mutex:
			 */
			if ((*m)->m_owner != curthread)
				ret = EPERM;
			else if (((*m)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
			    ((*m)->m_count > 0))
				/* Recursively held; just decrement the count: */
				(*m)->m_count--;
			else {
				/*
				 * Clear the count in case this is a recursive
				 * mutex.
				 */
				(*m)->m_count = 0;

				/* Remove the mutex from the thread's queue. */
				MUTEX_ASSERT_IS_OWNED(*m);
				TAILQ_REMOVE(&(*m)->m_owner->mutexq,
				    (*m), m_qe);
				MUTEX_INIT_LINK(*m);

				/*
				 * Hand off the mutex to the next waiting
				 * thread:
				 */
				kmbx = mutex_handoff(curthread, *m);
			}
			break;

		/* POSIX priority inheritance mutex: */
		case PTHREAD_PRIO_INHERIT:
			/*
			 * Check if the running thread is not the owner of the
			 * mutex:
			 */
			if ((*m)->m_owner != curthread)
				ret = EPERM;
			else if (((*m)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
			    ((*m)->m_count > 0))
				/* Recursively held; just decrement the count: */
				(*m)->m_count--;
			else {
				/*
				 * Clear the count in case this is a recursive
				 * mutex.
				 */
				(*m)->m_count = 0;

				/*
				 * Restore the thread's inherited priority and
				 * recompute the active priority (being careful
				 * not to override changes in the thread's base
				 * priority subsequent to locking the mutex).
				 */
				THR_SCHED_LOCK(curthread, curthread);
				curthread->inherited_priority =
					(*m)->m_saved_prio;
				curthread->active_priority =
				    MAX(curthread->inherited_priority,
				    curthread->base_priority);

				/*
				 * This thread now owns one less priority mutex.
				 */
				curthread->priority_mutex_count--;
				THR_SCHED_UNLOCK(curthread, curthread);

				/* Remove the mutex from the thread's queue. */
				MUTEX_ASSERT_IS_OWNED(*m);
				TAILQ_REMOVE(&(*m)->m_owner->mutexq,
				    (*m), m_qe);
				MUTEX_INIT_LINK(*m);

				/*
				 * Hand off the mutex to the next waiting
				 * thread:
				 */
				kmbx = mutex_handoff(curthread, *m);
			}
			break;

		/* POSIX priority ceiling (protection) mutex: */
		case PTHREAD_PRIO_PROTECT:
			/*
			 * Check if the running thread is not the owner of the
			 * mutex:
			 */
			if ((*m)->m_owner != curthread)
				ret = EPERM;
			else if (((*m)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
			    ((*m)->m_count > 0))
				/* Recursively held; just decrement the count: */
				(*m)->m_count--;
			else {
				/*
				 * Clear the count in case this is a recursive
				 * mutex.
				 */
				(*m)->m_count = 0;

				/*
				 * Restore the thread's inherited priority and
				 * recompute the active priority (being careful
				 * not to override changes in the thread's base
				 * priority subsequent to locking the mutex).
				 */
				THR_SCHED_LOCK(curthread, curthread);
				curthread->inherited_priority =
					(*m)->m_saved_prio;
				curthread->active_priority =
				    MAX(curthread->inherited_priority,
				    curthread->base_priority);

				/*
				 * This thread now owns one less priority mutex.
				 */
				curthread->priority_mutex_count--;
				THR_SCHED_UNLOCK(curthread, curthread);

				/* Remove the mutex from the thread's queue. */
				MUTEX_ASSERT_IS_OWNED(*m);
				TAILQ_REMOVE(&(*m)->m_owner->mutexq,
				    (*m), m_qe);
				MUTEX_INIT_LINK(*m);

				/*
				 * Hand off the mutex to the next waiting
				 * thread:
				 */
				kmbx = mutex_handoff(curthread, *m);
			}
			break;

		/* Trap invalid mutex types: */
		default:
			/* Return an invalid argument error: */
			ret = EINVAL;
			break;
		}

		if ((ret == 0) && (add_reference != 0))
			/* Increment the reference count: */
			(*m)->m_refcount++;

		/* Leave the critical region if this is a private mutex. */
		if ((ret == 0) && ((*m)->m_flags & MUTEX_FLAGS_PRIVATE))
			THR_CRITICAL_LEAVE(curthread);

		/* Unlock the mutex structure: */
		THR_LOCK_RELEASE(curthread, &(*m)->m_lock);

		/* Wake the new owner's KSE after dropping the mutex lock. */
		if (kmbx != NULL)
			kse_wakeup(kmbx);
	}

	/* Return the completion status: */
	return (ret);
}
1207
1208
1209/*
1210 * This function is called when a change in base priority occurs for
1211 * a thread that is holding or waiting for a priority protection or
 * inheritance mutex.  A change in a thread's base priority can effect
1213 * changes to active priorities of other threads and to the ordering
1214 * of mutex locking by waiting threads.
1215 *
1216 * This must be called without the target thread's scheduling lock held.
1217 */
void
_mutex_notify_priochange(struct pthread *curthread, struct pthread *pthread,
    int propagate_prio)
{
	struct pthread_mutex *m;

	/* Adjust the priorities of any owned priority mutexes: */
	if (pthread->priority_mutex_count > 0) {
		/*
		 * Rescan the mutexes owned by this thread and correct
		 * their priorities to account for this thread's change
		 * in priority.  This has the side effect of changing
		 * the thread's active priority.
		 *
		 * Be sure to lock the first mutex in the list of owned
		 * mutexes.  This acts as a barrier against another
		 * simultaneous call to change the thread's priority
		 * and from the owning thread releasing the mutex.
		 */
		m = TAILQ_FIRST(&pthread->mutexq);
		if (m != NULL) {
			THR_LOCK_ACQUIRE(curthread, &m->m_lock);
			/*
			 * Make sure the thread still owns the lock; the
			 * list head may have changed before we got the
			 * mutex lock above.
			 */
			if (m == TAILQ_FIRST(&pthread->mutexq))
				mutex_rescan_owned(curthread, pthread,
				    /* rescan all owned */ NULL);
			THR_LOCK_RELEASE(curthread, &m->m_lock);
		}
	}

	/*
	 * If this thread is waiting on a priority inheritance mutex,
	 * check for priority adjustments.  A change in priority can
	 * also cause a ceiling violation(*) for a thread waiting on
	 * a priority protection mutex; we don't perform the check here
	 * as it is done in pthread_mutex_unlock.
	 *
	 * (*) It should be noted that a priority change to a thread
	 *     _after_ taking and owning a priority ceiling mutex
	 *     does not affect ownership of that mutex; the ceiling
	 *     priority is only checked before mutex ownership occurs.
	 */
	if (propagate_prio != 0) {
		/*
		 * Lock the thread's scheduling queue.  This is a bit
		 * convoluted; the "in synchronization queue flag" can
		 * only be cleared with both the thread's scheduling and
		 * mutex locks held.  The thread's pointer to the wanted
		 * mutex is guaranteed to be valid during this time.
		 */
		THR_SCHED_LOCK(curthread, pthread);

		if (((pthread->sflags & THR_FLAGS_IN_SYNCQ) == 0) ||
		    ((m = pthread->data.mutex) == NULL))
			THR_SCHED_UNLOCK(curthread, pthread);
		else {
			/*
			 * This thread is currently waiting on a mutex; unlock
			 * the scheduling queue lock and lock the mutex.  We
			 * can't hold both at the same time because the locking
			 * order could cause a deadlock.
			 */
			THR_SCHED_UNLOCK(curthread, pthread);
			THR_LOCK_ACQUIRE(curthread, &m->m_lock);

			/*
			 * Check to make sure this thread is still in the
			 * same state (the lock above can yield the CPU to
			 * another thread or the thread may be running on
			 * another CPU).
			 */
			if (((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) &&
			    (pthread->data.mutex == m)) {
				/*
				 * Remove and reinsert this thread into
				 * the list of waiting threads to preserve
				 * decreasing priority order.
				 */
				mutex_queue_remove(m, pthread);
				mutex_queue_enq(m, pthread);

				if (m->m_protocol == PTHREAD_PRIO_INHERIT)
					/* Adjust priorities: */
					mutex_priority_adjust(curthread, m);
			}

			/* Unlock the mutex structure: */
			THR_LOCK_RELEASE(curthread, &m->m_lock);
		}
	}
}
1311
1312/*
1313 * Called when a new thread is added to the mutex waiting queue or
 * when the priority of a thread that is already in the mutex
 * waiting queue changes.
1316 *
1317 * This must be called with the mutex locked by the current thread.
1318 */
static void
mutex_priority_adjust(struct pthread *curthread, pthread_mutex_t mutex)
{
	pthread_mutex_t	m = mutex;
	struct pthread	*pthread_next, *pthread = mutex->m_owner;
	int		done, temp_prio;

	/*
	 * Calculate the mutex priority as the maximum of the highest
	 * active priority of any waiting threads and the owning thread's
	 * active priority(*).
	 *
	 * (*) Because the owning thread's current active priority may
	 *     reflect priority inherited from this mutex (and the mutex
	 *     priority may have changed) we must recalculate the active
	 *     priority based on the thread's saved inherited priority
	 *     and its base priority.
	 */
	pthread_next = TAILQ_FIRST(&m->m_queue);  /* should never be NULL */
	temp_prio = MAX(pthread_next->active_priority,
	    MAX(m->m_saved_prio, pthread->base_priority));

	/* See if this mutex really needs adjusting: */
	if (temp_prio == m->m_prio)
		/* No need to propagate the priority: */
		return;

	/* Set new priority of the mutex: */
	m->m_prio = temp_prio;

	/*
	 * Don't unlock the mutex passed in as an argument.  It is
	 * expected to be locked and unlocked by the caller.
	 */
	done = 1;
	do {
		/*
		 * Save the thread's priority before rescanning the
		 * owned mutexes so we can detect a change afterwards:
		 */
		temp_prio = pthread->active_priority;

		/*
		 * Fix the priorities for all mutexes held by the owning
		 * thread since taking this mutex.  This also has a
		 * potential side-effect of changing the thread's priority.
		 *
		 * At this point the mutex is locked by the current thread.
		 * The owning thread can't release the mutex until it is
		 * unlocked, so we should be able to safely walk its list
		 * of owned mutexes.
		 */
		mutex_rescan_owned(curthread, pthread, m);

		/*
		 * If this isn't the first time through the loop,
		 * the current mutex needs to be unlocked.
		 */
		if (done == 0)
			THR_LOCK_RELEASE(curthread, &m->m_lock);

		/* Assume we're done unless told otherwise: */
		done = 1;

		/*
		 * If the thread is currently waiting on a mutex, check
		 * to see if the thread's new priority has affected the
		 * priority of that mutex; if so we must propagate the
		 * change up the chain of owners.
		 */
		if ((temp_prio != pthread->active_priority) &&
		    ((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) &&
		    ((m = pthread->data.mutex) != NULL) &&
		    (m->m_protocol == PTHREAD_PRIO_INHERIT)) {
			/* Lock the mutex structure: */
			THR_LOCK_ACQUIRE(curthread, &m->m_lock);

			/*
			 * Make sure the thread is still waiting on the
			 * mutex (acquiring the lock may have yielded):
			 */
			if (((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) &&
			    (m == pthread->data.mutex)) {
				/*
				 * The priority for this thread has changed.
				 * Remove and reinsert this thread into the
				 * list of waiting threads to preserve
				 * decreasing priority order.
				 */
				mutex_queue_remove(m, pthread);
				mutex_queue_enq(m, pthread);

				/*
				 * Grab the waiting thread with highest
				 * priority:
				 */
				pthread_next = TAILQ_FIRST(&m->m_queue);

				/*
				 * Calculate the mutex priority as the maximum
				 * of the highest active priority of any
				 * waiting threads and the owning thread's
				 * active priority.
				 */
				temp_prio = MAX(pthread_next->active_priority,
				    MAX(m->m_saved_prio,
				    m->m_owner->base_priority));

				if (temp_prio != m->m_prio) {
					/*
					 * The priority needs to be propagated
					 * to the mutex this thread is waiting
					 * on and up to the owner of that mutex.
					 */
					m->m_prio = temp_prio;
					pthread = m->m_owner;

					/* We're not done yet: */
					done = 0;
				}
			}
			/* Only release the mutex if we're done: */
			if (done != 0)
				THR_LOCK_RELEASE(curthread, &m->m_lock);
		}
	} while (done == 0);
}
1445
/*
 * Walk the thread's owned-mutex list (starting after 'mutex', or from
 * the head when 'mutex' is NULL) recomputing the priority of each
 * priority inheritance mutex, then fix the thread's inherited and
 * active priorities and requeue it in its run queue if needed.
 */
static void
mutex_rescan_owned(struct pthread *curthread, struct pthread *pthread,
    struct pthread_mutex *mutex)
{
	struct pthread_mutex	*m;
	struct pthread		*pthread_next;
	int			active_prio, inherited_prio;

	/*
	 * Start walking the mutexes the thread has taken since
	 * taking this mutex.
	 */
	if (mutex == NULL) {
		/*
		 * A null mutex means start at the beginning of the owned
		 * mutex list.
		 */
		m = TAILQ_FIRST(&pthread->mutexq);

		/* There is no inherited priority yet. */
		inherited_prio = 0;
	} else {
		/*
		 * The caller wants to start after a specific mutex.  It
		 * is assumed that this mutex is a priority inheritance
		 * mutex and that its priority has been correctly
		 * calculated.
		 */
		m = TAILQ_NEXT(mutex, m_qe);

		/* Start inheriting priority from the specified mutex. */
		inherited_prio = mutex->m_prio;
	}
	active_prio = MAX(inherited_prio, pthread->base_priority);

	for (; m != NULL; m = TAILQ_NEXT(m, m_qe)) {
		/*
		 * We only want to deal with priority inheritance
		 * mutexes.  This might be optimized by only placing
		 * priority inheritance mutexes into the owned mutex
		 * list, but it may prove to be useful having all
		 * owned mutexes in this list.  Consider a thread
		 * exiting while holding mutexes...
		 */
		if (m->m_protocol == PTHREAD_PRIO_INHERIT) {
			/*
			 * Fix the owner's saved (inherited) priority to
			 * reflect the priority of the previous mutex.
			 */
			m->m_saved_prio = inherited_prio;

			if ((pthread_next = TAILQ_FIRST(&m->m_queue)) != NULL)
				/* Recalculate the priority of the mutex: */
				m->m_prio = MAX(active_prio,
				     pthread_next->active_priority);
			else
				m->m_prio = active_prio;

			/* Recalculate new inherited and active priorities: */
			inherited_prio = m->m_prio;
			active_prio = MAX(m->m_prio, pthread->base_priority);
		}
	}

	/*
	 * Fix the thread's inherited priority and recalculate its
	 * active priority.
	 */
	pthread->inherited_priority = inherited_prio;
	active_prio = MAX(inherited_prio, pthread->base_priority);

	if (active_prio != pthread->active_priority) {
		/* Lock the thread's scheduling queue: */
		THR_SCHED_LOCK(curthread, pthread);

		if ((pthread->flags & THR_FLAGS_IN_RUNQ) == 0) {
			/*
			 * This thread is not in a run queue.  Just set
			 * its active priority.
			 */
			pthread->active_priority = active_prio;
		}
		else {
			/*
			 * This thread is in a run queue.  Remove it from
			 * the queue before changing its priority:
			 */
			THR_RUNQ_REMOVE(pthread);

			/*
			 * POSIX states that if the priority is being
			 * lowered, the thread must be inserted at the
			 * head of the queue for its priority if it owns
			 * any priority protection or inheritance mutexes.
			 */
			if ((active_prio < pthread->active_priority) &&
			    (pthread->priority_mutex_count > 0)) {
				/* Set the new active priority. */
				pthread->active_priority = active_prio;

				THR_RUNQ_INSERT_HEAD(pthread);
			} else {
				/* Set the new active priority. */
				pthread->active_priority = active_prio;

				THR_RUNQ_INSERT_TAIL(pthread);
			}
		}
		THR_SCHED_UNLOCK(curthread, pthread);
	}
}
1557
1558void
1559_mutex_unlock_private(pthread_t pthread)
1560{
1561	struct pthread_mutex	*m, *m_next;
1562
1563	for (m = TAILQ_FIRST(&pthread->mutexq); m != NULL; m = m_next) {
1564		m_next = TAILQ_NEXT(m, m_qe);
1565		if ((m->m_flags & MUTEX_FLAGS_PRIVATE) != 0)
1566			pthread_mutex_unlock(&m);
1567	}
1568}
1569
1570/*
1571 * This is called by the current thread when it wants to back out of a
1572 * mutex_lock in order to run a signal handler.
1573 */
static void
mutex_lock_backout(void *arg)
{
	struct pthread *curthread = (struct pthread *)arg;
	struct pthread_mutex *m;

	if ((curthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) {
		/*
		 * Any other thread may clear the "in sync queue flag",
		 * but only the current thread can clear the pointer
		 * to the mutex.  So if the flag is set, we can
		 * guarantee that the pointer to the mutex is valid.
		 * The only problem may be if the mutex is destroyed
		 * out from under us, but that should be considered
		 * an application bug.
		 */
		m = curthread->data.mutex;

		/* Lock the mutex structure: */
		THR_LOCK_ACQUIRE(curthread, &m->m_lock);


		/*
		 * Check to make sure this thread doesn't already own
		 * the mutex.  Since mutexes are unlocked with direct
		 * handoffs, it is possible the previous owner gave it
		 * to us after we checked the sync queue flag and before
		 * we locked the mutex structure.
		 */
		if (m->m_owner == curthread) {
			/*
			 * We were handed the mutex in the meantime; give
			 * it back via the normal unlock path (which takes
			 * the mutex structure lock itself).
			 */
			THR_LOCK_RELEASE(curthread, &m->m_lock);
			mutex_unlock_common(&m, /* add_reference */ 0);
		} else {
			/*
			 * Remove ourselves from the mutex queue and
			 * clear the pointer to the mutex.  We may no
			 * longer be in the mutex queue, but the removal
			 * function will DTRT.
			 */
			mutex_queue_remove(m, curthread);
			curthread->data.mutex = NULL;
			THR_LOCK_RELEASE(curthread, &m->m_lock);
		}
	}
	/* No need to call this again. */
	curthread->sigbackout = NULL;
}
1621
1622/*
1623 * Dequeue a waiting thread from the head of a mutex queue in descending
1624 * priority order.
1625 *
1626 * In order to properly dequeue a thread from the mutex queue and
1627 * make it runnable without the possibility of errant wakeups, it
1628 * is necessary to lock the thread's scheduling queue while also
1629 * holding the mutex lock.
1630 */
static struct kse_mailbox *
mutex_handoff(struct pthread *curthread, struct pthread_mutex *mutex)
{
	struct kse_mailbox *kmbx = NULL;
	struct pthread *pthread;

	/* Keep dequeueing until we find a valid thread: */
	mutex->m_owner = NULL;
	pthread = TAILQ_FIRST(&mutex->m_queue);
	while (pthread != NULL) {
		/* Take the thread's scheduling lock: */
		THR_SCHED_LOCK(curthread, pthread);

		/* Remove the thread from the mutex queue: */
		TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
		pthread->sflags &= ~THR_FLAGS_IN_SYNCQ;

		/*
		 * Establish ownership according to the mutex protocol.
		 * For PTHREAD_PRIO_PROTECT this can fail (ceiling
		 * violation), in which case m_owner stays NULL and we
		 * move on to the next waiter below.
		 */
		switch (mutex->m_protocol) {
		case PTHREAD_PRIO_NONE:
			/*
			 * Assign the new owner and add the mutex to the
			 * thread's list of owned mutexes.
			 */
			mutex->m_owner = pthread;
			TAILQ_INSERT_TAIL(&pthread->mutexq, mutex, m_qe);
			break;

		case PTHREAD_PRIO_INHERIT:
			/*
			 * Assign the new owner and add the mutex to the
			 * thread's list of owned mutexes.
			 */
			mutex->m_owner = pthread;
			TAILQ_INSERT_TAIL(&pthread->mutexq, mutex, m_qe);

			/* Track number of priority mutexes owned: */
			pthread->priority_mutex_count++;

			/*
			 * Set the priority of the mutex.  Since our waiting
			 * threads are in descending priority order, the
			 * priority of the mutex becomes the active priority
			 * of the thread we just dequeued.
			 */
			mutex->m_prio = pthread->active_priority;

			/* Save the owning thread's inherited priority: */
			mutex->m_saved_prio = pthread->inherited_priority;

			/*
			 * The owning thread's inherited priority now becomes
			 * its active priority (the priority of the mutex).
			 */
			pthread->inherited_priority = mutex->m_prio;
			break;

		case PTHREAD_PRIO_PROTECT:
			if (pthread->active_priority > mutex->m_prio) {
				/*
				 * Either the mutex ceiling priority has
				 * been lowered and/or this thread's priority
				 * has been raised subsequent to the thread
				 * being queued on the waiting list.
				 */
				pthread->error = EINVAL;
			}
			else {
				/*
				 * Assign the new owner and add the mutex
				 * to the thread's list of owned mutexes.
				 */
				mutex->m_owner = pthread;
				TAILQ_INSERT_TAIL(&pthread->mutexq,
				    mutex, m_qe);

				/* Track number of priority mutexes owned: */
				pthread->priority_mutex_count++;

				/*
				 * Save the owning thread's inherited
				 * priority:
				 */
				mutex->m_saved_prio =
				    pthread->inherited_priority;

				/*
				 * The owning thread inherits the ceiling
				 * priority of the mutex and executes at
				 * that priority:
				 */
				pthread->inherited_priority = mutex->m_prio;
				pthread->active_priority = mutex->m_prio;

			}
			break;
		}

		/* Make the thread runnable and unlock the scheduling queue: */
		kmbx = _thr_setrunnable_unlocked(pthread);

		/* Add a preemption point. */
		if ((curthread->kseg == pthread->kseg) &&
		    (pthread->active_priority > curthread->active_priority))
			curthread->critical_yield = 1;

		if (mutex->m_owner == pthread) {
			/* We're done; a valid owner was found. */
			if (mutex->m_flags & MUTEX_FLAGS_PRIVATE)
				THR_CRITICAL_ENTER(pthread);
			THR_SCHED_UNLOCK(curthread, pthread);
			break;
		}
		THR_SCHED_UNLOCK(curthread, pthread);
		/* Get the next thread from the waiting queue: */
		pthread = TAILQ_NEXT(pthread, sqe);
	}

	if ((pthread == NULL) && (mutex->m_protocol == PTHREAD_PRIO_INHERIT))
		/* No owner and no waiters; this mutex has no priority: */
		mutex->m_prio = 0;
	return (kmbx);
}
1757
1758/*
1759 * Dequeue a waiting thread from the head of a mutex queue in descending
1760 * priority order.
1761 */
1762static inline pthread_t
1763mutex_queue_deq(struct pthread_mutex *mutex)
1764{
1765	pthread_t pthread;
1766
1767	while ((pthread = TAILQ_FIRST(&mutex->m_queue)) != NULL) {
1768		TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
1769		pthread->sflags &= ~THR_FLAGS_IN_SYNCQ;
1770
1771		/*
1772		 * Only exit the loop if the thread hasn't been
1773		 * cancelled.
1774		 */
1775		if (pthread->interrupted == 0)
1776			break;
1777	}
1778
1779	return (pthread);
1780}
1781
1782/*
1783 * Remove a waiting thread from a mutex queue in descending priority order.
1784 */
1785static inline void
1786mutex_queue_remove(pthread_mutex_t mutex, pthread_t pthread)
1787{
1788	if ((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) {
1789		TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
1790		pthread->sflags &= ~THR_FLAGS_IN_SYNCQ;
1791	}
1792}
1793
1794/*
1795 * Enqueue a waiting thread to a queue in descending priority order.
1796 */
1797static inline void
1798mutex_queue_enq(pthread_mutex_t mutex, pthread_t pthread)
1799{
1800	pthread_t tid = TAILQ_LAST(&mutex->m_queue, mutex_head);
1801
1802	THR_ASSERT_NOT_IN_SYNCQ(pthread);
1803	/*
1804	 * For the common case of all threads having equal priority,
1805	 * we perform a quick check against the priority of the thread
1806	 * at the tail of the queue.
1807	 */
1808	if ((tid == NULL) || (pthread->active_priority <= tid->active_priority))
1809		TAILQ_INSERT_TAIL(&mutex->m_queue, pthread, sqe);
1810	else {
1811		tid = TAILQ_FIRST(&mutex->m_queue);
1812		while (pthread->active_priority <= tid->active_priority)
1813			tid = TAILQ_NEXT(tid, sqe);
1814		TAILQ_INSERT_BEFORE(tid, pthread, sqe);
1815	}
1816	pthread->sflags |= THR_FLAGS_IN_SYNCQ;
1817}
1818