thr_mutex.c revision 115080
1/*
2 * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 *    must display the following acknowledgement:
15 *	This product includes software developed by John Birrell.
16 * 4. Neither the name of the author nor the names of any co-contributors
17 *    may be used to endorse or promote products derived from this software
18 *    without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * SUCH DAMAGE.
31 *
32 * $FreeBSD: head/lib/libkse/thread/thr_mutex.c 115080 2003-05-16 19:58:30Z deischen $
33 */
34#include <stdlib.h>
35#include <errno.h>
36#include <string.h>
37#include <sys/param.h>
38#include <sys/queue.h>
39#include <pthread.h>
40#include "thr_private.h"
41
#if defined(_PTHREADS_INVARIANTS)
/*
 * Debug helpers for invariant-checking builds.  The tqe_prev pointer
 * doubles as an "is linked on an owner's mutexq" flag: it is NULL
 * whenever the mutex is not on any thread's owned-mutex list.
 */
#define MUTEX_INIT_LINK(m) 		do {		\
	(m)->m_qe.tqe_prev = NULL;			\
	(m)->m_qe.tqe_next = NULL;			\
} while (0)
#define MUTEX_ASSERT_IS_OWNED(m)	do {		\
	if ((m)->m_qe.tqe_prev == NULL)			\
		PANIC("mutex is not on list");		\
} while (0)
#define MUTEX_ASSERT_NOT_OWNED(m)	do {		\
	if (((m)->m_qe.tqe_prev != NULL) ||		\
	    ((m)->m_qe.tqe_next != NULL))		\
		PANIC("mutex is on list");		\
} while (0)
/*
 * Note: no trailing semicolon after while (0) -- a stray semicolon
 * defeats the do/while(0) idiom and breaks use in if/else bodies.
 */
#define	THR_ASSERT_NOT_IN_SYNCQ(thr)	do {		\
	THR_ASSERT(((thr)->sflags & THR_FLAGS_IN_SYNCQ) == 0, \
	    "thread in syncq when it shouldn't be.");	\
} while (0)
#else
#define MUTEX_INIT_LINK(m)
#define MUTEX_ASSERT_IS_OWNED(m)
#define MUTEX_ASSERT_NOT_OWNED(m)
#define	THR_ASSERT_NOT_IN_SYNCQ(thr)
#endif
66
/*
 * Prototypes
 */
static void		mutex_handoff(struct pthread *, struct pthread_mutex *);
static inline int	mutex_self_trylock(struct pthread *, pthread_mutex_t);
static inline int	mutex_self_lock(struct pthread *, pthread_mutex_t);
static int		mutex_unlock_common(pthread_mutex_t *, int);
static void		mutex_priority_adjust(struct pthread *, pthread_mutex_t);
static void		mutex_rescan_owned (struct pthread *, struct pthread *,
			    struct pthread_mutex *);
static inline pthread_t	mutex_queue_deq(pthread_mutex_t);
static inline void	mutex_queue_remove(pthread_mutex_t, pthread_t);
static inline void	mutex_queue_enq(pthread_mutex_t, pthread_t);


/*
 * Attributes used when libc-internal callers finish the initialization
 * of a statically-allocated mutex (marked private / delete safe).
 */
static struct pthread_mutex_attr	static_mutex_attr =
    PTHREAD_MUTEXATTR_STATIC_INITIALIZER;
static pthread_mutexattr_t		static_mattr = &static_mutex_attr;

/* Single underscore versions provided for libc internal usage: */
__weak_reference(__pthread_mutex_lock, pthread_mutex_lock);
__weak_reference(__pthread_mutex_trylock, pthread_mutex_trylock);

/* No difference between libc and application usage of these: */
__weak_reference(_pthread_mutex_init, pthread_mutex_init);
__weak_reference(_pthread_mutex_destroy, pthread_mutex_destroy);
__weak_reference(_pthread_mutex_unlock, pthread_mutex_unlock);
94
95
96
97int
98_pthread_mutex_init(pthread_mutex_t *mutex,
99    const pthread_mutexattr_t *mutex_attr)
100{
101	struct pthread_mutex *pmutex;
102	enum pthread_mutextype type;
103	int		protocol;
104	int		ceiling;
105	int		flags;
106	int		ret = 0;
107
108	if (mutex == NULL)
109		ret = EINVAL;
110
111	/* Check if default mutex attributes: */
112	else if (mutex_attr == NULL || *mutex_attr == NULL) {
113		/* Default to a (error checking) POSIX mutex: */
114		type = PTHREAD_MUTEX_ERRORCHECK;
115		protocol = PTHREAD_PRIO_NONE;
116		ceiling = THR_MAX_PRIORITY;
117		flags = 0;
118	}
119
120	/* Check mutex type: */
121	else if (((*mutex_attr)->m_type < PTHREAD_MUTEX_ERRORCHECK) ||
122	    ((*mutex_attr)->m_type >= MUTEX_TYPE_MAX))
123		/* Return an invalid argument error: */
124		ret = EINVAL;
125
126	/* Check mutex protocol: */
127	else if (((*mutex_attr)->m_protocol < PTHREAD_PRIO_NONE) ||
128	    ((*mutex_attr)->m_protocol > PTHREAD_MUTEX_RECURSIVE))
129		/* Return an invalid argument error: */
130		ret = EINVAL;
131
132	else {
133		/* Use the requested mutex type and protocol: */
134		type = (*mutex_attr)->m_type;
135		protocol = (*mutex_attr)->m_protocol;
136		ceiling = (*mutex_attr)->m_ceiling;
137		flags = (*mutex_attr)->m_flags;
138	}
139
140	/* Check no errors so far: */
141	if (ret == 0) {
142		if ((pmutex = (pthread_mutex_t)
143		    malloc(sizeof(struct pthread_mutex))) == NULL)
144			ret = ENOMEM;
145		else if (_lock_init(&pmutex->m_lock, LCK_ADAPTIVE,
146		    _thr_lock_wait, _thr_lock_wakeup) != 0) {
147			free(pmutex);
148			*mutex = NULL;
149			ret = ENOMEM;
150		} else {
151			/* Set the mutex flags: */
152			pmutex->m_flags = flags;
153
154			/* Process according to mutex type: */
155			switch (type) {
156			/* case PTHREAD_MUTEX_DEFAULT: */
157			case PTHREAD_MUTEX_ERRORCHECK:
158			case PTHREAD_MUTEX_NORMAL:
159				/* Nothing to do here. */
160				break;
161
162			/* Single UNIX Spec 2 recursive mutex: */
163			case PTHREAD_MUTEX_RECURSIVE:
164				/* Reset the mutex count: */
165				pmutex->m_count = 0;
166				break;
167
168			/* Trap invalid mutex types: */
169			default:
170				/* Return an invalid argument error: */
171				ret = EINVAL;
172				break;
173			}
174			if (ret == 0) {
175				/* Initialise the rest of the mutex: */
176				TAILQ_INIT(&pmutex->m_queue);
177				pmutex->m_flags |= MUTEX_FLAGS_INITED;
178				pmutex->m_owner = NULL;
179				pmutex->m_type = type;
180				pmutex->m_protocol = protocol;
181				pmutex->m_refcount = 0;
182				if (protocol == PTHREAD_PRIO_PROTECT)
183					pmutex->m_prio = ceiling;
184				else
185					pmutex->m_prio = -1;
186				pmutex->m_saved_prio = 0;
187				MUTEX_INIT_LINK(pmutex);
188				*mutex = pmutex;
189			} else {
190				free(pmutex);
191				*mutex = NULL;
192			}
193		}
194	}
195	/* Return the completion status: */
196	return (ret);
197}
198
/*
 * Destroy a mutex and release its storage.  Returns EINVAL for a NULL
 * or uninitialized mutex, EBUSY if the mutex is currently locked, has
 * queued waiters, or is referenced (condition-variable use), and 0 on
 * success.  On success the caller's pointer is set to NULL before the
 * storage is freed.
 */
int
_pthread_mutex_destroy(pthread_mutex_t *mutex)
{
	struct pthread	*curthread = _get_curthread();
	pthread_mutex_t m;
	int ret = 0;

	if (mutex == NULL || *mutex == NULL)
		ret = EINVAL;
	else {
		/* Lock the mutex structure: */
		THR_LOCK_ACQUIRE(curthread, &(*mutex)->m_lock);

		/*
		 * Check to see if this mutex is in use:
		 */
		if (((*mutex)->m_owner != NULL) ||
		    (TAILQ_FIRST(&(*mutex)->m_queue) != NULL) ||
		    ((*mutex)->m_refcount != 0)) {
			ret = EBUSY;

			/* Unlock the mutex structure: */
			THR_LOCK_RELEASE(curthread, &(*mutex)->m_lock);
		} else {
			/*
			 * Save a pointer to the mutex so it can be free'd
			 * and set the caller's pointer to NULL:
			 */
			m = *mutex;
			*mutex = NULL;

			/* Unlock the mutex structure: */
			THR_LOCK_RELEASE(curthread, &m->m_lock);

			/*
			 * Free the memory allocated for the mutex
			 * structure:
			 */
			MUTEX_ASSERT_NOT_OWNED(m);
			free(m);
		}
	}

	/* Return the completion status: */
	return (ret);
}
245
246static int
247init_static(struct pthread *thread, pthread_mutex_t *mutex)
248{
249	int ret;
250
251	THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);
252
253	if (*mutex == NULL)
254		ret = pthread_mutex_init(mutex, NULL);
255	else
256		ret = 0;
257
258	THR_LOCK_RELEASE(thread, &_mutex_static_lock);
259
260	return (ret);
261}
262
263static int
264init_static_private(struct pthread *thread, pthread_mutex_t *mutex)
265{
266	int ret;
267
268	THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);
269
270	if (*mutex == NULL)
271		ret = pthread_mutex_init(mutex, &static_mattr);
272	else
273		ret = 0;
274
275	THR_LOCK_RELEASE(thread, &_mutex_static_lock);
276
277	return (ret);
278}
279
280static int
281mutex_trylock_common(struct pthread *curthread, pthread_mutex_t *mutex)
282{
283	int ret = 0;
284
285	THR_ASSERT((mutex != NULL) && (*mutex != NULL),
286	    "Uninitialized mutex in pthread_mutex_trylock_basic");
287
288	/* Lock the mutex structure: */
289	THR_LOCK_ACQUIRE(curthread, &(*mutex)->m_lock);
290
291	/*
292	 * If the mutex was statically allocated, properly
293	 * initialize the tail queue.
294	 */
295	if (((*mutex)->m_flags & MUTEX_FLAGS_INITED) == 0) {
296		TAILQ_INIT(&(*mutex)->m_queue);
297		MUTEX_INIT_LINK(*mutex);
298		(*mutex)->m_flags |= MUTEX_FLAGS_INITED;
299	}
300
301	/* Process according to mutex type: */
302	switch ((*mutex)->m_protocol) {
303	/* Default POSIX mutex: */
304	case PTHREAD_PRIO_NONE:
305		/* Check if this mutex is not locked: */
306		if ((*mutex)->m_owner == NULL) {
307			/* Lock the mutex for the running thread: */
308			(*mutex)->m_owner = curthread;
309
310			/* Add to the list of owned mutexes: */
311			MUTEX_ASSERT_NOT_OWNED(*mutex);
312			TAILQ_INSERT_TAIL(&curthread->mutexq,
313			    (*mutex), m_qe);
314		} else if ((*mutex)->m_owner == curthread)
315			ret = mutex_self_trylock(curthread, *mutex);
316		else
317			/* Return a busy error: */
318			ret = EBUSY;
319		break;
320
321	/* POSIX priority inheritence mutex: */
322	case PTHREAD_PRIO_INHERIT:
323		/* Check if this mutex is not locked: */
324		if ((*mutex)->m_owner == NULL) {
325			/* Lock the mutex for the running thread: */
326			(*mutex)->m_owner = curthread;
327
328			THR_SCHED_LOCK(curthread, curthread);
329			/* Track number of priority mutexes owned: */
330			curthread->priority_mutex_count++;
331
332			/*
333			 * The mutex takes on the attributes of the
334			 * running thread when there are no waiters.
335			 */
336			(*mutex)->m_prio = curthread->active_priority;
337			(*mutex)->m_saved_prio =
338			    curthread->inherited_priority;
339			THR_SCHED_UNLOCK(curthread, curthread);
340
341			/* Add to the list of owned mutexes: */
342			MUTEX_ASSERT_NOT_OWNED(*mutex);
343			TAILQ_INSERT_TAIL(&curthread->mutexq,
344			    (*mutex), m_qe);
345		} else if ((*mutex)->m_owner == curthread)
346			ret = mutex_self_trylock(curthread, *mutex);
347		else
348			/* Return a busy error: */
349			ret = EBUSY;
350		break;
351
352	/* POSIX priority protection mutex: */
353	case PTHREAD_PRIO_PROTECT:
354		/* Check for a priority ceiling violation: */
355		if (curthread->active_priority > (*mutex)->m_prio)
356			ret = EINVAL;
357
358		/* Check if this mutex is not locked: */
359		else if ((*mutex)->m_owner == NULL) {
360			/* Lock the mutex for the running thread: */
361			(*mutex)->m_owner = curthread;
362
363			THR_SCHED_LOCK(curthread, curthread);
364			/* Track number of priority mutexes owned: */
365			curthread->priority_mutex_count++;
366
367			/*
368			 * The running thread inherits the ceiling
369			 * priority of the mutex and executes at that
370			 * priority.
371			 */
372			curthread->active_priority = (*mutex)->m_prio;
373			(*mutex)->m_saved_prio =
374			    curthread->inherited_priority;
375			curthread->inherited_priority =
376			    (*mutex)->m_prio;
377			THR_SCHED_UNLOCK(curthread, curthread);
378			/* Add to the list of owned mutexes: */
379			MUTEX_ASSERT_NOT_OWNED(*mutex);
380			TAILQ_INSERT_TAIL(&curthread->mutexq,
381			    (*mutex), m_qe);
382		} else if ((*mutex)->m_owner == curthread)
383			ret = mutex_self_trylock(curthread, *mutex);
384		else
385			/* Return a busy error: */
386			ret = EBUSY;
387		break;
388
389	/* Trap invalid mutex types: */
390	default:
391		/* Return an invalid argument error: */
392		ret = EINVAL;
393		break;
394	}
395
396	/* Unlock the mutex structure: */
397	THR_LOCK_RELEASE(curthread, &(*mutex)->m_lock);
398
399	/* Return the completion status: */
400	return (ret);
401}
402
403int
404__pthread_mutex_trylock(pthread_mutex_t *mutex)
405{
406	struct pthread *curthread = _get_curthread();
407	int ret = 0;
408
409	if (mutex == NULL)
410		ret = EINVAL;
411
412	/*
413	 * If the mutex is statically initialized, perform the dynamic
414	 * initialization:
415	 */
416	else if ((*mutex != NULL) ||
417	    ((ret = init_static(curthread, mutex)) == 0))
418		ret = mutex_trylock_common(curthread, mutex);
419
420	return (ret);
421}
422
423int
424_pthread_mutex_trylock(pthread_mutex_t *mutex)
425{
426	struct pthread	*curthread = _get_curthread();
427	int	ret = 0;
428
429	if (mutex == NULL)
430		ret = EINVAL;
431
432	/*
433	 * If the mutex is statically initialized, perform the dynamic
434	 * initialization marking the mutex private (delete safe):
435	 */
436	else if ((*mutex != NULL) ||
437	    ((ret = init_static_private(curthread, mutex)) == 0))
438		ret = mutex_trylock_common(curthread, mutex);
439
440	return (ret);
441}
442
443static int
444mutex_lock_common(struct pthread *curthread, pthread_mutex_t *m)
445{
446	int	ret = 0;
447
448	THR_ASSERT((m != NULL) && (*m != NULL),
449	    "Uninitialized mutex in pthread_mutex_trylock_basic");
450
451	/* Reset the interrupted flag: */
452	curthread->interrupted = 0;
453
454	/*
455	 * Enter a loop waiting to become the mutex owner.  We need a
456	 * loop in case the waiting thread is interrupted by a signal
457	 * to execute a signal handler.  It is not (currently) possible
458	 * to remain in the waiting queue while running a handler.
459	 * Instead, the thread is interrupted and backed out of the
460	 * waiting queue prior to executing the signal handler.
461	 */
462	do {
463		/* Lock the mutex structure: */
464		THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
465
466		/*
467		 * If the mutex was statically allocated, properly
468		 * initialize the tail queue.
469		 */
470		if (((*m)->m_flags & MUTEX_FLAGS_INITED) == 0) {
471			TAILQ_INIT(&(*m)->m_queue);
472			(*m)->m_flags |= MUTEX_FLAGS_INITED;
473			MUTEX_INIT_LINK(*m);
474		}
475
476		/* Process according to mutex type: */
477		switch ((*m)->m_protocol) {
478		/* Default POSIX mutex: */
479		case PTHREAD_PRIO_NONE:
480			if ((*m)->m_owner == NULL) {
481				/* Lock the mutex for this thread: */
482				(*m)->m_owner = curthread;
483
484				/* Add to the list of owned mutexes: */
485				MUTEX_ASSERT_NOT_OWNED(*m);
486				TAILQ_INSERT_TAIL(&curthread->mutexq,
487				    (*m), m_qe);
488
489				/* Unlock the mutex structure: */
490				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
491			} else if ((*m)->m_owner == curthread) {
492				ret = mutex_self_lock(curthread, *m);
493
494				/* Unlock the mutex structure: */
495				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
496			} else {
497				/*
498				 * Join the queue of threads waiting to lock
499				 * the mutex and save a pointer to the mutex.
500				 */
501				mutex_queue_enq(*m, curthread);
502				curthread->data.mutex = *m;
503				/*
504				 * This thread is active and is in a critical
505				 * region (holding the mutex lock); we should
506				 * be able to safely set the state.
507				 */
508				THR_SCHED_LOCK(curthread, curthread);
509				THR_SET_STATE(curthread, PS_MUTEX_WAIT);
510				THR_SCHED_UNLOCK(curthread, curthread);
511
512				/* Unlock the mutex structure: */
513				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
514
515				/* Schedule the next thread: */
516				_thr_sched_switch(curthread);
517			}
518			break;
519
520		/* POSIX priority inheritence mutex: */
521		case PTHREAD_PRIO_INHERIT:
522			/* Check if this mutex is not locked: */
523			if ((*m)->m_owner == NULL) {
524				/* Lock the mutex for this thread: */
525				(*m)->m_owner = curthread;
526
527				THR_SCHED_LOCK(curthread, curthread);
528				/* Track number of priority mutexes owned: */
529				curthread->priority_mutex_count++;
530
531				/*
532				 * The mutex takes on attributes of the
533				 * running thread when there are no waiters.
534				 * Make sure the thread's scheduling lock is
535				 * held while priorities are adjusted.
536				 */
537				(*m)->m_prio = curthread->active_priority;
538				(*m)->m_saved_prio =
539				    curthread->inherited_priority;
540				curthread->inherited_priority = (*m)->m_prio;
541				THR_SCHED_UNLOCK(curthread, curthread);
542
543				/* Add to the list of owned mutexes: */
544				MUTEX_ASSERT_NOT_OWNED(*m);
545				TAILQ_INSERT_TAIL(&curthread->mutexq,
546				    (*m), m_qe);
547
548				/* Unlock the mutex structure: */
549				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
550			} else if ((*m)->m_owner == curthread) {
551				ret = mutex_self_lock(curthread, *m);
552
553				/* Unlock the mutex structure: */
554				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
555			} else {
556				/*
557				 * Join the queue of threads waiting to lock
558				 * the mutex and save a pointer to the mutex.
559				 */
560				mutex_queue_enq(*m, curthread);
561				curthread->data.mutex = *m;
562
563				/*
564				 * This thread is active and is in a critical
565				 * region (holding the mutex lock); we should
566				 * be able to safely set the state.
567				 */
568				if (curthread->active_priority > (*m)->m_prio)
569					/* Adjust priorities: */
570					mutex_priority_adjust(curthread, *m);
571
572				THR_SCHED_LOCK(curthread, curthread);
573				THR_SET_STATE(curthread, PS_MUTEX_WAIT);
574				THR_SCHED_UNLOCK(curthread, curthread);
575
576				/* Unlock the mutex structure: */
577				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
578
579				/* Schedule the next thread: */
580				_thr_sched_switch(curthread);
581			}
582			break;
583
584		/* POSIX priority protection mutex: */
585		case PTHREAD_PRIO_PROTECT:
586			/* Check for a priority ceiling violation: */
587			if (curthread->active_priority > (*m)->m_prio) {
588				/* Unlock the mutex structure: */
589				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
590				ret = EINVAL;
591			}
592			/* Check if this mutex is not locked: */
593			else if ((*m)->m_owner == NULL) {
594				/*
595				 * Lock the mutex for the running
596				 * thread:
597				 */
598				(*m)->m_owner = curthread;
599
600				THR_SCHED_LOCK(curthread, curthread);
601				/* Track number of priority mutexes owned: */
602				curthread->priority_mutex_count++;
603
604				/*
605				 * The running thread inherits the ceiling
606				 * priority of the mutex and executes at that
607				 * priority.  Make sure the thread's
608				 * scheduling lock is held while priorities
609				 * are adjusted.
610				 */
611				curthread->active_priority = (*m)->m_prio;
612				(*m)->m_saved_prio =
613				    curthread->inherited_priority;
614				curthread->inherited_priority = (*m)->m_prio;
615				THR_SCHED_UNLOCK(curthread, curthread);
616
617				/* Add to the list of owned mutexes: */
618				MUTEX_ASSERT_NOT_OWNED(*m);
619				TAILQ_INSERT_TAIL(&curthread->mutexq,
620				    (*m), m_qe);
621
622				/* Unlock the mutex structure: */
623				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
624			} else if ((*m)->m_owner == curthread) {
625				ret = mutex_self_lock(curthread, *m);
626
627				/* Unlock the mutex structure: */
628				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
629			} else {
630				/*
631				 * Join the queue of threads waiting to lock
632				 * the mutex and save a pointer to the mutex.
633				 */
634				mutex_queue_enq(*m, curthread);
635				curthread->data.mutex = *m;
636
637				/* Clear any previous error: */
638				curthread->error = 0;
639
640				/*
641				 * This thread is active and is in a critical
642				 * region (holding the mutex lock); we should
643				 * be able to safely set the state.
644				 */
645
646				THR_SCHED_LOCK(curthread, curthread);
647				THR_SET_STATE(curthread, PS_MUTEX_WAIT);
648				THR_SCHED_UNLOCK(curthread, curthread);
649
650				/* Unlock the mutex structure: */
651				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
652
653				/* Schedule the next thread: */
654				_thr_sched_switch(curthread);
655				/*
656				 * The threads priority may have changed while
657				 * waiting for the mutex causing a ceiling
658				 * violation.
659				 */
660				ret = curthread->error;
661				curthread->error = 0;
662			}
663			break;
664
665		/* Trap invalid mutex types: */
666		default:
667			/* Unlock the mutex structure: */
668			THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
669
670			/* Return an invalid argument error: */
671			ret = EINVAL;
672			break;
673		}
674
675	} while (((*m)->m_owner != curthread) && (ret == 0) &&
676	    (curthread->interrupted == 0));
677
678	/*
679	 * Check to see if this thread was interrupted and
680	 * is still in the mutex queue of waiting threads:
681	 */
682	if (curthread->interrupted != 0) {
683		/* Remove this thread from the mutex queue. */
684		THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
685		if (THR_IN_SYNCQ(curthread))
686			mutex_queue_remove(*m, curthread);
687		THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
688
689		/* Check for asynchronous cancellation. */
690		if (curthread->continuation != NULL)
691			curthread->continuation((void *) curthread);
692	}
693
694	/* Return the completion status: */
695	return (ret);
696}
697
698int
699__pthread_mutex_lock(pthread_mutex_t *m)
700{
701	struct pthread *curthread;
702	int	ret = 0;
703
704	if (_thr_initial == NULL)
705		_libpthread_init(NULL);
706
707	curthread = _get_curthread();
708	if (m == NULL)
709		ret = EINVAL;
710
711	/*
712	 * If the mutex is statically initialized, perform the dynamic
713	 * initialization:
714	 */
715	else if ((*m != NULL) || ((ret = init_static(curthread, m)) == 0))
716		ret = mutex_lock_common(curthread, m);
717
718	return (ret);
719}
720
721int
722_pthread_mutex_lock(pthread_mutex_t *m)
723{
724	struct pthread *curthread;
725	int	ret = 0;
726
727	if (_thr_initial == NULL)
728		_libpthread_init(NULL);
729	curthread = _get_curthread();
730
731	if (m == NULL)
732		ret = EINVAL;
733
734	/*
735	 * If the mutex is statically initialized, perform the dynamic
736	 * initialization marking it private (delete safe):
737	 */
738	else if ((*m != NULL) ||
739	    ((ret = init_static_private(curthread, m)) == 0))
740		ret = mutex_lock_common(curthread, m);
741
742	return (ret);
743}
744
/*
 * Unlock a mutex; no condition-variable reference is taken.
 */
int
_pthread_mutex_unlock(pthread_mutex_t *m)
{
	int ret;

	ret = mutex_unlock_common(m, /* add reference */ 0);
	return (ret);
}
750
/*
 * Unlock a mutex on behalf of a condition-variable wait; a reference
 * is added so the mutex cannot be destroyed while the wait holds it.
 */
int
_mutex_cv_unlock(pthread_mutex_t *m)
{
	int ret;

	ret = mutex_unlock_common(m, /* add reference */ 1);
	return (ret);
}
756
757int
758_mutex_cv_lock(pthread_mutex_t *m)
759{
760	struct  pthread *curthread;
761	int	ret;
762
763	curthread = _get_curthread();
764	if ((ret = _pthread_mutex_lock(m)) == 0) {
765		THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
766		(*m)->m_refcount--;
767		THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
768	}
769	return (ret);
770}
771
772static inline int
773mutex_self_trylock(struct pthread *curthread, pthread_mutex_t m)
774{
775	int	ret = 0;
776
777	switch (m->m_type) {
778	/* case PTHREAD_MUTEX_DEFAULT: */
779	case PTHREAD_MUTEX_ERRORCHECK:
780	case PTHREAD_MUTEX_NORMAL:
781		/*
782		 * POSIX specifies that mutexes should return EDEADLK if a
783		 * recursive lock is detected.
784		 */
785		if (m->m_owner == curthread)
786			ret = EDEADLK;
787		else
788			ret = EBUSY;
789		break;
790
791	case PTHREAD_MUTEX_RECURSIVE:
792		/* Increment the lock count: */
793		m->m_count++;
794		break;
795
796	default:
797		/* Trap invalid mutex types; */
798		ret = EINVAL;
799	}
800
801	return (ret);
802}
803
/*
 * Handle a blocking lock on a mutex the calling thread already owns.
 * Entered with the mutex structure lock held; every case except
 * PTHREAD_MUTEX_NORMAL returns with that lock still held (the caller
 * releases it).  The NORMAL case releases the lock itself and parks
 * the thread in PS_DEADLOCK, never returning to run user code.
 */
static inline int
mutex_self_lock(struct pthread *curthread, pthread_mutex_t m)
{
	int ret = 0;

	switch (m->m_type) {
	/* case PTHREAD_MUTEX_DEFAULT: */
	case PTHREAD_MUTEX_ERRORCHECK:
		/*
		 * POSIX specifies that mutexes should return EDEADLK if a
		 * recursive lock is detected.
		 */
		ret = EDEADLK;
		break;

	case PTHREAD_MUTEX_NORMAL:
		/*
		 * What SS2 define as a 'normal' mutex.  Intentionally
		 * deadlock on attempts to get a lock you already own.
		 */

		/*
		 * Order matters here: mark the thread deadlocked under
		 * its scheduling lock, release the mutex structure lock,
		 * then yield; the thread is never made runnable again.
		 */
		THR_SCHED_LOCK(curthread, curthread);
		THR_SET_STATE(curthread, PS_DEADLOCK);
		THR_SCHED_UNLOCK(curthread, curthread);

		/* Unlock the mutex structure: */
		THR_LOCK_RELEASE(curthread, &m->m_lock);

		/* Schedule the next thread: */
		_thr_sched_switch(curthread);
		break;

	case PTHREAD_MUTEX_RECURSIVE:
		/* Increment the lock count: */
		m->m_count++;
		break;

	default:
		/* Trap invalid mutex types; */
		ret = EINVAL;
	}

	return (ret);
}
848
/*
 * Common unlock path for all three mutex protocols.  The caller must
 * own the mutex; otherwise EINVAL (no owner) or EPERM (other owner) is
 * returned.  For recursive mutexes a positive count is simply
 * decremented.  On a real release the mutex is unlinked from the
 * owner's queue, any boosted/inherited priority is restored, and the
 * mutex is handed directly to the next queued waiter.  When
 * add_reference is non-zero (condition-variable use) m_refcount is
 * bumped so the mutex cannot be destroyed while referenced.
 */
static int
mutex_unlock_common(pthread_mutex_t *m, int add_reference)
{
	struct pthread	*curthread = _get_curthread();
	int	ret = 0;

	if (m == NULL || *m == NULL)
		ret = EINVAL;
	else {
		/* Lock the mutex structure: */
		THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);

		/* Process according to mutex type: */
		switch ((*m)->m_protocol) {
		/* Default POSIX mutex: */
		case PTHREAD_PRIO_NONE:
			/*
			 * Check if the running thread is not the owner of the
			 * mutex:
			 */
			if ((*m)->m_owner != curthread)
				/*
				 * Return an invalid argument error for no
				 * owner and a permission error otherwise:
				 */
				ret = (*m)->m_owner == NULL ? EINVAL : EPERM;

			else if (((*m)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
			    ((*m)->m_count > 0))
				/* Decrement the count: */
				(*m)->m_count--;
			else {
				/*
				 * Clear the count in case this is a recursive
				 * mutex.
				 */
				(*m)->m_count = 0;

				/* Remove the mutex from the threads queue. */
				MUTEX_ASSERT_IS_OWNED(*m);
				TAILQ_REMOVE(&(*m)->m_owner->mutexq,
				    (*m), m_qe);
				MUTEX_INIT_LINK(*m);

				/*
				 * Hand off the mutex to the next waiting
				 * thread:
				 */
				mutex_handoff(curthread, *m);
			}
			break;

		/* POSIX priority inheritence mutex: */
		case PTHREAD_PRIO_INHERIT:
			/*
			 * Check if the running thread is not the owner of the
			 * mutex:
			 */
			if ((*m)->m_owner != curthread)
				/*
				 * Return an invalid argument error for no
				 * owner and a permission error otherwise:
				 */
				ret = (*m)->m_owner == NULL ? EINVAL : EPERM;

			else if (((*m)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
			    ((*m)->m_count > 0))
				/* Decrement the count: */
				(*m)->m_count--;
			else {
				/*
				 * Clear the count in case this is recursive
				 * mutex.
				 */
				(*m)->m_count = 0;

				/*
				 * Restore the threads inherited priority and
				 * recompute the active priority (being careful
				 * not to override changes in the threads base
				 * priority subsequent to locking the mutex).
				 */
				THR_SCHED_LOCK(curthread, curthread);
				curthread->inherited_priority =
					(*m)->m_saved_prio;
				curthread->active_priority =
				    MAX(curthread->inherited_priority,
				    curthread->base_priority);

				/*
				 * This thread now owns one less priority mutex.
				 */
				curthread->priority_mutex_count--;
				THR_SCHED_UNLOCK(curthread, curthread);

				/* Remove the mutex from the threads queue. */
				MUTEX_ASSERT_IS_OWNED(*m);
				TAILQ_REMOVE(&(*m)->m_owner->mutexq,
				    (*m), m_qe);
				MUTEX_INIT_LINK(*m);

				/*
				 * Hand off the mutex to the next waiting
				 * thread:
				 */
				mutex_handoff(curthread, *m);
			}
			break;

		/* POSIX priority ceiling mutex: */
		case PTHREAD_PRIO_PROTECT:
			/*
			 * Check if the running thread is not the owner of the
			 * mutex:
			 */
			if ((*m)->m_owner != curthread)
				/*
				 * Return an invalid argument error for no
				 * owner and a permission error otherwise:
				 */
				ret = (*m)->m_owner == NULL ? EINVAL : EPERM;

			else if (((*m)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
			    ((*m)->m_count > 0))
				/* Decrement the count: */
				(*m)->m_count--;
			else {
				/*
				 * Clear the count in case this is a recursive
				 * mutex.
				 */
				(*m)->m_count = 0;

				/*
				 * Restore the threads inherited priority and
				 * recompute the active priority (being careful
				 * not to override changes in the threads base
				 * priority subsequent to locking the mutex).
				 */
				THR_SCHED_LOCK(curthread, curthread);
				curthread->inherited_priority =
				    (*m)->m_saved_prio;
				curthread->active_priority =
				    MAX(curthread->inherited_priority,
				    curthread->base_priority);

				/*
				 * This thread now owns one less priority mutex.
				 */
				curthread->priority_mutex_count--;
				THR_SCHED_UNLOCK(curthread, curthread);

				/* Remove the mutex from the threads queue. */
				MUTEX_ASSERT_IS_OWNED(*m);
				TAILQ_REMOVE(&(*m)->m_owner->mutexq,
				    (*m), m_qe);
				MUTEX_INIT_LINK(*m);

				/*
				 * Hand off the mutex to the next waiting
				 * thread:
				 */
				mutex_handoff(curthread, *m);
			}
			break;

		/* Trap invalid mutex types: */
		default:
			/* Return an invalid argument error: */
			ret = EINVAL;
			break;
		}

		if ((ret == 0) && (add_reference != 0))
			/* Increment the reference count: */
			(*m)->m_refcount++;

		/* Unlock the mutex structure: */
		THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
	}

	/* Return the completion status: */
	return (ret);
}
1033
1034
/*
 * This function is called when a change in base priority occurs for
 * a thread that is holding or waiting for a priority protection or
 * inheritance mutex.  A change in a thread's base priority can effect
 * changes to active priorities of other threads and to the ordering
 * of mutex locking by waiting threads.
 *
 * This must be called without the target thread's scheduling lock held.
 */
void
_mutex_notify_priochange(struct pthread *curthread, struct pthread *pthread,
    int propagate_prio)
{
	struct pthread_mutex *m;

	/* Adjust the priorities of any owned priority mutexes: */
	if (pthread->priority_mutex_count > 0) {
		/*
		 * Rescan the mutexes owned by this thread and correct
		 * their priorities to account for this thread's change
		 * in priority.  This has the side effect of changing
		 * the thread's active priority.
		 *
		 * Be sure to lock the first mutex in the list of owned
		 * mutexes.  This acts as a barrier against another
		 * simultaneous call to change the thread's priority
		 * and from the owning thread releasing the mutex.
		 */
		m = TAILQ_FIRST(&pthread->mutexq);
		if (m != NULL) {
			THR_LOCK_ACQUIRE(curthread, &m->m_lock);
			/*
			 * Make sure the thread still owns the lock.
			 * (The owner may have released it between the
			 * TAILQ_FIRST above and acquiring m_lock.)
			 */
			if (m == TAILQ_FIRST(&pthread->mutexq))
				mutex_rescan_owned(curthread, pthread,
				    /* rescan all owned */ NULL);
			THR_LOCK_RELEASE(curthread, &m->m_lock);
		}
	}

	/*
	 * If this thread is waiting on a priority inheritance mutex,
	 * check for priority adjustments.  A change in priority can
	 * also cause a ceiling violation(*) for a thread waiting on
	 * a priority protection mutex; we don't perform the check here
	 * as it is done in pthread_mutex_unlock.
	 *
	 * (*) It should be noted that a priority change to a thread
	 *     _after_ taking and owning a priority ceiling mutex
	 *     does not affect ownership of that mutex; the ceiling
	 *     priority is only checked before mutex ownership occurs.
	 */
	if (propagate_prio != 0) {
		/*
		 * Lock the thread's scheduling queue.  This is a bit
		 * convoluted; the "in synchronization queue flag" can
		 * only be cleared with both the thread's scheduling and
		 * mutex locks held.  The thread's pointer to the wanted
		 * mutex is guaranteed to be valid during this time.
		 */
		THR_SCHED_LOCK(curthread, pthread);

		if (((pthread->sflags & THR_FLAGS_IN_SYNCQ) == 0) ||
		    ((m = pthread->data.mutex) == NULL))
			THR_SCHED_UNLOCK(curthread, pthread);
		else {
			/*
			 * This thread is currently waiting on a mutex; unlock
			 * the scheduling queue lock and lock the mutex.  We
			 * can't hold both at the same time because the locking
			 * order could cause a deadlock.
			 */
			THR_SCHED_UNLOCK(curthread, pthread);
			THR_LOCK_ACQUIRE(curthread, &m->m_lock);

			/*
			 * Check to make sure this thread is still in the
			 * same state (the lock above can yield the CPU to
			 * another thread or the thread may be running on
			 * another CPU).
			 */
			if (((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) &&
			    (pthread->data.mutex == m)) {
				/*
				 * Remove and reinsert this thread into
				 * the list of waiting threads to preserve
				 * decreasing priority order.
				 */
				mutex_queue_remove(m, pthread);
				mutex_queue_enq(m, pthread);

				if (m->m_protocol == PTHREAD_PRIO_INHERIT)
					/* Adjust priorities: */
					mutex_priority_adjust(curthread, m);
			}

			/* Unlock the mutex structure: */
			THR_LOCK_RELEASE(curthread, &m->m_lock);
		}
	}
}
1137
/*
 * Called when a new thread is added to the mutex waiting queue or
 * when a thread's priority changes that is already in the mutex
 * waiting queue.  Recomputes the mutex's priority and propagates
 * any change up the chain of owners of priority inheritance
 * mutexes, one link per loop iteration.
 *
 * This must be called with the mutex locked by the current thread.
 */
static void
mutex_priority_adjust(struct pthread *curthread, pthread_mutex_t mutex)
{
	pthread_mutex_t	m = mutex;
	struct pthread	*pthread_next, *pthread = mutex->m_owner;
	int		done, temp_prio;

	/*
	 * Calculate the mutex priority as the maximum of the highest
	 * active priority of any waiting threads and the owning thread's
	 * active priority(*).
	 *
	 * (*) Because the owning thread's current active priority may
	 *     reflect priority inherited from this mutex (and the mutex
	 *     priority may have changed) we must recalculate the active
	 *     priority based on the thread's saved inherited priority
	 *     and its base priority.
	 */
	pthread_next = TAILQ_FIRST(&m->m_queue);  /* should never be NULL */
	temp_prio = MAX(pthread_next->active_priority,
	    MAX(m->m_saved_prio, pthread->base_priority));

	/* See if this mutex really needs adjusting: */
	if (temp_prio == m->m_prio)
		/* No need to propagate the priority: */
		return;

	/* Set new priority of the mutex: */
	m->m_prio = temp_prio;

	/*
	 * Don't unlock the mutex passed in as an argument.  It is
	 * expected to be locked and unlocked by the caller.
	 */
	done = 1;
	do {
		/*
		 * Save the thread's priority before rescanning the
		 * owned mutexes (used below to detect whether the
		 * rescan changed it):
		 */
		temp_prio = pthread->active_priority;

		/*
		 * Fix the priorities for all mutexes held by the owning
		 * thread since taking this mutex.  This also has a
		 * potential side-effect of changing the thread's priority.
		 *
		 * At this point the mutex is locked by the current thread.
		 * The owning thread can't release the mutex until it is
		 * unlocked, so we should be able to safely walk its list
		 * of owned mutexes.
		 */
		mutex_rescan_owned(curthread, pthread, m);

		/*
		 * If this isn't the first time through the loop,
		 * the current mutex needs to be unlocked (the first
		 * mutex is the caller's responsibility).
		 */
		if (done == 0)
			THR_LOCK_RELEASE(curthread, &m->m_lock);

		/* Assume we're done unless told otherwise: */
		done = 1;

		/*
		 * If the thread is currently waiting on a mutex, check
		 * to see if the thread's new priority has affected the
		 * priority of the mutex.
		 */
		if ((temp_prio != pthread->active_priority) &&
		    ((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) &&
		    ((m = pthread->data.mutex) != NULL) &&
		    (m->m_protocol == PTHREAD_PRIO_INHERIT)) {
			/* Lock the mutex structure: */
			THR_LOCK_ACQUIRE(curthread, &m->m_lock);

			/*
			 * Make sure the thread is still waiting on the
			 * mutex (the state may have changed while the
			 * mutex lock was being acquired):
			 */
			if (((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) &&
			    (m == pthread->data.mutex)) {
				/*
				 * The priority for this thread has changed.
				 * Remove and reinsert this thread into the
				 * list of waiting threads to preserve
				 * decreasing priority order.
				 */
				mutex_queue_remove(m, pthread);
				mutex_queue_enq(m, pthread);

				/*
				 * Grab the waiting thread with highest
				 * priority:
				 */
				pthread_next = TAILQ_FIRST(&m->m_queue);

				/*
				 * Calculate the mutex priority as the maximum
				 * of the highest active priority of any
				 * waiting threads and the owning thread's
				 * active priority.
				 */
				temp_prio = MAX(pthread_next->active_priority,
				    MAX(m->m_saved_prio,
				    m->m_owner->base_priority));

				if (temp_prio != m->m_prio) {
					/*
					 * The priority needs to be propagated
					 * to the mutex this thread is waiting
					 * on and up to the owner of that mutex.
					 */
					m->m_prio = temp_prio;
					pthread = m->m_owner;

					/* We're not done yet: */
					done = 0;
				}
			}
			/* Only release the mutex if we're done: */
			if (done != 0)
				THR_LOCK_RELEASE(curthread, &m->m_lock);
		}
	} while (done == 0);
}
1271
/*
 * Recompute the priorities of the priority inheritance mutexes owned
 * by "pthread", walking its owned-mutex list either from the head
 * (mutex == NULL) or from the entry after "mutex", and then fix the
 * thread's inherited and active priorities to match.
 *
 * NOTE(review): both visible callers hold the lock of the first mutex
 * being scanned while calling this — presumably that is the required
 * precondition; confirm against other callers.
 */
static void
mutex_rescan_owned(struct pthread *curthread, struct pthread *pthread,
    struct pthread_mutex *mutex)
{
	struct pthread_mutex	*m;
	struct pthread		*pthread_next;
	int			active_prio, inherited_prio;

	/*
	 * Start walking the mutexes the thread has taken since
	 * taking this mutex.
	 */
	if (mutex == NULL) {
		/*
		 * A null mutex means start at the beginning of the owned
		 * mutex list.
		 */
		m = TAILQ_FIRST(&pthread->mutexq);

		/* There is no inherited priority yet. */
		inherited_prio = 0;
	} else {
		/*
		 * The caller wants to start after a specific mutex.  It
		 * is assumed that this mutex is a priority inheritance
		 * mutex and that its priority has been correctly
		 * calculated.
		 */
		m = TAILQ_NEXT(mutex, m_qe);

		/* Start inheriting priority from the specified mutex. */
		inherited_prio = mutex->m_prio;
	}
	active_prio = MAX(inherited_prio, pthread->base_priority);

	for (; m != NULL; m = TAILQ_NEXT(m, m_qe)) {
		/*
		 * We only want to deal with priority inheritance
		 * mutexes.  This might be optimized by only placing
		 * priority inheritance mutexes into the owned mutex
		 * list, but it may prove to be useful having all
		 * owned mutexes in this list.  Consider a thread
		 * exiting while holding mutexes...
		 */
		if (m->m_protocol == PTHREAD_PRIO_INHERIT) {
			/*
			 * Fix the owner's saved (inherited) priority to
			 * reflect the priority of the previous mutex.
			 */
			m->m_saved_prio = inherited_prio;

			if ((pthread_next = TAILQ_FIRST(&m->m_queue)) != NULL)
				/* Recalculate the priority of the mutex: */
				m->m_prio = MAX(active_prio,
				     pthread_next->active_priority);
			else
				m->m_prio = active_prio;

			/* Recalculate new inherited and active priorities: */
			inherited_prio = m->m_prio;
			active_prio = MAX(m->m_prio, pthread->base_priority);
		}
	}

	/*
	 * Fix the thread's inherited priority and recalculate its
	 * active priority.
	 */
	pthread->inherited_priority = inherited_prio;
	active_prio = MAX(inherited_prio, pthread->base_priority);

	if (active_prio != pthread->active_priority) {
		/* Lock the thread's scheduling queue: */
		THR_SCHED_LOCK(curthread, pthread);

		if ((pthread->flags & THR_FLAGS_IN_RUNQ) == 0) {
			/*
			 * This thread is not in a run queue.  Just set
			 * its active priority.
			 */
			pthread->active_priority = active_prio;
		}
		else {
			/*
			 * This thread is in a run queue.  Remove it from
			 * the queue before changing its priority:
			 */
			THR_RUNQ_REMOVE(pthread);

			/*
			 * POSIX states that if the priority is being
			 * lowered, the thread must be inserted at the
			 * head of the queue for its priority if it owns
			 * any priority protection or inheritance mutexes.
			 */
			if ((active_prio < pthread->active_priority) &&
			    (pthread->priority_mutex_count > 0)) {
				/* Set the new active priority. */
				pthread->active_priority = active_prio;

				THR_RUNQ_INSERT_HEAD(pthread);
			} else {
				/* Set the new active priority. */
				pthread->active_priority = active_prio;

				THR_RUNQ_INSERT_TAIL(pthread);
			}
		}
		THR_SCHED_UNLOCK(curthread, pthread);
	}
}
1383
1384void
1385_mutex_unlock_private(pthread_t pthread)
1386{
1387	struct pthread_mutex	*m, *m_next;
1388
1389	for (m = TAILQ_FIRST(&pthread->mutexq); m != NULL; m = m_next) {
1390		m_next = TAILQ_NEXT(m, m_qe);
1391		if ((m->m_flags & MUTEX_FLAGS_PRIVATE) != 0)
1392			pthread_mutex_unlock(&m);
1393	}
1394}
1395
/*
 * This is called by the current thread when it wants to back out of a
 * mutex_lock in order to run a signal handler.
 */
void
_mutex_lock_backout(struct pthread *curthread)
{
	struct pthread_mutex *m;

	if ((curthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) {
		/*
		 * Any other thread may clear the "in sync queue flag",
		 * but only the current thread can clear the pointer
		 * to the mutex.  So if the flag is set, we can
		 * guarantee that the pointer to the mutex is valid.
		 * The only problem may be if the mutex is destroyed
		 * out from under us, but that should be considered
		 * an application bug.
		 */
		m = curthread->data.mutex;

		/* Lock the mutex structure: */
		THR_LOCK_ACQUIRE(curthread, &m->m_lock);


		/*
		 * Check to make sure this thread doesn't already own
		 * the mutex.  Since mutexes are unlocked with direct
		 * handoffs, it is possible the previous owner gave it
		 * to us after we checked the sync queue flag and before
		 * we locked the mutex structure.
		 */
		if (m->m_owner == curthread) {
			/* We got the handoff anyway; give the mutex back. */
			THR_LOCK_RELEASE(curthread, &m->m_lock);
			mutex_unlock_common(&m, /* add_reference */ 0);
		} else {
			/*
			 * Remove ourselves from the mutex queue and
			 * clear the pointer to the mutex.  We may no
			 * longer be in the mutex queue, but the removal
			 * function will DTRT.
			 */
			mutex_queue_remove(m, curthread);
			curthread->data.mutex = NULL;
			THR_LOCK_RELEASE(curthread, &m->m_lock);
		}
	}
}
1444
/*
 * Dequeue a waiting thread from the head of a mutex queue in descending
 * priority order and hand the mutex directly to it.
 *
 * In order to properly dequeue a thread from the mutex queue and
 * make it runnable without the possibility of errant wakeups, it
 * is necessary to lock the thread's scheduling queue while also
 * holding the mutex lock.
 */
static void
mutex_handoff(struct pthread *curthread, struct pthread_mutex *mutex)
{
	struct pthread	*pthread;

	/* Keep dequeueing until we find a valid thread: */
	mutex->m_owner = NULL;
	pthread = TAILQ_FIRST(&mutex->m_queue);
	while (pthread != NULL) {
		/* Take the thread's scheduling lock: */
		THR_SCHED_LOCK(curthread, pthread);

		/* Remove the thread from the mutex queue: */
		TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
		pthread->sflags &= ~THR_FLAGS_IN_SYNCQ;

		/* This thread is no longer waiting for this mutex. */
		pthread->data.mutex = NULL;

		/*
		 * Assign ownership according to the mutex protocol.
		 * For PTHREAD_PRIO_PROTECT the handoff can fail with a
		 * ceiling violation, in which case m_owner stays NULL
		 * and the next waiter is tried at the bottom of the
		 * loop.
		 */
		switch (mutex->m_protocol) {
		case PTHREAD_PRIO_NONE:
			/*
			 * Assign the new owner and add the mutex to the
			 * thread's list of owned mutexes.
			 */
			mutex->m_owner = pthread;
			TAILQ_INSERT_TAIL(&pthread->mutexq, mutex, m_qe);
			break;

		case PTHREAD_PRIO_INHERIT:
			/*
			 * Assign the new owner and add the mutex to the
			 * thread's list of owned mutexes.
			 */
			mutex->m_owner = pthread;
			TAILQ_INSERT_TAIL(&pthread->mutexq, mutex, m_qe);

			/* Track number of priority mutexes owned: */
			pthread->priority_mutex_count++;

			/*
			 * Set the priority of the mutex.  Since our waiting
			 * threads are in descending priority order, the
			 * priority of the mutex becomes the active priority
			 * of the thread we just dequeued.
			 */
			mutex->m_prio = pthread->active_priority;

			/* Save the owning thread's inherited priority: */
			mutex->m_saved_prio = pthread->inherited_priority;

			/*
			 * The owning thread's inherited priority now becomes
			 * its active priority (the priority of the mutex).
			 */
			pthread->inherited_priority = mutex->m_prio;
			break;

		case PTHREAD_PRIO_PROTECT:
			if (pthread->active_priority > mutex->m_prio) {
				/*
				 * Either the mutex ceiling priority has
				 * been lowered and/or this thread's priority
			 	 * has been raised subsequent to the thread
				 * being queued on the waiting list.
				 */
				pthread->error = EINVAL;
			}
			else {
				/*
				 * Assign the new owner and add the mutex
				 * to the thread's list of owned mutexes.
				 */
				mutex->m_owner = pthread;
				TAILQ_INSERT_TAIL(&pthread->mutexq,
				    mutex, m_qe);

				/* Track number of priority mutexes owned: */
				pthread->priority_mutex_count++;

				/*
				 * Save the owning thread's inherited
				 * priority:
				 */
				mutex->m_saved_prio =
				    pthread->inherited_priority;

				/*
				 * The owning thread inherits the ceiling
				 * priority of the mutex and executes at
				 * that priority:
				 */
				pthread->inherited_priority = mutex->m_prio;
				pthread->active_priority = mutex->m_prio;

			}
			break;
		}

		/* Make the thread runnable and unlock the scheduling queue: */
		_thr_setrunnable_unlocked(pthread);
		THR_SCHED_UNLOCK(curthread, pthread);

		if (mutex->m_owner == pthread)
			/* We're done; a valid owner was found. */
			break;
		else
			/* Get the next thread from the waiting queue: */
			pthread = TAILQ_NEXT(pthread, sqe);
	}

	if ((pthread == NULL) && (mutex->m_protocol == PTHREAD_PRIO_INHERIT))
		/* This mutex has no priority: */
		mutex->m_prio = 0;
}
1573
1574/*
1575 * Dequeue a waiting thread from the head of a mutex queue in descending
1576 * priority order.
1577 */
1578static inline pthread_t
1579mutex_queue_deq(struct pthread_mutex *mutex)
1580{
1581	pthread_t pthread;
1582
1583	while ((pthread = TAILQ_FIRST(&mutex->m_queue)) != NULL) {
1584		TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
1585		pthread->sflags &= ~THR_FLAGS_IN_SYNCQ;
1586
1587		/*
1588		 * Only exit the loop if the thread hasn't been
1589		 * cancelled.
1590		 */
1591		if (pthread->interrupted == 0)
1592			break;
1593	}
1594
1595	return (pthread);
1596}
1597
1598/*
1599 * Remove a waiting thread from a mutex queue in descending priority order.
1600 */
1601static inline void
1602mutex_queue_remove(pthread_mutex_t mutex, pthread_t pthread)
1603{
1604	if ((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) {
1605		TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
1606		pthread->sflags &= ~THR_FLAGS_IN_SYNCQ;
1607	}
1608}
1609
1610/*
1611 * Enqueue a waiting thread to a queue in descending priority order.
1612 */
1613static inline void
1614mutex_queue_enq(pthread_mutex_t mutex, pthread_t pthread)
1615{
1616	pthread_t tid = TAILQ_LAST(&mutex->m_queue, mutex_head);
1617
1618	THR_ASSERT_NOT_IN_SYNCQ(pthread);
1619	/*
1620	 * For the common case of all threads having equal priority,
1621	 * we perform a quick check against the priority of the thread
1622	 * at the tail of the queue.
1623	 */
1624	if ((tid == NULL) || (pthread->active_priority <= tid->active_priority))
1625		TAILQ_INSERT_TAIL(&mutex->m_queue, pthread, sqe);
1626	else {
1627		tid = TAILQ_FIRST(&mutex->m_queue);
1628		while (pthread->active_priority <= tid->active_priority)
1629			tid = TAILQ_NEXT(tid, sqe);
1630		TAILQ_INSERT_BEFORE(tid, pthread, sqe);
1631	}
1632	pthread->sflags |= THR_FLAGS_IN_SYNCQ;
1633}
1634