/*
 * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by John Birrell.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/lib/libkse/thread/thr_mutex.c 118206 2003-07-30 13:28:05Z deischen $
 */
#include <stdlib.h>
#include <errno.h>
#include <string.h>
#include <sys/param.h>
#include <sys/queue.h>
#include <pthread.h>
#include "thr_private.h"

#if defined(_PTHREADS_INVARIANTS)
#define MUTEX_INIT_LINK(m) 		do {		\
	(m)->m_qe.tqe_prev = NULL;			\
	(m)->m_qe.tqe_next = NULL;			\
} while (0)
#define MUTEX_ASSERT_IS_OWNED(m)	do {		\
	if ((m)->m_qe.tqe_prev == NULL)			\
		PANIC("mutex is not on list");		\
} while (0)
#define MUTEX_ASSERT_NOT_OWNED(m)	do {		\
	if (((m)->m_qe.tqe_prev != NULL) ||		\
	    ((m)->m_qe.tqe_next != NULL))		\
		PANIC("mutex is on list");		\
} while (0)
#define	THR_ASSERT_NOT_IN_SYNCQ(thr)	do {		\
	THR_ASSERT(((thr)->sflags & THR_FLAGS_IN_SYNCQ) == 0, \
	    "thread in syncq when it shouldn't be.");	\
} while (0)
#else
#define MUTEX_INIT_LINK(m)
#define MUTEX_ASSERT_IS_OWNED(m)
#define MUTEX_ASSERT_NOT_OWNED(m)
#define	THR_ASSERT_NOT_IN_SYNCQ(thr)
#endif
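
/*
 * Editor's note (illustrative, compiled out): the do { ... } while (0)
 * wrappers above let these multi-statement macros behave as a single
 * statement, so they compose safely with unbraced if/else.  The
 * function below is a hypothetical sketch, not part of the library.
 */
#if 0
static void
example_macro_hygiene(struct pthread_mutex *m, int cond)
{
	/*
	 * MUTEX_INIT_LINK() expands to exactly one statement, so this
	 * else still binds to the intended if:
	 */
	if (cond)
		MUTEX_INIT_LINK(m);
	else
		PANIC("example: mutex unexpectedly linked");
}
#endif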

/*
 * Prototypes
 */
static struct kse_mailbox *mutex_handoff(struct pthread *,
			    struct pthread_mutex *);
static inline int	mutex_self_trylock(struct pthread *, pthread_mutex_t);
static inline int	mutex_self_lock(struct pthread *, pthread_mutex_t);
static int		mutex_unlock_common(pthread_mutex_t *, int);
static void		mutex_priority_adjust(struct pthread *, pthread_mutex_t);
static void		mutex_rescan_owned(struct pthread *, struct pthread *,
			    struct pthread_mutex *);
static inline pthread_t	mutex_queue_deq(pthread_mutex_t);
static inline void	mutex_queue_remove(pthread_mutex_t, pthread_t);
static inline void	mutex_queue_enq(pthread_mutex_t, pthread_t);


static struct pthread_mutex_attr	static_mutex_attr =
    PTHREAD_MUTEXATTR_STATIC_INITIALIZER;
static pthread_mutexattr_t		static_mattr = &static_mutex_attr;

/* Single underscore versions provided for libc internal usage: */
__weak_reference(__pthread_mutex_lock, pthread_mutex_lock);
__weak_reference(__pthread_mutex_trylock, pthread_mutex_trylock);

/* No difference between libc and application usage of these: */
__weak_reference(_pthread_mutex_init, pthread_mutex_init);
__weak_reference(_pthread_mutex_destroy, pthread_mutex_destroy);
__weak_reference(_pthread_mutex_unlock, pthread_mutex_unlock);

int
_pthread_mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr)
{
	struct pthread_mutex *pmutex;
	enum pthread_mutextype type;
	int		protocol;
	int		ceiling;
	int		flags;
	int		ret = 0;

	if (mutex == NULL)
		ret = EINVAL;

	/* Check if default mutex attributes: */
	else if (mutex_attr == NULL || *mutex_attr == NULL) {
		/* Default to an (error checking) POSIX mutex: */
		type = PTHREAD_MUTEX_ERRORCHECK;
		protocol = PTHREAD_PRIO_NONE;
		ceiling = THR_MAX_PRIORITY;
		flags = 0;
	}

	/* Check mutex type: */
	else if (((*mutex_attr)->m_type < PTHREAD_MUTEX_ERRORCHECK) ||
	    ((*mutex_attr)->m_type >= MUTEX_TYPE_MAX))
		/* Return an invalid argument error: */
		ret = EINVAL;

	/* Check mutex protocol (the upper bound is a protocol
	   constant, not a mutex type): */
	else if (((*mutex_attr)->m_protocol < PTHREAD_PRIO_NONE) ||
	    ((*mutex_attr)->m_protocol > PTHREAD_PRIO_PROTECT))
		/* Return an invalid argument error: */
		ret = EINVAL;

	else {
		/* Use the requested mutex type and protocol: */
		type = (*mutex_attr)->m_type;
		protocol = (*mutex_attr)->m_protocol;
		ceiling = (*mutex_attr)->m_ceiling;
		flags = (*mutex_attr)->m_flags;
	}

	/* Check no errors so far: */
	if (ret == 0) {
		if ((pmutex = (pthread_mutex_t)
		    malloc(sizeof(struct pthread_mutex))) == NULL)
			ret = ENOMEM;
		else if (_lock_init(&pmutex->m_lock, LCK_ADAPTIVE,
		    _thr_lock_wait, _thr_lock_wakeup) != 0) {
			free(pmutex);
			*mutex = NULL;
			ret = ENOMEM;
		} else {
			/* Set the mutex flags: */
			pmutex->m_flags = flags;

			/* Process according to mutex type: */
			switch (type) {
			/* case PTHREAD_MUTEX_DEFAULT: */
			case PTHREAD_MUTEX_ERRORCHECK:
			case PTHREAD_MUTEX_NORMAL:
				/* Nothing to do here. */
				break;

			/* Single UNIX Spec 2 recursive mutex: */
			case PTHREAD_MUTEX_RECURSIVE:
				/* Reset the mutex count: */
				pmutex->m_count = 0;
				break;

			/* Trap invalid mutex types: */
			default:
				/* Return an invalid argument error: */
				ret = EINVAL;
				break;
			}
			if (ret == 0) {
				/* Initialise the rest of the mutex: */
				TAILQ_INIT(&pmutex->m_queue);
				pmutex->m_flags |= MUTEX_FLAGS_INITED;
				pmutex->m_owner = NULL;
				pmutex->m_type = type;
				pmutex->m_protocol = protocol;
				pmutex->m_refcount = 0;
				if (protocol == PTHREAD_PRIO_PROTECT)
					pmutex->m_prio = ceiling;
				else
					pmutex->m_prio = -1;
				pmutex->m_saved_prio = 0;
				MUTEX_INIT_LINK(pmutex);
				*mutex = pmutex;
			} else {
				/* Free the mutex lock structure: */
				_lock_destroy(&pmutex->m_lock);
				free(pmutex);
				*mutex = NULL;
			}
		}
	}
	/* Return the completion status: */
	return (ret);
}
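
/*
 * Editor's sketch (compiled out): typical application-level use of the
 * initializer above, creating a recursive mutex through the attribute
 * interface.  All names local to the example are hypothetical.
 */
#if 0
static pthread_mutex_t example_lock;

static int
example_init(void)
{
	pthread_mutexattr_t attr;
	int ret;

	pthread_mutexattr_init(&attr);
	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
	ret = pthread_mutex_init(&example_lock, &attr);
	pthread_mutexattr_destroy(&attr);
	return (ret);
}
#endif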

int
_pthread_mutex_destroy(pthread_mutex_t *mutex)
{
	struct pthread	*curthread = _get_curthread();
	pthread_mutex_t m;
	int ret = 0;

	if (mutex == NULL || *mutex == NULL)
		ret = EINVAL;
	else {
		/* Lock the mutex structure: */
		THR_LOCK_ACQUIRE(curthread, &(*mutex)->m_lock);

		/*
		 * Check to see if this mutex is in use:
		 */
		if (((*mutex)->m_owner != NULL) ||
		    (TAILQ_FIRST(&(*mutex)->m_queue) != NULL) ||
		    ((*mutex)->m_refcount != 0)) {
			ret = EBUSY;

			/* Unlock the mutex structure: */
			THR_LOCK_RELEASE(curthread, &(*mutex)->m_lock);
		} else {
			/*
			 * Save a pointer to the mutex so it can be freed
			 * and set the caller's pointer to NULL:
			 */
			m = *mutex;
			*mutex = NULL;

			/* Unlock the mutex structure: */
			THR_LOCK_RELEASE(curthread, &m->m_lock);

			/*
			 * Free the memory allocated for the mutex
			 * structure:
			 */
			MUTEX_ASSERT_NOT_OWNED(m);

			/* Free the mutex lock structure: */
			_lock_destroy(&m->m_lock);

			free(m);
		}
	}

	/* Return the completion status: */
	return (ret);
}
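
/*
 * Editor's sketch (compiled out): the in-use check above means a mutex
 * that is owned, waited on, or referenced by a condition wait cannot
 * be destroyed.  Hypothetical example:
 */
#if 0
static int
example_destroy(pthread_mutex_t *mp)
{
	int ret;

	pthread_mutex_lock(mp);
	ret = pthread_mutex_destroy(mp);	/* EBUSY: owner is set */
	pthread_mutex_unlock(mp);
	if (ret != EBUSY)
		return (ret);
	/* Unowned and unreferenced: succeeds and sets *mp to NULL. */
	return (pthread_mutex_destroy(mp));
}
#endif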

static int
init_static(struct pthread *thread, pthread_mutex_t *mutex)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);

	if (*mutex == NULL)
		ret = pthread_mutex_init(mutex, NULL);
	else
		ret = 0;

	THR_LOCK_RELEASE(thread, &_mutex_static_lock);

	return (ret);
}

static int
init_static_private(struct pthread *thread, pthread_mutex_t *mutex)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);

	if (*mutex == NULL)
		ret = pthread_mutex_init(mutex, &static_mattr);
	else
		ret = 0;

	THR_LOCK_RELEASE(thread, &_mutex_static_lock);

	return (ret);
}
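
/*
 * Editor's note (illustrative, compiled out): in this library a
 * statically initialized mutex starts out as a NULL pointer, so the
 * first lock or trylock routes through init_static() or
 * init_static_private() above under _mutex_static_lock.
 */
#if 0
static pthread_mutex_t example_static = PTHREAD_MUTEX_INITIALIZER;

static void
example_static_use(void)
{
	pthread_mutex_lock(&example_static);	/* lazily initializes */
	pthread_mutex_unlock(&example_static);
}
#endif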

static int
mutex_trylock_common(struct pthread *curthread, pthread_mutex_t *mutex)
{
	int ret = 0;

	THR_ASSERT((mutex != NULL) && (*mutex != NULL),
	    "Uninitialized mutex in mutex_trylock_common");

	/* Lock the mutex structure: */
	THR_LOCK_ACQUIRE(curthread, &(*mutex)->m_lock);

	/*
	 * If the mutex was statically allocated, properly
	 * initialize the tail queue.
	 */
	if (((*mutex)->m_flags & MUTEX_FLAGS_INITED) == 0) {
		TAILQ_INIT(&(*mutex)->m_queue);
		MUTEX_INIT_LINK(*mutex);
		(*mutex)->m_flags |= MUTEX_FLAGS_INITED;
	}

	/* Process according to mutex protocol: */
	switch ((*mutex)->m_protocol) {
	/* Default POSIX mutex: */
	case PTHREAD_PRIO_NONE:
		/* Check if this mutex is not locked: */
		if ((*mutex)->m_owner == NULL) {
			/* Lock the mutex for the running thread: */
			(*mutex)->m_owner = curthread;

			/* Add to the list of owned mutexes: */
			MUTEX_ASSERT_NOT_OWNED(*mutex);
			TAILQ_INSERT_TAIL(&curthread->mutexq,
			    (*mutex), m_qe);
		} else if ((*mutex)->m_owner == curthread)
			ret = mutex_self_trylock(curthread, *mutex);
		else
			/* Return a busy error: */
			ret = EBUSY;
		break;

	/* POSIX priority inheritance mutex: */
	case PTHREAD_PRIO_INHERIT:
		/* Check if this mutex is not locked: */
		if ((*mutex)->m_owner == NULL) {
			/* Lock the mutex for the running thread: */
			(*mutex)->m_owner = curthread;

			THR_SCHED_LOCK(curthread, curthread);
			/* Track number of priority mutexes owned: */
			curthread->priority_mutex_count++;

			/*
			 * The mutex takes on the attributes of the
			 * running thread when there are no waiters.
			 */
			(*mutex)->m_prio = curthread->active_priority;
			(*mutex)->m_saved_prio =
			    curthread->inherited_priority;
			THR_SCHED_UNLOCK(curthread, curthread);

			/* Add to the list of owned mutexes: */
			MUTEX_ASSERT_NOT_OWNED(*mutex);
			TAILQ_INSERT_TAIL(&curthread->mutexq,
			    (*mutex), m_qe);
		} else if ((*mutex)->m_owner == curthread)
			ret = mutex_self_trylock(curthread, *mutex);
		else
			/* Return a busy error: */
			ret = EBUSY;
		break;

	/* POSIX priority protection mutex: */
	case PTHREAD_PRIO_PROTECT:
		/* Check for a priority ceiling violation: */
		if (curthread->active_priority > (*mutex)->m_prio)
			ret = EINVAL;

		/* Check if this mutex is not locked: */
		else if ((*mutex)->m_owner == NULL) {
			/* Lock the mutex for the running thread: */
			(*mutex)->m_owner = curthread;

			THR_SCHED_LOCK(curthread, curthread);
			/* Track number of priority mutexes owned: */
			curthread->priority_mutex_count++;

			/*
			 * The running thread inherits the ceiling
			 * priority of the mutex and executes at that
			 * priority.
			 */
			curthread->active_priority = (*mutex)->m_prio;
			(*mutex)->m_saved_prio =
			    curthread->inherited_priority;
			curthread->inherited_priority =
			    (*mutex)->m_prio;
			THR_SCHED_UNLOCK(curthread, curthread);
			/* Add to the list of owned mutexes: */
			MUTEX_ASSERT_NOT_OWNED(*mutex);
			TAILQ_INSERT_TAIL(&curthread->mutexq,
			    (*mutex), m_qe);
		} else if ((*mutex)->m_owner == curthread)
			ret = mutex_self_trylock(curthread, *mutex);
		else
			/* Return a busy error: */
			ret = EBUSY;
		break;

	/* Trap invalid mutex protocols: */
	default:
		/* Return an invalid argument error: */
		ret = EINVAL;
		break;
	}

	/* Unlock the mutex structure: */
	THR_LOCK_RELEASE(curthread, &(*mutex)->m_lock);

	/* Return the completion status: */
	return (ret);
}
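
/*
 * Editor's sketch (compiled out): trylock is the non-blocking entry to
 * the logic above; a contended mutex yields EBUSY rather than queueing
 * the caller.  Hypothetical helper:
 */
#if 0
static int
example_try_or_skip(pthread_mutex_t *mp, void (*work)(void))
{
	int ret;

	ret = pthread_mutex_trylock(mp);
	if (ret == EBUSY)
		return (0);	/* another thread holds it; skip the work */
	if (ret != 0)
		return (ret);	/* EINVAL, EDEADLK, ... */
	work();
	return (pthread_mutex_unlock(mp));
}
#endif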

int
__pthread_mutex_trylock(pthread_mutex_t *mutex)
{
	struct pthread *curthread = _get_curthread();
	int ret = 0;

	if (mutex == NULL)
		ret = EINVAL;

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	else if ((*mutex != NULL) ||
	    ((ret = init_static(curthread, mutex)) == 0))
		ret = mutex_trylock_common(curthread, mutex);

	return (ret);
}

int
_pthread_mutex_trylock(pthread_mutex_t *mutex)
{
	struct pthread	*curthread = _get_curthread();
	int	ret = 0;

	if (mutex == NULL)
		ret = EINVAL;

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization marking the mutex private (delete safe):
	 */
	else if ((*mutex != NULL) ||
	    ((ret = init_static_private(curthread, mutex)) == 0))
		ret = mutex_trylock_common(curthread, mutex);

	return (ret);
}

static int
mutex_lock_common(struct pthread *curthread, pthread_mutex_t *m)
{
	int	ret = 0;

	THR_ASSERT((m != NULL) && (*m != NULL),
	    "Uninitialized mutex in mutex_lock_common");

	/* Reset the interrupted flag: */
	curthread->interrupted = 0;

	/*
	 * Enter a loop waiting to become the mutex owner.  We need a
	 * loop in case the waiting thread is interrupted by a signal
	 * to execute a signal handler.  It is not (currently) possible
	 * to remain in the waiting queue while running a handler.
	 * Instead, the thread is interrupted and backed out of the
	 * waiting queue prior to executing the signal handler.
	 */
	do {
		/* Lock the mutex structure: */
		THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);

		/*
		 * If the mutex was statically allocated, properly
		 * initialize the tail queue.
		 */
		if (((*m)->m_flags & MUTEX_FLAGS_INITED) == 0) {
			TAILQ_INIT(&(*m)->m_queue);
			(*m)->m_flags |= MUTEX_FLAGS_INITED;
			MUTEX_INIT_LINK(*m);
		}

		/* Process according to mutex protocol: */
		switch ((*m)->m_protocol) {
		/* Default POSIX mutex: */
		case PTHREAD_PRIO_NONE:
			if ((*m)->m_owner == NULL) {
				/* Lock the mutex for this thread: */
				(*m)->m_owner = curthread;

				/* Add to the list of owned mutexes: */
				MUTEX_ASSERT_NOT_OWNED(*m);
				TAILQ_INSERT_TAIL(&curthread->mutexq,
				    (*m), m_qe);

				/* Unlock the mutex structure: */
				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
			} else if ((*m)->m_owner == curthread) {
				ret = mutex_self_lock(curthread, *m);

				/* Unlock the mutex structure: */
				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
			} else {
				/*
				 * Join the queue of threads waiting to lock
				 * the mutex and save a pointer to the mutex.
				 */
				mutex_queue_enq(*m, curthread);
				curthread->data.mutex = *m;
				/*
				 * This thread is active and is in a critical
				 * region (holding the mutex lock); we should
				 * be able to safely set the state.
				 */
				THR_SCHED_LOCK(curthread, curthread);
				THR_SET_STATE(curthread, PS_MUTEX_WAIT);
				THR_SCHED_UNLOCK(curthread, curthread);

				/* Unlock the mutex structure: */
				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);

				/* Schedule the next thread: */
				_thr_sched_switch(curthread);
			}
			break;

		/* POSIX priority inheritance mutex: */
		case PTHREAD_PRIO_INHERIT:
			/* Check if this mutex is not locked: */
			if ((*m)->m_owner == NULL) {
				/* Lock the mutex for this thread: */
				(*m)->m_owner = curthread;

				THR_SCHED_LOCK(curthread, curthread);
				/* Track number of priority mutexes owned: */
				curthread->priority_mutex_count++;

				/*
				 * The mutex takes on attributes of the
				 * running thread when there are no waiters.
				 * Make sure the thread's scheduling lock is
				 * held while priorities are adjusted.
				 */
				(*m)->m_prio = curthread->active_priority;
				(*m)->m_saved_prio =
				    curthread->inherited_priority;
				curthread->inherited_priority = (*m)->m_prio;
				THR_SCHED_UNLOCK(curthread, curthread);

				/* Add to the list of owned mutexes: */
				MUTEX_ASSERT_NOT_OWNED(*m);
				TAILQ_INSERT_TAIL(&curthread->mutexq,
				    (*m), m_qe);

				/* Unlock the mutex structure: */
				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
			} else if ((*m)->m_owner == curthread) {
				ret = mutex_self_lock(curthread, *m);

				/* Unlock the mutex structure: */
				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
			} else {
				/*
				 * Join the queue of threads waiting to lock
				 * the mutex and save a pointer to the mutex.
				 */
				mutex_queue_enq(*m, curthread);
				curthread->data.mutex = *m;

				/*
				 * This thread is active and is in a critical
				 * region (holding the mutex lock); we should
				 * be able to safely set the state.
				 */
				if (curthread->active_priority > (*m)->m_prio)
					/* Adjust priorities: */
					mutex_priority_adjust(curthread, *m);

				THR_SCHED_LOCK(curthread, curthread);
				THR_SET_STATE(curthread, PS_MUTEX_WAIT);
				THR_SCHED_UNLOCK(curthread, curthread);

				/* Unlock the mutex structure: */
				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);

				/* Schedule the next thread: */
				_thr_sched_switch(curthread);
			}
			break;

		/* POSIX priority protection mutex: */
		case PTHREAD_PRIO_PROTECT:
			/* Check for a priority ceiling violation: */
			if (curthread->active_priority > (*m)->m_prio) {
				/* Unlock the mutex structure: */
				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
				ret = EINVAL;
			}
			/* Check if this mutex is not locked: */
			else if ((*m)->m_owner == NULL) {
				/*
				 * Lock the mutex for the running
				 * thread:
				 */
				(*m)->m_owner = curthread;

				THR_SCHED_LOCK(curthread, curthread);
				/* Track number of priority mutexes owned: */
				curthread->priority_mutex_count++;

				/*
				 * The running thread inherits the ceiling
				 * priority of the mutex and executes at that
				 * priority.  Make sure the thread's
				 * scheduling lock is held while priorities
				 * are adjusted.
				 */
				curthread->active_priority = (*m)->m_prio;
				(*m)->m_saved_prio =
				    curthread->inherited_priority;
				curthread->inherited_priority = (*m)->m_prio;
				THR_SCHED_UNLOCK(curthread, curthread);

				/* Add to the list of owned mutexes: */
				MUTEX_ASSERT_NOT_OWNED(*m);
				TAILQ_INSERT_TAIL(&curthread->mutexq,
				    (*m), m_qe);

				/* Unlock the mutex structure: */
				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
			} else if ((*m)->m_owner == curthread) {
				ret = mutex_self_lock(curthread, *m);

				/* Unlock the mutex structure: */
				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
			} else {
				/*
				 * Join the queue of threads waiting to lock
				 * the mutex and save a pointer to the mutex.
				 */
				mutex_queue_enq(*m, curthread);
				curthread->data.mutex = *m;

				/* Clear any previous error: */
				curthread->error = 0;

				/*
				 * This thread is active and is in a critical
				 * region (holding the mutex lock); we should
				 * be able to safely set the state.
				 */
				THR_SCHED_LOCK(curthread, curthread);
				THR_SET_STATE(curthread, PS_MUTEX_WAIT);
				THR_SCHED_UNLOCK(curthread, curthread);

				/* Unlock the mutex structure: */
				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);

				/* Schedule the next thread: */
				_thr_sched_switch(curthread);
				/*
				 * The thread's priority may have changed
				 * while it waited for the mutex, causing a
				 * ceiling violation.
				 */
				ret = curthread->error;
				curthread->error = 0;
			}
			break;

		/* Trap invalid mutex protocols: */
		default:
			/* Unlock the mutex structure: */
			THR_LOCK_RELEASE(curthread, &(*m)->m_lock);

			/* Return an invalid argument error: */
			ret = EINVAL;
			break;
		}

	} while (((*m)->m_owner != curthread) && (ret == 0) &&
	    (curthread->interrupted == 0));

	/*
	 * Check to see if this thread was interrupted and
	 * is still in the mutex queue of waiting threads:
	 */
	if (curthread->interrupted != 0) {
		/* Remove this thread from the mutex queue. */
		THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
		if (THR_IN_SYNCQ(curthread))
			mutex_queue_remove(*m, curthread);
		THR_LOCK_RELEASE(curthread, &(*m)->m_lock);

		/* Check for asynchronous cancellation. */
		if (curthread->continuation != NULL)
			curthread->continuation((void *) curthread);
	}

	/* Return the completion status: */
	return (ret);
}
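
/*
 * Editor's sketch (compiled out): with PTHREAD_PRIO_PROTECT, locking
 * from a thread whose active priority exceeds the mutex's ceiling
 * fails with EINVAL, per the check above.  The ceiling value 5 is
 * purely illustrative.
 */
#if 0
static int
example_ceiling(void)
{
	pthread_mutexattr_t attr;
	pthread_mutex_t m;
	int ret;

	pthread_mutexattr_init(&attr);
	pthread_mutexattr_setprotocol(&attr, PTHREAD_PRIO_PROTECT);
	pthread_mutexattr_setprioceiling(&attr, 5);
	pthread_mutex_init(&m, &attr);
	ret = pthread_mutex_lock(&m);	/* EINVAL if caller runs above 5 */
	if (ret == 0)
		ret = pthread_mutex_unlock(&m);
	pthread_mutex_destroy(&m);
	pthread_mutexattr_destroy(&attr);
	return (ret);
}
#endif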

int
__pthread_mutex_lock(pthread_mutex_t *m)
{
	struct pthread *curthread;
	int	ret = 0;

	if (_thr_initial == NULL)
		_libpthread_init(NULL);

	curthread = _get_curthread();
	if (m == NULL)
		ret = EINVAL;

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	else if ((*m != NULL) || ((ret = init_static(curthread, m)) == 0))
		ret = mutex_lock_common(curthread, m);

	return (ret);
}

__strong_reference(__pthread_mutex_lock, _thr_mutex_lock);

int
_pthread_mutex_lock(pthread_mutex_t *m)
{
	struct pthread *curthread;
	int	ret = 0;

	if (_thr_initial == NULL)
		_libpthread_init(NULL);
	curthread = _get_curthread();

	if (m == NULL)
		ret = EINVAL;

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization marking it private (delete safe):
	 */
	else if ((*m != NULL) ||
	    ((ret = init_static_private(curthread, m)) == 0))
		ret = mutex_lock_common(curthread, m);

	return (ret);
}

int
_pthread_mutex_unlock(pthread_mutex_t *m)
{
	return (mutex_unlock_common(m, /* add reference */ 0));
}

__strong_reference(_pthread_mutex_unlock, _thr_mutex_unlock);

int
_mutex_cv_unlock(pthread_mutex_t *m)
{
	return (mutex_unlock_common(m, /* add reference */ 1));
}

int
_mutex_cv_lock(pthread_mutex_t *m)
{
	struct pthread *curthread;
	int	ret;

	curthread = _get_curthread();
	if ((ret = _pthread_mutex_lock(m)) == 0) {
		THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
		(*m)->m_refcount--;
		THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
	}
	return (ret);
}
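
/*
 * Editor's sketch (compiled out, hypothetical): how a condition-wait
 * implementation would pair the two helpers above.  _mutex_cv_unlock()
 * releases the mutex while bumping m_refcount, so the mutex cannot be
 * destroyed while a waiter sleeps; _mutex_cv_lock() reacquires it and
 * drops that reference.  The real caller lives in the condition
 * variable code, not here.
 */
#if 0
static int
example_cond_wait_protocol(pthread_mutex_t *mp)
{
	int ret;

	ret = _mutex_cv_unlock(mp);		/* release, m_refcount++ */
	if (ret == 0) {
		/* sleep on the condition variable here */
		ret = _mutex_cv_lock(mp);	/* reacquire, m_refcount-- */
	}
	return (ret);
}
#endif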

static inline int
mutex_self_trylock(struct pthread *curthread, pthread_mutex_t m)
{
	int	ret = 0;

	switch (m->m_type) {
	/* case PTHREAD_MUTEX_DEFAULT: */
	case PTHREAD_MUTEX_ERRORCHECK:
	case PTHREAD_MUTEX_NORMAL:
		/*
		 * POSIX specifies that mutexes should return EDEADLK if a
		 * recursive lock is detected.
		 */
		if (m->m_owner == curthread)
			ret = EDEADLK;
		else
			ret = EBUSY;
		break;

	case PTHREAD_MUTEX_RECURSIVE:
		/* Increment the lock count: */
		m->m_count++;
		break;

	default:
		/* Trap invalid mutex types: */
		ret = EINVAL;
	}

	return (ret);
}

static inline int
mutex_self_lock(struct pthread *curthread, pthread_mutex_t m)
{
	int ret = 0;

	switch (m->m_type) {
	/* case PTHREAD_MUTEX_DEFAULT: */
	case PTHREAD_MUTEX_ERRORCHECK:
		/*
		 * POSIX specifies that mutexes should return EDEADLK if a
		 * recursive lock is detected.
		 */
		ret = EDEADLK;
		break;

	case PTHREAD_MUTEX_NORMAL:
		/*
		 * What SS2 defines as a 'normal' mutex.  Intentionally
		 * deadlock on attempts to get a lock you already own.
		 */
		THR_SCHED_LOCK(curthread, curthread);
		THR_SET_STATE(curthread, PS_DEADLOCK);
		THR_SCHED_UNLOCK(curthread, curthread);

		/* Unlock the mutex structure: */
		THR_LOCK_RELEASE(curthread, &m->m_lock);

		/* Schedule the next thread: */
		_thr_sched_switch(curthread);
		break;

	case PTHREAD_MUTEX_RECURSIVE:
		/* Increment the lock count: */
		m->m_count++;
		break;

	default:
		/* Trap invalid mutex types: */
		ret = EINVAL;
	}

	return (ret);
}
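
/*
 * Editor's sketch (compiled out): the type-specific self-lock behavior
 * above, seen from the application.  An ERRORCHECK mutex reports the
 * relock; a RECURSIVE mutex counts it; a NORMAL mutex deadlocks the
 * thread by design (it is parked in PS_DEADLOCK).
 */
#if 0
static int
example_self_lock(void)
{
	pthread_mutexattr_t attr;
	pthread_mutex_t m;
	int ret;

	pthread_mutexattr_init(&attr);
	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
	pthread_mutex_init(&m, &attr);
	pthread_mutexattr_destroy(&attr);
	pthread_mutex_lock(&m);
	ret = pthread_mutex_lock(&m);	/* EDEADLK: relock detected */
	pthread_mutex_unlock(&m);
	pthread_mutex_destroy(&m);
	return (ret);
}
#endif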

static int
mutex_unlock_common(pthread_mutex_t *m, int add_reference)
{
	struct pthread *curthread = _get_curthread();
	struct kse_mailbox *kmbx = NULL;
	int ret = 0;

	if (m == NULL || *m == NULL)
		ret = EINVAL;
	else {
		/* Lock the mutex structure: */
		THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);

		/* Process according to mutex protocol: */
		switch ((*m)->m_protocol) {
		/* Default POSIX mutex: */
		case PTHREAD_PRIO_NONE:
			/*
			 * Check if the running thread is not the owner of the
			 * mutex:
			 */
			if ((*m)->m_owner != curthread)
				/*
				 * Return an invalid argument error for no
				 * owner and a permission error otherwise:
				 */
				ret = (*m)->m_owner == NULL ? EINVAL : EPERM;

			else if (((*m)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
			    ((*m)->m_count > 0))
				/* Decrement the count: */
				(*m)->m_count--;
			else {
				/*
				 * Clear the count in case this is a recursive
				 * mutex.
				 */
				(*m)->m_count = 0;

				/* Remove the mutex from the thread's queue. */
				MUTEX_ASSERT_IS_OWNED(*m);
				TAILQ_REMOVE(&(*m)->m_owner->mutexq,
				    (*m), m_qe);
				MUTEX_INIT_LINK(*m);

				/*
				 * Hand off the mutex to the next waiting
				 * thread:
				 */
				kmbx = mutex_handoff(curthread, *m);
			}
			break;

		/* POSIX priority inheritance mutex: */
		case PTHREAD_PRIO_INHERIT:
			/*
			 * Check if the running thread is not the owner of the
			 * mutex:
			 */
			if ((*m)->m_owner != curthread)
				/*
				 * Return an invalid argument error for no
				 * owner and a permission error otherwise:
				 */
				ret = (*m)->m_owner == NULL ? EINVAL : EPERM;

			else if (((*m)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
			    ((*m)->m_count > 0))
				/* Decrement the count: */
				(*m)->m_count--;
			else {
				/*
				 * Clear the count in case this is a recursive
				 * mutex.
				 */
				(*m)->m_count = 0;

				/*
				 * Restore the thread's inherited priority and
				 * recompute the active priority (being careful
				 * not to override changes in the thread's base
				 * priority subsequent to locking the mutex).
				 */
				THR_SCHED_LOCK(curthread, curthread);
				curthread->inherited_priority =
					(*m)->m_saved_prio;
				curthread->active_priority =
				    MAX(curthread->inherited_priority,
				    curthread->base_priority);

				/*
				 * This thread now owns one less priority mutex.
				 */
				curthread->priority_mutex_count--;
				THR_SCHED_UNLOCK(curthread, curthread);

				/* Remove the mutex from the thread's queue. */
				MUTEX_ASSERT_IS_OWNED(*m);
				TAILQ_REMOVE(&(*m)->m_owner->mutexq,
				    (*m), m_qe);
				MUTEX_INIT_LINK(*m);

				/*
				 * Hand off the mutex to the next waiting
				 * thread:
				 */
				kmbx = mutex_handoff(curthread, *m);
			}
			break;

		/* POSIX priority ceiling mutex: */
		case PTHREAD_PRIO_PROTECT:
			/*
			 * Check if the running thread is not the owner of the
			 * mutex:
			 */
			if ((*m)->m_owner != curthread)
				/*
				 * Return an invalid argument error for no
				 * owner and a permission error otherwise:
				 */
				ret = (*m)->m_owner == NULL ? EINVAL : EPERM;

			else if (((*m)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
			    ((*m)->m_count > 0))
				/* Decrement the count: */
				(*m)->m_count--;
			else {
				/*
				 * Clear the count in case this is a recursive
				 * mutex.
				 */
				(*m)->m_count = 0;

				/*
				 * Restore the thread's inherited priority and
				 * recompute the active priority (being careful
				 * not to override changes in the thread's base
				 * priority subsequent to locking the mutex).
				 */
				THR_SCHED_LOCK(curthread, curthread);
				curthread->inherited_priority =
					(*m)->m_saved_prio;
				curthread->active_priority =
				    MAX(curthread->inherited_priority,
				    curthread->base_priority);

				/*
				 * This thread now owns one less priority mutex.
				 */
				curthread->priority_mutex_count--;
				THR_SCHED_UNLOCK(curthread, curthread);

				/* Remove the mutex from the thread's queue. */
				MUTEX_ASSERT_IS_OWNED(*m);
				TAILQ_REMOVE(&(*m)->m_owner->mutexq,
				    (*m), m_qe);
				MUTEX_INIT_LINK(*m);

				/*
				 * Hand off the mutex to the next waiting
				 * thread:
				 */
				kmbx = mutex_handoff(curthread, *m);
			}
			break;

		/* Trap invalid mutex protocols: */
		default:
			/* Return an invalid argument error: */
			ret = EINVAL;
			break;
		}

		if ((ret == 0) && (add_reference != 0))
			/* Increment the reference count: */
			(*m)->m_refcount++;

		/* Unlock the mutex structure: */
		THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
		if (kmbx != NULL)
			kse_wakeup(kmbx);
	}

	/* Return the completion status: */
	return (ret);
}
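
/*
 * Editor's sketch (compiled out): unlocking is owner-checked above, so
 * a thread that does not own the mutex gets EPERM (or EINVAL if the
 * mutex is not locked at all).  Hypothetical thread body:
 */
#if 0
static int example_unlock_error;

static void *
example_wrong_unlock(void *arg)
{
	pthread_mutex_t *mp = arg;	/* currently locked by another thread */

	/* EPERM: the calling thread is not the owner. */
	example_unlock_error = pthread_mutex_unlock(mp);
	return (NULL);
}
#endif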

/*
 * This function is called when a change in base priority occurs for
 * a thread that is holding or waiting for a priority protection or
 * inheritance mutex.  A change in a thread's base priority can effect
 * changes to the active priorities of other threads and to the ordering
 * of mutex locking by waiting threads.
 *
 * This must be called without the target thread's scheduling lock held.
 */
void
_mutex_notify_priochange(struct pthread *curthread, struct pthread *pthread,
    int propagate_prio)
{
	struct pthread_mutex *m;

	/* Adjust the priorities of any owned priority mutexes: */
	if (pthread->priority_mutex_count > 0) {
		/*
		 * Rescan the mutexes owned by this thread and correct
		 * their priorities to account for this thread's change
		 * in priority.  This has the side effect of changing
		 * the thread's active priority.
		 *
		 * Be sure to lock the first mutex in the list of owned
		 * mutexes.  This acts as a barrier against another
		 * simultaneous call to change the thread's priority
		 * and from the owning thread releasing the mutex.
		 */
		m = TAILQ_FIRST(&pthread->mutexq);
		if (m != NULL) {
			THR_LOCK_ACQUIRE(curthread, &m->m_lock);
			/*
			 * Make sure the thread still owns the lock.
			 */
			if (m == TAILQ_FIRST(&pthread->mutexq))
				mutex_rescan_owned(curthread, pthread,
				    /* rescan all owned */ NULL);
			THR_LOCK_RELEASE(curthread, &m->m_lock);
		}
	}

	/*
	 * If this thread is waiting on a priority inheritance mutex,
	 * check for priority adjustments.  A change in priority can
	 * also cause a ceiling violation(*) for a thread waiting on
	 * a priority protection mutex; we don't perform the check here
	 * as it is done in pthread_mutex_unlock.
	 *
	 * (*) It should be noted that a priority change to a thread
	 *     _after_ taking and owning a priority ceiling mutex
	 *     does not affect ownership of that mutex; the ceiling
	 *     priority is only checked before mutex ownership occurs.
	 */
	if (propagate_prio != 0) {
		/*
		 * Lock the thread's scheduling queue.  This is a bit
		 * convoluted; the "in synchronization queue flag" can
		 * only be cleared with both the thread's scheduling and
		 * mutex locks held.  The thread's pointer to the wanted
		 * mutex is guaranteed to be valid during this time.
		 */
		THR_SCHED_LOCK(curthread, pthread);

		if (((pthread->sflags & THR_FLAGS_IN_SYNCQ) == 0) ||
		    ((m = pthread->data.mutex) == NULL))
			THR_SCHED_UNLOCK(curthread, pthread);
		else {
			/*
			 * This thread is currently waiting on a mutex; unlock
			 * the scheduling queue lock and lock the mutex.  We
			 * can't hold both at the same time because the locking
			 * order could cause a deadlock.
			 */
			THR_SCHED_UNLOCK(curthread, pthread);
			THR_LOCK_ACQUIRE(curthread, &m->m_lock);

			/*
			 * Check to make sure this thread is still in the
			 * same state (the lock above can yield the CPU to
			 * another thread or the thread may be running on
			 * another CPU).
			 */
			if (((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) &&
			    (pthread->data.mutex == m)) {
				/*
				 * Remove and reinsert this thread into
				 * the list of waiting threads to preserve
				 * decreasing priority order.
				 */
				mutex_queue_remove(m, pthread);
				mutex_queue_enq(m, pthread);

				if (m->m_protocol == PTHREAD_PRIO_INHERIT)
					/* Adjust priorities: */
					mutex_priority_adjust(curthread, m);
			}

			/* Unlock the mutex structure: */
			THR_LOCK_RELEASE(curthread, &m->m_lock);
		}
	}
}

/*
 * Called when a new thread is added to the mutex waiting queue or
 * when the priority of a thread already in the mutex waiting queue
 * changes.
 *
 * This must be called with the mutex locked by the current thread.
 */
static void
mutex_priority_adjust(struct pthread *curthread, pthread_mutex_t mutex)
{
	pthread_mutex_t	m = mutex;
	struct pthread	*pthread_next, *pthread = mutex->m_owner;
	int		done, temp_prio;

	/*
	 * Calculate the mutex priority as the maximum of the highest
	 * active priority of any waiting threads and the owning thread's
	 * active priority(*).
	 *
	 * (*) Because the owning thread's current active priority may
	 *     reflect priority inherited from this mutex (and the mutex
	 *     priority may have changed) we must recalculate the active
	 *     priority based on the thread's saved inherited priority
	 *     and its base priority.
	 */
	pthread_next = TAILQ_FIRST(&m->m_queue);  /* should never be NULL */
	temp_prio = MAX(pthread_next->active_priority,
	    MAX(m->m_saved_prio, pthread->base_priority));

	/* See if this mutex really needs adjusting: */
	if (temp_prio == m->m_prio)
		/* No need to propagate the priority: */
		return;

	/* Set new priority of the mutex: */
	m->m_prio = temp_prio;

	/*
	 * Don't unlock the mutex passed in as an argument.  It is
	 * expected to be locked and unlocked by the caller.
	 */
	done = 1;
	do {
		/*
		 * Save the thread's priority before rescanning the
		 * owned mutexes:
		 */
		temp_prio = pthread->active_priority;

		/*
		 * Fix the priorities for all mutexes held by the owning
		 * thread since taking this mutex.  This also has a
		 * potential side-effect of changing the thread's priority.
		 *
		 * At this point the mutex is locked by the current thread.
		 * The owning thread can't release the mutex until it is
		 * unlocked, so we should be able to safely walk its list
		 * of owned mutexes.
		 */
		mutex_rescan_owned(curthread, pthread, m);

		/*
		 * If this isn't the first time through the loop,
		 * the current mutex needs to be unlocked.
		 */
		if (done == 0)
			THR_LOCK_RELEASE(curthread, &m->m_lock);

		/* Assume we're done unless told otherwise: */
		done = 1;

		/*
		 * If the thread is currently waiting on a mutex, check
		 * to see if the thread's new priority has affected the
		 * priority of the mutex.
		 */
		if ((temp_prio != pthread->active_priority) &&
		    ((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) &&
		    ((m = pthread->data.mutex) != NULL) &&
		    (m->m_protocol == PTHREAD_PRIO_INHERIT)) {
			/* Lock the mutex structure: */
			THR_LOCK_ACQUIRE(curthread, &m->m_lock);

			/*
			 * Make sure the thread is still waiting on the
			 * mutex:
			 */
			if (((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) &&
			    (m == pthread->data.mutex)) {
				/*
				 * The priority for this thread has changed.
				 * Remove and reinsert this thread into the
				 * list of waiting threads to preserve
				 * decreasing priority order.
				 */
				mutex_queue_remove(m, pthread);
				mutex_queue_enq(m, pthread);

				/*
				 * Grab the waiting thread with highest
				 * priority:
				 */
				pthread_next = TAILQ_FIRST(&m->m_queue);

				/*
				 * Calculate the mutex priority as the maximum
				 * of the highest active priority of any
				 * waiting threads and the owning thread's
				 * active priority.
				 */
				temp_prio = MAX(pthread_next->active_priority,
				    MAX(m->m_saved_prio,
				    m->m_owner->base_priority));

				if (temp_prio != m->m_prio) {
					/*
					 * The priority needs to be propagated
					 * to the mutex this thread is waiting
					 * on and up to the owner of that mutex.
					 */
					m->m_prio = temp_prio;
					pthread = m->m_owner;

					/* We're not done yet: */
					done = 0;
				}
			}
			/* Only release the mutex if we're done: */
			if (done != 0)
				THR_LOCK_RELEASE(curthread, &m->m_lock);
		}
	} while (done == 0);
}
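
/*
 * Editor's worked example (hypothetical priorities): thread A (active
 * priority 20) blocks on mutex M1, owned by thread B (base priority
 * 10), while B is itself blocked on priority inheritance mutex M2,
 * owned by thread C (base priority 5).  The first pass above sets M1's
 * priority to 20 and mutex_rescan_owned() raises B's active priority
 * to 20.  Since B is waiting on M2 and B's priority changed, the loop
 * repeats: M2's priority becomes 20 and C's active priority is raised
 * to 20.  C now runs at 20 until it releases M2, so it cannot be
 * starved by threads of priority between 5 and 20.
 */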

static void
mutex_rescan_owned(struct pthread *curthread, struct pthread *pthread,
    struct pthread_mutex *mutex)
{
	struct pthread_mutex	*m;
	struct pthread		*pthread_next;
	int			active_prio, inherited_prio;

	/*
	 * Start walking the mutexes the thread has taken since
	 * taking this mutex.
	 */
	if (mutex == NULL) {
		/*
		 * A null mutex means start at the beginning of the owned
		 * mutex list.
		 */
		m = TAILQ_FIRST(&pthread->mutexq);

		/* There is no inherited priority yet. */
		inherited_prio = 0;
	} else {
		/*
		 * The caller wants to start after a specific mutex.  It
		 * is assumed that this mutex is a priority inheritance
		 * mutex and that its priority has been correctly
		 * calculated.
		 */
		m = TAILQ_NEXT(mutex, m_qe);

		/* Start inheriting priority from the specified mutex. */
		inherited_prio = mutex->m_prio;
	}
	active_prio = MAX(inherited_prio, pthread->base_priority);

	for (; m != NULL; m = TAILQ_NEXT(m, m_qe)) {
		/*
		 * We only want to deal with priority inheritance
		 * mutexes.  This might be optimized by only placing
		 * priority inheritance mutexes into the owned mutex
		 * list, but it may prove to be useful having all
		 * owned mutexes in this list.  Consider a thread
		 * exiting while holding mutexes...
		 */
		if (m->m_protocol == PTHREAD_PRIO_INHERIT) {
			/*
			 * Fix the owner's saved (inherited) priority to
			 * reflect the priority of the previous mutex.
			 */
			m->m_saved_prio = inherited_prio;

			if ((pthread_next = TAILQ_FIRST(&m->m_queue)) != NULL)
				/* Recalculate the priority of the mutex: */
				m->m_prio = MAX(active_prio,
				     pthread_next->active_priority);
			else
				m->m_prio = active_prio;

			/* Recalculate new inherited and active priorities: */
			inherited_prio = m->m_prio;
			active_prio = MAX(m->m_prio, pthread->base_priority);
		}
	}

	/*
	 * Fix the thread's inherited priority and recalculate its
	 * active priority.
	 */
	pthread->inherited_priority = inherited_prio;
	active_prio = MAX(inherited_prio, pthread->base_priority);

	if (active_prio != pthread->active_priority) {
		/* Lock the thread's scheduling queue: */
		THR_SCHED_LOCK(curthread, pthread);

		if ((pthread->flags & THR_FLAGS_IN_RUNQ) == 0) {
			/*
			 * This thread is not in a run queue.  Just set
			 * its active priority.
			 */
			pthread->active_priority = active_prio;
		}
		else {
			/*
			 * This thread is in a run queue.  Remove it from
			 * the queue before changing its priority:
			 */
			THR_RUNQ_REMOVE(pthread);

			/*
			 * POSIX states that if the priority is being
			 * lowered, the thread must be inserted at the
			 * head of the queue for its priority if it owns
			 * any priority protection or inheritance mutexes.
			 */
			if ((active_prio < pthread->active_priority) &&
			    (pthread->priority_mutex_count > 0)) {
				/* Set the new active priority. */
				pthread->active_priority = active_prio;

				THR_RUNQ_INSERT_HEAD(pthread);
			} else {
				/* Set the new active priority. */
				pthread->active_priority = active_prio;

				THR_RUNQ_INSERT_TAIL(pthread);
			}
		}
		THR_SCHED_UNLOCK(curthread, pthread);
	}
}

void
_mutex_unlock_private(pthread_t pthread)
{
	struct pthread_mutex	*m, *m_next;

	for (m = TAILQ_FIRST(&pthread->mutexq); m != NULL; m = m_next) {
		m_next = TAILQ_NEXT(m, m_qe);
		if ((m->m_flags & MUTEX_FLAGS_PRIVATE) != 0)
			pthread_mutex_unlock(&m);
	}
}

/*
 * This is called by the current thread when it wants to back out of a
 * mutex_lock in order to run a signal handler.
 */
void
_mutex_lock_backout(struct pthread *curthread)
{
	struct pthread_mutex *m;

	if ((curthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) {
		/*
		 * Any other thread may clear the "in sync queue flag",
		 * but only the current thread can clear the pointer
		 * to the mutex.  So if the flag is set, we can
		 * guarantee that the pointer to the mutex is valid.
		 * The only problem may be if the mutex is destroyed
		 * out from under us, but that should be considered
		 * an application bug.
		 */
		m = curthread->data.mutex;

		/* Lock the mutex structure: */
		THR_LOCK_ACQUIRE(curthread, &m->m_lock);

		/*
		 * Check to make sure this thread doesn't already own
		 * the mutex.  Since mutexes are unlocked with direct
		 * handoffs, it is possible the previous owner gave it
		 * to us after we checked the sync queue flag and before
		 * we locked the mutex structure.
		 */
		if (m->m_owner == curthread) {
			THR_LOCK_RELEASE(curthread, &m->m_lock);
			mutex_unlock_common(&m, /* add_reference */ 0);
		} else {
			/*
			 * Remove ourselves from the mutex queue and
			 * clear the pointer to the mutex.  We may no
			 * longer be in the mutex queue, but the removal
			 * function will DTRT.
			 */
			mutex_queue_remove(m, curthread);
			curthread->data.mutex = NULL;
			THR_LOCK_RELEASE(curthread, &m->m_lock);
		}
	}
}

/*
 * Dequeue a waiting thread from the head of a mutex queue in descending
 * priority order and hand it the mutex.
 *
 * In order to properly dequeue a thread from the mutex queue and
 * make it runnable without the possibility of errant wakeups, it
 * is necessary to lock the thread's scheduling queue while also
 * holding the mutex lock.
 */
static struct kse_mailbox *
mutex_handoff(struct pthread *curthread, struct pthread_mutex *mutex)
{
	struct kse_mailbox *kmbx = NULL;
	struct pthread *pthread;

	/* Keep dequeueing until we find a valid thread: */
	mutex->m_owner = NULL;
	pthread = TAILQ_FIRST(&mutex->m_queue);
	while (pthread != NULL) {
		/* Take the thread's scheduling lock: */
		THR_SCHED_LOCK(curthread, pthread);

		/* Remove the thread from the mutex queue: */
		TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
		pthread->sflags &= ~THR_FLAGS_IN_SYNCQ;

		/* This thread is no longer waiting for this mutex. */
		pthread->data.mutex = NULL;

		/*
		 * Hand the mutex to this thread according to the mutex
		 * protocol.  (A ceiling violation can instead leave the
		 * mutex unowned; see PTHREAD_PRIO_PROTECT below.)
		 */
		switch (mutex->m_protocol) {
		case PTHREAD_PRIO_NONE:
			/*
			 * Assign the new owner and add the mutex to the
			 * thread's list of owned mutexes.
			 */
			mutex->m_owner = pthread;
			TAILQ_INSERT_TAIL(&pthread->mutexq, mutex, m_qe);
			break;

		case PTHREAD_PRIO_INHERIT:
			/*
			 * Assign the new owner and add the mutex to the
			 * thread's list of owned mutexes.
			 */
			mutex->m_owner = pthread;
			TAILQ_INSERT_TAIL(&pthread->mutexq, mutex, m_qe);

			/* Track number of priority mutexes owned: */
			pthread->priority_mutex_count++;

			/*
			 * Set the priority of the mutex.  Since our waiting
			 * threads are in descending priority order, the
			 * priority of the mutex becomes the active priority
			 * of the thread we just dequeued.
			 */
			mutex->m_prio = pthread->active_priority;

			/* Save the owning thread's inherited priority: */
			mutex->m_saved_prio = pthread->inherited_priority;

			/*
			 * The owning thread's inherited priority now becomes
			 * its active priority (the priority of the mutex).
			 */
			pthread->inherited_priority = mutex->m_prio;
			break;

		case PTHREAD_PRIO_PROTECT:
			if (pthread->active_priority > mutex->m_prio) {
				/*
				 * Either the mutex ceiling priority has
				 * been lowered and/or this thread's priority
				 * has been raised subsequent to the thread
				 * being queued on the waiting list.
				 */
				pthread->error = EINVAL;
			}
			else {
				/*
				 * Assign the new owner and add the mutex
				 * to the thread's list of owned mutexes.
				 */
				mutex->m_owner = pthread;
				TAILQ_INSERT_TAIL(&pthread->mutexq,
				    mutex, m_qe);

				/* Track number of priority mutexes owned: */
				pthread->priority_mutex_count++;

				/*
				 * Save the owning thread's inherited
				 * priority:
				 */
				mutex->m_saved_prio =
				    pthread->inherited_priority;

				/*
				 * The owning thread inherits the ceiling
				 * priority of the mutex and executes at
				 * that priority:
				 */
				pthread->inherited_priority = mutex->m_prio;
				pthread->active_priority = mutex->m_prio;
			}
			break;
		}

		/* Make the thread runnable and unlock the scheduling queue: */
		kmbx = _thr_setrunnable_unlocked(pthread);

		/* Add a preemption point. */
		if ((curthread->kseg == pthread->kseg) &&
		    (pthread->active_priority > curthread->active_priority))
			curthread->critical_yield = 1;

		THR_SCHED_UNLOCK(curthread, pthread);
		if (mutex->m_owner == pthread)
			/* We're done; a valid owner was found. */
			break;
		else
			/* Get the next thread from the waiting queue: */
			pthread = TAILQ_NEXT(pthread, sqe);
	}

	if ((pthread == NULL) && (mutex->m_protocol == PTHREAD_PRIO_INHERIT))
		/* This mutex has no priority: */
		mutex->m_prio = 0;
	return (kmbx);
}
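
/*
 * Editor's note: because unlock performs a direct handoff above, a
 * released mutex is never briefly free for waiters to race over;
 * ownership transfers while the mutex's low-level lock is held, and
 * the old owner only calls kse_wakeup() on the returned mailbox after
 * dropping that lock.  The preemption point means that if, say, a
 * priority-20 waiter is handed a mutex by a priority-5 thread in the
 * same KSE group, the unlocking thread marks itself for an immediate
 * critical yield (the numbers here are only illustrative).
 */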

/*
 * Dequeue a waiting thread from the head of a mutex queue in descending
 * priority order.
 */
static inline pthread_t
mutex_queue_deq(struct pthread_mutex *mutex)
{
	pthread_t pthread;

	while ((pthread = TAILQ_FIRST(&mutex->m_queue)) != NULL) {
		TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
		pthread->sflags &= ~THR_FLAGS_IN_SYNCQ;

		/*
		 * Only exit the loop if the thread hasn't been
		 * cancelled.
		 */
		if (pthread->interrupted == 0)
			break;
	}

	return (pthread);
}

/*
 * Remove a waiting thread from a mutex queue in descending priority order.
 */
static inline void
mutex_queue_remove(pthread_mutex_t mutex, pthread_t pthread)
{
	if ((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) {
		TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
		pthread->sflags &= ~THR_FLAGS_IN_SYNCQ;
	}
}

/*
 * Enqueue a waiting thread to a queue in descending priority order.
 */
static inline void
mutex_queue_enq(pthread_mutex_t mutex, pthread_t pthread)
{
	pthread_t tid = TAILQ_LAST(&mutex->m_queue, mutex_head);

	THR_ASSERT_NOT_IN_SYNCQ(pthread);
	/*
	 * For the common case of all threads having equal priority,
	 * we perform a quick check against the priority of the thread
	 * at the tail of the queue.
	 */
	if ((tid == NULL) || (pthread->active_priority <= tid->active_priority))
		TAILQ_INSERT_TAIL(&mutex->m_queue, pthread, sqe);
	else {
		tid = TAILQ_FIRST(&mutex->m_queue);
		while (pthread->active_priority <= tid->active_priority)
			tid = TAILQ_NEXT(tid, sqe);
		TAILQ_INSERT_BEFORE(tid, pthread, sqe);
	}
	pthread->sflags |= THR_FLAGS_IN_SYNCQ;
}
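
/*
 * Editor's note: the "<=" comparisons above give equal-priority
 * waiters FIFO ordering.  For example (hypothetical priorities), if
 * threads A and B (both priority 15) enqueue, then C (priority 20),
 * the queue reads C, A, B: C is inserted ahead of the equal-priority
 * pair, while A keeps its arrival-order position relative to B.
 */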