thr_mutex.c revision 53812
1209878Snwhitehorn/*
2209878Snwhitehorn * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
3209878Snwhitehorn * All rights reserved.
4209878Snwhitehorn *
5209878Snwhitehorn * Redistribution and use in source and binary forms, with or without
6209878Snwhitehorn * modification, are permitted provided that the following conditions
7209878Snwhitehorn * are met:
8209878Snwhitehorn * 1. Redistributions of source code must retain the above copyright
9209878Snwhitehorn *    notice, this list of conditions and the following disclaimer.
10209878Snwhitehorn * 2. Redistributions in binary form must reproduce the above copyright
11209878Snwhitehorn *    notice, this list of conditions and the following disclaimer in the
12209878Snwhitehorn *    documentation and/or other materials provided with the distribution.
13209878Snwhitehorn * 3. All advertising materials mentioning features or use of this software
14209878Snwhitehorn *    must display the following acknowledgement:
15209878Snwhitehorn *	This product includes software developed by John Birrell.
16209878Snwhitehorn * 4. Neither the name of the author nor the names of any co-contributors
17209878Snwhitehorn *    may be used to endorse or promote products derived from this software
18209878Snwhitehorn *    without specific prior written permission.
19209878Snwhitehorn *
20209878Snwhitehorn * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
21209878Snwhitehorn * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22209878Snwhitehorn * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23209878Snwhitehorn * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24209878Snwhitehorn * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25209878Snwhitehorn * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26209878Snwhitehorn * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27209878Snwhitehorn * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28209878Snwhitehorn * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29209878Snwhitehorn * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30209878Snwhitehorn * SUCH DAMAGE.
31209878Snwhitehorn *
32209878Snwhitehorn * $FreeBSD: head/lib/libkse/thread/thr_mutex.c 53812 1999-11-28 05:38:13Z alfred $
33209878Snwhitehorn */
34209878Snwhitehorn#include <stdlib.h>
35209878Snwhitehorn#include <errno.h>
36209878Snwhitehorn#include <string.h>
37209878Snwhitehorn#include <sys/param.h>
38209878Snwhitehorn#include <sys/queue.h>
39209878Snwhitehorn#ifdef _THREAD_SAFE
40209878Snwhitehorn#include <pthread.h>
41209878Snwhitehorn#include "pthread_private.h"
42209878Snwhitehorn
#if defined(_PTHREADS_INVARIANTS)
/*
 * Debug-only invariant checks.  A mutex's membership on a thread's
 * owned-mutex queue is tracked through its TAILQ linkage pointers:
 * both are NULLed when the mutex is off-queue, so a NULL/non-NULL
 * check detects list-state violations and PANICs on misuse.
 */
#define _MUTEX_INIT_LINK(m) 		do {		\
	(m)->m_qe.tqe_prev = NULL;			\
	(m)->m_qe.tqe_next = NULL;			\
} while (0)
#define _MUTEX_ASSERT_IS_OWNED(m)	do {		\
	if ((m)->m_qe.tqe_prev == NULL)			\
		PANIC("mutex is not on list");		\
} while (0)
#define _MUTEX_ASSERT_NOT_OWNED(m)	do {		\
	if (((m)->m_qe.tqe_prev != NULL) ||		\
	    ((m)->m_qe.tqe_next != NULL))		\
		PANIC("mutex is on list");		\
} while (0)
#else
/* Invariants disabled: the checks compile away to nothing. */
#define _MUTEX_INIT_LINK(m)
#define _MUTEX_ASSERT_IS_OWNED(m)
#define _MUTEX_ASSERT_NOT_OWNED(m)
#endif
62209878Snwhitehorn
63209878Snwhitehorn/*
64209878Snwhitehorn * Prototypes
65209878Snwhitehorn */
66209878Snwhitehornstatic inline int	mutex_self_trylock(pthread_mutex_t);
67209878Snwhitehornstatic inline int	mutex_self_lock(pthread_mutex_t);
68209878Snwhitehornstatic inline int	mutex_unlock_common(pthread_mutex_t *, int);
69209878Snwhitehornstatic void		mutex_priority_adjust(pthread_mutex_t);
70209878Snwhitehornstatic void		mutex_rescan_owned (pthread_t, pthread_mutex_t);
71209878Snwhitehornstatic inline pthread_t	mutex_queue_deq(pthread_mutex_t);
72209878Snwhitehornstatic inline void	mutex_queue_remove(pthread_mutex_t, pthread_t);
73209878Snwhitehornstatic inline void	mutex_queue_enq(pthread_mutex_t, pthread_t);
74209878Snwhitehorn
75209878Snwhitehorn
76209878Snwhitehornstatic spinlock_t static_init_lock = _SPINLOCK_INITIALIZER;
77209878Snwhitehorn
78209878Snwhitehorn/* Reinitialize a mutex to defaults. */
79209878Snwhitehornint
80209878Snwhitehorn_mutex_reinit(pthread_mutex_t * mutex)
81209878Snwhitehorn{
82209878Snwhitehorn	int ret = 0;
83209878Snwhitehorn
84209878Snwhitehorn	if (mutex == NULL)
85209878Snwhitehorn		ret = EINVAL;
86209878Snwhitehorn	else if (*mutex == NULL)
87209878Snwhitehorn		ret = pthread_mutex_init(mutex, NULL);
88209878Snwhitehorn	else {
89209878Snwhitehorn		/*
90209878Snwhitehorn		 * Initialize the mutex structure:
91209878Snwhitehorn		 */
92209878Snwhitehorn		(*mutex)->m_type = PTHREAD_MUTEX_DEFAULT;
93209878Snwhitehorn		(*mutex)->m_protocol = PTHREAD_PRIO_NONE;
94209878Snwhitehorn		TAILQ_INIT(&(*mutex)->m_queue);
95209878Snwhitehorn		(*mutex)->m_owner = NULL;
96209878Snwhitehorn		(*mutex)->m_data.m_count = 0;
97209878Snwhitehorn		(*mutex)->m_flags &= MUTEX_FLAGS_PRIVATE;
98209878Snwhitehorn		(*mutex)->m_flags |= MUTEX_FLAGS_INITED;
99209878Snwhitehorn		(*mutex)->m_refcount = 0;
100209878Snwhitehorn		(*mutex)->m_prio = 0;
101209878Snwhitehorn		(*mutex)->m_saved_prio = 0;
102209878Snwhitehorn		_MUTEX_INIT_LINK(*mutex);
103209878Snwhitehorn		memset(&(*mutex)->lock, 0, sizeof((*mutex)->lock));
104209878Snwhitehorn	}
105209878Snwhitehorn	return (ret);
106209878Snwhitehorn}
107209878Snwhitehorn
108209878Snwhitehornint
109209878Snwhitehornpthread_mutex_init(pthread_mutex_t * mutex,
110209878Snwhitehorn		   const pthread_mutexattr_t * mutex_attr)
111209878Snwhitehorn{
112209878Snwhitehorn	enum pthread_mutextype	type;
113209878Snwhitehorn	int		protocol;
114209878Snwhitehorn	int		ceiling;
115209878Snwhitehorn	pthread_mutex_t	pmutex;
116209878Snwhitehorn	int             ret = 0;
117209878Snwhitehorn
118209878Snwhitehorn	if (mutex == NULL)
119209878Snwhitehorn		ret = EINVAL;
120209878Snwhitehorn
121209878Snwhitehorn	/* Check if default mutex attributes: */
122209878Snwhitehorn	else if (mutex_attr == NULL || *mutex_attr == NULL) {
123209878Snwhitehorn		/* Default to a (error checking) POSIX mutex: */
124209878Snwhitehorn		type = PTHREAD_MUTEX_ERRORCHECK;
125209878Snwhitehorn		protocol = PTHREAD_PRIO_NONE;
126209878Snwhitehorn		ceiling = PTHREAD_MAX_PRIORITY;
127209878Snwhitehorn	}
128209878Snwhitehorn
129209878Snwhitehorn	/* Check mutex type: */
130209878Snwhitehorn	else if (((*mutex_attr)->m_type < PTHREAD_MUTEX_ERRORCHECK) ||
131209878Snwhitehorn	    ((*mutex_attr)->m_type >= MUTEX_TYPE_MAX))
132209878Snwhitehorn		/* Return an invalid argument error: */
133209878Snwhitehorn		ret = EINVAL;
134209878Snwhitehorn
135209878Snwhitehorn	/* Check mutex protocol: */
136209878Snwhitehorn	else if (((*mutex_attr)->m_protocol < PTHREAD_PRIO_NONE) ||
137209878Snwhitehorn	    ((*mutex_attr)->m_protocol > PTHREAD_MUTEX_RECURSIVE))
138209878Snwhitehorn		/* Return an invalid argument error: */
139209878Snwhitehorn		ret = EINVAL;
140209878Snwhitehorn
141209878Snwhitehorn	else {
142209878Snwhitehorn		/* Use the requested mutex type and protocol: */
143209878Snwhitehorn		type = (*mutex_attr)->m_type;
144209878Snwhitehorn		protocol = (*mutex_attr)->m_protocol;
145209878Snwhitehorn		ceiling = (*mutex_attr)->m_ceiling;
146209878Snwhitehorn	}
147209878Snwhitehorn
148209878Snwhitehorn	/* Check no errors so far: */
149209878Snwhitehorn	if (ret == 0) {
150209878Snwhitehorn		if ((pmutex = (pthread_mutex_t)
151209878Snwhitehorn		    malloc(sizeof(struct pthread_mutex))) == NULL)
152209878Snwhitehorn			ret = ENOMEM;
153209878Snwhitehorn		else {
154209878Snwhitehorn			/* Reset the mutex flags: */
155209878Snwhitehorn			pmutex->m_flags = 0;
156209878Snwhitehorn
157209878Snwhitehorn			/* Process according to mutex type: */
158209878Snwhitehorn			switch (type) {
159209878Snwhitehorn			/* case PTHREAD_MUTEX_DEFAULT: */
160209878Snwhitehorn			case PTHREAD_MUTEX_ERRORCHECK:
161209878Snwhitehorn			case PTHREAD_MUTEX_NORMAL:
162209878Snwhitehorn				/* Nothing to do here. */
163209878Snwhitehorn				break;
164209878Snwhitehorn
165209878Snwhitehorn			/* Single UNIX Spec 2 recursive mutex: */
166209878Snwhitehorn			case PTHREAD_MUTEX_RECURSIVE:
167209878Snwhitehorn				/* Reset the mutex count: */
168209878Snwhitehorn				pmutex->m_data.m_count = 0;
169209878Snwhitehorn				break;
170209878Snwhitehorn
171209878Snwhitehorn			/* Trap invalid mutex types: */
172209878Snwhitehorn			default:
173209878Snwhitehorn				/* Return an invalid argument error: */
174209878Snwhitehorn				ret = EINVAL;
175209878Snwhitehorn				break;
176209878Snwhitehorn			}
177209878Snwhitehorn			if (ret == 0) {
178209878Snwhitehorn				/* Initialise the rest of the mutex: */
179209878Snwhitehorn				TAILQ_INIT(&pmutex->m_queue);
180209878Snwhitehorn				pmutex->m_flags |= MUTEX_FLAGS_INITED;
181209878Snwhitehorn				pmutex->m_owner = NULL;
182209878Snwhitehorn				pmutex->m_type = type;
183209878Snwhitehorn				pmutex->m_protocol = protocol;
184209878Snwhitehorn				pmutex->m_refcount = 0;
185209878Snwhitehorn				if (protocol == PTHREAD_PRIO_PROTECT)
186209878Snwhitehorn					pmutex->m_prio = ceiling;
187209878Snwhitehorn				else
188209878Snwhitehorn					pmutex->m_prio = 0;
189209878Snwhitehorn				pmutex->m_saved_prio = 0;
190209878Snwhitehorn				_MUTEX_INIT_LINK(pmutex);
191209878Snwhitehorn				memset(&pmutex->lock, 0, sizeof(pmutex->lock));
192209878Snwhitehorn				*mutex = pmutex;
193209878Snwhitehorn			} else {
194209878Snwhitehorn				free(pmutex);
195209878Snwhitehorn				*mutex = NULL;
196209878Snwhitehorn			}
197209878Snwhitehorn		}
198209878Snwhitehorn	}
199209878Snwhitehorn	/* Return the completion status: */
200209878Snwhitehorn	return(ret);
201209878Snwhitehorn}
202209878Snwhitehorn
203209878Snwhitehornint
204209878Snwhitehornpthread_mutex_destroy(pthread_mutex_t * mutex)
205209878Snwhitehorn{
206209878Snwhitehorn	int ret = 0;
207209878Snwhitehorn
208209878Snwhitehorn	if (mutex == NULL || *mutex == NULL)
209209878Snwhitehorn		ret = EINVAL;
210209878Snwhitehorn	else {
211209878Snwhitehorn		/* Lock the mutex structure: */
212209878Snwhitehorn		_SPINLOCK(&(*mutex)->lock);
213209878Snwhitehorn
214209878Snwhitehorn		/*
215209878Snwhitehorn		 * Check to see if this mutex is in use:
216209878Snwhitehorn		 */
217209878Snwhitehorn		if (((*mutex)->m_owner != NULL) ||
218209878Snwhitehorn		    (TAILQ_FIRST(&(*mutex)->m_queue) != NULL) ||
219209878Snwhitehorn		    ((*mutex)->m_refcount != 0)) {
220209878Snwhitehorn			ret = EBUSY;
221209878Snwhitehorn
222209878Snwhitehorn			/* Unlock the mutex structure: */
223209878Snwhitehorn			_SPINUNLOCK(&(*mutex)->lock);
224209878Snwhitehorn		}
225209878Snwhitehorn		else {
226209878Snwhitehorn			/*
227209878Snwhitehorn			 * Free the memory allocated for the mutex
228209878Snwhitehorn			 * structure:
229209878Snwhitehorn			 */
230209878Snwhitehorn			_MUTEX_ASSERT_NOT_OWNED(*mutex);
231209878Snwhitehorn			free(*mutex);
232209878Snwhitehorn
233209878Snwhitehorn			/*
234209878Snwhitehorn			 * Leave the caller's pointer NULL now that
235209878Snwhitehorn			 * the mutex has been destroyed:
236209878Snwhitehorn			 */
237209878Snwhitehorn			*mutex = NULL;
238209878Snwhitehorn		}
239209878Snwhitehorn	}
240209878Snwhitehorn
241209878Snwhitehorn	/* Return the completion status: */
242209878Snwhitehorn	return (ret);
243209878Snwhitehorn}
244209878Snwhitehorn
245209878Snwhitehornstatic int
246209878Snwhitehorninit_static (pthread_mutex_t *mutex)
247209878Snwhitehorn{
248209878Snwhitehorn	int ret;
249209878Snwhitehorn
250209878Snwhitehorn	_SPINLOCK(&static_init_lock);
251209878Snwhitehorn
252209878Snwhitehorn	if (*mutex == NULL)
253209878Snwhitehorn		ret = pthread_mutex_init(mutex, NULL);
254209878Snwhitehorn	else
255209878Snwhitehorn		ret = 0;
256209878Snwhitehorn
257209878Snwhitehorn	_SPINUNLOCK(&static_init_lock);
258209878Snwhitehorn
259209878Snwhitehorn	return(ret);
260209878Snwhitehorn}
261209878Snwhitehorn
/*
 * Try to acquire the mutex without blocking.  Returns 0 on success,
 * EBUSY if another thread owns it, EDEADLK/EBUSY/recursion per
 * mutex_self_trylock() when the caller already owns it, and EINVAL
 * for bad arguments, an unknown protocol, or a ceiling violation.
 */
int
pthread_mutex_trylock(pthread_mutex_t * mutex)
{
	int             ret = 0;

	if (mutex == NULL)
		ret = EINVAL;

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	else if (*mutex != NULL || (ret = init_static(mutex)) == 0) {
		/*
		 * Defer signals to protect the scheduling queues from
		 * access by the signal handler:
		 */
		_thread_kern_sig_defer();

		/* Lock the mutex structure: */
		_SPINLOCK(&(*mutex)->lock);

		/*
		 * If the mutex was statically allocated, properly
		 * initialize the tail queue.
		 */
		if (((*mutex)->m_flags & MUTEX_FLAGS_INITED) == 0) {
			TAILQ_INIT(&(*mutex)->m_queue);
			_MUTEX_INIT_LINK(*mutex);
			(*mutex)->m_flags |= MUTEX_FLAGS_INITED;
		}

		/* Process according to mutex protocol: */
		switch ((*mutex)->m_protocol) {
		/* Default POSIX mutex: */
		case PTHREAD_PRIO_NONE:
			/* Check if this mutex is not locked: */
			if ((*mutex)->m_owner == NULL) {
				/* Lock the mutex for the running thread: */
				(*mutex)->m_owner = _thread_run;

				/* Add to the list of owned mutexes: */
				_MUTEX_ASSERT_NOT_OWNED(*mutex);
				TAILQ_INSERT_TAIL(&_thread_run->mutexq,
				    (*mutex), m_qe);
			} else if ((*mutex)->m_owner == _thread_run)
				/* Recursive acquisition attempt: */
				ret = mutex_self_trylock(*mutex);
			else
				/* Return a busy error: */
				ret = EBUSY;
			break;

		/* POSIX priority inheritance mutex: */
		case PTHREAD_PRIO_INHERIT:
			/* Check if this mutex is not locked: */
			if ((*mutex)->m_owner == NULL) {
				/* Lock the mutex for the running thread: */
				(*mutex)->m_owner = _thread_run;

				/* Track number of priority mutexes owned: */
				_thread_run->priority_mutex_count++;

				/*
				 * The mutex takes on the attributes of the
				 * running thread when there are no waiters.
				 *
				 * NOTE(review): unlike pthread_mutex_lock(),
				 * this path does not also set
				 * _thread_run->inherited_priority to the
				 * mutex priority — confirm the asymmetry is
				 * intentional.
				 */
				(*mutex)->m_prio = _thread_run->active_priority;
				(*mutex)->m_saved_prio =
				    _thread_run->inherited_priority;

				/* Add to the list of owned mutexes: */
				_MUTEX_ASSERT_NOT_OWNED(*mutex);
				TAILQ_INSERT_TAIL(&_thread_run->mutexq,
				    (*mutex), m_qe);
			} else if ((*mutex)->m_owner == _thread_run)
				/* Recursive acquisition attempt: */
				ret = mutex_self_trylock(*mutex);
			else
				/* Return a busy error: */
				ret = EBUSY;
			break;

		/* POSIX priority protection mutex: */
		case PTHREAD_PRIO_PROTECT:
			/* Check for a priority ceiling violation: */
			if (_thread_run->active_priority > (*mutex)->m_prio)
				ret = EINVAL;

			/* Check if this mutex is not locked: */
			else if ((*mutex)->m_owner == NULL) {
				/* Lock the mutex for the running thread: */
				(*mutex)->m_owner = _thread_run;

				/* Track number of priority mutexes owned: */
				_thread_run->priority_mutex_count++;

				/*
				 * The running thread inherits the ceiling
				 * priority of the mutex and executes at that
				 * priority.  The previous inherited priority
				 * is saved for restoration at unlock.
				 */
				_thread_run->active_priority = (*mutex)->m_prio;
				(*mutex)->m_saved_prio =
				    _thread_run->inherited_priority;
				_thread_run->inherited_priority =
				    (*mutex)->m_prio;

				/* Add to the list of owned mutexes: */
				_MUTEX_ASSERT_NOT_OWNED(*mutex);
				TAILQ_INSERT_TAIL(&_thread_run->mutexq,
				    (*mutex), m_qe);
			} else if ((*mutex)->m_owner == _thread_run)
				/* Recursive acquisition attempt: */
				ret = mutex_self_trylock(*mutex);
			else
				/* Return a busy error: */
				ret = EBUSY;
			break;

		/* Trap invalid mutex types: */
		default:
			/* Return an invalid argument error: */
			ret = EINVAL;
			break;
		}

		/* Unlock the mutex structure: */
		_SPINUNLOCK(&(*mutex)->lock);

		/*
		 * Undefer and handle pending signals, yielding if
		 * necessary:
		 */
		_thread_kern_sig_undefer();
	}

	/* Return the completion status: */
	return (ret);
}
399
/*
 * Acquire the mutex, blocking in PS_MUTEX_WAIT if another thread owns
 * it.  Ownership is handed to this thread directly by the unlocking
 * thread (see mutex_unlock_common), so on wakeup no retry loop is
 * needed.  Returns 0 on success, EINVAL for bad arguments, an unknown
 * protocol, or a ceiling violation, and mutex_self_lock()'s result on
 * a recursive attempt.  Honors pending cancellation before returning.
 */
int
pthread_mutex_lock(pthread_mutex_t * mutex)
{
	int             ret = 0;

	if (mutex == NULL)
		ret = EINVAL;

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	else if (*mutex != NULL || (ret = init_static(mutex)) == 0) {
		/*
		 * Defer signals to protect the scheduling queues from
		 * access by the signal handler:
		 */
		_thread_kern_sig_defer();

		/* Lock the mutex structure: */
		_SPINLOCK(&(*mutex)->lock);

		/*
		 * If the mutex was statically allocated, properly
		 * initialize the tail queue.
		 */
		if (((*mutex)->m_flags & MUTEX_FLAGS_INITED) == 0) {
			TAILQ_INIT(&(*mutex)->m_queue);
			(*mutex)->m_flags |= MUTEX_FLAGS_INITED;
			_MUTEX_INIT_LINK(*mutex);
		}

		/* Reset the interrupted flag: */
		_thread_run->interrupted = 0;

		/* Process according to mutex protocol: */
		switch ((*mutex)->m_protocol) {
		/* Default POSIX mutex: */
		case PTHREAD_PRIO_NONE:
			if ((*mutex)->m_owner == NULL) {
				/* Lock the mutex for this thread: */
				(*mutex)->m_owner = _thread_run;

				/* Add to the list of owned mutexes: */
				_MUTEX_ASSERT_NOT_OWNED(*mutex);
				TAILQ_INSERT_TAIL(&_thread_run->mutexq,
				    (*mutex), m_qe);

			} else if ((*mutex)->m_owner == _thread_run)
				/* Recursive acquisition attempt: */
				ret = mutex_self_lock(*mutex);
			else {
				/*
				 * Join the queue of threads waiting to lock
				 * the mutex:
				 */
				mutex_queue_enq(*mutex, _thread_run);

				/*
				 * Keep a pointer to the mutex this thread
				 * is waiting on:
				 */
				_thread_run->data.mutex = *mutex;

				/*
				 * Unlock the mutex structure and schedule the
				 * next thread:
				 */
				_thread_kern_sched_state_unlock(PS_MUTEX_WAIT,
				    &(*mutex)->lock, __FILE__, __LINE__);

				/* Lock the mutex structure again: */
				_SPINLOCK(&(*mutex)->lock);
			}
			break;

		/* POSIX priority inheritance mutex: */
		case PTHREAD_PRIO_INHERIT:
			/* Check if this mutex is not locked: */
			if ((*mutex)->m_owner == NULL) {
				/* Lock the mutex for this thread: */
				(*mutex)->m_owner = _thread_run;

				/* Track number of priority mutexes owned: */
				_thread_run->priority_mutex_count++;

				/*
				 * The mutex takes on attributes of the
				 * running thread when there are no waiters;
				 * the previous inherited priority is saved
				 * for restoration at unlock.
				 */
				(*mutex)->m_prio = _thread_run->active_priority;
				(*mutex)->m_saved_prio =
				    _thread_run->inherited_priority;
				_thread_run->inherited_priority =
				    (*mutex)->m_prio;

				/* Add to the list of owned mutexes: */
				_MUTEX_ASSERT_NOT_OWNED(*mutex);
				TAILQ_INSERT_TAIL(&_thread_run->mutexq,
				    (*mutex), m_qe);

			} else if ((*mutex)->m_owner == _thread_run)
				/* Recursive acquisition attempt: */
				ret = mutex_self_lock(*mutex);
			else {
				/*
				 * Join the queue of threads waiting to lock
				 * the mutex:
				 */
				mutex_queue_enq(*mutex, _thread_run);

				/*
				 * Keep a pointer to the mutex this thread
				 * is waiting on:
				 */
				_thread_run->data.mutex = *mutex;

				/*
				 * Boost the owner if this waiter runs at a
				 * higher priority than the mutex currently
				 * carries:
				 */
				if (_thread_run->active_priority >
				    (*mutex)->m_prio)
					/* Adjust priorities: */
					mutex_priority_adjust(*mutex);

				/*
				 * Unlock the mutex structure and schedule the
				 * next thread:
				 */
				_thread_kern_sched_state_unlock(PS_MUTEX_WAIT,
				    &(*mutex)->lock, __FILE__, __LINE__);

				/* Lock the mutex structure again: */
				_SPINLOCK(&(*mutex)->lock);
			}
			break;

		/* POSIX priority protection mutex: */
		case PTHREAD_PRIO_PROTECT:
			/* Check for a priority ceiling violation: */
			if (_thread_run->active_priority > (*mutex)->m_prio)
				ret = EINVAL;

			/* Check if this mutex is not locked: */
			else if ((*mutex)->m_owner == NULL) {
				/*
				 * Lock the mutex for the running
				 * thread:
				 */
				(*mutex)->m_owner = _thread_run;

				/* Track number of priority mutexes owned: */
				_thread_run->priority_mutex_count++;

				/*
				 * The running thread inherits the ceiling
				 * priority of the mutex and executes at that
				 * priority:
				 */
				_thread_run->active_priority = (*mutex)->m_prio;
				(*mutex)->m_saved_prio =
				    _thread_run->inherited_priority;
				_thread_run->inherited_priority =
				    (*mutex)->m_prio;

				/* Add to the list of owned mutexes: */
				_MUTEX_ASSERT_NOT_OWNED(*mutex);
				TAILQ_INSERT_TAIL(&_thread_run->mutexq,
				    (*mutex), m_qe);
			} else if ((*mutex)->m_owner == _thread_run)
				/* Recursive acquisition attempt: */
				ret = mutex_self_lock(*mutex);
			else {
				/*
				 * Join the queue of threads waiting to lock
				 * the mutex:
				 */
				mutex_queue_enq(*mutex, _thread_run);

				/*
				 * Keep a pointer to the mutex this thread
				 * is waiting on:
				 */
				_thread_run->data.mutex = *mutex;

				/* Clear any previous error: */
				_thread_run->error = 0;

				/*
				 * Unlock the mutex structure and schedule the
				 * next thread:
				 */
				_thread_kern_sched_state_unlock(PS_MUTEX_WAIT,
				    &(*mutex)->lock, __FILE__, __LINE__);

				/* Lock the mutex structure again: */
				_SPINLOCK(&(*mutex)->lock);

				/*
				 * The threads priority may have changed while
				 * waiting for the mutex causing a ceiling
				 * violation; the unlocking thread reports it
				 * through the error field.
				 */
				ret = _thread_run->error;
				_thread_run->error = 0;
			}
			break;

		/* Trap invalid mutex types: */
		default:
			/* Return an invalid argument error: */
			ret = EINVAL;
			break;
		}

		/*
		 * Check to see if this thread was interrupted and
		 * is still in the mutex queue of waiting threads:
		 */
		if (_thread_run->interrupted != 0)
			mutex_queue_remove(*mutex, _thread_run);

		/* Unlock the mutex structure: */
		_SPINUNLOCK(&(*mutex)->lock);

		/*
		 * Undefer and handle pending signals, yielding if
		 * necessary:
		 */
		_thread_kern_sig_undefer();

		/* Honor a pending cancellation request: */
		if ((_thread_run->cancelflags & PTHREAD_CANCEL_NEEDED) != 0) {
			_thread_exit_cleanup();
			pthread_exit(PTHREAD_CANCELED);
		}
	}

	/* Return the completion status: */
	return (ret);
}
634
635int
636pthread_mutex_unlock(pthread_mutex_t * mutex)
637{
638	return (mutex_unlock_common(mutex, /* add reference */ 0));
639}
640
641int
642_mutex_cv_unlock(pthread_mutex_t * mutex)
643{
644	return (mutex_unlock_common(mutex, /* add reference */ 1));
645}
646
647int
648_mutex_cv_lock(pthread_mutex_t * mutex)
649{
650	int ret;
651	if ((ret = pthread_mutex_lock(mutex)) == 0)
652		(*mutex)->m_refcount--;
653	return (ret);
654}
655
656static inline int
657mutex_self_trylock(pthread_mutex_t mutex)
658{
659	int ret = 0;
660
661	switch (mutex->m_type) {
662
663	/* case PTHREAD_MUTEX_DEFAULT: */
664	case PTHREAD_MUTEX_ERRORCHECK:
665	case PTHREAD_MUTEX_NORMAL:
666		/*
667		 * POSIX specifies that mutexes should return EDEADLK if a
668		 * recursive lock is detected.
669		 */
670		ret = EBUSY;
671		break;
672
673	case PTHREAD_MUTEX_RECURSIVE:
674		/* Increment the lock count: */
675		mutex->m_data.m_count++;
676		break;
677
678	default:
679		/* Trap invalid mutex types; */
680		ret = EINVAL;
681	}
682
683	return(ret);
684}
685
/*
 * Handle a blocking lock by the thread that already owns the mutex.
 * Error-checking mutexes report EDEADLK, recursive mutexes bump the
 * count, and 'normal' mutexes deliberately deadlock the thread.
 */
static inline int
mutex_self_lock(pthread_mutex_t mutex)
{
	int ret = 0;

	switch (mutex->m_type) {
	/* case PTHREAD_MUTEX_DEFAULT: */
	case PTHREAD_MUTEX_ERRORCHECK:
		/*
		 * POSIX specifies that error-checking mutexes return
		 * EDEADLK when a recursive lock is detected.
		 */
		ret = EDEADLK;
		break;

	case PTHREAD_MUTEX_NORMAL:
		/*
		 * What SS2 define as a 'normal' mutex.  Intentionally
		 * deadlock on attempts to get a lock you already own:
		 * the thread is parked in PS_DEADLOCK and the mutex
		 * spinlock released.  NOTE(review): this appears to be
		 * a permanent park — confirm the thread never resumes
		 * from PS_DEADLOCK.
		 */
		_thread_kern_sched_state_unlock(PS_DEADLOCK,
		    &mutex->lock, __FILE__, __LINE__);
		break;

	case PTHREAD_MUTEX_RECURSIVE:
		/* Increment the lock count: */
		mutex->m_data.m_count++;
		break;

	default:
		/* Trap invalid mutex types; */
		ret = EINVAL;
	}

	return(ret);
}
722
723static inline int
724mutex_unlock_common(pthread_mutex_t * mutex, int add_reference)
725{
726	int ret = 0;
727
728	if (mutex == NULL || *mutex == NULL) {
729		ret = EINVAL;
730	} else {
731		/*
732		 * Defer signals to protect the scheduling queues from
733		 * access by the signal handler:
734		 */
735		_thread_kern_sig_defer();
736
737		/* Lock the mutex structure: */
738		_SPINLOCK(&(*mutex)->lock);
739
740		/* Process according to mutex type: */
741		switch ((*mutex)->m_protocol) {
742		/* Default POSIX mutex: */
743		case PTHREAD_PRIO_NONE:
744			/*
745			 * Check if the running thread is not the owner of the
746			 * mutex:
747			 */
748			if ((*mutex)->m_owner != _thread_run) {
749				/*
750				 * Return an invalid argument error for no
751				 * owner and a permission error otherwise:
752				 */
753				ret = (*mutex)->m_owner == NULL ? EINVAL : EPERM;
754			}
755			else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
756			    ((*mutex)->m_data.m_count > 1)) {
757				/* Decrement the count: */
758				(*mutex)->m_data.m_count--;
759			} else {
760				/*
761				 * Clear the count in case this is recursive
762				 * mutex.
763				 */
764				(*mutex)->m_data.m_count = 0;
765
766				/* Remove the mutex from the threads queue. */
767				_MUTEX_ASSERT_IS_OWNED(*mutex);
768				TAILQ_REMOVE(&(*mutex)->m_owner->mutexq,
769				    (*mutex), m_qe);
770				_MUTEX_INIT_LINK(*mutex);
771
772				/*
773				 * Get the next thread from the queue of
774				 * threads waiting on the mutex:
775				 */
776				if (((*mutex)->m_owner =
777			  	    mutex_queue_deq(*mutex)) != NULL) {
778					/*
779					 * Allow the new owner of the mutex to
780					 * run:
781					 */
782					PTHREAD_NEW_STATE((*mutex)->m_owner,
783					    PS_RUNNING);
784
785					/*
786					 * Add the mutex to the threads list of
787					 * owned mutexes:
788					 */
789					TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq,
790					    (*mutex), m_qe);
791
792					/*
793					 * The owner is no longer waiting for
794					 * this mutex:
795					 */
796					(*mutex)->m_owner->data.mutex = NULL;
797				}
798			}
799			break;
800
801		/* POSIX priority inheritence mutex: */
802		case PTHREAD_PRIO_INHERIT:
803			/*
804			 * Check if the running thread is not the owner of the
805			 * mutex:
806			 */
807			if ((*mutex)->m_owner != _thread_run) {
808				/*
809				 * Return an invalid argument error for no
810				 * owner and a permission error otherwise:
811				 */
812				ret = (*mutex)->m_owner == NULL ? EINVAL : EPERM;
813			}
814			else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
815			    ((*mutex)->m_data.m_count > 1)) {
816				/* Decrement the count: */
817				(*mutex)->m_data.m_count--;
818			} else {
819				/*
820				 * Clear the count in case this is recursive
821				 * mutex.
822				 */
823				(*mutex)->m_data.m_count = 0;
824
825				/*
826				 * Restore the threads inherited priority and
827				 * recompute the active priority (being careful
828				 * not to override changes in the threads base
829				 * priority subsequent to locking the mutex).
830				 */
831				_thread_run->inherited_priority =
832					(*mutex)->m_saved_prio;
833				_thread_run->active_priority =
834				    MAX(_thread_run->inherited_priority,
835				    _thread_run->base_priority);
836
837				/*
838				 * This thread now owns one less priority mutex.
839				 */
840				_thread_run->priority_mutex_count--;
841
842				/* Remove the mutex from the threads queue. */
843				_MUTEX_ASSERT_IS_OWNED(*mutex);
844				TAILQ_REMOVE(&(*mutex)->m_owner->mutexq,
845				    (*mutex), m_qe);
846				_MUTEX_INIT_LINK(*mutex);
847
848				/*
849				 * Get the next thread from the queue of threads
850				 * waiting on the mutex:
851				 */
852				if (((*mutex)->m_owner =
853				    mutex_queue_deq(*mutex)) == NULL)
854					/* This mutex has no priority. */
855					(*mutex)->m_prio = 0;
856				else {
857					/*
858					 * Track number of priority mutexes owned:
859					 */
860					(*mutex)->m_owner->priority_mutex_count++;
861
862					/*
863					 * Add the mutex to the threads list
864					 * of owned mutexes:
865					 */
866					TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq,
867					    (*mutex), m_qe);
868
869					/*
870					 * The owner is no longer waiting for
871					 * this mutex:
872					 */
873					(*mutex)->m_owner->data.mutex = NULL;
874
875					/*
876					 * Set the priority of the mutex.  Since
877					 * our waiting threads are in descending
878					 * priority order, the priority of the
879					 * mutex becomes the active priority of
880					 * the thread we just dequeued.
881					 */
882					(*mutex)->m_prio =
883					    (*mutex)->m_owner->active_priority;
884
885					/*
886					 * Save the owning threads inherited
887					 * priority:
888					 */
889					(*mutex)->m_saved_prio =
890						(*mutex)->m_owner->inherited_priority;
891
892					/*
893					 * The owning threads inherited priority
894					 * now becomes his active priority (the
895					 * priority of the mutex).
896					 */
897					(*mutex)->m_owner->inherited_priority =
898						(*mutex)->m_prio;
899
900					/*
901					 * Allow the new owner of the mutex to
902					 * run:
903					 */
904					PTHREAD_NEW_STATE((*mutex)->m_owner,
905					    PS_RUNNING);
906				}
907			}
908			break;
909
910		/* POSIX priority ceiling mutex: */
911		case PTHREAD_PRIO_PROTECT:
912			/*
913			 * Check if the running thread is not the owner of the
914			 * mutex:
915			 */
916			if ((*mutex)->m_owner != _thread_run) {
917				/*
918				 * Return an invalid argument error for no
919				 * owner and a permission error otherwise:
920				 */
921				ret = (*mutex)->m_owner == NULL ? EINVAL : EPERM;
922			}
923			else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
924			    ((*mutex)->m_data.m_count > 1)) {
925				/* Decrement the count: */
926				(*mutex)->m_data.m_count--;
927			} else {
928				/*
929				 * Clear the count in case this is recursive
930				 * mutex.
931				 */
932				(*mutex)->m_data.m_count = 0;
933
934				/*
935				 * Restore the threads inherited priority and
936				 * recompute the active priority (being careful
937				 * not to override changes in the threads base
938				 * priority subsequent to locking the mutex).
939				 */
940				_thread_run->inherited_priority =
941					(*mutex)->m_saved_prio;
942				_thread_run->active_priority =
943				    MAX(_thread_run->inherited_priority,
944				    _thread_run->base_priority);
945
946				/*
947				 * This thread now owns one less priority mutex.
948				 */
949				_thread_run->priority_mutex_count--;
950
951				/* Remove the mutex from the threads queue. */
952				_MUTEX_ASSERT_IS_OWNED(*mutex);
953				TAILQ_REMOVE(&(*mutex)->m_owner->mutexq,
954				    (*mutex), m_qe);
955				_MUTEX_INIT_LINK(*mutex);
956
957				/*
958				 * Enter a loop to find a waiting thread whose
959				 * active priority will not cause a ceiling
960				 * violation:
961				 */
962				while ((((*mutex)->m_owner =
963				    mutex_queue_deq(*mutex)) != NULL) &&
964				    ((*mutex)->m_owner->active_priority >
965				     (*mutex)->m_prio)) {
966					/*
967					 * Either the mutex ceiling priority
968					 * been lowered and/or this threads
969					 * priority has been raised subsequent
970					 * to this thread being queued on the
971					 * waiting list.
972					 */
973					(*mutex)->m_owner->error = EINVAL;
974					PTHREAD_NEW_STATE((*mutex)->m_owner,
975					    PS_RUNNING);
976					/*
977					 * The thread is no longer waiting for
978					 * this mutex:
979					 */
980					(*mutex)->m_owner->data.mutex = NULL;
981				}
982
983				/* Check for a new owner: */
984				if ((*mutex)->m_owner != NULL) {
985					/*
986					 * Track number of priority mutexes owned:
987					 */
988					(*mutex)->m_owner->priority_mutex_count++;
989
990					/*
991					 * Add the mutex to the threads list
992					 * of owned mutexes:
993					 */
994					TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq,
995					    (*mutex), m_qe);
996
997					/*
998					 * The owner is no longer waiting for
999					 * this mutex:
1000					 */
1001					(*mutex)->m_owner->data.mutex = NULL;
1002
1003					/*
1004					 * Save the owning threads inherited
1005					 * priority:
1006					 */
1007					(*mutex)->m_saved_prio =
1008						(*mutex)->m_owner->inherited_priority;
1009
1010					/*
1011					 * The owning thread inherits the
1012					 * ceiling priority of the mutex and
1013					 * executes at that priority:
1014					 */
1015					(*mutex)->m_owner->inherited_priority =
1016					    (*mutex)->m_prio;
1017					(*mutex)->m_owner->active_priority =
1018					    (*mutex)->m_prio;
1019
1020					/*
1021					 * Allow the new owner of the mutex to
1022					 * run:
1023					 */
1024					PTHREAD_NEW_STATE((*mutex)->m_owner,
1025					    PS_RUNNING);
1026				}
1027			}
1028			break;
1029
1030		/* Trap invalid mutex types: */
1031		default:
1032			/* Return an invalid argument error: */
1033			ret = EINVAL;
1034			break;
1035		}
1036
1037		if ((ret == 0) && (add_reference != 0)) {
1038			/* Increment the reference count: */
1039			(*mutex)->m_refcount++;
1040		}
1041
1042		/* Unlock the mutex structure: */
1043		_SPINUNLOCK(&(*mutex)->lock);
1044
1045		/*
1046		 * Undefer and handle pending signals, yielding if
1047		 * necessary:
1048		 */
1049		_thread_kern_sig_undefer();
1050	}
1051
1052	/* Return the completion status: */
1053	return (ret);
1054}
1055
1056
1057/*
1058 * This function is called when a change in base priority occurs for
1059 * a thread that is holding or waiting for a priority protection or
1060 * inheritence mutex.  A change in a threads base priority can effect
1061 * changes to active priorities of other threads and to the ordering
1062 * of mutex locking by waiting threads.
1063 *
1064 * This must be called while thread scheduling is deferred.
1065 */
1066void
1067_mutex_notify_priochange(pthread_t pthread)
1068{
1069	/* Adjust the priorites of any owned priority mutexes: */
1070	if (pthread->priority_mutex_count > 0) {
1071		/*
1072		 * Rescan the mutexes owned by this thread and correct
1073		 * their priorities to account for this threads change
1074		 * in priority.  This has the side effect of changing
1075		 * the threads active priority.
1076		 */
1077		mutex_rescan_owned(pthread, /* rescan all owned */ NULL);
1078	}
1079
1080	/*
1081	 * If this thread is waiting on a priority inheritence mutex,
1082	 * check for priority adjustments.  A change in priority can
1083	 * also effect a ceiling violation(*) for a thread waiting on
1084	 * a priority protection mutex; we don't perform the check here
1085	 * as it is done in pthread_mutex_unlock.
1086	 *
1087	 * (*) It should be noted that a priority change to a thread
1088	 *     _after_ taking and owning a priority ceiling mutex
1089	 *     does not affect ownership of that mutex; the ceiling
1090	 *     priority is only checked before mutex ownership occurs.
1091	 */
1092	if (pthread->state == PS_MUTEX_WAIT) {
1093		/* Lock the mutex structure: */
1094		_SPINLOCK(&pthread->data.mutex->lock);
1095
1096		/*
1097		 * Check to make sure this thread is still in the same state
1098		 * (the spinlock above can yield the CPU to another thread):
1099		 */
1100		if (pthread->state == PS_MUTEX_WAIT) {
1101			/*
1102			 * Remove and reinsert this thread into the list of
1103			 * waiting threads to preserve decreasing priority
1104			 * order.
1105			 */
1106			mutex_queue_remove(pthread->data.mutex, pthread);
1107			mutex_queue_enq(pthread->data.mutex, pthread);
1108
1109			if (pthread->data.mutex->m_protocol ==
1110			     PTHREAD_PRIO_INHERIT) {
1111				/* Adjust priorities: */
1112				mutex_priority_adjust(pthread->data.mutex);
1113			}
1114		}
1115
1116		/* Unlock the mutex structure: */
1117		_SPINUNLOCK(&pthread->data.mutex->lock);
1118	}
1119}
1120
1121/*
1122 * Called when a new thread is added to the mutex waiting queue or
1123 * when a threads priority changes that is already in the mutex
1124 * waiting queue.
1125 */
1126static void
1127mutex_priority_adjust(pthread_mutex_t mutex)
1128{
1129	pthread_t	pthread_next, pthread = mutex->m_owner;
1130	int		temp_prio;
1131	pthread_mutex_t	m = mutex;
1132
1133	/*
1134	 * Calculate the mutex priority as the maximum of the highest
1135	 * active priority of any waiting threads and the owning threads
1136	 * active priority(*).
1137	 *
1138	 * (*) Because the owning threads current active priority may
1139	 *     reflect priority inherited from this mutex (and the mutex
1140	 *     priority may have changed) we must recalculate the active
1141	 *     priority based on the threads saved inherited priority
1142	 *     and its base priority.
1143	 */
1144	pthread_next = TAILQ_FIRST(&m->m_queue);  /* should never be NULL */
1145	temp_prio = MAX(pthread_next->active_priority,
1146	    MAX(m->m_saved_prio, pthread->base_priority));
1147
1148	/* See if this mutex really needs adjusting: */
1149	if (temp_prio == m->m_prio)
1150		/* No need to propagate the priority: */
1151		return;
1152
1153	/* Set new priority of the mutex: */
1154	m->m_prio = temp_prio;
1155
1156	while (m != NULL) {
1157		/*
1158		 * Save the threads priority before rescanning the
1159		 * owned mutexes:
1160		 */
1161		temp_prio = pthread->active_priority;
1162
1163		/*
1164		 * Fix the priorities for all the mutexes this thread has
1165		 * locked since taking this mutex.  This also has a
1166		 * potential side-effect of changing the threads priority.
1167		 */
1168		mutex_rescan_owned(pthread, m);
1169
1170		/*
1171		 * If the thread is currently waiting on a mutex, check
1172		 * to see if the threads new priority has affected the
1173		 * priority of the mutex.
1174		 */
1175		if ((temp_prio != pthread->active_priority) &&
1176		    (pthread->state == PS_MUTEX_WAIT) &&
1177		    (pthread->data.mutex->m_protocol == PTHREAD_PRIO_INHERIT)) {
1178			/* Grab the mutex this thread is waiting on: */
1179			m = pthread->data.mutex;
1180
1181			/*
1182			 * The priority for this thread has changed.  Remove
1183			 * and reinsert this thread into the list of waiting
1184			 * threads to preserve decreasing priority order.
1185			 */
1186			mutex_queue_remove(m, pthread);
1187			mutex_queue_enq(m, pthread);
1188
1189			/* Grab the waiting thread with highest priority: */
1190			pthread_next = TAILQ_FIRST(&m->m_queue);
1191
1192			/*
1193			 * Calculate the mutex priority as the maximum of the
1194			 * highest active priority of any waiting threads and
1195			 * the owning threads active priority.
1196			 */
1197			temp_prio = MAX(pthread_next->active_priority,
1198			    MAX(m->m_saved_prio, m->m_owner->base_priority));
1199
1200			if (temp_prio != m->m_prio) {
1201				/*
1202				 * The priority needs to be propagated to the
1203				 * mutex this thread is waiting on and up to
1204				 * the owner of that mutex.
1205				 */
1206				m->m_prio = temp_prio;
1207				pthread = m->m_owner;
1208			}
1209			else
1210				/* We're done: */
1211				m = NULL;
1212
1213		}
1214		else
1215			/* We're done: */
1216			m = NULL;
1217	}
1218}
1219
/*
 * Recompute the priorities of the priority inheritance mutexes owned
 * by a thread, starting either at the head of its owned-mutex list
 * (mutex == NULL) or just after the given mutex.  As a side effect,
 * the thread's inherited and active priorities are recalculated, and
 * the thread is requeued in the run queue if its active priority
 * changed while it was queued.
 */
static void
mutex_rescan_owned (pthread_t pthread, pthread_mutex_t mutex)
{
	int		active_prio, inherited_prio;
	pthread_mutex_t	m;
	pthread_t	pthread_next;

	/*
	 * Start walking the mutexes the thread has taken since
	 * taking this mutex.
	 */
	if (mutex == NULL) {
		/*
		 * A null mutex means start at the beginning of the owned
		 * mutex list.
		 */
		m = TAILQ_FIRST(&pthread->mutexq);

		/* There is no inherited priority yet. */
		inherited_prio = 0;
	}
	else {
		/*
		 * The caller wants to start after a specific mutex.  It
		 * is assumed that this mutex is a priority inheritance
		 * mutex and that its priority has been correctly
		 * calculated.
		 */
		m = TAILQ_NEXT(mutex, m_qe);

		/* Start inheriting priority from the specified mutex. */
		inherited_prio = mutex->m_prio;
	}
	active_prio = MAX(inherited_prio, pthread->base_priority);

	while (m != NULL) {
		/*
		 * We only want to deal with priority inheritance
		 * mutexes.  This might be optimized by only placing
		 * priority inheritance mutexes into the owned mutex
		 * list, but it may prove to be useful having all
		 * owned mutexes in this list.  Consider a thread
		 * exiting while holding mutexes...
		 */
		if (m->m_protocol == PTHREAD_PRIO_INHERIT) {
			/*
			 * Fix the owner's saved (inherited) priority to
			 * reflect the priority of the previous mutex.
			 */
			m->m_saved_prio = inherited_prio;

			if ((pthread_next = TAILQ_FIRST(&m->m_queue)) != NULL)
				/* Recalculate the priority of the mutex: */
				m->m_prio = MAX(active_prio,
				     pthread_next->active_priority);
			else
				m->m_prio = active_prio;

			/* Recalculate new inherited and active priorities: */
			inherited_prio = m->m_prio;
			active_prio = MAX(m->m_prio, pthread->base_priority);
		}

		/* Advance to the next mutex owned by this thread: */
		m = TAILQ_NEXT(m, m_qe);
	}

	/*
	 * Fix the thread's inherited priority and recalculate its
	 * active priority.
	 */
	pthread->inherited_priority = inherited_prio;
	active_prio = MAX(inherited_prio, pthread->base_priority);

	if (active_prio != pthread->active_priority) {
		/*
		 * If this thread is in the priority queue, it must be
		 * removed and reinserted for its new priority.
		 */
		if (pthread->flags & PTHREAD_FLAGS_IN_PRIOQ) {
			/*
			 * Remove the thread from the priority queue
			 * before changing its priority:
			 */
			PTHREAD_PRIOQ_REMOVE(pthread);

			/*
			 * POSIX states that if the priority is being
			 * lowered, the thread must be inserted at the
			 * head of the queue for its priority if it owns
			 * any priority protection or inheritance mutexes.
			 */
			if ((active_prio < pthread->active_priority) &&
			    (pthread->priority_mutex_count > 0)) {
				/* Set the new active priority. */
				pthread->active_priority = active_prio;

				PTHREAD_PRIOQ_INSERT_HEAD(pthread);
			}
			else {
				/* Set the new active priority. */
				pthread->active_priority = active_prio;

				PTHREAD_PRIOQ_INSERT_TAIL(pthread);
			}
		}
		else {
			/* Set the new active priority. */
			pthread->active_priority = active_prio;
		}
	}
}
1332
1333void
1334_mutex_unlock_private(pthread_t pthread)
1335{
1336	struct pthread_mutex	*m, *m_next;
1337
1338	for (m = TAILQ_FIRST(&pthread->mutexq); m != NULL; m = m_next) {
1339		m_next = TAILQ_NEXT(m, m_qe);
1340		if ((m->m_flags & MUTEX_FLAGS_PRIVATE) != 0)
1341			pthread_mutex_unlock(&m);
1342	}
1343}
1344
1345/*
1346 * Dequeue a waiting thread from the head of a mutex queue in descending
1347 * priority order.
1348 */
1349static inline pthread_t
1350mutex_queue_deq(pthread_mutex_t mutex)
1351{
1352	pthread_t pthread;
1353
1354	while ((pthread = TAILQ_FIRST(&mutex->m_queue)) != NULL) {
1355		TAILQ_REMOVE(&mutex->m_queue, pthread, qe);
1356		pthread->flags &= ~PTHREAD_FLAGS_IN_MUTEXQ;
1357
1358		/*
1359		 * Only exit the loop if the thread hasn't been
1360		 * cancelled.
1361		 */
1362		if (pthread->interrupted == 0)
1363			break;
1364	}
1365
1366	return(pthread);
1367}
1368
1369/*
1370 * Remove a waiting thread from a mutex queue in descending priority order.
1371 */
1372static inline void
1373mutex_queue_remove(pthread_mutex_t mutex, pthread_t pthread)
1374{
1375	if ((pthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0) {
1376		TAILQ_REMOVE(&mutex->m_queue, pthread, qe);
1377		pthread->flags &= ~PTHREAD_FLAGS_IN_MUTEXQ;
1378	}
1379}
1380
1381/*
1382 * Enqueue a waiting thread to a queue in descending priority order.
1383 */
1384static inline void
1385mutex_queue_enq(pthread_mutex_t mutex, pthread_t pthread)
1386{
1387	pthread_t tid = TAILQ_LAST(&mutex->m_queue, mutex_head);
1388
1389	/*
1390	 * For the common case of all threads having equal priority,
1391	 * we perform a quick check against the priority of the thread
1392	 * at the tail of the queue.
1393	 */
1394	if ((tid == NULL) || (pthread->active_priority <= tid->active_priority))
1395		TAILQ_INSERT_TAIL(&mutex->m_queue, pthread, qe);
1396	else {
1397		tid = TAILQ_FIRST(&mutex->m_queue);
1398		while (pthread->active_priority <= tid->active_priority)
1399			tid = TAILQ_NEXT(tid, qe);
1400		TAILQ_INSERT_BEFORE(tid, pthread, qe);
1401	}
1402	pthread->flags |= PTHREAD_FLAGS_IN_MUTEXQ;
1403}
1404
1405#endif
1406