thr_mutex.c revision 64574
1/*
2 * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 *    must display the following acknowledgement:
15 *	This product includes software developed by John Birrell.
16 * 4. Neither the name of the author nor the names of any co-contributors
17 *    may be used to endorse or promote products derived from this software
18 *    without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * SUCH DAMAGE.
31 *
32 * $FreeBSD: head/lib/libkse/thread/thr_mutex.c 64574 2000-08-13 01:30:36Z alfred $
33 */
34#include <stdlib.h>
35#include <errno.h>
36#include <string.h>
37#include <sys/param.h>
38#include <sys/queue.h>
39#ifdef _THREAD_SAFE
40#include <pthread.h>
41#include "pthread_private.h"
42
#if defined(_PTHREADS_INVARIANTS)
/*
 * Invariant-checking helpers.  A mutex's m_qe link pointers are NULLed
 * whenever it is not on a thread's owned-mutex list, so a non-NULL
 * tqe_prev implies list membership.  Violations PANIC the process.
 */
#define _MUTEX_INIT_LINK(m) 		do {		\
	(m)->m_qe.tqe_prev = NULL;			\
	(m)->m_qe.tqe_next = NULL;			\
} while (0)
/* PANIC unless (m) is currently linked on an owned-mutex list. */
#define _MUTEX_ASSERT_IS_OWNED(m)	do {		\
	if ((m)->m_qe.tqe_prev == NULL)			\
		PANIC("mutex is not on list");		\
} while (0)
/* PANIC if (m) is still linked on an owned-mutex list. */
#define _MUTEX_ASSERT_NOT_OWNED(m)	do {		\
	if (((m)->m_qe.tqe_prev != NULL) ||		\
	    ((m)->m_qe.tqe_next != NULL))		\
		PANIC("mutex is on list");		\
} while (0)
#else
/* Non-invariant builds: the checks compile away to nothing. */
#define _MUTEX_INIT_LINK(m)
#define _MUTEX_ASSERT_IS_OWNED(m)
#define _MUTEX_ASSERT_NOT_OWNED(m)
#endif
62
63/*
64 * Prototypes
65 */
66static inline int	mutex_self_trylock(pthread_mutex_t);
67static inline int	mutex_self_lock(pthread_mutex_t);
68static inline int	mutex_unlock_common(pthread_mutex_t *, int);
69static void		mutex_priority_adjust(pthread_mutex_t);
70static void		mutex_rescan_owned (pthread_t, pthread_mutex_t);
71static inline pthread_t	mutex_queue_deq(pthread_mutex_t);
72static inline void	mutex_queue_remove(pthread_mutex_t, pthread_t);
73static inline void	mutex_queue_enq(pthread_mutex_t, pthread_t);
74
75
76static spinlock_t static_init_lock = _SPINLOCK_INITIALIZER;
77
78/* Reinitialize a mutex to defaults. */
79int
80_mutex_reinit(pthread_mutex_t * mutex)
81{
82	int ret = 0;
83
84	if (mutex == NULL)
85		ret = EINVAL;
86	else if (*mutex == NULL)
87		ret = pthread_mutex_init(mutex, NULL);
88	else {
89		/*
90		 * Initialize the mutex structure:
91		 */
92		(*mutex)->m_type = PTHREAD_MUTEX_DEFAULT;
93		(*mutex)->m_protocol = PTHREAD_PRIO_NONE;
94		TAILQ_INIT(&(*mutex)->m_queue);
95		(*mutex)->m_owner = NULL;
96		(*mutex)->m_data.m_count = 0;
97		(*mutex)->m_flags &= MUTEX_FLAGS_PRIVATE;
98		(*mutex)->m_flags |= MUTEX_FLAGS_INITED;
99		(*mutex)->m_refcount = 0;
100		(*mutex)->m_prio = 0;
101		(*mutex)->m_saved_prio = 0;
102		_MUTEX_INIT_LINK(*mutex);
103		memset(&(*mutex)->lock, 0, sizeof((*mutex)->lock));
104	}
105	return (ret);
106}
107
108int
109pthread_mutex_init(pthread_mutex_t * mutex,
110		   const pthread_mutexattr_t * mutex_attr)
111{
112	enum pthread_mutextype	type;
113	int		protocol;
114	int		ceiling;
115	pthread_mutex_t	pmutex;
116	int             ret = 0;
117
118	if (mutex == NULL)
119		ret = EINVAL;
120
121	/* Check if default mutex attributes: */
122	else if (mutex_attr == NULL || *mutex_attr == NULL) {
123		/* Default to a (error checking) POSIX mutex: */
124		type = PTHREAD_MUTEX_ERRORCHECK;
125		protocol = PTHREAD_PRIO_NONE;
126		ceiling = PTHREAD_MAX_PRIORITY;
127	}
128
129	/* Check mutex type: */
130	else if (((*mutex_attr)->m_type < PTHREAD_MUTEX_ERRORCHECK) ||
131	    ((*mutex_attr)->m_type >= MUTEX_TYPE_MAX))
132		/* Return an invalid argument error: */
133		ret = EINVAL;
134
135	/* Check mutex protocol: */
136	else if (((*mutex_attr)->m_protocol < PTHREAD_PRIO_NONE) ||
137	    ((*mutex_attr)->m_protocol > PTHREAD_MUTEX_RECURSIVE))
138		/* Return an invalid argument error: */
139		ret = EINVAL;
140
141	else {
142		/* Use the requested mutex type and protocol: */
143		type = (*mutex_attr)->m_type;
144		protocol = (*mutex_attr)->m_protocol;
145		ceiling = (*mutex_attr)->m_ceiling;
146	}
147
148	/* Check no errors so far: */
149	if (ret == 0) {
150		if ((pmutex = (pthread_mutex_t)
151		    malloc(sizeof(struct pthread_mutex))) == NULL)
152			ret = ENOMEM;
153		else {
154			/* Reset the mutex flags: */
155			pmutex->m_flags = 0;
156
157			/* Process according to mutex type: */
158			switch (type) {
159			/* case PTHREAD_MUTEX_DEFAULT: */
160			case PTHREAD_MUTEX_ERRORCHECK:
161			case PTHREAD_MUTEX_NORMAL:
162				/* Nothing to do here. */
163				break;
164
165			/* Single UNIX Spec 2 recursive mutex: */
166			case PTHREAD_MUTEX_RECURSIVE:
167				/* Reset the mutex count: */
168				pmutex->m_data.m_count = 0;
169				break;
170
171			/* Trap invalid mutex types: */
172			default:
173				/* Return an invalid argument error: */
174				ret = EINVAL;
175				break;
176			}
177			if (ret == 0) {
178				/* Initialise the rest of the mutex: */
179				TAILQ_INIT(&pmutex->m_queue);
180				pmutex->m_flags |= MUTEX_FLAGS_INITED;
181				pmutex->m_owner = NULL;
182				pmutex->m_type = type;
183				pmutex->m_protocol = protocol;
184				pmutex->m_refcount = 0;
185				if (protocol == PTHREAD_PRIO_PROTECT)
186					pmutex->m_prio = ceiling;
187				else
188					pmutex->m_prio = 0;
189				pmutex->m_saved_prio = 0;
190				_MUTEX_INIT_LINK(pmutex);
191				memset(&pmutex->lock, 0, sizeof(pmutex->lock));
192				*mutex = pmutex;
193			} else {
194				free(pmutex);
195				*mutex = NULL;
196			}
197		}
198	}
199	/* Return the completion status: */
200	return(ret);
201}
202
203int
204pthread_mutex_destroy(pthread_mutex_t * mutex)
205{
206	int ret = 0;
207
208	if (mutex == NULL || *mutex == NULL)
209		ret = EINVAL;
210	else {
211		/* Lock the mutex structure: */
212		_SPINLOCK(&(*mutex)->lock);
213
214		/*
215		 * Check to see if this mutex is in use:
216		 */
217		if (((*mutex)->m_owner != NULL) ||
218		    (TAILQ_FIRST(&(*mutex)->m_queue) != NULL) ||
219		    ((*mutex)->m_refcount != 0)) {
220			ret = EBUSY;
221
222			/* Unlock the mutex structure: */
223			_SPINUNLOCK(&(*mutex)->lock);
224		}
225		else {
226			/*
227			 * Free the memory allocated for the mutex
228			 * structure:
229			 */
230			_MUTEX_ASSERT_NOT_OWNED(*mutex);
231			free(*mutex);
232
233			/*
234			 * Leave the caller's pointer NULL now that
235			 * the mutex has been destroyed:
236			 */
237			*mutex = NULL;
238		}
239	}
240
241	/* Return the completion status: */
242	return (ret);
243}
244
245static int
246init_static(pthread_mutex_t *mutex)
247{
248	int ret;
249
250	_SPINLOCK(&static_init_lock);
251
252	if (*mutex == NULL)
253		ret = pthread_mutex_init(mutex, NULL);
254	else
255		ret = 0;
256
257	_SPINUNLOCK(&static_init_lock);
258
259	return(ret);
260}
261
/*
 * Acquire a mutex without blocking.  Returns 0 on success; EBUSY if
 * another thread holds the mutex (or this thread re-enters a
 * non-recursive one); EINVAL for a NULL mutex, an unknown protocol,
 * or a priority-ceiling violation; or an error from the dynamic
 * initialization of a statically allocated mutex.
 */
int
pthread_mutex_trylock(pthread_mutex_t * mutex)
{
	int             ret = 0;

	if (mutex == NULL)
		ret = EINVAL;

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	else if (*mutex != NULL || (ret = init_static(mutex)) == 0) {
		/*
		 * Defer signals to protect the scheduling queues from
		 * access by the signal handler:
		 */
		_thread_kern_sig_defer();

		/* Lock the mutex structure: */
		_SPINLOCK(&(*mutex)->lock);

		/*
		 * If the mutex was statically allocated, properly
		 * initialize the tail queue.
		 */
		if (((*mutex)->m_flags & MUTEX_FLAGS_INITED) == 0) {
			TAILQ_INIT(&(*mutex)->m_queue);
			_MUTEX_INIT_LINK(*mutex);
			(*mutex)->m_flags |= MUTEX_FLAGS_INITED;
		}

		/* Process according to mutex type: */
		switch ((*mutex)->m_protocol) {
		/* Default POSIX mutex: */
		case PTHREAD_PRIO_NONE:
			/* Check if this mutex is not locked: */
			if ((*mutex)->m_owner == NULL) {
				/* Lock the mutex for the running thread: */
				(*mutex)->m_owner = _thread_run;

				/* Add to the list of owned mutexes: */
				_MUTEX_ASSERT_NOT_OWNED(*mutex);
				TAILQ_INSERT_TAIL(&_thread_run->mutexq,
				    (*mutex), m_qe);
			} else if ((*mutex)->m_owner == _thread_run)
				ret = mutex_self_trylock(*mutex);
			else
				/* Return a busy error: */
				ret = EBUSY;
			break;

		/* POSIX priority inheritence mutex: */
		case PTHREAD_PRIO_INHERIT:
			/* Check if this mutex is not locked: */
			if ((*mutex)->m_owner == NULL) {
				/* Lock the mutex for the running thread: */
				(*mutex)->m_owner = _thread_run;

				/* Track number of priority mutexes owned: */
				_thread_run->priority_mutex_count++;

				/*
				 * The mutex takes on the attributes of the
				 * running thread when there are no waiters.
				 */
				(*mutex)->m_prio = _thread_run->active_priority;
				(*mutex)->m_saved_prio =
				    _thread_run->inherited_priority;

				/* Add to the list of owned mutexes: */
				_MUTEX_ASSERT_NOT_OWNED(*mutex);
				TAILQ_INSERT_TAIL(&_thread_run->mutexq,
				    (*mutex), m_qe);
			} else if ((*mutex)->m_owner == _thread_run)
				ret = mutex_self_trylock(*mutex);
			else
				/* Return a busy error: */
				ret = EBUSY;
			break;

		/* POSIX priority protection mutex: */
		case PTHREAD_PRIO_PROTECT:
			/* Check for a priority ceiling violation: */
			if (_thread_run->active_priority > (*mutex)->m_prio)
				ret = EINVAL;

			/* Check if this mutex is not locked: */
			else if ((*mutex)->m_owner == NULL) {
				/* Lock the mutex for the running thread: */
				(*mutex)->m_owner = _thread_run;

				/* Track number of priority mutexes owned: */
				_thread_run->priority_mutex_count++;

				/*
				 * The running thread inherits the ceiling
				 * priority of the mutex and executes at that
				 * priority.
				 */
				_thread_run->active_priority = (*mutex)->m_prio;
				(*mutex)->m_saved_prio =
				    _thread_run->inherited_priority;
				_thread_run->inherited_priority =
				    (*mutex)->m_prio;

				/* Add to the list of owned mutexes: */
				_MUTEX_ASSERT_NOT_OWNED(*mutex);
				TAILQ_INSERT_TAIL(&_thread_run->mutexq,
				    (*mutex), m_qe);
			} else if ((*mutex)->m_owner == _thread_run)
				ret = mutex_self_trylock(*mutex);
			else
				/* Return a busy error: */
				ret = EBUSY;
			break;

		/* Trap invalid mutex types: */
		default:
			/* Return an invalid argument error: */
			ret = EINVAL;
			break;
		}

		/* Unlock the mutex structure: */
		_SPINUNLOCK(&(*mutex)->lock);

		/*
		 * Undefer and handle pending signals, yielding if
		 * necessary:
		 */
		_thread_kern_sig_undefer();
	}

	/* Return the completion status: */
	return (ret);
}
399
/*
 * Acquire a mutex, blocking in PS_MUTEX_WAIT until it becomes
 * available.  Handles the three POSIX protocols: PTHREAD_PRIO_NONE,
 * PTHREAD_PRIO_INHERIT (waiters boost the owner's priority) and
 * PTHREAD_PRIO_PROTECT (owner runs at the mutex ceiling).  Returns 0
 * on success, EINVAL for a NULL mutex, unknown protocol or ceiling
 * violation, or EDEADLK from a re-lock of an error-checking mutex.
 */
int
pthread_mutex_lock(pthread_mutex_t * mutex)
{
	int             ret = 0;

	if (mutex == NULL)
		ret = EINVAL;

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	else if (*mutex != NULL || (ret = init_static(mutex)) == 0) {
		/*
		 * Defer signals to protect the scheduling queues from
		 * access by the signal handler:
		 */
		_thread_kern_sig_defer();

		/* Lock the mutex structure: */
		_SPINLOCK(&(*mutex)->lock);

		/*
		 * If the mutex was statically allocated, properly
		 * initialize the tail queue.
		 */
		if (((*mutex)->m_flags & MUTEX_FLAGS_INITED) == 0) {
			TAILQ_INIT(&(*mutex)->m_queue);
			(*mutex)->m_flags |= MUTEX_FLAGS_INITED;
			_MUTEX_INIT_LINK(*mutex);
		}

		/* Reset the interrupted flag: */
		_thread_run->interrupted = 0;

		/* Process according to mutex type: */
		switch ((*mutex)->m_protocol) {
		/* Default POSIX mutex: */
		case PTHREAD_PRIO_NONE:
			if ((*mutex)->m_owner == NULL) {
				/* Lock the mutex for this thread: */
				(*mutex)->m_owner = _thread_run;

				/* Add to the list of owned mutexes: */
				_MUTEX_ASSERT_NOT_OWNED(*mutex);
				TAILQ_INSERT_TAIL(&_thread_run->mutexq,
				    (*mutex), m_qe);

			} else if ((*mutex)->m_owner == _thread_run)
				ret = mutex_self_lock(*mutex);
			else {
				/*
				 * Join the queue of threads waiting to lock
				 * the mutex:
				 */
				mutex_queue_enq(*mutex, _thread_run);

				/*
				 * Keep a pointer to the mutex this thread
				 * is waiting on:
				 */
				_thread_run->data.mutex = *mutex;

				/*
				 * Unlock the mutex structure and schedule the
				 * next thread:
				 */
				_thread_kern_sched_state_unlock(PS_MUTEX_WAIT,
				    &(*mutex)->lock, __FILE__, __LINE__);

				/* Lock the mutex structure again: */
				_SPINLOCK(&(*mutex)->lock);
			}
			break;

		/* POSIX priority inheritence mutex: */
		case PTHREAD_PRIO_INHERIT:
			/* Check if this mutex is not locked: */
			if ((*mutex)->m_owner == NULL) {
				/* Lock the mutex for this thread: */
				(*mutex)->m_owner = _thread_run;

				/* Track number of priority mutexes owned: */
				_thread_run->priority_mutex_count++;

				/*
				 * The mutex takes on attributes of the
				 * running thread when there are no waiters.
				 */
				(*mutex)->m_prio = _thread_run->active_priority;
				(*mutex)->m_saved_prio =
				    _thread_run->inherited_priority;
				_thread_run->inherited_priority =
				    (*mutex)->m_prio;

				/* Add to the list of owned mutexes: */
				_MUTEX_ASSERT_NOT_OWNED(*mutex);
				TAILQ_INSERT_TAIL(&_thread_run->mutexq,
				    (*mutex), m_qe);

			} else if ((*mutex)->m_owner == _thread_run)
				ret = mutex_self_lock(*mutex);
			else {
				/*
				 * Join the queue of threads waiting to lock
				 * the mutex:
				 */
				mutex_queue_enq(*mutex, _thread_run);

				/*
				 * Keep a pointer to the mutex this thread
				 * is waiting on:
				 */
				_thread_run->data.mutex = *mutex;

				if (_thread_run->active_priority >
				    (*mutex)->m_prio)
					/* Adjust priorities: */
					mutex_priority_adjust(*mutex);

				/*
				 * Unlock the mutex structure and schedule the
				 * next thread:
				 */
				_thread_kern_sched_state_unlock(PS_MUTEX_WAIT,
				    &(*mutex)->lock, __FILE__, __LINE__);

				/* Lock the mutex structure again: */
				_SPINLOCK(&(*mutex)->lock);
			}
			break;

		/* POSIX priority protection mutex: */
		case PTHREAD_PRIO_PROTECT:
			/* Check for a priority ceiling violation: */
			if (_thread_run->active_priority > (*mutex)->m_prio)
				ret = EINVAL;

			/* Check if this mutex is not locked: */
			else if ((*mutex)->m_owner == NULL) {
				/*
				 * Lock the mutex for the running
				 * thread:
				 */
				(*mutex)->m_owner = _thread_run;

				/* Track number of priority mutexes owned: */
				_thread_run->priority_mutex_count++;

				/*
				 * The running thread inherits the ceiling
				 * priority of the mutex and executes at that
				 * priority:
				 */
				_thread_run->active_priority = (*mutex)->m_prio;
				(*mutex)->m_saved_prio =
				    _thread_run->inherited_priority;
				_thread_run->inherited_priority =
				    (*mutex)->m_prio;

				/* Add to the list of owned mutexes: */
				_MUTEX_ASSERT_NOT_OWNED(*mutex);
				TAILQ_INSERT_TAIL(&_thread_run->mutexq,
				    (*mutex), m_qe);
			} else if ((*mutex)->m_owner == _thread_run)
				ret = mutex_self_lock(*mutex);
			else {
				/*
				 * Join the queue of threads waiting to lock
				 * the mutex:
				 */
				mutex_queue_enq(*mutex, _thread_run);

				/*
				 * Keep a pointer to the mutex this thread
				 * is waiting on:
				 */
				_thread_run->data.mutex = *mutex;

				/* Clear any previous error: */
				_thread_run->error = 0;

				/*
				 * Unlock the mutex structure and schedule the
				 * next thread:
				 */
				_thread_kern_sched_state_unlock(PS_MUTEX_WAIT,
				    &(*mutex)->lock, __FILE__, __LINE__);

				/* Lock the mutex structure again: */
				_SPINLOCK(&(*mutex)->lock);

				/*
				 * The threads priority may have changed while
				 * waiting for the mutex causing a ceiling
				 * violation.
				 */
				ret = _thread_run->error;
				_thread_run->error = 0;
			}
			break;

		/* Trap invalid mutex types: */
		default:
			/* Return an invalid argument error: */
			ret = EINVAL;
			break;
		}

		/*
		 * Check to see if this thread was interrupted and
		 * is still in the mutex queue of waiting threads:
		 */
		if (_thread_run->interrupted != 0) {
			mutex_queue_remove(*mutex, _thread_run);
		}

		/* Unlock the mutex structure: */
		_SPINUNLOCK(&(*mutex)->lock);

		/*
		 * Undefer and handle pending signals, yielding if
		 * necessary:
		 */
		_thread_kern_sig_undefer();

		/*
		 * An interrupted wait runs the thread's continuation
		 * (e.g. cancellation cleanup) outside the spinlock:
		 */
		if (_thread_run->interrupted != 0 &&
		    _thread_run->continuation != NULL)
			_thread_run->continuation((void *) _thread_run);
	}

	/* Return the completion status: */
	return (ret);
}
634
635int
636pthread_mutex_unlock(pthread_mutex_t * mutex)
637{
638	return (mutex_unlock_common(mutex, /* add reference */ 0));
639}
640
641int
642_mutex_cv_unlock(pthread_mutex_t * mutex)
643{
644	return (mutex_unlock_common(mutex, /* add reference */ 1));
645}
646
647int
648_mutex_cv_lock(pthread_mutex_t * mutex)
649{
650	int ret;
651	if ((ret = pthread_mutex_lock(mutex)) == 0)
652		(*mutex)->m_refcount--;
653	return (ret);
654}
655
656static inline int
657mutex_self_trylock(pthread_mutex_t mutex)
658{
659	int ret = 0;
660
661	switch (mutex->m_type) {
662
663	/* case PTHREAD_MUTEX_DEFAULT: */
664	case PTHREAD_MUTEX_ERRORCHECK:
665	case PTHREAD_MUTEX_NORMAL:
666		/*
667		 * POSIX specifies that mutexes should return EDEADLK if a
668		 * recursive lock is detected.
669		 */
670		ret = EBUSY;
671		break;
672
673	case PTHREAD_MUTEX_RECURSIVE:
674		/* Increment the lock count: */
675		mutex->m_data.m_count++;
676		break;
677
678	default:
679		/* Trap invalid mutex types; */
680		ret = EINVAL;
681	}
682
683	return(ret);
684}
685
686static inline int
687mutex_self_lock(pthread_mutex_t mutex)
688{
689	int ret = 0;
690
691	switch (mutex->m_type) {
692	/* case PTHREAD_MUTEX_DEFAULT: */
693	case PTHREAD_MUTEX_ERRORCHECK:
694		/*
695		 * POSIX specifies that mutexes should return EDEADLK if a
696		 * recursive lock is detected.
697		 */
698		ret = EDEADLK;
699		break;
700
701	case PTHREAD_MUTEX_NORMAL:
702		/*
703		 * What SS2 define as a 'normal' mutex.  Intentionally
704		 * deadlock on attempts to get a lock you already own.
705		 */
706		_thread_kern_sched_state_unlock(PS_DEADLOCK,
707		    &mutex->lock, __FILE__, __LINE__);
708		break;
709
710	case PTHREAD_MUTEX_RECURSIVE:
711		/* Increment the lock count: */
712		mutex->m_data.m_count++;
713		break;
714
715	default:
716		/* Trap invalid mutex types; */
717		ret = EINVAL;
718	}
719
720	return(ret);
721}
722
/*
 * Common unlock path for pthread_mutex_unlock() and _mutex_cv_unlock().
 * Verifies ownership, decrements a recursive count when appropriate,
 * otherwise hands the mutex to the next eligible waiter and restores
 * or transfers inherited/ceiling priorities per the mutex protocol.
 * When add_reference is non-zero the reference count is bumped after a
 * successful unlock (used by the condition-variable code).
 */
static inline int
mutex_unlock_common(pthread_mutex_t * mutex, int add_reference)
{
	int ret = 0;

	if (mutex == NULL || *mutex == NULL) {
		ret = EINVAL;
	} else {
		/*
		 * Defer signals to protect the scheduling queues from
		 * access by the signal handler:
		 */
		_thread_kern_sig_defer();

		/* Lock the mutex structure: */
		_SPINLOCK(&(*mutex)->lock);

		/* Process according to mutex type: */
		switch ((*mutex)->m_protocol) {
		/* Default POSIX mutex: */
		case PTHREAD_PRIO_NONE:
			/*
			 * Check if the running thread is not the owner of the
			 * mutex:
			 */
			if ((*mutex)->m_owner != _thread_run) {
				/*
				 * Return an invalid argument error for no
				 * owner and a permission error otherwise:
				 */
				ret = (*mutex)->m_owner == NULL ? EINVAL : EPERM;
			}
			else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
			    ((*mutex)->m_data.m_count > 0)) {
				/* Decrement the count: */
				(*mutex)->m_data.m_count--;
			} else {
				/*
				 * Clear the count in case this is recursive
				 * mutex.
				 */
				(*mutex)->m_data.m_count = 0;

				/* Remove the mutex from the threads queue. */
				_MUTEX_ASSERT_IS_OWNED(*mutex);
				TAILQ_REMOVE(&(*mutex)->m_owner->mutexq,
				    (*mutex), m_qe);
				_MUTEX_INIT_LINK(*mutex);

				/*
				 * Get the next thread from the queue of
				 * threads waiting on the mutex:
				 */
				if (((*mutex)->m_owner =
			  	    mutex_queue_deq(*mutex)) != NULL) {
					/*
					 * Unless the new owner of the mutex is
					 * currently suspended, allow the owner
					 * to run.  If the thread is suspended,
					 * make a note that the thread isn't in
					 * a wait queue any more.
					 */
					if (((*mutex)->m_owner->state !=
					    PS_SUSPENDED)) {
						PTHREAD_NEW_STATE((*mutex)->m_owner,
						    PS_RUNNING);
					} else {
						(*mutex)->m_owner->suspended =
						    SUSP_NOWAIT;
					}

					/*
					 * Add the mutex to the threads list of
					 * owned mutexes:
					 */
					TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq,
					    (*mutex), m_qe);

					/*
					 * The owner is no longer waiting for
					 * this mutex:
					 */
					(*mutex)->m_owner->data.mutex = NULL;
				}
			}
			break;

		/* POSIX priority inheritence mutex: */
		case PTHREAD_PRIO_INHERIT:
			/*
			 * Check if the running thread is not the owner of the
			 * mutex:
			 */
			if ((*mutex)->m_owner != _thread_run) {
				/*
				 * Return an invalid argument error for no
				 * owner and a permission error otherwise:
				 */
				ret = (*mutex)->m_owner == NULL ? EINVAL : EPERM;
			}
			else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
			    ((*mutex)->m_data.m_count > 0)) {
				/* Decrement the count: */
				(*mutex)->m_data.m_count--;
			} else {
				/*
				 * Clear the count in case this is recursive
				 * mutex.
				 */
				(*mutex)->m_data.m_count = 0;

				/*
				 * Restore the threads inherited priority and
				 * recompute the active priority (being careful
				 * not to override changes in the threads base
				 * priority subsequent to locking the mutex).
				 */
				_thread_run->inherited_priority =
					(*mutex)->m_saved_prio;
				_thread_run->active_priority =
				    MAX(_thread_run->inherited_priority,
				    _thread_run->base_priority);

				/*
				 * This thread now owns one less priority mutex.
				 */
				_thread_run->priority_mutex_count--;

				/* Remove the mutex from the threads queue. */
				_MUTEX_ASSERT_IS_OWNED(*mutex);
				TAILQ_REMOVE(&(*mutex)->m_owner->mutexq,
				    (*mutex), m_qe);
				_MUTEX_INIT_LINK(*mutex);

				/*
				 * Get the next thread from the queue of threads
				 * waiting on the mutex:
				 */
				if (((*mutex)->m_owner =
				    mutex_queue_deq(*mutex)) == NULL)
					/* This mutex has no priority. */
					(*mutex)->m_prio = 0;
				else {
					/*
					 * Track number of priority mutexes owned:
					 */
					(*mutex)->m_owner->priority_mutex_count++;

					/*
					 * Add the mutex to the threads list
					 * of owned mutexes:
					 */
					TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq,
					    (*mutex), m_qe);

					/*
					 * The owner is no longer waiting for
					 * this mutex:
					 */
					(*mutex)->m_owner->data.mutex = NULL;

					/*
					 * Set the priority of the mutex.  Since
					 * our waiting threads are in descending
					 * priority order, the priority of the
					 * mutex becomes the active priority of
					 * the thread we just dequeued.
					 */
					(*mutex)->m_prio =
					    (*mutex)->m_owner->active_priority;

					/*
					 * Save the owning threads inherited
					 * priority:
					 */
					(*mutex)->m_saved_prio =
						(*mutex)->m_owner->inherited_priority;

					/*
					 * The owning threads inherited priority
					 * now becomes his active priority (the
					 * priority of the mutex).
					 */
					(*mutex)->m_owner->inherited_priority =
						(*mutex)->m_prio;

					/*
					 * Unless the new owner of the mutex is
					 * currently suspended, allow the owner
					 * to run.  If the thread is suspended,
					 * make a note that the thread isn't in
					 * a wait queue any more.
					 */
					if (((*mutex)->m_owner->state !=
					    PS_SUSPENDED)) {
						PTHREAD_NEW_STATE((*mutex)->m_owner,
						    PS_RUNNING);
					} else {
						(*mutex)->m_owner->suspended =
						    SUSP_NOWAIT;
					}
				}
			}
			break;

		/* POSIX priority ceiling mutex: */
		case PTHREAD_PRIO_PROTECT:
			/*
			 * Check if the running thread is not the owner of the
			 * mutex:
			 */
			if ((*mutex)->m_owner != _thread_run) {
				/*
				 * Return an invalid argument error for no
				 * owner and a permission error otherwise:
				 */
				ret = (*mutex)->m_owner == NULL ? EINVAL : EPERM;
			}
			else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
			    ((*mutex)->m_data.m_count > 0)) {
				/* Decrement the count: */
				(*mutex)->m_data.m_count--;
			} else {
				/*
				 * Clear the count in case this is recursive
				 * mutex.
				 */
				(*mutex)->m_data.m_count = 0;

				/*
				 * Restore the threads inherited priority and
				 * recompute the active priority (being careful
				 * not to override changes in the threads base
				 * priority subsequent to locking the mutex).
				 */
				_thread_run->inherited_priority =
					(*mutex)->m_saved_prio;
				_thread_run->active_priority =
				    MAX(_thread_run->inherited_priority,
				    _thread_run->base_priority);

				/*
				 * This thread now owns one less priority mutex.
				 */
				_thread_run->priority_mutex_count--;

				/* Remove the mutex from the threads queue. */
				_MUTEX_ASSERT_IS_OWNED(*mutex);
				TAILQ_REMOVE(&(*mutex)->m_owner->mutexq,
				    (*mutex), m_qe);
				_MUTEX_INIT_LINK(*mutex);

				/*
				 * Enter a loop to find a waiting thread whose
				 * active priority will not cause a ceiling
				 * violation:
				 */
				while ((((*mutex)->m_owner =
				    mutex_queue_deq(*mutex)) != NULL) &&
				    ((*mutex)->m_owner->active_priority >
				     (*mutex)->m_prio)) {
					/*
					 * Either the mutex ceiling priority
					 * been lowered and/or this threads
					 * priority has been raised subsequent
					 * to this thread being queued on the
					 * waiting list.
					 */
					(*mutex)->m_owner->error = EINVAL;
					PTHREAD_NEW_STATE((*mutex)->m_owner,
					    PS_RUNNING);
					/*
					 * The thread is no longer waiting for
					 * this mutex:
					 */
					(*mutex)->m_owner->data.mutex = NULL;
				}

				/* Check for a new owner: */
				if ((*mutex)->m_owner != NULL) {
					/*
					 * Track number of priority mutexes owned:
					 */
					(*mutex)->m_owner->priority_mutex_count++;

					/*
					 * Add the mutex to the threads list
					 * of owned mutexes:
					 */
					TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq,
					    (*mutex), m_qe);

					/*
					 * The owner is no longer waiting for
					 * this mutex:
					 */
					(*mutex)->m_owner->data.mutex = NULL;

					/*
					 * Save the owning threads inherited
					 * priority:
					 */
					(*mutex)->m_saved_prio =
						(*mutex)->m_owner->inherited_priority;

					/*
					 * The owning thread inherits the
					 * ceiling priority of the mutex and
					 * executes at that priority:
					 */
					(*mutex)->m_owner->inherited_priority =
					    (*mutex)->m_prio;
					(*mutex)->m_owner->active_priority =
					    (*mutex)->m_prio;

					/*
					 * Unless the new owner of the mutex is
					 * currently suspended, allow the owner
					 * to run.  If the thread is suspended,
					 * make a note that the thread isn't in
					 * a wait queue any more.
					 */
					if (((*mutex)->m_owner->state !=
					    PS_SUSPENDED)) {
						PTHREAD_NEW_STATE((*mutex)->m_owner,
						    PS_RUNNING);
					} else {
						(*mutex)->m_owner->suspended =
						    SUSP_NOWAIT;
					}
				}
			}
			break;

		/* Trap invalid mutex types: */
		default:
			/* Return an invalid argument error: */
			ret = EINVAL;
			break;
		}

		if ((ret == 0) && (add_reference != 0)) {
			/* Increment the reference count: */
			(*mutex)->m_refcount++;
		}

		/* Unlock the mutex structure: */
		_SPINUNLOCK(&(*mutex)->lock);

		/*
		 * Undefer and handle pending signals, yielding if
		 * necessary:
		 */
		_thread_kern_sig_undefer();
	}

	/* Return the completion status: */
	return (ret);
}
1082
1083
1084/*
1085 * This function is called when a change in base priority occurs for
1086 * a thread that is holding or waiting for a priority protection or
1087 * inheritence mutex.  A change in a threads base priority can effect
1088 * changes to active priorities of other threads and to the ordering
1089 * of mutex locking by waiting threads.
1090 *
1091 * This must be called while thread scheduling is deferred.
1092 */
void
_mutex_notify_priochange(pthread_t pthread)
{
	/* Adjust the priorites of any owned priority mutexes: */
	if (pthread->priority_mutex_count > 0) {
		/*
		 * Rescan the mutexes owned by this thread and correct
		 * their priorities to account for this threads change
		 * in priority.  This has the side effect of changing
		 * the threads active priority.
		 */
		mutex_rescan_owned(pthread, /* rescan all owned */ NULL);
	}

	/*
	 * If this thread is waiting on a priority inheritence mutex,
	 * check for priority adjustments.  A change in priority can
	 * also effect a ceiling violation(*) for a thread waiting on
	 * a priority protection mutex; we don't perform the check here
	 * as it is done in pthread_mutex_unlock.
	 *
	 * (*) It should be noted that a priority change to a thread
	 *     _after_ taking and owning a priority ceiling mutex
	 *     does not affect ownership of that mutex; the ceiling
	 *     priority is only checked before mutex ownership occurs.
	 */
	if (pthread->state == PS_MUTEX_WAIT) {
		/* Lock the mutex structure: */
		_SPINLOCK(&pthread->data.mutex->lock);

		/*
		 * Check to make sure this thread is still in the same state
		 * (the spinlock above can yield the CPU to another thread):
		 */
		if (pthread->state == PS_MUTEX_WAIT) {
			/*
			 * Remove and reinsert this thread into the list of
			 * waiting threads to preserve decreasing priority
			 * order.
			 */
			mutex_queue_remove(pthread->data.mutex, pthread);
			mutex_queue_enq(pthread->data.mutex, pthread);

			/* Inheritance mutexes may need to boost the owner: */
			if (pthread->data.mutex->m_protocol ==
			     PTHREAD_PRIO_INHERIT) {
				/* Adjust priorities: */
				mutex_priority_adjust(pthread->data.mutex);
			}
		}

		/* Unlock the mutex structure: */
		_SPINUNLOCK(&pthread->data.mutex->lock);
	}
}
1147
1148/*
1149 * Called when a new thread is added to the mutex waiting queue or
1150 * when a threads priority changes that is already in the mutex
1151 * waiting queue.
1152 */
static void
mutex_priority_adjust(pthread_mutex_t mutex)
{
	pthread_t	pthread_next, pthread = mutex->m_owner;
	int		temp_prio;
	pthread_mutex_t	m = mutex;

	/*
	 * Calculate the mutex priority as the maximum of the highest
	 * active priority of any waiting threads and the owning threads
	 * active priority(*).
	 *
	 * (*) Because the owning threads current active priority may
	 *     reflect priority inherited from this mutex (and the mutex
	 *     priority may have changed) we must recalculate the active
	 *     priority based on the threads saved inherited priority
	 *     and its base priority.
	 */
	pthread_next = TAILQ_FIRST(&m->m_queue);  /* should never be NULL */
	temp_prio = MAX(pthread_next->active_priority,
	    MAX(m->m_saved_prio, pthread->base_priority));

	/* See if this mutex really needs adjusting: */
	if (temp_prio == m->m_prio)
		/* No need to propagate the priority: */
		return;

	/* Set new priority of the mutex: */
	m->m_prio = temp_prio;

	/*
	 * Propagate the new mutex priority up the chain of owners: each
	 * pass rescans the current owner's owned mutexes, then follows
	 * that owner to the mutex it is itself waiting on (if any),
	 * stopping when a priority stops changing.
	 */
	while (m != NULL) {
		/*
		 * Save the threads priority before rescanning the
		 * owned mutexes:
		 */
		temp_prio = pthread->active_priority;

		/*
		 * Fix the priorities for all the mutexes this thread has
		 * locked since taking this mutex.  This also has a
		 * potential side-effect of changing the threads priority.
		 */
		mutex_rescan_owned(pthread, m);

		/*
		 * If the thread is currently waiting on a mutex, check
		 * to see if the threads new priority has affected the
		 * priority of the mutex.
		 */
		if ((temp_prio != pthread->active_priority) &&
		    (pthread->state == PS_MUTEX_WAIT) &&
		    (pthread->data.mutex->m_protocol == PTHREAD_PRIO_INHERIT)) {
			/* Grab the mutex this thread is waiting on: */
			m = pthread->data.mutex;

			/*
			 * The priority for this thread has changed.  Remove
			 * and reinsert this thread into the list of waiting
			 * threads to preserve decreasing priority order.
			 */
			mutex_queue_remove(m, pthread);
			mutex_queue_enq(m, pthread);

			/* Grab the waiting thread with highest priority: */
			pthread_next = TAILQ_FIRST(&m->m_queue);

			/*
			 * Calculate the mutex priority as the maximum of the
			 * highest active priority of any waiting threads and
			 * the owning threads active priority.
			 */
			temp_prio = MAX(pthread_next->active_priority,
			    MAX(m->m_saved_prio, m->m_owner->base_priority));

			if (temp_prio != m->m_prio) {
				/*
				 * The priority needs to be propagated to the
				 * mutex this thread is waiting on and up to
				 * the owner of that mutex.
				 */
				m->m_prio = temp_prio;
				pthread = m->m_owner;
			}
			else
				/* We're done: */
				m = NULL;

		}
		else
			/* We're done: */
			m = NULL;
	}
}
1246
/*
 * Recompute the priorities of the priority inheritence mutexes owned by
 * a thread, and from them the thread's own inherited and active
 * priorities.  With mutex == NULL the whole owned-mutex list is
 * rescanned; otherwise the scan starts just after the given mutex,
 * whose priority is assumed to be already correct.  If the thread's
 * active priority changes and it sits in the run priority queue, it is
 * requeued at its new priority.
 *
 * NOTE(review): callers appear to invoke this with thread scheduling
 * deferred (see _mutex_notify_priochange) — confirm before adding new
 * call sites.
 */
static void
mutex_rescan_owned(pthread_t pthread, pthread_mutex_t mutex)
{
	int		active_prio, inherited_prio;
	pthread_mutex_t	m;
	pthread_t	pthread_next;

	/*
	 * Start walking the mutexes the thread has taken since
	 * taking this mutex.
	 */
	if (mutex == NULL) {
		/*
		 * A null mutex means start at the beginning of the owned
		 * mutex list.
		 */
		m = TAILQ_FIRST(&pthread->mutexq);

		/* There is no inherited priority yet. */
		inherited_prio = 0;
	}
	else {
		/*
		 * The caller wants to start after a specific mutex.  It
		 * is assumed that this mutex is a priority inheritence
		 * mutex and that its priority has been correctly
		 * calculated.
		 */
		m = TAILQ_NEXT(mutex, m_qe);

		/* Start inheriting priority from the specified mutex. */
		inherited_prio = mutex->m_prio;
	}
	active_prio = MAX(inherited_prio, pthread->base_priority);

	while (m != NULL) {
		/*
		 * We only want to deal with priority inheritence
		 * mutexes.  This might be optimized by only placing
		 * priority inheritence mutexes into the owned mutex
		 * list, but it may prove to be useful having all
		 * owned mutexes in this list.  Consider a thread
		 * exiting while holding mutexes...
		 */
		if (m->m_protocol == PTHREAD_PRIO_INHERIT) {
			/*
			 * Fix the owners saved (inherited) priority to
			 * reflect the priority of the previous mutex.
			 */
			m->m_saved_prio = inherited_prio;

			if ((pthread_next = TAILQ_FIRST(&m->m_queue)) != NULL)
				/* Recalculate the priority of the mutex: */
				m->m_prio = MAX(active_prio,
				     pthread_next->active_priority);
			else
				m->m_prio = active_prio;

			/* Recalculate new inherited and active priorities: */
			inherited_prio = m->m_prio;
			active_prio = MAX(m->m_prio, pthread->base_priority);
		}

		/* Advance to the next mutex owned by this thread: */
		m = TAILQ_NEXT(m, m_qe);
	}

	/*
	 * Fix the threads inherited priority and recalculate its
	 * active priority.
	 */
	pthread->inherited_priority = inherited_prio;
	active_prio = MAX(inherited_prio, pthread->base_priority);

	if (active_prio != pthread->active_priority) {
		/*
		 * If this thread is in the priority queue, it must be
		 * removed and reinserted for its new priority.
	 	 */
		if (pthread->flags & PTHREAD_FLAGS_IN_PRIOQ) {
			/*
			 * Remove the thread from the priority queue
			 * before changing its priority:
			 */
			PTHREAD_PRIOQ_REMOVE(pthread);

			/*
			 * POSIX states that if the priority is being
			 * lowered, the thread must be inserted at the
			 * head of the queue for its priority if it owns
			 * any priority protection or inheritence mutexes.
			 *
			 * Note that the comparison below must read the
			 * OLD active priority, so the new one is only
			 * assigned inside each branch.
			 */
			if ((active_prio < pthread->active_priority) &&
			    (pthread->priority_mutex_count > 0)) {
				/* Set the new active priority. */
				pthread->active_priority = active_prio;

				PTHREAD_PRIOQ_INSERT_HEAD(pthread);
			}
			else {
				/* Set the new active priority. */
				pthread->active_priority = active_prio;

				PTHREAD_PRIOQ_INSERT_TAIL(pthread);
			}
		}
		else {
			/* Set the new active priority. */
			pthread->active_priority = active_prio;
		}
	}
}
1359
1360void
1361_mutex_unlock_private(pthread_t pthread)
1362{
1363	struct pthread_mutex	*m, *m_next;
1364
1365	for (m = TAILQ_FIRST(&pthread->mutexq); m != NULL; m = m_next) {
1366		m_next = TAILQ_NEXT(m, m_qe);
1367		if ((m->m_flags & MUTEX_FLAGS_PRIVATE) != 0)
1368			pthread_mutex_unlock(&m);
1369	}
1370}
1371
1372/*
1373 * Dequeue a waiting thread from the head of a mutex queue in descending
1374 * priority order.
1375 */
1376static inline pthread_t
1377mutex_queue_deq(pthread_mutex_t mutex)
1378{
1379	pthread_t pthread;
1380
1381	while ((pthread = TAILQ_FIRST(&mutex->m_queue)) != NULL) {
1382		TAILQ_REMOVE(&mutex->m_queue, pthread, qe);
1383		pthread->flags &= ~PTHREAD_FLAGS_IN_MUTEXQ;
1384
1385		/*
1386		 * Only exit the loop if the thread hasn't been
1387		 * cancelled.
1388		 */
1389		if (pthread->interrupted == 0)
1390			break;
1391	}
1392
1393	return(pthread);
1394}
1395
1396/*
1397 * Remove a waiting thread from a mutex queue in descending priority order.
1398 */
1399static inline void
1400mutex_queue_remove(pthread_mutex_t mutex, pthread_t pthread)
1401{
1402	if ((pthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0) {
1403		TAILQ_REMOVE(&mutex->m_queue, pthread, qe);
1404		pthread->flags &= ~PTHREAD_FLAGS_IN_MUTEXQ;
1405	}
1406}
1407
1408/*
1409 * Enqueue a waiting thread to a queue in descending priority order.
1410 */
1411static inline void
1412mutex_queue_enq(pthread_mutex_t mutex, pthread_t pthread)
1413{
1414	pthread_t tid = TAILQ_LAST(&mutex->m_queue, mutex_head);
1415
1416	/*
1417	 * For the common case of all threads having equal priority,
1418	 * we perform a quick check against the priority of the thread
1419	 * at the tail of the queue.
1420	 */
1421	if ((tid == NULL) || (pthread->active_priority <= tid->active_priority))
1422		TAILQ_INSERT_TAIL(&mutex->m_queue, pthread, qe);
1423	else {
1424		tid = TAILQ_FIRST(&mutex->m_queue);
1425		while (pthread->active_priority <= tid->active_priority)
1426			tid = TAILQ_NEXT(tid, qe);
1427		TAILQ_INSERT_BEFORE(tid, pthread, qe);
1428	}
1429	pthread->flags |= PTHREAD_FLAGS_IN_MUTEXQ;
1430}
1431
1432#endif
1433