/*
 * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/lib/libkse/thread/thr_mutex.c 173967 2007-11-27 03:16:44Z jasone $
 */
#include <stdlib.h>
#include <errno.h>
#include <string.h>
#include <sys/param.h>
#include <sys/queue.h>
#include <pthread.h>
#include "thr_private.h"

#if defined(_PTHREADS_INVARIANTS)
#define MUTEX_INIT_LINK(m) 		do {		\
	(m)->m_qe.tqe_prev = NULL;			\
	(m)->m_qe.tqe_next = NULL;			\
} while (0)
#define MUTEX_ASSERT_IS_OWNED(m)	do {		\
	if ((m)->m_qe.tqe_prev == NULL)			\
		PANIC("mutex is not on list");		\
} while (0)
#define MUTEX_ASSERT_NOT_OWNED(m)	do {		\
	if (((m)->m_qe.tqe_prev != NULL) ||		\
	    ((m)->m_qe.tqe_next != NULL))		\
		PANIC("mutex is on list");		\
} while (0)
#define	THR_ASSERT_NOT_IN_SYNCQ(thr)	do {		\
	THR_ASSERT(((thr)->sflags & THR_FLAGS_IN_SYNCQ) == 0, \
	    "thread in syncq when it shouldn't be.");	\
} while (0)
#else
#define MUTEX_INIT_LINK(m)
#define MUTEX_ASSERT_IS_OWNED(m)
#define MUTEX_ASSERT_NOT_OWNED(m)
#define	THR_ASSERT_NOT_IN_SYNCQ(thr)
#endif

#define THR_IN_MUTEXQ(thr)	(((thr)->sflags & THR_FLAGS_IN_SYNCQ) != 0)
#define	MUTEX_DESTROY(m) do {		\
	_lock_destroy(&(m)->m_lock);	\
	free(m);			\
} while (0)
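
/*
 * The do { ... } while (0) wrapper above lets a multi-statement macro
 * be used anywhere a single statement is expected, e.g. as the sole
 * body of an "if" (an illustrative fragment, not code in this file):
 *
 *	if (reinit)
 *		MUTEX_DESTROY(m);
 *	else
 *		...
 *
 * Without the wrapper, only the first statement of the expansion would
 * bind to the "if" and the dangling "else" would fail to compile.
 */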

/*
 * Prototypes
 */
static struct kse_mailbox *mutex_handoff(struct pthread *,
			    struct pthread_mutex *);
static inline int	mutex_self_trylock(struct pthread *, pthread_mutex_t);
static inline int	mutex_self_lock(struct pthread *, pthread_mutex_t);
static int		mutex_unlock_common(pthread_mutex_t *, int);
static void		mutex_priority_adjust(struct pthread *, pthread_mutex_t);
static void		mutex_rescan_owned (struct pthread *, struct pthread *,
			    struct pthread_mutex *);
static inline pthread_t	mutex_queue_deq(pthread_mutex_t);
static inline void	mutex_queue_remove(pthread_mutex_t, pthread_t);
static inline void	mutex_queue_enq(pthread_mutex_t, pthread_t);
static void		mutex_lock_backout(void *arg);

static struct pthread_mutex_attr	static_mutex_attr =
    PTHREAD_MUTEXATTR_STATIC_INITIALIZER;
static pthread_mutexattr_t		static_mattr = &static_mutex_attr;

LT10_COMPAT_PRIVATE(__pthread_mutex_init);
LT10_COMPAT_PRIVATE(_pthread_mutex_init);
LT10_COMPAT_DEFAULT(pthread_mutex_init);
LT10_COMPAT_PRIVATE(__pthread_mutex_lock);
LT10_COMPAT_PRIVATE(_pthread_mutex_lock);
LT10_COMPAT_DEFAULT(pthread_mutex_lock);
LT10_COMPAT_PRIVATE(__pthread_mutex_timedlock);
LT10_COMPAT_PRIVATE(_pthread_mutex_timedlock);
LT10_COMPAT_DEFAULT(pthread_mutex_timedlock);
LT10_COMPAT_PRIVATE(__pthread_mutex_trylock);
LT10_COMPAT_PRIVATE(_pthread_mutex_trylock);
LT10_COMPAT_DEFAULT(pthread_mutex_trylock);
LT10_COMPAT_PRIVATE(_pthread_mutex_destroy);
LT10_COMPAT_DEFAULT(pthread_mutex_destroy);
LT10_COMPAT_PRIVATE(_pthread_mutex_unlock);
LT10_COMPAT_DEFAULT(pthread_mutex_unlock);

/* Single underscore versions provided for libc internal usage: */
__weak_reference(__pthread_mutex_init, pthread_mutex_init);
__weak_reference(__pthread_mutex_lock, pthread_mutex_lock);
__weak_reference(__pthread_mutex_timedlock, pthread_mutex_timedlock);
__weak_reference(__pthread_mutex_trylock, pthread_mutex_trylock);

/* No difference between libc and application usage of these: */
__weak_reference(_pthread_mutex_destroy, pthread_mutex_destroy);
__weak_reference(_pthread_mutex_unlock, pthread_mutex_unlock);

static int
thr_mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr, void *(calloc_cb)(size_t, size_t))
{
	struct pthread_mutex *pmutex;
	enum pthread_mutextype type;
	int		protocol;
	int		ceiling;
	int		flags;
	int		ret = 0;

	if (mutex == NULL)
		ret = EINVAL;

	/* Check if default mutex attributes: */
	else if (mutex_attr == NULL || *mutex_attr == NULL) {
		/* Default to an error-checking POSIX mutex: */
		type = PTHREAD_MUTEX_ERRORCHECK;
		protocol = PTHREAD_PRIO_NONE;
		ceiling = THR_MAX_PRIORITY;
		flags = 0;
	}

	/* Check mutex type: */
	else if (((*mutex_attr)->m_type < PTHREAD_MUTEX_ERRORCHECK) ||
	    ((*mutex_attr)->m_type >= PTHREAD_MUTEX_TYPE_MAX))
		/* Return an invalid argument error: */
		ret = EINVAL;

	/* Check mutex protocol: */
	else if (((*mutex_attr)->m_protocol < PTHREAD_PRIO_NONE) ||
	    ((*mutex_attr)->m_protocol > PTHREAD_PRIO_PROTECT))
		/* Return an invalid argument error: */
		ret = EINVAL;

	else {
		/* Use the requested mutex type and protocol: */
		type = (*mutex_attr)->m_type;
		protocol = (*mutex_attr)->m_protocol;
		ceiling = (*mutex_attr)->m_ceiling;
		flags = (*mutex_attr)->m_flags;
	}

	/* Check no errors so far: */
	if (ret == 0) {
		if ((pmutex = (pthread_mutex_t)
		    calloc_cb(1, sizeof(struct pthread_mutex))) == NULL)
			ret = ENOMEM;
		else if (_lock_init(&pmutex->m_lock, LCK_ADAPTIVE,
		    _thr_lock_wait, _thr_lock_wakeup, calloc_cb) != 0) {
			free(pmutex);
			*mutex = NULL;
			ret = ENOMEM;
		} else {
			/* Set the mutex flags: */
			pmutex->m_flags = flags;

			/* Process according to mutex type: */
			switch (type) {
			/* case PTHREAD_MUTEX_DEFAULT: */
			case PTHREAD_MUTEX_ERRORCHECK:
			case PTHREAD_MUTEX_NORMAL:
			case PTHREAD_MUTEX_ADAPTIVE_NP:
				/* Nothing to do here. */
				break;

			/* Single UNIX Spec 2 recursive mutex: */
			case PTHREAD_MUTEX_RECURSIVE:
				/* Reset the mutex count: */
				pmutex->m_count = 0;
				break;

			/* Trap invalid mutex types: */
			default:
				/* Return an invalid argument error: */
				ret = EINVAL;
				break;
			}
			if (ret == 0) {
				/* Initialise the rest of the mutex: */
				TAILQ_INIT(&pmutex->m_queue);
				pmutex->m_flags |= MUTEX_FLAGS_INITED;
				pmutex->m_owner = NULL;
				pmutex->m_type = type;
				pmutex->m_protocol = protocol;
				pmutex->m_refcount = 0;
				if (protocol == PTHREAD_PRIO_PROTECT)
					pmutex->m_prio = ceiling;
				else
					pmutex->m_prio = -1;
				pmutex->m_saved_prio = 0;
				MUTEX_INIT_LINK(pmutex);
				*mutex = pmutex;
			} else {
				/* Free the mutex lock structure: */
				MUTEX_DESTROY(pmutex);
				*mutex = NULL;
			}
		}
	}
	/* Return the completion status: */
	return (ret);
}
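
/*
 * Usage sketch (application code, not part of this library): creating
 * a priority-inheritance mutex through the attribute interface that
 * thr_mutex_init() consumes above.  Error handling is elided.
 *
 *	pthread_mutexattr_t attr;
 *	pthread_mutex_t m;
 *
 *	pthread_mutexattr_init(&attr);
 *	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
 *	pthread_mutexattr_setprotocol(&attr, PTHREAD_PRIO_INHERIT);
 *	pthread_mutex_init(&m, &attr);
 *	pthread_mutexattr_destroy(&attr);
 */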

int
__pthread_mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr)
{

	return (thr_mutex_init(mutex, mutex_attr, calloc));
}

int
_pthread_mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr)
{
	struct pthread_mutex_attr mattr, *mattrp;

	if ((mutex_attr == NULL) || (*mutex_attr == NULL))
		return (__pthread_mutex_init(mutex, &static_mattr));
	else {
		mattr = **mutex_attr;
		mattr.m_flags |= MUTEX_FLAGS_PRIVATE;
		mattrp = &mattr;
		return (__pthread_mutex_init(mutex, &mattrp));
	}
}

/* This function is used internally by malloc. */
int
_pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
    void *(calloc_cb)(size_t, size_t))
{
	static const struct pthread_mutex_attr attr = {
		.m_type = PTHREAD_MUTEX_NORMAL,
		.m_protocol = PTHREAD_PRIO_NONE,
		.m_ceiling = 0,
		.m_flags = 0
	};

	return (thr_mutex_init(mutex, (pthread_mutexattr_t *)&attr,
	    calloc_cb));
}
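
/*
 * The calloc callback exists because malloc(3) creates its own mutexes
 * while the allocator is still bootstrapping, so it cannot recurse into
 * the regular calloc().  A sketch of the caller's side, with
 * base_calloc standing in for a hypothetical raw allocator:
 *
 *	static void *base_calloc(size_t number, size_t size);
 *
 *	_pthread_mutex_init_calloc_cb(&malloc_mutex, base_calloc);
 */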

void
_thr_mutex_reinit(pthread_mutex_t *mutex)
{
	_lock_reinit(&(*mutex)->m_lock, LCK_ADAPTIVE,
	    _thr_lock_wait, _thr_lock_wakeup);
	TAILQ_INIT(&(*mutex)->m_queue);
	(*mutex)->m_owner = NULL;
	(*mutex)->m_count = 0;
	(*mutex)->m_refcount = 0;
	(*mutex)->m_prio = 0;
	(*mutex)->m_saved_prio = 0;
}

int
_pthread_mutex_destroy(pthread_mutex_t *mutex)
{
	struct pthread	*curthread = _get_curthread();
	pthread_mutex_t m;
	int ret = 0;

	if (mutex == NULL || *mutex == NULL)
		ret = EINVAL;
	else {
		/* Lock the mutex structure: */
		THR_LOCK_ACQUIRE(curthread, &(*mutex)->m_lock);

		/*
		 * Check to see if this mutex is in use:
		 */
		if (((*mutex)->m_owner != NULL) ||
		    (!TAILQ_EMPTY(&(*mutex)->m_queue)) ||
		    ((*mutex)->m_refcount != 0)) {
			ret = EBUSY;

			/* Unlock the mutex structure: */
			THR_LOCK_RELEASE(curthread, &(*mutex)->m_lock);
		} else {
			/*
			 * Save a pointer to the mutex so it can be freed
			 * and set the caller's pointer to NULL:
			 */
			m = *mutex;
			*mutex = NULL;

			/* Unlock the mutex structure: */
			THR_LOCK_RELEASE(curthread, &m->m_lock);

			/*
			 * Free the memory allocated for the mutex
			 * structure:
			 */
			MUTEX_ASSERT_NOT_OWNED(m);
			MUTEX_DESTROY(m);
		}
	}

	/* Return the completion status: */
	return (ret);
}

static int
init_static(struct pthread *thread, pthread_mutex_t *mutex)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);

	if (*mutex == NULL)
		ret = pthread_mutex_init(mutex, NULL);
	else
		ret = 0;

	THR_LOCK_RELEASE(thread, &_mutex_static_lock);

	return (ret);
}

static int
init_static_private(struct pthread *thread, pthread_mutex_t *mutex)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);

	if (*mutex == NULL)
		ret = pthread_mutex_init(mutex, &static_mattr);
	else
		ret = 0;

	THR_LOCK_RELEASE(thread, &_mutex_static_lock);

	return (ret);
}

static int
mutex_trylock_common(struct pthread *curthread, pthread_mutex_t *mutex)
{
	int private;
	int ret = 0;

	THR_ASSERT((mutex != NULL) && (*mutex != NULL),
	    "Uninitialized mutex in pthread_mutex_trylock_basic");

	/* Lock the mutex structure: */
	THR_LOCK_ACQUIRE(curthread, &(*mutex)->m_lock);
	private = (*mutex)->m_flags & MUTEX_FLAGS_PRIVATE;

	/*
	 * If the mutex was statically allocated, properly
	 * initialize the tail queue.
	 */
	if (((*mutex)->m_flags & MUTEX_FLAGS_INITED) == 0) {
		TAILQ_INIT(&(*mutex)->m_queue);
		MUTEX_INIT_LINK(*mutex);
		(*mutex)->m_flags |= MUTEX_FLAGS_INITED;
	}

	/* Process according to mutex protocol: */
	switch ((*mutex)->m_protocol) {
	/* Default POSIX mutex: */
	case PTHREAD_PRIO_NONE:
		/* Check if this mutex is not locked: */
		if ((*mutex)->m_owner == NULL) {
			/* Lock the mutex for the running thread: */
			(*mutex)->m_owner = curthread;

			/* Add to the list of owned mutexes: */
			MUTEX_ASSERT_NOT_OWNED(*mutex);
			TAILQ_INSERT_TAIL(&curthread->mutexq,
			    (*mutex), m_qe);
		} else if ((*mutex)->m_owner == curthread)
			ret = mutex_self_trylock(curthread, *mutex);
		else
			/* Return a busy error: */
			ret = EBUSY;
		break;

	/* POSIX priority inheritance mutex: */
	case PTHREAD_PRIO_INHERIT:
		/* Check if this mutex is not locked: */
		if ((*mutex)->m_owner == NULL) {
			/* Lock the mutex for the running thread: */
			(*mutex)->m_owner = curthread;

			THR_SCHED_LOCK(curthread, curthread);
			/* Track number of priority mutexes owned: */
			curthread->priority_mutex_count++;

			/*
			 * The mutex takes on the attributes of the
			 * running thread when there are no waiters.
			 */
			(*mutex)->m_prio = curthread->active_priority;
			(*mutex)->m_saved_prio =
			    curthread->inherited_priority;
			curthread->inherited_priority = (*mutex)->m_prio;
			THR_SCHED_UNLOCK(curthread, curthread);

			/* Add to the list of owned mutexes: */
			MUTEX_ASSERT_NOT_OWNED(*mutex);
			TAILQ_INSERT_TAIL(&curthread->mutexq,
			    (*mutex), m_qe);
		} else if ((*mutex)->m_owner == curthread)
			ret = mutex_self_trylock(curthread, *mutex);
		else
			/* Return a busy error: */
			ret = EBUSY;
		break;

	/* POSIX priority protection mutex: */
	case PTHREAD_PRIO_PROTECT:
		/* Check for a priority ceiling violation: */
		if (curthread->active_priority > (*mutex)->m_prio)
			ret = EINVAL;

		/* Check if this mutex is not locked: */
		else if ((*mutex)->m_owner == NULL) {
			/* Lock the mutex for the running thread: */
			(*mutex)->m_owner = curthread;

			THR_SCHED_LOCK(curthread, curthread);
			/* Track number of priority mutexes owned: */
			curthread->priority_mutex_count++;

			/*
			 * The running thread inherits the ceiling
			 * priority of the mutex and executes at that
			 * priority.
			 */
			curthread->active_priority = (*mutex)->m_prio;
			(*mutex)->m_saved_prio =
			    curthread->inherited_priority;
			curthread->inherited_priority =
			    (*mutex)->m_prio;
			THR_SCHED_UNLOCK(curthread, curthread);
			/* Add to the list of owned mutexes: */
			MUTEX_ASSERT_NOT_OWNED(*mutex);
			TAILQ_INSERT_TAIL(&curthread->mutexq,
			    (*mutex), m_qe);
		} else if ((*mutex)->m_owner == curthread)
			ret = mutex_self_trylock(curthread, *mutex);
		else
			/* Return a busy error: */
			ret = EBUSY;
		break;

	/* Trap invalid mutex types: */
	default:
		/* Return an invalid argument error: */
		ret = EINVAL;
		break;
	}

	if (ret == 0 && private)
		THR_CRITICAL_ENTER(curthread);

	/* Unlock the mutex structure: */
	THR_LOCK_RELEASE(curthread, &(*mutex)->m_lock);

	/* Return the completion status: */
	return (ret);
}

int
__pthread_mutex_trylock(pthread_mutex_t *mutex)
{
	struct pthread *curthread = _get_curthread();
	int ret = 0;

	if (mutex == NULL)
		ret = EINVAL;

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	else if ((*mutex != NULL) ||
	    ((ret = init_static(curthread, mutex)) == 0))
		ret = mutex_trylock_common(curthread, mutex);

	return (ret);
}

int
_pthread_mutex_trylock(pthread_mutex_t *mutex)
{
	struct pthread	*curthread = _get_curthread();
	int	ret = 0;

	if (mutex == NULL)
		ret = EINVAL;

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization marking the mutex private (delete safe):
	 */
	else if ((*mutex != NULL) ||
	    ((ret = init_static_private(curthread, mutex)) == 0))
		ret = mutex_trylock_common(curthread, mutex);

	return (ret);
}
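
/*
 * Usage sketch (application code): trylock never blocks; EBUSY just
 * means another thread currently owns the lock.
 *
 *	if (pthread_mutex_trylock(&m) == 0) {
 *		... critical section ...
 *		pthread_mutex_unlock(&m);
 *	} else {
 *		... do other work and try again later ...
 *	}
 */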

static int
mutex_lock_common(struct pthread *curthread, pthread_mutex_t *m,
	const struct timespec *abstime)
{
	int	private;
	int	ret = 0;

	THR_ASSERT((m != NULL) && (*m != NULL),
	    "Uninitialized mutex in mutex_lock_common");

	if (abstime != NULL && (abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
	    abstime->tv_nsec >= 1000000000))
		return (EINVAL);

	/* Reset the interrupted flag: */
	curthread->interrupted = 0;
	curthread->timeout = 0;
	curthread->wakeup_time.tv_sec = -1;

	private = (*m)->m_flags & MUTEX_FLAGS_PRIVATE;

	/*
	 * Enter a loop waiting to become the mutex owner.  We need a
	 * loop in case the waiting thread is interrupted by a signal
	 * to execute a signal handler.  It is not (currently) possible
	 * to remain in the waiting queue while running a handler.
	 * Instead, the thread is interrupted and backed out of the
	 * waiting queue prior to executing the signal handler.
	 */
	do {
		/* Lock the mutex structure: */
		THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);

		/*
		 * If the mutex was statically allocated, properly
		 * initialize the tail queue.
		 */
		if (((*m)->m_flags & MUTEX_FLAGS_INITED) == 0) {
			TAILQ_INIT(&(*m)->m_queue);
			(*m)->m_flags |= MUTEX_FLAGS_INITED;
			MUTEX_INIT_LINK(*m);
		}

		/* Process according to mutex protocol: */
		switch ((*m)->m_protocol) {
		/* Default POSIX mutex: */
		case PTHREAD_PRIO_NONE:
			if ((*m)->m_owner == NULL) {
				/* Lock the mutex for this thread: */
				(*m)->m_owner = curthread;

				/* Add to the list of owned mutexes: */
				MUTEX_ASSERT_NOT_OWNED(*m);
				TAILQ_INSERT_TAIL(&curthread->mutexq,
				    (*m), m_qe);
				if (private)
					THR_CRITICAL_ENTER(curthread);

				/* Unlock the mutex structure: */
				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
			} else if ((*m)->m_owner == curthread) {
				ret = mutex_self_lock(curthread, *m);

				/* Unlock the mutex structure: */
				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
			} else {
				/*
				 * Join the queue of threads waiting to lock
				 * the mutex and save a pointer to the mutex.
				 */
				mutex_queue_enq(*m, curthread);
				curthread->data.mutex = *m;
				curthread->sigbackout = mutex_lock_backout;
				/*
				 * This thread is active and is in a critical
				 * region (holding the mutex lock); we should
				 * be able to safely set the state.
				 */
				THR_SCHED_LOCK(curthread, curthread);
				/* Set the wakeup time: */
				if (abstime) {
					curthread->wakeup_time.tv_sec =
						abstime->tv_sec;
					curthread->wakeup_time.tv_nsec =
						abstime->tv_nsec;
				}

				THR_SET_STATE(curthread, PS_MUTEX_WAIT);
				THR_SCHED_UNLOCK(curthread, curthread);

				/* Unlock the mutex structure: */
				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);

				/* Schedule the next thread: */
				_thr_sched_switch(curthread);

				if (THR_IN_MUTEXQ(curthread)) {
					THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
					mutex_queue_remove(*m, curthread);
					THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
				}
				/*
				 * Only clear these after assuring the
				 * thread is dequeued.
				 */
				curthread->data.mutex = NULL;
				curthread->sigbackout = NULL;
			}
			break;

		/* POSIX priority inheritance mutex: */
		case PTHREAD_PRIO_INHERIT:
			/* Check if this mutex is not locked: */
			if ((*m)->m_owner == NULL) {
				/* Lock the mutex for this thread: */
				(*m)->m_owner = curthread;

				THR_SCHED_LOCK(curthread, curthread);
				/* Track number of priority mutexes owned: */
				curthread->priority_mutex_count++;

				/*
				 * The mutex takes on attributes of the
				 * running thread when there are no waiters.
				 * Make sure the thread's scheduling lock is
				 * held while priorities are adjusted.
				 */
				(*m)->m_prio = curthread->active_priority;
				(*m)->m_saved_prio =
				    curthread->inherited_priority;
				curthread->inherited_priority = (*m)->m_prio;
				THR_SCHED_UNLOCK(curthread, curthread);

				/* Add to the list of owned mutexes: */
				MUTEX_ASSERT_NOT_OWNED(*m);
				TAILQ_INSERT_TAIL(&curthread->mutexq,
				    (*m), m_qe);
				if (private)
					THR_CRITICAL_ENTER(curthread);

				/* Unlock the mutex structure: */
				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
			} else if ((*m)->m_owner == curthread) {
				ret = mutex_self_lock(curthread, *m);

				/* Unlock the mutex structure: */
				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
			} else {
				/*
				 * Join the queue of threads waiting to lock
				 * the mutex and save a pointer to the mutex.
				 */
				mutex_queue_enq(*m, curthread);
				curthread->data.mutex = *m;
				curthread->sigbackout = mutex_lock_backout;

				/*
				 * This thread is active and is in a critical
				 * region (holding the mutex lock); we should
				 * be able to safely set the state.
				 */
				if (curthread->active_priority > (*m)->m_prio)
					/* Adjust priorities: */
					mutex_priority_adjust(curthread, *m);

				THR_SCHED_LOCK(curthread, curthread);
				/* Set the wakeup time: */
				if (abstime) {
					curthread->wakeup_time.tv_sec =
						abstime->tv_sec;
					curthread->wakeup_time.tv_nsec =
						abstime->tv_nsec;
				}
				THR_SET_STATE(curthread, PS_MUTEX_WAIT);
				THR_SCHED_UNLOCK(curthread, curthread);

				/* Unlock the mutex structure: */
				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);

				/* Schedule the next thread: */
				_thr_sched_switch(curthread);

				if (THR_IN_MUTEXQ(curthread)) {
					THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
					mutex_queue_remove(*m, curthread);
					THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
				}
				/*
				 * Only clear these after assuring the
				 * thread is dequeued.
				 */
				curthread->data.mutex = NULL;
				curthread->sigbackout = NULL;
			}
			break;

		/* POSIX priority protection mutex: */
		case PTHREAD_PRIO_PROTECT:
			/* Check for a priority ceiling violation: */
			if (curthread->active_priority > (*m)->m_prio) {
				/* Unlock the mutex structure: */
				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
				ret = EINVAL;
			}
			/* Check if this mutex is not locked: */
			else if ((*m)->m_owner == NULL) {
				/*
				 * Lock the mutex for the running
				 * thread:
				 */
				(*m)->m_owner = curthread;

				THR_SCHED_LOCK(curthread, curthread);
				/* Track number of priority mutexes owned: */
				curthread->priority_mutex_count++;

				/*
				 * The running thread inherits the ceiling
				 * priority of the mutex and executes at that
				 * priority.  Make sure the thread's
				 * scheduling lock is held while priorities
				 * are adjusted.
				 */
				curthread->active_priority = (*m)->m_prio;
				(*m)->m_saved_prio =
				    curthread->inherited_priority;
				curthread->inherited_priority = (*m)->m_prio;
				THR_SCHED_UNLOCK(curthread, curthread);

				/* Add to the list of owned mutexes: */
				MUTEX_ASSERT_NOT_OWNED(*m);
				TAILQ_INSERT_TAIL(&curthread->mutexq,
				    (*m), m_qe);
				if (private)
					THR_CRITICAL_ENTER(curthread);

				/* Unlock the mutex structure: */
				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
			} else if ((*m)->m_owner == curthread) {
				ret = mutex_self_lock(curthread, *m);

				/* Unlock the mutex structure: */
				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
			} else {
				/*
				 * Join the queue of threads waiting to lock
				 * the mutex and save a pointer to the mutex.
				 */
				mutex_queue_enq(*m, curthread);
				curthread->data.mutex = *m;
				curthread->sigbackout = mutex_lock_backout;

				/* Clear any previous error: */
				curthread->error = 0;

				/*
				 * This thread is active and is in a critical
				 * region (holding the mutex lock); we should
				 * be able to safely set the state.
				 */

				THR_SCHED_LOCK(curthread, curthread);
				/* Set the wakeup time: */
				if (abstime) {
					curthread->wakeup_time.tv_sec =
						abstime->tv_sec;
					curthread->wakeup_time.tv_nsec =
						abstime->tv_nsec;
				}
				THR_SET_STATE(curthread, PS_MUTEX_WAIT);
				THR_SCHED_UNLOCK(curthread, curthread);

				/* Unlock the mutex structure: */
				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);

				/* Schedule the next thread: */
				_thr_sched_switch(curthread);

				if (THR_IN_MUTEXQ(curthread)) {
					THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
					mutex_queue_remove(*m, curthread);
					THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
				}
				/*
				 * Only clear these after assuring the
				 * thread is dequeued.
				 */
				curthread->data.mutex = NULL;
				curthread->sigbackout = NULL;

				/*
				 * The thread's priority may have changed
				 * while it waited for the mutex, causing a
				 * ceiling violation.
				 */
				ret = curthread->error;
				curthread->error = 0;
			}
			break;

		/* Trap invalid mutex types: */
		default:
			/* Unlock the mutex structure: */
			THR_LOCK_RELEASE(curthread, &(*m)->m_lock);

			/* Return an invalid argument error: */
			ret = EINVAL;
			break;
		}

	} while (((*m)->m_owner != curthread) && (ret == 0) &&
	    (curthread->interrupted == 0) && (curthread->timeout == 0));

	if (ret == 0 && (*m)->m_owner != curthread && curthread->timeout)
		ret = ETIMEDOUT;

	/*
	 * Check to see if this thread was interrupted and
	 * is still in the mutex queue of waiting threads:
	 */
	if (curthread->interrupted != 0) {
		/* Remove this thread from the mutex queue. */
		THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
		if (THR_IN_SYNCQ(curthread))
			mutex_queue_remove(*m, curthread);
		THR_LOCK_RELEASE(curthread, &(*m)->m_lock);

		/* Check for asynchronous cancellation. */
		if (curthread->continuation != NULL)
			curthread->continuation((void *) curthread);
	}

	/* Return the completion status: */
	return (ret);
}

int
__pthread_mutex_lock(pthread_mutex_t *m)
{
	struct pthread *curthread;
	int	ret = 0;

	if (_thr_initial == NULL)
		_libpthread_init(NULL);

	curthread = _get_curthread();
	if (m == NULL)
		ret = EINVAL;

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	else if ((*m != NULL) || ((ret = init_static(curthread, m)) == 0))
		ret = mutex_lock_common(curthread, m, NULL);

	return (ret);
}

__strong_reference(__pthread_mutex_lock, _thr_mutex_lock);

int
_pthread_mutex_lock(pthread_mutex_t *m)
{
	struct pthread *curthread;
	int	ret = 0;

	if (_thr_initial == NULL)
		_libpthread_init(NULL);
	curthread = _get_curthread();

	if (m == NULL)
		ret = EINVAL;

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization marking it private (delete safe):
	 */
	else if ((*m != NULL) ||
	    ((ret = init_static_private(curthread, m)) == 0))
		ret = mutex_lock_common(curthread, m, NULL);

	return (ret);
}

int
__pthread_mutex_timedlock(pthread_mutex_t *m,
	const struct timespec *abs_timeout)
{
	struct pthread *curthread;
	int	ret = 0;

	if (_thr_initial == NULL)
		_libpthread_init(NULL);

	curthread = _get_curthread();
	if (m == NULL)
		ret = EINVAL;

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	else if ((*m != NULL) || ((ret = init_static(curthread, m)) == 0))
		ret = mutex_lock_common(curthread, m, abs_timeout);

	return (ret);
}

int
_pthread_mutex_timedlock(pthread_mutex_t *m,
	const struct timespec *abs_timeout)
{
	struct pthread *curthread;
	int	ret = 0;

	if (_thr_initial == NULL)
		_libpthread_init(NULL);
	curthread = _get_curthread();

	if (m == NULL)
		ret = EINVAL;

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization marking it private (delete safe):
	 */
	else if ((*m != NULL) ||
	    ((ret = init_static_private(curthread, m)) == 0))
		ret = mutex_lock_common(curthread, m, abs_timeout);

	return (ret);
}
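
/*
 * Usage sketch (application code): the timeout passed to
 * pthread_mutex_timedlock() is an absolute CLOCK_REALTIME deadline,
 * not a relative interval.
 *
 *	struct timespec abstime;
 *
 *	clock_gettime(CLOCK_REALTIME, &abstime);
 *	abstime.tv_sec += 5;
 *	if (pthread_mutex_timedlock(&m, &abstime) == ETIMEDOUT)
 *		... gave up after roughly five seconds ...
 */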

int
_pthread_mutex_unlock(pthread_mutex_t *m)
{
	return (mutex_unlock_common(m, /* add reference */ 0));
}

__strong_reference(_pthread_mutex_unlock, _thr_mutex_unlock);

int
_mutex_cv_unlock(pthread_mutex_t *m)
{
	return (mutex_unlock_common(m, /* add reference */ 1));
}

int
_mutex_cv_lock(pthread_mutex_t *m)
{
	struct pthread *curthread;
	int	ret;

	curthread = _get_curthread();
	if ((ret = _pthread_mutex_lock(m)) == 0) {
		THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
		(*m)->m_refcount--;
		THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
	}
	return (ret);
}
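
/*
 * The reference count pairs _mutex_cv_unlock() with _mutex_cv_lock():
 * when a condition variable drops the mutex on behalf of a waiter it
 * bumps m_refcount, which makes pthread_mutex_destroy() fail with
 * EBUSY while the waiter sleeps, and the count is dropped once the
 * waiter reacquires the mutex here.  A condensed view, assuming the
 * usual wait sequence:
 *
 *	cond_wait(cv, m):
 *		_mutex_cv_unlock(m);	releases m, m_refcount++
 *		... sleep on cv ...
 *		_mutex_cv_lock(m);	relocks m, then m_refcount--
 */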

static inline int
mutex_self_trylock(struct pthread *curthread, pthread_mutex_t m)
{
	int	ret = 0;

	switch (m->m_type) {
	/* case PTHREAD_MUTEX_DEFAULT: */
	case PTHREAD_MUTEX_ERRORCHECK:
	case PTHREAD_MUTEX_NORMAL:
	case PTHREAD_MUTEX_ADAPTIVE_NP:
		ret = EBUSY;
		break;

	case PTHREAD_MUTEX_RECURSIVE:
		/* Increment the lock count: */
		m->m_count++;
		break;

	default:
		/* Trap invalid mutex types: */
		ret = EINVAL;
	}

	return (ret);
}

static inline int
mutex_self_lock(struct pthread *curthread, pthread_mutex_t m)
{
	int ret = 0;

	/*
	 * Don't allow evil recursive mutexes for private use
	 * in libc and libpthread.
	 */
	if (m->m_flags & MUTEX_FLAGS_PRIVATE)
		PANIC("Recurse on a private mutex.");

	switch (m->m_type) {
	/* case PTHREAD_MUTEX_DEFAULT: */
	case PTHREAD_MUTEX_ERRORCHECK:
	case PTHREAD_MUTEX_ADAPTIVE_NP:
		/*
		 * POSIX specifies that mutexes should return EDEADLK if a
		 * recursive lock is detected.
		 */
		ret = EDEADLK;
		break;

	case PTHREAD_MUTEX_NORMAL:
		/*
		 * What SS2 defines as a 'normal' mutex.  Intentionally
		 * deadlock on attempts to get a lock you already own.
		 */

		THR_SCHED_LOCK(curthread, curthread);
		THR_SET_STATE(curthread, PS_DEADLOCK);
		THR_SCHED_UNLOCK(curthread, curthread);

		/* Unlock the mutex structure: */
		THR_LOCK_RELEASE(curthread, &m->m_lock);

		/* Schedule the next thread: */
		_thr_sched_switch(curthread);
		break;

	case PTHREAD_MUTEX_RECURSIVE:
		/* Increment the lock count: */
		m->m_count++;
		break;

	default:
		/* Trap invalid mutex types: */
		ret = EINVAL;
	}

	return (ret);
}
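
/*
 * Summary of a second lock attempt by the owning thread, as
 * implemented by the two functions above:
 *
 *	type			mutex_self_lock		mutex_self_trylock
 *	ERRORCHECK/ADAPTIVE	EDEADLK			EBUSY
 *	NORMAL			deadlocks (PS_DEADLOCK)	EBUSY
 *	RECURSIVE		m_count++, success	m_count++, success
 */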

static int
mutex_unlock_common(pthread_mutex_t *m, int add_reference)
{
	struct pthread *curthread = _get_curthread();
	struct kse_mailbox *kmbx = NULL;
	int ret = 0;

	if (m == NULL || *m == NULL)
		ret = EINVAL;
	else {
		/* Lock the mutex structure: */
		THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);

		/* Process according to mutex protocol: */
		switch ((*m)->m_protocol) {
		/* Default POSIX mutex: */
		case PTHREAD_PRIO_NONE:
			/*
			 * Check if the running thread is not the owner of the
			 * mutex:
			 */
			if ((*m)->m_owner != curthread)
				ret = EPERM;
			else if (((*m)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
			    ((*m)->m_count > 0))
				/* Decrement the count: */
				(*m)->m_count--;
			else {
				/*
				 * Clear the count in case this is a recursive
				 * mutex.
				 */
				(*m)->m_count = 0;

				/* Remove the mutex from the thread's queue. */
				MUTEX_ASSERT_IS_OWNED(*m);
				TAILQ_REMOVE(&(*m)->m_owner->mutexq,
				    (*m), m_qe);
				MUTEX_INIT_LINK(*m);

				/*
				 * Hand off the mutex to the next waiting
				 * thread:
				 */
				kmbx = mutex_handoff(curthread, *m);
			}
			break;

		/* POSIX priority inheritance mutex: */
		case PTHREAD_PRIO_INHERIT:
			/*
			 * Check if the running thread is not the owner of the
			 * mutex:
			 */
			if ((*m)->m_owner != curthread)
				ret = EPERM;
			else if (((*m)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
			    ((*m)->m_count > 0))
				/* Decrement the count: */
				(*m)->m_count--;
			else {
				/*
				 * Clear the count in case this is a recursive
				 * mutex.
				 */
				(*m)->m_count = 0;

				/*
				 * Restore the thread's inherited priority and
				 * recompute the active priority (being careful
				 * not to override changes in the thread's base
				 * priority subsequent to locking the mutex).
				 */
				THR_SCHED_LOCK(curthread, curthread);
				curthread->inherited_priority =
					(*m)->m_saved_prio;
				curthread->active_priority =
				    MAX(curthread->inherited_priority,
				    curthread->base_priority);

				/*
				 * This thread now owns one less priority mutex.
				 */
				curthread->priority_mutex_count--;
				THR_SCHED_UNLOCK(curthread, curthread);

				/* Remove the mutex from the thread's queue. */
				MUTEX_ASSERT_IS_OWNED(*m);
				TAILQ_REMOVE(&(*m)->m_owner->mutexq,
				    (*m), m_qe);
				MUTEX_INIT_LINK(*m);

				/*
				 * Hand off the mutex to the next waiting
				 * thread:
				 */
				kmbx = mutex_handoff(curthread, *m);
			}
			break;

		/* POSIX priority ceiling mutex: */
		case PTHREAD_PRIO_PROTECT:
			/*
			 * Check if the running thread is not the owner of the
			 * mutex:
			 */
			if ((*m)->m_owner != curthread)
				ret = EPERM;
			else if (((*m)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
			    ((*m)->m_count > 0))
				/* Decrement the count: */
				(*m)->m_count--;
			else {
				/*
				 * Clear the count in case this is a recursive
				 * mutex.
				 */
				(*m)->m_count = 0;

				/*
				 * Restore the thread's inherited priority and
				 * recompute the active priority (being careful
				 * not to override changes in the thread's base
				 * priority subsequent to locking the mutex).
				 */
				THR_SCHED_LOCK(curthread, curthread);
				curthread->inherited_priority =
					(*m)->m_saved_prio;
				curthread->active_priority =
				    MAX(curthread->inherited_priority,
				    curthread->base_priority);

				/*
				 * This thread now owns one less priority mutex.
				 */
				curthread->priority_mutex_count--;
				THR_SCHED_UNLOCK(curthread, curthread);

				/* Remove the mutex from the thread's queue. */
				MUTEX_ASSERT_IS_OWNED(*m);
				TAILQ_REMOVE(&(*m)->m_owner->mutexq,
				    (*m), m_qe);
				MUTEX_INIT_LINK(*m);

				/*
				 * Hand off the mutex to the next waiting
				 * thread:
				 */
				kmbx = mutex_handoff(curthread, *m);
			}
			break;

		/* Trap invalid mutex types: */
		default:
			/* Return an invalid argument error: */
			ret = EINVAL;
			break;
		}

		if ((ret == 0) && (add_reference != 0))
			/* Increment the reference count: */
			(*m)->m_refcount++;

		/* Leave the critical region if this is a private mutex. */
		if ((ret == 0) && ((*m)->m_flags & MUTEX_FLAGS_PRIVATE))
			THR_CRITICAL_LEAVE(curthread);

		/* Unlock the mutex structure: */
		THR_LOCK_RELEASE(curthread, &(*m)->m_lock);

		if (kmbx != NULL)
			kse_wakeup(kmbx);
	}

	/* Return the completion status: */
	return (ret);
}

/*
 * This function is called when a change in base priority occurs for
 * a thread that is holding or waiting for a priority protection or
 * inheritance mutex.  A change in a thread's base priority can effect
 * changes to the active priorities of other threads and to the ordering
 * of mutex locking by waiting threads.
 *
 * This must be called without the target thread's scheduling lock held.
 */
void
_mutex_notify_priochange(struct pthread *curthread, struct pthread *pthread,
    int propagate_prio)
{
	struct pthread_mutex *m;

	/* Adjust the priorities of any owned priority mutexes: */
	if (pthread->priority_mutex_count > 0) {
		/*
		 * Rescan the mutexes owned by this thread and correct
		 * their priorities to account for this thread's change
		 * in priority.  This has the side effect of changing
		 * the thread's active priority.
		 *
		 * Be sure to lock the first mutex in the list of owned
		 * mutexes.  This acts as a barrier against another
		 * simultaneous call to change the thread's priority
		 * and against the owning thread releasing the mutex.
		 */
		m = TAILQ_FIRST(&pthread->mutexq);
		if (m != NULL) {
			THR_LOCK_ACQUIRE(curthread, &m->m_lock);
			/*
			 * Make sure the thread still owns the lock.
			 */
			if (m == TAILQ_FIRST(&pthread->mutexq))
				mutex_rescan_owned(curthread, pthread,
				    /* rescan all owned */ NULL);
			THR_LOCK_RELEASE(curthread, &m->m_lock);
		}
	}

	/*
	 * If this thread is waiting on a priority inheritance mutex,
	 * check for priority adjustments.  A change in priority can
	 * also cause a ceiling violation(*) for a thread waiting on
	 * a priority protection mutex; we don't perform the check here
	 * as it is done in pthread_mutex_unlock.
	 *
	 * (*) It should be noted that a priority change to a thread
	 *     _after_ taking and owning a priority ceiling mutex
	 *     does not affect ownership of that mutex; the ceiling
	 *     priority is only checked before mutex ownership occurs.
	 */
	if (propagate_prio != 0) {
		/*
		 * Lock the thread's scheduling queue.  This is a bit
		 * convoluted; the "in synchronization queue flag" can
		 * only be cleared with both the thread's scheduling and
		 * mutex locks held.  The thread's pointer to the wanted
		 * mutex is guaranteed to be valid during this time.
		 */
		THR_SCHED_LOCK(curthread, pthread);

		if (((pthread->sflags & THR_FLAGS_IN_SYNCQ) == 0) ||
		    ((m = pthread->data.mutex) == NULL))
			THR_SCHED_UNLOCK(curthread, pthread);
		else {
			/*
			 * This thread is currently waiting on a mutex; unlock
			 * the scheduling queue lock and lock the mutex.  We
			 * can't hold both at the same time because the locking
			 * order could cause a deadlock.
			 */
			THR_SCHED_UNLOCK(curthread, pthread);
			THR_LOCK_ACQUIRE(curthread, &m->m_lock);

			/*
			 * Check to make sure this thread is still in the
			 * same state (the lock above can yield the CPU to
			 * another thread or the thread may be running on
			 * another CPU).
			 */
			if (((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) &&
			    (pthread->data.mutex == m)) {
				/*
				 * Remove and reinsert this thread into
				 * the list of waiting threads to preserve
				 * decreasing priority order.
				 */
				mutex_queue_remove(m, pthread);
				mutex_queue_enq(m, pthread);

				if (m->m_protocol == PTHREAD_PRIO_INHERIT)
					/* Adjust priorities: */
					mutex_priority_adjust(curthread, m);
			}

			/* Unlock the mutex structure: */
			THR_LOCK_RELEASE(curthread, &m->m_lock);
		}
	}
}

/*
 * Called when a new thread is added to the mutex waiting queue or
 * when the priority of a thread already in the waiting queue changes.
 *
 * This must be called with the mutex locked by the current thread.
 */
static void
mutex_priority_adjust(struct pthread *curthread, pthread_mutex_t mutex)
{
	pthread_mutex_t	m = mutex;
	struct pthread	*pthread_next, *pthread = mutex->m_owner;
	int		done, temp_prio;

	/*
	 * Calculate the mutex priority as the maximum of the highest
	 * active priority of any waiting threads and the owning thread's
	 * active priority(*).
	 *
	 * (*) Because the owning thread's current active priority may
	 *     reflect priority inherited from this mutex (and the mutex
	 *     priority may have changed) we must recalculate the active
	 *     priority based on the thread's saved inherited priority
	 *     and its base priority.
	 */
	pthread_next = TAILQ_FIRST(&m->m_queue);  /* should never be NULL */
	temp_prio = MAX(pthread_next->active_priority,
	    MAX(m->m_saved_prio, pthread->base_priority));

	/* See if this mutex really needs adjusting: */
	if (temp_prio == m->m_prio)
		/* No need to propagate the priority: */
		return;

	/* Set new priority of the mutex: */
	m->m_prio = temp_prio;

	/*
	 * Don't unlock the mutex passed in as an argument.  It is
	 * expected to be locked and unlocked by the caller.
	 */
	done = 1;
	do {
		/*
		 * Save the thread's priority before rescanning the
		 * owned mutexes:
		 */
		temp_prio = pthread->active_priority;

		/*
		 * Fix the priorities for all mutexes held by the owning
		 * thread since taking this mutex.  This also has a
		 * potential side-effect of changing the thread's priority.
		 *
		 * At this point the mutex is locked by the current thread.
		 * The owning thread can't release the mutex until it is
		 * unlocked, so we should be able to safely walk its list
		 * of owned mutexes.
		 */
		mutex_rescan_owned(curthread, pthread, m);

		/*
		 * If this isn't the first time through the loop,
		 * the current mutex needs to be unlocked.
		 */
		if (done == 0)
			THR_LOCK_RELEASE(curthread, &m->m_lock);

		/* Assume we're done unless told otherwise: */
		done = 1;

		/*
		 * If the thread is currently waiting on a mutex, check
		 * to see if the thread's new priority has affected the
		 * priority of the mutex.
		 */
		if ((temp_prio != pthread->active_priority) &&
		    ((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) &&
		    ((m = pthread->data.mutex) != NULL) &&
		    (m->m_protocol == PTHREAD_PRIO_INHERIT)) {
			/* Lock the mutex structure: */
			THR_LOCK_ACQUIRE(curthread, &m->m_lock);

			/*
			 * Make sure the thread is still waiting on the
			 * mutex:
			 */
			if (((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) &&
			    (m == pthread->data.mutex)) {
				/*
				 * The priority for this thread has changed.
				 * Remove and reinsert this thread into the
				 * list of waiting threads to preserve
				 * decreasing priority order.
				 */
				mutex_queue_remove(m, pthread);
				mutex_queue_enq(m, pthread);

				/*
				 * Grab the waiting thread with highest
				 * priority:
				 */
				pthread_next = TAILQ_FIRST(&m->m_queue);

				/*
				 * Calculate the mutex priority as the maximum
				 * of the highest active priority of any
				 * waiting threads and the owning thread's
				 * active priority.
				 */
				temp_prio = MAX(pthread_next->active_priority,
				    MAX(m->m_saved_prio,
				    m->m_owner->base_priority));

				if (temp_prio != m->m_prio) {
					/*
					 * The priority needs to be propagated
					 * to the mutex this thread is waiting
					 * on and up to the owner of that mutex.
					 */
					m->m_prio = temp_prio;
					pthread = m->m_owner;

					/* We're not done yet: */
					done = 0;
				}
			}
			/* Only release the mutex if we're done: */
			if (done != 0)
				THR_LOCK_RELEASE(curthread, &m->m_lock);
		}
	} while (done == 0);
}
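
/*
 * Worked example with illustrative numbers: thread A (base priority
 * 10) owns PI mutex M1 and is itself blocked on PI mutex M2, owned by
 * thread B (base priority 5).  If thread C (priority 20) now blocks
 * on M1, the loop above propagates the priority up the owner chain:
 *
 *	M1->m_prio = max(20, ...) = 20
 *	A->active_priority = max(10, 20) = 20	(via mutex_rescan_owned)
 *	M2->m_prio = max(20, ...) = 20		(done = 0, loop again)
 *	B->active_priority = max(5, 20) = 20
 *
 * The iteration stops as soon as a mutex priority is unchanged.
 */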

static void
mutex_rescan_owned(struct pthread *curthread, struct pthread *pthread,
    struct pthread_mutex *mutex)
{
	struct pthread_mutex	*m;
	struct pthread		*pthread_next;
	int			active_prio, inherited_prio;

	/*
	 * Start walking the mutexes the thread has taken since
	 * taking this mutex.
	 */
	if (mutex == NULL) {
		/*
		 * A null mutex means start at the beginning of the owned
		 * mutex list.
		 */
		m = TAILQ_FIRST(&pthread->mutexq);

		/* There is no inherited priority yet. */
		inherited_prio = 0;
	} else {
		/*
		 * The caller wants to start after a specific mutex.  It
		 * is assumed that this mutex is a priority inheritance
		 * mutex and that its priority has been correctly
		 * calculated.
		 */
		m = TAILQ_NEXT(mutex, m_qe);

		/* Start inheriting priority from the specified mutex. */
		inherited_prio = mutex->m_prio;
	}
	active_prio = MAX(inherited_prio, pthread->base_priority);

	for (; m != NULL; m = TAILQ_NEXT(m, m_qe)) {
		/*
		 * We only want to deal with priority inheritance
		 * mutexes.  This might be optimized by only placing
		 * priority inheritance mutexes into the owned mutex
		 * list, but it may prove to be useful having all
		 * owned mutexes in this list.  Consider a thread
		 * exiting while holding mutexes...
		 */
		if (m->m_protocol == PTHREAD_PRIO_INHERIT) {
			/*
			 * Fix the owner's saved (inherited) priority to
			 * reflect the priority of the previous mutex.
			 */
			m->m_saved_prio = inherited_prio;

			if ((pthread_next = TAILQ_FIRST(&m->m_queue)) != NULL)
				/* Recalculate the priority of the mutex: */
				m->m_prio = MAX(active_prio,
				     pthread_next->active_priority);
			else
				m->m_prio = active_prio;

			/* Recalculate new inherited and active priorities: */
			inherited_prio = m->m_prio;
			active_prio = MAX(m->m_prio, pthread->base_priority);
		}
	}

	/*
	 * Fix the thread's inherited priority and recalculate its
	 * active priority.
	 */
	pthread->inherited_priority = inherited_prio;
	active_prio = MAX(inherited_prio, pthread->base_priority);

	if (active_prio != pthread->active_priority) {
		/* Lock the thread's scheduling queue: */
		THR_SCHED_LOCK(curthread, pthread);

		if ((pthread->flags & THR_FLAGS_IN_RUNQ) == 0) {
			/*
			 * This thread is not in a run queue.  Just set
			 * its active priority.
			 */
			pthread->active_priority = active_prio;
		}
		else {
			/*
			 * This thread is in a run queue.  Remove it from
			 * the queue before changing its priority:
			 */
			THR_RUNQ_REMOVE(pthread);

			/*
			 * POSIX states that if the priority is being
			 * lowered, the thread must be inserted at the
			 * head of the queue for its priority if it owns
			 * any priority protection or inheritance mutexes.
			 */
			if ((active_prio < pthread->active_priority) &&
			    (pthread->priority_mutex_count > 0)) {
				/* Set the new active priority. */
				pthread->active_priority = active_prio;

				THR_RUNQ_INSERT_HEAD(pthread);
			} else {
				/* Set the new active priority. */
				pthread->active_priority = active_prio;

				THR_RUNQ_INSERT_TAIL(pthread);
			}
		}
		THR_SCHED_UNLOCK(curthread, pthread);
	}
}

void
_mutex_unlock_private(pthread_t pthread)
{
	struct pthread_mutex	*m, *m_next;

	for (m = TAILQ_FIRST(&pthread->mutexq); m != NULL; m = m_next) {
		m_next = TAILQ_NEXT(m, m_qe);
		if ((m->m_flags & MUTEX_FLAGS_PRIVATE) != 0)
			pthread_mutex_unlock(&m);
	}
}

/*
 * This is called by the current thread when it wants to back out of a
 * mutex_lock in order to run a signal handler.
 */
static void
mutex_lock_backout(void *arg)
{
	struct pthread *curthread = (struct pthread *)arg;
	struct pthread_mutex *m;

	if ((curthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) {
		/*
		 * Any other thread may clear the "in sync queue flag",
		 * but only the current thread can clear the pointer
		 * to the mutex.  So if the flag is set, we can
		 * guarantee that the pointer to the mutex is valid.
		 * The only problem may be if the mutex is destroyed
		 * out from under us, but that should be considered
		 * an application bug.
		 */
		m = curthread->data.mutex;

		/* Lock the mutex structure: */
		THR_LOCK_ACQUIRE(curthread, &m->m_lock);

		/*
		 * Check to make sure this thread doesn't already own
		 * the mutex.  Since mutexes are unlocked with direct
		 * handoffs, it is possible the previous owner gave it
		 * to us after we checked the sync queue flag and before
		 * we locked the mutex structure.
		 */
		if (m->m_owner == curthread) {
			THR_LOCK_RELEASE(curthread, &m->m_lock);
			mutex_unlock_common(&m, /* add_reference */ 0);
		} else {
			/*
			 * Remove ourselves from the mutex queue and
			 * clear the pointer to the mutex.  We may no
			 * longer be in the mutex queue, but the removal
			 * function will DTRT.
			 */
			mutex_queue_remove(m, curthread);
			curthread->data.mutex = NULL;
			THR_LOCK_RELEASE(curthread, &m->m_lock);
		}
	}
	/* No need to call this again. */
	curthread->sigbackout = NULL;
}

/*
 * Dequeue a waiting thread from the head of a mutex queue in descending
 * priority order.
 *
 * In order to properly dequeue a thread from the mutex queue and
 * make it runnable without the possibility of errant wakeups, it
 * is necessary to lock the thread's scheduling queue while also
 * holding the mutex lock.
 */
static struct kse_mailbox *
mutex_handoff(struct pthread *curthread, struct pthread_mutex *mutex)
{
	struct kse_mailbox *kmbx = NULL;
	struct pthread *pthread;

	/* Keep dequeueing until we find a valid thread: */
	mutex->m_owner = NULL;
	pthread = TAILQ_FIRST(&mutex->m_queue);
	while (pthread != NULL) {
		/* Take the thread's scheduling lock: */
		THR_SCHED_LOCK(curthread, pthread);

		/* Remove the thread from the mutex queue: */
		TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
		pthread->sflags &= ~THR_FLAGS_IN_SYNCQ;

		/* Set up the new owner according to the mutex protocol: */
		switch (mutex->m_protocol) {
		case PTHREAD_PRIO_NONE:
			/*
			 * Assign the new owner and add the mutex to the
			 * thread's list of owned mutexes.
			 */
			mutex->m_owner = pthread;
			TAILQ_INSERT_TAIL(&pthread->mutexq, mutex, m_qe);
			break;

		case PTHREAD_PRIO_INHERIT:
			/*
			 * Assign the new owner and add the mutex to the
			 * thread's list of owned mutexes.
			 */
			mutex->m_owner = pthread;
			TAILQ_INSERT_TAIL(&pthread->mutexq, mutex, m_qe);

			/* Track number of priority mutexes owned: */
			pthread->priority_mutex_count++;

			/*
			 * Set the priority of the mutex.  Since our waiting
			 * threads are in descending priority order, the
			 * priority of the mutex becomes the active priority
			 * of the thread we just dequeued.
			 */
			mutex->m_prio = pthread->active_priority;

			/* Save the owning thread's inherited priority: */
			mutex->m_saved_prio = pthread->inherited_priority;

			/*
			 * The owning thread's inherited priority now becomes
			 * its active priority (the priority of the mutex).
			 */
			pthread->inherited_priority = mutex->m_prio;
			break;

		case PTHREAD_PRIO_PROTECT:
			if (pthread->active_priority > mutex->m_prio) {
				/*
				 * Either the mutex ceiling priority has
				 * been lowered and/or this thread's priority
				 * has been raised subsequent to the thread
				 * being queued on the waiting list.
				 */
				pthread->error = EINVAL;
			}
			else {
				/*
				 * Assign the new owner and add the mutex
				 * to the thread's list of owned mutexes.
				 */
				mutex->m_owner = pthread;
				TAILQ_INSERT_TAIL(&pthread->mutexq,
				    mutex, m_qe);

				/* Track number of priority mutexes owned: */
				pthread->priority_mutex_count++;

				/*
				 * Save the owning thread's inherited
				 * priority:
				 */
				mutex->m_saved_prio =
				    pthread->inherited_priority;

				/*
				 * The owning thread inherits the ceiling
				 * priority of the mutex and executes at
				 * that priority:
				 */
				pthread->inherited_priority = mutex->m_prio;
				pthread->active_priority = mutex->m_prio;
			}
			break;
		}

		/* Make the thread runnable and unlock the scheduling queue: */
		kmbx = _thr_setrunnable_unlocked(pthread);

		/* Add a preemption point. */
		if ((curthread->kseg == pthread->kseg) &&
		    (pthread->active_priority > curthread->active_priority))
			curthread->critical_yield = 1;

		if (mutex->m_owner == pthread) {
			/* We're done; a valid owner was found. */
			if (mutex->m_flags & MUTEX_FLAGS_PRIVATE)
				THR_CRITICAL_ENTER(pthread);
			THR_SCHED_UNLOCK(curthread, pthread);
			break;
		}
		THR_SCHED_UNLOCK(curthread, pthread);
		/* Get the next thread from the waiting queue: */
		pthread = TAILQ_NEXT(pthread, sqe);
	}

	if ((pthread == NULL) && (mutex->m_protocol == PTHREAD_PRIO_INHERIT))
		/* This mutex has no priority: */
		mutex->m_prio = 0;
	return (kmbx);
}

/*
 * Dequeue a waiting thread from the head of a mutex queue in descending
 * priority order.
 */
static inline pthread_t
mutex_queue_deq(struct pthread_mutex *mutex)
{
	pthread_t pthread;

	while ((pthread = TAILQ_FIRST(&mutex->m_queue)) != NULL) {
		TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
		pthread->sflags &= ~THR_FLAGS_IN_SYNCQ;

		/*
		 * Only exit the loop if the thread hasn't been
		 * cancelled.
		 */
		if (pthread->interrupted == 0)
			break;
	}

	return (pthread);
}

/*
 * Remove a waiting thread from a mutex queue in descending priority order.
 */
static inline void
mutex_queue_remove(pthread_mutex_t mutex, pthread_t pthread)
{
	if ((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) {
		TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
		pthread->sflags &= ~THR_FLAGS_IN_SYNCQ;
	}
}

/*
 * Enqueue a waiting thread to a queue in descending priority order.
 */
static inline void
mutex_queue_enq(pthread_mutex_t mutex, pthread_t pthread)
{
	pthread_t tid = TAILQ_LAST(&mutex->m_queue, mutex_head);

	THR_ASSERT_NOT_IN_SYNCQ(pthread);
	/*
	 * For the common case of all threads having equal priority,
	 * we perform a quick check against the priority of the thread
	 * at the tail of the queue.
	 */
	if ((tid == NULL) || (pthread->active_priority <= tid->active_priority))
		TAILQ_INSERT_TAIL(&mutex->m_queue, pthread, sqe);
	else {
		tid = TAILQ_FIRST(&mutex->m_queue);
		while (pthread->active_priority <= tid->active_priority)
			tid = TAILQ_NEXT(tid, sqe);
		TAILQ_INSERT_BEFORE(tid, pthread, sqe);
	}
	pthread->sflags |= THR_FLAGS_IN_SYNCQ;
}
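
/*
 * Queue invariant maintained above: threads are kept in non-increasing
 * active_priority order, FIFO among equal priorities.  For example,
 * enqueueing threads with priorities 5, 9, 5, 7 yields:
 *
 *	head -> 9 -> 7 -> 5 -> 5 -> tail
 *
 * so mutex_handoff() and mutex_queue_deq() always pick the
 * highest-priority, longest-waiting thread first.
 */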