/*	$NetBSD: pthread_mutex.c,v 1.83 2022/04/10 10:38:33 riastradh Exp $	*/

/*-
 * Copyright (c) 2001, 2003, 2006, 2007, 2008, 2020 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams, by Jason R. Thorpe, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * To track threads waiting for mutexes to be released, we use lockless
 * lists built on atomic operations and memory barriers.
 *
 * A simple spinlock would be faster and make the code easier to
 * follow, but spinlocks are problematic in userspace.  If a thread is
 * preempted by the kernel while holding a spinlock, any other thread
 * attempting to acquire that spinlock will needlessly busy wait.
 *
 * There is no good way to know that the holding thread is no longer
 * running, nor to request a wake-up once it has begun running again.
 * Of more concern, threads in the SCHED_FIFO class do not have a
 * limited time quantum and so could spin forever, preventing the
 * thread holding the spinlock from getting CPU time: it would never
 * be released.
 */
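
/*
 * In outline, the waiter-list scheme used below works as follows: a
 * contending thread pushes a waiter record (allocated on its own stack)
 * onto the mutex's ptm_waiters list with a compare-and-swap and then
 * parks in the kernel.  The unlocking thread detaches the whole list
 * with an atomic swap and unparks every thread on it, clearing each
 * record's 'lid' field to signal that the record is no longer linked
 * and may safely be reused.
 */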

#include <sys/cdefs.h>
__RCSID("$NetBSD: pthread_mutex.c,v 1.83 2022/04/10 10:38:33 riastradh Exp $");

/* Need to use libc-private names for atomic operations. */
#include "../../common/lib/libc/atomic/atomic_op_namespace.h"

#include <sys/types.h>
#include <sys/lwpctl.h>
#include <sys/sched.h>
#include <sys/lock.h>

#include <errno.h>
#include <limits.h>
#include <stdlib.h>
#include <time.h>
#include <string.h>
#include <stdio.h>

#include "pthread.h"
#include "pthread_int.h"
#include "reentrant.h"

#define	MUTEX_RECURSIVE_BIT		((uintptr_t)0x02)
#define	MUTEX_PROTECT_BIT		((uintptr_t)0x08)
#define	MUTEX_THREAD			((uintptr_t)~0x0f)

#define	MUTEX_RECURSIVE(x)		((uintptr_t)(x) & MUTEX_RECURSIVE_BIT)
#define	MUTEX_PROTECT(x)		((uintptr_t)(x) & MUTEX_PROTECT_BIT)
#define	MUTEX_OWNER(x)			((uintptr_t)(x) & MUTEX_THREAD)

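/*
 * The mutex attribute's ptma_private pointer is used as a small bit
 * field: byte 0 holds the mutex type, byte 1 the protocol and byte 2
 * the priority ceiling, as encoded and decoded by the macros below.
 */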
#define	MUTEX_GET_TYPE(x)		\
    ((int)(((uintptr_t)(x) & 0x000000ff) >> 0))
#define	MUTEX_SET_TYPE(x, t)		\
    (x) = (void *)(((uintptr_t)(x) & ~0x000000ff) | ((t) << 0))
#define	MUTEX_GET_PROTOCOL(x)		\
    ((int)(((uintptr_t)(x) & 0x0000ff00) >> 8))
#define	MUTEX_SET_PROTOCOL(x, p)	\
    (x) = (void *)(((uintptr_t)(x) & ~0x0000ff00) | ((p) << 8))
#define	MUTEX_GET_CEILING(x)		\
    ((int)(((uintptr_t)(x) & 0x00ff0000) >> 16))
#define	MUTEX_SET_CEILING(x, c)	\
    (x) = (void *)(((uintptr_t)(x) & ~0x00ff0000) | ((c) << 16))

#if __GNUC_PREREQ__(3, 0)
#define	NOINLINE		__attribute ((noinline))
#else
#define	NOINLINE		/* nothing */
#endif

static void	pthread__mutex_wakeup(pthread_t, struct pthread__waiter *);
static int	pthread__mutex_lock_slow(pthread_mutex_t *,
    const struct timespec *);
static void	pthread__mutex_pause(void);

int		_pthread_mutex_held_np(pthread_mutex_t *);
pthread_t	_pthread_mutex_owner_np(pthread_mutex_t *);

__weak_alias(pthread_mutex_held_np,_pthread_mutex_held_np)
__weak_alias(pthread_mutex_owner_np,_pthread_mutex_owner_np)

__strong_alias(__libc_mutex_init,pthread_mutex_init)
__strong_alias(__libc_mutex_lock,pthread_mutex_lock)
__strong_alias(__libc_mutex_trylock,pthread_mutex_trylock)
__strong_alias(__libc_mutex_unlock,pthread_mutex_unlock)
__strong_alias(__libc_mutex_destroy,pthread_mutex_destroy)

__strong_alias(__libc_mutexattr_init,pthread_mutexattr_init)
__strong_alias(__libc_mutexattr_destroy,pthread_mutexattr_destroy)
__strong_alias(__libc_mutexattr_settype,pthread_mutexattr_settype)

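/*
 * ptm_owner encodes the owning thread's pthread_t in its upper bits
 * (the code relies on pthread_t values having their low four bits
 * clear) together with the MUTEX_RECURSIVE_BIT and MUTEX_PROTECT_BIT
 * flags defined above; MUTEX_OWNER() strips the flags off again.
 */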
int
pthread_mutex_init(pthread_mutex_t *ptm, const pthread_mutexattr_t *attr)
{
	uintptr_t type, proto, val, ceil;

#if 0
	/*
	 * Always initialize the mutex structure: it may be used later
	 * and the cost should be minimal.
	 */
	if (__predict_false(__uselibcstub))
		return __libc_mutex_init_stub(ptm, attr);
#endif

	pthread__error(EINVAL, "Invalid mutex attribute",
	    attr == NULL || attr->ptma_magic == _PT_MUTEXATTR_MAGIC);

	if (attr == NULL) {
		type = PTHREAD_MUTEX_NORMAL;
		proto = PTHREAD_PRIO_NONE;
		ceil = 0;
	} else {
		val = (uintptr_t)attr->ptma_private;

		type = MUTEX_GET_TYPE(val);
		proto = MUTEX_GET_PROTOCOL(val);
		ceil = MUTEX_GET_CEILING(val);
	}
	switch (type) {
	case PTHREAD_MUTEX_ERRORCHECK:
		__cpu_simple_lock_set(&ptm->ptm_errorcheck);
		ptm->ptm_owner = NULL;
		break;
	case PTHREAD_MUTEX_RECURSIVE:
		__cpu_simple_lock_clear(&ptm->ptm_errorcheck);
		ptm->ptm_owner = (void *)MUTEX_RECURSIVE_BIT;
		break;
	default:
		__cpu_simple_lock_clear(&ptm->ptm_errorcheck);
		ptm->ptm_owner = NULL;
		break;
	}
	switch (proto) {
	case PTHREAD_PRIO_PROTECT:
		val = (uintptr_t)ptm->ptm_owner;
		val |= MUTEX_PROTECT_BIT;
		ptm->ptm_owner = (void *)val;
		break;
	}
	ptm->ptm_magic = _PT_MUTEX_MAGIC;
	ptm->ptm_waiters = NULL;
	ptm->ptm_recursed = 0;
	ptm->ptm_ceiling = (unsigned char)ceil;

	return 0;
}
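
/*
 * Illustrative usage (not part of this file's interface): an
 * error-checking mutex is obtained by initializing an attribute
 * object, setting its type and passing it to pthread_mutex_init, e.g.
 *
 *	pthread_mutexattr_t ma;
 *	pthread_mutex_t m;
 *
 *	pthread_mutexattr_init(&ma);
 *	pthread_mutexattr_settype(&ma, PTHREAD_MUTEX_ERRORCHECK);
 *	pthread_mutex_init(&m, &ma);
 *	pthread_mutexattr_destroy(&ma);
 */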

int
pthread_mutex_destroy(pthread_mutex_t *ptm)
{

	if (__predict_false(__uselibcstub))
		return __libc_mutex_destroy_stub(ptm);

	pthread__error(EINVAL, "Invalid mutex",
	    ptm->ptm_magic == _PT_MUTEX_MAGIC);
	pthread__error(EBUSY, "Destroying locked mutex",
	    MUTEX_OWNER(ptm->ptm_owner) == 0);

	ptm->ptm_magic = _PT_MUTEX_DEAD;
	return 0;
}

int
pthread_mutex_lock(pthread_mutex_t *ptm)
{
	pthread_t self;
	void *val;

	if (__predict_false(__uselibcstub))
		return __libc_mutex_lock_stub(ptm);

	pthread__error(EINVAL, "Invalid mutex",
	    ptm->ptm_magic == _PT_MUTEX_MAGIC);

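	/*
	 * Fast path: an unowned mutex has ptm_owner == NULL, so a single
	 * CAS from NULL to our own pthread_t acquires it.  On platforms
	 * where the atomic op is not itself a barrier, membar_enter()
	 * supplies the required ordering.  On failure, fall through to
	 * the slow path.
	 */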
	self = pthread__self();
	val = atomic_cas_ptr(&ptm->ptm_owner, NULL, self);
	if (__predict_true(val == NULL)) {
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
		membar_enter();
#endif
		return 0;
	}
	return pthread__mutex_lock_slow(ptm, NULL);
}

int
pthread_mutex_timedlock(pthread_mutex_t* ptm, const struct timespec *ts)
{
	pthread_t self;
	void *val;

	pthread__error(EINVAL, "Invalid mutex",
	    ptm->ptm_magic == _PT_MUTEX_MAGIC);

	self = pthread__self();
	val = atomic_cas_ptr(&ptm->ptm_owner, NULL, self);
	if (__predict_true(val == NULL)) {
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
		membar_enter();
#endif
		return 0;
	}
	return pthread__mutex_lock_slow(ptm, ts);
}

/* We want function call overhead. */
NOINLINE static void
pthread__mutex_pause(void)
{

	pthread__smt_pause();
}

/*
 * Spin while the holder is running.  'lwpctl' gives us the true
 * status of the thread.
 */
NOINLINE static void *
pthread__mutex_spin(pthread_mutex_t *ptm, pthread_t owner)
{
	pthread_t thread;
	unsigned int count, i;

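	/*
	 * Exponential backoff: the pause count doubles on each pass,
	 * capped at 128, and we stop as soon as the mutex is free or
	 * its owner is no longer running on a CPU.
	 */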
	for (count = 2;; owner = ptm->ptm_owner) {
		thread = (pthread_t)MUTEX_OWNER(owner);
		if (thread == NULL)
			break;
		if (thread->pt_lwpctl->lc_curcpu == LWPCTL_CPU_NONE)
			break;
		if (count < 128)
			count += count;
		for (i = count; i != 0; i--)
			pthread__mutex_pause();
	}

	return owner;
}

NOINLINE static int
pthread__mutex_lock_slow(pthread_mutex_t *ptm, const struct timespec *ts)
{
	void *newval, *owner, *next;
	struct pthread__waiter waiter;
	pthread_t self;
	int serrno;
	int error;

	owner = ptm->ptm_owner;
	self = pthread__self();
	serrno = errno;

	pthread__assert(self->pt_lid != 0);

	/* Recursive or errorcheck? */
	if (MUTEX_OWNER(owner) == (uintptr_t)self) {
		if (MUTEX_RECURSIVE(owner)) {
			if (ptm->ptm_recursed == INT_MAX)
				return EAGAIN;
			ptm->ptm_recursed++;
			return 0;
		}
		if (__SIMPLELOCK_LOCKED_P(&ptm->ptm_errorcheck))
			return EDEADLK;
	}

	/* priority protect */
	if (MUTEX_PROTECT(owner) && _sched_protect(ptm->ptm_ceiling) == -1) {
		error = errno;
		errno = serrno;
		return error;
	}

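	/*
	 * Main acquisition loop: try to CAS ourselves in if the mutex
	 * has become free, spin while the owner is running, and
	 * otherwise queue on the waiters list and park in the kernel
	 * until woken.
	 */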
	for (;;) {
		/* If it has become free, try to acquire it again. */
		if (MUTEX_OWNER(owner) == 0) {
			newval = (void *)((uintptr_t)self | (uintptr_t)owner);
			next = atomic_cas_ptr(&ptm->ptm_owner, owner, newval);
			if (__predict_false(next != owner)) {
				owner = next;
				continue;
			}
			errno = serrno;
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
			membar_enter();
#endif
			return 0;
		} else if (MUTEX_OWNER(owner) != (uintptr_t)self) {
			/* Spin while the owner is running. */
			owner = pthread__mutex_spin(ptm, owner);
			if (MUTEX_OWNER(owner) == 0) {
				continue;
			}
		}

		/*
		 * Nope, still held.  Add thread to the list of waiters.
		 * Issue a memory barrier to ensure stores to 'waiter'
		 * are visible before we enter the list.
		 */
		waiter.next = ptm->ptm_waiters;
		waiter.lid = self->pt_lid;
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
		membar_producer();
#endif
		next = atomic_cas_ptr(&ptm->ptm_waiters, waiter.next, &waiter);
		if (next != waiter.next) {
			owner = ptm->ptm_owner;
			continue;
		}

		/*
		 * If the mutex has become free since we put ourselves on
		 * the waiters list, we need to wake everybody up (including
		 * ourselves) and retry.  It's possible to race with the
		 * unlocking thread, so we may already have been awoken.
		 */
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
		membar_enter();
#endif
		if (MUTEX_OWNER(ptm->ptm_owner) == 0) {
			pthread__mutex_wakeup(self,
			    atomic_swap_ptr(&ptm->ptm_waiters, NULL));
		}

		/*
		 * We must not proceed until told that we are no longer
		 * waiting (via waiter.lid being set to zero).  Otherwise
		 * it's unsafe to re-enter "waiter" onto the waiters list.
		 */
		while (waiter.lid != 0) {
			error = _lwp_park(CLOCK_REALTIME, TIMER_ABSTIME,
			    __UNCONST(ts), 0, NULL, NULL);
			if (error < 0 && errno == ETIMEDOUT) {
				/* Remove self from waiters list */
				pthread__mutex_wakeup(self,
				    atomic_swap_ptr(&ptm->ptm_waiters, NULL));

				/*
				 * We might have raced with another thread to
				 * do the wakeup.  In any case there will be
				 * a wakeup for sure.  Eat it and wait for
				 * waiter.lid to clear.
				 */
				while (waiter.lid != 0) {
					(void)_lwp_park(CLOCK_MONOTONIC, 0,
					    NULL, 0, NULL, NULL);
				}

				/* Priority protect */
				if (MUTEX_PROTECT(owner))
					(void)_sched_protect(-1);
				errno = serrno;
				return ETIMEDOUT;
			}
		}
		owner = ptm->ptm_owner;
	}
}

int
pthread_mutex_trylock(pthread_mutex_t *ptm)
{
	pthread_t self;
	void *val, *new, *next;

	if (__predict_false(__uselibcstub))
		return __libc_mutex_trylock_stub(ptm);

	pthread__error(EINVAL, "Invalid mutex",
	    ptm->ptm_magic == _PT_MUTEX_MAGIC);

	self = pthread__self();
	val = atomic_cas_ptr(&ptm->ptm_owner, NULL, self);
	if (__predict_true(val == NULL)) {
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
		membar_enter();
#endif
		return 0;
	}

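	/*
	 * For recursive mutexes the RECURSIVE bit stays set in ptm_owner
	 * even while the mutex is unowned, so the fast-path CAS from NULL
	 * above can never succeed; handle the unowned and already-owned
	 * recursive cases here.
	 */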
	if (MUTEX_RECURSIVE(val)) {
		if (MUTEX_OWNER(val) == 0) {
			new = (void *)((uintptr_t)self | (uintptr_t)val);
			next = atomic_cas_ptr(&ptm->ptm_owner, val, new);
			if (__predict_true(next == val)) {
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
				membar_enter();
#endif
				return 0;
			}
		}
		if (MUTEX_OWNER(val) == (uintptr_t)self) {
			if (ptm->ptm_recursed == INT_MAX)
				return EAGAIN;
			ptm->ptm_recursed++;
			return 0;
		}
	}

	return EBUSY;
}

int
pthread_mutex_unlock(pthread_mutex_t *ptm)
{
	pthread_t self;
	void *val, *newval;
	int error;

	if (__predict_false(__uselibcstub))
		return __libc_mutex_unlock_stub(ptm);

	pthread__error(EINVAL, "Invalid mutex",
	    ptm->ptm_magic == _PT_MUTEX_MAGIC);

#ifndef PTHREAD__ATOMIC_IS_MEMBAR
	membar_exit();
#endif
	error = 0;
	self = pthread__self();
	newval = NULL;

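	/*
	 * Fast path: if we own the mutex outright (no flag bits set and
	 * no recursion), a single CAS from our pthread_t back to NULL
	 * releases it.  Anything else (error checking, recursion,
	 * priority protection or an unexpected owner) is sorted out
	 * below.
	 */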
	val = atomic_cas_ptr(&ptm->ptm_owner, self, newval);
	if (__predict_false(val != self)) {
		bool weown = (MUTEX_OWNER(val) == (uintptr_t)self);
		if (__SIMPLELOCK_LOCKED_P(&ptm->ptm_errorcheck)) {
			if (!weown) {
				error = EPERM;
				newval = val;
			} else {
				newval = NULL;
			}
		} else if (MUTEX_RECURSIVE(val)) {
			if (!weown) {
				error = EPERM;
				newval = val;
			} else if (ptm->ptm_recursed) {
				ptm->ptm_recursed--;
				newval = val;
			} else {
				newval = (pthread_t)MUTEX_RECURSIVE_BIT;
			}
		} else {
			pthread__error(EPERM,
			    "Unlocking unlocked mutex", (val != NULL));
			pthread__error(EPERM,
			    "Unlocking mutex owned by another thread", weown);
			newval = NULL;
		}

		/*
		 * Release the mutex.  If there appear to be waiters, then
		 * wake them up.
		 */
		if (newval != val) {
			val = atomic_swap_ptr(&ptm->ptm_owner, newval);
			if (__predict_false(MUTEX_PROTECT(val))) {
				/* restore elevated priority */
				(void)_sched_protect(-1);
			}
		}
	}

	/*
	 * Finally, wake any waiters and return.
	 */
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
	membar_enter();
#endif
	if (MUTEX_OWNER(newval) == 0 && ptm->ptm_waiters != NULL) {
		pthread__mutex_wakeup(self,
		    atomic_swap_ptr(&ptm->ptm_waiters, NULL));
	}
	return error;
}

/*
 * pthread__mutex_wakeup: unpark threads waiting for us
 */

static void
pthread__mutex_wakeup(pthread_t self, struct pthread__waiter *cur)
{
	lwpid_t lids[PTHREAD__UNPARK_MAX];
	const size_t mlid = pthread__unpark_max;
	struct pthread__waiter *next;
	size_t nlid;

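	/*
	 * Unpark waiters in batches of at most pthread__unpark_max LWPs
	 * per _lwp_unpark_all() call, using a single _lwp_unpark() when
	 * only one waiter remains.
	 */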
	/*
	 * Pull waiters from the queue and add to our list.  Use a memory
	 * barrier to ensure that we safely read the value of waiter->next
	 * before the awoken thread sees waiter->lid being cleared.
	 */
	membar_datadep_consumer(); /* for alpha */
	for (nlid = 0; cur != NULL; cur = next) {
		if (nlid == mlid) {
			(void)_lwp_unpark_all(lids, nlid, NULL);
			nlid = 0;
		}
		next = cur->next;
		pthread__assert(cur->lid != 0);
		lids[nlid++] = cur->lid;
		membar_exit();
		cur->lid = 0;
		/* No longer safe to touch 'cur' */
	}
	if (nlid == 1) {
		(void)_lwp_unpark(lids[0], NULL);
	} else if (nlid > 1) {
		(void)_lwp_unpark_all(lids, nlid, NULL);
	}
}

int
pthread_mutexattr_init(pthread_mutexattr_t *attr)
{
#if 0
	if (__predict_false(__uselibcstub))
		return __libc_mutexattr_init_stub(attr);
#endif

	attr->ptma_magic = _PT_MUTEXATTR_MAGIC;
	attr->ptma_private = (void *)PTHREAD_MUTEX_DEFAULT;
	return 0;
}

int
pthread_mutexattr_destroy(pthread_mutexattr_t *attr)
{
	if (__predict_false(__uselibcstub))
		return __libc_mutexattr_destroy_stub(attr);

	pthread__error(EINVAL, "Invalid mutex attribute",
	    attr->ptma_magic == _PT_MUTEXATTR_MAGIC);

	attr->ptma_magic = _PT_MUTEXATTR_DEAD;

	return 0;
}

int
pthread_mutexattr_gettype(const pthread_mutexattr_t *attr, int *typep)
{

	pthread__error(EINVAL, "Invalid mutex attribute",
	    attr->ptma_magic == _PT_MUTEXATTR_MAGIC);

	*typep = MUTEX_GET_TYPE(attr->ptma_private);
	return 0;
}

int
pthread_mutexattr_settype(pthread_mutexattr_t *attr, int type)
{

	if (__predict_false(__uselibcstub))
		return __libc_mutexattr_settype_stub(attr, type);

	pthread__error(EINVAL, "Invalid mutex attribute",
	    attr->ptma_magic == _PT_MUTEXATTR_MAGIC);

	switch (type) {
	case PTHREAD_MUTEX_NORMAL:
	case PTHREAD_MUTEX_ERRORCHECK:
	case PTHREAD_MUTEX_RECURSIVE:
		MUTEX_SET_TYPE(attr->ptma_private, type);
		return 0;
	default:
		return EINVAL;
	}
}

int
pthread_mutexattr_getprotocol(const pthread_mutexattr_t *attr, int *proto)
{

	pthread__error(EINVAL, "Invalid mutex attribute",
	    attr->ptma_magic == _PT_MUTEXATTR_MAGIC);

	*proto = MUTEX_GET_PROTOCOL(attr->ptma_private);
	return 0;
}

int
pthread_mutexattr_setprotocol(pthread_mutexattr_t *attr, int proto)
{

	pthread__error(EINVAL, "Invalid mutex attribute",
	    attr->ptma_magic == _PT_MUTEXATTR_MAGIC);

	switch (proto) {
	case PTHREAD_PRIO_NONE:
	case PTHREAD_PRIO_PROTECT:
		MUTEX_SET_PROTOCOL(attr->ptma_private, proto);
		return 0;
	case PTHREAD_PRIO_INHERIT:
		return ENOTSUP;
	default:
		return EINVAL;
	}
}

int
pthread_mutexattr_getprioceiling(const pthread_mutexattr_t *attr, int *ceil)
{

	pthread__error(EINVAL, "Invalid mutex attribute",
	    attr->ptma_magic == _PT_MUTEXATTR_MAGIC);

	*ceil = MUTEX_GET_CEILING(attr->ptma_private);
	return 0;
}

int
pthread_mutexattr_setprioceiling(pthread_mutexattr_t *attr, int ceil)
{

	pthread__error(EINVAL, "Invalid mutex attribute",
	    attr->ptma_magic == _PT_MUTEXATTR_MAGIC);

	if (ceil & ~0xff)
		return EINVAL;

	MUTEX_SET_CEILING(attr->ptma_private, ceil);
	return 0;
}

#ifdef _PTHREAD_PSHARED
int
pthread_mutexattr_getpshared(const pthread_mutexattr_t * __restrict attr,
    int * __restrict pshared)
{

	pthread__error(EINVAL, "Invalid mutex attribute",
	    attr->ptma_magic == _PT_MUTEXATTR_MAGIC);

	*pshared = PTHREAD_PROCESS_PRIVATE;
	return 0;
}

int
pthread_mutexattr_setpshared(pthread_mutexattr_t *attr, int pshared)
{

	pthread__error(EINVAL, "Invalid mutex attribute",
	    attr->ptma_magic == _PT_MUTEXATTR_MAGIC);

	switch (pshared) {
	case PTHREAD_PROCESS_PRIVATE:
		return 0;
	case PTHREAD_PROCESS_SHARED:
		return ENOSYS;
	}
	return EINVAL;
}
#endif

/*
 * In order to avoid unnecessary contention on interlocking mutexes, we try
 * to defer waking up threads until we unlock the mutex.  The threads will
 * be woken up when the calling thread (self) releases the mutex.
 */
void
pthread__mutex_deferwake(pthread_t self, pthread_mutex_t *ptm,
    struct pthread__waiter *head)
{
	struct pthread__waiter *tail, *n, *o;

	pthread__assert(head != NULL);

	if (__predict_false(ptm == NULL ||
	    MUTEX_OWNER(ptm->ptm_owner) != (uintptr_t)self)) {
	    	pthread__mutex_wakeup(self, head);
	    	return;
	}

	/* This is easy if no existing waiters on mutex. */
	if (atomic_cas_ptr(&ptm->ptm_waiters, NULL, head) == NULL) {
		return;
	}

	/* Oops need to append.  Find the tail of the new queue. */
	for (tail = head; tail->next != NULL; tail = tail->next) {
		/* nothing */
	}

	/* Append atomically. */
	for (o = ptm->ptm_waiters;; o = n) {
		tail->next = o;
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
		membar_producer();
#endif
		n = atomic_cas_ptr(&ptm->ptm_waiters, o, head);
		if (__predict_true(n == o)) {
			break;
		}
	}
}

int
pthread_mutex_getprioceiling(const pthread_mutex_t *ptm, int *ceil)
{

	pthread__error(EINVAL, "Invalid mutex",
	    ptm->ptm_magic == _PT_MUTEX_MAGIC);

	*ceil = ptm->ptm_ceiling;
	return 0;
}

int
pthread_mutex_setprioceiling(pthread_mutex_t *ptm, int ceil, int *old_ceil)
{
	int error;

	pthread__error(EINVAL, "Invalid mutex",
	    ptm->ptm_magic == _PT_MUTEX_MAGIC);

	error = pthread_mutex_lock(ptm);
	if (error == 0) {
		*old_ceil = ptm->ptm_ceiling;
		/* XXX should validate that 'ceil' is in range */
		ptm->ptm_ceiling = ceil;
		pthread_mutex_unlock(ptm);
	}
	return error;
}

int
_pthread_mutex_held_np(pthread_mutex_t *ptm)
{

	return MUTEX_OWNER(ptm->ptm_owner) == (uintptr_t)pthread__self();
}

pthread_t
_pthread_mutex_owner_np(pthread_mutex_t *ptm)
{

	return (pthread_t)MUTEX_OWNER(ptm->ptm_owner);
}