thr_umtx.c revision 297706
/*
 * Copyright (c) 2005 David Xu <davidxu@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/lib/libthr/thread/thr_umtx.c 297706 2016-04-08 11:15:26Z kib $");

#include "thr_private.h"
#include "thr_umtx.h"

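/*
 * Error-returning wrapper around _umtx_op(2).  When the system does not
 * provide _umtx_op_err() directly, translate the usual -1/errno convention
 * into returning the error number itself (0 on success), so callers can
 * test the result without consulting errno.
 */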
#ifndef HAS__UMTX_OP_ERR
int _umtx_op_err(void *obj, int op, u_long val, void *uaddr, void *uaddr2)
{
	if (_umtx_op(obj, op, val, uaddr, uaddr2) == -1)
		return (errno);
	return (0);
}
#endif

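/*
 * Initialize a umutex/urwlock to its default unlocked state by copying a
 * statically initialized template object.
 */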
void
_thr_umutex_init(struct umutex *mtx)
{
	static const struct umutex default_mtx = DEFAULT_UMUTEX;

	*mtx = default_mtx;
}

void
_thr_urwlock_init(struct urwlock *rwl)
{
	static const struct urwlock default_rwl = DEFAULT_URWLOCK;

	*rwl = default_rwl;
}

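/*
 * Slow-path lock.  For a mutex without priority protection or priority
 * inheritance, sleep in the kernel with UMTX_OP_MUTEX_WAIT and then retry
 * the userland compare-and-set of m_owner until the lock is acquired.
 * PP/PI mutexes are handed to the kernel with UMTX_OP_MUTEX_LOCK, which
 * performs the whole operation.
 */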
int
__thr_umutex_lock(struct umutex *mtx, uint32_t id)
{
	uint32_t owner;

	if ((mtx->m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) == 0) {
		for (;;) {
			/* wait in kernel */
			_umtx_op_err(mtx, UMTX_OP_MUTEX_WAIT, 0, 0, 0);

			owner = mtx->m_owner;
			if ((owner & ~UMUTEX_CONTESTED) == 0 &&
			     atomic_cmpset_acq_32(&mtx->m_owner, owner, id|owner))
				return (0);
		}
	}

	return _umtx_op_err(mtx, UMTX_OP_MUTEX_LOCK, 0, 0, 0);
}

#define SPINLOOPS 1000

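/*
 * Variant of __thr_umutex_lock() that spins on m_owner for up to SPINLOOPS
 * iterations before sleeping in the kernel.  Spinning is only worthwhile
 * when another CPU can release the lock concurrently, so uniprocessor
 * systems fall straight back to the plain lock path.
 */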
int
__thr_umutex_lock_spin(struct umutex *mtx, uint32_t id)
{
	uint32_t owner;

	if (!_thr_is_smp)
		return __thr_umutex_lock(mtx, id);

	if ((mtx->m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) == 0) {
		for (;;) {
			int count = SPINLOOPS;
			while (count--) {
				owner = mtx->m_owner;
				if ((owner & ~UMUTEX_CONTESTED) == 0) {
					if (atomic_cmpset_acq_32(
					    &mtx->m_owner,
					    owner, id|owner)) {
						return (0);
					}
				}
				CPU_SPINWAIT;
			}

			/* wait in kernel */
			_umtx_op_err(mtx, UMTX_OP_MUTEX_WAIT, 0, 0, 0);
		}
	}

	return _umtx_op_err(mtx, UMTX_OP_MUTEX_LOCK, 0, 0, 0);
}

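/*
 * Lock with an optional absolute timeout.  The timeout is handed to the
 * kernel as a struct _umtx_time (CLOCK_REALTIME, UMTX_ABSTIME), with its
 * size passed through the uaddr argument, which is the convention the
 * umtx interface uses for _umtx_time timeouts.
 */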
int
__thr_umutex_timedlock(struct umutex *mtx, uint32_t id,
	const struct timespec *abstime)
{
	struct _umtx_time *tm_p, timeout;
	size_t tm_size;
	uint32_t owner;
	int ret;

	if (abstime == NULL) {
		tm_p = NULL;
		tm_size = 0;
	} else {
		timeout._clockid = CLOCK_REALTIME;
		timeout._flags = UMTX_ABSTIME;
		timeout._timeout = *abstime;
		tm_p = &timeout;
		tm_size = sizeof(timeout);
	}

	for (;;) {
		if ((mtx->m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) == 0) {
			/* wait in kernel */
			ret = _umtx_op_err(mtx, UMTX_OP_MUTEX_WAIT, 0,
				(void *)tm_size, __DECONST(void *, tm_p));

			/* now try to lock it */
			owner = mtx->m_owner;
			if ((owner & ~UMUTEX_CONTESTED) == 0 &&
			     atomic_cmpset_acq_32(&mtx->m_owner, owner, id|owner))
				return (0);
		} else {
			ret = _umtx_op_err(mtx, UMTX_OP_MUTEX_LOCK, 0,
				(void *)tm_size, __DECONST(void *, tm_p));
			if (ret == 0)
				break;
		}
		if (ret == ETIMEDOUT)
			break;
	}
	return (ret);
}

int
__thr_umutex_unlock(struct umutex *mtx, uint32_t id)
{
	return _umtx_op_err(mtx, UMTX_OP_MUTEX_UNLOCK, 0, 0, 0);
}

int
__thr_umutex_trylock(struct umutex *mtx)
{
	return _umtx_op_err(mtx, UMTX_OP_MUTEX_TRYLOCK, 0, 0, 0);
}

int
__thr_umutex_set_ceiling(struct umutex *mtx, uint32_t ceiling,
	uint32_t *oldceiling)
{
	return _umtx_op_err(mtx, UMTX_OP_SET_CEILING, ceiling, oldceiling, 0);
}

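/*
 * Sleep on a word as long as it still holds the expected value "id".
 * The timeout here is relative; a timeout that is already zero or
 * negative is reported as ETIMEDOUT without entering the kernel.
 */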
int
_thr_umtx_wait(volatile long *mtx, long id, const struct timespec *timeout)
{
	if (timeout && (timeout->tv_sec < 0 || (timeout->tv_sec == 0 &&
	    timeout->tv_nsec <= 0)))
		return (ETIMEDOUT);
	return _umtx_op_err(__DEVOLATILE(void *, mtx), UMTX_OP_WAIT, id, 0,
		__DECONST(void *, timeout));
}

int
_thr_umtx_wait_uint(volatile u_int *mtx, u_int id,
	const struct timespec *timeout, int shared)
{
	if (timeout && (timeout->tv_sec < 0 || (timeout->tv_sec == 0 &&
	    timeout->tv_nsec <= 0)))
		return (ETIMEDOUT);
	return _umtx_op_err(__DEVOLATILE(void *, mtx),
		shared ? UMTX_OP_WAIT_UINT : UMTX_OP_WAIT_UINT_PRIVATE, id, 0,
		__DECONST(void *, timeout));
}

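/*
 * Like _thr_umtx_wait_uint(), but with an absolute timeout against the
 * given clock, passed to the kernel as a struct _umtx_time.
 */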
int
_thr_umtx_timedwait_uint(volatile u_int *mtx, u_int id, int clockid,
	const struct timespec *abstime, int shared)
{
	struct _umtx_time *tm_p, timeout;
	size_t tm_size;

	if (abstime == NULL) {
		tm_p = NULL;
		tm_size = 0;
	} else {
		timeout._clockid = clockid;
		timeout._flags = UMTX_ABSTIME;
		timeout._timeout = *abstime;
		tm_p = &timeout;
		tm_size = sizeof(timeout);
	}

	return _umtx_op_err(__DEVOLATILE(void *, mtx),
		shared ? UMTX_OP_WAIT_UINT : UMTX_OP_WAIT_UINT_PRIVATE, id,
		(void *)tm_size, __DECONST(void *, tm_p));
}

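/*
 * Wake up at most nr_wakeup threads sleeping on the word.  Paired with
 * _thr_umtx_wait_uint() this gives the usual futex-style pattern.  An
 * illustrative sketch of a caller (not code from this file) might look
 * like:
 *
 *	while (atomic_cmpset_acq_32(&lk, 0, 1) == 0)	// try to take lock
 *		_thr_umtx_wait_uint(&lk, 1, NULL, 0);	// sleep while lk == 1
 *	// ... critical section ...
 *	atomic_store_rel_32(&lk, 0);			// release
 *	_thr_umtx_wake(&lk, 1, 0);			// wake one waiter
 */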
int
_thr_umtx_wake(volatile void *mtx, int nr_wakeup, int shared)
{
	return _umtx_op_err(__DEVOLATILE(void *, mtx),
		shared ? UMTX_OP_WAKE : UMTX_OP_WAKE_PRIVATE,
		nr_wakeup, 0, 0);
}

void
_thr_ucond_init(struct ucond *cv)
{
	bzero(cv, sizeof(struct ucond));
}

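/*
 * Wait on a condition variable.  The kernel drops the mutex "m" as part
 * of UMTX_OP_CV_WAIT, so when the timeout has already expired we unlock
 * the mutex ourselves before returning ETIMEDOUT, keeping the caller's
 * view of the mutex state consistent.
 */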
int
_thr_ucond_wait(struct ucond *cv, struct umutex *m,
	const struct timespec *timeout, int flags)
{
	if (timeout && (timeout->tv_sec < 0 || (timeout->tv_sec == 0 &&
	    timeout->tv_nsec <= 0))) {
		struct pthread *curthread = _get_curthread();

		_thr_umutex_unlock(m, TID(curthread));
		return (ETIMEDOUT);
	}
	return _umtx_op_err(cv, UMTX_OP_CV_WAIT, flags,
		m, __DECONST(void *, timeout));
}

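/*
 * Signal and broadcast only enter the kernel when c_has_waiters indicates
 * that somebody may actually be sleeping on the condition variable.
 */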
int
_thr_ucond_signal(struct ucond *cv)
{
	if (!cv->c_has_waiters)
		return (0);
	return _umtx_op_err(cv, UMTX_OP_CV_SIGNAL, 0, NULL, NULL);
}

int
_thr_ucond_broadcast(struct ucond *cv)
{
	if (!cv->c_has_waiters)
		return (0);
	return _umtx_op_err(cv, UMTX_OP_CV_BROADCAST, 0, NULL, NULL);
}

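/*
 * Kernel-assisted rwlock slow paths.  A NULL tsp means wait forever;
 * otherwise the timeout is absolute on CLOCK_REALTIME and is passed as a
 * struct _umtx_time, with its size going through the uaddr argument.
 */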
int
__thr_rwlock_rdlock(struct urwlock *rwlock, int flags,
	const struct timespec *tsp)
{
	struct _umtx_time timeout, *tm_p;
	size_t tm_size;

	if (tsp == NULL) {
		tm_p = NULL;
		tm_size = 0;
	} else {
		timeout._timeout = *tsp;
		timeout._flags = UMTX_ABSTIME;
		timeout._clockid = CLOCK_REALTIME;
		tm_p = &timeout;
		tm_size = sizeof(timeout);
	}
	return _umtx_op_err(rwlock, UMTX_OP_RW_RDLOCK, flags,
		(void *)tm_size, tm_p);
}

int
__thr_rwlock_wrlock(struct urwlock *rwlock, const struct timespec *tsp)
{
	struct _umtx_time timeout, *tm_p;
	size_t tm_size;

	if (tsp == NULL) {
		tm_p = NULL;
		tm_size = 0;
	} else {
		timeout._timeout = *tsp;
		timeout._flags = UMTX_ABSTIME;
		timeout._clockid = CLOCK_REALTIME;
		tm_p = &timeout;
		tm_size = sizeof(timeout);
	}
	return _umtx_op_err(rwlock, UMTX_OP_RW_WRLOCK, 0,
		(void *)tm_size, tm_p);
}

int
__thr_rwlock_unlock(struct urwlock *rwlock)
{
	return _umtx_op_err(rwlock, UMTX_OP_RW_UNLOCK, 0, NULL, NULL);
}

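/*
 * Blocking rwlock wrappers that never report an error to the caller:
 * they first attempt the userland try-lock, retry the kernel operation
 * if it is interrupted by a signal (EINTR), and PANIC on any other
 * unexpected failure.
 */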
void
_thr_rwl_rdlock(struct urwlock *rwlock)
{
	int ret;

	for (;;) {
		if (_thr_rwlock_tryrdlock(rwlock, URWLOCK_PREFER_READER) == 0)
			return;
		ret = __thr_rwlock_rdlock(rwlock, URWLOCK_PREFER_READER, NULL);
		if (ret == 0)
			return;
		if (ret != EINTR)
			PANIC("rdlock error");
	}
}

void
_thr_rwl_wrlock(struct urwlock *rwlock)
{
	int ret;

	for (;;) {
		if (_thr_rwlock_trywrlock(rwlock) == 0)
			return;
		ret = __thr_rwlock_wrlock(rwlock, NULL);
		if (ret == 0)
			return;
		if (ret != EINTR)
			PANIC("wrlock error");
	}
}

void
_thr_rwl_unlock(struct urwlock *rwlock)
{
	if (_thr_rwlock_unlock(rwlock))
		PANIC("unlock error");
}
344