/*
 * Copyright (c) 2005 David Xu <davidxu@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: head/lib/libthr/thread/thr_umtx.c 233134 2012-03-19 00:07:10Z davidxu $
 *
 */

#include "thr_private.h"
#include "thr_umtx.h"

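/*
 * Compatibility shim: when the system does not provide _umtx_op_err()
 * (HAS__UMTX_OP_ERR undefined), emulate it with plain _umtx_op(2) by
 * converting the -1/errno failure convention into a directly returned
 * error number.
 */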
#ifndef HAS__UMTX_OP_ERR
int _umtx_op_err(void *obj, int op, u_long val, void *uaddr, void *uaddr2)
{
	if (_umtx_op(obj, op, val, uaddr, uaddr2) == -1)
		return (errno);
	return (0);
}
#endif

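/*
 * Reset a umutex or urwlock to its statically defined default
 * (unlocked) state.
 */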
void
_thr_umutex_init(struct umutex *mtx)
{
	static struct umutex default_mtx = DEFAULT_UMUTEX;

	*mtx = default_mtx;
}

void
_thr_urwlock_init(struct urwlock *rwl)
{
	static struct urwlock default_rwl = DEFAULT_URWLOCK;

	*rwl = default_rwl;
}

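/*
 * Acquire a umutex, sleeping in the kernel as needed.  For plain
 * (non priority-protected, non priority-inheriting) mutexes the owner
 * word is managed here in userland: UMTX_OP_MUTEX_WAIT puts the thread
 * to sleep while the mutex is held, and a compare-and-set then tries to
 * install our thread id as the owner once it looks free.  PP/PI mutexes
 * are handed to the kernel entirely via UMTX_OP_MUTEX_LOCK.  Typically
 * reached from the pthread_mutex code once its userland fast path has
 * failed.
 */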
int
__thr_umutex_lock(struct umutex *mtx, uint32_t id)
{
	uint32_t owner;

	if ((mtx->m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) == 0) {
		for (;;) {
			/* wait in kernel */
			_umtx_op_err(mtx, UMTX_OP_MUTEX_WAIT, 0, 0, 0);

			owner = mtx->m_owner;
			if ((owner & ~UMUTEX_CONTESTED) == 0 &&
			     atomic_cmpset_acq_32(&mtx->m_owner, owner, id|owner))
				return (0);
		}
	}

	return (_umtx_op_err(mtx, UMTX_OP_MUTEX_LOCK, 0, 0, 0));
}

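/*
 * Adaptive variant of __thr_umutex_lock(): on SMP systems, spin for up
 * to SPINLOOPS iterations trying to grab the mutex before falling back
 * to sleeping in the kernel.  On uniprocessor systems spinning cannot
 * help, so this degenerates to the plain lock routine.
 */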
#define SPINLOOPS 1000

int
__thr_umutex_lock_spin(struct umutex *mtx, uint32_t id)
{
	uint32_t owner;

	if (!_thr_is_smp)
		return (__thr_umutex_lock(mtx, id));

	if ((mtx->m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) == 0) {
		for (;;) {
			int count = SPINLOOPS;
			while (count--) {
				owner = mtx->m_owner;
				if ((owner & ~UMUTEX_CONTESTED) == 0) {
					if (atomic_cmpset_acq_32(
					    &mtx->m_owner,
					    owner, id|owner)) {
						return (0);
					}
				}
				CPU_SPINWAIT;
			}

			/* wait in kernel */
			_umtx_op_err(mtx, UMTX_OP_MUTEX_WAIT, 0, 0, 0);
		}
	}

	return (_umtx_op_err(mtx, UMTX_OP_MUTEX_LOCK, 0, 0, 0));
}

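/*
 * Timed version of __thr_umutex_lock().  The absolute CLOCK_REALTIME
 * deadline, if any, is packed into a struct _umtx_time with UMTX_ABSTIME
 * set; its size travels in the uaddr1 argument and its address in
 * uaddr2.  Loops until the lock is acquired or the wait times out.
 */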
int
__thr_umutex_timedlock(struct umutex *mtx, uint32_t id,
	const struct timespec *abstime)
{
	struct _umtx_time *tm_p, timeout;
	size_t tm_size;
	uint32_t owner;
	int ret;

	if (abstime == NULL) {
		tm_p = NULL;
		tm_size = 0;
	} else {
		timeout._clockid = CLOCK_REALTIME;
		timeout._flags = UMTX_ABSTIME;
		timeout._timeout = *abstime;
		tm_p = &timeout;
		tm_size = sizeof(timeout);
	}

	for (;;) {
		if ((mtx->m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) == 0) {

			/* wait in kernel */
			ret = _umtx_op_err(mtx, UMTX_OP_MUTEX_WAIT, 0,
				 (void *)tm_size, __DECONST(void *, tm_p));

			/* now try to lock it */
			owner = mtx->m_owner;
			if ((owner & ~UMUTEX_CONTESTED) == 0 &&
			     atomic_cmpset_acq_32(&mtx->m_owner, owner, id|owner))
				return (0);
		} else {
			ret = _umtx_op_err(mtx, UMTX_OP_MUTEX_LOCK, 0,
				 (void *)tm_size, __DECONST(void *, tm_p));
			if (ret == 0)
				break;
		}
		if (ret == ETIMEDOUT)
			break;
	}
	return (ret);
}

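/*
 * Unlock, trylock and the priority-ceiling update are handled entirely
 * by the kernel; these are thin wrappers around the corresponding
 * _umtx_op(2) requests.  The thread id passed to __thr_umutex_unlock()
 * is currently unused.
 */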
int
__thr_umutex_unlock(struct umutex *mtx, uint32_t id)
{
	return (_umtx_op_err(mtx, UMTX_OP_MUTEX_UNLOCK, 0, 0, 0));
}

int
__thr_umutex_trylock(struct umutex *mtx)
{
	return (_umtx_op_err(mtx, UMTX_OP_MUTEX_TRYLOCK, 0, 0, 0));
}

int
__thr_umutex_set_ceiling(struct umutex *mtx, uint32_t ceiling,
	uint32_t *oldceiling)
{
	return (_umtx_op_err(mtx, UMTX_OP_SET_CEILING, ceiling, oldceiling, 0));
}

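/*
 * Sleep in the kernel while the word at *mtx still contains the expected
 * value 'id', until woken by _thr_umtx_wake() or the relative timeout
 * expires.  A timeout that is already zero or negative fails immediately
 * with ETIMEDOUT instead of entering the kernel.  The uint variant uses
 * the process-private wait queue when 'shared' is zero.
 */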
int
_thr_umtx_wait(volatile long *mtx, long id, const struct timespec *timeout)
{
	if (timeout && (timeout->tv_sec < 0 || (timeout->tv_sec == 0 &&
		timeout->tv_nsec <= 0)))
		return (ETIMEDOUT);
	return (_umtx_op_err(__DEVOLATILE(void *, mtx), UMTX_OP_WAIT, id, 0,
		__DECONST(void *, timeout)));
}

int
_thr_umtx_wait_uint(volatile u_int *mtx, u_int id, const struct timespec *timeout, int shared)
{
	if (timeout && (timeout->tv_sec < 0 || (timeout->tv_sec == 0 &&
		timeout->tv_nsec <= 0)))
		return (ETIMEDOUT);
	return (_umtx_op_err(__DEVOLATILE(void *, mtx),
		shared ? UMTX_OP_WAIT_UINT : UMTX_OP_WAIT_UINT_PRIVATE, id, 0,
		__DECONST(void *, timeout)));
}

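/*
 * Absolute-deadline variant of _thr_umtx_wait_uint(): the caller supplies
 * the clock to measure the deadline against, and the timeout is passed as
 * a struct _umtx_time with UMTX_ABSTIME, size in uaddr1 and address in
 * uaddr2, the same convention used by __thr_umutex_timedlock().
 */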
int
_thr_umtx_timedwait_uint(volatile u_int *mtx, u_int id, int clockid,
	const struct timespec *abstime, int shared)
{
	struct _umtx_time *tm_p, timeout;
	size_t tm_size;

	if (abstime == NULL) {
		tm_p = NULL;
		tm_size = 0;
	} else {
		timeout._clockid = clockid;
		timeout._flags = UMTX_ABSTIME;
		timeout._timeout = *abstime;
		tm_p = &timeout;
		tm_size = sizeof(timeout);
	}

	return (_umtx_op_err(__DEVOLATILE(void *, mtx),
		shared ? UMTX_OP_WAIT_UINT : UMTX_OP_WAIT_UINT_PRIVATE, id,
		(void *)tm_size, __DECONST(void *, tm_p)));
}

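/*
 * Wake up to nr_wakeup threads sleeping on the word at mtx, using the
 * shared or process-private wait queue to match the corresponding wait.
 */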
int
_thr_umtx_wake(volatile void *mtx, int nr_wakeup, int shared)
{
	return (_umtx_op_err(__DEVOLATILE(void *, mtx),
		shared ? UMTX_OP_WAKE : UMTX_OP_WAKE_PRIVATE,
		nr_wakeup, 0, 0));
}

void
_thr_ucond_init(struct ucond *cv)
{
	bzero(cv, sizeof(struct ucond));
}

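/*
 * Atomically release the umutex 'm' and wait on the condition variable.
 * A timeout that is already zero or negative is reported as ETIMEDOUT,
 * but the mutex is still dropped so the caller sees the usual
 * condition-wait semantics.
 */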
int
_thr_ucond_wait(struct ucond *cv, struct umutex *m,
	const struct timespec *timeout, int flags)
{
	if (timeout && (timeout->tv_sec < 0 || (timeout->tv_sec == 0 &&
	    timeout->tv_nsec <= 0))) {
		struct pthread *curthread = _get_curthread();
		_thr_umutex_unlock(m, TID(curthread));
		return (ETIMEDOUT);
	}
	return (_umtx_op_err(cv, UMTX_OP_CV_WAIT, flags,
	    m, __DECONST(void *, timeout)));
}

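/*
 * Signal and broadcast only enter the kernel when c_has_waiters suggests
 * someone may actually be sleeping on the condition variable; the common
 * uncontested case stays entirely in userland.
 */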
int
_thr_ucond_signal(struct ucond *cv)
{
	if (!cv->c_has_waiters)
		return (0);
	return (_umtx_op_err(cv, UMTX_OP_CV_SIGNAL, 0, NULL, NULL));
}

int
_thr_ucond_broadcast(struct ucond *cv)
{
	if (!cv->c_has_waiters)
		return (0);
	return (_umtx_op_err(cv, UMTX_OP_CV_BROADCAST, 0, NULL, NULL));
}

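/*
 * Kernel-assisted read/write lock acquisition.  As with the mutex paths,
 * an optional absolute CLOCK_REALTIME deadline is passed as a
 * struct _umtx_time, with its size in uaddr1 and its address in uaddr2.
 */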
int
__thr_rwlock_rdlock(struct urwlock *rwlock, int flags,
	const struct timespec *tsp)
{
	struct _umtx_time timeout, *tm_p;
	size_t tm_size;

	if (tsp == NULL) {
		tm_p = NULL;
		tm_size = 0;
	} else {
		timeout._timeout = *tsp;
		timeout._flags = UMTX_ABSTIME;
		timeout._clockid = CLOCK_REALTIME;
		tm_p = &timeout;
		tm_size = sizeof(timeout);
	}
	return (_umtx_op_err(rwlock, UMTX_OP_RW_RDLOCK, flags,
	    (void *)tm_size, tm_p));
}

int
__thr_rwlock_wrlock(struct urwlock *rwlock, const struct timespec *tsp)
{
	struct _umtx_time timeout, *tm_p;
	size_t tm_size;

	if (tsp == NULL) {
		tm_p = NULL;
		tm_size = 0;
	} else {
		timeout._timeout = *tsp;
		timeout._flags = UMTX_ABSTIME;
		timeout._clockid = CLOCK_REALTIME;
		tm_p = &timeout;
		tm_size = sizeof(timeout);
	}
	return (_umtx_op_err(rwlock, UMTX_OP_RW_WRLOCK, 0,
	    (void *)tm_size, tm_p));
}

int
__thr_rwlock_unlock(struct urwlock *rwlock)
{
	return (_umtx_op_err(rwlock, UMTX_OP_RW_UNLOCK, 0, NULL, NULL));
}

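/*
 * Convenience wrappers used for the library's internal urwlocks: retry
 * the slow path if the kernel wait is interrupted by a signal (EINTR)
 * and abort the process on any other unexpected error.
 */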
void
_thr_rwl_rdlock(struct urwlock *rwlock)
{
	int ret;

	for (;;) {
		if (_thr_rwlock_tryrdlock(rwlock, URWLOCK_PREFER_READER) == 0)
			return;
		ret = __thr_rwlock_rdlock(rwlock, URWLOCK_PREFER_READER, NULL);
		if (ret == 0)
			return;
		if (ret != EINTR)
			PANIC("rdlock error");
	}
}

void
_thr_rwl_wrlock(struct urwlock *rwlock)
{
	int ret;

	for (;;) {
		if (_thr_rwlock_trywrlock(rwlock) == 0)
			return;
		ret = __thr_rwlock_wrlock(rwlock, NULL);
		if (ret == 0)
			return;
		if (ret != EINTR)
			PANIC("wrlock error");
	}
}

void
_thr_rwl_unlock(struct urwlock *rwlock)
{
	if (_thr_rwlock_unlock(rwlock))
		PANIC("unlock error");
}
342212076Sdavidxu}
343