thr_umtx.c revision 231989
1144518Sdavidxu/*
2144518Sdavidxu * Copyright (c) 2005 David Xu <davidxu@freebsd.org>
3144518Sdavidxu * All rights reserved.
4144518Sdavidxu *
5144518Sdavidxu * Redistribution and use in source and binary forms, with or without
6144518Sdavidxu * modification, are permitted provided that the following conditions
7144518Sdavidxu * are met:
8144518Sdavidxu * 1. Redistributions of source code must retain the above copyright
9144518Sdavidxu *    notice unmodified, this list of conditions, and the following
10144518Sdavidxu *    disclaimer.
11144518Sdavidxu * 2. Redistributions in binary form must reproduce the above copyright
12144518Sdavidxu *    notice, this list of conditions and the following disclaimer in the
13144518Sdavidxu *    documentation and/or other materials provided with the distribution.
14144518Sdavidxu *
15144518Sdavidxu * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16144518Sdavidxu * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17144518Sdavidxu * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18144518Sdavidxu * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19144518Sdavidxu * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20144518Sdavidxu * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21144518Sdavidxu * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22144518Sdavidxu * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23144518Sdavidxu * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24144518Sdavidxu * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25144518Sdavidxu *
26144518Sdavidxu * $FreeBSD: head/lib/libthr/thread/thr_umtx.c 231989 2012-02-22 03:22:49Z davidxu $
27144518Sdavidxu *
28144518Sdavidxu */
29144518Sdavidxu
30144518Sdavidxu#include "thr_private.h"
31144518Sdavidxu#include "thr_umtx.h"
32144518Sdavidxu
33177853Sdavidxu#ifndef HAS__UMTX_OP_ERR
34177853Sdavidxuint _umtx_op_err(void *obj, int op, u_long val, void *uaddr, void *uaddr2)
35177853Sdavidxu{
36177853Sdavidxu	if (_umtx_op(obj, op, val, uaddr, uaddr2) == -1)
37177853Sdavidxu		return (errno);
38177853Sdavidxu	return (0);
39177853Sdavidxu}
40177853Sdavidxu#endif
41177853Sdavidxu
42163334Sdavidxuvoid
43163334Sdavidxu_thr_umutex_init(struct umutex *mtx)
44163334Sdavidxu{
45163334Sdavidxu	static struct umutex default_mtx = DEFAULT_UMUTEX;
46163334Sdavidxu
47163334Sdavidxu	*mtx = default_mtx;
48163334Sdavidxu}
49163334Sdavidxu
50212077Sdavidxuvoid
51212077Sdavidxu_thr_urwlock_init(struct urwlock *rwl)
52212077Sdavidxu{
53212077Sdavidxu	static struct urwlock default_rwl = DEFAULT_URWLOCK;
54212077Sdavidxu	*rwl = default_rwl;
55212077Sdavidxu}
56212077Sdavidxu
/*
 * Acquire a umutex, sleeping in the kernel as needed.
 *
 * For plain (non-priority-protocol) mutexes the lock word is managed
 * in userland: block in the kernel until the mutex may be free, then
 * try to install our thread id with a CAS.  Priority
 * inheritance/protection mutexes must be locked by the kernel so it
 * can track ownership.  Returns 0 or an errno value.
 */
int
__thr_umutex_lock(struct umutex *mtx, uint32_t id)
{
	uint32_t owner;

	if ((mtx->m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) == 0) {
		for (;;) {
			/* wait in kernel */
			_umtx_op_err(mtx, UMTX_OP_MUTEX_WAIT, 0, 0, 0);

			/*
			 * The lock is free when no owner tid is set (the
			 * contested bit may remain); preserve that bit
			 * while claiming ownership with an acquire CAS.
			 */
			owner = mtx->m_owner;
			if ((owner & ~UMUTEX_CONTESTED) == 0 &&
			     atomic_cmpset_acq_32(&mtx->m_owner, owner, id|owner))
				return (0);
		}
	}

	/* Priority-protocol mutexes are locked by the kernel. */
	return	_umtx_op_err(mtx, UMTX_OP_MUTEX_LOCK, 0, 0, 0);
}
76161680Sdavidxu
/* Userland spin iterations tried before each kernel sleep (SMP only). */
#define SPINLOOPS 1000

/*
 * Like __thr_umutex_lock(), but on SMP machines briefly spin in
 * userland before each kernel sleep, in the hope that the current
 * owner (running on another CPU) releases the lock soon.  On a
 * uniprocessor spinning cannot make progress, so fall back to the
 * plain blocking path immediately.  Returns 0 or an errno value.
 */
int
__thr_umutex_lock_spin(struct umutex *mtx, uint32_t id)
{
	uint32_t owner;

	if (!_thr_is_smp)
		return __thr_umutex_lock(mtx, id);

	if ((mtx->m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) == 0) {
		for (;;) {
			int count = SPINLOOPS;
			while (count--) {
				/*
				 * Free when no owner tid is set; keep the
				 * contested bit while claiming ownership.
				 */
				owner = mtx->m_owner;
				if ((owner & ~UMUTEX_CONTESTED) == 0) {
					if (atomic_cmpset_acq_32(
					    &mtx->m_owner,
					    owner, id|owner)) {
						return (0);
					}
				}
				CPU_SPINWAIT;
			}

			/* wait in kernel */
			_umtx_op_err(mtx, UMTX_OP_MUTEX_WAIT, 0, 0, 0);
		}
	}

	/* Priority-protocol mutexes are locked by the kernel. */
	return	_umtx_op_err(mtx, UMTX_OP_MUTEX_LOCK, 0, 0, 0);
}
109216641Sdavidxu
/*
 * Acquire a umutex with an absolute CLOCK_REALTIME deadline `ets'.
 * The kernel operations used here take a relative timeout, so the
 * remaining time is recomputed around every sleep.  Returns 0,
 * ETIMEDOUT, or another errno value from the kernel.
 */
int
__thr_umutex_timedlock(struct umutex *mtx, uint32_t id,
	const struct timespec *ets)
{
	struct timespec timo, cts;
	uint32_t owner;
	int ret;

	clock_gettime(CLOCK_REALTIME, &cts);
	TIMESPEC_SUB(&timo, ets, &cts);

	/* Deadline already in the past: fail without entering the kernel. */
	if (timo.tv_sec < 0)
		return (ETIMEDOUT);

	for (;;) {
		if ((mtx->m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) == 0) {

			/* wait in kernel */
			ret = _umtx_op_err(mtx, UMTX_OP_MUTEX_WAIT, 0, 0, &timo);

			/* now try to lock it */
			owner = mtx->m_owner;
			if ((owner & ~UMUTEX_CONTESTED) == 0 &&
			     atomic_cmpset_acq_32(&mtx->m_owner, owner, id|owner))
				return (0);
		} else {
			/* Priority-protocol mutexes are locked by the kernel. */
			ret = _umtx_op_err(mtx, UMTX_OP_MUTEX_LOCK, 0, 0, &timo);
			if (ret == 0)
				break;
		}
		if (ret == ETIMEDOUT)
			break;
		/* Recompute the relative timeout for the next attempt. */
		clock_gettime(CLOCK_REALTIME, &cts);
		TIMESPEC_SUB(&timo, ets, &cts);
		if (timo.tv_sec < 0 || (timo.tv_sec == 0 && timo.tv_nsec == 0)) {
			ret = ETIMEDOUT;
			break;
		}
	}
	return (ret);
}
151161680Sdavidxu
/*
 * Slow-path release of a umutex.  For plain mutexes, drop our owner
 * tid in userland (the CAS expects id|UMUTEX_CONTESTED, i.e. this
 * path presumes a contested lock word) and then wake a kernel waiter.
 * Priority-protocol mutexes must be unlocked by the kernel.
 */
int
__thr_umutex_unlock(struct umutex *mtx, uint32_t id)
{
#ifndef __ia64__
	/* XXX this logic has a race-condition on ia64. */
	if ((mtx->m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) == 0) {
		atomic_cmpset_rel_32(&mtx->m_owner, id | UMUTEX_CONTESTED, UMUTEX_CONTESTED);
		return _umtx_op_err(mtx, UMTX_OP_MUTEX_WAKE, 0, 0, 0);
	}
#endif /* __ia64__ */
	return _umtx_op_err(mtx, UMTX_OP_MUTEX_UNLOCK, 0, 0, 0);
}
164161680Sdavidxu
165161680Sdavidxuint
166163334Sdavidxu__thr_umutex_trylock(struct umutex *mtx)
167161680Sdavidxu{
168177853Sdavidxu	return _umtx_op_err(mtx, UMTX_OP_MUTEX_TRYLOCK, 0, 0, 0);
169161680Sdavidxu}
170161680Sdavidxu
171161680Sdavidxuint
172161680Sdavidxu__thr_umutex_set_ceiling(struct umutex *mtx, uint32_t ceiling,
173161680Sdavidxu	uint32_t *oldceiling)
174161680Sdavidxu{
175177853Sdavidxu	return _umtx_op_err(mtx, UMTX_OP_SET_CEILING, ceiling, oldceiling, 0);
176161680Sdavidxu}
177161680Sdavidxu
178161680Sdavidxuint
179173801Sdavidxu_thr_umtx_wait(volatile long *mtx, long id, const struct timespec *timeout)
180144518Sdavidxu{
181144518Sdavidxu	if (timeout && (timeout->tv_sec < 0 || (timeout->tv_sec == 0 &&
182144518Sdavidxu		timeout->tv_nsec <= 0)))
183144518Sdavidxu		return (ETIMEDOUT);
184177853Sdavidxu	return _umtx_op_err(__DEVOLATILE(void *, mtx), UMTX_OP_WAIT, id, 0,
185177853Sdavidxu		__DECONST(void*, timeout));
186144518Sdavidxu}
187144518Sdavidxu
188144518Sdavidxuint
189178647Sdavidxu_thr_umtx_wait_uint(volatile u_int *mtx, u_int id, const struct timespec *timeout, int shared)
190144518Sdavidxu{
191173801Sdavidxu	if (timeout && (timeout->tv_sec < 0 || (timeout->tv_sec == 0 &&
192173801Sdavidxu		timeout->tv_nsec <= 0)))
193173801Sdavidxu		return (ETIMEDOUT);
194178647Sdavidxu	return _umtx_op_err(__DEVOLATILE(void *, mtx),
195178647Sdavidxu			shared ? UMTX_OP_WAIT_UINT : UMTX_OP_WAIT_UINT_PRIVATE, id, 0,
196178647Sdavidxu			__DECONST(void*, timeout));
197173801Sdavidxu}
198173801Sdavidxu
199173801Sdavidxuint
200216641Sdavidxu_thr_umtx_timedwait_uint(volatile u_int *mtx, u_int id, int clockid,
201216641Sdavidxu	const struct timespec *abstime, int shared)
202216641Sdavidxu{
203216641Sdavidxu	return _umtx_op_err(__DEVOLATILE(void *, mtx),
204231989Sdavidxu		shared ? UMTX_OP_WAIT_UINT : UMTX_OP_WAIT_UINT_PRIVATE, id,
205231989Sdavidxu	       	abstime != NULL ? (void *)(uintptr_t)((clockid << 16) | UMTX_WAIT_ABSTIME) : 0,
206231989Sdavidxu		__DECONST(void *, abstime));
207216641Sdavidxu}
208216641Sdavidxu
209216641Sdavidxuint
210178647Sdavidxu_thr_umtx_wake(volatile void *mtx, int nr_wakeup, int shared)
211173801Sdavidxu{
212178647Sdavidxu	return _umtx_op_err(__DEVOLATILE(void *, mtx), shared ? UMTX_OP_WAKE : UMTX_OP_WAKE_PRIVATE,
213177853Sdavidxu		nr_wakeup, 0, 0);
214144518Sdavidxu}
215164877Sdavidxu
216164902Sdavidxuvoid
217164902Sdavidxu_thr_ucond_init(struct ucond *cv)
218164902Sdavidxu{
219164902Sdavidxu	bzero(cv, sizeof(struct ucond));
220164902Sdavidxu}
221164902Sdavidxu
222164877Sdavidxuint
223164877Sdavidxu_thr_ucond_wait(struct ucond *cv, struct umutex *m,
224227604Sdavidxu	const struct timespec *timeout, int flags)
225164877Sdavidxu{
226164877Sdavidxu	if (timeout && (timeout->tv_sec < 0 || (timeout->tv_sec == 0 &&
227164877Sdavidxu	    timeout->tv_nsec <= 0))) {
228179970Sdavidxu		struct pthread *curthread = _get_curthread();
229179970Sdavidxu		_thr_umutex_unlock(m, TID(curthread));
230164877Sdavidxu                return (ETIMEDOUT);
231164877Sdavidxu	}
232227604Sdavidxu	return _umtx_op_err(cv, UMTX_OP_CV_WAIT, flags,
233177853Sdavidxu		     m, __DECONST(void*, timeout));
234164877Sdavidxu}
235164877Sdavidxu
236164877Sdavidxuint
237164877Sdavidxu_thr_ucond_signal(struct ucond *cv)
238164877Sdavidxu{
239165110Sdavidxu	if (!cv->c_has_waiters)
240165110Sdavidxu		return (0);
241177853Sdavidxu	return _umtx_op_err(cv, UMTX_OP_CV_SIGNAL, 0, NULL, NULL);
242164877Sdavidxu}
243164877Sdavidxu
244164877Sdavidxuint
245164877Sdavidxu_thr_ucond_broadcast(struct ucond *cv)
246164877Sdavidxu{
247165110Sdavidxu	if (!cv->c_has_waiters)
248165110Sdavidxu		return (0);
249177853Sdavidxu	return _umtx_op_err(cv, UMTX_OP_CV_BROADCAST, 0, NULL, NULL);
250164877Sdavidxu}
251177850Sdavidxu
252177850Sdavidxuint
253177850Sdavidxu__thr_rwlock_rdlock(struct urwlock *rwlock, int flags, struct timespec *tsp)
254177850Sdavidxu{
255177853Sdavidxu	return _umtx_op_err(rwlock, UMTX_OP_RW_RDLOCK, flags, NULL, tsp);
256177850Sdavidxu}
257177850Sdavidxu
258177850Sdavidxuint
259177850Sdavidxu__thr_rwlock_wrlock(struct urwlock *rwlock, struct timespec *tsp)
260177850Sdavidxu{
261177853Sdavidxu	return _umtx_op_err(rwlock, UMTX_OP_RW_WRLOCK, 0, NULL, tsp);
262177850Sdavidxu}
263177850Sdavidxu
264177850Sdavidxuint
265177850Sdavidxu__thr_rwlock_unlock(struct urwlock *rwlock)
266177850Sdavidxu{
267177853Sdavidxu	return _umtx_op_err(rwlock, UMTX_OP_RW_UNLOCK, 0, NULL, NULL);
268177850Sdavidxu}
269212076Sdavidxu
270212076Sdavidxuvoid
271212076Sdavidxu_thr_rwl_rdlock(struct urwlock *rwlock)
272212076Sdavidxu{
273212076Sdavidxu	int ret;
274212076Sdavidxu
275212076Sdavidxu	for (;;) {
276212076Sdavidxu		if (_thr_rwlock_tryrdlock(rwlock, URWLOCK_PREFER_READER) == 0)
277212076Sdavidxu			return;
278212076Sdavidxu		ret = __thr_rwlock_rdlock(rwlock, URWLOCK_PREFER_READER, NULL);
279212076Sdavidxu		if (ret == 0)
280212076Sdavidxu			return;
281212076Sdavidxu		if (ret != EINTR)
282212076Sdavidxu			PANIC("rdlock error");
283212076Sdavidxu	}
284212076Sdavidxu}
285212076Sdavidxu
286212076Sdavidxuvoid
287212076Sdavidxu_thr_rwl_wrlock(struct urwlock *rwlock)
288212076Sdavidxu{
289212076Sdavidxu	int ret;
290212076Sdavidxu
291212076Sdavidxu	for (;;) {
292212076Sdavidxu		if (_thr_rwlock_trywrlock(rwlock) == 0)
293212076Sdavidxu			return;
294212076Sdavidxu		ret = __thr_rwlock_wrlock(rwlock, NULL);
295212076Sdavidxu		if (ret == 0)
296212076Sdavidxu			return;
297212076Sdavidxu		if (ret != EINTR)
298212076Sdavidxu			PANIC("wrlock error");
299212076Sdavidxu	}
300212076Sdavidxu}
301212076Sdavidxu
/*
 * Internal unlock for _thr_rwl_rdlock()/_thr_rwl_wrlock(); any
 * failure is fatal to the process.
 */
void
_thr_rwl_unlock(struct urwlock *rwlock)
{
	if (_thr_rwlock_unlock(rwlock) != 0)
		PANIC("unlock error");
}
308