thr_umtx.c revision 212076
/*
 * Copyright (c) 2005 David Xu <davidxu@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: head/lib/libthr/thread/thr_umtx.c 212076 2010-09-01 02:18:33Z davidxu $
 *
 */

#include "thr_private.h"
#include "thr_umtx.h"

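/*
 * Compatibility shim: when the build environment does not provide
 * _umtx_op_err(), emulate it with _umtx_op(2) and turn the usual
 * -1/errno convention into a direct error-number return.
 */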
#ifndef HAS__UMTX_OP_ERR
int
_umtx_op_err(void *obj, int op, u_long val, void *uaddr, void *uaddr2)
{
	if (_umtx_op(obj, op, val, uaddr, uaddr2) == -1)
		return (errno);
	return (0);
}
#endif

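/* Initialize a umutex to its default, unowned state. */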
void
_thr_umutex_init(struct umutex *mtx)
{
	static struct umutex default_mtx = DEFAULT_UMUTEX;

	*mtx = default_mtx;
}

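/*
 * Slow path for acquiring a umutex.  A plain mutex (no priority
 * inheritance or priority protection) sleeps in the kernel with
 * UMTX_OP_MUTEX_WAIT and then retries claiming the owner word with a
 * compare-and-set; PI/PP mutexes are locked entirely by the kernel.
 */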
int
__thr_umutex_lock(struct umutex *mtx, uint32_t id)
{
	uint32_t owner;

	if ((mtx->m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) == 0) {
		for (;;) {
			/* wait in kernel */
			_umtx_op_err(mtx, UMTX_OP_MUTEX_WAIT, 0, 0, 0);

			owner = mtx->m_owner;
			if ((owner & ~UMUTEX_CONTESTED) == 0 &&
			    atomic_cmpset_acq_32(&mtx->m_owner, owner, id|owner))
				return (0);
		}
	}

	return _umtx_op_err(mtx, UMTX_OP_MUTEX_LOCK, 0, 0, 0);
}

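/*
 * Timed flavour of the lock slow path.  The deadline "ets" is an
 * absolute CLOCK_REALTIME time; it is converted to a relative timeout
 * for the kernel and recomputed after every wakeup, so a spurious
 * wakeup cannot extend the total wait.
 */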
int
__thr_umutex_timedlock(struct umutex *mtx, uint32_t id,
	const struct timespec *ets)
{
	struct timespec timo, cts;
	uint32_t owner;
	int ret;

	clock_gettime(CLOCK_REALTIME, &cts);
	TIMESPEC_SUB(&timo, ets, &cts);

	if (timo.tv_sec < 0)
		return (ETIMEDOUT);

	for (;;) {
		if ((mtx->m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) == 0) {

			/* wait in kernel */
			ret = _umtx_op_err(mtx, UMTX_OP_MUTEX_WAIT, 0, 0, &timo);

			/* now try to lock it */
			owner = mtx->m_owner;
			if ((owner & ~UMUTEX_CONTESTED) == 0 &&
			    atomic_cmpset_acq_32(&mtx->m_owner, owner, id|owner))
				return (0);
		} else {
			ret = _umtx_op_err(mtx, UMTX_OP_MUTEX_LOCK, 0, 0, &timo);
			if (ret == 0)
				break;
		}
		if (ret == ETIMEDOUT)
			break;
		clock_gettime(CLOCK_REALTIME, &cts);
		TIMESPEC_SUB(&timo, ets, &cts);
		if (timo.tv_sec < 0 || (timo.tv_sec == 0 && timo.tv_nsec == 0)) {
			ret = ETIMEDOUT;
			break;
		}
	}
	return (ret);
}

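/*
 * Slow path for releasing a umutex.  A plain mutex is handed back in
 * userland by swapping the owner word to UMUTEX_CONTESTED and asking
 * the kernel to wake a waiter; PI/PP mutexes (and ia64, see the race
 * note below) leave the whole unlock to the kernel.
 */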
int
__thr_umutex_unlock(struct umutex *mtx, uint32_t id)
{
#ifndef __ia64__
	/* XXX this logic has a race-condition on ia64. */
	if ((mtx->m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) == 0) {
		atomic_cmpset_rel_32(&mtx->m_owner, id | UMUTEX_CONTESTED, UMUTEX_CONTESTED);
		return _umtx_op_err(mtx, UMTX_OP_MUTEX_WAKE, 0, 0, 0);
	}
#endif /* __ia64__ */
	return _umtx_op_err(mtx, UMTX_OP_MUTEX_UNLOCK, 0, 0, 0);
}

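/* Non-blocking lock attempt; the kernel resolves it in one shot. */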
int
__thr_umutex_trylock(struct umutex *mtx)
{
	return _umtx_op_err(mtx, UMTX_OP_MUTEX_TRYLOCK, 0, 0, 0);
}

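/*
 * Set the priority ceiling of a priority-protected mutex, reporting the
 * previous ceiling through "oldceiling".
 */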
int
__thr_umutex_set_ceiling(struct umutex *mtx, uint32_t ceiling,
	uint32_t *oldceiling)
{
	return _umtx_op_err(mtx, UMTX_OP_SET_CEILING, ceiling, oldceiling, 0);
}

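/*
 * Sleep until *mtx no longer contains the value "id" or the (relative)
 * timeout expires.  A timeout that is already zero or negative fails
 * immediately with ETIMEDOUT without entering the kernel.
 */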
int
_thr_umtx_wait(volatile long *mtx, long id, const struct timespec *timeout)
{
	if (timeout && (timeout->tv_sec < 0 || (timeout->tv_sec == 0 &&
		timeout->tv_nsec <= 0)))
		return (ETIMEDOUT);
	return _umtx_op_err(__DEVOLATILE(void *, mtx), UMTX_OP_WAIT, id, 0,
		__DECONST(void*, timeout));
}

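/*
 * Same as _thr_umtx_wait() but for a 32-bit word; "shared" selects the
 * process-shared wait queue instead of the cheaper process-private one.
 */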
int
_thr_umtx_wait_uint(volatile u_int *mtx, u_int id,
	const struct timespec *timeout, int shared)
{
	if (timeout && (timeout->tv_sec < 0 || (timeout->tv_sec == 0 &&
		timeout->tv_nsec <= 0)))
		return (ETIMEDOUT);
	return _umtx_op_err(__DEVOLATILE(void *, mtx),
		shared ? UMTX_OP_WAIT_UINT : UMTX_OP_WAIT_UINT_PRIVATE, id, 0,
		__DECONST(void*, timeout));
}

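/* Wake up to "nr_wakeup" threads sleeping on the address "mtx". */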
int
_thr_umtx_wake(volatile void *mtx, int nr_wakeup, int shared)
{
	return _umtx_op_err(__DEVOLATILE(void *, mtx),
		shared ? UMTX_OP_WAKE : UMTX_OP_WAKE_PRIVATE,
		nr_wakeup, 0, 0);
}

void
_thr_ucond_init(struct ucond *cv)
{
	bzero(cv, sizeof(struct ucond));
}

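/*
 * Queue the calling thread on the condition variable and release the
 * mutex; the kernel performs both steps atomically (UMTX_OP_CV_WAIT).
 * An already-expired timeout only drops the mutex and reports
 * ETIMEDOUT.
 */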
int
_thr_ucond_wait(struct ucond *cv, struct umutex *m,
	const struct timespec *timeout, int check_unparking)
{
	if (timeout && (timeout->tv_sec < 0 || (timeout->tv_sec == 0 &&
	    timeout->tv_nsec <= 0))) {
		struct pthread *curthread = _get_curthread();
		_thr_umutex_unlock(m, TID(curthread));
		return (ETIMEDOUT);
	}
	return _umtx_op_err(cv, UMTX_OP_CV_WAIT,
		     check_unparking ? UMTX_CHECK_UNPARKING : 0,
		     m, __DECONST(void*, timeout));
}

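/*
 * _thr_ucond_signal() and _thr_ucond_broadcast() skip the system call
 * entirely when no thread is blocked on the condition variable.
 */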
int
_thr_ucond_signal(struct ucond *cv)
{
	if (!cv->c_has_waiters)
		return (0);
	return _umtx_op_err(cv, UMTX_OP_CV_SIGNAL, 0, NULL, NULL);
}

int
_thr_ucond_broadcast(struct ucond *cv)
{
	if (!cv->c_has_waiters)
		return (0);
	return _umtx_op_err(cv, UMTX_OP_CV_BROADCAST, 0, NULL, NULL);
}

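/*
 * Thin wrappers around the kernel urwlock operations; "tsp" is an
 * optional timeout and NULL means block until the lock is acquired.
 */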
int
__thr_rwlock_rdlock(struct urwlock *rwlock, int flags, struct timespec *tsp)
{
	return _umtx_op_err(rwlock, UMTX_OP_RW_RDLOCK, flags, NULL, tsp);
}

int
__thr_rwlock_wrlock(struct urwlock *rwlock, struct timespec *tsp)
{
	return _umtx_op_err(rwlock, UMTX_OP_RW_WRLOCK, 0, NULL, tsp);
}

int
__thr_rwlock_unlock(struct urwlock *rwlock)
{
	return _umtx_op_err(rwlock, UMTX_OP_RW_UNLOCK, 0, NULL, NULL);
}

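/*
 * Internal read/write lock helpers used by the library itself: try the
 * userland fast path, otherwise block in the kernel, retrying on EINTR.
 * Any other error is fatal and aborts the process.
 */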
void
_thr_rwl_rdlock(struct urwlock *rwlock)
{
	int ret;

	for (;;) {
		if (_thr_rwlock_tryrdlock(rwlock, URWLOCK_PREFER_READER) == 0)
			return;
		ret = __thr_rwlock_rdlock(rwlock, URWLOCK_PREFER_READER, NULL);
		if (ret == 0)
			return;
		if (ret != EINTR)
			PANIC("rdlock error");
	}
}

void
_thr_rwl_wrlock(struct urwlock *rwlock)
{
	int ret;

	for (;;) {
		if (_thr_rwlock_trywrlock(rwlock) == 0)
			return;
		ret = __thr_rwlock_wrlock(rwlock, NULL);
		if (ret == 0)
			return;
		if (ret != EINTR)
			PANIC("wrlock error");
	}
}

void
_thr_rwl_unlock(struct urwlock *rwlock)
{
	if (_thr_rwlock_unlock(rwlock))
		PANIC("unlock error");
}