1144518Sdavidxu/*
2144518Sdavidxu * Copyright (c) 2005 David Xu <davidxu@freebsd.org>
3144518Sdavidxu * All rights reserved.
4144518Sdavidxu *
5144518Sdavidxu * Redistribution and use in source and binary forms, with or without
6144518Sdavidxu * modification, are permitted provided that the following conditions
7144518Sdavidxu * are met:
8144518Sdavidxu * 1. Redistributions of source code must retain the above copyright
9144518Sdavidxu *    notice unmodified, this list of conditions, and the following
10144518Sdavidxu *    disclaimer.
11144518Sdavidxu * 2. Redistributions in binary form must reproduce the above copyright
12144518Sdavidxu *    notice, this list of conditions and the following disclaimer in the
13144518Sdavidxu *    documentation and/or other materials provided with the distribution.
14144518Sdavidxu *
15144518Sdavidxu * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16144518Sdavidxu * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17144518Sdavidxu * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18144518Sdavidxu * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19144518Sdavidxu * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20144518Sdavidxu * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21144518Sdavidxu * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22144518Sdavidxu * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23144518Sdavidxu * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24144518Sdavidxu * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25144518Sdavidxu *
26144518Sdavidxu * $FreeBSD$
27144518Sdavidxu *
28144518Sdavidxu */
29144518Sdavidxu
30144518Sdavidxu#include "thr_private.h"
31144518Sdavidxu#include "thr_umtx.h"
32144518Sdavidxu
33177853Sdavidxu#ifndef HAS__UMTX_OP_ERR
34177853Sdavidxuint _umtx_op_err(void *obj, int op, u_long val, void *uaddr, void *uaddr2)
35177853Sdavidxu{
36177853Sdavidxu	if (_umtx_op(obj, op, val, uaddr, uaddr2) == -1)
37177853Sdavidxu		return (errno);
38177853Sdavidxu	return (0);
39177853Sdavidxu}
40177853Sdavidxu#endif
41177853Sdavidxu
42163334Sdavidxuvoid
43163334Sdavidxu_thr_umutex_init(struct umutex *mtx)
44163334Sdavidxu{
45163334Sdavidxu	static struct umutex default_mtx = DEFAULT_UMUTEX;
46163334Sdavidxu
47163334Sdavidxu	*mtx = default_mtx;
48163334Sdavidxu}
49163334Sdavidxu
50212077Sdavidxuvoid
51212077Sdavidxu_thr_urwlock_init(struct urwlock *rwl)
52212077Sdavidxu{
53212077Sdavidxu	static struct urwlock default_rwl = DEFAULT_URWLOCK;
54212077Sdavidxu	*rwl = default_rwl;
55212077Sdavidxu}
56212077Sdavidxu
57144518Sdavidxuint
58179970Sdavidxu__thr_umutex_lock(struct umutex *mtx, uint32_t id)
59161680Sdavidxu{
60179970Sdavidxu	uint32_t owner;
61179970Sdavidxu
62179970Sdavidxu	if ((mtx->m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) == 0) {
63179970Sdavidxu		for (;;) {
64179970Sdavidxu			/* wait in kernel */
65179970Sdavidxu			_umtx_op_err(mtx, UMTX_OP_MUTEX_WAIT, 0, 0, 0);
66179970Sdavidxu
67179970Sdavidxu			owner = mtx->m_owner;
68179970Sdavidxu			if ((owner & ~UMUTEX_CONTESTED) == 0 &&
69179970Sdavidxu			     atomic_cmpset_acq_32(&mtx->m_owner, owner, id|owner))
70179970Sdavidxu				return (0);
71179970Sdavidxu		}
72179970Sdavidxu	}
73179970Sdavidxu
74179970Sdavidxu	return	_umtx_op_err(mtx, UMTX_OP_MUTEX_LOCK, 0, 0, 0);
75161680Sdavidxu}
76161680Sdavidxu
77216641Sdavidxu#define SPINLOOPS 1000
78216641Sdavidxu
79161680Sdavidxuint
80216641Sdavidxu__thr_umutex_lock_spin(struct umutex *mtx, uint32_t id)
81216641Sdavidxu{
82216641Sdavidxu	uint32_t owner;
83216641Sdavidxu
84216641Sdavidxu	if (!_thr_is_smp)
85216641Sdavidxu		return __thr_umutex_lock(mtx, id);
86216641Sdavidxu
87216641Sdavidxu	if ((mtx->m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) == 0) {
88216641Sdavidxu		for (;;) {
89216641Sdavidxu			int count = SPINLOOPS;
90216641Sdavidxu			while (count--) {
91216641Sdavidxu				owner = mtx->m_owner;
92216641Sdavidxu				if ((owner & ~UMUTEX_CONTESTED) == 0) {
93216641Sdavidxu					if (atomic_cmpset_acq_32(
94216641Sdavidxu					    &mtx->m_owner,
95216641Sdavidxu					    owner, id|owner)) {
96216641Sdavidxu						return (0);
97216641Sdavidxu					}
98216641Sdavidxu				}
99216641Sdavidxu				CPU_SPINWAIT;
100216641Sdavidxu			}
101216641Sdavidxu
102216641Sdavidxu			/* wait in kernel */
103216641Sdavidxu			_umtx_op_err(mtx, UMTX_OP_MUTEX_WAIT, 0, 0, 0);
104216641Sdavidxu		}
105216641Sdavidxu	}
106216641Sdavidxu
107216641Sdavidxu	return	_umtx_op_err(mtx, UMTX_OP_MUTEX_LOCK, 0, 0, 0);
108216641Sdavidxu}
109216641Sdavidxu
110216641Sdavidxuint
111179970Sdavidxu__thr_umutex_timedlock(struct umutex *mtx, uint32_t id,
112179970Sdavidxu	const struct timespec *ets)
113161680Sdavidxu{
114179970Sdavidxu	struct timespec timo, cts;
115179970Sdavidxu	uint32_t owner;
116179970Sdavidxu	int ret;
117179970Sdavidxu
118179970Sdavidxu	clock_gettime(CLOCK_REALTIME, &cts);
119179970Sdavidxu	TIMESPEC_SUB(&timo, ets, &cts);
120179970Sdavidxu
121179970Sdavidxu	if (timo.tv_sec < 0)
122161680Sdavidxu		return (ETIMEDOUT);
123179970Sdavidxu
124179970Sdavidxu	for (;;) {
125179970Sdavidxu		if ((mtx->m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) == 0) {
126179970Sdavidxu
127179970Sdavidxu			/* wait in kernel */
128179970Sdavidxu			ret = _umtx_op_err(mtx, UMTX_OP_MUTEX_WAIT, 0, 0, &timo);
129179970Sdavidxu
130179970Sdavidxu			/* now try to lock it */
131179970Sdavidxu			owner = mtx->m_owner;
132179970Sdavidxu			if ((owner & ~UMUTEX_CONTESTED) == 0 &&
133179970Sdavidxu			     atomic_cmpset_acq_32(&mtx->m_owner, owner, id|owner))
134179970Sdavidxu				return (0);
135179970Sdavidxu		} else {
136179970Sdavidxu			ret = _umtx_op_err(mtx, UMTX_OP_MUTEX_LOCK, 0, 0, &timo);
137179970Sdavidxu			if (ret == 0)
138179970Sdavidxu				break;
139179970Sdavidxu		}
140179970Sdavidxu		if (ret == ETIMEDOUT)
141179970Sdavidxu			break;
142179970Sdavidxu		clock_gettime(CLOCK_REALTIME, &cts);
143179970Sdavidxu		TIMESPEC_SUB(&timo, ets, &cts);
144179970Sdavidxu		if (timo.tv_sec < 0 || (timo.tv_sec == 0 && timo.tv_nsec == 0)) {
145179970Sdavidxu			ret = ETIMEDOUT;
146179970Sdavidxu			break;
147179970Sdavidxu		}
148179970Sdavidxu	}
149179970Sdavidxu	return (ret);
150161680Sdavidxu}
151161680Sdavidxu
/*
 * Release a umutex held by thread `id`.  Prefers a userland release with
 * a UMTX_OP_MUTEX_WAKE2 wakeup when the kernel supports that operation;
 * otherwise falls back to UMTX_OP_MUTEX_UNLOCK.  Returns 0 on success,
 * EPERM if the caller does not own the mutex (fast path only), or an
 * errno value from the kernel.
 */
int
__thr_umutex_unlock(struct umutex *mtx, uint32_t id)
{
	/*
	 * Cached probe result: 0 = not yet probed, 1 = WAKE2 available,
	 * -1 = unavailable.  The unsynchronized write is a benign race:
	 * every thread that probes computes the same value.
	 */
	static int wake2_avail = 0;

	if (__predict_false(wake2_avail == 0)) {
		/* Probe on a throwaway mutex so mtx's state is untouched. */
		struct umutex test = DEFAULT_UMUTEX;

		if (_umtx_op(&test, UMTX_OP_MUTEX_WAKE2, test.m_flags, 0, 0) == -1)
			wake2_avail = -1;
		else
			wake2_avail = 1;
	}

	if (wake2_avail != 1)
		goto unlock;

	uint32_t flags = mtx->m_flags;

	/* Fast path: plain mutexes can be released entirely in userland. */
	if ((flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) == 0) {
		uint32_t owner;
		do {
			owner = mtx->m_owner;
			/* Only the owning thread may unlock. */
			if (__predict_false((owner & ~UMUTEX_CONTESTED) != id))
				return (EPERM);
		} while (__predict_false(!atomic_cmpset_rel_32(&mtx->m_owner,
					 owner, UMUTEX_UNOWNED)));
		/* Wake a kernel-blocked waiter only if one was recorded. */
		if ((owner & UMUTEX_CONTESTED))
			(void)_umtx_op_err(mtx, UMTX_OP_MUTEX_WAKE2, flags, 0, 0);
		return (0);
	}
unlock:
	/* Slow path: PP/PI mutexes (or no WAKE2) unlock in the kernel. */
	return _umtx_op_err(mtx, UMTX_OP_MUTEX_UNLOCK, 0, 0, 0);
}
186161680Sdavidxu
187161680Sdavidxuint
188163334Sdavidxu__thr_umutex_trylock(struct umutex *mtx)
189161680Sdavidxu{
190177853Sdavidxu	return _umtx_op_err(mtx, UMTX_OP_MUTEX_TRYLOCK, 0, 0, 0);
191161680Sdavidxu}
192161680Sdavidxu
193161680Sdavidxuint
194161680Sdavidxu__thr_umutex_set_ceiling(struct umutex *mtx, uint32_t ceiling,
195161680Sdavidxu	uint32_t *oldceiling)
196161680Sdavidxu{
197177853Sdavidxu	return _umtx_op_err(mtx, UMTX_OP_SET_CEILING, ceiling, oldceiling, 0);
198161680Sdavidxu}
199161680Sdavidxu
200161680Sdavidxuint
201173801Sdavidxu_thr_umtx_wait(volatile long *mtx, long id, const struct timespec *timeout)
202144518Sdavidxu{
203144518Sdavidxu	if (timeout && (timeout->tv_sec < 0 || (timeout->tv_sec == 0 &&
204144518Sdavidxu		timeout->tv_nsec <= 0)))
205144518Sdavidxu		return (ETIMEDOUT);
206177853Sdavidxu	return _umtx_op_err(__DEVOLATILE(void *, mtx), UMTX_OP_WAIT, id, 0,
207177853Sdavidxu		__DECONST(void*, timeout));
208144518Sdavidxu}
209144518Sdavidxu
210144518Sdavidxuint
211178647Sdavidxu_thr_umtx_wait_uint(volatile u_int *mtx, u_int id, const struct timespec *timeout, int shared)
212144518Sdavidxu{
213173801Sdavidxu	if (timeout && (timeout->tv_sec < 0 || (timeout->tv_sec == 0 &&
214173801Sdavidxu		timeout->tv_nsec <= 0)))
215173801Sdavidxu		return (ETIMEDOUT);
216178647Sdavidxu	return _umtx_op_err(__DEVOLATILE(void *, mtx),
217178647Sdavidxu			shared ? UMTX_OP_WAIT_UINT : UMTX_OP_WAIT_UINT_PRIVATE, id, 0,
218178647Sdavidxu			__DECONST(void*, timeout));
219173801Sdavidxu}
220173801Sdavidxu
221173801Sdavidxuint
222216641Sdavidxu_thr_umtx_timedwait_uint(volatile u_int *mtx, u_int id, int clockid,
223216641Sdavidxu	const struct timespec *abstime, int shared)
224216641Sdavidxu{
225216641Sdavidxu	struct timespec ts, ts2, *tsp;
226216641Sdavidxu
227216641Sdavidxu	if (abstime != NULL) {
228216641Sdavidxu		clock_gettime(clockid, &ts);
229216641Sdavidxu		TIMESPEC_SUB(&ts2, abstime, &ts);
230231996Sdavidxu		if (ts2.tv_sec < 0 || (ts2.tv_sec == 0 && ts2.tv_nsec <= 0))
231216641Sdavidxu			return (ETIMEDOUT);
232216641Sdavidxu		tsp = &ts2;
233216641Sdavidxu	} else {
234216641Sdavidxu		tsp = NULL;
235216641Sdavidxu	}
236216641Sdavidxu	return _umtx_op_err(__DEVOLATILE(void *, mtx),
237216641Sdavidxu		shared ? UMTX_OP_WAIT_UINT : UMTX_OP_WAIT_UINT_PRIVATE, id, NULL,
238216641Sdavidxu			tsp);
239216641Sdavidxu}
240216641Sdavidxu
241216641Sdavidxuint
242178647Sdavidxu_thr_umtx_wake(volatile void *mtx, int nr_wakeup, int shared)
243173801Sdavidxu{
244178647Sdavidxu	return _umtx_op_err(__DEVOLATILE(void *, mtx), shared ? UMTX_OP_WAKE : UMTX_OP_WAKE_PRIVATE,
245177853Sdavidxu		nr_wakeup, 0, 0);
246144518Sdavidxu}
247164877Sdavidxu
248164902Sdavidxuvoid
249164902Sdavidxu_thr_ucond_init(struct ucond *cv)
250164902Sdavidxu{
251164902Sdavidxu	bzero(cv, sizeof(struct ucond));
252164902Sdavidxu}
253164902Sdavidxu
254164877Sdavidxuint
255164877Sdavidxu_thr_ucond_wait(struct ucond *cv, struct umutex *m,
256227853Sdavidxu	const struct timespec *timeout, int flags)
257164877Sdavidxu{
258164877Sdavidxu	if (timeout && (timeout->tv_sec < 0 || (timeout->tv_sec == 0 &&
259164877Sdavidxu	    timeout->tv_nsec <= 0))) {
260179970Sdavidxu		struct pthread *curthread = _get_curthread();
261179970Sdavidxu		_thr_umutex_unlock(m, TID(curthread));
262164877Sdavidxu                return (ETIMEDOUT);
263164877Sdavidxu	}
264227853Sdavidxu	return _umtx_op_err(cv, UMTX_OP_CV_WAIT, flags,
265177853Sdavidxu		     m, __DECONST(void*, timeout));
266164877Sdavidxu}
267164877Sdavidxu
268164877Sdavidxuint
269164877Sdavidxu_thr_ucond_signal(struct ucond *cv)
270164877Sdavidxu{
271165110Sdavidxu	if (!cv->c_has_waiters)
272165110Sdavidxu		return (0);
273177853Sdavidxu	return _umtx_op_err(cv, UMTX_OP_CV_SIGNAL, 0, NULL, NULL);
274164877Sdavidxu}
275164877Sdavidxu
276164877Sdavidxuint
277164877Sdavidxu_thr_ucond_broadcast(struct ucond *cv)
278164877Sdavidxu{
279165110Sdavidxu	if (!cv->c_has_waiters)
280165110Sdavidxu		return (0);
281177853Sdavidxu	return _umtx_op_err(cv, UMTX_OP_CV_BROADCAST, 0, NULL, NULL);
282164877Sdavidxu}
283177850Sdavidxu
284177850Sdavidxuint
285177850Sdavidxu__thr_rwlock_rdlock(struct urwlock *rwlock, int flags, struct timespec *tsp)
286177850Sdavidxu{
287177853Sdavidxu	return _umtx_op_err(rwlock, UMTX_OP_RW_RDLOCK, flags, NULL, tsp);
288177850Sdavidxu}
289177850Sdavidxu
290177850Sdavidxuint
291177850Sdavidxu__thr_rwlock_wrlock(struct urwlock *rwlock, struct timespec *tsp)
292177850Sdavidxu{
293177853Sdavidxu	return _umtx_op_err(rwlock, UMTX_OP_RW_WRLOCK, 0, NULL, tsp);
294177850Sdavidxu}
295177850Sdavidxu
296177850Sdavidxuint
297177850Sdavidxu__thr_rwlock_unlock(struct urwlock *rwlock)
298177850Sdavidxu{
299177853Sdavidxu	return _umtx_op_err(rwlock, UMTX_OP_RW_UNLOCK, 0, NULL, NULL);
300177850Sdavidxu}
301212076Sdavidxu
302212076Sdavidxuvoid
303212076Sdavidxu_thr_rwl_rdlock(struct urwlock *rwlock)
304212076Sdavidxu{
305212076Sdavidxu	int ret;
306212076Sdavidxu
307212076Sdavidxu	for (;;) {
308212076Sdavidxu		if (_thr_rwlock_tryrdlock(rwlock, URWLOCK_PREFER_READER) == 0)
309212076Sdavidxu			return;
310212076Sdavidxu		ret = __thr_rwlock_rdlock(rwlock, URWLOCK_PREFER_READER, NULL);
311212076Sdavidxu		if (ret == 0)
312212076Sdavidxu			return;
313212076Sdavidxu		if (ret != EINTR)
314212076Sdavidxu			PANIC("rdlock error");
315212076Sdavidxu	}
316212076Sdavidxu}
317212076Sdavidxu
318212076Sdavidxuvoid
319212076Sdavidxu_thr_rwl_wrlock(struct urwlock *rwlock)
320212076Sdavidxu{
321212076Sdavidxu	int ret;
322212076Sdavidxu
323212076Sdavidxu	for (;;) {
324212076Sdavidxu		if (_thr_rwlock_trywrlock(rwlock) == 0)
325212076Sdavidxu			return;
326212076Sdavidxu		ret = __thr_rwlock_wrlock(rwlock, NULL);
327212076Sdavidxu		if (ret == 0)
328212076Sdavidxu			return;
329212076Sdavidxu		if (ret != EINTR)
330212076Sdavidxu			PANIC("wrlock error");
331212076Sdavidxu	}
332212076Sdavidxu}
333212076Sdavidxu
/*
 * Unlock an internal rwlock; failure indicates corrupted lock state
 * and is fatal.
 */
void
_thr_rwl_unlock(struct urwlock *rwlock)
{
	if (_thr_rwlock_unlock(rwlock) != 0)
		PANIC("unlock error");
}
340