/*
 * Copyright (c) 2005 David Xu <davidxu@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

27297706Skib#include <sys/cdefs.h>
28297706Skib__FBSDID("$FreeBSD: stable/11/lib/libthr/thread/thr_umtx.c 319430 2017-06-01 14:49:53Z vangyzen $");
29297706Skib
30144518Sdavidxu#include "thr_private.h"
31144518Sdavidxu#include "thr_umtx.h"
32144518Sdavidxu
33177853Sdavidxu#ifndef HAS__UMTX_OP_ERR
34177853Sdavidxuint _umtx_op_err(void *obj, int op, u_long val, void *uaddr, void *uaddr2)
35177853Sdavidxu{
36300043Skib
37177853Sdavidxu	if (_umtx_op(obj, op, val, uaddr, uaddr2) == -1)
38177853Sdavidxu		return (errno);
39177853Sdavidxu	return (0);
40177853Sdavidxu}
41177853Sdavidxu#endif
42177853Sdavidxu
43163334Sdavidxuvoid
44163334Sdavidxu_thr_umutex_init(struct umutex *mtx)
45163334Sdavidxu{
46293858Svangyzen	static const struct umutex default_mtx = DEFAULT_UMUTEX;
47163334Sdavidxu
48163334Sdavidxu	*mtx = default_mtx;
49163334Sdavidxu}
50163334Sdavidxu
51212077Sdavidxuvoid
52212077Sdavidxu_thr_urwlock_init(struct urwlock *rwl)
53212077Sdavidxu{
54293858Svangyzen	static const struct urwlock default_rwl = DEFAULT_URWLOCK;
55293858Svangyzen
56212077Sdavidxu	*rwl = default_rwl;
57212077Sdavidxu}
58212077Sdavidxu
/*
 * Acquire a umutex, blocking until it is available.  Priority-protect and
 * priority-inherit mutexes are always handed to the kernel; plain mutexes
 * are acquired in userland with a compare-and-set when possible, sleeping
 * in the kernel only on contention.  Returns 0 on success, EOWNERDEAD if
 * the robust mutex's previous owner died, or ENOTRECOVERABLE if the
 * robust mutex was marked unrecoverable.
 */
int
__thr_umutex_lock(struct umutex *mtx, uint32_t id)
{
	uint32_t owner;

	/* PP/PI mutexes need kernel arbitration on every acquisition. */
	if ((mtx->m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) != 0)
		return	(_umtx_op_err(mtx, UMTX_OP_MUTEX_LOCK, 0, 0, 0));

	for (;;) {
		owner = mtx->m_owner;
		/* Unowned (possibly contested): try to claim it, keeping
		   the contested bit so waiters are still woken on unlock. */
		if ((owner & ~UMUTEX_CONTESTED) == 0 &&
		     atomic_cmpset_acq_32(&mtx->m_owner, owner, id | owner))
			return (0);
		/* Previous owner of a robust mutex died: take it over. */
		if (owner == UMUTEX_RB_OWNERDEAD &&
		     atomic_cmpset_acq_32(&mtx->m_owner, owner,
		     id | UMUTEX_CONTESTED))
			return (EOWNERDEAD);
		/* Robust mutex left in an unrecoverable state. */
		if (owner == UMUTEX_RB_NOTRECOV)
			return (ENOTRECOVERABLE);

		/* wait in kernel */
		_umtx_op_err(mtx, UMTX_OP_MUTEX_WAIT, 0, 0, 0);
	}
}
83161680Sdavidxu
84216641Sdavidxu#define SPINLOOPS 1000
85216641Sdavidxu
/*
 * Like __thr_umutex_lock(), but on SMP systems spin up to SPINLOOPS
 * iterations in userland before each kernel sleep, on the expectation
 * that the current holder will release the mutex shortly.
 */
int
__thr_umutex_lock_spin(struct umutex *mtx, uint32_t id)
{
	uint32_t owner;
	int count;

	/* Spinning cannot help on a uniprocessor. */
	if (!_thr_is_smp)
		return (__thr_umutex_lock(mtx, id));
	/* PP/PI mutexes need kernel arbitration on every acquisition. */
	if ((mtx->m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) != 0)
		return	(_umtx_op_err(mtx, UMTX_OP_MUTEX_LOCK, 0, 0, 0));

	for (;;) {
		count = SPINLOOPS;
		while (count--) {
			owner = mtx->m_owner;
			/* Unowned (possibly contested): try to claim it. */
			if ((owner & ~UMUTEX_CONTESTED) == 0 &&
			    atomic_cmpset_acq_32(&mtx->m_owner, owner,
			    id | owner))
				return (0);
			/* Robust mutex whose previous owner died. */
			if (__predict_false(owner == UMUTEX_RB_OWNERDEAD) &&
			    atomic_cmpset_acq_32(&mtx->m_owner, owner,
			    id | UMUTEX_CONTESTED))
				return (EOWNERDEAD);
			/* Robust mutex left unrecoverable. */
			if (__predict_false(owner == UMUTEX_RB_NOTRECOV))
				return (ENOTRECOVERABLE);
			CPU_SPINWAIT;
		}

		/* wait in kernel */
		_umtx_op_err(mtx, UMTX_OP_MUTEX_WAIT, 0, 0, 0);
	}
}
118216641Sdavidxu
/*
 * Acquire a umutex with an optional absolute CLOCK_REALTIME deadline.
 * A NULL abstime means wait forever.  Plain mutexes are claimed in
 * userland when possible, sleeping in the kernel with the deadline on
 * contention; PP/PI mutexes are locked entirely by the kernel.
 * Returns 0, ETIMEDOUT, EOWNERDEAD, or ENOTRECOVERABLE.
 */
int
__thr_umutex_timedlock(struct umutex *mtx, uint32_t id,
	const struct timespec *abstime)
{
	struct _umtx_time *tm_p, timeout;
	size_t tm_size;
	uint32_t owner;
	int ret;

	if (abstime == NULL) {
		tm_p = NULL;
		tm_size = 0;
	} else {
		timeout._clockid = CLOCK_REALTIME;
		timeout._flags = UMTX_ABSTIME;
		timeout._timeout = *abstime;
		tm_p = &timeout;
		tm_size = sizeof(timeout);
	}

	for (;;) {
		if ((mtx->m_flags & (UMUTEX_PRIO_PROTECT |
		    UMUTEX_PRIO_INHERIT)) == 0) {
			/* try to lock it */
			owner = mtx->m_owner;
			if ((owner & ~UMUTEX_CONTESTED) == 0 &&
			     atomic_cmpset_acq_32(&mtx->m_owner, owner,
			     id | owner))
				return (0);
			/* Robust mutex whose previous owner died. */
			if (__predict_false(owner == UMUTEX_RB_OWNERDEAD) &&
			     atomic_cmpset_acq_32(&mtx->m_owner, owner,
			     id | UMUTEX_CONTESTED))
				return (EOWNERDEAD);
			/* Robust mutex left unrecoverable. */
			if (__predict_false(owner == UMUTEX_RB_NOTRECOV))
				return (ENOTRECOVERABLE);
			/* wait in kernel */
			ret = _umtx_op_err(mtx, UMTX_OP_MUTEX_WAIT, 0,
			    (void *)tm_size, __DECONST(void *, tm_p));
		} else {
			/* Kernel performs the whole lock for PP/PI mutexes. */
			ret = _umtx_op_err(mtx, UMTX_OP_MUTEX_LOCK, 0,
			    (void *)tm_size, __DECONST(void *, tm_p));
			if (ret == 0 || ret == EOWNERDEAD ||
			    ret == ENOTRECOVERABLE)
				break;
		}
		/* Deadline expired; any other wait error just retries. */
		if (ret == ETIMEDOUT)
			break;
	}
	return (ret);
}
169161680Sdavidxu
170161680Sdavidxuint
171319430Svangyzen__thr_umutex_unlock(struct umutex *mtx)
172161680Sdavidxu{
173300043Skib
174300043Skib	return (_umtx_op_err(mtx, UMTX_OP_MUTEX_UNLOCK, 0, 0, 0));
175161680Sdavidxu}
176161680Sdavidxu
177161680Sdavidxuint
178163334Sdavidxu__thr_umutex_trylock(struct umutex *mtx)
179161680Sdavidxu{
180300043Skib
181300043Skib	return (_umtx_op_err(mtx, UMTX_OP_MUTEX_TRYLOCK, 0, 0, 0));
182161680Sdavidxu}
183161680Sdavidxu
184161680Sdavidxuint
185161680Sdavidxu__thr_umutex_set_ceiling(struct umutex *mtx, uint32_t ceiling,
186300043Skib    uint32_t *oldceiling)
187161680Sdavidxu{
188300043Skib
189300043Skib	return (_umtx_op_err(mtx, UMTX_OP_SET_CEILING, ceiling, oldceiling, 0));
190161680Sdavidxu}
191161680Sdavidxu
192161680Sdavidxuint
193173801Sdavidxu_thr_umtx_wait(volatile long *mtx, long id, const struct timespec *timeout)
194144518Sdavidxu{
195300043Skib
196144518Sdavidxu	if (timeout && (timeout->tv_sec < 0 || (timeout->tv_sec == 0 &&
197300043Skib	    timeout->tv_nsec <= 0)))
198144518Sdavidxu		return (ETIMEDOUT);
199300043Skib	return (_umtx_op_err(__DEVOLATILE(void *, mtx), UMTX_OP_WAIT, id, 0,
200300043Skib	    __DECONST(void*, timeout)));
201144518Sdavidxu}
202144518Sdavidxu
203144518Sdavidxuint
204300043Skib_thr_umtx_wait_uint(volatile u_int *mtx, u_int id,
205300043Skib    const struct timespec *timeout, int shared)
206144518Sdavidxu{
207300043Skib
208173801Sdavidxu	if (timeout && (timeout->tv_sec < 0 || (timeout->tv_sec == 0 &&
209300043Skib	    timeout->tv_nsec <= 0)))
210173801Sdavidxu		return (ETIMEDOUT);
211300043Skib	return (_umtx_op_err(__DEVOLATILE(void *, mtx), shared ?
212300043Skib	    UMTX_OP_WAIT_UINT : UMTX_OP_WAIT_UINT_PRIVATE, id, 0,
213300043Skib	    __DECONST(void*, timeout)));
214173801Sdavidxu}
215173801Sdavidxu
216173801Sdavidxuint
217216641Sdavidxu_thr_umtx_timedwait_uint(volatile u_int *mtx, u_int id, int clockid,
218300043Skib    const struct timespec *abstime, int shared)
219216641Sdavidxu{
220232144Sdavidxu	struct _umtx_time *tm_p, timeout;
221232144Sdavidxu	size_t tm_size;
222232144Sdavidxu
223232144Sdavidxu	if (abstime == NULL) {
224232144Sdavidxu		tm_p = NULL;
225232144Sdavidxu		tm_size = 0;
226232144Sdavidxu	} else {
227233134Sdavidxu		timeout._clockid = clockid;
228232144Sdavidxu		timeout._flags = UMTX_ABSTIME;
229232144Sdavidxu		timeout._timeout = *abstime;
230232144Sdavidxu		tm_p = &timeout;
231232144Sdavidxu		tm_size = sizeof(timeout);
232232144Sdavidxu	}
233232144Sdavidxu
234300043Skib	return (_umtx_op_err(__DEVOLATILE(void *, mtx), shared ?
235300043Skib	    UMTX_OP_WAIT_UINT : UMTX_OP_WAIT_UINT_PRIVATE, id,
236300043Skib	    (void *)tm_size, __DECONST(void *, tm_p)));
237216641Sdavidxu}
238216641Sdavidxu
239216641Sdavidxuint
240178647Sdavidxu_thr_umtx_wake(volatile void *mtx, int nr_wakeup, int shared)
241173801Sdavidxu{
242300043Skib
243300043Skib	return (_umtx_op_err(__DEVOLATILE(void *, mtx), shared ?
244300043Skib	    UMTX_OP_WAKE : UMTX_OP_WAKE_PRIVATE, nr_wakeup, 0, 0));
245144518Sdavidxu}
246164877Sdavidxu
247164902Sdavidxuvoid
248164902Sdavidxu_thr_ucond_init(struct ucond *cv)
249164902Sdavidxu{
250300043Skib
251164902Sdavidxu	bzero(cv, sizeof(struct ucond));
252164902Sdavidxu}
253164902Sdavidxu
254164877Sdavidxuint
255164877Sdavidxu_thr_ucond_wait(struct ucond *cv, struct umutex *m,
256227604Sdavidxu	const struct timespec *timeout, int flags)
257164877Sdavidxu{
258300043Skib	struct pthread *curthread;
259300043Skib
260164877Sdavidxu	if (timeout && (timeout->tv_sec < 0 || (timeout->tv_sec == 0 &&
261164877Sdavidxu	    timeout->tv_nsec <= 0))) {
262300043Skib		curthread = _get_curthread();
263179970Sdavidxu		_thr_umutex_unlock(m, TID(curthread));
264164877Sdavidxu                return (ETIMEDOUT);
265164877Sdavidxu	}
266300043Skib	return (_umtx_op_err(cv, UMTX_OP_CV_WAIT, flags, m,
267300043Skib	    __DECONST(void*, timeout)));
268164877Sdavidxu}
269164877Sdavidxu
270164877Sdavidxuint
271164877Sdavidxu_thr_ucond_signal(struct ucond *cv)
272164877Sdavidxu{
273300043Skib
274165110Sdavidxu	if (!cv->c_has_waiters)
275165110Sdavidxu		return (0);
276300043Skib	return (_umtx_op_err(cv, UMTX_OP_CV_SIGNAL, 0, NULL, NULL));
277164877Sdavidxu}
278164877Sdavidxu
279164877Sdavidxuint
280164877Sdavidxu_thr_ucond_broadcast(struct ucond *cv)
281164877Sdavidxu{
282300043Skib
283165110Sdavidxu	if (!cv->c_has_waiters)
284165110Sdavidxu		return (0);
285300043Skib	return (_umtx_op_err(cv, UMTX_OP_CV_BROADCAST, 0, NULL, NULL));
286164877Sdavidxu}
287177850Sdavidxu
288177850Sdavidxuint
289232209Sdavidxu__thr_rwlock_rdlock(struct urwlock *rwlock, int flags,
290232209Sdavidxu	const struct timespec *tsp)
291177850Sdavidxu{
292232209Sdavidxu	struct _umtx_time timeout, *tm_p;
293232209Sdavidxu	size_t tm_size;
294232209Sdavidxu
295232209Sdavidxu	if (tsp == NULL) {
296232209Sdavidxu		tm_p = NULL;
297232209Sdavidxu		tm_size = 0;
298232209Sdavidxu	} else {
299232209Sdavidxu		timeout._timeout = *tsp;
300232209Sdavidxu		timeout._flags = UMTX_ABSTIME;
301232209Sdavidxu		timeout._clockid = CLOCK_REALTIME;
302232209Sdavidxu		tm_p = &timeout;
303232209Sdavidxu		tm_size = sizeof(timeout);
304232209Sdavidxu	}
305300043Skib	return (_umtx_op_err(rwlock, UMTX_OP_RW_RDLOCK, flags,
306300043Skib	    (void *)tm_size, tm_p));
307177850Sdavidxu}
308177850Sdavidxu
309177850Sdavidxuint
310232209Sdavidxu__thr_rwlock_wrlock(struct urwlock *rwlock, const struct timespec *tsp)
311177850Sdavidxu{
312232209Sdavidxu	struct _umtx_time timeout, *tm_p;
313232209Sdavidxu	size_t tm_size;
314232209Sdavidxu
315232209Sdavidxu	if (tsp == NULL) {
316232209Sdavidxu		tm_p = NULL;
317232209Sdavidxu		tm_size = 0;
318232209Sdavidxu	} else {
319232209Sdavidxu		timeout._timeout = *tsp;
320232209Sdavidxu		timeout._flags = UMTX_ABSTIME;
321232209Sdavidxu		timeout._clockid = CLOCK_REALTIME;
322232209Sdavidxu		tm_p = &timeout;
323232209Sdavidxu		tm_size = sizeof(timeout);
324232209Sdavidxu	}
325300043Skib	return (_umtx_op_err(rwlock, UMTX_OP_RW_WRLOCK, 0, (void *)tm_size,
326300043Skib	    tm_p));
327177850Sdavidxu}
328177850Sdavidxu
329177850Sdavidxuint
330177850Sdavidxu__thr_rwlock_unlock(struct urwlock *rwlock)
331177850Sdavidxu{
332300043Skib
333300043Skib	return (_umtx_op_err(rwlock, UMTX_OP_RW_UNLOCK, 0, NULL, NULL));
334177850Sdavidxu}
335212076Sdavidxu
336212076Sdavidxuvoid
337212076Sdavidxu_thr_rwl_rdlock(struct urwlock *rwlock)
338212076Sdavidxu{
339212076Sdavidxu	int ret;
340212076Sdavidxu
341212076Sdavidxu	for (;;) {
342212076Sdavidxu		if (_thr_rwlock_tryrdlock(rwlock, URWLOCK_PREFER_READER) == 0)
343212076Sdavidxu			return;
344212076Sdavidxu		ret = __thr_rwlock_rdlock(rwlock, URWLOCK_PREFER_READER, NULL);
345212076Sdavidxu		if (ret == 0)
346212076Sdavidxu			return;
347212076Sdavidxu		if (ret != EINTR)
348212076Sdavidxu			PANIC("rdlock error");
349212076Sdavidxu	}
350212076Sdavidxu}
351212076Sdavidxu
352212076Sdavidxuvoid
353212076Sdavidxu_thr_rwl_wrlock(struct urwlock *rwlock)
354212076Sdavidxu{
355212076Sdavidxu	int ret;
356212076Sdavidxu
357212076Sdavidxu	for (;;) {
358212076Sdavidxu		if (_thr_rwlock_trywrlock(rwlock) == 0)
359212076Sdavidxu			return;
360212076Sdavidxu		ret = __thr_rwlock_wrlock(rwlock, NULL);
361212076Sdavidxu		if (ret == 0)
362212076Sdavidxu			return;
363212076Sdavidxu		if (ret != EINTR)
364212076Sdavidxu			PANIC("wrlock error");
365212076Sdavidxu	}
366212076Sdavidxu}
367212076Sdavidxu
/* Unlock an internal urwlock; failure here is unrecoverable. */
void
_thr_rwl_unlock(struct urwlock *rwlock)
{
	int error;

	error = _thr_rwlock_unlock(rwlock);
	if (error != 0)
		PANIC("unlock error");
}
375