/*-
 * Copyright (c) 2005 David Xu <davidxu@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/lib/libthr/thread/thr_umtx.h 212077 2010-09-01 03:11:21Z davidxu $
 */

#ifndef _THR_FBSD_UMTX_H_
#define _THR_FBSD_UMTX_H_

#include <strings.h>
#include <sys/umtx.h>

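/*
 * Static initializers for the low-level kernel umutex and urwlock objects.
 * All fields start out zeroed, i.e. unowned/unlocked with default
 * (non-PI, non-PP) flags and no waiters.
 */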
#define DEFAULT_UMUTEX	{0,0,{0,0},{0,0,0,0}}
#define DEFAULT_URWLOCK {0,0,0,0,{0,0,0,0}}

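/*
 * A minimal usage sketch (TID() and _get_curthread() are assumed here to
 * come from thr_private.h; they are not part of this header):
 *
 *	static struct umutex	m = DEFAULT_UMUTEX;
 *	uint32_t		id = TID(_get_curthread());
 *
 *	_thr_umutex_lock(&m, id);
 *	... critical section ...
 *	_thr_umutex_unlock(&m, id);
 */

/*
 * Slow-path entry points, implemented in thr_umtx.c.  They enter the
 * kernel through the _umtx_op(2) system call when the inline fast paths
 * below cannot acquire or release the lock in userland.
 */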
int __thr_umutex_lock(struct umutex *mtx, uint32_t id) __hidden;
int __thr_umutex_timedlock(struct umutex *mtx, uint32_t id,
	const struct timespec *timeout) __hidden;
int __thr_umutex_unlock(struct umutex *mtx, uint32_t id) __hidden;
int __thr_umutex_trylock(struct umutex *mtx) __hidden;
int __thr_umutex_set_ceiling(struct umutex *mtx, uint32_t ceiling,
	uint32_t *oldceiling) __hidden;

void _thr_umutex_init(struct umutex *mtx) __hidden;
void _thr_urwlock_init(struct urwlock *rwl) __hidden;

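/*
 * Wait/wake primitives and condition-variable operations built on the
 * kernel umtx queues.  A non-zero 'shared' argument selects the
 * process-shared wait queue rather than the process-private one.
 */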
int _thr_umtx_wait(volatile long *mtx, long exp,
	const struct timespec *timeout) __hidden;
int _thr_umtx_wait_uint(volatile u_int *mtx, u_int exp,
	const struct timespec *timeout, int shared) __hidden;
int _thr_umtx_wake(volatile void *mtx, int count, int shared) __hidden;
int _thr_ucond_wait(struct ucond *cv, struct umutex *m,
	const struct timespec *timeout, int check_unparking) __hidden;
void _thr_ucond_init(struct ucond *cv) __hidden;
int _thr_ucond_signal(struct ucond *cv) __hidden;
int _thr_ucond_broadcast(struct ucond *cv) __hidden;

int __thr_rwlock_rdlock(struct urwlock *rwlock, int flags,
	struct timespec *tsp) __hidden;
int __thr_rwlock_wrlock(struct urwlock *rwlock, struct timespec *tsp) __hidden;
int __thr_rwlock_unlock(struct urwlock *rwlock) __hidden;

/* Internal use only. */
void _thr_rwl_rdlock(struct urwlock *rwlock) __hidden;
void _thr_rwl_wrlock(struct urwlock *rwlock) __hidden;
void _thr_rwl_unlock(struct urwlock *rwlock) __hidden;

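/*
 * Fast-path trylock: a single compare-and-set of m_owner from
 * UMUTEX_UNOWNED to the caller's thread id.  Priority-protected mutexes
 * always need the kernel, so those fall through to __thr_umutex_trylock().
 */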
static inline int
_thr_umutex_trylock(struct umutex *mtx, uint32_t id)
{
	if (atomic_cmpset_acq_32(&mtx->m_owner, UMUTEX_UNOWNED, id))
		return (0);
	if ((mtx->m_flags & UMUTEX_PRIO_PROTECT) == 0)
		return (EBUSY);
	return (__thr_umutex_trylock(mtx));
}

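/*
 * Like _thr_umutex_trylock(), but may also take over a mutex whose owner
 * field is UMUTEX_CONTESTED, provided the mutex is neither
 * priority-protected nor priority-inheriting.
 */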
static inline int
_thr_umutex_trylock2(struct umutex *mtx, uint32_t id)
{
	if (atomic_cmpset_acq_32(&mtx->m_owner, UMUTEX_UNOWNED, id) != 0)
		return (0);
	if ((uint32_t)mtx->m_owner == UMUTEX_CONTESTED &&
	    __predict_true((mtx->m_flags &
	    (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) == 0))
		if (atomic_cmpset_acq_32(&mtx->m_owner, UMUTEX_CONTESTED,
		    id | UMUTEX_CONTESTED))
			return (0);
	return (EBUSY);
}

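/*
 * Lock, timedlock and unlock: try the userland fast path first and fall
 * back to the kernel only on contention.
 */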
static inline int
_thr_umutex_lock(struct umutex *mtx, uint32_t id)
{
	if (_thr_umutex_trylock2(mtx, id) == 0)
		return (0);
	return (__thr_umutex_lock(mtx, id));
}

static inline int
_thr_umutex_timedlock(struct umutex *mtx, uint32_t id,
	const struct timespec *timeout)
{
	if (_thr_umutex_trylock2(mtx, id) == 0)
		return (0);
	return (__thr_umutex_timedlock(mtx, id, timeout));
}

static inline int
_thr_umutex_unlock(struct umutex *mtx, uint32_t id)
{
	if (atomic_cmpset_rel_32(&mtx->m_owner, id, UMUTEX_UNOWNED))
		return (0);
	return (__thr_umutex_unlock(mtx, id));
}

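/*
 * Read-lock fast path: acquisition is an atomic increment of the reader
 * count in rw_state.  Readers never pass a write owner and, unless the
 * lock or the caller prefers readers, they also yield to queued writers.
 */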
static inline int
_thr_rwlock_tryrdlock(struct urwlock *rwlock, int flags)
{
	int32_t state;
	int32_t wrflags;

	if ((flags & URWLOCK_PREFER_READER) ||
	    (rwlock->rw_flags & URWLOCK_PREFER_READER))
		wrflags = URWLOCK_WRITE_OWNER;
	else
		wrflags = URWLOCK_WRITE_OWNER | URWLOCK_WRITE_WAITERS;
	state = rwlock->rw_state;
	while (!(state & wrflags)) {
		if (__predict_false(URWLOCK_READER_COUNT(state) ==
		    URWLOCK_MAX_READERS))
			return (EAGAIN);
		if (atomic_cmpset_acq_32(&rwlock->rw_state, state, state + 1))
			return (0);
		state = rwlock->rw_state;
	}

	return (EBUSY);
}

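/*
 * Write-lock fast path: succeeds only when there is no write owner and no
 * active readers, by atomically setting URWLOCK_WRITE_OWNER in rw_state.
 */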
static inline int
_thr_rwlock_trywrlock(struct urwlock *rwlock)
{
	int32_t state;

	state = rwlock->rw_state;
	while (!(state & URWLOCK_WRITE_OWNER) &&
	    URWLOCK_READER_COUNT(state) == 0) {
		if (atomic_cmpset_acq_32(&rwlock->rw_state, state,
		    state | URWLOCK_WRITE_OWNER))
			return (0);
		state = rwlock->rw_state;
	}

	return (EBUSY);
}

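/*
 * Blocking read/write lock: attempt the inline fast path, then sleep in
 * the kernel if it fails.
 */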
static inline int
_thr_rwlock_rdlock(struct urwlock *rwlock, int flags, struct timespec *tsp)
{
	if (_thr_rwlock_tryrdlock(rwlock, flags) == 0)
		return (0);
	return (__thr_rwlock_rdlock(rwlock, flags, tsp));
}

static inline int
_thr_rwlock_wrlock(struct urwlock *rwlock, struct timespec *tsp)
{
	if (_thr_rwlock_trywrlock(rwlock) == 0)
		return (0);
	return (__thr_rwlock_wrlock(rwlock, tsp));
}

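/*
 * Unlock: clear the write owner or decrement the reader count in userland.
 * If waiters may have to be woken (waiter bits are set and this is the
 * last reader, or the write owner has waiters queued), hand the lock off
 * to the kernel via __thr_rwlock_unlock().
 */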
static inline int
_thr_rwlock_unlock(struct urwlock *rwlock)
{
	int32_t state;

	state = rwlock->rw_state;
	if (state & URWLOCK_WRITE_OWNER) {
		if (atomic_cmpset_rel_32(&rwlock->rw_state,
		    URWLOCK_WRITE_OWNER, 0))
			return (0);
	} else {
		for (;;) {
			if (__predict_false(URWLOCK_READER_COUNT(state) == 0))
				return (EPERM);
			if (!((state & (URWLOCK_WRITE_WAITERS |
			    URWLOCK_READ_WAITERS)) &&
			    URWLOCK_READER_COUNT(state) == 1)) {
				if (atomic_cmpset_rel_32(&rwlock->rw_state,
				    state, state - 1))
					return (0);
				state = rwlock->rw_state;
			} else {
				break;
			}
		}
	}
	return (__thr_rwlock_unlock(rwlock));
}
#endif