/*-
 * Copyright (c) 2005 David Xu <davidxu@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/lib/libthr/thread/thr_umtx.h 212076 2010-09-01 02:18:33Z davidxu $
 */

#ifndef _THR_FBSD_UMTX_H_
#define _THR_FBSD_UMTX_H_

#include <sys/types.h>
#include <sys/cdefs.h>
#include <sys/umtx.h>
#include <machine/atomic.h>
#include <errno.h>
#include <strings.h>

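/*
 * Static initializer for a plain (non-PI, non-PP) umutex.  The braces
 * follow the field order of struct umutex in <sys/umtx.h>: m_owner,
 * m_flags, m_ceilings[2] and the spare words.
 */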
#define DEFAULT_UMUTEX	{0,0, {0,0},{0,0,0,0}}

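/*
 * Slow paths for the userland mutex (umutex) operations, implemented in
 * thr_umtx.c on top of the _umtx_op(2) system call.  They are entered only
 * when the inline fast paths below cannot complete the operation in
 * userland, e.g. on contention or for priority-protected mutexes whose
 * ceiling is managed by the kernel.
 */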
int __thr_umutex_lock(struct umutex *mtx, uint32_t id) __hidden;
int __thr_umutex_timedlock(struct umutex *mtx, uint32_t id,
	const struct timespec *timeout) __hidden;
int __thr_umutex_unlock(struct umutex *mtx, uint32_t id) __hidden;
int __thr_umutex_trylock(struct umutex *mtx) __hidden;
int __thr_umutex_set_ceiling(struct umutex *mtx, uint32_t ceiling,
	uint32_t *oldceiling) __hidden;

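/*
 * Kernel-assisted wait/wake and condition-variable primitives (implemented
 * in thr_umtx.c via _umtx_op(2)).  _thr_umtx_wait and _thr_umtx_wait_uint
 * put the caller to sleep as long as *mtx still equals "exp";
 * _thr_umtx_wake wakes up to "count" sleepers.  A non-zero "shared"
 * selects the process-shared variant of the operation.
 */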
void _thr_umutex_init(struct umutex *mtx) __hidden;
int _thr_umtx_wait(volatile long *mtx, long exp,
	const struct timespec *timeout) __hidden;
int _thr_umtx_wait_uint(volatile u_int *mtx, u_int exp,
	const struct timespec *timeout, int shared) __hidden;
int _thr_umtx_wake(volatile void *mtx, int count, int shared) __hidden;
int _thr_ucond_wait(struct ucond *cv, struct umutex *m,
	const struct timespec *timeout, int check_unparking) __hidden;
void _thr_ucond_init(struct ucond *cv) __hidden;
int _thr_ucond_signal(struct ucond *cv) __hidden;
int _thr_ucond_broadcast(struct ucond *cv) __hidden;

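/*
 * Slow paths for the userland rwlock (urwlock).  They block in the kernel
 * until the lock can be granted or the optional timeout "tsp" expires.
 */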
int __thr_rwlock_rdlock(struct urwlock *rwlock, int flags, struct timespec *tsp) __hidden;
int __thr_rwlock_wrlock(struct urwlock *rwlock, struct timespec *tsp) __hidden;
int __thr_rwlock_unlock(struct urwlock *rwlock) __hidden;

/* Internal use only. */
void _thr_rwl_rdlock(struct urwlock *rwlock) __hidden;
void _thr_rwl_wrlock(struct urwlock *rwlock) __hidden;
void _thr_rwl_unlock(struct urwlock *rwlock) __hidden;

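/*
 * Try to acquire a umutex without blocking.  The common case is a single
 * acquire compare-and-set of m_owner from UMUTEX_UNOWNED to the caller's
 * thread id.  Priority-protected mutexes cannot be taken purely in
 * userland because the kernel tracks the priority ceiling, so they fall
 * through to the kernel trylock; any other already-owned mutex fails with
 * EBUSY.
 */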
static inline int
_thr_umutex_trylock(struct umutex *mtx, uint32_t id)
{
	if (atomic_cmpset_acq_32(&mtx->m_owner, UMUTEX_UNOWNED, id))
		return (0);
	if ((mtx->m_flags & UMUTEX_PRIO_PROTECT) == 0)
		return (EBUSY);
	return (__thr_umutex_trylock(mtx));
}

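/*
 * Like _thr_umutex_trylock(), but used on the lock/timedlock fast path:
 * for a plain (non-PI, non-PP) mutex it will additionally claim a mutex
 * that is unowned but still marked UMUTEX_CONTESTED, preserving the
 * contested bit so that the eventual unlock still wakes any waiters.
 */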
static inline int
_thr_umutex_trylock2(struct umutex *mtx, uint32_t id)
{
	if (atomic_cmpset_acq_32(&mtx->m_owner, UMUTEX_UNOWNED, id) != 0)
		return (0);
	if ((uint32_t)mtx->m_owner == UMUTEX_CONTESTED &&
	    __predict_true((mtx->m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) == 0))
		if (atomic_cmpset_acq_32(&mtx->m_owner, UMUTEX_CONTESTED, id | UMUTEX_CONTESTED))
			return (0);
	return (EBUSY);
}

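/*
 * Acquire a umutex: take the userland fast path when the mutex is free,
 * otherwise enter the kernel and sleep.  A minimal usage sketch (names are
 * illustrative; inside libthr "id" is the caller's kernel thread id,
 * TID(curthread)):
 *
 *	struct umutex m = DEFAULT_UMUTEX;
 *	uint32_t id = TID(curthread);
 *
 *	_thr_umutex_lock(&m, id);
 *	... critical section ...
 *	_thr_umutex_unlock(&m, id);
 */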
static inline int
_thr_umutex_lock(struct umutex *mtx, uint32_t id)
{
	if (_thr_umutex_trylock2(mtx, id) == 0)
		return (0);
	return (__thr_umutex_lock(mtx, id));
}

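/*
 * Same as _thr_umutex_lock(), but the kernel sleep is bounded by the
 * caller-supplied timeout; the slow path reports ETIMEDOUT when it
 * expires.
 */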
static inline int
_thr_umutex_timedlock(struct umutex *mtx, uint32_t id,
	const struct timespec *timeout)
{
	if (_thr_umutex_trylock2(mtx, id) == 0)
		return (0);
	return (__thr_umutex_timedlock(mtx, id, timeout));
}

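/*
 * Release a umutex.  If the owner word is still exactly our id (no
 * contested bit set, no PI/PP bookkeeping to undo), one release-ordered
 * compare-and-set is enough; otherwise the kernel must hand the mutex
 * over and wake a waiter.
 */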
static inline int
_thr_umutex_unlock(struct umutex *mtx, uint32_t id)
{
	if (atomic_cmpset_rel_32(&mtx->m_owner, id, UMUTEX_UNOWNED))
		return (0);
	return (__thr_umutex_unlock(mtx, id));
}

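/*
 * Try to take a read lock in userland.  Unless the lock (or the caller)
 * prefers readers, a queued writer (URWLOCK_WRITE_WAITERS) blocks new
 * readers so writers are not starved.  On success the reader count in
 * rw_state is incremented; EAGAIN means the count is saturated, EBUSY
 * means a writer owns or is waiting for the lock.
 */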
static inline int
_thr_rwlock_tryrdlock(struct urwlock *rwlock, int flags)
{
	int32_t state;
	int32_t wrflags;

	if (flags & URWLOCK_PREFER_READER || rwlock->rw_flags & URWLOCK_PREFER_READER)
		wrflags = URWLOCK_WRITE_OWNER;
	else
		wrflags = URWLOCK_WRITE_OWNER | URWLOCK_WRITE_WAITERS;
	state = rwlock->rw_state;
	while (!(state & wrflags)) {
		if (__predict_false(URWLOCK_READER_COUNT(state) == URWLOCK_MAX_READERS))
			return (EAGAIN);
		if (atomic_cmpset_acq_32(&rwlock->rw_state, state, state + 1))
			return (0);
		state = rwlock->rw_state;
	}

	return (EBUSY);
}

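/*
 * Try to take the write lock in userland: succeeds only while there is no
 * write owner and no active readers, by setting URWLOCK_WRITE_OWNER with
 * an acquire compare-and-set.
 */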
static inline int
_thr_rwlock_trywrlock(struct urwlock *rwlock)
{
	int32_t state;

	state = rwlock->rw_state;
	while (!(state & URWLOCK_WRITE_OWNER) && URWLOCK_READER_COUNT(state) == 0) {
		if (atomic_cmpset_acq_32(&rwlock->rw_state, state, state | URWLOCK_WRITE_OWNER))
			return (0);
		state = rwlock->rw_state;
	}

	return (EBUSY);
}

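/*
 * Blocking read lock: try the userland path first, then let the kernel
 * queue the thread, optionally bounded by the timeout "tsp".
 */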
static inline int
_thr_rwlock_rdlock(struct urwlock *rwlock, int flags, struct timespec *tsp)
{
	if (_thr_rwlock_tryrdlock(rwlock, flags) == 0)
		return (0);
	return (__thr_rwlock_rdlock(rwlock, flags, tsp));
}

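/*
 * Blocking write lock: userland fast path first, kernel slow path on
 * contention.
 */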
static inline int
_thr_rwlock_wrlock(struct urwlock *rwlock, struct timespec *tsp)
{
	if (_thr_rwlock_trywrlock(rwlock) == 0)
		return (0);
	return (__thr_rwlock_wrlock(rwlock, tsp));
}

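/*
 * Unlock a urwlock.  A writer can release in userland only if no waiter
 * bits are set (the compare-and-set expects the state to be exactly
 * URWLOCK_WRITE_OWNER); a reader can decrement the count in userland
 * unless it is the last reader and writers or readers are queued.  In
 * either of those cases the kernel slow path performs the wakeup.  EPERM
 * is returned if the lock is not read-locked on the reader path.
 */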
static inline int
_thr_rwlock_unlock(struct urwlock *rwlock)
{
	int32_t state;

	state = rwlock->rw_state;
	if (state & URWLOCK_WRITE_OWNER) {
		if (atomic_cmpset_rel_32(&rwlock->rw_state, URWLOCK_WRITE_OWNER, 0))
			return (0);
	} else {
		for (;;) {
			if (__predict_false(URWLOCK_READER_COUNT(state) == 0))
				return (EPERM);
			if (!((state & (URWLOCK_WRITE_WAITERS |
			    URWLOCK_READ_WAITERS)) &&
			    URWLOCK_READER_COUNT(state) == 1)) {
				if (atomic_cmpset_rel_32(&rwlock->rw_state,
				    state, state - 1))
					return (0);
				state = rwlock->rw_state;
			} else {
				break;
			}
		}
	}
	return (__thr_rwlock_unlock(rwlock));
}
#endif /* !_THR_FBSD_UMTX_H_ */