/*-
 * Copyright (c) 2006 John Baldwin <jhb@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: stable/11/sys/sys/rwlock.h 367457 2020-11-07 18:10:59Z dim $
 */

#ifndef _SYS_RWLOCK_H_
#define	_SYS_RWLOCK_H_

#include <sys/_lock.h>
#include <sys/_rwlock.h>
#include <sys/lock_profile.h>
#include <sys/lockstat.h>

#ifdef _KERNEL
#include <sys/pcpu.h>
#include <machine/atomic.h>
#endif

/*
 * The rw_lock word consists of several fields.  The low bit indicates
 * if the lock is locked with a read (shared) or write (exclusive) lock.
 * A value of 0 indicates a write lock, and a value of 1 indicates a read
 * lock.  Bit 1 is a boolean indicating if there are any threads waiting
 * for a read lock.  Bit 2 is a boolean indicating if there are any threads
 * waiting for a write lock.  Bits 3 and 4 flag a spinning write waiter and
 * write recursion, respectively.  The meaning of the remaining bits depends
 * on the value of the low bit.  For a write lock, they hold a pointer to
 * the thread holding the lock, similar to the mtx_lock field of mutexes.
 * For read locks, they hold a count of the read locks that are held.
 *
 * When the lock is not locked by any thread, it is encoded as a read lock
 * with zero waiters.
 */

#define	RW_LOCK_READ		0x01
#define	RW_LOCK_READ_WAITERS	0x02
#define	RW_LOCK_WRITE_WAITERS	0x04
#define	RW_LOCK_WRITE_SPINNER	0x08
#define	RW_LOCK_WRITER_RECURSED	0x10
#define	RW_LOCK_FLAGMASK						\
	(RW_LOCK_READ | RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS |	\
	RW_LOCK_WRITE_SPINNER | RW_LOCK_WRITER_RECURSED)
#define	RW_LOCK_WAITERS		(RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS)

#define	RW_OWNER(x)		((x) & ~RW_LOCK_FLAGMASK)
#define	RW_READERS_SHIFT	5
#define	RW_READERS(x)		(RW_OWNER((x)) >> RW_READERS_SHIFT)
#define	RW_READERS_LOCK(x)	((x) << RW_READERS_SHIFT | RW_LOCK_READ)
#define	RW_ONE_READER		(1 << RW_READERS_SHIFT)

#define	RW_UNLOCKED		RW_READERS_LOCK(0)
#define	RW_DESTROYED		(RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS)
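
/*
 * A sketch of how the macros above encode the lock word.  The values are
 * illustrative only; the thread pointer is hypothetical and assumes the
 * usual alignment that keeps the flag bits clear.
 *
 *	RW_UNLOCKED			0x01	unlocked (a read lock with
 *						zero readers, no waiters)
 *	RW_READERS_LOCK(3)		0x61	read-locked by three readers
 *	RW_READERS(0x61)		3	reader count recovered again
 *	0xc0003a00				write-locked by the thread
 *						at that address
 *	0xc0003a00 | RW_LOCK_READ_WAITERS	write-locked, readers waiting
 */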

#ifdef _KERNEL

#define	rw_recurse	lock_object.lo_data

#define	RW_READ_VALUE(x)	((x)->rw_lock)

/* Very simple operations on rw_lock. */

/* Try to obtain a write lock once. */
#define	_rw_write_lock(rw, tid)						\
	atomic_cmpset_acq_ptr(&(rw)->rw_lock, RW_UNLOCKED, (tid))

#define	_rw_write_lock_fetch(rw, vp, tid)				\
	atomic_fcmpset_acq_ptr(&(rw)->rw_lock, vp, (tid))

/* Release a write lock quickly if there are no waiters. */
#define	_rw_write_unlock(rw, tid)					\
	atomic_cmpset_rel_ptr(&(rw)->rw_lock, (tid), RW_UNLOCKED)

#define	_rw_write_unlock_fetch(rw, tid)					\
	atomic_fcmpset_rel_ptr(&(rw)->rw_lock, (tid), RW_UNLOCKED)

/*
 * Full lock operations that are suitable to be inlined in non-debug
 * kernels.  If the lock cannot be acquired or released trivially then
 * the work is deferred to another function.
 */

/* Acquire a write lock. */
#define	__rw_wlock(rw, tid, file, line) do {				\
	uintptr_t _tid = (uintptr_t)(tid);				\
	uintptr_t _v = RW_UNLOCKED;					\
									\
	if (__predict_false(LOCKSTAT_PROFILE_ENABLED(rw__acquire) ||	\
	    !_rw_write_lock_fetch((rw), &_v, _tid)))			\
		_rw_wlock_hard((rw), _v, (file), (line));		\
} while (0)

/* Release a write lock. */
#define	__rw_wunlock(rw, tid, file, line) do {				\
	uintptr_t _v = (uintptr_t)(tid);				\
									\
	if (__predict_false(LOCKSTAT_PROFILE_ENABLED(rw__release) ||	\
	    !_rw_write_unlock_fetch((rw), &_v)))			\
		_rw_wunlock_hard((rw), _v, (file), (line));		\
} while (0)

/*
 * Function prototypes.  Routines that start with _ are not part of the
 * external API and should not be called directly.  Wrapper macros should
 * be used instead.
 */
void	_rw_init_flags(volatile uintptr_t *c, const char *name, int opts);
void	_rw_destroy(volatile uintptr_t *c);
void	rw_sysinit(void *arg);
int	_rw_wowned(const volatile uintptr_t *c);
void	_rw_wlock_cookie(volatile uintptr_t *c, const char *file, int line);
int	__rw_try_wlock_int(struct rwlock *rw LOCK_FILE_LINE_ARG_DEF);
int	__rw_try_wlock(volatile uintptr_t *c, const char *file, int line);
void	_rw_wunlock_cookie(volatile uintptr_t *c, const char *file, int line);
void	__rw_rlock_int(struct rwlock *rw LOCK_FILE_LINE_ARG_DEF);
void	__rw_rlock(volatile uintptr_t *c, const char *file, int line);
int	__rw_try_rlock_int(struct rwlock *rw LOCK_FILE_LINE_ARG_DEF);
int	__rw_try_rlock(volatile uintptr_t *c, const char *file, int line);
void	_rw_runlock_cookie_int(struct rwlock *rw LOCK_FILE_LINE_ARG_DEF);
void	_rw_runlock_cookie(volatile uintptr_t *c, const char *file, int line);
void	__rw_wlock_hard(volatile uintptr_t *c, uintptr_t v
	    LOCK_FILE_LINE_ARG_DEF);
void	__rw_wunlock_hard(volatile uintptr_t *c, uintptr_t v
	    LOCK_FILE_LINE_ARG_DEF);
int	__rw_try_upgrade_int(struct rwlock *rw LOCK_FILE_LINE_ARG_DEF);
int	__rw_try_upgrade(volatile uintptr_t *c, const char *file, int line);
void	__rw_downgrade_int(struct rwlock *rw LOCK_FILE_LINE_ARG_DEF);
void	__rw_downgrade(volatile uintptr_t *c, const char *file, int line);
#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
void	__rw_assert(const volatile uintptr_t *c, int what, const char *file,
	    int line);
#endif

/*
 * Top-level macros that supply the lock cookie once the actual rwlock is
 * passed.  They also prevent passing a malformed object to the rwlock KPI
 * by failing compilation, since the reserved rw_lock member will not be
 * found.
 */
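/*
 * For example (hypothetical, for illustration only): with
 *
 *	struct rwlock rw;
 *	struct mtx m;
 *
 * "rw_wowned(&rw)" compiles, while "rw_wowned(&m)" fails to compile
 * because struct mtx has no rw_lock member.
 */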
#define	rw_init(rw, n)							\
	_rw_init_flags(&(rw)->rw_lock, n, 0)
#define	rw_init_flags(rw, n, o)						\
	_rw_init_flags(&(rw)->rw_lock, n, o)
#define	rw_destroy(rw)							\
	_rw_destroy(&(rw)->rw_lock)
#define	rw_wowned(rw)							\
	_rw_wowned(&(rw)->rw_lock)
#define	_rw_wlock(rw, f, l)						\
	_rw_wlock_cookie(&(rw)->rw_lock, f, l)
#define	_rw_try_wlock(rw, f, l)						\
	__rw_try_wlock(&(rw)->rw_lock, f, l)
#define	_rw_wunlock(rw, f, l)						\
	_rw_wunlock_cookie(&(rw)->rw_lock, f, l)
#define	_rw_try_rlock(rw, f, l)						\
	__rw_try_rlock(&(rw)->rw_lock, f, l)
#if LOCK_DEBUG > 0
#define	_rw_rlock(rw, f, l)						\
	__rw_rlock(&(rw)->rw_lock, f, l)
#define	_rw_runlock(rw, f, l)						\
	_rw_runlock_cookie(&(rw)->rw_lock, f, l)
#else
#define	_rw_rlock(rw, f, l)						\
	__rw_rlock_int((struct rwlock *)rw)
#define	_rw_runlock(rw, f, l)						\
	_rw_runlock_cookie_int((struct rwlock *)rw)
#endif
#if LOCK_DEBUG > 0
#define	_rw_wlock_hard(rw, v, f, l)					\
	__rw_wlock_hard(&(rw)->rw_lock, v, f, l)
#define	_rw_wunlock_hard(rw, v, f, l)					\
	__rw_wunlock_hard(&(rw)->rw_lock, v, f, l)
#define	_rw_try_upgrade(rw, f, l)					\
	__rw_try_upgrade(&(rw)->rw_lock, f, l)
#define	_rw_downgrade(rw, f, l)						\
	__rw_downgrade(&(rw)->rw_lock, f, l)
#else
#define	_rw_wlock_hard(rw, v, f, l)					\
	__rw_wlock_hard(&(rw)->rw_lock, v)
#define	_rw_wunlock_hard(rw, v, f, l)					\
	__rw_wunlock_hard(&(rw)->rw_lock, v)
#define	_rw_try_upgrade(rw, f, l)					\
	__rw_try_upgrade_int(rw)
#define	_rw_downgrade(rw, f, l)						\
	__rw_downgrade_int(rw)
#endif
#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
#define	_rw_assert(rw, w, f, l)						\
	__rw_assert(&(rw)->rw_lock, w, f, l)
#endif

/*
 * Public interface for lock operations.
 */

#ifndef LOCK_DEBUG
#error LOCK_DEBUG not defined, include <sys/lock.h> before <sys/rwlock.h>
#endif
#if LOCK_DEBUG > 0 || defined(RWLOCK_NOINLINE)
#define	rw_wlock(rw)		_rw_wlock((rw), LOCK_FILE, LOCK_LINE)
#define	rw_wunlock(rw)		_rw_wunlock((rw), LOCK_FILE, LOCK_LINE)
#else
#define	rw_wlock(rw)							\
	__rw_wlock((rw), curthread, LOCK_FILE, LOCK_LINE)
#define	rw_wunlock(rw)							\
	__rw_wunlock((rw), curthread, LOCK_FILE, LOCK_LINE)
#endif
#define	rw_rlock(rw)		_rw_rlock((rw), LOCK_FILE, LOCK_LINE)
#define	rw_runlock(rw)		_rw_runlock((rw), LOCK_FILE, LOCK_LINE)
#define	rw_try_rlock(rw)	_rw_try_rlock((rw), LOCK_FILE, LOCK_LINE)
#define	rw_try_upgrade(rw)	_rw_try_upgrade((rw), LOCK_FILE, LOCK_LINE)
#define	rw_try_wlock(rw)	_rw_try_wlock((rw), LOCK_FILE, LOCK_LINE)
#define	rw_downgrade(rw)	_rw_downgrade((rw), LOCK_FILE, LOCK_LINE)
#define	rw_unlock(rw)	do {						\
	if (rw_wowned(rw))						\
		rw_wunlock(rw);						\
	else								\
		rw_runlock(rw);						\
} while (0)
#define	rw_sleep(chan, rw, pri, wmesg, timo)				\
	_sleep((chan), &(rw)->lock_object, (pri), (wmesg),		\
	    tick_sbt * (timo), 0, C_HARDCLOCK)

#define	rw_initialized(rw)	lock_initialized(&(rw)->lock_object)
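
/*
 * A minimal usage sketch of the public interface (the lock, its name and
 * the protected counter are hypothetical):
 *
 *	static struct rwlock foo_lock;
 *	static int foo_count;
 *
 *	rw_init(&foo_lock, "foo lock");
 *
 *	rw_rlock(&foo_lock);		read-locked; other readers may enter
 *	(void)foo_count;
 *	rw_runlock(&foo_lock);
 *
 *	rw_wlock(&foo_lock);		write-locked; exclusive access
 *	foo_count++;
 *	rw_wunlock(&foo_lock);
 *
 *	rw_destroy(&foo_lock);
 */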

struct rw_args {
	void		*ra_rw;
	const char	*ra_desc;
	int		ra_flags;
};

#define	RW_SYSINIT_FLAGS(name, rw, desc, flags)				\
	static struct rw_args name##_args = {				\
		(rw),							\
		(desc),							\
		(flags),						\
	};								\
	SYSINIT(name##_rw_sysinit, SI_SUB_LOCK, SI_ORDER_MIDDLE,	\
	    rw_sysinit, &name##_args);					\
	SYSUNINIT(name##_rw_sysuninit, SI_SUB_LOCK, SI_ORDER_MIDDLE,	\
	    _rw_destroy, __DEVOLATILE(void *, &(rw)->rw_lock))

#define	RW_SYSINIT(name, rw, desc)	RW_SYSINIT_FLAGS(name, rw, desc, 0)
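
/*
 * A hypothetical example of initializing a lock from a SYSINIT (the names
 * are illustrative only):
 *
 *	static struct rwlock foo_lock;
 *	RW_SYSINIT(foo_lock_init, &foo_lock, "foo lock");
 *
 * This initializes foo_lock at SI_SUB_LOCK time and destroys it via the
 * paired SYSUNINIT.
 */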

/*
 * Options passed to rw_init_flags().
 */
#define	RW_DUPOK	0x01
#define	RW_NOPROFILE	0x02
#define	RW_NOWITNESS	0x04
#define	RW_QUIET	0x08
#define	RW_RECURSE	0x10
#define	RW_NEW		0x20
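
/*
 * For example (the lock name is hypothetical), a lock that its write owner
 * may acquire recursively and that is exempt from WITNESS duplicate-lock
 * warnings would be initialized as:
 *
 *	rw_init_flags(&foo_lock, "foo lock", RW_RECURSE | RW_DUPOK);
 */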

/*
 * The INVARIANTS-enabled rw_assert() functionality.
 *
 * The constants also need to be defined under INVARIANT_SUPPORT, since
 * _rw_assert() itself uses them and INVARIANT_SUPPORT implies that
 * _rw_assert() must build.
 */
#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
#define	RA_LOCKED		LA_LOCKED
#define	RA_RLOCKED		LA_SLOCKED
#define	RA_WLOCKED		LA_XLOCKED
#define	RA_UNLOCKED		LA_UNLOCKED
#define	RA_RECURSED		LA_RECURSED
#define	RA_NOTRECURSED		LA_NOTRECURSED
#endif

#ifdef INVARIANTS
#define	rw_assert(rw, what)	_rw_assert((rw), (what), LOCK_FILE, LOCK_LINE)
#else
#define	rw_assert(rw, what)
#endif
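
/*
 * For example (hypothetical), a function that requires its caller to hold
 * foo_lock exclusively might start with:
 *
 *	rw_assert(&foo_lock, RA_WLOCKED);
 *
 * Under INVARIANTS this panics if the lock is not write-locked by the
 * current thread; in other kernels it expands to nothing.
 */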

#endif /* _KERNEL */
#endif /* !_SYS_RWLOCK_H_ */