/*-
 * Copyright (c) 2007 Attilio Rao <attilio@freebsd.org>
 * Copyright (c) 2001 Jason Evans <jasone@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 * $FreeBSD: head/sys/sys/sx.h 298981 2016-05-03 15:14:17Z pfg $
 */

#ifndef	_SYS_SX_H_
#define	_SYS_SX_H_

#include <sys/_lock.h>
#include <sys/_sx.h>

#ifdef	_KERNEL
#include <sys/pcpu.h>
#include <sys/lock_profile.h>
#include <sys/lockstat.h>
#include <machine/atomic.h>
#endif

/*
 * In general, the sx locks and rwlocks use very similar algorithms.
 * The main difference in the implementations is how threads are
 * blocked when a lock is unavailable.  For this, sx locks use sleep
 * queues which do not support priority propagation, and rwlocks use
 * turnstiles which do.
 *
 * The sx_lock field consists of several fields.  The low bit
 * indicates if the lock is locked with a shared or exclusive lock.  A
 * value of 0 indicates an exclusive lock, and a value of 1 indicates
 * a shared lock.  Bit 1 is a boolean indicating if there are any
 * threads waiting for a shared lock.  Bit 2 is a boolean indicating
 * if there are any threads waiting for an exclusive lock.  Bit 3 is a
 * boolean indicating if an exclusive lock is recursively held.  The
 * rest of the variable's definition is dependent on the value of the
 * first bit.  For an exclusive lock, it is a pointer to the thread
 * holding the lock, similar to the mtx_lock field of mutexes.  For
 * shared locks, it is a count of read locks that are held.
 *
 * When the lock is not locked by any thread, it is encoded as a
 * shared lock with zero waiters.
 */

#define	SX_LOCK_SHARED			0x01
#define	SX_LOCK_SHARED_WAITERS		0x02
#define	SX_LOCK_EXCLUSIVE_WAITERS	0x04
#define	SX_LOCK_RECURSED		0x08
#define	SX_LOCK_FLAGMASK						\
	(SX_LOCK_SHARED | SX_LOCK_SHARED_WAITERS |			\
	SX_LOCK_EXCLUSIVE_WAITERS | SX_LOCK_RECURSED)

#define	SX_OWNER(x)			((x) & ~SX_LOCK_FLAGMASK)
#define	SX_SHARERS_SHIFT		4
#define	SX_SHARERS(x)			(SX_OWNER(x) >> SX_SHARERS_SHIFT)
#define	SX_SHARERS_LOCK(x)						\
	((x) << SX_SHARERS_SHIFT | SX_LOCK_SHARED)
#define	SX_ONE_SHARER			(1 << SX_SHARERS_SHIFT)

#define	SX_LOCK_UNLOCKED		SX_SHARERS_LOCK(0)
#define	SX_LOCK_DESTROYED						\
	(SX_LOCK_SHARED_WAITERS | SX_LOCK_EXCLUSIVE_WAITERS)
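
/*
 * Illustrative sketch only, not part of this header's interface: a
 * hypothetical helper (the name is invented) showing how the macros
 * above decode a raw lock word.
 */
#if 0
static __inline int
sx_word_read_locked(uintptr_t x)
{

	/*
	 * The low bit selects the encoding; for a shared lock the bits
	 * above the flags hold the reader count, so SX_LOCK_UNLOCKED
	 * (a shared lock with zero sharers) decodes as not held.
	 */
	return ((x & SX_LOCK_SHARED) != 0 && SX_SHARERS(x) > 0);
}
#endif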

#ifdef _KERNEL

#define	sx_recurse	lock_object.lo_data

/*
 * Function prototypes.  Routines that start with an underscore are not part
 * of the public interface and are wrapped with a macro.
 */
void	sx_sysinit(void *arg);
#define	sx_init(sx, desc)	sx_init_flags((sx), (desc), 0)
void	sx_init_flags(struct sx *sx, const char *description, int opts);
void	sx_destroy(struct sx *sx);
int	sx_try_slock_(struct sx *sx, const char *file, int line);
int	sx_try_xlock_(struct sx *sx, const char *file, int line);
int	sx_try_upgrade_(struct sx *sx, const char *file, int line);
void	sx_downgrade_(struct sx *sx, const char *file, int line);
int	_sx_slock(struct sx *sx, int opts, const char *file, int line);
int	_sx_xlock(struct sx *sx, int opts, const char *file, int line);
void	_sx_sunlock(struct sx *sx, const char *file, int line);
void	_sx_xunlock(struct sx *sx, const char *file, int line);
int	_sx_xlock_hard(struct sx *sx, uintptr_t tid, int opts,
	    const char *file, int line);
int	_sx_slock_hard(struct sx *sx, int opts, const char *file, int line);
void	_sx_xunlock_hard(struct sx *sx, uintptr_t tid, const char *file, int
	    line);
void	_sx_sunlock_hard(struct sx *sx, const char *file, int line);
#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
void	_sx_assert(const struct sx *sx, int what, const char *file, int line);
#endif
#ifdef DDB
int	sx_chain(struct thread *td, struct thread **ownerp);
#endif

struct sx_args {
	struct sx	*sa_sx;
	const char	*sa_desc;
	int		sa_flags;
};

#define	SX_SYSINIT_FLAGS(name, sxa, desc, flags)			\
	static struct sx_args name##_args = {				\
		(sxa),							\
		(desc),							\
		(flags)							\
	};								\
	SYSINIT(name##_sx_sysinit, SI_SUB_LOCK, SI_ORDER_MIDDLE,	\
	    sx_sysinit, &name##_args);					\
	SYSUNINIT(name##_sx_sysuninit, SI_SUB_LOCK, SI_ORDER_MIDDLE,	\
	    sx_destroy, (sxa))

#define	SX_SYSINIT(name, sxa, desc)	SX_SYSINIT_FLAGS(name, sxa, desc, 0)
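
/*
 * Illustrative usage sketch (the lock and subsystem names are hypothetical):
 * a file-scope sx lock registered with SX_SYSINIT so that it is initialized
 * automatically during boot and destroyed again at shutdown.
 */
#if 0
static struct sx example_data_lock;
SX_SYSINIT(example_data, &example_data_lock, "example data lock");
#endif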

/*
 * Full lock operations that are suitable to be inlined in non-debug kernels.
 * If the lock can't be acquired or released trivially then the work is
 * deferred to 'tougher' functions.
 */

/* Acquire an exclusive lock. */
static __inline int
__sx_xlock(struct sx *sx, struct thread *td, int opts, const char *file,
    int line)
{
	uintptr_t tid = (uintptr_t)td;
	int error = 0;

	if (!atomic_cmpset_acq_ptr(&sx->sx_lock, SX_LOCK_UNLOCKED, tid))
		error = _sx_xlock_hard(sx, tid, opts, file, line);
	else
		LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire, sx,
		    0, 0, file, line, LOCKSTAT_WRITER);

	return (error);
}

/* Release an exclusive lock. */
static __inline void
__sx_xunlock(struct sx *sx, struct thread *td, const char *file, int line)
{
	uintptr_t tid = (uintptr_t)td;

	if (sx->sx_recurse == 0)
		LOCKSTAT_PROFILE_RELEASE_RWLOCK(sx__release, sx,
		    LOCKSTAT_WRITER);
	if (!atomic_cmpset_rel_ptr(&sx->sx_lock, tid, SX_LOCK_UNLOCKED))
		_sx_xunlock_hard(sx, tid, file, line);
}

/* Acquire a shared lock. */
static __inline int
__sx_slock(struct sx *sx, int opts, const char *file, int line)
{
	uintptr_t x = sx->sx_lock;
	int error = 0;

	if (!(x & SX_LOCK_SHARED) ||
	    !atomic_cmpset_acq_ptr(&sx->sx_lock, x, x + SX_ONE_SHARER))
		error = _sx_slock_hard(sx, opts, file, line);
	else
		LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire, sx,
		    0, 0, file, line, LOCKSTAT_READER);

	return (error);
}

/*
 * Release a shared lock.  We can just drop a single shared lock so
 * long as we aren't trying to drop the last shared lock when other
 * threads are waiting for an exclusive lock.  This takes advantage of
 * the fact that an unlocked lock is encoded as a shared lock with a
 * count of 0.
 */
static __inline void
__sx_sunlock(struct sx *sx, const char *file, int line)
{
	uintptr_t x = sx->sx_lock;

	LOCKSTAT_PROFILE_RELEASE_RWLOCK(sx__release, sx, LOCKSTAT_READER);
	if (x == (SX_SHARERS_LOCK(1) | SX_LOCK_EXCLUSIVE_WAITERS) ||
	    !atomic_cmpset_rel_ptr(&sx->sx_lock, x, x - SX_ONE_SHARER))
		_sx_sunlock_hard(sx, file, line);
}

/*
 * Public interface for lock operations.
 */
#ifndef LOCK_DEBUG
#error	"LOCK_DEBUG not defined, include <sys/lock.h> before <sys/sx.h>"
#endif
#if	(LOCK_DEBUG > 0) || defined(SX_NOINLINE)
#define	sx_xlock_(sx, file, line)					\
	(void)_sx_xlock((sx), 0, (file), (line))
#define	sx_xlock_sig_(sx, file, line)					\
	_sx_xlock((sx), SX_INTERRUPTIBLE, (file), (line))
#define	sx_xunlock_(sx, file, line)					\
	_sx_xunlock((sx), (file), (line))
#define	sx_slock_(sx, file, line)					\
	(void)_sx_slock((sx), 0, (file), (line))
#define	sx_slock_sig_(sx, file, line)					\
	_sx_slock((sx), SX_INTERRUPTIBLE, (file), (line))
#define	sx_sunlock_(sx, file, line)					\
	_sx_sunlock((sx), (file), (line))
#else
#define	sx_xlock_(sx, file, line)					\
	(void)__sx_xlock((sx), curthread, 0, (file), (line))
#define	sx_xlock_sig_(sx, file, line)					\
	__sx_xlock((sx), curthread, SX_INTERRUPTIBLE, (file), (line))
#define	sx_xunlock_(sx, file, line)					\
	__sx_xunlock((sx), curthread, (file), (line))
#define	sx_slock_(sx, file, line)					\
	(void)__sx_slock((sx), 0, (file), (line))
#define	sx_slock_sig_(sx, file, line)					\
	__sx_slock((sx), SX_INTERRUPTIBLE, (file), (line))
#define	sx_sunlock_(sx, file, line)					\
	__sx_sunlock((sx), (file), (line))
#endif	/* LOCK_DEBUG > 0 || SX_NOINLINE */
#define	sx_try_slock(sx)	sx_try_slock_((sx), LOCK_FILE, LOCK_LINE)
#define	sx_try_xlock(sx)	sx_try_xlock_((sx), LOCK_FILE, LOCK_LINE)
#define	sx_try_upgrade(sx)	sx_try_upgrade_((sx), LOCK_FILE, LOCK_LINE)
#define	sx_downgrade(sx)	sx_downgrade_((sx), LOCK_FILE, LOCK_LINE)
#ifdef INVARIANTS
#define	sx_assert_(sx, what, file, line)				\
	_sx_assert((sx), (what), (file), (line))
#else
#define	sx_assert_(sx, what, file, line)	(void)0
#endif

#define	sx_xlock(sx)		sx_xlock_((sx), LOCK_FILE, LOCK_LINE)
#define	sx_xlock_sig(sx)	sx_xlock_sig_((sx), LOCK_FILE, LOCK_LINE)
#define	sx_xunlock(sx)		sx_xunlock_((sx), LOCK_FILE, LOCK_LINE)
#define	sx_slock(sx)		sx_slock_((sx), LOCK_FILE, LOCK_LINE)
#define	sx_slock_sig(sx)	sx_slock_sig_((sx), LOCK_FILE, LOCK_LINE)
#define	sx_sunlock(sx)		sx_sunlock_((sx), LOCK_FILE, LOCK_LINE)
#define	sx_assert(sx, what)	sx_assert_((sx), (what), __FILE__, __LINE__)

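/*
 * Illustrative usage sketch (hypothetical lock and data): the common
 * reader/writer pattern built from the public macros above.  Readers
 * take the shared lock, writers the exclusive lock.
 */
#if 0
static struct sx foo_lock;
static int foo_value;

static int
foo_read(void)
{
	int v;

	sx_slock(&foo_lock);		/* shared (read) lock */
	v = foo_value;
	sx_sunlock(&foo_lock);
	return (v);
}

static void
foo_write(int v)
{

	sx_xlock(&foo_lock);		/* exclusive (write) lock */
	sx_assert(&foo_lock, SA_XLOCKED);
	foo_value = v;
	sx_xunlock(&foo_lock);
}
#endif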

/*
 * Return a pointer to the owning thread if the lock is exclusively
 * locked.
 */
#define	sx_xholder(sx)							\
	((sx)->sx_lock & SX_LOCK_SHARED ? NULL :			\
	(struct thread *)SX_OWNER((sx)->sx_lock))

#define	sx_xlocked(sx)							\
	(((sx)->sx_lock & ~(SX_LOCK_FLAGMASK & ~SX_LOCK_SHARED)) ==	\
	    (uintptr_t)curthread)

#define	sx_unlock_(sx, file, line) do {					\
	if (sx_xlocked(sx))						\
		sx_xunlock_(sx, file, line);				\
	else								\
		sx_sunlock_(sx, file, line);				\
} while (0)

#define	sx_unlock(sx)	sx_unlock_((sx), LOCK_FILE, LOCK_LINE)

#define	sx_sleep(chan, sx, pri, wmesg, timo)				\
	_sleep((chan), &(sx)->lock_object, (pri), (wmesg),		\
	    tick_sbt * (timo), 0, C_HARDCLOCK)
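
/*
 * Illustrative sketch (hypothetical names): sx_sleep() behaves like
 * msleep(), releasing the given sx lock while the thread sleeps on the
 * channel and reacquiring it before returning.
 */
#if 0
static struct sx queue_lock;
static int queue_len;

static void
queue_wait_nonempty(void)
{

	sx_xlock(&queue_lock);
	while (queue_len == 0)
		sx_sleep(&queue_len, &queue_lock, 0, "qwait", 0);
	sx_xunlock(&queue_lock);
}
#endif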

/*
 * Options passed to sx_init_flags().
 */
#define	SX_DUPOK		0x01
#define	SX_NOPROFILE		0x02
#define	SX_NOWITNESS		0x04
#define	SX_QUIET		0x08
#define	SX_NOADAPTIVE		0x10
#define	SX_RECURSE		0x20
#define	SX_NEW			0x40
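
/*
 * Illustrative sketch (hypothetical names): passing option flags to
 * sx_init_flags() to allow recursive exclusive acquisition and to skip
 * witness checking for this particular lock.
 */
#if 0
static struct sx cfg_lock;

static void
cfg_lock_setup(void)
{

	sx_init_flags(&cfg_lock, "config lock", SX_RECURSE | SX_NOWITNESS);
}
#endif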

/*
 * Options passed to sx_*lock_hard().
 */
#define	SX_INTERRUPTIBLE	0x40

#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
#define	SA_LOCKED		LA_LOCKED
#define	SA_SLOCKED		LA_SLOCKED
#define	SA_XLOCKED		LA_XLOCKED
#define	SA_UNLOCKED		LA_UNLOCKED
#define	SA_RECURSED		LA_RECURSED
#define	SA_NOTRECURSED		LA_NOTRECURSED

/* Backwards compatibility. */
#define	SX_LOCKED		LA_LOCKED
#define	SX_SLOCKED		LA_SLOCKED
#define	SX_XLOCKED		LA_XLOCKED
#define	SX_UNLOCKED		LA_UNLOCKED
#define	SX_RECURSED		LA_RECURSED
#define	SX_NOTRECURSED		LA_NOTRECURSED
#endif

#endif /* _KERNEL */

#endif /* !_SYS_SX_H_ */