/*-
 * Copyright (c) 1997 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from BSDI $Id: mutex.h,v 2.7.2.35 2000/04/27 03:10:26 cp Exp $
 * $FreeBSD: head/sys/sys/mutex.h 88088 2001-12-18 00:27:18Z jhb $
 */

#ifndef _SYS_MUTEX_H_
#define _SYS_MUTEX_H_

#ifndef LOCORE
#include <sys/queue.h>
#include <sys/_lock.h>
#include <sys/_mutex.h>

#ifdef _KERNEL
#include <sys/pcpu.h>
#include <machine/atomic.h>
#include <machine/cpufunc.h>
#endif	/* _KERNEL */
#endif	/* !LOCORE */

#include <machine/mutex.h>

#ifdef _KERNEL

/*
 * Mutex types and options passed to mtx_init().  MTX_QUIET can also be
 * passed in.
 */
#define	MTX_DEF		0x00000000	/* DEFAULT (sleep) lock */
#define	MTX_SPIN	0x00000001	/* Spin lock (disables interrupts) */
#define	MTX_RECURSE	0x00000004	/* Option: lock allowed to recurse */
#define	MTX_NOWITNESS	0x00000008	/* Don't do any witness checking. */
#define	MTX_SLEEPABLE	0x00000010	/* We can sleep with this lock. */
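
/*
 * Illustrative sketch (not part of this header): a typical lifetime for
 * a MTX_DEF mutex using the flags above.  The name "foo_mtx" and the
 * description string are hypothetical.
 *
 *	struct mtx foo_mtx;
 *
 *	mtx_init(&foo_mtx, "foo state lock", MTX_DEF | MTX_RECURSE);
 *	...
 *	mtx_destroy(&foo_mtx);
 */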

/*
 * Option flags passed to certain lock/unlock routines, through the use
 * of corresponding mtx_{lock,unlock}_flags() interface macros.
 */
#define	MTX_NOSWITCH	LOP_NOSWITCH	/* Do not switch on release */
#define	MTX_QUIET	LOP_QUIET	/* Don't log a mutex event */

/*
 * State bits kept in mutex->mtx_lock, for the DEFAULT lock type.  None of
 * this, with the exception of MTX_UNOWNED, applies to spin locks.
 */
#define	MTX_RECURSED	0x00000001	/* lock recursed (for MTX_DEF only) */
#define	MTX_CONTESTED	0x00000002	/* lock contested (for MTX_DEF only) */
#define	MTX_UNOWNED	0x00000004	/* Cookie for free mutex */
#define	MTX_FLAGMASK	~(MTX_RECURSED | MTX_CONTESTED)

#endif	/* _KERNEL */

#ifndef LOCORE

/*
 * XXX: Friendly reminder to fix things in MP code that is presently being
 * XXX: worked on.
 */
#define mp_fixme(string)

#ifdef _KERNEL

/*
 * Prototypes
 *
 * NOTE: Functions prefixed with `_' (underscore) are exported to other parts
 *	 of the kernel via macros, thus allowing us to use the cpp LOCK_FILE
 *	 and LOCK_LINE.  These functions should not be called directly by any
 *	 code using the API; the corresponding macros cover their
 *	 functionality.
 *
 * [See below for descriptions]
 *
 */
void	mtx_init(struct mtx *m, const char *description, int opts);
void	mtx_destroy(struct mtx *m);
void	_mtx_lock_sleep(struct mtx *m, int opts, const char *file, int line);
void	_mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line);
void	_mtx_lock_spin(struct mtx *m, int opts, const char *file, int line);
void	_mtx_unlock_spin(struct mtx *m, int opts, const char *file, int line);
int	_mtx_trylock(struct mtx *m, int opts, const char *file, int line);
void	_mtx_lock_flags(struct mtx *m, int opts, const char *file, int line);
void	_mtx_unlock_flags(struct mtx *m, int opts, const char *file, int line);
void	_mtx_lock_spin_flags(struct mtx *m, int opts, const char *file,
			     int line);
void	_mtx_unlock_spin_flags(struct mtx *m, int opts, const char *file,
			     int line);
#ifdef INVARIANT_SUPPORT
void	_mtx_assert(struct mtx *m, int what, const char *file, int line);
#endif
int	mtx_lock_giant(int sysctlvar);
void	mtx_unlock_giant(int s);

/*
 * We define our machine-independent (unoptimized) mutex micro-operations
 * here, if they are not already defined in the machine-dependent mutex.h.
 */

/* Actually obtain mtx_lock */
#ifndef _obtain_lock
#define _obtain_lock(mp, tid)						\
	atomic_cmpset_acq_ptr(&(mp)->mtx_lock, (void *)MTX_UNOWNED, (tid))
#endif

/* Actually release mtx_lock */
#ifndef _release_lock
#define _release_lock(mp, tid)						\
	atomic_cmpset_rel_ptr(&(mp)->mtx_lock, (tid), (void *)MTX_UNOWNED)
#endif

/* Actually release mtx_lock quickly, assuming we own it. */
#ifndef _release_lock_quick
#define _release_lock_quick(mp)						\
	atomic_store_rel_ptr(&(mp)->mtx_lock, (void *)MTX_UNOWNED)
#endif
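
/*
 * Illustrative sketch (not part of this header): the lock-word protocol
 * the micro-operations above implement.  An unowned mutex holds the
 * MTX_UNOWNED cookie; acquisition atomically swaps in the owning thread
 * pointer, and the low bits carry the state flags:
 *
 *	mtx_lock == MTX_UNOWNED			free, _obtain_lock() succeeds
 *	mtx_lock == (uintptr_t)td		owned by thread `td'
 *	mtx_lock == td | MTX_CONTESTED		owned, with waiters queued
 *
 * This is why mtx_owned() below masks mtx_lock with MTX_FLAGMASK before
 * comparing it against curthread.
 */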

/*
 * Obtain a sleep lock inline, or call the "hard" function if we can't get
 * it easily.
 */
#ifndef _get_sleep_lock
#define _get_sleep_lock(mp, tid, opts, file, line) do {			\
	if (!_obtain_lock((mp), (tid)))					\
		_mtx_lock_sleep((mp), (opts), (file), (line));		\
} while (0)
#endif

/*
 * Obtain a spin lock inline, or call the "hard" function if we can't get
 * it easily.  For spinlocks, we handle recursion inline (it turns out that
 * function calls can be significantly expensive on some architectures).
 * Since spin locks are not _too_ common, inlining this code is not too big
 * a deal.
 */
#ifndef _get_spin_lock
#define _get_spin_lock(mp, tid, opts, file, line) do {			\
	critical_enter();						\
	if (!_obtain_lock((mp), (tid))) {				\
		if ((mp)->mtx_lock == (uintptr_t)(tid))			\
			(mp)->mtx_recurse++;				\
		else							\
			_mtx_lock_spin((mp), (opts), (file), (line));	\
	}								\
} while (0)
#endif

/*
 * Release a sleep lock inline, or call the "hard" function if we can't do
 * it easily.
 */
#ifndef _rel_sleep_lock
#define _rel_sleep_lock(mp, tid, opts, file, line) do {			\
	if (!_release_lock((mp), (tid)))				\
		_mtx_unlock_sleep((mp), (opts), (file), (line));	\
} while (0)
#endif

/*
 * For spinlocks, we can handle everything inline, as it's pretty simple and
 * a function call would be too expensive (at least on some architectures).
 * Since spin locks are not _too_ common, inlining this code is not too big
 * a deal.
 *
 * Since we always perform a critical_enter() when attempting to acquire a
 * spin lock, we need to always perform a matching critical_exit() when
 * releasing a spin lock.  This includes the recursion cases.
 */
#ifndef _rel_spin_lock
#define _rel_spin_lock(mp) do {						\
	if (mtx_recursed((mp)))						\
		(mp)->mtx_recurse--;					\
	else								\
		_release_lock_quick((mp));				\
	critical_exit();						\
} while (0)
#endif

/*
 * Exported lock manipulation interface.
 *
 * mtx_lock(m) locks MTX_DEF mutex `m'
 *
 * mtx_lock_spin(m) locks MTX_SPIN mutex `m'
 *
 * mtx_unlock(m) unlocks MTX_DEF mutex `m'
 *
 * mtx_unlock_spin(m) unlocks MTX_SPIN mutex `m'
 *
 * mtx_lock_spin_flags(m, opts) and mtx_lock_flags(m, opts) lock mutex `m'
 *     and pass option flags `opts' to the "hard" function, if required.
 *     With these routines, it is possible to pass flags such as MTX_QUIET
 *     and/or MTX_NOSWITCH to the appropriate lock manipulation routines.
 *
 * mtx_trylock(m) attempts to acquire MTX_DEF mutex `m' but doesn't sleep if
 *     it cannot.  Rather, it returns 0 on failure and non-zero on success.
 *     It does NOT handle recursion; we assume that a caller properly using
 *     this part of the interface will know that the lock in question is
 *     _not_ recursed.
 *
 * mtx_trylock_flags(m, opts) is used the same way as mtx_trylock() but
 *     accepts relevant option flags `opts'.
 *
 * mtx_initialized(m) returns non-zero if the lock `m' has been initialized.
 *
 * mtx_owned(m) returns non-zero if the current thread owns the lock `m'.
 *
 * mtx_recursed(m) returns non-zero if the lock `m' is presently recursed.
 */
#define mtx_lock(m)		mtx_lock_flags((m), 0)
#define mtx_lock_spin(m)	mtx_lock_spin_flags((m), 0)
#define mtx_trylock(m)		mtx_trylock_flags((m), 0)
#define mtx_unlock(m)		mtx_unlock_flags((m), 0)
#define mtx_unlock_spin(m)	mtx_unlock_spin_flags((m), 0)
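
/*
 * Illustrative sketch (not part of this header): the common acquire/release
 * pattern using the interface above.  The mutex name and the code between
 * the lock and unlock operations are hypothetical.
 *
 *	mtx_lock(&foo_mtx);
 *	... access data protected by foo_mtx ...
 *	mtx_unlock(&foo_mtx);
 *
 *	if (mtx_trylock(&foo_mtx) != 0) {
 *		... got the lock without sleeping ...
 *		mtx_unlock(&foo_mtx);
 *	}
 */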

struct mtx *mtx_pool_find(void *ptr);
struct mtx *mtx_pool_alloc(void);
void mtx_pool_lock(void *ptr);
void mtx_pool_unlock(void *ptr);

extern int mtx_pool_valid;

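/*
 * Illustrative sketch (not part of this header): pool mutexes map an
 * arbitrary pointer onto a shared, pre-allocated mutex, which is handy
 * when a structure is too small or too numerous to carry its own lock.
 * The pointer `p' is hypothetical.
 *
 *	mtx_pool_lock(p);
 *	... brief critical section keyed on `p' ...
 *	mtx_pool_unlock(p);
 */
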
#ifndef LOCK_DEBUG
#error LOCK_DEBUG not defined, include <sys/lock.h> before <sys/mutex.h>
#endif
#if LOCK_DEBUG > 0
#define	mtx_lock_flags(m, opts)						\
	_mtx_lock_flags((m), (opts), LOCK_FILE, LOCK_LINE)
#define	mtx_unlock_flags(m, opts)					\
	_mtx_unlock_flags((m), (opts), LOCK_FILE, LOCK_LINE)
#define	mtx_lock_spin_flags(m, opts)					\
	_mtx_lock_spin_flags((m), (opts), LOCK_FILE, LOCK_LINE)
#define	mtx_unlock_spin_flags(m, opts)					\
	_mtx_unlock_spin_flags((m), (opts), LOCK_FILE, LOCK_LINE)
#else
#define	mtx_lock_flags(m, opts)						\
	_get_sleep_lock((m), curthread, (opts), LOCK_FILE, LOCK_LINE)
#define	mtx_unlock_flags(m, opts)					\
	_rel_sleep_lock((m), curthread, (opts), LOCK_FILE, LOCK_LINE)
#define	mtx_lock_spin_flags(m, opts)					\
	_get_spin_lock((m), curthread, (opts), LOCK_FILE, LOCK_LINE)
#define	mtx_unlock_spin_flags(m, opts)					\
	_rel_spin_lock((m))
#endif

#define mtx_trylock_flags(m, opts)					\
	_mtx_trylock((m), (opts), LOCK_FILE, LOCK_LINE)

#define	mtx_initialized(m)	((m)->mtx_object.lo_flags & LO_INITIALIZED)

#define mtx_owned(m)	(((m)->mtx_lock & MTX_FLAGMASK) == (uintptr_t)curthread)

#define mtx_recursed(m)	((m)->mtx_recurse != 0)

/*
 * Global locks.
 */
extern struct mtx	sched_lock;
extern struct mtx	Giant;

/*
 * Giant lock sysctl variables used by other modules.
 */
extern int kern_giant_proc;
extern int kern_giant_file;

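/*
 * Illustrative sketch (not part of this header): the mtx_lock_giant()
 * convention, as suggested by the prototypes above.  Giant is taken or
 * skipped depending on the sysctl knob, and the returned token is handed
 * back to mtx_unlock_giant().  The surrounding code is hypothetical.
 *
 *	int s;
 *
 *	s = mtx_lock_giant(kern_giant_file);
 *	... file-related work that may still need Giant ...
 *	mtx_unlock_giant(s);
 */
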
/*
 * Giant lock manipulation and clean exit macros.
 * UGAR() replaces a plain return statement with one that releases Giant
 * before returning.
 *
 * Note that DROP_GIANT*() needs to be paired with PICKUP_GIANT().
 */
#define DROP_GIANT_NOSWITCH()						\
do {									\
	int _giantcnt;							\
	WITNESS_SAVE_DECL(Giant);					\
									\
	if (mtx_owned(&Giant))						\
		WITNESS_SAVE(&Giant.mtx_object, Giant);			\
	for (_giantcnt = 0; mtx_owned(&Giant); _giantcnt++)		\
		mtx_unlock_flags(&Giant, MTX_NOSWITCH)

#define DROP_GIANT()							\
do {									\
	int _giantcnt;							\
	WITNESS_SAVE_DECL(Giant);					\
									\
	if (mtx_owned(&Giant))						\
		WITNESS_SAVE(&Giant.mtx_object, Giant);			\
	for (_giantcnt = 0; mtx_owned(&Giant); _giantcnt++)		\
		mtx_unlock(&Giant)

#define PICKUP_GIANT()							\
	mtx_assert(&Giant, MA_NOTOWNED);				\
	while (_giantcnt--)						\
		mtx_lock(&Giant);					\
	if (mtx_owned(&Giant))						\
		WITNESS_RESTORE(&Giant.mtx_object, Giant);		\
} while (0)

#define PARTIAL_PICKUP_GIANT()						\
	mtx_assert(&Giant, MA_NOTOWNED);				\
	while (_giantcnt--)						\
		mtx_lock(&Giant);					\
	if (mtx_owned(&Giant))						\
		WITNESS_RESTORE(&Giant.mtx_object, Giant)

#define	UGAR(rval) do {							\
	int _val = (rval);						\
	mtx_unlock(&Giant);						\
	return (_val);							\
} while (0)
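
/*
 * Illustrative sketch (not part of this header): DROP_GIANT() opens a
 * block that releases all (possibly recursed) acquisitions of Giant, and
 * PICKUP_GIANT() closes that block and reacquires Giant the same number
 * of times, so the two must appear in the same lexical scope.  The code
 * in the middle is hypothetical.
 *
 *	DROP_GIANT();
 *	... code that may sleep or must not hold Giant ...
 *	PICKUP_GIANT();
 */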

/*
 * The INVARIANTS-enabled mtx_assert() functionality.
 *
 * The constants need to be defined whenever INVARIANT_SUPPORT is in effect,
 * since _mtx_assert() itself uses them and INVARIANT_SUPPORT implies that
 * _mtx_assert() must build.
 */
#ifdef INVARIANT_SUPPORT
#define MA_OWNED	0x01
#define MA_NOTOWNED	0x02
#define MA_RECURSED	0x04
#define MA_NOTRECURSED	0x08
#endif /* INVARIANT_SUPPORT */

#ifdef INVARIANTS
#define	mtx_assert(m, what)						\
	_mtx_assert((m), (what), __FILE__, __LINE__)

#define GIANT_REQUIRED	mtx_assert(&Giant, MA_OWNED)

#else	/* INVARIANTS */
#define mtx_assert(m, what)
#define GIANT_REQUIRED
#endif	/* INVARIANTS */
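
/*
 * Illustrative sketch (not part of this header): asserting lock state at
 * the top of a function that requires its caller to hold a lock.  The
 * function and mutex names are hypothetical.
 *
 *	static void
 *	foo_modify(struct foo *fp)
 *	{
 *
 *		mtx_assert(&fp->foo_mtx, MA_OWNED);
 *		... modify *fp ...
 *	}
 */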

#endif	/* _KERNEL */
#endif	/* !LOCORE */
#endif	/* _SYS_MUTEX_H_ */