/*-
 * Copyright (c) 1997 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from BSDI $Id: mutex.h,v 2.7.2.35 2000/04/27 03:10:26 cp Exp $
 * $FreeBSD: head/sys/sys/mutex.h 167387 2007-03-09 22:41:01Z jhb $
 */

#ifndef _SYS_MUTEX_H_
#define _SYS_MUTEX_H_

#ifndef LOCORE
#include <sys/queue.h>
#include <sys/_lock.h>
#include <sys/_mutex.h>

#ifdef _KERNEL
#include <sys/pcpu.h>
#include <sys/lock_profile.h>
#include <machine/atomic.h>
#include <machine/cpufunc.h>
#endif	/* _KERNEL */
#endif	/* !LOCORE */

#include <machine/mutex.h>

#ifdef _KERNEL

/*
 * Mutex types and options passed to mtx_init().  MTX_QUIET and MTX_DUPOK
 * can also be passed in.
 */
#define	MTX_DEF		0x00000000	/* DEFAULT (sleep) lock */
#define MTX_SPIN	0x00000001	/* Spin lock (disables interrupts) */
#define MTX_RECURSE	0x00000004	/* Option: lock allowed to recurse */
#define	MTX_NOWITNESS	0x00000008	/* Don't do any witness checking. */
#define MTX_NOPROFILE   0x00000020	/* Don't profile this lock */

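/*
 * Illustrative sketch (not part of the original header): typical ways a
 * consumer might combine these flags with mtx_init().  The mutexes and
 * name strings below are hypothetical.
 *
 *	struct mtx data_mtx, intr_mtx;
 *
 *	mtx_init(&data_mtx, "data", NULL, MTX_DEF);
 *	mtx_init(&intr_mtx, "intr", NULL, MTX_SPIN | MTX_RECURSE);
 *	...
 *	mtx_destroy(&data_mtx);
 *	mtx_destroy(&intr_mtx);
 */
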
/*
 * Option flags passed to certain lock/unlock routines, through the use
 * of corresponding mtx_{lock,unlock}_flags() interface macros.
 */
#define	MTX_QUIET	LOP_QUIET	/* Don't log a mutex event */
#define	MTX_DUPOK	LOP_DUPOK	/* Don't log a duplicate acquire */

/*
 * State bits kept in mutex->mtx_lock, for the DEFAULT lock type. None of this,
 * with the exception of MTX_UNOWNED, applies to spin locks.
 */
#define	MTX_RECURSED	0x00000001	/* lock recursed (for MTX_DEF only) */
#define	MTX_CONTESTED	0x00000002	/* lock contested (for MTX_DEF only) */
#define MTX_UNOWNED	0x00000004	/* Cookie for free mutex */
#define	MTX_FLAGMASK	(MTX_RECURSED | MTX_CONTESTED | MTX_UNOWNED)

/*
 * Value stored in mutex->mtx_lock to denote a destroyed mutex.
 */
#define	MTX_DESTROYED	(MTX_CONTESTED | MTX_UNOWNED)

#endif	/* _KERNEL */

#ifndef LOCORE

/*
 * XXX: Friendly reminder to fix things in MP code that is presently being
 * XXX: worked on.
 */
#define mp_fixme(string)

#ifdef _KERNEL

/*
 * Prototypes
 *
 * NOTE: Functions prepended with `_' (underscore) are exported to other parts
 *	 of the kernel via macros, thus allowing us to use the cpp LOCK_FILE
 *	 and LOCK_LINE.  These functions should not be called directly by any
 *	 code using the API.  Their macros cover their functionality.
 *
 * [See below for descriptions]
 *
 */
void	mtx_init(struct mtx *m, const char *name, const char *type, int opts);
void	mtx_destroy(struct mtx *m);
void	mtx_sysinit(void *arg);
void	mutex_init(void);
void	_mtx_lock_sleep(struct mtx *m, uintptr_t tid, int opts,
	    const char *file, int line);
void	_mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line);
#ifdef SMP
void	_mtx_lock_spin(struct mtx *m, uintptr_t tid, int opts,
	    const char *file, int line);
#endif
void	_mtx_unlock_spin(struct mtx *m, int opts, const char *file, int line);
int	_mtx_trylock(struct mtx *m, int opts, const char *file, int line);
void	_mtx_lock_flags(struct mtx *m, int opts, const char *file, int line);
void	_mtx_unlock_flags(struct mtx *m, int opts, const char *file, int line);
void	_mtx_lock_spin_flags(struct mtx *m, int opts, const char *file,
	     int line);
void	_mtx_unlock_spin_flags(struct mtx *m, int opts, const char *file,
	     int line);
#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
void	_mtx_assert(struct mtx *m, int what, const char *file, int line);
#endif

/*
 * We define our machine-independent (unoptimized) mutex micro-operations
 * here, if they are not already defined in the machine-dependent mutex.h
 */

/* Try to obtain mtx_lock once. */
#ifndef _obtain_lock
#define _obtain_lock(mp, tid)						\
	atomic_cmpset_acq_ptr(&(mp)->mtx_lock, MTX_UNOWNED, (tid))
#endif

/* Try to release mtx_lock if it is unrecursed and uncontested. */
#ifndef _release_lock
#define _release_lock(mp, tid)						\
	atomic_cmpset_rel_ptr(&(mp)->mtx_lock, (tid), MTX_UNOWNED)
#endif

/* Release mtx_lock quickly, assuming we own it. */
#ifndef _release_lock_quick
#define _release_lock_quick(mp)						\
	atomic_store_rel_ptr(&(mp)->mtx_lock, MTX_UNOWNED)
#endif

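/*
 * Illustrative sketch (not part of the original header): the micro-operations
 * above treat mtx_lock as a single word holding either the MTX_UNOWNED cookie
 * or the owning thread pointer ORed with the state bits defined earlier.
 * Roughly:
 *
 *	mtx_lock == MTX_UNOWNED			lock is free
 *	mtx_lock == (uintptr_t)td		owned by thread td
 *	mtx_lock == (uintptr_t)td | MTX_CONTESTED
 *						owned by td, waiters queued
 *
 * This is why _obtain_lock() is a single compare-and-set from MTX_UNOWNED to
 * the thread pointer, and why mtx_owned() below masks off MTX_FLAGMASK before
 * comparing against curthread.
 */
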
/*
 * Obtain a sleep lock inline, or call the "hard" function if we can't get it
 * easily.
 */
#ifndef _get_sleep_lock
#define _get_sleep_lock(mp, tid, opts, file, line) do {			\
	uintptr_t _tid = (uintptr_t)(tid);				\
	int contested = 0;						\
	uint64_t waittime = 0;						\
	if (!_obtain_lock((mp), _tid)) {				\
		lock_profile_obtain_lock_failed(&(mp)->mtx_object,	\
		    &contested, &waittime);				\
		_mtx_lock_sleep((mp), _tid, (opts), (file), (line));	\
	}								\
	lock_profile_obtain_lock_success(&(mp)->mtx_object, contested,	\
	    waittime, (file), (line));					\
} while (0)
#endif

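/*
 * Illustrative sketch (not part of the original header): with lock profiling
 * compiled out, mtx_lock(m) effectively reduces to the fast path below; only
 * a held or contested lock falls through to the "hard" function.
 *
 *	if (!atomic_cmpset_acq_ptr(&m->mtx_lock, MTX_UNOWNED,
 *	    (uintptr_t)curthread))
 *		_mtx_lock_sleep(m, (uintptr_t)curthread, 0,
 *		    LOCK_FILE, LOCK_LINE);
 */
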
/*
 * Obtain a spin lock inline, or call the "hard" function if we can't get it
 * easily.  For spinlocks, we handle recursion inline (it turns out that
 * function calls can be significantly more expensive on some architectures).
 * Since spin locks are not _too_ common, inlining this code is not too big
 * a deal.
 */
#ifndef _get_spin_lock
#ifdef SMP
#define _get_spin_lock(mp, tid, opts, file, line) do {			\
	uintptr_t _tid = (uintptr_t)(tid);				\
	int contested = 0;						\
	uint64_t waittime = 0;						\
	spinlock_enter();						\
	if (!_obtain_lock((mp), _tid)) {				\
		if ((mp)->mtx_lock == _tid)				\
			(mp)->mtx_recurse++;				\
		else {							\
			lock_profile_obtain_lock_failed(&(mp)->mtx_object, \
			    &contested, &waittime);			\
			_mtx_lock_spin((mp), _tid, (opts), (file), (line)); \
		}							\
	}								\
	lock_profile_obtain_lock_success(&(mp)->mtx_object, contested,	\
	    waittime, (file), (line));					\
} while (0)
#else /* SMP */
#define _get_spin_lock(mp, tid, opts, file, line) do {			\
	uintptr_t _tid = (uintptr_t)(tid);				\
									\
	spinlock_enter();						\
	if ((mp)->mtx_lock == _tid)					\
		(mp)->mtx_recurse++;					\
	else {								\
		KASSERT((mp)->mtx_lock == MTX_UNOWNED, ("corrupt spinlock")); \
		(mp)->mtx_lock = _tid;					\
	}								\
} while (0)
#endif /* SMP */
#endif

/*
 * Release a sleep lock inline, or call the "hard" function if we can't do it
 * easily.
 */
#ifndef _rel_sleep_lock
#define _rel_sleep_lock(mp, tid, opts, file, line) do {			\
	uintptr_t _tid = (uintptr_t)(tid);				\
									\
	if (!_release_lock((mp), _tid))					\
		_mtx_unlock_sleep((mp), (opts), (file), (line));	\
} while (0)
#endif

/*
 * For spinlocks, we can handle everything inline, as it's pretty simple and
 * a function call would be too expensive (at least on some architectures).
 * Since spin locks are not _too_ common, inlining this code is not too big
 * a deal.
 *
 * Since we always perform a spinlock_enter() when attempting to acquire a
 * spin lock, we need to always perform a matching spinlock_exit() when
 * releasing a spin lock.  This includes the recursion cases.
 */
#ifndef _rel_spin_lock
#ifdef SMP
#define _rel_spin_lock(mp) do {						\
	if (mtx_recursed((mp)))						\
		(mp)->mtx_recurse--;					\
	else								\
		_release_lock_quick((mp));				\
	spinlock_exit();						\
} while (0)
#else /* SMP */
#define _rel_spin_lock(mp) do {						\
	if (mtx_recursed((mp)))						\
		(mp)->mtx_recurse--;					\
	else								\
		(mp)->mtx_lock = MTX_UNOWNED;				\
	spinlock_exit();						\
} while (0)
#endif /* SMP */
#endif

/*
 * Exported lock manipulation interface.
 *
 * mtx_lock(m) locks MTX_DEF mutex `m'
 *
 * mtx_lock_spin(m) locks MTX_SPIN mutex `m'
 *
 * mtx_unlock(m) unlocks MTX_DEF mutex `m'
 *
 * mtx_unlock_spin(m) unlocks MTX_SPIN mutex `m'
 *
 * mtx_lock_spin_flags(m, opts) and mtx_lock_flags(m, opts) lock mutex `m'
 *     and pass option flags `opts' to the "hard" function, if required.
 *     With these routines, it is possible to pass flags such as MTX_QUIET
 *     to the appropriate lock manipulation routines.
 *
 * mtx_trylock(m) attempts to acquire MTX_DEF mutex `m' but doesn't sleep if
 *     it cannot.  Rather, it returns 0 on failure and non-zero on success.
 *     It does NOT handle recursion as we assume that if a caller is properly
 *     using this part of the interface, they will know that the lock in
 *     question is _not_ recursed.
 *
 * mtx_trylock_flags(m, opts) is used the same way as mtx_trylock() but accepts
 *     relevant option flags `opts.'
 *
 * mtx_initialized(m) returns non-zero if the lock `m' has been initialized.
 *
 * mtx_owned(m) returns non-zero if the current thread owns the lock `m'.
 *
 * mtx_recursed(m) returns non-zero if the lock `m' is presently recursed.
 */
#define mtx_lock(m)		mtx_lock_flags((m), 0)
#define mtx_lock_spin(m)	mtx_lock_spin_flags((m), 0)
#define mtx_trylock(m)		mtx_trylock_flags((m), 0)
#define mtx_unlock(m)		mtx_unlock_flags((m), 0)
#define mtx_unlock_spin(m)	mtx_unlock_spin_flags((m), 0)

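/*
 * Illustrative sketch (not part of the original header): a hypothetical
 * counter protected by a MTX_DEF mutex, including a mtx_trylock() consumer.
 * All names below are made up.
 *
 *	static struct mtx foo_mtx;
 *	static int foo_count;
 *
 *	void
 *	foo_increment(void)
 *	{
 *		mtx_lock(&foo_mtx);
 *		foo_count++;
 *		mtx_unlock(&foo_mtx);
 *	}
 *
 *	int
 *	foo_increment_if_idle(void)
 *	{
 *		if (mtx_trylock(&foo_mtx) == 0)
 *			return (0);
 *		foo_count++;
 *		mtx_unlock(&foo_mtx);
 *		return (1);
 *	}
 *
 * A zero return from mtx_trylock() means the lock was busy and the caller
 * must retry or fall back; it never sleeps.
 */
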
struct mtx_pool;

struct mtx_pool *mtx_pool_create(const char *mtx_name, int pool_size, int opts);
void mtx_pool_destroy(struct mtx_pool **poolp);
struct mtx *mtx_pool_find(struct mtx_pool *pool, void *ptr);
struct mtx *mtx_pool_alloc(struct mtx_pool *pool);
#define mtx_pool_lock(pool, ptr)					\
	mtx_lock(mtx_pool_find((pool), (ptr)))
#define mtx_pool_lock_spin(pool, ptr)					\
	mtx_lock_spin(mtx_pool_find((pool), (ptr)))
#define mtx_pool_unlock(pool, ptr)					\
	mtx_unlock(mtx_pool_find((pool), (ptr)))
#define mtx_pool_unlock_spin(pool, ptr)					\
	mtx_unlock_spin(mtx_pool_find((pool), (ptr)))

/*
 * mtxpool_lockbuilder is a pool of sleep locks that is not witness
 * checked and should only be used for building higher level locks.
 *
 * mtxpool_sleep is a general purpose pool of sleep mutexes.
 */
extern struct mtx_pool *mtxpool_lockbuilder;
extern struct mtx_pool *mtxpool_sleep;

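/*
 * Illustrative sketch (not part of the original header): pool mutexes let
 * many small or short-lived objects share pre-initialized locks, selected
 * by hashing a pointer.  `obj' is hypothetical:
 *
 *	mtx_pool_lock(mtxpool_sleep, obj);
 *	... mutate *obj ...
 *	mtx_pool_unlock(mtxpool_sleep, obj);
 *
 * The same pointer always maps to the same pool mutex, so the lock and
 * unlock must use the same pool and the same pointer.
 */
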
#ifndef LOCK_DEBUG
#error LOCK_DEBUG not defined, include <sys/lock.h> before <sys/mutex.h>
#endif
#if LOCK_DEBUG > 0 || defined(MUTEX_NOINLINE)
#define	mtx_lock_flags(m, opts)						\
	_mtx_lock_flags((m), (opts), LOCK_FILE, LOCK_LINE)
#define	mtx_unlock_flags(m, opts)					\
	_mtx_unlock_flags((m), (opts), LOCK_FILE, LOCK_LINE)
#define	mtx_lock_spin_flags(m, opts)					\
	_mtx_lock_spin_flags((m), (opts), LOCK_FILE, LOCK_LINE)
#define	mtx_unlock_spin_flags(m, opts)					\
	_mtx_unlock_spin_flags((m), (opts), LOCK_FILE, LOCK_LINE)
#else	/* LOCK_DEBUG == 0 && !MUTEX_NOINLINE */
#define	mtx_lock_flags(m, opts)						\
	_get_sleep_lock((m), curthread, (opts), LOCK_FILE, LOCK_LINE)
#define	mtx_unlock_flags(m, opts)					\
	_rel_sleep_lock((m), curthread, (opts), LOCK_FILE, LOCK_LINE)
#define	mtx_lock_spin_flags(m, opts)					\
	_get_spin_lock((m), curthread, (opts), LOCK_FILE, LOCK_LINE)
#define	mtx_unlock_spin_flags(m, opts)					\
	_rel_spin_lock((m))
#endif	/* LOCK_DEBUG > 0 || MUTEX_NOINLINE */

#define mtx_trylock_flags(m, opts)					\
	_mtx_trylock((m), (opts), LOCK_FILE, LOCK_LINE)

#define	mtx_sleep(chan, mtx, pri, wmesg, timo)				\
	_sleep((chan), &(mtx)->mtx_object, (pri), (wmesg), (timo))

#define	mtx_initialized(m)	lock_initalized(&(m)->mtx_object)

#define mtx_owned(m)	(((m)->mtx_lock & ~MTX_FLAGMASK) == (uintptr_t)curthread)

#define mtx_recursed(m)	((m)->mtx_recurse != 0)

#define mtx_name(m)	((m)->mtx_object.lo_name)

/*
 * Global locks.
 */
extern struct mtx sched_lock;
extern struct mtx Giant;

/*
 * Giant lock manipulation and clean exit macros.
 * Used to replace a plain return with one that releases Giant first.
 *
 * Note that DROP_GIANT*() needs to be paired with PICKUP_GIANT().
 * The #ifndef is to allow lint-like tools to redefine DROP_GIANT.
 */
#ifndef DROP_GIANT
#define DROP_GIANT()							\
do {									\
	int _giantcnt;							\
	WITNESS_SAVE_DECL(Giant);					\
									\
	if (mtx_owned(&Giant))						\
		WITNESS_SAVE(&Giant.mtx_object, Giant);			\
	for (_giantcnt = 0; mtx_owned(&Giant); _giantcnt++)		\
		mtx_unlock(&Giant)

#define PICKUP_GIANT()							\
	mtx_assert(&Giant, MA_NOTOWNED);				\
	while (_giantcnt--)						\
		mtx_lock(&Giant);					\
	if (mtx_owned(&Giant))						\
		WITNESS_RESTORE(&Giant.mtx_object, Giant);		\
} while (0)

#define PARTIAL_PICKUP_GIANT()						\
	mtx_assert(&Giant, MA_NOTOWNED);				\
	while (_giantcnt--)						\
		mtx_lock(&Giant);					\
	if (mtx_owned(&Giant))						\
		WITNESS_RESTORE(&Giant.mtx_object, Giant)
#endif

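/*
 * Illustrative sketch (not part of the original header): DROP_GIANT() opens
 * a block (it contains an unmatched `do {') and PICKUP_GIANT() closes it, so
 * the two must appear as a brace-balanced pair in the same function:
 *
 *	DROP_GIANT();
 *	error = some_mpsafe_operation();
 *	PICKUP_GIANT();
 *
 * where some_mpsafe_operation() is a hypothetical function that must run
 * without Giant held.  Giant is re-acquired the same number of times it was
 * recursively held before the drop.
 */
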
/*
 * Network MPSAFE temporary workarounds.  When debug_mpsafenet
 * is 1 the network is assumed to operate without Giant on the
 * input path and protocols that require Giant must collect it
 * on entry.  When 0 Giant is grabbed in the network interface
 * ISRs and in the netisr path and there is no need to grab
 * the Giant lock.
 *
 * This mechanism is intended as temporary until everything of
 * importance is properly locked.  Note: the semantics for
 * NET_{LOCK,UNLOCK}_GIANT() are not the same as DROP_GIANT()
 * and PICKUP_GIANT(), as they are plain mutex operations
 * without a recursion counter.
 */
extern	int debug_mpsafenet;		/* defined in net/netisr.c */
#define	NET_LOCK_GIANT() do {						\
	if (!debug_mpsafenet)						\
		mtx_lock(&Giant);					\
} while (0)
#define	NET_UNLOCK_GIANT() do {						\
	if (!debug_mpsafenet)						\
		mtx_unlock(&Giant);					\
} while (0)
#define	NET_ASSERT_GIANT() do {						\
	if (!debug_mpsafenet)						\
		mtx_assert(&Giant, MA_OWNED);				\
} while (0)
#define	NET_CALLOUT_MPSAFE	(debug_mpsafenet ? CALLOUT_MPSAFE : 0)

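/*
 * Illustrative sketch (not part of the original header): a hypothetical
 * protocol input routine that takes Giant only when the stack is not
 * running MPSAFE:
 *
 *	void
 *	foo_input(struct mbuf *m)
 *	{
 *		NET_LOCK_GIANT();
 *		... process m ...
 *		NET_UNLOCK_GIANT();
 *	}
 */
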
/* Unlock Giant once and return the given value. */
#define	UGAR(rval) do {							\
	int _val = (rval);						\
	mtx_unlock(&Giant);						\
	return (_val);							\
} while (0)

struct mtx_args {
	struct mtx	*ma_mtx;
	const char	*ma_desc;
	int		 ma_opts;
};

#define	MTX_SYSINIT(name, mtx, desc, opts)				\
	static struct mtx_args name##_args = {				\
		(mtx),							\
		(desc),							\
		(opts)							\
	};								\
	SYSINIT(name##_mtx_sysinit, SI_SUB_LOCK, SI_ORDER_MIDDLE,	\
	    mtx_sysinit, &name##_args);					\
	SYSUNINIT(name##_mtx_sysuninit, SI_SUB_LOCK, SI_ORDER_MIDDLE,	\
	    mtx_destroy, (mtx))

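/*
 * Illustrative sketch (not part of the original header): MTX_SYSINIT()
 * arranges for a global mutex to be initialized at boot and destroyed at
 * shutdown without an explicit call from subsystem code.  The mutex and
 * names below are hypothetical:
 *
 *	static struct mtx foo_mtx;
 *	MTX_SYSINIT(foo, &foo_mtx, "foo global lock", MTX_DEF);
 */
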
/*
 * The INVARIANTS-enabled mtx_assert() functionality.
 *
 * The constants need to be defined for INVARIANT_SUPPORT as well, since
 * _mtx_assert() itself uses them and INVARIANT_SUPPORT implies that
 * _mtx_assert() must build.
 */
#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
#define MA_OWNED	0x01
#define MA_NOTOWNED	0x02
#define MA_RECURSED	0x04
#define MA_NOTRECURSED	0x08
#endif

#ifdef INVARIANTS
#define	mtx_assert(m, what)						\
	_mtx_assert((m), (what), __FILE__, __LINE__)

#define GIANT_REQUIRED	mtx_assert(&Giant, MA_OWNED)

#else	/* INVARIANTS */
#define mtx_assert(m, what)
#define GIANT_REQUIRED
#endif	/* INVARIANTS */

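/*
 * Illustrative sketch (not part of the original header): assertions document
 * and, under INVARIANTS, enforce a locking protocol.  `foo_softc' and
 * foo_kick() are hypothetical; MA_OWNED may be combined with MA_RECURSED or
 * MA_NOTRECURSED as shown.
 *
 *	static void
 *	foo_kick(struct foo_softc *sc)
 *	{
 *		mtx_assert(&sc->sc_mtx, MA_OWNED | MA_NOTRECURSED);
 *		... caller must already hold sc_mtx exactly once ...
 *	}
 */
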
/*
 * Common lock type names.
 */
#define	MTX_NETWORK_LOCK	"network driver"

#endif	/* _KERNEL */
#endif	/* !LOCORE */
#endif	/* _SYS_MUTEX_H_ */