/*-
 * Copyright (c) 1997 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from BSDI $Id: mutex.h,v 2.7.2.35 2000/04/27 03:10:26 cp Exp $
 * $FreeBSD: head/sys/sys/mutex.h 67463 2000-10-23 09:14:20Z phk $
 */

#ifndef _SYS_MUTEX_H_
#define _SYS_MUTEX_H_

#ifndef LOCORE
#include <sys/queue.h>

#ifdef _KERNEL
#include <sys/ktr.h>
#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/cpufunc.h>
#include <machine/globals.h>
#endif	/* _KERNEL */
#endif	/* !LOCORE */

#include <machine/mutex.h>

#ifndef LOCORE
#ifdef _KERNEL

/*
 * If kern_mutex.c is being built, compile non-inlined versions of various
 * functions so that kernel modules can use them.
 */
#ifndef _KERN_MUTEX_C_
#define _MTX_INLINE	static __inline
#else
#define _MTX_INLINE
#endif

/*
 * Mutex flags
 *
 * Types
 */
#define	MTX_DEF		0x0		/* Default (spin/sleep) */
#define MTX_SPIN	0x1		/* Spin only lock */

/* Options */
#define	MTX_RLIKELY	0x4		/* (opt) Recursion likely */
#define	MTX_NORECURSE	0x8		/* No recursion possible */
#define	MTX_NOSPIN	0x10		/* Don't spin before sleeping */
#define	MTX_NOSWITCH	0x20		/* Do not switch on release */
#define	MTX_FIRST	0x40		/* First spin lock holder */
#define MTX_TOPHALF	0x80		/* Interrupts not disabled on spin */
#define MTX_COLD	0x100		/* Mutex init'd before malloc works */

/* Options that should be passed on to mtx_enter_hard, mtx_exit_hard */
#define	MTX_HARDOPTS	(MTX_SPIN | MTX_FIRST | MTX_TOPHALF | MTX_NOSWITCH)

/* Flags/value used in mtx_lock */
#define	MTX_RECURSE	0x01		/* (non-spin) lock held recursively */
#define	MTX_CONTESTED	0x02		/* (non-spin) lock contested */
#define	MTX_FLAGMASK	~(MTX_RECURSE | MTX_CONTESTED)
#define MTX_UNOWNED	0x8		/* Cookie for free mutex */

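/*
 * Usage sketch (illustrative only, not part of this interface): a
 * typical consumer initializes a mutex once, then brackets critical
 * sections with mtx_enter()/mtx_exit(), passing the same type to both.
 * foo_mtx is a hypothetical lock:
 *
 *	static struct mtx foo_mtx;
 *
 *	mtx_init(&foo_mtx, "foo driver", MTX_DEF);
 *	...
 *	mtx_enter(&foo_mtx, MTX_DEF);
 *	... access data protected by foo_mtx ...
 *	mtx_exit(&foo_mtx, MTX_DEF);
 *
 * A spin mutex follows the same pattern with MTX_SPIN throughout.
 */
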
#endif	/* _KERNEL */

#ifdef MUTEX_DEBUG
struct mtx_debug {
	/* If you add anything here, adjust the mtxf_t definition below */
	struct witness	*mtxd_witness;
	LIST_ENTRY(mtx)	mtxd_held;
	const char	*mtxd_file;
	int		mtxd_line;
	const char	*mtxd_description;
};

#define mtx_description	mtx_debug->mtxd_description
#define mtx_held	mtx_debug->mtxd_held
#define	mtx_line	mtx_debug->mtxd_line
#define	mtx_file	mtx_debug->mtxd_file
#define	mtx_witness	mtx_debug->mtxd_witness
#endif

/*
 * Sleep/spin mutex
 */
struct mtx {
	volatile uintptr_t mtx_lock;	/* lock owner/gate/flags */
	volatile u_int	mtx_recurse;	/* number of recursive holds */
	u_int		mtx_saveintr;	/* saved flags (for spin locks) */
#ifdef MUTEX_DEBUG
	struct mtx_debug *mtx_debug;
#else
	const char	*mtx_description;
#endif
	TAILQ_HEAD(, proc) mtx_blocked;
	LIST_ENTRY(mtx)	mtx_contested;
	struct mtx	*mtx_next;	/* all locks in system */
	struct mtx	*mtx_prev;
};
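
/*
 * Informal sketch of how mtx_lock is decoded by the macros below (an
 * informative reading of the flag definitions above, not a new
 * interface): the owning thread pointer and the state bits share the
 * single lock word, so masking with MTX_FLAGMASK recovers the owner:
 *
 *	owner = mtx_lock & MTX_FLAGMASK;	thread pointer (CURTHD)
 *	state = mtx_lock & (MTX_RECURSE | MTX_CONTESTED);
 *	free  = (mtx_lock == MTX_UNOWNED);	0x8 cookie, assumed never
 *						to be a valid thread pointer
 *
 * This is why mtx_owned() below compares the masked word against CURTHD.
 */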

#ifdef	MUTEX_DEBUG
#define	MUTEX_DECLARE(modifiers, name)					\
	static struct mtx_debug __mtx_debug_##name;			\
	modifiers struct mtx name = { 0, 0, 0, &__mtx_debug_##name }
#else
#define MUTEX_DECLARE(modifiers, name)	modifiers struct mtx name
#endif
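
/*
 * Example use of MUTEX_DECLARE (hypothetical names): declaring the lock
 * this way lets the MUTEX_DEBUG configuration allocate the mtx_debug
 * record statically, which matters for mutexes initialized before
 * malloc works (see MTX_COLD above):
 *
 *	MUTEX_DECLARE(static, bar_mtx);
 *	...
 *	mtx_init(&bar_mtx, "bar", MTX_COLD | MTX_DEF);
 */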

#define mp_fixme(string)

#ifdef _KERNEL
/* Misc */
#define CURTHD	CURPROC	/* Current thread ID */

/* Prototypes */
void	mtx_init(struct mtx *m, const char *description, int flag);
void	mtx_enter_hard(struct mtx *, int type, int saveintr);
void	mtx_exit_hard(struct mtx *, int type);
void	mtx_destroy(struct mtx *m);

/*
 * Wrap the following functions with cpp macros so that filenames and line
 * numbers are embedded in the code correctly.
 */
#if (defined(KLD_MODULE) || defined(_KERN_MUTEX_C_))
void	_mtx_enter(struct mtx *mtxp, int type, const char *file, int line);
int	_mtx_try_enter(struct mtx *mtxp, int type, const char *file, int line);
void	_mtx_exit(struct mtx *mtxp, int type, const char *file, int line);
#endif

#define	mtx_enter(mtxp, type)						\
	_mtx_enter((mtxp), (type), __FILE__, __LINE__)

#define	mtx_try_enter(mtxp, type)					\
	_mtx_try_enter((mtxp), (type), __FILE__, __LINE__)

#define	mtx_exit(mtxp, type)						\
	_mtx_exit((mtxp), (type), __FILE__, __LINE__)
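
/*
 * Illustration (informative): a call such as
 *
 *	mtx_enter(&foo_mtx, MTX_DEF);		hypothetical caller
 *
 * at line 42 of foo.c expands to
 *
 *	_mtx_enter((&foo_mtx), (MTX_DEF), "foo.c", 42);
 *
 * so KTR tracing and WITNESS diagnostics report the caller's location
 * rather than a line inside this header.
 */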

/* Global locks */
extern struct mtx	sched_lock;
extern struct mtx	Giant;

/*
 * Used to replace a plain return with "release Giant, then return".
 */

#define EGAR(a)								\
do {									\
	mtx_exit(&Giant, MTX_DEF);					\
	return (a);							\
} while (0)

#define VEGAR								\
do {									\
	mtx_exit(&Giant, MTX_DEF);					\
	return;								\
} while (0)
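
/*
 * Usage sketch (hypothetical error path): these keep early returns from
 * leaking Giant.  In a function returning an error code:
 *
 *	mtx_enter(&Giant, MTX_DEF);
 *	...
 *	if (error != 0)
 *		EGAR(error);		releases Giant, returns error
 *
 * VEGAR is the same idea for void functions.
 */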

#define DROP_GIANT()							\
do {									\
	int _giantcnt;							\
	WITNESS_SAVE_DECL(Giant);					\
									\
	if (mtx_owned(&Giant))						\
		WITNESS_SAVE(&Giant, Giant);				\
	for (_giantcnt = 0; mtx_owned(&Giant); _giantcnt++)		\
		mtx_exit(&Giant, MTX_DEF)

#define PICKUP_GIANT()							\
	mtx_assert(&Giant, MA_NOTOWNED);				\
	while (_giantcnt--)						\
		mtx_enter(&Giant, MTX_DEF);				\
	if (mtx_owned(&Giant))						\
		WITNESS_RESTORE(&Giant, Giant);				\
} while (0)

#define PARTIAL_PICKUP_GIANT()						\
	mtx_assert(&Giant, MA_NOTOWNED);				\
	while (_giantcnt--)						\
		mtx_enter(&Giant, MTX_DEF);				\
	if (mtx_owned(&Giant))						\
		WITNESS_RESTORE(&Giant, Giant)

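/*
 * Usage sketch (illustrative): DROP_GIANT() opens a block that
 * PICKUP_GIANT() closes (note the unbalanced do/while halves above), so
 * the two must bracket code in the same scope, typically around a call
 * that may sleep:
 *
 *	DROP_GIANT();
 *	error = tsleep(ident, priority, "wmesg", timo);
 *	PICKUP_GIANT();
 *
 * PARTIAL_PICKUP_GIANT() reacquires Giant without closing the block.
 */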

/*
 * Debugging
 */
#ifdef INVARIANTS
#define MA_OWNED	1
#define MA_NOTOWNED	2
#define mtx_assert(m, what) do {					\
	switch ((what)) {						\
	case MA_OWNED:							\
		if (!mtx_owned((m)))					\
			panic("mutex %s not owned at %s:%d",		\
			    (m)->mtx_description, __FILE__, __LINE__);	\
		break;							\
	case MA_NOTOWNED:						\
		if (mtx_owned((m)))					\
			panic("mutex %s owned at %s:%d",		\
			    (m)->mtx_description, __FILE__, __LINE__);	\
		break;							\
	default:							\
		panic("unknown mtx_assert at %s:%d", __FILE__, __LINE__); \
	}								\
} while (0)
#else	/* INVARIANTS */
#define mtx_assert(m, what)
#endif	/* INVARIANTS */
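
/*
 * Example (illustrative): a function that requires its caller to hold
 * sched_lock can assert that at entry; under INVARIANTS a violation
 * panics with file and line, otherwise the check compiles away:
 *
 *	mtx_assert(&sched_lock, MA_OWNED);
 */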

#ifdef MUTEX_DEBUG
#define MPASS(ex) do { if (!(ex)) panic("Assertion %s failed at %s:%d", \
		#ex, __FILE__, __LINE__); } while (0)
#define MPASS2(ex, what) do { if (!(ex)) panic("Assertion %s failed at %s:%d", \
		what, __FILE__, __LINE__); } while (0)

#else	/* MUTEX_DEBUG */
#define	MPASS(ex)
#define	MPASS2(ex, what)
#endif	/* MUTEX_DEBUG */
246
247#ifdef	WITNESS
248#ifndef	MUTEX_DEBUG
249#error	WITNESS requires MUTEX_DEBUG
250#endif	/* MUTEX_DEBUG */
251#define WITNESS_ENTER(m, t, f, l)					\
252	if ((m)->mtx_witness != NULL)					\
253		witness_enter((m), (t), (f), (l))
254#define WITNESS_EXIT(m, t, f, l)					\
255	if ((m)->mtx_witness != NULL)					\
256		witness_exit((m), (t), (f), (l))
257
258#define	WITNESS_SLEEP(check, m) witness_sleep(check, (m), __FILE__, __LINE__)
259#define	WITNESS_SAVE_DECL(n)						\
260	const char * __CONCAT(n, __wf);					\
261	int __CONCAT(n, __wl)
262
263#define	WITNESS_SAVE(m, n) 						\
264do {									\
265	if ((m)->mtx_witness != NULL) 					\
266	    witness_save(m, &__CONCAT(n, __wf), &__CONCAT(n, __wl));	\
267} while (0)
268
269#define	WITNESS_RESTORE(m, n) 						\
270do {									\
271	if ((m)->mtx_witness != NULL)					\
272	    witness_restore(m, __CONCAT(n, __wf), __CONCAT(n, __wl));	\
273} while (0)
274
275void	witness_init(struct mtx *, int flag);
276void	witness_destroy(struct mtx *);
277void	witness_enter(struct mtx *, int, const char *, int);
278void	witness_try_enter(struct mtx *, int, const char *, int);
279void	witness_exit(struct mtx *, int, const char *, int);
280void	witness_display(void(*)(const char *fmt, ...));
281void	witness_list(struct proc *);
282int	witness_sleep(int, struct mtx *, const char *, int);
283void	witness_save(struct mtx *, const char **, int *);
284void	witness_restore(struct mtx *, const char *, int);
285#else	/* WITNESS */
286#define WITNESS_ENTER(m, t, f, l)
287#define WITNESS_EXIT(m, t, f, l)
288#define	WITNESS_SLEEP(check, m)
289#define	WITNESS_SAVE_DECL(n)
290#define	WITNESS_SAVE(m, n)
291#define	WITNESS_RESTORE(m, n)
292
/*
 * flag++ is a sleazy way of shutting up the unused-parameter warning
 * in mtx_init()
 */
#define witness_init(m, flag) flag++
#define witness_destroy(m)
#define witness_enter(m, t, f, l)
#define witness_try_enter(m, t, f, l)
#define witness_exit(m, t, f, l)
#endif	/* WITNESS */

/*
 * Assembly macros (for internal use only)
 *------------------------------------------------------------------------------
 */

#define	_V(x)	__STRING(x)

/*
 * Default, unoptimized mutex micro-operations
 */

#ifndef _obtain_lock
/* Actually obtain mtx_lock */
#define _obtain_lock(mp, tid)						\
	atomic_cmpset_acq_ptr(&(mp)->mtx_lock, (void *)MTX_UNOWNED, (tid))
#endif

#ifndef _release_lock
/* Actually release mtx_lock */
#define _release_lock(mp, tid)						\
	atomic_cmpset_rel_ptr(&(mp)->mtx_lock, (tid), (void *)MTX_UNOWNED)
#endif

#ifndef _release_lock_quick
/* Actually release mtx_lock quickly assuming that we own it */
#define	_release_lock_quick(mp)						\
	atomic_store_rel_ptr(&(mp)->mtx_lock, (void *)MTX_UNOWNED)
#endif
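
/*
 * Semantics sketch (informative): atomic_cmpset_{acq,rel}_ptr performs,
 * as a single atomic step with the indicated memory barrier, roughly:
 *
 *	if (*dst == expect) {
 *		*dst = new;
 *		return (1);
 *	}
 *	return (0);
 *
 * so _obtain_lock succeeds only while the lock word still holds the
 * MTX_UNOWNED cookie, and _release_lock succeeds only while we are
 * still the unrecursed, uncontested owner.
 */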

#ifndef _getlock_sleep
/* Get a sleep lock, deal with recursion inline. */
#define	_getlock_sleep(mp, tid, type) do {				\
	if (!_obtain_lock(mp, tid)) {					\
		if (((mp)->mtx_lock & MTX_FLAGMASK) != ((uintptr_t)(tid))) \
			mtx_enter_hard(mp, (type) & MTX_HARDOPTS, 0);	\
		else {							\
			atomic_set_ptr(&(mp)->mtx_lock, MTX_RECURSE);	\
			(mp)->mtx_recurse++;				\
		}							\
	}								\
} while (0)
#endif

#ifndef _getlock_spin_block
/* Get a spin lock, handle recursion inline (as the less common case) */
#define	_getlock_spin_block(mp, tid, type) do {				\
	u_int _mtx_intr = save_intr();					\
	disable_intr();							\
	if (!_obtain_lock(mp, tid))					\
		mtx_enter_hard(mp, (type) & MTX_HARDOPTS, _mtx_intr);	\
	else								\
		(mp)->mtx_saveintr = _mtx_intr;				\
} while (0)
#endif

#ifndef _getlock_norecurse
/*
 * Get a lock without any recursion handling. Calls the hard enter function if
 * we can't get it inline.
 */
#define	_getlock_norecurse(mp, tid, type) do {				\
	if (!_obtain_lock(mp, tid))					\
		mtx_enter_hard((mp), (type) & MTX_HARDOPTS, 0);		\
} while (0)
#endif

#ifndef _exitlock_norecurse
/*
 * Release a sleep lock, assuming we haven't recursed on it; recursion is
 * handled in the hard function.
 */
#define	_exitlock_norecurse(mp, tid, type) do {				\
	if (!_release_lock(mp, tid))					\
		mtx_exit_hard((mp), (type) & MTX_HARDOPTS);		\
} while (0)
#endif

#ifndef _exitlock
/*
 * Release a sleep lock when it's likely we recursed (the code to
 * deal with simple recursion is inline).
 */
#define	_exitlock(mp, tid, type) do {					\
	if (!_release_lock(mp, tid)) {					\
		if ((mp)->mtx_lock & MTX_RECURSE) {			\
			if (--((mp)->mtx_recurse) == 0)			\
				atomic_clear_ptr(&(mp)->mtx_lock,	\
				    MTX_RECURSE);			\
		} else {						\
			mtx_exit_hard((mp), (type) & MTX_HARDOPTS);	\
		}							\
	}								\
} while (0)
#endif

#ifndef _exitlock_spin
/* Release a spin lock (with possible recursion). */
#define	_exitlock_spin(mp) do {						\
	if ((mp)->mtx_recurse == 0) {					\
		int _mtx_intr = (mp)->mtx_saveintr;			\
									\
		_release_lock_quick(mp);				\
		restore_intr(_mtx_intr);				\
	} else {							\
		(mp)->mtx_recurse--;					\
	}								\
} while (0)
#endif
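
/*
 * Note (informative): _exitlock_spin reads mtx_saveintr before dropping
 * the lock because, once the release store is visible, another CPU may
 * acquire the mutex and overwrite mtx_saveintr with its own state.
 */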

/*
 * Externally visible mutex functions.
 *------------------------------------------------------------------------------
 */

/*
 * Return non-zero if a mutex is already owned by the current thread.
 */
#define	mtx_owned(m)	(((m)->mtx_lock & MTX_FLAGMASK) == (uintptr_t)CURTHD)
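
/*
 * Informal example: after the current thread does
 * mtx_enter(&foo_mtx, MTX_DEF) (foo_mtx being hypothetical),
 * mtx_owned(&foo_mtx) is non-zero even if MTX_RECURSE or MTX_CONTESTED
 * has since been set, because MTX_FLAGMASK strips those low bits before
 * the owner comparison.
 */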

/* Common strings */
#ifdef _KERN_MUTEX_C_
#ifdef KTR_EXTEND

/*
 * KTR_EXTEND saves file name and line for all entries, so we don't need them
 * here.  Theoretically we should also change the entries which refer to them
 * (from CTR5 to CTR3), but since they're just passed to snprintf as the last
 * parameters, it doesn't do any harm to leave them.
 */
char	STR_mtx_enter_fmt[] = "GOT %s [%x] r=%d";
char	STR_mtx_exit_fmt[] = "REL %s [%x] r=%d";
char	STR_mtx_try_enter_fmt[] = "TRY_ENTER %s [%x] result=%d";
#else
char	STR_mtx_enter_fmt[] = "GOT %s [%x] at %s:%d r=%d";
char	STR_mtx_exit_fmt[] = "REL %s [%x] at %s:%d r=%d";
char	STR_mtx_try_enter_fmt[] = "TRY_ENTER %s [%x] at %s:%d result=%d";
#endif
char	STR_mtx_bad_type[] = "((type) & (MTX_NORECURSE | MTX_NOSWITCH)) == 0";
char	STR_mtx_owned[] = "mtx_owned(mpp)";
char	STR_mtx_recurse[] = "mpp->mtx_recurse == 0";
#else	/* _KERN_MUTEX_C_ */
extern	char STR_mtx_enter_fmt[];
extern	char STR_mtx_bad_type[];
extern	char STR_mtx_exit_fmt[];
extern	char STR_mtx_owned[];
extern	char STR_mtx_recurse[];
extern	char STR_mtx_try_enter_fmt[];
#endif	/* _KERN_MUTEX_C_ */

#ifndef KLD_MODULE
/*
 * Get lock 'm'; the easy (and most common) cases are handled inline, and
 * the slow stuff is left to the mtx_enter_hard() function.
 *
 * Note: since type is usually a constant much of this code is optimized out.
 */
_MTX_INLINE void
_mtx_enter(struct mtx *mtxp, int type, const char *file, int line)
{
	struct mtx	*mpp = mtxp;

	/* bits only valid on mtx_exit() */
	MPASS2(((type) & (MTX_NORECURSE | MTX_NOSWITCH)) == 0,
	    STR_mtx_bad_type);

	if ((type) & MTX_SPIN) {
		/*
		 * Easy cases of spin locks:
		 *
		 * 1) We already own the lock and will simply recurse on it (if
		 *    RLIKELY)
		 *
		 * 2) The lock is free, we just get it
		 */
		if ((type) & MTX_RLIKELY) {
			/*
			 * Check for recursion, if we already have this
			 * lock we just bump the recursion count.
			 */
			if (mpp->mtx_lock == (uintptr_t)CURTHD) {
				mpp->mtx_recurse++;
				goto done;
			}
		}

		if (((type) & MTX_TOPHALF) == 0) {
			/*
			 * If an interrupt thread uses this we must block
			 * interrupts here.
			 */
			if ((type) & MTX_FIRST) {
				ASS_IEN;
				disable_intr();
				_getlock_norecurse(mpp, CURTHD,
				    (type) & MTX_HARDOPTS);
			} else {
				_getlock_spin_block(mpp, CURTHD,
				    (type) & MTX_HARDOPTS);
			}
		} else
			_getlock_norecurse(mpp, CURTHD, (type) & MTX_HARDOPTS);
	} else {
		/* Sleep locks */
		if ((type) & MTX_RLIKELY)
			_getlock_sleep(mpp, CURTHD, (type) & MTX_HARDOPTS);
		else
			_getlock_norecurse(mpp, CURTHD, (type) & MTX_HARDOPTS);
	}
	done:
	WITNESS_ENTER(mpp, type, file, line);
	CTR5(KTR_LOCK, STR_mtx_enter_fmt,
	    mpp->mtx_description, mpp, file, line,
	    mpp->mtx_recurse);
}

/*
 * Attempt to get MTX_DEF lock; return non-zero if the lock was acquired.
 *
 * XXX DOES NOT HANDLE RECURSION
 */
_MTX_INLINE int
_mtx_try_enter(struct mtx *mtxp, int type, const char *file, int line)
{
	struct mtx	*const mpp = mtxp;
	int	rval;

	rval = _obtain_lock(mpp, CURTHD);
#ifdef MUTEX_DEBUG
	if (rval && mpp->mtx_witness != NULL) {
		MPASS(mpp->mtx_recurse == 0);
		witness_try_enter(mpp, type, file, line);
	}
#endif
	CTR5(KTR_LOCK, STR_mtx_try_enter_fmt,
	    mpp->mtx_description, mpp, file, line, rval);

	return (rval);
}
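
/*
 * Usage sketch (hypothetical caller): the try-enter pattern for code
 * that must not block waiting for the lock:
 *
 *	if (mtx_try_enter(&foo_mtx, MTX_DEF)) {
 *		... quick work under foo_mtx ...
 *		mtx_exit(&foo_mtx, MTX_DEF);
 *	} else {
 *		... defer or retry later ...
 *	}
 */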

/*
 * Release lock m.
 */
_MTX_INLINE void
_mtx_exit(struct mtx *mtxp, int type, const char *file, int line)
{
	struct mtx	*const mpp = mtxp;

	MPASS2(mtx_owned(mpp), STR_mtx_owned);
	WITNESS_EXIT(mpp, type, file, line);
	CTR5(KTR_LOCK, STR_mtx_exit_fmt,
	    mpp->mtx_description, mpp, file, line,
	    mpp->mtx_recurse);
	if ((type) & MTX_SPIN) {
		if ((type) & MTX_NORECURSE) {
			int mtx_intr = mpp->mtx_saveintr;

			MPASS2(mpp->mtx_recurse == 0, STR_mtx_recurse);
			_release_lock_quick(mpp);
			if (((type) & MTX_TOPHALF) == 0) {
				if ((type) & MTX_FIRST) {
					ASS_IDIS;
					enable_intr();
				} else
					restore_intr(mtx_intr);
			}
		} else {
			if (((type & MTX_TOPHALF) == 0) &&
			    (type & MTX_FIRST)) {
				ASS_IDIS;
				ASS_SIEN(mpp);
			}
			_exitlock_spin(mpp);
		}
	} else {
		/* Handle sleep locks */
		if ((type) & MTX_RLIKELY)
			_exitlock(mpp, CURTHD, (type) & MTX_HARDOPTS);
		else {
			_exitlock_norecurse(mpp, CURTHD,
			    (type) & MTX_HARDOPTS);
		}
	}
}

#endif	/* KLD_MODULE */

/* Avoid namespace pollution */
#ifndef _KERN_MUTEX_C_
#undef	_obtain_lock
#undef	_release_lock
#undef	_release_lock_quick
#undef	_getlock_sleep
#undef	_getlock_spin_block
#undef	_getlock_norecurse
#undef	_exitlock_norecurse
#undef	_exitlock
#undef	_exitlock_spin
#endif	/* !_KERN_MUTEX_C_ */

#endif	/* _KERNEL */
#endif	/* !LOCORE */
#endif	/* _SYS_MUTEX_H_ */