/*-
 * Copyright (c) 1997 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from BSDI $Id: mutex.h,v 2.7.2.35 2000/04/27 03:10:26 cp Exp $
 * $FreeBSD: head/sys/sys/mutex.h 67464 2000-10-23 09:22:18Z phk $
 */
31
#ifndef _SYS_MUTEX_H_
#define _SYS_MUTEX_H_

#ifndef LOCORE
#include <sys/queue.h>

#ifdef _KERNEL
#include <sys/ktr.h>
#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/cpufunc.h>
#include <machine/globals.h>
/*
 * Fallback declaration so mtx_owned()/CURTHD work even when the machine
 * headers above do not provide a curproc definition.
 */
#ifndef curproc
struct proc;
extern struct proc *curproc;
#endif
#endif	/* _KERNEL */
#endif	/* !LOCORE */

/* Included for both C and assembly (LOCORE) compiles, hence outside guard. */
#include <machine/mutex.h>

#ifndef LOCORE
#ifdef _KERNEL
55
/*
 * If kern_mutex.c is being built, compile non-inlined versions of various
 * functions so that kernel modules can use them.
 */
#ifndef _KERN_MUTEX_C_
#define _MTX_INLINE	static __inline	/* normal build: inline into callers */
#else
#define _MTX_INLINE			/* kern_mutex.c: emit real functions */
#endif
65
/*
 * Mutex flags
 *
 * Types (mutually exclusive; passed to mtx_init()/mtx_enter())
 */
#define	MTX_DEF		0x0		/* Default (spin/sleep) */
#define MTX_SPIN	0x1		/* Spin only lock */

/* Options */
#define	MTX_RLIKELY	0x4		/* (opt) Recursion likely */
#define	MTX_NORECURSE	0x8		/* No recursion possible */
#define	MTX_NOSPIN	0x10		/* Don't spin before sleeping */
#define	MTX_NOSWITCH	0x20		/* Do not switch on release */
#define	MTX_FIRST	0x40		/* First spin lock holder */
#define MTX_TOPHALF	0x80		/* Interrupts not disabled on spin */
#define MTX_COLD	0x100		/* Mutex init'd before malloc works */

/* options that should be passed on to mtx_enter_hard, mtx_exit_hard */
#define	MTX_HARDOPTS	(MTX_SPIN | MTX_FIRST | MTX_TOPHALF | MTX_NOSWITCH)

/* Flags/value used in mtx_lock */
#define	MTX_RECURSE	0x01		/* (non-spin) lock held recursively */
#define	MTX_CONTESTED	0x02		/* (non-spin) lock contested */
/* Parenthesized so the mask expands safely inside larger expressions. */
#define	MTX_FLAGMASK	(~(MTX_RECURSE | MTX_CONTESTED))
#define MTX_UNOWNED	0x8		/* Cookie for free mutex */
91
92#endif	/* _KERNEL */
93
#ifdef MUTEX_DEBUG
/*
 * Out-of-line debugging state for a mutex, pointed to by the mtx_debug
 * member of struct mtx when MUTEX_DEBUG is configured.
 */
struct mtx_debug {
	/* If you add anything here, adjust the mtxf_t definition below */
	struct witness	*mtxd_witness;		/* witness (lock order) record */
	LIST_ENTRY(mtx)	mtxd_held;		/* list linkage; presumably the
						   holder's held-lock list —
						   maintained by witness code */
	const char	*mtxd_file;		/* file of last acquisition */
	int		mtxd_line;		/* line of last acquisition */
	const char	*mtxd_description;	/* printable lock name */
};

/*
 * Map the plain member names onto the debug record, so the rest of this
 * file reads identically with or without MUTEX_DEBUG.
 */
#define mtx_description	mtx_debug->mtxd_description
#define mtx_held	mtx_debug->mtxd_held
#define	mtx_line	mtx_debug->mtxd_line
#define	mtx_file	mtx_debug->mtxd_file
#define	mtx_witness	mtx_debug->mtxd_witness
#endif
110
/*
 * Sleep/spin mutex
 *
 * NOTE(review): field order is likely depended upon by MUTEX_DECLARE's
 * initializer below and possibly by assembly offsets — do not reorder.
 */
struct mtx {
	volatile uintptr_t mtx_lock;	/* lock owner/gate/flags */
	volatile u_int	mtx_recurse;	/* number of recursive holds */
	u_int		mtx_saveintr;	/* saved flags (for spin locks) */
#ifdef MUTEX_DEBUG
	struct mtx_debug *mtx_debug;	/* out-of-line debug state */
#else
	const char	*mtx_description;	/* printable lock name */
#endif
	TAILQ_HEAD(, proc) mtx_blocked;	/* procs blocked on this lock */
	LIST_ENTRY(mtx)	mtx_contested;	/* linkage for contested locks */
	struct mtx	*mtx_next;	/* all locks in system */
	struct mtx	*mtx_prev;
};
128
#ifdef	MUTEX_DEBUG
/*
 * Statically declare mutex `name' with its companion mtx_debug record
 * already wired up, so the lock is usable before dynamic initialization
 * (cf. MTX_COLD above).
 */
#define	MUTEX_DECLARE(modifiers, name)					\
	static struct mtx_debug __mtx_debug_##name;			\
	modifiers struct mtx name = { 0, 0, 0, &__mtx_debug_##name }
#else
#define MUTEX_DECLARE(modifiers, name)	modifiers struct mtx name
#endif

/* No-op marker for code that still needs multiprocessor attention. */
#define mp_fixme(string)
138
#ifdef _KERNEL
/* Misc */
#define CURTHD	CURPROC	/* Current thread ID */

/* Prototypes (slow paths and setup, implemented in kern_mutex.c) */
void	mtx_init(struct mtx *m, const char *description, int flag);
void	mtx_enter_hard(struct mtx *, int type, int saveintr);
void	mtx_exit_hard(struct mtx *, int type);
void	mtx_destroy(struct mtx *m);
148
/*
 * Wrap the following functions with cpp macros so that filenames and line
 * numbers are embedded in the code correctly.
 */
#if (defined(KLD_MODULE) || defined(_KERN_MUTEX_C_))
void	_mtx_enter(struct mtx *mtxp, int type, const char *file, int line);
int	_mtx_try_enter(struct mtx *mtxp, int type, const char *file, int line);
void	_mtx_exit(struct mtx *mtxp, int type, const char *file, int line);
#endif

#define	mtx_enter(mtxp, type)						\
	_mtx_enter((mtxp), (type), __FILE__, __LINE__)

#define	mtx_try_enter(mtxp, type)					\
	_mtx_try_enter((mtxp), (type), __FILE__, __LINE__)

#define	mtx_exit(mtxp, type)						\
	_mtx_exit((mtxp), (type), __FILE__, __LINE__)

/* Global locks */
extern struct mtx	sched_lock;
extern struct mtx	Giant;
171
/*
 * Used to replace return with an exit Giant and return.
 */

#define EGAR(a)								\
do {									\
	mtx_exit(&Giant, MTX_DEF);					\
	return (a);							\
} while (0)

/* As EGAR(), but for functions returning void. */
#define VEGAR								\
do {									\
	mtx_exit(&Giant, MTX_DEF);					\
	return;								\
} while (0)

/*
 * Fully release Giant (which may be held recursively), remembering the
 * recursion depth in _giantcnt.  NOTE: this macro deliberately opens a
 * "do {" block that is closed by the "} while (0)" at the end of
 * PICKUP_GIANT(); the two must bracket the same scope.
 */
#define DROP_GIANT()							\
do {									\
	int _giantcnt;							\
	WITNESS_SAVE_DECL(Giant);					\
									\
	if (mtx_owned(&Giant))						\
		WITNESS_SAVE(&Giant, Giant);				\
	for (_giantcnt = 0; mtx_owned(&Giant); _giantcnt++)		\
		mtx_exit(&Giant, MTX_DEF)

/* Re-acquire Giant to the saved depth and close DROP_GIANT()'s block. */
#define PICKUP_GIANT()							\
	mtx_assert(&Giant, MA_NOTOWNED);				\
	while (_giantcnt--)						\
		mtx_enter(&Giant, MTX_DEF);				\
	if (mtx_owned(&Giant))						\
		WITNESS_RESTORE(&Giant, Giant);				\
} while (0)

/* As PICKUP_GIANT(), but leaves DROP_GIANT()'s block open. */
#define PARTIAL_PICKUP_GIANT()						\
	mtx_assert(&Giant, MA_NOTOWNED);				\
	while (_giantcnt--)						\
		mtx_enter(&Giant, MTX_DEF);				\
	if (mtx_owned(&Giant))						\
		WITNESS_RESTORE(&Giant, Giant)

213
/*
 * Debugging
 */
#ifdef INVARIANTS
#define MA_OWNED	1
#define MA_NOTOWNED	2
/*
 * Assert that the current thread does (MA_OWNED) or does not (MA_NOTOWNED)
 * own mutex `m'; panic with file/line otherwise.  Wrapped in do/while so
 * the macro is a single statement and expands safely in unbraced
 * if/else bodies (the old bare-brace form did not).
 */
#define mtx_assert(m, what) do {					\
	switch ((what)) {						\
	case MA_OWNED:							\
		if (!mtx_owned((m)))					\
			panic("mutex %s not owned at %s:%d",		\
			    (m)->mtx_description, __FILE__, __LINE__);	\
		break;							\
	case MA_NOTOWNED:						\
		if (mtx_owned((m)))					\
			panic("mutex %s owned at %s:%d",		\
			    (m)->mtx_description, __FILE__, __LINE__);	\
		break;							\
	default:							\
		panic("unknown mtx_assert at %s:%d", __FILE__, __LINE__); \
	}								\
} while (0)
#else	/* INVARIANTS */
#define mtx_assert(m, what)
#endif	/* INVARIANTS */
239
#ifdef MUTEX_DEBUG
/*
 * Mutex-private assertions; panic with the (stringified) expression and
 * file/line on failure.  Wrapped in do/while so a bare
 * "if (cond) MPASS(ex); else ..." cannot capture the internal `if'.
 */
#define MPASS(ex)	do {						\
	if (!(ex))							\
		panic("Assertion %s failed at %s:%d",			\
		    #ex, __FILE__, __LINE__);				\
} while (0)
/* As MPASS(), but with a caller-supplied description string. */
#define MPASS2(ex, what)	do {					\
	if (!(ex))							\
		panic("Assertion %s failed at %s:%d",			\
		    what, __FILE__, __LINE__);				\
} while (0)

#else	/* MUTEX_DEBUG */
#define	MPASS(ex)
#define	MPASS2(ex, where)
#endif	/* MUTEX_DEBUG */
250
#ifdef	WITNESS
#ifndef	MUTEX_DEBUG
#error	WITNESS requires MUTEX_DEBUG
#endif	/* MUTEX_DEBUG */
/*
 * Record an acquisition/release with the witness lock-order checker when
 * the mutex has a witness record.  Wrapped in do/while (matching
 * WITNESS_SAVE/WITNESS_RESTORE below) so the embedded `if' cannot pair
 * with an unbraced caller's `else'.
 */
#define WITNESS_ENTER(m, t, f, l)					\
do {									\
	if ((m)->mtx_witness != NULL)					\
		witness_enter((m), (t), (f), (l));			\
} while (0)
#define WITNESS_EXIT(m, t, f, l)					\
do {									\
	if ((m)->mtx_witness != NULL)					\
		witness_exit((m), (t), (f), (l));			\
} while (0)

#define	WITNESS_SLEEP(check, m) witness_sleep(check, (m), __FILE__, __LINE__)
/* Declare storage for the file/line saved by WITNESS_SAVE below. */
#define	WITNESS_SAVE_DECL(n)						\
	const char * __CONCAT(n, __wf);					\
	int __CONCAT(n, __wl)

#define	WITNESS_SAVE(m, n) 						\
do {									\
	if ((m)->mtx_witness != NULL) 					\
	    witness_save(m, &__CONCAT(n, __wf), &__CONCAT(n, __wl));	\
} while (0)

#define	WITNESS_RESTORE(m, n) 						\
do {									\
	if ((m)->mtx_witness != NULL)					\
	    witness_restore(m, __CONCAT(n, __wf), __CONCAT(n, __wl));	\
} while (0)

/* Implemented in the witness code (kern_mutex.c / subr_witness). */
void	witness_init(struct mtx *, int flag);
void	witness_destroy(struct mtx *);
void	witness_enter(struct mtx *, int, const char *, int);
void	witness_try_enter(struct mtx *, int, const char *, int);
void	witness_exit(struct mtx *, int, const char *, int);
void	witness_display(void(*)(const char *fmt, ...));
void	witness_list(struct proc *);
int	witness_sleep(int, struct mtx *, const char *, int);
void	witness_save(struct mtx *, const char **, int *);
void	witness_restore(struct mtx *, const char *, int);
#else	/* WITNESS */
#define WITNESS_ENTER(m, t, f, l)
#define WITNESS_EXIT(m, t, f, l)
#define	WITNESS_SLEEP(check, m)
#define	WITNESS_SAVE_DECL(n)
#define	WITNESS_SAVE(m, n)
#define	WITNESS_RESTORE(m, n)

/*
 * flag++ is slezoid way of shutting up unused parameter warning
 * in mtx_init()
 */
#define witness_init(m, flag) flag++
#define witness_destroy(m)
#define witness_enter(m, t, f, l)
#define witness_try_enter(m, t, f, l)
#define witness_exit(m, t, f, l)
#endif	/* WITNESS */
307
/*
 * Assembly macros (for internal use only)
 *------------------------------------------------------------------------------
 */

/* Stringify the expanded value of x. */
#define	_V(x)	__STRING(x)

/*
 * Default, unoptimized mutex micro-operations.  machine/mutex.h may
 * provide optimized replacements, in which case these are skipped.
 */

#ifndef _obtain_lock
/* Actually obtain mtx_lock: atomically swap MTX_UNOWNED for our tid. */
#define _obtain_lock(mp, tid)						\
	atomic_cmpset_acq_ptr(&(mp)->mtx_lock, (void *)MTX_UNOWNED, (tid))
#endif

#ifndef _release_lock
/* Actually release mtx_lock; the cmpset fails (returns 0) when mtx_lock
 * is not exactly our tid, i.e. recursion/contested bits are set. */
#define _release_lock(mp, tid)		       				\
	atomic_cmpset_rel_ptr(&(mp)->mtx_lock, (tid), (void *)MTX_UNOWNED)
#endif

#ifndef _release_lock_quick
/* Actually release mtx_lock quickly assuming that we own it */
#define	_release_lock_quick(mp) 					\
	atomic_store_rel_ptr(&(mp)->mtx_lock, (void *)MTX_UNOWNED)
#endif
336
#ifndef _getlock_sleep
/*
 * Get a sleep lock, deal with recursion inline.
 * Fast path is the cmpset; on failure, if the owner bits of mtx_lock match
 * our tid we already own it and just set MTX_RECURSE and bump the count,
 * otherwise enter the hard (blocking) path.
 */
#define	_getlock_sleep(mp, tid, type) do {				\
	if (!_obtain_lock(mp, tid)) {					\
		if (((mp)->mtx_lock & MTX_FLAGMASK) != ((uintptr_t)(tid)))\
			mtx_enter_hard(mp, (type) & MTX_HARDOPTS, 0);	\
		else {							\
			atomic_set_ptr(&(mp)->mtx_lock, MTX_RECURSE);	\
			(mp)->mtx_recurse++;				\
		}							\
	}								\
} while (0)
#endif
350
#ifndef _getlock_spin_block
/*
 * Get a spin lock, handle recursion inline (as the less common case).
 * Interrupts are disabled before the attempt; on success the prior
 * interrupt state is stashed in mtx_saveintr, on contention it is handed
 * to mtx_enter_hard() so it can be restored there.
 */
#define	_getlock_spin_block(mp, tid, type) do {				\
	u_int _mtx_intr = save_intr();					\
	disable_intr();							\
	if (!_obtain_lock(mp, tid))					\
		mtx_enter_hard(mp, (type) & MTX_HARDOPTS, _mtx_intr);	\
	else								\
		(mp)->mtx_saveintr = _mtx_intr;				\
} while (0)
#endif
362
#ifndef _getlock_norecurse
/*
 * Get a lock without any recursion handling. Calls the hard enter function if
 * we can't get it inline.
 */
#define	_getlock_norecurse(mp, tid, type) do {				\
	if (!_obtain_lock(mp, tid))					\
		mtx_enter_hard((mp), (type) & MTX_HARDOPTS, 0);		\
} while (0)
#endif
373
#ifndef _exitlock_norecurse
/*
 * Release a sleep lock assuming we haven't recursed on it, recursion is handled
 * in the hard function (entered when the cmpset release fails, i.e. the
 * lock word carries recursion/contested state).
 */
#define	_exitlock_norecurse(mp, tid, type) do {				\
	if (!_release_lock(mp, tid))					\
		mtx_exit_hard((mp), (type) & MTX_HARDOPTS);		\
} while (0)
#endif
384
#ifndef _exitlock
/*
 * Release a sleep lock when its likely we recursed (the code to
 * deal with simple recursion is inline).
 * If the cmpset release fails: either MTX_RECURSE is set, in which case we
 * unwind one level (clearing the flag on the last hold), or the lock is
 * contested and mtx_exit_hard() must hand it off.
 */
#define	_exitlock(mp, tid, type) do {					\
	if (!_release_lock(mp, tid)) {					\
		if ((mp)->mtx_lock & MTX_RECURSE) {			\
			if (--((mp)->mtx_recurse) == 0)			\
				atomic_clear_ptr(&(mp)->mtx_lock,	\
				    MTX_RECURSE);			\
		} else {						\
			mtx_exit_hard((mp), (type) & MTX_HARDOPTS);	\
		}							\
	}								\
} while (0)
#endif
402
#ifndef _exitlock_spin
/*
 * Release a spin lock (with possible recursion).
 * Only the outermost release stores MTX_UNOWNED and restores the interrupt
 * state saved at acquisition; inner releases just drop the recursion count.
 */
#define	_exitlock_spin(mp) do {						\
	if ((mp)->mtx_recurse == 0) {					\
		int _mtx_intr = (mp)->mtx_saveintr;			\
									\
		_release_lock_quick(mp);				\
		restore_intr(_mtx_intr);				\
	} else {							\
		(mp)->mtx_recurse--;					\
	}								\
} while (0)
#endif
416
/*
 * Externally visible mutex functions.
 *------------------------------------------------------------------------------
 */

/*
 * Return non-zero if a mutex is already owned by the current thread.
 * (Masks off MTX_RECURSE/MTX_CONTESTED before comparing against CURTHD.)
 */
#define	mtx_owned(m)    (((m)->mtx_lock & MTX_FLAGMASK) == (uintptr_t)CURTHD)
426
/* Common strings: defined once in kern_mutex.c, referenced extern from
 * every other compilation unit so inlined call sites share one copy. */
#ifdef _KERN_MUTEX_C_
#ifdef KTR_EXTEND

/*
 * KTR_EXTEND saves file name and line for all entries, so we don't need them
 * here.  Theoretically we should also change the entries which refer to them
 * (from CTR5 to CTR3), but since they're just passed to snprintf as the last
 * parameters, it doesn't do any harm to leave them.
 */
char	STR_mtx_enter_fmt[] = "GOT %s [%x] r=%d";
char	STR_mtx_exit_fmt[] = "REL %s [%x] r=%d";
char	STR_mtx_try_enter_fmt[] = "TRY_ENTER %s [%x] result=%d";
#else
char	STR_mtx_enter_fmt[] = "GOT %s [%x] at %s:%d r=%d";
char	STR_mtx_exit_fmt[] = "REL %s [%x] at %s:%d r=%d";
char	STR_mtx_try_enter_fmt[] = "TRY_ENTER %s [%x] at %s:%d result=%d";
#endif
char	STR_mtx_bad_type[] = "((type) & (MTX_NORECURSE | MTX_NOSWITCH)) == 0";
char	STR_mtx_owned[] = "mtx_owned(mpp)";
char	STR_mtx_recurse[] = "mpp->mtx_recurse == 0";
#else	/* _KERN_MUTEX_C_ */
extern	char STR_mtx_enter_fmt[];
extern	char STR_mtx_bad_type[];
extern	char STR_mtx_exit_fmt[];
extern	char STR_mtx_owned[];
extern	char STR_mtx_recurse[];
extern	char STR_mtx_try_enter_fmt[];
#endif	/* _KERN_MUTEX_C_ */
456
#ifndef KLD_MODULE
/*
 * Get lock 'm', the macro handles the easy (and most common cases) and leaves
 * the slow stuff to the mtx_enter_hard() function.
 *
 * Note: since type is usually a constant much of this code is optimized out.
 */
_MTX_INLINE void
_mtx_enter(struct mtx *mtxp, int type, const char *file, int line)
{
	struct mtx	*mpp = mtxp;

	/* bits only valid on mtx_exit() */
	MPASS2(((type) & (MTX_NORECURSE | MTX_NOSWITCH)) == 0,
	    STR_mtx_bad_type);

	if ((type) & MTX_SPIN) {
		/*
		 * Easy cases of spin locks:
		 *
		 * 1) We already own the lock and will simply recurse on it (if
		 *    RLIKELY)
		 *
		 * 2) The lock is free, we just get it
		 */
		if ((type) & MTX_RLIKELY) {
			/*
			 * Check for recursion, if we already have this
			 * lock we just bump the recursion count.
			 */
			if (mpp->mtx_lock == (uintptr_t)CURTHD) {
				mpp->mtx_recurse++;
				goto done;
			}
		}

		if (((type) & MTX_TOPHALF) == 0) {
			/*
			 * If an interrupt thread uses this we must block
			 * interrupts here.
			 */
			if ((type) & MTX_FIRST) {
				ASS_IEN;
				disable_intr();
				_getlock_norecurse(mpp, CURTHD,
				    (type) & MTX_HARDOPTS);
			} else {
				_getlock_spin_block(mpp, CURTHD,
				    (type) & MTX_HARDOPTS);
			}
		} else
			_getlock_norecurse(mpp, CURTHD, (type) & MTX_HARDOPTS);
	} else {
		/* Sleep locks */
		if ((type) & MTX_RLIKELY)
			_getlock_sleep(mpp, CURTHD, (type) & MTX_HARDOPTS);
		else
			_getlock_norecurse(mpp, CURTHD, (type) & MTX_HARDOPTS);
	}
	done:	/* target of the spin-recursion fast path above */
	WITNESS_ENTER(mpp, type, file, line);
	CTR5(KTR_LOCK, STR_mtx_enter_fmt,
	    mpp->mtx_description, mpp, file, line,
	    mpp->mtx_recurse);
}
522
/*
 * Attempt to get MTX_DEF lock, return non-zero if lock acquired.
 *
 * XXX DOES NOT HANDLE RECURSION
 * (note: `type' is only consulted under MUTEX_DEBUG, for witness)
 */
_MTX_INLINE int
_mtx_try_enter(struct mtx *mtxp, int type, const char *file, int line)
{
	struct mtx	*const mpp = mtxp;
	int	rval;

	rval = _obtain_lock(mpp, CURTHD);
#ifdef MUTEX_DEBUG
	/* On success, record the acquisition with witness (if enabled). */
	if (rval && mpp->mtx_witness != NULL) {
		MPASS(mpp->mtx_recurse == 0);
		witness_try_enter(mpp, type, file, line);
	}
#endif
	CTR5(KTR_LOCK, STR_mtx_try_enter_fmt,
	    mpp->mtx_description, mpp, file, line, rval);

	return rval;
}
546
/*
 * Release lock m.  `type' must carry the same MTX_SPIN selection used to
 * acquire, plus any of the exit-only option bits (MTX_NORECURSE etc.).
 */
_MTX_INLINE void
_mtx_exit(struct mtx *mtxp, int type, const char *file, int line)
{
	struct mtx	*const mpp = mtxp;

	MPASS2(mtx_owned(mpp), STR_mtx_owned);
	WITNESS_EXIT(mpp, type, file, line);
	CTR5(KTR_LOCK, STR_mtx_exit_fmt,
	    mpp->mtx_description, mpp, file, line,
	    mpp->mtx_recurse);
	if ((type) & MTX_SPIN) {
		/* Spin locks: MTX_NORECURSE lets us skip the recursion test. */
		if ((type) & MTX_NORECURSE) {
			int mtx_intr = mpp->mtx_saveintr;

			MPASS2(mpp->mtx_recurse == 0, STR_mtx_recurse);
			_release_lock_quick(mpp);
			if (((type) & MTX_TOPHALF) == 0) {
				if ((type) & MTX_FIRST) {
					ASS_IDIS;
					enable_intr();
				} else
					restore_intr(mtx_intr);
			}
		} else {
			if (((type & MTX_TOPHALF) == 0) &&
			    (type & MTX_FIRST)) {
				ASS_IDIS;
				ASS_SIEN(mpp);
			}
			_exitlock_spin(mpp);
		}
	} else {
		/* Handle sleep locks */
		if ((type) & MTX_RLIKELY)
			_exitlock(mpp, CURTHD, (type) & MTX_HARDOPTS);
		else {
			_exitlock_norecurse(mpp, CURTHD,
			    (type) & MTX_HARDOPTS);
		}
	}
}
591
592#endif	/* KLD_MODULE */
593
/* Avoid namespace pollution */
#ifndef _KERN_MUTEX_C_
/*
 * The micro-op macros above are implementation details; hide them from
 * everything except kern_mutex.c itself.
 */
#undef	_obtain_lock
#undef	_release_lock
#undef	_release_lock_quick
#undef	_getlock_sleep
#undef	_getlock_spin_block
#undef	_getlock_norecurse
#undef	_exitlock_norecurse
#undef	_exitlock
#undef	_exitlock_spin
#endif	/* !_KERN_MUTEX_C_ */
606
607#endif	/* _KERNEL */
608#endif	/* !LOCORE */
609#endif	/* _SYS_MUTEX_H_ */
610