/*-
 * Copyright (c) 1997 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from BSDI $Id: mutex.h,v 2.7.2.35 2000/04/27 03:10:26 cp Exp $
 * $FreeBSD: head/sys/sys/mutex.h 69429 2000-12-01 00:10:59Z jhb $
 */

#ifndef _SYS_MUTEX_H_
#define _SYS_MUTEX_H_

#ifndef LOCORE
#include <sys/queue.h>

#ifdef _KERNEL
#include <sys/ktr.h>
#include <machine/atomic.h>
#include <machine/cpufunc.h>
#include <machine/globals.h>
#endif	/* _KERNEL */
#endif	/* !LOCORE */

#include <machine/mutex.h>

#ifndef LOCORE
#ifdef _KERNEL

/*
 * If kern_mutex.c is being built, compile non-inlined versions of various
 * functions so that kernel modules can use them.
 */
#ifndef _KERN_MUTEX_C_
#define _MTX_INLINE	static __inline
#else
#define _MTX_INLINE
#endif

/*
 * Mutex flags
 *
 * Types
 */
#define	MTX_DEF		0x0		/* Default (spin/sleep) */
#define MTX_SPIN	0x1		/* Spin only lock */

/* Options */
#define	MTX_RLIKELY	0x4		/* (opt) Recursion likely */
#define	MTX_NORECURSE	0x8		/* No recursion possible */
#define	MTX_NOSPIN	0x10		/* Don't spin before sleeping */
#define	MTX_NOSWITCH	0x20		/* Do not switch on release */
#define	MTX_FIRST	0x40		/* First spin lock holder */
#define MTX_TOPHALF	0x80		/* Interrupts not disabled on spin */
#define MTX_COLD	0x100		/* Mutex init'd before malloc works */

/* Options that should be passed on to mtx_enter_hard(), mtx_exit_hard() */
#define	MTX_HARDOPTS	(MTX_SPIN | MTX_FIRST | MTX_TOPHALF | MTX_NOSWITCH)

/* Flags/value used in mtx_lock */
#define	MTX_RECURSE	0x01		/* (non-spin) lock held recursively */
#define	MTX_CONTESTED	0x02		/* (non-spin) lock contested */
#define	MTX_FLAGMASK	~(MTX_RECURSE | MTX_CONTESTED)
#define MTX_UNOWNED	0x8		/* Cookie for free mutex */

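/*
 * A rough map of the mtx_lock word (an illustrative sketch; `td' stands
 * for a hypothetical owning thread pointer):
 *
 *	MTX_UNOWNED			lock is free
 *	(uintptr_t)td			owned, no waiters
 *	(uintptr_t)td | MTX_CONTESTED	owned, with blocked waiters
 *
 * Masking with MTX_FLAGMASK strips MTX_RECURSE and MTX_CONTESTED so that
 * the owner can be compared against CURTHD (see mtx_owned() below).
 */
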
#endif	/* _KERNEL */

#ifdef WITNESS
struct mtx_debug {
	/* If you add anything here, adjust the mtxf_t definition below */
	struct witness	*mtxd_witness;
	LIST_ENTRY(mtx)	mtxd_held;
	const char	*mtxd_file;
	int		mtxd_line;
	const char	*mtxd_description;
};

#define mtx_description	mtx_debug->mtxd_description
#define mtx_held	mtx_debug->mtxd_held
#define	mtx_line	mtx_debug->mtxd_line
#define	mtx_file	mtx_debug->mtxd_file
#define	mtx_witness	mtx_debug->mtxd_witness
#endif	/* WITNESS */

/*
 * Sleep/spin mutex
 */
struct mtx {
	volatile uintptr_t mtx_lock;	/* lock owner/gate/flags */
	volatile u_int	mtx_recurse;	/* number of recursive holds */
	u_int		mtx_saveintr;	/* saved flags (for spin locks) */
#ifdef WITNESS
	struct mtx_debug *mtx_debug;
#else
	const char	*mtx_description;
#endif
	TAILQ_HEAD(, proc) mtx_blocked;	/* queue of procs blocked on lock */
	LIST_ENTRY(mtx)	mtx_contested;	/* entry in contested-lock list */
	struct mtx	*mtx_next;	/* all locks in system */
	struct mtx	*mtx_prev;
};

#ifdef	WITNESS
#define	MUTEX_DECLARE(modifiers, name)					\
	static struct mtx_debug __mtx_debug_##name;			\
	modifiers struct mtx name = { 0, 0, 0, &__mtx_debug_##name }
#else
#define MUTEX_DECLARE(modifiers, name)	modifiers struct mtx name
#endif
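
/*
 * Example (a sketch; the mutex `foo_mtx' and its "foo" name are
 * hypothetical):
 *
 *	MUTEX_DECLARE(static, foo_mtx);
 *
 *	mtx_init(&foo_mtx, "foo", MTX_DEF | MTX_COLD);
 *
 * MUTEX_DECLARE() reserves the storage (and, under WITNESS, the debug
 * record) at compile time, which is what makes MTX_COLD initialization
 * possible before malloc works.
 */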

#define mp_fixme(string)

#ifdef _KERNEL
/* Prototypes */
void	mtx_init(struct mtx *m, const char *description, int flag);
void	mtx_enter_hard(struct mtx *, int type, int saveintr);
void	mtx_exit_hard(struct mtx *, int type);
void	mtx_destroy(struct mtx *m);

/*
 * Wrap the following functions with cpp macros so that filenames and line
 * numbers are embedded in the code correctly.
 */
#if (defined(KLD_MODULE) || defined(_KERN_MUTEX_C_))
void	_mtx_enter(struct mtx *mtxp, int type, const char *file, int line);
int	_mtx_try_enter(struct mtx *mtxp, int type, const char *file, int line);
void	_mtx_exit(struct mtx *mtxp, int type, const char *file, int line);
#endif

#define	mtx_enter(mtxp, type)						\
	_mtx_enter((mtxp), (type), __FILE__, __LINE__)

#define	mtx_try_enter(mtxp, type)					\
	_mtx_try_enter((mtxp), (type), __FILE__, __LINE__)

#define	mtx_exit(mtxp, type)						\
	_mtx_exit((mtxp), (type), __FILE__, __LINE__)
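
/*
 * Typical call sequence (a sketch; `foo_mtx' and its name are
 * hypothetical):
 *
 *	struct mtx foo_mtx;
 *
 *	mtx_init(&foo_mtx, "foo", MTX_DEF);
 *	mtx_enter(&foo_mtx, MTX_DEF);
 *	... critical section ...
 *	mtx_exit(&foo_mtx, MTX_DEF);
 *	mtx_destroy(&foo_mtx);
 *
 * The wrappers above splice in __FILE__ and __LINE__ so that KTR traces
 * and WITNESS diagnostics point at the caller rather than at this header.
 */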

/* Global locks */
extern struct mtx	sched_lock;
extern struct mtx	Giant;

/*
 * Used to replace a plain return with one that exits Giant and then
 * returns.
 */

#define EGAR(a)								\
do {									\
	mtx_exit(&Giant, MTX_DEF);					\
	return (a);							\
} while (0)

#define VEGAR								\
do {									\
	mtx_exit(&Giant, MTX_DEF);					\
	return;								\
} while (0)
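
/*
 * Example (a sketch): error paths in a function that runs under Giant can
 * bail out and release Giant in one step:
 *
 *	if (error != 0)
 *		EGAR(error);
 */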

#define DROP_GIANT_NOSWITCH()						\
do {									\
	int _giantcnt;							\
	WITNESS_SAVE_DECL(Giant);					\
									\
	if (mtx_owned(&Giant))						\
		WITNESS_SAVE(&Giant, Giant);				\
	for (_giantcnt = 0; mtx_owned(&Giant); _giantcnt++)		\
		mtx_exit(&Giant, MTX_DEF | MTX_NOSWITCH)

#define DROP_GIANT()							\
do {									\
	int _giantcnt;							\
	WITNESS_SAVE_DECL(Giant);					\
									\
	if (mtx_owned(&Giant))						\
		WITNESS_SAVE(&Giant, Giant);				\
	for (_giantcnt = 0; mtx_owned(&Giant); _giantcnt++)		\
		mtx_exit(&Giant, MTX_DEF)

#define PICKUP_GIANT()							\
	mtx_assert(&Giant, MA_NOTOWNED);				\
	while (_giantcnt--)						\
		mtx_enter(&Giant, MTX_DEF);				\
	if (mtx_owned(&Giant))						\
		WITNESS_RESTORE(&Giant, Giant);				\
} while (0)

#define PARTIAL_PICKUP_GIANT()						\
	mtx_assert(&Giant, MA_NOTOWNED);				\
	while (_giantcnt--)						\
		mtx_enter(&Giant, MTX_DEF);				\
	if (mtx_owned(&Giant))						\
		WITNESS_RESTORE(&Giant, Giant)
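
/*
 * Illustrative pairing (a sketch): DROP_GIANT() opens a block that
 * PICKUP_GIANT() closes, so the two must appear in the same scope.  The
 * block-local _giantcnt records how deeply Giant was recursively held,
 * and PICKUP_GIANT() restores that same depth:
 *
 *	DROP_GIANT();
 *	... code that must not hold Giant, e.g. a blocking call ...
 *	PICKUP_GIANT();
 */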

/*
 * Debugging
 */
#ifdef INVARIANTS
#define MA_OWNED	1
#define MA_NOTOWNED	2
#define MA_RECURSED	4
#define MA_NOTRECURSED	8
#define mtx_assert(m, what) do {					\
	switch ((what)) {						\
	case MA_OWNED:							\
	case MA_OWNED | MA_RECURSED:					\
	case MA_OWNED | MA_NOTRECURSED:					\
		if (!mtx_owned((m)))					\
			panic("mutex %s not owned at %s:%d",		\
			    (m)->mtx_description, __FILE__, __LINE__);	\
		if (mtx_recursed((m))) {				\
			if (((what) & MA_NOTRECURSED) != 0)		\
				panic("mutex %s recursed at %s:%d",	\
				    (m)->mtx_description, __FILE__, __LINE__); \
		} else if (((what) & MA_RECURSED) != 0)			\
			panic("mutex %s unrecursed at %s:%d",		\
			    (m)->mtx_description, __FILE__, __LINE__);	\
		break;							\
	case MA_NOTOWNED:						\
		if (mtx_owned((m)))					\
			panic("mutex %s owned at %s:%d",		\
			    (m)->mtx_description, __FILE__, __LINE__);	\
		break;							\
	default:							\
		panic("unknown mtx_assert at %s:%d", __FILE__, __LINE__); \
	}								\
} while (0)
#else	/* INVARIANTS */
#define mtx_assert(m, what)
#endif	/* INVARIANTS */
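
/*
 * Example (a sketch): a function that must be entered with sched_lock
 * held can document and enforce that assumption:
 *
 *	mtx_assert(&sched_lock, MA_OWNED);
 *
 * Under INVARIANTS a violation panics with the caller's file and line;
 * otherwise the assertion compiles away entirely.
 */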

#ifdef MUTEX_DEBUG
#define MPASS(ex) if (!(ex)) panic("Assertion %s failed at %s:%d",	\
		#ex, __FILE__, __LINE__)
#define MPASS2(ex, what) if (!(ex)) panic("Assertion %s failed at %s:%d", \
		what, __FILE__, __LINE__)

#else	/* MUTEX_DEBUG */
#define	MPASS(ex)
#define	MPASS2(ex, where)
#endif	/* MUTEX_DEBUG */

#ifdef	WITNESS
#define WITNESS_ENTER(m, t, f, l)					\
	if ((m)->mtx_witness != NULL)					\
		witness_enter((m), (t), (f), (l))
#define WITNESS_EXIT(m, t, f, l)					\
	if ((m)->mtx_witness != NULL)					\
		witness_exit((m), (t), (f), (l))

#define	WITNESS_SLEEP(check, m) witness_sleep(check, (m), __FILE__, __LINE__)
#define	WITNESS_SAVE_DECL(n)						\
	const char * __CONCAT(n, __wf);					\
	int __CONCAT(n, __wl)

#define	WITNESS_SAVE(m, n)						\
do {									\
	if ((m)->mtx_witness != NULL)					\
	    witness_save(m, &__CONCAT(n, __wf), &__CONCAT(n, __wl));	\
} while (0)

#define	WITNESS_RESTORE(m, n)						\
do {									\
	if ((m)->mtx_witness != NULL)					\
	    witness_restore(m, __CONCAT(n, __wf), __CONCAT(n, __wl));	\
} while (0)
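
/*
 * Illustrative pattern (a sketch; DROP_GIANT() and PICKUP_GIANT() above
 * use exactly this): preserve the witness file/line state across a region
 * where a lock is temporarily released:
 *
 *	WITNESS_SAVE_DECL(Giant);
 *
 *	WITNESS_SAVE(&Giant, Giant);
 *	... release the lock, block, reacquire it ...
 *	WITNESS_RESTORE(&Giant, Giant);
 */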

void	witness_init(struct mtx *, int flag);
void	witness_destroy(struct mtx *);
void	witness_enter(struct mtx *, int, const char *, int);
void	witness_try_enter(struct mtx *, int, const char *, int);
void	witness_exit(struct mtx *, int, const char *, int);
void	witness_display(void(*)(const char *fmt, ...));
void	witness_list(struct proc *);
int	witness_sleep(int, struct mtx *, const char *, int);
void	witness_save(struct mtx *, const char **, int *);
void	witness_restore(struct mtx *, const char *, int);
#else	/* WITNESS */
#define WITNESS_ENTER(m, t, f, l)
#define WITNESS_EXIT(m, t, f, l)
#define	WITNESS_SLEEP(check, m)
#define	WITNESS_SAVE_DECL(n)
#define	WITNESS_SAVE(m, n)
#define	WITNESS_RESTORE(m, n)

/*
 * flag++ is a sleazy way of shutting up the unused-parameter warning
 * in mtx_init()
 */
#define witness_init(m, flag) flag++
#define witness_destroy(m)
#define witness_enter(m, t, f, l)
#define witness_try_enter(m, t, f, l)
#define witness_exit(m, t, f, l)
#endif	/* WITNESS */

/*
 * Assembly macros (for internal use only)
 *------------------------------------------------------------------------------
 */

#define	_V(x)	__STRING(x)

/*
 * Default, unoptimized mutex micro-operations
 */

#ifndef _obtain_lock
/* Actually obtain mtx_lock */
#define _obtain_lock(mp, tid)						\
	atomic_cmpset_acq_ptr(&(mp)->mtx_lock, (void *)MTX_UNOWNED, (tid))
#endif

#ifndef _release_lock
/* Actually release mtx_lock */
#define _release_lock(mp, tid)						\
	atomic_cmpset_rel_ptr(&(mp)->mtx_lock, (tid), (void *)MTX_UNOWNED)
#endif

#ifndef _release_lock_quick
/* Actually release mtx_lock quickly, assuming that we own it */
#define	_release_lock_quick(mp)						\
	atomic_store_rel_ptr(&(mp)->mtx_lock, (void *)MTX_UNOWNED)
#endif
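
/*
 * A rough picture of the lock-word transitions above (an illustrative
 * sketch; `tid' is the owning thread pointer): _obtain_lock()
 * compare-and-sets MTX_UNOWNED -> tid with acquire semantics, and
 * _release_lock() compare-and-sets tid -> MTX_UNOWNED with release
 * semantics.  Either returns 0 when the word holds anything else (flag
 * bits set, or another owner), which is what diverts callers into the
 * mtx_enter_hard()/mtx_exit_hard() slow paths.  _release_lock_quick()
 * skips the comparison when the caller already knows the word is
 * exactly `tid'.
 */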

#ifndef _getlock_sleep
/* Get a sleep lock, deal with recursion inline. */
#define	_getlock_sleep(mp, tid, type) do {				\
	if (!_obtain_lock(mp, tid)) {					\
		if (((mp)->mtx_lock & MTX_FLAGMASK) != ((uintptr_t)(tid)))\
			mtx_enter_hard(mp, (type) & MTX_HARDOPTS, 0);	\
		else {							\
			atomic_set_ptr(&(mp)->mtx_lock, MTX_RECURSE);	\
			(mp)->mtx_recurse++;				\
		}							\
	}								\
} while (0)
#endif

#ifndef _getlock_spin_block
/* Get a spin lock, handle recursion inline (as the less common case) */
#define	_getlock_spin_block(mp, tid, type) do {				\
	u_int _mtx_intr = save_intr();					\
	disable_intr();							\
	if (!_obtain_lock(mp, tid))					\
		mtx_enter_hard(mp, (type) & MTX_HARDOPTS, _mtx_intr);	\
	else								\
		(mp)->mtx_saveintr = _mtx_intr;				\
} while (0)
#endif

#ifndef _getlock_norecurse
/*
 * Get a lock without any recursion handling.  Calls the hard enter
 * function if we can't get it inline.
 */
#define	_getlock_norecurse(mp, tid, type) do {				\
	if (!_obtain_lock(mp, tid))					\
		mtx_enter_hard((mp), (type) & MTX_HARDOPTS, 0);		\
} while (0)
#endif

#ifndef _exitlock_norecurse
/*
 * Release a sleep lock, assuming we haven't recursed on it; recursion is
 * handled in the hard function.
 */
#define	_exitlock_norecurse(mp, tid, type) do {				\
	if (!_release_lock(mp, tid))					\
		mtx_exit_hard((mp), (type) & MTX_HARDOPTS);		\
} while (0)
#endif

#ifndef _exitlock
/*
 * Release a sleep lock when it's likely we recursed (the code to
 * deal with simple recursion is inline).
 */
#define	_exitlock(mp, tid, type) do {					\
	if (!_release_lock(mp, tid)) {					\
		if ((mp)->mtx_lock & MTX_RECURSE) {			\
			if (--((mp)->mtx_recurse) == 0)			\
				atomic_clear_ptr(&(mp)->mtx_lock,	\
				    MTX_RECURSE);			\
		} else {						\
			mtx_exit_hard((mp), (type) & MTX_HARDOPTS);	\
		}							\
	}								\
} while (0)
#endif

#ifndef _exitlock_spin
/* Release a spin lock (with possible recursion). */
#define	_exitlock_spin(mp) do {						\
	if ((mp)->mtx_recurse == 0) {					\
		int _mtx_intr = (mp)->mtx_saveintr;			\
									\
		_release_lock_quick(mp);				\
		restore_intr(_mtx_intr);				\
	} else {							\
		(mp)->mtx_recurse--;					\
	}								\
} while (0)
#endif

/*
 * Externally visible mutex functions.
 *------------------------------------------------------------------------------
 */

/*
 * Return non-zero if a mutex is already owned by the current thread.
 */
#define	mtx_owned(m)	(((m)->mtx_lock & MTX_FLAGMASK) == (uintptr_t)CURTHD)

/*
 * Return non-zero if a mutex has been recursively acquired.
 */
#define mtx_recursed(m)	((m)->mtx_recurse != 0)
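
/*
 * Example (a sketch): release Giant only if the current thread actually
 * holds it:
 *
 *	if (mtx_owned(&Giant))
 *		mtx_exit(&Giant, MTX_DEF);
 */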

/* Common strings */
#ifdef _KERN_MUTEX_C_
#ifdef KTR_EXTEND

/*
 * KTR_EXTEND saves file name and line for all entries, so we don't need them
 * here.  Theoretically we should also change the entries which refer to them
 * (from CTR5 to CTR3), but since they're just passed to snprintf as the last
 * parameters, it doesn't do any harm to leave them.
 */
char	STR_mtx_enter_fmt[] = "GOT %s [%p] r=%d";
char	STR_mtx_exit_fmt[] = "REL %s [%p] r=%d";
char	STR_mtx_try_enter_fmt[] = "TRY_ENTER %s [%p] result=%d";
#else
char	STR_mtx_enter_fmt[] = "GOT %s [%p] r=%d at %s:%d";
char	STR_mtx_exit_fmt[] = "REL %s [%p] r=%d at %s:%d";
char	STR_mtx_try_enter_fmt[] = "TRY_ENTER %s [%p] result=%d at %s:%d";
#endif
char	STR_mtx_bad_type[] = "((type) & (MTX_NORECURSE | MTX_NOSWITCH)) == 0";
char	STR_mtx_owned[] = "mtx_owned(mpp)";
char	STR_mtx_recurse[] = "mpp->mtx_recurse == 0";
#else	/* _KERN_MUTEX_C_ */
extern	char STR_mtx_enter_fmt[];
extern	char STR_mtx_bad_type[];
extern	char STR_mtx_exit_fmt[];
extern	char STR_mtx_owned[];
extern	char STR_mtx_recurse[];
extern	char STR_mtx_try_enter_fmt[];
#endif	/* _KERN_MUTEX_C_ */

#ifndef KLD_MODULE
/*
 * Get lock `m'; the macro handles the easy (and most common) cases and
 * leaves the slow stuff to the mtx_enter_hard() function.
 *
 * Note: since type is usually a constant, much of this code is optimized out.
 */
_MTX_INLINE void
_mtx_enter(struct mtx *mtxp, int type, const char *file, int line)
{
	struct mtx	*mpp = mtxp;

	/* bits only valid on mtx_exit() */
	MPASS2(((type) & (MTX_NORECURSE | MTX_NOSWITCH)) == 0,
	    STR_mtx_bad_type);

	if ((type) & MTX_SPIN) {
		/*
		 * Easy cases of spin locks:
		 *
		 * 1) We already own the lock and will simply recurse on it (if
		 *    RLIKELY)
		 *
		 * 2) The lock is free, we just get it
		 */
		if ((type) & MTX_RLIKELY) {
			/*
			 * Check for recursion; if we already have this
			 * lock we just bump the recursion count.
			 */
			if (mpp->mtx_lock == (uintptr_t)CURTHD) {
				mpp->mtx_recurse++;
				goto done;
			}
		}

		if (((type) & MTX_TOPHALF) == 0) {
			/*
			 * If an interrupt thread uses this we must block
			 * interrupts here.
			 */
			if ((type) & MTX_FIRST) {
				ASS_IEN;
				disable_intr();
				_getlock_norecurse(mpp, CURTHD,
				    (type) & MTX_HARDOPTS);
			} else {
				_getlock_spin_block(mpp, CURTHD,
				    (type) & MTX_HARDOPTS);
			}
		} else
			_getlock_norecurse(mpp, CURTHD, (type) & MTX_HARDOPTS);
	} else {
		/* Sleep locks */
		if ((type) & MTX_RLIKELY)
			_getlock_sleep(mpp, CURTHD, (type) & MTX_HARDOPTS);
		else
			_getlock_norecurse(mpp, CURTHD, (type) & MTX_HARDOPTS);
	}
done:
	WITNESS_ENTER(mpp, type, file, line);
	CTR5(KTR_LOCK, STR_mtx_enter_fmt,
	    mpp->mtx_description, mpp, mpp->mtx_recurse, file, line);
}

/*
 * Attempt to get MTX_DEF lock, return non-zero if lock acquired.
 *
 * XXX DOES NOT HANDLE RECURSION
 */
_MTX_INLINE int
_mtx_try_enter(struct mtx *mtxp, int type, const char *file, int line)
{
	struct mtx	*const mpp = mtxp;
	int	rval;

	rval = _obtain_lock(mpp, CURTHD);
#ifdef WITNESS
	if (rval && mpp->mtx_witness != NULL) {
		MPASS(mpp->mtx_recurse == 0);
		witness_try_enter(mpp, type, file, line);
	}
#endif	/* WITNESS */
	CTR5(KTR_LOCK, STR_mtx_try_enter_fmt,
	    mpp->mtx_description, mpp, rval, file, line);

	return rval;
}

/*
 * Release lock m.
 */
_MTX_INLINE void
_mtx_exit(struct mtx *mtxp, int type, const char *file, int line)
{
	struct mtx	*const mpp = mtxp;

	MPASS2(mtx_owned(mpp), STR_mtx_owned);
	WITNESS_EXIT(mpp, type, file, line);
	CTR5(KTR_LOCK, STR_mtx_exit_fmt,
	    mpp->mtx_description, mpp, mpp->mtx_recurse, file, line);
	if ((type) & MTX_SPIN) {
		if ((type) & MTX_NORECURSE) {
			int mtx_intr = mpp->mtx_saveintr;

			MPASS2(mpp->mtx_recurse == 0, STR_mtx_recurse);
			_release_lock_quick(mpp);
			if (((type) & MTX_TOPHALF) == 0) {
				if ((type) & MTX_FIRST) {
					ASS_IDIS;
					enable_intr();
				} else
					restore_intr(mtx_intr);
			}
		} else {
			if (((type & MTX_TOPHALF) == 0) &&
			    (type & MTX_FIRST)) {
				ASS_IDIS;
				ASS_SIEN(mpp);
			}
			_exitlock_spin(mpp);
		}
	} else {
		/* Handle sleep locks */
		if ((type) & MTX_RLIKELY)
			_exitlock(mpp, CURTHD, (type) & MTX_HARDOPTS);
		else {
			_exitlock_norecurse(mpp, CURTHD,
			    (type) & MTX_HARDOPTS);
		}
	}
}

#endif	/* KLD_MODULE */

/* Avoid namespace pollution */
#ifndef _KERN_MUTEX_C_
#undef	_obtain_lock
#undef	_release_lock
#undef	_release_lock_quick
#undef	_getlock_sleep
#undef	_getlock_spin_block
#undef	_getlock_norecurse
#undef	_exitlock_norecurse
#undef	_exitlock
#undef	_exitlock_spin
#endif	/* !_KERN_MUTEX_C_ */

#endif	/* _KERNEL */
#endif	/* !LOCORE */
#endif	/* _SYS_MUTEX_H_ */