/*-
 * Copyright (c) 1997 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from BSDI $Id: mutex.h,v 2.7.2.35 2000/04/27 03:10:26 cp Exp $
 * $FreeBSD: head/sys/sys/mutex.h 69750 2000-12-08 09:51:13Z jake $
 */

#ifndef _SYS_MUTEX_H_
#define _SYS_MUTEX_H_

#ifndef LOCORE
#include <sys/queue.h>

#ifdef _KERNEL
#include <sys/ktr.h>
#include <machine/atomic.h>
#include <machine/cpufunc.h>
#include <machine/globals.h>
#endif	/* _KERNEL */
#endif	/* !LOCORE */

#include <machine/mutex.h>

#ifndef LOCORE
#ifdef _KERNEL

/*
 * If kern_mutex.c is being built, compile non-inlined versions of various
 * functions so that kernel modules can use them.
 */
#ifndef _KERN_MUTEX_C_
#define _MTX_INLINE	static __inline
#else
#define _MTX_INLINE
#endif
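
/*
 * Illustration (editorial sketch, not part of the original header): with
 * _KERN_MUTEX_C_ undefined, a definition such as
 *
 *	_MTX_INLINE void
 *	_mtx_exit(struct mtx *mtxp, int type, const char *file, int line)
 *
 * expands to "static __inline void _mtx_exit(...)", so every translation
 * unit that includes this header gets its own inlined copy.  When
 * kern_mutex.c defines _KERN_MUTEX_C_, the same definition compiles to a
 * normal external function that KLD modules can link against.
 */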

/*
 * Mutex flags
 *
 * Types
 */
#define	MTX_DEF		0x0		/* Default (spin/sleep) */
#define MTX_SPIN	0x1		/* Spin only lock */

/* Options */
#define	MTX_RLIKELY	0x4		/* (opt) Recursion likely */
#define	MTX_NORECURSE	0x8		/* No recursion possible */
#define	MTX_NOSPIN	0x10		/* Don't spin before sleeping */
#define	MTX_NOSWITCH	0x20		/* Do not switch on release */
#define	MTX_FIRST	0x40		/* First spin lock holder */
#define MTX_TOPHALF	0x80		/* Interrupts not disabled on spin */
#define MTX_COLD	0x100		/* Mutex init'd before malloc works */

/* Options that should be passed on to mtx_enter_hard, mtx_exit_hard */
#define	MTX_HARDOPTS	(MTX_SPIN | MTX_FIRST | MTX_TOPHALF | MTX_NOSWITCH)

/* Flags/value used in mtx_lock */
#define	MTX_RECURSE	0x01		/* (non-spin) lock held recursively */
#define	MTX_CONTESTED	0x02		/* (non-spin) lock contested */
#define	MTX_FLAGMASK	~(MTX_RECURSE | MTX_CONTESTED)
#define MTX_UNOWNED	0x8		/* Cookie for free mutex */

#endif	/* _KERNEL */
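
/*
 * Sketch of the mtx_lock word implied by the flags above: a free mutex
 * holds the cookie MTX_UNOWNED; an owned sleep mutex holds the owning
 * thread pointer (CURTHD), possibly OR'd with MTX_RECURSE and/or
 * MTX_CONTESTED in its low bits, so the owner is recovered with
 *
 *	(m)->mtx_lock & MTX_FLAGMASK
 *
 * as mtx_owned() does below.
 */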

#ifdef WITNESS
struct mtx_debug {
	/* If you add anything here, adjust the mtxf_t definition below */
	struct witness	*mtxd_witness;
	LIST_ENTRY(mtx)	mtxd_held;
	const char	*mtxd_file;
	int		mtxd_line;
	const char	*mtxd_description;
};

#define mtx_description	mtx_debug->mtxd_description
#define mtx_held	mtx_debug->mtxd_held
#define	mtx_line	mtx_debug->mtxd_line
#define	mtx_file	mtx_debug->mtxd_file
#define	mtx_witness	mtx_debug->mtxd_witness
#endif	/* WITNESS */

/*
 * Sleep/spin mutex
 */
struct mtx {
	volatile uintptr_t mtx_lock;	/* lock owner/gate/flags */
	volatile u_int	mtx_recurse;	/* number of recursive holds */
	u_int		mtx_saveintr;	/* saved flags (for spin locks) */
#ifdef WITNESS
	struct mtx_debug *mtx_debug;
#else
	const char	*mtx_description;
#endif
	TAILQ_HEAD(, proc) mtx_blocked;
	LIST_ENTRY(mtx)	mtx_contested;
	struct mtx	*mtx_next;	/* all locks in system */
	struct mtx	*mtx_prev;
};

#ifdef	WITNESS
#define	MUTEX_DECLARE(modifiers, name)					\
	static struct mtx_debug __mtx_debug_##name;			\
	modifiers struct mtx name = { 0, 0, 0, &__mtx_debug_##name }
#else
#define MUTEX_DECLARE(modifiers, name)	modifiers struct mtx name
#endif
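
/*
 * Usage sketch (illustrative; "foo_mtx" is a made-up name): a mutex that
 * must be usable before malloc(9) works can be declared statically and
 * then initialized with MTX_COLD:
 *
 *	MUTEX_DECLARE(static, foo_mtx);
 *
 *	mtx_init(&foo_mtx, "foo", MTX_DEF | MTX_COLD);
 *
 * Under WITNESS this also allocates the backing struct mtx_debug at
 * compile time rather than at mtx_init() time.
 */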

#define mp_fixme(string)

#ifdef _KERNEL
/* Prototypes */
void	mtx_init(struct mtx *m, const char *description, int flag);
void	mtx_enter_hard(struct mtx *, int type, int saveintr);
void	mtx_exit_hard(struct mtx *, int type);
void	mtx_destroy(struct mtx *m);

/*
 * Wrap the following functions with cpp macros so that filenames and line
 * numbers are embedded in the code correctly.
 */
#if (defined(KLD_MODULE) || defined(_KERN_MUTEX_C_))
void	_mtx_enter(struct mtx *mtxp, int type, const char *file, int line);
int	_mtx_try_enter(struct mtx *mtxp, int type, const char *file, int line);
void	_mtx_exit(struct mtx *mtxp, int type, const char *file, int line);
#endif

#define	mtx_enter(mtxp, type)						\
	_mtx_enter((mtxp), (type), __FILE__, __LINE__)

#define	mtx_try_enter(mtxp, type)					\
	_mtx_try_enter((mtxp), (type), __FILE__, __LINE__)

#define	mtx_exit(mtxp, type)						\
	_mtx_exit((mtxp), (type), __FILE__, __LINE__)
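
/*
 * Typical usage (illustrative; "foo_mtx" and "foo_count" are made-up
 * names):
 *
 *	mtx_enter(&foo_mtx, MTX_DEF);
 *	foo_count++;
 *	mtx_exit(&foo_mtx, MTX_DEF);
 *
 * The wrapper macros pass __FILE__ and __LINE__ so that KTR traces and
 * WITNESS diagnostics point at the caller, not at this header.
 */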

/* Global locks */
extern struct mtx	sched_lock;
extern struct mtx	Giant;

/*
 * Used to replace a plain return with one that exits Giant before
 * returning.
 */

#define EGAR(a)								\
do {									\
	mtx_exit(&Giant, MTX_DEF);					\
	return (a);							\
} while (0)

#define VEGAR								\
do {									\
	mtx_exit(&Giant, MTX_DEF);					\
	return;								\
} while (0)
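
/*
 * For example (illustrative), in a function that holds Giant and returns
 * an errno value,
 *
 *	if (error)
 *		EGAR(error);
 *
 * expands to "mtx_exit(&Giant, MTX_DEF); return (error);".  VEGAR is the
 * void-returning equivalent.
 */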

#define DROP_GIANT_NOSWITCH()						\
do {									\
	int _giantcnt;							\
	WITNESS_SAVE_DECL(Giant);					\
									\
	if (mtx_owned(&Giant))						\
		WITNESS_SAVE(&Giant, Giant);				\
	for (_giantcnt = 0; mtx_owned(&Giant); _giantcnt++)		\
		mtx_exit(&Giant, MTX_DEF | MTX_NOSWITCH)

#define DROP_GIANT()							\
do {									\
	int _giantcnt;							\
	WITNESS_SAVE_DECL(Giant);					\
									\
	if (mtx_owned(&Giant))						\
		WITNESS_SAVE(&Giant, Giant);				\
	for (_giantcnt = 0; mtx_owned(&Giant); _giantcnt++)		\
		mtx_exit(&Giant, MTX_DEF)

#define PICKUP_GIANT()							\
	mtx_assert(&Giant, MA_NOTOWNED);				\
	while (_giantcnt--)						\
		mtx_enter(&Giant, MTX_DEF);				\
	if (mtx_owned(&Giant))						\
		WITNESS_RESTORE(&Giant, Giant);				\
} while (0)

#define PARTIAL_PICKUP_GIANT()						\
	mtx_assert(&Giant, MA_NOTOWNED);				\
	while (_giantcnt--)						\
		mtx_enter(&Giant, MTX_DEF);				\
	if (mtx_owned(&Giant))						\
		WITNESS_RESTORE(&Giant, Giant)
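
/*
 * Note that DROP_GIANT() and DROP_GIANT_NOSWITCH() open a "do {" block
 * that only PICKUP_GIANT() closes, so the drop/pickup macros must be used
 * as a bracketed pair in the same scope; PARTIAL_PICKUP_GIANT()
 * re-acquires Giant without closing the block.  A sketch (illustrative):
 *
 *	DROP_GIANT();
 *	tsleep(ident, priority, "wmesg", timo);
 *	PICKUP_GIANT();
 *
 * The local _giantcnt records how many recursive holds were released, so
 * Giant is re-entered exactly that many times.
 */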

/*
 * Debugging
 */
#ifdef INVARIANTS
#define MA_OWNED	1
#define MA_NOTOWNED	2
#define MA_RECURSED	4
#define MA_NOTRECURSED	8
#define mtx_assert(m, what) do {					\
	switch ((what)) {						\
	case MA_OWNED:							\
	case MA_OWNED | MA_RECURSED:					\
	case MA_OWNED | MA_NOTRECURSED:					\
		if (!mtx_owned((m)))					\
			panic("mutex %s not owned at %s:%d",		\
			    (m)->mtx_description, __FILE__, __LINE__);	\
		if (mtx_recursed((m))) {				\
			if (((what) & MA_NOTRECURSED) != 0)		\
				panic("mutex %s recursed at %s:%d",	\
				    (m)->mtx_description, __FILE__, __LINE__); \
		} else if (((what) & MA_RECURSED) != 0)			\
			panic("mutex %s unrecursed at %s:%d",		\
			    (m)->mtx_description, __FILE__, __LINE__);	\
		break;							\
	case MA_NOTOWNED:						\
		if (mtx_owned((m)))					\
			panic("mutex %s owned at %s:%d",		\
			    (m)->mtx_description, __FILE__, __LINE__);	\
		break;							\
	default:							\
		panic("unknown mtx_assert at %s:%d", __FILE__, __LINE__); \
	}								\
} while (0)
#else	/* INVARIANTS */
#define mtx_assert(m, what)
#endif	/* INVARIANTS */
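
/*
 * Usage sketch (illustrative): a function that requires its caller to
 * hold sched_lock, exactly once, can assert
 *
 *	mtx_assert(&sched_lock, MA_OWNED | MA_NOTRECURSED);
 *
 * Under INVARIANTS a violation panics with the file and line of the
 * assertion; without INVARIANTS the macro compiles away entirely.
 */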

#ifdef MUTEX_DEBUG
#define MPASS(ex)							\
	if (!(ex))							\
		panic("Assertion %s failed at %s:%d", #ex, __FILE__, __LINE__)
#define MPASS2(ex, what)						\
	if (!(ex))							\
		panic("Assertion %s failed at %s:%d", what, __FILE__, __LINE__)
#define MPASS3(ex, file, line)						\
	if (!(ex))							\
		panic("Assertion %s failed at %s:%d", #ex, file, line)
#define MPASS4(ex, what, file, line)					\
	if (!(ex))							\
		panic("Assertion %s failed at %s:%d", what, file, line)
#else	/* MUTEX_DEBUG */
#define	MPASS(ex)
#define	MPASS2(ex, what)
#define	MPASS3(ex, file, line)
#define	MPASS4(ex, what, file, line)
#endif	/* MUTEX_DEBUG */

#ifdef	WITNESS
#define WITNESS_ENTER(m, t, f, l)					\
	if ((m)->mtx_witness != NULL)					\
		witness_enter((m), (t), (f), (l))
#define WITNESS_EXIT(m, t, f, l)					\
	if ((m)->mtx_witness != NULL)					\
		witness_exit((m), (t), (f), (l))

#define	WITNESS_SLEEP(check, m) witness_sleep(check, (m), __FILE__, __LINE__)
#define	WITNESS_SAVE_DECL(n)						\
	const char * __CONCAT(n, __wf);					\
	int __CONCAT(n, __wl)

#define	WITNESS_SAVE(m, n)						\
do {									\
	if ((m)->mtx_witness != NULL)					\
		witness_save(m, &__CONCAT(n, __wf), &__CONCAT(n, __wl));\
} while (0)

#define	WITNESS_RESTORE(m, n)						\
do {									\
	if ((m)->mtx_witness != NULL)					\
		witness_restore(m, __CONCAT(n, __wf), __CONCAT(n, __wl));\
} while (0)

void	witness_init(struct mtx *, int flag);
void	witness_destroy(struct mtx *);
void	witness_enter(struct mtx *, int, const char *, int);
void	witness_try_enter(struct mtx *, int, const char *, int);
void	witness_exit(struct mtx *, int, const char *, int);
void	witness_display(void(*)(const char *fmt, ...));
void	witness_list(struct proc *);
int	witness_sleep(int, struct mtx *, const char *, int);
void	witness_save(struct mtx *, const char **, int *);
void	witness_restore(struct mtx *, const char *, int);
#else	/* WITNESS */
#define WITNESS_ENTER(m, t, f, l)
#define WITNESS_EXIT(m, t, f, l)
#define	WITNESS_SLEEP(check, m)
#define	WITNESS_SAVE_DECL(n)
#define	WITNESS_SAVE(m, n)
#define	WITNESS_RESTORE(m, n)

/*
 * flag++ is a sleazy way of shutting up the unused-parameter warning
 * in mtx_init()
 */
#define witness_init(m, flag) flag++
#define witness_destroy(m)
#define witness_enter(m, t, f, l)
#define witness_try_enter(m, t, f, l)
#define witness_exit(m, t, f, l)
#endif	/* WITNESS */

/*
 * Assembly macros (for internal use only)
 *------------------------------------------------------------------------------
 */

#define	_V(x)	__STRING(x)

/*
 * Default, unoptimized mutex micro-operations
 */

#ifndef _obtain_lock
/* Actually obtain mtx_lock */
#define _obtain_lock(mp, tid)						\
	atomic_cmpset_acq_ptr(&(mp)->mtx_lock, (void *)MTX_UNOWNED, (tid))
#endif

#ifndef _release_lock
/* Actually release mtx_lock */
#define _release_lock(mp, tid)						\
	atomic_cmpset_rel_ptr(&(mp)->mtx_lock, (tid), (void *)MTX_UNOWNED)
#endif

#ifndef _release_lock_quick
/* Actually release mtx_lock quickly assuming that we own it */
#define	_release_lock_quick(mp)						\
	atomic_store_rel_ptr(&(mp)->mtx_lock, (void *)MTX_UNOWNED)
#endif
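
/*
 * Background (from atomic(9)): atomic_cmpset_acq_ptr(p, old, new)
 * atomically compares *p with old and, only if they match, stores new,
 * returning non-zero on success; the _acq/_rel variants add acquire and
 * release memory ordering.  So _obtain_lock() succeeds only when the
 * mutex is free (MTX_UNOWNED), and _release_lock() succeeds only when
 * the lock word is exactly our tid, i.e. no recursion or contention
 * flags are set.
 */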

#ifndef _getlock_sleep
/* Get a sleep lock, deal with recursion inline. */
#define	_getlock_sleep(mp, tid, type) do {				\
	if (!_obtain_lock(mp, tid)) {					\
		if (((mp)->mtx_lock & MTX_FLAGMASK) != ((uintptr_t)(tid)))\
			mtx_enter_hard(mp, (type) & MTX_HARDOPTS, 0);	\
		else {							\
			atomic_set_ptr(&(mp)->mtx_lock, MTX_RECURSE);	\
			(mp)->mtx_recurse++;				\
		}							\
	}								\
} while (0)
#endif

#ifndef _getlock_spin_block
/* Get a spin lock, handle recursion inline (as the less common case) */
#define	_getlock_spin_block(mp, tid, type) do {				\
	u_int _mtx_intr = save_intr();					\
	disable_intr();							\
	if (!_obtain_lock(mp, tid))					\
		mtx_enter_hard(mp, (type) & MTX_HARDOPTS, _mtx_intr);	\
	else								\
		(mp)->mtx_saveintr = _mtx_intr;				\
} while (0)
#endif

#ifndef _getlock_norecurse
/*
 * Get a lock without any recursion handling.  Calls the hard enter
 * function if we can't get it inline.
 */
#define	_getlock_norecurse(mp, tid, type) do {				\
	if (!_obtain_lock(mp, tid))					\
		mtx_enter_hard((mp), (type) & MTX_HARDOPTS, 0);		\
} while (0)
#endif

#ifndef _exitlock_norecurse
/*
 * Release a sleep lock, assuming we haven't recursed on it; recursion is
 * handled in the hard function.
 */
#define	_exitlock_norecurse(mp, tid, type) do {				\
	if (!_release_lock(mp, tid))					\
		mtx_exit_hard((mp), (type) & MTX_HARDOPTS);		\
} while (0)
#endif

#ifndef _exitlock
/*
 * Release a sleep lock when it's likely we recursed (the code to
 * deal with simple recursion is inline).
 */
#define	_exitlock(mp, tid, type) do {					\
	if (!_release_lock(mp, tid)) {					\
		if ((mp)->mtx_lock & MTX_RECURSE) {			\
			if (--((mp)->mtx_recurse) == 0)			\
				atomic_clear_ptr(&(mp)->mtx_lock,	\
				    MTX_RECURSE);			\
		} else {						\
			mtx_exit_hard((mp), (type) & MTX_HARDOPTS);	\
		}							\
	}								\
} while (0)
#endif

#ifndef _exitlock_spin
/* Release a spin lock (with possible recursion). */
#define	_exitlock_spin(mp) do {						\
	if ((mp)->mtx_recurse == 0) {					\
		int _mtx_intr = (mp)->mtx_saveintr;			\
									\
		_release_lock_quick(mp);				\
		restore_intr(_mtx_intr);				\
	} else {							\
		(mp)->mtx_recurse--;					\
	}								\
} while (0)
#endif

/*
 * Externally visible mutex functions.
 *------------------------------------------------------------------------------
 */

/*
 * Return non-zero if a mutex is already owned by the current thread.
 */
#define	mtx_owned(m)	(((m)->mtx_lock & MTX_FLAGMASK) == (uintptr_t)CURTHD)

/*
 * Return non-zero if a mutex has been recursively acquired.
 */
#define mtx_recursed(m)	((m)->mtx_recurse != 0)
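
/*
 * For example (illustrative; "foo_mtx" is a made-up name), code that may
 * be called with or without a lock held can branch on ownership:
 *
 *	if (!mtx_owned(&foo_mtx))
 *		mtx_enter(&foo_mtx, MTX_DEF);
 *
 * mtx_owned() only tests ownership by the current thread (CURTHD); it
 * says nothing about whether some other thread holds the mutex.
 */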

/* Common strings */
#ifdef _KERN_MUTEX_C_
#ifdef KTR_EXTEND

/*
 * KTR_EXTEND saves file name and line for all entries, so we don't need them
 * here.  Theoretically we should also change the entries which refer to them
 * (from CTR5 to CTR3), but since they're just passed to snprintf as the last
 * parameters, it doesn't do any harm to leave them.
 */
char	STR_mtx_enter_fmt[] = "GOT %s [%p] r=%d";
char	STR_mtx_exit_fmt[] = "REL %s [%p] r=%d";
char	STR_mtx_try_enter_fmt[] = "TRY_ENTER %s [%p] result=%d";
#else
char	STR_mtx_enter_fmt[] = "GOT %s [%p] r=%d at %s:%d";
char	STR_mtx_exit_fmt[] = "REL %s [%p] r=%d at %s:%d";
char	STR_mtx_try_enter_fmt[] = "TRY_ENTER %s [%p] result=%d at %s:%d";
#endif
char	STR_mtx_bad_type[] = "((type) & (MTX_NORECURSE | MTX_NOSWITCH)) == 0";
char	STR_mtx_owned[] = "mtx_owned(mpp)";
char	STR_mtx_recurse[] = "mpp->mtx_recurse == 0";
#else	/* _KERN_MUTEX_C_ */
extern	char STR_mtx_enter_fmt[];
extern	char STR_mtx_bad_type[];
extern	char STR_mtx_exit_fmt[];
extern	char STR_mtx_owned[];
extern	char STR_mtx_recurse[];
extern	char STR_mtx_try_enter_fmt[];
#endif	/* _KERN_MUTEX_C_ */

#ifndef KLD_MODULE
/*
 * Get lock 'm'.  The macro handles the easy (and most common) cases and
 * leaves the slow stuff to the mtx_enter_hard() function.
 *
 * Note: since type is usually a constant, much of this code is optimized out.
 */
_MTX_INLINE void
_mtx_enter(struct mtx *mtxp, int type, const char *file, int line)
{
	struct mtx	*mpp = mtxp;

	/* bits only valid on mtx_exit() */
	MPASS4(((type) & (MTX_NORECURSE | MTX_NOSWITCH)) == 0,
	    STR_mtx_bad_type, file, line);

	if ((type) & MTX_SPIN) {
		/*
		 * Easy cases of spin locks:
		 *
		 * 1) We already own the lock and will simply recurse on it (if
		 *    RLIKELY)
		 *
		 * 2) The lock is free; we just get it
		 */
		if ((type) & MTX_RLIKELY) {
			/*
			 * Check for recursion; if we already have this
			 * lock we just bump the recursion count.
			 */
			if (mpp->mtx_lock == (uintptr_t)CURTHD) {
				mpp->mtx_recurse++;
				goto done;
			}
		}

		if (((type) & MTX_TOPHALF) == 0) {
			/*
			 * If an interrupt thread uses this we must block
			 * interrupts here.
			 */
			if ((type) & MTX_FIRST) {
				ASS_IEN;
				disable_intr();
				_getlock_norecurse(mpp, CURTHD,
				    (type) & MTX_HARDOPTS);
			} else {
				_getlock_spin_block(mpp, CURTHD,
				    (type) & MTX_HARDOPTS);
			}
		} else
			_getlock_norecurse(mpp, CURTHD, (type) & MTX_HARDOPTS);
	} else {
		/* Sleep locks */
		if ((type) & MTX_RLIKELY)
			_getlock_sleep(mpp, CURTHD, (type) & MTX_HARDOPTS);
		else
			_getlock_norecurse(mpp, CURTHD, (type) & MTX_HARDOPTS);
	}
done:
	WITNESS_ENTER(mpp, type, file, line);
	CTR5(KTR_LOCK, STR_mtx_enter_fmt,
	    mpp->mtx_description, mpp, mpp->mtx_recurse, file, line);
}

/*
 * Attempt to get an MTX_DEF lock; return non-zero if the lock was acquired.
 *
 * XXX DOES NOT HANDLE RECURSION
 */
_MTX_INLINE int
_mtx_try_enter(struct mtx *mtxp, int type, const char *file, int line)
{
	struct mtx	*const mpp = mtxp;
	int	rval;

	rval = _obtain_lock(mpp, CURTHD);
#ifdef WITNESS
	if (rval && mpp->mtx_witness != NULL) {
		MPASS(mpp->mtx_recurse == 0);
		witness_try_enter(mpp, type, file, line);
	}
#endif	/* WITNESS */
	CTR5(KTR_LOCK, STR_mtx_try_enter_fmt,
	    mpp->mtx_description, mpp, rval, file, line);

	return rval;
}
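
/*
 * Usage sketch (illustrative; "foo_mtx" is a made-up name): callers must
 * be prepared for failure, and must not recurse through this path:
 *
 *	if (mtx_try_enter(&foo_mtx, MTX_DEF)) {
 *		...
 *		mtx_exit(&foo_mtx, MTX_DEF);
 *	} else {
 *		...	take a fallback path; the lock is busy
 *	}
 */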

/*
 * Release lock m.
 */
_MTX_INLINE void
_mtx_exit(struct mtx *mtxp, int type, const char *file, int line)
{
	struct mtx	*const mpp = mtxp;

	MPASS4(mtx_owned(mpp), STR_mtx_owned, file, line);
	WITNESS_EXIT(mpp, type, file, line);
	CTR5(KTR_LOCK, STR_mtx_exit_fmt,
	    mpp->mtx_description, mpp, mpp->mtx_recurse, file, line);
	if ((type) & MTX_SPIN) {
		if ((type) & MTX_NORECURSE) {
			int mtx_intr = mpp->mtx_saveintr;

			MPASS4(mpp->mtx_recurse == 0, STR_mtx_recurse,
			    file, line);
			_release_lock_quick(mpp);
			if (((type) & MTX_TOPHALF) == 0) {
				if ((type) & MTX_FIRST) {
					ASS_IDIS;
					enable_intr();
				} else
					restore_intr(mtx_intr);
			}
		} else {
			if (((type & MTX_TOPHALF) == 0) &&
			    (type & MTX_FIRST)) {
				ASS_IDIS;
				ASS_SIEN(mpp);
			}
			_exitlock_spin(mpp);
		}
	} else {
		/* Handle sleep locks */
		if ((type) & MTX_RLIKELY)
			_exitlock(mpp, CURTHD, (type) & MTX_HARDOPTS);
		else {
			_exitlock_norecurse(mpp, CURTHD,
			    (type) & MTX_HARDOPTS);
		}
	}
}

#endif	/* KLD_MODULE */

/* Avoid namespace pollution */
#ifndef _KERN_MUTEX_C_
#undef	_obtain_lock
#undef	_release_lock
#undef	_release_lock_quick
#undef	_getlock_sleep
#undef	_getlock_spin_block
#undef	_getlock_norecurse
#undef	_exitlock_norecurse
#undef	_exitlock
#undef	_exitlock_spin
#endif	/* !_KERN_MUTEX_C_ */

#endif	/* _KERNEL */
#endif	/* !LOCORE */
#endif	/* _SYS_MUTEX_H_ */