/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 *	and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 */

/*
 * Machine independent bits of mutex implementation.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_adaptive_mutexes.h"
#include "opt_ddb.h"
#include "opt_hwpmc_hooks.h"
#include "opt_sched.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/turnstile.h>
#include <sys/vmmeter.h>
#include <sys/lock_profile.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/cpu.h>

#include <ddb/ddb.h>

#include <fs/devfs/devfs_int.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>

#if defined(SMP) && !defined(NO_ADAPTIVE_MUTEXES)
#define	ADAPTIVE_MUTEXES
#endif

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
PMC_SOFT_DEFINE( , , lock, failed);
#endif
/*
 * Return the mutex address when the lock cookie address is provided.
 * This assumes that struct mtx has a member named mtx_lock.
 */
#define	mtxlock2mtx(c)	(__containerof(c, struct mtx, mtx_lock))
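
/*
 * Illustrative sketch (not part of the KPI): the mtx_lock() family of
 * macros passes &m->mtx_lock as the lock cookie, and mtxlock2mtx()
 * recovers the enclosing mutex from it:
 *
 *	struct mtx m;
 *	volatile uintptr_t *c = &m.mtx_lock;
 *	struct mtx *p = mtxlock2mtx(c);		-- p == &m
 */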

/*
 * Internal utility macros.
 */
#define mtx_unowned(m)	((m)->mtx_lock == MTX_UNOWNED)

#define	mtx_destroyed(m) ((m)->mtx_lock == MTX_DESTROYED)

static void	assert_mtx(const struct lock_object *lock, int what);
#ifdef DDB
static void	db_show_mtx(const struct lock_object *lock);
#endif
static void	lock_mtx(struct lock_object *lock, uintptr_t how);
static void	lock_spin(struct lock_object *lock, uintptr_t how);
#ifdef KDTRACE_HOOKS
static int	owner_mtx(const struct lock_object *lock,
		    struct thread **owner);
#endif
static uintptr_t unlock_mtx(struct lock_object *lock);
static uintptr_t unlock_spin(struct lock_object *lock);

/*
 * Lock classes for sleep and spin mutexes.
 */
struct lock_class lock_class_mtx_sleep = {
	.lc_name = "sleep mutex",
	.lc_flags = LC_SLEEPLOCK | LC_RECURSABLE,
	.lc_assert = assert_mtx,
#ifdef DDB
	.lc_ddb_show = db_show_mtx,
#endif
	.lc_lock = lock_mtx,
	.lc_unlock = unlock_mtx,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_mtx,
#endif
};
struct lock_class lock_class_mtx_spin = {
	.lc_name = "spin mutex",
	.lc_flags = LC_SPINLOCK | LC_RECURSABLE,
	.lc_assert = assert_mtx,
#ifdef DDB
	.lc_ddb_show = db_show_mtx,
#endif
	.lc_lock = lock_spin,
	.lc_unlock = unlock_spin,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_mtx,
#endif
};

#ifdef ADAPTIVE_MUTEXES
static SYSCTL_NODE(_debug, OID_AUTO, mtx, CTLFLAG_RD, NULL, "mtx debugging");

static struct lock_delay_config __read_frequently mtx_delay;

SYSCTL_INT(_debug_mtx, OID_AUTO, delay_base, CTLFLAG_RW, &mtx_delay.base,
    0, "");
SYSCTL_INT(_debug_mtx, OID_AUTO, delay_max, CTLFLAG_RW, &mtx_delay.max,
    0, "");

LOCK_DELAY_SYSINIT_DEFAULT(mtx_delay);
#endif

static SYSCTL_NODE(_debug, OID_AUTO, mtx_spin, CTLFLAG_RD, NULL,
    "mtx spin debugging");

static struct lock_delay_config __read_frequently mtx_spin_delay;

SYSCTL_INT(_debug_mtx_spin, OID_AUTO, delay_base, CTLFLAG_RW,
    &mtx_spin_delay.base, 0, "");
SYSCTL_INT(_debug_mtx_spin, OID_AUTO, delay_max, CTLFLAG_RW,
    &mtx_spin_delay.max, 0, "");

LOCK_DELAY_SYSINIT_DEFAULT(mtx_spin_delay);

/*
 * System-wide mutexes
 */
struct mtx blocked_lock;
struct mtx __exclusive_cache_line Giant;

static void _mtx_lock_indefinite_check(struct mtx *, struct lock_delay_arg *);

void
assert_mtx(const struct lock_object *lock, int what)
{

	/*
	 * Treat LA_LOCKED as if LA_XLOCKED was asserted.
	 *
	 * Some callers of lc_assert use LA_LOCKED to indicate that either
	 * a shared lock or an exclusive lock was held, while other callers
	 * use the stricter LA_XLOCKED (used as MA_OWNED).
	 *
	 * Mutexes are the only lock class that cannot be shared; as a
	 * result, we can reasonably assume that a caller asserting
	 * LA_LOCKED on a mutex object really intends LA_XLOCKED.
	 */
	if (what & LA_LOCKED) {
		what &= ~LA_LOCKED;
		what |= LA_XLOCKED;
	}
	mtx_assert((const struct mtx *)lock, what);
}

void
lock_mtx(struct lock_object *lock, uintptr_t how)
{

	mtx_lock((struct mtx *)lock);
}

void
lock_spin(struct lock_object *lock, uintptr_t how)
{

	panic("spin locks can only use msleep_spin");
}

uintptr_t
unlock_mtx(struct lock_object *lock)
{
	struct mtx *m;

	m = (struct mtx *)lock;
	mtx_assert(m, MA_OWNED | MA_NOTRECURSED);
	mtx_unlock(m);
	return (0);
}

uintptr_t
unlock_spin(struct lock_object *lock)
{

	panic("spin locks can only use msleep_spin");
}

#ifdef KDTRACE_HOOKS
int
owner_mtx(const struct lock_object *lock, struct thread **owner)
{
	const struct mtx *m;
	uintptr_t x;

	m = (const struct mtx *)lock;
	x = m->mtx_lock;
	*owner = (struct thread *)(x & ~MTX_FLAGMASK);
	return (*owner != NULL);
}
#endif

/*
 * Function versions of the inlined __mtx_* macros.  These are used by
 * modules and can also be called from assembly language if needed.
 */
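
/*
 * Typical usage of this KPI from a module, shown as an illustrative
 * sketch (the `foo' names are hypothetical):
 *
 *	static struct mtx foo_mtx;
 *
 *	mtx_init(&foo_mtx, "foo", NULL, MTX_DEF);
 *	mtx_lock(&foo_mtx);
 *	... access state protected by foo_mtx ...
 *	mtx_unlock(&foo_mtx);
 *	mtx_destroy(&foo_mtx);
 */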
void
__mtx_lock_flags(volatile uintptr_t *c, int opts, const char *file, int line)
{
	struct mtx *m;
	uintptr_t tid, v;

	m = mtxlock2mtx(c);

	KASSERT(kdb_active != 0 || SCHEDULER_STOPPED() ||
	    !TD_IS_IDLETHREAD(curthread),
	    ("mtx_lock() by idle thread %p on sleep mutex %s @ %s:%d",
	    curthread, m->lock_object.lo_name, file, line));
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_lock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
	    ("mtx_lock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
	    file, line));
	WITNESS_CHECKORDER(&m->lock_object, (opts & ~MTX_RECURSE) |
	    LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL);

	tid = (uintptr_t)curthread;
	v = MTX_UNOWNED;
	if (!_mtx_obtain_lock_fetch(m, &v, tid))
		_mtx_lock_sleep(m, v, opts, file, line);
	else
		LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(adaptive__acquire,
		    m, 0, 0, file, line);
	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->lock_object, (opts & ~MTX_RECURSE) | LOP_EXCLUSIVE,
	    file, line);
	TD_LOCKS_INC(curthread);
}

void
__mtx_unlock_flags(volatile uintptr_t *c, int opts, const char *file, int line)
{
	struct mtx *m;

	m = mtxlock2mtx(c);

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_unlock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
	    ("mtx_unlock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
	    file, line));
	WITNESS_UNLOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	mtx_assert(m, MA_OWNED);

#ifdef LOCK_PROFILING
	__mtx_unlock_sleep(c, (uintptr_t)curthread, opts, file, line);
#else
	__mtx_unlock(m, curthread, opts, file, line);
#endif
	TD_LOCKS_DEC(curthread);
}

void
__mtx_lock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
    int line)
{
	struct mtx *m;
#ifdef SMP
	uintptr_t tid, v;
#endif

	m = mtxlock2mtx(c);

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_lock_spin() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
	    ("mtx_lock_spin() of sleep mutex %s @ %s:%d",
	    m->lock_object.lo_name, file, line));
	if (mtx_owned(m))
		KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||
		    (opts & MTX_RECURSE) != 0,
	    ("mtx_lock_spin: recursed on non-recursive mutex %s @ %s:%d\n",
		    m->lock_object.lo_name, file, line));
	opts &= ~MTX_RECURSE;
	WITNESS_CHECKORDER(&m->lock_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
	    file, line, NULL);
#ifdef SMP
	spinlock_enter();
	tid = (uintptr_t)curthread;
	v = MTX_UNOWNED;
	if (!_mtx_obtain_lock_fetch(m, &v, tid))
		_mtx_lock_spin(m, v, opts, file, line);
	else
		LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(spin__acquire,
		    m, 0, 0, file, line);
#else
	__mtx_lock_spin(m, curthread, opts, file, line);
#endif
	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
}

int
__mtx_trylock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
    int line)
{
	struct mtx *m;

	if (SCHEDULER_STOPPED())
		return (1);

	m = mtxlock2mtx(c);

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_trylock_spin() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
	    ("mtx_trylock_spin() of sleep mutex %s @ %s:%d",
	    m->lock_object.lo_name, file, line));
	KASSERT((opts & MTX_RECURSE) == 0,
	    ("mtx_trylock_spin: unsupp. opt MTX_RECURSE on mutex %s @ %s:%d\n",
	    m->lock_object.lo_name, file, line));
	if (__mtx_trylock_spin(m, curthread, opts, file, line)) {
		LOCK_LOG_TRY("LOCK", &m->lock_object, opts, 1, file, line);
		WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
		return (1);
	}
	LOCK_LOG_TRY("LOCK", &m->lock_object, opts, 0, file, line);
	return (0);
}

void
__mtx_unlock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
    int line)
{
	struct mtx *m;

	m = mtxlock2mtx(c);

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_unlock_spin() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
	    ("mtx_unlock_spin() of sleep mutex %s @ %s:%d",
	    m->lock_object.lo_name, file, line));
	WITNESS_UNLOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	mtx_assert(m, MA_OWNED);

	__mtx_unlock_spin(m);
}

/*
 * The important part of mtx_trylock{,_flags}()
 * Tries to acquire lock `m.'  If this function is called on a mutex that
 * is already owned, it will recursively acquire the lock.
 */
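
/*
 * Illustrative caller pattern (hypothetical `foo' names): mtx_trylock()
 * never sleeps and returns non-zero on success, so it suits contexts
 * where blocking on the mutex is not acceptable:
 *
 *	if (mtx_trylock(&foo_mtx)) {
 *		... do work ...
 *		mtx_unlock(&foo_mtx);
 *	} else {
 *		... defer the work or try again later ...
 *	}
 */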
int
_mtx_trylock_flags_int(struct mtx *m, int opts LOCK_FILE_LINE_ARG_DEF)
{
	struct thread *td;
	uintptr_t tid, v;
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
	int rval;
	bool recursed;

	td = curthread;
	tid = (uintptr_t)td;
	if (SCHEDULER_STOPPED_TD(td))
		return (1);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(td),
	    ("mtx_trylock() by idle thread %p on sleep mutex %s @ %s:%d",
	    curthread, m->lock_object.lo_name, file, line));
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_trylock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
	    ("mtx_trylock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
	    file, line));

	rval = 1;
	recursed = false;
	v = MTX_UNOWNED;
	for (;;) {
		if (_mtx_obtain_lock_fetch(m, &v, tid))
			break;
		if (v == MTX_UNOWNED)
			continue;
		if (v == tid &&
		    ((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||
		    (opts & MTX_RECURSE) != 0)) {
			m->mtx_recurse++;
			atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
			recursed = true;
			break;
		}
		rval = 0;
		break;
	}

	opts &= ~MTX_RECURSE;

	LOCK_LOG_TRY("LOCK", &m->lock_object, opts, rval, file, line);
	if (rval) {
		WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
		TD_LOCKS_INC(curthread);
		if (!recursed)
			LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(adaptive__acquire,
			    m, contested, waittime, file, line);
	}

	return (rval);
}

int
_mtx_trylock_flags_(volatile uintptr_t *c, int opts, const char *file, int line)
{
	struct mtx *m;

	m = mtxlock2mtx(c);
	return (_mtx_trylock_flags_int(m, opts LOCK_FILE_LINE_ARG));
}

/*
 * __mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock.
 *
 * We call this if the lock is either contested (i.e. we need to go to
 * sleep waiting for it), or if we need to recurse on it.
 */
#if LOCK_DEBUG > 0
void
__mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v, int opts, const char *file,
    int line)
#else
void
__mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v)
#endif
{
	struct thread *td;
	struct mtx *m;
	struct turnstile *ts;
	uintptr_t tid;
	struct thread *owner;
#ifdef LOCK_PROFILING
	int contested = 0;
	uint64_t waittime = 0;
#endif
#if defined(ADAPTIVE_MUTEXES) || defined(KDTRACE_HOOKS)
	struct lock_delay_arg lda;
#endif
#ifdef KDTRACE_HOOKS
	u_int sleep_cnt = 0;
	int64_t sleep_time = 0;
	int64_t all_time = 0;
#endif
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	int doing_lockprof = 0;
#endif

	td = curthread;
	tid = (uintptr_t)td;
	m = mtxlock2mtx(c);

#ifdef KDTRACE_HOOKS
	if (LOCKSTAT_PROFILE_ENABLED(adaptive__acquire)) {
		while (v == MTX_UNOWNED) {
			if (_mtx_obtain_lock_fetch(m, &v, tid))
				goto out_lockstat;
		}
		doing_lockprof = 1;
		all_time -= lockstat_nsecs(&m->lock_object);
	}
#endif
#ifdef LOCK_PROFILING
	doing_lockprof = 1;
#endif

	if (SCHEDULER_STOPPED_TD(td))
		return;

#if defined(ADAPTIVE_MUTEXES)
	lock_delay_arg_init(&lda, &mtx_delay);
#elif defined(KDTRACE_HOOKS)
	lock_delay_arg_init(&lda, NULL);
#endif

	if (__predict_false(v == MTX_UNOWNED))
		v = MTX_READ_VALUE(m);

	if (__predict_false(lv_mtx_owner(v) == td)) {
		KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||
		    (opts & MTX_RECURSE) != 0,
	    ("_mtx_lock_sleep: recursed on non-recursive mutex %s @ %s:%d\n",
		    m->lock_object.lo_name, file, line));
#if LOCK_DEBUG > 0
		opts &= ~MTX_RECURSE;
#endif
		m->mtx_recurse++;
		atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->lock_object, opts))
			CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recursing", m);
		return;
	}
#if LOCK_DEBUG > 0
	opts &= ~MTX_RECURSE;
#endif

#ifdef HWPMC_HOOKS
	PMC_SOFT_CALL( , , lock, failed);
#endif
	lock_profile_obtain_lock_failed(&m->lock_object,
		    &contested, &waittime);
	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR4(KTR_LOCK,
		    "_mtx_lock_sleep: %s contested (lock=%p) at %s:%d",
		    m->lock_object.lo_name, (void *)m->mtx_lock, file, line);

	for (;;) {
		if (v == MTX_UNOWNED) {
			if (_mtx_obtain_lock_fetch(m, &v, tid))
				break;
			continue;
		}
#ifdef KDTRACE_HOOKS
		lda.spin_cnt++;
#endif
#ifdef ADAPTIVE_MUTEXES
		/*
		 * If the owner is running on another CPU, spin until the
		 * owner stops running or the state of the lock changes.
		 */
		owner = lv_mtx_owner(v);
		if (TD_IS_RUNNING(owner)) {
			if (LOCK_LOG_TEST(&m->lock_object, 0))
				CTR3(KTR_LOCK,
				    "%s: spinning on %p held by %p",
				    __func__, m, owner);
			KTR_STATE1(KTR_SCHED, "thread",
			    sched_tdname((struct thread *)tid),
			    "spinning", "lockname:\"%s\"",
			    m->lock_object.lo_name);
			do {
				lock_delay(&lda);
				v = MTX_READ_VALUE(m);
				owner = lv_mtx_owner(v);
			} while (v != MTX_UNOWNED && TD_IS_RUNNING(owner));
			KTR_STATE0(KTR_SCHED, "thread",
			    sched_tdname((struct thread *)tid),
			    "running");
			continue;
		}
#endif

		ts = turnstile_trywait(&m->lock_object);
		v = MTX_READ_VALUE(m);
retry_turnstile:

		/*
		 * Check if the lock has been released while spinning for
		 * the turnstile chain lock.
		 */
		if (v == MTX_UNOWNED) {
			turnstile_cancel(ts);
			continue;
		}

#ifdef ADAPTIVE_MUTEXES
		/*
		 * The current lock owner might have started executing
		 * on another CPU (or the lock could have changed
		 * owners) while we were waiting on the turnstile
		 * chain lock.  If so, drop the turnstile lock and try
		 * again.
		 */
		owner = lv_mtx_owner(v);
		if (TD_IS_RUNNING(owner)) {
			turnstile_cancel(ts);
			continue;
		}
#endif

		/*
		 * If the mutex isn't already contested and a failure occurs
		 * setting the contested bit, the mutex was either released
		 * or the state of the MTX_RECURSED bit changed.
		 */
		if ((v & MTX_CONTESTED) == 0 &&
		    !atomic_fcmpset_ptr(&m->mtx_lock, &v, v | MTX_CONTESTED)) {
			goto retry_turnstile;
		}

		/*
		 * We definitely must sleep for this lock.
		 */
		mtx_assert(m, MA_NOTOWNED);

		/*
		 * Block on the turnstile.
		 */
#ifdef KDTRACE_HOOKS
		sleep_time -= lockstat_nsecs(&m->lock_object);
#endif
#ifndef ADAPTIVE_MUTEXES
		owner = mtx_owner(m);
#endif
		MPASS(owner == mtx_owner(m));
		turnstile_wait(ts, owner, TS_EXCLUSIVE_QUEUE);
#ifdef KDTRACE_HOOKS
		sleep_time += lockstat_nsecs(&m->lock_object);
		sleep_cnt++;
#endif
		v = MTX_READ_VALUE(m);
	}
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	if (__predict_true(!doing_lockprof))
		return;
#endif
#ifdef KDTRACE_HOOKS
	all_time += lockstat_nsecs(&m->lock_object);
	if (sleep_time)
		LOCKSTAT_RECORD1(adaptive__block, m, sleep_time);

	/*
	 * Only record the loops spinning and not sleeping.
	 */
	if (lda.spin_cnt > sleep_cnt)
		LOCKSTAT_RECORD1(adaptive__spin, m, all_time - sleep_time);
out_lockstat:
#endif
	LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(adaptive__acquire, m, contested,
	    waittime, file, line);
}

#ifdef SMP
/*
 * _mtx_lock_spin_cookie: the tougher part of acquiring an MTX_SPIN lock.
 *
 * This is only called if we need to actually spin for the lock. Recursion
 * is handled inline.
 */
#if LOCK_DEBUG > 0
void
_mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t v, int opts,
    const char *file, int line)
#else
void
_mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t v)
#endif
{
	struct mtx *m;
	struct lock_delay_arg lda;
	uintptr_t tid;
#ifdef LOCK_PROFILING
	int contested = 0;
	uint64_t waittime = 0;
#endif
#ifdef KDTRACE_HOOKS
	int64_t spin_time = 0;
#endif
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	int doing_lockprof = 0;
#endif

	tid = (uintptr_t)curthread;
	m = mtxlock2mtx(c);

#ifdef KDTRACE_HOOKS
	if (LOCKSTAT_PROFILE_ENABLED(adaptive__acquire)) {
		while (v == MTX_UNOWNED) {
			if (_mtx_obtain_lock_fetch(m, &v, tid))
				goto out_lockstat;
		}
		doing_lockprof = 1;
		spin_time -= lockstat_nsecs(&m->lock_object);
	}
#endif
#ifdef LOCK_PROFILING
	doing_lockprof = 1;
#endif

	if (__predict_false(v == MTX_UNOWNED))
		v = MTX_READ_VALUE(m);

	if (__predict_false(v == tid)) {
		m->mtx_recurse++;
		return;
	}

	if (SCHEDULER_STOPPED())
		return;

	lock_delay_arg_init(&lda, &mtx_spin_delay);

	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);
	KTR_STATE1(KTR_SCHED, "thread", sched_tdname((struct thread *)tid),
	    "spinning", "lockname:\"%s\"", m->lock_object.lo_name);

#ifdef HWPMC_HOOKS
	PMC_SOFT_CALL( , , lock, failed);
#endif
	lock_profile_obtain_lock_failed(&m->lock_object, &contested, &waittime);

	for (;;) {
		if (v == MTX_UNOWNED) {
			if (_mtx_obtain_lock_fetch(m, &v, tid))
				break;
			continue;
		}
		/* Give interrupts a chance while we spin. */
		spinlock_exit();
		do {
			if (__predict_true(lda.spin_cnt < 10000000)) {
				lock_delay(&lda);
			} else {
				_mtx_lock_indefinite_check(m, &lda);
			}
			v = MTX_READ_VALUE(m);
		} while (v != MTX_UNOWNED);
		spinlock_enter();
	}

	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);
	KTR_STATE0(KTR_SCHED, "thread", sched_tdname((struct thread *)tid),
	    "running");

#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	if (__predict_true(!doing_lockprof))
		return;
#endif
#ifdef KDTRACE_HOOKS
	spin_time += lockstat_nsecs(&m->lock_object);
	if (lda.spin_cnt != 0)
		LOCKSTAT_RECORD1(spin__spin, m, spin_time);
out_lockstat:
#endif
	LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(spin__acquire, m,
	    contested, waittime, file, line);
}
#endif /* SMP */

#ifdef INVARIANTS
static void
thread_lock_validate(struct mtx *m, int opts, const char *file, int line)
{

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("thread_lock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
	    ("thread_lock() of sleep mutex %s @ %s:%d",
	    m->lock_object.lo_name, file, line));
	if (mtx_owned(m))
		KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0,
		    ("thread_lock: recursed on non-recursive mutex %s @ %s:%d\n",
		    m->lock_object.lo_name, file, line));
	WITNESS_CHECKORDER(&m->lock_object,
	    opts | LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL);
}
#else
#define thread_lock_validate(m, opts, file, line) do { } while (0)
#endif

#ifndef LOCK_PROFILING
#if LOCK_DEBUG > 0
void
_thread_lock(struct thread *td, int opts, const char *file, int line)
#else
void
_thread_lock(struct thread *td)
#endif
{
	struct mtx *m;
	uintptr_t tid, v;

	tid = (uintptr_t)curthread;

	if (__predict_false(LOCKSTAT_PROFILE_ENABLED(spin__acquire)))
		goto slowpath_noirq;
	spinlock_enter();
	m = td->td_lock;
	thread_lock_validate(m, 0, file, line);
	v = MTX_READ_VALUE(m);
	if (__predict_true(v == MTX_UNOWNED)) {
		if (__predict_false(!_mtx_obtain_lock(m, tid)))
			goto slowpath_unlocked;
	} else if (v == tid) {
		m->mtx_recurse++;
	} else
		goto slowpath_unlocked;
	if (__predict_true(m == td->td_lock)) {
		WITNESS_LOCK(&m->lock_object, LOP_EXCLUSIVE, file, line);
		return;
	}
	MPASS(m->mtx_recurse == 0);
	_mtx_release_lock_quick(m);
slowpath_unlocked:
	spinlock_exit();
slowpath_noirq:
#if LOCK_DEBUG > 0
	thread_lock_flags_(td, opts, file, line);
#else
	thread_lock_flags_(td, 0, 0, 0);
#endif
}
#endif

void
thread_lock_flags_(struct thread *td, int opts, const char *file, int line)
{
	struct mtx *m;
	uintptr_t tid, v;
	struct lock_delay_arg lda;
#ifdef LOCK_PROFILING
	int contested = 0;
	uint64_t waittime = 0;
#endif
#ifdef KDTRACE_HOOKS
	int64_t spin_time = 0;
#endif
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	int doing_lockprof = 1;
#endif

	tid = (uintptr_t)curthread;

	if (SCHEDULER_STOPPED()) {
		/*
		 * Ensure that spinlock sections are balanced even when the
		 * scheduler is stopped, since we may otherwise inadvertently
		 * re-enable interrupts while dumping core.
		 */
		spinlock_enter();
		return;
	}

	lock_delay_arg_init(&lda, &mtx_spin_delay);

#ifdef HWPMC_HOOKS
	PMC_SOFT_CALL( , , lock, failed);
#endif

#ifdef LOCK_PROFILING
	doing_lockprof = 1;
#elif defined(KDTRACE_HOOKS)
	doing_lockprof = lockstat_enabled;
	if (__predict_false(doing_lockprof))
		spin_time -= lockstat_nsecs(&td->td_lock->lock_object);
#endif
	spinlock_enter();

	for (;;) {
retry:
		m = td->td_lock;
		thread_lock_validate(m, opts, file, line);
		v = MTX_READ_VALUE(m);
		for (;;) {
			if (v == MTX_UNOWNED) {
				if (_mtx_obtain_lock_fetch(m, &v, tid))
					break;
				continue;
			}
			if (v == tid) {
				m->mtx_recurse++;
				MPASS(m == td->td_lock);
				break;
			}
			lock_profile_obtain_lock_failed(&m->lock_object,
			    &contested, &waittime);
			/* Give interrupts a chance while we spin. */
			spinlock_exit();
			do {
				if (__predict_true(lda.spin_cnt < 10000000)) {
					lock_delay(&lda);
				} else {
					_mtx_lock_indefinite_check(m, &lda);
				}
				if (m != td->td_lock) {
					spinlock_enter();
					goto retry;
				}
				v = MTX_READ_VALUE(m);
			} while (v != MTX_UNOWNED);
			spinlock_enter();
		}
		if (m == td->td_lock)
			break;
		MPASS(m->mtx_recurse == 0);
		_mtx_release_lock_quick(m);
	}
	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);

#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	if (__predict_true(!doing_lockprof))
		return;
#endif
#ifdef KDTRACE_HOOKS
	spin_time += lockstat_nsecs(&m->lock_object);
#endif
	if (m->mtx_recurse == 0)
		LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(spin__acquire, m,
		    contested, waittime, file, line);
#ifdef KDTRACE_HOOKS
	if (lda.spin_cnt != 0)
		LOCKSTAT_RECORD1(thread__spin, m, spin_time);
#endif
}

struct mtx *
thread_lock_block(struct thread *td)
{
	struct mtx *lock;

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	lock = td->td_lock;
	td->td_lock = &blocked_lock;
	mtx_unlock_spin(lock);

	return (lock);
}

void
thread_lock_unblock(struct thread *td, struct mtx *new)
{
	mtx_assert(new, MA_OWNED);
	MPASS(td->td_lock == &blocked_lock);
	atomic_store_rel_ptr((volatile void *)&td->td_lock, (uintptr_t)new);
}

void
thread_lock_set(struct thread *td, struct mtx *new)
{
	struct mtx *lock;

	mtx_assert(new, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	lock = td->td_lock;
	td->td_lock = new;
	mtx_unlock_spin(lock);
}
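
/*
 * Minimal sketch of the td_lock handoff these helpers implement (the
 * `old'/`new' names are hypothetical): while a thread migrates between
 * lock containers, its lock pointer is parked on the permanently-locked
 * blocked_lock:
 *
 *	struct mtx *old;
 *
 *	old = thread_lock_block(td);	-- td->td_lock == &blocked_lock
 *	... move td to its new container ...
 *	thread_lock_unblock(td, new);	-- td->td_lock == new
 */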

/*
 * __mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock.
 *
 * We are only called here if the lock is recursed, contested (i.e. we
 * need to wake up a blocked thread) or a lockstat probe is active.
 */
#if LOCK_DEBUG > 0
void
__mtx_unlock_sleep(volatile uintptr_t *c, uintptr_t v, int opts,
    const char *file, int line)
#else
void
__mtx_unlock_sleep(volatile uintptr_t *c, uintptr_t v)
#endif
{
	struct mtx *m;
	struct turnstile *ts;
	uintptr_t tid;

	if (SCHEDULER_STOPPED())
		return;

	tid = (uintptr_t)curthread;
	m = mtxlock2mtx(c);

	if (__predict_false(v == tid))
		v = MTX_READ_VALUE(m);

	if (__predict_false(v & MTX_RECURSED)) {
		if (--(m->mtx_recurse) == 0)
			atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->lock_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p unrecurse", m);
		return;
	}

	LOCKSTAT_PROFILE_RELEASE_LOCK(adaptive__release, m);
	if (v == tid && _mtx_release_lock(m, tid))
		return;

	/*
	 * We have to lock the chain before the turnstile so this turnstile
	 * can be removed from the hash list if it is empty.
	 */
	turnstile_chain_lock(&m->lock_object);
	_mtx_release_lock_quick(m);
	ts = turnstile_lookup(&m->lock_object);
	MPASS(ts != NULL);
	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);
	turnstile_broadcast(ts, TS_EXCLUSIVE_QUEUE);

	/*
	 * This turnstile is now no longer associated with the mutex.  We can
	 * unlock the chain lock so a new turnstile may take its place.
	 */
	turnstile_unpend(ts);
	turnstile_chain_unlock(&m->lock_object);
}

/*
 * All the unlocking of MTX_SPIN locks is done inline.
 * See the __mtx_unlock_spin() macro for the details.
 */

/*
 * The backing function for the INVARIANTS-enabled mtx_assert()
 */
#ifdef INVARIANT_SUPPORT
void
__mtx_assert(const volatile uintptr_t *c, int what, const char *file, int line)
{
	const struct mtx *m;

	if (panicstr != NULL || dumping || SCHEDULER_STOPPED())
		return;

	m = mtxlock2mtx(c);

	switch (what) {
	case MA_OWNED:
	case MA_OWNED | MA_RECURSED:
	case MA_OWNED | MA_NOTRECURSED:
		if (!mtx_owned(m))
			panic("mutex %s not owned at %s:%d",
			    m->lock_object.lo_name, file, line);
		if (mtx_recursed(m)) {
			if ((what & MA_NOTRECURSED) != 0)
				panic("mutex %s recursed at %s:%d",
				    m->lock_object.lo_name, file, line);
		} else if ((what & MA_RECURSED) != 0) {
			panic("mutex %s unrecursed at %s:%d",
			    m->lock_object.lo_name, file, line);
		}
		break;
	case MA_NOTOWNED:
		if (mtx_owned(m))
			panic("mutex %s owned at %s:%d",
			    m->lock_object.lo_name, file, line);
		break;
	default:
		panic("unknown mtx_assert at %s:%d", file, line);
	}
}
#endif
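
/*
 * Illustrative assertions (hypothetical `foo' names); with INVARIANTS
 * these panic on violation, otherwise they compile away:
 *
 *	mtx_assert(&foo_mtx, MA_OWNED);
 *	mtx_assert(&foo_mtx, MA_OWNED | MA_NOTRECURSED);
 *	mtx_assert(&foo_mtx, MA_NOTOWNED);
 */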

/*
 * General init routine used by the MTX_SYSINIT() macro.
 */
void
mtx_sysinit(void *arg)
{
	struct mtx_args *margs = arg;

	mtx_init((struct mtx *)margs->ma_mtx, margs->ma_desc, NULL,
	    margs->ma_opts);
}
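
/*
 * Illustrative MTX_SYSINIT() usage (hypothetical `foo' names): the macro
 * registers a SYSINIT that runs mtx_sysinit() during boot, so the mutex
 * is initialized before it is first used:
 *
 *	static struct mtx foo_mtx;
 *	MTX_SYSINIT(foo_mtx, &foo_mtx, "foo mutex", MTX_DEF);
 */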

/*
 * Mutex initialization routine; initialize lock `m' of the class (spin or
 * sleep) and with the options contained in `opts', and name it `name.'
 * The optional lock type `type' is used as a general lock category name
 * for use with witness.
 */
void
_mtx_init(volatile uintptr_t *c, const char *name, const char *type, int opts)
{
	struct mtx *m;
	struct lock_class *class;
	int flags;

	m = mtxlock2mtx(c);

	MPASS((opts & ~(MTX_SPIN | MTX_QUIET | MTX_RECURSE |
	    MTX_NOWITNESS | MTX_DUPOK | MTX_NOPROFILE | MTX_NEW)) == 0);
	ASSERT_ATOMIC_LOAD_PTR(m->mtx_lock,
	    ("%s: mtx_lock not aligned for %s: %p", __func__, name,
	    &m->mtx_lock));

	/* Determine lock class and lock flags. */
	if (opts & MTX_SPIN)
		class = &lock_class_mtx_spin;
	else
		class = &lock_class_mtx_sleep;
	flags = 0;
	if (opts & MTX_QUIET)
		flags |= LO_QUIET;
	if (opts & MTX_RECURSE)
		flags |= LO_RECURSABLE;
	if ((opts & MTX_NOWITNESS) == 0)
		flags |= LO_WITNESS;
	if (opts & MTX_DUPOK)
		flags |= LO_DUPOK;
	if (opts & MTX_NOPROFILE)
		flags |= LO_NOPROFILE;
	if (opts & MTX_NEW)
		flags |= LO_NEW;

	/* Initialize mutex. */
	lock_init(&m->lock_object, class, name, type, flags);

	m->mtx_lock = MTX_UNOWNED;
	m->mtx_recurse = 0;
}
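
/*
 * Illustrative mtx_init() calls (hypothetical `foo' names).  The witness
 * `type' argument groups related locks under one lock-order name; passing
 * NULL makes witness use `name' itself:
 *
 *	mtx_init(&sc->sc_mtx, "foo softc", NULL, MTX_DEF);
 *	mtx_init(&sc->sc_intr_mtx, "foo intr", "foo driver", MTX_SPIN);
 */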

/*
 * Remove lock `m' from all_mtx queue.  We don't allow MTX_QUIET to be
 * passed in as a flag here because if the corresponding mtx_init() was
 * called with MTX_QUIET set, then it will already be set in the mutex's
 * flags.
 */
void
_mtx_destroy(volatile uintptr_t *c)
{
	struct mtx *m;

	m = mtxlock2mtx(c);

	if (!mtx_owned(m))
		MPASS(mtx_unowned(m));
	else {
		MPASS((m->mtx_lock & (MTX_RECURSED|MTX_CONTESTED)) == 0);

		/* Perform the non-mtx related part of mtx_unlock_spin(). */
		if (LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin)
			spinlock_exit();
		else
			TD_LOCKS_DEC(curthread);

		lock_profile_release_lock(&m->lock_object);
		/* Tell witness this isn't locked to make it happy. */
		WITNESS_UNLOCK(&m->lock_object, LOP_EXCLUSIVE, __FILE__,
		    __LINE__);
	}

	m->mtx_lock = MTX_DESTROYED;
	lock_destroy(&m->lock_object);
}

/*
 * Initialize the mutex code and system mutexes.  This is called from the MD
 * startup code prior to mi_startup().  The per-CPU data space needs to be
 * setup before this is called.
 */
void
mutex_init(void)
{

	/* Setup turnstiles so that sleep mutexes work. */
	init_turnstiles();

	/*
	 * Initialize mutexes.
	 */
	mtx_init(&Giant, "Giant", NULL, MTX_DEF | MTX_RECURSE);
	mtx_init(&blocked_lock, "blocked lock", NULL, MTX_SPIN);
	blocked_lock.mtx_lock = 0xdeadc0de;	/* Always blocked. */
	mtx_init(&proc0.p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
	mtx_init(&proc0.p_slock, "process slock", NULL, MTX_SPIN);
	mtx_init(&proc0.p_statmtx, "pstatl", NULL, MTX_SPIN);
	mtx_init(&proc0.p_itimmtx, "pitiml", NULL, MTX_SPIN);
	mtx_init(&proc0.p_profmtx, "pprofl", NULL, MTX_SPIN);
	mtx_init(&devmtx, "cdev", NULL, MTX_DEF);
	mtx_lock(&Giant);
}

static void __noinline
_mtx_lock_indefinite_check(struct mtx *m, struct lock_delay_arg *ldap)
{
	struct thread *td;

	ldap->spin_cnt++;
	if (ldap->spin_cnt < 60000000 || kdb_active || panicstr != NULL)
		cpu_lock_delay();
	else {
		td = mtx_owner(m);

		/* If the mutex is unlocked, try again. */
		if (td == NULL)
			return;

		printf("spin lock %p (%s) held by %p (tid %d) too long\n",
		    m, m->lock_object.lo_name, td, td->td_tid);
#ifdef WITNESS
		witness_display_spinlock(&m->lock_object, td, printf);
#endif
		panic("spin lock held too long");
	}
	cpu_spinwait();
}

void
mtx_spin_wait_unlocked(struct mtx *m)
{
	struct lock_delay_arg lda;

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("%s() of destroyed mutex %p", __func__, m));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
	    ("%s() of sleep mutex %p (%s)", __func__, m,
	    m->lock_object.lo_name));
	KASSERT(!mtx_owned(m), ("%s() waiting on myself on lock %p (%s)", __func__, m,
	    m->lock_object.lo_name));

	lda.spin_cnt = 0;

	while (atomic_load_acq_ptr(&m->mtx_lock) != MTX_UNOWNED) {
		if (__predict_true(lda.spin_cnt < 10000000)) {
			cpu_spinwait();
			lda.spin_cnt++;
		} else {
			_mtx_lock_indefinite_check(m, &lda);
		}
	}
}

#ifdef DDB
void
db_show_mtx(const struct lock_object *lock)
{
	struct thread *td;
	const struct mtx *m;

	m = (const struct mtx *)lock;

	db_printf(" flags: {");
	if (LOCK_CLASS(lock) == &lock_class_mtx_spin)
		db_printf("SPIN");
	else
		db_printf("DEF");
	if (m->lock_object.lo_flags & LO_RECURSABLE)
		db_printf(", RECURSE");
	if (m->lock_object.lo_flags & LO_DUPOK)
		db_printf(", DUPOK");
	db_printf("}\n");
	db_printf(" state: {");
	if (mtx_unowned(m))
		db_printf("UNOWNED");
	else if (mtx_destroyed(m))
		db_printf("DESTROYED");
	else {
		db_printf("OWNED");
		if (m->mtx_lock & MTX_CONTESTED)
			db_printf(", CONTESTED");
		if (m->mtx_lock & MTX_RECURSED)
			db_printf(", RECURSED");
	}
	db_printf("}\n");
	if (!mtx_unowned(m) && !mtx_destroyed(m)) {
		td = mtx_owner(m);
		db_printf(" owner: %p (tid %d, pid %d, \"%s\")\n", td,
		    td->td_tid, td->td_proc->p_pid, td->td_name);
		if (mtx_recursed(m))
			db_printf(" recursed: %d\n", m->mtx_recurse);
	}
}
#endif