/*-
 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 *	and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 * $FreeBSD: head/sys/kern/subr_turnstile.c 78766 2001-06-25 18:29:32Z jhb $
 */

/*
 * Machine independent bits of mutex implementation and implementation of
 * `witness' structure & related debugging routines.
 */

/*
 *	Main Entry: witness
 *	Pronunciation: 'wit-n&s
 *	Function: noun
 *	Etymology: Middle English witnesse, from Old English witnes knowledge,
 *	    testimony, witness, from 2wit
 *	Date: before 12th century
 *	1 : attestation of a fact or event : TESTIMONY
 *	2 : one that gives evidence; specifically : one who testifies in
 *	    a cause or before a judicial tribunal
 *	3 : one asked to be present at a transaction so as to be able to
 *	    testify to its having taken place
 *	4 : one who has personal knowledge of something
 *	5 a : something serving as evidence or proof : SIGN
 *	  b : public affirmation by word or example of usually
 *	      religious faith or conviction <the heroic witness to divine
 *	      life -- Pilot>
 *	6 capitalized : a member of the Jehovah's Witnesses
 */

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/vmmeter.h>
#include <sys/ktr.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/clock.h>
#include <machine/cpu.h>

#include <ddb/ddb.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>

/*
 * Internal utility macros.
 */
#define mtx_unowned(m)	((m)->mtx_lock == MTX_UNOWNED)

#define mtx_owner(m)	(mtx_unowned((m)) ? NULL \
	: (struct proc *)((m)->mtx_lock & MTX_FLAGMASK))

#define SET_PRIO(p, pri)	(p)->p_pri.pri_level = (pri)
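
/*
 * Illustrative sketch (not part of the implementation): mtx_lock holds the
 * owning process pointer with the MTX_CONTESTED and MTX_RECURSED flag bits
 * packed into its low bits, which is why mtx_owner() masks with
 * MTX_FLAGMASK.  A hypothetical reader of the lock word would look like:
 *
 *	uintptr_t v = m->mtx_lock;
 *	struct proc *owner = (v == MTX_UNOWNED) ? NULL :
 *	    (struct proc *)(v & MTX_FLAGMASK);
 *	int contested = (v & MTX_CONTESTED) != 0;
 */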

/*
 * Lock classes for sleep and spin mutexes.
 */
struct lock_class lock_class_mtx_sleep = {
	"sleep mutex",
	LC_SLEEPLOCK | LC_RECURSABLE
};
struct lock_class lock_class_mtx_spin = {
	"spin mutex",
	LC_SPINLOCK | LC_RECURSABLE
};

/*
 * Prototypes for non-exported routines.
 */
static void	propagate_priority(struct proc *);

static void
propagate_priority(struct proc *p)
{
	int pri = p->p_pri.pri_level;
	struct mtx *m = p->p_blocked;

	mtx_assert(&sched_lock, MA_OWNED);
	for (;;) {
		struct proc *p1;

		p = mtx_owner(m);

		if (p == NULL) {
			/*
			 * This really isn't quite right. Really
			 * ought to bump priority of process that
			 * next acquires the mutex.
			 */
			MPASS(m->mtx_lock == MTX_CONTESTED);
			return;
		}

		MPASS(p->p_magic == P_MAGIC);
		KASSERT(p->p_stat != SSLEEP, ("sleeping process owns a mutex"));
		if (p->p_pri.pri_level <= pri)
			return;

		/*
		 * Bump this process' priority.
		 */
		SET_PRIO(p, pri);

		/*
		 * If lock holder is actually running, just bump priority.
		 */
		if (p->p_oncpu != NOCPU) {
			MPASS(p->p_stat == SRUN || p->p_stat == SZOMB || p->p_stat == SSTOP);
			return;
		}

#ifndef SMP
		/*
		 * For UP, we check to see if p is curproc (this should
		 * never happen, however, as it would mean we are in a
		 * deadlock).
		 */
		KASSERT(p != curproc, ("Deadlock detected"));
#endif

		/*
		 * If on run queue move to new run queue, and
		 * quit.
		 */
		if (p->p_stat == SRUN) {
			MPASS(p->p_blocked == NULL);
			remrunqueue(p);
			setrunqueue(p);
			return;
		}

		/*
		 * If we aren't blocked on a mutex, we should be.
		 */
		KASSERT(p->p_stat == SMTX, (
		    "process %d(%s):%d holds %s but isn't blocked on a mutex\n",
		    p->p_pid, p->p_comm, p->p_stat,
		    m->mtx_object.lo_name));

		/*
		 * Pick up the mutex that p is blocked on.
		 */
		m = p->p_blocked;
		MPASS(m != NULL);

		/*
		 * Check if the proc needs to be moved up on
		 * the blocked chain
		 */
		if (p == TAILQ_FIRST(&m->mtx_blocked)) {
			continue;
		}

		p1 = TAILQ_PREV(p, procqueue, p_procq);
		if (p1->p_pri.pri_level <= pri) {
			continue;
		}

		/*
		 * Remove proc from blocked chain and determine where
		 * it should be moved up to.  Since we know that p1 has
		 * a lower priority than p, we know that at least one
		 * process in the chain has a lower priority and that
		 * p1 will thus not be NULL after the loop.
		 */
		TAILQ_REMOVE(&m->mtx_blocked, p, p_procq);
		TAILQ_FOREACH(p1, &m->mtx_blocked, p_procq) {
			MPASS(p1->p_magic == P_MAGIC);
			if (p1->p_pri.pri_level > pri)
				break;
		}

		MPASS(p1 != NULL);
		TAILQ_INSERT_BEFORE(p1, p, p_procq);
		CTR4(KTR_LOCK,
		    "propagate_priority: p %p moved before %p on [%p] %s",
		    p, p1, m, m->mtx_object.lo_name);
	}
}
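
/*
 * Illustrative walk-through (hypothetical processes, not code in this
 * file): if a process at priority level 40 blocks on a mutex owned by a
 * process at level 80, propagate_priority() lowers the owner's level to 40
 * (a smaller pri_level is a better priority).  If that owner is itself
 * blocked on another mutex, the loop follows its p_blocked pointer and
 * bumps the next owner in the chain as well, re-sorting each mtx_blocked
 * queue it touches along the way.
 */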

/*
 * Function versions of the inlined __mtx_* macros.  These are used by
 * modules and can also be called from assembly language if needed.
 */
void
_mtx_lock_flags(struct mtx *m, int opts, const char *file, int line)
{

	__mtx_lock_flags(m, opts, file, line);
}

void
_mtx_unlock_flags(struct mtx *m, int opts, const char *file, int line)
{

	__mtx_unlock_flags(m, opts, file, line);
}

void
_mtx_lock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{

	__mtx_lock_spin_flags(m, opts, file, line);
}

void
_mtx_unlock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{

	__mtx_unlock_spin_flags(m, opts, file, line);
}
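
/*
 * Minimal usage sketch (illustrative only; `foo_mtx' and `foo_count' are
 * hypothetical names): consumers protect shared state with a sleep mutex
 * through the mtx_lock()/mtx_unlock() interface, which resolves to the
 * function versions above when the inlined macros are not used (e.g. from
 * modules, as noted in the comment before them):
 *
 *	static struct mtx foo_mtx;
 *	static int foo_count;
 *
 *	mtx_init(&foo_mtx, "foo", MTX_DEF);
 *	...
 *	mtx_lock(&foo_mtx);
 *	foo_count++;
 *	mtx_unlock(&foo_mtx);
 */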

/*
 * The important part of mtx_trylock{,_flags}()
 * Tries to acquire lock `m.' We do NOT handle recursion here; we assume that
 * if we're called, it's because we know we don't already own this lock.
 */
int
_mtx_trylock(struct mtx *m, int opts, const char *file, int line)
{
	int rval;

	MPASS(curproc != NULL);

	/*
	 * _mtx_trylock does not accept MTX_NOSWITCH option.
	 */
	KASSERT((opts & MTX_NOSWITCH) == 0,
	    ("mtx_trylock() called with invalid option flag(s) %d", opts));

	rval = _obtain_lock(m, curproc);

	LOCK_LOG_TRY("LOCK", &m->mtx_object, opts, rval, file, line);
	if (rval) {
		/*
		 * We do not handle recursion in _mtx_trylock; see the
		 * note at the top of the routine.
		 */
		KASSERT(!mtx_recursed(m),
		    ("mtx_trylock() called on a recursed mutex"));
		WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
	}

	return (rval);
}
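
/*
 * Illustrative use of mtx_trylock() (hypothetical `foo_mtx' and
 * `foo_count'): the caller must not already own the lock, and must act on
 * the return value since the acquisition can fail:
 *
 *	if (mtx_trylock(&foo_mtx)) {
 *		foo_count++;
 *		mtx_unlock(&foo_mtx);
 *	} else {
 *		...	fall back to a path that does not need the lock
 *	}
 */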

/*
 * _mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock.
 *
 * We call this if the lock is either contested (i.e. we need to go to
 * sleep waiting for it), or if we need to recurse on it.
 */
void
_mtx_lock_sleep(struct mtx *m, int opts, const char *file, int line)
{
	struct proc *p = curproc;

	if ((m->mtx_lock & MTX_FLAGMASK) == (uintptr_t)p) {
		m->mtx_recurse++;
		atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recursing", m);
		return;
	}

	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR4(KTR_LOCK,
		    "_mtx_lock_sleep: %s contested (lock=%p) at %s:%d",
		    m->mtx_object.lo_name, (void *)m->mtx_lock, file, line);

	while (!_obtain_lock(m, p)) {
		uintptr_t v;
		struct proc *p1;

		mtx_lock_spin(&sched_lock);
		/*
		 * Check if the lock has been released while spinning for
		 * the sched_lock.
		 */
		if ((v = m->mtx_lock) == MTX_UNOWNED) {
			mtx_unlock_spin(&sched_lock);
			continue;
		}

		/*
		 * The mutex was marked contested on release. This means that
		 * there are processes blocked on it.
		 */
		if (v == MTX_CONTESTED) {
			p1 = TAILQ_FIRST(&m->mtx_blocked);
			MPASS(p1 != NULL);
			m->mtx_lock = (uintptr_t)p | MTX_CONTESTED;

			if (p1->p_pri.pri_level < p->p_pri.pri_level)
				SET_PRIO(p, p1->p_pri.pri_level);
			mtx_unlock_spin(&sched_lock);
			return;
		}

		/*
		 * If the mutex isn't already contested and a failure occurs
		 * setting the contested bit, the mutex was either released
		 * or the state of the MTX_RECURSED bit changed.
		 */
		if ((v & MTX_CONTESTED) == 0 &&
		    !atomic_cmpset_ptr(&m->mtx_lock, (void *)v,
			(void *)(v | MTX_CONTESTED))) {
			mtx_unlock_spin(&sched_lock);
			continue;
		}

		/*
		 * We definitely must sleep for this lock.
		 */
		mtx_assert(m, MA_NOTOWNED);

#ifdef notyet
		/*
		 * If we're borrowing an interrupted thread's VM context, we
		 * must clean up before going to sleep.
		 */
		if (p->p_ithd != NULL) {
			struct ithd *it = p->p_ithd;

			if (it->it_interrupted) {
				if (LOCK_LOG_TEST(&m->mtx_object, opts))
					CTR2(KTR_LOCK,
				    "_mtx_lock_sleep: %p interrupted %p",
					    it, it->it_interrupted);
				intr_thd_fixup(it);
			}
		}
#endif

		/*
		 * Put us on the list of threads blocked on this mutex.
		 */
		if (TAILQ_EMPTY(&m->mtx_blocked)) {
			p1 = (struct proc *)(m->mtx_lock & MTX_FLAGMASK);
			LIST_INSERT_HEAD(&p1->p_contested, m, mtx_contested);
			TAILQ_INSERT_TAIL(&m->mtx_blocked, p, p_procq);
		} else {
			TAILQ_FOREACH(p1, &m->mtx_blocked, p_procq)
				if (p1->p_pri.pri_level > p->p_pri.pri_level)
					break;
			if (p1)
				TAILQ_INSERT_BEFORE(p1, p, p_procq);
			else
				TAILQ_INSERT_TAIL(&m->mtx_blocked, p, p_procq);
		}

		/*
		 * Save who we're blocked on.
		 */
		p->p_blocked = m;
		p->p_mtxname = m->mtx_object.lo_name;
		p->p_stat = SMTX;
		propagate_priority(p);

		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR3(KTR_LOCK,
			    "_mtx_lock_sleep: p %p blocked on [%p] %s", p, m,
			    m->mtx_object.lo_name);

		p->p_stats->p_ru.ru_nvcsw++;
		mi_switch();

		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR3(KTR_LOCK,
			  "_mtx_lock_sleep: p %p free from blocked on [%p] %s",
			  p, m, m->mtx_object.lo_name);

		mtx_unlock_spin(&sched_lock);
	}

	return;
}

/*
 * _mtx_lock_spin: the tougher part of acquiring an MTX_SPIN lock.
 *
 * This is only called if we need to actually spin for the lock. Recursion
 * is handled inline.
 */
void
_mtx_lock_spin(struct mtx *m, int opts, critical_t mtx_crit, const char *file,
	       int line)
{
	int i = 0;

	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);

	for (;;) {
		if (_obtain_lock(m, curproc))
			break;

		/* Give interrupts a chance while we spin. */
		critical_exit(mtx_crit);
		while (m->mtx_lock != MTX_UNOWNED) {
			if (i++ < 1000000)
				continue;
			if (i++ < 6000000)
				DELAY(1);
#ifdef DDB
			else if (!db_active)
#else
			else
#endif
			panic("spin lock %s held by %p for > 5 seconds",
			    m->mtx_object.lo_name, (void *)m->mtx_lock);
		}
		mtx_crit = critical_enter();
	}

	m->mtx_savecrit = mtx_crit;
	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);

	return;
}

/*
 * _mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock.
 *
 * We are only called here if the lock is recursed or contested (i.e. we
 * need to wake up a blocked thread).
 */
void
_mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
{
	struct proc *p, *p1;
	struct mtx *m1;
	int pri;

	p = curproc;

	if (mtx_recursed(m)) {
		if (--(m->mtx_recurse) == 0)
			atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p unrecurse", m);
		return;
	}

	mtx_lock_spin(&sched_lock);
	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);

	p1 = TAILQ_FIRST(&m->mtx_blocked);
	MPASS(p->p_magic == P_MAGIC);
	MPASS(p1->p_magic == P_MAGIC);

	TAILQ_REMOVE(&m->mtx_blocked, p1, p_procq);

	if (TAILQ_EMPTY(&m->mtx_blocked)) {
		LIST_REMOVE(m, mtx_contested);
		_release_lock_quick(m);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p not held", m);
	} else
		atomic_store_rel_ptr(&m->mtx_lock, (void *)MTX_CONTESTED);

	pri = PRI_MAX;
	LIST_FOREACH(m1, &p->p_contested, mtx_contested) {
		int cp = TAILQ_FIRST(&m1->mtx_blocked)->p_pri.pri_level;
		if (cp < pri)
			pri = cp;
	}

	if (pri > p->p_pri.pri_native)
		pri = p->p_pri.pri_native;
	SET_PRIO(p, pri);

	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p contested setrunqueue %p",
		    m, p1);

	p1->p_blocked = NULL;
	p1->p_stat = SRUN;
	setrunqueue(p1);

	if ((opts & MTX_NOSWITCH) == 0 && p1->p_pri.pri_level < pri) {
#ifdef notyet
		if (p->p_ithd != NULL) {
			struct ithd *it = p->p_ithd;

			if (it->it_interrupted) {
				if (LOCK_LOG_TEST(&m->mtx_object, opts))
					CTR2(KTR_LOCK,
				    "_mtx_unlock_sleep: %p interrupted %p",
					    it, it->it_interrupted);
				intr_thd_fixup(it);
			}
		}
#endif
		setrunqueue(p);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR2(KTR_LOCK,
			    "_mtx_unlock_sleep: %p switching out lock=%p", m,
			    (void *)m->mtx_lock);

		p->p_stats->p_ru.ru_nivcsw++;
		mi_switch();
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p resuming lock=%p",
			    m, (void *)m->mtx_lock);
	}

	mtx_unlock_spin(&sched_lock);

	return;
}

/*
 * All the unlocking of MTX_SPIN locks is done inline.
 * See the _rel_spin_lock() macro for the details.
 */
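
/*
 * Illustrative spin mutex usage (hypothetical `foo_spin'): spin mutexes
 * are acquired with mtx_lock_spin() and released with mtx_unlock_spin(),
 * the latter being entirely inline as noted above:
 *
 *	static struct mtx foo_spin;
 *
 *	mtx_init(&foo_spin, "foo spin", MTX_SPIN);
 *	...
 *	mtx_lock_spin(&foo_spin);
 *	...	touch state shared with interrupt handlers
 *	mtx_unlock_spin(&foo_spin);
 */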

/*
 * The backing function for the INVARIANTS-enabled mtx_assert()
 */
#ifdef INVARIANT_SUPPORT
void
_mtx_assert(struct mtx *m, int what, const char *file, int line)
{
	switch (what) {
	case MA_OWNED:
	case MA_OWNED | MA_RECURSED:
	case MA_OWNED | MA_NOTRECURSED:
		if (!mtx_owned(m))
			panic("mutex %s not owned at %s:%d",
			    m->mtx_object.lo_name, file, line);
		if (mtx_recursed(m)) {
			if ((what & MA_NOTRECURSED) != 0)
				panic("mutex %s recursed at %s:%d",
				    m->mtx_object.lo_name, file, line);
		} else if ((what & MA_RECURSED) != 0) {
			panic("mutex %s unrecursed at %s:%d",
			    m->mtx_object.lo_name, file, line);
		}
		break;
	case MA_NOTOWNED:
		if (mtx_owned(m))
			panic("mutex %s owned at %s:%d",
			    m->mtx_object.lo_name, file, line);
		break;
	default:
		panic("unknown mtx_assert at %s:%d", file, line);
	}
}
#endif
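
/*
 * Illustrative use of mtx_assert() (hypothetical `foo_mtx' and
 * `foo_count'): functions that require a lock to be held (or not held) on
 * entry assert it, so an INVARIANTS kernel panics at the call site instead
 * of quietly corrupting state:
 *
 *	static void
 *	foo_modify(void)
 *	{
 *
 *		mtx_assert(&foo_mtx, MA_OWNED);
 *		foo_count++;
 *	}
 */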

/*
 * The MUTEX_DEBUG-enabled mtx_validate()
 *
 * Most of these checks have been moved off into the LO_INITIALIZED flag
 * maintained by the witness code.
 */
#ifdef MUTEX_DEBUG

void	mtx_validate __P((struct mtx *));

void
mtx_validate(struct mtx *m)
{

/*
 * XXX - When kernacc() is fixed on the alpha to handle K0_SEG memory properly
 * we can re-enable the kernacc() checks.
 */
#ifndef __alpha__
	if (!kernacc((caddr_t)m, sizeof(m), VM_PROT_READ | VM_PROT_WRITE))
		panic("Can't read and write to mutex %p", m);
#endif
}
#endif

/*
 * Mutex initialization routine; initialize lock `m' with the type and
 * options contained in `opts' and description `description.'
 */
void
mtx_init(struct mtx *m, const char *description, int opts)
{
	struct lock_object *lock;

	MPASS((opts & ~(MTX_SPIN | MTX_QUIET | MTX_RECURSE |
	    MTX_SLEEPABLE | MTX_NOWITNESS)) == 0);

#ifdef MUTEX_DEBUG
	/* Diagnostic and error correction */
	mtx_validate(m);
#endif

	bzero(m, sizeof(*m));
	lock = &m->mtx_object;
	if (opts & MTX_SPIN)
		lock->lo_class = &lock_class_mtx_spin;
	else
		lock->lo_class = &lock_class_mtx_sleep;
	lock->lo_name = description;
	if (opts & MTX_QUIET)
		lock->lo_flags = LO_QUIET;
	if (opts & MTX_RECURSE)
		lock->lo_flags |= LO_RECURSABLE;
	if (opts & MTX_SLEEPABLE)
		lock->lo_flags |= LO_SLEEPABLE;
	if ((opts & MTX_NOWITNESS) == 0)
		lock->lo_flags |= LO_WITNESS;

	m->mtx_lock = MTX_UNOWNED;
	TAILQ_INIT(&m->mtx_blocked);

	LOCK_LOG_INIT(lock, opts);

	WITNESS_INIT(lock);
}
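
/*
 * Illustrative initialization sketches (hypothetical locks): the option
 * flags checked above can be combined, e.g. a recursable default mutex and
 * a spin mutex that witness will not track:
 *
 *	mtx_init(&foo_mtx, "foo", MTX_DEF | MTX_RECURSE);
 *	mtx_init(&foo_spin, "foo spin", MTX_SPIN | MTX_NOWITNESS);
 */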

/*
 * Remove lock `m' from all_mtx queue.  We don't allow MTX_QUIET to be
 * passed in as a flag here because if the corresponding mtx_init() was
 * called with MTX_QUIET set, then it will already be set in the mutex's
 * flags.
 */
void
mtx_destroy(struct mtx *m)
{

	LOCK_LOG_DESTROY(&m->mtx_object, 0);

	if (!mtx_owned(m))
		MPASS(mtx_unowned(m));
	else {
		MPASS((m->mtx_lock & (MTX_RECURSED|MTX_CONTESTED)) == 0);

		/* Tell witness this isn't locked to make it happy. */
		WITNESS_UNLOCK(&m->mtx_object, LOP_EXCLUSIVE | LOP_NOSWITCH,
		    __FILE__, __LINE__);
	}

	WITNESS_DESTROY(&m->mtx_object);
}