kern_mutex.c revision 83679
/*-
 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 *	and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 * $FreeBSD: head/sys/kern/kern_mutex.c 83679 2001-09-19 22:52:59Z jhb $
 */

/*
 * Machine independent bits of mutex implementation and implementation of
 * `witness' structure & related debugging routines.
 */

/*
 *	Main Entry: witness
 *	Pronunciation: 'wit-n&s
 *	Function: noun
 *	Etymology: Middle English witnesse, from Old English witnes knowledge,
 *	    testimony, witness, from 2wit
 *	Date: before 12th century
 *	1 : attestation of a fact or event : TESTIMONY
 *	2 : one that gives evidence; specifically : one who testifies in
 *	    a cause or before a judicial tribunal
 *	3 : one asked to be present at a transaction so as to be able to
 *	    testify to its having taken place
 *	4 : one who has personal knowledge of something
 *	5 a : something serving as evidence or proof : SIGN
 *	  b : public affirmation by word or example of usually
 *	      religious faith or conviction <the heroic witness to divine
 *	      life -- Pilot>
 *	6 capitalized : a member of the Jehovah's Witnesses
 */

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/vmmeter.h>
#include <sys/ktr.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/clock.h>
#include <machine/cpu.h>

#include <ddb/ddb.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>

/*
 * Internal utility macros.
 */
#define mtx_unowned(m)	((m)->mtx_lock == MTX_UNOWNED)

#define mtx_owner(m)	(mtx_unowned((m)) ? NULL \
	: (struct thread *)((m)->mtx_lock & MTX_FLAGMASK))

#define SET_PRIO(td, pri)	(td)->td_ksegrp->kg_pri.pri_level = (pri)

/*
 * Lock classes for sleep and spin mutexes.
 */
struct lock_class lock_class_mtx_sleep = {
	"sleep mutex",
	LC_SLEEPLOCK | LC_RECURSABLE
};
struct lock_class lock_class_mtx_spin = {
	"spin mutex",
	LC_SPINLOCK | LC_RECURSABLE
};

/*
 * Prototypes for non-exported routines.
 */
static void	propagate_priority(struct thread *);

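/*
 * Walk the chain of blocked threads and lock owners starting from the
 * thread passed in, lending that thread's (possibly already inherited)
 * priority to each mutex owner in turn so that the eventual holder runs
 * soon enough to release the lock (priority propagation).
 */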
static void
propagate_priority(struct thread *td)
{
	struct ksegrp *kg = td->td_ksegrp;
	int pri = kg->kg_pri.pri_level;
	struct mtx *m = td->td_blocked;

	mtx_assert(&sched_lock, MA_OWNED);
	for (;;) {
		struct thread *td1;

		td = mtx_owner(m);

		if (td == NULL) {
			/*
			 * This really isn't quite right. Really
			 * ought to bump priority of thread that
			 * next acquires the mutex.
			 */
			MPASS(m->mtx_lock == MTX_CONTESTED);
			return;
		}
		kg = td->td_ksegrp;

		MPASS(td->td_proc->p_magic == P_MAGIC);
		KASSERT(td->td_proc->p_stat != SSLEEP, ("sleeping thread owns a mutex"));
		if (kg->kg_pri.pri_level <= pri) /* lower is higher priority */
			return;

		/*
		 * Bump this thread's priority.
		 */
		SET_PRIO(td, pri);

		/*
		 * If lock holder is actually running, just bump priority.
		 */
		 /* XXXKSE this test is not sufficient */
		if (td->td_kse && (td->td_kse->ke_oncpu != NOCPU)) {
			MPASS(td->td_proc->p_stat == SRUN
			|| td->td_proc->p_stat == SZOMB
			|| td->td_proc->p_stat == SSTOP);
			return;
		}

#ifndef SMP
		/*
		 * For UP, we check that td is not curthread (this should
		 * never happen, however, as it would mean we are deadlocked).
		 */
		KASSERT(td != curthread, ("Deadlock detected"));
#endif

		/*
		 * If on run queue move to new run queue, and quit.
		 * XXXKSE this gets a lot more complicated under threads
		 * but try anyhow.
		 */
		if (td->td_proc->p_stat == SRUN) {
			MPASS(td->td_blocked == NULL);
			remrunqueue(td);
			setrunqueue(td);
			return;
		}

		/*
		 * If we aren't blocked on a mutex, we should be.
		 */
		KASSERT(td->td_proc->p_stat == SMTX, (
		    "process %d(%s):%d holds %s but isn't blocked on a mutex\n",
		    td->td_proc->p_pid, td->td_proc->p_comm, td->td_proc->p_stat,
		    m->mtx_object.lo_name));

		/*
		 * Pick up the mutex that td is blocked on.
		 */
		m = td->td_blocked;
		MPASS(m != NULL);

		/*
		 * Check if the thread needs to be moved up on
		 * the blocked chain
		 */
		if (td == TAILQ_FIRST(&m->mtx_blocked)) {
			continue;
		}

		td1 = TAILQ_PREV(td, threadqueue, td_blkq);
		if (td1->td_ksegrp->kg_pri.pri_level <= pri) {
			continue;
		}

		/*
		 * Remove thread from blocked chain and determine where
		 * it should be moved up to.  Since we know that td1 has
		 * a lower priority than td, we know that at least one
		 * thread in the chain has a lower priority and that
		 * td1 will thus not be NULL after the loop.
		 */
		TAILQ_REMOVE(&m->mtx_blocked, td, td_blkq);
		TAILQ_FOREACH(td1, &m->mtx_blocked, td_blkq) {
			MPASS(td1->td_proc->p_magic == P_MAGIC);
			if (td1->td_ksegrp->kg_pri.pri_level > pri)
				break;
		}

		MPASS(td1 != NULL);
		TAILQ_INSERT_BEFORE(td1, td, td_blkq);
		CTR4(KTR_LOCK,
		    "propagate_priority: p %p moved before %p on [%p] %s",
		    td, td1, m, m->mtx_object.lo_name);
	}
}

/*
 * Function versions of the inlined __mtx_* macros.  These are used by
 * modules and can also be called from assembly language if needed.
 */
void
_mtx_lock_flags(struct mtx *m, int opts, const char *file, int line)
{

	__mtx_lock_flags(m, opts, file, line);
}

void
_mtx_unlock_flags(struct mtx *m, int opts, const char *file, int line)
{

	__mtx_unlock_flags(m, opts, file, line);
}

void
_mtx_lock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{

	__mtx_lock_spin_flags(m, opts, file, line);
}

void
_mtx_unlock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{

	__mtx_unlock_spin_flags(m, opts, file, line);
}

/*
 * The important part of mtx_trylock{,_flags}()
 * Tries to acquire lock `m.' We do NOT handle recursion here; we assume that
 * if we're called, it's because we know we don't already own this lock.
 */
int
_mtx_trylock(struct mtx *m, int opts, const char *file, int line)
{
	int rval;

	MPASS(curthread != NULL);

	/*
	 * _mtx_trylock does not accept MTX_NOSWITCH option.
	 */
	KASSERT((opts & MTX_NOSWITCH) == 0,
	    ("mtx_trylock() called with invalid option flag(s) %d", opts));

	rval = _obtain_lock(m, curthread);

	LOCK_LOG_TRY("LOCK", &m->mtx_object, opts, rval, file, line);
	if (rval) {
		/*
		 * We do not handle recursion in _mtx_trylock; see the
		 * note at the top of the routine.
		 */
		KASSERT(!mtx_recursed(m),
		    ("mtx_trylock() called on a recursed mutex"));
		WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
	}

	return (rval);
}

/*
 * _mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock.
 *
 * We call this if the lock is either contested (i.e. we need to go to
 * sleep waiting for it), or if we need to recurse on it.
 */
void
_mtx_lock_sleep(struct mtx *m, int opts, const char *file, int line)
{
	struct thread *td = curthread;
	struct ksegrp *kg = td->td_ksegrp;

	if ((m->mtx_lock & MTX_FLAGMASK) == (uintptr_t)td) {
		m->mtx_recurse++;
		atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recursing", m);
		return;
	}

	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR4(KTR_LOCK,
		    "_mtx_lock_sleep: %s contested (lock=%p) at %s:%d",
		    m->mtx_object.lo_name, (void *)m->mtx_lock, file, line);

	while (!_obtain_lock(m, td)) {
		uintptr_t v;
		struct thread *td1;

		mtx_lock_spin(&sched_lock);
		/*
		 * Check if the lock has been released while spinning for
		 * the sched_lock.
		 */
		if ((v = m->mtx_lock) == MTX_UNOWNED) {
			mtx_unlock_spin(&sched_lock);
			continue;
		}

		/*
		 * The mutex was marked contested on release. This means that
		 * there are threads blocked on it.
		 */
		if (v == MTX_CONTESTED) {
			td1 = TAILQ_FIRST(&m->mtx_blocked);
			MPASS(td1 != NULL);
			m->mtx_lock = (uintptr_t)td | MTX_CONTESTED;

			if (td1->td_ksegrp->kg_pri.pri_level < kg->kg_pri.pri_level)
				SET_PRIO(td, td1->td_ksegrp->kg_pri.pri_level);
			mtx_unlock_spin(&sched_lock);
			return;
		}

		/*
		 * If the mutex isn't already contested and a failure occurs
		 * setting the contested bit, the mutex was either released
		 * or the state of the MTX_RECURSED bit changed.
		 */
		if ((v & MTX_CONTESTED) == 0 &&
		    !atomic_cmpset_ptr(&m->mtx_lock, (void *)v,
			(void *)(v | MTX_CONTESTED))) {
			mtx_unlock_spin(&sched_lock);
			continue;
		}

		/*
		 * We definitely must sleep for this lock.
		 */
		mtx_assert(m, MA_NOTOWNED);

#ifdef notyet
		/*
		 * If we're borrowing an interrupted thread's VM context, we
		 * must clean up before going to sleep.
		 */
		if (td->td_ithd != NULL) {
			struct ithd *it = td->td_ithd;

			if (it->it_interrupted) {
				if (LOCK_LOG_TEST(&m->mtx_object, opts))
					CTR2(KTR_LOCK,
				    "_mtx_lock_sleep: %p interrupted %p",
					    it, it->it_interrupted);
				intr_thd_fixup(it);
			}
		}
#endif

		/*
		 * Put us on the list of threads blocked on this mutex.
		 */
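		/*
		 * The queue is kept sorted by priority, so the head is always
		 * the highest-priority waiter; the first time the mutex
		 * becomes contested it is also hooked onto the owning
		 * thread's list of contested mutexes.
		 */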
		if (TAILQ_EMPTY(&m->mtx_blocked)) {
			td1 = (struct thread *)(m->mtx_lock & MTX_FLAGMASK);
			LIST_INSERT_HEAD(&td1->td_contested, m, mtx_contested);
			TAILQ_INSERT_TAIL(&m->mtx_blocked, td, td_blkq);
		} else {
			TAILQ_FOREACH(td1, &m->mtx_blocked, td_blkq)
				if (td1->td_ksegrp->kg_pri.pri_level > kg->kg_pri.pri_level)
					break;
			if (td1)
				TAILQ_INSERT_BEFORE(td1, td, td_blkq);
			else
				TAILQ_INSERT_TAIL(&m->mtx_blocked, td, td_blkq);
		}

		/*
		 * Save who we're blocked on.
		 */
		td->td_blocked = m;
		td->td_mtxname = m->mtx_object.lo_name;
		td->td_proc->p_stat = SMTX;
		propagate_priority(td);

		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR3(KTR_LOCK,
			    "_mtx_lock_sleep: p %p blocked on [%p] %s", td, m,
			    m->mtx_object.lo_name);

		td->td_proc->p_stats->p_ru.ru_nvcsw++;
		mi_switch();

		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR3(KTR_LOCK,
			  "_mtx_lock_sleep: p %p free from blocked on [%p] %s",
			  td, m, m->mtx_object.lo_name);

		mtx_unlock_spin(&sched_lock);
	}

	return;
}

/*
 * _mtx_lock_spin: the tougher part of acquiring an MTX_SPIN lock.
 *
 * This is only called if we need to actually spin for the lock. Recursion
 * is handled inline.
 */
void
_mtx_lock_spin(struct mtx *m, int opts, critical_t mtx_crit, const char *file,
	       int line)
{
	int i = 0;

	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);

	for (;;) {
		if (_obtain_lock(m, curthread))
			break;

		/* Give interrupts a chance while we spin. */
		critical_exit(mtx_crit);
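		/*
		 * Spin tightly for roughly the first million iterations,
		 * then back off with DELAY(1) per iteration; if the lock is
		 * still held after roughly five million delayed iterations
		 * (about 5 seconds), assume the owner is wedged and panic,
		 * unless the kernel debugger is active.
		 */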
		while (m->mtx_lock != MTX_UNOWNED) {
			if (i++ < 1000000)
				continue;
			if (i++ < 6000000)
				DELAY(1);
#ifdef DDB
			else if (!db_active)
#else
			else
#endif
			panic("spin lock %s held by %p for > 5 seconds",
			    m->mtx_object.lo_name, (void *)m->mtx_lock);
		}
		mtx_crit = critical_enter();
	}

	m->mtx_savecrit = mtx_crit;
	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);

	return;
}

/*
 * _mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock.
 *
 * We are only called here if the lock is recursed or contested (i.e. we
 * need to wake up a blocked thread).
 */
void
_mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
{
	struct thread *td, *td1;
	struct mtx *m1;
	int pri;
	struct ksegrp *kg;

	td = curthread;
	kg = td->td_ksegrp;

	if (mtx_recursed(m)) {
		if (--(m->mtx_recurse) == 0)
			atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p unrecurse", m);
		return;
	}

	mtx_lock_spin(&sched_lock);
	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);

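	/*
	 * The blocked queue is kept sorted by priority, so the thread at
	 * the head is the highest-priority waiter; that is the one we wake.
	 */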
	td1 = TAILQ_FIRST(&m->mtx_blocked);
	MPASS(td->td_proc->p_magic == P_MAGIC);
	MPASS(td1->td_proc->p_magic == P_MAGIC);

	TAILQ_REMOVE(&m->mtx_blocked, td1, td_blkq);

	if (TAILQ_EMPTY(&m->mtx_blocked)) {
		LIST_REMOVE(m, mtx_contested);
		_release_lock_quick(m);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p not held", m);
	} else
		atomic_store_rel_ptr(&m->mtx_lock, (void *)MTX_CONTESTED);

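	/*
	 * Recompute our own priority: it may now drop back toward our
	 * native priority, but it must stay at least as high as that of
	 * the highest-priority thread still blocked on any mutex we hold.
	 */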
	pri = PRI_MAX;
	LIST_FOREACH(m1, &td->td_contested, mtx_contested) {
		int cp = TAILQ_FIRST(&m1->mtx_blocked)->td_ksegrp->kg_pri.pri_level;
		if (cp < pri)
			pri = cp;
	}

	if (pri > kg->kg_pri.pri_native)
		pri = kg->kg_pri.pri_native;
	SET_PRIO(td, pri);

	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p contested setrunqueue %p",
		    m, td1);

	td1->td_blocked = NULL;
	td1->td_proc->p_stat = SRUN;
	setrunqueue(td1);

	if ((opts & MTX_NOSWITCH) == 0 && td1->td_ksegrp->kg_pri.pri_level < pri) {
#ifdef notyet
		if (td->td_ithd != NULL) {
			struct ithd *it = td->td_ithd;

			if (it->it_interrupted) {
				if (LOCK_LOG_TEST(&m->mtx_object, opts))
					CTR2(KTR_LOCK,
				    "_mtx_unlock_sleep: %p interrupted %p",
					    it, it->it_interrupted);
				intr_thd_fixup(it);
			}
		}
#endif
		setrunqueue(td);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR2(KTR_LOCK,
			    "_mtx_unlock_sleep: %p switching out lock=%p", m,
			    (void *)m->mtx_lock);

		td->td_proc->p_stats->p_ru.ru_nivcsw++;
		mi_switch();
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p resuming lock=%p",
			    m, (void *)m->mtx_lock);
	}

	mtx_unlock_spin(&sched_lock);

	return;
}

/*
 * All the unlocking of MTX_SPIN locks is done inline.
 * See the _rel_spin_lock() macro for the details.
 */

/*
 * The backing function for the INVARIANTS-enabled mtx_assert()
 */
#ifdef INVARIANT_SUPPORT
void
_mtx_assert(struct mtx *m, int what, const char *file, int line)
{

	if (panicstr != NULL)
		return;
	switch (what) {
	case MA_OWNED:
	case MA_OWNED | MA_RECURSED:
	case MA_OWNED | MA_NOTRECURSED:
		if (!mtx_owned(m))
			panic("mutex %s not owned at %s:%d",
			    m->mtx_object.lo_name, file, line);
		if (mtx_recursed(m)) {
			if ((what & MA_NOTRECURSED) != 0)
				panic("mutex %s recursed at %s:%d",
				    m->mtx_object.lo_name, file, line);
		} else if ((what & MA_RECURSED) != 0) {
			panic("mutex %s unrecursed at %s:%d",
			    m->mtx_object.lo_name, file, line);
		}
		break;
	case MA_NOTOWNED:
		if (mtx_owned(m))
			panic("mutex %s owned at %s:%d",
			    m->mtx_object.lo_name, file, line);
		break;
	default:
		panic("unknown mtx_assert at %s:%d", file, line);
	}
}
#endif

/*
 * The MUTEX_DEBUG-enabled mtx_validate()
 *
 * Most of these checks have been moved off into the LO_INITIALIZED flag
 * maintained by the witness code.
 */
#ifdef MUTEX_DEBUG

void	mtx_validate __P((struct mtx *));

void
mtx_validate(struct mtx *m)
{

/*
 * XXX - When kernacc() is fixed on the alpha to handle K0_SEG memory properly
 * we can re-enable the kernacc() checks.
 */
#ifndef __alpha__
	/*
	 * Can't call kernacc() from early init386(), especially when
	 * initializing Giant mutex, because some stuff in kernacc()
	 * requires Giant itself.
	 */
	if (!cold)
		if (!kernacc((caddr_t)m, sizeof(*m),
		    VM_PROT_READ | VM_PROT_WRITE))
			panic("Can't read and write to mutex %p", m);
#endif
}
#endif

/*
 * Mutex initialization routine; initialize lock `m' with the type and
 * options contained in `opts' and description `description.'
 */
void
mtx_init(struct mtx *m, const char *description, int opts)
{
	struct lock_object *lock;

	MPASS((opts & ~(MTX_SPIN | MTX_QUIET | MTX_RECURSE |
	    MTX_SLEEPABLE | MTX_NOWITNESS)) == 0);

#ifdef MUTEX_DEBUG
	/* Diagnostic and error correction */
	mtx_validate(m);
#endif

	bzero(m, sizeof(*m));
	lock = &m->mtx_object;
	if (opts & MTX_SPIN)
		lock->lo_class = &lock_class_mtx_spin;
	else
		lock->lo_class = &lock_class_mtx_sleep;
	lock->lo_name = description;
	if (opts & MTX_QUIET)
		lock->lo_flags = LO_QUIET;
	if (opts & MTX_RECURSE)
		lock->lo_flags |= LO_RECURSABLE;
	if (opts & MTX_SLEEPABLE)
		lock->lo_flags |= LO_SLEEPABLE;
	if ((opts & MTX_NOWITNESS) == 0)
		lock->lo_flags |= LO_WITNESS;

	m->mtx_lock = MTX_UNOWNED;
	TAILQ_INIT(&m->mtx_blocked);

	LOCK_LOG_INIT(lock, opts);

	WITNESS_INIT(lock);
}

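/*
 * A minimal usage sketch of the mutex interface provided by this file
 * (illustrative only; `foo_softc', `foo_mtx', and the surrounding driver
 * code are hypothetical):
 *
 *	struct foo_softc {
 *		struct mtx	foo_mtx;
 *		int		foo_count;
 *	};
 *
 *	mtx_init(&sc->foo_mtx, "foo lock", MTX_DEF);
 *	...
 *	mtx_lock(&sc->foo_mtx);
 *	sc->foo_count++;
 *	mtx_unlock(&sc->foo_mtx);
 *	...
 *	mtx_destroy(&sc->foo_mtx);
 */
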
/*
 * Destroy lock `m'.  We don't allow MTX_QUIET to be passed in as a flag
 * here because if the corresponding mtx_init() was called with MTX_QUIET
 * set, then it will already be set in the mutex's flags.
 */
void
mtx_destroy(struct mtx *m)
{

	LOCK_LOG_DESTROY(&m->mtx_object, 0);

	if (!mtx_owned(m))
		MPASS(mtx_unowned(m));
	else {
		MPASS((m->mtx_lock & (MTX_RECURSED|MTX_CONTESTED)) == 0);

		/* Tell witness this isn't locked to make it happy. */
		WITNESS_UNLOCK(&m->mtx_object, LOP_EXCLUSIVE | LOP_NOSWITCH,
		    __FILE__, __LINE__);
	}

	WITNESS_DESTROY(&m->mtx_object);
}