subr_turnstile.c revision 86411
/*-
 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 *	and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 * $FreeBSD: head/sys/kern/subr_turnstile.c 86411 2001-11-15 19:08:55Z jhb $
 */

/*
 * Machine independent bits of mutex implementation.
 */

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/vmmeter.h>
#include <sys/ktr.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/clock.h>
#include <machine/cpu.h>

#include <ddb/ddb.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>

/*
 * Internal utility macros.
 */
#define mtx_unowned(m)	((m)->mtx_lock == MTX_UNOWNED)

#define mtx_owner(m)	(mtx_unowned((m)) ? NULL \
	: (struct thread *)((m)->mtx_lock & MTX_FLAGMASK))

#define SET_PRIO(td, pri)	(td)->td_ksegrp->kg_pri.pri_level = (pri)
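
/*
 * Note on the encoding (a sketch, not additional definitions): the lock
 * word holds the owning thread pointer with the MTX_RECURSED and
 * MTX_CONTESTED flags or'd into its low bits, so anding with
 * MTX_FLAGMASK strips the flags and recovers the pointer.  Roughly:
 *
 *	mtx_lock == MTX_UNOWNED			=> mtx_owner() == NULL
 *	mtx_lock == (td | MTX_CONTESTED)	=> mtx_owner() == td
 */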

/*
 * Lock classes for sleep and spin mutexes.
 */
struct lock_class lock_class_mtx_sleep = {
	"sleep mutex",
	LC_SLEEPLOCK | LC_RECURSABLE
};
struct lock_class lock_class_mtx_spin = {
	"spin mutex",
	LC_SPINLOCK | LC_RECURSABLE
};

/*
 * Prototypes for non-exported routines.
 */
static void	propagate_priority(struct thread *);

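/*
 * Propagate the priority of a thread that has just blocked on a mutex to
 * the current owner of that mutex and, transitively, to whatever thread
 * that owner is itself blocked behind.  Each owner found with a worse
 * (numerically larger) priority is bumped to the blocked thread's
 * priority and, depending on its state, either requeued on its run queue
 * or re-sorted within the priority-ordered blocked queue of the mutex it
 * is sleeping on.  Expects sched_lock to be held by the caller.
 */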
static void
propagate_priority(struct thread *td)
{
	struct ksegrp *kg = td->td_ksegrp;
	int pri = kg->kg_pri.pri_level;
	struct mtx *m = td->td_blocked;

	mtx_assert(&sched_lock, MA_OWNED);
	for (;;) {
		struct thread *td1;

		td = mtx_owner(m);

		if (td == NULL) {
			/*
			 * This really isn't quite right. Really
			 * ought to bump priority of thread that
			 * next acquires the mutex.
			 */
			MPASS(m->mtx_lock == MTX_CONTESTED);
			return;
		}
		kg = td->td_ksegrp;

		MPASS(td->td_proc->p_magic == P_MAGIC);
		KASSERT(td->td_proc->p_stat != SSLEEP, ("sleeping thread owns a mutex"));
		if (kg->kg_pri.pri_level <= pri) /* lower is higher priority */
			return;

		/*
		 * Bump this thread's priority.
		 */
		SET_PRIO(td, pri);

		/*
		 * If lock holder is actually running, just bump priority.
		 */
		/* XXXKSE this test is not sufficient */
		if (td->td_kse && (td->td_kse->ke_oncpu != NOCPU)) {
			MPASS(td->td_proc->p_stat == SRUN
			|| td->td_proc->p_stat == SZOMB
			|| td->td_proc->p_stat == SSTOP);
			return;
		}

#ifndef SMP
		/*
		 * For UP, we check to see if td is curthread (this shouldn't
		 * ever happen, however, as it would mean we are in a
		 * deadlock).
		 */
		KASSERT(td != curthread, ("Deadlock detected"));
#endif

		/*
		 * If on run queue move to new run queue, and quit.
		 * XXXKSE this gets a lot more complicated under threads
		 * but try anyhow.
		 */
		if (td->td_proc->p_stat == SRUN) {
			MPASS(td->td_blocked == NULL);
			remrunqueue(td);
			setrunqueue(td);
			return;
		}

		/*
		 * If we aren't blocked on a mutex, we should be.
		 */
		KASSERT(td->td_proc->p_stat == SMTX, (
		    "process %d(%s):%d holds %s but isn't blocked on a mutex\n",
		    td->td_proc->p_pid, td->td_proc->p_comm, td->td_proc->p_stat,
		    m->mtx_object.lo_name));

		/*
		 * Pick up the mutex that td is blocked on.
		 */
		m = td->td_blocked;
		MPASS(m != NULL);

		/*
		 * Check if the thread needs to be moved up on
		 * the blocked chain
		 */
		if (td == TAILQ_FIRST(&m->mtx_blocked)) {
			continue;
		}

		td1 = TAILQ_PREV(td, threadqueue, td_blkq);
		if (td1->td_ksegrp->kg_pri.pri_level <= pri) {
			continue;
		}

		/*
		 * Remove thread from blocked chain and determine where
		 * it should be moved up to.  Since we know that td1 has
		 * a lower priority than td, we know that at least one
		 * thread in the chain has a lower priority and that
		 * td1 will thus not be NULL after the loop.
		 */
		TAILQ_REMOVE(&m->mtx_blocked, td, td_blkq);
		TAILQ_FOREACH(td1, &m->mtx_blocked, td_blkq) {
			MPASS(td1->td_proc->p_magic == P_MAGIC);
			if (td1->td_ksegrp->kg_pri.pri_level > pri)
				break;
		}

		MPASS(td1 != NULL);
		TAILQ_INSERT_BEFORE(td1, td, td_blkq);
		CTR4(KTR_LOCK,
		    "propagate_priority: p %p moved before %p on [%p] %s",
		    td, td1, m, m->mtx_object.lo_name);
	}
}

/*
 * Function versions of the inlined __mtx_* macros.  These are used by
 * modules and can also be called from assembly language if needed.
 */
void
_mtx_lock_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	KASSERT((opts & MTX_NOSWITCH) == 0,
	    ("MTX_NOSWITCH used at %s:%d", file, line));
	_get_sleep_lock(m, curthread, opts, file, line);
	LOCK_LOG_LOCK("LOCK", &m->mtx_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
}

void
_mtx_unlock_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	mtx_assert(m, MA_OWNED);
	WITNESS_UNLOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->mtx_object, opts, m->mtx_recurse, file,
	    line);
	_rel_sleep_lock(m, curthread, opts, file, line);
}

void
_mtx_lock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	_get_spin_lock(m, curthread, opts, file, line);
	LOCK_LOG_LOCK("LOCK", &m->mtx_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
}

void
_mtx_unlock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	mtx_assert(m, MA_OWNED);
	WITNESS_UNLOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->mtx_object, opts, m->mtx_recurse, file,
	    line);
	_rel_spin_lock(m);
}

/*
 * The important part of mtx_trylock{,_flags}()
 * Tries to acquire lock `m.' We do NOT handle recursion here; we assume that
 * if we're called, it's because we know we don't already own this lock.
 */
int
_mtx_trylock(struct mtx *m, int opts, const char *file, int line)
{
	int rval;

	MPASS(curthread != NULL);

	/*
	 * _mtx_trylock does not accept MTX_NOSWITCH option.
	 */
	KASSERT((opts & MTX_NOSWITCH) == 0,
	    ("mtx_trylock() called with invalid option flag(s) %d", opts));

	rval = _obtain_lock(m, curthread);

	LOCK_LOG_TRY("LOCK", &m->mtx_object, opts, rval, file, line);
	if (rval) {
		/*
		 * We do not handle recursion in _mtx_trylock; see the
		 * note at the top of the routine.
		 */
		KASSERT(!mtx_recursed(m),
		    ("mtx_trylock() called on a recursed mutex"));
		WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
	}

	return (rval);
}
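
/*
 * Typical use of the trylock interface (a minimal sketch; the lock name
 * and the work done under it are made up for illustration):
 *
 *	if (mtx_trylock(&foo_mtx)) {
 *		... touch the data protected by foo_mtx ...
 *		mtx_unlock(&foo_mtx);
 *	} else {
 *		... take a slow path that does not block ...
 *	}
 *
 * Recursive acquisition is not handled here, so this pattern assumes the
 * caller does not already hold the lock (see the note above).
 */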

/*
 * _mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock.
 *
 * We call this if the lock is either contested (i.e. we need to go to
 * sleep waiting for it), or if we need to recurse on it.
 */
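/*
 * A rough sketch of the lock word transitions handled here and in
 * _mtx_unlock_sleep() below, where td is the acquiring or owning thread:
 *
 *	MTX_UNOWNED        -> td                 uncontested acquire
 *	td                 -> td | MTX_CONTESTED a waiter sets the bit
 *	td | MTX_CONTESTED -> MTX_CONTESTED      release with waiters left
 *	MTX_CONTESTED      -> td | MTX_CONTESTED a contender claims the lock
 *	td | MTX_CONTESTED -> MTX_UNOWNED        release, last waiter taken
 */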
void
_mtx_lock_sleep(struct mtx *m, int opts, const char *file, int line)
{
	struct thread *td = curthread;
	struct ksegrp *kg = td->td_ksegrp;

	if ((m->mtx_lock & MTX_FLAGMASK) == (uintptr_t)td) {
		m->mtx_recurse++;
		atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recursing", m);
		return;
	}

	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR4(KTR_LOCK,
		    "_mtx_lock_sleep: %s contested (lock=%p) at %s:%d",
		    m->mtx_object.lo_name, (void *)m->mtx_lock, file, line);

	while (!_obtain_lock(m, td)) {
		uintptr_t v;
		struct thread *td1;

		mtx_lock_spin(&sched_lock);
		/*
		 * Check if the lock has been released while spinning for
		 * the sched_lock.
		 */
		if ((v = m->mtx_lock) == MTX_UNOWNED) {
			mtx_unlock_spin(&sched_lock);
			continue;
		}

		/*
		 * The mutex was marked contested on release. This means that
		 * there are threads blocked on it.
		 */
		if (v == MTX_CONTESTED) {
			td1 = TAILQ_FIRST(&m->mtx_blocked);
			MPASS(td1 != NULL);
			m->mtx_lock = (uintptr_t)td | MTX_CONTESTED;

			if (td1->td_ksegrp->kg_pri.pri_level < kg->kg_pri.pri_level)
				SET_PRIO(td, td1->td_ksegrp->kg_pri.pri_level);
			mtx_unlock_spin(&sched_lock);
			return;
		}

		/*
		 * If the mutex isn't already contested and a failure occurs
		 * setting the contested bit, the mutex was either released
		 * or the state of the MTX_RECURSED bit changed.
		 */
		if ((v & MTX_CONTESTED) == 0 &&
		    !atomic_cmpset_ptr(&m->mtx_lock, (void *)v,
			(void *)(v | MTX_CONTESTED))) {
			mtx_unlock_spin(&sched_lock);
			continue;
		}

		/*
		 * We definitely must sleep for this lock.
		 */
		mtx_assert(m, MA_NOTOWNED);

#ifdef notyet
		/*
		 * If we're borrowing an interrupted thread's VM context, we
		 * must clean up before going to sleep.
		 */
		if (td->td_ithd != NULL) {
			struct ithd *it = td->td_ithd;

			if (it->it_interrupted) {
				if (LOCK_LOG_TEST(&m->mtx_object, opts))
					CTR2(KTR_LOCK,
				    "_mtx_lock_sleep: %p interrupted %p",
					    it, it->it_interrupted);
				intr_thd_fixup(it);
			}
		}
#endif

		/*
		 * Put us on the list of threads blocked on this mutex.
		 */
		if (TAILQ_EMPTY(&m->mtx_blocked)) {
			td1 = (struct thread *)(m->mtx_lock & MTX_FLAGMASK);
			LIST_INSERT_HEAD(&td1->td_contested, m, mtx_contested);
			TAILQ_INSERT_TAIL(&m->mtx_blocked, td, td_blkq);
		} else {
			TAILQ_FOREACH(td1, &m->mtx_blocked, td_blkq)
				if (td1->td_ksegrp->kg_pri.pri_level > kg->kg_pri.pri_level)
					break;
			if (td1)
				TAILQ_INSERT_BEFORE(td1, td, td_blkq);
			else
				TAILQ_INSERT_TAIL(&m->mtx_blocked, td, td_blkq);
		}

		/*
		 * Save who we're blocked on.
		 */
		td->td_blocked = m;
		td->td_mtxname = m->mtx_object.lo_name;
		td->td_proc->p_stat = SMTX;
		propagate_priority(td);

		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR3(KTR_LOCK,
			    "_mtx_lock_sleep: p %p blocked on [%p] %s", td, m,
			    m->mtx_object.lo_name);

		td->td_proc->p_stats->p_ru.ru_nvcsw++;
		mi_switch();

		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR3(KTR_LOCK,
			  "_mtx_lock_sleep: p %p free from blocked on [%p] %s",
			  td, m, m->mtx_object.lo_name);

		mtx_unlock_spin(&sched_lock);
	}

	return;
}

/*
 * _mtx_lock_spin: the tougher part of acquiring an MTX_SPIN lock.
 *
 * This is only called if we need to actually spin for the lock. Recursion
 * is handled inline.
 */
void
_mtx_lock_spin(struct mtx *m, int opts, critical_t mtx_crit, const char *file,
	       int line)
{
	int i = 0;

	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);

	for (;;) {
		if (_obtain_lock(m, curthread))
			break;

		/* Give interrupts a chance while we spin. */
		critical_exit(mtx_crit);
		while (m->mtx_lock != MTX_UNOWNED) {
			if (i++ < 1000000)
				continue;
			if (i++ < 6000000)
				DELAY(1);
#ifdef DDB
			else if (!db_active)
#else
			else
#endif
			panic("spin lock %s held by %p for > 5 seconds",
			    m->mtx_object.lo_name, (void *)m->mtx_lock);
		}
		mtx_crit = critical_enter();
	}

	m->mtx_savecrit = mtx_crit;
	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);

	return;
}

/*
 * _mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock.
 *
 * We are only called here if the lock is recursed or contested (i.e. we
 * need to wake up a blocked thread).
 */
void
_mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
{
	struct thread *td, *td1;
	struct mtx *m1;
	int pri;
	struct ksegrp *kg;

	td = curthread;
	kg = td->td_ksegrp;

	if (mtx_recursed(m)) {
		if (--(m->mtx_recurse) == 0)
			atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p unrecurse", m);
		return;
	}

	mtx_lock_spin(&sched_lock);
	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);

	td1 = TAILQ_FIRST(&m->mtx_blocked);
	MPASS(td->td_proc->p_magic == P_MAGIC);
	MPASS(td1->td_proc->p_magic == P_MAGIC);

	TAILQ_REMOVE(&m->mtx_blocked, td1, td_blkq);

	if (TAILQ_EMPTY(&m->mtx_blocked)) {
		LIST_REMOVE(m, mtx_contested);
		_release_lock_quick(m);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p not held", m);
	} else
		atomic_store_rel_ptr(&m->mtx_lock, (void *)MTX_CONTESTED);

	pri = PRI_MAX;
	LIST_FOREACH(m1, &td->td_contested, mtx_contested) {
		int cp = TAILQ_FIRST(&m1->mtx_blocked)->td_ksegrp->kg_pri.pri_level;
		if (cp < pri)
			pri = cp;
	}

	if (pri > kg->kg_pri.pri_native)
		pri = kg->kg_pri.pri_native;
	SET_PRIO(td, pri);

	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p contested setrunqueue %p",
		    m, td1);

	td1->td_blocked = NULL;
	td1->td_proc->p_stat = SRUN;
	setrunqueue(td1);

	if ((opts & MTX_NOSWITCH) == 0 && td1->td_ksegrp->kg_pri.pri_level < pri) {
#ifdef notyet
		if (td->td_ithd != NULL) {
			struct ithd *it = td->td_ithd;

			if (it->it_interrupted) {
				if (LOCK_LOG_TEST(&m->mtx_object, opts))
					CTR2(KTR_LOCK,
				    "_mtx_unlock_sleep: %p interrupted %p",
					    it, it->it_interrupted);
				intr_thd_fixup(it);
			}
		}
#endif
		setrunqueue(td);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR2(KTR_LOCK,
			    "_mtx_unlock_sleep: %p switching out lock=%p", m,
			    (void *)m->mtx_lock);

		td->td_proc->p_stats->p_ru.ru_nivcsw++;
		mi_switch();
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p resuming lock=%p",
			    m, (void *)m->mtx_lock);
	}

	mtx_unlock_spin(&sched_lock);

	return;
}

/*
 * All the unlocking of MTX_SPIN locks is done inline.
 * See the _rel_spin_lock() macro for the details.
 */

/*
 * The backing function for the INVARIANTS-enabled mtx_assert()
 */
#ifdef INVARIANT_SUPPORT
void
_mtx_assert(struct mtx *m, int what, const char *file, int line)
{

	if (panicstr != NULL)
		return;
	switch (what) {
	case MA_OWNED:
	case MA_OWNED | MA_RECURSED:
	case MA_OWNED | MA_NOTRECURSED:
		if (!mtx_owned(m))
			panic("mutex %s not owned at %s:%d",
			    m->mtx_object.lo_name, file, line);
		if (mtx_recursed(m)) {
			if ((what & MA_NOTRECURSED) != 0)
				panic("mutex %s recursed at %s:%d",
				    m->mtx_object.lo_name, file, line);
		} else if ((what & MA_RECURSED) != 0) {
			panic("mutex %s unrecursed at %s:%d",
			    m->mtx_object.lo_name, file, line);
		}
		break;
	case MA_NOTOWNED:
		if (mtx_owned(m))
			panic("mutex %s owned at %s:%d",
			    m->mtx_object.lo_name, file, line);
		break;
	default:
		panic("unknown mtx_assert at %s:%d", file, line);
	}
}
#endif

/*
 * The MUTEX_DEBUG-enabled mtx_validate()
 *
 * Most of these checks have been moved off into the LO_INITIALIZED flag
 * maintained by the witness code.
 */
#ifdef MUTEX_DEBUG

void	mtx_validate __P((struct mtx *));

void
mtx_validate(struct mtx *m)
{

/*
 * XXX - When kernacc() is fixed on the alpha to handle K0_SEG memory properly
 * we can re-enable the kernacc() checks.
 */
#ifndef __alpha__
	/*
	 * Can't call kernacc() from early init386(), especially when
	 * initializing Giant mutex, because some stuff in kernacc()
	 * requires Giant itself.
	 */
	if (!cold)
		if (!kernacc((caddr_t)m, sizeof(m),
		    VM_PROT_READ | VM_PROT_WRITE))
			panic("Can't read and write to mutex %p", m);
#endif
}
#endif

/*
 * Mutex initialization routine; initialize lock `m' with the type and
 * options contained in `opts' and with description `description.'
 */
void
mtx_init(struct mtx *m, const char *description, int opts)
{
	struct lock_object *lock;

	MPASS((opts & ~(MTX_SPIN | MTX_QUIET | MTX_RECURSE |
	    MTX_SLEEPABLE | MTX_NOWITNESS)) == 0);

#ifdef MUTEX_DEBUG
	/* Diagnostic and error correction */
	mtx_validate(m);
#endif

	lock = &m->mtx_object;
	KASSERT((lock->lo_flags & LO_INITIALIZED) == 0,
	    ("mutex %s %p already initialized", description, m));
	bzero(m, sizeof(*m));
	if (opts & MTX_SPIN)
		lock->lo_class = &lock_class_mtx_spin;
	else
		lock->lo_class = &lock_class_mtx_sleep;
	lock->lo_name = description;
	if (opts & MTX_QUIET)
		lock->lo_flags = LO_QUIET;
	if (opts & MTX_RECURSE)
		lock->lo_flags |= LO_RECURSABLE;
	if (opts & MTX_SLEEPABLE)
		lock->lo_flags |= LO_SLEEPABLE;
	if ((opts & MTX_NOWITNESS) == 0)
		lock->lo_flags |= LO_WITNESS;

	m->mtx_lock = MTX_UNOWNED;
	TAILQ_INIT(&m->mtx_blocked);

	LOCK_LOG_INIT(lock, opts);

	WITNESS_INIT(lock);
}
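
/*
 * A minimal usage sketch of the mutex life cycle (the lock name, the
 * MTX_DEF type flag for a default sleep mutex, and the code run under
 * the lock are illustrative assumptions, not part of this file):
 *
 *	static struct mtx foo_mtx;
 *
 *	mtx_init(&foo_mtx, "foo", MTX_DEF);
 *	...
 *	mtx_lock(&foo_mtx);
 *	... access the data protected by foo_mtx ...
 *	mtx_unlock(&foo_mtx);
 *	...
 *	mtx_destroy(&foo_mtx);
 */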

/*
 * Destroy lock `m'.  We don't allow MTX_QUIET to be passed in as a
 * flag here because if the corresponding mtx_init() was called with
 * MTX_QUIET set, then it will already be set in the mutex's flags.
 */
void
mtx_destroy(struct mtx *m)
{

	LOCK_LOG_DESTROY(&m->mtx_object, 0);

	if (!mtx_owned(m))
		MPASS(mtx_unowned(m));
	else {
		MPASS((m->mtx_lock & (MTX_RECURSED|MTX_CONTESTED)) == 0);

		/* Tell witness this isn't locked to make it happy. */
		WITNESS_UNLOCK(&m->mtx_object, LOP_EXCLUSIVE | LOP_NOSWITCH,
		    __FILE__, __LINE__);
	}

	WITNESS_DESTROY(&m->mtx_object);
}

/*
 * Encapsulated Giant mutex routines.  These routines provide encapsulation
 * control for the Giant mutex, allowing sysctls to be used to turn on and
 * off Giant around certain subsystems.  The default values of the sysctls
 * are set to what developers believe is stable and working with regard to
 * the Giant pushdown.  Developers should not turn off Giant via these
 * sysctls unless they know what they are doing.
 *
 * Callers of mtx_lock_giant() are expected to pass the return value to an
 * accompanying mtx_unlock_giant() later on.  If multiple subsystems are
 * affected by a Giant wrap, all related sysctl variables must be zero for
 * the subsystem call to operate without Giant (as determined by the caller).
 */

SYSCTL_NODE(_kern, OID_AUTO, giant, CTLFLAG_RD, NULL, "Giant mutex manipulation");

static int kern_giant_all = 0;
SYSCTL_INT(_kern_giant, OID_AUTO, all, CTLFLAG_RW, &kern_giant_all, 0, "");

int kern_giant_proc = 1;	/* Giant around PROC locks */
int kern_giant_file = 1;	/* Giant around struct file & filedesc */
SYSCTL_INT(_kern_giant, OID_AUTO, proc, CTLFLAG_RW, &kern_giant_proc, 0, "");
SYSCTL_INT(_kern_giant, OID_AUTO, file, CTLFLAG_RW, &kern_giant_file, 0, "");

int
mtx_lock_giant(int sysctlvar)
{
	if (sysctlvar || kern_giant_all) {
		mtx_lock(&Giant);
		return(1);
	}
	return(0);
}

void
mtx_unlock_giant(int s)
{
	if (s)
		mtx_unlock(&Giant);
}
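
/*
 * Example of wrapping a subsystem call with the interface above (a
 * sketch; the code between the calls is made up for illustration):
 *
 *	int s;
 *
 *	s = mtx_lock_giant(kern_giant_file);
 *	... work on file/filedesc structures ...
 *	mtx_unlock_giant(s);
 *
 * When both kern.giant.file and kern.giant.all are zero the pair is a
 * no-op, and the code in between runs without Giant.
 */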